path: string (7–265 chars)
concatenated_notebook: string (46 chars–17M)
Machine Learning Projects/Useful_Code_Examples/Data_Preprocessing.ipynb
###Markdown Data Preprocessing ###Code import numpy as np import matplotlib.pyplot as plt import pandas as pd ###Output _____no_output_____ ###Markdown Data Loading ###Code PATH = "../../Data_Preprocessing/Python/Data.csv" dataset = pd.read_csv(PATH) X = dataset.iloc[:, :-1].values y = dataset.iloc[:, -1].values print(X) print(y) ###Output ['No' 'Yes' 'No' 'No' 'Yes' 'Yes' 'No' 'Yes' 'No' 'Yes'] ###Markdown Missing Values ###Code from sklearn.impute import SimpleImputer imputer = SimpleImputer(missing_values=np.nan, strategy='mean') imputer.fit(X[:, 1:]) X[:, 1:] = imputer.transform(X[:, 1:]) print(X) ###Output [['France' 44.0 72000.0] ['Spain' 27.0 48000.0] ['Germany' 30.0 54000.0] ['Spain' 38.0 61000.0] ['Germany' 40.0 63777.77777777778] ['France' 35.0 58000.0] ['Spain' 38.77777777777778 52000.0] ['France' 48.0 79000.0] ['Germany' 50.0 83000.0] ['France' 37.0 67000.0]] ###Markdown One-Hot Encoding ###Code from sklearn.compose import ColumnTransformer from sklearn.preprocessing import OneHotEncoder ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [0])], remainder='passthrough') X = np.array(ct.fit_transform(X)) print(X) ###Output [[1.0 0.0 0.0 44.0 72000.0] [0.0 0.0 1.0 27.0 48000.0] [0.0 1.0 0.0 30.0 54000.0] [0.0 0.0 1.0 38.0 61000.0] [0.0 1.0 0.0 40.0 63777.77777777778] [1.0 0.0 0.0 35.0 58000.0] [0.0 0.0 1.0 38.77777777777778 52000.0] [1.0 0.0 0.0 48.0 79000.0] [0.0 1.0 0.0 50.0 83000.0] [1.0 0.0 0.0 37.0 67000.0]] ###Markdown Label Encoding ###Code from sklearn.preprocessing import LabelEncoder le = LabelEncoder() y = le.fit_transform(y) print(y) ###Output [0 1 0 0 1 1 0 1 0 1] ###Markdown Train Test Split ###Code from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42) print(X_train) print(X_test) print(y_train) print(y_test) ###Output [0 1] ###Markdown Feature Scaling (Optional) ###Code from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train[:, 3:] = sc.fit_transform(X_train[:, 3:]) X_test[:, 3:] = sc.transform(X_test[:, 3:]) print(X_train) print(X_test) ###Output [[0.0 1.0 0.0 2.1827180802863797 2.3008920936249107] [0.0 0.0 1.0 -2.3186282969916334 -1.7968097268236927]]
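###Markdown A minimal sketch (not part of the notebook above) showing the same preprocessing wrapped in a single scikit-learn pipeline, assuming the `Data.csv` layout shown above (categorical column 0, numeric columns 1–2, label last); this way the imputer and scaler statistics are fit only on the training split. ###Code
# Sketch only: one preprocessing pipeline fit on the training data and reused on the test data.
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler

dataset = pd.read_csv("../../Data_Preprocessing/Python/Data.csv")  # same file as above
X = dataset.iloc[:, :-1]
y = LabelEncoder().fit_transform(dataset.iloc[:, -1])

numeric_steps = Pipeline([
    ('impute', SimpleImputer(missing_values=np.nan, strategy='mean')),
    ('scale', StandardScaler()),
])
preprocess = ColumnTransformer([
    ('encode', OneHotEncoder(), [0]),    # categorical country column
    ('numeric', numeric_steps, [1, 2]),  # numeric age and salary columns
])

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42)
X_train = preprocess.fit_transform(X_train)  # statistics learned from the training split only
X_test = preprocess.transform(X_test)        # reused, never re-fit, on the test split
print(X_train)
print(X_test)
###Output _____no_output_____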
CNN And Transfer LEarning.ipynb
###Markdown Creating CNN Using Scratch And Transfer Learning Please download the dataset from the below url ###Code # import the libraries as shown below from tensorflow.keras.layers import Input, Lambda, Dense, Flatten,Conv2D from tensorflow.keras.models import Model from tensorflow.keras.applications.vgg19 import VGG19 from tensorflow.keras.applications.resnet50 import preprocess_input from tensorflow.keras.preprocessing import image from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img from tensorflow.keras.models import Sequential import numpy as np from glob import glob import matplotlib.pyplot as plt # re-size all the images to this IMAGE_SIZE = [224, 224] train_path = 'cell_images/Train' valid_path = 'cell_images/Test' # useful for getting number of output classes folders = glob('Dataset/Train/*') folders from tensorflow.keras.layers import MaxPooling2D ### Create Model from scratch using CNN model=Sequential() model.add(Conv2D(filters=16,kernel_size=2,padding="same",activation="relu",input_shape=(224,224,3))) model.add(MaxPooling2D(pool_size=2)) model.add(Conv2D(filters=32,kernel_size=2,padding="same",activation ="relu")) model.add(MaxPooling2D(pool_size=2)) model.add(Conv2D(filters=64,kernel_size=2,padding="same",activation="relu")) model.add(MaxPooling2D(pool_size=2)) model.add(Flatten()) model.add(Dense(500,activation="relu")) model.add(Dense(2,activation="softmax")) model.summary() # tell the model what cost and optimization method to use model.compile( loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'] ) # Use the Image Data Generator to import the images from the dataset from tensorflow.keras.preprocessing.image import ImageDataGenerator train_datagen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True) test_datagen = ImageDataGenerator(rescale = 1./255) # Make sure you provide the same target size as initialied for the image size training_set = train_datagen.flow_from_directory('Dataset/Train', target_size = (224, 224), batch_size = 32, class_mode = 'categorical') training_set test_set = test_datagen.flow_from_directory('Dataset/Test', target_size = (224, 224), batch_size = 32, class_mode = 'categorical') # fit the model # Run the cell. 
It will take some time to execute r = model.fit_generator( training_set, validation_data=test_set, epochs=50, steps_per_epoch=len(training_set), validation_steps=len(test_set) ) # plot the loss plt.plot(r.history['loss'], label='train loss') plt.plot(r.history['val_loss'], label='val loss') plt.legend() plt.show() plt.savefig('LossVal_loss') # plot the accuracy plt.plot(r.history['accuracy'], label='train acc') plt.plot(r.history['val_accuracy'], label='val acc') plt.legend() plt.show() plt.savefig('AccVal_acc') # save it as a h5 file from tensorflow.keras.models import load_model model.save('model_vgg19.h5') y_pred = model.predict(test_set) y_pred import numpy as np y_pred = np.argmax(y_pred, axis=1) y_pred from tensorflow.keras.models import load_model from tensorflow.keras.preprocessing import image model=load_model('model_vgg19.h5') img=image.load_img('Dataset/Test/Uninfected/2.png',target_size=(224,224)) x=image.img_to_array(img) x x.shape x=x/255 x=np.expand_dims(x,axis=0) img_data=preprocess_input(x) img_data.shape model.predict(img_data) a=np.argmax(model.predict(img_data), axis=1) if(a==1): print("Uninfected") else: print("Infected") ###Output Uninfected ###Markdown Creating CNN Using Scratch And Transfer Learning Please download the dataset from the below url ###Code # import the libraries as shown below from tensorflow.keras.layers import Input, Lambda, Dense, Flatten,Conv2D from tensorflow.keras.models import Model from tensorflow.keras.applications.vgg19 import VGG19 from tensorflow.keras.applications.resnet50 import preprocess_input from tensorflow.keras.preprocessing import image from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img from tensorflow.keras.models import Sequential import numpy as np from glob import glob import matplotlib.pyplot as plt # re-size all the images to this IMAGE_SIZE = [224, 224] train_path = 'Dataset/Train' valid_path = 'Dataset/Test' # Import the VGG19 library as shown below and add preprocessing layer to the front of VGG # Here we will be using imagenet weights vgg19 = VGG19(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False) vgg19.summary() # don't train existing weights for layer in vgg19.layers: layer.trainable = False # useful for getting number of output classes folders = glob('Dataset/Train/*') folders # our layers - you can add more if you want x = Flatten()(vgg19.output) prediction = Dense(len(folders), activation='softmax')(x) # create a model object model = Model(inputs=vgg19.input, outputs=prediction) # view the structure of the model model.summary() from tensorflow.keras.layers import MaxPooling2D ### Create Model from scratch using CNN model=Sequential() model.add(Conv2D(filters=16,kernel_size=2,padding="same",activation="relu",input_shape=(224,224,3))) model.add(MaxPooling2D(pool_size=2)) model.add(Conv2D(filters=32,kernel_size=2,padding="same",activation ="relu")) model.add(MaxPooling2D(pool_size=2)) model.add(Conv2D(filters=64,kernel_size=2,padding="same",activation="relu")) model.add(MaxPooling2D(pool_size=2)) model.add(Flatten()) model.add(Dense(500,activation="relu")) model.add(Dense(2,activation="softmax")) model.summary() # tell the model what cost and optimization method to use model.compile( loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'] ) # Use the Image Data Generator to import the images from the dataset from tensorflow.keras.preprocessing.image import ImageDataGenerator train_datagen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range =
0.2, horizontal_flip = True) test_datagen = ImageDataGenerator(rescale = 1./255) # Make sure you provide the same target size as initialied for the image size training_set = train_datagen.flow_from_directory('Dataset/Train', target_size = (224, 224), batch_size = 32, class_mode = 'categorical') training_set test_set = test_datagen.flow_from_directory('Dataset/Test', target_size = (224, 224), batch_size = 32, class_mode = 'categorical') # fit the model # Run the cell. It will take some time to execute r = model.fit( training_set, validation_data=test_set, epochs=10, steps_per_epoch=len(training_set), validation_steps=len(test_set) ) # plot the loss plt.plot(r.history['loss'], label='train loss') plt.plot(r.history['val_loss'], label='val loss') plt.legend() plt.show() plt.savefig('LossVal_loss') # plot the accuracy plt.plot(r.history['accuracy'], label='train acc') plt.plot(r.history['val_accuracy'], label='val acc') plt.legend() plt.show() plt.savefig('AccVal_acc') # save it as a h5 file from tensorflow.keras.models import load_model model.save('model_vgg19.h5') y_pred = model.predict(test_set) y_pred import numpy as np y_pred = np.argmax(y_pred, axis=1) y_pred from tensorflow.keras.models import load_model from tensorflow.keras.preprocessing import image model=load_model('model_vgg19.h5') img=image.load_img('Dataset/Test/Uninfected/2.png',target_size=(224,224)) img=image.load_img('Dataset/Train/Parasite/2.png',target_size=(224,224)) x=image.img_to_array(img) x x.shape x=x/255 x=np.expand_dims(x,axis=0) img_data=preprocess_input(x) img_data.shape model.predict(img_data) a=np.argmax(model.predict(img_data), axis=1) if(a==1): print("Uninfected") else: print("Infected") ###Output Infected ###Markdown Creating CNN Using Scratch And Transfer Learning Please download the dataset from the below url ###Code # import the libraries as shown below from tensorflow.keras.layers import Input, Lambda, Dense, Flatten,Conv2D from tensorflow.keras.models import Model from tensorflow.keras.applications.vgg19 import VGG19 from tensorflow.keras.applications.resnet50 import preprocess_input from tensorflow.keras.preprocessing import image from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img from tensorflow.keras.models import Sequential import numpy as np from glob import glob import matplotlib.pyplot as plt # re-size all the images to this IMAGE_SIZE = [224, 224] train_path = 'cell_images/Train' valid_path = 'cell_images/Test' # Import the Vgg 16 library as shown below and add preprocessing layer to the front of VGG # Here we will be using imagenet weights mobilnet = VGG19(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False) # don't train existing weights for layer in mobilnet.layers: layer.trainable = False # useful for getting number of output classes folders = glob('Dataset/Train/*') folders # our layers - you can add more if you want x = Flatten()(mobilnet.output) prediction = Dense(len(folders), activation='softmax')(x) # create a model object model = Model(inputs=mobilnet.input, outputs=prediction) # view the structure of the model model.summary() from tensorflow.keras.layers import MaxPooling2D ### Create Model from scratch using CNN model=Sequential() model.add(Conv2D(filters=16,kernel_size=2,padding="same",activation="relu",input_shape=(224,224,3))) model.add(MaxPooling2D(pool_size=2)) model.add(Conv2D(filters=32,kernel_size=2,padding="same",activation ="relu")) model.add(MaxPooling2D(pool_size=2)) 
model.add(Conv2D(filters=64,kernel_size=2,padding="same",activation="relu")) model.add(MaxPooling2D(pool_size=2)) model.add(Flatten()) model.add(Dense(500,activation="relu")) model.add(Dense(2,activation="softmax")) model.summary() # tell the model what cost and optimization method to use model.compile( loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'] ) # Use the Image Data Generator to import the images from the dataset from tensorflow.keras.preprocessing.image import ImageDataGenerator train_datagen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True) test_datagen = ImageDataGenerator(rescale = 1./255) # Make sure you provide the same target size as initialized for the image size training_set = train_datagen.flow_from_directory('Dataset/Train', target_size = (224, 224), batch_size = 32, class_mode = 'categorical') training_set test_set = test_datagen.flow_from_directory('Dataset/Test', target_size = (224, 224), batch_size = 32, class_mode = 'categorical') # fit the model # Run the cell. It will take some time to execute r = model.fit_generator( training_set, validation_data=test_set, epochs=50, steps_per_epoch=len(training_set), validation_steps=len(test_set) ) # plot the loss plt.plot(r.history['loss'], label='train loss') plt.plot(r.history['val_loss'], label='val loss') plt.legend() plt.show() plt.savefig('LossVal_loss') # plot the accuracy plt.plot(r.history['accuracy'], label='train acc') plt.plot(r.history['val_accuracy'], label='val acc') plt.legend() plt.show() plt.savefig('AccVal_acc') # save it as a h5 file from tensorflow.keras.models import load_model model.save('model_vgg19.h5') y_pred = model.predict(test_set) y_pred import numpy as np y_pred = np.argmax(y_pred, axis=1) y_pred from tensorflow.keras.models import load_model from tensorflow.keras.preprocessing import image model=load_model('model_vgg19.h5') img=image.load_img('Dataset/Test/Uninfected/2.png',target_size=(224,224)) x=image.img_to_array(img) x x.shape x=x/255 x=np.expand_dims(x,axis=0) img_data=preprocess_input(x) img_data.shape model.predict(img_data) a=np.argmax(model.predict(img_data), axis=1) if(a==1): print("Uninfected") else: print("Infected") ###Output Uninfected ###Markdown Creating CNN Using Scratch And Transfer Learning ###Code # import the libraries as shown below from tensorflow.keras.layers import Input, Lambda, Dense, Flatten,Conv2D from tensorflow.keras.models import Model from tensorflow.keras.applications.vgg19 import VGG19 from tensorflow.keras.applications.resnet50 import preprocess_input from tensorflow.keras.preprocessing import image from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img from tensorflow.keras.models import Sequential import numpy as np from glob import glob import matplotlib.pyplot as plt # re-size all the images to this IMAGE_SIZE = [224, 224] train_path = 'Dataset/Train' valid_path = 'Dataset/Test' # Import the VGG19 library as shown below and add preprocessing layer to the front of VGG # Here we will be using imagenet weights vgg19 = VGG19(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False) vgg19.summary() # don't train existing weights for layer in vgg19.layers: layer.trainable = False # useful for getting number of output classes folders = glob('Dataset/Train/*') folders # our layers - you can add more if you want x = Flatten()(vgg19.output) prediction = Dense(len(folders), activation='softmax')(x) # create a model object model = Model(inputs=vgg19.input,
outputs=prediction) # view the structure of the model model.summary() from tensorflow.keras.layers import MaxPooling2D ### Create Model from scratch using CNN model=Sequential() model.add(Conv2D(filters=16,kernel_size=2,padding="same",activation="relu",input_shape=(224,224,3))) model.add(MaxPooling2D(pool_size=2)) model.add(Conv2D(filters=32,kernel_size=2,padding="same",activation ="relu")) model.add(MaxPooling2D(pool_size=2)) model.add(Conv2D(filters=64,kernel_size=2,padding="same",activation="relu")) model.add(MaxPooling2D(pool_size=2)) model.add(Flatten()) model.add(Dense(500,activation="relu")) model.add(Dense(2,activation="softmax")) model.summary() # tell the model what cost and optimization method to use model.compile( loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'] ) # Use the Image Data Generator to import the images from the dataset from tensorflow.keras.preprocessing.image import ImageDataGenerator train_datagen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True) test_datagen = ImageDataGenerator(rescale = 1./255) # Make sure you provide the same target size as initialied for the image size training_set = train_datagen.flow_from_directory('Dataset/Train', target_size = (224, 224), batch_size = 32, class_mode = 'categorical') training_set test_set = test_datagen.flow_from_directory('Dataset/Test', target_size = (224, 224), batch_size = 32, class_mode = 'categorical') # fit the model # Run the cell. It will take some time to execute r = model.fit( training_set, validation_data=test_set, epochs=10, steps_per_epoch=len(training_set), validation_steps=len(test_set) ) # plot the loss plt.plot(r.history['loss'], label='train loss') plt.plot(r.history['val_loss'], label='val loss') plt.legend() plt.show() plt.savefig('LossVal_loss') # plot the accuracy plt.plot(r.history['accuracy'], label='train acc') plt.plot(r.history['val_accuracy'], label='val acc') plt.legend() plt.show() plt.savefig('AccVal_acc') # save it as a h5 file from tensorflow.keras.models import load_model model.save('model_vgg19.h5') y_pred = model.predict(test_set) y_pred import numpy as np y_pred = np.argmax(y_pred, axis=1) y_pred from tensorflow.keras.models import load_model from tensorflow.keras.preprocessing import image model=load_model('model_vgg19.h5') img=image.load_img('Dataset/Test/Uninfected/2.png',target_size=(224,224)) img=image.load_img('Dataset/Train/Parasite/2.png',target_size=(224,224)) x=image.img_to_array(img) x x.shape x=x/255 x=np.expand_dims(x,axis=0) img_data=preprocess_input(x) img_data.shape model.predict(img_data) a=np.argmax(model.predict(img_data), axis=1) if(a==1): print("Uninfected") else: print("Infected") ###Output Infected ###Markdown Creating CNN Using Scratch And Transfer Learning Please download the dataset from the below url ###Code # import the libraries as shown below from tensorflow.keras.layers import Input, Lambda, Dense, Flatten,Conv2D from tensorflow.keras.models import Model from tensorflow.keras.applications.vgg19 import VGG19 from tensorflow.keras.applications.resnet50 import preprocess_input from tensorflow.keras.preprocessing import image from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img from tensorflow.keras.models import Sequential import numpy as np from glob import glob import matplotlib.pyplot as plt # re-size all the images to this IMAGE_SIZE = [224, 224] train_path = '/home/jc/project/Malaria-Detection-master/Dataset/Train' valid_path = 
'/home/jc/project/Malaria-Detection-master/Dataset/Test' # Import the Vgg 16 library as shown below and add preprocessing layer to the front of VGG # Here we will be using imagenet weights mobilnet = VGG19(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False) # don't train existing weights for layer in mobilnet.layers: layer.trainable = False # useful for getting number of output classes folders = glob('Dataset/Train/*') folders # our layers - you can add more if you want x = Flatten()(mobilnet.output) prediction = Dense(len(folders), activation='softmax')(x) # create a model object model = Model(inputs=mobilnet.input, outputs=prediction) # view the structure of the model model.summary() from tensorflow.keras.layers import MaxPooling2D ### Create Model from scratch using CNN model=Sequential() model.add(Conv2D(filters=16,kernel_size=2,padding="same",activation="relu",input_shape=(224,224,3))) model.add(MaxPooling2D(pool_size=2)) model.add(Conv2D(filters=32,kernel_size=2,padding="same",activation ="relu")) model.add(MaxPooling2D(pool_size=2)) model.add(Conv2D(filters=64,kernel_size=2,padding="same",activation="relu")) model.add(MaxPooling2D(pool_size=2)) model.add(Flatten()) model.add(Dense(500,activation="relu")) model.add(Dense(2,activation="softmax")) model.summary() # tell the model what cost and optimization method to use model.compile( loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'] ) # Use the Image Data Generator to import the images from the dataset from tensorflow.keras.preprocessing.image import ImageDataGenerator train_datagen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True) test_datagen = ImageDataGenerator(rescale = 1./255) # Make sure you provide the same target size as initialied for the image size training_set = train_datagen.flow_from_directory('Dataset/Train', target_size = (224, 224), batch_size = 32, class_mode = 'categorical') training_set test_set = test_datagen.flow_from_directory('Dataset/Test', target_size = (224, 224), batch_size = 32, class_mode = 'categorical') # fit the model # Run the cell. It will take some time to execute r = model.fit_generator( training_set, validation_data=test_set, epochs=5, steps_per_epoch=len(training_set), validation_steps=len(test_set) ) # plot the loss plt.plot(r.history['loss'], label='train loss') plt.plot(r.history['val_loss'], label='val loss') plt.legend() plt.show() plt.savefig('LossVal_loss') # plot the accuracy plt.plot(r.history['accuracy'], label='train acc') plt.plot(r.history['val_accuracy'], label='val acc') plt.legend() plt.show() plt.savefig('AccVal_acc') # save it as a h5 file from tensorflow.keras.models import load_model model.save('model_vgg19.h5') y_pred = model.predict(test_set) y_pred import numpy as np y_pred = np.argmax(y_pred, axis=1) y_pred from tensorflow.keras.models import load_model from tensorflow.keras.preprocessing import image model=load_model('model_vgg19.h5') img=image.load_img('/home/jc/project/Malaria-Detection-master/Dataset/Test/Uninfected/C3thin_original_IMG_20150608_162922_cell_191.png',target_size=(224,224)) x=image.img_to_array(img) x x.shape x=x/255 x x=np.expand_dims(x,axis=0) img_data=preprocess_input(x) img_data.shape model.predict(img_data) a=np.argmax(model.predict(img_data), axis=1) print(a) if a==1: print("Uninfected") if a==0: print("Infected") ###Output [0] Infected
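###Markdown A minimal sketch that gathers the VGG19 transfer-learning setup repeated across the cells above into a single helper; the function name `build_vgg19_transfer_model` and its `num_classes` argument are illustrative, not from the original notebooks. ###Code
# Sketch only: freeze the pretrained VGG19 base and attach a new softmax head.
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Model

IMAGE_SIZE = [224, 224]

def build_vgg19_transfer_model(num_classes):
    # ImageNet weights, without the original classifier head
    base = VGG19(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)
    for layer in base.layers:
        layer.trainable = False  # keep the pretrained convolutional weights fixed
    x = Flatten()(base.output)
    outputs = Dense(num_classes, activation='softmax')(x)
    model = Model(inputs=base.input, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

# e.g. two classes (Parasite / Uninfected), matching len(folders) in the cells above
model = build_vgg19_transfer_model(num_classes=2)
model.summary()
###Output _____no_output_____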
exercise-1/exercise.ipynb
###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `my_name` that contains your name. ###Code my_name = "Anthony" ###Output _____no_output_____ ###Markdown Create a variable `name_length` that holds how many letters are in your name. Print the number of letters. ###Code name_length = len (my_name) print (name_length ) ###Output 7 ###Markdown Print out your name with the uppercase letters made lowercase, and the lowercase letters made uppercase. **Hint:** look for a [string method](https://docs.python.org/3/library/stdtypes.htmlstring-methods) that will modify the _case_ of the string.- Try to do this without creating a separate variable! ###Code lower_mes ###Output _____no_output_____ ###Markdown Pick two of your favorite numbers (between 1 and 100) and assign them to `favorite_1` and `favorite_2` ###Code favorite_1 = 18 favorite_2 = 13 ###Output _____no_output_____ ###Markdown Divide each number by the length of your name raised to the power of `.598` (use the built-in `pow()` function for practice), and save it in the same variable. ###Code favorite_1 = favorite_1/(pow(name_length, .598)) ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `my_name` that contains your name. ###Code my_name = "Marie Williams Chant" ###Output _____no_output_____ ###Markdown Create a variable `name_length` that holds how many letters are in your name. Print the number of letters. ###Code name_length = len(my_name) print(name_length) ###Output 20 ###Markdown Print out your name with the uppercase letters made lowercase, and the lowercase letters made uppercase. **Hint:** look for a [string method](https://docs.python.org/3/library/stdtypes.htmlstring-methods) that will modify the _case_ of the string.- Try to do this without creating a separate variable! ###Code print(my_name.swapcase()) ###Output mARIE wILLIAMS cHANT ###Markdown Pick two of your favorite numbers (between 1 and 100) and assign them to `favorite_1` and `favorite_2` ###Code favorite_1 = 7 favorite_2 = 65 ###Output _____no_output_____ ###Markdown Divide each number by the length of your name raised to the power of `.598` (use the built-in `pow()` function for practice), and save it in the same variable. ###Code favorite_1 = favorite_1/pow(name_length, .598) favorite_2 = favorite_2/pow(name_length, .598) favorite_1, favorite_2 ###Output _____no_output_____ ###Markdown Create a variable `raw_sum` that is the sum of those two variables. Note you _cannot_ use the `sum()` function for this, so just use a normal operator! ###Code raw_sum = favorite_1 + favorite_2 raw_sum ###Output _____no_output_____ ###Markdown Create a variable `round_sum` that is the `raw_sum` rounded to 1 decimal place. Use the `round()` function. ###Code round_sum = round(raw_sum, 1) round_sum ###Output _____no_output_____ ###Markdown Create two new variables `rounded_1` and `rounded_2` that are your `favorite_1` and `favorite_2` variables rounded to 1 decimal place. Print them out on a single line (hint: pass them as two different arguments). ###Code rounded_1 = round(favorite_1, 1) rounded_2 = round(favorite_2, 1) print(rounded_1, rounded_2) ###Output 0.2 1.8 ###Markdown Create a variable `sum_round` that is the sum of the rounded values (use a normal math operator). ###Code sum_round = rounded_1 + rounded_2 sum_round ###Output _____no_output_____ ###Markdown Which is bigger, `round_sum` or `sum_round`? 
(You can use the `max()` function!) ###Code max(round_sum, sum_round) ###Output _____no_output_____ ###Markdown Create a variable `fruits` that contains the string `"apples and bananas"` ###Code fruits = "apples and bananas" ###Output _____no_output_____ ###Markdown Use the `replace()` function to substitute all the "a"s in `fruits` with "ee". Store the result in a variable called `fruits_e`. ###Code fruits_e = fruits.replace("a", "ee") ###Output _____no_output_____ ###Markdown Use the `replace()` function to substitute all the "a"s in `fruits` with "o". Store the result in a variable called `fruits_o`. ###Code fruits_o = fruits.replace("a", "o") ###Output _____no_output_____ ###Markdown Print out the string "I like to eat " followed by each of `fruits`, `fruits_e` and `fruits_o` (three sentences). ###Code print("I like to eat " +fruits) print("I like to eat " +fruits_e) print("I like to eat " +fruits_o) ###Output I like to eat apples and bananas I like to eat eepples eend beeneenees I like to eat opples ond bononos ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Define a new list **`words`** that contains each of the words to the following [song lyric](https://www.youtube.com/watch?v=StTqXEQ2l-Y) (don't include the notes):```♫ everything is awesome everything is cool when you are part of a TEEEEAM ♫```_Hint:_ use the `split()` string method. ###Code lyrics = 'everything is awesome everything is cool when you are part of a TEEEEAM' words = lyrics.split() words ###Output _____no_output_____ ###Markdown Define a function **`abbreviate()`** that takes in a string as an argument and _returns_ the first letter of that string capitalized and followed by a period (`.`). For example:```pythonabbreviate("dog") returns "D."```_Hint:_ you can use bracket notation to access the first (0th) character of a string! ###Code def abberviate(str_arg): temp = str_arg[0].upper() + '.' return temp abberviate('dog') ###Output _____no_output_____ ###Markdown Use the **`map()`** function to transform the list of lyric words into a list of abbreviations (technically an initialism). ###Code abberviate_list = list(map(abberviate, words)) abberviate_list ###Output _____no_output_____ ###Markdown Use the **`filter()`** function to remove each word from the list of `words` that is 3 or fewer letters in length. You can define a named callback function, or use an anonymous lambda. ###Code def three_or_less(str): if len(str) > 3: return str list(filter(three_or_less, words)) ###Output _____no_output_____ ###Markdown **In a single statement**, use both the `map()` and `filter()` functions to get a list of the initials of the long words in the the `words` list. _Hint:_ try pasting your filtering code into your mapping code from the previous problems! ###Code #best to break up to: filtered items, then abbr, thennn on. kinda like working backwards ###Output _____no_output_____ ###Markdown Use the **`reduce()`** function to combine the list of initials into a single string. You can define a named callback function, or use an anonymous lambda. Remember to `import` the `reduce()` function! _Note:_ normally you would do this with the `join()` string method. Use the **`reduce()`** function to get a list of _unique_ words in the original lyrics (not the initials).- Think about what to do with each "new" list element to include it in the aggregation (you can use an `if` statement to decide whether to include it or not!). 
Try defining an `add_if_unique()` callback function.- You will also need to use the optional third argument to [reduce()](https://docs.python.org/3/library/functools.htmlfunctools.reduce) to specify that the "aggregation" should start with an empty list `[]`.- We do this with `reduce()` instead of `filter()` because it allows us to more directly check the list beyond the specific element being considered. ###Code #look if an element is there, if not there add to the list ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `food` that stores your favorite kind of food. Print or output the variable. ###Code food="pizza" print(food) ###Output pizza ###Markdown Create a variable `restaurant` that stores your favorite place to eat that kind of food. ###Code restaurant="Canam" ###Output _____no_output_____ ###Markdown Print the message `"I'm going to RESTAURANT for some FOOD"`, replacing the restaurant and food with your variables. ###Code print("I'm going to "+restaurant+" for some "+food) ###Output I'm going to Canam for some pizza ###Markdown Create a variable `num_friends` equal to the number of friends you would like to eat with. ###Code num_friends=4 ###Output _____no_output_____ ###Markdown Print a message `"I'm going with X friends"`, replacing the X with the number of friends. ###Code print("I'm going with "+str(num_friends)+" friends" ) ###Output I'm going with 4 friends ###Markdown Create a variable `meal_price`, which is how expensive you think one meal at the restaurant would be. This price should be a `float`. ###Code meal_price=20.15 ###Output _____no_output_____ ###Markdown Update (re-assign) the `meal_price` variable so it includes a 15% tip&mdash;that is, so the price is 15% higher. Output the variable. ###Code meal_price=20.15*1.15 print(meal_price) ###Output 23.172499999999996 ###Markdown Create a variable `total_cost` that has the total estimated cost of the bill for you and all of your friends. Output or print the variable ###Code total_cost=meal_price*num_friends print (total_cost) ###Output 92.68999999999998 ###Markdown Create a variable `budget` representing your spending budget for a night out. ###Code budget=30 ###Output _____no_output_____ ###Markdown Create a variable `max_friends`, which is the maximum number of friends you can invite, at the estimated meal price, while staying within your budget. Output or print this value.- Be carefully that you only invite whole people! ###Code max_friends=int(total_cost/budget) print(max_friends) ###Output 3 ###Markdown Bonus: Create a variable `chorus` that is the string `"FOOD time!"` _repeated_ once for each of the friends you are able to bring. _Hint_ use the **`*`** operator. Print out the variable. ###Code chorus="FOOD time!" * num_friends print(chorus) ###Output FOOD time!FOOD time!FOOD time!FOOD time! ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a dictionary **`rectangle`** that has a key `'width'` with a value of `100`, a key `'height'` with a value of `50`, a key `'x'` with a value of `25`, and a key `'y'` with a value of `40`. ###Code rectangle = {'width': 100, 'height': 50, 'x': 25, 'y': 40} ###Output _____no_output_____ ###Markdown Print the width of the rectangle (from the dictionary) ###Code print(rectangle['width']) ###Output 100 ###Markdown Print the x- and y- coordinates of the rectangle (its location). Your output should have the format `"X, Y"`. 
###Code location = [rectangle['x'], rectangle['y']] print(location) ###Output [25, 40] ###Markdown Print the rectangle's area (based on the dictionary values). ###Code area = rectangle['width'] * rectangle['height'] print(area) ###Output 5000 ###Markdown "Move" the rectangle by increasing its x-coordinate by `20` and _decreasing_ its y-coordinate by `10`. Print out the rectangle to see its new location. ###Code rectangle['x'] = rectangle['x'] + 20 rectangle['y'] = rectangle['y'] - 10 print(rectangle['x'],",", rectangle['y']) ###Output 45 , 30 ###Markdown Add a new key `'color'` to the rectangle with a value equal to your favorite color (as a string is fine). Output the rectangle to confirm your change. ###Code rectangle['color'] = "red" rectangle ###Output _____no_output_____ ###Markdown Create a dictionary **`circle`** that has a `radius` of 35 and a `center` that is a **tuple** representing an x-coordinate of `59` and a y-coordinate of `80`. Output the circle. ###Code circle = {'radius':35, 'center':(59, 80)} circle ###Output _____no_output_____ ###Markdown What is the distance between the circle's center and the rectangle's location? _Hint:_ use the [distance formula](http://www.mathwarehouse.com/algebra/distance_formula/index.php); the [math.hypot()](https://docs.python.org/3/library/math.htmlmath.hypot) can help. ###Code import math distance = math.hypot(rectangle['x'] - circle['center'][0], rectangle['y'] - circle['center'][1]) print(distance) ###Output 51.92301994298868 ###Markdown Define a function **`get_area()`** that takes in a dictionary representing a shape (such as _either_ a circle or a rectangle like you defined above) and returns the _area_ of that shape. You will need to determine whether the shape is a rectangle or a circle (e.g., whether it has a radius or a width/height). Calculate rectange area as $width*height$ and circle area as $\pi * radius^2$. ###Code def get_area(shape): area = 0 for key in shape: if key == "radius": area = math.pi * shape['radius'] * shape['radius'] if key == "width": area = shape['width'] * shape['height'] return area ###Output _____no_output_____ ###Markdown Use your `get_area()` function and print the areas of both your `rectangle` and your `circle`. ###Code print("The area of circle is ", get_area(circle), ", and the area of reactangle is ", get_area(rectangle)) ###Output The area of circle is 3848.4510006474966 , and the area of reactangle is 5000 ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `num` that contains a random number between 1 and 20 (use the [random.randint()](https://docs.python.org/3/library/random.htmlrandom.randint) function). Print the number. ###Code import random num = random.randint(1, 20) num ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether or not `num` is equal to 12. ###Code var = 19 > 12 var ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether `num` is either less than 5 or greater than 15. (Note that in programming, we always assume "strictly greater/less than" unless otherwis specified). ###Code var = num < 5 or num > 15 var ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether `num` is between 3 and 18 (inclusive), but is _not_ equal to 11. (You can temporarily assign a non-random value to `num` to test this). 
###Code var = num >= 3 and num <= 18 and num != 11 var ###Output _____no_output_____ ###Markdown _Re-run the above cells multiple times to confirm that the logic holds for different random numbers!_ Write a boolean expression that represents whether `5.1 - 2.5` is equal to `2.6`.- What does this tell you about the precision of _floating point_ (decimal) operations in Python? _Tip:_ try printing the value of `5.1-2.6`. (See also: https://en.wikipedia.org/wiki/Floating-point_arithmetic) - How might you ensure that this equality is correct? ###Code 5.1 - 2.5 == 2.6 ###Output _____no_output_____ ###Markdown Define a function `de_morgan()` that takes in two boolean values (`P` and `Q`) and _returns_ whether the [De Morgan laws](https://en.wikipedia.org/wiki/De_Morgan%27s_laws) hold for those two values. That is:> the negation of P **and** Q is the same as the negation of P **or** the negation of Q> and> the negation of P **or** Q is the same as the negation of P **and** the negation of QYour function should return a boolean representing whether or not _both_ of these statements are true for the given `P` and `Q`. _Use parentheses to enforce order of operations!_ ###Code def de_morgan(p, q): v1 = ((not (p and q)) == ((not p) or (not q))) v2 = (((not p) or (not q)) == ((not p) and (not q))) return v1 and v2 ###Output _____no_output_____ ###Markdown Call your `de_morgan()` function for each possible value of `P` and `Q` (there are **4** possible combinations), printing the result, in order to demonstrate that these laws hold. ###Code x1 = de_morgan(True, True) x2 = de_morgan(True, False) x3 = de_morgan(False, True) x4 = de_morgan(False, False) print(x1, x2, x3, x4) ###Output True False False True ###Markdown Use the built-in [input()](https://docs.python.org/3/library/functions.htmlinput) function to prompt the user for their favorite word.- This function will wait for the user to provide an input before it finished executing, and will then _return_ a string of whatever the user typed in. ###Code word = input("What's your name?") ###Output What's your name?Julie ###Markdown Write a boolean expression that represents if the inputted word has more than 8 letters. ###Code var = len(word) > 8 var ###Output _____no_output_____ ###Markdown Use the `islower()` string method to check if the inputted word was entered with all lower-case letters. ###Code word.islower() ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether or not the word _starts with_ the letter `i` or _ends with_ the letter `n`. Use appropriate string methods. _Hint_: convert the word to a specific case (upper or lower) for the comparison. ###Code word.lower().startswith('i') word.lower().endswith('n') ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `my_name` that contains your name. ###Code my_name = "Serena" ###Output _____no_output_____ ###Markdown Create a variable `name_length` that holds how many letters are in your name. Print the number of letters. ###Code name_length = len(my_name) print(name_length) ###Output 6 ###Markdown Print out your name with the uppercase letters made lowercase, and the lowercase letters made uppercase. **Hint:** look for a [string method](https://docs.python.org/3/library/stdtypes.htmlstring-methods) that will modify the _case_ of the string.- Try to do this without creating a separate variable! 
###Code print(my_name.swapcase()) ###Output sERENA ###Markdown Pick two of your favorite numbers (between 1 and 100) and assign them to `favorite_1` and `favorite_2` ###Code favorite_1 = 3 favorite_2 = 7 ###Output _____no_output_____ ###Markdown Divide each number by the length of your name raised to the power of `.598` (use the built-in `pow()` function for practice), and save it in the same variable. ###Code favorite_1 = favorite_1/pow(name_length, 0.598) favorite_2 = favorite_2/pow(name_length, 0.598) ###Output _____no_output_____ ###Markdown Create a variable `raw_sum` that is the sum of those two variables. Note you _cannot_ use the `sum()` function for this, so just use a normal operator! ###Code raw_sum = favorite_1 + favorite_2 ###Output _____no_output_____ ###Markdown Create a variable `round_sum` that is the `raw_sum` rounded to 1 decimal place. Use the `round()` function. ###Code round_sum = round(raw_sum, 1) print(round_sum) ###Output 1.8 ###Markdown Create two new variables `rounded_1` and `rounded_2` that are your `favorite_1` and `favorite_2` variables rounded to 1 decimal place. Print them out on a single line (hint: pass them as two different arguments). ###Code rounded_1 = round(favorite_1, 1) rounded_2 = round(favorite_2, 1) print(rounded_1, rounded_2) ###Output 0.7 1.1 ###Markdown Create a variable `sum_round` that is the sum of the rounded values (use a normal math operator). ###Code sum_round = rounded_1 + rounded_2 print(sum_round) ###Output 1.8 ###Markdown Which is bigger, `round_sum` or `sum_round`? (You can use the `max()` function!) ###Code print(max(round_sum, sum_round)) ###Output 1.8 ###Markdown Create a variable `fruits` that contains the string `"apples and bananas"` ###Code fruits = "apples and bananas" ###Output _____no_output_____ ###Markdown Use the `replace()` function to substitute all the "a"s in `fruits` with "ee". Store the result in a variable called `fruits_e`. ###Code fruits_e = fruits.replace("a", "ee") ###Output _____no_output_____ ###Markdown Use the `replace()` function to substitute all the "a"s in `fruits` with "o". Store the result in a variable called `fruits_o`. ###Code fruits_o = fruits.replace("a", "o") ###Output _____no_output_____ ###Markdown Print out the string "I like to eat " followed by each of `fruits`, `fruits_e` and `fruits_o` (three sentences). ###Code print("I like to eat", fruits) print("I like to eat", fruits_e) print("I like to eat", fruits_o) ###Output I like to eat apples and bananas I like to eat eepples eend beeneenees I like to eat opples ond bononos ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable **`deck`** that represents a deck of [playing cards](https://en.wikipedia.org/wiki/Playing_card). This variable should be a _list_ of 52 elements, each of which is a **tuple** with two values:- a string representing the suit, either `'hearts'`, `'diamonds'`, `'clubs'`, or `'spades'`.- an interger representing the rank, ranging from 2 to 14 inclusive (11-14 represent a Jack, Queen, King, or Ace respectively)._Tip:_ use a pair of nested loops to add each combination of suit and rank to the `deck` list! ###Code deck = [] for suit in ['hearts', 'diamonds', 'clubs', 'spades']: for rank in range(2, 15): deck.append({'suit': suit, 'rank': rank}) ###Output _____no_output_____ ###Markdown Create a list **hand** that represents a hand of [Poker](https://en.wikipedia.org/wiki/Poker) (5-card draw). 
Add 5 cards from the `deck` to this list.- You can add 5 specific cards for testing (check their indices!), or use the `random.shuffle()` function to shuffle the deck and then _slice_ 5 cards from the top.Print out the `hand` so you know what you are dealing with! ###Code import random random.shuffle(deck) hand = deck[:5] print(hand) ###Output [{'suit': 'spades', 'rank': 4}, {'suit': 'diamonds', 'rank': 7}, {'suit': 'hearts', 'rank': 14}, {'suit': 'diamonds', 'rank': 13}, {'suit': 'diamonds', 'rank': 10}] ###Markdown Define a function **`contains_queen_of_hearts()`** that takes in a list of cards (e.g., a hand) and returns whether or not the Queen of Hearts is in that list. _For practice, don't use the `in` operator to check for containment_. ###Code def contains_queen_of_hearts(hand): for card in hand: if card['suit'] == 'hearts' and card['rank'] == 12: return True return False contains_queen_of_hearts(hand) ###Output _____no_output_____ ###Markdown Define a function **`get_high_card()`** that takes in a list of cards and returns the card (tuple) of the highest value. The "high card" is the one with the highest rank. Cards of different suits but the same rank are considered to have the same value and either may be returned.- Hint: use a "king-of-the-hill" search! ###Code def get_high_card(hand): highest = hand[0] for card in hand: if card['rank'] > highest['rank']: highest = card return highest get_high_card(hand) ###Output _____no_output_____ ###Markdown Define a function **`is_flush()`** that takes in a list of cards and returns whether or not the list represents a _flush_&mdash;that is, all of the cards have the same _suit_. ###Code def is_flush(hand): suit = hand[0]['suit'] for card in hand: if card['suit'] != suit: return False return suit ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `food` that stores your favorite kind of food. Print or output the variable. ###Code food = "meat" print (food) ###Output meat ###Markdown Create a variable `restaurant` that stores your favorite place to eat that kind of food. ###Code restaurant = "McDonald" ###Output _____no_output_____ ###Markdown Print the message `"I'm going to RESTAURANT for some FOOD"`, replacing the restaurant and food with your variables. ###Code print ("I'm going to " + restaurant + " for some " + food) ###Output I'm going to McDonald for some meat ###Markdown Create a variable `num_friends` equal to the number of friends you would like to eat with. ###Code num_friends = 3 ###Output _____no_output_____ ###Markdown Print a message `"I'm going with X friends"`, replacing the X with the number of friends. ###Code print ("I'm going with " + str(num_friends) + " friends") ###Output I'm going with 3 friends ###Markdown Create a variable `meal_price`, which is how expensive you think one meal at the restaurant would be. This price should be a `float`. ###Code meal_price = 4.5 ###Output _____no_output_____ ###Markdown Update (re-assign) the `meal_price` variable so it includes a 15% tip&mdash;that is, so the price is 15% higher. Output the variable. ###Code meal_price = meal_price * 1.15 print (meal_price) ###Output 5.175 ###Markdown Create a variable `total_cost` that has the total estimated cost of the bill for you and all of your friends. 
Output or print the variable ###Code total_cost = meal_price * num_friends print (total_cost) ###Output 15.524999999999999 ###Markdown Create a variable `budget` representing your spending budget for a night out. ###Code budget = 25 ###Output _____no_output_____ ###Markdown Create a variable `max_friends`, which is the maximum number of friends you can invite, at the estimated meal price, while staying within your budget. Output or print this value.- Be carefully that you only invite whole people! ###Code max_friends = budget / meal_price print(int(max_friends)) ###Output 4 ###Markdown Bonus: Create a variable `chorus` that is the string `"FOOD time!"` _repeated_ once for each of the friends you are able to bring. _Hint_ use the **`*`** operator. Print out the variable. ###Code chorus = "FOOD time!" print (chorus[:10] * int(max_friends)) ###Output FOOD time!FOOD time!FOOD time!FOOD time! ###Markdown Modify the above cell so that each `"FOOD time!"` is on a separate line (_hint_: use a newline character!), then rerun the cell. ###Code chorus = "FOOD time!" + "\n" print (chorus[:15] * int(max_friends)) ###Output FOOD time! FOOD time! FOOD time! FOOD time! ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable **`names`** that is a list containing the names of 2 people next to you. ###Code names = ["Claire", "Iris"] print(names) ###Output ['Claire', 'Iris'] ###Markdown Create a second variable **`absent`** that is an empty list. Then add two names of people who you _wish_ were next to you. ###Code absent = ["Hilary", "Barack"] print(absent) ###Output ['Hilary', 'Barack'] ###Markdown Create a variable **`all_names`** that is the `names` and `absent` lists added together. Output the list. ###Code all_names = names + absent print(all_names) ###Output ['Claire', 'Iris', 'Hilary', 'Barack'] ###Markdown What happens if you try and `append()` the `absent` list to the `names` list? Print the resulting `names` variable, as well as the length of that list. Is it what you expect?- Be careful about running this cell multiple times; you can always **Reset the Kernel** if needed. Add _your name_ at the **beginning** of the `all_names` list. _Hint_: find an appropriate [list method](https://docs.python.org/3/tutorial/datastructures.htmlmore-on-lists). ###Code all_names.insert(0, "Brynn") print(all_names) ###Output ['Brynn', 'Claire', 'Iris', 'Hilary', 'Barack'] ###Markdown Use the `range()` function to create a sequence of numbers from 10 to 99. Then use the [random.sample()](https://docs.python.org/3/library/random.htmlrandom.sample) to produce a list of 11 random numbers from that range. Store that list in a variable called **`numbers`**, and print the list so you know what it is. ###Code import random numbers = random.sample(range(10, 99), 11) print(numbers) ###Output [88, 25, 76, 92, 62, 73, 11, 72, 50, 10, 46] ###Markdown Find the [sum](https://docs.python.org/3/library/functions.htmlsum) of the `numbers` list (note that this function _only_ works for sequences!) ###Code sum(numbers) ###Output _____no_output_____ ###Markdown [Sort](https://wiki.python.org/moin/HowTo/Sorting) the `numbers` list. 
###Code numbers.sort(reverse = True) # sorts descending order numbers.sort() sorts ascending print(numbers) ###Output [92, 88, 76, 73, 72, 62, 50, 46, 25, 11, 10] ###Markdown Print the _last_ element of the `numbers` list (which should now be the biggest) ###Code numbers[-1] ###Output _____no_output_____ ###Markdown Create a list **`first_three`** that contains the first three elements of the `numbers` list. Use the _slice_ (**`:`**) operator. ###Code numbers[:3] ###Output _____no_output_____ ###Markdown Replace the first three elements in `numbers` with values that are _double_ (2x) the original value. You can use 3 assignment statements (or just 1 if you are tricky!) ###Code numbers[:3] = numbers[0] *2, numbers[1] *2, numbers[2] *2 print(numbers) ###Output [184, 176, 152, 73, 72, 62, 50, 46, 25, 11, 10] ###Markdown Create a list **`middle_three`** that contains the three elements in the _middle_ of the list (these need not be the three median values). _Hint:_ calculate the middle index. ###Code middle_index = len(numbers)//2 middle_three = numbers[middle_index-1:middle_index+2] print(middle_three) ###Output [72, 62, 50] ###Markdown **Bonus Challenge**Define a function **`pig_latinize()`** that takes in a single word as an argument, and returns a [pig-latin](https://en.wikipedia.org/wiki/Pig_LatinRules) version of that word. That is, if the word starts with a vowel (a,e,i,o,u), then the function returns that word with `"way"` added to the end. If it starts with a consonant, the function returns that word with the first consonent moved to the end, followed by `"ay"`. ###Code def pig_latinize(word): if word[0] in 'a,e,i,o,u': return word + 'way' else: return word[1:] + word[0] + 'ay' print(pig_latinize("hello")) ###Output ellohay ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Write a **`while`** loop that prints out every 5th number (multiples of 5) from 0 to 100 (inclusive).- _Tip:_ use an **`end=','`** keyword argument to the `print()` function to print all the numbers on the same line. ###Code num = 5 while num <= 100: print(num, end=',') num += 5 ###Output 5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100, ###Markdown Use a **`while`** loop to print out the first 15 [Triangular numbers](https://en.wikipedia.org/wiki/Triangular_number). This is a sequence of numbers for which the _nth_ value is the sum of the numbers from 0 to _n_. **Do this only using addition!**- _Hint:_ use an additional variable to keep track of the `total` value, and have that value increase by the number of times you've been through the loop each iteration! ###Code num = 0 count = 0 while count <= 14: num = count * (count + 1)/2 count += 1 print(int(num), end=",") ###Output 0,1,3,6,10,15,21,28,36,45,55,66,78,91,105, ###Markdown _Challenge_ Use a **`while`** loop to print out 20 numbers, each of which is larger than the previous by the the _sum_ of the **two** previous numbers (the [Fibonacci sequence](https://en.wikipedia.org/wiki/Fibonacci_number).- _Hint_: you'll need to keep track of the two previous values (start them at 0 and 1), and then "update" them each time through the loop, storing the "new total" in the first previous variable, and the first previous variable in the second (be careful about the ordering of this!) Use a **`while`** loop to print out a sequence of random numbers from 0 to 10, stopping after the number `4` is printed for the first time. You will need to import the `random` module. 
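###Code
# One possible solution sketch for the prompt above (the cell was left unanswered in the
# original notebook): print random numbers from 0 to 10 until a 4 is printed for the first time.
import random

number = random.randint(0, 10)
print(number, end=", ")
while number != 4:
    number = random.randint(0, 10)
    print(number, end=", ")
###Output _____no_output_____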
Modify the below "coin flipping" example from the course text so that it keeps flipping coins until you get two "heads" in a row. ###Code # flip a coin until it shows up heads still_flipping = True while still_flipping: flip = randint(0,1) if flip == 0: flip = "Heads" else: flip = "Tails" print(flip, end=", ") if flip == "Heads": still_flipping = False ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Define a new list **`words`** that contains each of the words to the following [song lyric](https://www.youtube.com/watch?v=StTqXEQ2l-Y) (don't include the notes):```♫ everything is awesome everything is cool when you are part of a TEEEEAM ♫```_Hint:_ use the `split()` string method. ###Code lyrics = "everything is awesome everything is cool when you are part of a TEEEEAM" words = lyrics.split() words ###Output _____no_output_____ ###Markdown Define a function **`abbreviate()`** that takes in a string as an argument and _returns_ the first letter of that string capitalized and followed by a period (`.`). For example:```pythonabbreviate("dog") returns "D."```_Hint:_ you can use bracket notation to access the first (0th) character of a string! ###Code def abbreviate(string): temp = string[0].upper() + '.' return temp abbreviate('Steve') ###Output _____no_output_____ ###Markdown Use the **`map()`** function to transform the list of lyric words into a list of abbreviations (technically an initialism). ###Code abbreviated_list = list(map(abbreviate, words)) abbreviated_list ###Output _____no_output_____ ###Markdown Use the **`filter()`** function to remove each word from the list of `words` that is 3 or fewer letters in length. You can define a named callback function, or use an anonymous lambda. ###Code def three_or_less(string): if len(string)>=3: return str list(filter(three_or_less, words)) ###Output _____no_output_____ ###Markdown **In a single statement**, use both the `map()` and `filter()` functions to get a list of the initials of the long words in the the `words` list. _Hint:_ try pasting your filtering code into your mapping code from the previous problems! ###Code small_list = list(map(abbreviate, (filter(three_or_less, words)))) small_list ###Output _____no_output_____ ###Markdown Use the **`reduce()`** function to combine the list of initials into a single string. You can define a named callback function, or use an anonymous lambda. Remember to `import` the `reduce()` function! _Note:_ normally you would do this with the `join()` string method. Use the **`reduce()`** function to get a list of _unique_ words in the original lyrics (not the initials).- Think about what to do with each "new" list element to include it in the aggregation (you can use an `if` statement to decide whether to include it or not!). Try defining an `add_if_unique()` callback function.- You will also need to use the optional third argument to [reduce()](https://docs.python.org/3/library/functools.htmlfunctools.reduce) to specify that the "aggregation" should start with an empty list `[]`.- We do this with `reduce()` instead of `filter()` because it allows us to more directly check the list beyond the specific element being considered. 
###Code def add_if_unique(a_list, element): if element in a_list: return a_list else: a_list.append(element) return a_list from functools import reduce unique = reduce(add_if_unique, words, []) print(unique) ###Output ['everything', 'is', 'awesome', 'cool', 'when', 'you', 'are', 'part', 'of', 'a', 'TEEEEAM'] ###Markdown Access the response's JSON content as a Python _list_. How many repositories does the organization have? (e.g., how many items are in the list?) ###Code data = response.json() type(data) #It's a list! print(len(data)) #just public stuff ##EXTRA: Find all the names of items in this list... ##How? We have a list of dictionaries. How do I get from a list of ##Dictionaries to a list of names? Map? List Comprehension? ##For each dictionary return the name [repo_dict['name'] for repo_dict in data] ###Output _____no_output_____ ###Markdown Create a variable **`search_endpoint`** that stores the endpoint used to search for repositories. ###Code ##Exercise in reading documentation ##What category? Check documentation ##Search? Seems good. Repositories? search_endpoint = "/search/repositories" ###Output _____no_output_____ ###Markdown Use the `search_endpoint` to search for repositories about _"visualization"_. Access the response's JSON content, saving that data in a variable (e.g., `vis_repos`). ###Code #In order to specify query paramaters, we'll need a # dictionary of those query params query_params = {'q': 'visualization'} response = requests.get(base_uri + search_endpoint, params = query_params) #Now let's get our viz repos from the response viz_repos = response.json() ##Q/A: This is searching titles and descriptions ###Output _____no_output_____ ###Markdown Print how many repositories your search returned. (Hint: yuo can first check the dictionary keys for the information you want). ###Code #is there a key that tells me the count of things? viz_repos.keys() print(viz_repos['total_count']) ###Output _____no_output_____ ###Markdown Print out the "full name" (i.e., the user and repo name) of the top result from your search. ###Code viz_repos['items'][0]['full_name'] ###Output _____no_output_____ ###Markdown _Challenge_: Use the GitHub API to get a list of the contributors to the **`python/cpython`** repository (the source code for the Python programming language!) and the number of commits they've made. This will be a really big response with lots of data! Check the schema description to understand how to navigate it. ###Code ##!!!!!!!!!!!!!!! ##Let them try this and then show them contrib_endpoint = '/repos/python/cpython/stats/contributors' response = requests.get(base_uri + contrib_endpoint) contributors = response.json() # print(contributors[0]) #Ask them... #What is the object of interest???????? print(contributors[0]['author']['login']) #etrepum ##IOPub data rate exceeded. ##https://github.com/jupyter/notebook/issues/2287 ###Output _____no_output_____ ###Markdown Who were the top 10 contributors in terms of total commits (and how many commits did each make)? _Hint:_ use a list comprehension to _map_ and _filter_ the list for the information you care about, then sort it in descending order by commit count, then output the top 10 items. ###Code ##An example of list comprension that does several things. #1. sorts #2. pulls in two data elements of interest contributor_counts = sorted([(item['total'], item['author']['login']) for item in contributors], reverse=True) top_ten = contributor_counts[:10] top_ten ##This "sorted" bit is important for the homework!!!! 
a5, I'll give another example in the hw help ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a dictionary **`rectangle`** that has a key `'width'` with a value of `100`, a key `'height'` with a value of `50`, a key `'x'` with a value of `25`, and a key `'y'` with a value of `40`. ###Code rectangle = { 'width' : 100, 'height' : 50, 'x' : 25, 'y' : 40 } ###Output _____no_output_____ ###Markdown Print the width of the rectangle (from the dictionary) ###Code print(rectangle['width']) ###Output 100 ###Markdown Print the x- and y- coordinates of the rectangle (its location). Your output should have the format `"X, Y"`. ###Code print(rectangle['x'], rectangle['y'], sep = ", ") ###Output 25, 40 ###Markdown Print the rectangle's area (based on the dictionary values). ###Code area = rectangle['width'] * rectangle['height'] print(area) ###Output 5000 ###Markdown "Move" the rectangle by increasing its x-coordinate by `20` and _decreasing_ its y-coordinate by `10`. Print out the rectangle to see its new location. ###Code rectangle['x'] = rectangle['x'] + 20 rectangle['y'] = rectangle['y'] - 10 rectangle ###Output _____no_output_____ ###Markdown Add a new key `'color'` to the rectangle with a value equal to your favorite color (as a string is fine). Output the rectangle to confirm your change. ###Code rectangle['color'] = "purple" rectangle ###Output _____no_output_____ ###Markdown Create a dictionary **`circle`** that has a `radius` of 35 and a `center` that is a **tuple** representing an x-coordinate of `59` and a y-coordinate of `80`. Output the circle. ###Code circle = { 'radius' : 35, 'center' : (59, 80) } circle # circle['center'][1] ###Output _____no_output_____ ###Markdown What is the distance between the circle's center and the rectangle's location? _Hint:_ use the [distance formula](http://www.mathwarehouse.com/algebra/distance_formula/index.php); the [math.hypot()](https://docs.python.org/3/library/math.htmlmath.hypot) can help. ###Code import math xdiff = circle['center'][0] - rectangle['x'] ydiff = circle['center'][1] - rectangle['y'] math.hypot(xdiff, ydiff) ###Output _____no_output_____ ###Markdown Define a function **`get_area()`** that takes in a dictionary representing a shape (such as _either_ a circle or a rectangle like you defined above) and returns the _area_ of that shape. You will need to determine whether the shape is a rectangle or a circle (e.g., whether it has a radius or a width/height). Calculate rectange area as $width*height$ and circle area as $\pi * radius^2$. ###Code def get_area(shape): ''' returns area of shape ''' if 'radius' in shape: return(math.pi * (shape['radius'] * shape['radius'])) else: return(shape['width'] * shape['height']) ###Output _____no_output_____ ###Markdown Use your `get_area()` function and print the areas of both your `rectangle` and your `circle`. ###Code get_area(circle) get_area(rectangle) ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `food` that stores your favorite kind of food. Print or output the variable. ###Code food = "bread" print(food) ###Output bread ###Markdown Create a variable `restaurant` that stores your favorite place to eat that kind of food. ###Code restaurant = "Mysore Bakery" print(restaurant) ###Output Mysore Bakery ###Markdown Print the message `"I'm going to RESTAURANT for some FOOD"`, replacing the restaurant and food with your variables. 
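Plain concatenation (as in the next cell) works fine; as a side note, the same message can also be built with `str.format()` or an f-string (f-strings need Python 3.6 or newer), assuming the `restaurant` and `food` variables from above:
###Code
# both lines produce the same message as the concatenation approach
print("I'm going to {} for some {}".format(restaurant, food))
print(f"I'm going to {restaurant} for some {food}")
###Output
_____no_output_____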
###Code print("I'm going to "+restaurant+" for some "+food) ###Output I'm going to Mysore Bakery for some bread ###Markdown Create a variable `num_friends` equal to the number of friends you would like to eat with. ###Code num_friends = 3 ###Output _____no_output_____ ###Markdown Print a message `"I'm going with X friends"`, replacing the X with the number of friends. ###Code print("I'm going with "+str(num_friends)+" friends") ###Output I'm going with 3 friends ###Markdown Create a variable `meal_price`, which is how expensive you think one meal at the restaurant would be. This price should be a `float`. ###Code meal_price = 9.99 ###Output _____no_output_____ ###Markdown Update (re-assign) the `meal_price` variable so it includes a 15% tip&mdash;that is, so the price is 15% higher. Output the variable. ###Code meal_price = 1.15 * meal_price print(meal_price) ###Output 11.4885 ###Markdown Create a variable `total_cost` that has the total estimated cost of the bill for you and all of your friends. Output or print the variable ###Code total_cost = num_friends * meal_price print(total_cost) ###Output 34.4655 ###Markdown Create a variable `budget` representing your spending budget for a night out. ###Code budget = 35 ###Output _____no_output_____ ###Markdown Create a variable `max_friends`, which is the maximum number of friends you can invite, at the estimated meal price, while staying within your budget. Output or print this value.- Be carefully that you only invite whole people! ###Code max_friends = int(budget/meal_price) print(max_friends) ###Output 3 ###Markdown Bonus: Create a variable `chorus` that is the string `"FOOD time!"` _repeated_ once for each of the friends you are able to bring. _Hint_ use the **`*`** operator. Print out the variable. ###Code chorus = food + " time! " print(chorus * max_friends) ###Output bread time! bread time! bread time! ###Markdown Modify the above cell so that each `"FOOD time!"` is on a separate line (_hint_: use a newline character!), then rerun the cell. ###Code chorus = food.upper() + " time! \n" print(chorus * max_friends) ###Output BREAD time! BREAD time! BREAD time! ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `num` that contains a random number between 1 and 20 (use the [random.randint()](https://docs.python.org/3/library/random.htmlrandom.randint) function). Print the number. ###Code import random num = random.randint(1,20) print(num) ###Output 9 ###Markdown Write a boolean expression that represents whether or not `num` is equal to 12. ###Code num == 4 ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether `num` is either less than 5 or greater than 15. (Note that in programming, we always assume "strictly greater/less than" unless otherwis specified). ###Code print(num < 5 or num > 15) ###Output False ###Markdown Write a boolean expression that represents whether `num` is between 3 and 18 (inclusive), but is _not_ equal to 11. (You can temporarily assign a non-random value to `num` to test this). ###Code print(num >=3 and num <=18 and num !=11) #or print((3 <= num <= 18) and num != 11) ###Output True True ###Markdown _Re-run the above cells multiple times to confirm that the logic holds for different random numbers!_ Write a boolean expression that represents whether `5.1 - 2.5` is equal to `2.6`.- What does this tell you about the precision of _floating point_ (decimal) operations in Python? _Tip:_ try printing the value of `5.1-2.6`. 
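A quick sketch of the call; `randint()` is inclusive on both ends, and seeding the generator first (the value 42 here is arbitrary) gives a repeatable number if you want to re-test the expressions below with a known value:
###Code
import random

# random.seed(42)   # uncomment for a repeatable result while testing
num = random.randint(1, 20)   # includes both 1 and 20
print(num)
###Output
_____no_output_____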
(See also: https://en.wikipedia.org/wiki/Floating-point_arithmetic) - How might you ensure that this equality is correct? ###Code # #vs w/o rounding print(round(5.1-2.5, 1)==2.6) print((5.1-2.5)==2.6) print(5.1-2.6) (5.1-2.5)==2.6 # #vs w/o rounding print(round(5.1-2.5, 1)==2.6) print((5.1-2.5)==2.6) print(5.1-2.6) (5.1-2.5)==2.6 ###Output True False 2.4999999999999996 ###Markdown Define a function `de_morgan()` that takes in two boolean values (`P` and `Q`) and _returns_ whether the [De Morgan laws](https://en.wikipedia.org/wiki/De_Morgan%27s_laws) hold for those two values. That is:> the negation of P **and** Q is the same as the negation of P **or** the negation of Q> and> the negation of P **or** Q is the same as the negation of P **and** the negation of QYour function should return a boolean representing whether or not _both_ of these statements are true for the given `P` and `Q`. _Use parentheses to enforce order of operations!_ ###Code def de_morgan(p, q): first_rule = (not (p and q)) == ((not p) or (not q)) second_rule = (not (p or q)) == ((not p) and (not q)) return (first_rule and second_rule) # print(de_morgan(True, True)) ###Output _____no_output_____ ###Markdown Call your `de_morgan()` function for each possible value of `P` and `Q` (there are **4** possible combinations), printing the result, in order to demonstrate that these laws hold. ###Code print(de_morgan(True, True)) print(de_morgan(True, False)) print(de_morgan(False, True)) print(de_morgan(False, False)) ###Output True True True True ###Markdown Use the built-in [input()](https://docs.python.org/3/library/functions.htmlinput) function to prompt the user for their favorite word.- This function will wait for the user to provide an input before it finished executing, and will then _return_ a string of whatever the user typed in. ###Code fave_word = input("What is your favorite word? ") print(fave_word) ###Output What is your favorite word? bologna bologna ###Markdown Write a boolean expression that represents if the inputted word has more than 8 letters. ###Code len(fave_word) > 8 ###Output _____no_output_____ ###Markdown Use the `islower()` string method to check if the inputted word was entered with all lower-case letters. ###Code fave_word.islower() ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether or not the word _starts with_ the letter `i` or _ends with_ the letter `n`. Use appropriate string methods. _Hint_: convert the word to a specific case (upper or lower) for the comparison. ###Code lower_word = fave_word.lower() lower_word.startswith('i') or lower_word.endswith('n') ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Write a **`while`** loop that prints out every 5th number (multiples of 5) from 0 to 100 (inclusive).- _Tip:_ use an **`end=','`** keyword argument to the `print()` function to print all the numbers on the same line. ###Code num = 0 while num <= 100: print(num, end=',') num = num +5 ###Output 0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100, ###Markdown Use a **`while`** loop to print out the first 15 [Triangular numbers](https://en.wikipedia.org/wiki/Triangular_number). This is a sequence of numbers for which the _nth_ value is the sum of the numbers from 0 to _n_. **Do this only using addition!**- _Hint:_ use an additional variable to keep track of the `total` value, and have that value increase by the number of times you've been through the loop each iteration! 
###Code num = 0 total = 0 while num < 16: total = num + total print(total, end=',') num = num + 1 ###Output 0,1,3,6,10,15,21,28,36,45,55,66,78,91,105,120, ###Markdown _Challenge_ Use a **`while`** loop to print out 20 numbers, each of which is larger than the previous by the the _sum_ of the **two** previous numbers (the [Fibonacci sequence](https://en.wikipedia.org/wiki/Fibonacci_number).- _Hint_: you'll need to keep track of the two previous values (start them at 0 and 1), and then "update" them each time through the loop, storing the "new total" in the first previous variable, and the first previous variable in the second (be careful about the ordering of this!) ###Code num = 0 prev1 = 0 prev2 = 1 fib = 0 while num < 21: fib = prev1 + prev2 print(fib, end=",") prev1 = prev2 prev2 = fib num = num + 1 ###Output 1,2,3,5,8,13,21,34,55,89,144,233,377,610,987,1597,2584,4181,6765,10946,17711, ###Markdown Use a **`while`** loop to print out a sequence of random numbers from 0 to 10, stopping after the number `4` is printed for the first time. You will need to import the `random` module. ###Code from random import randint num = 0 while num != 4: print(num) num = randint(0,10) print(num) ###Output 0 6 2 4 ###Markdown Modify the below "coin flipping" example from the course text so that it keeps flipping coins until you get two "heads" in a row. ###Code # flip a coin until it shows up heads still_flipping = True is_heads = False while still_flipping: flip = randint(0,1) if flip == 0: flip = "Heads" is_heads = True else: flip = "Tails" is_heads = False print(flip, end=", ") if flip == "Heads" and is_heads == True: still_flipping = False print(flip + str(is_heads)) ###Output Heads, HeadsTrue ###Markdown Define a function **`input_number()`** that takes a minimum and maximum value as arguments. This function should prompt the user to input a number within the range, repeating the prompt if the provided value is not acceptable. Once an acceptable value has been provided, the function should return that number. You can assume that the user-entered input will always be numeric.Be sure and call your function and print its results to test it! ###Code def input_number(min, max): num = 0 is_in_range = False while is_in_range == False: num = int(input("Input number within range, " + str(min) + "-" + str(max) + ": ")) if min <= num <= max: print(num) is_in_range = True return input_number(10,100) ###Output Input number within range, 10-100: 10000 Input number within range, 10-100: 2222 Input number within range, 10-100: 1 Input number within range, 10-100: 2 Input number within range, 10-100: 3 Input number within range, 10-100: 4 Input number within range, 10-100: 5 Input number within range, 10-100: 6 Input number within range, 10-100: 7 Input number within range, 10-100: 8 Input number within range, 10-100: 9 Input number within range, 10-100: 10 10 ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `my_name` that contains your name. ###Code my_name = "Julie" ###Output _____no_output_____ ###Markdown Create a variable `name_length` that holds how many letters are in your name. Print the number of letters. ###Code name_length = len(my_name) print(name_length) ###Output 5 ###Markdown Print out your name with the uppercase letters made lowercase, and the lowercase letters made uppercase. 
**Hint:** look for a [string method](https://docs.python.org/3/library/stdtypes.htmlstring-methods) that will modify the _case_ of the string.- Try to do this without creating a separate variable! Pick two of your favorite numbers (between 1 and 100) and assign them to `favorite_1` and `favorite_2` ###Code favorite_1 = 1 favorite_2 = 3 ###Output _____no_output_____ ###Markdown Divide each number by the length of your name raised to the power of `.598` (use the built-in `pow()` function for practice), and save it in the same variable. ###Code import math favorite_1 = favorite_1 / (math.pow(0.598, name_length)) favorite_2 = favorite_2 / (math.pow(0.598, name_length)) ###Output _____no_output_____ ###Markdown Create a variable `raw_sum` that is the sum of those two variables. Note you _cannot_ use the `sum()` function for this, so just use a normal operator! ###Code raw_sum = favorite_1 + favorite_2 raw_sum ###Output _____no_output_____ ###Markdown Create a variable `round_sum` that is the `raw_sum` rounded to 1 decimal place. Use the `round()` function. ###Code round_sum = round(raw_sum, 1) round_sum ###Output _____no_output_____ ###Markdown Create two new variables `rounded_1` and `rounded_2` that are your `favorite_1` and `favorite_2` variables rounded to 1 decimal place. Print them out on a single line (hint: pass them as two different arguments). ###Code round_1 = round(favorite_1, 1) round_2 = round(favorite_2, 1) print(round_1, round_2) ###Output 13.1 39.2 ###Markdown Create a variable `sum_round` that is the sum of the rounded values (use a normal math operator). ###Code sum_round = round_1 + round_2 sum_round ###Output _____no_output_____ ###Markdown Which is bigger, `round_sum` or `sum_round`? (You can use the `max()` function!) ###Code max(round_sum, sum_round) ###Output _____no_output_____ ###Markdown Create a variable `fruits` that contains the string `"apples and bananas"` ###Code fruits = "apples and bananas" ###Output _____no_output_____ ###Markdown Use the `replace()` function to substitute all the "a"s in `fruits` with "ee". Store the result in a variable called `fruits_e`. ###Code fruits_e = fruits.replace("a", "ee") ###Output _____no_output_____ ###Markdown Use the `replace()` function to substitute all the "a"s in `fruits` with "o". Store the result in a variable called `fruits_o`. ###Code fruits_o = fruits.replace("a", "o") ###Output _____no_output_____ ###Markdown Print out the string "I like to eat " followed by each of `fruits`, `fruits_e` and `fruits_o` (three sentences). ###Code print("I like to eat %s" % fruits) print("I like to eat %s" % fruits_e) print("I like to eat %s" % fruits_o) ###Output I like to eat apples and bananas I like to eat eepples eend beeneenees I like to eat opples ond bononos ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Write a **`while`** loop that prints out every 5th number (multiples of 5) from 0 to 100 (inclusive).- _Tip:_ use an **`end=','`** keyword argument to the `print()` function to print all the numbers on the same line. ###Code n = 0 while n <= 100: print(n, end=",") n = n + 5 ###Output 0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100, ###Markdown Use a **`while`** loop to print out the first 15 [Triangular numbers](https://en.wikipedia.org/wiki/Triangular_number). This is a sequence of numbers for which the _nth_ value is the sum of the numbers from 0 to _n_. 
**Do this only using addition!**- _Hint:_ use an additional variable to keep track of the `total` value, and have that value increase by the number of times you've been through the loop each iteration! ###Code total = 1 while total <= 15: t = int(total * (total + 1)/2) print(t, end=',') total = total + 1 ###Output 1,3,6,10,15,21,28,36,45,55,66,78,91,105,120, ###Markdown _Challenge_ Use a **`while`** loop to print out 20 numbers, each of which is larger than the previous by the the _sum_ of the **two** previous numbers (the [Fibonacci sequence](https://en.wikipedia.org/wiki/Fibonacci_number).- _Hint_: you'll need to keep track of the two previous values (start them at 0 and 1), and then "update" them each time through the loop, storing the "new total" in the first previous variable, and the first previous variable in the second (be careful about the ordering of this!) ###Code total = 1 n1 = 0 n2 = 1 while total <= 20: n2 = n1 + n2 n1 = n2 - n1 print(n2) total = total + 1 ###Output 1 2 3 5 8 13 21 34 55 89 144 233 377 610 987 1597 2584 4181 6765 10946 ###Markdown Use a **`while`** loop to print out a sequence of random numbers from 0 to 10, stopping after the number `4` is printed for the first time. You will need to import the `random` module. ###Code import random n = 0 while n != 4: n = random.randint(0, 10) print(n) ###Output 10 5 8 5 3 1 9 4 ###Markdown Modify the below "coin flipping" example from the course text so that it keeps flipping coins until you get two "heads" in a row. ###Code # flip a coin until it shows up heads twice in a row lastflip = "none" still_flipping = True while still_flipping: flip = random.randint(0,1) if flip == 0: flip = "Heads" else: flip = "Tails" print(flip, end=", ") if flip == "Heads" and lastflip == "Heads": still_flipping = False lastflip = flip ###Output Tails, Heads, Tails, Tails, Tails, Heads, Tails, Heads, Heads, ###Markdown Define a function **`input_number()`** that takes a minimum and maximum value as arguments. This function should prompt the user to input a number within the range, repeating the prompt if the provided value is not acceptable. Once an acceptable value has been provided, the function should return that number. You can assume that the user-entered input will always be numeric.Be sure and call your function and print its results to test it! ###Code def input_number(min, max): min = int(min) max = int(max) n = input("Enter a number within the range: ") n = int(n) while n < min or n > max: n = input("Enter a number within the range: ") n = int(n) return n input_number(7, 200) ###Output Enter a number within the range: 202 Enter a number within the range: 300 Enter a number within the range: -30 Enter a number within the range: 33 ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `my_name` that contains your name. ###Code my_name = "Emily" print(my_name) ###Output Emily ###Markdown Create a variable `name_length` that holds how many letters are in your name. Print the number of letters. ###Code name_length = 5 print(name_length) ###Output _____no_output_____ ###Markdown Print out your name with the uppercase letters made lowercase, and the lowercase letters made uppercase. **Hint:** look for a [string method](https://docs.python.org/3/library/stdtypes.htmlstring-methods) that will modify the _case_ of the string.- Try to do this without creating a separate variable! 
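A sketch of the usual method-call form (assuming `my_name` from above); `swapcase()` returns a new string, so the result can be printed directly without a separate variable:
###Code
print(my_name.swapcase())
###Output
_____no_output_____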
###Code my_name = "Emily" print(str.swapcase(my_name)) ###Output eMILY ###Markdown Pick two of your favorite numbers (between 1 and 100) and assign them to `favorite_1` and `favorite_2` ###Code favorite_1 = 29 favorite_2 = 99 ###Output _____no_output_____ ###Markdown Divide each number by the length of your name raised to the power of `.598` (use the built-in `pow()` function for practice), and save it in the same variable. ###Code favorite_1 = favorite_1/pow(name_length, .598) favorite_2 = favorite_2/pow(name_length, .598) print(favorite_1, favorite_2) ###Output 11.076790334491088 37.813870452228194 ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable **`deck`** that represents a deck of [playing cards](https://en.wikipedia.org/wiki/Playing_card). This variable should be a _list_ of 52 elements, each of which is a dictionary with the following keys:- `suit`, with a string value that is either `'hearts'`, `'diamonds'`, `'clubs'`, or `'spades'`.- `rank`, with an interger value ranging from 2 to 14 inclusive (11-14 represent a Jack, Queen, King, or Ace respectively)._Tip:_ use a pair of nested loops to add each combination of suit and rank to the `deck` list! ###Code ##First create empty list deck = [] ##Could create an intermediate suits var, but don't have to #suits = ['hearts', 'diamonds', 'clubs', 'spades'] for suit in ['hearts', 'diamonds', 'clubs', 'spades']: #I could write this out, or could use a loop to create #how?...Range! But 14 is not inclusive, so +1 for rank in range(2, 14+1): card = {'suit': suit, 'rank': rank} ##This is totally optional. Could just move this down to append! ##test it out! #print(card) ##Am I done? No, need to "put them in the list" ##How do I do that? append deck.append(card) deck ##Good example to follow for HW 4 ###Output _____no_output_____ ###Markdown Create a list **hand** that represents a hand of [Poker](https://en.wikipedia.org/wiki/Poker) (5-card draw). Add 5 cards from the `deck` to this list.- You can add 5 specific cards for testing (check their indices!), or use the `random.shuffle()` function to shuffle the deck and then _slice_ 5 cards from the top.Print out the `hand` so you know what you are dealing with! ###Code import random random.shuffle(deck) #Shuffle! runs in place, no need for variable #deck ##Test: PASS ##Grab first 5 cards = first 5 elements from list ##Means first five dictionaries hand = deck[0:5] hand ############################# ##STOP HERE ##Go back to slides and talk about searching ###Output _____no_output_____ ###Markdown Define a function **`contains_queen_of_hearts()`** that takes in a list of cards (e.g., a hand) and returns whether or not the Queen of Hearts is in that list. _For practice, don't use the `in` operator to check for containment_. ###Code ##Alright, your turn, take 5 min, try to solve ##Simple linear search example def contains_queen_of_hearts(list_of_cards): for card in list_of_cards: if card == {'suit': 'hearts', 'rank': 12}: return True ##here? #return False ##Nope, has to be outside the loop return False contains_queen_of_hearts(hand) ############################# ##STOP HERE ##Go back to slides and talk about biggestS ###Output _____no_output_____ ###Markdown Define a function **`get_high_card()`** that takes in a list of cards and returns the card (dictionary) of the highest value. The "high card" is the one with the highest rank. 
Cards of different suits but the same rank are considered to have the same value and either may be returned.- Hint: use a "king-of-the-hill" search! ###Code ##Take 5 minutes and try to complete it yourself def get_high_card(a_list): ##First, how do I find the rank? #highest_card = a_list[0]['rank'] #print(highest_card) highest_card = a_list[0] for card in a_list: if card['rank'] > highest_card['rank']: highest_card = card ##here?....No! #return highest_card return highest_card get_high_card(hand) ################STOP ##Go to the Are All Items Big Slide ###Output _____no_output_____ ###Markdown Define a function **`is_flush()`** that takes in a list of cards and returns whether or not the list represents a _flush_&mdash;that is, all of the cards have the same _suit_. ###Code ##One way: # def is_flush(a_list): # same_suit = True # prev_card = a_list[0] # for card in a_list: # if not card['suit'] == prev_card['suit']: # same_suit = False # break # else: # prev_card = card # return same_suit ##Other way: def is_flush(a_list): #First go through loop, then come back here target_suit = a_list[0]['suit'] is_flush = True #Actually, not necessary, can delete for card in a_list: ##if card['suit'] == ...what...go back and create if card['suit'] != target_suit: #is_flush = False #could include, could skip and just return false return False return True is_flush(hand) ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `food` that stores your favorite kind of food. Print or output the variable. ###Code food="pizza" print (food) restaurant="delancey" print (restaurant) ###Output delancey ###Markdown Create a variable `restaurant` that stores your favorite place to eat that kind of food. ###Code print ("i'm going to" ,(restaurant) ,"for some" ,(food)) ###Output i'm going to delancey for some pizza ###Markdown Print the message `"I'm going to RESTAURANT for some FOOD"`, replacing the restaurant and food with your variables. Create a variable `num_friends` equal to the number of friends you would like to eat with. ###Code number_friends=3 print (number_friends) ###Output 3 ###Markdown Print a message `"I'm going with X friends"`, replacing the X with the number of friends. ###Code print("I'm going with " + str(number_friends) + " friends") ###Output I'm going with 3 friends ###Markdown Create a variable `meal_price`, which is how expensive you think one meal at the restaurant would be. This price should be a `float`. ###Code meal_price=18.50 print (meal_price) ###Output 18.5 ###Markdown Update (re-assign) the `meal_price` variable so it includes a 15% tip&mdash;that is, so the price is 15% higher. Output the variable. ###Code meal_price=18.50 + 18.50*.15 print (meal_price) ###Output 21.275 ###Markdown Create a variable `total_cost` that has the total estimated cost of the bill for you and all of your friends. Output or print the variable ###Code total_cost=meal_price * number_friends print (total_cost) ###Output 63.824999999999996 ###Markdown Create a variable `budget` representing your spending budget for a night out. ###Code budget=150 print (budget) ###Output 150 ###Markdown Create a variable `max_friends`, which is the maximum number of friends you can invite, at the estimated meal price, while staying within your budget. Output or print this value.- Be carefully that you only invite whole people! 
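One detail worth sketching here (assuming the `budget` and `meal_price` variables from above): floor division with a float operand still returns a float (for example `7.0` rather than `7`), so wrapping the result in `int()` is one way to keep the count a whole number of people:
###Code
max_friends = int(budget // meal_price)   # // floors the result, int() drops the .0
print(max_friends)
###Output
_____no_output_____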
###Code max_friends=(budget)//(meal_price) print (max_friends) ###Output 7.0 ###Markdown Bonus: Create a variable `chorus` that is the string `"FOOD time!"` _repeated_ once for each of the friends you are able to bring. _Hint_ use the **`*`** operator. Print out the variable. ###Code chorus=((food.upper()) + " time!\n") * int (max_friends) print (chorus) ###Output PIZZA time! PIZZA time! PIZZA time! PIZZA time! PIZZA time! PIZZA time! PIZZA time! ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a dictionary **`rectangle`** that has a key `'width'` with a value of `100`, a key `'height'` with a value of `50`, a key `'x'` with a value of `25`, and a key `'y'` with a value of `40`. ###Code rectangle = {'width': 100, 'height':50, 'x': 25, 'y':40} ###Output _____no_output_____ ###Markdown Print the width of the rectangle (from the dictionary) ###Code print(rectangle['width']) ###Output 100 ###Markdown Print the x- and y- coordinates of the rectangle (its location). Your output should have the format `"X, Y"`. ###Code print(str(rectangle['x']) + ', ' + str(rectangle['y'])) ###Output 25, 40 ###Markdown Print the rectangle's area (based on the dictionary values). ###Code print(rectangle['width'] * rectangle['height']) ###Output 5000 ###Markdown "Move" the rectangle by increasing its x-coordinate by `20` and _decreasing_ its y-coordinate by `10`. Print out the rectangle to see its new location. ###Code rectangle['x'] = rectangle['x'] + 20 rectangle['y'] = rectangle['y'] - 10 print(str(rectangle['x']) + ", " + str(rectangle['y'])) ###Output 45, 30 ###Markdown Add a new key `'color'` to the rectangle with a value equal to your favorite color (as a string is fine). Output the rectangle to confirm your change. ###Code rectangle['color'] = 'blue' print(rectangle['color']) ###Output blue ###Markdown Create a dictionary **`circle`** that has a `radius` of 35 and a `center` that is a **tuple** representing an x-coordinate of `59` and a y-coordinate of `80`. Output the circle. ###Code circle = {'radius': 35, 'center': (59, 80)} print(circle) ###Output {'radius': 35, 'center': (59, 80)} ###Markdown What is the distance between the circle's center and the rectangle's location? _Hint:_ use the [distance formula](http://www.mathwarehouse.com/algebra/distance_formula/index.php); the [math.hypot()](https://docs.python.org/3/library/math.htmlmath.hypot) can help. Define a function **`get_area()`** that takes in a dictionary representing a shape (such as _either_ a circle or a rectangle like you defined above) and returns the _area_ of that shape. You will need to determine whether the shape is a rectangle or a circle (e.g., whether it has a radius or a width/height). Calculate rectange area as $width*height$ and circle area as $\pi * radius^2$. ###Code import math def get_area(shape): area = 0 if 'radius' in shape: area = math.pi * shape['radius'] * shape['radius'] else: area = shape['width'] * shape['height'] return(area) ###Output _____no_output_____ ###Markdown Use your `get_area()` function and print the areas of both your `rectangle` and your `circle`. ###Code print(get_area(rectangle)) print(get_area(circle)) ###Output 5000 3848.4510006474966 ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable **`deck`** that represents a deck of [playing cards](https://en.wikipedia.org/wiki/Playing_card). 
This variable should be a _list_ of 52 elements, each of which is a dictionary with the following keys:- `suit`, with a string value that is either `'hearts'`, `'diamonds'`, `'clubs'`, or `'spades'`.- `rank`, with an interger value ranging from 2 to 14 inclusive (11-14 represent a Jack, Queen, King, or Ace respectively)._Tip:_ use a pair of nested loops to add each combination of suit and rank to the `deck` list! ###Code #create a deck of cards deck = [] #start with an empty deck #suits = ['hearts', 'diamonds', 'clubs', 'spades'] #can write a loop for suit in ['hearts', 'diamonds', 'clubs', 'spades']: for rank in range (2, 15): #15 so that it is incluseive of 2-14 # card = {'suit' : suit, 'rank' : rank} #make a dict #print(card) deck.append(card)#put card into the empty dictionary by using append deck ###Output _____no_output_____ ###Markdown Create a list **hand** that represents a hand of [Poker](https://en.wikipedia.org/wiki/Poker) (5-card draw). Add 5 cards from the `deck` to this list.- You can add 5 specific cards for testing (check their indices!), or use the `random.shuffle()` function to shuffle the deck and then _slice_ 5 cards from the top.Print out the `hand` so you know what you are dealing with! ###Code #accsessing list with bracket notation import random random.shuffle(deck) #dealing with a list we always use bracket notation hand = deck[:5] hand ###Output _____no_output_____ ###Markdown Define a function **`contains_queen_of_hearts()`** that takes in a list of cards (e.g., a hand) and returns whether or not the Queen of Hearts is in that list. _For practice, don't use the `in` operator to check for containment_. ###Code def contains_queen_of_hearts(list_of_cards): for card in list_of_cards: if card =={'suit' : 'hearts', 'rank' : 12}: return True return False contains_queen_of_hearts(hand) ###Output _____no_output_____ ###Markdown Define a function **`get_high_card()`** that takes in a list of cards and returns the card (dictionary) of the highest value. The "high card" is the one with the highest rank. Cards of different suits but the same rank are considered to have the same value and either may be returned.- Hint: use a "king-of-the-hill" search! ###Code def get_high_card(list_of_cards): highest_card = a_list[0] for card in a_list: if card['rank'] > highest_card['rank']: highest_card = card return highest_card get_high_card(deck) ###Output _____no_output_____ ###Markdown Define a function **`is_flush()`** that takes in a list of cards and returns whether or not the list represents a _flush_&mdash;that is, all of the cards have the same _suit_. ###Code def is_flush(a_list): target_suit = a_list[0]['suit'] is_flush = True for card in a_list: if card['suit'] != target_suit: return False return True is_flush(hand) ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Write a **`while`** loop that prints out every 5th number (multiples of 5) from 0 to 100 (inclusive).- _Tip:_ use an **`end=','`** keyword argument to the `print()` function to print all the numbers on the same line. ###Code numbers = 0 while numbers <=100: print(numbers, end = ',') numbers = numbers + 5 ###Output 0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100, ###Markdown Use a **`while`** loop to print out the first 15 [Triangular numbers](https://en.wikipedia.org/wiki/Triangular_number). This is a sequence of numbers for which the _nth_ value is the sum of the numbers from 0 to _n_. 
**Do this only using addition!**- _Hint:_ use an additional variable to keep track of the `total` value, and have that value increase by the number of times you've been through the loop each iteration! ###Code nums = 0 total = 0 while nums <+15: total = total + nums print(total, end = ',') nums +=1 ###Output 0,1,3,6,10,15,21,28,36,45,55,66,78,91,105, ###Markdown _Challenge_ Use a **`while`** loop to print out 20 numbers, each of which is larger than the previous by the the _sum_ of the **two** previous numbers (the [Fibonacci sequence](https://en.wikipedia.org/wiki/Fibonacci_number).- _Hint_: you'll need to keep track of the two previous values (start them at 0 and 1), and then "update" them each time through the loop, storing the "new total" in the first previous variable, and the first previous variable in the second (be careful about the ordering of this!) Use a **`while`** loop to print out a sequence of random numbers from 0 to 10, stopping after the number `4` is printed for the first time. You will need to import the `random` module. Modify the below "coin flipping" example from the course text so that it keeps flipping coins until you get two "heads" in a row. ###Code # flip a coin until it shows up heads still_flipping = True while still_flipping: flip = randint(0,1) if flip == 0: flip = "Heads" else: flip = "Tails" print(flip, end=", ") if flip == "Heads": still_flipping = False ###Output _____no_output_____ ###Markdown Define a function **`input_number()`** that takes a minimum and maximum value as arguments. This function should prompt the user to input a number within the range, repeating the prompt if the provided value is not acceptable. Once an acceptable value has been provided, the function should return that number. You can assume that the user-entered input will always be numeric.Be sure and call your function and print its results to test it! ###Code def input_number(min, max): valid = False while not valid: number = int(input("pick a number between " + str(min) + " and " + str(max) + ": ")) if (min<=number<=max): valid = True print("great choice, that's within range") else print("invalid number... try again") return number input number(1,20) ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `num` that contains a random number between 1 and 20 (use the [random.randint()](https://docs.python.org/3/library/random.htmlrandom.randint) function). Print the number. ###Code import random num= random.randint(1, 20) print (num) ###Output 6 ###Markdown Write a boolean expression that represents whether or not `num` is equal to 12. ###Code num ==12 ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether `num` is either less than 5 or greater than 15. (Note that in programming, we always assume "strictly greater/less than" unless otherwis specified). ###Code num < 5 or num >15 ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether `num` is between 3 and 18 (inclusive), but is _not_ equal to 11. (You can temporarily assign a non-random value to `num` to test this). ###Code num>3 and num<18 num!=11 ###Output _____no_output_____ ###Markdown _Re-run the above cells multiple times to confirm that the logic holds for different random numbers!_ Write a boolean expression that represents whether `5.1 - 2.5` is equal to `2.6`.- What does this tell you about the precision of _floating point_ (decimal) operations in Python? 
_Tip:_ try printing the value of `5.1-2.6`. (See also: https://en.wikipedia.org/wiki/Floating-point_arithmetic) - How might you ensure that this equality is correct? Define a function `de_morgan()` that takes in two boolean values (`P` and `Q`) and _returns_ whether the [De Morgan laws](https://en.wikipedia.org/wiki/De_Morgan%27s_laws) hold for those two values. That is:> the negation of P **and** Q is the same as the negation of P **or** the negation of Q> and> the negation of P **or** Q is the same as the negation of P **and** the negation of QYour function should return a boolean representing whether or not _both_ of these statements are true for the given `P` and `Q`. _Use parentheses to enforce order of operations!_ Call your `de_morgan()` function for each possible value of `P` and `Q` (there are **4** possible combinations), printing the result, in order to demonstrate that these laws hold. Use the built-in [input()](https://docs.python.org/3/library/functions.htmlinput) function to prompt the user for their favorite word.- This function will wait for the user to provide an input before it finished executing, and will then _return_ a string of whatever the user typed in. ###Code fave_word=input("what is your favorite word? ") ###Output what is your favorite word? lugubrious ###Markdown Write a boolean expression that represents if the inputted word has more than 8 letters. ###Code len(fave_word) > 8 ###Output _____no_output_____ ###Markdown Use the `islower()` string method to check if the inputted word was entered with all lower-case letters. ###Code fave_word.islower() ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether or not the word _starts with_ the letter `i` or _ends with_ the letter `n`. Use appropriate string methods. _Hint_: convert the word to a specific case (upper or lower) for the comparison. ###Code str=fave_word print (str.startswith("i") or str.endswith("n")) ###Output False ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Define a new list **`words`** that contains each of the words to the following [song lyric](https://www.youtube.com/watch?v=StTqXEQ2l-Y) (don't include the notes):```♫ everything is awesome everything is cool when you are part of a TEEEEAM ♫```_Hint:_ use the `split()` string method. ###Code song = "everything is awesome everything is cool when you are part of a TEEEEAM" words = song.split() words ###Output _____no_output_____ ###Markdown Define a function **`abbreviate()`** that takes in a string as an argument and _returns_ the first letter of that string capitalized and followed by a period (`.`). For example:```pythonabbreviate("dog") returns "D."```_Hint:_ you can use bracket notation to access the first (0th) character of a string! ###Code def abbreviate(string): first_letter = string[0] a = first_letter.upper() + "." return a abbreviate("gog") ###Output _____no_output_____ ###Markdown Use the **`map()`** function to transform the list of lyric words into a list of abbreviations (technically an initialism). ###Code lyric_list = list(map(abbreviate, words)) print(lyric_list) ###Output ['E.', 'I.', 'A.', 'E.', 'I.', 'C.', 'W.', 'Y.', 'A.', 'P.', 'O.', 'A.', 'T.'] ###Markdown Use the **`filter()`** function to remove each word from the list of `words` that is 3 or fewer letters in length. You can define a named callback function, or use an anonymous lambda. 
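A sketch of the anonymous-lambda version (assuming the `words` list from earlier); the callback just needs to return a truthy value for the words to keep:
###Code
long_words = list(filter(lambda word: len(word) > 3, words))
print(long_words)
###Output
_____no_output_____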
###Code def remove_three(string): if len(string) > 3: return string print(list(filter(remove_three, words))) ###Output ['everything', 'awesome', 'everything', 'cool', 'when', 'part', 'TEEEEAM'] ###Markdown **In a single statement**, use both the `map()` and `filter()` functions to get a list of the initials of the long words in the the `words` list. _Hint:_ try pasting your filtering code into your mapping code from the previous problems! ###Code abbreviate_list = list(map(abbreviate, (filter(remove_three, words)))) abbreviate_list ###Output _____no_output_____ ###Markdown Use the **`reduce()`** function to combine the list of initials into a single string. You can define a named callback function, or use an anonymous lambda. Remember to `import` the `reduce()` function! _Note:_ normally you would do this with the `join()` string method. ###Code from functools import reduce def combine_string(string): string.join("a") return string.join("a") #combine_string() #reduce(combine_string, abbreviate_list) ###Output _____no_output_____ ###Markdown Use the **`reduce()`** function to get a list of _unique_ words in the original lyrics (not the initials).- Think about what to do with each "new" list element to include it in the aggregation (you can use an `if` statement to decide whether to include it or not!). Try defining an `add_if_unique()` callback function.- You will also need to use the optional third argument to [reduce()](https://docs.python.org/3/library/functools.htmlfunctools.reduce) to specify that the "aggregation" should start with an empty list `[]`.- We do this with `reduce()` instead of `filter()` because it allows us to more directly check the list beyond the specific element being considered. ###Code def add_if_unique(a_list, element): if element in a_list: return a_list else: a_list.append(element) return a_list test_list = ['a', 'b'] add_if_unique(test_list, "c") ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `food` that stores your favorite kind of food. Print or output the variable. ###Code food="rice" print (food) ###Output rice ###Markdown Create a variable `restaurant` that stores your favorite place to eat that kind of food. ###Code restaurant="ihop" print(restaurant) ###Output ihop ###Markdown Print the message `"I'm going to RESTAURANT for some FOOD"`, replacing the restaurant and food with your variables. ###Code print("I'm going to "+restaurant+" for some "+food) ###Output I'm going to ihop for some rice ###Markdown Create a variable `num_friends` equal to the number of friends you would like to eat with. ###Code num_friends=3 ###Output _____no_output_____ ###Markdown Print a message `"I'm going with X friends"`, replacing the X with the number of friends. ###Code message="I'm going with "+ (str(num_friends))+(" friends") print (message) ###Output I'm going with 3 friends ###Markdown Create a variable `meal_price`, which is how expensive you think one meal at the restaurant would be. This price should be a `float`. ###Code meal_price=4.5 ###Output _____no_output_____ ###Markdown Update (re-assign) the `meal_price` variable so it includes a 15% tip&mdash;that is, so the price is 15% higher. Output the variable. ###Code meal_price=meal_price*1.15 print(meal_price) ###Output 5.175 ###Markdown Create a variable `total_cost` that has the total estimated cost of the bill for you and all of your friends. 
Output or print the variable ###Code total_cost=205.00 print(total_cost) ###Output 205.0 ###Markdown Create a variable `budget` representing your spending budget for a night out. ###Code budget=50.00 max_friends=int(total_cost/budget) print (max_friends) ###Output 4 ###Markdown Create a variable `max_friends`, which is the maximum number of friends you can invite, at the estimated meal price, while staying within your budget. Output or print this value.- Be carefully that you only invite whole people! ###Code chorus=("Food time!\n")*max_friends ###Output _____no_output_____ ###Markdown Bonus: Create a variable `chorus` that is the string `"FOOD time!"` _repeated_ once for each of the friends you are able to bring. _Hint_ use the **`*`** operator. Print out the variable. ###Code print(chorus) ###Output Food time! Food time! Food time! Food time! ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Write a **`while`** loop that prints out every 5th number (multiples of 5) from 0 to 100 (inclusive).- _Tip:_ use an **`end=','`** keyword argument to the `print()` function to print all the numbers on the same line. ###Code nums = 0 while nums <= 100: print(nums, end = ',') nums = nums + 5 ###Output _____no_output_____ ###Markdown Use a **`while`** loop to print out the first 15 [Triangular numbers](https://en.wikipedia.org/wiki/Triangular_number). This is a sequence of numbers for which the _nth_ value is the sum of the numbers from 0 to _n_. **Do this only using addition!**- _Hint:_ use an additional variable to keep track of the `total` value, and have that value increase by the number of times you've been through the loop each iteration! ###Code total = 0 nums = 0 while nums <= 15: total = total + nums print(total, end = ',') nums = nums + 1 ###Output _____no_output_____ ###Markdown _Challenge_ Use a **`while`** loop to print out 20 numbers, each of which is larger than the previous by the the _sum_ of the **two** previous numbers (the [Fibonacci sequence](https://en.wikipedia.org/wiki/Fibonacci_number).- _Hint_: you'll need to keep track of the two previous values (start them at 0 and 1), and then "update" them each time through the loop, storing the "new total" in the first previous variable, and the first previous variable in the second (be careful about the ordering of this!) ###Code start = 0 #Not the best names next = 1 count = 0 while count < 20: print(next, end =',') #print initial 1 value. Print before calcs sum = start + next start = next next = sum count = count + 1 ###Output _____no_output_____ ###Markdown Use a **`while`** loop to print out a sequence of random numbers from 0 to 10, stopping after the number `4` is printed for the first time. You will need to import the `random` module. ###Code import random test = True while test: #The syntax "while test == True" isn't necessary here; works w/o it num =random.randint(0,11) print(num, end = ', ') if num == 4: test = False #An "else" statement isn't necessary here # else: # test = True ###Output _____no_output_____ ###Markdown Modify the below "coin flipping" example from the course text so that it keeps flipping coins until you get two "heads" in a row. 
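A sketch of one way to track the previous flip (the cell below takes essentially the same approach); starting `last_flip` at `None` means a single first "Heads" does not stop the loop:
###Code
import random

still_flipping = True
last_flip = None
while still_flipping:
    flip = "Heads" if random.randint(0, 1) == 0 else "Tails"
    print(flip, end=", ")
    if flip == "Heads" and last_flip == "Heads":
        still_flipping = False   # two "Heads" in a row, stop flipping
    last_flip = flip
###Output
_____no_output_____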
###Code # # flip a coin until it shows up heads # still_flipping = True # while still_flipping: # flip = randint(0,1) # if flip == 0: # flip = "Heads" # else: # flip = "Tails" # print(flip, end=", ") # if flip == "Heads": # still_flipping = False # flip a coin until it shows up heads twice import random still_flipping = True previous_flip = None while still_flipping: flip = random.randint(0,1) if flip == 0: flip = "Heads" else: flip = "Tails" print(flip, end=", ") if previous_flip == "Heads" and flip =="Heads": still_flipping = False previous_flip = flip ###Output _____no_output_____ ###Markdown Define a function **`input_number()`** that takes a minimum and maximum value as arguments. This function should prompt the user to input a number within the range, repeating the prompt if the provided value is not acceptable. Once an acceptable value has been provided, the function should return that number. You can assume that the user-entered input will always be numeric.Be sure and call your function and print its results to test it! ###Code def input_number(min, max): valid = False while (not valid): number = int(input("Pick a # between " + str(min)+ " and " + str(max)+ ": ")) if (min <= number <= max): valid = True print("Good Choice") else: print("Invalid number!") return number print(input_number (2,10)) ###Output Pick a # between 2 and 10: 1 Invalid number! 1 ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `num` that contains a random number between 1 and 20 (use the [random.randint()](https://docs.python.org/3/library/random.htmlrandom.randint) function). Print the number. ###Code import random num = random.randint(1,20) num ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether or not `num` is equal to 12. ###Code num == 12 ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether `num` is either less than 5 or greater than 15. (Note that in programming, we always assume "strictly greater/less than" unless otherwis specified). ###Code num < 5 or num > 15 ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether `num` is between 3 and 18 (inclusive), but is _not_ equal to 11. (You can temporarily assign a non-random value to `num` to test this). ###Code num <= 18 and num >= 3 and num != 11 ###Output _____no_output_____ ###Markdown _Re-run the above cells multiple times to confirm that the logic holds for different random numbers!_ Write a boolean expression that represents whether `5.1 - 2.5` is equal to `2.6`.- What does this tell you about the precision of _floating point_ (decimal) operations in Python? _Tip:_ try printing the value of `5.1-2.6`. (See also: https://en.wikipedia.org/wiki/Floating-point_arithmetic) - How might you ensure that this equality is correct? ###Code 5.1 - 2.5 == 2.6 round(5.1 - 2.5,2) == 2.6 ###Output _____no_output_____ ###Markdown Define a function `de_morgan()` that takes in two boolean values (`P` and `Q`) and _returns_ whether the [De Morgan laws](https://en.wikipedia.org/wiki/De_Morgan%27s_laws) hold for those two values. That is:> the negation of P **and** Q is the same as the negation of P **or** the negation of Q> and> the negation of P **or** Q is the same as the negation of P **and** the negation of QYour function should return a boolean representing whether or not _both_ of these statements are true for the given `P` and `Q`. 
_Use parentheses to enforce order of operations!_ ###Code def de_morgan(P, Q): return (not P and Q) == (not P or not Q) and (not P or Q == (not P) and not Q) ###Output _____no_output_____ ###Markdown Call your `de_morgan()` function for each possible value of `P` and `Q` (there are **4** possible combinations), printing the result, in order to demonstrate that these laws hold. ###Code print(de_morgan(True, True)) print(de_morgan(True, False)) print(de_morgan(False, True)) print(de_morgan(False, False)) ###Output False False True False ###Markdown Use the built-in [input()](https://docs.python.org/3/library/functions.htmlinput) function to prompt the user for their favorite word.- This function will wait for the user to provide an input before it finished executing, and will then _return_ a string of whatever the user typed in. ###Code word = input() ###Output helloworld ###Markdown Write a boolean expression that represents if the inputted word has more than 8 letters. ###Code len(word) > 8 ###Output _____no_output_____ ###Markdown Use the `islower()` string method to check if the inputted word was entered with all lower-case letters. ###Code word.islower() ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether or not the word _starts with_ the letter `i` or _ends with_ the letter `n`. Use appropriate string methods. _Hint_: convert the word to a specific case (upper or lower) for the comparison. ###Code word.lower().startswith('i') or word.endswith('n') ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Write a **`while`** loop that prints out every 5th number (multiples of 5) from 0 to 100 (inclusive).- _Tip:_ use an **`end=','`** keyword argument to the `print()` function to print all the numbers on the same line. ###Code my_num = 0 while my_num <= 100: print(my_num, end=',') my_num = my_num + 5 ###Output 0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100, ###Markdown Use a **`while`** loop to print out the first 15 [Triangular numbers](https://en.wikipedia.org/wiki/Triangular_number). This is a sequence of numbers for which the _nth_ value is the sum of the numbers from 0 to _n_. **Do this only using addition!**- _Hint:_ use an additional variable to keep track of the `total` value, and have that value increase by the number of times you've been through the loop each iteration! ###Code total = 0 count = 0 while count <= 15: total = total + count print(total, end=',') count = count + 1 ###Output 0,1,3,6,10,15,21,28,36,45,55,66,78,91,105,120, ###Markdown _Challenge_ Use a **`while`** loop to print out 20 numbers, each of which is larger than the previous by the the _sum_ of the **two** previous numbers (the [Fibonacci sequence](https://en.wikipedia.org/wiki/Fibonacci_number).- _Hint_: you'll need to keep track of the two previous values (start them at 0 and 1), and then "update" them each time through the loop, storing the "new total" in the first previous variable, and the first previous variable in the second (be careful about the ordering of this!) ###Code count = 0 fib1 = 0 fib2 = 1 while count <= 20: fib = fib1 + fib2 fib2 = fib1 fib1 = fib print(fib, end=',') count = count + 1 ###Output 1,1,2,3,5,8,13,21,34,55,89,144,233,377,610,987,1597,2584,4181,6765,10946, ###Markdown Use a **`while`** loop to print out a sequence of random numbers from 0 to 10, stopping after the number `4` is printed for the first time. You will need to import the `random` module. 
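As a sketch, a "loop and break" form also works here (the condition-variable version in the next cell is just as good):
###Code
import random

while True:
    num = random.randint(0, 10)
    print(num, end=", ")
    if num == 4:
        break   # stop after the first 4 is printed
###Output
_____no_output_____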
###Code import random cur = None while cur != 4: cur = random.randint(0, 10) print(cur, end=',') ###Output 10,9,2,10,5,8,9,5,4, ###Markdown Modify the below "coin flipping" example from the course text so that it keeps flipping coins until you get two "heads" in a row. ###Code # flip a coin until it shows up heads still_flipping = True last_flip = None while still_flipping: flip = random.randint(0,1) if flip == 0: flip = "Heads" else: flip = "Tails" print("Flip: ", flip, "Last flip: ", last_flip, end="\n") if flip == "Heads" and last_flip == "Heads": still_flipping = False last_flip = flip ###Output Flip: Heads Last flip: None Flip: Tails Last flip: Heads Flip: Heads Last flip: Tails Flip: Tails Last flip: Heads Flip: Tails Last flip: Tails Flip: Heads Last flip: Tails Flip: Heads Last flip: Heads ###Markdown Define a function **`input_number()`** that takes a minimum and maximum value as arguments. This function should prompt the user to input a number within the range, repeating the prompt if the provided value is not acceptable. Once an acceptable value has been provided, the function should return that number. You can assume that the user-entered input will always be numeric.Be sure and call your function and print its results to test it! ###Code def input_number(low, high): got_it = False while not got_it: user_input = int(input("Enter a value in the range:"+str(low)+" to "+str(high))) if (low <= user_input <= high): print("Got it!") got_it = True else: print("Try again!") return user_input input_number(low = 10, high = 20) ###Output Enter a value in the range:10 to 2021 Try again! Enter a value in the range:10 to 2015 Got it! ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. In this exercise, you'll be accessing data from the [Github API](https://developer.github.com/v3/). You will need to read the documentation at **** in order to discover appropriate endpoints, their parameters, and their schemas. (Categories of endpoints are listed on the right, with multiple endpoints described per page). First, import the **`requests`** module for accessing web data. ###Code import requests ###Output _____no_output_____ ###Markdown Create a variable **`base_uri`** that stores the base URI (as a string) for the Github API (`https://api.github.com`) ###Code base_uri = "https://api.github.com" ###Output _____no_output_____ ###Markdown Under the "Repositories" category of the API documentation, find the endpoint that will list _repos in an organization._ Then create a variable **`org_resource`** that stores the endpoint for the `infx511` organization repos (this is the _path_ to the resource of interest).- (FYI: this is where we keep the book code and master exercise sets!) ###Code org_resource = "/orgs/imt511a-wi19/repos" ###Output _____no_output_____ ###Markdown Send a `get()` request to this endpoint (the `base_uri` followed by the `org_resource` path). Print the response's `status_code` and `url` to show that your request worked worked. (This will also allow you to inspect the JSON in the browser easily). ###Code response = requests.get(base_uri+org_resource) print(response.status_code) print(response.url) ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `num` that contains a random number between 1 and 20 (use the [random.randint()](https://docs.python.org/3/library/random.htmlrandom.randint) function). Print the number. 
###Code import random num = random.randint(1, 20) ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether or not `num` is equal to 12. ###Code num == 12 ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether `num` is either less than 5 or greater than 15. (Note that in programming, we always assume "strictly greater/less than" unless otherwis specified). ###Code (num < 5) or (num > 15) ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether `num` is between 3 and 18 (inclusive), but is _not_ equal to 11. (You can temporarily assign a non-random value to `num` to test this). ###Code 3 <= num <= 18 and num != 11 ###Output _____no_output_____ ###Markdown _Re-run the above cells multiple times to confirm that the logic holds for different random numbers!_ Write a boolean expression that represents whether `5.1 - 2.5` is equal to `2.6`.- What does this tell you about the precision of _floating point_ (decimal) operations in Python? _Tip:_ try printing the value of `5.1-2.6`. (See also: https://en.wikipedia.org/wiki/Floating-point_arithmetic) - How might you ensure that this equality is correct? ###Code 5.1 - 2.5 == 2.6 #returns False #this is because floats are not exact print(5.1 - 2.5) #try rounding so that this yields True print(round(5.1-2.5, 1)) # returns 2.6 round(5.1 - 2.5, 1) == 2.6 #works ###Output 2.5999999999999996 2.6 ###Markdown Define a function `de_morgan()` that takes in two boolean values (`P` and `Q`) and _returns_ whether the [De Morgan laws](https://en.wikipedia.org/wiki/De_Morgan%27s_laws) hold for those two values. That is:> the negation of P **and** Q is the same as the negation of P **or** the negation of Q> and> the negation of P **or** Q is the same as the negation of P **and** the negation of QYour function should return a boolean representing whether or not _both_ of these statements are true for the given `P` and `Q`. _Use parentheses to enforce order of operations!_ ###Code def de_morgan(P, Q): output_1 = P and not Q print(output_1) output_2 = not P and Q print(output_2) output_3 = not(P and Q) print(output_3) output_4 = (not P) or (not Q) print(output_4) ###Output _____no_output_____ ###Markdown Call your `de_morgan()` function for each possible value of `P` and `Q` (there are **4** possible combinations), printing the result, in order to demonstrate that these laws hold. ###Code P=True Q=False de_morgan (P, Q) ###Output True False True True ###Markdown Use the built-in [input()](https://docs.python.org/3/library/functions.htmlinput) function to prompt the user for their favorite word.- This function will wait for the user to provide an input before it finished executing, and will then _return_ a string of whatever the user typed in. ###Code fav_word = input("Enter your favorite word: ") ###Output Enter your favorite word: omnipotent ###Markdown Write a boolean expression that represents if the inputted word has more than 8 letters. ###Code len(fav_word) > 8 ###Output _____no_output_____ ###Markdown Use the `islower()` string method to check if the inputted word was entered with all lower-case letters. ###Code fav_word.islower() ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether or not the word _starts with_ the letter `i` or _ends with_ the letter `n`. Use appropriate string methods. _Hint_: convert the word to a specific case (upper or lower) for the comparison. 
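###Markdown One more option for the floating-point comparison earlier in this exercise: rather than rounding both sides, the standard library's `math.isclose()` compares two floats within a tolerance. A small sketch: ###Code
import math

# compare the values within a tolerance instead of rounding them first
close_enough = math.isclose(5.1 - 2.5, 2.6)  # True
###Output
_____no_output_____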
###Code fav_word == fav_word.endswith("n") or fav_word.startswith("i") ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable **`names`** that is a list containing the names of 2 people next to you. ###Code name = ["Fatima, Clair"] print(name) ###Output ['Fatima, Clair'] ###Markdown Create a second variable **`absent`** that is an empty list. Then add two names of people who you _wish_ were next to you. ###Code absent = ["kelsey, stef"] print(absent) ###Output ['kelsey, stef'] ###Markdown Create a variable **`all_names`** that is the `names` and `absent` lists added together. Output the list. ###Code all_names = ["Fatima, Clair"] + ["kelsey, stef"] print(all_names) ###Output ['Fatima, Clair', 'kelsey, stef'] ###Markdown What happens if you try and `append()` the `absent` list to the `names` list? Print the resulting `names` variable, as well as the length of that list. Is it what you expect?- Be careful about running this cell multiple times; you can always **Reset the Kernel** if needed. ###Code name.append(absent) print(name) print(len(name)) ###Output ['Fatima, Clair', ['kelsey, stef']] 2 ###Markdown Add _your name_ at the **beginning** of the `all_names` list. _Hint_: find an appropriate [list method](https://docs.python.org/3/tutorial/datastructures.htmlmore-on-lists). ###Code all_names.append('aj') print(all_names) ###Output ['Fatima, Clair', 'kelsey, stef', 'aj', 'aj'] ###Markdown Use the `range()` function to create a sequence of numbers from 10 to 99. Then use the [random.sample()](https://docs.python.org/3/library/random.htmlrandom.sample) to produce a list of 11 random numbers from that range. Store that list in a variable called **`numbers`**, and print the list so you know what it is. ###Code import random numbers = random.sample(range(10, 99), 11) numbers ###Output _____no_output_____ ###Markdown Find the [sum](https://docs.python.org/3/library/functions.htmlsum) of the `numbers` list (note that this function _only_ works for sequences!) ###Code sum(numbers) ###Output _____no_output_____ ###Markdown [Sort](https://wiki.python.org/moin/HowTo/Sorting) the `numbers` list. ###Code sorted(numbers) ###Output _____no_output_____ ###Markdown Print the _last_ element of the `numbers` list (which should now be the biggest) ###Code numbers[-1] ###Output _____no_output_____ ###Markdown Create a list **`first_three`** that contains the first three elements of the `numbers` list. Use the _slice_ (**`:`**) operator. ###Code first_three = numbers[:3] first_three ###Output _____no_output_____ ###Markdown Replace the first three elements in `numbers` with values that are _double_ (2x) the original value. You can use 3 assignment statements (or just 1 if you are tricky!) ###Code #numbers[0] = numbers[0] * 2 #numbers[1] = numbers[1] * 2 #numbers[2] = numbers[2] * 2 numbers[:3] = numbers[:3] *2 print(numbers) ###Output [48, 26, 46, 48, 26, 46, 24, 13, 23, 30, 35, 50, 53, 60, 71, 76, 92] ###Markdown Create a list **`middle_three`** that contains the three elements in the _middle_ of the list (these need not be the three median values). _Hint:_ calculate the middle index. 
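###Markdown An aside on the `names`/`absent` questions above: `append()` adds its argument as a single element (which is why a nested list appeared), while `extend()` merges in the elements, and `insert(0, ...)` is the list method the "beginning of the list" hint points at. A short sketch with illustrative values: ###Code
names = ["Fatima", "Clair"]
absent = ["Kelsey", "Stef"]

names.extend(absent)   # ['Fatima', 'Clair', 'Kelsey', 'Stef'] -- elements merged in
names.insert(0, "Me")  # adds a single value at the front of the list
###Output
_____no_output_____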
###Code middle_index = len(numbers)//2 middle_three = numbers[middle_index-1:middle_index+2] middle_three def delete_first(a_list): a_list = a_list[1:] # creates new local var (w/in function only) letters = ['a','b','c'] delete_first(letters) print(letters) ###Output ['a', 'b', 'c'] ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `my_name` that contains your name. ###Code my_name = "Amanda" ###Output _____no_output_____ ###Markdown Create a variable `name_length` that holds how many letters are in your name. Print the number of letters. ###Code name_length = len(my_name) print(name_length) ###Output 6 ###Markdown Print out your name with the uppercase letters made lowercase, and the lowercase letters made uppercase. **Hint:** look for a [string method](https://docs.python.org/3/library/stdtypes.htmlstring-methods) that will modify the _case_ of the string.- Try to do this without creating a separate variable! ###Code print(my_name.swapcase()) ###Output aMANDA ###Markdown Pick two of your favorite numbers (between 1 and 100) and assign them to `favorite_1` and `favorite_2` ###Code favorite_1 = 22 favorite_2 = 44 ###Output _____no_output_____ ###Markdown Divide each number by the length of your name raised to the power of `.598` (use the built-in `pow()` function for practice), and save it in the same variable. ###Code favorite_1 = favorite_1/pow (name_length, 0.598) favorite_2 = favorite_1/pow (name_length, 0.598) ###Output _____no_output_____ ###Markdown Create a variable `raw_sum` that is the sum of those two variables. Note you _cannot_ use the `sum()` function for this, so just use a normal operator! ###Code raw_sum = favorite_1 + favorite_2 ###Output _____no_output_____ ###Markdown Create a variable `round_sum` that is the `raw_sum` rounded to 1 decimal place. Use the `round()` function. ###Code round_sum = round(raw_sum, 1) ###Output _____no_output_____ ###Markdown Create two new variables `rounded_1` and `rounded_2` that are your `favorite_1` and `favorite_2` variables rounded to 1 decimal place. Print them out on a single line (hint: pass them as two different arguments). ###Code rounded_1 = round(favorite_1,1) rounded_2 = round(favorite_2,1) print(rounded_1, rounded_2) ###Output 7.5 2.6 ###Markdown Create a variable `sum_round` that is the sum of the rounded values (use a normal math operator). ###Code sum_round = rounded_1 + rounded_2 ###Output _____no_output_____ ###Markdown Which is bigger, `round_sum` or `sum_round`? (You can use the `max()` function!) ###Code print(max(rounded_1, rounded_2)) ###Output 7.5 ###Markdown Create a variable `fruits` that contains the string `"apples and bananas"` ###Code fruits = "apples and bananas" ###Output _____no_output_____ ###Markdown Use the `replace()` function to substitute all the "a"s in `fruits` with "ee". Store the result in a variable called `fruits_e`. ###Code fruits_e = fruits.replace("a", "ee") ###Output _____no_output_____ ###Markdown Use the `replace()` function to substitute all the "a"s in `fruits` with "o". Store the result in a variable called `fruits_o`. ###Code fruits_o = fruits.replace("a", "o") ###Output _____no_output_____ ###Markdown Print out the string "I like to eat " followed by each of `fruits`, `fruits_e` and `fruits_o` (three sentences). 
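###Markdown A quick aside on the `delete_first()` example above: reassigning the parameter only rebinds a local name, so the caller's list is untouched. Mutating the list in place (for example with `pop()`) does change it for the caller. A minimal sketch: ###Code
def delete_first_in_place(a_list):
    a_list.pop(0)  # mutates the very list object the caller passed in

letters = ['a', 'b', 'c']
delete_first_in_place(letters)
# letters is now ['b', 'c']
###Output
_____no_output_____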
###Code print("I like to eat " + fruits) print("I like to eat " + fruits_e) print("I like to eat " + fruits_o) ###Output I like to eat apples and bananas I like to eat eepples eend beeneenees I like to eat opples ond bononos ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `my_name` that contains your name. ###Code my_name = "CLUNT, Python Queen" print(my_name) ###Output CLUNT, Python Queen ###Markdown Create a variable `name_length` that holds how many letters are in your name. Print the number of letters. ###Code name_length = len(my_name) print(name_length) ###Output 19 ###Markdown Print out your name with the uppercase letters made lowercase, and the lowercase letters made uppercase. **Hint:** look for a [string method](https://docs.python.org/3/library/stdtypes.htmlstring-methods) that will modify the _case_ of the string.- Try to do this without creating a separate variable! ###Code print(str.lower(my_name)) ###Output clunt, python queen ###Markdown Pick two of your favorite numbers (between 1 and 100) and assign them to `favorite_1` and `favorite_2` ###Code favorite_1 = 1 favorite_2 = 99 ###Output _____no_output_____ ###Markdown Divide each number by the length of your name raised to the power of `.598` (use the built-in `pow()` function for practice), and save it in the same variable. ###Code OneProblem = favorite_1 / pow(name_length, .598) NinetyNineProblems = favorite_2 / pow(name_length, .598) print(OneProblem) print(NinetyNineProblems) ###Output 0.17191166502297733 17.019254837274758 ###Markdown Create a variable `raw_sum` that is the sum of those two variables. Note you _cannot_ use the `sum()` function for this, so just use a normal operator! ###Code raw_sum = OneProblem + NinetyNineProblems print(raw_sum) ###Output 17.191166502297737 ###Markdown Create a variable `round_sum` that is the `raw_sum` rounded to 1 decimal place. Use the `round()` function. ###Code round_sum = round(raw_sum) print(round_sum) ###Output 17 ###Markdown Create two new variables `rounded_1` and `rounded_2` that are your `favorite_1` and `favorite_2` variables rounded to 1 decimal place. Print them out on a single line (hint: pass them as two different arguments). ###Code rounded_1 = round(favorite_1) rounded_2 = round(favorite_2) print(rounded_1, rounded_2) ###Output 1 99 ###Markdown Create a variable `sum_round` that is the sum of the rounded values (use a normal math operator). ###Code sum_round = rounded_1 + rounded_2 print(sum_round) ###Output 100 ###Markdown Which is bigger, `round_sum` or `sum_round`? (You can use the `max()` function!) ###Code max(round_sum, sum_round) ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `food` that stores your favorite kind of food. Print or output the variable. ###Code food = "Steak" ###Output _____no_output_____ ###Markdown Create a variable `restaurant` that stores your favorite place to eat that kind of food. ###Code restaurant = "Steakhouse" ###Output _____no_output_____ ###Markdown Print the message `"I'm going to RESTAURANT for some FOOD"`, replacing the restaurant and food with your variables. ###Code print("I'm going to", restaurant, "for some", food) ###Output I'm going to Steakhouse for some Steak ###Markdown Create a variable `num_friends` equal to the number of friends you would like to eat with. 
###Code num_friends = 3 ###Output _____no_output_____ ###Markdown Print a message `"I'm going with X friends"`, replacing the X with the number of friends. ###Code print("I'm going with", num_friends, "friends") ###Output I'm going with 3 friends ###Markdown Create a variable `meal_price`, which is how expensive you think one meal at the restaurant would be. This price should be a `float`. ###Code meal_price = 150 ###Output _____no_output_____ ###Markdown Update (re-assign) the `meal_price` variable so it includes a 15% tip&mdash;that is, so the price is 15% higher. Output the variable. ###Code meal_price = round(meal_price*1.15, ) print(meal_price) ###Output 172 ###Markdown Create a variable `total_cost` that has the total estimated cost of the bill for you and all of your friends. Output or print the variable ###Code total_cost = round(meal_price*num_friends, 2) print(total_cost) ###Output 516 ###Markdown Create a variable `budget` representing your spending budget for a night out. ###Code budget = float(1000) ###Output _____no_output_____ ###Markdown Create a variable `max_friends`, which is the maximum number of friends you can invite, at the estimated meal price, while staying within your budget. Output or print this value.- Be carefully that you only invite whole people! ###Code max_friends = int(budget/meal_price) print(max_friends) ###Output 5 ###Markdown Bonus: Create a variable `chorus` that is the string `"FOOD time!"` _repeated_ once for each of the friends you are able to bring. _Hint_ use the **`*`** operator. Print out the variable. ###Code chorus = food.upper()+" time!\n" print(chorus*max_friends) ###Output STEAK time! STEAK time! STEAK time! STEAK time! STEAK time! ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `my_name` that contains your name. ###Code my_name="Yufei Li" ###Output _____no_output_____ ###Markdown Create a variable `name_length` that holds how many letters are in your name. Print the number of letters. ###Code name_length=len(my_name) name_length ###Output _____no_output_____ ###Markdown Print out your name with the uppercase letters made lowercase, and the lowercase letters made uppercase. **Hint:** look for a [string method](https://docs.python.org/3/library/stdtypes.htmlstring-methods) that will modify the _case_ of the string.- Try to do this without creating a separate variable! ###Code my_name=my_name.swapcase() ###Output _____no_output_____ ###Markdown Pick two of your favorite numbers (between 1 and 100) and assign them to `favorite_1` and `favorite_2` ###Code favorite_1=44 favorite_2=64 ###Output _____no_output_____ ###Markdown Divide each number by the length of your name raised to the power of `.598` (use the built-in `pow()` function for practice), and save it in the same variable. ###Code favorite_1=pow(favorite_1/name_length,0.598) favorite_2=pow(favorite_2/name_length,0.598) ###Output _____no_output_____ ###Markdown Create a variable `raw_sum` that is the sum of those two variables. Note you _cannot_ use the `sum()` function for this, so just use a normal operator! ###Code raw_sum=favorite_1+favorite_2 ###Output _____no_output_____ ###Markdown Create a variable `round_sum` that is the `raw_sum` rounded to 1 decimal place. Use the `round()` function. ###Code round_sum=round(raw_sum,1) ###Output _____no_output_____ ###Markdown Create two new variables `rounded_1` and `rounded_2` that are your `favorite_1` and `favorite_2` variables rounded to 1 decimal place. 
Print them out on a single line (hint: pass them as two different arguments). ###Code rounded_1=round(favorite_1,1) rounded_2=round(favorite_2,1) print(rounded_1,rounded_2) ###Output _____no_output_____ ###Markdown Create a variable `sum_round` that is the sum of the rounded values (use a normal math operator). ###Code sum_round=rounded_1+rounded_2 ###Output _____no_output_____ ###Markdown Which is bigger, `round_sum` or `sum_round`? (You can use the `max()` function!) ###Code max(round_sum,sum_round) ###Output _____no_output_____ ###Markdown Create a variable `fruits` that contains the string `"apples and bananas"` ###Code fruits="apples and bananas" ###Output _____no_output_____ ###Markdown Use the `replace()` function to substitute all the "a"s in `fruits` with "ee". Store the result in a variable called `fruits_e`. ###Code fruits_e=fruits.replace("a","ee") ###Output _____no_output_____ ###Markdown Use the `replace()` function to substitute all the "a"s in `fruits` with "o". Store the result in a variable called `fruits_o`. ###Code fruits_o=fruits.replace("a","o") ###Output _____no_output_____ ###Markdown Print out the string "I like to eat " followed by each of `fruits`, `fruits_e` and `fruits_o` (three sentences). ###Code print("I like to eat", fruits) print("I like to eat", fruits_e) print("I like to eat", fruits_o) ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Write a **`while`** loop that prints out every 5th number (multiples of 5) from 0 to 100 (inclusive).- _Tip:_ use an **`end=','`** keyword argument to the `print()` function to print all the numbers on the same line. ###Code n = 0 while n <= 100: n += 5 print(n, end = ', ') ###Output 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100, 105, ###Markdown Use a **`while`** loop to print out the first 15 [Triangular numbers](https://en.wikipedia.org/wiki/Triangular_number). This is a sequence of numbers for which the _nth_ value is the sum of the numbers from 0 to _n_. **Do this only using addition!**- _Hint:_ use an additional variable to keep track of the `total` value, and have that value increase by the number of times you've been through the loop each iteration! ###Code n=0 total=0 while n<=15: total=total+n print(total,end=",") n+=1 ###Output 0,1,3,6,10,15,21,28,36,45,55,66,78,91,105,120, ###Markdown _Challenge_ Use a **`while`** loop to print out 20 numbers, each of which is larger than the previous by the the _sum_ of the **two** previous numbers (the [Fibonacci sequence](https://en.wikipedia.org/wiki/Fibonacci_number).- _Hint_: you'll need to keep track of the two previous values (start them at 0 and 1), and then "update" them each time through the loop, storing the "new total" in the first previous variable, and the first previous variable in the second (be careful about the ordering of this!) Use a **`while`** loop to print out a sequence of random numbers from 0 to 10, stopping after the number `4` is printed for the first time. You will need to import the `random` module. ###Code import random n=random.randint(0,10) while n!=4: n=random.randint(0,10) print(n,end=",") ###Output 4, ###Markdown Modify the below "coin flipping" example from the course text so that it keeps flipping coins until you get two "heads" in a row. 
###Code # flip a coin until it shows up heads still_flipping = True while still_flipping: flip = randint(0,1) if flip == 0: flip = "Heads" else: flip = "Tails" print(flip, end=", ") if flip == "Heads": still_flipping = False ###Output _____no_output_____ ###Markdown Define a function **`input_number()`** that takes a minimum and maximum value as arguments. This function should prompt the user to input a number within the range, repeating the prompt if the provided value is not acceptable. Once an acceptable value has been provided, the function should return that number. You can assume that the user-entered input will always be numeric.Be sure and call your function and print its results to test it! ###Code def input_number(min, max): valid = False while(not valid): number = int(input("Pick a number between "+str(min)+" and "+str(max)+": ")) if(min <= number <= max): valid = True else: print("Invalid number.") return number #call function and print result print(input_number(1,10)) ###Output Pick a number between 1 and 10: 7 7 ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Write a **`while`** loop that prints out every 5th number (multiples of 5) from 0 to 100 (inclusive).- _Tip:_ use an **`end=','`** keyword argument to the `print()` function to print all the numbers on the same line. Use a **`while`** loop to print out the first 15 [Triangular numbers](https://en.wikipedia.org/wiki/Triangular_number). This is a sequence of numbers for which the _nth_ value is the sum of the numbers from 0 to _n_. **Do this only using addition!**- _Hint:_ use an additional variable to keep track of the `total` value, and have that value increase by the number of times you've been through the loop each iteration! _Challenge_ Use a **`while`** loop to print out 20 numbers, each of which is larger than the previous by the the _sum_ of the **two** previous numbers (the [Fibonacci sequence](https://en.wikipedia.org/wiki/Fibonacci_number).- _Hint_: you'll need to keep track of the two previous values (start them at 0 and 1), and then "update" them each time through the loop, storing the "new total" in the first previous variable, and the first previous variable in the second (be careful about the ordering of this!) Use a **`while`** loop to print out a sequence of random numbers from 0 to 10, stopping after the number `4` is printed for the first time. You will need to import the `random` module. Modify the below "coin flipping" example from the course text so that it keeps flipping coins until you get two "heads" in a row. ###Code # flip a coin until it shows up heads still_flipping = True while still_flipping: flip = randint(0,1) if flip == 0: flip = "Heads" else: flip = "Tails" print(flip, end=", ") if flip == "Heads": still_flipping = False ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `my_name` that contains your name. ###Code my_name = "Ellie" print(my_name) ###Output Ellie ###Markdown Create a variable `name_length` that holds how many letters are in your name. Print the number of letters. ###Code name_length = 5 print(name_length) ###Output 5 ###Markdown Print out your name with the uppercase letters made lowercase, and the lowercase letters made uppercase. **Hint:** look for a [string method](https://docs.python.org/3/library/stdtypes.htmlstring-methods) that will modify the _case_ of the string.- Try to do this without creating a separate variable! 
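###Markdown A note on the coin-flipping template above: it calls `randint()` unqualified, so it assumes that name was imported from the `random` module first. One way to sketch the "two heads in a row" modification, keeping that assumption explicit: ###Code
from random import randint

def flip_until_two_heads():
    """Keep flipping until two 'Heads' results occur back to back."""
    previous = None
    while True:
        flip = "Heads" if randint(0, 1) == 0 else "Tails"
        print(flip, end=", ")
        if flip == "Heads" and previous == "Heads":
            break
        previous = flip
###Output
_____no_output_____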
###Code flipped_letters = str.swapcase(my_name) print(flipped_letters) ###Output eLLIE ###Markdown Pick two of your favorite numbers (between 1 and 100) and assign them to `favorite_1` and `favorite_2` ###Code favorite_1 = 3 favorite_2 = 7 print(favorite_1, favorite_2) ###Output 3 7 ###Markdown Divide each number by the length of your name raised to the power of `.598` (use the built-in `pow()` function for practice), and save it in the same variable. ###Code same_variable = favorite_1/(pow(name_length, .598)), favorite_2/(pow(name_length, .598)) same_variable ###Output _____no_output_____ ###Markdown Create a variable `raw_sum` that is the sum of those two variables. Note you _cannot_ use the `sum()` function for this, so just use a normal operator! ###Code raw_sum = favorite_1/(pow(name_length, .598))+ favorite_2/(pow(name_length, .598)) raw_sum ###Output _____no_output_____ ###Markdown Create a variable `round_sum` that is the `raw_sum` rounded to 1 decimal place. Use the `round()` function. ###Code round_sum = round(raw_sum, 1) round_sum ###Output _____no_output_____ ###Markdown Create two new variables `rounded_1` and `rounded_2` that are your `favorite_1` and `favorite_2` variables rounded to 1 decimal place. Print them out on a single line (hint: pass them as two different arguments). ###Code rounded_1 = round(favorite_1, 1) rounded_2 = round(favorite_2, 1) rounded_1, rounded_2 ###Output _____no_output_____ ###Markdown Create a variable `sum_round` that is the sum of the rounded values (use a normal math operator). ###Code sum_round = rounded_1 + rounded_2 sum_round ###Output _____no_output_____ ###Markdown Which is bigger, `round_sum` or `sum_round`? (You can use the `max()` function!) ###Code max(round_sum, sum_round) ###Output _____no_output_____ ###Markdown Create a variable `fruits` that contains the string `"apples and bananas"` ###Code fruits = "apples and bananas" fruits ###Output _____no_output_____ ###Markdown Use the `replace()` function to substitute all the "a"s in `fruits` with "ee". Store the result in a variable called `fruits_e`. ###Code fruits_e = fruits.replace("a", "ee") fruits_e ###Output _____no_output_____ ###Markdown Use the `replace()` function to substitute all the "a"s in `fruits` with "o". Store the result in a variable called `fruits_o`. ###Code fruits_o = fruits.replace("a", "o") fruits_o ###Output _____no_output_____ ###Markdown Print out the string "I like to eat " followed by each of `fruits`, `fruits_e` and `fruits_o` (three sentences). ###Code print("I like to eat " + fruits + " I like to eat " + fruits_e + " I like to eat " + fruits_o) ###Output I like to eat apples and bananas I like to eat eepples eend beeneenees I like to eat opples ond bononos ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `food` that stores your favorite kind of food. Print or output the variable. ###Code food = "pizza" print(food) ###Output pizza ###Markdown Create a variable `restaurant` that stores your favorite place to eat that kind of food. ###Code restaurant = "Pagliacci's" ###Output _____no_output_____ ###Markdown Print the message `"I'm going to RESTAURANT for some FOOD"`, replacing the restaurant and food with your variables. ###Code print("I'm going to " + restaurant + " for some " + food) ###Output I'm going to Pagliacci's for some pizza ###Markdown Create a variable `num_friends` equal to the number of friends you would like to eat with. 
###Code num_friends = 3 ###Output _____no_output_____ ###Markdown Print a message `"I'm going with X friends"`, replacing the X with the number of friends. ###Code print("I'm going with " + str(num_friends) + " friends") ###Output I'm going with 3 friends ###Markdown Create a variable `meal_price`, which is how expensive you think one meal at the restaurant would be. This price should be a `float`. ###Code meal_price = 25.00 ###Output _____no_output_____ ###Markdown Update (re-assign) the `meal_price` variable so it includes a 15% tip&mdash;that is, so the price is 15% higher. Output the variable. ###Code meal_price = round(meal_price * 1.15, 2) print(meal_price) ###Output 28.75 ###Markdown Create a variable `total_cost` that has the total estimated cost of the bill for you and all of your friends. Output or print the variable ###Code total_cost = meal_price * (num_friends+1) print(total_cost) ###Output 115.0 ###Markdown Create a variable `budget` representing your spending budget for a night out. ###Code budget = 200.00 ###Output _____no_output_____ ###Markdown Create a variable `max_friends`, which is the maximum number of friends you can invite, at the estimated meal price, while staying within your budget. Output or print this value.- Be carefully that you only invite whole people! ###Code max_friends = int((budget//meal_price)-1) #double slash rounds down to nearest whole print(max_friends) ###Output 5 ###Markdown Bonus: Create a variable `chorus` that is the string `"FOOD time!"` _repeated_ once for each of the friends you are able to bring. _Hint_ use the **`*`** operator. Print out the variable. ###Code chorus = (food +" time!\n") * max_friends print(chorus) ###Output pizza time! pizza time! pizza time! pizza time! pizza time! pizza time! ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `num` that contains a random number between 1 and 20 (use the [random.randint()](https://docs.python.org/3/library/random.htmlrandom.randint) function). Print the number. ###Code import random num = random.randint(1, 20) print(num) ###Output 16 ###Markdown Write a boolean expression that represents whether or not `num` is equal to 12. ###Code num==12 ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether `num` is either less than 5 or greater than 15. (Note that in programming, we always assume "strictly greater/less than" unless otherwis specified). ###Code 5 < num < 15 ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether `num` is between 3 and 18 (inclusive), but is _not_ equal to 11. (You can temporarily assign a non-random value to `num` to test this). ###Code 3 < num < 18 and num != 11 ###Output _____no_output_____ ###Markdown _Re-run the above cells multiple times to confirm that the logic holds for different random numbers!_ Write a boolean expression that represents whether `5.1 - 2.5` is equal to `2.6`.- What does this tell you about the precision of _floating point_ (decimal) operations in Python? _Tip:_ try printing the value of `5.1-2.6`. (See also: https://en.wikipedia.org/wiki/Floating-point_arithmetic) - How might you ensure that this equality is correct? 
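###Markdown An aside on the range expressions above: a chained comparison such as `5 < num < 15` tests that `num` is *inside* the range, which is not the same as "less than 5 **or** greater than 15". A short sketch contrasting the two: ###Code
num = 10

outside_range = num < 5 or num > 15   # True only when num falls outside 5..15
inside_range = 3 <= num <= 18         # chained comparison: inclusive "between" test
###Output
_____no_output_____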
###Code is_equal = (5.1-2.5) == 2.6 print(is_equal) is_equal = round(5.1-2.5, 1) == 2.6 print(is_equal) ###Output False True ###Markdown Define a function `de_morgan()` that takes in two boolean values (`P` and `Q`) and _returns_ whether the [De Morgan laws](https://en.wikipedia.org/wiki/De_Morgan%27s_laws) hold for those two values. That is:> the negation of P **and** Q is the same as the negation of P **or** the negation of Q> and> the negation of P **or** Q is the same as the negation of P **and** the negation of QYour function should return a boolean representing whether or not _both_ of these statements are true for the given `P` and `Q`. _Use parentheses to enforce order of operations!_ ###Code def de_morgan(P, Q): """Takes two boolean values and tests the De Morgan laws""" validity = ((not (P and Q)) == (not P or not Q)) and ((not (P or Q)) == (not P and not Q)) return validity ###Output _____no_output_____ ###Markdown Call your `de_morgan()` function for each possible value of `P` and `Q` (there are **4** possible combinations), printing the result, in order to demonstrate that these laws hold. ###Code one = de_morgan(True, True) two = de_morgan(True, False) three = de_morgan(False, False) four = de_morgan(False, True) print(one, two, three, four) ###Output True True True True ###Markdown Use the built-in [input()](https://docs.python.org/3/library/functions.htmlinput) function to prompt the user for their favorite word.- This function will wait for the user to provide an input before it finished executing, and will then _return_ a string of whatever the user typed in. ###Code user_word = input("What is your favorite word? ") ###Output What is your favorite word? wow ###Markdown Write a boolean expression that represents if the inputted word has more than 8 letters. ###Code len(user_word) > 8 ###Output _____no_output_____ ###Markdown Use the `islower()` string method to check if the inputted word was entered with all lower-case letters. ###Code user_word.islower() ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether or not the word _starts with_ the letter `i` or _ends with_ the letter `n`. Use appropriate string methods. _Hint_: convert the word to a specific case (upper or lower) for the comparison. ###Code user_word = user_word.lower() user_word[0] == 'i' or user_word[-1] == 'n' ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Write a **`while`** loop that prints out every 5th number (multiples of 5) from 0 to 100 (inclusive).- _Tip:_ use an **`end=','`** keyword argument to the `print()` function to print all the numbers on the same line. ###Code my_num = 0 while my_num <= 100: print(my_num, end = ',') my_num = my_num + 5 ###Output 0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100, ###Markdown Use a **`while`** loop to print out the first 15 [Triangular numbers](https://en.wikipedia.org/wiki/Triangular_number). This is a sequence of numbers for which the _nth_ value is the sum of the numbers from 0 to _n_. **Do this only using addition!**- _Hint:_ use an additional variable to keep track of the `total` value, and have that value increase by the number of times you've been through the loop each iteration! 
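###Markdown As a cross-check for the triangular-number loops in these exercises, the nth triangular number also has a closed form, T(n) = n(n + 1) / 2, so the loop's final value can be verified directly. A tiny sketch: ###Code
def triangular(n):
    """Closed-form nth triangular number."""
    return n * (n + 1) // 2

# triangular(15) == 120, matching the last value the while-loop versions print
###Output
_____no_output_____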
###Code total = 0 count = 0 while count <= 15: total = total + count print(total, end = ',') count = count + 1 ###Output 0,1,3,6,10,15,21,28,36,45,55,66,78,91,105,120, ###Markdown _Challenge_ Use a **`while`** loop to print out 20 numbers, each of which is larger than the previous by the the _sum_ of the **two** previous numbers (the [Fibonacci sequence](https://en.wikipedia.org/wiki/Fibonacci_number).- _Hint_: you'll need to keep track of the two previous values (start them at 0 and 1), and then "update" them each time through the loop, storing the "new total" in the first previous variable, and the first previous variable in the second (be careful about the ordering of this!) ###Code count = 0 fib1 = 0 fib2 = 1 while count <= 20: fib = fib1 + fib2 #calc total fib2 = fib1 #get lower number from total fib1 = fib #updating new variable to be the total count = count + 1 print(fib, end = ",") ###Output 1,1,2,3,5,8,13,21,34,55,89,144,233,377,610,987,1597,2584,4181,6765,10946, ###Markdown Use a **`while`** loop to print out a sequence of random numbers from 0 to 10, stopping after the number `4` is printed for the first time. You will need to import the `random` module. ###Code import random cur = None while cur != 4: cur = random.randint(0, 10) print(cur, end = ",") ###Output 9,5,7,9,1,9,8,5,5,0,7,6,0,4, ###Markdown Modify the below "coin flipping" example from the course text so that it keeps flipping coins until you get two "heads" in a row. ###Code # flip a coin until it shows up heads still_flipping = True last_flip = None while still_flipping: flip = random.randint(0,1) if flip == 0: flip = "H" else: flip = "T" #print("Flip: ", flip, "Last flip:", last_flip, end="\n ") print(flip, last_flip, sep = "") if flip == "H" and last_flip == "H": still_flipping = False last_flip = flip ###Output TNone TT TT TT HT HH ###Markdown Define a function **`input_number()`** that takes a minimum and maximum value as arguments. This function should prompt the user to input a number within the range, repeating the prompt if the provided value is not acceptable. Once an acceptable value has been provided, the function should return that number. You can assume that the user-entered input will always be numeric.Be sure and call your function and print its results to test it! ###Code def input_number(lo, hi): ok = False while not(ok): user_input = int(input("Enter a number in the range:"+str(lo)+" - "+str(hi))) if(lo <= user_input <= hi): print("\nok") ok = True else: print("no no try again") return user_input input_number(lo = 10, hi = 20) ###Output Enter a number in the range:10 - 2015 ok ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `food` that stores your favorite kind of food. Print or output the variable. ###Code food = "pasta" print(food) ###Output pasta ###Markdown Create a variable `restaurant` that stores your favorite place to eat that kind of food. ###Code restaurant = "ziti pasta" ###Output _____no_output_____ ###Markdown Print the message `"I'm going to RESTAURANT for some FOOD"`, replacing the restaurant and food with your variables. ###Code print("I'm going to " + restaurant + " for some " + food + ".") ###Output I'm going to ziti pasta for some pasta. ###Markdown Create a variable `num_friends` equal to the number of friends you would like to eat with. ###Code num_friends = 3 ###Output _____no_output_____ ###Markdown Print a message `"I'm going with X friends"`, replacing the X with the number of friends. 
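###Markdown A side note on the `input_number()` solutions above: they assume the user always types digits, as the exercise allows. If that assumption were dropped, the `int()` conversion could be guarded with `try`/`except`. A sketch of that variant (not required by the exercise): ###Code
def input_number_safe(low, high):
    """Prompt until the user enters an integer between low and high (inclusive)."""
    while True:
        raw = input("Enter a number between " + str(low) + " and " + str(high) + ": ")
        try:
            value = int(raw)
        except ValueError:
            print("That wasn't a number, try again.")
            continue
        if low <= value <= high:
            return value
        print("Out of range, try again.")
###Output
_____no_output_____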
###Code print("I'm going with " + str(num_friends) + " friends.") ###Output I'm going with 3 friends. ###Markdown Create a variable `meal_price`, which is how expensive you think one meal at the restaurant would be. This price should be a `float`. ###Code meal_price = 12.2 meal_tip = meal_price * .15 print(meal_tip) ###Output 1.8299999999999998 ###Markdown Update (re-assign) the `meal_price` variable so it includes a 15% tip&mdash;that is, so the price is 15% higher. Output the variable. ###Code meal_price = meal_tip + meal_price print(meal_price) ###Output 14.03 ###Markdown Create a variable `total_cost` that has the total estimated cost of the bill for you and all of your friends. Output or print the variable ###Code total_cost = meal_price * num_friends print(total_cost) ###Output 42.089999999999996 ###Markdown Create a variable `budget` representing your spending budget for a night out. ###Code budget = 75 ###Output _____no_output_____ ###Markdown Create a variable `max_friends`, which is the maximum number of friends you can invite, at the estimated meal price, while staying within your budget. Output or print this value.- Be carefully that you only invite whole people! ###Code max_friends = budget // meal_price print(max_friends) ###Output 5.0 ###Markdown Bonus: Create a variable `chorus` that is the string `"FOOD time!"` _repeated_ once for each of the friends you are able to bring. _Hint_ use the **`*`** operator. Print out the variable. ###Code chorus = "FOOD time! " * int(max_friends) print(chorus) ###Output FOOD time! FOOD time! FOOD time! FOOD time! FOOD time! ###Markdown Modify the above cell so that each `"FOOD time!"` is on a separate line (_hint_: use a newline character!), then rerun the cell. ###Code chorus = "FOOD time!\n" * int(max_friends) print(chorus) ###Output FOOD time! FOOD time! FOOD time! FOOD time! FOOD time! ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `num` that contains a random number between 1 and 20 (use the [random.randint()](https://docs.python.org/3/library/random.htmlrandom.randint) function). Print the number. ###Code import random num = random.randint(1,20) ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether or not `num` is equal to 12. ###Code num == 12 ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether `num` is either less than 5 or greater than 15. (Note that in programming, we always assume "strictly greater/less than" unless otherwis specified). ###Code num < 5 or num > 15 ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether `num` is between 3 and 18 (inclusive), but is _not_ equal to 11. (You can temporarily assign a non-random value to `num` to test this). ###Code num >= 3 and num <= 18 and num != 11 ###Output _____no_output_____ ###Markdown _Re-run the above cells multiple times to confirm that the logic holds for different random numbers!_ Write a boolean expression that represents whether `5.1 - 2.5` is equal to `2.6`.- What does this tell you about the precision of _floating point_ (decimal) operations in Python? _Tip:_ try printing the value of `5.1-2.6`. (See also: https://en.wikipedia.org/wiki/Floating-point_arithmetic) - How might you ensure that this equality is correct? 
###Code round(5.1-2.5, 1) == 2.6 ###Output _____no_output_____ ###Markdown Define a function `de_morgan()` that takes in two boolean values (`P` and `Q`) and _returns_ whether the [De Morgan laws](https://en.wikipedia.org/wiki/De_Morgan%27s_laws) hold for those two values. That is:> the negation of P **and** Q is the same as the negation of P **or** the negation of Q> and> the negation of P **or** Q is the same as the negation of P **and** the negation of QYour function should return a boolean representing whether or not _both_ of these statements are true for the given `P` and `Q`. _Use parentheses to enforce order of operations!_ ###Code def de_morgan(P, Q): P and Q ###Output _____no_output_____ ###Markdown Call your `de_morgan()` function for each possible value of `P` and `Q` (there are **4** possible combinations), printing the result, in order to demonstrate that these laws hold. Use the built-in [input()](https://docs.python.org/3/library/functions.htmlinput) function to prompt the user for their favorite word.- This function will wait for the user to provide an input before it finished executing, and will then _return_ a string of whatever the user typed in. ###Code firstname = input("What is your first name? ") ###Output What is your first name? Chelsea ###Markdown Write a boolean expression that represents if the inputted word has more than 8 letters. ###Code len(firstname) > 8 ###Output _____no_output_____ ###Markdown Use the `islower()` string method to check if the inputted word was entered with all lower-case letters. ###Code firstname.islower() ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether or not the word _starts with_ the letter `i` or _ends with_ the letter `n`. Use appropriate string methods. _Hint_: convert the word to a specific case (upper or lower) for the comparison. ###Code print(firstname.startswith('C') or firstname.endswith('a')) ###Output True ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `my_name` that contains your name. ###Code import math my_name = "J Rea" ###Output _____no_output_____ ###Markdown Create a variable `name_length` that holds how many letters are in your name. Print the number of letters. ###Code name_length = len(my_name) print(name_length) ###Output 5 ###Markdown Print out your name with the uppercase letters made lowercase, and the lowercase letters made uppercase. **Hint:** look for a [string method](https://docs.python.org/3/library/stdtypes.htmlstring-methods) that will modify the _case_ of the string.- Try to do this without creating a separate variable! ###Code my_name.swapcase() ###Output _____no_output_____ ###Markdown Pick two of your favorite numbers (between 1 and 100) and assign them to `favorite_1` and `favorite_2` ###Code favorite_1 = 9 favorite_2 = 10 ###Output _____no_output_____ ###Markdown Divide each number by the length of your name raised to the power of `.598` (use the built-in `pow()` function for practice), and save it in the same variable. ###Code new_favorite_1 = pow(favorite_1/name_length, .598) print(new_favorite_1) new_favorite_2 = pow(favorite_2/name_length, .598) print (new_favorite_2) ###Output 1.4211926672618127 1.5136167929695792 ###Markdown Create a variable `raw_sum` that is the sum of those two variables. Note you _cannot_ use the `sum()` function for this, so just use a normal operator! 
###Code raw_sum = (new_favorite_1 + new_favorite_2) print(raw_sum) ###Output 2.934809460231392 ###Markdown Create a variable `round_sum` that is the `raw_sum` rounded to 1 decimal place. Use the `round()` function. ###Code round_sum = round(raw_sum, 1) print(round_sum) ###Output 2.9 ###Markdown Create two new variables `rounded_1` and `rounded_2` that are your `favorite_1` and `favorite_2` variables rounded to 1 decimal place. Print them out on a single line (hint: pass them as two different arguments). ###Code rounded_1 = round(new_favorite_1, 1) rounded_2 = round(new_favorite_2, 2) print(rounded_1, rounded_2, sep=", ") ###Output 1.4, 1.51 ###Markdown Create a variable `sum_round` that is the sum of the rounded values (use a normal math operator). ###Code sum_round = rounded_1 + rounded_2 print(sum_round) ###Output 2.91 ###Markdown Which is bigger, `round_sum` or `sum_round`? (You can use the `max()` function!) ###Code max(round_sum, sum_round) ###Output _____no_output_____ ###Markdown Create a variable `fruits` that contains the string `"apples and bananas"` ###Code fruits = "apples and bananas" ###Output _____no_output_____ ###Markdown Use the `replace()` function to substitute all the "a"s in `fruits` with "ee". Store the result in a variable called `fruits_e`. ###Code fruits_e = fruits.replace("a", "ee") ###Output _____no_output_____ ###Markdown Use the `replace()` function to substitute all the "a"s in `fruits` with "o". Store the result in a variable called `fruits_o`. ###Code fruits_o = fruits.replace("a", "o") ###Output _____no_output_____ ###Markdown Print out the string "I like to eat " followed by each of `fruits`, `fruits_e` and `fruits_o` (three sentences). ###Code print("I like to eat", fruits, sep=" ") print("I like to eat", fruits_e, sep= "") print("I like to eat", fruits_o, sep= "") ###Output I like to eat apples and bananas I like to eateepples eend beeneenees I like to eatopples ond bononos ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `food` that stores your favorite kind of food. Print or output the variable. ###Code food = "crab noodle" print (food) ###Output crab noodle ###Markdown Create a variable `restaurant` that stores your favorite place to eat that kind of food. ###Code restaurant = "Revel" ###Output _____no_output_____ ###Markdown Print the message `"I'm going to RESTAURANT for some FOOD"`, replacing the restaurant and food with your variables. ###Code print ("I'm going to " + restaurant + " for some " + food) ###Output I'm going to Revel for some crab noodle ###Markdown Create a variable `num_friends` equal to the number of friends you would like to eat with. ###Code num_friends = 4 ###Output _____no_output_____ ###Markdown Print a message `"I'm going with X friends"`, replacing the X with the number of friends. ###Code print ("I'm going with " + str(num_friends) + " friends") ###Output I'm going with 4 friends ###Markdown Create a variable `meal_price`, which is how expensive you think one meal at the restaurant would be. This price should be a `float`. ###Code meal_price = 17.00 ###Output _____no_output_____ ###Markdown Update (re-assign) the `meal_price` variable so it includes a 15% tip&mdash;that is, so the price is 15% higher. Output the variable. ###Code meal_price = 17.00*0.85 print(meal_price) ###Output 14.45 ###Markdown Create a variable `total_cost` that has the total estimated cost of the bill for you and all of your friends. 
Output or print the variable ###Code total_cost = meal_price*float(num_friends) print(total_cost) ###Output 57.8 ###Markdown Create a variable `budget` representing your spending budget for a night out. ###Code budget = 50.00 ###Output _____no_output_____ ###Markdown Create a variable `max_friends`, which is the maximum number of friends you can invite, at the estimated meal price, while staying within your budget. Output or print this value.- Be carefully that you only invite whole people! ###Code max_friends = round(50.00/17.00) print (max_friends) ###Output 3 ###Markdown Bonus: Create a variable `chorus` that is the string `"FOOD time!"` _repeated_ once for each of the friends you are able to bring. _Hint_ use the **`*`** operator. Print out the variable. ###Code chorus = print ((food + " time! ")*max_friends) ###Output crab noodle time! crab noodle time! crab noodle time! ###Markdown Modify the above cell so that each `"FOOD time!"` is on a separate line (_hint_: use a newline character!), then rerun the cell. ###Code chorus = print ((food.upper() + " time! \n")*max_friends) ###Output CRAB NOODLE time! CRAB NOODLE time! CRAB NOODLE time! ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Write a **`while`** loop that prints out every 5th number (multiples of 5) from 0 to 100 (inclusive).- _Tip:_ use an **`end=','`** keyword argument to the `print()` function to print all the numbers on the same line. ###Code numbers = 0 while numbers <=100: print(numbers, end = ', ') numbers +=5 ###Output _____no_output_____ ###Markdown Use a **`while`** loop to print out the first 15 [Triangular numbers](https://en.wikipedia.org/wiki/Triangular_number). This is a sequence of numbers for which the _nth_ value is the sum of the numbers from 0 to _n_. **Do this only using addition!**- _Hint:_ use an additional variable to keep track of the `total` value, and have that value increase by the number of times you've been through the loop each iteration! ###Code nums = 0 total = 0 while nums <=15: total = total + nums print(total, end = ', ') nums +=1 ###Output _____no_output_____ ###Markdown _Challenge_ Use a **`while`** loop to print out 20 numbers, each of which is larger than the previous by the the _sum_ of the **two** previous numbers (the [Fibonacci sequence](https://en.wikipedia.org/wiki/Fibonacci_number).- _Hint_: you'll need to keep track of the two previous values (start them at 0 and 1), and then "update" them each time through the loop, storing the "new total" in the first previous variable, and the first previous variable in the second (be careful about the ordering of this!) Use a **`while`** loop to print out a sequence of random numbers from 0 to 10, stopping after the number `4` is printed for the first time. You will need to import the `random` module. Modify the below "coin flipping" example from the course text so that it keeps flipping coins until you get two "heads" in a row. ###Code # flip a coin until it shows up heads still_flipping = True while still_flipping: flip = randint(0,1) if flip == 0: flip = "Heads" else: flip = "Tails" print(flip, end=", ") if flip == "Heads": still_flipping = False ###Output _____no_output_____ ###Markdown Define a function **`input_number()`** that takes a minimum and maximum value as arguments. This function should prompt the user to input a number within the range, repeating the prompt if the provided value is not acceptable. Once an acceptable value has been provided, the function should return that number. 
You can assume that the user-entered input will always be numeric.Be sure and call your function and print its results to test it! ###Code def input_number(min, max): valid = False while not valid: number = int(input("Pick a number between " +str(min)+ " and " +str(max)+ ": " )) if (min<=number<=max): valid = True print("Great choice") else: print("That's going to be a no from me") return number input_number(1,20) ###Output Pick a number between 1 and 20: 30 That's going to be a no from me ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `my_name` that contains your name. ###Code my_name = "Jori Grant" ###Output _____no_output_____ ###Markdown Create a variable `name_length` that holds how many letters are in your name. Print the number of letters. ###Code name_length = len(my_name) print(name_length) ###Output 10 ###Markdown Print out your name with the uppercase letters made lowercase, and the lowercase letters made uppercase. **Hint:** look for a [string method](https://docs.python.org/3/library/stdtypes.htmlstring-methods) that will modify the _case_ of the string.- Try to do this without creating a separate variable! ###Code print(my_name.swapcase()) ###Output jORI gRANT ###Markdown Pick two of your favorite numbers (between 1 and 100) and assign them to `favorite_1` and `favorite_2` ###Code favorite_1 = 7 favorite_2 = 40 ###Output _____no_output_____ ###Markdown Divide each number by the length of your name raised to the power of `.598` (use the built-in `pow()` function for practice), and save it in the same variable. ###Code favorite_1 = favorite_1 / (pow(name_length, .598)) favorite_2 = favorite_2 / (pow(name_length, .598)) ###Output _____no_output_____ ###Markdown Create a variable `raw_sum` that is the sum of those two variables. Note you _cannot_ use the `sum()` function for this, so just use a normal operator! ###Code raw_sum = favorite_1 + favorite_2 ###Output _____no_output_____ ###Markdown Create a variable `round_sum` that is the `raw_sum` rounded to 1 decimal place. Use the `round()` function. ###Code round_sum = round(raw_sum, 1) print(round_sum) ###Output 11.9 ###Markdown Create two new variables `rounded_1` and `rounded_2` that are your `favorite_1` and `favorite_2` variables rounded to 1 decimal place. Print them out on a single line (hint: pass them as two different arguments). ###Code rounded_1 = round(favorite_1, 1) rounded_2 = round(favorite_2, 1) print(rounded_1, rounded_2) ###Output 1.8 10.1 ###Markdown Create a variable `sum_round` that is the sum of the rounded values (use a normal math operator). ###Code sum_round = rounded_1 + rounded_2 print(sum_round) ###Output 11.9 ###Markdown Which is bigger, `round_sum` or `sum_round`? (You can use the `max()` function!) ###Code max(round_sum, sum_round) ###Output _____no_output_____ ###Markdown Create a variable `fruits` that contains the string `"apples and bananas"` ###Code fruits = "apples and bananas" ###Output _____no_output_____ ###Markdown Use the `replace()` function to substitute all the "a"s in `fruits` with "ee". Store the result in a variable called `fruits_e`. ###Code fruits_e = fruits.replace("a", "ee") print(fruits_e) ###Output eepples eend beeneenees ###Markdown Use the `replace()` function to substitute all the "a"s in `fruits` with "o". Store the result in a variable called `fruits_o`. 
###Code fruits_o = fruits.replace("a", "o") print(fruits_o) ###Output opples ond bononos ###Markdown Print out the string "I like to eat " followed by each of `fruits`, `fruits_e` and `fruits_o` (three sentences). ###Code print("I like to eat", fruits) print("I like to eat", fruits_e) print("I like to eat", fruits_o) ###Output I like to eat apples and bananas I like to eat eepples eend beeneenees I like to eat opples ond bononos ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `my_name` that contains your name. ###Code my_name = "Elizabeth Claire Giacobbi" print (my_name) ###Output Elizabeth Claire Giacobbi ###Markdown Create a variable `name_length` that holds how many letters are in your name. Print the number of letters. ###Code name_length =len(my_name) print(name_length) ###Output 25 ###Markdown Print out your name with the uppercase letters made lowercase, and the lowercase letters made uppercase. **Hint:** look for a [string method](https://docs.python.org/3/library/stdtypes.htmlstring-methods) that will modify the _case_ of the string.- Try to do this without creating a separate variable! Pick two of your favorite numbers (between 1 and 100) and assign them to `favorite_1` and `favorite_2` ###Code favorite_1 = 13 favorite_2 = 69 print (favorite_1, favorite_2) ###Output 13 69 ###Markdown Divide each number by the length of your name raised to the power of `.598` (use the built-in `pow()` function for practice), and save it in the same variable. ###Code favorite_1 = favorite_1/(pow(name_length, 0.598)) favorite_2 = favorite_2/(pow(name_length, 0.598)) print (favorite_1, favorite_2) ###Output 1.8965977330387362 10.066557198436369 ###Markdown Create a variable `raw_sum` that is the sum of those two variables. Note you _cannot_ use the `sum()` function for this, so just use a normal operator! ###Code raw_sum = favorite_1 + favorite_2 print(raw_sum) ###Output 11.963154931475106 ###Markdown Create a variable `round_sum` that is the `raw_sum` rounded to 1 decimal place. Use the `round()` function. ###Code round_sum = round(raw_sum) print (round_sum) ###Output 12 ###Markdown Create two new variables `rounded_1` and `rounded_2` that are your `favorite_1` and `favorite_2` variables rounded to 1 decimal place. Print them out on a single line (hint: pass them as two different arguments). ###Code rounded_1 = round(favorite_1) rounded_2 = round(favorite_2) print(rounded_1, rounded_2) ###Output 2 10 ###Markdown Create a variable `sum_round` that is the sum of the rounded values (use a normal math operator). ###Code sum_round = rounded_1 + rounded_2 print (sum_round) ###Output 12 ###Markdown Which is bigger, `round_sum` or `sum_round`? (You can use the `max()` function!) ###Code print (max(round_sum, sum_round)) ###Output 12 ###Markdown Create a variable `fruits` that contains the string `"apples and bananas"` ###Code fruits = "apples and bananas" print (fruits) ###Output apples and bananas ###Markdown Use the `replace()` function to substitute all the "a"s in `fruits` with "ee". Store the result in a variable called `fruits_e`. ###Code fruits_e = fruits.replace ("a", "ee") print(fruits_e) ###Output eepples eend beeneenees ###Markdown Use the `replace()` function to substitute all the "a"s in `fruits` with "o". Store the result in a variable called `fruits_o`. 
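###Markdown An aside on the vowel substitutions in these exercises: chained `replace()` calls work fine, and for single-character swaps `str.translate()` with `str.maketrans()` is an equivalent alternative. A small sketch: ###Code
fruits = "apples and bananas"

# same result as fruits.replace("a", "o")
fruits_o = fruits.translate(str.maketrans("a", "o"))
###Output
_____no_output_____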
###Code fruits_o = fruits.replace("a", "o") print(fruits_o) ###Output opples ond bononos ###Markdown Print out the string "I like to eat " followed by each of `fruits`, `fruits_e` and `fruits_o` (three sentences). ###Code print ("I like to eat " + fruits) print ("I like to eat " + fruits_e) print ("I like to eat " + fruits_o) ###Output I like to eat apples and bananas I like to eat eepples eend beeneenees I like to eat opples ond bononos ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `num` that contains a random number between 1 and 20 (use the [random.randint()](https://docs.python.org/3/library/random.htmlrandom.randint) function). Print the number. ###Code import random as random num = random.randint(1, 20) print(num) ###Output 9 ###Markdown Write a boolean expression that represents whether or not `num` is equal to 12. ###Code num == 12 ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether `num` is either less than 5 or greater than 15. (Note that in programming, we always assume "strictly greater/less than" unless otherwis specified). ###Code num < 5 or num > 15 ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether `num` is between 3 and 18 (inclusive), but is _not_ equal to 11. (You can temporarily assign a non-random value to `num` to test this). ###Code num >= 3 and num < 18 and not 11 ###Output _____no_output_____ ###Markdown _Re-run the above cells multiple times to confirm that the logic holds for different random numbers!_ Write a boolean expression that represents whether `5.1 - 2.5` is equal to `2.6`.- What does this tell you about the precision of _floating point_ (decimal) operations in Python? _Tip:_ try printing the value of `5.1-2.6`. (See also: https://en.wikipedia.org/wiki/Floating-point_arithmetic) - How might you ensure that this equality is correct? ###Code round(5.1 - 2.5, 1) == 2.6 ###Output _____no_output_____ ###Markdown Define a function `de_morgan()` that takes in two boolean values (`P` and `Q`) and _returns_ whether the [De Morgan laws](https://en.wikipedia.org/wiki/De_Morgan%27s_laws) hold for those two values. That is:> the negation of P **and** Q is the same as the negation of P **or** the negation of Q> and> the negation of P **or** Q is the same as the negation of P **and** the negation of QYour function should return a boolean representing whether or not _both_ of these statements are true for the given `P` and `Q`. _Use parentheses to enforce order of operations!_ Call your `de_morgan()` function for each possible value of `P` and `Q` (there are **4** possible combinations), printing the result, in order to demonstrate that these laws hold. Use the built-in [input()](https://docs.python.org/3/library/functions.htmlinput) function to prompt the user for their favorite word.- This function will wait for the user to provide an input before it finished executing, and will then _return_ a string of whatever the user typed in. ###Code your_fav = input("What is your favorite word?") ###Output What is your favorite word?Stoops ###Markdown Write a boolean expression that represents if the inputted word has more than 8 letters. ###Code len(your_fav) > 8 ###Output _____no_output_____ ###Markdown Use the `islower()` string method to check if the inputted word was entered with all lower-case letters. 
###Code your_fav.islower() ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether or not the word _starts with_ the letter `i` or _ends with_ the letter `n`. Use appropriate string methods. _Hint_: convert the word to a specific case (upper or lower) for the comparison. ###Code str = your_fav print(str.startswith('i') or str.endswith('n')) ###Output False ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable **`deck`** that represents a deck of [playing cards](https://en.wikipedia.org/wiki/Playing_card). This variable should be a _list_ of 52 elements, each of which is a dictionary with the following keys:- `suit`, with a string value that is either `'hearts'`, `'diamonds'`, `'clubs'`, or `'spades'`.- `rank`, with an interger value ranging from 2 to 14 inclusive (11-14 represent a Jack, Queen, King, or Ace respectively)._Tip:_ use a pair of nested loops to add each combination of suit and rank to the `deck` list! ###Code deck = [] suit = ['hearts', 'daimonds', 'clubs', 'spades'] for suit in ['hearts', 'daimonds', 'clubs', 'spades']: for rank in range (2, 15): cards = {"suit": suit, "rank": rank} deck.append(cards) deck ###Output _____no_output_____ ###Markdown Create a list **hand** that represents a hand of [Poker](https://en.wikipedia.org/wiki/Poker) (5-card draw). Add 5 cards from the `deck` to this list.- You can add 5 specific cards for testing (check their indices!), or use the `random.shuffle()` function to shuffle the deck and then _slice_ 5 cards from the top.Print out the `hand` so you know what you are dealing with! ###Code import random random.shuffle(hand) hand = deck[:5] hand ###Output _____no_output_____ ###Markdown Define a function **`contains_queen_of_hearts()`** that takes in a list of cards (e.g., a hand) and returns whether or not the Queen of Hearts is in that list. _For practice, don't use the `in` operator to check for containment_. ###Code def contains_queen_of_hearts(list_of_cards): for card in list_of_cards: if card =={"suit": 'hearts', "ramk": 12}: return True return False contains_queen_of_hearts(hand) ###Output _____no_output_____ ###Markdown Define a function **`get_high_card()`** that takes in a list of cards and returns the card (dictionary) of the highest value. The "high card" is the one with the highest rank. Cards of different suits but the same rank are considered to have the same value and either may be returned.- Hint: use a "king-of-the-hill" search! ###Code def get_high_cards(a_list): highest_card = a_;ist[0] for card in a_list: if card["rank"] > highest_card["rank"]: highest_card = card return highest_card print(hand) ###Output [] ###Markdown Define a function **`is_flush()`** that takes in a list of cards and returns whether or not the list represents a _flush_&mdash;that is, all of the cards have the same _suit_. ###Code def is_flush(a_list): target_suit = a_list[0]['suit'] is_flush = True for card in a_list: if card['suit'] != target_suit: return False return True is_flush(hand) ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `num` that contains a random number between 1 and 20 (use the [random.randint()](https://docs.python.org/3/library/random.htmlrandom.randint) function). Print the number. ###Code import random num = random.randint(1, 20) print(num) ###Output 10 ###Markdown Write a boolean expression that represents whether or not `num` is equal to 12. 
###Code num != 12 ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether `num` is either less than 5 or greater than 15. (Note that in programming, we always assume "strictly greater/less than" unless otherwis specified). ###Code num < 5 or num > 15 ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether `num` is between 3 and 18 (inclusive), but is _not_ equal to 11. (You can temporarily assign a non-random value to `num` to test this). ###Code num > 3 or num < 18 and not num == 11 ###Output _____no_output_____ ###Markdown _Re-run the above cells multiple times to confirm that the logic holds for different random numbers!_ Write a boolean expression that represents whether `5.1 - 2.5` is equal to `2.6`.- What does this tell you about the precision of _floating point_ (decimal) operations in Python? _Tip:_ try printing the value of `5.1-2.6`. (See also: https://en.wikipedia.org/wiki/Floating-point_arithmetic) - How might you ensure that this equality is correct? ###Code x = 5.1 - 2.5 x == 2.6 ###Output _____no_output_____ ###Markdown Define a function `de_morgan()` that takes in two boolean values (`P` and `Q`) and _returns_ whether the [De Morgan laws](https://en.wikipedia.org/wiki/De_Morgan%27s_laws) hold for those two values. That is:> the negation of P **and** Q is the same as the negation of P **or** the negation of Q> and> the negation of P **or** Q is the same as the negation of P **and** the negation of QYour function should return a boolean representing whether or not _both_ of these statements are true for the given `P` and `Q`. _Use parentheses to enforce order of operations!_ ###Code def de_morgan(P, Q): result_1 = not(P and Q) == (not P) or (not Q) result_2 = not(P or Q) == (not P) and (not Q) return(bool(result_1 and result_2)) ###Output _____no_output_____ ###Markdown Call your `de_morgan()` function for each possible value of `P` and `Q` (there are **4** possible combinations), printing the result, in order to demonstrate that these laws hold. ###Code print(de_morgan(P, Q)) print(de_morgan(P, not Q)) print(de_morgan(not P, Q)) print(de_morgan(not P, not Q)) ###Output True False True False ###Markdown Use the built-in [input()](https://docs.python.org/3/library/functions.htmlinput) function to prompt the user for their favorite word.- This function will wait for the user to provide an input before it finished executing, and will then _return_ a string of whatever the user typed in. ###Code fav_word = input() ###Output bastille ###Markdown Write a boolean expression that represents if the inputted word has more than 8 letters. ###Code len(fav_word) < 8 ###Output _____no_output_____ ###Markdown Use the `islower()` string method to check if the inputted word was entered with all lower-case letters. ###Code fav_word.islower() ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether or not the word _starts with_ the letter `i` or _ends with_ the letter `n`. Use appropriate string methods. _Hint_: convert the word to a specific case (upper or lower) for the comparison. ###Code fav_word.startswith("i") or fav_word.endswith("n") ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `food` that stores your favorite kind of food. Print or output the variable. 
###Code food = "pizza" ###Output _____no_output_____ ###Markdown Create a variable `restaurant` that stores your favorite place to eat that kind of food. ###Code restaurant = "Delfinos pizza" ###Output _____no_output_____ ###Markdown Print the message `"I'm going to RESTAURANT for some FOOD"`, replacing the restaurant and food with your variables. ###Code print ("I'm going to " + restaurant + " for some " + food) ###Output I'm going to Delfinos pizza for some pizza ###Markdown Create a variable `num_friends` equal to the number of friends you would like to eat with. ###Code num_friends = 5 ###Output _____no_output_____ ###Markdown Print a message `"I'm going with X friends"`, replacing the X with the number of friends. ###Code print ("I'm going with " + str(num_friends) + " friends ") ###Output I'm going with 5 friends ###Markdown Create a variable `meal_price`, which is how expensive you think one meal at the restaurant would be. This price should be a `float`. ###Code meal_price = 35.90 ###Output _____no_output_____ ###Markdown Update (re-assign) the `meal_price` variable so it includes a 15% tip&mdash;that is, so the price is 15% higher. Output the variable. ###Code meal_price = meal_price * 1.15 ###Output _____no_output_____ ###Markdown Create a variable `total_cost` that has the total estimated cost of the bill for you and all of your friends. Output or print the variable ###Code total_cost = meal_price * num_friends ###Output _____no_output_____ ###Markdown Create a variable `budget` representing your spending budget for a night out. ###Code budget = 500 ###Output _____no_output_____ ###Markdown Create a variable `max_friends`, which is the maximum number of friends you can invite, at the estimated meal price, while staying within your budget. Output or print this value.- Be carefully that you only invite whole people! ###Code max_friends = int (budget/meal_price) ###Output _____no_output_____ ###Markdown Bonus: Create a variable `chorus` that is the string `"FOOD time!"` _repeated_ once for each of the friends you are able to bring. _Hint_ use the **`*`** operator. Print out the variable. ###Code print ("food time!\n " * 5) ###Output food time! food time! food time! food time! food time! ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `num` that contains a random number between 1 and 20 (use the [random.randint()](https://docs.python.org/3/library/random.htmlrandom.randint) function). Print the number. ###Code import random num=random.randint(1,20) ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether or not `num` is equal to 12. ###Code num == 12 ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether `num` is either less than 5 or greater than 15. (Note that in programming, we always assume "strictly greater/less than" unless otherwis specified). ###Code num <5 or num > 15 ###Output _____no_output_____ ###Markdown Write a boolean expression that represents whether `num` is between 3 and 18 (inclusive), but is _not_ equal to 11. (You can temporarily assign a non-random value to `num` to test this). 
###Code num =11 (3 <=num <= 18) and num != 11 ###Output _____no_output_____ ###Markdown _Re-run the above cells multiple times to confirm that the logic holds for different random numbers!_ Write a boolean expression that represents whether `5.1 - 2.5` is equal to `2.6`.- What does this tell you about the precision of _floating point_ (decimal) operations in Python? _Tip:_ try printing the value of `5.1-2.6`. (See also: https://en.wikipedia.org/wiki/Floating-point_arithmetic) - How might you ensure that this equality is correct? ###Code 5.1-2.5 == 2.6 #Precision is more than one decimal point 5.1-2.5 #floating point operations are not precise #avoid floats (use ints) #round floating point values round(5.1-2.5,1)==round(2.6,1) ###Output _____no_output_____ ###Markdown Define a function `de_morgan()` that takes in two boolean values (`P` and `Q`) and _returns_ whether the [De Morgan laws](https://en.wikipedia.org/wiki/De_Morgan%27s_laws) hold for those two values. That is:> the negation of P **and** Q is the same as the negation of P **or** the negation of Q> and> the negation of P **or** Q is the same as the negation of P **and** the negation of QYour function should return a boolean representing whether or not _both_ of these statements are true for the given `P` and `Q`. _Use parentheses to enforce order of operations!_ ###Code def de_morgan( P,Q): result1 = (not (P and Q)) == ((not P) or (not Q)) result2 = (not(P or Q)) == ((not P) and (not Q)) return (result1 and result2) ###Output _____no_output_____ ###Markdown Call your `de_morgan()` function for each possible value of `P` and `Q` (there are **4** possible combinations), printing the result, in order to demonstrate that these laws hold. ###Code print(de_morgan(False,False)) print(de_morgan(False,True)) print(de_morgan(True,False)) print(de_morgan(True,True)) ###Output True True True True ###Markdown Use the built-in [input()](https://docs.python.org/3/library/functions.htmlinput) function to prompt the user for their favorite word.- This function will wait for the user to provide an input before it finished executing, and will then _return_ a string of whatever the user typed in. ###Code word = input("what's your favorite word ? ") ###Output what's your favorite word ? hello ###Markdown Write a boolean expression that represents if the inputted word has more than 8 letters. ###Code print(greater_than_8==len(word) > 8) ###Output False ###Markdown Use the `islower()` string method to check if the inputted word was entered with all lower-case letters. ###Code print(word.islower()) ###Output True ###Markdown Write a boolean expression that represents whether or not the word _starts with_ the letter `i` or _ends with_ the letter `n`. Use appropriate string methods. _Hint_: convert the word to a specific case (upper or lower) for the comparison. ###Code word.lower() print((word[0]=='i') or(word[-1]=='n')) word.startswith('i') or word.endswith('n') ###Output False ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a dictionary **`rectangle`** that has a key `'width'` with a value of `100`, a key `'height'` with a value of `50`, a key `'x'` with a value of `25`, and a key `'y'` with a value of `40`. 
###Code rectangle = {'width': 100, 'height': 50, 'x': 25, 'y': 40} ###Output _____no_output_____ ###Markdown Print the width of the rectangle (from the dictionary) ###Code print(rectangle['width']) ###Output _____no_output_____ ###Markdown Print the x- and y- coordinates of the rectangle (its location). Your output should have the format `"X, Y"`. ###Code print(rectangle['x'], ', ', rectangle['y'], sep = '') ###Output _____no_output_____ ###Markdown Print the rectangle's area (based on the dictionary values). ###Code # area = length * width area = rectangle['width'] * rectangle['height'] print(area) ###Output _____no_output_____ ###Markdown "Move" the rectangle by increasing its x-coordinate by `20` and _decreasing_ its y-coordinate by `10`. Print out the rectangle to see its new location. ###Code rectangle['x'] = rectangle['x'] + 20 rectangle['y'] = rectangle['y'] - 10 print(rectangle) ###Output _____no_output_____ ###Markdown Add a new key `'color'` to the rectangle with a value equal to your favorite color (as a string is fine). Output the rectangle to confirm your change. ###Code rectangle['color'] = 'blue' print(rectangle) ###Output _____no_output_____ ###Markdown Create a dictionary **`circle`** that has a `radius` of 35 and a `center` that is a **tuple** representing an x-coordinate of `59` and a y-coordinate of `80`. Output the circle. ###Code circle = {'radius': 35, 'center': (59, 80)} circle ###Output _____no_output_____ ###Markdown What is the distance between the circle's center and the rectangle's location? _Hint:_ use the [distance formula](http://www.mathwarehouse.com/algebra/distance_formula/index.php); the [math.hypot()](https://docs.python.org/3/library/math.htmlmath.hypot) can help. ###Code import math ##Find distance b/w x's and y's, then plus into math.hypot() function x_dist = circle['center'][0] - rectangle['x'] y_dist = circle['center'][1] - rectangle['y'] math.hypot(x_dist, y_dist) ###Output _____no_output_____ ###Markdown Define a function **`get_area()`** that takes in a dictionary representing a shape (such as _either_ a circle or a rectangle like you defined above) and returns the _area_ of that shape. You will need to determine whether the shape is a rectangle or a circle (e.g., whether it has a radius or a width/height). Calculate rectange area as $width*height$ and circle area as $\pi * radius^2$. ###Code #import math def get_area(shape): """A function that takes as an input a dictionary representing a shape and returns the area for the shape""" ##determine if something is a circle, then calc area radius = shape.get('radius') if radius != None: #If the shape has a radius (i.e. is a circle) area = math.pi * (radius ** 2) else: area = rectangle['width'] * rectangle['height'] return area shape = circle print(get_area(shape)) ###Output _____no_output_____ ###Markdown Use your `get_area()` function and print the areas of both your `rectangle` and your `circle`. ###Code shape = circle print(get_area(shape)) shape = rectangle print(get_area(shape)) ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `my_name` that contains your name. ###Code my_name="Joe" ###Output _____no_output_____ ###Markdown Create a variable `name_length` that holds how many letters are in your name. Print the number of letters. 
###Code name_length=len(my_name) print(name_length) ###Output 3 ###Markdown Print out your name with the uppercase letters made lowercase, and the lowercase letters made uppercase. **Hint:** look for a [string method](https://docs.python.org/3/library/stdtypes.htmlstring-methods) that will modify the _case_ of the string.- Try to do this without creating a separate variable! ###Code print((my_name).swapcase()) ###Output jOE ###Markdown Pick two of your favorite numbers (between 1 and 100) and assign them to `favorite_1` and `favorite_2` ###Code favorite_1=50 favorite_2=23 ###Output _____no_output_____ ###Markdown Divide each number by the length of your name raised to the power of `.598` (use the built-in `pow()` function for practice), and save it in the same variable. ###Code favorite_1=pow((favorite_1/name_length),.598) favorite_2=pow((favorite_2/name_length),.598) ###Output _____no_output_____ ###Markdown Create a variable `raw_sum` that is the sum of those two variables. Note you _cannot_ use the `sum()` function for this, so just use a normal operator! ###Code raw_sum=favorite_1+favorite_2 ###Output _____no_output_____ ###Markdown Create a variable `round_sum` that is the `raw_sum` rounded to 1 decimal place. Use the `round()` function. ###Code round_sum=round(raw_sum,1) ###Output _____no_output_____ ###Markdown Create two new variables `rounded_1` and `rounded_2` that are your `favorite_1` and `favorite_2` variables rounded to 1 decimal place. Print them out on a single line (hint: pass them as two different arguments). ###Code rounded_1=round(favorite_1,1) rounded_2=round(favorite_2,1) print(rounded_1,rounded_2) ###Output 1.4 3.4 ###Markdown Create a variable `sum_round` that is the sum of the rounded values (use a normal math operator). ###Code sum_round=rounded_1+rounded_2 ###Output _____no_output_____ ###Markdown Which is bigger, `round_sum` or `sum_round`? (You can use the `max()` function!) ###Code print(max(round_sum,sum_round)) ###Output 4.8 ###Markdown Create a variable `fruits` that contains the string `"apples and bananas"` ###Code fruits="apples and bananas" print(fruits) ###Output apples and bananas ###Markdown Use the `replace()` function to substitute all the "a"s in `fruits` with "ee". Store the result in a variable called `fruits_e`. ###Code fruits_e=fruits.replace("a","ee") ###Output _____no_output_____ ###Markdown Use the `replace()` function to substitute all the "a"s in `fruits` with "o". Store the result in a variable called `fruits_o`. ###Code fruits_o=fruits.replace("a","o") ###Output _____no_output_____ ###Markdown Print out the string "I like to eat " followed by each of `fruits`, `fruits_e` and `fruits_o` (three sentences). ###Code print("I like to eat ",fruits) print("I like to eat ",fruits_e) print("I like to eat ",fruits_o) empty ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Write a **`while`** loop that prints out every 5th number (multiples of 5) from 0 to 100 (inclusive).- _Tip:_ use an **`end=','`** keyword argument to the `print()` function to print all the numbers on the same line. ###Code numbers = 0 while numbers <= 100: print(numbers, end = ', ') numbers += 5 ###Output _____no_output_____ ###Markdown Use a **`while`** loop to print out the first 15 [Triangular numbers](https://en.wikipedia.org/wiki/Triangular_number). This is a sequence of numbers for which the _nth_ value is the sum of the numbers from 0 to _n_. 
**Do this only using addition!**- _Hint:_ use an additional variable to keep track of the `total` value, and have that value increase by the number of times you've been through the loop each iteration! ###Code nums = 0 total = 0 while nums <=15: total = total + nums print(total, end = ',') nums +=1 ###Output _____no_output_____ ###Markdown _Challenge_ Use a **`while`** loop to print out 20 numbers, each of which is larger than the previous by the the _sum_ of the **two** previous numbers (the [Fibonacci sequence](https://en.wikipedia.org/wiki/Fibonacci_number).- _Hint_: you'll need to keep track of the two previous values (start them at 0 and 1), and then "update" them each time through the loop, storing the "new total" in the first previous variable, and the first previous variable in the second (be careful about the ordering of this!) ###Code nums_fib = 0 while nums_fib ###Output _____no_output_____ ###Markdown Use a **`while`** loop to print out a sequence of random numbers from 0 to 10, stopping after the number `4` is printed for the first time. You will need to import the `random` module. Modify the below "coin flipping" example from the course text so that it keeps flipping coins until you get two "heads" in a row. ###Code # flip a coin until it shows up heads still_flipping = True while still_flipping: flip = randint(0,1) if flip == 0: flip = "Heads" else: flip = "Tails" print(flip, end=", ") if flip == "Heads": still_flipping = False ###Output _____no_output_____ ###Markdown Define a function **`input_number()`** that takes a minimum and maximum value as arguments. This function should prompt the user to input a number within the range, repeating the prompt if the provided value is not acceptable. Once an acceptable value has been provided, the function should return that number. You can assume that the user-entered input will always be numeric.Be sure and call your function and print its results to test it! ###Code def input_number(min, max): valid = False while not valid: number = int(input("Pick a number between "+ str(min) + " and " + str(max) + ": ")) if (min<=number<=max): valid = True print("great choice") else: print("Invalid number.") input_number(1, 20) ###Output Pick a number between 1 and 20: 21 Invalid number. Pick a number between 1 and 20: 1 great choice ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Write a **`while`** loop that prints out every 5th number (multiples of 5) from 0 to 100 (inclusive).- _Tip:_ use an **`end=','`** keyword argument to the `print()` function to print all the numbers on the same line. ###Code num = 0 while num <= 100: if num % 5 == 0: print(num, end=',') num = num + 1 else: num = num +1 ###Output 0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100, ###Markdown Use a **`while`** loop to print out the first 15 [Triangular numbers](https://en.wikipedia.org/wiki/Triangular_number). This is a sequence of numbers for which the _nth_ value is the sum of the numbers from 0 to _n_. **Do this only using addition!**- _Hint:_ use an additional variable to keep track of the `total` value, and have that value increase by the number of times you've been through the loop each iteration! 
###Code n = 1 num = 0 while n <= 15: num = num + n print(num, end=',') n += 1 ###Output 1,3,6,10,15,21,28,36,45,55,66,78,91,105,120, ###Markdown _Challenge_ Use a **`while`** loop to print out 20 numbers, each of which is larger than the previous by the the _sum_ of the **two** previous numbers (the [Fibonacci sequence](https://en.wikipedia.org/wiki/Fibonacci_number).- _Hint_: you'll need to keep track of the two previous values (start them at 0 and 1), and then "update" them each time through the loop, storing the "new total" in the first previous variable, and the first previous variable in the second (be careful about the ordering of this!) ###Code i = 1 a1 = 0 a2 = while i <= 20: ###Output _____no_output_____ ###Markdown Use a **`while`** loop to print out a sequence of random numbers from 0 to 10, stopping after the number `4` is printed for the first time. You will need to import the `random` module. Modify the below "coin flipping" example from the course text so that it keeps flipping coins until you get two "heads" in a row. ###Code # flip a coin until it shows up heads still_flipping = True while still_flipping: flip = randint(0,1) if flip == 0: flip = "Heads" else: flip = "Tails" print(flip, end=", ") if flip == "Heads": still_flipping = False ###Output _____no_output_____ ###Markdown Define a function **`input_number()`** that takes a minimum and maximum value as arguments. This function should prompt the user to input a number within the range, repeating the prompt if the provided value is not acceptable. Once an acceptable value has been provided, the function should return that number. You can assume that the user-entered input will always be numeric.Be sure and call your function and print its results to test it! ###Code def input_number(min, max): valid = False while not valid: number = int(input("Pick a number between " + str(min) +" and " + str(max) + " :")) if (min <= number <= max): valid = True print("Great job! That's in the range.") else: print("Invalid. Try again!") return number input_number(1, 20) ###Output Pick a number between 1 and 20 :25 Invalid. Try again! Pick a number between 1 and 20 :23 Invalid. Try again! Pick a number between 1 and 20 :22 Invalid. Try again! Pick a number between 1 and 20 :19 Great job! That's in the range. ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Define a new list **`words`** that contains each of the words to the following [song lyric](https://www.youtube.com/watch?v=StTqXEQ2l-Y) (don't include the notes):```♫ everything is awesome everything is cool when you are part of a TEEEEAM ♫But I have to be sure when I walk out that door oh how I want to break free```_Hint:_ use the `split()` string method. ###Code lyric = "But I have to be sure when I walk out that door oh how I want to break free" words = lyric.split() print(words) ###Output ['But', 'I', 'have', 'to', 'be', 'sure', 'when', 'I', 'walk', 'out', 'that', 'door', 'oh', 'how', 'I', 'want', 'to', 'break', 'free'] ###Markdown Define a function **`abbreviate()`** that takes in a string as an argument and _returns_ the first letter of that string capitalized and followed by a period (`.`). For example:```pythonabbreviate("dog") returns "D."```_Hint:_ you can use bracket notation to access the first (0th) character of a string! ###Code def abbreviate(word): print(word[0].upper() + '.') abbreviate('but') ###Output B. 
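###Markdown
Note that this `abbreviate()` prints the abbreviation instead of returning it, which is why mapping it over the word list later yields `None` values. A returning variant, sketched under a different name so it does not clash with the cell above:
###Code
def abbreviate_return(word):
    """Return the capitalized first letter of word followed by a period."""
    return word[0].upper() + '.'

print(abbreviate_return('dog'))  # expected: D.
###Output
_____no_output_____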
###Markdown Use the **`map()`** function to transform the list of lyric words into a list of abbreviations (technically an initialism). ###Code initials = list(map(abbreviate, words)) ###Output B. I. H. T. B. S. W. I. W. O. T. D. O. H. I. W. T. B. F. ###Markdown Use the **`filter()`** function to remove each word from the list of `words` that is 3 or fewer letters in length. You can define a named callback function, or use an anonymous lambda. ###Code list(filter(lambda x: len(x) > 3, words)) ###Output _____no_output_____ ###Markdown **In a single statement**, use both the `map()` and `filter()` functions to get a list of the initials of the long words in the the `words` list. _Hint:_ try pasting your filtering code into your mapping code from the previous problems! ###Code l_init = list(map(abbreviate, filter(lambda x: len(x) > 3, words))) print(l_init) ###Output H. S. W. W. T. D. W. B. F. [None, None, None, None, None, None, None, None, None] ###Markdown Use the **`reduce()`** function to combine the list of initials into a single string. You can define a named callback function, or use an anonymous lambda. Remember to `import` the `reduce()` function! _Note:_ normally you would do this with the `join()` string method. ###Code from functools import reduce reduce(lambda x, y: x + y, l_init) ###Output _____no_output_____ ###Markdown Use the **`reduce()`** function to get a list of _unique_ words in the original lyrics (not the initials).- Think about what to do with each "new" list element to include it in the aggregation (you can use an `if` statement to decide whether to include it or not!). Try defining an `add_if_unique()` callback function.- You will also need to use the optional third argument to [reduce()](https://docs.python.org/3/library/functools.htmlfunctools.reduce) to specify that the "aggregation" should start with an empty list `[]`.- We do this with `reduce()` instead of `filter()` because it allows us to more directly check the list beyond the specific element being considered. ###Code def add_if_unique(item_list, element): if element in item_list: return item_list else: item_list.append(element) return item_list reduce(add_if_unique, words, []) ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable **`names`** that is a list containing the names of 2 people next to you. ###Code names = ["Brynn", "Sarrah"] print(names[1]) ###Output Sarrah ###Markdown Create a second variable **`absent`** that is an empty list. Then add two names of people who you _wish_ were next to you. ###Code absent = ["Theo", "Tom"] print(absent) ###Output ['Theo', 'Tom'] ###Markdown Create a variable **`all_names`** that is the `names` and `absent` lists added together. Output the list. ###Code all_names = names + absent print(all_names) ###Output ['Brynn', 'Sarrah', 'Theo', 'Tom'] ###Markdown What happens if you try and `append()` the `absent` list to the `names` list? Print the resulting `names` variable, as well as the length of that list. Is it what you expect?- Be careful about running this cell multiple times; you can always **Reset the Kernel** if needed. ###Code names.append(absent) print(names) ###Output ['Brynn', 'Sarrah', ['Theo', 'Tom']] ###Markdown Add _your name_ at the **beginning** of the `all_names` list. _Hint_: find an appropriate [list method](https://docs.python.org/3/tutorial/datastructures.htmlmore-on-lists). 
###Code all_names.insert(0, "Shuqi") print(all_names) ###Output ['Shuqi', 'Brynn', 'Sarrah', 'Theo', 'Tom'] ###Markdown Use the `range()` function to create a sequence of numbers from 10 to 99. Then use the [random.sample()](https://docs.python.org/3/library/random.htmlrandom.sample) to produce a list of 11 random numbers from that range. Store that list in a variable called **`numbers`**, and print the list so you know what it is. ###Code import random numbers = random.sample(range(10,99), k = 11) print(numbers) ###Output [65, 42, 51, 31, 48, 93, 86, 74, 24, 92, 35] ###Markdown Find the [sum](https://docs.python.org/3/library/functions.htmlsum) of the `numbers` list (note that this function _only_ works for sequences!) ###Code sum(numbers) ###Output _____no_output_____ ###Markdown [Sort](https://wiki.python.org/moin/HowTo/Sorting) the `numbers` list. ###Code numbers.sort() print(numbers) ###Output [24, 31, 35, 42, 48, 51, 65, 74, 86, 92, 93] ###Markdown Print the _last_ element of the `numbers` list (which should now be the biggest) ###Code print(numbers[-1]) ###Output 93 ###Markdown Create a list **`first_three`** that contains the first three elements of the `numbers` list. Use the _slice_ (**`:`**) operator. ###Code numbers[:3] ###Output _____no_output_____ ###Markdown Replace the first three elements in `numbers` with values that are _double_ (2x) the original value. You can use 3 assignment statements (or just 1 if you are tricky!) ###Code numbers[:3] = [numbers[0]*2, numbers[1]*2, numbers[2]*2] print(numbers) ###Output [48, 62, 70, 42, 48, 51, 65, 74, 86, 92, 93] ###Markdown Create a list **`middle_three`** that contains the three elements in the _middle_ of the list (these need not be the three median values). _Hint:_ calculate the middle index. ###Code middle_three = numbers[4: -4] print(middle_three) ###Output [48, 51, 65] ###Markdown **Bonus Challenge**Define a function **`pig_latinize()`** that takes in a single word as an argument, and returns a [pig-latin](https://en.wikipedia.org/wiki/Pig_LatinRules) version of that word. That is, if the word starts with a vowel (a,e,i,o,u), then the function returns that word with `"way"` added to the end. If it starts with a consonant, the function returns that word with the first consonent moved to the end, followed by `"ay"`. ###Code def pig_latinize(word): vowel = ["a", "e", "i", "o", "u"] if word[0] in vowel: return word + "way" else: return word[1:] + word[0] + "ay" pig_latinize("too") ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `food` that stores your favorite kind of food. Print or output the variable. ###Code food = "steak" print(food) ###Output steak ###Markdown Create a variable `restaurant` that stores your favorite place to eat that kind of food. ###Code restaurant ="Bateau" ###Output _____no_output_____ ###Markdown Print the message `"I'm going to RESTAURANT for some FOOD"`, replacing the restaurant and food with your variables. ###Code print("I'm going to " + restaurant + " for some " + food) ###Output I'm going to Bateau for some steak ###Markdown Create a variable `num_friends` equal to the number of friends you would like to eat with. ###Code num_friends ="5" ###Output _____no_output_____ ###Markdown Print a message `"I'm going with X friends"`, replacing the X with the number of friends. 
###Code print("I'm going with " + num_friends + " friends") ###Output I'm going with 5 friends ###Markdown Create a variable `meal_price`, which is how expensive you think one meal at the restaurant would be. This price should be a `float`. ###Code meal_price="100.00" ###Output _____no_output_____ ###Markdown Update (re-assign) the `meal_price` variable so it includes a 15% tip&mdash;that is, so the price is 15% higher. Output the variable. ###Code meal_price="115.00" print(meal_price) ###Output 115.00 ###Markdown Create a variable `total_cost` that has the total estimated cost of the bill for you and all of your friends. Output or print the variable ###Code total_cost="690.00" print(total_cost) ###Output 690.00 ###Markdown Create a variable `budget` representing your spending budget for a night out. ###Code budget="500.00" print(budget) ###Output 500.00 ###Markdown Create a variable `max_friends`, which is the maximum number of friends you can invite, at the estimated meal price, while staying within your budget. Output or print this value.- Be carefully that you only invite whole people! ###Code max_friends="4" print(max_friends) ###Output 4 ###Markdown Bonus: Create a variable `chorus` that is the string `"FOOD time!"` _repeated_ once for each of the friends you are able to bring. _Hint_ use the **`*`** operator. Print out the variable. ###Code chorus="STEAK time! \n"*4 print(chorus) print("First line \n Second line") ###Output First line Second line ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `food` that stores your favorite kind of food. Print or output the variable. ###Code food = "mussels" ###Output _____no_output_____ ###Markdown Create a variable `restaurant` that stores your favorite place to eat that kind of food. ###Code restaurant = "Crabby Bill's" ###Output _____no_output_____ ###Markdown Print the message `"I'm going to RESTAURANT for some FOOD"`, replacing the restaurant and food with your variables. ###Code print("I'm going to " + restaurant + " for some " + food) ###Output I'm going to Crabby Bill's for some mussels ###Markdown Create a variable `num_friends` equal to the number of friends you would like to eat with. ###Code num_friends = 4 ###Output _____no_output_____ ###Markdown Print a message `"I'm going with X friends"`, replacing the X with the number of friends. ###Code print("I'm going with " + str(num_friends) + " friends.") ###Output I'm going with 4 friends. ###Markdown Create a variable `meal_price`, which is how expensive you think one meal at the restaurant would be. This price should be a `float`. ###Code meal_price = 15.00 ###Output _____no_output_____ ###Markdown Update (re-assign) the `meal_price` variable so it includes a 15% tip&mdash;that is, so the price is 15% higher. Output the variable. ###Code meal_price = meal_price * 1.15 ###Output _____no_output_____ ###Markdown Create a variable `total_cost` that has the total estimated cost of the bill for you and all of your friends. Output or print the variable ###Code total_cost = meal_price * num_friends ###Output _____no_output_____ ###Markdown Create a variable `budget` representing your spending budget for a night out. ###Code budget = 50.00 ###Output _____no_output_____ ###Markdown Create a variable `max_friends`, which is the maximum number of friends you can invite, at the estimated meal price, while staying within your budget. Output or print this value.- Be carefully that you only invite whole people! 
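###Markdown
Because `budget` and `meal_price` were stored as strings in this notebook, the division has to be done on numeric values. A sketch using the same numbers as floats (the values mirror the strings assigned above):
###Code
budget_value = 500.00       # numeric stand-in for budget = "500.00" above
meal_price_value = 115.00   # numeric stand-in for the tipped meal_price = "115.00" above
max_friends_numeric = int(budget_value // meal_price_value)  # whole people only
print(max_friends_numeric)
###Output
_____no_output_____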
###Code max_friends = int(budget/meal_price) ###Output _____no_output_____ ###Markdown Bonus: Create a variable `chorus` that is the string `"FOOD time!"` _repeated_ once for each of the friends you are able to bring. _Hint_ use the **`*`** operator. Print out the variable. ###Code def Chorus() print ((upper(food) + "time!"/n) * max_friends) ###Output _____no_output_____ ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable `my_name` that contains your name. ###Code my_name = "Sarah Nguyen" ###Output _____no_output_____ ###Markdown Create a variable `name_length` that holds how many letters are in your name. Print the number of letters. ###Code name_length = len(my_name) print(name_length) ###Output 12 ###Markdown Print out your name with the uppercase letters made lowercase, and the lowercase letters made uppercase. **Hint:** look for a [string method](https://docs.python.org/3/library/stdtypes.htmlstring-methods) that will modify the _case_ of the string.- Try to do this without creating a separate variable! ###Code print(str.swapcase(my_name)) ###Output sARAH nGUYEN ###Markdown Pick two of your favorite numbers (between 1 and 100) and assign them to `favorite_1` and `favorite_2` ###Code favorite_1 = 81 favorite_2 = 31 ###Output _____no_output_____ ###Markdown Divide each number by the length of your name raised to the power of `.598` (use the built-in `pow()` function for practice), and save it in the same variable. ###Code favorite_1 = favorite_1/pow(name_length, .598) favorite_2 = favorite_2/pow(name_length, .598) # print(favorite_1) # print(favorite_2) ###Output 0.9384991064624025 1.5873069710283991 ###Markdown Create a variable `raw_sum` that is the sum of those two variables. Note you _cannot_ use the `sum()` function for this, so just use a normal operator! ###Code raw_sum = favorite_1 + favorite_2 # print(raw_sum) ###Output 2.5258060774908015 ###Markdown Create a variable `round_sum` that is the `raw_sum` rounded to 1 decimal place. Use the `round()` function. ###Code round_sum = round(raw_sum, 1) # print(round_sum) ###Output 2.5 ###Markdown Create two new variables `rounded_1` and `rounded_2` that are your `favorite_1` and `favorite_2` variables rounded to 1 decimal place. Print them out on a single line (hint: pass them as two different arguments). ###Code rounded_1 = round(favorite_1, 1) rounded_2 = round(favorite_2, 1) print(rounded_1, rounded_2) ###Output 0.9 1.6 ###Markdown Create a variable `sum_round` that is the sum of the rounded values (use a normal math operator). ###Code sum_round = rounded_1 + rounded_2 # print(sum_round) ###Output 2.5 ###Markdown Which is bigger, `round_sum` or `sum_round`? (You can use the `max()` function!) ###Code max(round_sum, sum_round) ###Output _____no_output_____ ###Markdown Create a variable `fruits` that contains the string `"apples and bananas"` ###Code fruits = "apples and bananas" ###Output _____no_output_____ ###Markdown Use the `replace()` function to substitute all the "a"s in `fruits` with "ee". Store the result in a variable called `fruits_e`. ###Code fruits_e = fruits.replace("a", "ee") # print(fruits_e) ###Output eepples eend beeneenees ###Markdown Use the `replace()` function to substitute all the "a"s in `fruits` with "o". Store the result in a variable called `fruits_o`. 
###Code fruits_o = fruits.replace("a", "o") # print(fruits_o) ###Output opples ond bononos ###Markdown Print out the string "I like to eat " followed by each of `fruits`, `fruits_e` and `fruits_o` (three sentences). ###Code print("I like to eat ", fruits,".") print("I like to eat ", fruits_e,".") print("I like to eat ", fruits_o,".") ###Output I like to eat apples and bananas . I like to eat eepples eend beeneenees . I like to eat opples ond bononos . ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable **`names`** that is a list containing the names of 2 people next to you. ###Code names = ['Paul', 'John'] print(names) ###Output _____no_output_____ ###Markdown Create a second variable **`absent`** that is an empty list. Then add two names of people who you _wish_ were next to you. ###Code absent = [] absent.append('George') absent.append('Ringo') print(absent) ###Output _____no_output_____ ###Markdown Create a variable **`all_names`** that is the `names` and `absent` lists added together. Output the list. ###Code all_names = names + absent print(all_names) ###Output _____no_output_____ ###Markdown What happens if you try and `append()` the `absent` list to the `names` list? Print the resulting `names` variable, as well as the length of that list. Is it what you expect?- Be careful about running this cell multiple times; you can always **Reset the Kernel** if needed. ###Code names.append(absent) print(names) print(len(names)) ###Output _____no_output_____ ###Markdown Add _your name_ at the **beginning** of the `all_names` list. _Hint_: find an appropriate [list method](https://docs.python.org/3/tutorial/datastructures.htmlmore-on-lists). ###Code all_names.insert(0,'Matt') print(all_names) ###Output _____no_output_____ ###Markdown Use the `range()` function to create a sequence of numbers from 10 to 99. Then use the [random.sample()](https://docs.python.org/3/library/random.htmlrandom.sample) to produce a list of 11 random numbers from that range. Store that list in a variable called **`numbers`**, and print the list so you know what it is. ###Code import random numbers = random.sample(range(10, 100),11) numbers ###Output _____no_output_____ ###Markdown Find the [sum](https://docs.python.org/3/library/functions.htmlsum) of the `numbers` list (note that this function _only_ works for sequences!) ###Code sum(numbers) ###Output _____no_output_____ ###Markdown [Sort](https://wiki.python.org/moin/HowTo/Sorting) the `numbers` list. ###Code numbers.sort() print(numbers) ###Output _____no_output_____ ###Markdown Print the _last_ element of the `numbers` list (which should now be the biggest) ###Code print(numbers[-1]) ###Output _____no_output_____ ###Markdown Create a list **`first_three`** that contains the first three elements of the `numbers` list. Use the _slice_ (**`:`**) operator. ###Code first_three = numbers[:3] print(first_three) ###Output _____no_output_____ ###Markdown Replace the first three elements in `numbers` with values that are _double_ (2x) the original value. You can use 3 assignment statements (or just 1 if you are tricky!) ###Code # using 3 assignment statements # numbers[0] = numbers[0]*2 # numbers[1] = numbers[1]*2 # numbers[2] = numbers[2]*2 # using multiple-assignment! numbers[:3] = [numbers[0]*2, numbers[1]*2, numbers[2]*2] numbers ###Output _____no_output_____ ###Markdown Create a list **`middle_three`** that contains the three elements in the _middle_ of the list (these need not be the three median values). 
_Hint:_ calculate the middle index. ###Code middle_index = (len(numbers)//2) #this // rounds down # numbers[middle_index] #test: pass ##Explicit Process #middle_three= [numbers[(middle_index-1)], numbers[middle_index], numbers[(middle_index+1)]] ##Efficient Process #Should look like a slice: middle_three = numbers[n:n] middle_three = numbers[middle_index-1: middle_index+2] middle_three ###Output _____no_output_____ ###Markdown **Bonus Challenge**Define a function **`pig_latinize()`** that takes in a single word as an argument, and returns a [pig-latin](https://en.wikipedia.org/wiki/Pig_LatinRules) version of that word. That is, if the word starts with a vowel (a,e,i,o,u), then the function returns that word with `"way"` added to the end. If it starts with a consonant, the function returns that word with the first consonent moved to the end, followed by `"ay"`. ###Code def pig_latinize(word): if word[0] in "aeiou": return word + "way" else: return word[1:] + word[0] + "ay" print(pig_latinize("porky Pig")) print(pig_latinize("says")) print(pig_latinize("hello")) print(pig_latinize("everyone")) ###Output orky Pigpay ayssay ellohay everyoneway ###Markdown Exercise 1Add the specified code for each code cell, running the cells _in order_. Create a variable **`deck`** that represents a deck of [playing cards](https://en.wikipedia.org/wiki/Playing_card). This variable should be a _list_ of 52 elements, each of which is a dictionary with the following keys:- `suit`, with a string value that is either `'hearts'`, `'diamonds'`, `'clubs'`, or `'spades'`.- `rank`, with an interger value ranging from 2 to 14 inclusive (11-14 represent a Jack, Queen, King, or Ace respectively)._Tip:_ use a pair of nested loops to add each combination of suit and rank to the `deck` list! ###Code deck = [] suits = ['hearts', 'diamonds', 'clubs', 'spades'] for suit in suits: for rank in range(2,15): card = {'suit': suit, 'rank':rank} deck.append(card) deck ###Output _____no_output_____ ###Markdown Create a list **hand** that represents a hand of [Poker](https://en.wikipedia.org/wiki/Poker) (5-card draw). Add 5 cards from the `deck` to this list.- You can add 5 specific cards for testing (check their indices!), or use the `random.shuffle()` function to shuffle the deck and then _slice_ 5 cards from the top.Print out the `hand` so you know what you are dealing with! ###Code import random random.shuffle(deck) hand = deck[:5] hand ###Output _____no_output_____ ###Markdown Define a function **`contains_queen_of_hearts()`** that takes in a list of cards (e.g., a hand) and returns whether or not the Queen of Hearts is in that list. _For practice, don't use the `in` operator to check for containment_. ###Code def contains_queen_of_hearts(a_list): for dict in a_list: if dict == {'suit':'hearts', 'rank': 12}: return True return False contains_queen_of_hearts(deck) ###Output _____no_output_____ ###Markdown Define a function **`get_high_card()`** that takes in a list of cards and returns the card (dictionary) of the highest value. The "high card" is the one with the highest rank. Cards of different suits but the same rank are considered to have the same value and either may be returned.- Hint: use a "king-of-the-hill" search! 
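###Markdown
Besides the loop-based scan the hint suggests (shown in the next cell), the built-in `max()` with a `key` function finds the same card; a one-cell sketch for comparison, using the `hand` drawn above:
###Code
# Same result as a "king-of-the-hill" loop, via the built-in max()
print(max(hand, key=lambda card: card['rank']))
###Output
_____no_output_____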
###Code def get_high_card(a_list): max_num = a_list[0]['rank'] max_dict = a_list[0] for dict in a_list: if dict['rank'] > max_num: max_num = dict['rank'] max_dict = dict return max_dict get_high_card(hand) ###Output _____no_output_____ ###Markdown Define a function **`is_flush()`** that takes in a list of cards and returns whether or not the list represents a _flush_&mdash;that is, all of the cards have the same _suit_. ###Code def is_flush(a_list): tar_suit = a_list[0]['suit'] for dict in a_list: if dict['suit'] != tar_suit: return False return True a = [{'suit': 'diamonds', 'rank': 10}, {'suit': 'diamonds', 'rank': 13}, {'suit': 'diamonds', 'rank': 5}] is_flush(a) ###Output _____no_output_____ ###Markdown _Challenge_: Define a functon **`has_pair()`** that takes in a list of cards and returns whether or not there is at least one _pair_ (two cards with the same _rank_) in the list._Bonus challenge:_ Return the rank of the pair of cards with the highest rank (e.g., if there is more than one pair!) ###Code def has_pair(a_list): n = len(a_list) num_pair = 0 pair_list = [] for i in range(n): a = a_list[i]['rank'] for j in range(i + 1, n): if a == a_list[j]['rank']: num_pair += 1 pair_list.append(a_list[i]) #pair_list.append(a_list[j]) #print(pair_list) #return True if num_pair > 1: max_num = pair_list[0]['rank'] for k in range(num_pair): if pair_list[k]['rank'] > max_num: max_num = pair_list[k]['rank'] print(pair_list[k]) a_list = [{'suit': 'hearts', 'rank': 13}, {'suit': 'diamonds', 'rank': 12}, {'suit': 'diamonds', 'rank': 5}, {'suit': 'hearts', 'rank': 4}, {'suit': 'diamonds', 'rank': 4}] has_pair(a_list) ###Output _____no_output_____
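###Markdown
The `has_pair()` above prints any pairs it finds but never returns a boolean as the prompt asks. A minimal sketch of a version that returns whether a pair exists together with the highest paired rank (returning `None` for the rank when there is no pair is an added convention, not part of the prompt):
###Code
def has_pair_sketch(cards):
    """Return (True, highest paired rank) if any rank appears at least twice, else (False, None)."""
    rank_counts = {}
    for card in cards:
        rank_counts[card['rank']] = rank_counts.get(card['rank'], 0) + 1
    paired_ranks = [rank for rank, count in rank_counts.items() if count >= 2]
    if paired_ranks:
        return True, max(paired_ranks)
    return False, None

print(has_pair_sketch(a_list))  # a_list is the sample hand defined above
###Output
_____no_output_____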
notebooks/stephan_notebooks/22-Evaluate-full-models.ipynb
###Markdown Load data ###Code name = 'single_forecast_tp_pure_sr_pretraining' mrms = xr.open_dataarray('tmp/mrms.nc') ens_pred = xr.open_dataarray(f'tmp/ens_pred_{name}.nc') ens_pred_stitched = xr.open_dataarray(f'tmp/ens_pred_stitched_{name}.nc') interp_ens = xr.open_dataarray('tmp/interp_ens.nc') href = xr.open_dataarray('tmp/href.nc') mask = xr.open_dataarray('../tmp/total_mask.nc') href href.isel(valid_time=0).plot(col='member') ###Output _____no_output_____ ###Markdown Make data compatible ###Code overlap_times = np.intersect1d(interp_ens.valid_time, href.valid_time) ###Output _____no_output_____ ###Markdown Time ###Code ens_pred = ens_pred.sel(valid_time=overlap_times) ens_pred_stitched = ens_pred_stitched.sel(valid_time=overlap_times) interp_ens = interp_ens.sel(valid_time=overlap_times) href = href.sel(valid_time=overlap_times) mrms = mrms.sel(valid_time=overlap_times) ###Output _____no_output_____ ###Markdown Mask ###Code ens_pred = ens_pred.where(mask) ens_pred_stitched = ens_pred_stitched.where(mask) interp_ens = interp_ens.where(mask) href = href.where(mask) mrms = mrms.where(mask) ###Output _____no_output_____ ###Markdown Domain ###Code interp_ens = interp_ens.sel(lat=ens_pred.lat, lon=ens_pred.lon) href = href.sel(lat=ens_pred.lat, lon=ens_pred.lon) mrms = mrms.sel(lat=ens_pred.lat, lon=ens_pred.lon) ens_pred_stitched = ens_pred_stitched.sel(lat=ens_pred.lat, lon=ens_pred.lon) ###Output _____no_output_____ ###Markdown Plotting ###Code def plot_comparison(valid_time, member): fig, axs = plt.subplots(2, 2, figsize=(20, 12)) axs = axs.flatten() mrms.isel(valid_time=valid_time).plot(vmin=0, vmax=20, cmap='gist_ncar_r', ax=axs[0]) axs[0].set_title('MRMS') href.isel(member=0, valid_time=0).plot(vmin=0, vmax=20, cmap='gist_ncar_r', ax=axs[1]) axs[1].set_title('HREF') interp_ens.isel(member=0, valid_time=0).plot(vmin=0, vmax=20, cmap='gist_ncar_r', ax=axs[2]) axs[2].set_title('Interpolated TIGGE') ens_pred.isel(member=0, valid_time=0).plot(vmin=0, vmax=20, cmap='gist_ncar_r', ax=axs[3]) axs[3].set_title(name) for ax in axs: ax.grid() plt.tight_layout() plot_comparison(0, 0) ###Output _____no_output_____ ###Markdown Scores Histograms ###Code bins = np.logspace(0, 2, 25)-1 mid_bin = (bins[1:] + bins[:-1])/2 def plot_hist(ds, bins, label): nums, bins = np.histogram(ds.values, bins=bins, density=True) plt.plot(mid_bin, nums, marker='o', label=label) plt.figure(figsize=(10, 5)) plot_hist(mrms, bins, 'MRMS') plot_hist(href, bins, 'HREF') plot_hist(interp_ens, bins, 'Interp') plot_hist(ens_pred, bins, name) plot_hist(ens_pred_stitched, bins, name + ' stitched') plt.yscale('log') plt.legend() ###Output _____no_output_____ ###Markdown RMSE (ensemble mean) ###Code exps = {'HREF': href, 'Interpolated TIGGE': interp_ens, name: ens_pred, f'{name} stitched': ens_pred_stitched} for n, e in exps.items(): score = xs.rmse(e.mean('member'), mrms, dim=['lat', 'lon', 'valid_time'], skipna=True).values print(n, score) ###Output HREF 1.8427570316954107 Interpolated TIGGE 1.525522928514815 GAN 1.5316364093763462 GAN stitched 1.5232642061961048 ###Markdown CRPS ###Code for n, e in exps.items(): score = xs.crps_ensemble(mrms, e, dim=['lat', 'lon', 'valid_time']).values print(n, score) ###Output HREF 0.39631195977493405 Interpolated TIGGE 0.36168230149834013 GAN 0.40008550965487566 GAN stitched 0.3885886403787917 ###Markdown Brier score ###Code threshold = 1 for n, e in exps.items(): score = xs.brier_score(mrms>threshold, (e>threshold).mean('member'), dim=['lat', 'lon', 'valid_time']).values print(n, score) 
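# Added diagnostic, not part of the original notebook: the observed base rate
# of exceeding the current threshold in MRMS (masked points excluded), which
# helps put the Brier scores at each threshold into context.
base_rate = (mrms > threshold).where(mrms.notnull()).mean().values
print('Observed base rate for threshold', threshold, ':', base_rate)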
threshold = 5 for n, e in exps.items(): score = xs.brier_score(mrms>threshold, (e>threshold).mean('member'), dim=['lat', 'lon', 'valid_time']).values print(n, score) threshold = 10 for n, e in exps.items(): score = xs.brier_score(mrms>threshold, (e>threshold).mean('member'), dim=['lat', 'lon', 'valid_time']).values print(n, score) ###Output HREF 0.006735240939141041 Interpolated TIGGE 0.008165393399131039 GAN 0.00835529927403726 GAN stitched 0.00819015789103526 ###Markdown Reliability diagram ###Code threshold = 1 for n, e in exps.items(): rel = xs.reliability(mrms>threshold, (e>threshold).mean('member')) plt.plot(rel.forecast_probability, rel, marker='o', label=n) plt.plot([0, 1], [0, 1], c='gray') plt.legend() threshold = 10 for n, e in exps.items(): rel = xs.reliability(mrms>threshold, (e>threshold).mean('member')) plt.plot(rel.forecast_probability, rel, marker='o', label=n) plt.plot([0, 1], [0, 1], c='gray') plt.legend() ###Output _____no_output_____
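###Markdown
A possible follow-up, not in the original notebook: calling `xs.crps_ensemble` with `dim=['lat', 'lon']` keeps the `valid_time` dimension, which shows how the ensemble CRPS of each product varies over the evaluation period. A sketch:
###Code
for n, e in exps.items():
    crps_t = xs.crps_ensemble(mrms, e, dim=['lat', 'lon'])
    plt.plot(crps_t.valid_time, crps_t, marker='o', label=n)
plt.ylabel('CRPS')
plt.legend()
###Output
_____no_output_____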
oreilly-source/notebooks/Chapter 1 - Mining Twitter.ipynb
###Markdown Mining TwitterTwitter implements OAuth 1.0A as its standard authentication mechanism, and in order to use it to make requests to Twitter's API, you'll need to go to https://developer.twitter.com/en/apps and create a sample application. It is possible that Twitter no longer supports sandboxed applications and you may need to submit a request for permission to develop an app on Twitter.There are four primary identifiers you'll need to note for an OAuth 1.0A workflow: consumer key, consumer secret, access token, and access token secret. Note that you will need an ordinary Twitter account in order to login, create an app, and get these credentials. If you are running this code on Binder or from the Docker container, you should just be able to execute the code in this notebook without any worries whatsoever about installing dependencies. If you are running the code from your own development envioronment, however, be advised that these examples in this chapter take advantage of a Python package called [twitter](https://github.com/sixohsix/twitter) to make API calls. You can install this package in a terminal with [pip](https://pypi.python.org/pypi/pip) with the command `pip install twitter`, preferably from within a [Python virtual environment](https://pypi.python.org/pypi/virtualenv). Once installed, you should be able to open up a Python interpreter (or better yet, your [IPython](http://ipython.org/) interpreter) and get rolling. Authorizing an application to access Twitter account data ###Code from dotenv import load_dotenv import os import twitter # Load environment variables load_dotenv() # Go to https://developer.twitter.com/en/apps to create an app and get values # for these credentials, which you'll need to provide in place of these # empty string values that are defined as placeholders. # See https://developer.twitter.com/en/docs/basics/authentication/overview/oauth # for more information on Twitter's OAuth implementation. CONSUMER_KEY = os.getenv('TWITTER_CONSUMER_KEY') CONSUMER_SECRET = os.getenv('TWITTER_CONSUMER_SECRET') OAUTH_TOKEN = os.getenv('TWITTER_ACCESS_TOKEN') OAUTH_TOKEN_SECRET = os.getenv('TWITTER_ACCESS_TOKEN_SECRET') auth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET, CONSUMER_KEY, CONSUMER_SECRET) twitter_api = twitter.Twitter(auth=auth) # Nothing to see by displaying twitter_api except that it's now a # defined variable print(twitter_api) ###Output <twitter.api.Twitter object at 0x7f11600e3d90> ###Markdown Retrieving trends ###Code # The Yahoo! Where On Earth ID for the entire world is 1. # See https://dev.twitter.com/docs/api/1.1/get/trends/place and # http://developer.yahoo.com/geo/geoplanet/ WORLD_WOE_ID = 1 US_WOE_ID = 23424977 # Prefix ID with the underscore for query string parameterization. # Without the underscore, the twitter package appends the ID value # to the URL itself as a special case keyword argument. 
world_trends = twitter_api.trends.place(_id=WORLD_WOE_ID) us_trends = twitter_api.trends.place(_id=US_WOE_ID) print(world_trends) print() print(us_trends) for trend in world_trends[0]['trends']: print(trend['name']) for trend in us_trends[0]['trends']: print(trend['name']) world_trends_set = set([trend['name'] for trend in world_trends[0]['trends']]) us_trends_set = set([trend['name'] for trend in us_trends[0]['trends']]) common_trends = world_trends_set.intersection(us_trends_set) print(common_trends) ###Output {'#28DaysUntilWalls', 'All-Pro', '#wwlll', '#CashAppFriday', 'Tyler Perry', 'Drummond', '#JBYummy', '#TrumpsWar', '#FreeCodeFridayContest', 'Cameron Diaz'} ###Markdown Anatomy of a Tweet ###Code import json # Set this variable to a trending topic, # or anything else for that matter. The example query below # was a trending topic when this content was being developed # and is used throughout the remainder of this chapter. q = '#TrumpsWar' count = 100 # Import unquote to prevent url encoding errors in next_results from urllib.parse import unquote # See https://dev.twitter.com/rest/reference/get/search/tweets search_results = twitter_api.search.tweets(q=q, count=count) statuses = search_results['statuses'] # Iterate through 5 more batches of results by following the cursor for _ in range(5): print('Length of statuses', len(statuses)) try: next_results = search_results['search_metadata']['next_results'] except KeyError as e: # No more results when next_results doesn't exist break # Create a dictionary from next_results, which has the following form: # ?max_id=847960489447628799&q=%23RIPSelena&count=100&include_entities=1 kwargs = dict([ kv.split('=') for kv in unquote(next_results[1:]).split("&") ]) search_results = twitter_api.search.tweets(**kwargs) statuses += search_results['statuses'] # Show one sample search result by slicing the list... print(json.dumps(statuses[0], indent=1)) for i in range(10): print() print(statuses[i]['text']) print('Favorites: ', statuses[i]['favorite_count']) print('Retweets: ', statuses[i]['retweet_count']) ###Output _____no_output_____ ###Markdown Extracting text, screen names, and hashtags from tweets ###Code status_texts = [ status['text'] for status in statuses ] screen_names = [ user_mention['screen_name'] for status in statuses for user_mention in status['entities']['user_mentions'] ] hashtags = [ hashtag['text'] for status in statuses for hashtag in status['entities']['hashtags'] ] # Compute a collection of all words from all tweets words = [ w for t in status_texts for w in t.split() ] # Explore the first 5 items for each... 
print(json.dumps(status_texts[0:5], indent=1)) print(json.dumps(screen_names[0:5], indent=1) ) print(json.dumps(hashtags[0:5], indent=1)) print(json.dumps(words[0:5], indent=1)) ###Output _____no_output_____ ###Markdown Creating a basic frequency distribution from the words in tweets ###Code from collections import Counter for item in [words, screen_names, hashtags]: c = Counter(item) print(c.most_common()[:10]) # top 10 print() ###Output _____no_output_____ ###Markdown Using prettytable to display tuples in a nice tabular format ###Code from prettytable import PrettyTable for label, data in (('Word', words), ('Screen Name', screen_names), ('Hashtag', hashtags)): pt = PrettyTable(field_names=[label, 'Count']) c = Counter(data) [ pt.add_row(kv) for kv in c.most_common()[:10] ] pt.align[label], pt.align['Count'] = 'l', 'r' # Set column alignment print(pt) ###Output _____no_output_____ ###Markdown Calculating lexical diversity for tweets ###Code # A function for computing lexical diversity def lexical_diversity(tokens): return len(set(tokens))/len(tokens) # A function for computing the average number of words per tweet def average_words(statuses): total_words = sum([ len(s.split()) for s in statuses ]) return total_words/len(statuses) print(lexical_diversity(words)) print(lexical_diversity(screen_names)) print(lexical_diversity(hashtags)) print(average_words(status_texts)) ###Output _____no_output_____ ###Markdown Finding the most popular retweets ###Code retweets = [ # Store out a tuple of these three values ... (status['retweet_count'], status['retweeted_status']['user']['screen_name'], status['retweeted_status']['id'], status['text']) # ... for each status ... for status in statuses # ... so long as the status meets this condition. if 'retweeted_status' in status.keys() ] # Slice off the first 5 from the sorted results and display each item in the tuple pt = PrettyTable(field_names=['Count', 'Screen Name', 'Tweet ID', 'Text']) [ pt.add_row(row) for row in sorted(retweets, reverse=True)[:5] ] pt.max_width['Text'] = 50 pt.align= 'l' print(pt) ###Output _____no_output_____ ###Markdown Looking up users who have retweeted a status ###Code # Get the original tweet id for a tweet from its retweeted_status node # and insert it here _retweets = twitter_api.statuses.retweets(id=862359093398261760) print([r['user']['screen_name'] for r in _retweets]) ###Output _____no_output_____ ###Markdown Plotting frequencies of words ###Code import matplotlib.pyplot as plt %matplotlib inline word_counts = sorted(Counter(words).values(), reverse=True) plt.loglog(word_counts) plt.ylabel("Freq") plt.xlabel("Word Rank") ###Output _____no_output_____ ###Markdown Generating histograms of words, screen names, and hashtags ###Code for label, data in (('Words', words), ('Screen Names', screen_names), ('Hashtags', hashtags)): # Build a frequency map for each set of data # and plot the values c = Counter(data) plt.hist(list(c.values())) # Add a title and y-label ... plt.title(label) plt.ylabel("Number of items in bin") plt.xlabel("Bins (number of times an item appeared)") # ... 
and display as a new figure plt.figure() ###Output _____no_output_____ ###Markdown Generating a histogram of retweet counts ###Code # Using underscores while unpacking values in # a tuple is idiomatic for discarding them counts = [count for count, _, _, _ in retweets] plt.hist(counts) plt.title('Retweets') plt.xlabel('Bins (number of times retweeted)') plt.ylabel('Number of tweets in bin') ###Output _____no_output_____ ###Markdown Sentiment Analysis ###Code # pip install nltk import nltk nltk.download('vader_lexicon') import numpy as np from nltk.sentiment.vader import SentimentIntensityAnalyzer twitter_stream = twitter.TwitterStream(auth=auth) iterator = twitter_stream.statuses.sample() tweets = [] for tweet in iterator: try: if tweet['lang'] == 'en': tweets.append(tweet) except: pass if len(tweets) == 100: break analyzer = SentimentIntensityAnalyzer() analyzer.polarity_scores('Hello') analyzer.polarity_scores('I really enjoy this video series.') analyzer.polarity_scores('I REALLY enjoy this video series.') analyzer.polarity_scores('I REALLY enjoy this video series!!!') analyzer.polarity_scores('I REALLY did not enjoy this video series!!!') scores = np.zeros(len(tweets)) for i, t in enumerate(tweets): # Extract the text portion of the tweet text = t['text'] # Measure the polarity of the tweet polarity = analyzer.polarity_scores(text) # Store the normalized, weighted composite score scores[i] = polarity['compound'] most_positive = np.argmax(scores) most_negative = np.argmin(scores) print('{0:6.3f} : "{1}"'.format(scores[most_positive], tweets[most_positive]['text'])) print('{0:6.3f} : "{1}"'.format(scores[most_negative], tweets[most_negative]['text'])) ###Output _____no_output_____
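###Markdown Beyond inspecting the most positive and most negative tweets, a simple next step is to bucket all of the streamed tweets by their compound score. The sketch below assumes the `scores` array computed above is still in scope; the +/-0.05 cut-offs are the thresholds commonly suggested for VADER's compound score, so treat them as a convention rather than something dictated by this data. ###Code
from collections import Counter

def sentiment_bucket(compound, pos_cut=0.05, neg_cut=-0.05):
    # Commonly cited VADER convention: >= 0.05 positive, <= -0.05 negative
    if compound >= pos_cut:
        return 'positive'
    if compound <= neg_cut:
        return 'negative'
    return 'neutral'

bucket_counts = Counter(sentiment_bucket(s) for s in scores)
print(bucket_counts.most_common())
###Output _____no_output_____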
assignments/2021/assignment1/softmax.ipynb
###Markdown Softmax exercise*Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*This exercise is analogous to the SVM exercise. You will:- implement a fully-vectorized **loss function** for the Softmax classifier- implement the fully-vectorized expression for its **analytic gradient**- **check your implementation** with numerical gradient- use a validation set to **tune the learning rate and regularization** strength- **optimize** the loss function with **SGD**- **visualize** the final learned weights ###Code import random import numpy as np from cs231n.data_utils import load_CIFAR10 import matplotlib.pyplot as plt %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # for auto-reloading extenrnal modules # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython %load_ext autoreload %autoreload 2 def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000, num_dev=500): """ Load the CIFAR-10 dataset from disk and perform preprocessing to prepare it for the linear classifier. These are the same steps as we used for the SVM, but condensed to a single function. """ # Load the raw CIFAR-10 data cifar10_dir = 'cs231n/datasets/cifar-10-batches-py' # Cleaning up variables to prevent loading data multiple times (which may cause memory issue) try: del X_train, y_train del X_test, y_test print('Clear previously loaded data.') except: pass X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir) # subsample the data mask = list(range(num_training, num_training + num_validation)) X_val = X_train[mask] y_val = y_train[mask] mask = list(range(num_training)) X_train = X_train[mask] y_train = y_train[mask] mask = list(range(num_test)) X_test = X_test[mask] y_test = y_test[mask] mask = np.random.choice(num_training, num_dev, replace=False) X_dev = X_train[mask] y_dev = y_train[mask] # Preprocessing: reshape the image data into rows X_train = np.reshape(X_train, (X_train.shape[0], -1)) X_val = np.reshape(X_val, (X_val.shape[0], -1)) X_test = np.reshape(X_test, (X_test.shape[0], -1)) X_dev = np.reshape(X_dev, (X_dev.shape[0], -1)) # Normalize the data: subtract the mean image mean_image = np.mean(X_train, axis = 0) X_train -= mean_image X_val -= mean_image X_test -= mean_image X_dev -= mean_image # add bias dimension and transform into columns X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))]) X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))]) X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))]) X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))]) return X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev # Invoke the above function to get our data. 
X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev = get_CIFAR10_data() print('Train data shape: ', X_train.shape) print('Train labels shape: ', y_train.shape) print('Validation data shape: ', X_val.shape) print('Validation labels shape: ', y_val.shape) print('Test data shape: ', X_test.shape) print('Test labels shape: ', y_test.shape) print('dev data shape: ', X_dev.shape) print('dev labels shape: ', y_dev.shape) ###Output Train data shape: (49000, 3073) Train labels shape: (49000,) Validation data shape: (1000, 3073) Validation labels shape: (1000,) Test data shape: (1000, 3073) Test labels shape: (1000,) dev data shape: (500, 3073) dev labels shape: (500,) ###Markdown Softmax ClassifierYour code for this section will all be written inside `cs231n/classifiers/softmax.py`. ###Code # First implement the naive softmax loss function with nested loops. # Open the file cs231n/classifiers/softmax.py and implement the # softmax_loss_naive function. from cs231n.classifiers.softmax import softmax_loss_naive import time # Generate a random softmax weight matrix and use it to compute the loss. W = np.random.randn(3073, 10) * 0.0001 loss, grad = softmax_loss_naive(W, X_dev, y_dev, 0.0) # As a rough sanity check, our loss should be something close to -log(0.1). print('loss: %f' % loss) print('sanity check: %f' % (-np.log(0.1))) ###Output loss: 2.371165 sanity check: 2.302585 ###Markdown **Inline Question 1**Why do we expect our loss to be close to -log(0.1)? Explain briefly.**$\color{blue}{\textit Your Answer:}$ *Because at first, the scores are nearly 0, and there is totally 10 classes. So the Loss is -log(1/10) = -log(0.1)* ###Code # Complete the implementation of softmax_loss_naive and implement a (naive) # version of the gradient that uses nested loops. loss, grad = softmax_loss_naive(W, X_dev, y_dev, 0.0) # As we did for the SVM, use numeric gradient checking as a debugging tool. # The numeric gradient should be close to the analytic gradient. from cs231n.gradient_check import grad_check_sparse f = lambda w: softmax_loss_naive(w, X_dev, y_dev, 0.0)[0] grad_numerical = grad_check_sparse(f, W, grad, 10) # similar to SVM case, do another gradient check with regularization loss, grad = softmax_loss_naive(W, X_dev, y_dev, 5e1) f = lambda w: softmax_loss_naive(w, X_dev, y_dev, 5e1)[0] grad_numerical = grad_check_sparse(f, W, grad, 10) # Now that we have a naive implementation of the softmax loss function and its gradient, # implement a vectorized version in softmax_loss_vectorized. # The two versions should compute the same results, but the vectorized version should be # much faster. tic = time.time() loss_naive, grad_naive = softmax_loss_naive(W, X_dev, y_dev, 0.000005) toc = time.time() print('naive loss: %e computed in %fs' % (loss_naive, toc - tic)) from cs231n.classifiers.softmax import softmax_loss_vectorized tic = time.time() loss_vectorized, grad_vectorized = softmax_loss_vectorized(W, X_dev, y_dev, 0.000005) toc = time.time() print('vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic)) # As we did for the SVM, we use the Frobenius norm to compare the two versions # of the gradient. grad_difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro') print('Loss difference: %f' % np.abs(loss_naive - loss_vectorized)) print('Gradient difference: %f' % grad_difference) # Use the validation set to tune hyperparameters (regularization strength and # learning rate). 
You should experiment with different ranges for the learning # rates and regularization strengths; if you are careful you should be able to # get a classification accuracy of over 0.35 on the validation set. from cs231n.classifiers import Softmax results = {} best_val = -1 best_softmax = None ################################################################################ # TODO: # # Use the validation set to set the learning rate and regularization strength. # # This should be identical to the validation that you did for the SVM; save # # the best trained softmax classifer in best_softmax. # ################################################################################ # Provided as a reference. You may or may not want to change these hyperparameters learning_rates = [1e-7, 5e-7] regularization_strengths = [2.5e4, 5e4] # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** for lr in learning_rates: for reg in regularization_strengths: softmax = Softmax() loss = softmax.train(X_train, y_train, learning_rate=lr, reg=reg, num_iters=4000, verbose=True) y_train_pred = softmax.predict(X_train) y_val_pred = softmax.predict(X_val) y_train_acc = np.mean(y_train_pred==y_train) y_val_acc = np.mean(y_val_pred==y_val) results[(lr,reg)] = [y_train_acc, y_val_acc] if y_val_acc > best_val: best_val = y_val_acc best_softmax = softmax # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** # Print out results. for lr, reg in sorted(results): train_accuracy, val_accuracy = results[(lr, reg)] print('lr %e reg %e train accuracy: %f val accuracy: %f' % ( lr, reg, train_accuracy, val_accuracy)) print('best validation accuracy achieved during cross-validation: %f' % best_val) # evaluate on test set # Evaluate the best softmax on test set y_test_pred = best_softmax.predict(X_test) test_accuracy = np.mean(y_test == y_test_pred) print('softmax on raw pixels final test set accuracy: %f' % (test_accuracy, )) ###Output softmax on raw pixels final test set accuracy: 0.346000 ###Markdown **Inline Question 2** - *True or False*Suppose the overall training loss is defined as the sum of the per-datapoint loss over all training examples. It is possible to add a new datapoint to a training set that would leave the SVM loss unchanged, but this is not the case with the Softmax classifier loss.$\color{blue}{\textit Your Answer:}$$\color{blue}{\textit Your Explanation:}$ ###Code # Visualize the learned weights for each class w = best_softmax.W[:-1,:] # strip out the bias w = w.reshape(32, 32, 3, 10) w_min, w_max = np.min(w), np.max(w) classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] for i in range(10): plt.subplot(2, 5, i + 1) # Rescale the weights to be between 0 and 255 wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min) plt.imshow(wimg.astype('uint8')) plt.axis('off') plt.title(classes[i]) ###Output _____no_output_____ ###Markdown ###Code # This mounts your Google Drive to the Colab VM. from google.colab import drive drive.mount('/content/drive', force_remount=True) # Enter the foldername in your Drive where you have saved the unzipped # assignment folder, e.g. 'cs231n/assignments/assignment1/' FOLDERNAME = 'cs231n/assignments/assignment1/' assert FOLDERNAME is not None, "[!] Enter the foldername." # Now that we've mounted your Drive, this ensures that # the Python interpreter of the Colab VM can load # python files from within it. 
import sys sys.path.append('/content/drive/My Drive/{}'.format(FOLDERNAME)) # This downloads the CIFAR-10 dataset to your Drive # if it doesn't already exist. %cd drive/My\ Drive/$FOLDERNAME/cs231n/datasets/ !bash get_datasets.sh %cd /content/drive/My\ Drive/$FOLDERNAME ###Output Mounted at /content/drive /content/drive/My Drive/cs231n/assignments/assignment1/cs231n/datasets /content/drive/My Drive/cs231n/assignments/assignment1 ###Markdown Softmax exercise*Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*This exercise is analogous to the SVM exercise. You will:- implement a fully-vectorized **loss function** for the Softmax classifier- implement the fully-vectorized expression for its **analytic gradient**- **check your implementation** with numerical gradient- use a validation set to **tune the learning rate and regularization** strength- **optimize** the loss function with **SGD**- **visualize** the final learned weights ###Code import random import numpy as np from cs231n.data_utils import load_CIFAR10 import matplotlib.pyplot as plt %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # for auto-reloading extenrnal modules # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython %load_ext autoreload %autoreload 2 def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000, num_dev=500): """ Load the CIFAR-10 dataset from disk and perform preprocessing to prepare it for the linear classifier. These are the same steps as we used for the SVM, but condensed to a single function. """ # Load the raw CIFAR-10 data cifar10_dir = 'cs231n/datasets/cifar-10-batches-py' # Cleaning up variables to prevent loading data multiple times (which may cause memory issue) try: del X_train, y_train del X_test, y_test print('Clear previously loaded data.') except: pass X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir) # subsample the data mask = list(range(num_training, num_training + num_validation)) X_val = X_train[mask] y_val = y_train[mask] mask = list(range(num_training)) X_train = X_train[mask] y_train = y_train[mask] mask = list(range(num_test)) X_test = X_test[mask] y_test = y_test[mask] mask = np.random.choice(num_training, num_dev, replace=False) X_dev = X_train[mask] y_dev = y_train[mask] # Preprocessing: reshape the image data into rows X_train = np.reshape(X_train, (X_train.shape[0], -1)) X_val = np.reshape(X_val, (X_val.shape[0], -1)) X_test = np.reshape(X_test, (X_test.shape[0], -1)) X_dev = np.reshape(X_dev, (X_dev.shape[0], -1)) # Normalize the data: subtract the mean image mean_image = np.mean(X_train, axis = 0) X_train -= mean_image X_val -= mean_image X_test -= mean_image X_dev -= mean_image # add bias dimension and transform into columns X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))]) X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))]) X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))]) X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))]) return X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev # Invoke the above function to get our data. 
X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev = get_CIFAR10_data() print('Train data shape: ', X_train.shape) print('Train labels shape: ', y_train.shape) print('Validation data shape: ', X_val.shape) print('Validation labels shape: ', y_val.shape) print('Test data shape: ', X_test.shape) print('Test labels shape: ', y_test.shape) print('dev data shape: ', X_dev.shape) print('dev labels shape: ', y_dev.shape) ###Output Train data shape: (49000, 3073) Train labels shape: (49000,) Validation data shape: (1000, 3073) Validation labels shape: (1000,) Test data shape: (1000, 3073) Test labels shape: (1000,) dev data shape: (500, 3073) dev labels shape: (500,) ###Markdown Softmax ClassifierYour code for this section will all be written inside `cs231n/classifiers/softmax.py`. ###Code # First implement the naive softmax loss function with nested loops. # Open the file cs231n/classifiers/softmax.py and implement the # softmax_loss_naive function. from cs231n.classifiers.softmax import softmax_loss_naive import time # Generate a random softmax weight matrix and use it to compute the loss. W = np.random.randn(3073, 10) * 0.0001 loss, grad = softmax_loss_naive(W, X_dev, y_dev, 0.0) # As a rough sanity check, our loss should be something close to -log(0.1). print('loss: %f' % loss) print('sanity check: %f' % (-np.log(0.1))) ###Output loss: 2.371123 sanity check: 2.302585 ###Markdown **Inline Question 1**Why do we expect our loss to be close to -log(0.1)? Explain briefly.**$\color{blue}{\textit Your Answer:}$ *On average, scores are initially equally distributed, and loss is approximated by exp(a)/(10*exp(a)) * ###Code # Complete the implementation of softmax_loss_naive and implement a (naive) # version of the gradient that uses nested loops. loss, grad = softmax_loss_naive(W, X_dev, y_dev, 0.0) # As we did for the SVM, use numeric gradient checking as a debugging tool. # The numeric gradient should be close to the analytic gradient. from cs231n.gradient_check import grad_check_sparse f = lambda w: softmax_loss_naive(w, X_dev, y_dev, 0.0)[0] grad_numerical = grad_check_sparse(f, W, grad, 10) # similar to SVM case, do another gradient check with regularization loss, grad = softmax_loss_naive(W, X_dev, y_dev, 5e1) f = lambda w: softmax_loss_naive(w, X_dev, y_dev, 5e1)[0] grad_numerical = grad_check_sparse(f, W, grad, 10) # Now that we have a naive implementation of the softmax loss function and its gradient, # implement a vectorized version in softmax_loss_vectorized. # The two versions should compute the same results, but the vectorized version should be # much faster. tic = time.time() loss_naive, grad_naive = softmax_loss_naive(W, X_dev, y_dev, 0.000005) toc = time.time() print('naive loss: %e computed in %fs' % (loss_naive, toc - tic)) from cs231n.classifiers.softmax import softmax_loss_vectorized tic = time.time() loss_vectorized, grad_vectorized = softmax_loss_vectorized(W, X_dev, y_dev, 0.000005) toc = time.time() print('vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic)) # As we did for the SVM, we use the Frobenius norm to compare the two versions # of the gradient. grad_difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro') print('Loss difference: %f' % np.abs(loss_naive - loss_vectorized)) print('Gradient difference: %f' % grad_difference) # Use the validation set to tune hyperparameters (regularization strength and # learning rate). 
You should experiment with different ranges for the learning # rates and regularization strengths; if you are careful you should be able to # get a classification accuracy of over 0.35 on the validation set. from cs231n.classifiers import Softmax results = {} best_val = -1 best_softmax = None ################################################################################ # TODO: # # Use the validation set to set the learning rate and regularization strength. # # This should be identical to the validation that you did for the SVM; save # # the best trained softmax classifer in best_softmax. # ################################################################################ # Provided as a reference. You may or may not want to change these hyperparameters learning_rates = [1e-7, 5e-7] regularization_strengths = [2.5e4, 5e4] # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** # Wider range at the low end learning_rates = [1e-8, 5e-5] regularization_strengths = [1e-1, 5e4] best_val = 0 best_lr = 0 best_reg = 0 best_softmax = None for i in range(100): # Use log uniform sampling lr = np.exp(np.random.uniform(np.log(learning_rates[0]), np.log(learning_rates[1]))) reg = np.exp(np.random.uniform(np.log(regularization_strengths[0]), np.log(regularization_strengths[1]))) softmax = Softmax() loss_hist = softmax.train(X_train, y_train, learning_rate=lr, reg=reg, num_iters=1500, verbose=False) y_train_pred = softmax.predict(X_train) y_val_pred = softmax.predict(X_val) train_accuracy = np.mean(y_train == y_train_pred) val_accuracy = np.mean(y_val == y_val_pred) if best_val == 0 or val_accuracy > best_val: best_val = val_accuracy best_lr = lr best_reg = reg best_softmax = softmax print('%d: lr %e reg %e train accuracy: %f val accuracy: %f' % ( i+1, lr, reg, train_accuracy, val_accuracy)) results[(lr, reg)] = (train_accuracy, val_accuracy) print("Hyperparameter tuning completed") # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** # Print out results. for lr, reg in sorted(results): train_accuracy, val_accuracy = results[(lr, reg)] print('lr %e reg %e train accuracy: %f val accuracy: %f' % ( lr, reg, train_accuracy, val_accuracy)) print('best validation accuracy achieved during cross-validation: %f' % best_val) # evaluate on test set # Evaluate the best softmax on test set y_test_pred = best_softmax.predict(X_test) test_accuracy = np.mean(y_test == y_test_pred) print('softmax on raw pixels final test set accuracy: %f' % (test_accuracy, )) ###Output softmax on raw pixels final test set accuracy: 0.357000 ###Markdown **Inline Question 2** - *True or False*Suppose the overall training loss is defined as the sum of the per-datapoint loss over all training examples. It is possible to add a new datapoint to a training set that would leave the SVM loss unchanged, but this is not the case with the Softmax classifier loss.$\color{blue}{\textit Your Answer:}$ True.$\color{blue}{\textit Your Explanation:}$ Adding a datapoint to Softmax always increases the total loss, given that the softmax datapoint loss is > 0. Whereas a correctly classified datapoint to SVM has loss zero, so adding it as a datapoint does not increse the total loss. 
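###Markdown A note on the implementations referenced above: the loss functions live in `cs231n/classifiers/softmax.py` and are not reproduced in this notebook. One detail that is easy to get wrong there is numerical overflow in `np.exp(scores)`. The stand-alone sketch below illustrates the usual max-subtraction trick; it is a generic illustration, not the assignment's solution code, and the function name is made up for this example. ###Code
import numpy as np

def stable_softmax_probs(scores):
    """Row-wise softmax using the max-subtraction trick to avoid overflow.

    Subtracting each row's maximum leaves the result unchanged because
    exp(s - c) / sum(exp(s - c)) == exp(s) / sum(exp(s)).
    """
    shifted = scores - scores.max(axis=1, keepdims=True)
    exp_scores = np.exp(shifted)
    return exp_scores / exp_scores.sum(axis=1, keepdims=True)

# Large scores would overflow a naive np.exp(scores); the shifted version is fine.
print(stable_softmax_probs(np.array([[1000.0, 1001.0, 999.0]])))
###Output _____no_output_____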
###Code # Visualize the learned weights for each class w = best_softmax.W[:-1,:] # strip out the bias w = w.reshape(32, 32, 3, 10) w_min, w_max = np.min(w), np.max(w) classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] for i in range(10): plt.subplot(2, 5, i + 1) # Rescale the weights to be between 0 and 255 wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min) plt.imshow(wimg.astype('uint8')) plt.axis('off') plt.title(classes[i]) ###Output _____no_output_____ ###Markdown Softmax exercise*Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*This exercise is analogous to the SVM exercise. You will:- implement a fully-vectorized **loss function** for the Softmax classifier- implement the fully-vectorized expression for its **analytic gradient**- **check your implementation** with numerical gradient- use a validation set to **tune the learning rate and regularization** strength- **optimize** the loss function with **SGD**- **visualize** the final learned weights ###Code import random import numpy as np from cs231n.data_utils import load_CIFAR10 import matplotlib.pyplot as plt %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # for auto-reloading extenrnal modules # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython %load_ext autoreload %autoreload 2 def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000, num_dev=500): """ Load the CIFAR-10 dataset from disk and perform preprocessing to prepare it for the linear classifier. These are the same steps as we used for the SVM, but condensed to a single function. """ # Load the raw CIFAR-10 data cifar10_dir = 'cs231n/datasets/cifar-10-batches-py' # Cleaning up variables to prevent loading data multiple times (which may cause memory issue) try: del X_train, y_train del X_test, y_test print('Clear previously loaded data.') except: pass X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir) # subsample the data mask = list(range(num_training, num_training + num_validation)) X_val = X_train[mask] y_val = y_train[mask] mask = list(range(num_training)) X_train = X_train[mask] y_train = y_train[mask] mask = list(range(num_test)) X_test = X_test[mask] y_test = y_test[mask] mask = np.random.choice(num_training, num_dev, replace=False) X_dev = X_train[mask] y_dev = y_train[mask] # Preprocessing: reshape the image data into rows X_train = np.reshape(X_train, (X_train.shape[0], -1)) X_val = np.reshape(X_val, (X_val.shape[0], -1)) X_test = np.reshape(X_test, (X_test.shape[0], -1)) X_dev = np.reshape(X_dev, (X_dev.shape[0], -1)) # Normalize the data: subtract the mean image mean_image = np.mean(X_train, axis = 0) X_train -= mean_image X_val -= mean_image X_test -= mean_image X_dev -= mean_image # add bias dimension and transform into columns X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))]) X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))]) X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))]) X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))]) return X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev # Invoke the above function to get our data. 
X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev = get_CIFAR10_data() print('Train data shape: ', X_train.shape) print('Train labels shape: ', y_train.shape) print('Validation data shape: ', X_val.shape) print('Validation labels shape: ', y_val.shape) print('Test data shape: ', X_test.shape) print('Test labels shape: ', y_test.shape) print('dev data shape: ', X_dev.shape) print('dev labels shape: ', y_dev.shape) ###Output Train data shape: (49000, 3073) Train labels shape: (49000,) Validation data shape: (1000, 3073) Validation labels shape: (1000,) Test data shape: (1000, 3073) Test labels shape: (1000,) dev data shape: (500, 3073) dev labels shape: (500,) ###Markdown Softmax ClassifierYour code for this section will all be written inside `cs231n/classifiers/softmax.py`. ###Code # First implement the naive softmax loss function with nested loops. # Open the file cs231n/classifiers/softmax.py and implement the # softmax_loss_naive function. from cs231n.classifiers.softmax import softmax_loss_naive import time # Generate a random softmax weight matrix and use it to compute the loss. W = np.random.randn(3073, 10) * 0.0001 loss, grad = softmax_loss_naive(W, X_dev, y_dev, 0.0) # As a rough sanity check, our loss should be something close to -log(0.1). print('loss: %f' % loss) print('sanity check: %f' % (-np.log(0.1))) ###Output loss: 2.370905 sanity check: 2.302585 ###Markdown **Inline Question 1**Why do we expect our loss to be close to -log(0.1)? Explain briefly.**$\color{blue}{\textit Your Answer:}$ *Fill this in* ###Code # Complete the implementation of softmax_loss_naive and implement a (naive) # version of the gradient that uses nested loops. loss, grad = softmax_loss_naive(W, X_dev, y_dev, 0.0) # As we did for the SVM, use numeric gradient checking as a debugging tool. # The numeric gradient should be close to the analytic gradient. from cs231n.gradient_check import grad_check_sparse f = lambda w: softmax_loss_naive(w, X_dev, y_dev, 0.0)[0] grad_numerical = grad_check_sparse(f, W, grad, 10) # similar to SVM case, do another gradient check with regularization loss, grad = softmax_loss_naive(W, X_dev, y_dev, 5e1) f = lambda w: softmax_loss_naive(w, X_dev, y_dev, 5e1)[0] grad_numerical = grad_check_sparse(f, W, grad, 10) # Now that we have a naive implementation of the softmax loss function and its gradient, # implement a vectorized version in softmax_loss_vectorized. # The two versions should compute the same results, but the vectorized version should be # much faster. tic = time.time() loss_naive, grad_naive = softmax_loss_naive(W, X_dev, y_dev, 0.000005) toc = time.time() print('naive loss: %e computed in %fs' % (loss_naive, toc - tic)) from cs231n.classifiers.softmax import softmax_loss_vectorized tic = time.time() loss_vectorized, grad_vectorized = softmax_loss_vectorized(W, X_dev, y_dev, 0.000005) toc = time.time() print('vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic)) # As we did for the SVM, we use the Frobenius norm to compare the two versions # of the gradient. grad_difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro') print('Loss difference: %f' % np.abs(loss_naive - loss_vectorized)) print('Gradient difference: %f' % grad_difference) # Use the validation set to tune hyperparameters (regularization strength and # learning rate). 
You should experiment with different ranges for the learning # rates and regularization strengths; if you are careful you should be able to # get a classification accuracy of over 0.35 on the validation set. from cs231n.classifiers import Softmax results = {} best_val = -1 best_softmax = None ################################################################################ # TODO: # # Use the validation set to set the learning rate and regularization strength. # # This should be identical to the validation that you did for the SVM; save # # the best trained softmax classifer in best_softmax. # ################################################################################ # Provided as a reference. You may or may not want to change these hyperparameters learning_rates = [1e-7, 5e-7] regularization_strengths = [2.5e4, 5e4] # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** for lr in learning_rates: for r in regularization_strengths: sm = Softmax() sm.train(X_train, y_train, learning_rate = lr, reg = r, num_iters = 500) y_pred_train = sm.predict(X_train) train_accuracy = np.mean(y_pred_train == y_train) y_pred = sm.predict(X_dev) dev_accuracy = np.mean(y_pred == y_dev) if dev_accuracy > best_val: best_val = dev_accuracy best_softmax = sm results[(lr, r)] = (train_accuracy, dev_accuracy) pass # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** # Print out results. for lr, reg in sorted(results): train_accuracy, val_accuracy = results[(lr, reg)] print('lr %e reg %e train accuracy: %f val accuracy: %f' % ( lr, reg, train_accuracy, val_accuracy)) print('best validation accuracy achieved during cross-validation: %f' % best_val) # evaluate on test set # Evaluate the best softmax on test set y_test_pred = best_softmax.predict(X_test) test_accuracy = np.mean(y_test == y_test_pred) print('softmax on raw pixels final test set accuracy: %f' % (test_accuracy, )) ###Output softmax on raw pixels final test set accuracy: 0.337000 ###Markdown **Inline Question 2** - *True or False*Suppose the overall training loss is defined as the sum of the per-datapoint loss over all training examples. It is possible to add a new datapoint to a training set that would leave the SVM loss unchanged, but this is not the case with the Softmax classifier loss.$\color{blue}{\textit Your Answer:}$$\color{blue}{\textit Your Explanation:}$ ###Code # Visualize the learned weights for each class w = best_softmax.W[:-1,:] # strip out the bias w = w.reshape(32, 32, 3, 10) w_min, w_max = np.min(w), np.max(w) classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] for i in range(10): plt.subplot(2, 5, i + 1) # Rescale the weights to be between 0 and 255 wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min) plt.imshow(wimg.astype('uint8')) plt.axis('off') plt.title(classes[i]) ###Output _____no_output_____
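###Markdown With many `(learning rate, regularization)` pairs tried, it can be easier to inspect the search visually than to read the printed list. The optional sketch below assumes the `results` dictionary built in the tuning cell above (mapping `(lr, reg)` to `(train_accuracy, val_accuracy)`) is still in scope; it is not required by the assignment. ###Code
import numpy as np
import matplotlib.pyplot as plt

lrs = np.array([lr for lr, _ in results.keys()])
regs = np.array([reg for _, reg in results.keys()])
val_accs = np.array([acc[1] for acc in results.values()])

plt.figure(figsize=(8, 5))
sc = plt.scatter(np.log10(lrs), np.log10(regs), c=val_accs, cmap='viridis', s=80)
plt.colorbar(sc, label='validation accuracy')
plt.xlabel('log10(learning rate)')
plt.ylabel('log10(regularization strength)')
plt.title('Validation accuracy over the hyperparameter search')
###Output _____no_output_____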
examples/Tutorials/Brush Interval Selector.ipynb
###Markdown Linking Plots Using Brush Interval Selector Details on how to use the brush interval selector can be found in [this](../Interactions/Selectors.ipynbbrushselectors) notebook.Brush interval selectors can be used where *continuous updates* are **not** desirable (for example, in callbacks performing slower computations)The boolean trait `brushing` can be used to control *continuous updates* in the `BrushSelector`. `brushing` will be set to `False` when the interval selector is not brushing. We can register callbacks by listening to the `brushing` trait of the brush selector. We can check the value of `brushing` trait in the callback and perform updates *only* at the end of brushing.Let's now look at an example of linking a time series plot to a scatter plot using a `BrushIntervalSelector` ###Code import numpy as np from ipywidgets import * import bqplot.pyplot as plt ###Output _____no_output_____ ###Markdown Let's set up an interval selector on a figure containing two time series plots. The interval selector can be activated by clicking on the figure ###Code from bqplot.interacts import BrushIntervalSelector y1, y2 = np.random.randn(2, 200).cumsum(axis=1) # two simple random walks fig_layout = Layout(width='900px', height='500px') time_series_fig = plt.figure(layout=fig_layout) line = plt.plot([y1, y2]) # create a fast interval selector by passing in the X scale and the line mark on which the selector operates intsel = BrushIntervalSelector(marks=[line], scale=line.scales['x']) time_series_fig.interaction = intsel # set the interval selector on the figure ###Output _____no_output_____ ###Markdown Let's now create a scatter plot of the two time series and stack it below the time series plot using a `VBox` ###Code scat_fig = plt.figure(layout=fig_layout, animation_duration=750, title='Scatter of time series slice selected by the interval selector') # set the x and y attributes to the y values of line.y scat = plt.scatter(*line.y, colors=['red'], stroke='black') # define a callback for the interval selector def update_scatter(*args): brushing = intsel.brushing # update scatter *only* when the interval selector # is not brushing to rpevent continuous updates if not brushing: # interval selector is active if line.selected is not None: # get the start and end indices of the interval start_ix, end_ix = line.selected[0], line.selected[-1] else: # interval selector is *not* active start_ix, end_ix = 0, -1 #update the x and y attributes of the scatter by slicing line.y with scat.hold_sync(): scat.x, scat.y = line.y[:, start_ix:end_ix] # register the callback with brushing trait of interval selector intsel.observe(update_scatter, 'brushing') help_label = HTML('<div style="color: blue; font-size: 16px; margin:20px 0px 0px 50px">\ Brush on the time series plot to activate the interval selector</div>') VBox([help_label, time_series_fig, scat_fig]) ###Output _____no_output_____ ###Markdown Linking Plots Using Brush Interval Selector Details on how to use the brush interval selector can be found in [this](../Interactions/Selectors.ipynbbrushselectors) notebook.Brush interval selectors can be used where *continuous updates* are **not** desirable (for example, in callbacks performing slower computations)The boolean trait `brushing` can be used to control *continuous updates* in the `BrushSelector`. `brushing` will be set to `False` when the interval selector is not brushing. We can register callbacks by listening to the `brushing` trait of the brush selector. 
We can check the value of `brushing` trait in the callback and perform updates *only* at the end of brushing.Let's now look at an example of linking a time series plot to a scatter plot using a `BrushIntervalSelector` ###Code import numpy as np from ipywidgets import Layout, HTML, VBox import bqplot.pyplot as plt ###Output _____no_output_____ ###Markdown Let's set up an interval selector on a figure containing two time series plots. The interval selector can be activated by clicking on the figure ###Code from bqplot.interacts import BrushIntervalSelector y1, y2 = np.random.randn(2, 200).cumsum(axis=1) # two simple random walks fig_layout = Layout(width='900px', height='500px') time_series_fig = plt.figure(layout=fig_layout) line = plt.plot([y1, y2]) # create a fast interval selector by passing in the X scale and the line mark on which the selector operates intsel = BrushIntervalSelector(marks=[line], scale=line.scales['x']) time_series_fig.interaction = intsel # set the interval selector on the figure ###Output _____no_output_____ ###Markdown Let's now create a scatter plot of the two time series and stack it below the time series plot using a `VBox` ###Code scat_fig = plt.figure(layout=fig_layout, animation_duration=750, title='Scatter of time series slice selected by the interval selector') # set the x and y attributes to the y values of line.y scat = plt.scatter(*line.y, colors=['red'], stroke='black') # define a callback for the interval selector def update_scatter(*args): brushing = intsel.brushing # update scatter *only* when the interval selector # is not brushing to rpevent continuous updates if not brushing: # interval selector is active if line.selected is not None: # get the start and end indices of the interval start_ix, end_ix = line.selected[0], line.selected[-1] else: # interval selector is *not* active start_ix, end_ix = 0, -1 #update the x and y attributes of the scatter by slicing line.y with scat.hold_sync(): scat.x, scat.y = line.y[:, start_ix:end_ix] # register the callback with brushing trait of interval selector intsel.observe(update_scatter, 'brushing') help_label = HTML('<div style="color: blue; font-size: 16px; margin:20px 0px 0px 50px">\ Brush on the time series plot to activate the interval selector</div>') VBox([help_label, time_series_fig, scat_fig]) ###Output _____no_output_____ ###Markdown Linking Plots Using Brush Interval Selector Details on how to use the brush interval selector can be found in [this](../Interactions/Selectors.ipynbbrushselectors) notebook.Brush interval selectors can be used where *continuous updates* are **not** desirable (for example, in callbacks performing slower computations)The boolean trait `brushing` can be used to control *continuous updates* in the `BrushSelector`. `brushing` will be set to `False` when the interval selector is not brushing. We can register callbacks by listening to the `brushing` trait of the brush selector. We can check the value of `brushing` trait in the callback and perform updates *only* at the end of brushing.Let's now look at an example of linking a time series plot to a scatter plot using a `BrushIntervalSelector` ###Code import numpy as np from ipywidgets import Layout, HTML, VBox import bqplot.pyplot as plt ###Output _____no_output_____ ###Markdown Let's set up an interval selector on a figure containing two time series plots. 
The interval selector can be activated by clicking on the figure ###Code from bqplot.interacts import BrushIntervalSelector y1, y2 = np.random.randn(2, 200).cumsum(axis=1) # two simple random walks fig_layout = Layout(width='900px', height='500px') time_series_fig = plt.figure(layout=fig_layout) line = plt.plot([y1, y2]) # create a fast interval selector by passing in the X scale and the line mark on which the selector operates intsel = BrushIntervalSelector(marks=[line], scale=line.scales['x']) time_series_fig.interaction = intsel # set the interval selector on the figure ###Output _____no_output_____ ###Markdown Let's now create a scatter plot of the two time series and stack it below the time series plot using a `VBox` ###Code scat_fig = plt.figure(layout=fig_layout, animation_duration=750, title='Scatter of time series slice selected by the interval selector') # set the x and y attributes to the y values of line.y scat = plt.scatter(*line.y, colors=['red'], stroke='black') # define a callback for the interval selector def update_scatter(*args): brushing = intsel.brushing # update scatter *only* when the interval selector # is not brushing to prevent continuous updates if not brushing: # interval selector is active if line.selected is not None: # get the start and end indices of the interval start_ix, end_ix = line.selected[0], line.selected[-1] else: # interval selector is *not* active start_ix, end_ix = 0, -1 #update the x and y attributes of the scatter by slicing line.y with scat.hold_sync(): scat.x, scat.y = line.y[:, start_ix:end_ix] # register the callback with brushing trait of interval selector intsel.observe(update_scatter, 'brushing') help_label = HTML('<div style="color: blue; font-size: 16px; margin:20px 0px 0px 50px">\ Brush on the time series plot to activate the interval selector</div>') VBox([help_label, time_series_fig, scat_fig]) ###Output _____no_output_____ ###Markdown Linking Plots Using Brush Interval Selector Details on how to use the brush interval selector can be found in [this](../Interactions/Selectors.ipynbbrushselectors) notebook.Brush interval selectors can be used where *continuous updates* are **not** desirable (for example, in callbacks performing slower computations)The boolean trait `brushing` can be used to control *continuous updates* in the `BrushSelector`. `brushing` will be set to `False` when the interval selector is not brushing. We can register callbacks by listening to the `brushing` trait of the brush selector. We can check the value of `brushing` trait in the callback and perform updates *only* at the end of brushing.Let's now look at an example of linking a time series plot to a scatter plot using a `BrushIntervalSelector` ###Code import numpy as np from ipywidgets import Layout, HTML, VBox import bqplot.pyplot as plt ###Output _____no_output_____ ###Markdown Let's set up an interval selector on a figure containing two time series plots. 
The interval selector can be activated by clicking on the figure ###Code from bqplot.interacts import BrushIntervalSelector y1, y2 = np.random.randn(2, 200).cumsum(axis=1) # two simple random walks fig_layout = Layout(width="900px", height="500px") time_series_fig = plt.figure(layout=fig_layout) line = plt.plot([y1, y2]) # create a fast interval selector by passing in the X scale and the line mark on which the selector operates intsel = BrushIntervalSelector(marks=[line], scale=line.scales["x"]) time_series_fig.interaction = intsel # set the interval selector on the figure ###Output _____no_output_____ ###Markdown Let's now create a scatter plot of the two time series and stack it below the time series plot using a `VBox` ###Code scat_fig = plt.figure( layout=fig_layout, animation_duration=750, title="Scatter of time series slice selected by the interval selector", ) # set the x and y attributes to the y values of line.y scat = plt.scatter(*line.y, colors=["red"], stroke="black") # define a callback for the interval selector def update_scatter(*args): brushing = intsel.brushing # update scatter *only* when the interval selector # is not brushing to prevent continuous updates if not brushing: # interval selector is active if line.selected is not None: # get the start and end indices of the interval start_ix, end_ix = line.selected[0], line.selected[-1] else: # interval selector is *not* active start_ix, end_ix = 0, -1 # update the x and y attributes of the scatter by slicing line.y with scat.hold_sync(): scat.x, scat.y = line.y[:, start_ix:end_ix] # register the callback with brushing trait of interval selector intsel.observe(update_scatter, "brushing") help_label = HTML( '<div style="color: blue; font-size: 16px; margin:20px 0px 0px 50px">\ Brush on the time series plot to activate the interval selector</div>' ) VBox([help_label, time_series_fig, scat_fig]) ###Output _____no_output_____
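###Markdown The same end-of-brush pattern can drive more than one view at once. The sketch below adds a second observer that updates a small text summary of the selected slice; it assumes the `intsel`, `line`, and `HTML` objects defined above are still in scope, and the `summary_label` widget is an illustrative addition rather than part of the original example. ###Code
summary_label = HTML('<b>No selection yet</b>')

def update_summary(*args):
    # only update once brushing has finished, mirroring update_scatter above
    if not intsel.brushing:
        if line.selected is not None and len(line.selected) > 0:
            start_ix, end_ix = line.selected[0], line.selected[-1]
        else:
            start_ix, end_ix = 0, -1
        sliced = line.y[:, start_ix:end_ix]
        summary_label.value = '<b>Selected {} points; series means: {:.2f}, {:.2f}</b>'.format(
            sliced.shape[1], sliced[0].mean(), sliced[1].mean())

# register the extra callback on the same brushing trait
intsel.observe(update_summary, 'brushing')
summary_label
###Output _____no_output_____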
notebooks/DS_Session4_GradientDescent.ipynb
###Markdown Gradient Descent Beispiel-----Beispielfunktion: f(x) = x^2 – 4x + 11. Ableitung: f'(x) = 2x - 4 ![image.png](attachment:ac90278a-ef6b-4e5f-8b86-ef545d740c53.png) ###Code import numpy as np #Beginnwert von x x = -2 #Beginnwert von Intervall d = 2.2 #Maximale Anzahl der Schritte max = 50 i = 1 #x & y-Werte liste x_y_werte = [] #Initialisieren von 'Steigung im vorherigen Schritt' steigung_alt = np.nan #Schleife durchlaufen while i <= max: print (i) steigung = 2*x - 4 y_wert = x**2 - 4*x + 1 x_y_werte.append([x, y_wert, steigung]) if steigung > 0: steigung_r = "Steigt" elif steigung < 0: steigung_r = "Fällt" else: steigung_r = "Minimum erreicht" print("Wert von x: {x} und d: {d}; und Steigung: {s} ({r}) ".format(x=x,d=d,s=steigung, r=steigung_r)) if steigung == 0: print("Minimum erreicht!") break elif (steigung_alt < 0 and steigung > 0): print("Richtung ändern und Schrittlänge verkleinern!") d = d/2 x -= d elif (steigung_alt > 0 and steigung < 0): print("Richtung ändern und Schrittlänge verkleinern!") d = d/2 x += d elif steigung > 0: #print("Steigung: {a} und Steigung_alt: {b}".format(a=steigung, b=steigung_alt)) x -= d else: #print("Steigung: {a} und Steigung_alt: {b}".format(a=steigung, b=steigung_alt)) x += d steigung_alt = steigung i += 1 print() ###Output 1 Wert von x: -2 und d: 2.2; und Steigung: -8 (Fällt) 2 Wert von x: 0.20000000000000018 und d: 2.2; und Steigung: -3.5999999999999996 (Fällt) 3 Wert von x: 2.4000000000000004 und d: 2.2; und Steigung: 0.8000000000000007 (Steigt) Richtung ändern und Schrittlänge verkleinern! 4 Wert von x: 1.3000000000000003 und d: 1.1; und Steigung: -1.3999999999999995 (Fällt) Richtung ändern und Schrittlänge verkleinern! 5 Wert von x: 1.8500000000000003 und d: 0.55; und Steigung: -0.2999999999999994 (Fällt) 6 Wert von x: 2.4000000000000004 und d: 0.55; und Steigung: 0.8000000000000007 (Steigt) Richtung ändern und Schrittlänge verkleinern! 7 Wert von x: 2.1250000000000004 und d: 0.275; und Steigung: 0.2500000000000009 (Steigt) 8 Wert von x: 1.8500000000000005 und d: 0.275; und Steigung: -0.29999999999999893 (Fällt) Richtung ändern und Schrittlänge verkleinern! 9 Wert von x: 1.9875000000000005 und d: 0.1375; und Steigung: -0.024999999999999023 (Fällt) 10 Wert von x: 2.1250000000000004 und d: 0.1375; und Steigung: 0.2500000000000009 (Steigt) Richtung ändern und Schrittlänge verkleinern! 11 Wert von x: 2.0562500000000004 und d: 0.06875; und Steigung: 0.11250000000000071 (Steigt) 12 Wert von x: 1.9875000000000003 und d: 0.06875; und Steigung: -0.024999999999999467 (Fällt) Richtung ändern und Schrittlänge verkleinern! 13 Wert von x: 2.021875 und d: 0.034375; und Steigung: 0.04375000000000018 (Steigt) Richtung ändern und Schrittlänge verkleinern! 14 Wert von x: 2.0046875 und d: 0.0171875; und Steigung: 0.009375000000000355 (Steigt) 15 Wert von x: 1.9875000000000003 und d: 0.0171875; und Steigung: -0.024999999999999467 (Fällt) Richtung ändern und Schrittlänge verkleinern! 16 Wert von x: 1.9960937500000002 und d: 0.00859375; und Steigung: -0.007812499999999556 (Fällt) 17 Wert von x: 2.0046875 und d: 0.00859375; und Steigung: 0.009375000000000355 (Steigt) Richtung ändern und Schrittlänge verkleinern! 18 Wert von x: 2.000390625 und d: 0.004296875; und Steigung: 0.0007812500000001776 (Steigt) 19 Wert von x: 1.99609375 und d: 0.004296875; und Steigung: -0.0078125 (Fällt) Richtung ändern und Schrittlänge verkleinern! 
20 Wert von x: 1.9982421875 und d: 0.0021484375; und Steigung: -0.003515624999999911 (Fällt) 21 Wert von x: 2.000390625 und d: 0.0021484375; und Steigung: 0.0007812500000001776 (Steigt) Richtung ändern und Schrittlänge verkleinern! 22 Wert von x: 1.9993164062500002 und d: 0.00107421875; und Steigung: -0.0013671874999996447 (Fällt) Richtung ändern und Schrittlänge verkleinern! 23 Wert von x: 1.9998535156250001 und d: 0.000537109375; und Steigung: -0.00029296874999973355 (Fällt) 24 Wert von x: 2.000390625 und d: 0.000537109375; und Steigung: 0.0007812500000001776 (Steigt) Richtung ändern und Schrittlänge verkleinern! 25 Wert von x: 2.0001220703125 und d: 0.0002685546875; und Steigung: 0.000244140625 (Steigt) 26 Wert von x: 1.999853515625 und d: 0.0002685546875; und Steigung: -0.00029296875000017764 (Fällt) Richtung ändern und Schrittlänge verkleinern! 27 Wert von x: 1.99998779296875 und d: 0.00013427734375; und Steigung: -2.4414062500088818e-05 (Fällt) 28 Wert von x: 2.0001220703125 und d: 0.00013427734375; und Steigung: 0.000244140625 (Steigt) Richtung ändern und Schrittlänge verkleinern! 29 Wert von x: 2.000054931640625 und d: 6.7138671875e-05; und Steigung: 0.00010986328125017764 (Steigt) 30 Wert von x: 1.9999877929687502 und d: 6.7138671875e-05; und Steigung: -2.441406249964473e-05 (Fällt) Richtung ändern und Schrittlänge verkleinern! 31 Wert von x: 2.000021362304688 und d: 3.35693359375e-05; und Steigung: 4.272460937571054e-05 (Steigt) Richtung ändern und Schrittlänge verkleinern! 32 Wert von x: 2.000004577636719 und d: 1.678466796875e-05; und Steigung: 9.155273438032907e-06 (Steigt) 33 Wert von x: 1.9999877929687502 und d: 1.678466796875e-05; und Steigung: -2.441406249964473e-05 (Fällt) Richtung ändern und Schrittlänge verkleinern! 34 Wert von x: 1.9999961853027346 und d: 8.392333984375e-06; und Steigung: -7.629394530805911e-06 (Fällt) 35 Wert von x: 2.000004577636719 und d: 8.392333984375e-06; und Steigung: 9.155273438032907e-06 (Steigt) Richtung ändern und Schrittlänge verkleinern! 36 Wert von x: 2.000000381469727 und d: 4.1961669921875e-06; und Steigung: 7.629394538355427e-07 (Steigt) 37 Wert von x: 1.9999961853027348 und d: 4.1961669921875e-06; und Steigung: -7.629394530361822e-06 (Fällt) Richtung ändern und Schrittlänge verkleinern! 38 Wert von x: 1.9999982833862309 und d: 2.09808349609375e-06; und Steigung: -3.4332275382631394e-06 (Fällt) 39 Wert von x: 2.000000381469727 und d: 2.09808349609375e-06; und Steigung: 7.629394538355427e-07 (Steigt) Richtung ändern und Schrittlänge verkleinern! 40 Wert von x: 1.9999993324279788 und d: 1.049041748046875e-06; und Steigung: -1.335144042435843e-06 (Fällt) Richtung ändern und Schrittlänge verkleinern! 41 Wert von x: 1.9999998569488528 und d: 5.245208740234375e-07; und Steigung: -2.861022943001501e-07 (Fällt) 42 Wert von x: 2.000000381469727 und d: 5.245208740234375e-07; und Steigung: 7.629394538355427e-07 (Steigt) Richtung ändern und Schrittlänge verkleinern! 43 Wert von x: 2.00000011920929 und d: 2.6226043701171877e-07; und Steigung: 2.384185799897409e-07 (Steigt) 44 Wert von x: 1.999999856948853 und d: 2.6226043701171877e-07; und Steigung: -2.861022938560609e-07 (Fällt) Richtung ändern und Schrittlänge verkleinern! 45 Wert von x: 1.9999999880790715 und d: 1.3113021850585939e-07; und Steigung: -2.3841856933159988e-08 (Fällt) 46 Wert von x: 2.00000011920929 und d: 1.3113021850585939e-07; und Steigung: 2.384185799897409e-07 (Steigt) Richtung ändern und Schrittlänge verkleinern! 
47 Wert von x: 2.0000000536441807 und d: 6.556510925292969e-08; und Steigung: 1.0728836130624586e-07 (Steigt) 48 Wert von x: 1.9999999880790713 und d: 6.556510925292969e-08; und Steigung: -2.3841857377249198e-08 (Fällt) Richtung ändern und Schrittlänge verkleinern! 49 Wert von x: 2.0000000208616258 und d: 3.2782554626464846e-08; und Steigung: 4.172325152040912e-08 (Steigt) Richtung ändern und Schrittlänge verkleinern! 50 Wert von x: 2.0000000044703485 und d: 1.6391277313232423e-08; und Steigung: 8.940697071579962e-09 (Steigt) ###Markdown Plotten-------- ###Code import matplotlib.pyplot as plt x_fun = np.arange(-8, 10, 0.01) # Start, Stop, Step y_fun = x_fun**2 - 4*x_fun + 1 x_steigung = np.arange(-8, 10, 0.01) # Start, Stop, Step # Funktion plotten: plt.axis( [-10, 8, -7, 80] ) plt.grid(True) plt.plot(x_fun, y_fun, color='blue', linewidth=2.5, linestyle='-') for i in range(0,len(x_y_werte)): plt.plot(x_y_werte[i][0], x_y_werte[i][1], marker = 'o', markersize=12, color = 'red') y_0 = x_y_werte[i][1] - x_y_werte[i][0] * x_y_werte[i][2] y_steigung = x_fun * x_y_werte[i][2] + y_0 plt.plot(x_steigung, y_steigung, color = 'green') plt.show() x_y_werte ###Output _____no_output_____
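###Markdown As a complement to the sign-change / interval-halving search above, here is a minimal sketch of plain gradient descent on the same function f(x) = x^2 - 4x + 1, using a fixed learning rate instead of shrinking the step when the slope changes sign. The learning rate and step count are illustrative choices and are not taken from the notebook above. ###Code
def f(x):
    # f(x) = x^2 - 4x + 1, minimum at x = 2 with f(2) = -3
    return x**2 - 4*x + 1

def grad(x):
    # f'(x) = 2x - 4
    return 2*x - 4

x = -2.0             # same starting point as above
learning_rate = 0.1  # illustrative step size
for step in range(50):
    x -= learning_rate * grad(x)

print(x, f(x))  # x approaches 2, f(x) approaches -3
###Output
_____no_output_____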
Module2/.ipynb_checkpoints/Module2 - Lab4-checkpoint.ipynb
###Markdown DAT210x - Programming with Python for DS Module2 - Lab4 Import and alias Pandas: ###Code import pandas as pd dfHeaders = ['motor', 'screw', 'pgain', 'vgain', 'class'] ###Output _____no_output_____ ###Markdown Load up the table from the link, and extract the dataset out of it. If you're having issues with this, look carefully at the sample code provided in the reading: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Next up, rename the columns so that they are _similar_ to the column definitions provided to you on the website. Be careful and don't accidentally use any column names twice. If a column uses special characters, you can replace them with regular characters to make it easier to work with: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Get rid of any row that has at least 4 NANs in it. That is, any rows that do not contain player points statistics: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown At this point, look through your dataset by printing it. There probably still are some erroneous rows in there. What indexing command(s) will you use to select all rows EXCEPT those rows? ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Get rid of the 'RK' column: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Make sure there are no holes in your index by resetting it. There is an example of this in the reading material. By the way, drop the original index. ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Check the data type of all columns, and ensure those that should be numeric are numeric. ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Your dataframe is now ready! Use the appropriate commands to answer the questions on the course lab page. ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown DAT210x - Programming with Python for DS Module2 - Lab4 Import and alias Pandas: ###Code import pandas as pd ###Output _____no_output_____ ###Markdown Load up the table from the link, and extract the dataset out of it. If you're having issues with this, look carefully at the sample code provided in the reading: ###Code NHL_stats = pd.read_html('http://www.espn.com/nhl/statistics/player/_/stat/points/sort/points/year/2015/seasontype/2', header = 1) NHL_stats = NHL_stats[0] ###Output _____no_output_____ ###Markdown Next up, rename the columns so that they are _similar_ to the column definitions provided to you on the website. Be careful and don't accidentally use any column names twice. If a column uses special characters, you can replace them with regular characters to make it easier to work with: ###Code NHL_stats.columns = ['Rank', 'Player', 'Team', 'Games Played', 'Goals', 'Assists', 'Points', '+/- Rating', 'Penalty Minutes', 'Points Per Game', 'Shots on Goal', 'Shooting Percentage', 'Game-Winning Goals', 'Power-Play Goals', 'Power-Play Assists', 'Short Handed Goals', 'Short Handed Assists'] ###Output _____no_output_____ ###Markdown Get rid of any row that has at least 4 NANs in it. That is, any rows that do not contain player points statistics: ###Code NHL_stats = NHL_stats.dropna(axis = 0, thresh = 4) ###Output _____no_output_____ ###Markdown At this point, look through your dataset by printing it. There probably still are some erroneous rows in there. What indexing command(s) will you use to select all rows EXCEPT those rows? 
###Code NHL_stats = NHL_stats[NHL_stats.Player != 'PLAYER'] ###Output _____no_output_____ ###Markdown Get rid of the 'RK' column: ###Code NHL_stats = NHL_stats.drop('Rank', axis = 1) ###Output _____no_output_____ ###Markdown Make sure there are no holes in your index by resetting it. There is an example of this in the reading material. By the way, drop the original index. ###Code NHL_stats = NHL_stats.reset_index(drop = True) ###Output _____no_output_____ ###Markdown Check the data type of all columns, and ensure those that should be numeric are numeric. ###Code NHL_stats = pd.concat([NHL_stats.loc[:,'Player' : 'Team'],NHL_stats.loc[:,'Games Played' : : ].apply(pd.to_numeric)], axis = 1) NHL_stats.dtypes ###Output _____no_output_____ ###Markdown Your dataframe is now ready! Use the appropriate commands to answer the questions on the course lab page. ###Code print(len(NHL_stats.index)) print(len(NHL_stats['Shooting Percentage'].unique())) print(NHL_stats.loc[15 : 16, 'Games Played'].sum()) ###Output 164.0 ###Markdown DAT210x - Programming with Python for DS Module2 - Lab4 Import and alias Pandas: ###Code # .. your code here .. import pandas as pd ###Output _____no_output_____ ###Markdown Load up the table from the link, and extract the dataset out of it. If you're having issues with this, look carefully at the sample code provided in the reading: ###Code # .. your code here .. df = pd.read_html('http://www.espn.com/nhl/statistics/player?stat=points&sort=points&year=2015&seasontype=2') values = df[0].iloc[2:, 0:] headers = df[0].iloc[1] df = values df df.columns = headers df df.reset_index(inplace=True) df ###Output _____no_output_____ ###Markdown Next up, rename the columns so that they are _similar_ to the column definitions provided to you on the website. Be careful and don't accidentally use any column names twice. If a column uses special characters, you can replace them with regular characters to make it easier to work with: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Get rid of any row that has at least 4 NANs in it. That is, any rows that do not contain player points statistics: ###Code # .. your code here .. df = df.dropna(axis=0, thresh=4) df ###Output _____no_output_____ ###Markdown At this point, look through your dataset by printing it. There probably still are some erroneous rows in there. What indexing command(s) will you use to select all rows EXCEPT those rows? ###Code # .. your code here . ###Output C:\Users\Divyansh\Anaconda3\lib\site-packages\ipykernel_launcher.py:2: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy C:\Users\Divyansh\Anaconda3\lib\site-packages\ipykernel_launcher.py:3: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy This is separate from the ipykernel package so we can avoid doing imports until C:\Users\Divyansh\Anaconda3\lib\site-packages\ipykernel_launcher.py:4: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. 
Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy after removing the cwd from sys.path. C:\Users\Divyansh\Anaconda3\lib\site-packages\ipykernel_launcher.py:5: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy """ C:\Users\Divyansh\Anaconda3\lib\site-packages\ipykernel_launcher.py:6: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy ###Markdown Get rid of the 'RK' column: ###Code # .. your code here .. df = df.drop(['RK'], axis=1) df ###Output _____no_output_____ ###Markdown Make sure there are no holes in your index by resetting it. There is an example of this in the reading material. By the way, drop the original index. ###Code # .. your code here .. df = df.drop(['index'], axis=1) df ###Output _____no_output_____ ###Markdown Check the data type of all columns, and ensure those that should be numeric are numeric. ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Your dataframe is now ready! Use the appropriate commands to answer the questions on the course lab page. ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown DAT210x - Programming with Python for DS Module2 - Lab4 Import and alias Pandas: ###Code import pandas as pd ###Output _____no_output_____ ###Markdown Load up the table from the link, and extract the dataset out of it. If you're having issues with this, look carefully at the sample code provided in the reading: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Next up, rename the columns so that they are _similar_ to the column definitions provided to you on the website. Be careful and don't accidentally use any column names twice. If a column uses special characters, you can replace them with regular characters to make it easier to work with: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Get rid of any row that has at least 4 NANs in it. That is, any rows that do not contain player points statistics: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown At this point, look through your dataset by printing it. There probably still are some erroneous rows in there. What indexing command(s) will you use to select all rows EXCEPT those rows? ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Get rid of the 'RK' column: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Make sure there are no holes in your index by resetting it. There is an example of this in the reading material. By the way, drop the original index. ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Check the data type of all columns, and ensure those that should be numeric are numeric. ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Your dataframe is now ready! Use the appropriate commands to answer the questions on the course lab page. ###Code # .. your code here .. 
###Output _____no_output_____ ###Markdown DAT210x - Programming with Python for DS Module2 - Lab4 Import and alias Pandas: ###Code # .. your code here .. import pandas ###Output _____no_output_____ ###Markdown Load up the table from the link, and extract the dataset out of it. If you're having issues with this, look carefully at the sample code provided in the reading: ###Code # .. your code here .. df = read_html() ###Output _____no_output_____ ###Markdown Next up, rename the columns so that they are _similar_ to the column definitions provided to you on the website. Be careful and don't accidentally use any column names twice. If a column uses special characters, you can replace them with regular characters to make it easier to work with: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Get rid of any row that has at least 4 NANs in it. That is, any rows that do not contain player points statistics: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown At this point, look through your dataset by printing it. There probably still are some erroneous rows in there. What indexing command(s) will you use to select all rows EXCEPT those rows? ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Get rid of the 'RK' column: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Make sure there are no holes in your index by resetting it. There is an example of this in the reading material. By the way, drop the original index. ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Check the data type of all columns, and ensure those that should be numeric are numeric. ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Your dataframe is now ready! Use the appropriate commands to answer the questions on the course lab page. ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown DAT210x - Programming with Python for DS Module2 - Lab4 Import and alias Pandas: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Load up the table from the link, and extract the dataset out of it. If you're having issues with this, look carefully at the sample code provided in the reading: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Next up, rename the columns so that they are _similar_ to the column definitions provided to you on the website. Be careful and don't accidentally use any column names twice. If a column uses special characters, you can replace them with regular characters to make it easier to work with: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Get rid of any row that has at least 4 NANs in it. That is, any rows that do not contain player points statistics: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown At this point, look through your dataset by printing it. There probably still are some erroneous rows in there. What indexing command(s) will you use to select all rows EXCEPT those rows? ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Get rid of the 'RK' column: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Make sure there are no holes in your index by resetting it. There is an example of this in the reading material. By the way, drop the original index. ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Check the data type of all columns, and ensure those that should be numeric are numeric. 
###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Your dataframe is now ready! Use the appropriate commands to answer the questions on the course lab page. ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown DAT210x - Programming with Python for DS Module2 - Lab4 Import and alias Pandas: ###Code # .. your code here .. import pandas as pd ###Output _____no_output_____ ###Markdown Load up the table from the link, and extract the dataset out of it. If you're having issues with this, look carefully at the sample code provided in the reading: ###Code # .. your code here .. df = pd.read_html('http://www.espn.com/nhl/statistics/player/_/stat/points/sort/points/year/2015/seasontype/2') df ###Output _____no_output_____ ###Markdown Next up, rename the columns so that they are _similar_ to the column definitions provided to you on the website. Be careful and don't accidentally use any column names twice. If a column uses special characters, you can replace them with regular characters to make it easier to work with: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Get rid of any row that has at least 4 NANs in it. That is, any rows that do not contain player points statistics: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown At this point, look through your dataset by printing it. There probably still are some erroneous rows in there. What indexing command(s) will you use to select all rows EXCEPT those rows? ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Get rid of the 'RK' column: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Make sure there are no holes in your index by resetting it. There is an example of this in the reading material. By the way, drop the original index. ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Check the data type of all columns, and ensure those that should be numeric are numeric. ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Your dataframe is now ready! Use the appropriate commands to answer the questions on the course lab page. ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown DAT210x - Programming with Python for DS Module2 - Lab4 Import and alias Pandas: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Load up the table from the link, and extract the dataset out of it. If you're having issues with this, look carefully at the sample code provided in the reading: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Next up, rename the columns so that they are _similar_ to the column definitions provided to you on the website. Be careful and don't accidentally use any column names twice. If a column uses special characters, you can replace them with regular characters to make it easier to work with: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Get rid of any row that has at least 4 NANs in it. That is, any rows that do not contain player points statistics: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown At this point, look through your dataset by printing it. There probably still are some erroneous rows in there. What indexing command(s) will you use to select all rows EXCEPT those rows? ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Get rid of the 'RK' column: ###Code # .. your code here .. 
###Output _____no_output_____ ###Markdown Make sure there are no holes in your index by resetting it. There is an example of this in the reading material. By the way, drop the original index. ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Check the data type of all columns, and ensure those that should be numeric are numeric. ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Your dataframe is now ready! Use the appropriate commands to answer the questions on the course lab page. ###Code # .. your code here .. ###Output _____no_output_____
Merge Charts.ipynb
###Markdown Global ###Code ## Basic stuff %load_ext autoreload %autoreload from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) display(HTML("""<style>div.output_area{max-height:10000px;overflow:scroll;}</style>""")) #IPython.Cell.options_default.cm_config.lineNumbers = true; ################################################################################ ## Python Version ################################################################################ import sys ################################################################################ ## General Stuff ################################################################################ from multiprocessing import Pool from tqdm import tqdm ################################################################################ ## Util Stuff ################################################################################ from timeUtils import clock, elapsed from ioUtils import saveFile, getFile ################################################################################ ## Music DB ################################################################################ from mainDB import mainDB from musicDBMap import musicDBMap from masterDBMatchClass import masterDBMatchClass from matchDBArtist import matchDBArtist ################################################################################ ## Music Names ################################################################################ from masterArtistNameDB import masterArtistNameDB ################################################################################ ## Chart Stuff ################################################################################ from artistIgnores import getArtistIgnores from billboardData import billboardData from top40Data import top40Data from spotifyData import spotifyData from chartArtistAlbumData import chartArtistAlbumData from chartUtils import * from extraArtists import extraKnownArtists ################################################################################ ## Pandas Stuff ################################################################################ import pandas as pd from pandas import DataFrame pd.set_option('display.max_rows', 500) pd.set_option('display.max_columns', 500) pd.set_option('display.width', 1000) chartType = None print("Python: {0}".format(sys.version)) import datetime as dt start = dt.datetime.now() print("Notebook Last Run Initiated: "+str(start)) ###Output _____no_output_____ ###Markdown Charts To Merge ###Code chartsToMerge = ["RYMList", "RYMList2", "RYMAlbum", "RYMSong", "Billboard", "BillboardYE", "MusicVF"] #, "Spotify", "SpotifyViral"] mdbmaps = {} for chartType in chartsToMerge: mdbmaps[chartType] = musicDBMap(chartType) dbs = ['Discogs', 'AllMusic', 'MusicBrainz', 'AceBootlegs', 'RateYourMusic', 'LastFM', 'DatPiff', 'RockCorner', 'CDandLP', 'MusicStack', 'MetalStorm', 'Deezer', 'AppleMusic', 'AlbumOfTheYear', 'Genius', 'IHeart', 'KWorbSpotify', 'KWorbiTunes', 'KWorbYouTube'] dbToChartMap = {db: {} for db in dbs} nameToChartMap = {} for chartType, mdbmap in mdbmaps.items(): for primaryKey, primaryData in mdbmap.get().items(): artistName = primaryData.artistName for db,dbData in primaryData.get().items(): if dbToChartMap.get(db) is None: raise ValueError("Unknown DB [{0}]".format(db)) dbID,name = dbData.get() if dbID is None: continue if dbToChartMap[db].get(dbID) is None: dbToChartMap[db][dbID] = {} if dbToChartMap[db][dbID].get(chartType) is None: 
dbToChartMap[db][dbID][chartType] = {} dbToChartMap[db][dbID][chartType][primaryKey] = artistName ## Check For Duplicates dbToChartReduceMap = {} for db,dbData in dbToChartMap.items(): dbToChartReduceMap[db] = {} for dbID,dbIDData in dbData.items(): dbToChartReduceMap[db][dbID] = {"Artist": None, "Charts": None} artistNames = [] chartData = {} for chartType,chartTypeData in dbIDData.items(): if len(chartTypeData) > 1: print("MultiMatches [{0} , {1}, {2}] --> {3}".format(db, dbID, chartType, chartTypeData)) continue artistNames += chartTypeData.values() chartData[chartType] = list(chartTypeData.keys())[0] artistNames = list(set(artistNames)) if len(chartTypeData) > 1: print("MultiMatches [{0} , {1}] --> {2}".format(db, dbID, [x.encode('UTF-8') for x in artistNames])) continue artistName = artistNames[0] dbToChartReduceMap[db][dbID]["Artist"] = artistName dbToChartReduceMap[db][dbID]["Charts"] = chartData #dbToChartMap["Discogs"] artistNameToChartData = {} dbIDMap = {} dbIDNameMap = {} for db,dbData in dbToChartReduceMap.items(): for dbID,dbIDData in dbData.items(): artistName = dbIDData["Artist"] chartData = dbIDData["Charts"] if artistNameToChartData.get(artistName) is None: artistNameToChartData[artistName] = [] key = tuple([db,dbID]) value = chartData if dbIDMap.get(key) is None: dbIDMap[key] = value else: if dbIDMap[key] != chartData: raise ValueError("Multiple Matches: {0} --> {1}".format(key,chartData)) if dbIDNameMap.get(key) is None: dbIDNameMap[key] = artistName else: if dbIDNameMap[key] != artistName: raise ValueError("Multiple Matches: {0} --> {1}".format(key,chartData)) artistNameToChartData[artistName].append(key) artistNameToChartData["Miles Davis"] from pandas import Series dbIDToNameDF = DataFrame(Series(dbIDNameMap)) dbIDToNameDF.columns = ["Artist"] dbIDToNameDF.head() dbIDToChartDF = DataFrame(dbIDMap).T dbIDToChartDF.head() testDF = dbIDToNameDF.join(dbIDToChartDF) finalDF = {} for i,(key,df) in enumerate(testDF.groupby("Artist")): print("Name ======>",key) for key,val in df.to_dict().items(): print(key,val) break #finalDF[key] = df df tmp1 = b'Mike Will Made-It' tmp2 = b'Mike Will Made\xe2\x80\x90It' #s=s.replace(b'PatientName',name) tmp1.replace(b"\xe2\x80\x90", b"-") tmp2.replace(b"\xe2\x80\x90", b"-") def checkForMultipleMatches(mdbmap): dbChartMap = {} for primaryKey, primaryData in mdbmap.get().items(): artistName = primaryData.artistName for db,dbData in primaryData.get().items(): if dbChartMap.get(db) is None: dbChartMap[db] = {} dbID,name = dbData.get() if dbID is None: continue if dbChartMap[db].get(dbID) is None: dbChartMap[db][dbID] = {} dbChartMap[db][dbID][primaryKey] = artistName retval = {} for db,dbData in dbChartMap.items(): for dbID,dbIDData in dbData.items(): if len(dbIDData) > 1: if retval.get(db) is None: retval[db] = {} retval[db][dbID] = dbIDData print("{0: <20}{1}".format(db,dbID)) print("{0: <20}{1}".format("", dbIDData)) return retval mdf = DataFrame(multiMatches).T from pandas import Series for colname in mdf.columns: mdfData = mdf[colname].dropna() print(colname) for key,value in mdfData.iteritems(): vd = {dbID: list(dbData.values()) for dbID,dbData in value.items()} vd = [list(dbData.values()) for dbID,dbData in value.items()] print("\t",key,vd) ## AllMusic Ritual multiMatches = {} for chartType,mdbmap in mdbmaps.items(): print("="*15,chartType,"="*15) multiMatches[chartType] = checkForMultipleMatches(mdbmap) def artistChartInfo dbChartMap = {} for chartType in chartsToMerge: for primaryKey, primaryData in mdbmaps[chartType].get().items(): 
artistName = primaryData.artistName for db,dbData in primaryData.get().items(): if dbChartMap.get(db) is None: dbChartMap[db] = {} dbID,name = dbData.get() if dbID is not None: if dbChartMap[db].get(dbID) is None: dbChartMap[db][dbID] = {} if dbChartMap[db][dbID][chartType] = {} is None: dbChartMap[db][dbID][artistName] = {} dbChartMap[db][dbID][artistName][chartType] = primaryKey dbChartMap["LastFM"] primaryData.get() ###Output _____no_output_____
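###Markdown The last scratch cell above sketches an `artistChartInfo` helper, but it is not valid Python as written (the `def` line has no signature and one condition uses `=` inside an `is None` check). Below is a corrected sketch of what it appears to be building, a nested map from (db, dbID) to artist name, chart type and primary key; the exact return structure is an assumption inferred from how `dbChartMap` is indexed in that cell. ###Code
def artist_chart_info(mdbmaps, charts_to_merge):
    """Corrected sketch of the scratch cell above (structure is an assumption)."""
    db_chart_map = {}
    for chart_type in charts_to_merge:
        for primary_key, primary_data in mdbmaps[chart_type].get().items():
            artist_name = primary_data.artistName
            for db, db_data in primary_data.get().items():
                db_id, _name = db_data.get()
                if db_id is None:
                    continue
                inner = db_chart_map.setdefault(db, {}).setdefault(db_id, {})
                inner.setdefault(artist_name, {})[chart_type] = primary_key
    return db_chart_map

# dbChartMap = artist_chart_info(mdbmaps, chartsToMerge)
# dbChartMap['LastFM']
###Output
_____no_output_____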
refactor-clinic/Refactor Clinic #2.ipynb
###Markdown Refactor Clinic 2In this example, we're going to create a dictionary based on some fields on a Python class.We'll define a dummy class `Campaign` for this post, and instantiate a dummy object. ###Code class Campaign: def __init__(self, id, name, status, customer_id, uid): self.id = id self.name = name self.status = status self.customer_id = customer_id self.uid = uid campaign = Campaign('Test Name', 111, 'Ready', 50, 'my-super-uid') ###Output _____no_output_____ ###Markdown Let's say we want to grab some of the fields from this class, and put them into a Python dictionary. Perhaps this dictionary will be served as an API response via JSON.You may write code like the following - importantly, note that the dictionary keys have the same name as the fields on the class. ###Code campaign_dict = {} campaign_dict["id"] = campaign.id campaign_dict["name"] = campaign.name campaign_dict["status"] = campaign.status campaign_dict["customer_id"] = campaign.customer_id campaign_dict["uid"] = campaign.uid campaign_dict ###Output _____no_output_____ ###Markdown The resulting dictionary has the key/values we need.However, this can be quite laborious to write a line for each field. This is not a big problem when you have have 1-5 fields, but beyond that, you have to write a lot of code to achieve this task. For example, setting 20 key/values from a class is going to look bad in your codebase. You may also miss something, or you may have requirements later on to add or remove particular fields, which might mean digging through many lines to find what you need.Let's look at one potential way to refactor this code. We are going to take advantage of the fact that the dictionary keys have the same name as the fields on the class. ###Code keys = ('id', 'name', 'status', 'customer_id', 'uid') campaign_dict = {key: getattr(campaign, key) for key in keys} print(campaign_dict) ###Output {'id': 'Test Name', 'name': 111, 'status': 'Ready', 'customer_id': 50, 'uid': 'my-super-uid'} ###Markdown Let's break this down.- We define the keys that we are interested in getting from the class. This is a tuple, and can be extended later if required.- We use a dictionary-comprehension to set up a dictionary with these key names, and use the `getattr()` function to get the associated attribute on the class to set as the value.Defining `keys` like this is vaguely reminiscent of the `fields` attribute of Django Forms and Django REST Framework serializers, if you are familiar with these.The tuple can be extended to include (or remove) whatever fields are required, and a single-line of code creates our dictionary. The only line needing changed in future is the `keys` tuple.**Pitfall** - if your class's field names might change, you need to be careful to update the `keys` to reflect this change! This would also be true for the original code, though - in fact you'd need to change both the dictionary key *and* the class field name.Overall, we feel this is a better and more flexible, maintainable approach. The `getattr()` and `setattr()` methods are handy for these types of tasks where you want to get/set class attributes based on strings. AddendumIf you need *all* the class's fields as a dictionary, you could use the `__dict__` property on the class.This is less flexible than explicitly defining the `keys`, therefore it should be used with caution, depending on your use-case! ###Code campaign.__dict__ ###Output _____no_output_____
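###Markdown One more hedged example, not part of the original post: `setattr()` covers the reverse direction, pushing a dictionary of values back onto the class by string key. The `updates` dictionary below is purely illustrative data; it reuses the `campaign` object defined above. ###Code
# Reverse direction: update the class from a dictionary using setattr()
updates = {"status": "Paused", "customer_id": 999}   # illustrative data

for key, value in updates.items():
    setattr(campaign, key, value)

print(campaign.status, campaign.customer_id)
###Output
_____no_output_____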
20200411/event-parser.ipynb
###Markdown RequirementsLoop through the CSV files in the `raw_files` folder.For each file:* Create a folder named after the file.* Get the rows in between events and save them in a separate file named after the event. Output locationSave everything in a folder named `parsed_files`. The directory tree should look like this:* raw_files * sample00.csv * sample01.csv * sample02.csv * ...* parsed_files * sample00 * event00.csv * event01.csv * event02.csv * ... * sample01 * event00.csv * event01.csv * event02.csv * ... * ... ScriptImport os and pandas modules. ###Code import os import pandas as pd ###Output _____no_output_____ ###Markdown Define the paths of the source and destination directories. ###Code SRC_PATH = "raw_files/" DST_PATH = "parsed_files/" if not os.path.exists(DST_PATH): os.mkdir(DST_PATH) # loop through the files in parsed_files for file in os.listdir(SRC_PATH): # skip non-CSV files if ".csv" in file: # create a folder named after the file folder = DST_PATH + file.replace(".csv", "") + "/" if not os.path.exists(folder): os.mkdir(folder) # log which file is being read print("------------------------------") print("Reading " + file) # extract the file data and save it to a dataframe df = pd.read_csv(SRC_PATH + file) # get the indices of the rows where Event contains !E TRIAL_EVENT_VAR indices = df.index[df["Event"].str.contains("!E TRIAL_EVENT_VAR", na=False)].tolist() # loop through every other element of indices (i.e. 0, 2, 4, etc.) for i in range(0, len(indices), 2): # get start and end indices start_index = indices[i] end_index = indices[i + 1] # extract file name from event name filename = df["Event"][indices[i]] # remove !E TRIAL_EVENT_VAR from file name filename = filename.replace("!E TRIAL_EVENT_VAR ", "") # remove start/stop from file name filename = filename.split("_")[0] print(f"- Saving rows {start_index} to {end_index} to {filename}.csv") parsed_df = df[start_index:end_index + 1] parsed_df.to_csv(folder + filename + ".csv", index=False) ###Output ------------------------------ Reading sample02.csv - Saving rows 27538 to 28027 to normal 1.csv - Saving rows 38609 to 39138 to nonhazard 2.csv - Saving rows 53319 to 53706 to hazard 3.csv - Saving rows 66680 to 67318 to nonhazard 4.csv - Saving rows 87903 to 88274 to hazard 5.csv - Saving rows 100748 to 101267 to normal 6.csv - Saving rows 115651 to 116022 to hazard 7.csv - Saving rows 124820 to 125283 to normal 8.csv - Saving rows 135281 to 135783 to nonhazard 9.csv ------------------------------ Reading sample01.csv - Saving rows 19069 to 19590 to normal 1.csv - Saving rows 31214 to 31833 to nonhazard 2.csv - Saving rows 49877 to 50252 to hazard 3.csv - Saving rows 67513 to 68124 to nonhazard 4.csv - Saving rows 85214 to 85549 to hazard 5.csv - Saving rows 109236 to 109647 to normal 6.csv - Saving rows 125941 to 126311 to hazard 7.csv - Saving rows 138739 to 139211 to normal 8.csv - Saving rows 150974 to 151599 to nonhazard 9.csv
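###Markdown A small sanity check, not part of the original script: walk the destination folder and print the directory tree, which should match the layout described in the Requirements section above. ###Code
import os

DST_PATH = "parsed_files/"   # same destination folder as above

for sample in sorted(os.listdir(DST_PATH)):
    sample_dir = os.path.join(DST_PATH, sample)
    if not os.path.isdir(sample_dir):
        continue
    print(sample)
    for event_file in sorted(os.listdir(sample_dir)):
        print("  -", event_file)
###Output
_____no_output_____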
rhedit_03-spatial-joins.ipynb
###Markdown Spatial joins Goals of this notebook:- Based on the `countries` and `cities` dataframes, determine for each city the country in which it is located.- To solve this problem, we will use the the concept of a 'spatial join' operation: combining information of geospatial datasets based on their spatial relationship. ###Code %matplotlib inline import pandas as pd import geopandas pd.options.display.max_rows = 10 countries = geopandas.read_file("zip://./data/ne_110m_admin_0_countries.zip") cities = geopandas.read_file("zip://./data/ne_110m_populated_places.zip") rivers = geopandas.read_file("zip://./data/ne_50m_rivers_lake_centerlines.zip") countries.head() cities.head() rivers.head() ###Output _____no_output_____ ###Markdown Recap - joining dataframesPandas provides functionality to join or merge dataframes in different ways, see https://chrisalbon.com/python/data_wrangling/pandas_join_merge_dataframe/ for an overview and https://pandas.pydata.org/pandas-docs/stable/merging.html for the full documentation. To illustrate the concept of joining the information of two dataframes with pandas, let's take a small subset of our `cities` and `countries` datasets: ###Code cities2 = cities[cities['name'].isin(['Bern', 'Brussels', 'London', 'Paris'])].copy() cities2['iso_a3'] = ['CHE', 'BEL', 'GBR', 'FRA'] cities2 countries2 = countries[['iso_a3', 'name', 'continent']] countries2.head(8) ###Output _____no_output_____ ###Markdown We added a 'iso_a3' column to the `cities` dataset, indicating a code of the country of the city. This country code is also present in the `countries` dataset, which allows us to merge those two dataframes based on the common column.Joining the `cities` dataframe with `countries` will transfer extra information about the countries (the full name, the continent) to the `cities` dataframe, based on a common key: ###Code cities2.merge(countries2, on='iso_a3') ###Output _____no_output_____ ###Markdown **But**, for this illustrative example, we added the common column manually, it is not present in the original dataset. However, we can still know how to join those two datasets based on their spatial coordinates. Recap - spatial relationships between objectsIn the previous notebook [02-spatial-relationships.ipynb](./02-spatial-relationships-operations.ipynb), we have seen the notion of spatial relationships between geometry objects: within, contains, intersects, ...In this case, we know that each of the cities is located *within* one of the countries, or the other way around that each country can *contain* multiple cities.We can test such relationships using the methods we have seen in the previous notebook: ###Code france = countries.loc[countries['name'] == 'France', 'geometry'].squeeze() france countries.query('name == "France"') cities.within(france) ###Output _____no_output_____ ###Markdown The above gives us a boolean series, indicating for each point in our `cities` dataframe whether it is located within the area of France or not. Because this is a boolean series as result, we can use it to filter the original dataframe to only show those cities that are actually within France: ###Code cities[cities.within(france)] ###Output _____no_output_____ ###Markdown We could now repeat the above analysis for each of the countries, and add a column to the `cities` dataframe indicating this country. 
However, that would be tedious to do manually, and is also exactly what the spatial join operation provides us.*(note: the above result is incorrect, but this is just because of the coarse-ness of the countries dataset)* Spatial join operationSPATIAL JOIN = *transferring attributes from one layer to another based on their spatial relationship* Different parts of this operations: The GeoDataFrame to which we want add information The GeoDataFrame that contains the information we want to add The spatial relationship we want to use to match both datasets ('intersects', 'contains', 'within') The type of join: left or inner join In this case, we want to join the `cities` dataframe with the information of the `countries` dataframe, based on the spatial relationship between both datasets.We use the [`geopandas.sjoin`](http://geopandas.readthedocs.io/en/latest/reference/geopandas.sjoin.html) function: ###Code joined = geopandas.sjoin(cities, countries, op='within', how='left') # for `op` attributes, see lecture 02-spatial-relationships-operations notebook. # options available: within, touches, crosses, overlaps. # the dataset from `countries` are joined into `cities` data cities joined joined['continent'].value_counts() ###Output _____no_output_____ ###Markdown The overlay operationIn the spatial join operation above, we are not changing the geometries itself. We are not joining geometries, but joining attributes based on a spatial relationship between the geometries. This also means that the geometries need to at least overlap partially.If you want to create new geometries based on joining (combining) geometries of different dataframes into one new dataframe (eg by taking the intersection of the geometries), you want an **overlay** operation. ###Code africa = countries[countries['continent'] == 'Africa'] africa.plot() ###Output _____no_output_____ ###Markdown Let's draw a buffer around the cities ###Code cities['geometry'] = cities.buffer(2) cities.head() geopandas.overlay(africa, cities, how='difference').plot() ###Output _____no_output_____
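###Markdown To round off the overlay example above, a hedged sketch: `how='intersection'` keeps only the parts of the buffered city circles that fall inside African countries, carrying attributes from both layers into the new geometries, while a spatial join counts how many buffered cities touch each country. The `name_left` column name relies on geopandas' default suffixes for clashing columns, so treat it as an assumption. ###Code
# New geometries from the overlap of both layers
intersection = geopandas.overlay(africa, cities, how='intersection')
print(intersection.shape)
intersection.plot()

# Attribute-only join: count buffered cities intersecting each country
joined_counts = geopandas.sjoin(countries, cities, op='intersects', how='inner')
joined_counts.groupby('name_left').size().sort_values(ascending=False).head()
###Output
_____no_output_____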
so-co2-airborne-obs/gradients-methane.ipynb
###Markdown Observed concentrations and gradients of methane ###Code %load_ext autoreload %autoreload 2 import numpy as np import xarray as xr import matplotlib.pyplot as plt import matplotlib.colors as colors import matplotlib.gridspec as gridspec import datasets import emergent_constraint as ec import figure_panels import obs_aircraft import regression_models import util ###Output _____no_output_____ ###Markdown Load aircraft data ###Code lat_lo_aircraft = -90. lat_hi_aircraft = -15. profiles_only = True bin_aggregation_method = 'median' parameters = ec.get_parameters() campaign_info = obs_aircraft.get_campaign_info(verbose=False) def load_data_aircraft(m): df = obs_aircraft.open_aircraft_data(m) sel_loc = obs_aircraft.groups_select_profiles(df, lat_lo_aircraft, lat_hi_aircraft, profiles_only=profiles_only) return df.loc[sel_loc] dfs_obs = {m: load_data_aircraft(m) for m in ['pfp', 'medusa', 'obs',]} ###Output loading pfp loading medusa ###Markdown Upper troposphere CO2 vs. CH4 ###Code from mpl_toolkits.axes_grid1.inset_locator import inset_axes def gas_scatter_upper_trop(theta_crit, ax, df_obs, constituent): txt_box_props = dict(facecolor='w', alpha=0.75, edgecolor='None', boxstyle='square,pad=0') CO2, Y = obs_aircraft.get_property_property( df=df_obs, campaign_sel_list=campaign_info.keys(), xname='co2', yname=constituent, theta_bin=(theta_crit, np.Inf), lat_range=(-90., -45.), filter_strat=True, ) sc = ax.hexbin(Y, CO2, mincnt=1, vmax=50, gridsize=50, cmap='cividis', ) #, C=df_sub.theta) cax = inset_axes(ax, width="2%", height="45%", loc='lower left', bbox_to_anchor=(0.85, 0., 1, 1), bbox_transform=ax.transAxes, ) cb = plt.colorbar(sc, ax=ax, cax=cax) #cb.set_ticks(np.arange(310, 370, 10)) cb.ax.set_title('N', loc='left') fit = regression_models.linreg_odr(Y, CO2) #linreg_odr x = np.array((min(Y), max(Y))) ax.plot(x, fit.predict(x), 'k-') ax.set_ylabel('$\Delta$CO$_2$ [ppm]') if constituent == 'ch4': ax.set_xlabel('$\Delta$CH$_4$ [ppb]') ax.set_title(f'Aircraft obs: CO$_2$:CH$_4$, θ > {theta_crit:0.0f} K') #cb.ax.set_title('θ [K]', loc='left') units = 'ppb' c_str = 'CH$_4$' if constituent == 'sf6': ax.set_xlabel('$\Delta$SF$_6$ [ppt]') units = 'ppt' c_str = 'SF$_6$' str_text = ( f'{1e3 * fit.beta[0]:0.1f}±{1e3 * fit.stderr_beta[0]:0.2f} ppb-CO$_2$:{units}-{c_str}\n' + f'r$^2$={fit.r2:0.3f}' ) xoff = np.diff(ax.get_xlim()) * 0.025 yoff = -np.diff(ax.get_ylim()) * 0.15 ax.text( ax.get_xlim()[0]+xoff, ax.get_ylim()[1]+yoff, str_text, fontsize=9, fontweight='bold', bbox=txt_box_props, ) return fit.beta[0] slope = {} fig, axs = util.canvas(1, 2) for p, constituent in enumerate(['ch4', 'sf6']): i, j = np.unravel_index(p, axs.shape) ax = axs[i, j] slope[constituent] = gas_scatter_upper_trop( 305., ax, dfs_obs['obs'], constituent=constituent, ) ###Output _____no_output_____ ###Markdown Vertical gradients ###Code theta_bins = obs_aircraft.make_theta_bins( **{k: parameters[k] for k in ['lbin', 'ubin', 'udθ', 'lbin_as_upper_bound',]} ) dfs_campaign_gradients = {}; dfs_flight_gradients = {} for c in ['co2', 'ch4']: dfs_campaign_gradients[c] = obs_aircraft.campaign_gradients( dfs_obs, campaign_info.keys(), theta_bins, gradient_lat_range=parameters['gradient_lat_range'], constituent=c, ) dfs_flight_gradients[c] = obs_aircraft.flight_gradients( dfs_obs, theta_bins, gradient_lat_range=parameters['gradient_lat_range'], constituent=c, ) dfs_campaign_gradients['ch4'] fig, axs = util.canvas(1, 1) xco2, yco2 = figure_panels.obs_theta_gradient(dfs_flight_gradients['co2'], axs[0, 0]) ylm = axs[0, 
0].get_ylim() fig, axs = util.canvas(1, 1, figsize=(6, 5), use_gridspec=True, hspace=0.3) axs = axs.ravel() figure_panels.obs_theta_gradient( dfs_flight_gradients['ch4'], axs[0], constituent='ch4', theta_bins=theta_bins ); ###Output _____no_output_____ ###Markdown Load section ###Code ds = datasets.aircraft_sections('obs').groupby('time.season').mean().compute() ds ###Output _____no_output_____ ###Markdown Visualization ###Code fig = plt.figure(figsize=(13, 9)) #dpi=300) # set up plot grid gs = gridspec.GridSpec( nrows=2, ncols=4, width_ratios=(1, 0.02, 1, 0.02), hspace=0.35, wspace=0.2, ) axs = np.empty((2, 2)).astype(object) axs[0, 0] = plt.subplot(gs[0, 0]) axs[0, 1] = plt.subplot(gs[0, 2]) axs[1, 0] = plt.subplot(gs[1, 0]) axs[1, 1] = plt.subplot(gs[1, 2]) cax = plt.subplot(gs[0, -1]) ax = axs[1, 0] co2_per_ch4 = gas_scatter_upper_trop(305., ax, dfs_obs['obs'], constituent='ch4') for n, season in enumerate(['DJF', 'JJA',]): ax = axs[0, n] cf = ax.pcolormesh( ds.y, ds.z, ds.DCH4_binned.sel(season=season), norm=colors.TwoSlopeNorm( vmin=figure_panels.levels[0]/co2_per_ch4, vcenter=0., vmax=figure_panels.levels[-1]/co2_per_ch4 ), cmap=figure_panels.cmap, shading='nearest', ) cs = ax.contour( ds.LAT, ds.ALT, ds.THETA.sel(season=season), levels=np.arange(255., 350., 10.), linewidths=1, colors='gray') lb = plt.clabel(cs, fontsize=8, inline=True, fmt='%d') ax.set_title(f'Aircraft obs {season}: CH$_4$ minus 295–305K mean') ax.set_ylim((0, 10.5)) ax.set_xlim(-81.25, -28.75) ax.set_ylabel('Altitude [km]') ax.set_xlabel('Latitude [°N]') cb = plt.colorbar(cf, cax=cax) cb.ax.set_title('$\Delta$CH$_4$ [ppb]') ax = axs[1, 1] figure_panels.obs_theta_gradient( dfs_flight_gradients['ch4'], ax, constituent='ch4', theta_bins=theta_bins, ) ylm_def = np.array(ax.get_ylim()) * co2_per_ch4 ylm = np.array([-1.5, 0.75]) assert ylm[0] <= ylm_def[0] assert ylm_def[1] <= ylm[1] ax.set_ylim(ylm / co2_per_ch4) axR = ax.twinx() axR.set_ylim(np.array(ylm)) axR.set_ylabel('Equivalent $\Delta_{θ}$CO$_2$ [ppm]') axR.plot(xco2, yco2, 'k--') util.label_plots(fig, [ax for ax in axs.ravel()], xoff=-0.02) util.savefig('methane') ###Output _____no_output_____
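###Markdown `regression_models.linreg_odr` used above is a project-specific helper, so as a hedged, generic illustration of the same idea, here is an orthogonal distance regression (total least squares) line fit with `scipy.odr` on synthetic data. The data and starting values are made up and only stand in for the CH4/CO2 pairs used above. ###Code
import numpy as np
from scipy import odr

def linear(beta, x):
    # straight line y = beta[0] * x + beta[1]
    return beta[0] * x + beta[1]

# Synthetic stand-in data (slope 2, intercept 1, noise in y)
rng = np.random.default_rng(0)
x = rng.normal(size=200)
y = 2.0 * x + 1.0 + rng.normal(scale=0.3, size=200)

fit = odr.ODR(odr.RealData(x, y), odr.Model(linear), beta0=[1.0, 0.0]).run()
print(fit.beta)     # fitted slope and intercept
print(fit.sd_beta)  # their standard errors
###Output
_____no_output_____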
module2-regression-2/Adewale_Adeagbo_DS13_LS_DS_212_assignment.ipynb
###Markdown Lambda School Data Science*Unit 2, Sprint 1, Module 2*--- Regression 2 AssignmentYou'll continue to **predict how much it costs to rent an apartment in NYC,** using the dataset from renthop.com.- [ ] Do train/test split. Use data from April & May 2016 to train. Use data from June 2016 to test.- [ ] Engineer at least two new features. (See below for explanation & ideas.)- [ ] Fit a linear regression model with at least two features.- [ ] Get the model's coefficients and intercept.- [ ] Get regression metrics RMSE, MAE, and $R^2$, for both the train and test data.- [ ] What's the best test MAE you can get? Share your score and features used with your cohort on Slack!- [ ] As always, commit your notebook to your fork of the GitHub repo. [Feature Engineering](https://en.wikipedia.org/wiki/Feature_engineering)> "Some machine learning projects succeed and some fail. What makes the difference? Easily the most important factor is the features used." — Pedro Domingos, ["A Few Useful Things to Know about Machine Learning"](https://homes.cs.washington.edu/~pedrod/papers/cacm12.pdf)> "Coming up with features is difficult, time-consuming, requires expert knowledge. 'Applied machine learning' is basically feature engineering." — Andrew Ng, [Machine Learning and AI via Brain simulations](https://forum.stanford.edu/events/2011/2011slides/plenary/2011plenaryNg.pdf) > Feature engineering is the process of using domain knowledge of the data to create features that make machine learning algorithms work. Feature Ideas- Does the apartment have a description?- How long is the description?- How many total perks does each apartment have?- Are cats _or_ dogs allowed?- Are cats _and_ dogs allowed?- Total number of rooms (beds + baths)- Ratio of beds to baths- What's the neighborhood, based on address or latitude & longitude? Stretch Goals- [ ] If you want more math, skim [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf), Chapter 3.1, Simple Linear Regression, & Chapter 3.2, Multiple Linear Regression- [ ] If you want more introduction, watch [Brandon Foltz, Statistics 101: Simple Linear Regression](https://www.youtube.com/watch?v=ZkjP5RJLQF4)(20 minutes, over 1 million views)- [ ] Add your own stretch goal(s) ! ###Code %%capture import sys # If you're on Colab: if 'google.colab' in sys.modules: DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/' !pip install category_encoders==2.* # If you're working locally: else: DATA_PATH = '../data/' # Ignore this Numpy warning when using Plotly Express: # FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead. 
import warnings warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy') import numpy as np import pandas as pd # Read New York City apartment rental listing data df = pd.read_csv(DATA_PATH+'apartments/renthop-nyc.csv') assert df.shape == (49352, 34) # Remove the most extreme 1% prices, # the most extreme .1% latitudes, & # the most extreme .1% longitudes df = df[(df['price'] >= np.percentile(df['price'], 0.5)) & (df['price'] <= np.percentile(df['price'], 99.5)) & (df['latitude'] >= np.percentile(df['latitude'], 0.05)) & (df['latitude'] < np.percentile(df['latitude'], 99.95)) & (df['longitude'] >= np.percentile(df['longitude'], 0.05)) & (df['longitude'] <= np.percentile(df['longitude'], 99.95))] # Engineer new feature from interest level df.groupby('interest_level').count() # function to generate dummies def int_level(cell): if cell == 'high': return 2 elif cell == 'medium': return 1 else: return 0 df['interest_level'] = df['interest_level'].apply(int_level) # Second feature to check if address description is null df['display_address'].isnull().sum() # check if cell is empty def display_address(cell): if pd.isnull(cell): return 0 else: return 1 # apply function df['display_add_present'] = df['display_address'].apply(display_address) # confirm function worked df['display_add_present'].unique() # more confirmation df.groupby('display_add_present').count() # split train and test data train = df[df['created'].str.contains('2016-04') | df['created'].str.contains('2016-05')] test = df[df['created'].str.contains('2016-06')] # Linear regression from sklearn.linear_model import LinearRegression df.head() # instantiate linear regression model = LinearRegression() # specify features and target features = ['bathrooms','doorman','fitness_center','outdoor_space','balcony'] x_train = train[features] x_test = test[features] target = 'price' y_train = train[target] y_test = test[target] # fit the model model.fit(x_train,y_train) y_pred = model.predict(x_test) # model intercept print(model.intercept_) coeff_df = pd.DataFrame(model.coef_,features,columns=['Coefficient']) coeff_df from sklearn import metrics print('MAE:', metrics.mean_absolute_error(y_test, y_pred)) print('R2:', metrics.r2_score(y_test,y_pred) ) print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test,y_pred))) ###Output _____no_output_____
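###Markdown A hedged sketch of two more entries from the Feature Ideas list above (total rooms and the bed-to-bath ratio) plus a month-based split that avoids string matching on `created`. It reuses the `df` loaded above and assumes the renthop frame has `bedrooms` and `bathrooms` columns, as those feature ideas imply. ###Code
# Two more engineered features from the Feature Ideas list
df['total_rooms'] = df['bedrooms'] + df['bathrooms']
df['bed_bath_ratio'] = df['bedrooms'] / df['bathrooms'].replace(0, np.nan)

# Month-based split (the data is all from 2016 per the assignment)
df['created'] = pd.to_datetime(df['created'])
train = df[df['created'].dt.month.isin([4, 5])]
test = df[df['created'].dt.month == 6]
print(train.shape, test.shape)
###Output
_____no_output_____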
jigsaw-unintended-bias-multi-sample-dropout.ipynb
###Markdown Summary* I ensembled BERT-base-uncased, BERT-large-uncased, GPT2, XLNET models (simple average)* Though I tried text preprocessing, but it seems to have no obvious improvement on Leaderboard score in this competition.* Label preprocessing -- Most effective trick for me.* Multi-Sample Dropout from here https://arxiv.org/abs/1905.09788 -- increases LB score a little and the training gets stabler (loss decreases stabler).* Training model using toxicity score as additional label (Thanks to Dieter for this kernel: https://www.kaggle.com/christofhenkel/how-to-preprocessing-for-glove-part2-usage ) -- Improved my BERT-base and GPT2* Training model using BCELoss with sample weight (Thanks to Dieter for the same kernel). I applied this trick to all of my models. Label PreprocessingI did something like this:`labels = [(df['target'].values&gt;=rate/(args.bin+1)).astype(np.int) for rate in range(1, args.bin+1)]`Here `args.bin` is a hyper-parameter defined how many classes to generate. For example, if we have origin label `[0.33, 0.05, 1.0, 0.58, 0.0]` , and `args.bin = 9` , we will get output like this: `array([[1, 1, 1, 0, 0, 0, 0, 0, 0], 0.33 [0, 0, 0, 0, 0, 0, 0, 0, 0], 0.05 [1, 1, 1, 1, 1, 1, 1, 1, 1], 1.0 [1, 1, 1, 1, 1, 0, 0, 0, 0], 0.58 [0, 0, 0, 0, 0, 0, 0, 0, 0]]) 0.0` Then I treated each column as binary class label, and trained it using BCELoss. While predicting, I simply took the mean value of all columns as final prediction.I used `args.bin=29` for BERT-base and GPT2, `args.bin=99` for BERT-large, `args.bin=9` for XLNET, for final submission. These are decided by only a few experiments since the limitation of compute resource, maybe not the best ones. Multi-Sample Dropouthttps://arxiv.org/abs/1905.09788One advantage of models like BERT, GPT2, XLNET is that they were already pretrained on large scale text data. That means we can fine-tune them on our own dataset in a relatively short time period and get good results, compared to training from the scratch. However, there is also a drawback, that is, we cannot modify the architecture of these models.In this situation, I came up with Multi-Sample Dropout, which can be easily applied to fc layers at the end of these models. Experiments showed that it can decrease the loss stabler while training and make the model generalization process better.I applied this trick to all of my models. 5 dropout layers with different mask and p=0.5 have been used parallel followed the basic BERT model, then pass the output of each dropout layer to a shared weight fc layer, finally I take the average of the outputs from fc layer as final output. 
This process is shown in the following figure.![BERT](https://www.googleapis.com/download/storage/v1/b/kaggle-user-content/o/inbox%2F448347%2Fe7da45582176dd2a4dbdc7e7bd37ca8b%2F2019-07-22%2019.11.42.png?generation=1563790324431533&alt=media) ###Code debug = False import time start_time = time.time() import sys package_dir = "../input/pytorchpretrainedberthaqishen/pytorch-pretrained-bert/pytorch-pretrained-BERT/" sys.path = [package_dir] + sys.path import os import pickle import argparse import multiprocessing import numpy as np import pandas as pd import torch from torch import nn from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from pytorch_pretrained_bert.modeling import BertForSequenceClassification, BertConfig, BertModel, BertPreTrainedModel from pytorch_pretrained_bert.tokenization import BertTokenizer from pytorch_pretrained_bert.modeling_gpt2 import GPT2Config, GPT2Model, GPT2PreTrainedModel from pytorch_pretrained_bert.tokenization_gpt2 import GPT2Tokenizer from pytorch_pretrained_bert.modeling_xlnet import XLNetConfig, XLNetModel, XLNetPreTrainedModel from pytorch_pretrained_bert.tokenization_xlnet import XLNetTokenizer from scipy.stats import rankdata from nltk.tokenize.treebank import TreebankWordTokenizer Ttokenizer = TreebankWordTokenizer() symbols_to_isolate = '.,?!-;*"…:—()%#$&_/@\・ω+=”“[]^–>\\°<~•≠™ˈʊɒ∞§{}·τα❤☺ɡ|¢→̶`❥━┣┫┗O►★©―ɪ✔®\x96\x92●£♥➤´¹☕≈÷♡◐║▬′ɔː€۩۞†μ✒➥═☆ˌ◄½ʻπδηλσερνʃ✬SUPERIT☻±♍µº¾✓◾؟.⬅℅»Вав❣⋅¿¬♫CMβ█▓▒░⇒⭐›¡₂₃❧▰▔◞▀▂▃▄▅▆▇↙γ̄″☹➡«φ⅓„✋:¥̲̅́∙‛◇✏▷❓❗¶˚˙)сиʿ✨。ɑ\x80◕!%¯−flfi₁²ʌ¼⁴⁄₄⌠♭✘╪▶☭✭♪☔☠♂☃☎✈✌✰❆☙○‣⚓年∎ℒ▪▙☏⅛casǀ℮¸w‚∼‖ℳ❄←☼⋆ʒ⊂、⅔¨͡๏⚾⚽Φ×θ₩?(℃⏩☮⚠月✊❌⭕▸■⇌☐☑⚡☄ǫ╭∩╮,例>ʕɐ̣Δ₀✞┈╱╲▏▕┃╰▊▋╯┳┊≥☒↑☝ɹ✅☛♩☞AJB◔◡↓♀⬆̱ℏ\x91⠀ˤ╚↺⇤∏✾◦♬³の|/∵∴√Ω¤☜▲↳▫‿⬇✧ovm-208'‰≤∕ˆ⚜☁' symbols_to_delete = '\n🍕\r🐵😑\xa0\ue014\t\uf818\uf04a\xad😢🐶️\uf0e0😜😎👊\u200b\u200e😁عدويهصقأناخلىبمغر😍💖💵Е👎😀😂\u202a\u202c🔥😄🏻💥ᴍʏʀᴇɴᴅᴏᴀᴋʜᴜʟᴛᴄᴘʙғᴊᴡɢ😋👏שלוםבי😱‼\x81エンジ故障\u2009🚌ᴵ͞🌟😊😳😧🙀😐😕\u200f👍😮😃😘אעכח💩💯⛽🚄🏼ஜ😖ᴠ🚲‐😟😈💪🙏🎯🌹😇💔😡\x7f👌ἐὶήιὲκἀίῃἴξ🙄H😠\ufeff\u2028😉😤⛺🙂\u3000تحكسة👮💙فزط😏🍾🎉😞\u2008🏾😅😭👻😥😔😓🏽🎆🍻🍽🎶🌺🤔😪\x08‑🐰🐇🐱🙆😨🙃💕𝘊𝘦𝘳𝘢𝘵𝘰𝘤𝘺𝘴𝘪𝘧𝘮𝘣💗💚地獄谷улкнПоАН🐾🐕😆ה🔗🚽歌舞伎🙈😴🏿🤗🇺🇸мυтѕ⤵🏆🎃😩\u200a🌠🐟💫💰💎эпрд\x95🖐🙅⛲🍰🤐👆🙌\u2002💛🙁👀🙊🙉\u2004ˢᵒʳʸᴼᴷᴺʷᵗʰᵉᵘ\x13🚬🤓\ue602😵άοόςέὸתמדףנרךצט😒͝🆕👅👥👄🔄🔤👉👤👶👲🔛🎓\uf0b7\uf04c\x9f\x10成都😣⏺😌🤑🌏😯ех😲Ἰᾶὁ💞🚓🔔📚🏀👐\u202d💤🍇\ue613小土豆🏡❔⁉\u202f👠》कर्मा🇹🇼🌸蔡英文🌞🎲レクサス😛外国人关系Сб💋💀🎄💜🤢َِьыгя不是\x9c\x9d🗑\u2005💃📣👿༼つ༽😰ḷЗз▱ц🤣卖温哥华议会下降你失去所有的钱加拿大坏税骗子🐝ツ🎅\x85🍺آإشء🎵🌎͟ἔ油别克🤡🤥😬🤧й\u2003🚀🤴ʲшчИОРФДЯМюж😝🖑ὐύύ特殊作戦群щ💨圆明园קℐ🏈😺🌍⏏ệ🍔🐮🍁🍆🍑🌮🌯🤦\u200d𝓒𝓲𝓿𝓵안영하세요ЖљКћ🍀😫🤤ῦ我出生在了可以说普通话汉语好极🎼🕺🍸🥂🗽🎇🎊🆘🤠👩🖒🚪天一家⚲\u2006⚭⚆⬭⬯⏖新✀╌🇫🇷🇩🇪🇮🇬🇧😷🇨🇦ХШ🌐\x1f杀鸡给猴看ʁ𝗪𝗵𝗲𝗻𝘆𝗼𝘂𝗿𝗮𝗹𝗶𝘇𝗯𝘁𝗰𝘀𝘅𝗽𝘄𝗱📺ϖ\u2000үսᴦᎥһͺ\u2007հ\u2001ɩye൦lƽh𝐓𝐡𝐞𝐫𝐮𝐝𝐚𝐃𝐜𝐩𝐭𝐢𝐨𝐧Ƅᴨןᑯ໐ΤᏧ௦Іᴑ܁𝐬𝐰𝐲𝐛𝐦𝐯𝐑𝐙𝐣𝐇𝐂𝐘𝟎ԜТᗞ౦〔Ꭻ𝐳𝐔𝐱𝟔𝟓𝐅🐋ffi💘💓ё𝘥𝘯𝘶💐🌋🌄🌅𝙬𝙖𝙨𝙤𝙣𝙡𝙮𝙘𝙠𝙚𝙙𝙜𝙧𝙥𝙩𝙪𝙗𝙞𝙝𝙛👺🐷ℋ𝐀𝐥𝐪🚶𝙢Ἱ🤘ͦ💸ج패티W𝙇ᵻ👂👃ɜ🎫\uf0a7БУі🚢🚂ગુજરાતીῆ🏃𝓬𝓻𝓴𝓮𝓽𝓼☘﴾̯﴿₽\ue807𝑻𝒆𝒍𝒕𝒉𝒓𝒖𝒂𝒏𝒅𝒔𝒎𝒗𝒊👽😙\u200cЛ‒🎾👹⎌🏒⛸公寓养宠物吗🏄🐀🚑🤷操美𝒑𝒚𝒐𝑴🤙🐒欢迎来到阿拉斯ספ𝙫🐈𝒌𝙊𝙭𝙆𝙋𝙍𝘼𝙅ﷻ🦄巨收赢得白鬼愤怒要买额ẽ🚗🐳𝟏𝐟𝟖𝟑𝟕𝒄𝟗𝐠𝙄𝙃👇锟斤拷𝗢𝟳𝟱𝟬⦁マルハニチロ株式社⛷한국어ㄸㅓ니͜ʖ𝘿𝙔₵𝒩ℯ𝒾𝓁𝒶𝓉𝓇𝓊𝓃𝓈𝓅ℴ𝒻𝒽𝓀𝓌𝒸𝓎𝙏ζ𝙟𝘃𝗺𝟮𝟭𝟯𝟲👋🦊多伦🐽🎻🎹⛓🏹🍷🦆为和中友谊祝贺与其想象对法如直接问用自己猜本传教士没积唯认识基督徒曾经让相信耶稣复活死怪他但当们聊些政治题时候战胜因圣把全堂结婚孩恐惧且栗谓这样还♾🎸🤕🤒⛑🎁批判检讨🏝🦁🙋😶쥐스탱트뤼도석유가격인상이경제황을렵게만들지않록잘관리해야합다캐나에서대마초와화약금의품런성분갈때는반드시허된사용🔫👁凸ὰ💲🗯𝙈Ἄ𝒇𝒈𝒘𝒃𝑬𝑶𝕾𝖙𝖗𝖆𝖎𝖌𝖍𝖕𝖊𝖔𝖑𝖉𝖓𝖐𝖜𝖞𝖚𝖇𝕿𝖘𝖄𝖛𝖒𝖋𝖂𝕴𝖟𝖈𝕸👑🚿💡知彼百\uf005𝙀𝒛𝑲𝑳𝑾𝒋𝟒😦𝙒𝘾𝘽🏐𝘩𝘨ὼṑ𝑱𝑹𝑫𝑵𝑪🇰🇵👾ᓇᒧᔭᐃᐧᐦᑳᐨᓃᓂᑲᐸᑭᑎᓀᐣ🐄🎈🔨🐎🤞🐸💟🎰🌝🛳点击查版🍭𝑥𝑦𝑧NG👣\uf020っ🏉ф💭🎥Ξ🐴👨🤳🦍\x0b🍩𝑯𝒒😗𝟐🏂👳🍗🕉🐲چی𝑮𝗕𝗴🍒ꜥⲣⲏ🐑⏰鉄リ事件ї💊「」\uf203\uf09a\uf222\ue608\uf202\uf099\uf469\ue607\uf410\ue600燻製シ虚偽屁理屈Г𝑩𝑰𝒀𝑺🌤𝗳𝗜𝗙𝗦𝗧🍊ὺἈἡχῖΛ⤏🇳𝒙ψՁմեռայինրւդձ冬至ὀ𝒁🔹🤚🍎𝑷🐂💅𝘬𝘱𝘸𝘷𝘐𝘭𝘓𝘖𝘹𝘲𝘫کΒώ💢ΜΟΝΑΕ🇱♲𝝈↴💒⊘Ȼ🚴🖕🖤🥘📍👈➕🚫🎨🌑🐻𝐎𝐍𝐊𝑭🤖🎎😼🕷grntidufbk𝟰🇴🇭🇻🇲𝗞𝗭𝗘𝗤👼📉🍟🍦🌈🔭《🐊🐍\uf10aლڡ🐦\U0001f92f\U0001f92a🐡💳ἱ🙇𝗸𝗟𝗠𝗷🥜さようなら🔼' CONTRACTION_MAPPING = {"ain't": "is not", "aren't": "are not","can't": "cannot", "'cause": "because", "could've": "could have", "couldn't": "could not", "didn't": "did 
not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not", "he'd": "he would","he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is", "I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have","I'm": "I am", "I've": "I have", "i'd": "i would", "i'd've": "i would have", "i'll": "i will", "i'll've": "i will have","i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would", "it'd've": "it would have", "it'll": "it will", "it'll've": "it will have","it's": "it is", "let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have","mightn't": "might not","mightn't've": "might not have", "must've": "must have", "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have","o'clock": "of the clock", "oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have", "she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is", "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have","so's": "so as", "this's": "this is","that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would", "there'd've": "there would have", "there's": "there is", "here's": "here is","they'd": "they would", "they'd've": "they would have", "they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have", "wasn't": "was not", "we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are", "we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are", "what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is", "where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have", "why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have", "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all", "y'all'd": "you all would","y'all'd've": "you all would have","y'all're": "you all are","y'all've": "you all have","you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have", "you're": "you are", "you've": "you have" } isolate_dict = {ord(c):f' {c} ' for c in symbols_to_isolate} remove_dict = {ord(c):f'' for c in symbols_to_delete} from tqdm import tqdm import warnings import traceback warnings.filterwarnings(action='once') device = torch.device('cuda') print('Import done! 
Time past %.2f secs' % (time.time() - start_time)) # Pandas multiprocessing def _apply_df(args): df, func, kwargs = args return df.apply(func, **kwargs) def apply_by_multiprocessing(df, func, **kwargs): workers = kwargs.pop('workers') pool = multiprocessing.Pool(processes=workers) result = pool.map(_apply_df, [(d, func, kwargs) for d in np.array_split(df, workers)]) pool.close() return pd.concat(list(result)) class BertForJigsaw(BertPreTrainedModel): def __init__(self, config, out_dim=7): super(BertForJigsaw, self).__init__(config) self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.linear = nn.Linear(config.hidden_size, out_dim) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None): _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) pooled_output = self.dropout(pooled_output) logits = self.linear(pooled_output) return logits class GPT2ClassificationHeadModel(GPT2PreTrainedModel): def __init__(self, config, clf_dropout=0.4, out_dim=8): super(GPT2ClassificationHeadModel, self).__init__(config) self.transformer = GPT2Model(config) self.linear = nn.Linear(config.n_embd * 2, out_dim) nn.init.normal_(self.linear.weight, std = 0.02) nn.init.normal_(self.linear.bias, 0) self.apply(self.init_weights) def set_num_special_tokens(self, num_special_tokens): pass def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None, past=None, **kwargs): hidden_states, presents = self.transformer(input_ids, position_ids, token_type_ids, past) if isinstance(hidden_states, list): hidden_states = hidden_states[-1] avg_pool = torch.mean(hidden_states, 1) max_pool, _ = torch.max(hidden_states, 1) h_conc = torch.cat((avg_pool, max_pool), 1) return self.linear(h_conc) class XLNetForJigSaw(XLNetPreTrainedModel): def __init__(self, config, out_dim): super(XLNetForJigSaw, self).__init__(config) self.attn_type = config.attn_type self.same_length = config.same_length self.summary_type = "last" self.transformer = XLNetModel(config, output_attentions=False, keep_multihead_output=False) self.dense = nn.Linear(config.d_model, config.d_model) self.activation = nn.Tanh() self.linear = nn.Linear(config.d_model, out_dim, bias=True) self.apply(self.init_xlnet_weights) def forward(self, input_ids, seg_id=None, input_mask=None, mems=None, perm_mask=None, target_mapping=None, inp_q=None, target=None, output_all_encoded_layers=True, head_mask=None, **kargs): output, hidden_states, new_mems = self.transformer(input_ids, seg_id, input_mask, mems, perm_mask, target_mapping, inp_q, output_all_encoded_layers, head_mask) first_token_tensor = output[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return self.linear(pooled_output) def convert_line(row, max_seq_length, tokenizer, model_name='bert'): guid = row['id'] text_a = row['comment_text'] if 'label' in row.keys(): label = row['label'] else: label = None tokens_a = tokenizer.tokenize(text_a) if 'bert' in model_name: if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[:(max_seq_length - 2)] tokens = ["[CLS]"] + tokens_a + ["[SEP]"] input_ids = tokenizer.convert_tokens_to_ids(tokens) else: if len(tokens_a) > max_seq_length: tokens_a = tokens_a[:max_seq_length] tokens = tokens_a input_ids = tokenizer.convert_tokens_to_ids(tokens_a) segment_ids = [0] * len(tokens) input_mask = [1] * len(input_ids) padding = [0] * (max_seq_length - len(input_ids)) input_ids += 
padding input_mask += padding segment_ids += padding return input_ids, input_mask, segment_ids, label def preprocess(x): for k, v in CONTRACTION_MAPPING.items(): x = x.replace(' %s ' % k, ' %s ' % v) x = x.translate(remove_dict) x = x.translate(isolate_dict) x = Ttokenizer.tokenize(x) x = [x_[1:] if x_.startswith("'") else x_ for x_ in x] x = ' '.join(x) return x def get_input_data(test_data): all_input_ids, all_input_mask, all_segment_ids, all_label_ids = [], [], [], [] for i, (input_ids, input_mask, segment_ids, label) in test_data.items(): all_input_ids.append(input_ids) all_input_mask.append(input_mask) all_segment_ids.append(segment_ids) all_label_ids.append(label) all_input_ids = torch.tensor(all_input_ids, dtype=torch.long) all_input_mask = torch.tensor(all_input_mask, dtype=torch.long) all_segment_ids = torch.tensor(all_segment_ids, dtype=torch.long) try: all_label_ids = torch.tensor(all_label_ids, dtype=torch.float32) except: pass return all_input_ids, all_input_mask, all_segment_ids, all_label_ids print('Def functions done! Time past %.2f secs' % (time.time() - start_time)) print('Loading data...') df = pd.read_csv('../input/jigsaw-unintended-bias-in-toxicity-classification/test.csv') if debug: df = df.loc[:25] test_ids = df['id'].tolist() df['comment_text'] = df['comment_text'].astype(str) print('Preprocessing...') df['comment_text'] = apply_by_multiprocessing(df['comment_text'], preprocess, workers=4) print('Done! Time past %.2f secs' % (time.time() - start_time)) ###Output Loading data... Preprocessing... Done! Time past 44.53 secs ###Markdown Bert Small V2 29bin 300seq NAUX ###Code try: model_dir = '../input/jigsawmodels/bert_small_v2_29bin_naus_300seq/bert_small_v2_29bin_naus_300seq/' max_seq_length = 300 short_length = 100 tokenizer = BertTokenizer.from_pretrained(model_dir, do_lower_case=True) print('Converting data to sequences...') test_data = apply_by_multiprocessing(df, convert_line, axis=1, max_seq_length=max_seq_length, tokenizer=tokenizer, model_name='bert', workers=4) # takes 2 mins all_input_ids, all_input_mask, all_segment_ids, all_label_ids = get_input_data(test_data) long_idx = (all_input_ids[:, short_length-max_seq_length:].sum(1) > 0).nonzero().squeeze().numpy() short_idx = (all_input_ids[:, short_length-max_seq_length:].sum(1) == 0).nonzero().squeeze().numpy() print('Done! Time past %.2f secs' % (time.time() - start_time)) # Load a trained model and vocabulary that you have fine-tuned print('Loading model from %s ...' % model_dir) bert_config = BertConfig(os.path.join(model_dir, 'config.json')) model = BertForJigsaw(bert_config, out_dim=29+6) # with NAUX model.load_state_dict(torch.load('%s/pytorch_model.bin' % model_dir)) model.to(device) model.eval() print('Done! 
Time past %.2f secs' % (time.time() - start_time)) print('Predicting model Bert Small...') predictions_bert_small = np.zeros(df.shape[0]) with torch.no_grad(): for i, idx in enumerate([short_idx, long_idx]): test_data = TensorDataset(all_input_ids[idx], all_input_mask[idx], all_segment_ids[idx]) if i == 1 else \ TensorDataset(all_input_ids[idx, :short_length], all_input_mask[idx, :short_length], all_segment_ids[idx, :short_length]) test_sampler = SequentialSampler(test_data) test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=128) pred = [] for input_ids, input_mask, segment_ids in tqdm(test_dataloader): input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) logits = model(input_ids, segment_ids, input_mask, labels=None) logits = torch.sigmoid(logits).detach().cpu().numpy() pred.append(logits[:, :-6]) # remove NAUX predictions_bert_small[idx] = np.vstack(pred).mean(1) print('Done! Time past %.2f secs' % (time.time() - start_time)) except: print('Something wrong with Bert Small.') traceback.print_exc() ###Output Converting data to sequences... ###Markdown Bert Large V2 99bin 250seq ###Code try: model_dir = '../input/jigsawmodels/bert_large_v2_99bin_250seq/bert_large_v2_99bin_250seq/' max_seq_length = 250 short_length = 100 tokenizer = BertTokenizer.from_pretrained(model_dir, do_lower_case=True) print('Converting data to sequences...') test_data = apply_by_multiprocessing(df, convert_line, axis=1, max_seq_length=max_seq_length, tokenizer=tokenizer, model_name='bert', workers=4) # takes 2 mins all_input_ids, all_input_mask, all_segment_ids, all_label_ids = get_input_data(test_data) long_idx = (all_input_ids[:, short_length-max_seq_length:].sum(1) > 0).nonzero().squeeze().numpy() short_idx = (all_input_ids[:, short_length-max_seq_length:].sum(1) == 0).nonzero().squeeze().numpy() print('Done! Time past %.2f secs' % (time.time() - start_time)) # Load a trained model and vocabulary that you have fine-tuned print('Loading model from %s ...' % model_dir) bert_config = BertConfig(os.path.join(model_dir, 'config.json')) model = BertForJigsaw(bert_config, out_dim=99) model.load_state_dict(torch.load('%s/pytorch_model.bin' % model_dir)) model.to(device) model.eval() print('Done! Time past %.2f secs' % (time.time() - start_time)) print('Predicting model Bert Large') predictions_bert_large = np.zeros(df.shape[0]) with torch.no_grad(): for i, idx in enumerate([short_idx, long_idx]): test_data = TensorDataset(all_input_ids[idx], all_input_mask[idx], all_segment_ids[idx]) if i == 1 else \ TensorDataset(all_input_ids[idx, :short_length], all_input_mask[idx, :short_length], all_segment_ids[idx, :short_length]) test_sampler = SequentialSampler(test_data) test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=64) pred = [] for input_ids, input_mask, segment_ids in tqdm(test_dataloader): input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) logits = model(input_ids, segment_ids, input_mask, labels=None) logits = torch.sigmoid(logits).detach().cpu().numpy() pred.append(logits) predictions_bert_large[idx] = np.vstack(pred).mean(1) print('Done! Time past %.2f secs' % (time.time() - start_time)) except: print('Something wrong with Bert Large.') traceback.print_exc() ###Output Converting data to sequences... Done! Time past 783.48 secs Loading model from ../input/jigsawmodels/bert_large_v2_99bin_250seq/bert_large_v2_99bin_250seq/ ... Done! 
Time past 809.28 secs Predicting model Bert Large ###Markdown XLNet 9bin 220seq ###Code try: model_dir = '../input/jigsawmodels/xlnet_large_9bin_220seq/xlnet_large_9bin_220seq/' max_seq_length = 220 tokenizer = XLNetTokenizer.from_pretrained(model_dir) print('Converting to sequences...') test_data = [] for i, row in tqdm(df.iterrows()): test_data.append(convert_line( row, max_seq_length=max_seq_length, tokenizer=tokenizer, model_name='xlnet' )) test_data = pd.Series(test_data) all_input_ids, all_input_mask, all_segment_ids, all_label_ids = get_input_data(test_data) print('Done! Time past %.2f secs' % (time.time() - start_time)) test_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids) test_sampler = SequentialSampler(test_data) test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=32) # Load a trained model and vocabulary that you have fine-tuned print('Loading model from %s ...' % model_dir) xlnet_config = XLNetConfig(os.path.join(model_dir, 'config.json')) model = XLNetForJigSaw(xlnet_config, out_dim=9) model.load_state_dict(torch.load('%s/pytorch_model.bin' % model_dir)) model.to(device) model.eval() print('Done! Time past %.2f secs' % (time.time() - start_time)) print('Predicting model xlnet_large_v2_9bin_220seq') predictions_xlnet = [] with torch.no_grad(): for input_ids, input_mask, segment_ids in tqdm(test_dataloader): input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) logits = model(input_ids) logits = torch.sigmoid(logits).detach().cpu().numpy() predictions_xlnet.append(logits) predictions_xlnet = np.vstack(predictions_xlnet).mean(1) print('Done! Time past %.2f secs' % (time.time() - start_time)) except: print('Something wrong with xlnet_large_v2_9bin_220seq.') traceback.print_exc() ###Output ../input/pytorchpretrainedberthaqishen/pytorch-pretrained-bert/pytorch-pretrained-BERT/pytorch_pretrained_bert/tokenization_xlnet.py:124: ResourceWarning: unclosed file <_io.TextIOWrapper name='../input/jigsawmodels/xlnet_large_9bin_220seq/xlnet_large_9bin_220seq/special_tokens.txt' mode='r' encoding='utf-8'> special_tokens = open(special_tokens_file, encoding='utf-8').read().split('\n')[:-1] ResourceWarning: Enable tracemalloc to get the object allocation traceback 466it [00:00, 2345.73it/s] ###Markdown GPT2 V2 29bin 350seq NAUX ###Code try: model_dir = '../input/jigsawmodels/gpt2_29bin_350seq_aus/gpt2_29bin_350seq_aus/' max_seq_length = 350 tokenizer = GPT2Tokenizer.from_pretrained(model_dir) print('Converting data to sequences...') test_data = apply_by_multiprocessing(df, convert_line, axis=1, max_seq_length=max_seq_length, tokenizer=tokenizer, model_name='gpt2', workers=4) # takes 2 mins all_input_ids, all_input_mask, all_segment_ids, all_label_ids = get_input_data(test_data) print('Done! Time past %.2f secs' % (time.time() - start_time)) test_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids) test_sampler = SequentialSampler(test_data) test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=64) # Load a trained model and vocabulary that you have fine-tuned print('Loading model from %s ...' % model_dir) gpt2_config = GPT2Config(os.path.join(model_dir, 'config.json')) model = GPT2ClassificationHeadModel(gpt2_config, out_dim=29+6) # with NAUX model.load_state_dict(torch.load('%s/pytorch_model.bin' % model_dir)) model.to(device) model.eval() print('Done! 
Time past %.2f secs' % (time.time() - start_time)) print('Predicting model GPT2...') predictions_gpt2 = [] with torch.no_grad(): for input_ids, input_mask, segment_ids in tqdm(test_dataloader): if (time.time() - start_time) > 7140: print('STOP GPT2 FOR THE TIME SACK.') break input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) logits = model( input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask, ) logits = torch.sigmoid(logits).detach().cpu().numpy() predictions_gpt2.append(logits[:, :-6]) # remove NAUX predictions_gpt2 = np.vstack(predictions_gpt2).mean(1) print('Done! Time past %.2f secs' % (time.time() - start_time)) except: print('Something wrong with GPT2:') traceback.print_exc() print('But the program is still running') ###Output Converting data to sequences... ###Markdown Output ###Code predictions = np.zeros(df.shape[0]) try: predictions += predictions_bert_small * 0.9 except: pass try: predictions += predictions_bert_large * 1.0 except: pass try: predictions += predictions_gpt2 * 0.7 except: pass try: predictions += predictions_xlnet * 0.8 except: pass df_prediction = pd.DataFrame({ 'id': test_ids, 'prediction': predictions, }) df_prediction.to_csv('./submission.csv', index=False) print('Output done! Time past %.2f secs' % (time.time() - start_time)) print(df_prediction.head(25)) ###Output id prediction 0 7097320 0.033548 1 7097321 0.323419 2 7097322 0.720957 3 7097323 0.163971 4 7097324 0.074378 5 7097325 0.028733 6 7097326 2.784101 7 7097327 1.447162 8 7097328 0.015274 9 7097329 0.257397 10 7097330 0.065558 11 7097331 1.174090 12 7097332 0.371684 13 7097333 0.062194 14 7097334 0.606756 15 7097335 0.023541 16 7097336 0.088089 17 7097337 0.016890 18 7097338 0.026258 19 7097339 2.117447 20 7097340 0.047244 21 7097341 0.326539 22 7097342 0.069543 23 7097343 0.015998 24 7097344 0.067549
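###Markdown A note on the blend above: the final `prediction` column is an unnormalised weighted sum of four model scores, so several values in the printed head exceed 1. That is harmless for a rank-based evaluation such as ROC-AUC, which is invariant under monotonic rescaling. If a bounded score were preferred, the same weights could be applied to rank-normalised predictions instead. The sketch below is only an illustration and not part of the original ensemble; it reuses `rankdata`, which is imported at the top of the kernel but otherwise unused, and the commented-out call refers to the prediction arrays computed in the cells above. ###Code
# A minimal sketch (not the original blending code): weighted average of
# rank-normalised model predictions, so the result always stays in (0, 1].
import numpy as np
from scipy.stats import rankdata

def blend_by_rank(model_preds, weights):
    """Blend model outputs by their ranks rather than their raw sigmoid scores."""
    blended = np.zeros_like(model_preds[0], dtype=np.float64)
    for pred, weight in zip(model_preds, weights):
        # rankdata returns ranks 1..n; dividing by n maps them into (0, 1]
        blended += weight * rankdata(pred) / len(pred)
    return blended / sum(weights)

# Hypothetical usage with the arrays computed above:
# predictions = blend_by_rank(
#     [predictions_bert_small, predictions_bert_large,
#      predictions_gpt2, predictions_xlnet],
#     [0.9, 1.0, 0.7, 0.8],
# )
###Output _____no_output_____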
docs/ulab-manual.ipynb
###Markdown Notebook conversion ###Code import nbformat as nb import nbformat.v4.nbbase as nb4 from nbconvert import RSTExporter def convert_notebook(node, fn): (rst, resources) = rstexporter.from_notebook_node(notebook) with open(fn, 'w') as fout: fout.write(rst) rstexporter = RSTExporter() rstexporter.template_file = './templates/manual.tpl' source = nb.read('ulab-manual.ipynb', nb.NO_CONVERT) append_cell = False notebook = nb4.new_notebook() for j, cell in enumerate(source['cells']): if cell['cell_type'] == 'markdown': # skip everything before Introduction if cell['source'].split('\n')[0].startswith('# Introduction'): append_cell = True if append_cell: notebook.cells.append(cell) convert_notebook(notebook,'./manual/source/ulab.rst') %%writefile ./templates/manual.tpl {%- extends 'display_priority.tpl' -%} {% block in_prompt %} {% endblock in_prompt %} {% block output_prompt %} {% endblock output_prompt %} {% block input scoped%} {%- if cell.source.split('\n')[0].startswith('%%micropython') -%} .. code:: {{ '\n'.join(['# code to be run in micropython'] + cell.source.strip().split('\n')[1:]) | indent}} {%- else -%} .. code:: {{ '\n'.join(['# code to be run in CPython\n'] + cell.source.strip().split('\n')) | indent}} {%- endif -%} {% endblock input %} {% block error %} :: {{ super() }} {% endblock error %} {% block traceback_line %} {{ line | indent | strip_ansi }} {% endblock traceback_line %} {% block execute_result %} {% block data_priority scoped %} {{ super() }} {% endblock %} {% endblock execute_result %} {% block stream %} .. parsed-literal:: {{ output.text | indent }} {% endblock stream %} {% block data_svg %} .. image:: {{ output.metadata.filenames['image/svg+xml'] | urlencode }} {% endblock data_svg %} {% block data_png %} .. image:: {{ output.metadata.filenames['image/png'] | urlencode }} {%- set width=output | get_metadata('width', 'image/png') -%} {%- if width is not none %} :width: {{ width }}px {%- endif %} {%- set height=output | get_metadata('height', 'image/png') -%} {%- if height is not none %} :height: {{ height }}px {%- endif %} {% endblock data_png %} {% block data_jpg %} .. image:: {{ output.metadata.filenames['image/jpeg'] | urlencode }} {%- set width=output | get_metadata('width', 'image/jpeg') -%} {%- if width is not none %} :width: {{ width }}px {%- endif %} {%- set height=output | get_metadata('height', 'image/jpeg') -%} {%- if height is not none %} :height: {{ height }}px {%- endif %} {% endblock data_jpg %} {% block data_markdown %} {{ output.data['text/markdown'] | convert_pandoc("markdown", "rst") }} {% endblock data_markdown %} {% block data_latex %} .. math:: {{ output.data['text/latex'] | strip_dollars | indent }} {% endblock data_latex %} {% block data_text scoped %} .. parsed-literal:: {{ output.data['text/plain'] | indent }} {% endblock data_text %} {% block data_html scoped %} .. 
raw:: html {{ output.data['text/html'] | indent }} {% endblock data_html %} {% block markdowncell scoped %} {{ cell.source | convert_pandoc("markdown", "rst") }} {% endblock markdowncell %} {%- block rawcell scoped -%} {%- if cell.metadata.get('raw_mimetype', '').lower() in resources.get('raw_mimetypes', ['']) %} {{cell.source}} {% endif -%} {%- endblock rawcell -%} {% block headingcell scoped %} {{ ("#" * cell.level + cell.source) | replace('\n', ' ') | convert_pandoc("markdown", "rst") }} {% endblock headingcell %} {% block unknowncell scoped %} unknown type {{cell.type}} {% endblock unknowncell %} ###Output Overwriting ./templates/manual.tpl ###Markdown Notebook magic ###Code from IPython.core.magic import Magics, magics_class, line_cell_magic from IPython.core.magic import cell_magic, register_cell_magic, register_line_magic from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring import subprocess import os @magics_class class PyboardMagic(Magics): @cell_magic @magic_arguments() @argument('-skip') @argument('-unix') @argument('-pyboard') @argument('-file') @argument('-data') @argument('-time') @argument('-memory') def micropython(self, line='', cell=None): args = parse_argstring(self.micropython, line) if args.skip: # doesn't care about the cell's content print('skipped execution') return None # do not parse the rest if args.unix: # tests the code on the unix port. Note that this works on unix only with open('/dev/shm/micropython.py', 'w') as fout: fout.write(cell) proc = subprocess.Popen(["../../micropython/ports/unix/micropython", "/dev/shm/micropython.py"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) print(proc.stdout.read().decode("utf-8")) print(proc.stderr.read().decode("utf-8")) return None if args.file: # can be used to copy the cell content onto the pyboard's flash spaces = " " try: with open(args.file, 'w') as fout: fout.write(cell.replace('\t', spaces)) printf('written cell to {}'.format(args.file)) except: print('Failed to write to disc!') return None # do not parse the rest if args.data: # can be used to load data from the pyboard directly into kernel space message = pyb.exec(cell) if len(message) == 0: print('pyboard >>>') else: print(message.decode('utf-8')) # register new variable in user namespace self.shell.user_ns[args.data] = string_to_matrix(message.decode("utf-8")) if args.time: # measures the time of executions pyb.exec('import utime') message = pyb.exec('t = utime.ticks_us()\n' + cell + '\ndelta = utime.ticks_diff(utime.ticks_us(), t)' + "\nprint('execution time: {:d} us'.format(delta))") print(message.decode('utf-8')) if args.memory: # prints out memory information message = pyb.exec('from micropython import mem_info\nprint(mem_info())\n') print("memory before execution:\n========================\n", message.decode('utf-8')) message = pyb.exec(cell) print(">>> ", message.decode('utf-8')) message = pyb.exec('print(mem_info())') print("memory after execution:\n========================\n", message.decode('utf-8')) if args.pyboard: message = pyb.exec(cell) print(message.decode('utf-8')) ip = get_ipython() ip.register_magics(PyboardMagic) ###Output _____no_output_____ ###Markdown pyboard ###Code import pyboard pyb = pyboard.Pyboard('/dev/ttyACM0') pyb.enter_raw_repl() pyb.exit_raw_repl() pyb.close() %%micropython -pyboard 1 import utime import ulab as np def timeit(n=1000): def wrapper(f, *args, **kwargs): func_name = str(f).split(' ')[1] def new_func(*args, **kwargs): run_times = np.zeros(n, dtype=np.uint16) for i in range(n): t = 
utime.ticks_us() result = f(*args, **kwargs) run_times[i] = utime.ticks_diff(utime.ticks_us(), t) print('{}() execution times based on {} cycles'.format(func_name, n, (delta2-delta1)/n)) print('\tbest: %d us'%np.min(run_times)) print('\tworst: %d us'%np.max(run_times)) print('\taverage: %d us'%np.mean(run_times)) print('\tdeviation: +/-%.3f us'%np.std(run_times)) return result return new_func return wrapper def timeit(f, *args, **kwargs): func_name = str(f).split(' ')[1] def new_func(*args, **kwargs): t = utime.ticks_us() result = f(*args, **kwargs) print('execution time: ', utime.ticks_diff(utime.ticks_us(), t), ' us') return result return new_func ###Output ###Markdown Introduction In the [last chapter](https://micropython-usermod.readthedocs.io/en/latest/usermods_15.html) of the usermod documentation, I mentioned that I have another story, for another day. The day has come, so here is my story. Enter ulab`ulab` is a `numpy`-like module for `micropython`, meant to simplify and speed up common mathematical operations on arrays. The primary goal was to implement a small subset of `numpy` that might be useful in the context of a microcontroller. This means low-level data processing of linear (array) and two-dimensional (matrix) data. PurposeOf course, the first question that one has to answer is, why on Earth one would need a fast math library on a microcontroller. After all, it is not expected that heavy number crunching is going to take place on bare metal. It is not meant to. On a PC, the main reason for writing fast code is the sheer amount of data that one wants to process. On a microcontroller, the data volume is probably small, but it might lead to catastrophic system failure, if these data are not processed in time, because the microcontroller is supposed to interact with the outside world in a timely fashion. In fact, this latter objective was the initiator of this project: I needed the Fourier transform of a signal coming from the ADC of the pyboard, and all available options were simply too slow. In addition to speed, another issue that one has to keep in mind when working with embedded systems is the amount of available RAM: I believe, everything here could be implemented in pure python with relatively little effort, but the price we would have to pay for that is not only speed, but RAM, too. python code, if is not frozen, and compiled into the firmware, has to be compiled at runtime, which is not exactly a cheap process. On top of that, if numbers are stored in a list or tuple, which would be the high-level container, then they occupy 8 bytes, no matter, whether they are all smaller than 100, or larger than one hundred million. This is obviously a waste of resources in an environment, where resources are scarce. Finally, there is a reason for using `micropython` in the first place. Namely, that a microcontroller can be programmed in a very elegant, and *pythonic* way. But if it is so, why should we not extend this idea to other tasks and concepts that might come up in this context? If there was no other reason than this *elegance*, I would find that convincing enough.Based on the above-mentioned considerations, all functions in `ulab` are implemented in a way that 1. conforms to `numpy` as much as possible2. is so frugal with RAM as possible,3. and yet, fast. Much faster than pure python. Think of a number between 30 and 50!The main points of `ulab` are - compact, iterable and slicable containers of numerical data in 1, and 2 dimensions (arrays and matrices). 
These containers support all the relevant unary and binary operators (e.g., `len`, ==, +, *, etc.)- vectorised computations on micropython iterables and numerical arrays/matrices (in `numpy`-speak, universal functions)- basic linear algebra routines (matrix inversion, multiplication, reshaping, transposition, determinant, and eigenvalues)- polynomial fits to numerical data- fast Fourier transformsAt the time of writing this manual (for version 0.54.0), the library adds approximately 40 kB of extra compiled code to the micropython (pyboard.v.11) firmware. However, if you are tight with flash space, you can easily shave off a couple of kB. See the section on [customising ulab](Custom_builds). Resources and legal mattersThe source code of the module can be found under https://github.com/v923z/micropython-ulab/tree/master/code. The source of this user manual is under https://github.com/v923z/micropython-ulab/tree/master/docs, while the technical details of the implementation are discussed at great length in https://github.com/v923z/micropython-ulab/tree/master/docs/ulab.ipynb. If you want an even thorougher explanation on why the various constructs of the implementation work, and work in that particular way, you can read more on the subject under https://micropython-usermod.readthedocs.io/en/latest/, where I demonstrate, what you have to do, if you want to make a C object behave in a *pythonic* way. The MIT licence applies to all material. Friendly requestIf you use `ulab`, and bump into a bug, or think that a particular function is missing, or its behaviour does not conform to `numpy`, please, raise a [ulab issue](https://github.com/v923z/micropython-ulab/issues) on github, so that the community can profit from your experiences. Even better, if you find the project useful, and think that it could be made better, faster, tighter, and shinier, please, consider contributing, and issue a pull request with the implementation of your improvements and new features. `ulab` can only become successful, if it offers what the community needs.These last comments apply to the documentation, too. If, in your opinion, the documentation is obscure, misleading, or not detailed enough, please, let me know, so that *we* can fix it. Differences between micropython-ulab and circuitpython-ulab`ulab` has originally been developed for `micropython`, but has since been integrated into a number of its flavours. Most of these flavours are simply forks of `micropython` itself, with some additional functionality. One of the notable exceptions is `circuitpython`, which has slightly diverged at the core level, and this has some minor consequences. Some of these concern the C implementation details only, which all have been sorted out with the generous and enthusiastic support of Jeff Epler from [Adafruit Industries](http://www.adafruit.com).There are, however, a couple of instances, where the usage in the two environments is slightly different at the python level. These are how the packages can be imported, and how the class properties can be accessed. In both cases, the `circuitpython` implementation results in `numpy`-conform code. `numpy`-compatibility in `micropython` will be implemented as soon as `micropython` itself has the required tools. Till then we have to live with a workaround, which I will point out at the relevant places. Customising `ulab``ulab` implements a great number of functions, which are organised in sub-modules. 
E.g., functions related to Fourier transforms are located in the `ulab.fft` sub-module, so you would import `fft` as```pythonimport ulabfrom ulab import fft```by which point you can get the FFT of your data by calling `fft.fft(...)`. The idea of such grouping of functions and methods is to provide a means for granularity: It is quite possible that you do not need all functions in a particular application. If you want to save some flash space, you can easily exclude arbitrary sub-modules from the firmware. The [ulab.h](https://github.com/v923z/micropython-ulab/blob/master/code/ulab.h) header file contains a pre-processor flag for each sub-module. The default setting is 1 for each of them. Setting them to 0 removes the module from the compiled firmware. The first couple of lines of the file look like this```c// vectorise (all functions) takes approx. 6 kB of flash spacedefine ULAB_VECTORISE_MODULE (1)// linalg adds around 6 kBdefine ULAB_LINALG_MODULE (1)// poly requires approx. 2.5 kBdefine ULAB_POLY_MODULE (1)```In order to simplify navigation in the header, each flag begins with `ULAB_`, and continues with the name of the sub-module. This name is also the `.c` file, where the sub-module is implemented. So, e.g., the linear algebra routines can be found in `linalg.c`, and the corresponding compiler flag is `ULAB_LINALG_MODULE`. Each section displays a hint as to how much space you can save by un-setting the flag.At first, having to import everything in this way might appear to be overly complicated, but there is a very good reason behind all this: you can find out at the time of importing, whether a function or sub-module is part of your `ulab` firmware, or not. The alternative, namely, that you do not have to import anything beyond `ulab`, could prove catastrophic: you would learn only at run time (at the moment of calling the function in your code) that a particular function is not in the firmware, and that is most probably too late.Except for `fft`, the standard sub-modules, `vector`, `linalg`, `numerical`, and `poly` are all `numpy`-compatible. User-defined functions that accept `ndarray`s as their argument should be implemented in the `user` sub-module, or its sub-modules. Hints as to how to do that can be found in the section [Extending ulab](Extending-ulab). Supported functions and methods`ulab` supports a number of array operators, which are listed here. I tried to follow the specifications of the `numpy` interface as closely as possible, though, it was not always practical to implement verbatim behaviour. The differences, if any, are in each case small (e.g., a function cannot take all possible keyword arguments), and should not hinder everyday use. In the list below, a single asterisk denotes slight deviations from `numpy`'s nomenclature, and a double asterisk denotes those cases, where a bit more caution should be exercised, though this usually means functions that are not supported by `numpy`.The detailed discussion of the various functions always contains a link to the corresponding `numpy` documentation. However, before going down the rabbit hole, the module also defines a constant, the version, which can always be queried as ###Code %%micropython -unix 1 import ulab as np print('you are running ulab version', np.__version__) ###Output you are running ulab version 0.50.2 ###Markdown If you find a bug, please, include this number in your report! 
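In the same spirit of checking what a given firmware provides, code that has to run on several builds can probe for optional sub-modules at import time. The snippet below is a minimal sketch of such a check (following the naming rule above, the flag controlling this particular sub-module would be `ULAB_FFT_MODULE`); the fallback behaviour is of course application-specific. ###Code
%%micropython -unix 1

import ulab

try:
    # optional sub-module: absent if it was compiled out of the firmware
    from ulab import fft
except ImportError:
    fft = None

print('fft available:', fft is not None)
###Output _____no_output_____ ###Markdown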
Basic ndarray operations[Unary operators](Unary-operators)[Binary operators](Binary-operators)[Indexing and slicing](Slicing-and-indexing)[ndarray iterators](Iterating-over-arrays)[Comparison operators*](Comparison-operators)[Universal functions](Universal-functions) (also support function calls on general iterables, and vectorisation of user-defined `python` functions.) Methods of ndarrays[.shape*](.shape)[size*](size)[itemsize*](itemsize)[.reshape](.reshape)[.transpose](.transpose)[.flatten**](.flatten) Matrix methods[inv](inv)[dot](dot)[det](det)[roll](roll)[flip](flip) Array initialisation functions[arange](arange)[eye](eye)[ones](ones,-zeros)[zeros](ones,-zeros)[linspace](linspace) Statistical and other properties of arrays[min](min,-argmin,-max,-argmax)[argmin](min,-argmin,-max,-argmax)[max](min,-argmin,-max,-argmax)[argmax](min,-argmin,-max,-argmax)[sum](sum,-std,-mean)[std](sum,-std,-mean)[mean](sum,-std,-mean)[diff](diff)[sort](sort)[argsort](argsort) Linear algebra functions[size](size)[inv](inv)[norm](norm)[dot](dot)[det](det)[eig](eig)[cholesky](cholesky)[trace](trace) Manipulation of polynomials[polyval](polyval)[polyfit](polyfit) FFT routines[fft**](fft)[ifft**](ifft)[spectrogram**](spectrogram) Filter functions[convolve](convolve)[sosfilt](sosfilt) Comparison of arrays[equal](equal,-not_equal)[not_equal](equal,-not_equal)[minimum](minimum)[maximum](maximum)[clip](clip) Interpolation, root finding, function minimasation[interp](interp)[newton](newton)[fmin](fmin)[bisect](bisect)[trapz](trapz) ndarray, the basic containerThe `ndarray` is the underlying container of numerical data. It is derived from micropython's own `array` object, but has a great number of extra features starting with how it can be initialised, which operations can be done on it, and which functions can accept it as an argument. One important property of an `ndarray` is that it is also a proper `micropython` iterable.Since the `ndarray` is a binary container, it is also compact, meaning that it takes only a couple of bytes of extra RAM in addition to what is required for storing the numbers themselves. `ndarray`s are also type-aware, i.e., one can save RAM by specifying a data type, and using the smallest reasonable one. Five such types are defined, namely `uint8`, `int8`, which occupy a single byte of memory per datum, `uint16`, and `int16`, which occupy two bytes per datum, and `float`, which occupies four or eight bytes per datum. The precision/size of the `float` type depends on the definition of `mp_float_t`. Some platforms, e.g., the PYBD, implement `double`s, but some, e.g., the pyboard.v.11, don't. You can find out, what type of float your particular platform implements by looking at the output of the [.itemsize](.itemsize) class property.On the following pages, we will see how one can work with `ndarray`s. Those familiar with `numpy` should find that the nomenclature and naming conventions of `numpy` are adhered to as closely as possible. I will point out the few differences, where necessary.For the sake of comparison, in addition to the `ulab` code snippets, sometimes the equivalent `numpy` code is also presented. You can find out, where the snippet is supposed to run by looking at its first line, the header.Hint: you can easily port existing `numpy` code, if you `import ulab as np`. Initialising an arrayA new array can be created by passing either a standard micropython iterable, or another `ndarray` into the constructor. 
Initialising by passing iterablesIf the iterable is one-dimensional, i.e., one whose elements are numbers, then a row vector will be created and returned. If the iterable is two-dimensional, i.e., one whose elements are again iterables, a matrix will be created. If the lengths of the iterables are not consistent, a `ValueError` will be raised. Iterables of different types can be mixed in the initialisation function. If the `dtype` keyword with the possible `uint8/int8/uint16/int16/float` values is supplied, the new `ndarray` will have that type, otherwise, it assumes `float` as default. ###Code %%micropython -unix 1 import ulab as np a = [1, 2, 3, 4, 5, 6, 7, 8] b = np.array(a) print("a:\t", a) print("b:\t", b) # a two-dimensional array with mixed-type initialisers c = np.array([range(5), range(20, 25, 1), [44, 55, 66, 77, 88]], dtype=np.uint8) print("\nc:\t", c) # and now we throw an exception d = np.array([range(5), range(10), [44, 55, 66, 77, 88]], dtype=np.uint8) print("\nd:\t", d) ###Output a: [1, 2, 3, 4, 5, 6, 7, 8] b: array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) c: array([[0, 1, 2, 3, 4], [20, 21, 22, 23, 24], [44, 55, 66, 77, 88]], dtype=uint8) Traceback (most recent call last): File "/dev/shm/micropython.py", line 15, in <module> ValueError: iterables are not of the same length ###Markdown Initialising by passing arraysAn `ndarray` can be initialised by supplying another array. This statement is almost trivial, since `ndarray`s are iterables themselves, though it should be pointed out that initialising through arrays is faster, because simply a new copy is created, without inspection, iteration etc. It is also possible to coerce type conversion of the output (with type conversion, the iteration cannot be avoided, therefore, this case will always be slower than straight copying): ###Code %%micropython -unix 1 import ulab as np a = [1, 2, 3, 4, 5, 6, 7, 8] b = np.array(a) c = np.array(b) d = np.array(b, dtype=np.uint8) print("a:\t", a) print("\nb:\t", b) print("\nc:\t", c) print("\nd:\t", d) ###Output a: [1, 2, 3, 4, 5, 6, 7, 8] b: array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) c: array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) d: array([1, 2, 3, 4, 5, 6, 7, 8], dtype=uint8) ###Markdown Note that the default type of the `ndarray` is `float`. Hence, if the array is initialised from another array, type conversion will always take place, except, when the output type is specifically supplied. I.e., ###Code %%micropython -unix 1 import ulab as np a = np.array(range(5), dtype=np.uint8) b = np.array(a) print("a:\t", a) print("\nb:\t", b) ###Output a: array([0, 1, 2, 3, 4], dtype=uint8) b: array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=float) ###Markdown will iterate over the elements in `a`, since in the assignment `b = np.array(a)` no output type was given, therefore, `float` was assumed. On the other hand, ###Code %%micropython -unix 1 import ulab as np a = np.array(range(5), dtype=np.uint8) b = np.array(a, dtype=np.uint8) print("a:\t", a) print("\nb:\t", b) ###Output a: array([0, 1, 2, 3, 4], dtype=uint8) b: array([0, 1, 2, 3, 4], dtype=uint8) ###Markdown will simply copy the content of `a` into `b` without any iteration, and will, therefore, be faster. Keep this in mind, whenever the output type, or performance is important. Array initialisation functionsThere are four functions that can be used for initialising an array. These are bound to `ulab` itself at the top level, i.e., no module has to be imported for the function invocations. 
arange`numpy`: https://numpy.org/doc/stable/reference/generated/numpy.arange.htmlThe function returns a one-dimensional array with evenly spaced values. Takes 3 positional arguments (two are optional), and the `dtype` keyword argument. ###Code %%micropython -unix 1 import ulab print(ulab.arange(10)) print(ulab.arange(2, 10)) print(ulab.arange(2, 10, 3)) print(ulab.arange(2, 10, 3, dtype=ulab.float)) ###Output array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=int16) array([2, 3, 4, 5, 6, 7, 8, 9], dtype=int16) array([2, 5, 8], dtype=int16) array([2.0, 5.0, 8.0], dtype=float) ###Markdown ones, zeros`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.zeros.html`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ones.htmlA couple of special arrays and matrices can easily be initialised by calling one of the `ones`, or `zeros` functions. `ones` and `zeros` follow the same pattern, and have the call signature```pythonones(shape, dtype=float)zeros(shape, dtype=float)```where shape is either an integer, or a 2-tuple. ###Code %%micropython -unix 1 import ulab as np print(np.ones(6, dtype=np.uint8)) print(np.zeros((6, 4))) ###Output array([1, 1, 1, 1, 1, 1], dtype=uint8) array([[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], dtype=float) ###Markdown eye`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.eye.htmlAnother special array method is the `eye` function, whose call signature is ```pythoneye(N, M, k=0, dtype=float)```where `N` (`M`) specify the dimensions of the matrix (if only `N` is supplied, then we get a square matrix, otherwise one with `M` rows, and `N` columns), and `k` is the shift of the ones (the main diagonal corresponds to `k=0`). Here are a couple of examples. With a single argument ###Code %%micropython -unix 1 import ulab as np print(np.eye(5)) ###Output array([[1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0]], dtype=float) ###Markdown Specifying the dimensions of the matrix ###Code ### Shifting the diagonal %%micropython -unix 1 import ulab as np print(np.eye(4, M=6, k=-1, dtype=np.int16)) %%micropython -unix 1 import ulab as np print(np.eye(4, M=6, dtype=np.int8)) ###Output array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=int8) ###Markdown linspace`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.htmlThis function returns an array, whose elements are uniformly spaced between the `start`, and `stop` points. The number of intervals is determined by the `num` keyword argument, whose default value is 50. With the `endpoint` keyword argument (defaults to `True`) one can include `stop` in the sequence. In addition, the `dtype` keyword can be supplied to force type conversion of the output. The default is `float`. Note that, when `dtype` is of integer type, the sequence is not necessarily evenly spaced. This is not an error, rather a consequence of rounding. (This is also the `numpy` behaviour.) 
###Code %%micropython -unix 1 import ulab as np # generate a sequence with defaults print('default sequence:\t', np.linspace(0, 10)) # num=5 print('num=5:\t\t\t', np.linspace(0, 10, num=5)) # num=5, endpoint=False print('num=5:\t\t\t', np.linspace(0, 10, num=5, endpoint=False)) # num=5, endpoint=False, dtype=uint8 print('num=5:\t\t\t', np.linspace(0, 5, num=7, endpoint=False, dtype=np.uint8)) ###Output default sequence: array([0.0, 0.2040816396474838, 0.4081632792949677, ..., 9.591833114624023, 9.795914649963379, 9.999996185302734], dtype=float) num=5: array([0.0, 2.5, 5.0, 7.5, 10.0], dtype=float) num=5: array([0.0, 2.0, 4.0, 6.0, 8.0], dtype=float) num=5: array([0, 0, 1, 2, 2, 3, 4], dtype=uint8) ###Markdown Customising array printouts `ndarray`s are pretty-printed, i.e., if the length is larger than 10 (default value), then only the first and last three entries will be printed. Also note that, as opposed to `numpy`, the printout always contains the `dtype`. ###Code %%micropython -unix 1 import ulab as np a = np.array(range(200)) print("a:\t", a) ###Output a: array([0.0, 1.0, 2.0, ..., 197.0, 198.0, 199.0], dtype=float) ###Markdown set_printoptionsThe default values can be overwritten by means of the `set_printoptions` function [numpy.set_printoptions](https://numpy.org/doc/1.18/reference/generated/numpy.set_printoptions.html), which accepts two keywords arguments, the `threshold`, and the `edgeitems`. The first of these arguments determines the length of the longest array that will be printed in full, while the second is the number of items that will be printed on the left and right hand side of the ellipsis, if the array is longer than `threshold`. ###Code %%micropython -unix 1 import ulab as np a = np.array(range(20)) print("a printed with defaults:\t", a) np.set_printoptions(threshold=200) print("\na printed in full:\t\t", a) np.set_printoptions(threshold=10, edgeitems=2) print("\na truncated with 2 edgeitems:\t", a) ###Output a printed with defaults: array([0.0, 1.0, 2.0, ..., 17.0, 18.0, 19.0], dtype=float) a printed in full: array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0], dtype=float) a truncated with 2 edgeitems: array([0.0, 1.0, ..., 18.0, 19.0], dtype=float) ###Markdown get_printoptionsThe set value of the `threshold` and `edgeitems` can be retrieved by calling the `get_printoptions` function with no arguments. The function returns a dictionary with two keys. ###Code %%micropython -unix 1 import ulab as np np.set_printoptions(threshold=100, edgeitems=20) print(np.get_printoptions()) ###Output {'threshold': 100, 'edgeitems': 20} ###Markdown Methods of ndarrays .shapeThe `.shape` method (property) returns a 2-tuple with the number of rows, and columns. 
**WARNING:** In `circuitpython`, you can call the method as a property, i.e., ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4], dtype=np.int8) print("a:\n", a) print("shape of a:", a.shape) b= np.array([[1, 2], [3, 4]], dtype=np.int8) print("\nb:\n", b) print("shape of b:", b.shape) ###Output a: array([1, 2, 3, 4], dtype=int8) shape of a: (1, 4) b: array([[1, 2], [3, 4]], dtype=int8) shape of b: (2, 2) ###Markdown **WARNING:** On the other hand, since properties are not implemented in `micropython`, there you would call the method as a function, i.e., ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4], dtype=np.int8) print("a:\n", a) print("shape of a:", a.shape) b= np.array([[1, 2], [3, 4]], dtype=np.int8) print("\nb:\n", b) print("shape of b:", b.shape()) ###Output a: array([1, 2, 3, 4], dtype=int8) shape of a: (1, 4) b: array([[1, 2], [3, 4]], dtype=int8) shape of b: (2, 2) ###Markdown .sizeThe `.size` method (property) returns an integer with the number of elements in the array. **WARNING:** In `circuitpython`, the `numpy` nomenclature applies, i.e., ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3], dtype=np.int8) print("a:\n", a) print("size of a:", a.size) b= np.array([[1, 2], [3, 4]], dtype=np.int8) print("\nb:\n", b) print("size of b:", b.size) ###Output a: array([1, 2, 3], dtype=int8) size of a: 3 b: array([[1, 2], [3, 4]], dtype=int8) size of b: 4 ###Markdown **WARNING:** In `micropython`, `size` is a method, i.e., ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3], dtype=np.int8) print("a:\n", a) print("size of a:", a.size) b= np.array([[1, 2], [3, 4]], dtype=np.int8) print("\nb:\n", b) print("size of b:", b.size()) ###Output a: array([1, 2, 3], dtype=int8) size of a: 3 b: array([[1, 2], [3, 4]], dtype=int8) size of b: 4 ###Markdown .itemsizeThe `.itemsize` method (property) returns an integer with the siz enumber of elements in the array.**WARNING:** In `circuitpython`: ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3], dtype=np.int8) print("a:\n", a) print("itemsize of a:", a.itemsize) b= np.array([[1, 2], [3, 4]], dtype=np.float) print("\nb:\n", b) print("itemsize of b:", b.itemsize) ###Output a: array([1, 2, 3], dtype=int8) itemsize of a: 1 b: array([[1.0, 2.0], [3.0, 4.0]], dtype=float) itemsize of b: 8 ###Markdown **WARNING:** In `micropython`: ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3], dtype=np.int8) print("a:\n", a) print("itemsize of a:", a.itemsize) b= np.array([[1, 2], [3, 4]], dtype=np.float) print("\nb:\n", b) print("itemsize of b:", b.itemsize()) ###Output a: array([1, 2, 3], dtype=int8) itemsize of a: 1 b: array([[1.0, 2.0], [3.0, 4.0]], dtype=float) itemsize of b: 8 ###Markdown .reshape`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.reshape.html`reshape` re-writes the shape properties of an `ndarray`, but the array will not be modified in any other way. The function takes a single 2-tuple with two integers as its argument. The 2-tuple should specify the desired number of rows and columns. If the new shape is not consistent with the old, a `ValueError` exception will be raised. 
###Code %%micropython -unix 1 import ulab as np a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], dtype=np.uint8) print('a (4 by 4):', a) print('a (2 by 8):', a.reshape((2, 8))) print('a (1 by 16):', a.reshape((1, 16))) ###Output a (4 by 4): array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], dtype=uint8) a (2 by 8): array([[1, 2, 3, 4, 5, 6, 7, 8], [9, 10, 11, 12, 13, 14, 15, 16]], dtype=uint8) a (1 by 16): array([1, 2, 3, ..., 14, 15, 16], dtype=uint8) ###Markdown .flatten`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.flatten.htm`.flatten` returns the flattened array. The array can be flattened in `C` style (i.e., moving horizontally in the matrix), or in `fortran` style (i.e., moving vertically in the matrix). The `C`-style flattening is the default, and it is also fast, because this is just a verbatim copy of the contents. ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4], dtype=np.int8) print("a: \t\t", a) print("a flattened: \t", a.flatten()) b = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int8) print("\nb:", b) print("b flattened (C): \t", b.flatten()) print("b flattened (F): \t", b.flatten(order='F')) ###Output a: array([1, 2, 3, 4], dtype=int8) a flattened: array([1, 2, 3, 4], dtype=int8) b: array([[1, 2, 3], [4, 5, 6]], dtype=int8) b flattened (C): array([1, 2, 3, 4, 5, 6], dtype=int8) b flattened (F): array([1, 4, 2, 5, 3, 6], dtype=int8) ###Markdown .transpose`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.transpose.html ###Code %%micropython -unix 1 import ulab as np a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=np.uint8) print('a:\n', a) print('shape of a:', a.shape()) a.transpose() print('\ntranspose of a:\n', a) print('shape of a:', a.shape()) ###Output a: array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=uint8) shape of a: (4, 3) transpose of a: array([[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]], dtype=uint8) shape of a: (3, 4) ###Markdown .sort`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.sort.htmlIn-place sorting of an `ndarray`. For a more detailed exposition, see [sort](sort). ###Code %%micropython -unix 1 import ulab as np a = np.array([[1, 12, 3, 0], [5, 3, 4, 1], [9, 11, 1, 8], [7, 10, 0, 1]], dtype=np.uint8) print('\na:\n', a) a.sort(axis=0) print('\na sorted along vertical axis:\n', a) a = np.array([[1, 12, 3, 0], [5, 3, 4, 1], [9, 11, 1, 8], [7, 10, 0, 1]], dtype=np.uint8) a.sort(a, axis=1) print('\na sorted along horizontal axis:\n', a) a = np.array([[1, 12, 3, 0], [5, 3, 4, 1], [9, 11, 1, 8], [7, 10, 0, 1]], dtype=np.uint8) a.sort(a, axis=None) print('\nflattened a sorted:\n', a) ###Output a: array([[1, 12, 3, 0], [5, 3, 4, 1], [9, 11, 1, 8], [7, 10, 0, 1]], dtype=uint8) a sorted along vertical axis: array([[1, 3, 0, 0], [5, 10, 1, 1], [7, 11, 3, 1], [9, 12, 4, 8]], dtype=uint8) a sorted along horizontal axis: array([[0, 1, 3, 12], [1, 3, 4, 5], [1, 8, 9, 11], [0, 1, 7, 10]], dtype=uint8) flattened a sorted: array([0, 0, 1, ..., 10, 11, 12], dtype=uint8) ###Markdown Unary operatorsWith the exception of `len`, which returns a single number, all unary operators manipulate the underlying data element-wise. lenThis operator takes a single argument, and returns either the length (for row vectors), or the number of rows (for matrices) of its argument. 
###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4, 5], dtype=np.uint8) b = np.array([range(5), range(5), range(5), range(5)], dtype=np.uint8) print("a:\t", a) print("length of a: ", len(a)) print("shape of a: ", a.shape()) print("\nb:\t", b) print("length of b: ", len(b)) print("shape of b: ", b.shape()) ###Output a: array([1, 2, 3, 4, 5], dtype=uint8) length of a: 5 shape of a: (1, 5) b: array([[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], dtype=uint8) length of b: 4 shape of b: (4, 5) ###Markdown The number returned by `len` is also the length of the iterations, when the array supplies the elements for an iteration (see later). invertThe function is defined for integer data types (`uint8`, `int8`, `uint16`, and `int16`) only, takes a single argument, and returns the element-by-element, bit-wise inverse of the array. If a `float` is supplied, the function raises a `ValueError` exception.With signed integers (`int8`, and `int16`), the results might be unexpected, as in the example below: ###Code %%micropython -unix 1 import ulab as np a = np.array([0, -1, -100], dtype=np.int8) print("a:\t\t", a) print("inverse of a:\t", ~a) a = np.array([0, 1, 254, 255], dtype=np.uint8) print("\na:\t\t", a) print("inverse of a:\t", ~a) ###Output a: array([0, -1, -100], dtype=int8) inverse of a: array([-1, 0, 99], dtype=int8) a: array([0, 1, 254, 255], dtype=uint8) inverse of a: array([255, 254, 1, 0], dtype=uint8) ###Markdown absThis function takes a single argument, and returns the element-by-element absolute value of the array. When the data type is unsigned (`uint8`, or `uint16`), a copy of the array will be returned immediately, and no calculation takes place. ###Code %%micropython -unix 1 import ulab as np a = np.array([0, -1, -100], dtype=np.int8) print("a:\t\t\t ", a) print("absolute value of a:\t ", abs(a)) ###Output a: array([0, -1, -100], dtype=int8) absolute value of a: array([0, 1, 100], dtype=int8) ###Markdown negThis operator takes a single argument, and changes the sign of each element in the array. Unsigned values are wrapped. ###Code %%micropython -unix 1 import ulab as np a = np.array([10, -1, 1], dtype=np.int8) print("a:\t\t", a) print("negative of a:\t", -a) b = np.array([0, 100, 200], dtype=np.uint8) print("\nb:\t\t", b) print("negative of b:\t", -b) ###Output a: array([10, -1, 1], dtype=int8) negative of a: array([-10, 1, -1], dtype=int8) b: array([0, 100, 200], dtype=uint8) negative of b: array([0, 156, 56], dtype=uint8) ###Markdown posThis function takes a single argument, and simply returns a copy of the array. ###Code %%micropython -unix 1 import ulab as np a = np.array([10, -1, 1], dtype=np.int8) print("a:\t\t", a) print("positive of a:\t", +a) ###Output a: array([10, -1, 1], dtype=int8) positive of a: array([10, -1, 1], dtype=int8) ###Markdown Binary operators`ulab` implements the `+`, `-`, `*`, `/`, `**`, ``, `=`, `==`, `!=` binary operators that work element-wise. Partial broadcasting is available, meaning that the operands either must have the same shape, or one of them must be a scalar.The operators raise a `ValueError` exception, if partial broadcasting is not possible. The only exceptions are the `==` and `!=` operators that will return `False` in this case. **WARNING**: note that relational operators (``, `=`, `==`, `!=`) should have the `ndarray` on their left hand side, when compared to scalars. 
This means that the following works ###Code %%micropython -unix 1 import ulab a = ulab.array([1, 2, 3]) print(a > 2) ###Output [False, False, True] ###Markdown while the equivalent statement, `2 < a`, will raise a `TypeError` exception: ###Code %%micropython -unix 1 import ulab a = ulab.array([1, 2, 3]) print(2 < a) ###Output Traceback (most recent call last): File "/dev/shm/micropython.py", line 4, in <module> TypeError: unsupported types for __lt__: 'int', 'ndarray' ###Markdown **WARNING:** `numpy` also allows operations between a matrix, and a row vector, if the row vector has exactly as many elements, as many columns the matrix has. This feature will be added in future versions of `ulab`. ###Code a = array([[1, 2, 3], [4, 5, 6], [7, 8, 6]]) b = array([10, 20, 30]) a+b ###Output _____no_output_____ ###Markdown **WARNING:** `circuitpython` users should use the `equal`, and `not_equal` operators instead of `==`, and `!=`. See the section on [array comparison](Comparison-of-arrays) for details. UpcastingBinary operations require special attention, because two arrays with different typecodes can be the operands of an operation, in which case it is not trivial, what the typecode of the result is. This decision on the result's typecode is called upcasting. Since the number of typecodes in `ulab` is significantly smaller than in `numpy`, we have to define new upcasting rules. Where possible, I followed `numpy`'s conventions. `ulab` observes the following upcasting rules:1. Operations with two `ndarray`s of the same `dtype` preserve their `dtype`, even when the results overflow.2. if either of the operands is a float, the result is automatically a float3. When the right hand side of a binary operator is a micropython variable, `mp_obj_int`, or `mp_obj_float`, then the result will be promoted to `dtype` `float`. This is necessary, because a micropython integer can be 31 bites wide. Other micropython types (e.g., lists, tuples, etc.) raise a `TypeError` exception. 4. | left hand side | right hand side | ulab result | numpy result ||----------------|-----------------|-------------|--------------||`uint8` |`int8` |`int16` |`int16` ||`uint8` |`int16` |`int16` |`int16` ||`uint8` |`uint16` |`uint16` |`uint16` ||`int8` |`int16` |`int16` |`int16` | |`int8` |`uint16` |`uint16` |`int32` ||`uint16` |`int16` |`float` |`int32` | Note that the last two operations are promoted to `int32` in `numpy`. **WARNING:** Due to the lower number of available data types, the upcasting rules of `ulab` are slightly different to those of `numpy`. Watch out for this, when porting code!When one of the operands is a scalar, it will internally be turned into a single-element `ndarray` with the *smallest* possible `dtype`. Thus, e.g., if the scalar is 123, it will be converted to an array of `dtype` `uint8`.Upcasting can be seen in action in the following snippet: ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4], dtype=np.uint8) b = np.array([1, 2, 3, 4], dtype=np.int8) print("a:\t", a) print("b:\t", b) print("a+b:\t", a+b) c = np.array([1, 2, 3, 4], dtype=np.float) print("\na:\t", a) print("c:\t", c) print("a*c:\t", a*c) ###Output a: array([1, 2, 3, 4], dtype=uint8) b: array([1, 2, 3, 4], dtype=int8) a+b: array([2, 4, 6, 8], dtype=int16) a: array([1, 2, 3, 4], dtype=uint8) c: array([1.0, 2.0, 3.0, 4.0], dtype=float) a*c: array([1.0, 4.0, 9.0, 16.0], dtype=float) ###Markdown BenchmarksThe following snippet compares the performance of binary operations to a possible implementation in python. 
For the time measurement, we will take the following snippet from the micropython manual: ###Code %%micropython -pyboard 1 import utime def timeit(f, *args, **kwargs): func_name = str(f).split(' ')[1] def new_func(*args, **kwargs): t = utime.ticks_us() result = f(*args, **kwargs) print('execution time: ', utime.ticks_diff(utime.ticks_us(), t), ' us') return result return new_func %%micropython -pyboard 1 import ulab as np @timeit def py_add(a, b): return [a[i]+b[i] for i in range(1000)] @timeit def py_multiply(a, b): return [a[i]*b[i] for i in range(1000)] @timeit def ulab_add(a, b): return a + b @timeit def ulab_multiply(a, b): return a * b a = [0.0]*1000 b = range(1000) print('python add:') py_add(a, b) print('\npython multiply:') py_multiply(a, b) a = np.linspace(0, 10, num=1000) b = np.ones(1000) print('\nulab add:') ulab_add(a, b) print('\nulab multiply:') ulab_multiply(a, b) ###Output python add: execution time: 10051 us python multiply: execution time: 14175 us ulab add: execution time: 222 us ulab multiply: execution time: 213 us ###Markdown I do not claim that the python implementation above is perfect, and certainly, there is much room for improvement. However, the factor of 50 difference in execution time is very spectacular. This is nothing but a consequence of the fact that the `ulab` functions run `C` code, with very little python overhead. The factor of 50 appears to be quite universal: the FFT routine obeys similar scaling (see [Speed of FFTs](Speed-of-FFTs)), and this number came up with font rendering, too: [fast font rendering on graphical displays](https://forum.micropython.org/viewtopic.php?f=15&t=5815&p=33362&hilit=ufontp33383). Comparison operatorsThe smaller than, greater than, smaller or equal, and greater or equal operators return a vector of Booleans indicating the positions (`True`), where the condition is satisfied. ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.uint8) print(a < 5) ###Output [True, True, True, True, False, False, False, False] ###Markdown **WARNING**: at the moment, due to implementation details, the `ndarray` must be on the left hand side of the relational operators. This will change in a future version of `ulab`. That is, while `a a` have the same meaning, the following code will not work: ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.uint8) print(5 > a) ###Output Traceback (most recent call last): File "/dev/shm/micropython.py", line 5, in <module> TypeError: unsupported types for __gt__: 'int', 'ndarray' ###Markdown **WARNING:** Note that `numpy` returns an array of Booleans. For most use cases this fact should not make a difference. ###Code a = array([1, 2, 3, 4, 5, 6, 7, 8]) a < 5 ###Output _____no_output_____ ###Markdown These operators work with matrices, too, in which case a list of lists of Booleans will be returned: ###Code %%micropython -unix 1 import ulab as np a = np.array([range(0, 5, 1), range(1, 6, 1), range(2, 7, 1)], dtype=np.uint8) print(a) print(a < 5) ###Output array([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5], [2, 3, 4, 5, 6]], dtype=uint8) [[True, True, True, True, True], [True, True, True, True, False], [True, True, True, False, False]] ###Markdown Iterating over arrays`ndarray`s are iterable, which means that their elements can also be accessed as can the elements of a list, tuple, etc. 
If the array is one-dimensional, the iterator returns scalars, otherwise a new one-dimensional `ndarray`, which is simply a copy of the corresponding row of the matrix, i.e, its data type will be inherited. ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4, 5], dtype=np.uint8) b = np.array([range(5), range(10, 15, 1), range(20, 25, 1), range(30, 35, 1)], dtype=np.uint8) print("a:\t", a) for i, _a in enumerate(a): print("element %d in a:"%i, _a) print("\nb:\t", b) for i, _b in enumerate(b): print("element %d in b:"%i, _b) ###Output a: array([1, 2, 3, 4, 5], dtype=uint8) element 0 in a: 1 element 1 in a: 2 element 2 in a: 3 element 3 in a: 4 element 4 in a: 5 b: array([[0, 1, 2, 3, 4], [10, 11, 12, 13, 14], [20, 21, 22, 23, 24], [30, 31, 32, 33, 34]], dtype=uint8) element 0 in b: array([0, 1, 2, 3, 4], dtype=uint8) element 1 in b: array([10, 11, 12, 13, 14], dtype=uint8) element 2 in b: array([20, 21, 22, 23, 24], dtype=uint8) element 3 in b: array([30, 31, 32, 33, 34], dtype=uint8) ###Markdown Slicing and indexingCopies of sub-arrays can be created by indexing, and slicing. IndexingThe simplest form of indexing is specifying a single integer between the square brackets as in ###Code %%micropython -unix 1 import ulab as np a = np.array(range(10), dtype=np.uint8) print("a:\t\t\t\t\t\t", a) print("the first, and first from right element of a:\t", a[0], a[-1]) print("the second, and second from right element of a:\t", a[1], a[-2]) ###Output a: array([0, 1, 2, ..., 7, 8, 9], dtype=uint8) the first, and first from right element of a: 0 9 the second, and second from right element of a: 1 8 ###Markdown Indices are (not necessarily non-negative) integers, or a list of Booleans. By using a Boolean list, we can select those elements of an array that satisfy a specific condition. At the moment, such indexing is defined for row vectors only, for matrices the function raises a `ValueError` exception, though this will be rectified in a future version of `ulab`. ###Code %%micropython -unix 1 import ulab as np a = np.array(range(9), dtype=np.float) print("a:\t", a) print("a < 5:\t", a[a < 5]) ###Output a: array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) a < 5: array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=float) ###Markdown Indexing with Boolean arrays can take more complicated expressions. This is a very concise way of comparing two vectors, e.g.: ###Code %%micropython -pyboard 1 import ulab as np a = np.array(range(9), dtype=np.uint8) b = np.array([4, 4, 4, 3, 3, 3, 13, 13, 13], dtype=np.uint8) print("a:\t", a) print("\na**2:\t", a*a) print("\nb:\t", b) print("\n100*sin(b):\t", np.sin(b)*100.0) print("\na[a*a > np.sin(b)*100.0]:\t", a[a*a > np.sin(b)*100.0]) ###Output a: array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=uint8) a**2: array([0, 1, 4, 9, 16, 25, 36, 49, 64], dtype=uint8) b: array([4, 4, 4, 3, 3, 3, 13, 13, 13], dtype=uint8) 100*sin(b): array([-75.68025, -75.68025, -75.68025, 14.112, 14.112, 14.112, 42.01671, 42.01671, 42.01671], dtype=float) a[a*a > np.sin(b)*100.0]: array([0, 1, 2, 4, 5, 7, 8], dtype=uint8) ###Markdown Slicing and assigning to slicesYou can also generate sub-arrays by specifying slices as the index of an array. Slices are special python objects of the form ```pythonslice = start:end:stop```where `start`, `end`, and `stop` are (not necessarily non-negative) integers. Not all of these three numbers must be specified in an index, in fact, all three of them can be missing. The interpreter takes care of filling in the missing values. 
(Note that slices cannot be defined in this way, only there, where an index is expected.) For a good explanation on how slices work in python, you can read the stackoverflow question https://stackoverflow.com/questions/509211/understanding-slice-notation.Slices work on both axes: ###Code %%micropython -unix 1 import ulab as np a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.uint8) print('a:\n', a) # the first row print('\na[0]:\n', a[0]) # the first two elements of the first row print('\na[0,:2]:\n', a[0,:2]) # the zeroth element in each row (also known as the zeroth column) print('\na[:,0]:\n', a[:,0]) # the last but one row print('\na[-1]:\n', a[-1]) # the last two rows backwards print('\na[::1]:\n', a[::-1]) ###Output a: array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=uint8) a[0]: array([1, 2, 3], dtype=uint8) a[0,:2]: array([1, 2], dtype=uint8) a[:,0]: array([1, 4, 7], dtype=uint8) a[-1]: array([7, 8, 9], dtype=uint8) a[::1]: array([[7, 8, 9], [4, 5, 6]], dtype=uint8) ###Markdown Assignment to slices can be done for the whole slice, per row, and per column. A couple of examples should make these statements clearer: ###Code %%micropython -unix 1 import ulab as np zero_list = [0, 0, 0] a = np.array([zero_list, zero_list, zero_list], dtype=np.uint8) print('a:\n', a) # assigning to the whole row a[0] = 1 print('\na[0] = 1\n', a) # assigning to the whole row a[0] = np.array([1, 2, -333], dtype=np.float) print('\na[0] = np.array([1, 2, 3])\n', a) # assigning to a column a[:,2] = 3.0 print('\na[:,0]:\n', a) ###Output a: array([[0, 0, 0], [0, 0, 0], [0, 0, 0]], dtype=uint8) a[0] = 1 array([[1, 1, 1], [0, 0, 0], [0, 0, 0]], dtype=uint8) a[0] = np.array([1, 2, 3]) array([[1, 2, 179], [0, 0, 0], [0, 0, 0]], dtype=uint8) a[:,0]: array([[1, 2, 3], [0, 0, 3], [0, 0, 3]], dtype=uint8) ###Markdown Universal functionsStandard mathematical functions are defined in the `vector` sub-module, and can be calculated on any scalar, scalar-valued iterable (ranges, lists, tuples containing numbers), and on `ndarray`s without having to change the call signature. In all cases the functions return a new `ndarray` of typecode `float` (since these functions usually generate float values, anyway). The functions execute faster with `ndarray` arguments than with iterables, because the values of the input vector can be extracted faster. At present, the following functions are supported:`acos`, `acosh`, `arctan2`, `around`, `asin`, `asinh`, `atan`, `atanh`, `ceil`, `cos`, `erf`, `erfc`, `exp`, `expm1`, `floor`, `tgamma`, `lgamma`, `log`, `log10`, `log2`, `sin`, `sinh`, `sqrt`, `tan`, `tanh`.These functions are applied element-wise to the arguments, thus, e.g., the exponential of a matrix cannot be calculated in this way. The functions can be invoked by importing the `vector` sub-module first. ###Code %%micropython -pyboard 1 import ulab as np from ulab import vector a = range(9) b = np.array(a) # works with ranges, lists, tuples etc. 
print('a:\t', a) print('exp(a):\t', vector.exp(a)) # with 1D arrays print('\nb:\t', b) print('exp(b):\t', vector.exp(b)) # as well as with matrices c = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) print('\nc:\t', c) print('exp(c):\t', vector.exp(c)) ###Output a: range(0, 9) exp(a): array([1.0, 2.718282, 7.389056, 20.08554, 54.59816, 148.4132, 403.4288, 1096.633, 2980.958], dtype=float) b: array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) exp(b): array([1.0, 2.718282, 7.389056, 20.08554, 54.59816, 148.4132, 403.4288, 1096.633, 2980.958], dtype=float) c: array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype=float) exp(c): array([[2.718282, 7.389056, 20.08554], [54.59816, 148.4132, 403.4288], [1096.633, 2980.958, 8103.084]], dtype=float) ###Markdown Computation expensesThe overhead for calculating with micropython iterables is quite significant: for the 1000 samples below, the difference is more than 800 microseconds, because internally the function has to create the `ndarray` for the output, has to fetch the iterable's items of unknown type, and then convert them to floats. All these steps are skipped for `ndarray`s, because these pieces of information are already known. Doing the same with `list` comprehension requires 30 times more time than with the `ndarray`, which would become even more, if we converted the resulting list to an `ndarray`. ###Code %%micropython -pyboard 1 import ulab as np from ulab import vector import math a = [0]*1000 b = np.array(a) @timeit def timed_vector(iterable): return vector.exp(iterable) @timeit def timed_list(iterable): return [math.exp(i) for i in iterable] print('iterating over ndarray in ulab') timed_vector(b) print('\niterating over list in ulab') timed_vector(a) print('\niterating over list in python') timed_list(a) ###Output iterating over ndarray in ulab execution time: 441 us iterating over list in ulab execution time: 1266 us iterating over list in python execution time: 11379 us ###Markdown Vectorising generic python functions`numpy`: https://numpy.org/doc/stable/reference/generated/numpy.vectorize.htmlThe examples above use factory functions. In fact, they are nothing but the vectorised versions of the standard mathematical functions. User-defined `python` functions can also be vectorised by help of `vectorize`. This function takes a positional argument, namely, the `python` function that you want to vectorise, and a non-mandatory keyword argument, `otypes`, which determines the `dtype` of the output array. The `otypes` must be `None` (default), or any of the `dtypes` defined in `ulab`. With `None`, the output is automatically turned into a float array. The return value of `vectorize` is a `micropython` object that can be called as a standard function, but which now accepts either a scalar, an `ndarray`, or a generic `micropython` iterable as its sole argument. Note that the function that is to be vectorised must have a single argument. 
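If the function to be vectorised needs extra parameters, one possible workaround (a sketch that is not part of the manual's own examples, and assumes nothing beyond `vectorize` itself) is to freeze the extra parameters in a single-argument wrapper first:

```python
# sketch: vectorising a two-parameter function by fixing one parameter
# in a single-argument closure before passing it to vectorize
import ulab as np
from ulab import vector

def scale(x, factor):
    return x * factor

# the lambda takes a single argument, as required by vectorize
vscale = vector.vectorize(lambda x: scale(x, 10.0))

print(vscale(np.array([1, 2, 3])))
# expected: array([10.0, 20.0, 30.0], dtype=float)
```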
###Code %%micropython -unix 1 import ulab as np from ulab import vector def f(x): return x*x vf = vector.vectorize(f) # calling with a scalar print('{:20}'.format('f on a scalar: '), vf(44.0)) # calling with an ndarray a = np.array([1, 2, 3, 4]) print('{:20}'.format('f on an ndarray: '), vf(a)) # calling with a list print('{:20}'.format('f on a list: '), vf([2, 3, 4])) ###Output f on a scalar: array([1936.0], dtype=float) f on an ndarray: array([1.0, 4.0, 9.0, 16.0], dtype=float) f on a list: array([4.0, 9.0, 16.0], dtype=float) ###Markdown As mentioned, the `dtype` of the resulting `ndarray` can be specified via the `otypes` keyword. The value is bound to the function object that `vectorize` returns, therefore, if the same function is to be vectorised with different output types, then for each type a new function object must be created. ###Code %%micropython -unix 1 import ulab as np from ulab import vector l = [1, 2, 3, 4] def f(x): return x*x vf1 = vector.vectorize(f, otypes=np.uint8) vf2 = vector.vectorize(f, otypes=np.float) print('{:20}'.format('output is uint8: '), vf1(l)) print('{:20}'.format('output is float: '), vf2(l)) ###Output output is uint8: array([1, 4, 9, 16], dtype=uint8) output is float: array([1.0, 4.0, 9.0, 16.0], dtype=float) ###Markdown The `otypes` keyword argument cannot be used for type coercion: if the function evaluates to a float, but `otypes` would dictate an integer type, an exception will be raised: ###Code %%micropython -unix 1 import ulab as np from ulab import vector int_list = [1, 2, 3, 4] float_list = [1.0, 2.0, 3.0, 4.0] def f(x): return x*x vf = vector.vectorize(f, otypes=np.uint8) print('{:20}'.format('integer list: '), vf(int_list)) # this will raise a TypeError exception print(vf(float_list)) ###Output integer list: array([1, 4, 9, 16], dtype=uint8) Traceback (most recent call last): File "/dev/shm/micropython.py", line 14, in <module> TypeError: can't convert float to int ###Markdown BenchmarksIt should be pointed out that the `vectorize` function produces the pseudo-vectorised version of the `python` function that is fed into it, i.e., on the C level, the same `python` function is called, with the all-encompassing `mp_obj_t` type arguments, and all that happens is that the `for` loop in `[f(i) for i in iterable]` runs purely in C. Since type checking and type conversion in `f()` is expensive, the speed-up is not so spectacular as when iterating over an `ndarray` with a factory function: a gain of approximately 30% can be expected, when a native `python` type (e.g., `list`) is returned by the function, and this becomes around 50% (a factor of 2), if conversion to an `ndarray` is also counted.The following code snippet calculates the square of a 1000 numbers with the vectorised function (which returns an `ndarray`), with `list` comprehension, and with `list` comprehension followed by conversion to an `ndarray`. For comparison, the execution time is measured also for the case, when the square is calculated entirely in `ulab`. 
###Code %%micropython -pyboard 1 import ulab as np from ulab import vector def f(x): return x*x vf = vector.vectorize(f) @timeit def timed_vectorised_square(iterable): return vf(iterable) @timeit def timed_python_square(iterable): return [f(i) for i in iterable] @timeit def timed_ndarray_square(iterable): return np.array([f(i) for i in iterable]) @timeit def timed_ulab_square(ndarray): return ndarray**2 print('vectorised function') squares = timed_vectorised_square(range(1000)) print('\nlist comprehension') squares = timed_python_square(range(1000)) print('\nlist comprehension + ndarray conversion') squares = timed_ndarray_square(range(1000)) print('\nsquaring an ndarray entirely in ulab') a = np.array(range(1000)) squares = timed_ulab_square(a) ###Output vectorised function execution time: 7237 us list comprehension execution time: 10248 us list comprehension + ndarray conversion execution time: 12562 us squaring an ndarray entirely in ulab execution time: 560 us ###Markdown From the comparisons above, it is obvious that `python` functions should only be vectorised, when the same effect cannot be gotten in `ulab` only. However, although the time savings are not significant, there is still a good reason for caring about vectorised functions. Namely, user-defined `python` functions become universal, i.e., they can accept generic iterables as well as `ndarray`s as their arguments. A vectorised function is still a one-liner, resulting in transparent and elegant code.A final comment on this subject: the `f(x)` that we defined is a *generic* `python` function. This means that it is not required that it just crunches some numbers. It has to return a number object, but it can still access the hardware in the meantime. So, e.g., ```pythonled = pyb.LED(2)def f(x): if x < 100: led.toggle() return x*x```is perfectly valid code. around`numpy`: https://docs.scipy.org/doc/numpy-1.17.0/reference/generated/numpy.around.html`numpy`'s `around` function can also be found in the `vector` sub-module. The function implements the `decimals` keyword argument with default value `0`. The first argument must be an `ndarray`. If this is not the case, the function raises a `TypeError` exception. Note that `numpy` accepts general iterables. The `out` keyword argument known from `numpy` is not accepted. The function always returns an ndarray of type `mp_float_t`. ###Code %%micropython -unix 1 import ulab as np from ulab import vector a = np.array([1, 2.2, 33.33, 444.444]) print('a:\t\t', a) print('\ndecimals = 0\t', vector.around(a, decimals=0)) print('\ndecimals = 1\t', vector.around(a, decimals=1)) print('\ndecimals = -1\t', vector.around(a, decimals=-1)) ###Output a: array([1.0, 2.2, 33.33, 444.444], dtype=float) decimals = 0 array([1.0, 2.0, 33.0, 444.0], dtype=float) decimals = 1 array([1.0, 2.2, 33.3, 444.4], dtype=float) decimals = -1 array([0.0, 0.0, 30.0, 440.0], dtype=float) ###Markdown arctan2`numpy`: https://docs.scipy.org/doc/numpy-1.17.0/reference/generated/numpy.arctan2.htmlThe two-argument inverse tangent function is also part of the `vector` sub-module. The function implements only partial broadcasting, i.e., its two arguments either have the same shape, or at least one of them must be a single-element array. Scalars (`micropython` integers or floats) are also allowed. 
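A typical use case, sketched below with made-up coordinates, is turning Cartesian components into an angle: since `arctan2` inspects the signs of both of its arguments, it places the angle in the correct quadrant, which a plain `atan(y/x)` cannot do.

```python
# sketch: one point in each quadrant; arctan2(y, x) returns the angle of
# the point (x, y), measured from the positive x axis, in radians
import ulab as np
from ulab import vector

x = np.array([1.0, -1.0, -1.0, 1.0])
y = np.array([1.0, 1.0, -1.0, -1.0])

print(vector.arctan2(y, x))
# expected angles: pi/4, 3*pi/4, -3*pi/4, -pi/4
```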
###Code %%micropython -unix 1 import ulab as np from ulab import vector a = np.array([1, 2.2, 33.33, 444.444]) print('a:\t\t', a) print('\narctan2(a, 1.0)\t', vector.arctan2(a, 1.0)) print('\narctan2(1.0, a)\t', vector.arctan2(1.0, a)) print('\narctan2(a, a): \t', vector.arctan2(a, a)) ###Output a: array([1.0, 2.2, 33.33, 444.444], dtype=float) arctan2(a, 1.0) array([0.7853981633974483, 1.14416883366802, 1.5408023243361, 1.568546328341769], dtype=float) arctan2(1.0, a) array([0.7853981633974483, 0.426627493126876, 0.02999400245879636, 0.002249998453127392], dtype=float) arctan2(a, a): array([0.7853981633974483, 0.7853981633974483, 0.7853981633974483, 0.7853981633974483], dtype=float) ###Markdown NumericalFunction in the `numerical` sub-module can be called by importing the sub-module first. min, argmin, max, argmax`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.min.html`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.max.html`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html**WARNING:** Difference to `numpy`: the `out` keyword argument is not implemented.These functions follow the same pattern, and work with generic iterables, and `ndarray`s. `min`, and `max` return the minimum or maximum of a sequence. If the input array is two-dimensional, the `axis` keyword argument can be supplied, in which case the minimum/maximum along the given axis will be returned. If `axis=None` (this is also the default value), the minimum/maximum of the flattened array will be determined.`argmin/argmax` return the position (index) of the minimum/maximum in the sequence. ###Code %%micropython -unix 1 import ulab as np from ulab import numerical a = np.array([1, 2, 0, 1, 10]) print('a:', a) print('min of a:', numerical.min(a)) print('argmin of a:', numerical.argmin(a)) b = np.array([[1, 2, 0], [1, 10, -1]]) print('\nb:\n', b) print('min of b (flattened):', numerical.min(b)) print('min of b (axis=0):', numerical.min(b, axis=0)) print('min of b (axis=1):', numerical.min(b, axis=1)) ###Output a: array([1.0, 2.0, 0.0, 1.0, 10.0], dtype=float) min of a: 0.0 argmin of a: 2 b: array([[1.0, 2.0, 0.0], [1.0, 10.0, -1.0]], dtype=float) min of b (flattened): -1.0 min of b (axis=0): array([1.0, 2.0, -1.0], dtype=float) min of b (axis=1): array([0.0, -1.0], dtype=float) ###Markdown sum, std, mean`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.sum.html`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.std.html`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.htmlThese three functions follow the same pattern: if the axis keyword is not specified, it assumes the default value of `None`, and returns the result of the computation for the flattened array. Otherwise, the calculation is along the given axis. 
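For reference, the standard deviation computed here is the population estimate, $\sigma = \sqrt{\frac{1}{N}\sum_i (x_i - \bar{x})^2}$: in the snippet below the first column of the matrix is $[1, 4, 7]$, so $\sigma = \sqrt{((-3)^2 + 0^2 + 3^2)/3} = \sqrt{6} \approx 2.449$, in agreement with the printed output.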
###Code %%micropython -pyboard 1 import ulab as np from ulab import numerical a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) print('a: \n', a) print('sum, flat array: ', numerical.sum(a)) print('mean, horizontal: ', numerical.mean(a, axis=1)) print('std, vertical: ', numerical.std(a, axis=0)) ###Output a: array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype=float) sum, flat array: 45.0 mean, horizontal: array([2.0, 5.0, 8.0], dtype=float) std, vertical: array([2.44949, 2.44949, 2.44949], dtype=float) ###Markdown roll`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.roll.htmlThe roll function shifts the content of a vector by the positions given as the second argument. If the `axis` keyword is supplied, the shift is applied to the given axis. ###Code %%micropython -unix 1 import ulab as np from ulab import numerical a = np.array([1, 2, 3, 4, 5, 6, 7, 8]) print("a:\t\t\t", a) numerical.roll(a, 2) print("a rolled to the left:\t", a) # this should be the original vector numerical.roll(a, -2) print("a rolled to the right:\t", a) ###Output a: array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) a rolled to the left: array([3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 1.0, 2.0], dtype=float) a rolled to the right: array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) ###Markdown Rolling works with matrices, too. If the `axis` keyword is 0, the matrix is rolled along its vertical axis, otherwise, horizontally. Horizontal rolls are faster, because they require fewer steps, and larger memory chunks are copied, however, they also require more RAM: basically the whole row must be stored internally. Most expensive are the `None` keyword values, because with `axis = None`, the array is flattened first, hence the row's length is the size of the whole matrix.Vertical rolls require two internal copies of single columns. ###Code %%micropython -unix 1 import ulab as np from ulab import numerical a = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) print("a:\n", a) numerical.roll(a, 2) print("\na rolled to the left:\n", a) numerical.roll(a, -1, axis=1) print("\na rolled up:\n", a) numerical.roll(a, 1, axis=None) print("\na rolled with None:\n", a) ###Output a: array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]], dtype=float) a rolled to the left: array([[3.0, 4.0, 5.0, 6.0], [7.0, 8.0, 1.0, 2.0]], dtype=float) a rolled up: array([[6.0, 3.0, 4.0, 5.0], [2.0, 7.0, 8.0, 1.0]], dtype=float) a rolled with None: array([[3.0, 4.0, 5.0, 2.0], [7.0, 8.0, 1.0, 6.0]], dtype=float) ###Markdown Simple running weighted averageAs a demonstration of the conciseness of `ulab/numpy` operations, we will calculate an exponentially weighted running average of a measurement vector in just a couple of lines. I chose this particular example, because I think that this can indeed be used in real-life applications. ###Code %%micropython -unix 1 import ulab as np from ulab import numerical from ulab import vector def dummy_adc(): # dummy adc function, so that the results are reproducible return 2 n = 10 # These are the normalised weights; the last entry is the most dominant weight = vector.exp([1, 2, 3, 4, 5]) weight = weight/numerical.sum(weight) print(weight) # initial array of samples samples = np.array([0]*n) for i in range(n): # a new datum is inserted on the right hand side. 
This simply overwrites whatever was in the last slot samples[-1] = dummy_adc() print(numerical.mean(samples[-5:]*weight)) print(samples[-5:]) # the data are shifted by one position to the left numerical.roll(samples, 1) ###Output array([0.01165623031556606, 0.03168492019176483, 0.08612854033708572, 0.234121635556221, 0.6364086270332336], dtype=float) 0.2545634508132935 array([0.0, 0.0, 0.0, 0.0, 2.0], dtype=float) 0.3482121050357819 array([0.0, 0.0, 0.0, 2.0, 2.0], dtype=float) 0.3826635211706161 array([0.0, 0.0, 2.0, 2.0, 2.0], dtype=float) 0.3953374892473221 array([0.0, 2.0, 2.0, 2.0, 2.0], dtype=float) 0.3999999813735485 array([2.0, 2.0, 2.0, 2.0, 2.0], dtype=float) 0.3999999813735485 array([2.0, 2.0, 2.0, 2.0, 2.0], dtype=float) 0.3999999813735485 array([2.0, 2.0, 2.0, 2.0, 2.0], dtype=float) 0.3999999813735485 array([2.0, 2.0, 2.0, 2.0, 2.0], dtype=float) 0.3999999813735485 array([2.0, 2.0, 2.0, 2.0, 2.0], dtype=float) 0.3999999813735485 array([2.0, 2.0, 2.0, 2.0, 2.0], dtype=float) ###Markdown flip`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.htmlThe `flip` function takes one positional, an `ndarray`, and one keyword argument, `axis = None`, and reverses the order of elements along the given axis. If the keyword argument is `None`, the matrix' entries are flipped along all axes. `flip` returns a new copy of the array. ###Code %%micropython -unix 1 import ulab as np from ulab import numerical a = np.array([1, 2, 3, 4, 5]) print("a: \t", a) print("a flipped:\t", np.flip(a)) a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.uint8) print("\na flipped horizontally\n", numerical.flip(a, axis=1)) print("\na flipped vertically\n", numerical.flip(a, axis=0)) print("\na flipped horizontally+vertically\n", numerical.flip(a)) ###Output a: array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=float) a flipped: array([5.0, 4.0, 3.0, 2.0, 1.0], dtype=float) a flipped horizontally array([[3, 2, 1], [6, 5, 4], [9, 8, 7]], dtype=uint8) a flipped vertically array([[7, 8, 9], [4, 5, 6], [1, 2, 3]], dtype=uint8) a flipped horizontally+vertically array([[9, 8, 7], [6, 5, 4], [3, 2, 1]], dtype=uint8) ###Markdown diff`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.diff.htmlThe `diff` function returns the numerical derivative of the forward scheme, or more accurately, the differences of an `ndarray` along a given axis. The order of derivative can be stipulated with the `n` keyword argument, which should be between 0, and 9. Default is 1. If higher order derivatives are required, they can be gotten by repeated calls to the function. The `axis` keyword argument should be -1 (last axis, in `ulab` equivalent to the second axis, and this also happens to be the default value), 0, or 1. Beyond the output array, the function requires only a couple of bytes of extra RAM for the differentiation stencil. (The stencil is an `int8` array, one byte longer than `n`. This also explains, why the highest order is 9: the coefficients of a ninth-order stencil all fit in signed bytes, while 10 would require `int16`.) Note that as usual in numerical differentiation (and also in `numpy`), the length of the respective axis will be reduced by `n` after the operation. If `n` is larger than, or equal to the length of the axis, an empty array will be returned.**WARNING**: the `diff` function does not implement the `prepend` and `append` keywords that can be found in `numpy`. 
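As a quick illustration of the repeated-call route mentioned above (a sketch with made-up data, not taken from the manual's examples):

```python
# sketch: the second difference obtained by calling diff twice is the same as
# passing n=2; the input is chosen so that the results are easy to check by hand
import ulab as np
from ulab import numerical

a = np.array([1, 2, 4, 7, 11, 16], dtype=np.uint8)

print(numerical.diff(a))                   # expected: 1, 2, 3, 4, 5
print(numerical.diff(numerical.diff(a)))   # expected: 1, 1, 1, 1
print(numerical.diff(a, n=2))              # expected: same as the previous line
```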
###Code %%micropython -unix 1 import ulab as np from ulab import numerical a = np.array(range(9), dtype=np.uint8) print('a:\n', a) print('\nfirst derivative:\n', numerical.diff(a, n=1)) print('\nsecond derivative:\n', numerical.diff(a, n=2)) c = np.array([[1, 2, 3, 4], [4, 3, 2, 1], [1, 4, 9, 16], [0, 0, 0, 0]]) print('\nc:\n', c) print('\nfirst derivative, first axis:\n', numerical.diff(c, axis=0)) print('\nfirst derivative, second axis:\n', numerical.diff(c, axis=1)) ###Output a: array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=uint8) first derivative: array([1, 1, 1, 1, 1, 1, 1, 1], dtype=uint8) second derivative: array([0, 0, 0, 0, 0, 0, 0], dtype=uint8) c: array([[1.0, 2.0, 3.0, 4.0], [4.0, 3.0, 2.0, 1.0], [1.0, 4.0, 9.0, 16.0], [0.0, 0.0, 0.0, 0.0]], dtype=float) first derivative, first axis: array([[3.0, 1.0, -1.0, -3.0], [-3.0, 1.0, 7.0, 15.0], [-1.0, -4.0, -9.0, -16.0]], dtype=float) first derivative, second axis: array([[1.0, 1.0, 1.0], [-1.0, -1.0, -1.0], [3.0, 5.0, 7.0], [0.0, 0.0, 0.0]], dtype=float) ###Markdown sort`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.sort.htmlThe sort function takes an ndarray, and sorts its elements in ascending order along the specified axis using a heap sort algorithm. As opposed to the `.sort()` method discussed earlier, this function creates a copy of its input before sorting, and at the end, returns this copy. Sorting takes place in place, without auxiliary storage. The `axis` keyword argument takes on the possible values of -1 (the last axis, in `ulab` equivalent to the second axis, and this also happens to be the default value), 0, 1, or `None`. The first three cases are identical to those in [diff](diff), while the last one flattens the array before sorting. If descending order is required, the result can simply be `flip`ped, see [flip](flip).**WARNING:** `numpy` defines the `kind`, and `order` keyword arguments that are not implemented here. The function in `ulab` always uses heap sort, and since `ulab` does not have the concept of data fields, the `order` keyword argument would have no meaning. ###Code %%micropython -unix 1 import ulab as np from ulab import numerical a = np.array([[1, 12, 3, 0], [5, 3, 4, 1], [9, 11, 1, 8], [7, 10, 0, 1]], dtype=np.float) print('\na:\n', a) b = numerical.sort(a, axis=0) print('\na sorted along vertical axis:\n', b) c = numerical.sort(a, axis=1) print('\na sorted along horizontal axis:\n', c) c = numerical.sort(a, axis=None) print('\nflattened a sorted:\n', c) ###Output a: array([[1.0, 12.0, 3.0, 0.0], [5.0, 3.0, 4.0, 1.0], [9.0, 11.0, 1.0, 8.0], [7.0, 10.0, 0.0, 1.0]], dtype=float) a sorted along vertical axis: array([[1.0, 3.0, 0.0, 0.0], [5.0, 10.0, 1.0, 1.0], [7.0, 11.0, 3.0, 1.0], [9.0, 12.0, 4.0, 8.0]], dtype=float) a sorted along horizontal axis: array([[0.0, 1.0, 3.0, 12.0], [1.0, 3.0, 4.0, 5.0], [1.0, 8.0, 9.0, 11.0], [0.0, 1.0, 7.0, 10.0]], dtype=float) flattened a sorted: array([0.0, 0.0, 1.0, ..., 10.0, 11.0, 12.0], dtype=float) ###Markdown Heap sort requires $\sim N\log N$ operations, and notably, the worst case costs only 20% more time than the average. 
In order to get an order-of-magnitude estimate, we will take the sine of 1000 uniformly spaced numbers between 0, and two pi, and sort them: ###Code %%micropython -pyboard 1 import ulab as np from ulab import vector from ulab import numerical @timeit def sort_time(array): return numerical.sort(array) b = vector.sin(np.linspace(0, 6.28, num=1000)) print('b: ', b) sort_time(b) print('\nb sorted:\n', b) ###Output _____no_output_____ ###Markdown argsort`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.argsort.htmlSimilarly to [sort](sort), `argsort` takes a positional, and a keyword argument, and returns an unsigned short index array of type `ndarray` with the same dimensions as the input, or, if `axis=None`, as a row vector with length equal to the number of elements in the input (i.e., the flattened array). The indices in the output sort the input in ascending order. The routine in `argsort` is the same as in `sort`, therefore, the comments on computational expenses (time and RAM) also apply. In particular, since no copy of the original data is required, virtually no RAM beyond the output array is used. Since the underlying container of the output array is of type `uint16_t`, neither of the output dimensions should be larger than 65535. If that happens to be the case, the function will bail out with a `ValueError`. ###Code %%micropython -unix 1 import ulab as np from ulab import numerical a = np.array([[1, 12, 3, 0], [5, 3, 4, 1], [9, 11, 1, 8], [7, 10, 0, 1]], dtype=np.float) print('\na:\n', a) b = numerical.argsort(a, axis=0) print('\na sorted along vertical axis:\n', b) c = numerical.argsort(a, axis=1) print('\na sorted along horizontal axis:\n', c) c = numerical.argsort(a, axis=None) print('\nflattened a sorted:\n', c) ###Output a: array([[1.0, 12.0, 3.0, 0.0], [5.0, 3.0, 4.0, 1.0], [9.0, 11.0, 1.0, 8.0], [7.0, 10.0, 0.0, 1.0]], dtype=float) a sorted along vertical axis: array([[0, 1, 3, 0], [1, 3, 2, 1], [3, 2, 0, 3], [2, 0, 1, 2]], dtype=uint16) a sorted along horizontal axis: array([[3, 0, 2, 1], [3, 1, 2, 0], [2, 3, 0, 1], [2, 3, 0, 1]], dtype=uint16) flattened a sorted: array([3, 14, 0, ..., 13, 9, 1], dtype=uint16) ###Markdown Since during the sorting, only the indices are shuffled, `argsort` does not modify the input array, as one can verify this by the following example: ###Code %%micropython -unix 1 import ulab as np from ulab import numerical a = np.array([0, 5, 1, 3, 2, 4], dtype=np.uint8) print('\na:\n', a) b = numerical.argsort(a, axis=1) print('\nsorting indices:\n', b) print('\nthe original array:\n', a) ###Output a: array([0, 5, 1, 3, 2, 4], dtype=uint8) sorting indices: array([0, 2, 4, 3, 5, 1], dtype=uint16) the original array: array([0, 5, 1, 3, 2, 4], dtype=uint8) ###Markdown LinalgFunctions in the `linalg` module can be called by importing the sub-module first. size`size` takes a single argument, the axis, whose size is to be returned. Depending on the value of the argument, the following information will be returned:1. argument is 0: the number of elements of the array2. argument is 1: the number of rows3. 
argument is 2: the number of columns ###Code %%micropython -unix 1 import ulab as np from ulab import linalg a = np.array([1, 2, 3, 4], dtype=np.int8) print("a:\n", a) print("size of a:", linalg.size(a, axis=None), ",", linalg.size(a, axis=0)) b= np.array([[1, 2], [3, 4]], dtype=np.int8) print("\nb:\n", b) print("size of b:", linalg.size(b, axis=None), ",", linalg.size(b, axis=0), ",", linalg.size(b, axis=1)) ###Output a: array([1, 2, 3, 4], dtype=int8) size of a: 4 , 4 b: array([[1, 2], [3, 4]], dtype=int8) size of b: 4 , 2 , 2 ###Markdown inv`numpy`: https://docs.scipy.org/doc/numpy-1.17.0/reference/generated/numpy.linalg.inv.htmlA square matrix, provided that it is not singular, can be inverted by calling the `inv` function that takes a single argument. The inversion is based on successive elimination of elements in the lower left triangle, and raises a `ValueError` exception, if the matrix turns out to be singular (i.e., one of the diagonal entries is zero). ###Code %%micropython -pyboard 1 import ulab as np from ulab import linalg m = np.array([[1, 2, 3, 4], [4, 5, 6, 4], [7, 8.6, 9, 4], [3, 4, 5, 6]]) print(linalg.inv(m)) ###Output array([[-2.166666, 1.499999, -0.8333326, 1.0], [1.666666, -3.333331, 1.666666, -4.768516e-08], [0.1666672, 2.166666, -0.8333327, -1.0], [-0.1666666, -0.3333334, 4.96705e-08, 0.5]], dtype=float) ###Markdown Computation expensesNote that the cost of inverting a matrix is approximately twice as many floats (RAM), as the number of entries in the original matrix, and approximately as many operations, as the number of entries. Here are a couple of numbers: ###Code %%micropython -pyboard 1 import ulab as np from ulab import linalg @timeit def invert_matrix(m): return linalg.inv(m) m = np.array([[1, 2,], [4, 5]]) print('2 by 2 matrix:') invert_matrix(m) m = np.array([[1, 2, 3, 4], [4, 5, 6, 4], [7, 8.6, 9, 4], [3, 4, 5, 6]]) print('\n4 by 4 matrix:') invert_matrix(m) m = np.array([[1, 2, 3, 4, 5, 6, 7, 8], [0, 5, 6, 4, 5, 6, 4, 5], [0, 0, 9, 7, 8, 9, 7, 8], [0, 0, 0, 10, 11, 12, 11, 12], [0, 0, 0, 0, 4, 6, 7, 8], [0, 0, 0, 0, 0, 5, 6, 7], [0, 0, 0, 0, 0, 0, 7, 6], [0, 0, 0, 0, 0, 0, 0, 2]]) print('\n8 by 8 matrix:') invert_matrix(m) ###Output 2 by 2 matrix: execution time: 65 us 4 by 4 matrix: execution time: 105 us 8 by 8 matrix: execution time: 299 us ###Markdown The above-mentioned scaling is not obeyed strictly. The reason for the discrepancy is that the function call is still the same for all three cases: the input must be inspected, the output array must be created, and so on. dot`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html**WARNING:** numpy applies upcasting rules for the multiplication of matrices, while `ulab` simply returns a float matrix. Once you can invert a matrix, you might want to know, whether the inversion is correct. You can simply take the original matrix and its inverse, and multiply them by calling the `dot` function, which takes the two matrices as its arguments. If the matrix dimensions do not match, the function raises a `ValueError`. The result of the multiplication is expected to be the unit matrix, which is demonstrated below. 
###Code %%micropython -pyboard 1 import ulab as np from ulab import linalg m = np.array([[1, 2, 3], [4, 5, 6], [7, 10, 9]], dtype=np.uint8) n = linalg.inv(m) print("m:\n", m) print("\nm^-1:\n", n) # this should be the unit matrix print("\nm*m^-1:\n", linalg.dot(m, n)) ###Output m: array([[1, 2, 3], [4, 5, 6], [7, 10, 9]], dtype=uint8) m^-1: array([[-1.25, 1.0, -0.25], [0.5, -1.0, 0.5], [0.4166667, 0.3333334, -0.25]], dtype=float) m*m^-1: array([[1.0, 2.384186e-07, -1.490116e-07], [-2.980232e-07, 1.000001, -4.172325e-07], [-3.278255e-07, 1.311302e-06, 0.9999992]], dtype=float) ###Markdown Note that for matrix multiplication you don't necessarily need square matrices, it is enough, if their dimensions are compatible (i.e., the the left-hand-side matrix has as many columns, as does the right-hand-side matrix rows): ###Code %%micropython -unix 1 import ulab as np from ulab import linalg m = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=np.uint8) n = np.array([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=np.uint8) print(m) print(n) print(linalg.dot(m, n)) ###Output array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=uint8) array([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=uint8) array([[7.0, 10.0], [23.0, 34.0]], dtype=float) ###Markdown det`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.det.htmlThe `det` function takes a square matrix as its single argument, and calculates the determinant. The calculation is based on successive elimination of the matrix elements, and the return value is a float, even if the input array was of integer type. ###Code %%micropython -pyboard 1 import ulab as np from ulab import linalg a = np.array([[1, 2], [3, 4]], dtype=np.uint8) print(linalg.det(a)) ###Output -2.0 ###Markdown BenchmarkSince the routine for calculating the determinant is pretty much the same as for finding the [inverse of a matrix](inv), the execution times are similar: ###Code %%micropython -pyboard 1 @timeit def matrix_det(m): return linalg.inv(m) m = np.array([[1, 2, 3, 4, 5, 6, 7, 8], [0, 5, 6, 4, 5, 6, 4, 5], [0, 0, 9, 7, 8, 9, 7, 8], [0, 0, 0, 10, 11, 12, 11, 12], [0, 0, 0, 0, 4, 6, 7, 8], [0, 0, 0, 0, 0, 5, 6, 7], [0, 0, 0, 0, 0, 0, 7, 6], [0, 0, 0, 0, 0, 0, 0, 2]]) matrix_det(m) ###Output execution time: 294 us ###Markdown eig`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eig.htmlThe `eig` function calculates the eigenvalues and the eigenvectors of a real, symmetric square matrix. If the matrix is not symmetric, a `ValueError` will be raised. The function takes a single argument, and returns a tuple with the eigenvalues, and eigenvectors. With the help of the eigenvectors, amongst other things, you can implement sophisticated stabilisation routines for robots. 
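In other words, `eig` solves the standard eigenvalue problem $A\mathbf{v} = \lambda\mathbf{v}$; for a real, symmetric matrix the eigenvalues are guaranteed to be real, and the eigenvectors can be chosen orthogonal to one another, which is presumably why the routine restricts itself to symmetric input.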
###Code %%micropython -pyboard 1 import ulab as np from ulab import linalg a = np.array([[1, 2, 1, 4], [2, 5, 3, 5], [1, 3, 6, 1], [4, 5, 1, 7]], dtype=np.uint8) x, y = linalg.eig(a) print('eigenvectors of a:\n', x) print('\neigenvalues of a:\n', y) ###Output eigenvectors of a: array([-1.165288, 0.8029362, 5.585626, 13.77673], dtype=float) eigenvalues of a: array([[0.8151754, -0.4499267, -0.1643907, 0.3256237], [0.2211193, 0.7847154, 0.08373602, 0.5729892], [-0.1340859, -0.3100657, 0.8742685, 0.3486182], [-0.5182822, -0.2926556, -0.4490192, 0.6664218]], dtype=float) ###Markdown The same matrix diagonalised with `numpy` yields: ###Code a = array([[1, 2, 1, 4], [2, 5, 3, 5], [1, 3, 6, 1], [4, 5, 1, 7]], dtype=np.uint8) x, y = eig(a) print('eigenvectors of a:\n', x) print('\neigenvalues of a:\n', y) ###Output eigenvectors of a: [13.77672606 -1.16528837 0.80293655 5.58562576] eigenvalues of a: [[ 0.32561419 0.815156 0.44994112 -0.16446602] [ 0.57300777 0.22113342 -0.78469926 0.08372081] [ 0.34861093 -0.13401142 0.31007764 0.87427868] [ 0.66641421 -0.51832581 0.29266348 -0.44897499]] ###Markdown When comparing results, we should keep two things in mind: 1. the eigenvalues and eigenvectors are not necessarily sorted in the same way2. an eigenvector can be multiplied by an arbitrary non-zero scalar, and it is still an eigenvector with the same eigenvalue. This is why all signs of the eigenvector belonging to 5.58, and 0.80 are flipped in `ulab` with respect to `numpy`. This difference, however, is of absolutely no consequence. Computation expensesSince the function is based on [Givens rotations](https://en.wikipedia.org/wiki/Givens_rotation) and runs till convergence is achieved, or till the maximum number of allowed rotations is exhausted, there is no universal estimate for the time required to find the eigenvalues. However, an order of magnitude can, at least, be guessed based on the measurement below: ###Code %%micropython -pyboard 1 import ulab as np from ulab import linalg @timeit def matrix_eig(a): return linalg.eig(a) a = np.array([[1, 2, 1, 4], [2, 5, 3, 5], [1, 3, 6, 1], [4, 5, 1, 7]], dtype=np.uint8) matrix_eig(a) ###Output execution time: 111 us ###Markdown Cholesky decomposition`numpy`: https://docs.scipy.org/doc/numpy-1.17.0/reference/generated/numpy.linalg.cholesky.html`cholesky` takes a positive definite, symmetric square matrix as its single argument, and returns *square root matrix* in the lower triangular form. If the input argument does not fulfill the positivity or symmetry condition, a `ValueError` is raised. ###Code %%micropython -unix 1 import ulab from ulab import linalg a = ulab.array([[25, 15, -5], [15, 18, 0], [-5, 0, 11]]) print('a: ', a) print('\n' + '='*20 + '\nCholesky decomposition\n', linalg.cholesky(a)) ###Output a: array([[25.0, 15.0, -5.0], [15.0, 18.0, 0.0], [-5.0, 0.0, 11.0]], dtype=float) ==================== Cholesky decomposition array([[5.0, 0.0, 0.0], [3.0, 3.0, 0.0], [-1.0, 1.0, 3.0]], dtype=float) ###Markdown norm`numpy`: https://numpy.org/doc/stable/reference/generated/numpy.linalg.norm.htmlThe function takes a vector or matrix without options, and returns its 2-norm, i.e., the square root of the sum of the square of the elements. 
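Written out, the value returned is $\|a\|_2 = \sqrt{\sum_i a_i^2}$; for the vector $[1, 2, 3, 4, 5]$ of the snippet below this is $\sqrt{55} \approx 7.416$, and for the $3\times 3$ matrix it is $\sqrt{1^2 + 2^2 + \ldots + 9^2} = \sqrt{285} \approx 16.88$, in agreement with the printed values.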
###Code %%micropython -unix 1 import ulab from ulab import linalg a = ulab.array([1, 2, 3, 4, 5]) b = ulab.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) print('norm of a:', linalg.norm(a)) print('norm of b:', linalg.norm(b)) ###Output norm of a: 7.416198487095663 norm of b: 16.88194301613414 ###Markdown trace`numpy`: https://docs.scipy.org/doc/numpy-1.17.0/reference/generated/numpy.linalg.trace.htmlThe `trace` function returns the sum of the diagonal elements of a square matrix. If the input argument is not a square matrix, an exception will be raised.The scalar so returned will inherit the type of the input array, i.e., integer arrays have integer trace, and floating point arrays a floating point trace. ###Code %%micropython -unix 1 import ulab from ulab import linalg a = ulab.array([[25, 15, -5], [15, 18, 0], [-5, 0, 11]], dtype=ulab.int8) print('a: ', a) print('\ntrace of a: ', linalg.trace(a)) b = ulab.array([[25, 15, -5], [15, 18, 0], [-5, 0, 11]], dtype=ulab.float) print('='*20 + '\nb: ', b) print('\ntrace of b: ', linalg.trace(b)) ###Output a: array([[25, 15, -5], [15, 18, 0], [-5, 0, 11]], dtype=int8) trace of a: 54 ==================== b: array([[25.0, 15.0, -5.0], [15.0, 18.0, 0.0], [-5.0, 0.0, 11.0]], dtype=float) trace of b: 54.0 ###Markdown PolynomialsFunctions in the polynomial sub-module can be invoked by importing the module first. polyval`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.polyval.html`polyval` takes two arguments, both arrays or other iterables. ###Code %%micropython -unix 1 import ulab as np from ulab import poly p = [1, 1, 1, 0] x = [0, 1, 2, 3, 4] print('coefficients: ', p) print('independent values: ', x) print('\nvalues of p(x): ', poly.polyval(p, x)) # the same works with one-dimensional ndarrays a = np.array(x) print('\nndarray (a): ', a) print('value of p(a): ', poly.polyval(p, a)) ###Output coefficients: [1, 1, 1, 0] independent values: [0, 1, 2, 3, 4] values of p(x): array([0.0, 3.0, 14.0, 39.0, 84.0], dtype=float) ndarray (a): array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=float) value of p(a): array([0.0, 3.0, 14.0, 39.0, 84.0], dtype=float) ###Markdown polyfit`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.polyfit.htmlpolyfit takes two, or three arguments. The last one is the degree of the polynomial that will be fitted, the last but one is an array or iterable with the `y` (dependent) values, and the first one, an array or iterable with the `x` (independent) values, can be dropped. If that is the case, `x` will be generated in the function, assuming uniform sampling. If the length of `x`, and `y` are not the same, the function raises a `ValueError`. 
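Since the fitted coefficients are returned in a form that `polyval` understands, a simple consistency check (a sketch re-using the data of the example below, not an additional feature of the module) is to evaluate the fit at the original sample points:

```python
# sketch: the data below are exactly quadratic, so the degree-2 fit
# should reproduce them up to rounding errors
import ulab as np
from ulab import poly

x = np.array([0, 1, 2, 3, 4, 5, 6])
y = np.array([9, 4, 1, 0, 1, 4, 9])

p = poly.polyfit(x, y, 2)      # coefficients of the fitted parabola
print(poly.polyval(p, x))      # expected to be numerically close to y
```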
###Code %%micropython -unix 1 import ulab as np from ulab import poly x = np.array([0, 1, 2, 3, 4, 5, 6]) y = np.array([9, 4, 1, 0, 1, 4, 9]) print('independent values:\t', x) print('dependent values:\t', y) print('fitted values:\t\t', poly.polyfit(x, y, 2)) # the same with missing x print('\ndependent values:\t', y) print('fitted values:\t\t', poly.polyfit(y, 2)) ###Output independent values: array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=float) dependent values: array([9.0, 4.0, 1.0, 0.0, 1.0, 4.0, 9.0], dtype=float) fitted values: array([1.0, -6.0, 9.000000000000004], dtype=float) dependent values: array([9.0, 4.0, 1.0, 0.0, 1.0, 4.0, 9.0], dtype=float) fitted values: array([1.0, -6.0, 9.000000000000004], dtype=float) ###Markdown Execution time`polyfit` is based on the inversion of a matrix (there is more on the background in https://en.wikipedia.org/wiki/Polynomial_regression), and it requires the intermediate storage of `2*N*(deg+1)` floats, where `N` is the number of entries in the input array, and `deg` is the fit's degree. The additional computation costs of the matrix inversion discussed in [inv](inv) also apply. The example from above needs around 150 microseconds to return: ###Code %%micropython -pyboard 1 import ulab as np from ulab import poly @timeit def time_polyfit(x, y, n): return poly.polyfit(x, y, n) x = np.array([0, 1, 2, 3, 4, 5, 6]) y = np.array([9, 4, 1, 0, 1, 4, 9]) time_polyfit(x, y, 2) ###Output execution time: 153 us ###Markdown Fourier transformsFunctions related to Fourier transforms can be called by importing the `fft` sub-module first.`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.ifft.html fftSince `ulab`'s `ndarray` does not support complex numbers, the invocation of the Fourier transform differs from that in `numpy`. In `numpy`, you can simply pass an array or iterable to the function, and it will be treated as a complex array: ###Code fft.fft([1, 2, 3, 4, 1, 2, 3, 4]) ###Output _____no_output_____ ###Markdown **WARNING:** The array that is returned is also complex, i.e., the real and imaginary components are cast together. In `ulab`, the real and imaginary parts are treated separately: you have to pass two `ndarray`s to the function, although, the second argument is optional, in which case the imaginary part is assumed to be zero.**WARNING:** The function, as opposed to `numpy`, returns a 2-tuple, whose elements are two `ndarray`s, holding the real and imaginary parts of the transform separately. ###Code %%micropython -pyboard 1 import ulab as np from ulab import vector from ulab import fft x = np.linspace(0, 10, num=1024) y = vector.sin(x) z = np.zeros(len(x)) a, b = fft.fft(x) print('real part:\t', a) print('\nimaginary part:\t', b) c, d = fft.fft(x, z) print('\nreal part:\t', c) print('\nimaginary part:\t', d) ###Output real part: array([5119.996, -5.004663, -5.004798, ..., -5.005482, -5.005643, -5.006577], dtype=float) imaginary part: array([0.0, 1631.333, 815.659, ..., -543.764, -815.6588, -1631.333], dtype=float) real part: array([5119.996, -5.004663, -5.004798, ..., -5.005482, -5.005643, -5.006577], dtype=float) imaginary part: array([0.0, 1631.333, 815.659, ..., -543.764, -815.6588, -1631.333], dtype=float) ###Markdown ifftThe above-mentioned rules apply to the inverse Fourier transform. The inverse is also normalised by `N`, the number of elements, as is customary in `numpy`. With the normalisation, we can ascertain that the inverse of the transform is equal to the original array. 
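Assuming the usual `numpy` conventions referred to above (the exact convention is not spelled out in the manual), the transform pair implemented here corresponds to $$Y_k = \sum_{n=0}^{N-1} y_n\, e^{-2\pi i k n/N}, \qquad y_n = \frac{1}{N}\sum_{k=0}^{N-1} Y_k\, e^{2\pi i k n/N},$$ so that taking the transform and then its inverse reproduces the original data, as the snippet below demonstrates.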
###Code %%micropython -pyboard 1 import ulab as np from ulab import vector from ulab import fft x = np.linspace(0, 10, num=1024) y = vector.sin(x) a, b = fft.fft(y) print('original vector:\t', y) y, z = fft.ifft(a, b) # the real part should be equal to y print('\nreal part of inverse:\t', y) # the imaginary part should be equal to zero print('\nimaginary part of inverse:\t', z) ###Output original vector: array([0.0, 0.009775016, 0.0195491, ..., -0.5275068, -0.5357859, -0.5440139], dtype=float) real part of inverse: array([-2.980232e-08, 0.0097754, 0.0195494, ..., -0.5275064, -0.5357857, -0.5440133], dtype=float) imaginary part of inverse: array([-2.980232e-08, -1.451171e-07, 3.693752e-08, ..., 6.44871e-08, 9.34986e-08, 2.18336e-07], dtype=float) ###Markdown Note that unlike in `numpy`, the length of the array on which the Fourier transform is carried out must be a power of 2. If this is not the case, the function raises a `ValueError` exception. spectrogramIn addition to the Fourier transform and its inverse, `ulab` also sports a function called `spectrogram`, which returns the absolute value of the Fourier transform. This could be used to find the dominant spectral component in a time series. The arguments are treated in the same way as in `fft`, and `ifft`. ###Code %%micropython -unix 1 import ulab as np from ulab import vector from ulab import fft x = np.linspace(0, 10, num=1024) y = vector.sin(x) a = fft.spectrogram(y) print('original vector:\t', y) print('\nspectrum:\t', a) ###Output original vector: array([0.0, 0.009775015390171337, 0.01954909674625918, ..., -0.5275140569487312, -0.5357931822978732, -0.5440211108893639], dtype=float) spectrum: array([187.8635087634579, 315.3112063607119, 347.8814873399374, ..., 84.45888934298905, 347.8814873399374, 315.3112063607118], dtype=float) ###Markdown As such, `spectrogram` is really just a shorthand for `np.sqrt(a*a + b*b)`: ###Code %%micropython -pyboard 1 import ulab as np from ulab import fft from ulab import vector x = np.linspace(0, 10, num=1024) y = vector.sin(x) a, b = fft.fft(y) print('\nspectrum calculated the hard way:\t', vector.sqrt(a*a + b*b)) a = fft.spectrogram(y) print('\nspectrum calculated the lazy way:\t', a) ###Output spectrum calculated the hard way: array([187.8641, 315.3125, 347.8804, ..., 84.4587, 347.8803, 315.3124], dtype=float) spectrum calculated the lazy way: array([187.8641, 315.3125, 347.8804, ..., 84.4587, 347.8803, 315.3124], dtype=float) ###Markdown Computation and storage costs RAMThe FFT routine of `ulab` calculates the transform in place. This means that beyond reserving space for the two `ndarray`s that will be returned (the computation uses these two as intermediate storage space), only a handful of temporary variables, all floats or 32-bit integers, are required. Speed of FFTsA comment on the speed: a 1024-point transform implemented in python would cost around 90 ms, and 13 ms in assembly, if the code runs on the pyboard, v.1.1. You can gain a factor of four by moving to the D series https://github.com/peterhinch/micropython-fourier/blob/master/README.md8-performance. ###Code %%micropython -pyboard 1 import ulab as np from ulab import vector from ulab import fft x = np.linspace(0, 10, num=1024) y = vector.sin(x) @timeit def np_fft(y): return fft.fft(y) a, b = np_fft(y) ###Output execution time: 1985 us ###Markdown The C implementation runs in less than 2 ms on the pyboard (we have just measured that), and has been reported to run in under 0.8 ms on the D series board. 
That is an improvement of at least a factor of four. Filter routines

Functions in the `filter` module can be called by importing the sub-module first. convolve

`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.convolve.html

Returns the discrete, linear convolution of two one-dimensional sequences. Only the ``full`` mode is supported, and the ``mode`` named parameter is not accepted. Note that all other modes can be had by slicing a ``full`` result. ###Code
%%micropython -unix 1

import ulab as np
from ulab import filter

x = np.array((1,2,3))
y = np.array((1,10,100,1000))

print(filter.convolve(x, y))
###Output
array([1.0, 12.0, 123.0, 1230.0, 2300.0, 3000.0], dtype=float)
###Markdown
sosfilt

`scipy`: https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.sosfilt.html

Filter data along one dimension using cascaded second-order sections. The function takes two positional arguments, `sos`, the filter segments of length 6, and the one-dimensional, uniformly sampled data set to be filtered. Returns the filtered data, or the filtered data and the final filter delays, if the `zi` keyword argument is supplied. The keyword argument must be a float `ndarray` of shape `(n_sections, 2)`. If `zi` is not passed to the function, the initial values are assumed to be 0. ###Code
%%micropython -unix 1

import ulab
from ulab import filter as filter

x = ulab.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
sos = [[1, 2, 3, 1, 5, 6], [1, 2, 3, 1, 5, 6]]
y = filter.sosfilt(sos, x)
print('y: ', y)
%%micropython -unix 1

import ulab
from ulab import filter as filter

x = ulab.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
sos = [[1, 2, 3, 1, 5, 6], [1, 2, 3, 1, 5, 6]]
# initial conditions of the filter
zi = ulab.array([[1, 2], [3, 4]])

y, zf = filter.sosfilt(sos, x, zi=zi)
print('y: ', y)
print('\n' + '='*40 + '\nzf: ', zf)
###Output
y: array([4.0, -16.0, 63.00000000000001, -227.0, 802.9999999999999, -2751.0, 9271.000000000001, -30775.0, 101067.0, -328991.0000000001], dtype=float)

========================================
zf: array([[37242.0, 74835.0], [1026187.0, 1936542.0]], dtype=float)
###Markdown
Comparison of arrays

Functions in the `compare` module can be called by importing the sub-module first. equal, not_equal

`numpy`: https://numpy.org/doc/stable/reference/generated/numpy.equal.html

`numpy`: https://numpy.org/doc/stable/reference/generated/numpy.not_equal.html

In `micropython`, equality of arrays or scalars can be established by utilising the `==`, `!=`, `<`, `>`, `<=`, `>=` binary operators. In `circuitpython`, `==` and `!=` will produce unexpected results. In order to avoid this discrepancy, and to maintain compatibility with `numpy`, `ulab` implements the `equal` and `not_equal` operators that return the same results, irrespective of the `python` implementation. These two functions take two `ndarray`s, or scalars as their arguments. No keyword arguments are implemented.
###Code %%micropython -unix 1 import ulab as np a = np.array(range(9)) b = np.zeros(9) print('a: ', a) print('b: ', b) print('\na == b: ', np.compare.equal(a, b)) print('a != b: ', np.compare.not_equal(a, b)) # comparison with scalars print('a == 2: ', np.compare.equal(a, 2)) ###Output a: array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) b: array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=float) a == b: [True, False, False, False, False, False, False, False, False] a != b: [False, True, True, True, True, True, True, True, True] a == 2: [False, False, True, False, False, False, False, False, False] ###Markdown minimum`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.minimum.htmlReturns the minimum of two arrays, or two scalars, or an array, and a scalar. Partial broadcasting is implemented. If the arrays are of different `dtype`, the output is upcast as in [Binary operators](Binary-operators). If both inputs are scalars, a scalar is returned. Only positional arguments are implemented. maximum`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.maximum.htmlReturns the maximum of two arrays, or two scalars, or an array, and a scalar. Partial broadcasting is implemented. If the arrays are of different `dtype`, the output is upcast as in [Binary operators](Binary-operators). If both inputs are scalars, a scalar is returned. Only positional arguments are implemented. ###Code %%micropython -unix 1 import ulab a = ulab.array([1, 2, 3, 4, 5], dtype=ulab.uint8) b = ulab.array([5, 4, 3, 2, 1], dtype=ulab.float) print('minimum of a, and b:') print(ulab.compare.minimum(a, b)) print('\nmaximum of a, and b:') print(ulab.compare.maximum(a, b)) print('\nmaximum of 1, and 5.5:') print(ulab.compare.maximum(1, 5.5)) ###Output minimum of a, and b: array([1.0, 2.0, 3.0, 2.0, 1.0], dtype=float) maximum of a, and b: array([5.0, 4.0, 3.0, 4.0, 5.0], dtype=float) maximum of 1, and 5.5: 5.5 ###Markdown clip`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.clip.htmlClips an array, i.e., values that are outside of an interval are clipped to the interval edges. The function is equivalent to `maximum(a_min, minimum(a, a_max))`. or two scalars, hence partial broadcasting takes place exactly as in [minimum](minimum). If the arrays are of different `dtype`, the output is upcast as in [Binary operators](Binary-operators). ###Code %%micropython -unix 1 import ulab a = ulab.array(range(9), dtype=ulab.uint8) print('a:\t\t', a) print('clipped:\t', ulab.compare.clip(a, 3, 7)) b = 3 * ulab.ones(len(a), dtype=ulab.float) print('\na:\t\t', a) print('b:\t\t', b) print('clipped:\t', ulab.compare.clip(a, b, 7)) ###Output a: array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=uint8) clipped: array([3, 3, 3, 3, 4, 5, 6, 7, 7], dtype=uint8) a: array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=uint8) b: array([3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0], dtype=float) clipped: array([3.0, 3.0, 3.0, 3.0, 4.0, 5.0, 6.0, 7.0, 7.0], dtype=float) ###Markdown Interpolation, root finding, and function minimisationThe `approx` sub-module defines functions for interpolating numerical data, and finding the roots and the minimum of arbitrary functions defined in `python`. Note that routines that work with user-definedfunctions still have to call the underlying `python` code, and therefore, gains in speed are not as significant as with other vectorised operations. As a rule of thumb, a factor of two can be expected, when compared to an optimised python implementation. 
interp`numpy`: https://docs.scipy.org/doc/numpy/numpy.interpThe `interp` function returns the linearly interpolated values of a one-dimensional numerical array. It requires three positional arguments, `x`, at which the interpolated values are evaluated, `xp`, the array of the independent variables of the data, and `fp`, the array of the dependent values of the data. `xp` must be a monotonically increasing sequence of numbers.Two keyword arguments, `left`, and `right` can also be supplied; these determine the return values, if `x < xp[0]`, and `x > xp[-1]`, respectively. If these arguments are not supplied, `left`, and `right` default to `fp[0]`, and `fp[-1]`, respectively. ###Code %%micropython -unix 1 import ulab from ulab import approx x = ulab.array([1, 2, 3, 4, 5]) xp = ulab.array([1, 2, 3, 4]) fp = ulab.array([1, 2, 3, 5]) x = x - 0.2 print(x) print(approx.interp(x, xp, fp)) print(approx.interp(x, xp, fp, left=0.0)) print(approx.interp(x, xp, fp, right=10.0)) ###Output array([0.8, 1.8, 2.8, 3.8, 4.8], dtype=float) array([1.0, 1.8, 2.8, 4.6, 5.0], dtype=float) array([0.0, 1.8, 2.8, 4.6, 5.0], dtype=float) array([1.0, 1.8, 2.8, 4.6, 10.0], dtype=float) ###Markdown newton`scipy`:https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.newton.html`newton` finds a zero of a real, user-defined function using the Newton-Raphson (or secant or Halley’s) method. The routine requires two positional arguments, the function, and the initial value. Three keyword arguments can be supplied to control the iteration. These are the absolute and relative tolerances `tol`, and `rtol`, respectively, and the number of iterations before stopping, `maxiter`. The function returns a single scalar, the position of the root. ###Code %%micropython -unix 1 import ulab from ulab import approx def f(x): return x*x*x - 2.0 print(approx.newton(f, 3., tol=0.001, rtol=0.01)) ###Output 1.260135727246117 ###Markdown bisect `scipy`: https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.bisect.html`bisect` finds the root of a function of one variable using a simple bisection routine. It takes three positional arguments, the function itself, and two starting points. The function must have opposite signs at the starting points. Returned is the position of the root.Two keyword arguments, `xtol`, and `maxiter` can be supplied to control the accuracy, and the number of bisections, respectively. ###Code %%micropython -unix 1 import ulab from ulab import approx def f(x): return x*x - 1 print(approx.bisect(f, 0, 4)) print('only 8 bisections: ', approx.bisect(f, 0, 4, maxiter=8)) print('with 0.1 accuracy: ', approx.bisect(f, 0, 4, xtol=0.1)) ###Output 0.9999997615814209 only 8 bisections: 0.984375 with 0.1 accuracy: 0.9375 ###Markdown PerformanceSince the `bisect` routine calls user-defined `python` functions, the speed gain is only about a factor of two, if compared to a purely `python` implementation. 
###Code %%micropython -pyboard 1 import ulab from ulab import approx def f(x): return (x-1)*(x-1) - 2.0 def bisect(f, a, b, xtol=2.4e-7, maxiter=100): if f(a) * f(b) > 0: raise ValueError rtb = a if f(a) < 0.0 else b dx = b - a if f(a) < 0.0 else a - b for i in range(maxiter): dx *= 0.5 x_mid = rtb + dx mid_value = f(x_mid) if mid_value < 0: rtb = x_mid if abs(dx) < xtol: break return rtb @timeit def bisect_approx(f, a, b): return approx.bisect(f, a, b) @timeit def bisect_timed(f, a, b): return bisect(f, a, b) print('bisect running in python') bisect_timed(f, 3, 2) print('bisect running in C') bisect_approx(f, 3, 2) ###Output bisect running in python execution time: 1270 us bisect running in C execution time: 642 us ###Markdown fmin`scipy`: https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin.htmlThe `fmin` function finds the position of the minimum of a user-defined function by using the downhill simplex method. Requires two positional arguments, the function, and the initial value. Three keyword arguments, `xatol`, `fatol`, and `maxiter` stipulate conditions for stopping. ###Code %%micropython -unix 1 import ulab from ulab import approx def f(x): return (x-1)**2 - 1 print(approx.fmin(f, 3.0)) print(approx.fmin(f, 3.0, xatol=0.1)) ###Output 0.9996093749999952 1.199999999999996 ###Markdown trapz`numpy`: https://numpy.org/doc/stable/reference/generated/numpy.trapz.htmlThe function takes one or two one-dimensional `ndarray`s, and integrates the dependent values (`y`) using the trapezoidal rule. If the independent variable (`x`) is given, that is taken as the sample points corresponding to `y`. ###Code %%micropython -unix 1 import ulab from ulab import approx x = ulab.linspace(0, 9, num=10) y = x*x print('x: ', x) print('y: ', y) print('============================') print('integral of y: ', approx.trapz(y)) print('integral of y at x: ', approx.trapz(y, x=x)) ###Output x: array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0], dtype=float) y: array([0.0, 1.0, 4.0, 9.0, 16.0, 25.0, 36.0, 49.0, 64.0, 81.0], dtype=float) ============================ integral of y: 244.5 integral of y at x: 244.5 ###Markdown Notebook conversion ###Code import nbformat as nb import nbformat.v4.nbbase as nb4 from nbconvert import RSTExporter def convert_notebook(node, fn): (rst, resources) = rstexporter.from_notebook_node(notebook) with open(fn, 'w') as fout: fout.write(rst) rstexporter = RSTExporter() rstexporter.template_file = './templates/manual.tpl' source = nb.read('ulab-manual.ipynb', nb.NO_CONVERT) append_cell = False notebook = nb4.new_notebook() for j, cell in enumerate(source['cells']): if cell['cell_type'] == 'markdown': # skip everything before Introduction if cell['source'].split('\n')[0].startswith('# Introduction'): append_cell = True if append_cell: notebook.cells.append(cell) convert_notebook(notebook,'./manual/source/ulab.rst') %%writefile ./templates/manual.tpl {%- extends 'display_priority.tpl' -%} {% block in_prompt %} {% endblock in_prompt %} {% block output_prompt %} {% endblock output_prompt %} {% block input scoped%} {%- if cell.source.split('\n')[0].startswith('%%micropython') -%} .. code:: {{ '\n'.join(['# code to be run in micropython'] + cell.source.strip().split('\n')[1:]) | indent}} {%- else -%} .. 
code:: {{ '\n'.join(['# code to be run in CPython\n'] + cell.source.strip().split('\n')) | indent}} {%- endif -%} {% endblock input %} {% block error %} :: {{ super() }} {% endblock error %} {% block traceback_line %} {{ line | indent | strip_ansi }} {% endblock traceback_line %} {% block execute_result %} {% block data_priority scoped %} {{ super() }} {% endblock %} {% endblock execute_result %} {% block stream %} .. parsed-literal:: {{ output.text | indent }} {% endblock stream %} {% block data_svg %} .. image:: {{ output.metadata.filenames['image/svg+xml'] | urlencode }} {% endblock data_svg %} {% block data_png %} .. image:: {{ output.metadata.filenames['image/png'] | urlencode }} {%- set width=output | get_metadata('width', 'image/png') -%} {%- if width is not none %} :width: {{ width }}px {%- endif %} {%- set height=output | get_metadata('height', 'image/png') -%} {%- if height is not none %} :height: {{ height }}px {%- endif %} {% endblock data_png %} {% block data_jpg %} .. image:: {{ output.metadata.filenames['image/jpeg'] | urlencode }} {%- set width=output | get_metadata('width', 'image/jpeg') -%} {%- if width is not none %} :width: {{ width }}px {%- endif %} {%- set height=output | get_metadata('height', 'image/jpeg') -%} {%- if height is not none %} :height: {{ height }}px {%- endif %} {% endblock data_jpg %} {% block data_markdown %} {{ output.data['text/markdown'] | convert_pandoc("markdown", "rst") }} {% endblock data_markdown %} {% block data_latex %} .. math:: {{ output.data['text/latex'] | strip_dollars | indent }} {% endblock data_latex %} {% block data_text scoped %} .. parsed-literal:: {{ output.data['text/plain'] | indent }} {% endblock data_text %} {% block data_html scoped %} .. raw:: html {{ output.data['text/html'] | indent }} {% endblock data_html %} {% block markdowncell scoped %} {{ cell.source | convert_pandoc("markdown", "rst") }} {% endblock markdowncell %} {%- block rawcell scoped -%} {%- if cell.metadata.get('raw_mimetype', '').lower() in resources.get('raw_mimetypes', ['']) %} {{cell.source}} {% endif -%} {%- endblock rawcell -%} {% block headingcell scoped %} {{ ("#" * cell.level + cell.source) | replace('\n', ' ') | convert_pandoc("markdown", "rst") }} {% endblock headingcell %} {% block unknowncell scoped %} unknown type {{cell.type}} {% endblock unknowncell %} ###Output Overwriting ./templates/manual.tpl ###Markdown Notebook magic ###Code from IPython.core.magic import Magics, magics_class, line_cell_magic from IPython.core.magic import cell_magic, register_cell_magic, register_line_magic from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring import subprocess import os @magics_class class PyboardMagic(Magics): @cell_magic @magic_arguments() @argument('-skip') @argument('-unix') @argument('-pyboard') @argument('-file') @argument('-data') @argument('-time') @argument('-memory') def micropython(self, line='', cell=None): args = parse_argstring(self.micropython, line) if args.skip: # doesn't care about the cell's content print('skipped execution') return None # do not parse the rest if args.unix: # tests the code on the unix port. 
Note that this works on unix only with open('/dev/shm/micropython.py', 'w') as fout: fout.write(cell) proc = subprocess.Popen(["../../micropython/ports/unix/micropython", "/dev/shm/micropython.py"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) print(proc.stdout.read().decode("utf-8")) print(proc.stderr.read().decode("utf-8")) return None if args.file: # can be used to copy the cell content onto the pyboard's flash spaces = " " try: with open(args.file, 'w') as fout: fout.write(cell.replace('\t', spaces)) printf('written cell to {}'.format(args.file)) except: print('Failed to write to disc!') return None # do not parse the rest if args.data: # can be used to load data from the pyboard directly into kernel space message = pyb.exec(cell) if len(message) == 0: print('pyboard >>>') else: print(message.decode('utf-8')) # register new variable in user namespace self.shell.user_ns[args.data] = string_to_matrix(message.decode("utf-8")) if args.time: # measures the time of executions pyb.exec('import utime') message = pyb.exec('t = utime.ticks_us()\n' + cell + '\ndelta = utime.ticks_diff(utime.ticks_us(), t)' + "\nprint('execution time: {:d} us'.format(delta))") print(message.decode('utf-8')) if args.memory: # prints out memory information message = pyb.exec('from micropython import mem_info\nprint(mem_info())\n') print("memory before execution:\n========================\n", message.decode('utf-8')) message = pyb.exec(cell) print(">>> ", message.decode('utf-8')) message = pyb.exec('print(mem_info())') print("memory after execution:\n========================\n", message.decode('utf-8')) if args.pyboard: message = pyb.exec(cell) print(message.decode('utf-8')) ip = get_ipython() ip.register_magics(PyboardMagic) ###Output _____no_output_____ ###Markdown pyboard ###Code import pyboard pyb = pyboard.Pyboard('/dev/ttyACM0') pyb.enter_raw_repl() pyb.exit_raw_repl() pyb.close() %%micropython -pyboard 1 import utime import ulab as np def timeit(n=1000): def wrapper(f, *args, **kwargs): func_name = str(f).split(' ')[1] def new_func(*args, **kwargs): run_times = np.zeros(n, dtype=np.uint16) for i in range(n): t = utime.ticks_us() result = f(*args, **kwargs) run_times[i] = utime.ticks_diff(utime.ticks_us(), t) print('{}() execution times based on {} cycles'.format(func_name, n, (delta2-delta1)/n)) print('\tbest: %d us'%np.min(run_times)) print('\tworst: %d us'%np.max(run_times)) print('\taverage: %d us'%np.mean(run_times)) print('\tdeviation: +/-%.3f us'%np.std(run_times)) return result return new_func return wrapper def timeit(f, *args, **kwargs): func_name = str(f).split(' ')[1] def new_func(*args, **kwargs): t = utime.ticks_us() result = f(*args, **kwargs) print('execution time: ', utime.ticks_diff(utime.ticks_us(), t), ' us') return result return new_func ###Output ###Markdown Introduction In the [last chapter](https://micropython-usermod.readthedocs.io/en/latest/usermods_15.html) of the usermod documentation, I mentioned that I have another story, for another day. The day has come, so here is my story. Enter ulab`ulab` is a `numpy`-like module for `micropython`, meant to simplify and speed up common mathematical operations on arrays. The primary goal was to implement a small subset of `numpy` that might be useful in the context of a microcontroller. This means low-level data processing of linear (array) and two-dimensional (matrix) data. PurposeOf course, the first question that one has to answer is, why on Earth one would need a fast math library on a microcontroller. 
After all, it is not expected that heavy number crunching is going to take place on bare metal. It is not meant to. On a PC, the main reason for writing fast code is the sheer amount of data that one wants to process. On a microcontroller, the data volume is probably small, but it might lead to catastrophic system failure, if these data are not processed in time, because the microcontroller is supposed to interact with the outside world in a timely fashion. In fact, this latter objective was the initiator of this project: I needed the Fourier transform of a signal coming from the ADC of the pyboard, and all available options were simply too slow. In addition to speed, another issue that one has to keep in mind when working with embedded systems is the amount of available RAM: I believe, everything here could be implemented in pure python with relatively little effort, but the price we would have to pay for that is not only speed, but RAM, too. python code, if is not frozen, and compiled into the firmware, has to be compiled at runtime, which is not exactly a cheap process. On top of that, if numbers are stored in a list or tuple, which would be the high-level container, then they occupy 8 bytes, no matter, whether they are all smaller than 100, or larger than one hundred million. This is obviously a waste of resources in an environment, where resources are scarce. Finally, there is a reason for using `micropython` in the first place. Namely, that a microcontroller can be programmed in a very elegant, and *pythonic* way. But if it is so, why should we not extend this idea to other tasks and concepts that might come up in this context? If there was no other reason than this *elegance*, I would find that convincing enough.Based on the above-mentioned considerations, all functions in `ulab` are implemented in a way that 1. conforms to `numpy` as much as possible2. is so frugal with RAM as possible,3. and yet, fast. Much faster than pure python.The main points of `ulab` are - compact, iterable and slicable containers of numerical data in 1, and 2 dimensions (arrays and matrices). These containers support all the relevant unary and binary operators (e.g., `len`, ==, +, *, etc.)- vectorised computations on micropython iterables and numerical arrays/matrices (in `numpy`-speak, universal functions)- basic linear algebra routines (matrix inversion, multiplication, reshaping, transposition, determinant, and eigenvalues)- polynomial fits to numerical data- fast Fourier transformsAt the time of writing this manual (for version 0.42.0), the library adds approximately 40 kB of extra compiled code to the micropython (pyboard.v.11) firmware. However, if you are tight with flash space, you can easily shave off a couple of kB. See the section on [customising ulab](Custom_builds). Resources and legal mattersThe source code of the module can be found under https://github.com/v923z/micropython-ulab/tree/master/code. The source of this user manual is under https://github.com/v923z/micropython-ulab/tree/master/docs, while the technical details of the implementation are discussed at great length in https://github.com/v923z/micropython-ulab/tree/master/docs/ulab.ipynb. If you want an even thorougher explanation on why the various constructs of the implementation work, and work in that particular way, you can read more on the subject under https://micropython-usermod.readthedocs.io/en/latest/, where I demonstrate, what you have to do, if you want to make a C object behave in a *pythonic* way. 
The MIT licence applies to all material. Friendly requestIf you use `ulab`, and bump into a bug, or think that a particular function is missing, or its behaviour does not conform to `numpy`, please, raise a [ulab issue](https://github.com/v923z/micropython-ulab/issues) on github, so that the community can profit from your experiences. Even better, if you find the project useful, and think that it could be made better, faster, tighter, and shinier, please, consider contributing, and issue a pull request with the implementation of your improvements and new features. `ulab` can only become successful, if it offers what the community needs.These last comments apply to the documentation, too. If, in your opinion, the documentation is obscure, misleading, or not detailed enough, please, let me know, so that *we* can fix it. Differences between micropython-ulab and circuitpython-ulab`ulab` has originally been developed for `micropython`, but has since been integrated into a number of its flavours. Most of these flavours are simply forks of `micropython` itself, with some additional functionality. One of the notable exceptions is `circuitpython`, which has slightly diverged at the core level, and this has some minor consequences. Some of these concern the C implementation details only, which all have been sorted out with the generous and enthusiastic support of Jeff Epler from [Adafruit Industries](http://www.adafruit.com).There are, however, a couple of instances, where the usage in the two environments is slightly different at the python level. These are how the packages can be imported, and how the class properties can be accessed. In both cases, the `circuitpython` implementation results in `numpy`-conform code. `numpy`-compatibility in `micropython` will be implemented as soon as `micropython` itself has the required tools. Till then we have to live with a workaround, which I will point out at the relevant places. Customising `ulab``ulab` implements a great number of functions, which are organised in sub-modules. E.g., functions related to Fourier transforms are located in the `ulab.fft` sub-module, so you would import `fft` as```pythonimport ulabfrom ulab import fft```by which point you can get the FFT of your data by calling `fft.fft(...)`. The idea of such grouping of functions and methods is to provide a means for granularity: It is quite possible that you do not need all functions in a particular application. If you want to save some flash space, you can easily exclude arbitrary sub-modules from the firmware. The [ulab.h](https://github.com/v923z/micropython-ulab/blob/master/code/ulab.h) header file contains a pre-processor flag for each sub-module. The default setting is 1 for each of them. Setting them to 0 removes the module from the compiled firmware. The first couple of lines of the file look like this
```c
// vectorise (all functions) takes approx. 4.5 kB of flash space
#define ULAB_VECTORISE_MODULE (1)
// linalg adds around 6 kB
#define ULAB_LINALG_MODULE (1)
// poly requires approx. 2.5 kB
#define ULAB_POLY_MODULE (1)
```
In order to simplify navigation in the header, each flag begins with `ULAB_`, and continues with the name of the sub-module. This name is also the `.c` file, where the sub-module is implemented. So, e.g., the linear algebra routines can be found in `linalg.c`, and the corresponding compiler flag is `ULAB_LINALG_MODULE`. 
Each section displays a hint as to how much space you can save by un-setting the flag.At first, having to import everything in this way might appear to be overly complicated, but there is a very good reason behind all this: you can find out at the time of importing, whether a function or sub-module is part of your `ulab` firmware, or not. The alternative, namely, that you do not have to import anything beyond `ulab`, could prove catastrophic: you would learn only at run time (at the moment of calling the function in your code) that a particular function is not in the firmware, and that is most probably too late.Except for `fft`, the standard sub-modules, `vector`, `linalg`, `numerical`, and `poly` are all `numpy`-compatible. User-defined functions that accept `ndarray`s as their argument should be implemented in the `extras` sub-module, or its sub-modules. Hints as to how to do that can be found in the section [Extending ulab](Extending-ulab). Supported functions and methods`ulab` supports a number of array operators, which are listed here. I tried to follow the specifications of the `numpy` interface as closely as possible, though, it was not always practical to implement verbatim behaviour. The differences, if any, are in each case small (e.g., a function cannot take all possible keyword arguments), and should not hinder everyday use. In the list below, a single asterisk denotes slight deviations from `numpy`'s nomenclature, and a double asterisk denotes those cases, where a bit more caution should be exercised, though this usually means functions that are not supported by `numpy`.The detailed discussion of the various functions always contains a link to the corresponding `numpy` documentation. However, before going down the rabbit hole, the module also defines a constant, the version, which can always be queried as ###Code %%micropython -unix 1 import ulab as np print('you are running ulab version', np.__version__) ###Output you are running ulab version 0.40.0 ###Markdown If you find a bug, please, include this number in your report! Basic ndarray operations[Unary operators](Unary-operators)[Binary operators](Binary-operators)[Indexing and slicing](Slicing-and-indexing)[ndarray iterators](Iterating-over-arrays)[Comparison operators*](Comparison-operators)[Universal functions](Universal-functions) (also support function calls on general iterables) Methods of ndarrays[.shape*](.shape)[size*](size)[itemsize*](itemsize)[.reshape](.reshape)[.transpose](.transpose)[.flatten**](.flatten) Matrix methods[inv](inv)[dot](dot)[det](det)[roll](roll)[flip](flip) Array initialisation functions[eye](eye)[ones](ones,-zeros)[zeros](ones,-zeros)[linspace](linspace) Statistical and other properties of arrays[min](min,-argmin,-max,-argmax)[argmin](min,-argmin,-max,-argmax)[max](min,-argmin,-max,-argmax)[argmax](min,-argmin,-max,-argmax)[sum](sum,-std,-mean)[std](sum,-std,-mean)[mean](sum,-std,-mean)[diff](diff)[sort](sort)[argsort](argsort) Linear algebra functions[size](size)[inv](inv)[dot](dot)[det](det)[eig](eig)[cholesky](cholesky)[trace](trace) Manipulation of polynomials[polyval](polyval)[polyfit](polyfit) FFT routines[fft**](fft)[ifft**](ifft)[spectrogram**](spectrogram) Filter functions[convolve](convolve) Comparison of arrays[minimum](minimum)[maximum](maximum)[clip](clip) ndarray, the basic containerThe `ndarray` is the underlying container of numerical data. 
It is derived from micropython's own `array` object, but has a great number of extra features starting with how it can be initialised, which operations can be done on it, and which functions can accept it as an argument. One important property of an `ndarray` is that it is also a proper `micropython` iterable.Since the `ndarray` is a binary container, it is also compact, meaning that it takes only a couple of bytes of extra RAM in addition to what is required for storing the numbers themselves. `ndarray`s are also type-aware, i.e., one can save RAM by specifying a data type, and using the smallest reasonable one. Five such types are defined, namely `uint8`, `int8`, which occupy a single byte of memory per datum, `uint16`, and `int16`, which occupy two bytes per datum, and `float`, which occupies four or eight bytes per datum. The precision/size of the `float` type depends on the definition of `mp_float_t`. Some platforms, e.g., the PYBD, implement `double`s, but some, e.g., the pyboard.v.11, don't. You can find out, what type of float your particular platform implements by looking at the output of the [.itemsize](.itemsize) class property.On the following pages, we will see how one can work with `ndarray`s. Those familiar with `numpy` should find that the nomenclature and naming conventions of `numpy` are adhered to as closely as possible. I will point out the few differences, where necessary.For the sake of comparison, in addition to the `ulab` code snippets, sometimes the equivalent `numpy` code is also presented. You can find out, where the snippet is supposed to run by looking at its first line, the header.Hint: you can easily port existing `numpy` code, if you `import ulab as np`. Initialising an arrayA new array can be created by passing either a standard micropython iterable, or another `ndarray` into the constructor. Initialising by passing iterablesIf the iterable is one-dimensional, i.e., one whose elements are numbers, then a row vector will be created and returned. If the iterable is two-dimensional, i.e., one whose elements are again iterables, a matrix will be created. If the lengths of the iterables is not consistent, a `ValueError` will be raised. Iterables of different types can be mixed in the initialisation function. If the `dtype` keyword with the possible `uint8/int8/uint16/int16/float` values is supplied, the new `ndarray` will have that type, otherwise, it assumes `float` as default. ###Code %%micropython -unix 1 import ulab as np a = [1, 2, 3, 4, 5, 6, 7, 8] b = np.array(a) print("a:\t", a) print("b:\t", b) # a two-dimensional array with mixed-type initialisers c = np.array([range(5), range(20, 25, 1), [44, 55, 66, 77, 88]], dtype=np.uint8) print("\nc:\t", c) # and now we throw an exception d = np.array([range(5), range(10), [44, 55, 66, 77, 88]], dtype=np.uint8) print("\nd:\t", d) ###Output a: [1, 2, 3, 4, 5, 6, 7, 8] b: array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) c: array([[0, 1, 2, 3, 4], [20, 21, 22, 23, 24], [44, 55, 66, 77, 88]], dtype=uint8) Traceback (most recent call last): File "/dev/shm/micropython.py", line 15, in <module> ValueError: iterables are not of the same length ###Markdown `ndarray`s are pretty-printed, i.e., if the length is larger than 10, then only the first and last three entries will be printed. Also note that, as opposed to `numpy`, the printout always contains the `dtype`. 
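For comparison, a small CPython/`numpy` sketch is given here: `numpy` also abbreviates the printout of long arrays (by default above 1000 elements), but it does not append the `dtype` for the default `float64`. The `ulab` example follows below.

```python
# CPython / numpy sketch, for comparison with the ulab cell below
import numpy as np

a = np.array(range(2000), dtype=float)
print(a)       # abbreviated printout, and no dtype is shown for the default float64
```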
###Code %%micropython -unix 1 import ulab as np a = np.array(range(200)) print("a:\t", a) ###Output a: array([0.0, 1.0, 2.0, ..., 197.0, 198.0, 199.0], dtype=float) ###Markdown Initialising by passing arraysAn `ndarray` can be initialised by supplying another array. This statement is almost trivial, since `ndarray`s are iterables themselves, though it should be pointed out that initialising through arrays is faster, because simply a new copy is created, without inspection, iteration etc. It is also possible to coerce type conversion of the output (with type conversion, the iteration cannot be avoided, therefore, this case will always be slower than straight copying): ###Code %%micropython -unix 1 import ulab as np a = [1, 2, 3, 4, 5, 6, 7, 8] b = np.array(a) c = np.array(b) d = np.array(b, dtype=np.uint8) print("a:\t", a) print("\nb:\t", b) print("\nc:\t", c) print("\nd:\t", d) ###Output a: [1, 2, 3, 4, 5, 6, 7, 8] b: array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) c: array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) d: array([1, 2, 3, 4, 5, 6, 7, 8], dtype=uint8) ###Markdown Note that the default type of the `ndarray` is `float`. Hence, if the array is initialised from another array, type conversion will always take place, except, when the output type is specifically supplied. I.e., ###Code %%micropython -unix 1 import ulab as np a = np.array(range(5), dtype=np.uint8) b = np.array(a) print("a:\t", a) print("\nb:\t", b) ###Output a: array([0, 1, 2, 3, 4], dtype=uint8) b: array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=float) ###Markdown will iterate over the elements in `a`, since in the assignment `b = np.array(a)` no output type was given, therefore, `float` was assumed. On the other hand, ###Code %%micropython -unix 1 import ulab as np a = np.array(range(5), dtype=np.uint8) b = np.array(a, dtype=np.uint8) print("a:\t", a) print("\nb:\t", b) ###Output a: array([0, 1, 2, 3, 4], dtype=uint8) b: array([0, 1, 2, 3, 4], dtype=uint8) ###Markdown will simply copy the content of `a` into `b` without any iteration, and will, therefore, be faster. Keep this in mind, whenever the output type, or performance is important. Array initialisation functionsThere are four functions that can be used for initialising an array. These are bound to `ulab` itself at the top level, i.e., no module has to be imported for the function invocations. ones, zeros`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.zeros.html`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ones.htmlA couple of special arrays and matrices can easily be initialised by calling one of the `ones`, or `zeros` functions. `ones` and `zeros` follow the same pattern, and have the call signature```pythonones(shape, dtype=float)zeros(shape, dtype=float)```where shape is either an integer, or a 2-tuple. 
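The call signature is the same as in CPython/`numpy`, so the `numpy`-side sketch below looks identical; the `ulab` example follows.

```python
# CPython / numpy sketch, for comparison with the ulab cell below
import numpy as np

print(np.ones(6, dtype=np.uint8))
print(np.zeros((6, 4)))        # a 2-tuple as the shape produces a matrix
```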
###Code %%micropython -unix 1 import ulab as np print(np.ones(6, dtype=np.uint8)) print(np.zeros((6, 4))) ###Output array([1, 1, 1, 1, 1, 1], dtype=uint8) array([[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], dtype=float) ###Markdown eye`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.eye.htmlAnother special array method is the `eye` function, whose call signature is ```pythoneye(N, M, k=0, dtype=float)```where `N` (`M`) specify the dimensions of the matrix (if only `N` is supplied, then we get a square matrix, otherwise one with `M` rows, and `N` columns), and `k` is the shift of the ones (the main diagonal corresponds to `k=0`). Here are a couple of examples. With a single argument ###Code %%micropython -unix 1 import ulab as np print(np.eye(5)) ###Output array([[1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0]], dtype=float) ###Markdown Specifying the dimensions of the matrix ###Code ### Shifting the diagonal %%micropython -unix 1 import ulab as np print(np.eye(4, M=6, k=-1, dtype=np.int16)) %%micropython -unix 1 import ulab as np print(np.eye(4, M=6, dtype=np.int8)) ###Output array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=int8) ###Markdown linspace`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.htmlThis function returns an array, whose elements are uniformly spaced between the `start`, and `stop` points. The number of intervals is determined by the `num` keyword argument, whose default value is 50. With the `endpoint` keyword argument (defaults to `True`) one can include `stop` in the sequence. In addition, the `dtype` keyword can be supplied to force type conversion of the output. The default is `float`. Note that, when `dtype` is of integer type, the sequence is not necessarily evenly spaced. This is not an error, rather a consequence of rounding. (This is also the `numpy` behaviour.) ###Code %%micropython -unix 1 import ulab as np # generate a sequence with defaults print('default sequence:\t', np.linspace(0, 10)) # num=5 print('num=5:\t\t\t', np.linspace(0, 10, num=5)) # num=5, endpoint=False print('num=5:\t\t\t', np.linspace(0, 10, num=5, endpoint=False)) # num=5, endpoint=False, dtype=uint8 print('num=5:\t\t\t', np.linspace(0, 5, num=7, endpoint=False, dtype=np.uint8)) ###Output default sequence: array([0.0, 0.2040816396474838, 0.4081632792949677, ..., 9.591833114624023, 9.795914649963379, 9.999996185302734], dtype=float) num=5: array([0.0, 2.5, 5.0, 7.5, 10.0], dtype=float) num=5: array([0.0, 2.0, 4.0, 6.0, 8.0], dtype=float) num=5: array([0, 0, 1, 2, 2, 3, 4], dtype=uint8) ###Markdown Methods of ndarrays .shapeThe `.shape` method (property) returns a 2-tuple with the number of rows, and columns. 
**WARNING:** In `circuitpython`, you can call the method as a property, i.e., ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4], dtype=np.int8) print("a:\n", a) print("shape of a:", a.shape) b= np.array([[1, 2], [3, 4]], dtype=np.int8) print("\nb:\n", b) print("shape of b:", b.shape) ###Output a: array([1, 2, 3, 4], dtype=int8) shape of a: (1, 4) b: array([[1, 2], [3, 4]], dtype=int8) shape of b: (2, 2) ###Markdown **WARNING:** On the other hand, since properties are not implemented in `micropython`, there you would call the method as a function, i.e., ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4], dtype=np.int8) print("a:\n", a) print("shape of a:", a.shape()) b= np.array([[1, 2], [3, 4]], dtype=np.int8) print("\nb:\n", b) print("shape of b:", b.shape()) ###Output a: array([1, 2, 3, 4], dtype=int8) shape of a: (1, 4) b: array([[1, 2], [3, 4]], dtype=int8) shape of b: (2, 2) ###Markdown .sizeThe `.size` method (property) returns an integer with the number of elements in the array. **WARNING:** In `circuitpython`, the `numpy` nomenclature applies, i.e., ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3], dtype=np.int8) print("a:\n", a) print("size of a:", a.size) b= np.array([[1, 2], [3, 4]], dtype=np.int8) print("\nb:\n", b) print("size of b:", b.size) ###Output a: array([1, 2, 3], dtype=int8) size of a: 3 b: array([[1, 2], [3, 4]], dtype=int8) size of b: 4 ###Markdown **WARNING:** In `micropython`, `size` is a method, i.e., ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3], dtype=np.int8) print("a:\n", a) print("size of a:", a.size()) b= np.array([[1, 2], [3, 4]], dtype=np.int8) print("\nb:\n", b) print("size of b:", b.size()) ###Output a: array([1, 2, 3], dtype=int8) size of a: 3 b: array([[1, 2], [3, 4]], dtype=int8) size of b: 4 ###Markdown .itemsizeThe `.itemsize` method (property) returns an integer with the size of a single array element in bytes.**WARNING:** In `circuitpython`: ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3], dtype=np.int8) print("a:\n", a) print("itemsize of a:", a.itemsize) b= np.array([[1, 2], [3, 4]], dtype=np.float) print("\nb:\n", b) print("itemsize of b:", b.itemsize) ###Output a: array([1, 2, 3], dtype=int8) itemsize of a: 1 b: array([[1.0, 2.0], [3.0, 4.0]], dtype=float) itemsize of b: 8 ###Markdown **WARNING:** In `micropython`: ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3], dtype=np.int8) print("a:\n", a) print("itemsize of a:", a.itemsize()) b= np.array([[1, 2], [3, 4]], dtype=np.float) print("\nb:\n", b) print("itemsize of b:", b.itemsize()) ###Output a: array([1, 2, 3], dtype=int8) itemsize of a: 1 b: array([[1.0, 2.0], [3.0, 4.0]], dtype=float) itemsize of b: 8 ###Markdown .reshape`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.reshape.html`reshape` re-writes the shape properties of an `ndarray`, but the array will not be modified in any other way. The function takes a single 2-tuple with two integers as its argument. The 2-tuple should specify the desired number of rows and columns. If the new shape is not consistent with the old, a `ValueError` exception will be raised. 
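Since an incompatible shape raises a `ValueError`, it can be useful to check compatibility beforehand. The sketch below is plain `python`; the `can_reshape` helper is made up here for illustration, and the `ulab` example follows.

```python
# a hypothetical helper: the new shape is compatible, if it holds the same number of elements
def can_reshape(shape, new_shape):
    rows, cols = shape
    new_rows, new_cols = new_shape
    return rows * cols == new_rows * new_cols

print(can_reshape((4, 4), (2, 8)))    # True: 16 elements either way
print(can_reshape((4, 4), (3, 5)))    # False: reshape would raise a ValueError
```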
###Code %%micropython -unix 1 import ulab as np a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], dtype=np.uint8) print('a (4 by 4):', a) print('a (2 by 8):', a.reshape((2, 8))) print('a (1 by 16):', a.reshape((1, 16))) ###Output a (4 by 4): array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], dtype=uint8) a (2 by 8): array([[1, 2, 3, 4, 5, 6, 7, 8], [9, 10, 11, 12, 13, 14, 15, 16]], dtype=uint8) a (1 by 16): array([1, 2, 3, ..., 14, 15, 16], dtype=uint8) ###Markdown .flatten`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.flatten.htm`.flatten` returns the flattened array. The array can be flattened in `C` style (i.e., moving horizontally in the matrix), or in `fortran` style (i.e., moving vertically in the matrix). The `C`-style flattening is the default, and it is also fast, because this is just a verbatim copy of the contents. ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4], dtype=np.int8) print("a: \t\t", a) print("a flattened: \t", a.flatten()) b = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int8) print("\nb:", b) print("b flattened (C): \t", b.flatten()) print("b flattened (F): \t", b.flatten(order='F')) ###Output a: array([1, 2, 3, 4], dtype=int8) a flattened: array([1, 2, 3, 4], dtype=int8) b: array([[1, 2, 3], [4, 5, 6]], dtype=int8) b flattened (C): array([1, 2, 3, 4, 5, 6], dtype=int8) b flattened (F): array([1, 4, 2, 5, 3, 6], dtype=int8) ###Markdown .transpose`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.transpose.html ###Code %%micropython -unix 1 import ulab as np a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=np.uint8) print('a:\n', a) print('shape of a:', a.shape()) a.transpose() print('\ntranspose of a:\n', a) print('shape of a:', a.shape()) ###Output a: array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=uint8) shape of a: (4, 3) transpose of a: array([[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]], dtype=uint8) shape of a: (3, 4) ###Markdown .sort`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.sort.htmlIn-place sorting of an `ndarray`. For a more detailed exposition, see [sort](sort). ###Code %%micropython -unix 1 import ulab as np a = np.array([[1, 12, 3, 0], [5, 3, 4, 1], [9, 11, 1, 8], [7, 10, 0, 1]], dtype=np.uint8) print('\na:\n', a) a.sort(axis=0) print('\na sorted along vertical axis:\n', a) a = np.array([[1, 12, 3, 0], [5, 3, 4, 1], [9, 11, 1, 8], [7, 10, 0, 1]], dtype=np.uint8) a.sort(a, axis=1) print('\na sorted along horizontal axis:\n', a) a = np.array([[1, 12, 3, 0], [5, 3, 4, 1], [9, 11, 1, 8], [7, 10, 0, 1]], dtype=np.uint8) a.sort(a, axis=None) print('\nflattened a sorted:\n', a) ###Output a: array([[1, 12, 3, 0], [5, 3, 4, 1], [9, 11, 1, 8], [7, 10, 0, 1]], dtype=uint8) a sorted along vertical axis: array([[1, 3, 0, 0], [5, 10, 1, 1], [7, 11, 3, 1], [9, 12, 4, 8]], dtype=uint8) a sorted along horizontal axis: array([[0, 1, 3, 12], [1, 3, 4, 5], [1, 8, 9, 11], [0, 1, 7, 10]], dtype=uint8) flattened a sorted: array([0, 0, 1, ..., 10, 11, 12], dtype=uint8) ###Markdown Unary operatorsWith the exception of `len`, which returns a single number, all unary operators manipulate the underlying data element-wise. lenThis operator takes a single argument, and returns either the length (for row vectors), or the number of rows (for matrices) of its argument. 
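For comparison, `len` behaves the same way in CPython/`numpy`: it returns the number of elements of a one-dimensional array, and the number of rows of a matrix. Note, however, that `numpy` reports the shape of a one-dimensional array as `(5,)`, while `ulab` reports `(1, 5)`. This is only a `numpy`-side sketch; the `ulab` example follows below.

```python
# CPython / numpy sketch, for comparison with the ulab cell below
import numpy as np

a = np.array([1, 2, 3, 4, 5], dtype=np.uint8)
b = np.array([list(range(5))] * 4, dtype=np.uint8)

print(len(a), a.shape)    # 5 (5,)    -- length of a row vector
print(len(b), b.shape)    # 4 (4, 5)  -- number of rows of a matrix
```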
###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4, 5], dtype=np.uint8) b = np.array([range(5), range(5), range(5), range(5)], dtype=np.uint8) print("a:\t", a) print("length of a: ", len(a)) print("shape of a: ", a.shape()) print("\nb:\t", b) print("length of b: ", len(b)) print("shape of b: ", b.shape()) ###Output a: array([1, 2, 3, 4, 5], dtype=uint8) length of a: 5 shape of a: (1, 5) b: array([[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], dtype=uint8) length of b: 4 shape of b: (4, 5) ###Markdown The number returned by `len` is also the length of the iterations, when the array supplies the elements for an iteration (see later). invertThe function is defined for integer data types (`uint8`, `int8`, `uint16`, and `int16`) only, takes a single argument, and returns the element-by-element, bit-wise inverse of the array. If a `float` is supplied, the function raises a `ValueError` exception.With signed integers (`int8`, and `int16`), the results might be unexpected, as in the example below: ###Code %%micropython -unix 1 import ulab as np a = np.array([0, -1, -100], dtype=np.int8) print("a:\t\t", a) print("inverse of a:\t", ~a) a = np.array([0, 1, 254, 255], dtype=np.uint8) print("\na:\t\t", a) print("inverse of a:\t", ~a) ###Output a: array([0, -1, -100], dtype=int8) inverse of a: array([-1, 0, 99], dtype=int8) a: array([0, 1, 254, 255], dtype=uint8) inverse of a: array([255, 254, 1, 0], dtype=uint8) ###Markdown absThis function takes a single argument, and returns the element-by-element absolute value of the array. When the data type is unsigned (`uint8`, or `uint16`), a copy of the array will be returned immediately, and no calculation takes place. ###Code %%micropython -unix 1 import ulab as np a = np.array([0, -1, -100], dtype=np.int8) print("a:\t\t\t ", a) print("absolute value of a:\t ", abs(a)) ###Output a: array([0, -1, -100], dtype=int8) absolute value of a: array([0, 1, 100], dtype=int8) ###Markdown negThis operator takes a single argument, and changes the sign of each element in the array. Unsigned values are wrapped. ###Code %%micropython -unix 1 import ulab as np a = np.array([10, -1, 1], dtype=np.int8) print("a:\t\t", a) print("negative of a:\t", -a) b = np.array([0, 100, 200], dtype=np.uint8) print("\nb:\t\t", b) print("negative of b:\t", -b) ###Output a: array([10, -1, 1], dtype=int8) negative of a: array([-10, 1, -1], dtype=int8) b: array([0, 100, 200], dtype=uint8) negative of b: array([0, 156, 56], dtype=uint8) ###Markdown posThis function takes a single argument, and simply returns a copy of the array. ###Code %%micropython -unix 1 import ulab as np a = np.array([10, -1, 1], dtype=np.int8) print("a:\t\t", a) print("positive of a:\t", +a) ###Output a: array([10, -1, 1], dtype=int8) positive of a: array([10, -1, 1], dtype=int8) ###Markdown Binary operators`ulab` implements the `+`, `-`, `*`, `/`, `**`, `<`, `<=`, `>`, `>=`, `==`, `!=` binary operators that work element-wise. Partial broadcasting is available, meaning that the operands either must have the same shape, or one of them must be a scalar.The operators raise a `ValueError` exception, if partial broadcasting is not possible. The only exceptions are the `==` and `!=` operators that will return `False` in this case. **WARNING**: note that relational operators (`<`, `<=`, `>`, `>=`, `==`, `!=`) should have the `ndarray` on their left hand side, when compared to scalars. 
This means that the following works ###Code %%micropython -unix 1 import ulab a = ulab.array([1, 2, 3]) print(a > 2) ###Output [False, False, True] ###Markdown while the equivalent statement, `2 < a`, will raise a `TypeError` exception: ###Code %%micropython -unix 1 import ulab a = ulab.array([1, 2, 3]) print(2 < a) ###Output Traceback (most recent call last): File "/dev/shm/micropython.py", line 4, in <module> TypeError: unsupported types for __lt__: 'int', 'ndarray' ###Markdown **WARNING:** `numpy` also allows operations between a matrix, and a row vector, if the row vector has exactly as many elements, as many columns the matrix has. This feature will be added in future versions of `ulab`. ###Code a = array([[1, 2, 3], [4, 5, 6], [7, 8, 6]]) b = array([10, 20, 30]) a+b ###Output _____no_output_____ ###Markdown UpcastingBinary operations require special attention, because two arrays with different typecodes can be the operands of an operation, in which case it is not trivial, what the typecode of the result is. This decision on the result's typecode is called upcasting. Since the number of typecodes in `ulab` is significantly smaller than in `numpy`, we have to define new upcasting rules. Where possible, I followed `numpy`'s conventions. `ulab` observes the following upcasting rules:
1. Operations with two `ndarray`s of the same `dtype` preserve their `dtype`, even when the results overflow.
2. if either of the operands is a float, the result is automatically a float
3. When the right hand side of a binary operator is a micropython variable, `mp_obj_int`, or `mp_obj_float`, then the result will be promoted to `dtype` `float`. This is necessary, because a micropython integer can be 31 bits wide. Other micropython types (e.g., lists, tuples, etc.) raise a `TypeError` exception.
4.

| left hand side | right hand side | ulab result | numpy result |
|----------------|-----------------|-------------|--------------|
|`uint8` |`int8` |`int16` |`int16` |
|`uint8` |`int16` |`int16` |`int16` |
|`uint8` |`uint16` |`uint16` |`uint16` |
|`int8` |`int16` |`int16` |`int16` |
|`int8` |`uint16` |`uint16` |`int32` |
|`uint16` |`int16` |`float` |`int32` |

Note that the last two operations are promoted to `int32` in `numpy`. **WARNING:** Due to the lower number of available data types, the upcasting rules of `ulab` are slightly different to those of `numpy`. Watch out for this, when porting code!When one of the operands is a scalar, it will internally be turned into a single-element `ndarray` with the *smallest* possible `dtype`. Thus, e.g., if the scalar is 123, it will be converted to an array of `dtype` `uint8`.Upcasting can be seen in action in the following snippet: ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4], dtype=np.uint8) b = np.array([1, 2, 3, 4], dtype=np.int8) print("a:\t", a) print("b:\t", b) print("a+b:\t", a+b) c = np.array([1, 2, 3, 4], dtype=np.float) print("\na:\t", a) print("c:\t", c) print("a*c:\t", a*c) ###Output a: array([1, 2, 3, 4], dtype=uint8) b: array([1, 2, 3, 4], dtype=int8) a+b: array([2, 4, 6, 8], dtype=int16) a: array([1, 2, 3, 4], dtype=uint8) c: array([1.0, 2.0, 3.0, 4.0], dtype=float) a*c: array([1.0, 4.0, 9.0, 16.0], dtype=float) ###Markdown BenchmarksThe following snippet compares the performance of binary operations to a possible implementation in python. 
For the time measurement, we will take the following snippet from the micropython manual: ###Code %%micropython -pyboard 1 def timeit(f, *args, **kwargs): func_name = str(f).split(' ')[1] def new_func(*args, **kwargs): t = utime.ticks_us() result = f(*args, **kwargs) print('execution time: ', utime.ticks_diff(utime.ticks_us(), t), ' us') return result return new_func %%micropython -pyboard 1 import ulab as np @timeit def py_add(a, b): return [a[i]+b[i] for i in range(1000)] @timeit def py_multiply(a, b): return [a[i]*b[i] for i in range(1000)] @timeit def ulab_add(a, b): return a + b @timeit def ulab_multiply(a, b): return a * b a = [0.0]*1000 b = range(1000) print('python add:') py_add(a, b) print('\npython multiply:') py_multiply(a, b) a = np.linspace(0, 10, num=1000) b = np.ones(1000) print('\nulab add:') ulab_add(a, b) print('\nulab multiply:') ulab_multiply(a, b) ###Output python add: execution time: 10051 us python multiply: execution time: 14175 us ulab add: execution time: 222 us ulab multiply: execution time: 213 us ###Markdown I do not claim that the python implementation above is perfect, and certainly, there is much room for improvement. However, the factor of 50 difference in execution time is very spectacular. This is nothing but a consequence of the fact that the `ulab` functions run `C` code, with very little python overhead. The factor of 50 appears to be quite universal: the FFT routine obeys similar scaling (see [Speed of FFTs](Speed-of-FFTs)), and this number came up with font rendering, too: [fast font rendering on graphical displays](https://forum.micropython.org/viewtopic.php?f=15&t=5815&p=33362&hilit=ufontp33383). Comparison operatorsThe smaller than, greater than, smaller or equal, and greater or equal operators return a vector of Booleans indicating the positions (`True`), where the condition is satisfied. ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.uint8) print(a < 5) ###Output [True, True, True, True, False, False, False, False] ###Markdown **WARNING**: at the moment, due to implementation details, the `ndarray` must be on the left hand side of the relational operators. This will change in a future version of `ulab`. That is, while `a < 5`, and `5 > a` have the same meaning, the following code will not work: ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.uint8) print(5 > a) ###Output Traceback (most recent call last): File "/dev/shm/micropython.py", line 5, in <module> TypeError: unsupported types for __gt__: 'int', 'ndarray' ###Markdown **WARNING:** Note that `numpy` returns an array of Booleans. For most use cases this fact should not make a difference. ###Code a = array([1, 2, 3, 4, 5, 6, 7, 8]) a < 5 ###Output _____no_output_____ ###Markdown These operators work with matrices, too, in which case a list of lists of Booleans will be returned: ###Code %%micropython -unix 1 import ulab as np a = np.array([range(0, 5, 1), range(1, 6, 1), range(2, 7, 1)], dtype=np.uint8) print(a) print(a < 5) ###Output array([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5], [2, 3, 4, 5, 6]], dtype=uint8) [[True, True, True, True, True], [True, True, True, True, False], [True, True, True, False, False]] ###Markdown Iterating over arrays`ndarray`s are iterable, which means that their elements can also be accessed as can the elements of a list, tuple, etc. 
If the array is one-dimensional, the iterator returns scalars, otherwise a new one-dimensional `ndarray`, which is simply a copy of the corresponding row of the matrix, i.e, its data type will be inherited. ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4, 5], dtype=np.uint8) b = np.array([range(5), range(10, 15, 1), range(20, 25, 1), range(30, 35, 1)], dtype=np.uint8) print("a:\t", a) for i, _a in enumerate(a): print("element %d in a:"%i, _a) print("\nb:\t", b) for i, _b in enumerate(b): print("element %d in b:"%i, _b) ###Output a: array([1, 2, 3, 4, 5], dtype=uint8) element 0 in a: 1 element 1 in a: 2 element 2 in a: 3 element 3 in a: 4 element 4 in a: 5 b: array([[0, 1, 2, 3, 4], [10, 11, 12, 13, 14], [20, 21, 22, 23, 24], [30, 31, 32, 33, 34]], dtype=uint8) element 0 in b: array([0, 1, 2, 3, 4], dtype=uint8) element 1 in b: array([10, 11, 12, 13, 14], dtype=uint8) element 2 in b: array([20, 21, 22, 23, 24], dtype=uint8) element 3 in b: array([30, 31, 32, 33, 34], dtype=uint8) ###Markdown Slicing and indexingCopies of sub-arrays can be created by indexing, and slicing. IndexingThe simplest form of indexing is specifying a single integer between the square brackets as in ###Code %%micropython -unix 1 import ulab as np a = np.array(range(10), dtype=np.uint8) print("a:\t\t\t\t\t\t", a) print("the first, and first from right element of a:\t", a[0], a[-1]) print("the second, and second from right element of a:\t", a[1], a[-2]) ###Output a: array([0, 1, 2, ..., 7, 8, 9], dtype=uint8) the first, and first from right element of a: 0 9 the second, and second from right element of a: 1 8 ###Markdown Indices are (not necessarily non-negative) integers, or a list of Booleans. By using a Boolean list, we can select those elements of an array that satisfy a specific condition. At the moment, such indexing is defined for row vectors only, for matrices the function raises a `ValueError` exception, though this will be rectified in a future version of `ulab`. ###Code %%micropython -unix 1 import ulab as np a = np.array(range(9), dtype=np.float) print("a:\t", a) print("a < 5:\t", a[a < 5]) ###Output a: array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) a < 5: array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=float) ###Markdown Indexing with Boolean arrays can take more complicated expressions. This is a very concise way of comparing two vectors, e.g.: ###Code %%micropython -pyboard 1 import ulab as np a = np.array(range(9), dtype=np.uint8) b = np.array([4, 4, 4, 3, 3, 3, 13, 13, 13], dtype=np.uint8) print("a:\t", a) print("\na**2:\t", a*a) print("\nb:\t", b) print("\n100*sin(b):\t", np.sin(b)*100.0) print("\na[a*a > np.sin(b)*100.0]:\t", a[a*a > np.sin(b)*100.0]) ###Output a: array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=uint8) a**2: array([0, 1, 4, 9, 16, 25, 36, 49, 64], dtype=uint8) b: array([4, 4, 4, 3, 3, 3, 13, 13, 13], dtype=uint8) 100*sin(b): array([-75.68025, -75.68025, -75.68025, 14.112, 14.112, 14.112, 42.01671, 42.01671, 42.01671], dtype=float) a[a*a > np.sin(b)*100.0]: array([0, 1, 2, 4, 5, 7, 8], dtype=uint8) ###Markdown Slicing and assigning to slicesYou can also generate sub-arrays by specifying slices as the index of an array. Slices are special python objects of the form ```pythonslice = start:end:stop```where `start`, `end`, and `stop` are (not necessarily non-negative) integers. Not all of these three numbers must be specified in an index, in fact, all three of them can be missing. The interpreter takes care of filling in the missing values. 
(Note that slices cannot be defined in this way, only there, where an index is expected.) For a good explanation on how slices work in python, you can read the stackoverflow question https://stackoverflow.com/questions/509211/understanding-slice-notation.Slices work on both axes: ###Code %%micropython -unix 1 import ulab as np a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.uint8) print('a:\n', a) # the first row print('\na[0]:\n', a[0]) # the first two elements of the first row print('\na[0,:2]:\n', a[0,:2]) # the zeroth element in each row (also known as the zeroth column) print('\na[:,0]:\n', a[:,0]) # the last but one row print('\na[-1]:\n', a[-1]) # the last two rows backwards print('\na[::1]:\n', a[::-1]) ###Output a: array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=uint8) a[0]: array([1, 2, 3], dtype=uint8) a[0,:2]: array([1, 2], dtype=uint8) a[:,0]: array([1, 4, 7], dtype=uint8) a[-1]: array([7, 8, 9], dtype=uint8) a[::1]: array([[7, 8, 9], [4, 5, 6]], dtype=uint8) ###Markdown Assignment to slices can be done for the whole slice, per row, and per column. A couple of examples should make these statements clearer: ###Code %%micropython -unix 1 import ulab as np zero_list = [0, 0, 0] a = np.array([zero_list, zero_list, zero_list], dtype=np.uint8) print('a:\n', a) # assigning to the whole row a[0] = 1 print('\na[0] = 1\n', a) # assigning to the whole row a[0] = np.array([1, 2, -333], dtype=np.float) print('\na[0] = np.array([1, 2, 3])\n', a) # assigning to a column a[:,2] = 3.0 print('\na[:,0]:\n', a) ###Output a: array([[0, 0, 0], [0, 0, 0], [0, 0, 0]], dtype=uint8) a[0] = 1 array([[1, 1, 1], [0, 0, 0], [0, 0, 0]], dtype=uint8) a[0] = np.array([1, 2, 3]) array([[1, 2, 179], [0, 0, 0], [0, 0, 0]], dtype=uint8) a[:,0]: array([[1, 2, 3], [0, 0, 3], [0, 0, 3]], dtype=uint8) ###Markdown Universal functionsStandard mathematical functions defined in the `vector` sub-module, and can be calculated on any scalar-valued iterable (ranges, lists, tuples containing numbers), and on `ndarray`s without having to change the call signature. In all cases the functions return a new `ndarray` of typecode `float` (since these functions usually generate float values, anyway). The functions execute faster with `ndarray` arguments than with iterables, because the values of the input vector can be extracted faster. At present, the following functions are supported:`acos`, `acosh`, `arctan2`, `around`, `asin`, `asinh`, `atan`, `atanh`, `ceil`, `cos`, `erf`, `erfc`, `exp`, `expm1`, `floor`, `tgamma`, `lgamma`, `log`, `log10`, `log2`, `sin`, `sinh`, `sqrt`, `tan`, `tanh`.These functions are applied element-wise to the arguments, thus, e.g., the exponential of a matrix cannot be calculated in this way. The functions can be invoked by importing the `vector` sub-module first. ###Code %%micropython -pyboard 1 import ulab as np from ulab import vector a = range(9) b = np.array(a) # works with ranges, lists, tuples etc. 
print('a:\t', a) print('exp(a):\t', vector.exp(a)) # with 1D arrays print('\nb:\t', b) print('exp(b):\t', vector.exp(b)) # as well as with matrices c = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) print('\nc:\t', c) print('exp(c):\t', vector.exp(c)) ###Output a: range(0, 9) exp(a): array([1.0, 2.718282, 7.389056, 20.08554, 54.59816, 148.4132, 403.4288, 1096.633, 2980.958], dtype=float) b: array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) exp(b): array([1.0, 2.718282, 7.389056, 20.08554, 54.59816, 148.4132, 403.4288, 1096.633, 2980.958], dtype=float) c: array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype=float) exp(c): array([[2.718282, 7.389056, 20.08554], [54.59816, 148.4132, 403.4288], [1096.633, 2980.958, 8103.084]], dtype=float) ###Markdown Computation expensesThe overhead for calculating with micropython iterables is quite significant: for the 1000 samples below, the difference is more than 800 microseconds, because internally the function has to create the `ndarray` for the output, has to fetch the iterable's items of unknown type, and then convert them to floats. All these steps are skipped for `ndarray`s, because these pieces of information are already known. ###Code %%micropython -pyboard 1 import ulab as np from ulab import vector a = [0]*1000 b = np.array(a) @timeit def measure_run_time(x): return vector.exp(x) measure_run_time(a) measure_run_time(b) ###Output execution time: 1259 us execution time: 408 us ###Markdown Of course, such a time saving is reasonable only, if the data are already available as an `ndarray`. If one has to initialise the `ndarray` from the list, then there is no gain, because the iterator was simply pushed into the initialisation function. around`numpy`: https://docs.scipy.org/doc/numpy-1.17.0/reference/generated/numpy.around.html`numpy`'s `around` function can also be found in the `vector` sub-module. The function implements the `decimals` keyword argument with default value `0`. The first argument must be an `ndarray`. If this is not the case, the function raises a `TypeError` exception. Note that `numpy` accepts general iterables. The `out` keyword argument known from `numpy` is not accepted. The function always returns an ndarray of type `mp_float_t`. ###Code %%micropython -unix 1 import ulab as np from ulab import vector a = np.array([1, 2.2, 33.33, 444.444]) print('a:\t\t', a) print('\ndecimals = 0\t', vector.around(a, decimals=0)) print('\ndecimals = 1\t', vector.around(a, decimals=1)) print('\ndecimals = -1\t', vector.around(a, decimals=-1)) ###Output a: array([1.0, 2.2, 33.33, 444.444], dtype=float) decimals = 0 array([1.0, 2.0, 33.0, 444.0], dtype=float) decimals = 1 array([1.0, 2.2, 33.3, 444.4], dtype=float) decimals = -1 array([0.0, 0.0, 30.0, 440.0], dtype=float) ###Markdown arctan2`numpy`: https://docs.scipy.org/doc/numpy-1.17.0/reference/generated/numpy.arctan2.htmlThe two-argument inverse tangent function is also part of the `vector` sub-module. The function implements only partial broadcasting, i.e., its two arguments either have the same shape, or at least one of them must be a single-element array. Scalars (`micropython` integers or floats) are also allowed. 
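A CPython/`numpy` sketch of the same calls is given here for comparison; the `ulab` example follows below.

```python
# CPython / numpy sketch, for comparison with the ulab cell below
import numpy as np

a = np.array([1, 2.2, 33.33, 444.444])

print(np.arctan2(a, 1.0))    # element-wise, quadrant-aware inverse tangent of a / 1.0
print(np.arctan2(1.0, a))    # the scalar is broadcast against the array
print(np.arctan2(a, a))      # equal, positive arguments give pi/4 everywhere
```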
###Code %%micropython -unix 1 import ulab as np from ulab import vector a = np.array([1, 2.2, 33.33, 444.444]) print('a:\t\t', a) print('\narctan2(a, 1.0)\t', vector.arctan2(a, 1.0)) print('\narctan2(1.0, a)\t', vector.arctan2(1.0, a)) print('\narctan2(a, a): \t', vector.arctan2(a, a)) ###Output a: array([1.0, 2.2, 33.33, 444.444], dtype=float) arctan2(a, 1.0) array([0.7853981633974483, 1.14416883366802, 1.5408023243361, 1.568546328341769], dtype=float) arctan2(1.0, a) array([0.7853981633974483, 0.426627493126876, 0.02999400245879636, 0.002249998453127392], dtype=float) arctan2(a, a): array([0.7853981633974483, 0.7853981633974483, 0.7853981633974483, 0.7853981633974483], dtype=float) ###Markdown NumericalFunction in the `numerical` sub-module can be called by importing the sub-module first. min, argmin, max, argmax`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.min.html`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.max.html`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html**WARNING:** Difference to `numpy`: the `out` keyword argument is not implemented.These functions follow the same pattern, and work with generic iterables, and `ndarray`s. `min`, and `max` return the minimum or maximum of a sequence. If the input array is two-dimensional, the `axis` keyword argument can be supplied, in which case the minimum/maximum along the given axis will be returned. If `axis=None` (this is also the default value), the minimum/maximum of the flattened array will be determined.`argmin/argmax` return the position (index) of the minimum/maximum in the sequence. ###Code %%micropython -unix 1 import ulab as np from ulab import numerical a = np.array([1, 2, 0, 1, 10]) print('a:', a) print('min of a:', numerical.min(a)) print('argmin of a:', numerical.argmin(a)) b = np.array([[1, 2, 0], [1, 10, -1]]) print('\nb:\n', b) print('min of b (flattened):', numerical.min(b)) print('min of b (axis=0):', numerical.min(b, axis=0)) print('min of b (axis=1):', numerical.min(b, axis=1)) ###Output a: array([1.0, 2.0, 0.0, 1.0, 10.0], dtype=float) min of a: 0.0 argmin of a: 2 b: array([[1.0, 2.0, 0.0], [1.0, 10.0, -1.0]], dtype=float) min of b (flattened): -1.0 min of b (axis=0): array([1.0, 2.0, -1.0], dtype=float) min of b (axis=1): array([0.0, -1.0], dtype=float) ###Markdown sum, std, mean`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.sum.html`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.std.html`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.htmlThese three functions follow the same pattern: if the axis keyword is not specified, it assumes the default value of `None`, and returns the result of the computation for the flattened array. Otherwise, the calculation is along the given axis. 
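As a minimal sketch of the default `axis=None` behaviour (the numbers are arbitrary), the mean of the flattened array is nothing but the sum divided by the number of elements:

```python
import ulab as np
from ulab import numerical

a = np.array([[1, 2, 3], [4, 5, 6]])

# with the default axis=None the array is flattened first, so the
# two print statements are expected to give the same value
print(numerical.mean(a))
print(numerical.sum(a) / 6)
```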
###Code %%micropython -pyboard 1 import ulab as np from ulab import numerical a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) print('a: \n', a) print('sum, flat array: ', numerical.sum(a)) print('mean, horizontal: ', numerical.mean(a, axis=1)) print('std, vertical: ', numerical.std(a, axis=0)) ###Output a: array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype=float) sum, flat array: 45.0 mean, horizontal: array([2.0, 5.0, 8.0], dtype=float) std, vertical: array([2.44949, 2.44949, 2.44949], dtype=float) ###Markdown roll`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.roll.htmlThe roll function shifts the content of a vector by the positions given as the second argument. If the `axis` keyword is supplied, the shift is applied to the given axis. ###Code %%micropython -unix 1 import ulab as np from ulab import numerical a = np.array([1, 2, 3, 4, 5, 6, 7, 8]) print("a:\t\t\t", a) numerical.roll(a, 2) print("a rolled to the left:\t", a) # this should be the original vector numerical.roll(a, -2) print("a rolled to the right:\t", a) ###Output a: array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) a rolled to the left: array([3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 1.0, 2.0], dtype=float) a rolled to the right: array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) ###Markdown Rolling works with matrices, too. If the `axis` keyword is 0, the matrix is rolled along its vertical axis, otherwise, horizontally. Horizontal rolls are faster, because they require fewer steps, and larger memory chunks are copied, however, they also require more RAM: basically the whole row must be stored internally. Most expensive are the `None` keyword values, because with `axis = None`, the array is flattened first, hence the row's length is the size of the whole matrix.Vertical rolls require two internal copies of single columns. ###Code %%micropython -unix 1 import ulab as np from ulab import numerical a = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) print("a:\n", a) numerical.roll(a, 2) print("\na rolled to the left:\n", a) numerical.roll(a, -1, axis=1) print("\na rolled up:\n", a) numerical.roll(a, 1, axis=None) print("\na rolled with None:\n", a) ###Output a: array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]], dtype=float) a rolled to the left: array([[3.0, 4.0, 5.0, 6.0], [7.0, 8.0, 1.0, 2.0]], dtype=float) a rolled up: array([[6.0, 3.0, 4.0, 5.0], [2.0, 7.0, 8.0, 1.0]], dtype=float) a rolled with None: array([[3.0, 4.0, 5.0, 2.0], [7.0, 8.0, 1.0, 6.0]], dtype=float) ###Markdown Simple running weighted averageAs a demonstration of the conciseness of `ulab/numpy` operations, we will calculate an exponentially weighted running average of a measurement vector in just a couple of lines. I chose this particular example, because I think that this can indeed be used in real-life applications. ###Code %%micropython -unix 1 import ulab as np from ulab import numerical from ulab import vector def dummy_adc(): # dummy adc function, so that the results are reproducible return 2 n = 10 # These are the normalised weights; the last entry is the most dominant weight = vector.exp([1, 2, 3, 4, 5]) weight = weight/numerical.sum(weight) print(weight) # initial array of samples samples = np.array([0]*n) for i in range(n): # a new datum is inserted on the right hand side. 
This simply overwrites whatever was in the last slot samples[-1] = dummy_adc() print(numerical.mean(samples[-5:]*weight)) print(samples[-5:]) # the data are shifted by one position to the left numerical.roll(samples, 1) ###Output array([0.01165623031556606, 0.03168492019176483, 0.08612854033708572, 0.234121635556221, 0.6364086270332336], dtype=float) 0.2545634508132935 array([0.0, 0.0, 0.0, 0.0, 2.0], dtype=float) 0.3482121050357819 array([0.0, 0.0, 0.0, 2.0, 2.0], dtype=float) 0.3826635211706161 array([0.0, 0.0, 2.0, 2.0, 2.0], dtype=float) 0.3953374892473221 array([0.0, 2.0, 2.0, 2.0, 2.0], dtype=float) 0.3999999813735485 array([2.0, 2.0, 2.0, 2.0, 2.0], dtype=float) 0.3999999813735485 array([2.0, 2.0, 2.0, 2.0, 2.0], dtype=float) 0.3999999813735485 array([2.0, 2.0, 2.0, 2.0, 2.0], dtype=float) 0.3999999813735485 array([2.0, 2.0, 2.0, 2.0, 2.0], dtype=float) 0.3999999813735485 array([2.0, 2.0, 2.0, 2.0, 2.0], dtype=float) 0.3999999813735485 array([2.0, 2.0, 2.0, 2.0, 2.0], dtype=float) ###Markdown flip`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.htmlThe `flip` function takes one positional, an `ndarray`, and one keyword argument, `axis = None`, and reverses the order of elements along the given axis. If the keyword argument is `None`, the matrix' entries are flipped along all axes. `flip` returns a new copy of the array. ###Code %%micropython -unix 1 import ulab as np from ulab import numerical a = np.array([1, 2, 3, 4, 5]) print("a: \t", a) print("a flipped:\t", np.flip(a)) a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.uint8) print("\na flipped horizontally\n", numerical.flip(a, axis=1)) print("\na flipped vertically\n", numerical.flip(a, axis=0)) print("\na flipped horizontally+vertically\n", numerical.flip(a)) ###Output a: array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=float) a flipped: array([5.0, 4.0, 3.0, 2.0, 1.0], dtype=float) a flipped horizontally array([[3, 2, 1], [6, 5, 4], [9, 8, 7]], dtype=uint8) a flipped vertically array([[7, 8, 9], [4, 5, 6], [1, 2, 3]], dtype=uint8) a flipped horizontally+vertically array([[9, 8, 7], [6, 5, 4], [3, 2, 1]], dtype=uint8) ###Markdown diff`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.diff.htmlThe `diff` function returns the numerical derivative of the forward scheme, or more accurately, the differences of an `ndarray` along a given axis. The order of derivative can be stipulated with the `n` keyword argument, which should be between 0, and 9. Default is 1. If higher order derivatives are required, they can be gotten by repeated calls to the function. The `axis` keyword argument should be -1 (last axis, in `ulab` equivalent to the second axis, and this also happens to be the default value), 0, or 1. Beyond the output array, the function requires only a couple of bytes of extra RAM for the differentiation stencil. (The stencil is an `int8` array, one byte longer than `n`. This also explains, why the highest order is 9: the coefficients of a ninth-order stencil all fit in signed bytes, while 10 would require `int16`.) Note that as usual in numerical differentiation (and also in `numpy`), the length of the respective axis will be reduced by `n` after the operation. If `n` is larger than, or equal to the length of the axis, an empty array will be returned.**WARNING**: the `diff` function does not implement the `prepend` and `append` keywords that can be found in `numpy`. 
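As a quick illustration of the `n` keyword (a sketch with an arbitrary quadratic sequence), the second difference of a quadratic is constant, and it can be obtained either in a single call, or by applying the function twice:

```python
import ulab as np
from ulab import numerical

x = np.array(range(8))
y = x * x                                  # a quadratic sequence: 0, 1, 4, 9, ...

print(numerical.diff(y, n=2))              # the second difference in a single call
print(numerical.diff(numerical.diff(y)))   # the same, by repeated application
```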
###Code %%micropython -unix 1 import ulab as np from ulab import numerical a = np.array(range(9), dtype=np.uint8) print('a:\n', a) print('\nfirst derivative:\n', numerical.diff(a, n=1)) print('\nsecond derivative:\n', numerical.diff(a, n=2)) c = np.array([[1, 2, 3, 4], [4, 3, 2, 1], [1, 4, 9, 16], [0, 0, 0, 0]]) print('\nc:\n', c) print('\nfirst derivative, first axis:\n', numerical.diff(c, axis=0)) print('\nfirst derivative, second axis:\n', numerical.diff(c, axis=1)) ###Output a: array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=uint8) first derivative: array([1, 1, 1, 1, 1, 1, 1, 1], dtype=uint8) second derivative: array([0, 0, 0, 0, 0, 0, 0], dtype=uint8) c: array([[1.0, 2.0, 3.0, 4.0], [4.0, 3.0, 2.0, 1.0], [1.0, 4.0, 9.0, 16.0], [0.0, 0.0, 0.0, 0.0]], dtype=float) first derivative, first axis: array([[3.0, 1.0, -1.0, -3.0], [-3.0, 1.0, 7.0, 15.0], [-1.0, -4.0, -9.0, -16.0]], dtype=float) first derivative, second axis: array([[1.0, 1.0, 1.0], [-1.0, -1.0, -1.0], [3.0, 5.0, 7.0], [0.0, 0.0, 0.0]], dtype=float) ###Markdown sort`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.sort.htmlThe sort function takes an ndarray, and sorts its elements in ascending order along the specified axis using a heap sort algorithm. As opposed to the `.sort()` method discussed earlier, this function creates a copy of its input before sorting, and at the end, returns this copy. Sorting takes place in place, without auxiliary storage. The `axis` keyword argument takes on the possible values of -1 (the last axis, in `ulab` equivalent to the second axis, and this also happens to be the default value), 0, 1, or `None`. The first three cases are identical to those in [diff](diff), while the last one flattens the array before sorting. If descending order is required, the result can simply be `flip`ped, see [flip](flip).**WARNING:** `numpy` defines the `kind`, and `order` keyword arguments that are not implemented here. The function in `ulab` always uses heap sort, and since `ulab` does not have the concept of data fields, the `order` keyword argument would have no meaning. ###Code %%micropython -unix 1 import ulab as np from ulab import numerical a = np.array([[1, 12, 3, 0], [5, 3, 4, 1], [9, 11, 1, 8], [7, 10, 0, 1]], dtype=np.float) print('\na:\n', a) b = numerical.sort(a, axis=0) print('\na sorted along vertical axis:\n', b) c = numerical.sort(a, axis=1) print('\na sorted along horizontal axis:\n', c) c = numerical.sort(a, axis=None) print('\nflattened a sorted:\n', c) ###Output a: array([[1.0, 12.0, 3.0, 0.0], [5.0, 3.0, 4.0, 1.0], [9.0, 11.0, 1.0, 8.0], [7.0, 10.0, 0.0, 1.0]], dtype=float) a sorted along vertical axis: array([[1.0, 3.0, 0.0, 0.0], [5.0, 10.0, 1.0, 1.0], [7.0, 11.0, 3.0, 1.0], [9.0, 12.0, 4.0, 8.0]], dtype=float) a sorted along horizontal axis: array([[0.0, 1.0, 3.0, 12.0], [1.0, 3.0, 4.0, 5.0], [1.0, 8.0, 9.0, 11.0], [0.0, 1.0, 7.0, 10.0]], dtype=float) flattened a sorted: array([0.0, 0.0, 1.0, ..., 10.0, 11.0, 12.0], dtype=float) ###Markdown Heap sort requires $\sim N\log N$ operations, and notably, the worst case costs only 20% more time than the average. 
In order to get an order-of-magnitude estimate, we will take the sine of 1000 uniformly spaced numbers between 0, and two pi, and sort them: ###Code %%micropython -pyboard 1 import ulab as np from ulab import vector from ulab import numerical @timeit def sort_time(array): return numerical.sort(array) b = vector.sin(np.linspace(0, 6.28, num=1000)) print('b: ', b) sort_time(b) print('\nb sorted:\n', b) ###Output _____no_output_____ ###Markdown argsort`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.argsort.htmlSimilarly to [sort](sort), `argsort` takes a positional, and a keyword argument, and returns an unsigned short index array of type `ndarray` with the same dimensions as the input, or, if `axis=None`, as a row vector with length equal to the number of elements in the input (i.e., the flattened array). The indices in the output sort the input in ascending order. The routine in `argsort` is the same as in `sort`, therefore, the comments on computational expenses (time and RAM) also apply. In particular, since no copy of the original data is required, virtually no RAM beyond the output array is used. Since the underlying container of the output array is of type `uint16_t`, neither of the output dimensions should be larger than 65535. ###Code %%micropython -unix 1 import ulab as np from ulab import numerical a = np.array([[1, 12, 3, 0], [5, 3, 4, 1], [9, 11, 1, 8], [7, 10, 0, 1]], dtype=np.float) print('\na:\n', a) b = numerical.argsort(a, axis=0) print('\na sorted along vertical axis:\n', b) c = numerical.argsort(a, axis=1) print('\na sorted along horizontal axis:\n', c) c = numerical.argsort(a, axis=None) print('\nflattened a sorted:\n', c) ###Output a: array([[1.0, 12.0, 3.0, 0.0], [5.0, 3.0, 4.0, 1.0], [9.0, 11.0, 1.0, 8.0], [7.0, 10.0, 0.0, 1.0]], dtype=float) a sorted along vertical axis: array([[0, 1, 3, 0], [1, 3, 2, 1], [3, 2, 0, 3], [2, 0, 1, 2]], dtype=uint16) a sorted along horizontal axis: array([[3, 0, 2, 1], [3, 1, 2, 0], [2, 3, 0, 1], [2, 3, 0, 1]], dtype=uint16) flattened a sorted: array([3, 14, 0, ..., 13, 9, 1], dtype=uint16) ###Markdown Since during the sorting, only the indices are shuffled, `argsort` does not modify the input array, as one can verify this by the following example: ###Code %%micropython -unix 1 import ulab as np from ulab import numerical a = np.array([0, 5, 1, 3, 2, 4], dtype=np.uint8) print('\na:\n', a) b = numerical.argsort(a, axis=1) print('\nsorting indices:\n', b) print('\nthe original array:\n', a) ###Output a: array([0, 5, 1, 3, 2, 4], dtype=uint8) sorting indices: array([0, 2, 4, 3, 5, 1], dtype=uint16) the original array: array([0, 5, 1, 3, 2, 4], dtype=uint8) ###Markdown LinalgFunctions in the `linalg` module can be called by importing the sub-module first. size`size` takes a single argument, the axis, whose size is to be returned. Depending on the value of the argument, the following information will be returned:1. argument is 0: the number of elements of the array2. argument is 1: the number of rows3. 
argument is 2: the number of columns ###Code %%micropython -unix 1 import ulab as np from ulab import linalg a = np.array([1, 2, 3, 4], dtype=np.int8) print("a:\n", a) print("size of a:", linalg.size(a, axis=None), ",", linalg.size(a, axis=0)) b= np.array([[1, 2], [3, 4]], dtype=np.int8) print("\nb:\n", b) print("size of b:", linalg.size(b, axis=None), ",", linalg.size(b, axis=0), ",", linalg.size(b, axis=1)) ###Output a: array([1, 2, 3, 4], dtype=int8) size of a: 4 , 4 b: array([[1, 2], [3, 4]], dtype=int8) size of b: 4 , 2 , 2 ###Markdown inv`numpy`: https://docs.scipy.org/doc/numpy-1.17.0/reference/generated/numpy.linalg.inv.htmlA square matrix, provided that it is not singular, can be inverted by calling the `inv` function that takes a single argument. The inversion is based on successive elimination of elements in the lower left triangle, and raises a `ValueError` exception, if the matrix turns out to be singular (i.e., one of the diagonal entries is zero). ###Code %%micropython -pyboard 1 import ulab as np from ulab import linalg m = np.array([[1, 2, 3, 4], [4, 5, 6, 4], [7, 8.6, 9, 4], [3, 4, 5, 6]]) print(linalg.inv(m)) ###Output array([[-2.166666, 1.499999, -0.8333326, 1.0], [1.666666, -3.333331, 1.666666, -4.768516e-08], [0.1666672, 2.166666, -0.8333327, -1.0], [-0.1666666, -0.3333334, 4.96705e-08, 0.5]], dtype=float) ###Markdown Computation expensesNote that the cost of inverting a matrix is approximately twice as many floats (RAM), as the number of entries in the original matrix, and approximately as many operations, as the number of entries. Here are a couple of numbers: ###Code %%micropython -pyboard 1 import ulab as np from ulab import linalg @timeit def invert_matrix(m): return linalg.inv(m) m = np.array([[1, 2,], [4, 5]]) print('2 by 2 matrix:') invert_matrix(m) m = np.array([[1, 2, 3, 4], [4, 5, 6, 4], [7, 8.6, 9, 4], [3, 4, 5, 6]]) print('\n4 by 4 matrix:') invert_matrix(m) m = np.array([[1, 2, 3, 4, 5, 6, 7, 8], [0, 5, 6, 4, 5, 6, 4, 5], [0, 0, 9, 7, 8, 9, 7, 8], [0, 0, 0, 10, 11, 12, 11, 12], [0, 0, 0, 0, 4, 6, 7, 8], [0, 0, 0, 0, 0, 5, 6, 7], [0, 0, 0, 0, 0, 0, 7, 6], [0, 0, 0, 0, 0, 0, 0, 2]]) print('\n8 by 8 matrix:') invert_matrix(m) ###Output 2 by 2 matrix: execution time: 65 us 4 by 4 matrix: execution time: 105 us 8 by 8 matrix: execution time: 299 us ###Markdown The above-mentioned scaling is not obeyed strictly. The reason for the discrepancy is that the function call is still the same for all three cases: the input must be inspected, the output array must be created, and so on. dot`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html**WARNING:** numpy applies upcasting rules for the multiplication of matrices, while `ulab` simply returns a float matrix. Once you can invert a matrix, you might want to know, whether the inversion is correct. You can simply take the original matrix and its inverse, and multiply them by calling the `dot` function, which takes the two matrices as its arguments. If the matrix dimensions do not match, the function raises a `ValueError`. The result of the multiplication is expected to be the unit matrix, which is demonstrated below. 
###Code %%micropython -pyboard 1 import ulab as np from ulab import linalg m = np.array([[1, 2, 3], [4, 5, 6], [7, 10, 9]], dtype=np.uint8) n = linalg.inv(m) print("m:\n", m) print("\nm^-1:\n", n) # this should be the unit matrix print("\nm*m^-1:\n", linalg.dot(m, n)) ###Output m: array([[1, 2, 3], [4, 5, 6], [7, 10, 9]], dtype=uint8) m^-1: array([[-1.25, 1.0, -0.25], [0.5, -1.0, 0.5], [0.4166667, 0.3333334, -0.25]], dtype=float) m*m^-1: array([[1.0, 2.384186e-07, -1.490116e-07], [-2.980232e-07, 1.000001, -4.172325e-07], [-3.278255e-07, 1.311302e-06, 0.9999992]], dtype=float) ###Markdown Note that for matrix multiplication you don't necessarily need square matrices, it is enough, if their dimensions are compatible (i.e., the the left-hand-side matrix has as many columns, as does the right-hand-side matrix rows): ###Code %%micropython -unix 1 import ulab as np from ulab import linalg m = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=np.uint8) n = np.array([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=np.uint8) print(m) print(n) print(linalg.dot(m, n)) ###Output array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=uint8) array([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=uint8) array([[7.0, 10.0], [23.0, 34.0]], dtype=float) ###Markdown det`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.det.htmlThe `det` function takes a square matrix as its single argument, and calculates the determinant. The calculation is based on successive elimination of the matrix elements, and the return value is a float, even if the input array was of integer type. ###Code %%micropython -pyboard 1 import ulab as np from ulab import linalg a = np.array([[1, 2], [3, 4]], dtype=np.uint8) print(linalg.det(a)) ###Output -2.0 ###Markdown BenchmarkSince the routine for calculating the determinant is pretty much the same as for finding the [inverse of a matrix](inv), the execution times are similar: ###Code %%micropython -pyboard 1 @timeit def matrix_det(m): return linalg.inv(m) m = np.array([[1, 2, 3, 4, 5, 6, 7, 8], [0, 5, 6, 4, 5, 6, 4, 5], [0, 0, 9, 7, 8, 9, 7, 8], [0, 0, 0, 10, 11, 12, 11, 12], [0, 0, 0, 0, 4, 6, 7, 8], [0, 0, 0, 0, 0, 5, 6, 7], [0, 0, 0, 0, 0, 0, 7, 6], [0, 0, 0, 0, 0, 0, 0, 2]]) matrix_det(m) ###Output execution time: 294 us ###Markdown eig`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eig.htmlThe `eig` function calculates the eigenvalues and the eigenvectors of a real, symmetric square matrix. If the matrix is not symmetric, a `ValueError` will be raised. The function takes a single argument, and returns a tuple with the eigenvalues, and eigenvectors. With the help of the eigenvectors, amongst other things, you can implement sophisticated stabilisation routines for robots. 
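A cheap sanity check, sketched here for a small matrix, is that the sum of the eigenvalues reproduces the trace of the matrix (the `numerical` sub-module is assumed to be compiled into the firmware, too):

```python
import ulab as np
from ulab import linalg
from ulab import numerical

a = np.array([[2, 1], [1, 2]])
values, vectors = linalg.eig(a)

# the eigenvalues of this matrix are 1 and 3; their sum should equal
# the trace, i.e., 2 + 2 = 4
print(numerical.sum(values))
```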
###Code %%micropython -pyboard 1 import ulab as np from ulab import linalg a = np.array([[1, 2, 1, 4], [2, 5, 3, 5], [1, 3, 6, 1], [4, 5, 1, 7]], dtype=np.uint8) x, y = linalg.eig(a) print('eigenvectors of a:\n', x) print('\neigenvalues of a:\n', y) ###Output eigenvectors of a: array([-1.165288, 0.8029362, 5.585626, 13.77673], dtype=float) eigenvalues of a: array([[0.8151754, -0.4499267, -0.1643907, 0.3256237], [0.2211193, 0.7847154, 0.08373602, 0.5729892], [-0.1340859, -0.3100657, 0.8742685, 0.3486182], [-0.5182822, -0.2926556, -0.4490192, 0.6664218]], dtype=float) ###Markdown The same matrix diagonalised with `numpy` yields: ###Code a = array([[1, 2, 1, 4], [2, 5, 3, 5], [1, 3, 6, 1], [4, 5, 1, 7]], dtype=np.uint8) x, y = eig(a) print('eigenvectors of a:\n', x) print('\neigenvalues of a:\n', y) ###Output eigenvectors of a: [13.77672606 -1.16528837 0.80293655 5.58562576] eigenvalues of a: [[ 0.32561419 0.815156 0.44994112 -0.16446602] [ 0.57300777 0.22113342 -0.78469926 0.08372081] [ 0.34861093 -0.13401142 0.31007764 0.87427868] [ 0.66641421 -0.51832581 0.29266348 -0.44897499]] ###Markdown When comparing results, we should keep two things in mind: 1. the eigenvalues and eigenvectors are not necessarily sorted in the same way2. an eigenvector can be multiplied by an arbitrary non-zero scalar, and it is still an eigenvector with the same eigenvalue. This is why all signs of the eigenvector belonging to 5.58, and 0.80 are flipped in `ulab` with respect to `numpy`. This difference, however, is of absolutely no consequence. Computation expensesSince the function is based on [Givens rotations](https://en.wikipedia.org/wiki/Givens_rotation) and runs till convergence is achieved, or till the maximum number of allowed rotations is exhausted, there is no universal estimate for the time required to find the eigenvalues. However, an order of magnitude can, at least, be guessed based on the measurement below: ###Code %%micropython -pyboard 1 import ulab as np from ulab import linalg @timeit def matrix_eig(a): return linalg.eig(a) a = np.array([[1, 2, 1, 4], [2, 5, 3, 5], [1, 3, 6, 1], [4, 5, 1, 7]], dtype=np.uint8) matrix_eig(a) ###Output execution time: 111 us ###Markdown Cholesky decomposition`numpy`: https://docs.scipy.org/doc/numpy-1.17.0/reference/generated/numpy.linalg.cholesky.html`cholesky` takes a positive definite, symmetric square matrix as its single argument, and returns *square root matrix* in the lower triangular form. If the input argument does not fulfill the positivity or symmetry condition, a `ValueError` is raised. ###Code %%micropython -unix 1 import ulab from ulab import linalg a = ulab.array([[25, 15, -5], [15, 18, 0], [-5, 0, 11]]) print('a: ', a) print('\n' + '='*20 + '\nCholesky decomposition\n', linalg.cholesky(a)) ###Output a: array([[25.0, 15.0, -5.0], [15.0, 18.0, 0.0], [-5.0, 0.0, 11.0]], dtype=float) ==================== Cholesky decomposition array([[5.0, 0.0, 0.0], [3.0, 3.0, 0.0], [-1.0, 1.0, 3.0]], dtype=float) ###Markdown trace`numpy`: https://docs.scipy.org/doc/numpy-1.17.0/reference/generated/numpy.linalg.trace.htmlThe `trace` function returns the sum of the diagonal elements of a square matrix. If the input argument is not a square matrix, an exception will be raised.The scalar so returned will inherit the type of the input array, i.e., integer arrays have integer trace, and floating point arrays a floating point trace. 
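A small cross-check that ties several `linalg` routines together (only a sketch, assuming that `inv` and `dot` are also enabled in the firmware): the product of a matrix with its inverse is the unit matrix, so its trace should be the dimension of the matrix:

```python
import ulab as np
from ulab import linalg

m = np.array([[1, 2, 3], [4, 5, 6], [7, 10, 9]])

# m multiplied by its inverse is (numerically) the unit matrix,
# whose trace equals the dimension of m
print(linalg.trace(linalg.dot(m, linalg.inv(m))))   # expected to be close to 3.0
```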
###Code %%micropython -unix 1 import ulab from ulab import linalg a = ulab.array([[25, 15, -5], [15, 18, 0], [-5, 0, 11]], dtype=ulab.int8) print('a: ', a) print('\ntrace of a: ', linalg.trace(a)) b = ulab.array([[25, 15, -5], [15, 18, 0], [-5, 0, 11]], dtype=ulab.float) print('='*20 + '\nb: ', b) print('\ntrace of b: ', linalg.trace(b)) ###Output a: array([[25, 15, -5], [15, 18, 0], [-5, 0, 11]], dtype=int8) trace of a: 54 ==================== b: array([[25.0, 15.0, -5.0], [15.0, 18.0, 0.0], [-5.0, 0.0, 11.0]], dtype=float) trace of b: 54.0 ###Markdown PolynomialsFunctions in the polynomial sub-module can be invoked by importing the module first. polyval`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.polyval.html`polyval` takes two arguments, both arrays or other iterables. ###Code %%micropython -unix 1 import ulab as np from ulab import poly p = [1, 1, 1, 0] x = [0, 1, 2, 3, 4] print('coefficients: ', p) print('independent values: ', x) print('\nvalues of p(x): ', poly.polyval(p, x)) # the same works with one-dimensional ndarrays a = np.array(x) print('\nndarray (a): ', a) print('value of p(a): ', poly.polyval(p, a)) ###Output coefficients: [1, 1, 1, 0] independent values: [0, 1, 2, 3, 4] values of p(x): array([0.0, 3.0, 14.0, 39.0, 84.0], dtype=float) ndarray (a): array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=float) value of p(a): array([0.0, 3.0, 14.0, 39.0, 84.0], dtype=float) ###Markdown polyfit`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.polyfit.htmlpolyfit takes two, or three arguments. The last one is the degree of the polynomial that will be fitted, the last but one is an array or iterable with the `y` (dependent) values, and the first one, an array or iterable with the `x` (independent) values, can be dropped. If that is the case, `x` will be generated in the function, assuming uniform sampling. If the length of `x`, and `y` are not the same, the function raises a `ValueError`. ###Code %%micropython -unix 1 import ulab as np from ulab import poly x = np.array([0, 1, 2, 3, 4, 5, 6]) y = np.array([9, 4, 1, 0, 1, 4, 9]) print('independent values:\t', x) print('dependent values:\t', y) print('fitted values:\t\t', poly.polyfit(x, y, 2)) # the same with missing x print('\ndependent values:\t', y) print('fitted values:\t\t', poly.polyfit(y, 2)) ###Output independent values: array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=float) dependent values: array([9.0, 4.0, 1.0, 0.0, 1.0, 4.0, 9.0], dtype=float) fitted values: array([1.0, -6.0, 9.000000000000004], dtype=float) dependent values: array([9.0, 4.0, 1.0, 0.0, 1.0, 4.0, 9.0], dtype=float) fitted values: array([1.0, -6.0, 9.000000000000004], dtype=float) ###Markdown Execution time`polyfit` is based on the inversion of a matrix (there is more on the background in https://en.wikipedia.org/wiki/Polynomial_regression), and it requires the intermediate storage of `2*N*(deg+1)` floats, where `N` is the number of entries in the input array, and `deg` is the fit's degree. The additional computation costs of the matrix inversion discussed in [inv](inv) also apply. 
The example from above needs around 150 microseconds to return: ###Code %%micropython -pyboard 1 import ulab as np from ulab import poly @timeit def time_polyfit(x, y, n): return poly.polyfit(x, y, n) x = np.array([0, 1, 2, 3, 4, 5, 6]) y = np.array([9, 4, 1, 0, 1, 4, 9]) time_polyfit(x, y, 2) ###Output execution time: 153 us ###Markdown Fourier transformsFunctions related to Fourier transforms can be called by importing the `fft` sub-module first.`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.ifft.html fftSince `ulab`'s `ndarray` does not support complex numbers, the invocation of the Fourier transform differs from that in `numpy`. In `numpy`, you can simply pass an array or iterable to the function, and it will be treated as a complex array: ###Code fft.fft([1, 2, 3, 4, 1, 2, 3, 4]) ###Output _____no_output_____ ###Markdown **WARNING:** The array that is returned is also complex, i.e., the real and imaginary components are cast together. In `ulab`, the real and imaginary parts are treated separately: you have to pass two `ndarray`s to the function, although, the second argument is optional, in which case the imaginary part is assumed to be zero.**WARNING:** The function, as opposed to `numpy`, returns a 2-tuple, whose elements are two `ndarray`s, holding the real and imaginary parts of the transform separately. ###Code %%micropython -pyboard 1 import ulab as np from ulab import vector from ulab import fft x = np.linspace(0, 10, num=1024) y = vector.sin(x) z = np.zeros(len(x)) a, b = fft.fft(x) print('real part:\t', a) print('\nimaginary part:\t', b) c, d = fft.fft(x, z) print('\nreal part:\t', c) print('\nimaginary part:\t', d) ###Output real part: array([5119.996, -5.004663, -5.004798, ..., -5.005482, -5.005643, -5.006577], dtype=float) imaginary part: array([0.0, 1631.333, 815.659, ..., -543.764, -815.6588, -1631.333], dtype=float) real part: array([5119.996, -5.004663, -5.004798, ..., -5.005482, -5.005643, -5.006577], dtype=float) imaginary part: array([0.0, 1631.333, 815.659, ..., -543.764, -815.6588, -1631.333], dtype=float) ###Markdown ifftThe above-mentioned rules apply to the inverse Fourier transform. The inverse is also normalised by `N`, the number of elements, as is customary in `numpy`. With the normalisation, we can ascertain that the inverse of the transform is equal to the original array. ###Code %%micropython -pyboard 1 import ulab as np from ulab import vector from ulab import fft x = np.linspace(0, 10, num=1024) y = vector.sin(x) a, b = fft.fft(y) print('original vector:\t', y) y, z = fft.ifft(a, b) # the real part should be equal to y print('\nreal part of inverse:\t', y) # the imaginary part should be equal to zero print('\nimaginary part of inverse:\t', z) ###Output original vector: array([0.0, 0.009775016, 0.0195491, ..., -0.5275068, -0.5357859, -0.5440139], dtype=float) real part of inverse: array([-2.980232e-08, 0.0097754, 0.0195494, ..., -0.5275064, -0.5357857, -0.5440133], dtype=float) imaginary part of inverse: array([-2.980232e-08, -1.451171e-07, 3.693752e-08, ..., 6.44871e-08, 9.34986e-08, 2.18336e-07], dtype=float) ###Markdown Note that unlike in `numpy`, the length of the array on which the Fourier transform is carried out must be a power of 2. If this is not the case, the function raises a `ValueError` exception. spectrogramIn addition to the Fourier transform and its inverse, `ulab` also sports a function called `spectrogram`, which returns the absolute value of the Fourier transform. 
This could be used to find the dominant spectral component in a time series. The arguments are treated in the same way as in `fft`, and `ifft`. In order to keep compatibility of the core modules with `numpy`, this function is defined in the `extras` sub-module. ###Code %%micropython -unix 1 import ulab as np from ulab import vector from ulab import extras x = np.linspace(0, 10, num=1024) y = vector.sin(x) a = extras.spectrogram(y) print('original vector:\t', y) print('\nspectrum:\t', a) ###Output original vector: array([0.0, 0.009775015390171337, 0.01954909674625918, ..., -0.5275140569487312, -0.5357931822978732, -0.5440211108893639], dtype=float) spectrum: array([187.8635087634579, 315.3112063607119, 347.8814873399374, ..., 84.45888934298905, 347.8814873399374, 315.3112063607118], dtype=float) ###Markdown As such, `spectrogram` is really just a shorthand for `np.sqrt(a*a + b*b)`: ###Code %%micropython -pyboard 1 import ulab as np from ulab import fft from ulab import vector from ulab import extras x = np.linspace(0, 10, num=1024) y = vector.sin(x) a, b = fft.fft(y) print('\nspectrum calculated the hard way:\t', vector.sqrt(a*a + b*b)) a = extras.spectrogram(y) print('\nspectrum calculated the lazy way:\t', a) ###Output spectrum calculated the hard way: array([187.8641, 315.3125, 347.8804, ..., 84.4587, 347.8803, 315.3124], dtype=float) spectrum calculated the lazy way: array([187.8641, 315.3125, 347.8804, ..., 84.4587, 347.8803, 315.3124], dtype=float) ###Markdown Computation and storage costs RAMThe FFT routine of `ulab` calculates the transform in place. This means that beyond reserving space for the two `ndarray`s that will be returned (the computation uses these two as intermediate storage space), only a handful of temporary variables, all floats or 32-bit integers, are required. Speed of FFTsA comment on the speed: a 1024-point transform implemented in python would cost around 90 ms, and 13 ms in assembly, if the code runs on the pyboard, v.1.1. You can gain a factor of four by moving to the D series https://github.com/peterhinch/micropython-fourier/blob/master/README.md8-performance. ###Code %%micropython -pyboard 1 import ulab as np from ulab import vector from ulab import fft x = np.linspace(0, 10, num=1024) y = vector.sin(x) @timeit def np_fft(y): return fft.fft(y) a, b = np_fft(y) ###Output execution time: 1985 us ###Markdown The C implementation runs in less than 2 ms on the pyboard (we have just measured that), and has been reported to run in under 0.8 ms on the D series board. That is an improvement of at least a factor of four. Filter routinesFunctions in the `filter` module can be called by importing the sub-module first. convolve`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.convolve.htmlReturns the discrete, linear convolution of two one-dimensional sequences.Only the ``full`` mode is supported, and the ``mode`` named parameter is not accepted. Note that all other modes can be had by slicing a ``full`` result. ###Code %%micropython -unix 1 import ulab as np from ulab import filter x = np.array((1,2,3)) y = np.array((1,10,100,1000)) print(filter.convolve(x, y)) ###Output array([1.0, 12.0, 123.0, 1230.0, 2300.0, 3000.0], dtype=float) ###Markdown Comparison of arraysFunctions in the `compare` module can be called by importing the sub-module first. minimum`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.minimum.htmlReturns the minimum of two arrays, or two scalars, or an array, and a scalar. Partial broadcasting is implemented. 
If the arrays are of different `dtype`, the output is upcast as in [Binary operators](Binary-operators). If both inputs are scalars, a scalar is returned. Only positional arguments are implemented. maximum`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.maximum.htmlReturns the maximum of two arrays, or two scalars, or an array, and a scalar. Partial broadcasting is implemented. If the arrays are of different `dtype`, the output is upcast as in [Binary operators](Binary-operators). If both inputs are scalars, a scalar is returned. Only positional arguments are implemented. ###Code %%micropython -unix 1 import ulab a = ulab.array([1, 2, 3, 4, 5], dtype=ulab.uint8) b = ulab.array([5, 4, 3, 2, 1], dtype=ulab.float) print('minimum of a, and b:') print(ulab.compare.minimum(a, b)) print('\nmaximum of a, and b:') print(ulab.compare.maximum(a, b)) print('\nmaximum of 1, and 5.5:') print(ulab.compare.maximum(1, 5.5)) ###Output minimum of a, and b: array([1.0, 2.0, 3.0, 2.0, 1.0], dtype=float) maximum of a, and b: array([5.0, 4.0, 3.0, 4.0, 5.0], dtype=float) maximum of 1, and 5.5: 5.5 ###Markdown clip`numpy`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.clip.htmlClips an array, i.e., values that are outside of an interval are clipped to the interval edges. The function is equivalent to `maximum(a_min, minimum(a, a_max))`. or two scalars, hence partial broadcasting takes place exactly as in [minimum](minimum). If the arrays are of different `dtype`, the output is upcast as in [Binary operators](Binary-operators). ###Code %%micropython -unix 1 import ulab a = ulab.array(range(9), dtype=ulab.uint8) print('a:\t\t', a) print('clipped:\t', ulab.compare.clip(a, 3, 7)) b = 3 * ulab.ones(len(a), dtype=ulab.float) print('\na:\t\t', a) print('b:\t\t', b) print('clipped:\t', ulab.compare.clip(a, b, 7)) ###Output a: array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=uint8) clipped: array([3, 3, 3, 3, 4, 5, 6, 7, 7], dtype=uint8) a: array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=uint8) b: array([3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0], dtype=float) clipped: array([3.0, 3.0, 3.0, 3.0, 4.0, 5.0, 6.0, 7.0, 7.0], dtype=float) ###Markdown Notebook conversion ###Code import nbformat as nb import nbformat.v4.nbbase as nb4 from nbconvert import RSTExporter def convert_notebook(node, fn): (rst, resources) = rstexporter.from_notebook_node(notebook) with open(fn, 'w') as fout: fout.write(rst) rstexporter = RSTExporter() rstexporter.template_file = './templates/manual.tpl' source = nb.read('ulab-manual.ipynb', nb.NO_CONVERT) append_cell = False notebook = nb4.new_notebook() for j, cell in enumerate(source['cells']): if cell['cell_type'] == 'markdown': # skip everything before Introduction if cell['source'].split('\n')[0].startswith('# Introduction'): append_cell = True if append_cell: notebook.cells.append(cell) convert_notebook(notebook,'./manual/source/ulab.rst') %%writefile ./templates/manual.tpl {%- extends 'display_priority.tpl' -%} {% block in_prompt %} {% endblock in_prompt %} {% block output_prompt %} {% endblock output_prompt %} {% block input scoped%} {%- if cell.source.split('\n')[0].startswith('%%micropython') -%} .. code:: {{ '\n'.join(['# code to be run in micropython'] + cell.source.strip().split('\n')[1:]) | indent}} {%- else -%} .. 
code:: {{ '\n'.join(['# code to be run in CPython\n'] + cell.source.strip().split('\n')) | indent}} {%- endif -%} {% endblock input %} {% block error %} :: {{ super() }} {% endblock error %} {% block traceback_line %} {{ line | indent | strip_ansi }} {% endblock traceback_line %} {% block execute_result %} {% block data_priority scoped %} {{ super() }} {% endblock %} {% endblock execute_result %} {% block stream %} .. parsed-literal:: {{ output.text | indent }} {% endblock stream %} {% block data_svg %} .. image:: {{ output.metadata.filenames['image/svg+xml'] | urlencode }} {% endblock data_svg %} {% block data_png %} .. image:: {{ output.metadata.filenames['image/png'] | urlencode }} {%- set width=output | get_metadata('width', 'image/png') -%} {%- if width is not none %} :width: {{ width }}px {%- endif %} {%- set height=output | get_metadata('height', 'image/png') -%} {%- if height is not none %} :height: {{ height }}px {%- endif %} {% endblock data_png %} {% block data_jpg %} .. image:: {{ output.metadata.filenames['image/jpeg'] | urlencode }} {%- set width=output | get_metadata('width', 'image/jpeg') -%} {%- if width is not none %} :width: {{ width }}px {%- endif %} {%- set height=output | get_metadata('height', 'image/jpeg') -%} {%- if height is not none %} :height: {{ height }}px {%- endif %} {% endblock data_jpg %} {% block data_markdown %} {{ output.data['text/markdown'] | convert_pandoc("markdown", "rst") }} {% endblock data_markdown %} {% block data_latex %} .. math:: {{ output.data['text/latex'] | strip_dollars | indent }} {% endblock data_latex %} {% block data_text scoped %} .. parsed-literal:: {{ output.data['text/plain'] | indent }} {% endblock data_text %} {% block data_html scoped %} .. raw:: html {{ output.data['text/html'] | indent }} {% endblock data_html %} {% block markdowncell scoped %} {{ cell.source | convert_pandoc("markdown", "rst") }} {% endblock markdowncell %} {%- block rawcell scoped -%} {%- if cell.metadata.get('raw_mimetype', '').lower() in resources.get('raw_mimetypes', ['']) %} {{cell.source}} {% endif -%} {%- endblock rawcell -%} {% block headingcell scoped %} {{ ("#" * cell.level + cell.source) | replace('\n', ' ') | convert_pandoc("markdown", "rst") }} {% endblock headingcell %} {% block unknowncell scoped %} unknown type {{cell.type}} {% endblock unknowncell %} from IPython.core.magic import Magics, magics_class, line_cell_magic from IPython.core.magic import cell_magic, register_cell_magic, register_line_magic from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring import subprocess import os @magics_class class PyboardMagic(Magics): @cell_magic @magic_arguments() @argument('-skip') @argument('-unix') @argument('-pyboard') @argument('-file') @argument('-data') @argument('-time') @argument('-memory') def micropython(self, line='', cell=None): args = parse_argstring(self.micropython, line) if args.skip: # doesn't care about the cell's content print('skipped execution') return None # do not parse the rest if args.unix: # tests the code on the unix port. 
Note that this works on unix only with open('/dev/shm/micropython.py', 'w') as fout: fout.write(cell) proc = subprocess.Popen(["../../micropython/ports/unix/micropython", "/dev/shm/micropython.py"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) print(proc.stdout.read().decode("utf-8")) print(proc.stderr.read().decode("utf-8")) return None if args.file: # can be used to copy the cell content onto the pyboard's flash spaces = " " try: with open(args.file, 'w') as fout: fout.write(cell.replace('\t', spaces)) printf('written cell to {}'.format(args.file)) except: print('Failed to write to disc!') return None # do not parse the rest if args.data: # can be used to load data from the pyboard directly into kernel space message = pyb.exec(cell) if len(message) == 0: print('pyboard >>>') else: print(message.decode('utf-8')) # register new variable in user namespace self.shell.user_ns[args.data] = string_to_matrix(message.decode("utf-8")) if args.time: # measures the time of executions pyb.exec('import utime') message = pyb.exec('t = utime.ticks_us()\n' + cell + '\ndelta = utime.ticks_diff(utime.ticks_us(), t)' + "\nprint('execution time: {:d} us'.format(delta))") print(message.decode('utf-8')) if args.memory: # prints out memory information message = pyb.exec('from micropython import mem_info\nprint(mem_info())\n') print("memory before execution:\n========================\n", message.decode('utf-8')) message = pyb.exec(cell) print(">>> ", message.decode('utf-8')) message = pyb.exec('print(mem_info())') print("memory after execution:\n========================\n", message.decode('utf-8')) if args.pyboard: message = pyb.exec(cell) print(message.decode('utf-8')) ip = get_ipython() ip.register_magics(PyboardMagic) ###Output _____no_output_____ ###Markdown pyboard ###Code import pyboard pyb = pyboard.Pyboard('/dev/ttyACM0') pyb.enter_raw_repl() pyb.exit_raw_repl() pyb.close() %%micropython -pyboard 1 import utime import ulab as np def timeit(n=1000): def wrapper(f, *args, **kwargs): func_name = str(f).split(' ')[1] def new_func(*args, **kwargs): run_times = np.zeros(n, dtype=np.uint16) for i in range(n): t = utime.ticks_us() result = f(*args, **kwargs) run_times[i] = utime.ticks_diff(utime.ticks_us(), t) print('{}() execution times based on {} cycles'.format(func_name, n, (delta2-delta1)/n)) print('\tbest: %d us'%np.min(run_times)) print('\tworst: %d us'%np.max(run_times)) print('\taverage: %d us'%np.mean(run_times)) print('\tdeviation: +/-%.3f us'%np.std(run_times)) return result return new_func return wrapper def timeit(f, *args, **kwargs): func_name = str(f).split(' ')[1] def new_func(*args, **kwargs): t = utime.ticks_us() result = f(*args, **kwargs) print('execution time: ', utime.ticks_diff(utime.ticks_us(), t), ' us') return result return new_func ###Output ###Markdown Introduction In the [last chapter](https://micropython-usermod.readthedocs.io/en/latest/usermods_15.html) of the usermod documentation, I mentioned that I have another story, for another day. The day has come, so here is my story. Enter ulab`ulab` is a numpy-like module for `micropython`, meant to simplify and speed up common mathematical operations on arrays. The primary goal was to implement a small subset of numpy that might be useful in the context of a microcontroller. This means low-level data processing of linear (array) and two-dimensional (matrix) data. PurposeOf course, the first question that one has to answer is, why on Earth one would need a fast math library on a microcontroller. 
After all, it is not expected that heavy number crunching is going to take place on bare metal. It is not meant to. On a PC, the main reason for writing fast code is the sheer amount of data that one wants to process. On a microcontroller, the data volume is probably small, but it might lead to catastrophic system failure, if these data are not processed in time, because the microcontroller is supposed to interact with the outside world in a timely fashion. In fact, this latter objective was the initiator of this project: I needed the Fourier transform of a signal coming from the ADC of the pyboard, and all available options were simply too slow. In addition to speed, another issue that one has to keep in mind when working with embedded systems is the amount of available RAM: I believe, everything here could be implemented in pure python with relatively little effort, but the price we would have to pay for that is not only speed, but RAM, too. python code, if is not frozen, and compiled into the firmware, has to be compiled at runtime, which is not exactly a cheap process. On top of that, if numbers are stored in a list or tuple, which would be the high-level container, then they occupy 8 bytes, no matter, whether they are all smaller than 100, or larger than one hundred million. This is obviously a waste of resources in an environment, where resources are scarce. Finally, there is a reason for using `micropython` in the first place. Namely, that a microcontroller can be programmed in a very elegant, and *pythonic* way. But if it is so, why should we not extend this idea to other tasks and concepts that might come up in this context? If there was no other reason than this *elegance*, I would find that convincing enough.Based on the above-mentioned considerations, all functions in `ulab` are implemented in a way that 1. conforms to `numpy` as much as possible2. is so frugal with RAM as possible,3. and yet, fast. Much faster than pure python.The main points of `ulab` are - compact, iterable and slicable containers of numerical data in 1, and 2 dimensions (arrays and matrices). These containers support all the relevant unary and binary operators (e.g., `len`, ==, +, *, etc.)- vectorised computations on micropython iterables and numerical arrays/matrices (in `numpy`-speak, universal functions)- basic linear algebra routines (matrix inversion, multiplication, reshaping, transposition, determinant, and eigenvalues)- polynomial fits to numerical data- fast Fourier transformsAt the time of writing this manual (for version 0.33.2), the library adds approximately 30 kB of extra compiled code to the micropython (pyboard.v.11) firmware. However, if you are tight with flash space, you can easily shave off a couple of kB. See the section on [customising ulab](Custom_builds). Resources and legal mattersThe source code of the module can be found under https://github.com/v923z/micropython-ulab/tree/master/code. The source of this user manual is under https://github.com/v923z/micropython-ulab/tree/master/docs, while the technical details of the implementation are discussed at great length in https://github.com/v923z/micropython-ulab/tree/master/docs/ulab.ipynb. If you want an even thorougher explanation on why the various constructs of the implementation work, and work in that particular way, you can read more on the subject under https://micropython-usermod.readthedocs.io/en/latest/, where I demonstrate, what you have to do, if you want to make a C object behave in a *pythonic* way. 
The MIT licence applies to all material. Friendly requestIf you use `ulab`, and bump into a bug, or think that a particular function is missing, or its behaviour does not conform to `numpy`, please, raise a [ulab issue](https://github.com/v923z/micropython-ulab/issues) on github, so that the community can profit from your experiences. Even better, if you find the project useful, and think that it could be made better, faster, tighter, and shinier, please, consider contributing, and issue a pull request with the implementation of your improvements and new features. `ulab` can only become successful, if it offers what the community needs.These last comments apply to the documentation, too. If, in your opinion, the documentation is obscure, misleading, or not detailed enough, please, let me know, so that *we* can fix it. Differences between micropython-ulab and circuitpython-ulab`ulab` has originally been developed for `micropython`, but has since been integrated into a number of its flavours. Most of these flavours are simply forks of `micropython` itself, with some additional functionality. One of the notable exceptions is `circuitpython`, which has slightly diverged at the core level, and this has some minor consequences. Some of these concern the C implementation details only, which all have been sorted out with the generous and enthusiastic support of Jeff Epler from [Adafruit Industries](http://www.adafruit.com).There are, however, a couple of instances, where the usage in the two environments is slightly different at the python level. These are how the packges can be imported, and how the class properties can be accessed. In both cases, the `circuitpython` implementation results in `numpy`-conform code. `numpy`-compatibility in `micropython` will be implemented as soon as `micropython` itself has the required tools. Till then we have to live with a workaround, which I will point out at the relevant places. Customising `ulab``ulab` implements a great number of functions, which are organised in sub-modules. E.g., functions related to Fourier transforms are located in the `ulab.fft` sub-module, so you would import `fft` as```pythonimport ulabfrom ulab import fft```by which point you can get the FFT of your data by calling `fft.fft(...)`. The idea of such grouping of functions and methods is to provide a means for granularity: It is quite possible that you do not need all functions in a particular application. If you want to save some flash space, you can easily exclude arbitrary sub-modules from the firmware. The [ulab.h](https://github.com/v923z/micropython-ulab/blob/master/code/ulab.h) header file contains a pre-processor flag for each sub-module. The default setting is 1 for each of them. Setting them to 0 removes the module from the compiled firmware. The first couple of lines of the file look like this```c// vectorise (all functions) takes approx. 3 kB of flash spacedefine ULAB_VECTORISE_MODULE (1)// linalg adds around 6 kBdefine ULAB_LINALG_MODULE (1)// poly is approx. 2.5 kBdefine ULAB_POLY_MODULE (1)```In order to simplify navigation in the header, each flag begins with `ULAB_`, and continues with the name of the sub-module. This name is also the `.c` file, where the sub-module is implemented. So, e.g., the linear algebra routines can be found in `linalg.c`, and the corresponding compiler flag is `ULAB_LINALG_MODULE`. 
Each section displays a hint as to how much space you can save by un-setting the flag.At first, having to import everything in this way might appear to be overly complicated, but there is a very good reason behind all this: you can find out at the time of importing, whether a function or sub-module is part of your `ulab` firmware, or not. The alternative, namely, that you do not have to import anything beyond `ulab`, could prove catastrophic: you would learn only at run time (at the moment of calling the function in your code) that a particular function is not in the firmware, and that is most probably too late.Except for `fft`, the standard sub-modules, `vector`, `linalg`, `numerical`, `and poly`all `numpy`-compatible. User-defined functions that accept `ndarray`s as their argument should be implemented in the `extra` sub-module, or its sub-modules. Hints as to how to do that can be found in the section [Extending ulab](Extending-ulab). Supported functions and methods`ulab` supports a number of array operators, which are listed here. I tried to follow the specifications of the `numpy` interface as closely as possible, though, it was not always practical to implement verbatim behaviour. The differences, if any, are in each case small (e.g., a function cannot take all possible keyword arguments), and should not hinder everyday use. In the list below, a single asterisk denotes slight deviations from `numpy`'s nomenclature, and a double asterisk denotes those cases, where a bit more caution should be exercised, though this usually means functions that are not supported by `numpy`.The detailed discussion of the various functions always contains a link to the corresponding `numpy` documentation. However, before going down the rabbit hole, the module also defines a constant, the version, which can always be queried as ###Code %%micropython -unix 1 import ulab as np print('you are running ulab version', np.__version__) ###Output you are running ulab version 0.24 ###Markdown If you find a bug, please, include this number in your report! Basic ndarray operations[Unary operators](Unary-operators)[Binary operators](Binary-operators)[Indexing and slicing](Slicing-and-indexing)[ndarray iterators](Iterating-over-arrays)[Comparison operators*](Comparison-operators)[Universal functions](Universal-functions) (also support function calls on general iterables) Methods of ndarrays[.shape*](.shape)[size*](size)[itemsize*](itemsize)[.reshape](.reshape)[.transpose](.transpose)[.flatten**](.flatten) Matrix methods[inv](inv)[dot](dot)[det](det)[roll](roll)[flip](flip) Array initialisation functions[eye](eye)[ones](ones,-zeros)[zeros](ones,-zeros)[linspace](linspace) Statistical and other properties of arrays[min](min,-argmin,-max,-argmax)[argmin](min,-argmin,-max,-argmax)[max](min,-argmin,-max,-argmax)[argmax](min,-argmin,-max,-argmax)[sum](sum,-std,-mean)[std](sum,-std,-mean)[mean](sum,-std,-mean)[diff](diff)[sort](sort)[argsort](argsort) Manipulation of polynomials[polyval](polyval)[polyfit](polyfit) FFT routines[fft**](fft)[ifft**](ifft)[spectrum**](spectrum) Filter functions[convolve](convolve) ndarray, the basic containerThe `ndarray` is the underlying container of numerical data. It is derived from micropython's own `array` object, but has a great number of extra features starting with how it can be initialised, which operations can be done on it, and which functions can accept it as an argument. 
One important property of an `ndarray` is that it is also a proper `micropython` iterable.Since the `ndarray` is a binary container, it is also compact, meaning that it takes only a couple of bytes of extra RAM in addition to what is required for storing the numbers themselves. `ndarray`s are also type-aware, i.e., one can save RAM by specifying a data type, and using the smallest reasonable one. Five such types are defined, namely `uint8`, `int8`, which occupy a single byte of memory per datum, `uint16`, and `int16`, which occupy two bytes per datum, and `float`, which occupies four or eight bytes per datum. The precision/size of the `float` type depends on the definition of `mp_float_t`. Some platforms, e.g., the PYBD, implement `double`s, but some, e.g., the pyboard.v.11, don't. You can find out, what type of float your particular platform implements by looking at the output of the [.itemsize](.itemsize) class property.On the following pages, we will see how one can work with `ndarray`s. Those familiar with `numpy` should find that the nomenclature and naming conventions of `numpy` are adhered to as closely as possible. I will point out the few differences, where necessary.For the sake of comparison, in addition to the `ulab` code snippets, sometimes the equivalent `numpy` code is also presented. You can find out, where the snippet is supposed to run by looking at its first line, the header.Hint: you can easily port existing `numpy` code, if you `import ulab as np`. Initialising an arrayA new array can be created by passing either a standard micropython iterable, or another `ndarray` into the constructor. Initialising by passing iterablesIf the iterable is one-dimensional, i.e., one whose elements are numbers, then a row vector will be created and returned. If the iterable is two-dimensional, i.e., one whose elements are again iterables, a matrix will be created. If the lengths of the iterables is not consistent, a `ValueError` will be raised. Iterables of different types can be mixed in the initialisation function. If the `dtype` keyword with the possible `uint8/int8/uint16/int16/float` values is supplied, the new `ndarray` will have that type, otherwise, it assumes `float` as default. ###Code %%micropython -unix 1 import ulab as np a = [1, 2, 3, 4, 5, 6, 7, 8] b = np.array(a) print("a:\t", a) print("b:\t", b) # a two-dimensional array with mixed-type initialisers c = np.array([range(5), range(20, 25, 1), [44, 55, 66, 77, 88]], dtype=np.uint8) print("\nc:\t", c) # and now we throw an exception d = np.array([range(5), range(10), [44, 55, 66, 77, 88]], dtype=np.uint8) print("\nd:\t", d) ###Output a: [1, 2, 3, 4, 5, 6, 7, 8] b: array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) c: array([[0, 1, 2, 3, 4], [20, 21, 22, 23, 24], [44, 55, 66, 77, 88]], dtype=uint8) Traceback (most recent call last): File "/dev/shm/micropython.py", line 15, in <module> ValueError: iterables are not of the same length ###Markdown `ndarray`s are pretty-printed, i.e., if the length is larger than 10, then only the first and last three entries will be printed. Also note that, as opposed to `numpy`, the printout always contains the `dtype`. ###Code %%micropython -unix 1 import ulab as np a = np.array(range(200)) print("a:\t", a) ###Output a: array([0.0, 1.0, 2.0, ..., 197.0, 198.0, 199.0], dtype=float) ###Markdown Initialising by passing arraysAn `ndarray` can be initialised by supplying another array. 
This statement is almost trivial, since `ndarray`s are iterables themselves, though it should be pointed out that initialising through arrays should be faster, because simply a new copy is created, without inspection, iteration etc. ###Code %%micropython -unix 1 import ulab as np a = [1, 2, 3, 4, 5, 6, 7, 8] b = np.array(a) c = np.array(b) print("a:\t", a) print("b:\t", b) print("\nc:\t", c) ###Output a: [1, 2, 3, 4, 5, 6, 7, 8] b: array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) c: array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) ###Markdown Methods of ndarrays .shapeThe `.shape` method (property) returns a 2-tuple with the number of rows, and columns. **WARNING:** In `circuitpython`, you can call the method as a property, i.e., ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4], dtype=np.int8) print("a:\n", a) print("shape of a:", a.shape) b= np.array([[1, 2], [3, 4]], dtype=np.int8) print("\nb:\n", b) print("shape of b:", b.shape) ###Output a: array([1, 2, 3, 4], dtype=int8) shape of a: (1, 4) b: array([[1, 2], [3, 4]], dtype=int8) shape of b: (2, 2) ###Markdown **WARNING:** On the other hand, since properties are not implemented in `micropython`, there you would call the method as a function, i.e., ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4], dtype=np.int8) print("a:\n", a) print("shape of a:", a.shape) b= np.array([[1, 2], [3, 4]], dtype=np.int8) print("\nb:\n", b) print("shape of b:", b.shape()) ###Output a: array([1, 2, 3, 4], dtype=int8) shape of a: (1, 4) b: array([[1, 2], [3, 4]], dtype=int8) shape of b: (2, 2) ###Markdown .sizeThe `.size` method (property) returns an integer with the number of elements in the array. **WARNING:** In `circuitpython`, the `numpy` nomenclature applies, i.e., ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3], dtype=np.int8) print("a:\n", a) print("size of a:", a.size) b= np.array([[1, 2], [3, 4]], dtype=np.int8) print("\nb:\n", b) print("size of b:", b.size) ###Output a: array([1, 2, 3], dtype=int8) size of a: 3 b: array([[1, 2], [3, 4]], dtype=int8) size of b: 4 ###Markdown **WARNING:** In `micropython`, `size` is a method, i.e., ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3], dtype=np.int8) print("a:\n", a) print("size of a:", a.size) b= np.array([[1, 2], [3, 4]], dtype=np.int8) print("\nb:\n", b) print("size of b:", b.size()) ###Output a: array([1, 2, 3], dtype=int8) size of a: 3 b: array([[1, 2], [3, 4]], dtype=int8) size of b: 4 ###Markdown .itemsizeThe `.itemsize` method (property) returns an integer with the siz enumber of elements in the array.**WARNING:** In `circuitpython`: ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3], dtype=np.int8) print("a:\n", a) print("itemsize of a:", a.itemsize) b= np.array([[1, 2], [3, 4]], dtype=np.float) print("\nb:\n", b) print("itemsize of b:", b.itemsize) ###Output a: array([1, 2, 3], dtype=int8) itemsize of a: 1 b: array([[1.0, 2.0], [3.0, 4.0]], dtype=float) itemsize of b: 8 ###Markdown **WARNING:** In `micropython`: ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3], dtype=np.int8) print("a:\n", a) print("itemsize of a:", a.itemsize) b= np.array([[1, 2], [3, 4]], dtype=np.float) print("\nb:\n", b) print("itemsize of b:", b.itemsize()) ###Output a: array([1, 2, 3], dtype=int8) itemsize of a: 1 b: array([[1.0, 2.0], [3.0, 4.0]], dtype=float) itemsize of b: 8 ###Markdown .reshapenumpy: 
https://docs.scipy.org/doc/numpy/reference/generated/numpy.reshape.html`reshape` re-writes the shape properties of an `ndarray`, but the array will not be modified in any other way. The function takes a single 2-tuple with two integers as its argument. The 2-tuple should specify the desired number of rows and columns. If the new shape is not consistent with the old, a `ValueError` exception will be raised. ###Code %%micropython -unix 1 import ulab as np a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], dtype=np.uint8) print('a (4 by 4):', a) print('a (2 by 8):', a.reshape((2, 8))) print('a (1 by 16):', a.reshape((1, 16))) ###Output a (4 by 4): array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], dtype=uint8) a (2 by 8): array([[1, 2, 3, 4, 5, 6, 7, 8], [9, 10, 11, 12, 13, 14, 15, 16]], dtype=uint8) a (1 by 16): array([1, 2, 3, ..., 14, 15, 16], dtype=uint8) ###Markdown .flattennumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.flatten.htm`.flatten` returns the flattened array. The array can be flattened in `C` style (i.e., moving horizontally in the matrix), or in `fortran` style (i.e., moving vertically in the matrix). The `C`-style flattening is the default, and it is also fast, because this is just a verbatim copy of the contents. ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4], dtype=np.int8) print("a: \t\t", a) print("a flattened: \t", a.flatten()) b = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int8) print("\nb:", b) print("b flattened (C): \t", b.flatten()) print("b flattened (F): \t", b.flatten(order='F')) ###Output a: array([1, 2, 3, 4], dtype=int8) a flattened: array([1, 2, 3, 4], dtype=int8) b: array([[1, 2, 3], [4, 5, 6]], dtype=int8) b flattened (C): array([1, 2, 3, 4, 5, 6], dtype=int8) b flattened (F): array([1, 4, 2, 5, 3, 6], dtype=int8) ###Markdown .transposenumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.transpose.htmlNote that only square matrices can be transposed in place, and in general, an internal copy of the matrix is required. If RAM is a concern, plan accordingly! ###Code %%micropython -unix 1 import ulab as np a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=np.uint8) print('a:\n', a) print('shape of a:', a.shape()) a.transpose() print('\ntranspose of a:\n', a) print('shape of a:', a.shape()) ###Output a: array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=uint8) shape of a: (4, 3) transpose of a: array([[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]], dtype=uint8) shape of a: (3, 4) ###Markdown .sortnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.sort.htmlIn-place sorting of an `ndarray`. For a more detailed exposition, see [sort](sort). 
###Code %%micropython -unix 1 import ulab as np a = np.array([[1, 12, 3, 0], [5, 3, 4, 1], [9, 11, 1, 8], [7, 10, 0, 1]], dtype=np.uint8) print('\na:\n', a) a.sort(axis=0) print('\na sorted along vertical axis:\n', a) a = np.array([[1, 12, 3, 0], [5, 3, 4, 1], [9, 11, 1, 8], [7, 10, 0, 1]], dtype=np.uint8) a.sort(a, axis=1) print('\na sorted along horizontal axis:\n', a) a = np.array([[1, 12, 3, 0], [5, 3, 4, 1], [9, 11, 1, 8], [7, 10, 0, 1]], dtype=np.uint8) a.sort(a, axis=None) print('\nflattened a sorted:\n', a) ###Output a: array([[1, 12, 3, 0], [5, 3, 4, 1], [9, 11, 1, 8], [7, 10, 0, 1]], dtype=uint8) a sorted along vertical axis: array([[1, 3, 0, 0], [5, 10, 1, 1], [7, 11, 3, 1], [9, 12, 4, 8]], dtype=uint8) a sorted along horizontal axis: array([[0, 1, 3, 12], [1, 3, 4, 5], [1, 8, 9, 11], [0, 1, 7, 10]], dtype=uint8) flattened a sorted: array([0, 0, 1, ..., 10, 11, 12], dtype=uint8) ###Markdown Unary operatorsWith the exception of `len`, which returns a single number, all unary operators manipulate the underlying data element-wise. lenThis operator takes a single argument, and returns either the length (for row vectors), or the number of rows (for matrices) of its argument. ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4, 5], dtype=np.uint8) b = np.array([range(5), range(5), range(5), range(5)], dtype=np.uint8) print("a:\t", a) print("length of a: ", len(a)) print("shape of a: ", a.shape()) print("\nb:\t", b) print("length of b: ", len(b)) print("shape of b: ", b.shape()) ###Output a: array([1, 2, 3, 4, 5], dtype=uint8) length of a: 5 shape of a: (1, 5) b: array([[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], dtype=uint8) length of b: 4 shape of b: (4, 5) ###Markdown The number returned by `len` is also the length of the iterations, when the array supplies the elements for an iteration (see later). invertThe function function is defined for integer data types (`uint8`, `int8`, `uint16`, and `int16`) only, takes a single argument, and returns the element-by-element, bit-wise inverse of the array. If a `float` is supplied, the function raises a `ValueError` exception.With signed integers (`int8`, and `int16`), the results might be unexpected, as in the example below: ###Code %%micropython -unix 1 import ulab as np a = np.array([0, -1, -100], dtype=np.int8) print("a:\t\t", a) print("inverse of a:\t", ~a) a = np.array([0, 1, 254, 255], dtype=np.uint8) print("\na:\t\t", a) print("inverse of a:\t", ~a) ###Output a: array([0, -1, -100], dtype=int8) inverse of a: array([-1, 0, 99], dtype=int8) a: array([0, 1, 254, 255], dtype=uint8) inverse of a: array([255, 254, 1, 0], dtype=uint8) ###Markdown absThis function takes a single argument, and returns the element-by-element absolute value of the array. When the data type is unsigned (`uint8`, or `uint16`), a copy of the array will be returned immediately, and no calculation takes place. ###Code %%micropython -unix 1 import ulab as np a = np.array([0, -1, -100], dtype=np.int8) print("a:\t\t\t ", a) print("absolute value of a:\t ", abs(a)) ###Output a: array([0, -1, -100], dtype=int8) absolute value of a: array([0, 1, 100], dtype=int8) ###Markdown negThis operator takes a single argument, and changes the sign of each element in the array. Unsigned values are wrapped. 
###Code %%micropython -unix 1 import ulab as np a = np.array([10, -1, 1], dtype=np.int8) print("a:\t\t", a) print("negative of a:\t", -a) b = np.array([0, 100, 200], dtype=np.uint8) print("\nb:\t\t", b) print("negative of b:\t", -b) ###Output a: array([10, -1, 1], dtype=int8) negative of a: array([-10, 1, -1], dtype=int8) b: array([0, 100, 200], dtype=uint8) negative of b: array([0, 156, 56], dtype=uint8) ###Markdown posThis function takes a single argument, and simply returns a copy of the array. ###Code %%micropython -unix 1 import ulab as np a = np.array([10, -1, 1], dtype=np.int8) print("a:\t\t", a) print("positive of a:\t", +a) ###Output a: array([10, -1, 1], dtype=int8) positive of a: array([10, -1, 1], dtype=int8) ###Markdown Binary operatorsAll binary operators work element-wise. This also means that the operands either must have the same shape, or one of them must be a scalar.**WARNING:** `numpy` also allows operations between a matrix, and a row vector, if the row vector has exactly as many elements, as many columns the matrix has. This feature will be added in future versions of `ulab`. ###Code a = array([[1, 2, 3], [4, 5, 6], [7, 8, 6]]) b = array([10, 20, 30]) a+b ###Output _____no_output_____ ###Markdown UpcastingBinary operations require special attention, because two arrays with different typecodes can be the operands of an operation, in which case it is not trivial, what the typecode of the result is. This decision on the result's typecode is called upcasting. Since the number of typecodes in `ulab` is significantly smaller than in `numpy`, we have to define new upcasting rules. Where possible, I followed `numpy`'s conventions. `ulab` observes the following upcasting rules:1. Operations with two `ndarray`s of the same `dtype` preserve their `dtype`, even when the results overflow.2. if either of the operands is a float, the result is automatically a float3. When the right hand side of a binary operator is a micropython variable, `mp_obj_int`, or `mp_obj_float`, then the result will be promoted to `dtype` `float`. This is necessary, because a micropython integer can be 31 bites wide. Other micropython types (e.g., lists, tuples, etc.) raise a `TypeError` exception. 4. | left hand side | right hand side | ulab result | numpy result ||----------------|-----------------|-------------|--------------||`uint8` |`int8` |`int16` |`int16` ||`uint8` |`int16` |`int16` |`int16` ||`uint8` |`uint16` |`uint16` |`uint16` ||`int8` |`int16` |`int16` |`int16` | |`int8` |`uint16` |`uint16` |`int32` ||`uint16` |`int16` |`float` |`int32` | Note that the last two operations are promoted to `int32` in `numpy`. **WARNING:** Due to the lower number of available data types, the upcasting rules of `ulab` are slightly different to those of `numpy`. 
Watch out for this, when porting code!Upcasting can be seen in action in the following snippet: ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4], dtype=np.uint8) b = np.array([1, 2, 3, 4], dtype=np.int8) print("a:\t", a) print("b:\t", b) print("a+b:\t", a+b) c = np.array([1, 2, 3, 4], dtype=np.float) print("\na:\t", a) print("c:\t", c) print("a*c:\t", a*c) ###Output a: array([1, 2, 3, 4], dtype=uint8) b: array([1, 2, 3, 4], dtype=int8) a+b: array([2, 4, 6, 8], dtype=int16) a: array([1, 2, 3, 4], dtype=uint8) c: array([1.0, 2.0, 3.0, 4.0], dtype=float) a*c: array([1.0, 4.0, 9.0, 16.0], dtype=float) ###Markdown **WARNING:** If a binary operation involves an `ndarray` and a micropython type (integer, or float), then the array must be on the left hand side. ###Code %%micropython -unix 1 import ulab as np # this is going to work a = np.array([1, 2, 3, 4], dtype=np.uint8) b = 12 print("a:\t", a) print("b:\t", b) print("a+b:\t", a+b) # but this will spectacularly fail print("b+a:\t", b+a) ###Output a: array([1, 2, 3, 4], dtype=uint8) b: 12 a+b: array([13, 14, 15, 16], dtype=uint8) Traceback (most recent call last): File "/dev/shm/micropython.py", line 12, in <module> TypeError: unsupported types for __add__: 'int', 'ndarray' ###Markdown The reason for this lies in how micropython resolves binary operators, and this means that a fix can only be implemented, if micropython itself changes the corresponding function(s). Till then, keep `ndarray`s on the left hand side. BenchmarksThe following snippet compares the performance of binary operations to a possible implementation in python. For the time measurement, we will take the following snippet from the micropython manual: ###Code %%micropython -pyboard 1 def timeit(f, *args, **kwargs): func_name = str(f).split(' ')[1] def new_func(*args, **kwargs): t = utime.ticks_us() result = f(*args, **kwargs) print('execution time: ', utime.ticks_diff(utime.ticks_us(), t), ' us') return result return new_func %%micropython -pyboard 1 import ulab as np @timeit def py_add(a, b): return [a[i]+b[i] for i in range(1000)] @timeit def py_multiply(a, b): return [a[i]*b[i] for i in range(1000)] @timeit def ulab_add(a, b): return a + b @timeit def ulab_multiply(a, b): return a * b a = [0.0]*1000 b = range(1000) print('python add:') py_add(a, b) print('\npython multiply:') py_multiply(a, b) a = np.linspace(0, 10, num=1000) b = np.ones(1000) print('\nulab add:') ulab_add(a, b) print('\nulab multiply:') ulab_multiply(a, b) ###Output python add: execution time: 10051 us python multiply: execution time: 14175 us ulab add: execution time: 222 us ulab multiply: execution time: 213 us ###Markdown I do not claim that the python implementation above is perfect, and certainly, there is much room for improvement. However, the factor of 50 difference in execution time is very spectacular. This is nothing but a consequence of the fact that the `ulab` functions run `C` code, with very little python overhead. The factor of 50 appears to be quite universal: the FFT routine obeys similar scaling (see [Speed of FFTs](Speed-of-FFTs)), and this number came up with font rendering, too: [fast font rendering on graphical displays](https://forum.micropython.org/viewtopic.php?f=15&t=5815&p=33362&hilit=ufontp33383). Comparison operatorsThe smaller than, greater than, smaller or equal, and greater or equal operators return a vector of Booleans indicating the positions (`True`), where the condition is satisfied. 
###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.uint8) print(a < 5) ###Output [True, True, True, True, False, False, False, False] ###Markdown **WARNING:** Note that `numpy` returns an array of Booleans. For most use cases this fact should not make a difference. ###Code a = array([1, 2, 3, 4, 5, 6, 7, 8]) a < 5 ###Output _____no_output_____ ###Markdown These operators work with matrices, too, in which case a list of lists of Booleans will be returned: ###Code %%micropython -unix 1 import ulab as np a = np.array([range(0, 5, 1), range(1, 6, 1), range(2, 7, 1)], dtype=np.uint8) print(a) print(a < 5) ###Output array([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5], [2, 3, 4, 5, 6]], dtype=uint8) [[True, True, True, True, True], [True, True, True, True, False], [True, True, True, False, False]] ###Markdown Iterating over arrays`ndarray`s are iterable, which means that their elements can also be accessed as can the elements of a list, tuple, etc. If the array is one-dimensional, the iterator returns scalars, otherwise a new one-dimensional `ndarray`, which is simply a copy of the corresponding row of the matrix, i.e, its data type will be inherited. ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4, 5], dtype=np.uint8) b = np.array([range(5), range(10, 15, 1), range(20, 25, 1), range(30, 35, 1)], dtype=np.uint8) print("a:\t", a) for i, _a in enumerate(a): print("element %d in a:"%i, _a) print("\nb:\t", b) for i, _b in enumerate(b): print("element %d in b:"%i, _b) ###Output a: array([1, 2, 3, 4, 5], dtype=uint8) element 0 in a: 1 element 1 in a: 2 element 2 in a: 3 element 3 in a: 4 element 4 in a: 5 b: array([[0, 1, 2, 3, 4], [10, 11, 12, 13, 14], [20, 21, 22, 23, 24], [30, 31, 32, 33, 34]], dtype=uint8) element 0 in b: array([0, 1, 2, 3, 4], dtype=uint8) element 1 in b: array([10, 11, 12, 13, 14], dtype=uint8) element 2 in b: array([20, 21, 22, 23, 24], dtype=uint8) element 3 in b: array([30, 31, 32, 33, 34], dtype=uint8) ###Markdown Slicing and indexingCopies of sub-arrays can be created by indexing, and slicing. IndexingThe simplest form of indexing is specifying a single integer between the square brackets as in ###Code %%micropython -unix 1 import ulab as np a = np.array(range(10), dtype=np.uint8) print("a:\t\t\t\t\t\t", a) print("the first, and first from right element of a:\t", a[0], a[-1]) print("the second, and second from right element of a:\t", a[1], a[-2]) ###Output a: array([0, 1, 2, ..., 7, 8, 9], dtype=uint8) the first, and first from right element of a: 0 9 the second, and second from right element of a: 1 8 ###Markdown Indices are (not necessarily non-negative) integers, or a list of Booleans. By using a Boolean list, we can select those elements of an array that satisfy a specific condition. At the moment, such indexing is defined for row vectors only, for matrices the function raises a `ValueError` exception, though this will be rectified in a future version of `ulab`. ###Code %%micropython -unix 1 import ulab as np a = np.array(range(9), dtype=np.float) print("a:\t", a) print("a < 5:\t", a[a < 5]) ###Output a: array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) a < 5: array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=float) ###Markdown Indexing with Boolean arrays can take more complicated expressions. 
This is a very concise way of comparing two vectors, e.g.: ###Code %%micropython -pyboard 1 import ulab as np a = np.array(range(9), dtype=np.uint8) b = np.array([4, 4, 4, 3, 3, 3, 13, 13, 13], dtype=np.uint8) print("a:\t", a) print("\na**2:\t", a*a) print("\nb:\t", b) print("\n100*sin(b):\t", np.sin(b)*100.0) print("\na[a*a > np.sin(b)*100.0]:\t", a[a*a > np.sin(b)*100.0]) ###Output a: array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=uint8) a**2: array([0, 1, 4, 9, 16, 25, 36, 49, 64], dtype=uint8) b: array([4, 4, 4, 3, 3, 3, 13, 13, 13], dtype=uint8) 100*sin(b): array([-75.68025, -75.68025, -75.68025, 14.112, 14.112, 14.112, 42.01671, 42.01671, 42.01671], dtype=float) a[a*a > np.sin(b)*100.0]: array([0, 1, 2, 4, 5, 7, 8], dtype=uint8) ###Markdown Slicing and assigning to slicesYou can also generate sub-arrays by specifying slices as the index of an array. Slices are special python objects of the form ```pythonslice = start:end:stop```where `start`, `end`, and `stop` are (not necessarily non-negative) integers. Not all of these three numbers must be specified in an index, in fact, all three of them can be missing. The interpreter takes care of filling in the missing values. (Note that slices cannot be defined in this way, only there, where an index is expected.) For a good explanation on how slices work in python, you can read the stackoverflow question https://stackoverflow.com/questions/509211/understanding-slice-notation.Slices work on both axes: ###Code %%micropython -unix 1 import ulab as np a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.uint8) print('a:\n', a) # the first row print('\na[0]:\n', a[0]) # the first two elements of the first row print('\na[0,:2]:\n', a[0,:2]) # the zeroth element in each row (also known as the zeroth column) print('\na[:,0]:\n', a[:,0]) # the last but one row print('\na[-1]:\n', a[-1]) # the last two rows backwards print('\na[::1]:\n', a[::-1]) ###Output a: array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=uint8) a[0]: array([1, 2, 3], dtype=uint8) a[0,:2]: array([1, 2], dtype=uint8) a[:,0]: array([1, 4, 7], dtype=uint8) a[-1]: array([7, 8, 9], dtype=uint8) a[::1]: array([[7, 8, 9], [4, 5, 6]], dtype=uint8) ###Markdown Assignment to slices can be done for the whole slice, per row, and per column. A couple of examples should make these statements clearer: ###Code %%micropython -unix 1 import ulab as np zero_list = [0, 0, 0] a = np.array([zero_list, zero_list, zero_list], dtype=np.uint8) print('a:\n', a) # assigning to the whole row a[0] = 1 print('\na[0] = 1\n', a) # assigning to the whole row a[0] = np.array([1, 2, -333], dtype=np.float) print('\na[0] = np.array([1, 2, 3])\n', a) # assigning to a column a[:,2] = 3.0 print('\na[:,0]:\n', a) ###Output a: array([[0, 0, 0], [0, 0, 0], [0, 0, 0]], dtype=uint8) a[0] = 1 array([[1, 1, 1], [0, 0, 0], [0, 0, 0]], dtype=uint8) a[0] = np.array([1, 2, 3]) array([[1, 2, 179], [0, 0, 0], [0, 0, 0]], dtype=uint8) a[:,0]: array([[1, 2, 3], [0, 0, 3], [0, 0, 3]], dtype=uint8) ###Markdown Universal functionsStandard mathematical functions can be calculated on any scalar-valued iterable (ranges, lists, tuples containing numbers), and on `ndarray`s without having to change the call signature. In all cases the functions return a new `ndarray` of typecode `float` (since these functions usually generate float values, anyway). The functions execute faster with `ndarray` arguments than with iterables, because the values of the input vector can be extracted faster. 
At present, the following functions are supported:`acos`, `acosh`, `asin`, `asinh`, `atan`, `atanh`, `ceil`, `cos`, `erf`, `erfc`, `exp`, `expm1`, `floor`, `tgamma`, `lgamma`, `log`, `log10`, `log2`, `sin`, `sinh`, `sqrt`, `tan`, `tanh`.These functions are applied element-wise to the arguments, thus, e.g., the exponential of a matrix cannot be calculated in this way. ###Code %%micropython -pyboard 1 import ulab as np a = range(9) b = np.array(a) # works with ranges, lists, tuples etc. print('a:\t', a) print('exp(a):\t', np.exp(a)) # with 1D arrays print('\nb:\t', b) print('exp(b):\t', np.exp(b)) # as well as with matrices c = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) print('\nc:\t', c) print('exp(c):\t', np.exp(c)) ###Output a: range(0, 9) exp(a): array([1.0, 2.718282, 7.389056, 20.08554, 54.59816, 148.4132, 403.4288, 1096.633, 2980.958], dtype=float) b: array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) exp(b): array([1.0, 2.718282, 7.389056, 20.08554, 54.59816, 148.4132, 403.4288, 1096.633, 2980.958], dtype=float) c: array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype=float) exp(c): array([[2.718282, 7.389056, 20.08554], [54.59816, 148.4132, 403.4288], [1096.633, 2980.958, 8103.084]], dtype=float) ###Markdown Computation expensesThe overhead for calculating with micropython iterables is quite significant: for the 1000 samples below, the difference is more than 800 microseconds, because internally the function has to create the `ndarray` for the output, has to fetch the iterable's items of unknown type, and then convert them to floats. All these steps are skipped for `ndarray`s, because these pieces of information are already known. ###Code %%micropython -pyboard 1 import ulab as np a = [0]*1000 b = np.array(a) @timeit def measure_run_time(x): return np.exp(x) measure_run_time(a) measure_run_time(b) ###Output execution time: 1259 us execution time: 408 us ###Markdown Of course, such a time saving is reasonable only, if the data are already available as an `ndarray`. If one has to initialise the `ndarray` from the list, then there is no gain, because the iterator was simply pushed into the initialisation function. Numerical linspacenumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.htmlThis function returns an array, whose elements are uniformly spaced between the `start`, and `stop` points. The number of intervals is determined by the `num` keyword argument, whose default value is 50. With the `endpoint` keyword argument (defaults to `True`) one can include `stop` in the sequence. In addition, the `dtype` keyword can be supplied to force type conversion of the output. The default is `float`. Note that, when `dtype` is of integer type, the sequence is not necessarily evenly spaced. This is not an error, rather a consequence of rounding. (This is also the `numpy` behaviour.) 
###Code %%micropython -unix 1 import ulab as np # generate a sequence with defaults print('default sequence:\t', np.linspace(0, 10)) # num=5 print('num=5:\t\t\t', np.linspace(0, 10, num=5)) # num=5, endpoint=False print('num=5:\t\t\t', np.linspace(0, 10, num=5, endpoint=False)) # num=5, endpoint=False, dtype=uint8 print('num=5:\t\t\t', np.linspace(0, 5, num=7, endpoint=False, dtype=np.uint8)) ###Output default sequence: array([0.0, 0.2040816396474838, 0.4081632792949677, ..., 9.591833114624023, 9.795914649963379, 9.999996185302734], dtype=float) num=5: array([0.0, 2.5, 5.0, 7.5, 10.0], dtype=float) num=5: array([0.0, 2.0, 4.0, 6.0, 8.0], dtype=float) num=5: array([0, 0, 1, 2, 2, 3, 4], dtype=uint8) ###Markdown min, argmin, max, argmaxnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.min.htmlnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.htmlnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.max.htmlnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html**WARNING:** Difference to `numpy`: the `out` keyword argument is not implemented.These functions follow the same pattern, and work with generic iterables, and `ndarray`s. `min`, and `max` return the minimum or maximum of a sequence. If the input array is two-dimensional, the `axis` keyword argument can be supplied, in which case the minimum/maximum along the given axis will be returned. If `axis=None` (this is also the default value), the minimum/maximum of the flattened array will be determined.`argmin/argmax` return the position (index) of the minimum/maximum in the sequence. ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 0, 1, 10]) print('a:', a) print('min of a:', np.min(a)) print('argmin of a:', np.argmin(a)) b = np.array([[1, 2, 0], [1, 10, -1]]) print('\nb:\n', b) print('min of b (flattened):', np.min(b)) print('min of b (axis=0):', np.min(b, axis=0)) print('min of b (axis=1):', np.min(b, axis=1)) ###Output a: array([1.0, 2.0, 0.0, 1.0, 10.0], dtype=float) min of a: 0.0 argmin of a: 2 b: array([[1.0, 2.0, 0.0], [1.0, 10.0, -1.0]], dtype=float) min of b (flattened): -1.0 min of b (axis=0): array([1.0, 2.0, -1.0], dtype=float) min of b (axis=1): array([0.0, -1.0], dtype=float) ###Markdown sum, std, meannumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.sum.htmlnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.std.htmlnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.htmlThese three functions follow the same pattern: if the axis keyword is not specified, it assumes the default value of `None`, and returns the result of the computation for the flattened array. Otherwise, the calculation is along the given axis. ###Code %%micropython -pyboard 1 import ulab as np a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) print('a: \n', a) print('sum, flat array: ', np.sum(a)) print('mean, horizontal: ', np.mean(a, axis=1)) print('std, vertical: ', np.std(a, axis=0)) ###Output a: array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype=float) sum, flat array: 45.0 mean, horizontal: array([2.0, 5.0, 8.0], dtype=float) std, vertical: array([2.44949, 2.44949, 2.44949], dtype=float) ###Markdown rollnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.roll.htmlThe roll function shifts the content of a vector by the positions given as the second argument. If the `axis` keyword is supplied, the shift is applied to the given axis. 
###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4, 5, 6, 7, 8]) print("a:\t\t\t", a) np.roll(a, 2) print("a rolled to the left:\t", a) # this should be the original vector np.roll(a, -2) print("a rolled to the right:\t", a) ###Output a: array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) a rolled to the left: array([3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 1.0, 2.0], dtype=float) a rolled to the right: array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) ###Markdown Rolling works with matrices, too. If the `axis` keyword is 0, the matrix is rolled along its vertical axis, otherwise, horizontally. Horizontal rolls are faster, because they require fewer steps, and larger memory chunks are copied, however, they also require more RAM: basically the whole row must be stored internally. Most expensive are the `None` keyword values, because with `axis = None`, the array is flattened first, hence the row's length is the size of the whole matrix.Vertical rolls require two internal copies of single columns. ###Code %%micropython -unix 1 import ulab as np a = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) print("a:\n", a) np.roll(a, 2) print("\na rolled to the left:\n", a) np.roll(a, -1, axis=1) print("\na rolled up:\n", a) np.roll(a, 1, axis=None) print("\na rolled with None:\n", a) ###Output a: array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]], dtype=float) a rolled to the left: array([[3.0, 4.0, 5.0, 6.0], [7.0, 8.0, 1.0, 2.0]], dtype=float) a rolled up: array([[6.0, 3.0, 4.0, 5.0], [2.0, 7.0, 8.0, 1.0]], dtype=float) a rolled with None: array([[3.0, 4.0, 5.0, 2.0], [7.0, 8.0, 1.0, 6.0]], dtype=float) ###Markdown Simple running weighted averageAs a demonstration of the conciseness of `ulab/numpy` operations, we will calculate an exponentially weighted running average of a measurement vector in just a couple of lines. I chose this particular example, because I think that this can indeed be used in real-life applications. ###Code %%micropython -unix 1 import ulab as np def dummy_adc(): # dummy adc function, so that the results are reproducible return 2 n = 10 # These are the normalised weights; the last entry is the most dominant weight = np.exp([1, 2, 3, 4, 5]) weight = weight/np.sum(weight) print(weight) # initial array of samples samples = np.array([0]*n) for i in range(n): # a new datum is inserted on the right hand side. 
This simply overwrites whatever was in the last slot samples[-1] = dummy_adc() print(np.mean(samples[-5:]*weight)) print(samples[-5:]) # the data are shifted by one position to the left np.roll(samples, 1) ###Output array([0.01165623031556606, 0.03168492019176483, 0.08612854033708572, 0.234121635556221, 0.6364086270332336], dtype=float) 0.2545634508132935 array([0.0, 0.0, 0.0, 0.0, 2.0], dtype=float) 0.3482121050357819 array([0.0, 0.0, 0.0, 2.0, 2.0], dtype=float) 0.3826635211706161 array([0.0, 0.0, 2.0, 2.0, 2.0], dtype=float) 0.3953374892473221 array([0.0, 2.0, 2.0, 2.0, 2.0], dtype=float) 0.3999999813735485 array([2.0, 2.0, 2.0, 2.0, 2.0], dtype=float) 0.3999999813735485 array([2.0, 2.0, 2.0, 2.0, 2.0], dtype=float) 0.3999999813735485 array([2.0, 2.0, 2.0, 2.0, 2.0], dtype=float) 0.3999999813735485 array([2.0, 2.0, 2.0, 2.0, 2.0], dtype=float) 0.3999999813735485 array([2.0, 2.0, 2.0, 2.0, 2.0], dtype=float) 0.3999999813735485 array([2.0, 2.0, 2.0, 2.0, 2.0], dtype=float) ###Markdown flipnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.htmlThe `flip` function takes one positional, an `ndarray`, and one keyword argument, `axis = None`, and reverses the order of elements along the given axis. If the keyword argument is `None`, the matrix' entries are flipped along all axes. `flip` returns a new copy of the array. ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4, 5]) print("a: \t", a) print("a flipped:\t", np.flip(a)) a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.uint8) print("\na flipped horizontally\n", np.flip(a, axis=1)) print("\na flipped vertically\n", np.flip(a, axis=0)) print("\na flipped horizontally+vertically\n", np.flip(a)) ###Output a: array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=float) a flipped: array([5.0, 4.0, 3.0, 2.0, 1.0], dtype=float) a flipped horizontally array([[3, 2, 1], [6, 5, 4], [9, 8, 7]], dtype=uint8) a flipped vertically array([[7, 8, 9], [4, 5, 6], [1, 2, 3]], dtype=uint8) a flipped horizontally+vertically array([[9, 8, 7], [6, 5, 4], [3, 2, 1]], dtype=uint8) ###Markdown diffnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.diff.htmlThe `diff` function returns the numerical derivative of the forward scheme, or more accurately, the differences of an `ndarray` along a given axis. The order of derivative can be stipulated with the `n` keyword argument, which should be between 0, and 9. Default is 1. If higher order derivatives are required, they can be gotten by repeated calls to the function. The `axis` keyword argument should be -1 (last axis, in `ulab` equivalent to the second axis, and this also happens to be the default value), 0, or 1. Beyond the output array, the function requires only a couple of bytes of extra RAM for the differentiation stencil. (The stencil is an `int8` array, one byte longer than `n`. This also explains, why the highest order is 9: the coefficients of a ninth-order stencil all fit in signed bytes, while 10 would require `int16`.) Note that as usual in numerical differentiation (and also in `numpy`), the length of the respective axis will be reduced by `n` after the operation. If `n` is larger than, or equal to the length of the axis, an empty array will be returned.**WARNING**: the `diff` function does not implement the `prepend` and `append` keywords that can be found in `numpy`. 
###Code %%micropython -unix 1 import ulab as np a = np.array(range(9), dtype=np.uint8) print('a:\n', a) print('\nfirst derivative:\n', np.diff(a, n=1)) print('\nsecond derivative:\n', np.diff(a, n=2)) c = np.array([[1, 2, 3, 4], [4, 3, 2, 1], [1, 4, 9, 16], [0, 0, 0, 0]]) print('\nc:\n', c) print('\nfirst derivative, first axis:\n', np.diff(c, axis=0)) print('\nfirst derivative, second axis:\n', np.diff(c, axis=1)) ###Output a: array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=uint8) first derivative: array([1, 1, 1, 1, 1, 1, 1, 1], dtype=uint8) second derivative: array([0, 0, 0, 0, 0, 0, 0], dtype=uint8) c: array([[1.0, 2.0, 3.0, 4.0], [4.0, 3.0, 2.0, 1.0], [1.0, 4.0, 9.0, 16.0], [0.0, 0.0, 0.0, 0.0]], dtype=float) first derivative, first axis: array([[3.0, 1.0, -1.0, -3.0], [-3.0, 1.0, 7.0, 15.0], [-1.0, -4.0, -9.0, -16.0]], dtype=float) first derivative, second axis: array([[1.0, 1.0, 1.0], [-1.0, -1.0, -1.0], [3.0, 5.0, 7.0], [0.0, 0.0, 0.0]], dtype=float) ###Markdown sortnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.sort.htmlThe sort function takes an ndarray, and sorts its elements in ascending order along the specified axis using a heap sort algorithm. As opposed to the `.sort()` method discussed earlier, this function creates a copy of its input before sorting, and at the end, returns this copy. Sorting takes place in place, without auxiliary storage. The `axis` keyword argument takes on the possible values of -1 (the last axis, in `ulab` equivalent to the second axis, and this also happens to be the default value), 0, 1, or `None`. The first three cases are identical to those in [diff](diff), while the last one flattens the array before sorting. If descending order is required, the result can simply be `flip`ped, see [flip](flip).**WARNING:** `numpy` defines the `kind`, and `order` keyword arguments that are not implemented here. The function in `ulab` always uses heap sort, and since `ulab` does not have the concept of data fields, the `order` keyword argument would have no meaning. ###Code %%micropython -unix 1 import ulab as np a = np.array([[1, 12, 3, 0], [5, 3, 4, 1], [9, 11, 1, 8], [7, 10, 0, 1]], dtype=np.float) print('\na:\n', a) b = np.sort(a, axis=0) print('\na sorted along vertical axis:\n', b) c = np.sort(a, axis=1) print('\na sorted along horizontal axis:\n', c) c = np.sort(a, axis=None) print('\nflattened a sorted:\n', c) ###Output a: array([[1.0, 12.0, 3.0, 0.0], [5.0, 3.0, 4.0, 1.0], [9.0, 11.0, 1.0, 8.0], [7.0, 10.0, 0.0, 1.0]], dtype=float) a sorted along vertical axis: array([[1.0, 3.0, 0.0, 0.0], [5.0, 10.0, 1.0, 1.0], [7.0, 11.0, 3.0, 1.0], [9.0, 12.0, 4.0, 8.0]], dtype=float) a sorted along horizontal axis: array([[0.0, 1.0, 3.0, 12.0], [1.0, 3.0, 4.0, 5.0], [1.0, 8.0, 9.0, 11.0], [0.0, 1.0, 7.0, 10.0]], dtype=float) flattened a sorted: array([0.0, 0.0, 1.0, ..., 10.0, 11.0, 12.0], dtype=float) ###Markdown Heap sort requires $\sim N\log N$ operations, and notably, the worst case costs only 20% more time than the average. 
In order to get an order-of-magnitude estimate, we will take the sine of 1000 uniformly spaced numbers between 0, and two pi, and sort them: ###Code %%micropython -pyboard 1 import ulab as np @timeit def sort_time(array): return np.sort(array) b = np.sin(np.linspace(0, 6.28, num=1000)) print('b: ', b) sort_time(b) print('\nb sorted:\n', b) ###Output _____no_output_____ ###Markdown argsortnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.argsort.htmlSimilarly to [sort](sort), `argsort` takes a positional, and a keyword argument, and returns an unsigned short index array of type `ndarray` with the same dimensions as the input, or, if `axis=None`, as a row vector with length equal to the number of elements in the input (i.e., the flattened array). The indices in the output sort the input in ascending order. The routine in `argsort` is the same as in `sort`, therefore, the comments on computational expenses (time and RAM) also apply. In particular, since no copy of the original data is required, virtually no RAM beyond the output array is used. Since the underlying container of the output array is of type `uint16_t`, neither of the output dimensions should be larger than 65535. ###Code %%micropython -unix 1 import ulab as np a = np.array([[1, 12, 3, 0], [5, 3, 4, 1], [9, 11, 1, 8], [7, 10, 0, 1]], dtype=np.float) print('\na:\n', a) b = np.argsort(a, axis=0) print('\na sorted along vertical axis:\n', b) c = np.argsort(a, axis=1) print('\na sorted along horizontal axis:\n', c) c = np.argsort(a, axis=None) print('\nflattened a sorted:\n', c) ###Output a: array([[1.0, 12.0, 3.0, 0.0], [5.0, 3.0, 4.0, 1.0], [9.0, 11.0, 1.0, 8.0], [7.0, 10.0, 0.0, 1.0]], dtype=float) a sorted along vertical axis: array([[0, 1, 3, 0], [1, 3, 2, 1], [3, 2, 0, 3], [2, 0, 1, 2]], dtype=uint16) a sorted along horizontal axis: array([[3, 0, 2, 1], [3, 1, 2, 0], [2, 3, 0, 1], [2, 3, 0, 1]], dtype=uint16) flattened a sorted: array([3, 14, 0, ..., 13, 9, 1], dtype=uint16) ###Markdown Since during the sorting, only the indices are shuffled, `argsort` does not modify the input array, as one can verify this by the following example: ###Code %%micropython -unix 1 import ulab as np a = np.array([0, 5, 1, 3, 2, 4], dtype=np.uint8) print('\na:\n', a) b = np.argsort(a, axis=1) print('\nsorting indices:\n', b) print('\nthe original array:\n', a) ###Output a: array([0, 5, 1, 3, 2, 4], dtype=uint8) sorting indices: array([0, 2, 4, 3, 5, 1], dtype=uint16) the original array: array([0, 5, 1, 3, 2, 4], dtype=uint8) ###Markdown Linalg size`size` takes a single argument, the axis, whose size is to be returned. Depending on the value of the argument, the following information will be returned:1. argument is 0: the number of elements of the array2. argument is 1: the number of rows3. 
argument is 2: the number of columns ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4], dtype=np.int8) print("a:\n", a) print("size of a:", np.size(a, axis=None), ",", np.size(a, axis=0)) b= np.array([[1, 2], [3, 4]], dtype=np.int8) print("\nb:\n", b) print("size of b:", np.size(b, axis=None), ",", np.size(b, axis=0), ",", np.size(b, axis=1)) ###Output a: array([1, 2, 3, 4], dtype=int8) size of a: 4 , 4 b: array([[1, 2], [3, 4]], dtype=int8) size of b: 4 , 2 , 2 ###Markdown ones, zerosnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.zeros.htmlnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ones.htmlA couple of special arrays and matrices can easily be initialised by calling one of the `ones`, or `zeros` functions. `ones` and `zeros` follow the same pattern, and have the call signature```pythonones(shape, dtype=float)zeros(shape, dtype=float)```where shape is either an integer, or a 2-tuple. ###Code %%micropython -unix 1 import ulab as np print(np.ones(6, dtype=np.uint8)) print(np.zeros((6, 4))) ###Output array([1, 1, 1, 1, 1, 1], dtype=uint8) array([[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], dtype=float) ###Markdown eyenumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.eye.htmlAnother special array method is the `eye` function, whose call signature is ```pythoneye(N, M, k=0, dtype=float)```where `N` (`M`) specify the dimensions of the matrix (if only `N` is supplied, then we get a square matrix, otherwise one with `M` rows, and `N` columns), and `k` is the shift of the ones (the main diagonal corresponds to `k=0`). Here are a couple of examples. With a single argument ###Code %%micropython -unix 1 import ulab as np print(np.eye(5)) ###Output array([[1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0]], dtype=float) ###Markdown Specifying the dimensions of the matrix ###Code %%micropython -unix 1 import ulab as np print(np.eye(4, M=6, dtype=np.int8)) ###Output array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=int8) ###Markdown Shifting the diagonal ###Code %%micropython -unix 1 import ulab as np print(np.eye(4, M=6, k=-1, dtype=np.int16)) ###Output array([[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0]], dtype=int16) ###Markdown invA square matrix, provided that it is not singular, can be inverted by calling the `inv` function that takes a single argument. The inversion is based on successive elimination of elements in the lower left triangle, and raises a `ValueError` exception, if the matrix turns out to be singular (i.e., one of the diagonal entries is zero). ###Code %%micropython -pyboard 1 import ulab as np m = np.array([[1, 2, 3, 4], [4, 5, 6, 4], [7, 8.6, 9, 4], [3, 4, 5, 6]]) print(np.inv(m)) ###Output array([[-2.166666, 1.499999, -0.8333326, 1.0], [1.666666, -3.333331, 1.666666, -4.768516e-08], [0.1666672, 2.166666, -0.8333327, -1.0], [-0.1666666, -0.3333334, 4.96705e-08, 0.5]], dtype=float) ###Markdown Computation expensesNote that the cost of inverting a matrix is approximately twice as many floats (RAM), as the number of entries in the original matrix, and approximately as many operations, as the number of entries. 
Here are a couple of numbers: ###Code %%micropython -pyboard 1 import ulab as np @timeit def invert_matrix(m): return np.inv(m) m = np.array([[1, 2,], [4, 5]]) print('2 by 2 matrix:') invert_matrix(m) m = np.array([[1, 2, 3, 4], [4, 5, 6, 4], [7, 8.6, 9, 4], [3, 4, 5, 6]]) print('\n4 by 4 matrix:') invert_matrix(m) m = np.array([[1, 2, 3, 4, 5, 6, 7, 8], [0, 5, 6, 4, 5, 6, 4, 5], [0, 0, 9, 7, 8, 9, 7, 8], [0, 0, 0, 10, 11, 12, 11, 12], [0, 0, 0, 0, 4, 6, 7, 8], [0, 0, 0, 0, 0, 5, 6, 7], [0, 0, 0, 0, 0, 0, 7, 6], [0, 0, 0, 0, 0, 0, 0, 2]]) print('\n8 by 8 matrix:') invert_matrix(m) ###Output 2 by 2 matrix: execution time: 65 us 4 by 4 matrix: execution time: 105 us 8 by 8 matrix: execution time: 299 us ###Markdown The above-mentioned scaling is not obeyed strictly. The reason for the discrepancy is that the function call is still the same for all three cases: the input must be inspected, the output array must be created, and so on. dotnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html**WARNING:** numpy applies upcasting rules for the multiplication of matrices, while `ulab` simply returns a float matrix. Once you can invert a matrix, you might want to know, whether the inversion is correct. You can simply take the original matrix and its inverse, and multiply them by calling the `dot` function, which takes the two matrices as its arguments. If the matrix dimensions do not match, the function raises a `ValueError`. The result of the multiplication is expected to be the unit matrix, which is demonstrated below. ###Code %%micropython -pyboard 1 import ulab as np m = np.array([[1, 2, 3], [4, 5, 6], [7, 10, 9]], dtype=np.uint8) n = np.inv(m) print("m:\n", m) print("\nm^-1:\n", n) # this should be the unit matrix print("\nm*m^-1:\n", np.dot(m, n)) ###Output m: array([[1, 2, 3], [4, 5, 6], [7, 10, 9]], dtype=uint8) m^-1: array([[-1.25, 1.0, -0.25], [0.5, -1.0, 0.5], [0.4166667, 0.3333334, -0.25]], dtype=float) m*m^-1: array([[1.0, 2.384186e-07, -1.490116e-07], [-2.980232e-07, 1.000001, -4.172325e-07], [-3.278255e-07, 1.311302e-06, 0.9999992]], dtype=float) ###Markdown Note that for matrix multiplication you don't necessarily need square matrices, it is enough, if their dimensions are compatible (i.e., the the left-hand-side matrix has as many columns, as does the right-hand-side matrix rows): ###Code %%micropython -unix 1 import ulab as np m = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=np.uint8) n = np.array([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=np.uint8) print(m) print(n) print(np.dot(m, n)) ###Output array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=uint8) array([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=uint8) array([[7.0, 10.0], [23.0, 34.0]], dtype=float) ###Markdown detnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.det.htmlThe `det` function takes a square matrix as its single argument, and calculates the determinant. The calculation is based on successive elimination of the matrix elements, and the return value is a float, even if the input array was of integer type. 
###Code %%micropython -pyboard 1 import ulab as np a = np.array([[1, 2], [3, 4]], dtype=np.uint8) print(np.det(a)) ###Output -2.0 ###Markdown BenchmarkSince the routine for calculating the determinant is pretty much the same as for finding the [inverse of a matrix](inv), the execution times are similar: ###Code %%micropython -pyboard 1 @timeit def matrix_det(m): return np.inv(m) m = np.array([[1, 2, 3, 4, 5, 6, 7, 8], [0, 5, 6, 4, 5, 6, 4, 5], [0, 0, 9, 7, 8, 9, 7, 8], [0, 0, 0, 10, 11, 12, 11, 12], [0, 0, 0, 0, 4, 6, 7, 8], [0, 0, 0, 0, 0, 5, 6, 7], [0, 0, 0, 0, 0, 0, 7, 6], [0, 0, 0, 0, 0, 0, 0, 2]]) matrix_det(m) ###Output execution time: 294 us ###Markdown eignumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eig.htmlThe `eig` function calculates the eigenvalues and the eigenvectors of a real, symmetric square matrix. If the matrix is not symmetric, a `ValueError` will be raised. The function takes a single argument, and returns a tuple with the eigenvalues, and eigenvectors. With the help of the eigenvectors, amongst other things, you can implement sophisticated stabilisation routines for robots. ###Code %%micropython -pyboard 1 import ulab as np a = np.array([[1, 2, 1, 4], [2, 5, 3, 5], [1, 3, 6, 1], [4, 5, 1, 7]], dtype=np.uint8) x, y = np.eig(a) print('eigenvectors of a:\n', x) print('\neigenvalues of a:\n', y) ###Output eigenvectors of a: array([-1.165288, 0.8029362, 5.585626, 13.77673], dtype=float) eigenvalues of a: array([[0.8151754, -0.4499267, -0.1643907, 0.3256237], [0.2211193, 0.7847154, 0.08373602, 0.5729892], [-0.1340859, -0.3100657, 0.8742685, 0.3486182], [-0.5182822, -0.2926556, -0.4490192, 0.6664218]], dtype=float) ###Markdown The same matrix diagonalised with `numpy` yields: ###Code a = array([[1, 2, 1, 4], [2, 5, 3, 5], [1, 3, 6, 1], [4, 5, 1, 7]], dtype=np.uint8) x, y = eig(a) print('eigenvectors of a:\n', x) print('\neigenvalues of a:\n', y) ###Output eigenvectors of a: [13.77672606 -1.16528837 0.80293655 5.58562576] eigenvalues of a: [[ 0.32561419 0.815156 0.44994112 -0.16446602] [ 0.57300777 0.22113342 -0.78469926 0.08372081] [ 0.34861093 -0.13401142 0.31007764 0.87427868] [ 0.66641421 -0.51832581 0.29266348 -0.44897499]] ###Markdown When comparing results, we should keep two things in mind: 1. the eigenvalues and eigenvectors are not necessarily sorted in the same way2. an eigenvector can be multiplied by an arbitrary non-zero scalar, and it is still an eigenvector with the same eigenvalue. This is why all signs of the eigenvector belonging to 5.58, and 0.80 are flipped in `ulab` with respect to `numpy`. This difference, however, is of absolutely no consequence. Computation expensesSince the function is based on [Givens rotations](https://en.wikipedia.org/wiki/Givens_rotation) and runs till convergence is achieved, or till the maximum number of allowed rotations is exhausted, there is no universal estimate for the time required to find the eigenvalues. However, an order of magnitude can, at least, be guessed based on the measurement below: ###Code %%micropython -pyboard 1 import ulab as np @timeit def matrix_eig(a): return np.eig(a) a = np.array([[1, 2, 1, 4], [2, 5, 3, 5], [1, 3, 6, 1], [4, 5, 1, 7]], dtype=np.uint8) matrix_eig(a) ###Output execution time: 111 us ###Markdown Polynomials polyvalnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.polyval.htmlpolyval takes two arguments, both arrays or other iterables. 
###Code %%micropython -unix 1 import ulab as np p = [1, 1, 1, 0] x = [0, 1, 2, 3, 4] print('coefficients: ', p) print('independent values: ', x) print('\nvalues of p(x): ', np.polyval(p, x)) # the same works with one-dimensional ndarrays a = np.array(x) print('\nndarray (a): ', a) print('value of p(a): ', np.polyval(p, a)) ###Output coefficients: [1, 1, 1, 0] independent values: [0, 1, 2, 3, 4] values of p(x): array([0.0, 3.0, 14.0, 39.0, 84.0], dtype=float) ndarray (a): array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=float) value of p(a): array([0.0, 3.0, 14.0, 39.0, 84.0], dtype=float) ###Markdown polyfitnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.polyfit.htmlpolyfit takes two, or three arguments. The last one is the degree of the polynomial that will be fitted, the last but one is an array or iterable with the `y` (dependent) values, and the first one, an array or iterable with the `x` (independent) values, can be dropped. If that is the case, `x` will be generated in the function, assuming uniform sampling. If the length of `x`, and `y` are not the same, the function raises a `ValueError`. ###Code %%micropython -unix 1 import ulab as np x = np.array([0, 1, 2, 3, 4, 5, 6]) y = np.array([9, 4, 1, 0, 1, 4, 9]) print('independent values:\t', x) print('dependent values:\t', y) print('fitted values:\t\t', np.polyfit(x, y, 2)) # the same with missing x print('\ndependent values:\t', y) print('fitted values:\t\t', np.polyfit(y, 2)) ###Output independent values: array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=float) dependent values: array([9.0, 4.0, 1.0, 0.0, 1.0, 4.0, 9.0], dtype=float) fitted values: array([1.0, -6.0, 9.000000000000004], dtype=float) dependent values: array([9.0, 4.0, 1.0, 0.0, 1.0, 4.0, 9.0], dtype=float) fitted values: array([1.0, -6.0, 9.000000000000004], dtype=float) ###Markdown Execution time`polyfit` is based on the inversion of a matrix (there is more on the background in https://en.wikipedia.org/wiki/Polynomial_regression), and it requires the intermediate storage of `2*N*(deg+1)` floats, where `N` is the number of entries in the input array, and `deg` is the fit's degree. The additional computation costs of the matrix inversion discussed in [inv](inv) also apply. The example from above needs around 150 microseconds to return: ###Code %%micropython -pyboard 1 import ulab as np @timeit def time_polyfit(x, y, n): return np.polyfit(x, y, n) x = np.array([0, 1, 2, 3, 4, 5, 6]) y = np.array([9, 4, 1, 0, 1, 4, 9]) time_polyfit(x, y, 2) ###Output execution time: 153 us ###Markdown Fourier transformsnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.ifft.html fftSince `ulab`'s `ndarray` does not support complex numbers, the invocation of the Fourier transform differs from that in `numpy`. In `numpy`, you can simply pass an array or iterable to the function, and it will be treated as a complex array: ###Code fft.fft([1, 2, 3, 4, 1, 2, 3, 4]) ###Output _____no_output_____ ###Markdown **WARNING:** The array that is returned is also complex, i.e., the real and imaginary components are cast together. In `ulab`, the real and imaginary parts are treated separately: you have to pass two `ndarray`s to the function, although, the second argument is optional, in which case the imaginary part is assumed to be zero.**WARNING:** The function, as opposed to `numpy`, returns a 2-tuple, whose elements are two `ndarray`s, holding the real and imaginary parts of the transform separately. 
###Code %%micropython -pyboard 1 import ulab as np from ulab import numerical from ulab import vector from ulab import fft from ulab import linalg x = numerical.linspace(0, 10, num=1024) y = vector.sin(x) z = linalg.zeros(len(x)) a, b = fft.fft(x) print('real part:\t', a) print('\nimaginary part:\t', b) c, d = fft.fft(x, z) print('\nreal part:\t', c) print('\nimaginary part:\t', d) ###Output real part: array([5119.996, -5.004663, -5.004798, ..., -5.005482, -5.005643, -5.006577], dtype=float) imaginary part: array([0.0, 1631.333, 815.659, ..., -543.764, -815.6588, -1631.333], dtype=float) real part: array([5119.996, -5.004663, -5.004798, ..., -5.005482, -5.005643, -5.006577], dtype=float) imaginary part: array([0.0, 1631.333, 815.659, ..., -543.764, -815.6588, -1631.333], dtype=float) ###Markdown ifftThe above-mentioned rules apply to the inverse Fourier transform. The inverse is also normalised by `N`, the number of elements, as is customary in `numpy`. With the normalisation, we can ascertain that the inverse of the transform is equal to the original array. ###Code %%micropython -pyboard 1 import ulab as np x = np.linspace(0, 10, num=1024) y = np.sin(x) a, b = np.fft(y) print('original vector:\t', y) y, z = np.ifft(a, b) # the real part should be equal to y print('\nreal part of inverse:\t', y) # the imaginary part should be equal to zero print('\nimaginary part of inverse:\t', z) ###Output original vector: array([0.0, 0.009775016, 0.0195491, ..., -0.5275068, -0.5357859, -0.5440139], dtype=float) real part of inverse: array([-2.980232e-08, 0.0097754, 0.0195494, ..., -0.5275064, -0.5357857, -0.5440133], dtype=float) imaginary part of inverse: array([-2.980232e-08, -1.451171e-07, 3.693752e-08, ..., 6.44871e-08, 9.34986e-08, 2.18336e-07], dtype=float) ###Markdown Note that unlike in `numpy`, the length of the array on which the Fourier transform is carried out must be a power of 2. If this is not the case, the function raises a `ValueError` exception. spectrumIn addition to the Fourier transform and its inverse, `ulab` also sports a function called `spectrum`, which returns the absolute value of the Fourier transform. This could be used to find the dominant spectral component in a time series. The arguments are treated in the same way as in `fft`, and `ifft`. ###Code %%micropython -pyboard 1 import ulab as np x = np.linspace(0, 10, num=1024) y = np.sin(x) a = np.spectrum(y) print('original vector:\t', y) print('\nspectrum:\t', a) ###Output original vector: array([0.0, 0.009775016, 0.0195491, ..., -0.5275068, -0.5357859, -0.5440139], dtype=float) spectrum: array([187.8641, 315.3125, 347.8804, ..., 84.4587, 347.8803, 315.3124], dtype=float) ###Markdown As such, `spectrum` is really just a shorthand for `np.sqrt(a*a + b*b)`: ###Code %%micropython -pyboard 1 import ulab as np x = np.linspace(0, 10, num=1024) y = np.sin(x) a, b = np.fft(y) print('\nspectrum calculated the hard way:\t', np.sqrt(a*a + b*b)) a = np.spectrum(y) print('\nspectrum calculated the lazy way:\t', a) ###Output spectrum calculated the hard way: array([187.8641, 315.3125, 347.8804, ..., 84.4587, 347.8803, 315.3124], dtype=float) spectrum calculated the lazy way: array([187.8641, 315.3125, 347.8804, ..., 84.4587, 347.8803, 315.3124], dtype=float) ###Markdown Computation and storage costs RAMThe FFT routine of `ulab` calculates the transform in place. 
This means that beyond reserving space for the two `ndarray`s that will be returned (the computation uses these two as intermediate storage space), only a handful of temporary variables, all floats or 32-bit integers, are required. Speed of FFTsA comment on the speed: a 1024-point transform implemented in python would cost around 90 ms, and 13 ms in assembly, if the code runs on the pyboard, v.1.1. You can gain a factor of four by moving to the D series https://github.com/peterhinch/micropython-fourier/blob/master/README.md8-performance. ###Code %%micropython -pyboard 1 import ulab as np x = np.linspace(0, 10, num=1024) y = np.sin(x) np.fft(y) @timeit def np_fft(y): return np.fft(y) a, b = np_fft(y) ###Output execution time: 1985 us ###Markdown The C implementation runs in less than 2 ms on the pyboard (we have just measured that), and has been reported to run in under 0.8 ms on the D series board. That is an improvement of at least a factor of four. Calculating FFTs of real signalsNow, if you have real signals, and you are really pressed for time, you can still gain a bit on speed without sacrificing anything at all. If you take the FFT of a real-valued signal, the real part of the transform will be symmetric, while the imaginary part will be anti-symmetric in frequency. If, on the other hand, the signal is imaginary-valued, then the real part of the transform will be anti-symmetric, and the imaginary part will be symmetric in frequency. These two statements follow from the definition of the Fourier transform. By combining the two observations above, if you place the first signal, $y_1(t)$, into the real part, and the second signal, $y_2(t)$, into the imaginary part of your input vector, i.e., $y(t) = y_1(t) + iy_2(t)$, and take the Fourier transform of the combined signal, then the Fourier transforms of the two components can be recovered as \begin{eqnarray}Y_1(k) &=& \frac{1}{2}\left(Y(k) + Y^*(N-k)\right)\\Y_2(k) &=& -\frac{i}{2}\left(Y(k) - Y^*(N-k)\right)\end{eqnarray}where $N$ is the length of $y_1$, and $Y_1, Y_2$, and $Y$, respectively, are the Fourier transforms of $y_1, y_2$, and $y = y_1 + iy_2$. Filter routines numpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.convolve.html convolveReturns the discrete, linear convolution of two one-dimensional sequences.Only the ``full`` mode is supported, and the ``mode`` named parameter is not accepted. Note that all other modes can be had by slicing a ``full`` result. ###Code %%micropython -unix 1 import ulab as np x = np.array((1,2,3)) y = np.array((1,10,100,1000)) print(np.convolve(x, y)) ###Output array([1.0, 12.0, 123.0, 1230.0, 2300.0, 3000.0], dtype=float) ###Markdown **WARNING:** If a binary operation involves an `ndarray` and a micropython type (integer, or float), then the array must be on the left hand side. ###Code %%micropython -unix 1 import ulab as np # this is going to work a = np.array([1, 2, 3, 4], dtype=np.uint8) b = 12 print("a:\t", a) print("b:\t", b) print("a+b:\t", a+b) # but this will spectacularly fail print("b+a:\t", b+a) ###Output a: array([1, 2, 3, 4], dtype=uint8) b: 12 a+b: array([13, 14, 15, 16], dtype=uint8) Traceback (most recent call last): File "/dev/shm/micropython.py", line 12, in <module> TypeError: unsupported types for __add__: 'int', 'ndarray' ###Markdown The reason for this lies in how micropython resolves binary operators, and this means that a fix can only be implemented, if micropython itself changes the corresponding function(s). Till then, keep `ndarray`s on the left hand side. 
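For operations that do not commute, keeping the array on the left means rewriting the expression itself. The snippet below is only a sketch of this workaround, and is not part of the original examples; it assumes that subtraction of a scalar and multiplication by a scalar behave in the same way as the addition shown above. Instead of `c - a`, one computes `(a - c)*(-1.0)`.
###Code
%%micropython -unix 1

import ulab as np

a = np.array([1, 2, 3, 4])
c = 10.0

# c - a would put the scalar on the left hand side, and fail;
# with the array on the left the same result can be obtained as
print((a - c)*(-1.0))
###Output
_____no_output_____
###Markdown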
BenchmarksThe following snippet compares the performance of binary operations to a possible implementation in python. For the time measurement, we will take the following snippet from the micropython manual: ###Code %%micropython -pyboard 1 def timeit(f, *args, **kwargs): func_name = str(f).split(' ')[1] def new_func(*args, **kwargs): t = utime.ticks_us() result = f(*args, **kwargs) print('execution time: ', utime.ticks_diff(utime.ticks_us(), t), ' us') return result return new_func %%micropython -pyboard 1 import ulab as np @timeit def py_add(a, b): return [a[i]+b[i] for i in range(1000)] @timeit def py_multiply(a, b): return [a[i]*b[i] for i in range(1000)] @timeit def ulab_add(a, b): return a + b @timeit def ulab_multiply(a, b): return a * b a = [0.0]*1000 b = range(1000) print('python add:') py_add(a, b) print('\npython multiply:') py_multiply(a, b) a = np.linspace(0, 10, num=1000) b = np.ones(1000) print('\nulab add:') ulab_add(a, b) print('\nulab multiply:') ulab_multiply(a, b) ###Output python add: execution time: 10051 us python multiply: execution time: 14175 us ulab add: execution time: 222 us ulab multiply: execution time: 213 us ###Markdown I do not claim that the python implementation above is perfect, and certainly, there is much room for improvement. However, the factor of 50 difference in execution time is very spectacular. This is nothing but a consequence of the fact that the `ulab` functions run `C` code, with very little python overhead. The factor of 50 appears to be quite universal: the FFT routine obeys similar scaling (see [Speed of FFTs](Speed-of-FFTs)), and this number came up with font rendering, too: [fast font rendering on graphical displays](https://forum.micropython.org/viewtopic.php?f=15&t=5815&p=33362&hilit=ufontp33383). Comparison operatorsThe smaller than, greater than, smaller or equal, and greater or equal operators return a vector of Booleans indicating the positions (`True`), where the condition is satisfied. ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.uint8) print(a < 5) ###Output [True, True, True, True, False, False, False, False] ###Markdown **WARNING:** Note that `numpy` returns an array of Booleans. For most use cases this fact should not make a difference. ###Code a = array([1, 2, 3, 4, 5, 6, 7, 8]) a < 5 ###Output _____no_output_____ ###Markdown These operators work with matrices, too, in which case a list of lists of Booleans will be returned: ###Code %%micropython -unix 1 import ulab as np a = np.array([range(0, 5, 1), range(1, 6, 1), range(2, 7, 1)], dtype=np.uint8) print(a) print(a < 5) ###Output array([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5], [2, 3, 4, 5, 6]], dtype=uint8) [[True, True, True, True, True], [True, True, True, True, False], [True, True, True, False, False]] ###Markdown Iterating over arrays`ndarray`s are iterable, which means that their elements can also be accessed as can the elements of a list, tuple, etc. If the array is one-dimensional, the iterator returns scalars, otherwise a new one-dimensional `ndarray`, which is simply a copy of the corresponding row of the matrix, i.e, its data type will be inherited. 
###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4, 5], dtype=np.uint8) b = np.array([range(5), range(10, 15, 1), range(20, 25, 1), range(30, 35, 1)], dtype=np.uint8) print("a:\t", a) for i, _a in enumerate(a): print("element %d in a:"%i, _a) print("\nb:\t", b) for i, _b in enumerate(b): print("element %d in b:"%i, _b) ###Output a: array([1, 2, 3, 4, 5], dtype=uint8) element 0 in a: 1 element 1 in a: 2 element 2 in a: 3 element 3 in a: 4 element 4 in a: 5 b: array([[0, 1, 2, 3, 4], [10, 11, 12, 13, 14], [20, 21, 22, 23, 24], [30, 31, 32, 33, 34]], dtype=uint8) element 0 in b: array([0, 1, 2, 3, 4], dtype=uint8) element 1 in b: array([10, 11, 12, 13, 14], dtype=uint8) element 2 in b: array([20, 21, 22, 23, 24], dtype=uint8) element 3 in b: array([30, 31, 32, 33, 34], dtype=uint8) ###Markdown Slicing and indexingCopies of sub-arrays can be created by indexing, and slicing. IndexingThe simplest form of indexing is specifying a single integer between the square brackets as in ###Code %%micropython -unix 1 import ulab as np a = np.array(range(10), dtype=np.uint8) print("a:\t\t\t\t\t\t", a) print("the first, and first from right element of a:\t", a[0], a[-1]) print("the second, and second from right element of a:\t", a[1], a[-2]) ###Output a: array([0, 1, 2, ..., 7, 8, 9], dtype=uint8) the first, and first from right element of a: 0 9 the second, and second from right element of a: 1 8 ###Markdown Indices are (not necessarily non-negative) integers, or a list of Booleans. By using a Boolean list, we can select those elements of an array that satisfy a specific condition. At the moment, such indexing is defined for row vectors only, for matrices the function raises a `ValueError` exception, though this will be rectified in a future version of `ulab`. ###Code %%micropython -unix 1 import ulab as np a = np.array(range(9), dtype=np.float) print("a:\t", a) print("a < 5:\t", a[a < 5]) ###Output a: array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) a < 5: array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=float) ###Markdown Indexing with Boolean arrays can take more complicated expressions. This is a very concise way of comparing two vectors, e.g.: ###Code %%micropython -pyboard 1 import ulab as np a = np.array(range(9), dtype=np.uint8) b = np.array([4, 4, 4, 3, 3, 3, 13, 13, 13], dtype=np.uint8) print("a:\t", a) print("\na**2:\t", a*a) print("\nb:\t", b) print("\n100*sin(b):\t", np.sin(b)*100.0) print("\na[a*a > np.sin(b)*100.0]:\t", a[a*a > np.sin(b)*100.0]) ###Output a: array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=uint8) a**2: array([0, 1, 4, 9, 16, 25, 36, 49, 64], dtype=uint8) b: array([4, 4, 4, 3, 3, 3, 13, 13, 13], dtype=uint8) 100*sin(b): array([-75.68025, -75.68025, -75.68025, 14.112, 14.112, 14.112, 42.01671, 42.01671, 42.01671], dtype=float) a[a*a > np.sin(b)*100.0]: array([0, 1, 2, 4, 5, 7, 8], dtype=uint8) ###Markdown Slicing and assigning to slicesYou can also generate sub-arrays by specifying slices as the index of an array. Slices are special python objects of the form ```pythonslice = start:end:stop```where `start`, `end`, and `stop` are (not necessarily non-negative) integers. Not all of these three numbers must be specified in an index, in fact, all three of them can be missing. The interpreter takes care of filling in the missing values. (Note that slices cannot be defined in this way, only there, where an index is expected.) 
For a good explanation on how slices work in python, you can read the stackoverflow question https://stackoverflow.com/questions/509211/understanding-slice-notation.Slices work on both axes: ###Code %%micropython -unix 1 import ulab as np a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.uint8) print('a:\n', a) # the first row print('\na[0]:\n', a[0]) # the first two elements of the first row print('\na[0,:2]:\n', a[0,:2]) # the zeroth element in each row (also known as the zeroth column) print('\na[:,0]:\n', a[:,0]) # the last but one row print('\na[-1]:\n', a[-1]) # the last two rows backwards print('\na[::1]:\n', a[::-1]) ###Output a: array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=uint8) a[0]: array([1, 2, 3], dtype=uint8) a[0,:2]: array([1, 2], dtype=uint8) a[:,0]: array([1, 4, 7], dtype=uint8) a[-1]: array([7, 8, 9], dtype=uint8) a[::1]: array([[7, 8, 9], [4, 5, 6]], dtype=uint8) ###Markdown Assignment to slices can be done for the whole slice, per row, and per column. A couple of examples should make these statements clearer: ###Code %%micropython -unix 1 import ulab as np zero_list = [0, 0, 0] a = np.array([zero_list, zero_list, zero_list], dtype=np.uint8) print('a:\n', a) # assigning to the whole row a[0] = 1 print('\na[0] = 1\n', a) # assigning to the whole row a[0] = np.array([1, 2, -333], dtype=np.float) print('\na[0] = np.array([1, 2, 3])\n', a) # assigning to a column a[:,2] = 3.0 print('\na[:,0]:\n', a) ###Output a: array([[0, 0, 0], [0, 0, 0], [0, 0, 0]], dtype=uint8) a[0] = 1 array([[1, 1, 1], [0, 0, 0], [0, 0, 0]], dtype=uint8) a[0] = np.array([1, 2, 3]) array([[1, 2, 179], [0, 0, 0], [0, 0, 0]], dtype=uint8) a[:,0]: array([[1, 2, 3], [0, 0, 3], [0, 0, 3]], dtype=uint8) ###Markdown Universal functionsStandard mathematical functions can be calculated on any scalar-valued iterable (ranges, lists, tuples containing numbers), and on `ndarray`s without having to change the call signature. In all cases the functions return a new `ndarray` of typecode `float` (since these functions usually generate float values, anyway). The functions execute faster with `ndarray` arguments than with iterables, because the values of the input vector can be extracted faster. At present, the following functions are supported:`acos`, `acosh`, `asin`, `asinh`, `atan`, `atanh`, `ceil`, `cos`, `erf`, `erfc`, `exp`, `expm1`, `floor`, `tgamma`, `lgamma`, `log`, `log10`, `log2`, `sin`, `sinh`, `sqrt`, `tan`, `tanh`.These functions are applied element-wise to the arguments, thus, e.g., the exponential of a matrix cannot be calculated in this way. ###Code %%micropython -pyboard 1 import ulab as np a = range(9) b = np.array(a) # works with ranges, lists, tuples etc. 
print('a:\t', a) print('exp(a):\t', np.exp(a)) # with 1D arrays print('\nb:\t', b) print('exp(b):\t', np.exp(b)) # as well as with matrices c = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) print('\nc:\t', c) print('exp(c):\t', np.exp(c)) ###Output a: range(0, 9) exp(a): array([1.0, 2.718282, 7.389056, 20.08554, 54.59816, 148.4132, 403.4288, 1096.633, 2980.958], dtype=float) b: array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) exp(b): array([1.0, 2.718282, 7.389056, 20.08554, 54.59816, 148.4132, 403.4288, 1096.633, 2980.958], dtype=float) c: array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype=float) exp(c): array([[2.718282, 7.389056, 20.08554], [54.59816, 148.4132, 403.4288], [1096.633, 2980.958, 8103.084]], dtype=float) ###Markdown Computation expensesThe overhead for calculating with micropython iterables is quite significant: for the 1000 samples below, the difference is more than 800 microseconds, because internally the function has to create the `ndarray` for the output, has to fetch the iterable's items of unknown type, and then convert them to floats. All these steps are skipped for `ndarray`s, because these pieces of information are already known. ###Code %%micropython -pyboard 1 import ulab as np a = [0]*1000 b = np.array(a) @timeit def measure_run_time(x): return np.exp(x) measure_run_time(a) measure_run_time(b) ###Output execution time: 1259 us execution time: 408 us ###Markdown Of course, such a time saving is reasonable only, if the data are already available as an `ndarray`. If one has to initialise the `ndarray` from the list, then there is no gain, because the iterator was simply pushed into the initialisation function. Numerical linspacenumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.htmlThis function returns an array, whose elements are uniformly spaced between the `start`, and `stop` points. The number of intervals is determined by the `num` keyword argument, whose default value is 50. With the `endpoint` keyword argument (defaults to `True`) one can include `stop` in the sequence. In addition, the `dtype` keyword can be supplied to force type conversion of the output. The default is `float`. Note that, when `dtype` is of integer type, the sequence is not necessarily evenly spaced. This is not an error, rather a consequence of rounding. (This is also the `numpy` behaviour.) 
###Code %%micropython -unix 1 import ulab as np # generate a sequence with defaults print('default sequence:\t', np.linspace(0, 10)) # num=5 print('num=5:\t\t\t', np.linspace(0, 10, num=5)) # num=5, endpoint=False print('num=5:\t\t\t', np.linspace(0, 10, num=5, endpoint=False)) # num=5, endpoint=False, dtype=uint8 print('num=5:\t\t\t', np.linspace(0, 5, num=7, endpoint=False, dtype=np.uint8)) ###Output default sequence: array([0.0, 0.2040816396474838, 0.4081632792949677, ..., 9.591833114624023, 9.795914649963379, 9.999996185302734], dtype=float) num=5: array([0.0, 2.5, 5.0, 7.5, 10.0], dtype=float) num=5: array([0.0, 2.0, 4.0, 6.0, 8.0], dtype=float) num=5: array([0, 0, 1, 2, 2, 3, 4], dtype=uint8) ###Markdown min, argmin, max, argmaxnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.min.htmlnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.htmlnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.max.htmlnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html**WARNING:** Difference to `numpy`: the `out` keyword argument is not implemented.These functions follow the same pattern, and work with generic iterables, and `ndarray`s. `min`, and `max` return the minimum or maximum of a sequence. If the input array is two-dimensional, the `axis` keyword argument can be supplied, in which case the minimum/maximum along the given axis will be returned. If `axis=None` (this is also the default value), the minimum/maximum of the flattened array will be determined.`argmin/argmax` return the position (index) of the minimum/maximum in the sequence. ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 0, 1, 10]) print('a:', a) print('min of a:', np.min(a)) print('argmin of a:', np.argmin(a)) b = np.array([[1, 2, 0], [1, 10, -1]]) print('\nb:\n', b) print('min of b (flattened):', np.min(b)) print('min of b (axis=0):', np.min(b, axis=0)) print('min of b (axis=1):', np.min(b, axis=1)) ###Output a: array([1.0, 2.0, 0.0, 1.0, 10.0], dtype=float) min of a: 0.0 argmin of a: 2 b: array([[1.0, 2.0, 0.0], [1.0, 10.0, -1.0]], dtype=float) min of b (flattened): -1.0 min of b (axis=0): array([1.0, 2.0, -1.0], dtype=float) min of b (axis=1): array([0.0, -1.0], dtype=float) ###Markdown sum, std, meannumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.sum.htmlnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.std.htmlnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.htmlThese three functions follow the same pattern: if the axis keyword is not specified, it assumes the default value of `None`, and returns the result of the computation for the flattened array. Otherwise, the calculation is along the given axis. ###Code %%micropython -pyboard 1 import ulab as np a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) print('a: \n', a) print('sum, flat array: ', np.sum(a)) print('mean, horizontal: ', np.mean(a, axis=1)) print('std, vertical: ', np.std(a, axis=0)) ###Output a: array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype=float) sum, flat array: 45.0 mean, horizontal: array([2.0, 5.0, 8.0], dtype=float) std, vertical: array([2.44949, 2.44949, 2.44949], dtype=float) ###Markdown rollnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.roll.htmlThe roll function shifts the content of a vector by the positions given as the second argument. If the `axis` keyword is supplied, the shift is applied to the given axis. 
###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4, 5, 6, 7, 8]) print("a:\t\t\t", a) np.roll(a, 2) print("a rolled to the left:\t", a) # this should be the original vector np.roll(a, -2) print("a rolled to the right:\t", a) ###Output a: array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) a rolled to the left: array([3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 1.0, 2.0], dtype=float) a rolled to the right: array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) ###Markdown Rolling works with matrices, too. If the `axis` keyword is 0, the matrix is rolled along its vertical axis, otherwise, horizontally. Horizontal rolls are faster, because they require fewer steps, and larger memory chunks are copied, however, they also require more RAM: basically the whole row must be stored internally. Most expensive are the `None` keyword values, because with `axis = None`, the array is flattened first, hence the row's length is the size of the whole matrix.Vertical rolls require two internal copies of single columns. ###Code %%micropython -unix 1 import ulab as np a = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) print("a:\n", a) np.roll(a, 2) print("\na rolled to the left:\n", a) np.roll(a, -1, axis=1) print("\na rolled up:\n", a) np.roll(a, 1, axis=None) print("\na rolled with None:\n", a) ###Output a: array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]], dtype=float) a rolled to the left: array([[3.0, 4.0, 5.0, 6.0], [7.0, 8.0, 1.0, 2.0]], dtype=float) a rolled up: array([[6.0, 3.0, 4.0, 5.0], [2.0, 7.0, 8.0, 1.0]], dtype=float) a rolled with None: array([[3.0, 4.0, 5.0, 2.0], [7.0, 8.0, 1.0, 6.0]], dtype=float) ###Markdown Simple running weighted averageAs a demonstration of the conciseness of `ulab/numpy` operations, we will calculate an exponentially weighted running average of a measurement vector in just a couple of lines. I chose this particular example, because I think that this can indeed be used in real-life applications. ###Code %%micropython -unix 1 import ulab as np def dummy_adc(): # dummy adc function, so that the results are reproducible return 2 n = 10 # These are the normalised weights; the last entry is the most dominant weight = np.exp([1, 2, 3, 4, 5]) weight = weight/np.sum(weight) print(weight) # initial array of samples samples = np.array([0]*n) for i in range(n): # a new datum is inserted on the right hand side. 
This simply overwrites whatever was in the last slot samples[-1] = dummy_adc() print(np.mean(samples[-5:]*weight)) print(samples[-5:]) # the data are shifted by one position to the left np.roll(samples, 1) ###Output array([0.01165623031556606, 0.03168492019176483, 0.08612854033708572, 0.234121635556221, 0.6364086270332336], dtype=float) 0.2545634508132935 array([0.0, 0.0, 0.0, 0.0, 2.0], dtype=float) 0.3482121050357819 array([0.0, 0.0, 0.0, 2.0, 2.0], dtype=float) 0.3826635211706161 array([0.0, 0.0, 2.0, 2.0, 2.0], dtype=float) 0.3953374892473221 array([0.0, 2.0, 2.0, 2.0, 2.0], dtype=float) 0.3999999813735485 array([2.0, 2.0, 2.0, 2.0, 2.0], dtype=float) 0.3999999813735485 array([2.0, 2.0, 2.0, 2.0, 2.0], dtype=float) 0.3999999813735485 array([2.0, 2.0, 2.0, 2.0, 2.0], dtype=float) 0.3999999813735485 array([2.0, 2.0, 2.0, 2.0, 2.0], dtype=float) 0.3999999813735485 array([2.0, 2.0, 2.0, 2.0, 2.0], dtype=float) 0.3999999813735485 array([2.0, 2.0, 2.0, 2.0, 2.0], dtype=float) ###Markdown flipnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.htmlThe `flip` function takes one positional, an `ndarray`, and one keyword argument, `axis = None`, and reverses the order of elements along the given axis. If the keyword argument is `None`, the matrix' entries are flipped along all axes. `flip` returns a new copy of the array. ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4, 5]) print("a: \t", a) print("a flipped:\t", np.flip(a)) a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.uint8) print("\na flipped horizontally\n", np.flip(a, axis=1)) print("\na flipped vertically\n", np.flip(a, axis=0)) print("\na flipped horizontally+vertically\n", np.flip(a)) ###Output a: array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=float) a flipped: array([5.0, 4.0, 3.0, 2.0, 1.0], dtype=float) a flipped horizontally array([[3, 2, 1], [6, 5, 4], [9, 8, 7]], dtype=uint8) a flipped vertically array([[7, 8, 9], [4, 5, 6], [1, 2, 3]], dtype=uint8) a flipped horizontally+vertically array([[9, 8, 7], [6, 5, 4], [3, 2, 1]], dtype=uint8) ###Markdown diffnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.diff.htmlThe `diff` function returns the numerical derivative of the forward scheme, or more accurately, the differences of an `ndarray` along a given axis. The order of derivative can be stipulated with the `n` keyword argument, which should be between 0, and 9. Default is 1. If higher order derivatives are required, they can be gotten by repeated calls to the function. The `axis` keyword argument should be -1 (last axis, in `ulab` equivalent to the second axis, and this also happens to be the default value), 0, or 1. Beyond the output array, the function requires only a couple of bytes of extra RAM for the differentiation stencil. (The stencil is an `int8` array, one byte longer than `n`. This also explains, why the highest order is 9: the coefficients of a ninth-order stencil all fit in signed bytes, while 10 would require `int16`.) Note that as usual in numerical differentiation (and also in `numpy`), the length of the respective axis will be reduced by `n` after the operation. If `n` is larger than, or equal to the length of the axis, an empty array will be returned.**WARNING**: the `diff` function does not implement the `prepend` and `append` keywords that can be found in `numpy`. 
###Code %%micropython -unix 1 import ulab as np a = np.array(range(9), dtype=np.uint8) print('a:\n', a) print('\nfirst derivative:\n', np.diff(a, n=1)) print('\nsecond derivative:\n', np.diff(a, n=2)) c = np.array([[1, 2, 3, 4], [4, 3, 2, 1], [1, 4, 9, 16], [0, 0, 0, 0]]) print('\nc:\n', c) print('\nfirst derivative, first axis:\n', np.diff(c, axis=0)) print('\nfirst derivative, second axis:\n', np.diff(c, axis=1)) ###Output a: array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=uint8) first derivative: array([1, 1, 1, 1, 1, 1, 1, 1], dtype=uint8) second derivative: array([0, 0, 0, 0, 0, 0, 0], dtype=uint8) c: array([[1.0, 2.0, 3.0, 4.0], [4.0, 3.0, 2.0, 1.0], [1.0, 4.0, 9.0, 16.0], [0.0, 0.0, 0.0, 0.0]], dtype=float) first derivative, first axis: array([[3.0, 1.0, -1.0, -3.0], [-3.0, 1.0, 7.0, 15.0], [-1.0, -4.0, -9.0, -16.0]], dtype=float) first derivative, second axis: array([[1.0, 1.0, 1.0], [-1.0, -1.0, -1.0], [3.0, 5.0, 7.0], [0.0, 0.0, 0.0]], dtype=float) ###Markdown sortnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.sort.htmlThe sort function takes an ndarray, and sorts its elements in ascending order along the specified axis using a heap sort algorithm. As opposed to the `.sort()` method discussed earlier, this function creates a copy of its input before sorting, and at the end, returns this copy. Sorting takes place in place, without auxiliary storage. The `axis` keyword argument takes on the possible values of -1 (the last axis, in `ulab` equivalent to the second axis, and this also happens to be the default value), 0, 1, or `None`. The first three cases are identical to those in [diff](diff), while the last one flattens the array before sorting. If descending order is required, the result can simply be `flip`ped, see [flip](flip).**WARNING:** `numpy` defines the `kind`, and `order` keyword arguments that are not implemented here. The function in `ulab` always uses heap sort, and since `ulab` does not have the concept of data fields, the `order` keyword argument would have no meaning. ###Code %%micropython -unix 1 import ulab as np a = np.array([[1, 12, 3, 0], [5, 3, 4, 1], [9, 11, 1, 8], [7, 10, 0, 1]], dtype=np.float) print('\na:\n', a) b = np.sort(a, axis=0) print('\na sorted along vertical axis:\n', b) c = np.sort(a, axis=1) print('\na sorted along horizontal axis:\n', c) c = np.sort(a, axis=None) print('\nflattened a sorted:\n', c) ###Output a: array([[1.0, 12.0, 3.0, 0.0], [5.0, 3.0, 4.0, 1.0], [9.0, 11.0, 1.0, 8.0], [7.0, 10.0, 0.0, 1.0]], dtype=float) a sorted along vertical axis: array([[1.0, 3.0, 0.0, 0.0], [5.0, 10.0, 1.0, 1.0], [7.0, 11.0, 3.0, 1.0], [9.0, 12.0, 4.0, 8.0]], dtype=float) a sorted along horizontal axis: array([[0.0, 1.0, 3.0, 12.0], [1.0, 3.0, 4.0, 5.0], [1.0, 8.0, 9.0, 11.0], [0.0, 1.0, 7.0, 10.0]], dtype=float) flattened a sorted: array([0.0, 0.0, 1.0, ..., 10.0, 11.0, 12.0], dtype=float) ###Markdown Heap sort requires $\sim N\log N$ operations, and notably, the worst case costs only 20% more time than the average. 
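Since `diff` returns only the differences of neighbouring elements, an approximation of the derivative on a uniform grid still requires a division by the step size. The snippet below is a sketch of this (it is not taken from the original examples, and assumes the usual scalar division of `ndarray`s); the basic call signature of `diff`, including higher derivatives and the `axis` keyword, is demonstrated in the example that follows.
###Code
%%micropython -unix 1

import ulab as np

# approximate derivative of y = x*x on a uniform grid: dy/dx is roughly diff(y)/dx
x = np.linspace(0, 1, num=11)
y = x*x
dx = x[1] - x[0]    # the grid is uniform, so a single scalar is enough
print(np.diff(y)/dx)
###Output
_____no_output_____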
In order to get an order-of-magnitude estimate, we will take the sine of 1000 uniformly spaced numbers between 0, and two pi, and sort them: ###Code %%micropython -pyboard 1 import ulab as np @timeit def sort_time(array): return np.sort(array) b = np.sin(np.linspace(0, 6.28, num=1000)) print('b: ', b) sort_time(b) print('\nb sorted:\n', b) ###Output _____no_output_____ ###Markdown argsortnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.argsort.htmlSimilarly to [sort](sort), `argsort` takes a positional, and a keyword argument, and returns an unsigned short index array of type `ndarray` with the same dimensions as the input, or, if `axis=None`, as a row vector with length equal to the number of elements in the input (i.e., the flattened array). The indices in the output sort the input in ascending order. The routine in `argsort` is the same as in `sort`, therefore, the comments on computational expenses (time and RAM) also apply. In particular, since no copy of the original data is required, virtually no RAM beyond the output array is used. Since the underlying container of the output array is of type `uint16_t`, neither of the output dimensions should be larger than 65535. ###Code %%micropython -unix 1 import ulab as np a = np.array([[1, 12, 3, 0], [5, 3, 4, 1], [9, 11, 1, 8], [7, 10, 0, 1]], dtype=np.float) print('\na:\n', a) b = np.argsort(a, axis=0) print('\na sorted along vertical axis:\n', b) c = np.argsort(a, axis=1) print('\na sorted along horizontal axis:\n', c) c = np.argsort(a, axis=None) print('\nflattened a sorted:\n', c) ###Output a: array([[1.0, 12.0, 3.0, 0.0], [5.0, 3.0, 4.0, 1.0], [9.0, 11.0, 1.0, 8.0], [7.0, 10.0, 0.0, 1.0]], dtype=float) a sorted along vertical axis: array([[0, 1, 3, 0], [1, 3, 2, 1], [3, 2, 0, 3], [2, 0, 1, 2]], dtype=uint16) a sorted along horizontal axis: array([[3, 0, 2, 1], [3, 1, 2, 0], [2, 3, 0, 1], [2, 3, 0, 1]], dtype=uint16) flattened a sorted: array([3, 14, 0, ..., 13, 9, 1], dtype=uint16) ###Markdown Since during the sorting, only the indices are shuffled, `argsort` does not modify the input array, as one can verify this by the following example: ###Code %%micropython -unix 1 import ulab as np a = np.array([0, 5, 1, 3, 2, 4], dtype=np.uint8) print('\na:\n', a) b = np.argsort(a, axis=1) print('\nsorting indices:\n', b) print('\nthe original array:\n', a) ###Output a: array([0, 5, 1, 3, 2, 4], dtype=uint8) sorting indices: array([0, 2, 4, 3, 5, 1], dtype=uint16) the original array: array([0, 5, 1, 3, 2, 4], dtype=uint8) ###Markdown Linalg size`size` takes a single argument, the axis, whose size is to be returned. Depending on the value of the argument, the following information will be returned:1. argument is 0: the number of elements of the array2. argument is 1: the number of rows3. 
argument is 2: the number of columns ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4], dtype=np.int8) print("a:\n", a) print("size of a:", np.size(a, axis=None), ",", np.size(a, axis=0)) b= np.array([[1, 2], [3, 4]], dtype=np.int8) print("\nb:\n", b) print("size of b:", np.size(b, axis=None), ",", np.size(b, axis=0), ",", np.size(b, axis=1)) ###Output a: array([1, 2, 3, 4], dtype=int8) size of a: 4 , 4 b: array([[1, 2], [3, 4]], dtype=int8) size of b: 4 , 2 , 2 ###Markdown ones, zerosnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.zeros.htmlnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ones.htmlA couple of special arrays and matrices can easily be initialised by calling one of the `ones`, or `zeros` functions. `ones` and `zeros` follow the same pattern, and have the call signature```pythonones(shape, dtype=float)zeros(shape, dtype=float)```where shape is either an integer, or a 2-tuple. ###Code %%micropython -unix 1 import ulab as np print(np.ones(6, dtype=np.uint8)) print(np.zeros((6, 4))) ###Output array([1, 1, 1, 1, 1, 1], dtype=uint8) array([[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], dtype=float) ###Markdown eyenumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.eye.htmlAnother special array method is the `eye` function, whose call signature is ```pythoneye(N, M, k=0, dtype=float)```where `N` (`M`) specify the dimensions of the matrix (if only `N` is supplied, then we get a square matrix, otherwise one with `M` rows, and `N` columns), and `k` is the shift of the ones (the main diagonal corresponds to `k=0`). Here are a couple of examples. With a single argument ###Code %%micropython -unix 1 import ulab as np print(np.eye(5)) ###Output array([[1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0]], dtype=float) ###Markdown Specifying the dimensions of the matrix ###Code %%micropython -unix 1 import ulab as np print(np.eye(4, M=6, dtype=np.int8)) ###Output array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=int8) ###Markdown Shifting the diagonal ###Code %%micropython -unix 1 import ulab as np print(np.eye(4, M=6, k=-1, dtype=np.int16)) ###Output array([[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0]], dtype=int16) ###Markdown invA square matrix, provided that it is not singular, can be inverted by calling the `inv` function that takes a single argument. The inversion is based on successive elimination of elements in the lower left triangle, and raises a `ValueError` exception, if the matrix turns out to be singular (i.e., one of the diagonal entries is zero). ###Code %%micropython -pyboard 1 import ulab as np m = np.array([[1, 2, 3, 4], [4, 5, 6, 4], [7, 8.6, 9, 4], [3, 4, 5, 6]]) print(np.inv(m)) ###Output array([[-2.166666, 1.499999, -0.8333326, 1.0], [1.666666, -3.333331, 1.666666, -4.768516e-08], [0.1666672, 2.166666, -0.8333327, -1.0], [-0.1666666, -0.3333334, 4.96705e-08, 0.5]], dtype=float) ###Markdown Computation expensesNote that the cost of inverting a matrix is approximately twice as many floats (RAM), as the number of entries in the original matrix, and approximately as many operations, as the number of entries. 
Here are a couple of numbers: ###Code %%micropython -pyboard 1 import ulab as np @timeit def invert_matrix(m): return np.inv(m) m = np.array([[1, 2,], [4, 5]]) print('2 by 2 matrix:') invert_matrix(m) m = np.array([[1, 2, 3, 4], [4, 5, 6, 4], [7, 8.6, 9, 4], [3, 4, 5, 6]]) print('\n4 by 4 matrix:') invert_matrix(m) m = np.array([[1, 2, 3, 4, 5, 6, 7, 8], [0, 5, 6, 4, 5, 6, 4, 5], [0, 0, 9, 7, 8, 9, 7, 8], [0, 0, 0, 10, 11, 12, 11, 12], [0, 0, 0, 0, 4, 6, 7, 8], [0, 0, 0, 0, 0, 5, 6, 7], [0, 0, 0, 0, 0, 0, 7, 6], [0, 0, 0, 0, 0, 0, 0, 2]]) print('\n8 by 8 matrix:') invert_matrix(m) ###Output 2 by 2 matrix: execution time: 65 us 4 by 4 matrix: execution time: 105 us 8 by 8 matrix: execution time: 299 us ###Markdown The above-mentioned scaling is not obeyed strictly. The reason for the discrepancy is that the function call is still the same for all three cases: the input must be inspected, the output array must be created, and so on. dotnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html**WARNING:** numpy applies upcasting rules for the multiplication of matrices, while `ulab` simply returns a float matrix. Once you can invert a matrix, you might want to know, whether the inversion is correct. You can simply take the original matrix and its inverse, and multiply them by calling the `dot` function, which takes the two matrices as its arguments. If the matrix dimensions do not match, the function raises a `ValueError`. The result of the multiplication is expected to be the unit matrix, which is demonstrated below. ###Code %%micropython -pyboard 1 import ulab as np m = np.array([[1, 2, 3], [4, 5, 6], [7, 10, 9]], dtype=np.uint8) n = np.inv(m) print("m:\n", m) print("\nm^-1:\n", n) # this should be the unit matrix print("\nm*m^-1:\n", np.dot(m, n)) ###Output m: array([[1, 2, 3], [4, 5, 6], [7, 10, 9]], dtype=uint8) m^-1: array([[-1.25, 1.0, -0.25], [0.5, -1.0, 0.5], [0.4166667, 0.3333334, -0.25]], dtype=float) m*m^-1: array([[1.0, 2.384186e-07, -1.490116e-07], [-2.980232e-07, 1.000001, -4.172325e-07], [-3.278255e-07, 1.311302e-06, 0.9999992]], dtype=float) ###Markdown Note that for matrix multiplication you don't necessarily need square matrices, it is enough, if their dimensions are compatible (i.e., the the left-hand-side matrix has as many columns, as does the right-hand-side matrix rows): ###Code %%micropython -unix 1 import ulab as np m = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=np.uint8) n = np.array([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=np.uint8) print(m) print(n) print(np.dot(m, n)) ###Output array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=uint8) array([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=uint8) array([[7.0, 10.0], [23.0, 34.0]], dtype=float) ###Markdown detnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.det.htmlThe `det` function takes a square matrix as its single argument, and calculates the determinant. The calculation is based on successive elimination of the matrix elements, and the return value is a float, even if the input array was of integer type. 
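Since `inv` raises a `ValueError` for singular matrices, the determinant can also serve as a cheap guard before an inversion is attempted. The snippet below merely sketches this pattern, and is not part of the original examples (the tolerance of `1e-6` is an arbitrary choice); the basic usage of `det` follows in the next cell.
###Code
%%micropython -pyboard 1

import ulab as np

m = np.array([[2, 1], [1, 3]], dtype=np.float)
d = np.det(m)
print('determinant: ', d)

# invert only, if the matrix is not (close to) singular
if abs(d) > 1e-6:
    print('inverse:\n', np.inv(m))
else:
    print('the matrix is (close to) singular, inv would raise a ValueError')
###Output
_____no_output_____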
###Code %%micropython -pyboard 1 import ulab as np a = np.array([[1, 2], [3, 4]], dtype=np.uint8) print(np.det(a)) ###Output -2.0 ###Markdown BenchmarkSince the routine for calculating the determinant is pretty much the same as for finding the [inverse of a matrix](inv), the execution times are similar: ###Code %%micropython -pyboard 1 @timeit def matrix_det(m): return np.inv(m) m = np.array([[1, 2, 3, 4, 5, 6, 7, 8], [0, 5, 6, 4, 5, 6, 4, 5], [0, 0, 9, 7, 8, 9, 7, 8], [0, 0, 0, 10, 11, 12, 11, 12], [0, 0, 0, 0, 4, 6, 7, 8], [0, 0, 0, 0, 0, 5, 6, 7], [0, 0, 0, 0, 0, 0, 7, 6], [0, 0, 0, 0, 0, 0, 0, 2]]) matrix_det(m) ###Output execution time: 294 us ###Markdown eignumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eig.htmlThe `eig` function calculates the eigenvalues and the eigenvectors of a real, symmetric square matrix. If the matrix is not symmetric, a `ValueError` will be raised. The function takes a single argument, and returns a tuple with the eigenvalues, and eigenvectors. With the help of the eigenvectors, amongst other things, you can implement sophisticated stabilisation routines for robots. ###Code %%micropython -pyboard 1 import ulab as np a = np.array([[1, 2, 1, 4], [2, 5, 3, 5], [1, 3, 6, 1], [4, 5, 1, 7]], dtype=np.uint8) x, y = np.eig(a) print('eigenvectors of a:\n', x) print('\neigenvalues of a:\n', y) ###Output eigenvectors of a: array([-1.165288, 0.8029362, 5.585626, 13.77673], dtype=float) eigenvalues of a: array([[0.8151754, -0.4499267, -0.1643907, 0.3256237], [0.2211193, 0.7847154, 0.08373602, 0.5729892], [-0.1340859, -0.3100657, 0.8742685, 0.3486182], [-0.5182822, -0.2926556, -0.4490192, 0.6664218]], dtype=float) ###Markdown The same matrix diagonalised with `numpy` yields: ###Code a = array([[1, 2, 1, 4], [2, 5, 3, 5], [1, 3, 6, 1], [4, 5, 1, 7]], dtype=np.uint8) x, y = eig(a) print('eigenvectors of a:\n', x) print('\neigenvalues of a:\n', y) ###Output eigenvectors of a: [13.77672606 -1.16528837 0.80293655 5.58562576] eigenvalues of a: [[ 0.32561419 0.815156 0.44994112 -0.16446602] [ 0.57300777 0.22113342 -0.78469926 0.08372081] [ 0.34861093 -0.13401142 0.31007764 0.87427868] [ 0.66641421 -0.51832581 0.29266348 -0.44897499]] ###Markdown When comparing results, we should keep two things in mind: 1. the eigenvalues and eigenvectors are not necessarily sorted in the same way2. an eigenvector can be multiplied by an arbitrary non-zero scalar, and it is still an eigenvector with the same eigenvalue. This is why all signs of the eigenvector belonging to 5.58, and 0.80 are flipped in `ulab` with respect to `numpy`. This difference, however, is of absolutely no consequence. Computation expensesSince the function is based on [Givens rotations](https://en.wikipedia.org/wiki/Givens_rotation) and runs till convergence is achieved, or till the maximum number of allowed rotations is exhausted, there is no universal estimate for the time required to find the eigenvalues. However, an order of magnitude can, at least, be guessed based on the measurement below: ###Code %%micropython -pyboard 1 import ulab as np @timeit def matrix_eig(a): return np.eig(a) a = np.array([[1, 2, 1, 4], [2, 5, 3, 5], [1, 3, 6, 1], [4, 5, 1, 7]], dtype=np.uint8) matrix_eig(a) ###Output execution time: 111 us ###Markdown Polynomials polyvalnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.polyval.htmlpolyval takes two arguments, both arrays or other iterables. 
###Code %%micropython -unix 1 import ulab as np p = [1, 1, 1, 0] x = [0, 1, 2, 3, 4] print('coefficients: ', p) print('independent values: ', x) print('\nvalues of p(x): ', np.polyval(p, x)) # the same works with one-dimensional ndarrays a = np.array(x) print('\nndarray (a): ', a) print('value of p(a): ', np.polyval(p, a)) ###Output coefficients: [1, 1, 1, 0] independent values: [0, 1, 2, 3, 4] values of p(x): array([0.0, 3.0, 14.0, 39.0, 84.0], dtype=float) ndarray (a): array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=float) value of p(a): array([0.0, 3.0, 14.0, 39.0, 84.0], dtype=float) ###Markdown polyfitnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.polyfit.htmlpolyfit takes two, or three arguments. The last one is the degree of the polynomial that will be fitted, the last but one is an array or iterable with the `y` (dependent) values, and the first one, an array or iterable with the `x` (independent) values, can be dropped. If that is the case, `x` will be generated in the function, assuming uniform sampling. If the length of `x`, and `y` are not the same, the function raises a `ValueError`. ###Code %%micropython -unix 1 import ulab as np x = np.array([0, 1, 2, 3, 4, 5, 6]) y = np.array([9, 4, 1, 0, 1, 4, 9]) print('independent values:\t', x) print('dependent values:\t', y) print('fitted values:\t\t', np.polyfit(x, y, 2)) # the same with missing x print('\ndependent values:\t', y) print('fitted values:\t\t', np.polyfit(y, 2)) ###Output independent values: array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=float) dependent values: array([9.0, 4.0, 1.0, 0.0, 1.0, 4.0, 9.0], dtype=float) fitted values: array([1.0, -6.0, 9.000000000000004], dtype=float) dependent values: array([9.0, 4.0, 1.0, 0.0, 1.0, 4.0, 9.0], dtype=float) fitted values: array([1.0, -6.0, 9.000000000000004], dtype=float) ###Markdown Execution time`polyfit` is based on the inversion of a matrix (there is more on the background in https://en.wikipedia.org/wiki/Polynomial_regression), and it requires the intermediate storage of `2*N*(deg+1)` floats, where `N` is the number of entries in the input array, and `deg` is the fit's degree. The additional computation costs of the matrix inversion discussed in [inv](inv) also apply. The example from above needs around 150 microseconds to return: ###Code %%micropython -pyboard 1 import ulab as np @timeit def time_polyfit(x, y, n): return np.polyfit(x, y, n) x = np.array([0, 1, 2, 3, 4, 5, 6]) y = np.array([9, 4, 1, 0, 1, 4, 9]) time_polyfit(x, y, 2) ###Output execution time: 153 us ###Markdown Fourier transformsnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.ifft.html fftSince `ulab`'s `ndarray` does not support complex numbers, the invocation of the Fourier transform differs from that in `numpy`. In `numpy`, you can simply pass an array or iterable to the function, and it will be treated as a complex array: ###Code fft.fft([1, 2, 3, 4, 1, 2, 3, 4]) ###Output _____no_output_____ ###Markdown **WARNING:** The array that is returned is also complex, i.e., the real and imaginary components are cast together. In `ulab`, the real and imaginary parts are treated separately: you have to pass two `ndarray`s to the function, although, the second argument is optional, in which case the imaginary part is assumed to be zero.**WARNING:** The function, as opposed to `numpy`, returns a 2-tuple, whose elements are two `ndarray`s, holding the real and imaginary parts of the transform separately. 
###Code %%micropython -pyboard 1 import ulab as np x = np.linspace(0, 10, num=1024) y = np.sin(x) z = np.zeros(len(x)) a, b = np.fft(x) print('real part:\t', a) print('\nimaginary part:\t', b) c, d = np.fft(x, z) print('\nreal part:\t', c) print('\nimaginary part:\t', d) ###Output real part: array([5119.996, -5.004663, -5.004798, ..., -5.005482, -5.005643, -5.006577], dtype=float) imaginary part: array([0.0, 1631.333, 815.659, ..., -543.764, -815.6588, -1631.333], dtype=float) real part: array([5119.996, -5.004663, -5.004798, ..., -5.005482, -5.005643, -5.006577], dtype=float) imaginary part: array([0.0, 1631.333, 815.659, ..., -543.764, -815.6588, -1631.333], dtype=float) ###Markdown ifftThe above-mentioned rules apply to the inverse Fourier transform. The inverse is also normalised by `N`, the number of elements, as is customary in `numpy`. With the normalisation, we can ascertain that the inverse of the transform is equal to the original array. ###Code %%micropython -pyboard 1 import ulab as np x = np.linspace(0, 10, num=1024) y = np.sin(x) a, b = np.fft(y) print('original vector:\t', y) y, z = np.ifft(a, b) # the real part should be equal to y print('\nreal part of inverse:\t', y) # the imaginary part should be equal to zero print('\nimaginary part of inverse:\t', z) ###Output original vector: array([0.0, 0.009775016, 0.0195491, ..., -0.5275068, -0.5357859, -0.5440139], dtype=float) real part of inverse: array([-2.980232e-08, 0.0097754, 0.0195494, ..., -0.5275064, -0.5357857, -0.5440133], dtype=float) imaginary part of inverse: array([-2.980232e-08, -1.451171e-07, 3.693752e-08, ..., 6.44871e-08, 9.34986e-08, 2.18336e-07], dtype=float) ###Markdown Note that unlike in `numpy`, the length of the array on which the Fourier transform is carried out must be a power of 2. If this is not the case, the function raises a `ValueError` exception. spectrumIn addition to the Fourier transform and its inverse, `ulab` also sports a function called `spectrum`, which returns the absolute value of the Fourier transform. This could be used to find the dominant spectral component in a time series. The arguments are treated in the same way as in `fft`, and `ifft`. ###Code %%micropython -pyboard 1 import ulab as np x = np.linspace(0, 10, num=1024) y = np.sin(x) a = np.spectrum(y) print('original vector:\t', y) print('\nspectrum:\t', a) ###Output original vector: array([0.0, 0.009775016, 0.0195491, ..., -0.5275068, -0.5357859, -0.5440139], dtype=float) spectrum: array([187.8641, 315.3125, 347.8804, ..., 84.4587, 347.8803, 315.3124], dtype=float) ###Markdown As such, `spectrum` is really just a shorthand for `np.sqrt(a*a + b*b)`: ###Code %%micropython -pyboard 1 import ulab as np x = np.linspace(0, 10, num=1024) y = np.sin(x) a, b = np.fft(y) print('\nspectrum calculated the hard way:\t', np.sqrt(a*a + b*b)) a = np.spectrum(y) print('\nspectrum calculated the lazy way:\t', a) ###Output spectrum calculated the hard way: array([187.8641, 315.3125, 347.8804, ..., 84.4587, 347.8803, 315.3124], dtype=float) spectrum calculated the lazy way: array([187.8641, 315.3125, 347.8804, ..., 84.4587, 347.8803, 315.3124], dtype=float) ###Markdown Computation and storage costs RAMThe FFT routine of `ulab` calculates the transform in place. This means that beyond reserving space for the two `ndarray`s that will be returned (the computation uses these two as intermediate storage space), only a handful of temporary variables, all floats or 32-bit integers, are required. 
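As a practical aside that is not part of the original text: the dominant spectral component mentioned above can be picked out by combining `spectrum` with `argmax`. For a record of `N` samples taken with a sampling interval `dt`, the bin index `k` (below `N/2`) corresponds to the frequency `k/(N*dt)`; the sketch below assumes this standard interpretation of the bins.
###Code
%%micropython -pyboard 1

import ulab as np

x = np.linspace(0, 10, num=1024)
y = np.sin(x)

a = np.spectrum(y)
k = np.argmax(a[:512])    # inspect the first half of the spectrum only
dt = x[1] - x[0]          # sampling interval of the record
print('index of the dominant component: ', k)
print('estimated frequency: ', k/(1024*dt))
###Output
_____no_output_____
###Markdown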
Speed of FFTsA comment on the speed: a 1024-point transform implemented in python would cost around 90 ms, and 13 ms in assembly, if the code runs on the pyboard, v.1.1. You can gain a factor of four by moving to the D series https://github.com/peterhinch/micropython-fourier/blob/master/README.md8-performance. ###Code %%micropython -pyboard 1 import ulab as np x = np.linspace(0, 10, num=1024) y = np.sin(x) np.fft(y) @timeit def np_fft(y): return np.fft(y) a, b = np_fft(y) ###Output execution time: 1985 us ###Markdown Notebook conversion ###Code import nbformat as nb import nbformat.v4.nbbase as nb4 from nbconvert import RSTExporter def convert_notebook(node, fn): (rst, resources) = rstexporter.from_notebook_node(notebook) with open(fn, 'w') as fout: fout.write(rst) rstexporter = RSTExporter() rstexporter.template_file = './templates/manual.tpl' source = nb.read('ulab-manual.ipynb', nb.NO_CONVERT) append_cell = False notebook = nb4.new_notebook() for j, cell in enumerate(source['cells']): if cell['cell_type'] == 'markdown': # skip everything before Introduction if cell['source'].split('\n')[0].startswith('# Introduction'): append_cell = True if append_cell: notebook.cells.append(cell) convert_notebook(notebook,'./manual/source/ulab.rst') %%writefile ./templates/manual.tpl {%- extends 'display_priority.tpl' -%} {% block in_prompt %} {% endblock in_prompt %} {% block output_prompt %} {% endblock output_prompt %} {% block input scoped%} {%- if cell.source.split('\n')[0].startswith('%%micropython') -%} .. code:: {{ '\n'.join(['# code to be run in micropython'] + cell.source.strip().split('\n')[1:]) | indent}} {%- else -%} .. code:: {{ '\n'.join(['# code to be run in CPython\n'] + cell.source.strip().split('\n')) | indent}} {%- endif -%} {% endblock input %} {% block error %} :: {{ super() }} {% endblock error %} {% block traceback_line %} {{ line | indent | strip_ansi }} {% endblock traceback_line %} {% block execute_result %} {% block data_priority scoped %} {{ super() }} {% endblock %} {% endblock execute_result %} {% block stream %} .. parsed-literal:: {{ output.text | indent }} {% endblock stream %} {% block data_svg %} .. image:: {{ output.metadata.filenames['image/svg+xml'] | urlencode }} {% endblock data_svg %} {% block data_png %} .. image:: {{ output.metadata.filenames['image/png'] | urlencode }} {%- set width=output | get_metadata('width', 'image/png') -%} {%- if width is not none %} :width: {{ width }}px {%- endif %} {%- set height=output | get_metadata('height', 'image/png') -%} {%- if height is not none %} :height: {{ height }}px {%- endif %} {% endblock data_png %} {% block data_jpg %} .. image:: {{ output.metadata.filenames['image/jpeg'] | urlencode }} {%- set width=output | get_metadata('width', 'image/jpeg') -%} {%- if width is not none %} :width: {{ width }}px {%- endif %} {%- set height=output | get_metadata('height', 'image/jpeg') -%} {%- if height is not none %} :height: {{ height }}px {%- endif %} {% endblock data_jpg %} {% block data_markdown %} {{ output.data['text/markdown'] | convert_pandoc("markdown", "rst") }} {% endblock data_markdown %} {% block data_latex %} .. math:: {{ output.data['text/latex'] | strip_dollars | indent }} {% endblock data_latex %} {% block data_text scoped %} .. parsed-literal:: {{ output.data['text/plain'] | indent }} {% endblock data_text %} {% block data_html scoped %} .. 
raw:: html {{ output.data['text/html'] | indent }} {% endblock data_html %} {% block markdowncell scoped %} {{ cell.source | convert_pandoc("markdown", "rst") }} {% endblock markdowncell %} {%- block rawcell scoped -%} {%- if cell.metadata.get('raw_mimetype', '').lower() in resources.get('raw_mimetypes', ['']) %} {{cell.source}} {% endif -%} {%- endblock rawcell -%} {% block headingcell scoped %} {{ ("#" * cell.level + cell.source) | replace('\n', ' ') | convert_pandoc("markdown", "rst") }} {% endblock headingcell %} {% block unknowncell scoped %} unknown type {{cell.type}} {% endblock unknowncell %} from IPython.core.magic import Magics, magics_class, line_cell_magic from IPython.core.magic import cell_magic, register_cell_magic, register_line_magic from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring import subprocess import os @magics_class class PyboardMagic(Magics): @cell_magic @magic_arguments() @argument('-skip') @argument('-unix') @argument('-pyboard') @argument('-file') @argument('-data') @argument('-time') @argument('-memory') def micropython(self, line='', cell=None): args = parse_argstring(self.micropython, line) if args.skip: # doesn't care about the cell's content print('skipped execution') return None # do not parse the rest if args.unix: # tests the code on the unix port. Note that this works on unix only with open('/dev/shm/micropython.py', 'w') as fout: fout.write(cell) proc = subprocess.Popen(["../../micropython/ports/unix/micropython", "/dev/shm/micropython.py"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) print(proc.stdout.read().decode("utf-8")) print(proc.stderr.read().decode("utf-8")) return None if args.file: # can be used to copy the cell content onto the pyboard's flash spaces = " " try: with open(args.file, 'w') as fout: fout.write(cell.replace('\t', spaces)) printf('written cell to {}'.format(args.file)) except: print('Failed to write to disc!') return None # do not parse the rest if args.data: # can be used to load data from the pyboard directly into kernel space message = pyb.exec(cell) if len(message) == 0: print('pyboard >>>') else: print(message.decode('utf-8')) # register new variable in user namespace self.shell.user_ns[args.data] = string_to_matrix(message.decode("utf-8")) if args.time: # measures the time of executions pyb.exec('import utime') message = pyb.exec('t = utime.ticks_us()\n' + cell + '\ndelta = utime.ticks_diff(utime.ticks_us(), t)' + "\nprint('execution time: {:d} us'.format(delta))") print(message.decode('utf-8')) if args.memory: # prints out memory information message = pyb.exec('from micropython import mem_info\nprint(mem_info())\n') print("memory before execution:\n========================\n", message.decode('utf-8')) message = pyb.exec(cell) print(">>> ", message.decode('utf-8')) message = pyb.exec('print(mem_info())') print("memory after execution:\n========================\n", message.decode('utf-8')) if args.pyboard: message = pyb.exec(cell) print(message.decode('utf-8')) ip = get_ipython() ip.register_magics(PyboardMagic) import pyboard pyb = pyboard.Pyboard('/dev/ttyACM0') pyb.enter_raw_repl() pyb.exit_raw_repl() pyb.close() %%micropython -pyboard 1 import utime import ulab as np def timeit(n=1000): def wrapper(f, *args, **kwargs): func_name = str(f).split(' ')[1] def new_func(*args, **kwargs): run_times = np.zeros(n, dtype=np.uint16) for i in range(n): t = utime.ticks_us() result = f(*args, **kwargs) run_times[i] = utime.ticks_diff(utime.ticks_us(), t) print('{}() execution times based on {} 
cycles'.format(func_name, n, (delta2-delta1)/n)) print('\tbest: %d us'%np.min(run_times)) print('\tworst: %d us'%np.max(run_times)) print('\taverage: %d us'%np.mean(run_times)) print('\tdeviation: +/-%.3f us'%np.std(run_times)) return result return new_func return wrapper def timeit(f, *args, **kwargs): func_name = str(f).split(' ')[1] def new_func(*args, **kwargs): t = utime.ticks_us() result = f(*args, **kwargs) print('execution time: ', utime.ticks_diff(utime.ticks_us(), t), ' us') return result return new_func ###Output ###Markdown Introduction In https://micropython-usermod.readthedocs.io/en/latest/usermods_14.html, I mentioned that I have another story, for another day. The day has come, so here is my story. Enter ulab`ulab` is a numpy-like module for micropython, meant to simplify and speed up common mathematical operations on arrays. The primary goal was to implement a small subset of numpy that might be useful in the context of a microcontroller. This means low-level data processing of linear (array) and two-dimensional (matrix) data. PurposeOf course, the first question that one has to answer is, why on Earth one would need a fast math library on a microcontroller. After all, it is not expected that heavy number crunching is going to take place on bare metal. It is not meant to. On a PC, the main reason for writing fast code is the sheer amount of data that one wants to process. On a microcontroller, the data volume is probably small, but it might lead to catastrophic system failure, if these data are not processed in time, because the microcontroller is supposed to interact with the outside world in a timely fashion. In fact, this latter objective was the initiator of this project: I needed the Fourier transform of the ADC signal, and all the available options were simply too slow. In addition to speed, another issue that one has to keep in mind when working with embedded systems is the amount of available RAM: I believe, everything here could be implemented in pure python with relatively little effort, but the price we would have to pay for that is not only speed, but RAM, too. python code, if is not frozen, and compiled into the firmware, has to be compiled at runtime, which is not exactly a cheap process. On top of that, if numbers are stored in a list or tuple, which would be the high-level container, then they occupy 8 bytes, no matter, whether they are all smaller than 100, or larger than one hundred million. This is obviously a waste of resources in an environment, where resources are scarce. Finally, there is a reason for using micropython in the first place. Namely, that a microcontroller can be programmed in a very elegant, and *pythonic* way. But if it is so, why should we not extend this idea to other tasks and concepts that might come up in this context? If there was no other reason than this *elegance*, I would find that convincing enough.Based on the above-mentioned considerations, all functions are implemented in a way that 1. conforms to `numpy` as much as possible2. is so frugal with RAM as possible,3. and yet, fast. Much faster than pure python.The main points of `ulab` are - compact, iterable and slicable containers of numerical data in 1, and 2 dimensions (arrays and matrices). 
These containers support all the relevant unary and binary operators (e.g., `len`, ==, +, *, etc.)- vectorised computations on micropython iterables and numerical arrays/matrices (in numpy-speak, universal functions)- basic linear algebra routines (matrix inversion, multiplication, reshaping, transposition, determinant, and eigenvalues)- polynomial fits to numerical data- fast Fourier transformsAt the time of writing this manual (for version 0.26), the library adds approximately 30 kB of extra compiled code to the micropython (pyboard.v.11) firmware. Resources and legal mattersThe source code of the module can be found under https://github.com/v923z/micropython-ulab/tree/master/code. The source of this user manual is under https://github.com/v923z/micropython-ulab/tree/master/docs, while the technical details of the implementation are discussed at great length in https://github.com/v923z/micropython-ulab/tree/master/docs/ulab.ipynb. If you want an even thorougher explanation on why the various constructs of the implementation work, and work in that particular way, you can read more on the subject under https://micropython-usermod.readthedocs.io/en/latest/, where I demonstrate, what you have to do, if you want to make a C object behave in a *pythonic* way. The MIT licence applies to all material. Friendly requestIf you use `ulab`, and bump into a bug, or think that a particular function is missing, or its behaviour does not conform to `numpy`, please, raise an issue on github, so that the community can profit from your experiences. Even better, if you find the project useful, and think that it could be made better, faster, tighter, and shinier, please, consider contributing, and issue a pull request with the implementation of your improvements and new features. `ulab` can only become successful, if it offers what the community needs.These last comments apply to the documentation, too. If, in your opinion, the documentation is obscure, misleading, or not detailed enough, please, let me know, so that *we* can fix it. Supported functions and methods`ulab` supports a number of array operators, which are listed here. I tried to follow the specifications of the `numpy` interface as closely as possible, though, it was not always practical to implement verbatim behaviour. The differences, if any, are in each case small (e.g., a function cannot take all possible keyword arguments), and should not hinder everyday use. In the list below, a single asterisk denotes slight deviations from `numpy`'s nomenclature, and a double asterisk denotes those cases, where a bit more caution should be exercised, though this usually means functions that are not supported by `numpy`.The detailed discussion of the various functions always contains a link to the corresponding `numpy` documentation. However, before going down the rabbit hole, the module also defines a constant, the version, which can always be queried as ###Code %%micropython -unix 1 import ulab as np print('you are running ulab version', np.__version__) ###Output you are running ulab version 0.24 ###Markdown If you find a bug, please, include this number in your report! 
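###Markdown Before moving on to the list of supported operations, the speed argument made above can be checked directly with the `timeit` helpers defined in the setup section. The cell below is only a sketch: it assumes a pyboard session in which those helpers (and their `utime` import) are already loaded, and `mean_of_list`/`ulab_mean` are hypothetical wrappers written just for the comparison. ###Code %%micropython -pyboard 1

import ulab as np

def mean_of_list(lst):
    # pure-python reference: a simple accumulating loop
    s = 0
    for item in lst:
        s = s + item
    return s / len(lst)

def ulab_mean(a):
    # thin python wrapper around the vectorised ulab call
    return np.mean(a)

a = np.array(range(1000), dtype=np.float)
lst = list(range(1000))

# both wrapped calls print their execution time; the vectorised version should be much faster
timeit(ulab_mean)(a)
timeit(mean_of_list)(lst)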
Basic ndarray operations[Unary operators](Unary-operators)[Binary operators](Binary-operators)[Indexing and slicing](Slicing-and-indexing)[ndarray iterators](Iterating-over-arrays)[Comparison operators*](Comparison-operators)[Universal functions](Universal-functions) (also support function calls on general iterables) Methods of ndarrays[.shape](.shape)[.reshape](.reshape)[.rawsize**](.rawsize)[.transpose](.transpose)[.flatten**](.flatten)[.asbytearray](.asbytearray) Matrix methods[size](size)[inv](inv)[dot](dot)[det](det)[roll](roll)[flip](flip) Array initialisation functions[eye](eye)[ones](ones,-zeros)[zeros](ones,-zeros)[linspace](linspace) Statistical and other properties of arrays[min](min,-argmin,-max,-argmax)[argmin](min,-argmin,-max,-argmax)[max](min,-argmin,-max,-argmax)[argmax](min,-argmin,-max,-argmax)[sum](sum,-std,-mean)[std](sum,-std,-mean)[mean](sum,-std,-mean)[diff](diff)[sort](sort)[argsort](argsort) Manipulation of polynomials[polyval](polyval)[polyfit](polyfit) FFT routines[fft**](fft)[ifft**](ifft)[spectrum**](spectrum) ndarray, the basic containerThe `ndarray` is the underlying container of numerical data. It is derived from micropython's own `array` object, but has a great number of extra features starting with how it can be initialised, how operations can be done on it, and which functions can accept it as an argument.Since the `ndarray` is a binary container, it is also compact, meaning that it takes only a couple of bytes of extra RAM in addition to what is required for storing the numbers themselves. `ndarray`s are also type-aware, i.e., one can save RAM by specifying a data type, and using the smallest reasonable one. Five such types are defined, namely `uint8`, `int8`, which occupy a single byte of memory per datum, `uint16`, and `int16`, which occupy two bytes per datum, and `float`, which occupies four or eight bytes per datum. The precision/size of the `float` type depends on the definition of `mp_float_t`. Some platforms, e.g., the PYBD, implement `double`s, but some, e.g., the pyboard.v.11, don't. You can find out, what type of float your particular platform implements by looking at the output of the [.rawsize](.rawsize) class method.On the following pages, we will see how one can work with `ndarray`s. Those familiar with `numpy` should find that the nomenclature and naming conventions of `numpy` are adhered to as closely as possible. I will point out the few differences, where necessary.For the sake of comparison, in addition to `ulab` code snippets, sometimes the equivalent `numpy` code is also presented. You can find out, where the snippet is supposed to run by looking at its first line, the header.Hint: you can easily port existing `numpy` code, if you `import ulab as np`. Initialising an arrayA new array can be created by passing either a standard micropython iterable, or another `ndarray` into the constructor. Initialising by passing iterablesIf the iterable is one-dimensional, i.e., one whose elements are numbers, then a row vector will be created and returned. If the iterable is two-dimensional, i.e., one whose elements are again iterables, a matrix will be created. If the lengths of the iterables is not consistent, a `ValueError` will be raised. Iterables of different types can be mixed in the initialisation function. If the `dtype` keyword with the possible `uint8/int8/uint16/int16/float` values is supplied, the new `ndarray` will have that type, otherwise, it assumes `float` as default. 
###Code %%micropython -unix 1 import ulab as np a = [1, 2, 3, 4, 5, 6, 7, 8] b = np.array(a) print("a:\t", a) print("b:\t", b) # a two-dimensional array with mixed-type initialisers c = np.array([range(5), range(20, 25, 1), [44, 55, 66, 77, 88]], dtype=np.uint8) print("\nc:\t", c) # and now we throw an exception d = np.array([range(5), range(10), [44, 55, 66, 77, 88]], dtype=np.uint8) print("\nd:\t", d) ###Output a: [1, 2, 3, 4, 5, 6, 7, 8] b: array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) c: array([[0, 1, 2, 3, 4], [20, 21, 22, 23, 24], [44, 55, 66, 77, 88]], dtype=uint8) Traceback (most recent call last): File "/dev/shm/micropython.py", line 15, in <module> ValueError: iterables are not of the same length ###Markdown `ndarray`s are pretty-printed, i.e., if the length is larger than 10, then only the first and last three entries will be printed. Also note that, as opposed to `numpy`, the printout always contains the `dtype`. ###Code %%micropython -unix 1 import ulab as np a = np.array(range(200)) print("a:\t", a) ###Output a: array([0.0, 1.0, 2.0, ..., 197.0, 198.0, 199.0], dtype=float) ###Markdown Initialising by passing arraysAn `ndarray` can be initialised by supplying another array. This statement is almost trivial, since `ndarray`s are iterables themselves, though it should be pointed out that initialising through arrays should be faster, because simply a new copy is created, without inspection, iteration etc. ###Code %%micropython -unix 1 import ulab as np a = [1, 2, 3, 4, 5, 6, 7, 8] b = np.array(a) c = np.array(b) print("a:\t", a) print("b:\t", b) print("\nc:\t", c) ###Output a: [1, 2, 3, 4, 5, 6, 7, 8] b: array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) c: array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=float) ###Markdown Methods of ndarrays .shapeThe `.shape` method returns a 2-tuple with the number of rows, and columns. ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4], dtype=np.int8) print("a:\n", a) print("shape of a:", a.shape()) b= np.array([[1, 2], [3, 4]], dtype=np.int8) print("\nb:\n", b) print("shape of b:", b.shape()) ###Output a: array([1, 2, 3, 4], dtype=int8) shape of a: (1, 4) b: array([[1, 2], [3, 4]], dtype=int8) shape of b: (2, 2) ###Markdown .reshapenumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.reshape.html`reshape` re-writes the shape properties of an `ndarray`, but the array will not be modified in any other way. The function takes a single 2-tuple with two integers as its argument. The 2-tuple should specify the desired number of rows and columns. If the new shape is not consistent with the old, a `ValueError` exception will be raised. ###Code %%micropython -unix 1 import ulab as np a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], dtype=np.uint8) print('a (4 by 4):', a) print('a (2 by 8):', a.reshape((2, 8))) print('a (1 by 16):', a.reshape((1, 16))) ###Output a (4 by 4): array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], dtype=uint8) a (2 by 8): array([[1, 2, 3, 4, 5, 6, 7, 8], [9, 10, 11, 12, 13, 14, 15, 16]], dtype=uint8) a (1 by 16): array([1, 2, 3, ..., 14, 15, 16], dtype=uint8) ###Markdown .rawsizeThe `rawsize` method of the `ndarray` returns a 5-tuple with the following data1. number of rows2. number of columns3. length of the storage (should be equal to the product of 1. and 2.)4. length of the data storage in bytes 5. 
datum size in bytes (1 for `uint8`/`int8`, 2 for `uint16`/`int16`, and 4, or 8 for `floats`, see [ndarray, the basic container](ndarray,-the-basic-container))**WARNING:** `rawsize` is a `ulab`-only method; it has no equivalent in `numpy`. ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4], dtype=np.float) print("a: \t\t", a) print("rawsize of a: \t", a.rawsize()) ###Output a: array([1.0, 2.0, 3.0, 4.0], dtype=float) rawsize of a: (1, 4, 4, 16, 4) ###Markdown .flattennumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.flatten.htm`.flatten` returns the flattened array. The array can be flattened in `C` style (i.e., moving horizontally in the matrix), or in `fortran` style (i.e., moving vertically in the matrix). The `C`-style flattening is the default, and it is also fast, because this is just a verbatim copy of the contents. ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4], dtype=np.int8) print("a: \t\t", a) print("a flattened: \t", a.flatten()) b = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int8) print("\nb:", b) print("b flattened (C): \t", b.flatten()) print("b flattened (F): \t", b.flatten(order='F')) ###Output a: array([1, 2, 3, 4], dtype=int8) a flattened: array([1, 2, 3, 4], dtype=int8) b: array([[1, 2, 3], [4, 5, 6]], dtype=int8) b flattened (C): array([1, 2, 3, 4, 5, 6], dtype=int8) b flattened (F): array([1, 4, 2, 5, 3, 6], dtype=int8) ###Markdown .asbytearrayThe contents of an `ndarray` can be accessed directly by calling the `.asbytearray` method. This will simply return a pointer to the underlying flat `array` object, which can then be manipulated directly.**WARNING:** `asbytearray` is a `ulab`-only method; it has no equivalent in `numpy`.In the example below, note the difference between `a`, and `buffer`: while both are designated as an array, you recognise the micropython array from the fact that it prints the typecode (`b` in this particular case). The `ndarray`, on the other hand, prints out the `dtype` (`int8` here). ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4], dtype=np.int8) print('a: ', a) buffer = a.asbytearray() print("array content:", buffer) buffer[1] = 123 print("array content:", buffer) ###Output a: array([1, 2, 3, 4], dtype=int8) array content: array('b', [1, 2, 3, 4]) array content: array('b', [1, 123, 3, 4]) ###Markdown This in itself wouldn't be very interesting, but since `buffer` is a proper micropython `array`, we can pass it to functions that can employ the buffer protocol. E.g., all the `ndarray` facilities can be applied to the results of timed ADC conversions. ###Code %%micropython -pyboard 1 import pyb import ulab as np n = 100 adc = pyb.ADC(pyb.Pin.board.X19) tim = pyb.Timer(6, freq=10) a = np.array([0]*n, dtype=np.uint8) buffer = a.asbytearray() adc.read_timed(buffer, tim) print("ADC results:\t", a) print("mean of results:\t", np.mean(a)) print("std of results:\t", np.std(a)) ###Output ADC results: array([48, 2, 2, ..., 0, 0, 0], dtype=uint8) mean of results: 1.22 std of results: 4.744639 ###Markdown Likewise, data can be read directly into `ndarray`s from other interfaces, e.g., SPI, I2C etc, and also, by laying bare the `ndarray`, we can pass results of `ulab` computations to anything that can read from a buffer. .transposenumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.transpose.htmlNote that only square matrices can be transposed in place, and in general, an internal copy of the matrix is required. 
If RAM is a concern, plan accordingly! ###Code %%micropython -unix 1 import ulab as np a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=np.uint8) print('a:\n', a) print('shape of a:', a.shape()) a.transpose() print('\ntranspose of a:\n', a) print('shape of a:', a.shape()) ###Output a: array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=uint8) shape of a: (4, 3) transpose of a: array([[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]], dtype=uint8) shape of a: (3, 4) ###Markdown .sortnumpy: https://docs.scipy.org/doc/numpy/reference/generated/numpy.sort.htmlIn-place sorting of an `ndarray`. For a more detailed exposition, see [sort](sort). ###Code %%micropython -unix 1 import ulab as np a = np.array([[1, 12, 3, 0], [5, 3, 4, 1], [9, 11, 1, 8], [7, 10, 0, 1]], dtype=np.uint8) print('\na:\n', a) a.sort(axis=0) print('\na sorted along vertical axis:\n', a) a = np.array([[1, 12, 3, 0], [5, 3, 4, 1], [9, 11, 1, 8], [7, 10, 0, 1]], dtype=np.uint8) a.sort(a, axis=1) print('\na sorted along horizontal axis:\n', a) a = np.array([[1, 12, 3, 0], [5, 3, 4, 1], [9, 11, 1, 8], [7, 10, 0, 1]], dtype=np.uint8) a.sort(a, axis=None) print('\nflattened a sorted:\n', a) ###Output a: array([[1, 12, 3, 0], [5, 3, 4, 1], [9, 11, 1, 8], [7, 10, 0, 1]], dtype=uint8) a sorted along vertical axis: array([[1, 3, 0, 0], [5, 10, 1, 1], [7, 11, 3, 1], [9, 12, 4, 8]], dtype=uint8) a sorted along horizontal axis: array([[0, 1, 3, 12], [1, 3, 4, 5], [1, 8, 9, 11], [0, 1, 7, 10]], dtype=uint8) flattened a sorted: array([0, 0, 1, ..., 10, 11, 12], dtype=uint8) ###Markdown Unary operatorsWith the exception of `len`, which returns a single number, all unary operators manipulate the underlying data element-wise. lenThis operator takes a single argument, and returns either the length (for row vectors), or the number of rows (for matrices) of its argument. ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4, 5], dtype=np.uint8) b = np.array([range(5), range(5), range(5), range(5)], dtype=np.uint8) print("a:\t", a) print("length of a: ", len(a)) print("shape of a: ", a.shape()) print("\nb:\t", b) print("length of b: ", len(b)) print("shape of b: ", b.shape()) ###Output a: array([1, 2, 3, 4, 5], dtype=uint8) length of a: 5 shape of a: (1, 5) b: array([[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], dtype=uint8) length of b: 4 shape of b: (4, 5) ###Markdown The number returned by `len` is also the length of the iterations, when the array supplies the elements for an iteration (see later). invertThe function function is defined for integer data types (`uint8`, `int8`, `uint16`, and `int16`) only, takes a single argument, and returns the element-by-element, bit-wise inverse of the array. If a `float` is supplied, the function raises a `ValueError` exception.With signed integers (`int8`, and `int16`), the results might be unexpected, as in the example below: ###Code %%micropython -unix 1 import ulab as np a = np.array([0, -1, -100], dtype=np.int8) print("a:\t\t", a) print("inverse of a:\t", ~a) a = np.array([0, 1, 254, 255], dtype=np.uint8) print("\na:\t\t", a) print("inverse of a:\t", ~a) ###Output a: array([0, -1, -100], dtype=int8) inverse of a: array([-1, 0, 99], dtype=int8) a: array([0, 1, 254, 255], dtype=uint8) inverse of a: array([255, 254, 1, 0], dtype=uint8) ###Markdown absThis function takes a single argument, and returns the element-by-element absolute value of the array. 
When the data type is unsigned (`uint8`, or `uint16`), a copy of the array will be returned immediately, and no calculation takes place. ###Code %%micropython -unix 1 import ulab as np a = np.array([0, -1, -100], dtype=np.int8) print("a:\t\t\t ", a) print("absolute value of a:\t ", abs(a)) ###Output a: array([0, -1, -100], dtype=int8) absolute value of a: array([0, 1, 100], dtype=int8) ###Markdown negThis operator takes a single argument, and changes the sign of each element in the array. Unsigned values are wrapped. ###Code %%micropython -unix 1 import ulab as np a = np.array([10, -1, 1], dtype=np.int8) print("a:\t\t", a) print("negative of a:\t", -a) b = np.array([0, 100, 200], dtype=np.uint8) print("\nb:\t\t", b) print("negative of b:\t", -b) ###Output a: array([10, -1, 1], dtype=int8) negative of a: array([-10, 1, -1], dtype=int8) b: array([0, 100, 200], dtype=uint8) negative of b: array([0, 156, 56], dtype=uint8) ###Markdown posThis function takes a single argument, and simply returns a copy of the array. ###Code %%micropython -unix 1 import ulab as np a = np.array([10, -1, 1], dtype=np.int8) print("a:\t\t", a) print("positive of a:\t", +a) ###Output a: array([10, -1, 1], dtype=int8) positive of a: array([10, -1, 1], dtype=int8) ###Markdown Binary operatorsAll binary operators work element-wise. This also means that the operands either must have the same shape, or one of them must be a scalar.**WARNING:** `numpy` also allows operations between a matrix, and a row vector, if the row vector has exactly as many elements, as many columns the matrix has. This feature will be added in future versions of `ulab`. ###Code a = array([[1, 2, 3], [4, 5, 6], [7, 8, 6]]) b = array([10, 20, 30]) a+b ###Output _____no_output_____ ###Markdown UpcastingBinary operations require special attention, because two arrays with different typecodes can be the operands of an operation, in which case it is not trivial, what the typecode of the result is. This decision on the result's typecode is called upcasting. Since the number of typecodes in `ulab` is significantly smaller than in `numpy`, we have to define new upcasting rules. Where possible, I followed `numpy`'s conventions. `ulab` observes the following upcasting rules:1. Operations with two `ndarray`s of the same `dtype` preserve their `dtype`, even when the results overflow.2. if either of the operands is a float, the result is automatically a float3. When the right hand side of a binary operator is a micropython variable, `mp_obj_int`, or `mp_obj_float`, then the result will be promoted to `dtype` `float`. This is necessary, because a micropython integer can be 31 bites wide. Other micropython types (e.g., lists, tuples, etc.) raise a `TypeError` exception. 4. | left hand side | right hand side | ulab result | numpy result ||----------------|-----------------|-------------|--------------||`uint8` |`int8` |`int16` |`int16` ||`uint8` |`int16` |`int16` |`int16` ||`uint8` |`uint16` |`uint16` |`uint16` ||`int8` |`int16` |`int16` |`int16` | |`int8` |`uint16` |`uint16` |`int32` ||`uint16` |`int16` |`float` |`int32` | Note that the last two operations are promoted to `int32` in `numpy`. **WARNING:** Due to the lower number of available data types, the upcasting rules of `ulab` are slightly different to those of `numpy`. 
Watch out for this, when porting code!Upcasting can be seen in action in the following snippet: ###Code %%micropython -unix 1 import ulab as np a = np.array([1, 2, 3, 4], dtype=np.uint8) b = np.array([1, 2, 3, 4], dtype=np.int8) print("a:\t", a) print("b:\t", b) print("a+b:\t", a+b) c = np.array([1, 2, 3, 4], dtype=np.float) print("\na:\t", a) print("c:\t", c) print("a*c:\t", a*c) ###Output a: array([1, 2, 3, 4], dtype=uint8) b: array([1, 2, 3, 4], dtype=int8) a+b: array([2, 4, 6, 8], dtype=int16) a: array([1, 2, 3, 4], dtype=uint8) c: array([1.0, 2.0, 3.0, 4.0], dtype=float) a*c: array([1.0, 4.0, 9.0, 16.0], dtype=float)
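###Markdown Rule 3 in the list above (promotion to `float` whenever the right hand side is a generic micropython scalar) can be seen in a similarly small sketch; it assumes the same unix-port session as the other snippets: ###Code %%micropython -unix 1

import ulab as np

a = np.array([1, 2, 3, 4], dtype=np.uint8)
print("a:\t", a)
# the right hand side is a plain micropython integer, so by rule 3 the result is upcast to float
print("a+5:\t", a+5)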
2_Curso/Laboratorio/SAGE-noteb/IPYNB/CRIPT/83-CRIPT-vigenere-frec.ipynb
###Markdown Cifra de VigenerePuedes leer sobre este m&eacute;todo de cifrado&nbsp;&nbsp; en Vigenere. Elegida una palabra clave, por ejemplo "CIRUELA", el m&eacute;todo para encriptar consiste en, primero, obtener para cada letra de la clave el n&uacute;mero ASCII que le corresponde: ###Code L3 = list("CIRUELA");print L3 L4 = map(ord2,L3);print L4 ###Output [2, 8, 17, 20, 4, 11, 0] ###Markdown Llamemos $K$ a la lista de enteros correspondientes a la clave y $m$ a la longitud de la clave, en nuestro ejemplo $7$. Para cada resto m&oacute;dulo $m$, digamos $k$, tenemos un entero $K[k]$.En el segundo paso del m&eacute;todo de Vigenere, cada letra del mensaje, que ocupa una posici&oacute;n digamos $N$ en el mensaje, se encripta de manera diferente seg&uacute;n el valor del resto de dividir $N$ entre $m$. Si el valor de ese resto es $k$, usamos $k$ como clave para encriptarla mediante la cifra de C&eacute;sar.Finalmente, tenemos una lista de enteros, entre $0$ y $25$, y la transformamos en una cadena de caracteres, que son el mensaje encriptado. ###Code def encriptar_vig(T,C): '''T y C son cadenas de caracteres, T el texto y C la clave''' L1 = map(ord2,list(C)) L2 = map(ord2,list(T)) L3 = [] for int in srange(len(L2)): n = int%len(C) L3.append(chr2((L2[int]+L1[n])%26)) return join(L3,sep="") texto_e = encriptar_vig(texto_l,'CIRUELA');texto_e def complementaria(C): L1 = map(ord2,list(C)) return join(map(chr2,[(26-n)%26 for n in L1]),sep="") encriptar_vig(encriptar_vig(texto_l,'CIRUELA'),complementaria('CIRUELA')) ###Output _____no_output_____ ###Markdown Análisis de frecuencias ###Code def cortar_texto(texto,nclave): '''Almacenamos en C una lista con nclave cadenas de caracteres, cada una contiene la parte del texto que se ha encriptado con la misma clave''' C=[item for item in texto[:nclave]] for i in srange(nclave): for j in srange(len(texto)): if j%nclave==i: C[i] += texto[j] else: continue return C cortado = cortar_texto(texto_e,7);cortado def analisis_frec(T): frecuencias = {} N = len(T) for letra in T: if letra in frecuencias: frecuencias[letra] += (1/N).n() else: frecuencias[letra]=(1/N).n() return frecuencias def invertir(dicc): dict_inv = {} for key in dicc: dict_inv[dicc[key]] = ord2(key) return dict_inv def analisis_frec_compl(T): dicc = analisis_frec(T) dicc2 = invertir(dicc) L = dicc2.items() L.sort(reverse=True) return L FF = analisis_frec_compl(cortado[0]);FF chr2(21) chr2(6) ###Output _____no_output_____ ###Markdown Aparece aqu&iacute; un problema: las frecuencias en el texto original, cuando se divide en&nbsp; 7 trozos,&nbsp; no corresponden a las frecuencias naturales en los textos en ingl&eacute;s y, entonces, es la G la que encripta a la E y no la V. Probablemente, eso indica que disponemos de demasiado poco texto. Si desencriptamos suponiendo que la clave consiste en encriptar E como V tendremos, siempre que no aparezca el mismo problema con los otros trozos del texto,&nbsp; una s&eacute;ptima parte de las letras mal. ###Code def analisis_frec2(T): dicc = analisis_frec(T) dicc2 = invertir(dicc) L = dicc2.items() L.sort(reverse=True) return chr2(L[0][1]-4) ###Output _____no_output_____ ###Markdown &iquest;De d&oacute;nde viene el $-4$ de la &uacute;ltima l&iacute;nea? 
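###Markdown A quick check of where the $-4$ comes from (a sketch that only relies on `ord2`/`chr2` mapping 'A'..'Z' to 0..25, as the CIRUELA example above shows): the most frequent letter in English text is E, whose code is 4, so subtracting 4 from the code of the most frequent letter of a chunk recovers the key letter used for that chunk. ###Code
# E is expected to be the most frequent plaintext letter, and its code is 4
print ord2('E')
# if G is the most frequent letter of the first chunk, the recovered key letter is C, as in CIRUELA
print chr2(ord2('G')-4)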
###Code def buscar_clave(texto_e,nclave): cortado = cortar_texto(texto_e,nclave) clave = '' for i in srange(nclave): clave += analisis_frec2(cortado[i]) return clave buscar_clave(texto_e,7) ###Output _____no_output_____ ###Markdown Probemos con un texto mucho mayor: ###Code texto_largo = "Tell me, O Muse, of that ingenious hero who travelled far and wide after he had sacked the famous town of Troy. Many cities did he visit, and many were the nations with whose manners and customs he was acquainted; moreover he suffered much by sea while trying to save his own life and bring his men safely home; but do what he might he could not save his men, for they perished through their own sheer folly in eating the cattle of the Sun-god Hyperion; so the god prevented them from ever reaching home. Tell me, too, about all these things, oh daughter of Jove, from whatsoever source you may know them. So now all who escaped death in battle or by shipwreck had got safely home except Ulysses, and he, though he was longing to return to his wife and country, was detained by the goddess Calypso, who had got him into a large cave and wanted to marry him. But as years went by, there came a time when the gods settled that he should go back to Ithaca; even then, however, when he was among his own people, his troubles were not yet over; nevertheless all the gods had now begun to pity him except Neptune, who still persecuted him without ceasing and would not let him get home. Now Neptune had gone off to the Ethiopians, who are at the world's end, and lie in two halves, the one looking West and the other East. {1} He had gone there to accept a hecatomb of sheep and oxen, and was enjoying himself at his festival; but the other gods met in the house of Olympian Jove, and the sire of gods and men spoke first. At that moment he was thinking of Aegisthus, who had been killed by Agamemnon's son Orestes; so he said to the other gods: See now, how men lay blame upon us gods for what is after all nothing but their own folly. Look at Aegisthus; he must needs make love to Agamemnon's wife unrighteously and then kill Agamemnon, though he knew it would be the death of him; for I sent Mercury to warn him not to do either of these things, inasmuch as Orestes would be sure to take his revenge when he grew up and wanted to return home. Mercury told him this in all good will but he would not listen, and now he has paid for everything in full. Then Minerva said, Father, son of Saturn, King of kings, it served Aegisthus right, and so it would any one else who does as he did; but Aegisthus is neither here nor there; it is for Ulysses that my heart bleeds, when I think of his sufferings in that lonely sea-girt island, far away, poor man, from all his friends. It is an island covered with forest, in the very middle of the sea, and a goddess lives there, daughter of the magician Atlas, who looks after the bottom of the ocean, and carries the great columns that keep heaven and earth asunder. This daughter of Atlas has got hold of poor unhappy Ulysses, and keeps trying by every kind of blandishment to make him forget his home, so that he is tired of life, and thinks of nothing but how he may once more see the smoke of his own chimneys. You, sir, take no heed of this, and yet when Ulysses was before Troy did he not propitiate you with many a burnt sacrifice? Why then should you keep on being so angry with him? And Jove said, My child, what are you talking about? 
How can I forget Ulysses than whom there is no more capable man on earth, nor more liberal in his offerings to the immortal gods that live in heaven? Bear in mind, however, that Neptune is still furious with Ulysses for having blinded an eye of Polyphemus king of the Cyclopes. Polyphemus is son to Neptune by the nymph Thoosa, daughter to the sea-king Phorcys; therefore though he will not kill Ulysses outright, he torments him by preventing him from getting home. Still, let us lay our heads together and see how we can help him to return; Neptune will then be pacified, for if we are all of a mind he can hardly stand out against us. And Minerva said, Father, son of Saturn, King of kings, if, then, the gods now mean that Ulysses should get home, we should first send Mercury to the Ogygian island to tell Calypso that we have made up our minds and that he is to return. In the meantime I will go to Ithaca, to put heart into Ulysses' son Telemachus; I will embolden him to call the Achaeans in assembly, and speak out to the suitors of his mother Penelope, who persist in eating up any number of his sheep and oxen; I will also conduct him to Sparta and to Pylos, to see if he can hear anything about the return of his dear father--for this will make people speak well of him. So saying she bound on her glittering golden sandals, imperishable, with which she can fly like the wind over land or sea; she grasped the redoubtable bronze-shod spear, so stout and sturdy and strong, wherewith she quells the ranks of heroes who have displeased her, and down she darted from the topmost summits of Olympus, whereon forthwith she was in Ithaca, at the gateway of Ulysses' house, disguised as a visitor, Mentes, chief of the Taphians, and she held a bronze spear in her hand. There she found the lordly suitors seated on hides of the oxen which they had killed and eaten, and playing draughts in front of the house. Men-servants and pages were bustling about to wait upon them, some mixing wine with water in the mixing-bowls, some cleaning down the tables with wet sponges and laying them out again, and some cutting up great quantities of meat. Telemachus saw her long before any one else did. He was sitting moodily among the suitors thinking about his brave father, and how he would send them flying out of the house, if he were to come to his own again and be honoured as in days gone by. Thus brooding as he sat among them, he caught sight of Minerva and went straight to the gate, for he was vexed that a stranger should be kept waiting for admittance. He took her right hand in his own, and bade her give him her spear. Welcome, said he, to our house, and when you have partaken of food you shall tell us what you have come for. He led the way as he spoke, and Minerva followed him. When they were within he took her spear and set it in the spear-stand against a strong bearing-post along with the many other spears of his unhappy father, and he conducted her to a richly decorated seat under which he threw a cloth of damask. There was a footstool also for her feet,{2} and he set another seat near her for himself, away from the suitors, that she might not be annoyed while eating by their noise and insolence, and that he might ask her more freely about his father. A maid servant then brought them water in a beautiful golden ewer and poured it into a silver basin for them to wash their hands, and she drew a clean table beside them. 
An upper servant brought them bread, and offered them many good things of what there was in the house, the carver fetched them plates of all manner of meats and set cups of gold by their side, and a manservant brought them wine and poured it out for them. Then the suitors came in and took their places on the benches and seats. {3} Forthwith men servants poured water over their hands, maids went round with the bread-baskets, pages filled the mixing-bowls with wine and water, and they laid their hands upon the good things that were before them. As soon as they had had enough to eat and drink they wanted music and dancing, which are the crowning embellishments of a banquet, so a servant brought a lyre to Phemius, whom they compelled perforce to sing to them. As soon as he touched his lyre and began to sing Telemachus spoke low to Minerva, with his head close to hers that no man might hear. I hope, sir, said he, that you will not be offended with what I am going to say. Singing comes cheap to those who do not pay for it, and all this is done at the cost of one whose bones lie rotting in some wilderness or grinding to powder in the surf. If these men were to see my father come back to Ithaca they would pray for longer legs rather than a longer purse, for money would not serve them; but he, alas, has fallen on an ill fate, and even when people do sometimes say that he is coming, we no longer heed them; we shall never see him again. And now, sir, tell me and tell me true, who you are and where you come from. Tell me of your town and parents, what manner of ship you came in, how your crew brought you to Ithaca, and of what nation they declared themselves to be--for you cannot have come by land. Tell me also truly, for I want to know, are you a stranger to this house, or have you been here in my father's time? In the old days we had many visitors for my father went about much himself. And Minerva answered, I will tell you truly and particularly all about it. I am Mentes, son of Anchialus, and I am King of the Taphians. I have come here with my ship and crew, on a voyage to men of a foreign tongue being bound for Temesa {4} with a cargo of iron, and I shall bring back copper. As for my ship, it lies over yonder off the open country away from the town, in the harbour Rheithron {5} under the wooded mountain Neritum. {6} Our fathers were friends before us, as old Laertes will tell you, if you will go and ask him. They say, however, that he never comes to town now, and lives by himself in the country, faring hardly, with an old woman to look after him and get his dinner for him, when he comes in tired from pottering about his vineyard. They told me your father was at home again, and that was why I came, but it seems the gods are still keeping him back, for he is not dead yet not on the mainland. It is more likely he is on some sea-girt island in mid ocean, or a prisoner among savages who are detaining him against his will. I am no prophet, and know very little about omens, but I speak as it is borne in upon me from heaven, and assure you that he will not be away much longer; for he is a man of such resource that even though he were in chains of iron he would find some means of getting home again. But tell me, and tell me true, can Ulysses really have such a fine looking fellow for a son? You are indeed wonderfully like him about the head and eyes, for we were close friends before he set sail for Troy where the flower of all the Argives went also. 
Since that time we have never either of us seen the other. My mother, answered Telemachus, tells me I am son to Ulysses, but it is a wise child that knows his own father. Would that I were son to one who had grown old upon his own estates, for, since you ask me, there is no more ill-starred man under heaven than he who they tell me is my father. And Minerva said, There is no fear of your race dying out yet, while Penelope has such a fine son as you are. But tell me, and tell me true, what is the meaning of all this feasting, and who are these people? What is it all about? Have you some banquet, or is there a wedding in the family--for no one seems to be bringing any provisions of his own? And the guests--how atrociously they are behaving; what riot they make over the whole house; it is enough to disgust any respectable person who comes near them. Sir, said Telemachus, as regards your question, so long as my father was here it was well with us and with the house, but the gods in their displeasure have willed it otherwise, and have hidden him away more closely than mortal man was ever yet hidden. I could have borne it better even though he were dead, if he had fallen with his men before Troy, or had died with friends around him when the days of his fighting were done; for then the Achaeans would have built a mound over his ashes, and I should myself have been heir to his renown; but now the storm-winds have spirited him away we know not whither; he is gone without leaving so much as a trace behind him, and I inherit nothing but dismay. Nor does the matter end simply with grief for the loss of my father; heaven has laid sorrows upon me of yet another kind; for the chiefs from all our islands, Dulichium, Same, and the woodland island of Zacynthus, as also all the principal men of Ithaca itself, are eating up my house under the pretext of paying their court to my mother, who will neither point blank say that she will not marry, {7} nor yet bring matters to an end; so they are making havoc of my estate, and before long will do so also with myself. Is that so? exclaimed Minerva, then you do indeed want Ulysses home again. Give him his helmet, shield, and a couple of lances, and if he is the man he was when I first knew him in our house, drinking and making merry, he would soon lay his hands about these rascally suitors, were he to stand once more upon his own threshold. He was then coming from Ephyra, where he had been to beg poison for his arrows from Ilus, son of Mermerus. Ilus feared the ever-living gods and would not give him any, but my father let him have some, for he was very fond of him. If Ulysses is the man he then was these suitors will have a short shrift and a sorry wedding. But there! It rests with heaven to determine whether he is to return, and take his revenge in his own house or no; I would, however, urge you to set about trying to get rid of these suitors at once. Take my advice, call the Achaean heroes in assembly to-morrow morning--lay your case before them, and call heaven to bear you witness. Bid the suitors take themselves off, each to his own place, and if your mother's mind is set on marrying again, let her go back to her father, who will find her a husband and provide her with all the marriage gifts that so dear a daughter may expect. As for yourself, let me prevail upon you to take the best ship you can get, with a crew of twenty men, and go in quest of your father who has so long been missing. 
Some one may tell you something, or (and people often hear things in this way) some heaven-sent message may direct you. First go to Pylos and ask Nestor; thence go on to Sparta and visit Menelaus, for he got home last of all the Achaeans; if you hear that your father is alive and on his way home, you can put up with the waste these suitors will make for yet another twelve months. If on the other hand you hear of his death, come home at once, celebrate his funeral rites with all due pomp, build a barrow to his memory, and make your mother marry again. Then, having done all this, think it well over in your mind how, by fair means or foul, you may kill these suitors in your own house. You are too old to plead infancy any longer; have you not heard how people are singing Orestes' praises for having killed his father's murderer Aegisthus? You are a fine, smart looking fellow; show your mettle, then, and make yourself a name in story. Now, however, I must go back to my ship and to my crew, who will be impatient if I keep them waiting longer; think the matter over for yourself, and remember what I have said to you. Sir, answered Telemachus, it has been very kind of you to talk to me in this way, as though I were your own son, and I will do all you tell me; I know you want to be getting on with your voyage, but stay a little longer till you have taken a bath and refreshed yourself. I will then give you a present, and you shall go on your way rejoicing; I will give you one of great beauty and value--a keepsake such as only dear friends give to one another. Minerva answered, Do not try to keep me, for I would be on my way at once. As for any present you may be disposed to make me, keep it till I come again, and I will take it home with me. You shall give me a very good one, and I will give you one of no less value in return. With these words she flew away like a bird into the air, but she had given Telemachus courage, and had made him think more than ever about his father. He felt the change, wondered at it, and knew that the stranger had been a god, so he went straight to where the suitors were sitting. Phemius was still singing, and his hearers sat rapt in silence as he told the sad tale of the return from Troy, and the ills Minerva had laid upon the Achaeans. Penelope, daughter of Icarius, heard his song from her room upstairs, and came down by the great staircase, not alone, but attended by two of her handmaids. When she reached the suitors she stood by one of the bearing posts that supported the roof of the cloisters {8} with a staid maiden on either side of her. She held a veil, moreover, before her face, and was weeping bitterly. Phemius, she cried, you know many another feat of gods and heroes, such as poets love to celebrate. Sing the suitors some one of these, and let them drink their wine in silence, but cease this sad tale, for it breaks my sorrowful heart, and reminds me of my lost husband whom I mourn ever without ceasing, and whose name was great over all Hellas and middle Argos. {9} Mother, answered Telemachus, let the bard sing what he has a mind to; bards do not make the ills they sing of; it is Jove, not they, who makes them, and who sends weal or woe upon mankind according to his own good pleasure. This fellow means no harm by singing the ill-fated return of the Danaans, for people always applaud the latest songs most warmly. Make up your mind to it and bear it; Ulysses is not the only man who never came back from Troy, but many another went down as well as he. 
Go, then, within the house and busy yourself with your daily duties, your loom, your distaff, and the ordering of your servants; for speech is man's matter, and mine above all others {10}--for it is I who am master here. She went wondering back into the house, and laid her son's saying in her heart. Then, going upstairs with her handmaids into her room, she mourned her dear husband till Minerva shed sweet sleep over her eyes. But the suitors were clamorous throughout the covered cloisters {11}, and prayed each one that he might be her bed fellow. Then Telemachus spoke, Shameless, he cried, and insolent suitors, let us feast at our pleasure now, and let there be no brawling, for it is a rare thing to hear a man with such a divine voice as Phemius has; but in the morning meet me in full assembly that I may give you formal notice to depart, and feast at one another's houses, turn and turn about, at your own cost. If on the other hand you choose to persist in spunging upon one man, heaven help me, but Jove shall reckon with you in full, and when you fall in my father's house there shall be no man to avenge you. The suitors bit their lips as they heard him, and marvelled at the boldness of his speech. Then, Antinous, son of Eupeithes, said, The gods seem to have given you lessons in bluster and tall talking; may Jove never grant you to be chief in Ithaca as your father was before you. Telemachus answered, Antinous, do not chide with me, but, god willing, I will be chief too if I can. Is this the worst fate you can think of for me? It is no bad thing to be a chief, for it brings both riches and honour. Still, now that Ulysses is dead there are many great men in Ithaca both old and young, and some other may take the lead among them; nevertheless I will be chief in my own house, and will rule those whom Ulysses has won for me. Then Eurymachus, son of Polybus, answered, It rests with heaven to decide who shall be chief among us, but you shall be master in your own house and over your own possessions; no one while there is a man in Ithaca shall do you violence nor rob you. And now, my good fellow, I want to know about this stranger. What country does he come from? Of what family is he, and where is his estate? Has he brought you news about the return of your father, or was he on business of his own? He seemed a well to do man, but he hurried off so suddenly that he was gone in a moment before we could get to know him. My father is dead and gone, answered Telemachus, and even if some rumour reaches me I put no more faith in it now. My mother does indeed sometimes send for a soothsayer and question him, but I give his prophecyings no heed. As for the stranger, he was Mentes, son of Anchialus, chief of the Taphians, an old friend of my father's. But in his heart he knew that it had been the goddess. The suitors then returned to their singing and dancing until the evening; but when night fell upon their pleasuring they went home to bed each in his own abode. {12} Telemachus's room was high up in a tower {13} that looked on to the outer court; hither, then, he hied, brooding and full of thought. A good old woman, Euryclea, daughter of Ops, the son of Pisenor, went before him with a couple of blazing torches. Laertes had bought her with his own money when she was quite young; he gave the worth of twenty oxen for her, and shewed as much respect to her in his household as he did to his own wedded wife, but he did not take her to his bed for he feared his wife's resentment. 
{14} She it was who now lighted Telemachus to his room, and she loved him better than any of the other women in the house did, for she had nursed him when he was a baby. He opened the door of his bed room and sat down upon the bed; as he took off his shirt {15} he gave it to the good old woman, who folded it tidily up, and hung it for him over a peg by his bed side, after which she went out, pulled the door to by a silver catch, and drew the bolt home by means of the strap. {16} But Telemachus as he lay covered with a woollen fleece kept thinking all night through of his intended voyage and of the counsel that Minerva had given him." texto_largo_l = limpiar(texto_largo,alfb) texto_largo_e = encriptar_vig(texto_largo_l,'CIRUELA'); buscar_clave(texto_largo_e,7) ###Output _____no_output_____ ###Markdown Longitud de la clave ###Code def buscar_clave2(texto_e): for int in srange(3,30): clave = buscar_clave(texto_largo_e,int) print clave buscar_clave2(texto_largo_e) ###Output EEA EEAE EEAEE EEEAEA CIRUELA EAAEEEEE EEEEEEARA AEELEURAAE EAEAAEAEEEE EREEEEAEALAA AERAEEAREEAEA CIRUELACIRUELA ARAEELAEAEEEEEE LLEEELEEERAEEEEA RAEREEEAAULAAEEEE EHAEELIELLEUEEEARA RAAEAUEEREEELEAIEEA LEAEEUUAAEAAELEERAAH CIRUELACIRUELACIRUELA EAUAEEAALUUEAAILHLREAL AELAAELEUAEUAEAEALRELEA LREUREAERAAAEEEEEUAEALEA LELLUAAAIEERARAHJEEEEAAAE AEIULRAEEULERUALAEELRAEEEL EHEAELAREEHAEEAARALAAIRERLA CIRUELACIRJELACIRUELACIRUELA REERLEARUAEEERLELALAUEAEALEEA
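###Markdown In the listing above the correct key length gives itself away: it is the smallest length whose recovered key simply repeats at its multiples (CIRUELA at 7, CIRUELACIRUELA at 14, and so on). Below is a minimal sketch of automating that choice, reusing `buscar_clave`; the helper name `buscar_longitud` and its stopping rule are just one possible choice. ###Code
def buscar_longitud(texto_e, maxlong=15):
    '''Returns the smallest n for which the key recovered for length 2*n is the length-n key repeated twice'''
    for n in srange(3, maxlong):
        clave = buscar_clave(texto_e, n)
        if buscar_clave(texto_e, 2*n) == clave + clave:
            return n, clave
    return None

# for texto_largo_e this should give (7, 'CIRUELA'), matching the listing above
print buscar_longitud(texto_largo_e)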
notebooks/roundtrip_comments.ipynb
###Markdown Define a class ###Code %%sh rm -f files/roundtrip_comments.yml from datafiles import datafile @datafile('files/roundtrip_comments.yml') class Sample: foo: int bar: str ###Output _____no_output_____ ###Markdown Initialize an instance ###Code sample = Sample(42, "Hello, world") %%sh cat files/roundtrip_comments.yml ###Output foo: 42 bar: Hello, world ###Markdown Modify the file ###Code %%writefile files/roundtrip_comments.yml # Heading comment foo: 42 bar: Hello, world! # Line comment %%sh cat files/roundtrip_comments.yml ###Output # Heading comment foo: 42 bar: Hello, world! # Line comment ###Markdown Modify the object ###Code sample.foo = 2019 sample.bar = "Hello, notebook" ###Output INFO: datafiles.mapper: Loading 'Sample' object from 'files/roundtrip_comments.yml' ###Markdown View merged contents ###Code %%sh cat files/roundtrip_comments.yml ###Output # Heading comment foo: 2019 bar: Hello, notebook # Line comment ###Markdown Define a class ###Code %%sh rm -f files/roundtrip_comments.yml from datafiles import datafile @datafile('files/roundtrip_comments.yml') class Sample: foo: int bar: str ###Output _____no_output_____ ###Markdown Initialize an instance ###Code sample = Sample(42, "Hello, world") %%sh cat files/roundtrip_comments.yml ###Output foo: 42 bar: Hello, world ###Markdown Modify the file ###Code %%writefile files/roundtrip_comments.yml # Heading comment foo: 42 bar: Hello, world! # Line comment %%sh cat files/roundtrip_comments.yml ###Output # Heading comment foo: 42 bar: Hello, world! # Line comment ###Markdown Modify the object ###Code sample.foo = 2019 sample.bar = "Hello, notebook" ###Output _____no_output_____ ###Markdown View merged contents ###Code %%sh cat files/roundtrip_comments.yml ###Output # Heading comment foo: 2019 bar: Hello, notebook # Line comment ###Markdown Define a class ###Code %%sh rm -f files/roundtrip_comments.yml from datafiles import datafile @datafile('files/roundtrip_comments.yml') class Sample: foo: int bar: str ###Output _____no_output_____ ###Markdown Initialize an instance ###Code sample = Sample(42, "Hello, world") %%sh cat files/roundtrip_comments.yml ###Output foo: 42 bar: Hello, world ###Markdown Modify the file ###Code %%writefile files/roundtrip_comments.yml # Heading comment foo: 42 bar: Hello, world! # Line comment %%sh cat files/roundtrip_comments.yml ###Output # Heading comment foo: 42 bar: Hello, world! # Line comment ###Markdown Modify the object ###Code sample.foo = 2019 sample.bar = "Hello, notebook" ###Output INFO: datafiles.managers: Loading 'Sample' object from 'files/roundtrip_comments.yml' INFO: datafiles.managers: Saving 'Sample' object to 'files/roundtrip_comments.yml' INFO: datafiles.managers: Saving 'Sample' object to 'files/roundtrip_comments.yml' ###Markdown View merged contents ###Code %%sh cat files/roundtrip_comments.yml ###Output # Heading comment foo: 2019 bar: Hello, notebook # Line comment ###Markdown Define a class ###Code %%sh rm -f files/roundtrip_comments.yml from datafiles import datafile @datafile('files/roundtrip_comments.yml') class Sample: foo: int bar: str ###Output _____no_output_____ ###Markdown Initialize an instance ###Code sample = Sample(42, "Hello, world") %%sh cat files/roundtrip_comments.yml ###Output foo: 42 bar: Hello, world ###Markdown Modify the file ###Code %%writefile files/roundtrip_comments.yml # Heading comment foo: 42 bar: Hello, world! # Line comment %%sh cat files/roundtrip_comments.yml ###Output # Heading comment foo: 42 bar: Hello, world! 
# Line comment ###Markdown Modify the object ###Code sample.foo = 2019 sample.bar = "Hello, notebook" ###Output _____no_output_____ ###Markdown View merged contents ###Code %%sh cat files/roundtrip_comments.yml ###Output # Heading comment foo: 2019 bar: Hello, notebook # Line comment ###Markdown Define a class ###Code %%sh rm -f files/roundtrip_comments.yml from datafiles import datafile @datafile('files/roundtrip_comments.yml') class Sample: foo: int bar: str ###Output _____no_output_____ ###Markdown Initialize an instance ###Code sample = Sample(42, "Hello, world") %%sh cat files/roundtrip_comments.yml ###Output foo: 42 bar: Hello, world ###Markdown Modify the file ###Code %%writefile files/roundtrip_comments.yml # Heading comment foo: 42 bar: Hello, world! # Line comment %%sh cat files/roundtrip_comments.yml ###Output # Heading comment foo: 42 bar: Hello, world! # Line comment ###Markdown Modify the object ###Code sample.foo = 2019 sample.bar = "Hello, notebook" ###Output _____no_output_____ ###Markdown View merged contents ###Code %%sh cat files/roundtrip_comments.yml ###Output # Heading comment foo: 2019 bar: Hello, notebook # Line comment ###Markdown Define a class ###Code %%sh rm -f files/roundtrip_comments.yml from datafiles import datafile @datafile('files/roundtrip_comments.yml') class Sample: foo: int bar: str ###Output _____no_output_____ ###Markdown Initialize an instance ###Code sample = Sample(42, "Hello, world") %%sh cat files/roundtrip_comments.yml ###Output foo: 42 bar: Hello, world ###Markdown Modify the file ###Code %%writefile files/roundtrip_comments.yml # Heading comment foo: 42 bar: Hello, world! # Line comment %%sh cat files/roundtrip_comments.yml ###Output # Heading comment foo: 42 bar: Hello, world! # Line comment ###Markdown Modify the object ###Code sample.foo = 2019 sample.bar = "Hello, notebook" ###Output _____no_output_____ ###Markdown View merged contents ###Code %%sh cat files/roundtrip_comments.yml ###Output # Heading comment foo: 2019 bar: Hello, notebook # Line comment ###Markdown Define a class ###Code %%sh rm -f files/roundtrip_comments.yml from datafiles import datafile @datafile('files/roundtrip_comments.yml') class Sample: foo: int bar: str ###Output _____no_output_____ ###Markdown Initialize an instance ###Code sample = Sample(42, "Hello, world") %%sh cat files/roundtrip_comments.yml ###Output foo: 42 bar: Hello, world ###Markdown Modify the file ###Code %%writefile files/roundtrip_comments.yml # Heading comment foo: 42 bar: Hello, world! # Line comment %%sh cat files/roundtrip_comments.yml ###Output # Heading comment foo: 42 bar: Hello, world! # Line comment ###Markdown Modify the object ###Code sample.foo = 2019 sample.bar = "Hello, notebook" ###Output INFO: datafiles.mapper: Loading 'Sample' object from 'files/roundtrip_comments.yml' ###Markdown View merged contents ###Code %%sh cat files/roundtrip_comments.yml ###Output # Heading comment foo: 2019 bar: Hello, notebook # Line comment
nbs/02_model_hub.ipynb
###Markdown Interacting with HuggingFace and Flair, Model Zoo> An interactive API for model lookup within HuggingFace and Flair > Note: For right now this is only available in the dev build of adaptnlp, which you can install with `pip install git+https://github.com/novetta/adaptnlp` ###Code #hide from nbdev.showdoc import show_doc from fastcore.test import * #export from fastcore.basics import Self, merge from fastcore.utils import dict2obj, obj2dict, mk_class from fastai_minima.utils import apply from huggingface_hub.hf_api import ModelInfo, HfApi from typing import List, Dict ###Output _____no_output_____ ###Markdown Tasks`HF_TASKS` and `FLAIR_TASKS` are namespace objects that can enable tab-completion when searching for specific tasks within the `HFModelHub` and `FlairModelHub` ###Code #exporti _hf_tasks = { 'FILL_MASK':'fill-mask', 'QUESTION_ANSWERING':'question-answering', 'SUMMARIZATION':'summarization', 'TABLE_QUESTION_ANSWERING':'table-question-answering', 'TEXT_CLASSIFICATION':'text-classification', 'TEXT_GENERATION':'text-generation', 'TEXT2TEXT_GENERATION':'text2text-generation', 'TOKEN_CLASSIFICATION':'token-classification', 'TRANSLATION':'translation', 'ZERO_SHOT_CLASSIFICATION':'zero-shot-classification', 'CONVERSATIONAL':'conversational', 'TEXT_TO_SPEECH':'text-to-speech', 'AUTOMATIC_SPEECH_RECOGNITION':'automatic-speech-recognition', 'AUDIO_SOURCE_SEPERATION':'audio-source-seperation', 'VOICE_ACTIVITY_DETECTION':'voice-activity-detection'} #export mk_class('HF_TASKS', **_hf_tasks, doc="A list of all HuggingFace tasks for valid API lookup as attribtues to get tab-completion and typo-proofing") #export _all_ = ['HF_TASKS'] show_doc(HF_TASKS, title_level=4) #hide_input print(f'Possible tasks:') for val in _hf_tasks.values(): print(f'* {val}') #exporti _flair_tasks = { 'NAMED_ENTITY_RECOGNITION':'ner', 'PHRASE_CHUNKING':'chunk', 'VERB_DISAMBIGUATION':'frame', 'PART_OF_SPEECH_TAGGING':'pos', 'UNIVERSAL_PART_OF_SPEECH_TAGGING':'upos', 'EMBEDDINGS':'embeddings', } #export mk_class('FLAIR_TASKS', **_flair_tasks, doc="A list of all Flair tasks for valid API lookup as attribtues to get tab-completion and typo-proofing") #export _all_ = ['FLAIR_TASKS'] show_doc(FLAIR_TASKS, title_level=4) #hide_input print(f'Possible tasks:') for val in _flair_tasks.values(): print(f'* {val}') #export class HFModelResult: """ A very basic class for storing a HuggingFace model returned through an API request They have 4 properties: - `name`: The `modelId` from the `modelInfo`. This also includes the model author's name, such as "IlyaGusev/mbart_ru_sum_gazeta" - `tags`: Any tags that were included in `HugginFace` in relation to the model. - `tasks`: These are the tasks dictated for the model. 
""" def __init__(self, model_info: ModelInfo): self.info = model_info def __repr__(self): return f'Model Name: {self.name}, Tasks: [' + ', '.join(self.tasks) + ']' @property def name(self): return self.info.modelId @property def tags(self): return self.info.tags @property def tasks(self): if self.info.pipeline_tag: all_tasks = [self.info.pipeline_tag] for tag in self.tags: if (tag in _hf_tasks.values()) and (tag not in all_tasks): all_tasks += [tag] else: all_tasks = [] all_tasks.sort() return all_tasks def to_dict(self): """ Returns `HFModelResult` as a dictionary with the keys: * `model_name` * `tags` * `tasks` * `model_info` """ return {'model_name':self.name, 'tags':self.tags, 'tasks':self.tasks, 'model_info':self.info} show_doc(HFModelResult) ###Output _____no_output_____ ###Markdown We look inside of `modelInfo.pipeline_tag` as well as the `tags` for if there is any overlap ###Code show_doc(HFModelResult.to_dict) #export class HFModelHub: """ A class for interacting with the HF model hub API, and searching for models by name or task Can optionally include your HuggingFace login for authorized access (but is not required) """ def __init__(self, username=None, password=None): self.api = HfApi() if username and password: self.token = self.api.login(username, password) elif username or password: print('Only a username or password was entered. You should include both to get authorized access') def _format_results(self, results:list, as_dict=False, user_uploaded=False) -> (List[HFModelResult], Dict[str, HFModelResult]): """ Takes raw HuggingFace API results and makes them easier to read and work with """ results = apply(HFModelResult, results) if not user_uploaded: results = [r for r in results if '/' not in r.name] if as_dict: dicts = apply(Self.to_dict(), results) results = {m['model_name'] : m for m in dicts} return results def search_model_by_task(self, task:str, as_dict=False, user_uploaded=False) -> (List[HFModelResult], Dict[str, HFModelResult]): """ Searches HuggingFace Model API for all pretrained models relating to `task` and returns a list of HFModelResults Optionally can return all models as a `dict` rather than a list If `user_uploaded` is False, will only return models originating in HuggingFace (such as distilgpt2) """ if task not in _hf_tasks.values(): raise ValueError(f'''`{task}` is not a valid task. Please choose a valid one available from HuggingFace: (https://huggingface.co/transformers/task_summary.html) Or with the `HF_TASKS` object''') models = self.api.list_models(task) return self._format_results(models, as_dict, user_uploaded) def search_model_by_name(self, name:str, as_dict=False, user_uploaded=False) -> (List[HFModelResult], Dict[str, HFModelResult]): """ Searches HuggingFace Model API for all pretrained models containing `name` and returns a list of HFModelResults Optionally can return all models as `dict` rather than a list If `user_uploaded` is False, will only return models originating from HuggingFace (such as distilgpt2) """ if user_uploaded: models = self.api.list_models() models = self._format_results(models, as_dict, user_uploaded) models = [m for m in models if name in m.name] else: models = self.api.list_models(name) models = self._format_results(models, as_dict, user_uploaded) return models ###Output _____no_output_____ ###Markdown The model search hub creates a friendly end-user API when searching through HuggingFace (and Flair, as we will see later). Usage is extremely simple as well. 
###Code show_doc(HFModelHub.search_model_by_task) ###Output _____no_output_____ ###Markdown More in depth examples are below: ###Code #hide hub = HFModelHub() models = hub.search_model_by_task('summarization', user_uploaded=False, as_dict=False) models #hide test_eq(models[0].name, 't5-11b') test_eq(models[0].tasks, ['summarization', 'text2text-generation', 'translation']) #hide models = hub.search_model_by_task('summarization', user_uploaded=True) models[:10] #hide model = models[5] author, name = model.name.split('/') test_eq(author, 'Callidior') test_eq(name, 'bert2bert-base-arxiv-titlegen') test_eq(model.tasks, ['summarization', 'text2text-generation']) ###Output _____no_output_____ ###Markdown There are also cases where a `dict` may be easier to work with (perhaps utilizing a network API, or ease of use for some). We can instead return a dictionary of `HFModelResult` objects too by passing `as_dict=True` to any search call: ###Code models = hub.search_model_by_task('summarization', as_dict=True); models['t5-11b'] #hide test_eq(models.keys(), ['t5-11b', 't5-3b', 't5-base', 't5-large', 't5-small']) test_eq(models['t5-11b'].keys(), ['model_name', 'tags', 'tasks', 'model_info']) ###Output _____no_output_____ ###Markdown This will return a dictionary of the name, the HuggingFace tags affiliated with the model, the dictated tasks, and an instance of `huggingface_hub`'s `ModelInfo`. ###Code show_doc(HFModelHub.search_model_by_name) ###Output _____no_output_____ ###Markdown With `search_model_by_name` you're allowed a bit more freedom in what you wish to search for. `search_model_by_name` downloads the entire list of models from `HuggingFace` then performs partial string matching. As a result you can search for all models by a particular user by doing: ###Code hub.search_model_by_name('Callidior', user_uploaded=True) ###Output _____no_output_____ ###Markdown Or (as implied by the function name) any model type itself: ###Code hub.search_model_by_name('gpt2', user_uploaded=True)[5:10] #exporti # Flair models originating from: # https://github.com/flairNLP/flair/blob/master/flair/models/text_classification_model.py#L483 # and: https://github.com/flairNLP/flair/blob/master/flair/models/sequence_tagger_model.py#L1053 # and: https://github.com/flairNLP/flair/blob/master/flair/embeddings/token.py#L406 _flair_models = { 'de-offensive-language' : ['text-classification'], 'sentiment' : ['text-classification'], 'en-sentiment' : ['text-classification'], 'sentiment-fast' : ['text-classification'], 'communicative-functions' : ['text-classification'], 'tars-base' : ['text-classification'], # English Named Entity Recognition Models (NER) 'ner' : ['token-classification'], 'ner-pooled' : ['token-classification'], 'ner-fast' : ['token-classification'], 'ner-ontonotes' : ['token-classification'], 'ner-ontonotes-fast' : ['token-classification'], # Multilingual NER models 'ner-multi' : ['token-classification'], 'multi-ner' : ['token-classification'], 'ner-multi-fast' : ['token-classification'], # English POS models 'upos' : ['token-classification'], 'upos-fast' : ['token-classification'], 'pos' : ['token-classification'], 'pos-fast' : ['token-classification'], # Multilingual POS models 'pos-multi' : ['token-classification'], 'multi-pos' : ['token-classification'], 'pos-multi-fast' : ['token-classification'], 'multi-pos-fast' : ['token-classification'], # English SRL models 'frame' : ['token-classification'], 'frame-fast' : ['token-classification'], # English chunking models 'chunk' : ['token-classification'], 
'chunk-fast' : ['token-classification'], # Danish models 'da-pos' : ['token-classification'], 'da-ner' : ['token-classification'], # German models 'de-pos' : ['token-classification'], 'de-pos-tweets' : ['token-classification'], 'de-ner' : ['token-classification'], 'de-ner-germeval' : ['token-classification'], 'de-ler' : ['token-classification'], 'de-ner-legal' : ['token-classification'], # French models 'fr-ner' : ['token-classification'], # Dutch models 'nl-ner' : ['token-classification'], 'nl-ner-rnn' : ['token-classification'], # Malayalam models 'ml-pos' : ['token-classification'], 'ml-upos' : ['token-classification'], # Portuguese models 'pt-pos-clinical' : ['token-classification'], # Keyphrase models 'keyphrase' : ['token-classification'], 'negation-speculation' : ['token-classification'], # Biomedical 'hunflair-paper-cellline' : ['token-classification'], 'hunflair-paper-chemical' : ['token-classification'], 'hunflair-paper-disease' : ['token-classification'], 'hunflair-paper-gene' : ['token-classification'], 'hunflair-paper-species' : ['token-classification'], 'hunflair-cellline' : ['token-classification'], 'hunflair-chemical' : ['token-classification'], 'hunflair-disease' : ['token-classification'], 'hunflair-gene' : ['token-classification'], 'hunflair-species' : ['token-classification'], # Embeddings # multilingual models "multi-forward":['embeddings'], "multi-backward":['embeddings'], "multi-v0-forward":['embeddings'], "multi-v0-backward":['embeddings'], "multi-forward-fast":['embeddings'], "multi-backward-fast":['embeddings'], # English models "en-forward":['embeddings'], "en-backward":['embeddings'], "en-forward-fast":['embeddings'], "en-backward-fast":['embeddings'], "news-forward":['embeddings'], "news-backward":['embeddings'], "news-forward-fast":['embeddings'], "news-backward-fast":['embeddings'], "mix-forward":['embeddings'], "mix-backward":['embeddings'], # Arabic "ar-forward":['embeddings'], "ar-backward":['embeddings'], # Bulgarian "bg-forward-fast":['embeddings'], "bg-backward-fast":['embeddings'], "bg-forward":['embeddings'], "bg-backward":['embeddings'], # Czech "cs-forward":['embeddings'], "cs-backward":['embeddings'], "cs-v0-forward":['embeddings'], "cs-v0-backward":['embeddings'], # Danish "da-forward":['embeddings'], "da-backward":['embeddings'], # German "de-forward":['embeddings'], "de-backward":['embeddings'], "de-historic-ha-forward":['embeddings'], "de-historic-ha-backward":['embeddings'], "de-historic-wz-forward":['embeddings'], "de-historic-wz-backward":['embeddings'], "de-historic-rw-forward":['embeddings'], "de-historic-rw-backward":['embeddings'], # Spanish "es-forward":['embeddings'], "es-backward":['embeddings'], "es-forward-fast":['embeddings'], "es-backward-fast":['embeddings'], # Basque "eu-forward":['embeddings'], "eu-backward":['embeddings'], "eu-v1-forward":['embeddings'], "eu-v1-backward":['embeddings'], "eu-v0-forward":['embeddings'], "eu-v0-backward":['embeddings'], # Persian "fa-forward":['embeddings'], "fa-backward":['embeddings'], # Finnish "fi-forward":['embeddings'], "fi-backward":['embeddings'], # French "fr-forward":['embeddings'], "fr-backward":['embeddings'], # Hebrew "he-forward":['embeddings'], "he-backward":['embeddings'], # Hindi "hi-forward":['embeddings'], "hi-backward":['embeddings'], # Croatian "hr-forward":['embeddings'], "hr-backward":['embeddings'], # Indonesian "id-forward":['embeddings'], "id-backward":['embeddings'], # Italian "it-forward":['embeddings'], "it-backward":['embeddings'], # Japanese 
"ja-forward":['embeddings'], "ja-backward":['embeddings'], # Malayalam "ml-forward":['embeddings'], "ml-backward":['embeddings'], # Dutch "nl-forward":['embeddings'], "nl-backward":['embeddings'], "nl-v0-forward":['embeddings'], "nl-v0-backward":['embeddings'], # Norwegian "no-forward":['embeddings'], "no-backward":['embeddings'], # Polish "pl-forward":['embeddings'], "pl-backward":['embeddings'], "pl-opus-forward":['embeddings'], "pl-opus-backward":['embeddings'], # Portuguese "pt-forward":['embeddings'], "pt-backward":['embeddings'], # Pubmed "pubmed-forward":['embeddings'], "pubmed-backward":['embeddings'], "pubmed-2015-forward":['embeddings'], "pubmed-2015-backward":['embeddings'], # Slovenian "sl-forward":['embeddings'], "sl-backward":['embeddings'], "sl-v0-forward":['embeddings'], "sl-v0-backward":['embeddings'], # Swedish "sv-forward":['embeddings'], "sv-backward":['embeddings'], "sv-v0-forward":['embeddings'], "sv-v0-backward":['embeddings'], # Tamil "ta-forward":['embeddings'], "ta-backward":['embeddings'], # CLEF HIPE Shared task "de-impresso-hipe-v1-forward":['embeddings'], "de-impresso-hipe-v1-backward":['embeddings'], "en-impresso-hipe-v1-forward":['embeddings'], "en-impresso-hipe-v1-backward":['embeddings'], "fr-impresso-hipe-v1-forward":['embeddings'], "fr-impresso-hipe-v1-backward":['embeddings'] } #export FLAIR_MODELS = [ModelInfo(f'flairNLP/{key}', pipeline_tag=val[0]) for key,val in _flair_models.items()] ###Output _____no_output_____ ###Markdown Flair has a series of extra models available for use that are not available through HuggingFace such as `sentiment`, `communicative-functions`, and more. `FLAIR_MODELS` is a convience holder for quick lookup of these models (as no such list is easily available currently). When shown as results on the API they will be given the same `flair` prefix for convience. ###Code #export class FlairModelResult(HFModelResult): """ A version of `HFModelResult` for Flair specifically. Includes which backend the model was found (such as on HuggingFace or Flair's private model list) """ def __init__(self, model_info: ModelInfo): if 'flairNLP' in model_info.modelId: self.from_hf = False else: self.from_hf = True super().__init__(model_info) def __repr__(self): return f'Model Name: {self.name.replace("flairNLP", "flair")}, Tasks: [' + ', '.join(self.tasks) + ']' + f', Source: {self.source}' @property def source(self): if self.from_hf: return "HuggingFace Model Hub" else: return "Flair's Private Model Hub" #export class FlairModelHub: """ A class for interacting with the HF model hub API, and searching for Flair models by name or task Can optionally include your HuggingFace login for authorized access (but is not required) """ def __init__(self, username=None, password=None): self.api = HfApi() if username and password: self.token = self.api.login(username, password) elif username or password: print('Only a username or password was entered. 
You should include both to get authorized access') self.models = self.api.list_models('flair') + FLAIR_MODELS def _format_results(self, results:list, as_dict=False, user_uploaded=False) -> (List[HFModelResult], Dict[str, HFModelResult]): """ Takes raw HuggingFace API results and makes them easier to read and work with """ results = apply(FlairModelResult, results) if not user_uploaded: results = [r for r in results if 'flair/' in r.name or 'flairNLP/' in r.name] if as_dict: dicts = apply(Self.to_dict(), results) results = {m['model_name'] : m for m in dicts} return results def search_model_by_name(self, name:str, as_dict=False, user_uploaded=False) -> (List[HFModelResult], Dict[str, HFModelResult]): """ Searches HuggingFace Model API for all flair models containing `name` and returns a list of `HFModelResults` Optionally can return all models as `dict` rather than a list If `user_uploaded` is False, will only return models originating from Flair (such as flair/chunk-english-fast) Usage: ```python hub = FlairModelHubSearch() hub.search_model_by_name('flair/chunk-english-fast') ``` """ models = [m for m in self.models if name in m.modelId] return self._format_results(models, as_dict, user_uploaded) def search_model_by_task(self, task:str, as_dict=False, user_uploaded=False) -> (List[HFModelResult], Dict[str, HFModelResult]): """ Searches HuggingFace Model API for all flair models for `task` and returns a list of `HFModelResults` Optionally can return all models as `dict` rather than a list If `user_uploaded` is False, will only return models originating from Flair (such as flair/chunk-english-fast) """ if (task not in _flair_tasks.values()) and (task != ''): raise ValueError(f'''`{task}` is not a valid task. Please choose a valid one available from Flair: (https://huggingface.co/flair) Or with the `FLAIR_TASKS` object''') models = [m for m in self.models if task in m.modelId or task == m.pipeline_tag] return self._format_results(models, as_dict, user_uploaded) show_doc(FlairModelHub) ###Output _____no_output_____ ###Markdown `FlairModelHub` is extremely similar to `HFModelHub`, with the two differences being that it will **only** return `Flair` models, and it has access to the *other* Flair models available that can't be accessed through the HuggingFace model hub ###Code hub = FlairModelHub() show_doc(FlairModelHub.search_model_by_name) ###Output _____no_output_____ ###Markdown `seach_model_by_name` will also let you search for models without needing the `flair` prefix, such as: ###Code hub.search_model_by_name('sentiment') #hide test_eq(0, len(hub.search_model_by_name('gpt'))) #hide hub = FlairModelHub() #hide models = hub.search_model_by_task('ner') models = [m for m in models if m.source == "Flair's Private Model Hub"] test_eq(len(models), 15) show_doc(FlairModelHub.search_model_by_task) ###Output _____no_output_____ ###Markdown Interacting with HuggingFace and Flair, Model Zoo> An interactive API for model lookup within HuggingFace and Flair ###Code #hide from nbverbose.showdoc import * from fastcore.test import * #export from fastcore.basics import Self, merge from fastcore.utils import dict2obj, obj2dict, mk_class from fastai.torch_core import apply from huggingface_hub.hf_api import ModelInfo, HfApi from typing import List, Dict ###Output _____no_output_____ ###Markdown Tasks`HF_TASKS` and `FLAIR_TASKS` are namespace objects that can enable tab-completion when searching for specific tasks within the `HFModelHub` and `FlairModelHub` ###Code #exporti _hf_tasks = { 
'FILL_MASK':'fill-mask', 'QUESTION_ANSWERING':'question-answering', 'SUMMARIZATION':'summarization', 'TABLE_QUESTION_ANSWERING':'table-question-answering', 'TEXT_CLASSIFICATION':'text-classification', 'TEXT_GENERATION':'text-generation', 'TEXT2TEXT_GENERATION':'text2text-generation', 'TOKEN_CLASSIFICATION':'token-classification', 'TRANSLATION':'translation', 'ZERO_SHOT_CLASSIFICATION':'zero-shot-classification', 'CONVERSATIONAL':'conversational', 'TEXT_TO_SPEECH':'text-to-speech', 'AUTOMATIC_SPEECH_RECOGNITION':'automatic-speech-recognition', 'AUDIO_SOURCE_SEPERATION':'audio-source-seperation', 'VOICE_ACTIVITY_DETECTION':'voice-activity-detection'} #export mk_class('HF_TASKS', **_hf_tasks, doc="A list of all HuggingFace tasks for valid API lookup as attribtues to get tab-completion and typo-proofing") #export _all_ = ['HF_TASKS'] show_doc(HF_TASKS, title_level=4) #hide_input print(f'Possible tasks:') for val in _hf_tasks.values(): print(f'* {val}') #exporti _flair_tasks = { 'NAMED_ENTITY_RECOGNITION':'ner', 'PHRASE_CHUNKING':'chunk', 'VERB_DISAMBIGUATION':'frame', 'PART_OF_SPEECH_TAGGING':'pos', 'UNIVERSAL_PART_OF_SPEECH_TAGGING':'upos', 'EMBEDDINGS':'embeddings', } #export mk_class('FLAIR_TASKS', **_flair_tasks, doc="A list of all Flair tasks for valid API lookup as attributes to get tab-completion and typo-proofing") #export _all_ = ['FLAIR_TASKS'] show_doc(FLAIR_TASKS, title_level=4) #hide_input print(f'Possible tasks:') for val in _flair_tasks.values(): print(f'* {val}') #export class HFModelResult: """ A very basic class for storing a HuggingFace model returned through an API request They have 4 properties: - `name`: The `modelId` from the `modelInfo`. This also includes the model author's name, such as "IlyaGusev/mbart_ru_sum_gazeta" - `tags`: Any tags that were included in `HuggingFace` in relation to the model. - `tasks`: These are the tasks dictated for the model. """ def __init__( self, model_info: ModelInfo # `ModelInfo` object from HuggingFace model hub ): self.info = model_info def __repr__(self): return f'Model Name: {self.name}, Tasks: [' + ', '.join(self.tasks) + ']' @property def name(self): return self.info.modelId @property def tags(self): return self.info.tags @property def tasks(self): if self.info.pipeline_tag: all_tasks = [self.info.pipeline_tag] for tag in self.tags: if (tag in _hf_tasks.values()) and (tag not in all_tasks): all_tasks += [tag] else: all_tasks = [] all_tasks.sort() return all_tasks def to_dict( self ) -> dict: # Dictionary with keys `model_name`, `tags`, `tasks`, `model_info` "Returns `HFModelResult` as a dictionary" return {'model_name':self.name, 'tags':self.tags, 'tasks':self.tasks, 'model_info':self.info} show_doc(HFModelResult) ###Output _____no_output_____ ###Markdown We look inside of `modelInfo.pipeline_tag` as well as the `tags` for if there is any overlap ###Code show_doc(HFModelResult.to_dict) #export class HFModelHub: "A class for interacting with the HF model hub API, and searching for models by name or task" def __init__( self, username:str=None, # Your HuggingFace username password:str=None # Your HuggingFace password ): self.api = HfApi() if username and password: self.token = self.api.login(username, password) elif username or password: print('Only a username or password was entered. 
You should include both to get authorized access') def _format_results( self, results:list, # A list of HuggingFace API results as_dict:bool=False, # Whether to return as a dictionary or list user_uploaded:bool=False # Whether to filter out user-uploaded results ) -> (List[HFModelResult], Dict[str, HFModelResult]): # A list of `HFModelResult`s "Takes raw HuggingFace API results and makes them easier to read and work with" results = apply(HFModelResult, results) if not user_uploaded: results = [r for r in results if '/' not in r.name] if as_dict: dicts = apply(Self.to_dict(), results) results = {m['model_name'] : m for m in dicts} return results def search_model_by_task( self, task:str, # A valid task to search in the HuggingFace hub for as_dict:bool=False, # Whether to return as a dictionary or list user_uploaded:bool=False # Whether to filter out user-uploaded results ) -> (List[HFModelResult], Dict[str, HFModelResult]): # A list of `HFModelResult`s "Searches HuggingFace Model API for all pretrained models relating to `task`" if task not in _hf_tasks.values(): raise ValueError(f'''`{task}` is not a valid task. Please choose a valid one available from HuggingFace: (https://huggingface.co/transformers/task_summary.html) Or with the `HF_TASKS` object''') models = self.api.list_models(task) return self._format_results(models, as_dict, user_uploaded) def search_model_by_name( self, name:str, # A valid model name as_dict:bool=False, # Whether to return as a dictionary or list user_uploaded:bool=False # Whether to filter out user-uploaded results ) -> (List[HFModelResult], Dict[str, HFModelResult]): # A list of `HFModelResult`s "Searches HuggingFace Model API for all pretrained models containing `name`" if user_uploaded: models = self.api.list_models() models = self._format_results(models, as_dict, user_uploaded) models = [m for m in models if name in m.name] else: models = self.api.list_models(name) models = self._format_results(models, as_dict, user_uploaded) return models ###Output _____no_output_____ ###Markdown The model search hub creates a friendly end-user API when searching through HuggingFace (and Flair, as we will see later). Usage is extremely simple as well. ###Code show_doc(HFModelHub.search_model_by_task) ###Output _____no_output_____ ###Markdown This will return a list of models available for a particular class. A few usage examples are below: ###Code hub = HFModelHub() models = hub.search_model_by_task('summarization', user_uploaded=False, as_dict=False) models #hide test_eq(models[0].name, 't5-11b') test_eq(models[0].tasks, ['summarization', 'text2text-generation', 'translation']) ###Output _____no_output_____ ###Markdown We can also search for any user-uploaded models from the community too: ###Code models = hub.search_model_by_task('summarization', user_uploaded=True) models[:10] ###Output _____no_output_____ ###Markdown There are also cases where a `dict` may be easier to work with (perhaps utilizing a network API, or ease of use for some). 
We can instead return a dictionary of `HFModelResult` objects too by passing `as_dict=True` to any search call: ###Code models = hub.search_model_by_task('summarization', as_dict=True); models['t5-11b'] #hide test_eq(models.keys(), ['t5-11b', 't5-3b', 't5-base', 't5-large', 't5-small']) test_eq(models['t5-11b'].keys(), ['model_name', 'tags', 'tasks', 'model_info']) ###Output _____no_output_____ ###Markdown This will return a dictionary of the name, the HuggingFace tags affiliated with the model, the dictated tasks, and an instance of `huggingface_hub`'s `ModelInfo`. ###Code show_doc(HFModelHub.search_model_by_name) ###Output _____no_output_____ ###Markdown With `search_model_by_name` you're allowed a bit more freedom in what you wish to search for. `search_model_by_name` downloads the entire list of models from `HuggingFace` then performs partial string matching. As a result you can search for all models by a particular user by doing: ###Code hub.search_model_by_name('Callidior', user_uploaded=True) ###Output _____no_output_____ ###Markdown Or (as implied by the function name) any model type itself: ###Code hub.search_model_by_name('gpt2', user_uploaded=True)[5:10] #exporti # Flair models originating from: # https://github.com/flairNLP/flair/blob/master/flair/models/text_classification_model.py#L483 # and: https://github.com/flairNLP/flair/blob/master/flair/models/sequence_tagger_model.py#L1053 # and: https://github.com/flairNLP/flair/blob/master/flair/embeddings/token.py#L406 _flair_models = { 'de-offensive-language' : ['text-classification'], 'sentiment' : ['text-classification'], 'en-sentiment' : ['text-classification'], 'sentiment-fast' : ['text-classification'], 'communicative-functions' : ['text-classification'], 'tars-base' : ['text-classification'], # English Named Entity Recognition Models (NER) 'ner' : ['token-classification'], 'ner-pooled' : ['token-classification'], 'ner-fast' : ['token-classification'], 'ner-ontonotes' : ['token-classification'], 'ner-ontonotes-fast' : ['token-classification'], # Multilingual NER models 'ner-multi' : ['token-classification'], 'multi-ner' : ['token-classification'], 'ner-multi-fast' : ['token-classification'], # English POS models 'upos' : ['token-classification'], 'upos-fast' : ['token-classification'], 'pos' : ['token-classification'], 'pos-fast' : ['token-classification'], # Multilingual POS models 'pos-multi' : ['token-classification'], 'multi-pos' : ['token-classification'], 'pos-multi-fast' : ['token-classification'], 'multi-pos-fast' : ['token-classification'], # English SRL models 'frame' : ['token-classification'], 'frame-fast' : ['token-classification'], # English chunking models 'chunk' : ['token-classification'], 'chunk-fast' : ['token-classification'], # Danish models 'da-pos' : ['token-classification'], 'da-ner' : ['token-classification'], # German models 'de-pos' : ['token-classification'], 'de-pos-tweets' : ['token-classification'], 'de-ner' : ['token-classification'], 'de-ner-germeval' : ['token-classification'], 'de-ler' : ['token-classification'], 'de-ner-legal' : ['token-classification'], # French models 'fr-ner' : ['token-classification'], # Dutch models 'nl-ner' : ['token-classification'], 'nl-ner-rnn' : ['token-classification'], # Malayalam models 'ml-pos' : ['token-classification'], 'ml-upos' : ['token-classification'], # Portuguese models 'pt-pos-clinical' : ['token-classification'], # Keyphrase models 'keyphrase' : ['token-classification'], 'negation-speculation' : ['token-classification'], # Biomedical 
'hunflair-paper-cellline' : ['token-classification'], 'hunflair-paper-chemical' : ['token-classification'], 'hunflair-paper-disease' : ['token-classification'], 'hunflair-paper-gene' : ['token-classification'], 'hunflair-paper-species' : ['token-classification'], 'hunflair-cellline' : ['token-classification'], 'hunflair-chemical' : ['token-classification'], 'hunflair-disease' : ['token-classification'], 'hunflair-gene' : ['token-classification'], 'hunflair-species' : ['token-classification'], # Embeddings # multilingual models "multi-forward":['embeddings'], "multi-backward":['embeddings'], "multi-v0-forward":['embeddings'], "multi-v0-backward":['embeddings'], "multi-forward-fast":['embeddings'], "multi-backward-fast":['embeddings'], # English models "en-forward":['embeddings'], "en-backward":['embeddings'], "en-forward-fast":['embeddings'], "en-backward-fast":['embeddings'], "news-forward":['embeddings'], "news-backward":['embeddings'], "news-forward-fast":['embeddings'], "news-backward-fast":['embeddings'], "mix-forward":['embeddings'], "mix-backward":['embeddings'], # Arabic "ar-forward":['embeddings'], "ar-backward":['embeddings'], # Bulgarian "bg-forward-fast":['embeddings'], "bg-backward-fast":['embeddings'], "bg-forward":['embeddings'], "bg-backward":['embeddings'], # Czech "cs-forward":['embeddings'], "cs-backward":['embeddings'], "cs-v0-forward":['embeddings'], "cs-v0-backward":['embeddings'], # Danish "da-forward":['embeddings'], "da-backward":['embeddings'], # German "de-forward":['embeddings'], "de-backward":['embeddings'], "de-historic-ha-forward":['embeddings'], "de-historic-ha-backward":['embeddings'], "de-historic-wz-forward":['embeddings'], "de-historic-wz-backward":['embeddings'], "de-historic-rw-forward":['embeddings'], "de-historic-rw-backward":['embeddings'], # Spanish "es-forward":['embeddings'], "es-backward":['embeddings'], "es-forward-fast":['embeddings'], "es-backward-fast":['embeddings'], # Basque "eu-forward":['embeddings'], "eu-backward":['embeddings'], "eu-v1-forward":['embeddings'], "eu-v1-backward":['embeddings'], "eu-v0-forward":['embeddings'], "eu-v0-backward":['embeddings'], # Persian "fa-forward":['embeddings'], "fa-backward":['embeddings'], # Finnish "fi-forward":['embeddings'], "fi-backward":['embeddings'], # French "fr-forward":['embeddings'], "fr-backward":['embeddings'], # Hebrew "he-forward":['embeddings'], "he-backward":['embeddings'], # Hindi "hi-forward":['embeddings'], "hi-backward":['embeddings'], # Croatian "hr-forward":['embeddings'], "hr-backward":['embeddings'], # Indonesian "id-forward":['embeddings'], "id-backward":['embeddings'], # Italian "it-forward":['embeddings'], "it-backward":['embeddings'], # Japanese "ja-forward":['embeddings'], "ja-backward":['embeddings'], # Malayalam "ml-forward":['embeddings'], "ml-backward":['embeddings'], # Dutch "nl-forward":['embeddings'], "nl-backward":['embeddings'], "nl-v0-forward":['embeddings'], "nl-v0-backward":['embeddings'], # Norwegian "no-forward":['embeddings'], "no-backward":['embeddings'], # Polish "pl-forward":['embeddings'], "pl-backward":['embeddings'], "pl-opus-forward":['embeddings'], "pl-opus-backward":['embeddings'], # Portuguese "pt-forward":['embeddings'], "pt-backward":['embeddings'], # Pubmed "pubmed-forward":['embeddings'], "pubmed-backward":['embeddings'], "pubmed-2015-forward":['embeddings'], "pubmed-2015-backward":['embeddings'], # Slovenian "sl-forward":['embeddings'], "sl-backward":['embeddings'], "sl-v0-forward":['embeddings'], "sl-v0-backward":['embeddings'], # Swedish 
"sv-forward":['embeddings'], "sv-backward":['embeddings'], "sv-v0-forward":['embeddings'], "sv-v0-backward":['embeddings'], # Tamil "ta-forward":['embeddings'], "ta-backward":['embeddings'], # CLEF HIPE Shared task "de-impresso-hipe-v1-forward":['embeddings'], "de-impresso-hipe-v1-backward":['embeddings'], "en-impresso-hipe-v1-forward":['embeddings'], "en-impresso-hipe-v1-backward":['embeddings'], "fr-impresso-hipe-v1-forward":['embeddings'], "fr-impresso-hipe-v1-backward":['embeddings'] } #export FLAIR_MODELS = [ModelInfo(f'flairNLP/{key}', pipeline_tag=val[0]) for key,val in _flair_models.items()] ###Output _____no_output_____ ###Markdown Flair has a series of extra models available for use that are not available through HuggingFace such as `sentiment`, `communicative-functions`, and more. `FLAIR_MODELS` is a convience holder for quick lookup of these models (as no such list is easily available currently). When shown as results on the API they will be given the same `flair` prefix for convience. ###Code #export class FlairModelResult(HFModelResult): """ A version of `HFModelResult` for Flair specifically. Includes which backend the model was found (such as on HuggingFace or Flair's private model list) """ def __init__( self, model_info: ModelInfo # ModelInfo object from HuggingFace model hub ): if 'flairNLP' in model_info.modelId: self.from_hf = False else: self.from_hf = True super().__init__(model_info) def __repr__(self): return f'Model Name: {self.name.replace("flairNLP", "flair")}, Tasks: [' + ', '.join(self.tasks) + ']' + f', Source: {self.source}' @property def source(self): if self.from_hf: return "HuggingFace Model Hub" else: return "Flair's Private Model Hub" #export class FlairModelHub: "A class for interacting with the HF model hub API, and searching for Flair models by name or task" def __init__( self, username:str=None, # HuggingFace username password:str=None # HuggingFace password ): self.api = HfApi() if username and password: self.token = self.api.login(username, password) elif username or password: print('Only a username or password was entered. 
You should include both to get authorized access') self.models = self.api.list_models('flair') + FLAIR_MODELS def _format_results( self, results:list, # A list of HuggingFace API results as_dict:bool=False, # Whether to return as a dictionary or list user_uploaded:bool=False # Whether to filter out user-uploaded results ) -> (List[FlairModelResult], Dict[str, FlairModelResult]): # A list of `FlairModelResult`s "Takes raw HuggingFace API results and makes them easier to read and work with" results = apply(FlairModelResult, results) if not user_uploaded: results = [r for r in results if 'flair/' in r.name or 'flairNLP/' in r.name] if as_dict: dicts = apply(Self.to_dict(), results) results = {m['model_name'] : m for m in dicts} return results def search_model_by_name( self, name:str, # A valid model name as_dict:bool=False, # Whether to return as a dictionary or list user_uploaded:bool=False # Whether to filter out user-uploaded results ) -> (List[FlairModelResult], Dict[str, FlairModelResult]): # A list of `FlairModelResult`s "Searches HuggingFace Model API for all flair models containing `name`" models = [m for m in self.models if name in m.modelId] return self._format_results(models, as_dict, user_uploaded) def search_model_by_task( self, task:str, # A valid task to search the HuggingFace hub for as_dict=False, # Whether to return as a dictionary or list user_uploaded=False # Whether to filter out user-uploaded results ) -> (List[FlairModelResult], Dict[str, FlairModelResult]): # A list of `FlairModelResult`s "Searches HuggingFace Model API for all flair models for `task`" if (task not in _flair_tasks.values()) and (task != ''): raise ValueError(f'''`{task}` is not a valid task. Please choose a valid one available from Flair: (https://huggingface.co/flair) Or with the `FLAIR_TASKS` object''') models = [m for m in self.models if task in m.modelId or task == m.pipeline_tag] return self._format_results(models, as_dict, user_uploaded) show_doc(FlairModelHub) ###Output _____no_output_____ ###Markdown `FlairModelHub` is extremely similar to `HFModelHub`, with the two differences being that it will **only** return `Flair` models, and it has access to the *other* Flair models available that can't be accessed through the HuggingFace model hub ###Code hub = FlairModelHub() show_doc(FlairModelHub.search_model_by_name) ###Output _____no_output_____ ###Markdown `seach_model_by_name` will also let you search for models without needing the `flair` prefix, such as: ###Code hub.search_model_by_name('sentiment') #hide test_eq(0, len(hub.search_model_by_name('gpt'))) show_doc(FlairModelHub.search_model_by_task) ###Output _____no_output_____ ###Markdown Since we have a `FLAIR_TASKS` object declared earlier, we can utilize it when searching for models by a task. 
Similar to `search_model_by_name` you should not include `flair/` in your search results, and instead search through the task key such as `ner` or `FLAIR_TASKS.NAMED_ENTITY_RECOGNITION` ###Code #hide models = hub.search_model_by_task('ner') models = [m for m in models if m.source == "Flair's Private Model Hub"] test_eq(len(models), 15) ###Output _____no_output_____ ###Markdown Interacting with HuggingFace and Flair, Model Zoo> An interactive API for model lookup within HuggingFace and Flair ###Code #hide from nbverbose.showdoc import * from fastcore.test import * #export from fastcore.basics import Self, merge from fastcore.utils import dict2obj, obj2dict, mk_class from fastai.torch_core import apply from huggingface_hub.hf_api import ModelInfo, HfApi from typing import List, Dict ###Output _____no_output_____ ###Markdown Tasks`HF_TASKS` and `FLAIR_TASKS` are namespace objects that can enable tab-completion when searching for specific tasks within the `HFModelHub` and `FlairModelHub` ###Code #exporti _hf_tasks = { 'FILL_MASK':'fill-mask', 'QUESTION_ANSWERING':'question-answering', 'SUMMARIZATION':'summarization', 'TABLE_QUESTION_ANSWERING':'table-question-answering', 'TEXT_CLASSIFICATION':'text-classification', 'TEXT_GENERATION':'text-generation', 'TEXT2TEXT_GENERATION':'text2text-generation', 'TOKEN_CLASSIFICATION':'token-classification', 'TRANSLATION':'translation', 'ZERO_SHOT_CLASSIFICATION':'zero-shot-classification', 'CONVERSATIONAL':'conversational', 'TEXT_TO_SPEECH':'text-to-speech', 'AUTOMATIC_SPEECH_RECOGNITION':'automatic-speech-recognition', 'AUDIO_SOURCE_SEPERATION':'audio-source-seperation', 'VOICE_ACTIVITY_DETECTION':'voice-activity-detection'} #export mk_class('HF_TASKS', **_hf_tasks, doc="A list of all HuggingFace tasks for valid API lookup as attribtues to get tab-completion and typo-proofing") #export _all_ = ['HF_TASKS'] show_doc(HF_TASKS, title_level=4) #hide_input print(f'Possible tasks:') for val in _hf_tasks.values(): print(f'* {val}') #exporti _flair_tasks = { 'NAMED_ENTITY_RECOGNITION':'ner', 'PHRASE_CHUNKING':'chunk', 'VERB_DISAMBIGUATION':'frame', 'PART_OF_SPEECH_TAGGING':'pos', 'UNIVERSAL_PART_OF_SPEECH_TAGGING':'upos', 'EMBEDDINGS':'embeddings', } #export mk_class('FLAIR_TASKS', **_flair_tasks, doc="A list of all Flair tasks for valid API lookup as attributes to get tab-completion and typo-proofing") #export _all_ = ['FLAIR_TASKS'] show_doc(FLAIR_TASKS, title_level=4) #hide_input print(f'Possible tasks:') for val in _flair_tasks.values(): print(f'* {val}') #export class HFModelResult: """ A very basic class for storing a HuggingFace model returned through an API request They have 4 properties: - `name`: The `modelId` from the `modelInfo`. This also includes the model author's name, such as "IlyaGusev/mbart_ru_sum_gazeta" - `tags`: Any tags that were included in `HuggingFace` in relation to the model. - `tasks`: These are the tasks dictated for the model. 
""" def __init__( self, model_info: ModelInfo # `ModelInfo` object from HuggingFace model hub ): self.info = model_info def __repr__(self): return f'Model Name: {self.name}, Tasks: [' + ', '.join(self.tasks) + ']' @property def name(self): return self.info.modelId @property def tags(self): return self.info.tags @property def tasks(self): if self.info.pipeline_tag: all_tasks = [self.info.pipeline_tag] for tag in self.tags: if (tag in _hf_tasks.values()) and (tag not in all_tasks): all_tasks += [tag] else: all_tasks = [] all_tasks.sort() return all_tasks def to_dict( self ) -> dict: # Dictionary with keys `model_name`, `tags`, `tasks`, `model_info` "Returns `HFModelResult` as a dictionary" return {'model_name':self.name, 'tags':self.tags, 'tasks':self.tasks, 'model_info':self.info} show_doc(HFModelResult) ###Output _____no_output_____ ###Markdown We look inside of `modelInfo.pipeline_tag` as well as the `tags` for if there is any overlap ###Code show_doc(HFModelResult.to_dict) #export class HFModelHub: "A class for interacting with the HF model hub API, and searching for models by name or task" def __init__( self, username:str=None, # Your HuggingFace username password:str=None # Your HuggingFace password ): self.api = HfApi() if username and password: self.token = self.api.login(username, password) elif username or password: print('Only a username or password was entered. You should include both to get authorized access') def _format_results( self, results:list, # A list of HuggingFace API results as_dict:bool=False, # Whether to return as a dictionary or list user_uploaded:bool=False # Whether to filter out user-uploaded results ) -> (List[HFModelResult], Dict[str, HFModelResult]): # A list of `HFModelResult`s "Takes raw HuggingFace API results and makes them easier to read and work with" results = apply(HFModelResult, results) if not user_uploaded: results = [r for r in results if '/' not in r.name] if as_dict: dicts = apply(Self.to_dict(), results) results = {m['model_name'] : m for m in dicts} return results def search_model_by_task( self, task:str, # A valid task to search in the HuggingFace hub for as_dict:bool=False, # Whether to return as a dictionary or list user_uploaded:bool=False # Whether to filter out user-uploaded results ) -> (List[HFModelResult], Dict[str, HFModelResult]): # A list of `HFModelResult`s "Searches HuggingFace Model API for all pretrained models relating to `task`" if task not in _hf_tasks.values(): raise ValueError(f'''`{task}` is not a valid task. 
Please choose a valid one available from HuggingFace: (https://huggingface.co/transformers/task_summary.html) Or with the `HF_TASKS` object''') models = self.api.list_models(task) return self._format_results(models, as_dict, user_uploaded) def search_model_by_name( self, name:str, # A valid model name as_dict:bool=False, # Whether to return as a dictionary or list user_uploaded:bool=False # Whether to filter out user-uploaded results ) -> (List[HFModelResult], Dict[str, HFModelResult]): # A list of `HFModelResult`s "Searches HuggingFace Model API for all pretrained models containing `name`" if user_uploaded: models = self.api.list_models() models = self._format_results(models, as_dict, user_uploaded) models = [m for m in models if name in m.name] else: models = self.api.list_models(name) models = self._format_results(models, as_dict, user_uploaded) return models ###Output _____no_output_____ ###Markdown The model search hub creates a friendly end-user API when searching through HuggingFace (and Flair, as we will see later). Usage is extremely simple as well. ###Code show_doc(HFModelHub.search_model_by_task) ###Output _____no_output_____ ###Markdown This will return a list of models available for a particular class. A few usage examples are below: ###Code hub = HFModelHub() models = hub.search_model_by_task('summarization', user_uploaded=False, as_dict=False) models #hide test_eq(models[0].name, 't5-11b') test_eq(models[0].tasks, ['summarization', 'text2text-generation', 'translation']) ###Output _____no_output_____ ###Markdown We can also search for any user-uploaded models from the community too: ###Code models = hub.search_model_by_task('summarization', user_uploaded=True) models[:10] #hide model = models[5] author, name = model.name.split('/') test_eq(author, 'Callidior') test_eq(name, 'bert2bert-base-arxiv-titlegen') test_eq(model.tasks, ['summarization', 'text2text-generation']) ###Output _____no_output_____ ###Markdown There are also cases where a `dict` may be easier to work with (perhaps utilizing a network API, or ease of use for some). We can instead return a dictionary of `HFModelResult` objects too by passing `as_dict=True` to any search call: ###Code models = hub.search_model_by_task('summarization', as_dict=True); models['t5-11b'] #hide test_eq(models.keys(), ['t5-11b', 't5-3b', 't5-base', 't5-large', 't5-small']) test_eq(models['t5-11b'].keys(), ['model_name', 'tags', 'tasks', 'model_info']) ###Output _____no_output_____ ###Markdown This will return a dictionary of the name, the HuggingFace tags affiliated with the model, the dictated tasks, and an instance of `huggingface_hub`'s `ModelInfo`. ###Code show_doc(HFModelHub.search_model_by_name) ###Output _____no_output_____ ###Markdown With `search_model_by_name` you're allowed a bit more freedom in what you wish to search for. `search_model_by_name` downloads the entire list of models from `HuggingFace` then performs partial string matching. 
As a result you can search for all models by a particular user by doing: ###Code hub.search_model_by_name('Callidior', user_uploaded=True) ###Output _____no_output_____ ###Markdown Or (as implied by the function name) any model type itself: ###Code hub.search_model_by_name('gpt2', user_uploaded=True)[5:10] #exporti # Flair models originating from: # https://github.com/flairNLP/flair/blob/master/flair/models/text_classification_model.py#L483 # and: https://github.com/flairNLP/flair/blob/master/flair/models/sequence_tagger_model.py#L1053 # and: https://github.com/flairNLP/flair/blob/master/flair/embeddings/token.py#L406 _flair_models = { 'de-offensive-language' : ['text-classification'], 'sentiment' : ['text-classification'], 'en-sentiment' : ['text-classification'], 'sentiment-fast' : ['text-classification'], 'communicative-functions' : ['text-classification'], 'tars-base' : ['text-classification'], # English Named Entity Recognition Models (NER) 'ner' : ['token-classification'], 'ner-pooled' : ['token-classification'], 'ner-fast' : ['token-classification'], 'ner-ontonotes' : ['token-classification'], 'ner-ontonotes-fast' : ['token-classification'], # Multilingual NER models 'ner-multi' : ['token-classification'], 'multi-ner' : ['token-classification'], 'ner-multi-fast' : ['token-classification'], # English POS models 'upos' : ['token-classification'], 'upos-fast' : ['token-classification'], 'pos' : ['token-classification'], 'pos-fast' : ['token-classification'], # Multilingual POS models 'pos-multi' : ['token-classification'], 'multi-pos' : ['token-classification'], 'pos-multi-fast' : ['token-classification'], 'multi-pos-fast' : ['token-classification'], # English SRL models 'frame' : ['token-classification'], 'frame-fast' : ['token-classification'], # English chunking models 'chunk' : ['token-classification'], 'chunk-fast' : ['token-classification'], # Danish models 'da-pos' : ['token-classification'], 'da-ner' : ['token-classification'], # German models 'de-pos' : ['token-classification'], 'de-pos-tweets' : ['token-classification'], 'de-ner' : ['token-classification'], 'de-ner-germeval' : ['token-classification'], 'de-ler' : ['token-classification'], 'de-ner-legal' : ['token-classification'], # French models 'fr-ner' : ['token-classification'], # Dutch models 'nl-ner' : ['token-classification'], 'nl-ner-rnn' : ['token-classification'], # Malayalam models 'ml-pos' : ['token-classification'], 'ml-upos' : ['token-classification'], # Portuguese models 'pt-pos-clinical' : ['token-classification'], # Keyphrase models 'keyphrase' : ['token-classification'], 'negation-speculation' : ['token-classification'], # Biomedical 'hunflair-paper-cellline' : ['token-classification'], 'hunflair-paper-chemical' : ['token-classification'], 'hunflair-paper-disease' : ['token-classification'], 'hunflair-paper-gene' : ['token-classification'], 'hunflair-paper-species' : ['token-classification'], 'hunflair-cellline' : ['token-classification'], 'hunflair-chemical' : ['token-classification'], 'hunflair-disease' : ['token-classification'], 'hunflair-gene' : ['token-classification'], 'hunflair-species' : ['token-classification'], # Embeddings # multilingual models "multi-forward":['embeddings'], "multi-backward":['embeddings'], "multi-v0-forward":['embeddings'], "multi-v0-backward":['embeddings'], "multi-forward-fast":['embeddings'], "multi-backward-fast":['embeddings'], # English models "en-forward":['embeddings'], "en-backward":['embeddings'], "en-forward-fast":['embeddings'], "en-backward-fast":['embeddings'], 
"news-forward":['embeddings'], "news-backward":['embeddings'], "news-forward-fast":['embeddings'], "news-backward-fast":['embeddings'], "mix-forward":['embeddings'], "mix-backward":['embeddings'], # Arabic "ar-forward":['embeddings'], "ar-backward":['embeddings'], # Bulgarian "bg-forward-fast":['embeddings'], "bg-backward-fast":['embeddings'], "bg-forward":['embeddings'], "bg-backward":['embeddings'], # Czech "cs-forward":['embeddings'], "cs-backward":['embeddings'], "cs-v0-forward":['embeddings'], "cs-v0-backward":['embeddings'], # Danish "da-forward":['embeddings'], "da-backward":['embeddings'], # German "de-forward":['embeddings'], "de-backward":['embeddings'], "de-historic-ha-forward":['embeddings'], "de-historic-ha-backward":['embeddings'], "de-historic-wz-forward":['embeddings'], "de-historic-wz-backward":['embeddings'], "de-historic-rw-forward":['embeddings'], "de-historic-rw-backward":['embeddings'], # Spanish "es-forward":['embeddings'], "es-backward":['embeddings'], "es-forward-fast":['embeddings'], "es-backward-fast":['embeddings'], # Basque "eu-forward":['embeddings'], "eu-backward":['embeddings'], "eu-v1-forward":['embeddings'], "eu-v1-backward":['embeddings'], "eu-v0-forward":['embeddings'], "eu-v0-backward":['embeddings'], # Persian "fa-forward":['embeddings'], "fa-backward":['embeddings'], # Finnish "fi-forward":['embeddings'], "fi-backward":['embeddings'], # French "fr-forward":['embeddings'], "fr-backward":['embeddings'], # Hebrew "he-forward":['embeddings'], "he-backward":['embeddings'], # Hindi "hi-forward":['embeddings'], "hi-backward":['embeddings'], # Croatian "hr-forward":['embeddings'], "hr-backward":['embeddings'], # Indonesian "id-forward":['embeddings'], "id-backward":['embeddings'], # Italian "it-forward":['embeddings'], "it-backward":['embeddings'], # Japanese "ja-forward":['embeddings'], "ja-backward":['embeddings'], # Malayalam "ml-forward":['embeddings'], "ml-backward":['embeddings'], # Dutch "nl-forward":['embeddings'], "nl-backward":['embeddings'], "nl-v0-forward":['embeddings'], "nl-v0-backward":['embeddings'], # Norwegian "no-forward":['embeddings'], "no-backward":['embeddings'], # Polish "pl-forward":['embeddings'], "pl-backward":['embeddings'], "pl-opus-forward":['embeddings'], "pl-opus-backward":['embeddings'], # Portuguese "pt-forward":['embeddings'], "pt-backward":['embeddings'], # Pubmed "pubmed-forward":['embeddings'], "pubmed-backward":['embeddings'], "pubmed-2015-forward":['embeddings'], "pubmed-2015-backward":['embeddings'], # Slovenian "sl-forward":['embeddings'], "sl-backward":['embeddings'], "sl-v0-forward":['embeddings'], "sl-v0-backward":['embeddings'], # Swedish "sv-forward":['embeddings'], "sv-backward":['embeddings'], "sv-v0-forward":['embeddings'], "sv-v0-backward":['embeddings'], # Tamil "ta-forward":['embeddings'], "ta-backward":['embeddings'], # CLEF HIPE Shared task "de-impresso-hipe-v1-forward":['embeddings'], "de-impresso-hipe-v1-backward":['embeddings'], "en-impresso-hipe-v1-forward":['embeddings'], "en-impresso-hipe-v1-backward":['embeddings'], "fr-impresso-hipe-v1-forward":['embeddings'], "fr-impresso-hipe-v1-backward":['embeddings'] } #export FLAIR_MODELS = [ModelInfo(f'flairNLP/{key}', pipeline_tag=val[0]) for key,val in _flair_models.items()] ###Output _____no_output_____ ###Markdown Flair has a series of extra models available for use that are not available through HuggingFace such as `sentiment`, `communicative-functions`, and more. 
`FLAIR_MODELS` is a convience holder for quick lookup of these models (as no such list is easily available currently). When shown as results on the API they will be given the same `flair` prefix for convience. ###Code #export class FlairModelResult(HFModelResult): """ A version of `HFModelResult` for Flair specifically. Includes which backend the model was found (such as on HuggingFace or Flair's private model list) """ def __init__( self, model_info: ModelInfo # ModelInfo object from HuggingFace model hub ): if 'flairNLP' in model_info.modelId: self.from_hf = False else: self.from_hf = True super().__init__(model_info) def __repr__(self): return f'Model Name: {self.name.replace("flairNLP", "flair")}, Tasks: [' + ', '.join(self.tasks) + ']' + f', Source: {self.source}' @property def source(self): if self.from_hf: return "HuggingFace Model Hub" else: return "Flair's Private Model Hub" #export class FlairModelHub: "A class for interacting with the HF model hub API, and searching for Flair models by name or task" def __init__( self, username:str=None, # HuggingFace username password:str=None # HuggingFace password ): self.api = HfApi() if username and password: self.token = self.api.login(username, password) elif username or password: print('Only a username or password was entered. You should include both to get authorized access') self.models = self.api.list_models('flair') + FLAIR_MODELS def _format_results( self, results:list, # A list of HuggingFace API results as_dict:bool=False, # Whether to return as a dictionary or list user_uploaded:bool=False # Whether to filter out user-uploaded results ) -> (List[FlairModelResult], Dict[str, FlairModelResult]): # A list of `FlairModelResult`s "Takes raw HuggingFace API results and makes them easier to read and work with" results = apply(FlairModelResult, results) if not user_uploaded: results = [r for r in results if 'flair/' in r.name or 'flairNLP/' in r.name] if as_dict: dicts = apply(Self.to_dict(), results) results = {m['model_name'] : m for m in dicts} return results def search_model_by_name( self, name:str, # A valid model name as_dict:bool=False, # Whether to return as a dictionary or list user_uploaded:bool=False # Whether to filter out user-uploaded results ) -> (List[FlairModelResult], Dict[str, FlairModelResult]): # A list of `FlairModelResult`s "Searches HuggingFace Model API for all flair models containing `name`" models = [m for m in self.models if name in m.modelId] return self._format_results(models, as_dict, user_uploaded) def search_model_by_task( self, task:str, # A valid task to search the HuggingFace hub for as_dict=False, # Whether to return as a dictionary or list user_uploaded=False # Whether to filter out user-uploaded results ) -> (List[FlairModelResult], Dict[str, FlairModelResult]): # A list of `FlairModelResult`s "Searches HuggingFace Model API for all flair models for `task`" if (task not in _flair_tasks.values()) and (task != ''): raise ValueError(f'''`{task}` is not a valid task. 
Please choose a valid one available from Flair: (https://huggingface.co/flair) Or with the `FLAIR_TASKS` object''') models = [m for m in self.models if task in m.modelId or task == m.pipeline_tag] return self._format_results(models, as_dict, user_uploaded) show_doc(FlairModelHub) ###Output _____no_output_____ ###Markdown `FlairModelHub` is extremely similar to `HFModelHub`, with the two differences being that it will **only** return `Flair` models, and it has access to the *other* Flair models available that can't be accessed through the HuggingFace model hub. ###Code hub = FlairModelHub() show_doc(FlairModelHub.search_model_by_name) ###Output _____no_output_____ ###Markdown `search_model_by_name` will also let you search for models without needing the `flair` prefix, such as: ###Code hub.search_model_by_name('sentiment') #hide test_eq(0, len(hub.search_model_by_name('gpt'))) show_doc(FlairModelHub.search_model_by_task) ###Output _____no_output_____ ###Markdown Since we have a `FLAIR_TASKS` object declared earlier, we can utilize it when searching for models by a task. Similar to `search_model_by_name`, you should not include the `flair/` prefix in your search term, and instead search using the task key, such as `ner` or `FLAIR_TASKS.NAMED_ENTITY_RECOGNITION`. ###Code #hide models = hub.search_model_by_task('ner') models = [m for m in models if m.source == "Flair's Private Model Hub"] test_eq(len(models), 15) ###Output _____no_output_____
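###Markdown
As a final, purely illustrative sketch (not part of the original tests), the two hubs can be used together. Everything below — `HFModelHub`, `FlairModelHub`, the `HF_TASKS`/`FLAIR_TASKS` namespaces, and the `name`/`tasks` properties — is defined earlier in this notebook; only the variable names are new, and both calls hit the HuggingFace API, so results may change over time.
###Code
# sketch: collect NER-capable models from both hubs into one {model name: tasks} mapping
hf_hub = HFModelHub()
flair_hub = FlairModelHub()

# 'token-classification' on the HuggingFace side, 'ner' on the Flair side
hf_ner = hf_hub.search_model_by_task(HF_TASKS.TOKEN_CLASSIFICATION)
flair_ner = flair_hub.search_model_by_task(FLAIR_TASKS.NAMED_ENTITY_RECOGNITION)

# both searches return lists of result objects, so they can simply be concatenated
ner_models = {m.name: m.tasks for m in hf_ner + flair_ner}
list(ner_models.items())[:5]
###Output
 _____no_output_____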
src/plotting/rolling_window/tau_prep/TF_diversity_rw_plots_combined_tau.ipynb
###Markdown Now do constitutive and variable promoters from Czechowski et al 2005 ###Code #add gene type column to all_combined df all_combined_genetype = add_genetype(all_combined, Czechowski_gene_categories) #make sample sizes equal # select only unique promoters unique_proms = all_combined_genetype.drop_duplicates('AGI') # next, count the number of unique promoters in each gene_type unique_proms #plot median rolling_rootshootintersect = windows_coords('Czechowski_genetypenocontrol',all_combined_genetype, 'Shannon_diversity_TF','TF Shannon diversity', 'Shannon_diversity_TF_family','TF family Shannon diversity', promoter_bed,promoter_no_5UTR, 50,EPD_TSS_bed,includeEPDTSS=False,estimator='median', genetype='constitutive', genetype2='variable',ci=95, n_boot=10000) ###Output sample size in each category = 100 ###Markdown There aren't 100 constitutive and 100 variable genes because some were filtered out if they only contained 5'UTRs, as the promoter was overlapping other genes ###Code #plot with control genetype too rolling_incl_control = windows_coords('Czechowski_genetype',all_combined_genetype,'Shannon_diversity_TF','TF Shannon diversity', 'Shannon_diversity_TF_family','TF family Shannon diversity', promoter_bed,promoter_no_5UTR, 50,EPD_TSS_bed,includeEPDTSS=False,estimator='median', genetype='constitutive', genetype2='variable', genetype3='control') ###Output sample size in each category = 100 ###Markdown plot lengths ###Code # # plot CRM (cis-regulatory module) length distribution # plot_length(all_proms,'allproms', genetype=False) # #add genetype column # all_combined_types_length = add_genetype(all_proms, Czechowski_gene_categories) # # plot CRM length distribution for each genetype excluding control genes # plot_length(all_combined_types_length,'Czechowski_genetypenocontrol', genetype='constitutive', genetype2='variable') # # plot CRM length distribution for each genetype including control genes # plot_length(all_combined_types_length,'Czechowski_genetype', genetype='constitutive', genetype2='variable', genetype3='control') ###Output _____no_output_____
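###Markdown
The counting step flagged in the comment above ("count the number of unique promoters in each gene_type") is not shown explicitly; a minimal pandas sketch is below. It assumes `add_genetype` added a column named `gene_type` — that column name is an assumption rather than something taken from this notebook, so adjust it if the real column is called something else.
###Code
# sketch: count unique promoters (by 'AGI') per gene type; 'gene_type' is an assumed column name
unique_proms.groupby('gene_type')['AGI'].nunique()
###Output
 _____no_output_____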
code/data_preprocessing/reference_data.ipynb
###Markdown Reference data standardizationThis module provides reference data download, indexing and preprocessing (if necessary), in preparation for use throughout the pipeline.We have included the PDF document compiled by the data standardization subgroup in the [minimal working example folder on Google Drive](https://drive.google.com/file/d/1R5sw5o8vqk_mbQQb4CGmtH3ldu1T3Vu0/view?usp=sharing). It contains the reference data to use for the project. OverviewThis module is based on the [TOPMed workflow from Broad](https://github.com/broadinstitute/gtex-pipeline/blob/master/TOPMed_RNAseq_pipeline.md).Workflows implemented include: Convert transcript feature file gff3 to gtf- Input: an uncompressed gff3 file (i.e. it can be viewed via cat)- Output: a gtf file. Collapse transcript features into genes- Input: a gtf file.- Output: a gtf file with a collapsed gene model. Generate STAR index based on gtf and reference fasta- Input: a gtf file and an accompanying fasta file.- Output: A folder of STAR index files. Generate RSEM index based on gtf and reference fasta- Input: a gtf file and an accompanying fasta file.- Output: A folder of RSEM index files. Example commandsTo download reference data: ###Code sos run reference_data.ipynb download_hg_reference --cwd reference_data sos run reference_data.ipynb download_gene_annotation --cwd reference_data sos run reference_data.ipynb download_ercc_reference --cwd reference_data ###Output _____no_output_____ ###Markdown To format reference data: ###Code sos run reference_data.ipynb hg_reference \ --cwd reference_data \ --ercc-reference reference_data/ERCC92.fa \ --hg-reference reference_data/GRCh38_full_analysis_set_plus_decoy_hla.fa \ --container container/rna_quantification.sif sos run pipeline/reference_data.ipynb hg_gtf \ --cwd reference_data \ --hg-gtf /mnt/mfs/statgen/snuc_pseudo_bulk/data/reference_data/genes.gtf \ --hg-reference data/reference_data/GRCh38_full_analysis_set_plus_decoy_hla.noALT_noHLA_noDecoy_ERCC.fasta \ --containers containers/rna_quantification.sif -J 1 -q csg -c csg.yml & ###Output _____no_output_____ ###Markdown To format gene feature data: ###Code sos run reference_data.ipynb gene_annotation \ --cwd reference_data \ --ercc-gtf reference_data/ERCC92.gtf \ --hg-gtf reference_data/Homo_sapiens.GRCh38.103.chr.gtf \ --hg-reference reference_data/GRCh38_full_analysis_set_plus_decoy_hla.noALT_noHLA_noDecoy_ERCC.fasta \ --container container/rna_quantification.sif ###Output _____no_output_____ ###Markdown **Notice that for a stranded RNA-seq protocol please add the boolean switch `--stranded` to the command above. More details can be found later in the document.** To generate STAR index using the GTF annotation file before gene model collapse: ###Code sos run reference_data.ipynb STAR_index \ --cwd reference_data \ --hg-reference reference_data/GRCh38_full_analysis_set_plus_decoy_hla.noALT_noHLA_noDecoy_ERCC.fasta \ --hg-gtf reference_data/Homo_sapiens.GRCh38.103.chr.reformatted.ERCC.gtf \ --container container/rna_quantification.sif \ --mem 40G ###Output _____no_output_____ ###Markdown **Notice that the command above requires at least 40G of memory, and takes quite a while to complete**. 
To generate RSEM index: ###Code sos run reference_data.ipynb RSEM_index \ --cwd reference_data \ --hg-reference reference_data/GRCh38_full_analysis_set_plus_decoy_hla.noALT_noHLA_noDecoy_ERCC.fasta \ --hg-gtf reference_data/Homo_sapiens.GRCh38.103.chr.reformatted.ERCC.gtf \ --container container/rna_quantification.sif \ --mem 40G ###Output _____no_output_____ ###Markdown Command interface ###Code sos run reference_data.ipynb -h [global] # The output directory for generated files. parameter: cwd = path # For cluster jobs, number commands to run per job parameter: job_size = 1 # Wall clock time expected parameter: walltime = "5h" # Memory expected parameter: mem = "16G" # Number of threads parameter: numThreads = 8 # Software container option parameter: container = "" cwd = path(f'{cwd:a}') from sos.utils import expand_size ###Output _____no_output_____ ###Markdown Data download ###Code [download_hg_reference] output: f"{cwd:a}/GRCh38_full_analysis_set_plus_decoy_hla.fa" download: dest_dir = cwd ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/technical/reference/GRCh38_reference_genome/GRCh38_full_analysis_set_plus_decoy_hla.fa [download_gene_annotation] output: f"{cwd:a}/Homo_sapiens.GRCh38.103.chr.gtf" download: dest_dir = cwd, decompress=True http://ftp.ensembl.org/pub/release-103/gtf/homo_sapiens/Homo_sapiens.GRCh38.103.chr.gtf.gz [download_ercc_reference] output: f"{cwd:a}/ERCC92.gtf", f"{cwd:a}/ERCC92.fa" download: dest_dir = cwd, decompress=True https://tools.thermofisher.com/content/sfs/manuals/ERCC92.zip ###Output _____no_output_____ ###Markdown GFF3 to GTF formatting ###Code [gff3_to_gtf] parameter: gff3_file = path input: gff3_file output: f'{cwd}/{_input:bn}.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' bash: container=container, expand= "${ }", stderr = f'{_output:n}.stderr', stdout = f'{_output:n}.stdout' gffread ${_input} -T -o ${_output} ###Output _____no_output_____ ###Markdown HG reference file preprocessing1. Remove the HLA/ALT/Decoy record from the fasta2. Adding in ERCC information to the fasta file3. 
Generating index for the fasta file ###Code [hg_reference_1 (HLA ALT Decoy removal)] # Path to HG reference file parameter: hg_reference = path input: hg_reference output: f'{cwd}/{_input:bn}.noALT_noHLA_noDecoy.fasta' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' python: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container with open('${_input}', 'r') as fasta: contigs = fasta.read() contigs = contigs.split('>') contig_ids = [i.split(' ', 1)[0] for i in contigs] # exclude ALT, HLA and decoy contigs filtered_fasta = '>'.join([c for i,c in zip(contig_ids, contigs) if not (i[-4:]=='_alt' or i[:3]=='HLA' or i[-6:]=='_decoy')]) with open('${_output}', 'w') as fasta: fasta.write(filtered_fasta) [hg_reference_2 (merge with ERCC reference)] parameter: ercc_reference = path output: f'{cwd}/{_input:bn}_ERCC.fasta' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' bash: expand = "${ }", stderr = f'{_output[0]}.stderr', stdout = f'{_output}.stdout', container = container sed 's/ERCC-/ERCC_/g' ${ercc_reference} > ${ercc_reference:n}.patched.fa cat ${_input} ${ercc_reference:n}.patched.fa > ${_output} [hg_reference_3 (index the fasta file)] output: f'{cwd}/{_input:bn}.dict' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' bash: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container samtools faidx ${_input} java -jar /opt/picard-tools/picard.jar \ CreateSequenceDictionary \ R=${_input} \ O=${_output} ###Output _____no_output_____ ###Markdown Transcript and gene model reference processing This step modify the `gtf` file for following reasons:1. RSEM require GTF input to have the same chromosome name format (with `chr` prefix) as the fasta file. **although for STAR, this problem can be solved by the now commented --sjdbGTFchrPrefix "chr" option, we have to add `chr` to it for use with RSEM**. 2. Gene model collapsing script `collapse_annotation.py` from GTEx require the gtf have `transcript_type` instead `transcript_biotype` in its annotation. We rename it here, although **this problem can also be solved by modifying the collapse_annotation.py while building the docker, since we are doing 1 above we think it is better to add in another customization here.**3. Adding in ERCC information to the `gtf` reference.We may reimplement 1 and 2 if the problem with RSEM is solved, or when RSEM is no longer needed. 
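###Markdown To make points 1 and 2 above concrete, here is a toy illustration on a single made-up GTF line of what the chr prefix and the transcript_biotype to transcript_type rename look like; the workflow step below performs the real reformatting in R over the whole annotation. ###Code
# Toy illustration only -- the hg_gtf_1 step below does this in R for the entire GTF.
line = '1\tensembl\ttranscript\t11869\t14409\t.\t+\t.\tgene_id "ENSG00000223972"; transcript_biotype "lncRNA";'
fields = line.split('\t')
fields[0] = 'chr' + fields[0]  # contig names must match the chr-prefixed fasta (needed by RSEM)
fields[8] = fields[8].replace('transcript_biotype', 'transcript_type')  # attribute name expected by collapse_annotation.py
print('\t'.join(fields))
###Output
_____no_output_____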
###Code [hg_gtf_1 (add chr prefix to gtf file)] parameter: hg_reference = path parameter: hg_gtf = path input: hg_reference, hg_gtf output: f'{cwd}/{_input[1]:bn}.reformatted.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' R: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container library("readr") library("stringr") library("dplyr") options(scipen = 999) con <- file("${_input[0]}","r") fasta <- readLines(con,n=1) close(con) gtf = read_delim("${_input[1]}", "\t", col_names = F, comment = "#", col_types="ccccccccc") if(!str_detect(fasta,">chr")) { gtf_mod = gtf%>%mutate(X1 = str_remove_all(X1,"chr")) } else if (!any(str_detect(gtf$X1[1],"chr"))) { gtf_mod = gtf%>%mutate(X1 = paste0("chr",X1)) } else (gtf_mod = gtf) if(any(str_detect(gtf_mod$X9, "transcript_biotype"))) { gtf_mod = gtf_mod%>%mutate(X9 = str_replace_all(X9,"transcript_biotype","transcript_type")) } gtf_mod%>%write.table("${_output}",sep = "\t",quote = FALSE,col.names = F,row.names = F) ###Output _____no_output_____ ###Markdown **Text below is taken from https://github.com/broadinstitute/gtex-pipeline/tree/master/gene_model**Gene-level expression and eQTLs from the GTEx project are calculated based on a collapsed gene model (i.e., combining all isoforms of a gene into a single transcript), according to the following rules:1. Transcripts annotated as “retained_intron” or “read_through” are excluded. Additionally, transcripts that overlap with annotated read-through transcripts may be blacklisted (blacklists for GENCODE v19, 24 & 25 are provided in this repository; no transcripts were blacklisted for v26).2. The union of all exon intervals of each gene is calculated.3. Overlapping intervals between genes are excluded from all genes.The purpose of step 3 is primarily to exclude overlapping regions from genes annotated on both strands, which can't be unambiguously quantified from unstranded RNA-seq (GTEx samples were sequenced using an unstranded protocol). For stranded protocols, this step can be skipped by adding the `--collapse_only` flag.Further documentation is available on the [GTEx Portal](https://gtexportal.org/home/documentationPagestaticTextAnalysisMethods). 
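###Markdown To make rule 2 concrete, a toy sketch (with made-up exon coordinates) of taking the union of all exon intervals across a gene's isoforms; the actual collapse, including rules 1 and 3, is performed by collapse_annotation.py in the step below. ###Code
# Toy illustration of rule 2 only; collapse_annotation.py (used below) implements the full collapse.
def union_intervals(intervals):
    """Merge overlapping or adjacent (start, end) intervals."""
    merged = []
    for start, end in sorted(intervals):
        if merged and start <= merged[-1][1] + 1:
            merged[-1] = (merged[-1][0], max(merged[-1][1], end))
        else:
            merged.append((start, end))
    return merged

# exons of two hypothetical isoforms of the same gene
isoform_a = [(100, 200), (300, 400)]
isoform_b = [(150, 250), (300, 350), (500, 600)]
union_intervals(isoform_a + isoform_b)  # [(100, 250), (300, 400), (500, 600)]
###Output
_____no_output_____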
###Code [hg_gtf_2 (collapsed gene model)] parameter: stranded = bool output: f'{_input:n}{".collapse_only" if stranded else ""}.gene.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' bash: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container collapse_annotation.py ${"--collapse_only" if stranded else ""} ${_input} ${_output} [ercc_gtf (Preprocess ERCC gtf file)] parameter: ercc_gtf = path input: ercc_gtf output: f'{cwd}/{_input:bn}.genes.patched.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' python: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container with open('${_input}') as exon_gtf, open('${_output}', 'w') as gene_gtf: for line in exon_gtf: f = line.strip().split('\t') f[0] = f[0].replace('-','_') # required for RNA-SeQC/GATK (no '-' in contig name) attr = f[8] if attr[-1]==';': attr = attr[:-1] attr = dict([i.split(' ') for i in attr.replace('"','').split('; ')]) # add gene_name, gene_type attr['gene_name'] = attr['gene_id'] attr['gene_type'] = 'ercc_control' attr['gene_status'] = 'KNOWN' attr['level'] = 2 for k in ['id', 'type', 'name', 'status']: attr['transcript_'+k] = attr['gene_'+k] attr_str = [] for k in ['gene_id', 'transcript_id', 'gene_type', 'gene_status', 'gene_name', 'transcript_type', 'transcript_status', 'transcript_name']: attr_str.append('{0:s} "{1:s}";'.format(k, attr[k])) attr_str.append('{0:s} {1:d};'.format('level', attr['level'])) f[8] = ' '.join(attr_str) # write gene, transcript, exon gene_gtf.write('\t'.join(f[:2]+['gene']+f[3:])+'\n') gene_gtf.write('\t'.join(f[:2]+['transcript']+f[3:])+'\n') f[8] = ' '.join(attr_str[:2]) gene_gtf.write('\t'.join(f[:2]+['exon']+f[3:])+'\n') [gene_annotation] input: output_from("hg_gtf_1"), output_from("hg_gtf_2"), output_from("ercc_gtf") output: f'{cwd}/{_input[0]:bn}.ERCC.gtf', f'{cwd}/{_input[1]:bn}.ERCC.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' bash: expand = "${ }", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout', container = container cat ${_input[0]} ${_input[2]} > ${_output[0]} cat ${_input[1]} ${_input[2]} > ${_output[1]} ###Output _____no_output_____ ###Markdown Generating index file for `STAR` This step generate the index file for STAR alignment. This file just need to generate once and can be re-used. **At least 40GB of memory is needed**. Step Inputs* `gtf` and `fasta`: path to reference sequence. Both of them needs to be unzipped. `gtf` should be the one prior to collapse by gene.* `sjdbOverhang`: specifies the length of the genomic sequence around the annotated junction to be used in constructing the splice junctions database. Ideally, this length should be equal to the ReadLength-1, where ReadLength is the length of the reads. We use 100 here as recommended by the TOPMed pipeline. See here for [some additional discussions](https://groups.google.com/g/rna-star/c/h9oh10UlvhI/m/BfSPGivUHmsJ). Step Output* Indexing file stored in `{cwd}/STAR_index`, which will be used by `STAR` ###Code [STAR_index] parameter: hg_gtf = path parameter: hg_reference = path # Specifies the length of the genomic sequence around the annotated junction to be used in constructing the splice junctions database. Ideally, this length should be equal to the ReadLength-1, where ReadLength is the length of the reads. 
# Default choice follows from TOPMed pipeline recommendation. parameter: sjdbOverhang = 100 fail_if(expand_size(mem) < expand_size('40G'), msg = "At least 40GB of memory is required for this step") input: hg_reference, hg_gtf output: f"{cwd}/STAR_Index/chrName.txt", f"{cwd}/STAR_Index/Log.out", f"{cwd}/STAR_Index/transcriptInfo.tab", f"{cwd}/STAR_Index/exonGeTrInfo.tab", f"{cwd}/STAR_Index/SAindex", f"{cwd}/STAR_Index/SA", f"{cwd}/STAR_Index/genomeParameters.txt", f"{cwd}/STAR_Index/chrStart.txt", f"{cwd}/STAR_Index/sjdbList.out.tab", f"{cwd}/STAR_Index/exonInfo.tab", f"{cwd}/STAR_Index/sjdbList.fromGTF.out.tab", f"{cwd}/STAR_Index/chrLength.txt", f"{cwd}/STAR_Index/sjdbInfo.txt", f"{cwd}/STAR_Index/Genome", f"{cwd}/STAR_Index/chrNameLength.txt", f"{cwd}/STAR_Index/geneInfo.tab" task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output[0]:bd}' bash: container=container, expand= "${ }", stderr = f'{_output[0]:d}.stderr', stdout = f'{_output[0]:d}.stdout' STAR --runMode genomeGenerate \ --genomeDir ${_output:d} \ --genomeFastaFiles ${_input[0]} \ --sjdbGTFfile ${_input[1]} \ --sjdbOverhang ${sjdbOverhang} \ --runThreadN ${numThreads} #--sjdbGTFchrPrefix "chr" ###Output _____no_output_____ ###Markdown Generating index file for `RSEM`This step generate the indexing file for `RSEM`. This file just need to generate once. Step Inputs* `gtf` and `fasta`: path to reference sequence. `gtf` should be the one prior to collapse by gene.* `sjdbOverhang`: specifies the length of the genomic sequence around the annotated junction to be used in constructing the splice junctions database. Ideally, this length should be equal to the ReadLength-1, where ReadLength is the length of the reads. Step Outputs* Indexing file stored in `RSEM_index_dir`, which will be used by `RSEM` ###Code [RSEM_index] parameter: hg_gtf = path parameter: hg_reference = path input: hg_reference, hg_gtf output: f"{cwd}/RSEM_Index/rsem_reference.n2g.idx.fa", f"{cwd}/RSEM_Index/rsem_reference.grp", f"{cwd}/RSEM_Index/rsem_reference.idx.fa", f"{cwd}/RSEM_Index/rsem_reference.ti", f"{cwd}/RSEM_Index/rsem_reference.chrlist", f"{cwd}/RSEM_Index/rsem_reference.seq", f"{cwd}/RSEM_Index/rsem_reference.transcripts.fa" task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output[0]:bd}' bash: container=container, expand= "${ }", stderr = f'{_output[0]:d}.stderr', stdout = f'{_output[0]:d}.stdout' rsem-prepare-reference \ ${_input[0]} \ ${_output[1]:n} \ --gtf ${_input[1]} \ --num-threads ${numThreads} ###Output _____no_output_____ ###Markdown Reference data standardizationThis module provides reference data download, indexing and preprocessing (if necessary), in preparation for use throughout the pipeline.We have included the PDF document compiled by data standardization subgroup in the [on Google Drive](https://drive.google.com/file/d/1R5sw5o8vqk_mbQQb4CGmtH3ldu1T3Vu0/view?usp=sharing) as well as on [ADSP Dashboard](https://www.niagads.org/adsp/content/adspgcadgenomeresources-v2pdf). It contains the reference data to use for the project. OverviewThis module is based on the [TOPMed workflow from Broad](https://github.com/broadinstitute/gtex-pipeline/blob/master/TOPMed_RNAseq_pipeline.md). The reference data after we process it (details see Methods section and the rest of the analysis) can be found [in this folder on Google Drive](https://drive.google.com/drive/folders/19fmoII8yS7XE7HFcMU4OfvC2bL1zMD_P). 
Specifically, the list of reference files to be used are:1. `GRCh38_full_analysis_set_plus_decoy_hla.noALT_noHLA_noDecoy_ERCC.{dict,fasta,fasta.fai}`2. `Homo_sapiens.GRCh38.103.chr.reformatted.collapse_only.gene.ERCC.gtf` for stranded protocol, and `Homo_sapiens.GRCh38.103.chr.reformatted.gene.ERCC.gtf` for unstranded protocol.3. Everything under `STAR_Index` folder4. Everything under `RSEM_Index` folder MethodsWorkflows implemented include: Convert transcript feature file gff3 to gtf- Input: an uncompressed gff3 file.(i.e. can be view via cat)- Output: a gtf file. Collapse transcript features into genes- Input: a gtf file.- Output: a gtf file with collapesed gene model. Generate STAR index based on gtf and reference fasta- Input: a gtf file and an acompanying fasta file.- Output: A folder of STAR index. Generate RSEM index based on gtf and reference fasta- Input: a gtf file and an acompanying fasta file.- Output: A folder of RSEM index. Example commandsTo download reference data: ###Code sos run reference_data.ipynb download_hg_reference --cwd reference_data sos run reference_data.ipynb download_gene_annotation --cwd reference_data sos run reference_data.ipynb download_ercc_reference --cwd reference_data ###Output _____no_output_____ ###Markdown To format reference data: ###Code sos run reference_data.ipynb hg_reference \ --cwd reference_data \ --ercc-reference reference_data/ERCC92.fa \ --hg-reference reference_data/GRCh38_full_analysis_set_plus_decoy_hla.fa \ --container container/rna_quantification.sif sos run pipeline/reference_data.ipynb hg_gtf \ --cwd reference_data \ --hg-gtf /mnt/mfs/statgen/snuc_pseudo_bulk/data/reference_data/genes.gtf \ --hg-reference data/reference_data/GRCh38_full_analysis_set_plus_decoy_hla.noALT_noHLA_noDecoy_ERCC.fasta \ --containers containers/rna_quantification.sif -J 1 -q csg -c csg.yml & ###Output _____no_output_____ ###Markdown To format gene feature data: ###Code sos run reference_data.ipynb gene_annotation \ --cwd reference_data \ --ercc-gtf reference_data/ERCC92.gtf \ --hg-gtf reference_data/Homo_sapiens.GRCh38.103.chr.gtf \ --hg-reference reference_data/GRCh38_full_analysis_set_plus_decoy_hla.noALT_noHLA_noDecoy_ERCC.fasta \ --container container/rna_quantification.sif --stranded ###Output _____no_output_____ ###Markdown **Notice that for un-stranded RNA-seq protocol please use switch `--no-stranded` to the command above instead of `--stranded`. More details can be found later in the document.** To generate STAR index using the GTF annotation file before gene model collapse: ###Code sos run reference_data.ipynb STAR_index \ --cwd reference_data \ --hg-reference reference_data/GRCh38_full_analysis_set_plus_decoy_hla.noALT_noHLA_noDecoy_ERCC.fasta \ --hg-gtf reference_data/Homo_sapiens.GRCh38.103.chr.reformatted.ERCC.gtf \ --container container/rna_quantification.sif \ --mem 40G ###Output _____no_output_____ ###Markdown **Notice that command above requires at least 40G of memory, and takes quite a while to complete**. To generate RSEM index: ###Code sos run reference_data.ipynb RSEM_index \ --cwd reference_data \ --hg-reference reference_data/GRCh38_full_analysis_set_plus_decoy_hla.noALT_noHLA_noDecoy_ERCC.fasta \ --hg-gtf reference_data/Homo_sapiens.GRCh38.103.chr.reformatted.ERCC.gtf \ --container container/rna_quantification.sif \ --mem 40G ###Output _____no_output_____ ###Markdown Command interface ###Code sos run reference_data.ipynb -h [global] # The output directory for generated files. 
parameter: cwd = path # For cluster jobs, number commands to run per job parameter: job_size = 1 # Wall clock time expected parameter: walltime = "5h" # Memory expected parameter: mem = "16G" # Number of threads parameter: numThreads = 8 # Software container option parameter: container = "" cwd = path(f'{cwd:a}') from sos.utils import expand_size ###Output _____no_output_____ ###Markdown Data download ###Code [download_hg_reference] output: f"{cwd:a}/GRCh38_full_analysis_set_plus_decoy_hla.fa" download: dest_dir = cwd ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/technical/reference/GRCh38_reference_genome/GRCh38_full_analysis_set_plus_decoy_hla.fa [download_gene_annotation] output: f"{cwd:a}/Homo_sapiens.GRCh38.103.chr.gtf" download: dest_dir = cwd, decompress=True http://ftp.ensembl.org/pub/release-103/gtf/homo_sapiens/Homo_sapiens.GRCh38.103.chr.gtf.gz [download_ercc_reference] output: f"{cwd:a}/ERCC92.gtf", f"{cwd:a}/ERCC92.fa" download: dest_dir = cwd, decompress=True https://tools.thermofisher.com/content/sfs/manuals/ERCC92.zip ###Output _____no_output_____ ###Markdown GFF3 to GTF formatting ###Code [gff3_to_gtf] parameter: gff3_file = path input: gff3_file output: f'{cwd}/{_input:bn}.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' bash: container=container, expand= "${ }", stderr = f'{_output:n}.stderr', stdout = f'{_output:n}.stdout' gffread ${_input} -T -o ${_output} ###Output _____no_output_____ ###Markdown HG reference file preprocessing1. Remove the HLA/ALT/Decoy record from the fasta -- because none of the downstreams RNA-seq calling pipeline component can handle them properly.2. Adding in ERCC information to the fasta file -- even if ERCC is not included in the RNA-seq library it does not harm to add them.3. 
Generating index for the fasta file ###Code [hg_reference_1 (HLA ALT Decoy removal)] # Path to HG reference file parameter: hg_reference = path input: hg_reference output: f'{cwd}/{_input:bn}.noALT_noHLA_noDecoy.fasta' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' python: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container with open('${_input}', 'r') as fasta: contigs = fasta.read() contigs = contigs.split('>') contig_ids = [i.split(' ', 1)[0] for i in contigs] # exclude ALT, HLA and decoy contigs filtered_fasta = '>'.join([c for i,c in zip(contig_ids, contigs) if not (i[-4:]=='_alt' or i[:3]=='HLA' or i[-6:]=='_decoy')]) with open('${_output}', 'w') as fasta: fasta.write(filtered_fasta) [hg_reference_2 (merge with ERCC reference)] parameter: ercc_reference = path output: f'{cwd}/{_input:bn}_ERCC.fasta' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' bash: expand = "${ }", stderr = f'{_output[0]}.stderr', stdout = f'{_output}.stdout', container = container sed 's/ERCC-/ERCC_/g' ${ercc_reference} > ${ercc_reference:n}.patched.fa cat ${_input} ${ercc_reference:n}.patched.fa > ${_output} [hg_reference_3 (index the fasta file)] output: f'{cwd}/{_input:bn}.dict' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' bash: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container samtools faidx ${_input} java -jar /opt/picard-tools/picard.jar \ CreateSequenceDictionary \ R=${_input} \ O=${_output} ###Output _____no_output_____ ###Markdown Transcript and gene model reference processing This step modify the `gtf` file for following reasons:1. RSEM require GTF input to have the same chromosome name format (with `chr` prefix) as the fasta file. **although for STAR, this problem can be solved by the now commented --sjdbGTFchrPrefix "chr" option, we have to add `chr` to it for use with RSEM**. 2. Gene model collapsing script `collapse_annotation.py` from GTEx require the gtf have `transcript_type` instead `transcript_biotype` in its annotation. We rename it here, although **this problem can also be solved by modifying the collapse_annotation.py while building the docker, since we are doing 1 above we think it is better to add in another customization here.**3. Adding in ERCC information to the `gtf` reference.We may reimplement 1 and 2 if the problem with RSEM is solved, or when RSEM is no longer needed. 
###Code [hg_gtf_1 (add chr prefix to gtf file)] parameter: hg_reference = path parameter: hg_gtf = path input: hg_reference, hg_gtf output: f'{cwd}/{_input[1]:bn}.reformatted.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' R: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container library("readr") library("stringr") library("dplyr") options(scipen = 999) con <- file("${_input[0]}","r") fasta <- readLines(con,n=1) close(con) gtf = read_delim("${_input[1]}", "\t", col_names = F, comment = "#", col_types="ccccccccc") if(!str_detect(fasta,">chr")) { gtf_mod = gtf%>%mutate(X1 = str_remove_all(X1,"chr")) } else if (!any(str_detect(gtf$X1[1],"chr"))) { gtf_mod = gtf%>%mutate(X1 = paste0("chr",X1)) } else (gtf_mod = gtf) if(any(str_detect(gtf_mod$X9, "transcript_biotype"))) { gtf_mod = gtf_mod%>%mutate(X9 = str_replace_all(X9,"transcript_biotype","transcript_type")) } gtf_mod%>%write.table("${_output}",sep = "\t",quote = FALSE,col.names = F,row.names = F) ###Output _____no_output_____ ###Markdown **Text below is taken from https://github.com/broadinstitute/gtex-pipeline/tree/master/gene_model**Gene-level expression and eQTLs from the GTEx project are calculated based on a collapsed gene model (i.e., combining all isoforms of a gene into a single transcript), according to the following rules:1. Transcripts annotated as “retained_intron” or “read_through” are excluded. Additionally, transcripts that overlap with annotated read-through transcripts may be blacklisted (blacklists for GENCODE v19, 24 & 25 are provided in this repository; no transcripts were blacklisted for v26).2. The union of all exon intervals of each gene is calculated.3. Overlapping intervals between genes are excluded from all genes.The purpose of step 3 is primarily to exclude overlapping regions from genes annotated on both strands, which can't be unambiguously quantified from unstranded RNA-seq (GTEx samples were sequenced using an unstranded protocol). For stranded protocols, this step can be skipped by adding the `--collapse_only` flag.Further documentation is available on the [GTEx Portal](https://gtexportal.org/home/documentationPagestaticTextAnalysisMethods). 
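###Markdown To make rule 3 concrete, a toy sketch (made-up coordinates) of removing the region where two different genes' collapsed intervals overlap, since such regions cannot be assigned unambiguously from unstranded RNA-seq; collapse_annotation.py in the step below handles this for the whole annotation. ###Code
# Toy illustration of rule 3 only; collapse_annotation.py (used below) implements the full collapse.
def subtract(interval, other):
    """Return the parts of `interval` that are not covered by `other`."""
    s, e = interval
    os_, oe = other
    if oe < s or os_ > e:  # no overlap
        return [(s, e)]
    pieces = []
    if os_ > s:
        pieces.append((s, os_ - 1))
    if oe < e:
        pieces.append((oe + 1, e))
    return pieces

gene_a = (100, 400)
gene_b = (350, 500)  # hypothetical overlapping gene, e.g. annotated on the opposite strand
subtract(gene_a, gene_b)  # [(100, 349)] -- the shared 350-400 region is excluded from gene A
###Output
_____no_output_____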
###Code [hg_gtf_2 (collapsed gene model)] parameter: stranded = bool output: f'{_input:n}{".collapse_only" if stranded else ""}.gene.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' bash: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container collapse_annotation.py ${"--collapse_only" if stranded else ""} ${_input} ${_output} [ercc_gtf (Preprocess ERCC gtf file)] parameter: ercc_gtf = path input: ercc_gtf output: f'{cwd}/{_input:bn}.genes.patched.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' python: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container with open('${_input}') as exon_gtf, open('${_output}', 'w') as gene_gtf: for line in exon_gtf: f = line.strip().split('\t') f[0] = f[0].replace('-','_') # required for RNA-SeQC/GATK (no '-' in contig name) attr = f[8] if attr[-1]==';': attr = attr[:-1] attr = dict([i.split(' ') for i in attr.replace('"','').split('; ')]) # add gene_name, gene_type attr['gene_name'] = attr['gene_id'] attr['gene_type'] = 'ercc_control' attr['gene_status'] = 'KNOWN' attr['level'] = 2 for k in ['id', 'type', 'name', 'status']: attr['transcript_'+k] = attr['gene_'+k] attr_str = [] for k in ['gene_id', 'transcript_id', 'gene_type', 'gene_status', 'gene_name', 'transcript_type', 'transcript_status', 'transcript_name']: attr_str.append('{0:s} "{1:s}";'.format(k, attr[k])) attr_str.append('{0:s} {1:d};'.format('level', attr['level'])) f[8] = ' '.join(attr_str) # write gene, transcript, exon gene_gtf.write('\t'.join(f[:2]+['gene']+f[3:])+'\n') gene_gtf.write('\t'.join(f[:2]+['transcript']+f[3:])+'\n') f[8] = ' '.join(attr_str[:2]) gene_gtf.write('\t'.join(f[:2]+['exon']+f[3:])+'\n') [gene_annotation] input: output_from("hg_gtf_1"), output_from("hg_gtf_2"), output_from("ercc_gtf") output: f'{cwd}/{_input[0]:bn}.ERCC.gtf', f'{cwd}/{_input[1]:bn}.ERCC.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' bash: expand = "${ }", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout', container = container cat ${_input[0]} ${_input[2]} > ${_output[0]} cat ${_input[1]} ${_input[2]} > ${_output[1]} ###Output _____no_output_____ ###Markdown Generating index file for `STAR` This step generate the index file for STAR alignment. This file just need to generate once and can be re-used. **At least 40GB of memory is needed**. Step Inputs* `gtf` and `fasta`: path to reference sequence. Both of them needs to be unzipped. `gtf` should be the one prior to collapse by gene.* `sjdbOverhang`: specifies the length of the genomic sequence around the annotated junction to be used in constructing the splice junctions database. Ideally, this length should be equal to the ReadLength-1, where ReadLength is the length of the reads. We use 100 here as recommended by the TOPMed pipeline. See here for [some additional discussions](https://groups.google.com/g/rna-star/c/h9oh10UlvhI/m/BfSPGivUHmsJ). Step Output* Indexing file stored in `{cwd}/STAR_index`, which will be used by `STAR` ###Code [STAR_index] parameter: hg_gtf = path parameter: hg_reference = path # Specifies the length of the genomic sequence around the annotated junction to be used in constructing the splice junctions database. Ideally, this length should be equal to the ReadLength-1, where ReadLength is the length of the reads. 
# Default choice follows from TOPMed pipeline recommendation. parameter: sjdbOverhang = 100 fail_if(expand_size(mem) < expand_size('40G'), msg = "At least 40GB of memory is required for this step") input: hg_reference, hg_gtf output: f"{cwd}/STAR_Index/chrName.txt", f"{cwd}/STAR_Index/Log.out", f"{cwd}/STAR_Index/transcriptInfo.tab", f"{cwd}/STAR_Index/exonGeTrInfo.tab", f"{cwd}/STAR_Index/SAindex", f"{cwd}/STAR_Index/SA", f"{cwd}/STAR_Index/genomeParameters.txt", f"{cwd}/STAR_Index/chrStart.txt", f"{cwd}/STAR_Index/sjdbList.out.tab", f"{cwd}/STAR_Index/exonInfo.tab", f"{cwd}/STAR_Index/sjdbList.fromGTF.out.tab", f"{cwd}/STAR_Index/chrLength.txt", f"{cwd}/STAR_Index/sjdbInfo.txt", f"{cwd}/STAR_Index/Genome", f"{cwd}/STAR_Index/chrNameLength.txt", f"{cwd}/STAR_Index/geneInfo.tab" task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output[0]:bd}' bash: container=container, expand= "${ }", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout' STAR --runMode genomeGenerate \ --genomeDir ${_output:d} \ --genomeFastaFiles ${_input[0]} \ --sjdbGTFfile ${_input[1]} \ --sjdbOverhang ${sjdbOverhang} \ --runThreadN ${numThreads} #--sjdbGTFchrPrefix "chr" ###Output _____no_output_____ ###Markdown Generating index file for `RSEM`This step generate the indexing file for `RSEM`. This file just need to generate once. Step Inputs* `gtf` and `fasta`: path to reference sequence. `gtf` should be the one prior to collapse by gene.* `sjdbOverhang`: specifies the length of the genomic sequence around the annotated junction to be used in constructing the splice junctions database. Ideally, this length should be equal to the ReadLength-1, where ReadLength is the length of the reads. Step Outputs* Indexing file stored in `RSEM_index_dir`, which will be used by `RSEM` ###Code [RSEM_index] parameter: hg_gtf = path parameter: hg_reference = path input: hg_reference, hg_gtf output: f"{cwd}/RSEM_Index/rsem_reference.n2g.idx.fa", f"{cwd}/RSEM_Index/rsem_reference.grp", f"{cwd}/RSEM_Index/rsem_reference.idx.fa", f"{cwd}/RSEM_Index/rsem_reference.ti", f"{cwd}/RSEM_Index/rsem_reference.chrlist", f"{cwd}/RSEM_Index/rsem_reference.seq", f"{cwd}/RSEM_Index/rsem_reference.transcripts.fa" task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output[0]:bd}' bash: container=container, expand= "${ }", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout' rsem-prepare-reference \ ${_input[0]} \ ${_output[1]:n} \ --gtf ${_input[1]} \ --num-threads ${numThreads} ###Output _____no_output_____ ###Markdown Generation of RefFlat file This file is needed for picard CollectRnaSeqMetrics module, which in turn >produces metrics describing the distribution of the bases within the transcripts. It calculates the total numbers and the fractions of nucleotides within specific genomic regions including untranslated regions (UTRs), introns, intergenic sequences (between discrete genes), and peptide-coding sequences (exons). This tool also determines the numbers of bases that pass quality filters that are specific to Illumina data (PF_BASES). 
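###Markdown The step below converts the GTF with gtfToGenePred and then rearranges the columns into refFlat layout. As a toy illustration on a single made-up genePredExt row, the awk/cut command simply prepends the gene symbol (column 12 of genePredExt) and keeps the first ten genePred columns, giving the 11-column refFlat format: ###Code
# Toy illustration of the column rearrangement performed by the awk/cut step below (made-up row).
genepred_ext_row = [
    "ENST00000456328", "chr1", "+", "11868", "14409", "14409", "14409",  # name, chrom, strand, txStart, txEnd, cdsStart, cdsEnd
    "3", "11868,12612,13220,", "12227,12721,14409,",                     # exonCount, exonStarts, exonEnds
    "0", "DDX11L1",                                                      # score, name2 (gene symbol)
]
ref_flat_row = [genepred_ext_row[11]] + genepred_ext_row[:10]  # geneName first, then the 10 genePred columns
print("\t".join(ref_flat_row))
###Output
_____no_output_____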
###Code [RefFlat_generation] parameter: hg_gtf = path input: hg_gtf output: f'{_input:n}.ref.flat' bash: container=container, expand= "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout' gtfToGenePred ${_input} ${_output}.tmp -genePredExt -geneNameAsName2 awk -F'\t' -v OFS="\t" '{$1=$12 OFS $1;}7' ${_output}.tmp | cut -f 1-11 > ${_output} rm ${_output}.tmp ###Output _____no_output_____ ###Markdown Reference data standardizationThis module provides reference data download, indexing and preprocessing (if necessary), in preparation for use throughout the pipeline.We have included the PDF document compiled by data standardization subgroup in the [minimal working example folder on Google Drive](https://drive.google.com/file/d/1R5sw5o8vqk_mbQQb4CGmtH3ldu1T3Vu0/view?usp=sharing). It contains the reference data to use for the project. OverviewThis module is based on the [TOPMed workflow from Broad](https://github.com/broadinstitute/gtex-pipeline/blob/master/TOPMed_RNAseq_pipeline.md).Workflows implemented include: Convert transcript feature file gff3 to gtf- Input: an uncompressed gff3 file.(i.e. can be view via cat)- Output: a gtf file. Collapse transcript features into genes- Input: a gtf file.- Output: a gtf file with collapesed gene model. Generate STAR index based on gtf and reference fasta- Input: a gtf file and an acompanying fasta file.- Output: A folder of STAR index. Generate RSEM index based on gtf and reference fasta- Input: a gtf file and an acompanying fasta file.- Output: A folder of RSEM index. Example commandsTo download reference data: ###Code sos run reference_data.ipynb download_hg_reference --cwd reference_data sos run reference_data.ipynb download_gene_annotation --cwd reference_data sos run reference_data.ipynb download_ercc_reference --cwd reference_data ###Output _____no_output_____ ###Markdown To format reference data: ###Code sos run reference_data.ipynb hg_reference \ --cwd reference_data \ --ercc-reference reference_data/ERCC92.fa \ --hg-reference reference_data/GRCh38_full_analysis_set_plus_decoy_hla.fa \ --container container/rna_quantification.sif sos run pipeline/reference_data.ipynb hg_gtf \ --cwd reference_data \ --hg-gtf /mnt/mfs/statgen/snuc_pseudo_bulk/data/reference_data/genes.gtf \ --hg-reference data/reference_data/GRCh38_full_analysis_set_plus_decoy_hla.noALT_noHLA_noDecoy_ERCC.fasta \ --containers containers/rna_quantification.sif -J 1 -q csg -c csg.yml & ###Output _____no_output_____ ###Markdown To format gene feature data: ###Code sos run reference_data.ipynb gene_annotation \ --cwd reference_data \ --ercc-gtf reference_data/ERCC92.gtf \ --hg-gtf reference_data/Homo_sapiens.GRCh38.103.chr.gtf \ --hg-reference reference_data/GRCh38_full_analysis_set_plus_decoy_hla.noALT_noHLA_noDecoy_ERCC.fasta \ --container container/rna_quantification.sif ###Output _____no_output_____ ###Markdown **Notice that for stranded RNA-seq protocol please add `--is-stranded` to the command above. 
More details can be found later in the document.** To generate STAR index using the GTF annotation file before gene model collapse: ###Code sos run reference_data.ipynb STAR_index \ --cwd reference_data \ --hg-reference reference_data/GRCh38_full_analysis_set_plus_decoy_hla.noALT_noHLA_noDecoy_ERCC.fasta \ --hg-gtf reference_data/Homo_sapiens.GRCh38.103.chr.reformatted.ERCC.gtf \ --container container/rna_quantification.sif \ --mem 40G ###Output _____no_output_____ ###Markdown **Notice that command above requires at least 40G of memory, and takes quite a while to complete**. To generate RSEM index: ###Code sos run reference_data.ipynb RSEM_index \ --cwd reference_data \ --hg-reference reference_data/GRCh38_full_analysis_set_plus_decoy_hla.noALT_noHLA_noDecoy_ERCC.fasta \ --hg-gtf reference_data/Homo_sapiens.GRCh38.103.chr.reformatted.ERCC.gtf \ --container container/rna_quantification.sif \ --mem 40G ###Output _____no_output_____ ###Markdown Command interface ###Code sos run reference_data.ipynb -h [global] # The output directory for generated files. parameter: cwd = path # For cluster jobs, number commands to run per job parameter: job_size = 1 # Wall clock time expected parameter: walltime = "5h" # Memory expected parameter: mem = "16G" # Number of threads parameter: numThreads = 8 # Software container option parameter: container = "" cwd = path(f'{cwd:a}') from sos.utils import expand_size ###Output _____no_output_____ ###Markdown Data download ###Code [download_hg_reference] output: f"{cwd:a}/GRCh38_full_analysis_set_plus_decoy_hla.fa" download: dest_dir = cwd ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/technical/reference/GRCh38_reference_genome/GRCh38_full_analysis_set_plus_decoy_hla.fa [download_gene_annotation] output: f"{cwd:a}/Homo_sapiens.GRCh38.103.chr.gtf" download: dest_dir = cwd, decompress=True http://ftp.ensembl.org/pub/release-103/gtf/homo_sapiens/Homo_sapiens.GRCh38.103.chr.gtf.gz [download_ercc_reference] output: f"{cwd:a}/ERCC92.gtf", f"{cwd:a}/ERCC92.fa" download: dest_dir = cwd, decompress=True https://tools.thermofisher.com/content/sfs/manuals/ERCC92.zip ###Output _____no_output_____ ###Markdown GFF3 to GTF formatting ###Code [gff3_to_gtf] parameter: gff3_file = path input: gff3_file output: f'{cwd}/{_input:bn}.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' bash: container=container, expand= "${ }", stderr = f'{_output:n}.stderr', stdout = f'{_output:n}.stdout' gffread ${_input} -T -o ${_output} ###Output _____no_output_____ ###Markdown HG reference file preprocessing1. Remove the HLA/ALT/Decoy record from the fasta2. Adding in ERCC information to the fasta file3. 
Generating index for the fasta file ###Code [hg_reference_1 (HLA ALT Decoy removal)] # Path to HG reference file parameter: hg_reference = path input: hg_reference output: f'{cwd}/{_input:bn}.noALT_noHLA_noDecoy.fasta' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' python: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container with open('${_input}', 'r') as fasta: contigs = fasta.read() contigs = contigs.split('>') contig_ids = [i.split(' ', 1)[0] for i in contigs] # exclude ALT, HLA and decoy contigs filtered_fasta = '>'.join([c for i,c in zip(contig_ids, contigs) if not (i[-4:]=='_alt' or i[:3]=='HLA' or i[-6:]=='_decoy')]) with open('${_output}', 'w') as fasta: fasta.write(filtered_fasta) [hg_reference_2 (merge with ERCC reference)] parameter: ercc_reference = path output: f'{cwd}/{_input:bn}_ERCC.fasta' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' bash: expand = "${ }", stderr = f'{_output[0]}.stderr', stdout = f'{_output}.stdout', container = container sed 's/ERCC-/ERCC_/g' ${ercc_reference} > ${ercc_reference:n}.patched.fa cat ${_input} ${ercc_reference:n}.patched.fa > ${_output} [hg_reference_3 (index the fasta file)] output: f'{cwd}/{_input:bn}.dict' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' bash: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container samtools faidx ${_input} java -jar /opt/picard-tools/picard.jar \ CreateSequenceDictionary \ R=${_input} \ O=${_output} ###Output _____no_output_____ ###Markdown Transcript and gene model reference processing This step modify the `gtf` file for following reasons:1. RSEM require GTF input to have the same chromosome name format (with `chr` prefix) as the fasta file. **although for STAR, this problem can be solved by the now commented --sjdbGTFchrPrefix "chr" option, we have to add `chr` to it for use with RSEM**. 2. Gene model collapsing script `collapse_annotation.py` from GTEx require the gtf have `transcript_type` instead `transcript_biotype` in its annotation. We rename it here, although **this problem can also be solved by modifying the collapse_annotation.py while building the docker, since we are doing 1 above we think it is better to add in another customization here.**3. Adding in ERCC information to the `gtf` reference.We may reimplement 1 and 2 if the problem with RSEM is solved, or when RSEM is no longer needed. 
###Code [hg_gtf_1 (add chr prefix to gtf file)] parameter: hg_reference = path parameter: hg_gtf = path input: hg_reference, hg_gtf output: f'{cwd}/{_input[1]:bn}.reformatted.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' R: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container library("readr") library("stringr") library("dplyr") options(scipen = 999) con <- file("${_input[0]}","r") fasta <- readLines(con,n=1) close(con) gtf = read_delim("${_input[1]}", "\t", col_names = F, comment = "#", col_types="ccccccccc") if(!str_detect(fasta,">chr")) { gtf_mod = gtf%>%mutate(X1 = str_remove_all(X1,"chr")) } else if (!any(str_detect(gtf$X1[1],"chr"))) { gtf_mod = gtf%>%mutate(X1 = paste0("chr",X1)) } else (gtf_mod = gtf) if(any(str_detect(gtf_mod$X9, "transcript_biotype"))) { gtf_mod = gtf_mod%>%mutate(X9 = str_replace_all(X9,"transcript_biotype","transcript_type")) } gtf_mod%>%write.table("${_output}",sep = "\t",quote = FALSE,col.names = F,row.names = F) ###Output _____no_output_____ ###Markdown **Text below is taken from https://github.com/broadinstitute/gtex-pipeline/tree/master/gene_model**Gene-level expression and eQTLs from the GTEx project are calculated based on a collapsed gene model (i.e., combining all isoforms of a gene into a single transcript), according to the following rules:1. Transcripts annotated as “retained_intron” or “read_through” are excluded. Additionally, transcripts that overlap with annotated read-through transcripts may be blacklisted (blacklists for GENCODE v19, 24 & 25 are provided in this repository; no transcripts were blacklisted for v26).2. The union of all exon intervals of each gene is calculated.3. Overlapping intervals between genes are excluded from all genes.The purpose of step 3 is primarily to exclude overlapping regions from genes annotated on both strands, which can't be unambiguously quantified from unstranded RNA-seq (GTEx samples were sequenced using an unstranded protocol). For stranded protocols, this step can be skipped by adding the `--collapse_only` flag.Further documentation is available on the [GTEx Portal](https://gtexportal.org/home/documentationPagestaticTextAnalysisMethods). 
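###Markdown To make rule 1 concrete, a schematic sketch (made-up records, not how collapse_annotation.py actually parses the annotation) of dropping transcripts flagged as retained_intron or read_through before collapsing: ###Code
# Schematic illustration of rule 1 only; collapse_annotation.py (used below) implements the real filtering.
transcripts = [
    {"id": "T1", "transcript_type": "protein_coding"},
    {"id": "T2", "transcript_type": "retained_intron"},  # excluded by rule 1
    {"id": "T3", "transcript_type": "read_through"},     # excluded by rule 1 (hypothetical annotation value)
]
excluded = {"retained_intron", "read_through"}
kept = [t["id"] for t in transcripts if t["transcript_type"] not in excluded]
kept  # ['T1']
###Output
_____no_output_____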
###Code [hg_gtf_2 (collapsed gene model)] parameter: is_stranded = bool output: f'{_input:n}{".collapse_only" if is_stranded else ""}.gene.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' bash: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container collapse_annotation.py ${"--collapse_only" if is_stranded else ""} ${_input} ${_output} [ercc_gtf (Preprocess ERCC gtf file)] parameter: ercc_gtf = path input: ercc_gtf output: f'{cwd}/{_input:bn}.genes.patched.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' python: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container with open('${_input}') as exon_gtf, open('${_output}', 'w') as gene_gtf: for line in exon_gtf: f = line.strip().split('\t') f[0] = f[0].replace('-','_') # required for RNA-SeQC/GATK (no '-' in contig name) attr = f[8] if attr[-1]==';': attr = attr[:-1] attr = dict([i.split(' ') for i in attr.replace('"','').split('; ')]) # add gene_name, gene_type attr['gene_name'] = attr['gene_id'] attr['gene_type'] = 'ercc_control' attr['gene_status'] = 'KNOWN' attr['level'] = 2 for k in ['id', 'type', 'name', 'status']: attr['transcript_'+k] = attr['gene_'+k] attr_str = [] for k in ['gene_id', 'transcript_id', 'gene_type', 'gene_status', 'gene_name', 'transcript_type', 'transcript_status', 'transcript_name']: attr_str.append('{0:s} "{1:s}";'.format(k, attr[k])) attr_str.append('{0:s} {1:d};'.format('level', attr['level'])) f[8] = ' '.join(attr_str) # write gene, transcript, exon gene_gtf.write('\t'.join(f[:2]+['gene']+f[3:])+'\n') gene_gtf.write('\t'.join(f[:2]+['transcript']+f[3:])+'\n') f[8] = ' '.join(attr_str[:2]) gene_gtf.write('\t'.join(f[:2]+['exon']+f[3:])+'\n') [gene_annotation] input: output_from("hg_gtf_1"), output_from("hg_gtf_2"), output_from("ercc_gtf") output: f'{cwd}/{_input[0]:bn}.ERCC.gtf', f'{cwd}/{_input[1]:bn}.ERCC.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' bash: expand = "${ }", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout', container = container cat ${_input[0]} ${_input[2]} > ${_output[0]} cat ${_input[1]} ${_input[2]} > ${_output[1]} ###Output _____no_output_____ ###Markdown Generating index file for `STAR` This step generate the index file for STAR alignment. This file just need to generate once and can be re-used. **At least 40GB of memory is needed**. Step Inputs* `gtf` and `fasta`: path to reference sequence. Both of them needs to be unzipped. `gtf` should be the one prior to collapse by gene.* `sjdbOverhang`: specifies the length of the genomic sequence around the annotated junction to be used in constructing the splice junctions database. Ideally, this length should be equal to the ReadLength-1, where ReadLength is the length of the reads. We use 100 here as recommended by the TOPMed pipeline. See here for [some additional discussions](https://groups.google.com/g/rna-star/c/h9oh10UlvhI/m/BfSPGivUHmsJ). Step Output* Indexing file stored in `{cwd}/STAR_index`, which will be used by `STAR` ###Code [STAR_index] parameter: hg_gtf = path parameter: hg_reference = path # Specifies the length of the genomic sequence around the annotated junction to be used in constructing the splice junctions database. 
Ideally, this length should be equal to the ReadLength-1, where ReadLength is the length of the reads. # Default choice follows from TOPMed pipeline recommendation. parameter: sjdbOverhang = 100 fail_if(expand_size(mem) < expand_size('40G'), msg = "At least 40GB of memory is required for this step") input: hg_reference, hg_gtf output: f"{cwd}/STAR_Index/chrName.txt", f"{cwd}/STAR_Index/Log.out", f"{cwd}/STAR_Index/transcriptInfo.tab", f"{cwd}/STAR_Index/exonGeTrInfo.tab", f"{cwd}/STAR_Index/SAindex", f"{cwd}/STAR_Index/SA", f"{cwd}/STAR_Index/genomeParameters.txt", f"{cwd}/STAR_Index/chrStart.txt", f"{cwd}/STAR_Index/sjdbList.out.tab", f"{cwd}/STAR_Index/exonInfo.tab", f"{cwd}/STAR_Index/sjdbList.fromGTF.out.tab", f"{cwd}/STAR_Index/chrLength.txt", f"{cwd}/STAR_Index/sjdbInfo.txt", f"{cwd}/STAR_Index/Genome", f"{cwd}/STAR_Index/chrNameLength.txt", f"{cwd}/STAR_Index/geneInfo.tab" task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output[0]:bd}' bash: container=container, expand= "${ }", stderr = f'{_output[0]:d}.stderr', stdout = f'{_output[0]:d}.stdout' STAR --runMode genomeGenerate \ --genomeDir ${_output:d} \ --genomeFastaFiles ${_input[0]} \ --sjdbGTFfile ${_input[1]} \ --sjdbOverhang ${sjdbOverhang} \ --runThreadN ${numThreads} #--sjdbGTFchrPrefix "chr" ###Output _____no_output_____ ###Markdown Generating index file for `RSEM`This step generate the indexing file for `RSEM`. This file just need to generate once. Step Inputs* `gtf` and `fasta`: path to reference sequence. `gtf` should be the one prior to collapse by gene.* `sjdbOverhang`: specifies the length of the genomic sequence around the annotated junction to be used in constructing the splice junctions database. Ideally, this length should be equal to the ReadLength-1, where ReadLength is the length of the reads. Step Outputs* Indexing file stored in `RSEM_index_dir`, which will be used by `RSEM` ###Code [RSEM_index] parameter: hg_gtf = path parameter: hg_reference = path input: hg_reference, hg_gtf output: f"{cwd}/RSEM_Index/rsem_reference.n2g.idx.fa", f"{cwd}/RSEM_Index/rsem_reference.grp", f"{cwd}/RSEM_Index/rsem_reference.idx.fa", f"{cwd}/RSEM_Index/rsem_reference.ti", f"{cwd}/RSEM_Index/rsem_reference.chrlist", f"{cwd}/RSEM_Index/rsem_reference.seq", f"{cwd}/RSEM_Index/rsem_reference.transcripts.fa" task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output[0]:bd}' bash: container=container, expand= "${ }", stderr = f'{_output[0]:d}.stderr', stdout = f'{_output[0]:d}.stdout' rsem-prepare-reference \ ${_input[0]} \ ${_output[1]:n} \ --gtf ${_input[1]} \ --num-threads ${numThreads} ###Output _____no_output_____ ###Markdown Reference data standardizationThis module provides reference data download, indexing and preprocessing (if necessary), in preparation for use throughout the pipeline.We have included the PDF document compiled by data standardization subgroup in the [on Google Drive](https://drive.google.com/file/d/1R5sw5o8vqk_mbQQb4CGmtH3ldu1T3Vu0/view?usp=sharing) as well as on [ADSP Dashboard](https://www.niagads.org/adsp/content/adspgcadgenomeresources-v2pdf). It contains the reference data to use for the project. OverviewThis module is based on the [TOPMed workflow from Broad](https://github.com/broadinstitute/gtex-pipeline/blob/master/TOPMed_RNAseq_pipeline.md). 
The reference data after we process it (details see Methods section and the rest of the analysis) can be found [in this folder on Google Drive](https://drive.google.com/drive/folders/19fmoII8yS7XE7HFcMU4OfvC2bL1zMD_P). Processed reference file for RNA-seq based expression quantification**We have decided to use these preprocessed reference files for RNA-seq expression quantification. They may not be applicable to other molecular phenotypes.**Specifically, the list of reference files to be used are:1. `GRCh38_full_analysis_set_plus_decoy_hla.noALT_noHLA_noDecoy_ERCC.{dict,fasta,fasta.fai}`2. `Homo_sapiens.GRCh38.103.chr.reformatted.collapse_only.gene.ERCC.gtf` for stranded protocol, and `Homo_sapiens.GRCh38.103.chr.reformatted.gene.ERCC.gtf` for unstranded protocol.3. Everything under `STAR_Index` folder4. Everything under `RSEM_Index` folder5. Optionally, for quality control, `gtf_ref.flat` MethodsWorkflows implemented include: Convert transcript feature file gff3 to gtf- Input: an uncompressed gff3 file.(i.e. can be view via cat)- Output: a gtf file. Collapse transcript features into genes- Input: a gtf file.- Output: a gtf file with collapesed gene model. Generate STAR index based on gtf and reference fasta- Input: a gtf file and an acompanying fasta file.- Output: A folder of STAR index. Generate RSEM index based on gtf and reference fasta- Input: a gtf file and an acompanying fasta file.- Output: A folder of RSEM index. Example commandsTo download reference data: ###Code sos run pipeline/reference_data.ipynb download_hg_reference --cwd reference_data & sos run pipeline/reference_data.ipynb download_gene_annotation --cwd reference_data & sos run pipeline/reference_data.ipynb download_ercc_reference --cwd reference_data & ###Output _____no_output_____ ###Markdown To format reference data: ###Code sos run reference_data.ipynb hg_reference \ --cwd reference_data \ --ercc-reference reference_data/ERCC92.fa \ --hg-reference reference_data/GRCh38_full_analysis_set_plus_decoy_hla.fa \ --container container/rna_quantification.sif sos run pipeline/reference_data.ipynb hg_gtf \ --cwd reference_data \ --hg-gtf /mnt/mfs/statgen/snuc_pseudo_bulk/data/reference_data/genes.gtf \ --hg-reference data/reference_data/GRCh38_full_analysis_set_plus_decoy_hla.noALT_noHLA_noDecoy_ERCC.fasta \ --containers containers/rna_quantification.sif -J 1 -q csg -c csg.yml & ###Output _____no_output_____ ###Markdown To format gene feature data: ###Code sos run reference_data.ipynb gene_annotation \ --cwd reference_data \ --ercc-gtf reference_data/ERCC92.gtf \ --hg-gtf reference_data/Homo_sapiens.GRCh38.103.chr.gtf \ --hg-reference reference_data/GRCh38_full_analysis_set_plus_decoy_hla.noALT_noHLA_noDecoy_ERCC.fasta \ --container container/rna_quantification.sif --stranded ###Output _____no_output_____ ###Markdown **Notice that for un-stranded RNA-seq protocol please use switch `--no-stranded` to the command above instead of `--stranded`. More details can be found later in the document.** To generate STAR index using the GTF annotation file before gene model collapse: ###Code sos run reference_data.ipynb STAR_index \ --cwd reference_data \ --hg-reference reference_data/GRCh38_full_analysis_set_plus_decoy_hla.noALT_noHLA_noDecoy_ERCC.fasta \ --hg-gtf reference_data/Homo_sapiens.GRCh38.103.chr.reformatted.ERCC.gtf \ --container container/rna_quantification.sif \ --mem 40G ###Output _____no_output_____ ###Markdown **Notice that command above requires at least 40G of memory, and takes quite a while to complete**. 
To generate RSEM index: ###Code sos run reference_data.ipynb RSEM_index \ --cwd reference_data \ --hg-reference reference_data/GRCh38_full_analysis_set_plus_decoy_hla.noALT_noHLA_noDecoy_ERCC.fasta \ --hg-gtf reference_data/Homo_sapiens.GRCh38.103.chr.reformatted.ERCC.gtf \ --container container/rna_quantification.sif \ --mem 40G ###Output _____no_output_____ ###Markdown Command interface ###Code sos run reference_data.ipynb -h [global] # The output directory for generated files. parameter: cwd = path # For cluster jobs, number commands to run per job parameter: job_size = 1 # Wall clock time expected parameter: walltime = "5h" # Memory expected parameter: mem = "16G" # Number of threads parameter: numThreads = 8 # Software container option parameter: container = "" cwd = path(f'{cwd:a}') from sos.utils import expand_size ###Output _____no_output_____ ###Markdown Data download ###Code [download_hg_reference] output: f"{cwd:a}/GRCh38_full_analysis_set_plus_decoy_hla.fa" download: dest_dir = cwd ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/technical/reference/GRCh38_reference_genome/GRCh38_full_analysis_set_plus_decoy_hla.fa [download_gene_annotation] output: f"{cwd:a}/Homo_sapiens.GRCh38.103.chr.gtf" download: dest_dir = cwd, decompress=True http://ftp.ensembl.org/pub/release-103/gtf/homo_sapiens/Homo_sapiens.GRCh38.103.chr.gtf.gz [download_ercc_reference] output: f"{cwd:a}/ERCC92.gtf", f"{cwd:a}/ERCC92.fa" download: dest_dir = cwd, decompress=True https://tools.thermofisher.com/content/sfs/manuals/ERCC92.zip ###Output _____no_output_____ ###Markdown GFF3 to GTF formatting ###Code [gff3_to_gtf] parameter: gff3_file = path input: gff3_file output: f'{cwd}/{_input:bn}.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' bash: container=container, expand= "${ }", stderr = f'{_output:n}.stderr', stdout = f'{_output:n}.stdout' gffread ${_input} -T -o ${_output} ###Output _____no_output_____ ###Markdown HG reference file preprocessing1. Remove the HLA/ALT/Decoy record from the fasta -- because none of the downstreams RNA-seq calling pipeline component can handle them properly.2. Adding in ERCC information to the fasta file -- even if ERCC is not included in the RNA-seq library it does not harm to add them.3. 
Generating index for the fasta file ###Code [hg_reference_1 (HLA ALT Decoy removal)] # Path to HG reference file parameter: hg_reference = path input: hg_reference output: f'{cwd}/{_input:bn}.noALT_noHLA_noDecoy.fasta' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' python: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container with open('${_input}', 'r') as fasta: contigs = fasta.read() contigs = contigs.split('>') contig_ids = [i.split(' ', 1)[0] for i in contigs] # exclude ALT, HLA and decoy contigs filtered_fasta = '>'.join([c for i,c in zip(contig_ids, contigs) if not (i[-4:]=='_alt' or i[:3]=='HLA' or i[-6:]=='_decoy')]) with open('${_output}', 'w') as fasta: fasta.write(filtered_fasta) [hg_reference_2 (merge with ERCC reference)] parameter: ercc_reference = path output: f'{cwd}/{_input:bn}_ERCC.fasta' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' bash: expand = "${ }", stderr = f'{_output[0]}.stderr', stdout = f'{_output}.stdout', container = container sed 's/ERCC-/ERCC_/g' ${ercc_reference} > ${ercc_reference:n}.patched.fa cat ${_input} ${ercc_reference:n}.patched.fa > ${_output} [hg_reference_3 (index the fasta file)] output: f'{cwd}/{_input:bn}.dict' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' bash: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container samtools faidx ${_input} java -jar /opt/picard-tools/picard.jar \ CreateSequenceDictionary \ R=${_input} \ O=${_output} ###Output _____no_output_____ ###Markdown Transcript and gene model reference processing This step modify the `gtf` file for following reasons:1. RSEM require GTF input to have the same chromosome name format (with `chr` prefix) as the fasta file. **although for STAR, this problem can be solved by the now commented --sjdbGTFchrPrefix "chr" option, we have to add `chr` to it for use with RSEM**. 2. Gene model collapsing script `collapse_annotation.py` from GTEx require the gtf have `transcript_type` instead `transcript_biotype` in its annotation. We rename it here, although **this problem can also be solved by modifying the collapse_annotation.py while building the docker, since we are doing 1 above we think it is better to add in another customization here.**3. Adding in ERCC information to the `gtf` reference.We may reimplement 1 and 2 if the problem with RSEM is solved, or when RSEM is no longer needed. 
###Code [hg_gtf_1 (add chr prefix to gtf file)] parameter: hg_reference = path parameter: hg_gtf = path input: hg_reference, hg_gtf output: f'{cwd}/{_input[1]:bn}.reformatted.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' R: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container library("readr") library("stringr") library("dplyr") options(scipen = 999) con <- file("${_input[0]}","r") fasta <- readLines(con,n=1) close(con) gtf = read_delim("${_input[1]}", "\t", col_names = F, comment = "#", col_types="ccccccccc") if(!str_detect(fasta,">chr")) { gtf_mod = gtf%>%mutate(X1 = str_remove_all(X1,"chr")) } else if (!any(str_detect(gtf$X1[1],"chr"))) { gtf_mod = gtf%>%mutate(X1 = paste0("chr",X1)) } else (gtf_mod = gtf) if(any(str_detect(gtf_mod$X9, "transcript_biotype"))) { gtf_mod = gtf_mod%>%mutate(X9 = str_replace_all(X9,"transcript_biotype","transcript_type")) } gtf_mod%>%write.table("${_output}",sep = "\t",quote = FALSE,col.names = F,row.names = F) ###Output _____no_output_____ ###Markdown **Text below is taken from https://github.com/broadinstitute/gtex-pipeline/tree/master/gene_model**Gene-level expression and eQTLs from the GTEx project are calculated based on a collapsed gene model (i.e., combining all isoforms of a gene into a single transcript), according to the following rules:1. Transcripts annotated as “retained_intron” or “read_through” are excluded. Additionally, transcripts that overlap with annotated read-through transcripts may be blacklisted (blacklists for GENCODE v19, 24 & 25 are provided in this repository; no transcripts were blacklisted for v26).2. The union of all exon intervals of each gene is calculated.3. Overlapping intervals between genes are excluded from all genes.The purpose of step 3 is primarily to exclude overlapping regions from genes annotated on both strands, which can't be unambiguously quantified from unstranded RNA-seq (GTEx samples were sequenced using an unstranded protocol). For stranded protocols, this step can be skipped by adding the `--collapse_only` flag.Further documentation is available on the [GTEx Portal](https://gtexportal.org/home/documentationPagestaticTextAnalysisMethods). 
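As a small illustration of step 2 above (taking the union of all exon intervals of each gene), the sketch below merges a set of made-up, overlapping exon coordinates; it is not the GTEx collapse_annotation.py implementation, only the interval-union idea behind it. ###Code
# Toy illustration of the exon-interval union behind the collapsed gene model.
def merge_intervals(intervals):
    """Merge overlapping [start, end] intervals into their union."""
    merged = []
    for start, end in sorted(intervals):
        if merged and start <= merged[-1][1]:
            merged[-1][1] = max(merged[-1][1], end)  # extend the previous interval
        else:
            merged.append([start, end])
    return merged

# Exon coordinates from three hypothetical isoforms of the same gene
exons = [(100, 200), (150, 250), (300, 400), (390, 450)]
print(merge_intervals(exons))  # [[100, 250], [300, 450]]
###Output
_____no_output_____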
###Code [hg_gtf_2 (collapsed gene model)] parameter: stranded = bool output: f'{_input:n}{".collapse_only" if stranded else ""}.gene.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' bash: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container collapse_annotation.py ${"--collapse_only" if stranded else ""} ${_input} ${_output} [ercc_gtf (Preprocess ERCC gtf file)] parameter: ercc_gtf = path input: ercc_gtf output: f'{cwd}/{_input:bn}.genes.patched.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' python: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container with open('${_input}') as exon_gtf, open('${_output}', 'w') as gene_gtf: for line in exon_gtf: f = line.strip().split('\t') f[0] = f[0].replace('-','_') # required for RNA-SeQC/GATK (no '-' in contig name) attr = f[8] if attr[-1]==';': attr = attr[:-1] attr = dict([i.split(' ') for i in attr.replace('"','').split('; ')]) # add gene_name, gene_type attr['gene_name'] = attr['gene_id'] attr['gene_type'] = 'ercc_control' attr['gene_status'] = 'KNOWN' attr['level'] = 2 for k in ['id', 'type', 'name', 'status']: attr['transcript_'+k] = attr['gene_'+k] attr_str = [] for k in ['gene_id', 'transcript_id', 'gene_type', 'gene_status', 'gene_name', 'transcript_type', 'transcript_status', 'transcript_name']: attr_str.append('{0:s} "{1:s}";'.format(k, attr[k])) attr_str.append('{0:s} {1:d};'.format('level', attr['level'])) f[8] = ' '.join(attr_str) # write gene, transcript, exon gene_gtf.write('\t'.join(f[:2]+['gene']+f[3:])+'\n') gene_gtf.write('\t'.join(f[:2]+['transcript']+f[3:])+'\n') f[8] = ' '.join(attr_str[:2]) gene_gtf.write('\t'.join(f[:2]+['exon']+f[3:])+'\n') [gene_annotation] input: output_from("hg_gtf_1"), output_from("hg_gtf_2"), output_from("ercc_gtf") output: f'{cwd}/{_input[0]:bn}.ERCC.gtf', f'{cwd}/{_input[1]:bn}.ERCC.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' bash: expand = "${ }", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout', container = container cat ${_input[0]} ${_input[2]} > ${_output[0]} cat ${_input[1]} ${_input[2]} > ${_output[1]} ###Output _____no_output_____ ###Markdown Generating index file for `STAR` This step generate the index file for STAR alignment. This file just need to generate once and can be re-used. **At least 40GB of memory is needed**. Step Inputs* `gtf` and `fasta`: path to reference sequence. Both of them needs to be unzipped. `gtf` should be the one prior to collapse by gene.* `sjdbOverhang`: specifies the length of the genomic sequence around the annotated junction to be used in constructing the splice junctions database. Ideally, this length should be equal to the ReadLength-1, where ReadLength is the length of the reads. We use 100 here as recommended by the TOPMed pipeline. See here for [some additional discussions](https://groups.google.com/g/rna-star/c/h9oh10UlvhI/m/BfSPGivUHmsJ). Step Output* Indexing file stored in `{cwd}/STAR_index`, which will be used by `STAR` ###Code [STAR_index] parameter: hg_gtf = path parameter: hg_reference = path # Specifies the length of the genomic sequence around the annotated junction to be used in constructing the splice junctions database. Ideally, this length should be equal to the ReadLength-1, where ReadLength is the length of the reads. 
# Default choice follows from TOPMed pipeline recommendation. parameter: sjdbOverhang = 100 fail_if(expand_size(mem) < expand_size('40G'), msg = "At least 40GB of memory is required for this step") input: hg_reference, hg_gtf output: f"{cwd}/STAR_Index/chrName.txt", f"{cwd}/STAR_Index/Log.out", f"{cwd}/STAR_Index/transcriptInfo.tab", f"{cwd}/STAR_Index/exonGeTrInfo.tab", f"{cwd}/STAR_Index/SAindex", f"{cwd}/STAR_Index/SA", f"{cwd}/STAR_Index/genomeParameters.txt", f"{cwd}/STAR_Index/chrStart.txt", f"{cwd}/STAR_Index/sjdbList.out.tab", f"{cwd}/STAR_Index/exonInfo.tab", f"{cwd}/STAR_Index/sjdbList.fromGTF.out.tab", f"{cwd}/STAR_Index/chrLength.txt", f"{cwd}/STAR_Index/sjdbInfo.txt", f"{cwd}/STAR_Index/Genome", f"{cwd}/STAR_Index/chrNameLength.txt", f"{cwd}/STAR_Index/geneInfo.tab" task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output[0]:bd}' bash: container=container, expand= "${ }", stderr = f'{_output[1]:n}.stderr', stdout = f'{_output[1]:n}.stdout' STAR --runMode genomeGenerate \ --genomeDir ${_output:d} \ --genomeFastaFiles ${_input[0]} \ --sjdbGTFfile ${_input[1]} \ --sjdbOverhang ${sjdbOverhang} \ --runThreadN ${numThreads} #--sjdbGTFchrPrefix "chr" ###Output _____no_output_____ ###Markdown Generating index file for `RSEM`This step generate the indexing file for `RSEM`. This file just need to generate once. Step Inputs* `gtf` and `fasta`: path to reference sequence. `gtf` should be the one prior to collapse by gene.* `sjdbOverhang`: specifies the length of the genomic sequence around the annotated junction to be used in constructing the splice junctions database. Ideally, this length should be equal to the ReadLength-1, where ReadLength is the length of the reads. Step Outputs* Indexing file stored in `RSEM_index_dir`, which will be used by `RSEM` ###Code [RSEM_index] parameter: hg_gtf = path parameter: hg_reference = path input: hg_reference, hg_gtf output: f"{cwd}/RSEM_Index/rsem_reference.n2g.idx.fa", f"{cwd}/RSEM_Index/rsem_reference.grp", f"{cwd}/RSEM_Index/rsem_reference.idx.fa", f"{cwd}/RSEM_Index/rsem_reference.ti", f"{cwd}/RSEM_Index/rsem_reference.chrlist", f"{cwd}/RSEM_Index/rsem_reference.seq", f"{cwd}/RSEM_Index/rsem_reference.transcripts.fa" task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output[0]:bd}' bash: container=container, expand= "${ }", stderr = f'{_output[1]:n}.stderr', stdout = f'{_output[1]:n}.stdout' rsem-prepare-reference \ ${_input[0]} \ ${_output[1]:n} \ --gtf ${_input[1]} \ --num-threads ${numThreads} ###Output _____no_output_____ ###Markdown Generation of RefFlat file This file is needed for picard CollectRnaSeqMetrics module, which in turn >produces metrics describing the distribution of the bases within the transcripts. It calculates the total numbers and the fractions of nucleotides within specific genomic regions including untranslated regions (UTRs), introns, intergenic sequences (between discrete genes), and peptide-coding sequences (exons). This tool also determines the numbers of bases that pass quality filters that are specific to Illumina data (PF_BASES). 
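The file written by the step below follows the standard 11-column refFlat layout (a genePred record with the gene name prepended, then cut to 11 fields by the awk command). The sketch below simply labels those columns on a made-up record as a reminder of what is handed to Picard; the example values are invented. ###Code
# Reminder of the refFlat column layout produced by the RefFlat_generation step below.
# The example record is made up.
REFFLAT_COLUMNS = [
    "geneName", "name", "chrom", "strand", "txStart", "txEnd",
    "cdsStart", "cdsEnd", "exonCount", "exonStarts", "exonEnds",
]
example = "GENE1\tTRANSCRIPT1\tchr1\t+\t11868\t14409\t11868\t11868\t3\t11868,12612,13220,\t12227,12721,14409,"
print(dict(zip(REFFLAT_COLUMNS, example.split("\t"))))
###Output
_____no_output_____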
###Code [RefFlat_generation] parameter: hg_gtf = path input: hg_gtf output: f'{_input:n}.ref.flat' bash: container=container, expand= "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout' gtfToGenePred ${_input} ${_output}.tmp -genePredExt -geneNameAsName2 awk -F'\t' -v OFS="\t" '{$1=$12 OFS $1;}7' ${_output}.tmp | cut -f 1-11 > ${_output} rm ${_output}.tmp ###Output _____no_output_____ ###Markdown Reference data standardizationThis module provides reference data download, indexing and preprocessing (if necessary), in preparation for use throughout the pipeline.We have included the PDF document compiled by data standardization subgroup in the [minimal working example folder on Google Drive](https://drive.google.com/file/d/1R5sw5o8vqk_mbQQb4CGmtH3ldu1T3Vu0/view?usp=sharing). It contains the reference data to use for the project. OverviewThis module is based on the [TOPMed workflow from Broad](https://github.com/broadinstitute/gtex-pipeline/blob/master/TOPMed_RNAseq_pipeline.md).Workflows implemented include: Convert transcript feature file gff3 to gtf- Input: an uncompressed gff3 file.(i.e. can be view via cat)- Output: a gtf file. Collapse transcript features into genes- Input: a gtf file.- Output: a gtf file with collapesed gene model. Generate STAR index based on gtf and reference fasta- Input: a gtf file and an acompanying fasta file.- Output: A folder of STAR index. Generate RSEM index based on gtf and reference fasta- Input: a gtf file and an acompanying fasta file.- Output: A folder of RSEM index. Example commandsTo download reference data: ###Code sos run reference_data.ipynb download_hg_reference --cwd reference_data sos run reference_data.ipynb download_gene_annotation --cwd reference_data sos run reference_data.ipynb download_ercc_reference --cwd reference_data ###Output _____no_output_____ ###Markdown To format reference data: ###Code sos run reference_data.ipynb hg_reference \ --cwd reference_data \ --ercc-reference reference_data/ERCC92.fa \ --hg-reference reference_data/GRCh38_full_analysis_set_plus_decoy_hla.fa \ --container container/rna_quantification.sif sos run pipeline/reference_data.ipynb hg_gtf \ --cwd reference_data \ --hg-gtf /mnt/mfs/statgen/snuc_pseudo_bulk/data/reference_data/genes.gtf \ --hg-reference data/reference_data/GRCh38_full_analysis_set_plus_decoy_hla.noALT_noHLA_noDecoy_ERCC.fasta \ --containers containers/rna_quantification.sif -J 1 -q csg -c csg.yml & ###Output _____no_output_____ ###Markdown To format gene feature data: ###Code sos run reference_data.ipynb gene_annotation \ --cwd reference_data \ --ercc-gtf reference_data/ERCC92.gtf \ --hg-gtf reference_data/Homo_sapiens.GRCh38.103.chr.gtf \ --hg-reference reference_data/GRCh38_full_analysis_set_plus_decoy_hla.noALT_noHLA_noDecoy_ERCC.fasta \ --container container/rna_quantification.sif ###Output _____no_output_____ ###Markdown **Notice that for stranded RNA-seq protocol please add `--is-stranded` to the command above. 
More details can be found later in the document.** To generate STAR index using the GTF annotation file before gene model collapse: ###Code sos run reference_data.ipynb STAR_index \ --cwd reference_data \ --hg-reference reference_data/GRCh38_full_analysis_set_plus_decoy_hla.noALT_noHLA_noDecoy_ERCC.fasta \ --hg-gtf reference_data/Homo_sapiens.GRCh38.103.chr.reformatted.ERCC.gtf \ --container container/rna_quantification.sif \ --mem 40G ###Output _____no_output_____ ###Markdown **Notice that command above requires at least 40G of memory, and takes quite a while to complete**. To generate RSEM index: ###Code sos run reference_data.ipynb RSEM_index \ --cwd reference_data \ --hg-reference reference_data/GRCh38_full_analysis_set_plus_decoy_hla.noALT_noHLA_noDecoy_ERCC.fasta \ --hg-gtf reference_data/Homo_sapiens.GRCh38.103.chr.reformatted.ERCC.gtf \ --container container/rna_quantification.sif \ --mem 40G ###Output _____no_output_____ ###Markdown Command interface ###Code sos run reference_data.ipynb -h [global] # The output directory for generated files. parameter: cwd = path # For cluster jobs, number commands to run per job parameter: job_size = 1 # Wall clock time expected parameter: walltime = "5h" # Memory expected parameter: mem = "16G" # Number of threads parameter: numThreads = 8 # Software container option parameter: container = "" cwd = path(f'{cwd:a}') from sos.utils import expand_size ###Output _____no_output_____ ###Markdown Data download ###Code [download_hg_reference] output: f"{cwd:a}/GRCh38_full_analysis_set_plus_decoy_hla.fa" download: dest_dir = cwd ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/technical/reference/GRCh38_reference_genome/GRCh38_full_analysis_set_plus_decoy_hla.fa [download_gene_annotation] output: f"{cwd:a}/Homo_sapiens.GRCh38.103.chr.gtf" download: dest_dir = cwd, decompress=True http://ftp.ensembl.org/pub/release-103/gtf/homo_sapiens/Homo_sapiens.GRCh38.103.chr.gtf.gz [download_ercc_reference] output: f"{cwd:a}/ERCC92.gtf", f"{cwd:a}/ERCC92.fa" download: dest_dir = cwd, decompress=True https://tools.thermofisher.com/content/sfs/manuals/ERCC92.zip ###Output _____no_output_____ ###Markdown GFF3 to GTF formatting ###Code [gff3_to_gtf] parameter: gff3_file = path input: gff3_file output: f'{cwd}/{_input:bn}.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' bash: container=container, expand= "${ }", stderr = f'{_output:n}.stderr', stdout = f'{_output:n}.stdout' gffread ${_input} -T -o ${_output} ###Output _____no_output_____ ###Markdown HG reference file preprocessing1. Remove the HLA/ALT/Decoy record from the fasta2. Adding in ERCC information to the fasta file3. 
Generating index for the fasta file ###Code
[hg_reference_1 (HLA ALT Decoy removal)]
# Path to HG reference file
parameter: hg_reference = path
input: hg_reference
output: f'{cwd}/{_input:bn}.noALT_noHLA_noDecoy.fasta'
task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}'
python: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container
    with open('${_input}', 'r') as fasta:
        contigs = fasta.read()
    contigs = contigs.split('>')
    contig_ids = [i.split(' ', 1)[0] for i in contigs]
    # exclude ALT, HLA and decoy contigs
    filtered_fasta = '>'.join([c for i,c in zip(contig_ids, contigs) if not (i[-4:]=='_alt' or i[:3]=='HLA' or i[-6:]=='_decoy')])
    with open('${_output}', 'w') as fasta:
        fasta.write(filtered_fasta)

[hg_reference_2 (merge with ERCC reference)]
parameter: ercc_reference = path
output: f'{cwd}/{_input:bn}_ERCC.fasta'
task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}'
bash: expand = "${ }", stderr = f'{_output[0]}.stderr', stdout = f'{_output}.stdout', container = container
    sed 's/ERCC-/ERCC_/g' ${ercc_reference} > ${ercc_reference:n}.patched.fa
    cat ${_input} ${ercc_reference:n}.patched.fa > ${_output}

[hg_reference_3 (index the fasta file)]
output: f'{cwd}/{_input:bn}.dict'
task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}'
bash: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container
    samtools faidx ${_input}
    java -jar /opt/picard-tools/picard.jar \
        CreateSequenceDictionary \
        R=${_input} \
        O=${_output}
###Output
_____no_output_____
###Markdown
Transcript and gene model reference processing
This step modifies the `gtf` file for the following reasons:
1. RSEM requires the GTF input to have the same chromosome name format (with `chr` prefix) as the fasta file. **Although for STAR this problem can be solved by the now-commented --sjdbGTFchrPrefix "chr" option, we still have to add `chr` for use with RSEM**.
2. The gene model collapsing script `collapse_annotation.py` from GTEx requires the gtf to have `transcript_type` instead of `transcript_biotype` in its annotation. We rename it here, although **this problem could also be solved by modifying collapse_annotation.py while building the docker image; since we are already doing 1 above, we think it is better to add this customization here as well.**
3. Adding in ERCC information to the `gtf` reference.
We may revisit 1 and 2 if the problem with RSEM is solved, or when RSEM is no longer needed.
###Code [hg_gtf_1 (add chr prefix to gtf file)] parameter: hg_reference = path parameter: hg_gtf = path input: hg_reference, hg_gtf output: f'{cwd}/{_input[1]:bn}.reformatted.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' R: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container library("readr") library("stringr") library("dplyr") options(scipen = 999) con <- file("${_input[0]}","r") fasta <- readLines(con,n=1) close(con) gtf = read_delim("${_input[1]}", "\t", col_names = F, comment = "#", col_types="ccccccccc") if(!str_detect(fasta,">chr")) { gtf_mod = gtf%>%mutate(X1 = str_remove_all(X1,"chr")) } else if (!any(str_detect(gtf$X1[1],"chr"))) { gtf_mod = gtf%>%mutate(X1 = paste0("chr",X1)) } else (gtf_mod = gtf) if(any(str_detect(gtf_mod$X9, "transcript_biotype"))) { gtf_mod = gtf_mod%>%mutate(X9 = str_replace_all(X9,"transcript_biotype","transcript_type")) } gtf_mod%>%write.table("${_output}",sep = "\t",quote = FALSE,col.names = F,row.names = F) ###Output _____no_output_____ ###Markdown **Text below is taken from https://github.com/broadinstitute/gtex-pipeline/tree/master/gene_model**Gene-level expression and eQTLs from the GTEx project are calculated based on a collapsed gene model (i.e., combining all isoforms of a gene into a single transcript), according to the following rules:1. Transcripts annotated as “retained_intron” or “read_through” are excluded. Additionally, transcripts that overlap with annotated read-through transcripts may be blacklisted (blacklists for GENCODE v19, 24 & 25 are provided in this repository; no transcripts were blacklisted for v26).2. The union of all exon intervals of each gene is calculated.3. Overlapping intervals between genes are excluded from all genes.The purpose of step 3 is primarily to exclude overlapping regions from genes annotated on both strands, which can't be unambiguously quantified from unstranded RNA-seq (GTEx samples were sequenced using an unstranded protocol). For stranded protocols, this step can be skipped by adding the `--collapse_only` flag.Further documentation is available on the [GTEx Portal](https://gtexportal.org/home/documentationPagestaticTextAnalysisMethods). 
###Code [hg_gtf_2 (collapsed gene model)] parameter: is_stranded = False output: f'{_input:n}{".collapse_only" if is_stranded else ""}.gene.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' bash: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container collapse_annotation.py ${"--collapse_only" if is_stranded else ""} ${_input} ${_output} [ercc_gtf (Preprocess ERCC gtf file)] parameter: ercc_gtf = path input: ercc_gtf output: f'{cwd}/{_input:bn}.genes.patched.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' python: expand = "${ }", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout', container = container with open('${_input}') as exon_gtf, open('${_output}', 'w') as gene_gtf: for line in exon_gtf: f = line.strip().split('\t') f[0] = f[0].replace('-','_') # required for RNA-SeQC/GATK (no '-' in contig name) attr = f[8] if attr[-1]==';': attr = attr[:-1] attr = dict([i.split(' ') for i in attr.replace('"','').split('; ')]) # add gene_name, gene_type attr['gene_name'] = attr['gene_id'] attr['gene_type'] = 'ercc_control' attr['gene_status'] = 'KNOWN' attr['level'] = 2 for k in ['id', 'type', 'name', 'status']: attr['transcript_'+k] = attr['gene_'+k] attr_str = [] for k in ['gene_id', 'transcript_id', 'gene_type', 'gene_status', 'gene_name', 'transcript_type', 'transcript_status', 'transcript_name']: attr_str.append('{0:s} "{1:s}";'.format(k, attr[k])) attr_str.append('{0:s} {1:d};'.format('level', attr['level'])) f[8] = ' '.join(attr_str) # write gene, transcript, exon gene_gtf.write('\t'.join(f[:2]+['gene']+f[3:])+'\n') gene_gtf.write('\t'.join(f[:2]+['transcript']+f[3:])+'\n') f[8] = ' '.join(attr_str[:2]) gene_gtf.write('\t'.join(f[:2]+['exon']+f[3:])+'\n') [gene_annotation] input: output_from("hg_gtf_1"), output_from("hg_gtf_2"), output_from("ercc_gtf") output: f'{cwd}/{_input[0]:bn}.ERCC.gtf', f'{cwd}/{_input[1]:bn}.ERCC.gtf' task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output:bn}' bash: expand = "${ }", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout', container = container cat ${_input[0]} ${_input[2]} > ${_output[0]} cat ${_input[1]} ${_input[2]} > ${_output[1]} ###Output _____no_output_____ ###Markdown Generating index file for `STAR` This step generate the index file for STAR alignment. This file just need to generate once and can be re-used. **At least 40GB of memory is needed**. Step Inputs* `gtf` and `fasta`: path to reference sequence. Both of them needs to be unzipped. `gtf` should be the one prior to collapse by gene.* `sjdbOverhang`: specifies the length of the genomic sequence around the annotated junction to be used in constructing the splice junctions database. Ideally, this length should be equal to the ReadLength-1, where ReadLength is the length of the reads. We use 100 here as recommended by the TOPMed pipeline. See here for [some additional discussions](https://groups.google.com/g/rna-star/c/h9oh10UlvhI/m/BfSPGivUHmsJ). Step Output* Indexing file stored in `{cwd}/STAR_index`, which will be used by `STAR` ###Code [STAR_index] parameter: hg_gtf = path parameter: hg_reference = path # Specifies the length of the genomic sequence around the annotated junction to be used in constructing the splice junctions database. 
Ideally, this length should be equal to the ReadLength-1, where ReadLength is the length of the reads. # Default choice follows from TOPMed pipeline recommendation. parameter: sjdbOverhang = 100 fail_if(expand_size(mem) < expand_size('40G'), msg = "At least 40GB of memory is required for this step") input: hg_reference, hg_gtf output: f"{cwd}/STAR_Index/chrName.txt", f"{cwd}/STAR_Index/Log.out", f"{cwd}/STAR_Index/transcriptInfo.tab", f"{cwd}/STAR_Index/exonGeTrInfo.tab", f"{cwd}/STAR_Index/SAindex", f"{cwd}/STAR_Index/SA", f"{cwd}/STAR_Index/genomeParameters.txt", f"{cwd}/STAR_Index/chrStart.txt", f"{cwd}/STAR_Index/sjdbList.out.tab", f"{cwd}/STAR_Index/exonInfo.tab", f"{cwd}/STAR_Index/sjdbList.fromGTF.out.tab", f"{cwd}/STAR_Index/chrLength.txt", f"{cwd}/STAR_Index/sjdbInfo.txt", f"{cwd}/STAR_Index/Genome", f"{cwd}/STAR_Index/chrNameLength.txt", f"{cwd}/STAR_Index/geneInfo.tab" task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output[0]:bd}' bash: container=container, expand= "${ }", stderr = f'{_output[0]:d}.stderr', stdout = f'{_output[0]:d}.stdout' STAR --runMode genomeGenerate \ --genomeDir ${_output:d} \ --genomeFastaFiles ${_input[0]} \ --sjdbGTFfile ${_input[1]} \ --sjdbOverhang ${sjdbOverhang} \ --runThreadN ${numThreads} #--sjdbGTFchrPrefix "chr" ###Output _____no_output_____ ###Markdown Generating index file for `RSEM`This step generate the indexing file for `RSEM`. This file just need to generate once. Step Inputs* `gtf` and `fasta`: path to reference sequence. `gtf` should be the one prior to collapse by gene.* `sjdbOverhang`: specifies the length of the genomic sequence around the annotated junction to be used in constructing the splice junctions database. Ideally, this length should be equal to the ReadLength-1, where ReadLength is the length of the reads. Step Outputs* Indexing file stored in `RSEM_index_dir`, which will be used by `RSEM` ###Code [RSEM_index] parameter: hg_gtf = path parameter: hg_reference = path input: hg_reference, hg_gtf output: f"{cwd}/RSEM_Index/rsem_reference.n2g.idx.fa", f"{cwd}/RSEM_Index/rsem_reference.grp", f"{cwd}/RSEM_Index/rsem_reference.idx.fa", f"{cwd}/RSEM_Index/rsem_reference.ti", f"{cwd}/RSEM_Index/rsem_reference.chrlist", f"{cwd}/RSEM_Index/rsem_reference.seq", f"{cwd}/RSEM_Index/rsem_reference.transcripts.fa" task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, tags = f'{step_name}_{_output[0]:bd}' bash: container=container, expand= "${ }", stderr = f'{_output[0]:d}.stderr', stdout = f'{_output[0]:d}.stdout' rsem-prepare-reference \ ${_input[0]} \ ${_output:nn} \ --gtf ${_input[1]} \ --num-threads ${numThreads} ###Output _____no_output_____
Fashion_MNIST_y_Convoluciones_entrenamiento,_evaluacion_y_descarga.ipynb
###Markdown Clasificación de tipos de ropa con Redes Neuronales Convolucionales (CNN) Esta Guia entrena un modelo de red neuronal para clasificar imagenes de ropa como, tennis y camisetas. No hay problema sino entiende todos los detalles; es un repaso rapido de un programa completo de Tensorflow con los detalles explicados a medida que avanza.Esta Guia usa [tf.keras](https://www.tensorflow.org/guide/keras), un API de alto nivel para construir y entrenar modelos en Tensorflow. ###Code # Antes de nada, importamos las librerías que nos puedan hacer falta import tensorflow as tf # Para crear modelos de Aprendizaje Profundo import matplotlib.pyplot as plt # Para graficar imágenes y gráficas de evaluación import numpy as np # Nos permite trabajar con estructuras vectoriales eficientes # que son además las que emplea tensorflow import math # Operaciones matemáticas # métodos para calcular métricas y matriz de confusión from sklearn.metrics import confusion_matrix, classification_report import itertools # funciones eficientes sobre elementos iterables ###Output _____no_output_____ ###Markdown El dataset de moda de MNIST Fashion MNIST contiene mas de 70,000 imagenes divididas en 10 categorias de prendas. Al igual que ocurria con el dataset clásico MNIST, cada imagen tiene una resolución de 28 por 28 pixeles.En tanto que las imágenes son más complejas, su clasificación por una red es más compleja. Por ello, vamos a usar una Red Neuronal con Convoluciones para resolver el problema. ###Code fashion_mnist = tf.keras.datasets.fashion_mnist (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data() class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] n_samples_train = len(x_train) n_samples_test = len(x_test) n_samples = n_samples_train + n_samples_test print("La relación de muestras entre los dos subconjuntos es del {:.2%} en " "subconjunto de entrenamiento y del {:.2%}, de prueba".format( n_samples_train/n_samples, n_samples_test/n_samples)) # Las imágenes están en escala de grises, con una resolución de 256 valores. Es # común en problemas de clasificación realizar una normalización y escalar ese # rango de valores al intervalo [0,1], si bien no es necesario. Puede probarse el # realizar el entrenamiento sin hacer esta conversión. x_train, x_test = x_train / 255.0, x_test / 255.0 # Una pequeña función para graficar muestras aleatorias def random_sample_plot(n_samples, mnist_set): rnd_index = np.random.choice(mnist_set.shape[0], size=(n_samples,), replace=False) subset = mnist_set[rnd_index] grid_width = 5 grid_length = math.ceil(n_samples/grid_width) plt.figure(figsize=(15,10)) # specifying the overall grid size for i in range(n_samples): plt.subplot(grid_length,grid_width,i+1) # the number of images in the grid is 5*5 (25) plt.imshow(subset[i], cmap='gray') # Una muestra del MNIST random_sample_plot(20, x_train) ###Output _____no_output_____ ###Markdown Un modelo de CNN sencilloConstruir la red neuronal requiere configurar las capas del modelo y luego compilar el modelo. ###Code x_train = x_train.reshape((x_train.shape[0], 28, 28, 1)) x_test = x_test.reshape((x_test.shape[0], 28, 28, 1)) model = tf.keras.models.Sequential() # El modelo secuencial vacio inicialmente model.add(tf.keras.Input(shape=(28, 28, 1))) # Podemos describir la forma de la entrada # como si de una capa se tratase. 
# Capas de convolución y pooling model.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, padding="same", activation="relu")) model.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding='valid')) #model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=3, padding="same", activation="relu")) #model.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding='valid')) model.add(tf.keras.layers.Flatten()) # Esta capa "aplana" la imagen, es decir, pasa # la matriz de valores a un vector unidimensional. model.add(tf.keras.layers.Dense(128, activation='relu')) # Una capa Densa o Fully-Connected. model.add(tf.keras.layers.Dropout(0.2)) # Empleamos Dropout, una técnica que inhabilita # un % de nodos de la capa anterior de forma aleatoria # durante el entrenamiento, que ayuda a la generalización # del modelo evitando el sobre ajuste de los nodos a los # ejemplos particulares usados. model.add(tf.keras.layers.Dense(10, activation='softmax')) # Una última capa Densa, ya como salida. # Se la ha dotado de 10 nodos, tantos como # dígitos diferentes consideramos para su # clasificación (0 al 9) model.summary() # Con este método podemos comprobar que nuestra red tiene las # capas añadidas y ver alguna información adicional ###Output Model: "sequential_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_1 (Conv2D) (None, 28, 28, 32) 320 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 14, 14, 32) 0 _________________________________________________________________ flatten_1 (Flatten) (None, 6272) 0 _________________________________________________________________ dense_2 (Dense) (None, 128) 802944 _________________________________________________________________ dropout_1 (Dropout) (None, 128) 0 _________________________________________________________________ dense_3 (Dense) (None, 10) 1290 ================================================================= Total params: 804,554 Trainable params: 804,554 Non-trainable params: 0 _________________________________________________________________ ###Markdown Entrenamiento del modeloElegimos un optimizador y una funcion de perdida para el entrenamiento del modelo. ###Code # El modelo se inicializa para su entrenamiento con el método compile # El loss SparseCategoricalCrossEntropy permite introducir las etiquetas sin # necesidad de codificarlas en One-Hot model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) # Con este método del modelo podemos entrenarlo. 
El parámetro "validation_data" # es opcional y simplemente evalúa el modelo con los datos proporcionados en cada # época, pero no serán empleados para el entrenamiento (en nuestro caso, le hemos # proporcionado el subconjunto de prueba) model.fit(x_train, y_train, epochs=10) ###Output Epoch 1/10 1875/1875 [==============================] - 4s 2ms/step - loss: 0.4077 - accuracy: 0.8549 Epoch 2/10 1875/1875 [==============================] - 4s 2ms/step - loss: 0.2783 - accuracy: 0.8992 Epoch 3/10 1875/1875 [==============================] - 4s 2ms/step - loss: 0.2399 - accuracy: 0.9117 Epoch 4/10 1875/1875 [==============================] - 4s 2ms/step - loss: 0.2113 - accuracy: 0.9212 Epoch 5/10 1875/1875 [==============================] - 4s 2ms/step - loss: 0.1871 - accuracy: 0.9297 Epoch 6/10 1875/1875 [==============================] - 4s 2ms/step - loss: 0.1660 - accuracy: 0.9375 Epoch 7/10 1875/1875 [==============================] - 4s 2ms/step - loss: 0.1481 - accuracy: 0.9440 Epoch 8/10 1875/1875 [==============================] - 4s 2ms/step - loss: 0.1322 - accuracy: 0.9508 Epoch 9/10 1875/1875 [==============================] - 4s 2ms/step - loss: 0.1203 - accuracy: 0.9550 Epoch 10/10 1875/1875 [==============================] - 4s 2ms/step - loss: 0.1089 - accuracy: 0.9588 ###Markdown Evaluando el modeloEl modo más básico de evaluar un modelo de clasificación es a partir de la métrica de precisión o accuracy. ###Code # Cálculo de la precisión con el método evaluate del modelo, empleando el subconjunto de prueba model.evaluate(x_test, y_test, verbose=2) ###Output 313/313 - 1s - loss: 0.2831 - accuracy: 0.9176 ###Markdown Se pueden emplear otro tipo de métricas. Podemos obtener algunas con la ayuda de la librería para Inteligencia Artificial Scikit Learn ###Code def otras_metricas(logits, ground_truth, classes): logits = np.argmax(logits, axis=1) print(classification_report(ground_truth, logits, target_names=classes)) # Llamamos al modelo para que calcule las clasificaciones con el subconjunto de # prueba predicts = model.predict(x_test) # Hallamos las métricas otras_metricas(predicts, y_test, classes=class_names) ###Output precision recall f1-score support T-shirt/top 0.86 0.86 0.86 1000 Trouser 0.99 0.98 0.99 1000 Pullover 0.82 0.91 0.86 1000 Dress 0.91 0.93 0.92 1000 Coat 0.89 0.85 0.87 1000 Sandal 0.99 0.96 0.98 1000 Shirt 0.80 0.74 0.77 1000 Sneaker 0.94 0.99 0.96 1000 Bag 0.98 0.98 0.98 1000 Ankle boot 0.98 0.96 0.97 1000 accuracy 0.92 10000 macro avg 0.92 0.92 0.92 10000 weighted avg 0.92 0.92 0.92 10000 ###Markdown No obstante, si es interesante saber que la mayor parte de estas métricas se calculan a partir de datos que quedan reflejados en lo que es denominado una **matriz de confusión**, la cual ilustra la cantidad de aciertos y fallos cometidos por el modelo, de forma desglosada. Vamos a mostrar esta matriz para el modelo entrenado. ###Code # Función que convierte las salidas del modelo en una clase, para que pueda ser # asimilado por la función de cálculo de la matriz de confusión de SciKit Learn def confusion_matrix_v2(logits, ground_truth): logits = np.argmax(logits, axis=1) cm = confusion_matrix(ground_truth, logits) return cm # Una función para hacer más atractiva e intuitiva la matriz de confusión def plot_pretty_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. 
""" if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') plt.rcParams["figure.figsize"] = (len(classes),len(classes)) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('Etiquetas del dataset') plt.xlabel('Predicciones/clasificación del modelo') # Calculamos la matriz de confusión de nuestro modelo, comparando los resultados # con las etiquetas del dataset correspondiente a cada muestra cm = confusion_matrix_v2(predicts, y_test) # Mostramos la matriz de confusión de forma intuitiva plot_pretty_confusion_matrix(cm, classes=class_names, normalize=True, title='Matriz de confusión', cmap=plt.cm.Reds) ###Output Normalized confusion matrix ###Markdown ¿Te animas a continuar? Con lo anterior ya tenemos un modelo a nuestra disposición. Sin embargo, quedan una serie de pasos por realizar para trabajar y exprimir las utilidades de CUBE IDE. En concreto, falta por realizar:1. **Esencial**: descargar el modelo de Keras para embeberlo en CUBE-AI. ***Nota***: recuerda que puede haber problemas de versiones, por lo que es recomendable hacer un downgrade de la versión de Tensorflow.2. **Interesante e ilustrativo, extraer el dataset**: La extracción del dataset para poder hacer una evaluación meticulosa del modelo embebido. Recuerda que ha de descargarse en csv y con una forma adecuada para que sea asimilable por la herramienta.3. **Para sacar el máximo provecho, cuantizar**: Tratar de cuantizar el modelo. Si queremos aprovechar al máximo las bondades de la herramienta y mejorar la eficiencia de nuestro sistema embebido, la labor de cuantizar es casi imperativa, sobre todo con placas de bajo consumo. Esta es una buena meta a alcanzar. ###Code ###Output _____no_output_____
Modulo3/.ipynb_checkpoints/Clase14_SeleccionOptimaPortI-checkpoint.ipynb
###Markdown Selección óptima de portafolios IEn la clase pasada vimos que:- La LAC describe las posibles selecciones de riesgo-rendimiento entre un activo libre de riesgo y un activo riesgoso.- Su pendiente es igual al radio de Sharpe del activo riesgoso.- La asignación óptima de capital para cualquier inversionista es el punto tangente de la curva de indiferencia del inversionista con la LAC (depende de las preferencias particulares - aversión al riesgo).Para todo lo anterior, supusimos que ya teníamos el portafolio óptimo (activo riesgoso).En el siguiente análisis: **Objetivos:**- ¿Cuál es el portafolio óptimo de activos riesgosos? - ¿Cuál es el mejor portafolio de activos riesgosos? - Es un portafolio eficiente en media-varianza.- Problema: dado un conjunto de activos riesgosos, ¿cómo construimos la mejor combinación?*Referencia:*- Notas del curso "Portfolio Selection and Risk Management", Rice University, disponible en Coursera.___ 1. Maximizando el radio de Sharpe ¿Qué pasa si tenemos dos activos riesgosos?Cuando tenemos dos activos riesgosos, tenemos disponibles diferentes LAC. ¿Qué significan sus pendientes? Ver en el tablero.Pregunta:- ¿Qué es lo que se quiere? **Conclusión:** - el *portafolio tangente* de la LAC más pendiente;- por tanto, maximiza el radio de Sharpe para todos los posibles portafolios de activos riesgosos;- este portafolio se conoce como el portafolio eficiente en media-varianza (EMV). **Idea principal: el portafolio óptimo de activos riesgosos es independiente de las preferencias del inversionista.** - El portafolio EMV determina el portafolio óptimo de activos riesgosos.- Todos tendremos el mismo portafolio de activos riesgosos (EMV), y lo combinaremos con el activo libre de reisgo de acuerdo con las preferencias de cada uno de nosotros (aversión al riesgo).- La LAC combinando el activo libre de riesgo y el portafolio EMV, se vuelve el conjunto de portafolios eficientes. Entonces, se deben seguir los siguientes pasos:1. Crear la frontera media-varianza.2. Encontrar el portafolio que maximize el radio de Sharpe (portafolio EMV).3. Construir la frontera eficiente (LAC) del punto $(0,r_f)$ al punto $(E[r_s],\sigma_s)$ del portafolio EMV.4. Combinar de acuerdo a sus preferencias.___ 2. Solución analítica del portafolio EMV: caso con dos activos.Queremos solucionar el siguiente problema:\begin{align}\max_{w_1,w_2} &\quad \frac{E[r_p]-r_f}{\sigma_p}\\\text{s.a.} &\quad E[r_p]=w_1E[r_1]+w_2E[r_2]\\ &\quad \sigma_p=\sqrt{w_1^2\sigma_1^2+w_2^2\sigma_2^2+2w_1w_2\rho_{12}\sigma_1\sigma_2}\\ &\quad w_1+w_2=1, \quad w_1,w_2\geq0\end{align} el cual es equivalente a\begin{align}\max_{w_1} &\quad \frac{w_1E[r_1]+(1-w_1)E[r_2]-r_f}{\sqrt{w_1^2\sigma_1^2+(1-w_1)^2\sigma_2^2+2w_1(1-w_1)\rho_{12}\sigma_1\sigma_2}}\\\text{s.a.} &\quad 0\leq w_1\leq1\end{align} **Actividad.**El anterior es un problema de maximizar una función de una variable en un dominio cerrado. No debaría representar dificultad.Encontrar la solución analítica a este problema.Quien primero lo haga, y salga a explicarlo al tablero, le subo alguna tarea, quiz o el primer examen a 100.Deben llegar a:$$w_{1,EMV}=\frac{(E[r_1]-r_f)\sigma_2^2-(E[r_2]-r_f)\sigma_{12}}{(E[r_2]-r_f)\sigma_1^2+(E[r_1]-r_f)\sigma_2^2-((E[r_1]-r_f)+(E[r_2]-r_f))\sigma_{12}}.$$ Si nadie lo ha hecho en 30 min., procederé a hacerlo yo.**Nota:** - así como obtuvimos una expresión para el peso del portafolio de mínima varianza con dos activos, obtenemos una expresión para el peso del portafolio Eficiente en Media-Varianza. 
- Estas actividades son sin duda un buen ejercicio, y se pueden replicar usando técnicas de varias variables (multiplicadores de Lagrange) cuando se tengan más de dos activos.- Sin embargo, la complejidad del problema crece considerablemente con el número de variables, y la solución analítica deja de ser viable cuando mencionamos que un portafolio bien diversificado consta aproximadamente de 50-60 activos.- En esos casos, este problema se soluciona con rutinas numéricas que hagan la optimización por nosotros.- Por eso, les enseño cómo resolver este problema con optimizadores numéricos, porque son una solución viable y escalable a más variables. 3. Ejemplo ilustrativo.Retomamos el ejemplo de mercados de acciones en los países integrantes del $G5$: EU, RU, Francia, Alemania y Japón. ###Code # Importamos pandas y numpy import pandas as pd import numpy as np # Resumen en base anual de rendimientos esperados y volatilidades annual_ret_summ = pd.DataFrame(columns=['EU', 'RU', 'Francia', 'Alemania', 'Japon'], index=['Media', 'Volatilidad']) annual_ret_summ.loc['Media'] = np.array([0.1355, 0.1589, 0.1519, 0.1435, 0.1497]) annual_ret_summ.loc['Volatilidad'] = np.array([0.1535, 0.2430, 0.2324, 0.2038, 0.2298]) annual_ret_summ.round(4) # Matriz de correlación corr = pd.DataFrame(data= np.array([[1.0000, 0.5003, 0.4398, 0.3681, 0.2663], [0.5003, 1.0000, 0.5420, 0.4265, 0.3581], [0.4398, 0.5420, 1.0000, 0.6032, 0.3923], [0.3681, 0.4265, 0.6032, 1.0000, 0.3663], [0.2663, 0.3581, 0.3923, 0.3663, 1.0000]]), columns=annual_ret_summ.columns, index=annual_ret_summ.columns) corr.round(4) ###Output _____no_output_____ ###Markdown Supondremos, además, que la tasa libre de riesgo es $r_f=5\%$. ###Code # Tasa libre de riesgo rf = 0.05 ###Output _____no_output_____ ###Markdown Entonces, supondremos que tenemos disponibles los activos correspondientes a los mercados de acciones de EU y Japón, y en adición el activo libre de riesgo. 1. Construir la frontera de mínima varianza ###Code # Vector de w variando entre 0 y 1 con n pasos # Rendimientos esperados individuales # Activo1: EU, Activo2:Japon # Volatilidades individuales # Correlacion # DataFrame de portafolios: # 1. Índice: i # 2. Columnas 1-2: w, 1-w # 3. Columnas 3-4: E[r], sigma # 4. Columna 5: Sharpe ratio # Importar librerías de gráficos # Gráfica de dispersión de puntos coloreando # de acuerdo a SR ###Output _____no_output_____ ###Markdown 2. Encontrar el portafolio que maximiza el radio de Sharpe (EMV) Primero, encontramos este portafolio con la fórmula que obtuvimos: ###Code # Fórmula que obtuvimos ###Output _____no_output_____ ###Markdown Ahora sí, con la función scipy.optimize.minimize ###Code # Importar el módulo optimize ## Construcción de parámetros ## Activo 1: EU, Activo 2: Japon # 1. Sigma: matriz de varianza-covarianza # 2. Eind: rendimientos esperados activos individuales # Función objetivo (-SR) # Dato inicial # Cotas de las variables # Restricciones # Optimización numérica # Resultado ###Output _____no_output_____ ###Markdown Con lo anterior, podemos obtener datos de rendimiento esperado y volatilidad del portafolio EMV ###Code # Rendimiento esperado y volatilidad del portafolio EMV # Gráfica de dispersión de puntos coloreando # de acuerdo a SR, y portafolio EMV ###Output _____no_output_____ ###Markdown 3. Construir LAC Ahora, dibujamos la LAC, combinando el portafolio EMV con el activo libre de riesgo: ###Code # Vector de wr variando entre 0 y 1.5 con n pasos # DataFrame de CAL: # 1. Índice: i # 2. Columnas 1-2: wp, wrf # 3. 
Columnas 3-4: E[r], sigma # 4. Columna 5: Sharpe ratio # Gráfica de dispersión de puntos coloreando # de acuerdo a SR, portafolio EMV y LAC ###Output _____no_output_____ ###Markdown 4. Combinación óptima de acuerdo a preferenciasCon los datos anteriores, y la caracterización de aversión al riesgo, se escoge la combinación óptima entre el portafolio EMV y el activo libre de riesgo de acuerdo a:$$w^\ast=\frac{E[r_s-r_f]}{\gamma\sigma_s^2}.$$ ###Code # Para gamma=7 ###Output _____no_output_____
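###Markdown
A minimal sketch of this last step for gamma = 7 is given below. Because the earlier cells of this template are left as comments, the expected return and volatility of the EMV portfolio appear here as placeholder values; replace them with the numbers obtained from the optimization above (rf = 0.05 as defined earlier). ###Code
# Sketch for gamma = 7. E_emv and s_emv are placeholders for the EMV portfolio's
# expected return and volatility obtained from the optimization above.
gamma = 7
rf = 0.05
E_emv, s_emv = 0.16, 0.15  # placeholder values, not computed results

w_opt = (E_emv - rf) / (gamma * s_emv**2)   # fraction allocated to the EMV portfolio
E_port = w_opt * E_emv + (1 - w_opt) * rf   # expected return of the complete portfolio
s_port = w_opt * s_emv                      # volatility of the complete portfolio
print(w_opt, E_port, s_port)
###Output
_____no_output_____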
Course_4_-_Convolutional_Neural_Networks/Week_3/assignment/Autonomous_driving_application_-_Car_detection_-_v1.ipynb
###Markdown Autonomous driving - Car detectionWelcome to your week 3 programming assignment. You will learn about object detection using the very powerful YOLO model. Many of the ideas in this notebook are described in the two YOLO papers: Redmon et al., 2016 (https://arxiv.org/abs/1506.02640) and Redmon and Farhadi, 2016 (https://arxiv.org/abs/1612.08242). **You will learn to**:- Use object detection on a car detection dataset- Deal with bounding boxesRun the following cell to load the packages and dependencies that are going to be useful for your journey! ###Code import argparse import os import matplotlib.pyplot as plt from matplotlib.pyplot import imshow import scipy.io import scipy.misc import numpy as np import pandas as pd import PIL import tensorflow as tf from keras import backend as K from keras.layers import Input, Lambda, Conv2D from keras.models import load_model, Model from yolo_utils import read_classes, read_anchors, generate_colors, preprocess_image, draw_boxes, scale_boxes from yad2k.models.keras_yolo import yolo_head, yolo_boxes_to_corners, preprocess_true_boxes, yolo_loss, yolo_body %matplotlib inline ###Output Using TensorFlow backend. ###Markdown **Important Note**: As you can see, we import Keras's backend as K. This means that to use a Keras function in this notebook, you will need to write: `K.function(...)`. 1 - Problem StatementYou are working on a self-driving car. As a critical component of this project, you'd like to first build a car detection system. To collect data, you've mounted a camera to the hood (meaning the front) of the car, which takes pictures of the road ahead every few seconds while you drive around. Pictures taken from a car-mounted camera while driving around Silicon Valley. We would like to especially thank [drive.ai](https://www.drive.ai/) for providing this dataset! Drive.ai is a company building the brains of self-driving vehicles.You've gathered all these images into a folder and have labelled them by drawing bounding boxes around every car you found. Here's an example of what your bounding boxes look like. **Figure 1** : **Definition of a box** If you have 80 classes that you want YOLO to recognize, you can represent the class label $c$ either as an integer from 1 to 80, or as an 80-dimensional vector (with 80 numbers) one component of which is 1 and the rest of which are 0. The video lectures had used the latter representation; in this notebook, we will use both representations, depending on which is more convenient for a particular step. In this exercise, you will learn how YOLO works, then apply it to car detection. Because the YOLO model is very computationally expensive to train, we will load pre-trained weights for you to use. 2 - YOLO YOLO ("you only look once") is a popular algoritm because it achieves high accuracy while also being able to run in real-time. This algorithm "only looks once" at the image in the sense that it requires only one forward propagation pass through the network to make predictions. After non-max suppression, it then outputs recognized objects together with the bounding boxes. 2.1 - Model detailsFirst things to know:- The **input** is a batch of images of shape (m, 608, 608, 3)- The **output** is a list of bounding boxes along with the recognized classes. Each bounding box is represented by 6 numbers $(p_c, b_x, b_y, b_h, b_w, c)$ as explained above. If you expand $c$ into an 80-dimensional vector, each bounding box is then represented by 85 numbers. We will use 5 anchor boxes. 
So you can think of the YOLO architecture as the following: IMAGE (m, 608, 608, 3) -> DEEP CNN -> ENCODING (m, 19, 19, 5, 85).Lets look in greater detail at what this encoding represents. **Figure 2** : **Encoding architecture for YOLO** If the center/midpoint of an object falls into a grid cell, that grid cell is responsible for detecting that object. Since we are using 5 anchor boxes, each of the 19 x19 cells thus encodes information about 5 boxes. Anchor boxes are defined only by their width and height.For simplicity, we will flatten the last two last dimensions of the shape (19, 19, 5, 85) encoding. So the output of the Deep CNN is (19, 19, 425). **Figure 3** : **Flattening the last two last dimensions** Now, for each box (of each cell) we will compute the following elementwise product and extract a probability that the box contains a certain class. **Figure 4** : **Find the class detected by each box** Here's one way to visualize what YOLO is predicting on an image:- For each of the 19x19 grid cells, find the maximum of the probability scores (taking a max across both the 5 anchor boxes and across different classes). - Color that grid cell according to what object that grid cell considers the most likely.Doing this results in this picture: **Figure 5** : Each of the 19x19 grid cells colored according to which class has the largest predicted probability in that cell. Note that this visualization isn't a core part of the YOLO algorithm itself for making predictions; it's just a nice way of visualizing an intermediate result of the algorithm. Another way to visualize YOLO's output is to plot the bounding boxes that it outputs. Doing that results in a visualization like this: **Figure 6** : Each cell gives you 5 boxes. In total, the model predicts: 19x19x5 = 1805 boxes just by looking once at the image (one forward pass through the network)! Different colors denote different classes. In the figure above, we plotted only boxes that the model had assigned a high probability to, but this is still too many boxes. You'd like to filter the algorithm's output down to a much smaller number of detected objects. To do so, you'll use non-max suppression. Specifically, you'll carry out these steps: - Get rid of boxes with a low score (meaning, the box is not very confident about detecting a class)- Select only one box when several boxes overlap with each other and detect the same object. 2.2 - Filtering with a threshold on class scoresYou are going to apply a first filter by thresholding. You would like to get rid of any box for which the class "score" is less than a chosen threshold. The model gives you a total of 19x19x5x85 numbers, with each box described by 85 numbers. It'll be convenient to rearrange the (19,19,5,85) (or (19,19,425)) dimensional tensor into the following variables: - `box_confidence`: tensor of shape $(19 \times 19, 5, 1)$ containing $p_c$ (confidence probability that there's some object) for each of the 5 boxes predicted in each of the 19x19 cells.- `boxes`: tensor of shape $(19 \times 19, 5, 4)$ containing $(b_x, b_y, b_h, b_w)$ for each of the 5 boxes per cell.- `box_class_probs`: tensor of shape $(19 \times 19, 5, 80)$ containing the detection probabilities $(c_1, c_2, ... c_{80})$ for each of the 80 classes for each of the 5 boxes per cell.**Exercise**: Implement `yolo_filter_boxes()`.1. Compute box scores by doing the elementwise product as described in Figure 4. 
The following code may help you choose the right operator: ```pythona = np.random.randn(19*19, 5, 1)b = np.random.randn(19*19, 5, 80)c = a * b shape of c will be (19*19, 5, 80)```2. For each box, find: - the index of the class with the maximum box score ([Hint](https://keras.io/backend/argmax)) (Be careful with what axis you choose; consider using axis=-1) - the corresponding box score ([Hint](https://keras.io/backend/max)) (Be careful with what axis you choose; consider using axis=-1)3. Create a mask by using a threshold. As a reminder: `([0.9, 0.3, 0.4, 0.5, 0.1] < 0.4)` returns: `[False, True, False, False, True]`. The mask should be True for the boxes you want to keep. 4. Use TensorFlow to apply the mask to box_class_scores, boxes and box_classes to filter out the boxes we don't want. You should be left with just the subset of boxes you want to keep. ([Hint](https://www.tensorflow.org/api_docs/python/tf/boolean_mask))Reminder: to call a Keras function, you should use `K.function(...)`. ###Code # GRADED FUNCTION: yolo_filter_boxes def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = .6): """Filters YOLO boxes by thresholding on object and class confidence. Arguments: box_confidence -- tensor of shape (19, 19, 5, 1) boxes -- tensor of shape (19, 19, 5, 4) box_class_probs -- tensor of shape (19, 19, 5, 80) threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box Returns: scores -- tensor of shape (None,), containing the class probability score for selected boxes boxes -- tensor of shape (None, 4), containing (b_x, b_y, b_h, b_w) coordinates of selected boxes classes -- tensor of shape (None,), containing the index of the class detected by the selected boxes Note: "None" is here because you don't know the exact number of selected boxes, as it depends on the threshold. For example, the actual output size of scores would be (10,) if there are 10 boxes. """ # Step 1: Compute box scores ### START CODE HERE ### (≈ 1 line) box_scores = box_confidence * box_class_probs ### END CODE HERE ### # Step 2: Find the box_classes thanks to the max box_scores, keep track of the corresponding score ### START CODE HERE ### (≈ 2 lines) box_classes = K.argmax(box_scores, axis=-1) box_class_scores = K.max(box_scores, axis=-1) ### END CODE HERE ### # Step 3: Create a filtering mask based on "box_class_scores" by using "threshold". 
The mask should have the # same dimension as box_class_scores, and be True for the boxes you want to keep (with probability >= threshold) ### START CODE HERE ### (≈ 1 line) filtering_mask = box_class_scores >= threshold ### END CODE HERE ### # Step 4: Apply the mask to scores, boxes and classes ### START CODE HERE ### (≈ 3 lines) print(box_class_scores.shape) print(boxes.shape) print(filtering_mask.shape) scores = tf.boolean_mask(tensor=box_class_scores, mask=filtering_mask) boxes = tf.boolean_mask(tensor=boxes, mask=filtering_mask) classes = tf.boolean_mask(tensor=box_classes, mask=filtering_mask) ### END CODE HERE ### return scores, boxes, classes with tf.Session() as test_a: box_confidence = tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1) boxes = tf.random_normal([19, 19, 5, 4], mean=1, stddev=4, seed = 1) box_class_probs = tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1) scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = 0.5) print("scores[2] = " + str(scores[2].eval())) print("boxes[2] = " + str(boxes[2].eval())) print("classes[2] = " + str(classes[2].eval())) print("scores.shape = " + str(scores.shape)) print("boxes.shape = " + str(boxes.shape)) print("classes.shape = " + str(classes.shape)) ###Output (19, 19, 5) (19, 19, 5, 4) (19, 19, 5) scores[2] = 10.7506 boxes[2] = [ 8.42653275 3.27136683 -0.5313437 -4.94137383] classes[2] = 7 scores.shape = (?,) boxes.shape = (?, 4) classes.shape = (?,) ###Markdown **Expected Output**: **scores[2]** 10.7506 **boxes[2]** [ 8.42653275 3.27136683 -0.5313437 -4.94137383] **classes[2]** 7 **scores.shape** (?,) **boxes.shape** (?, 4) **classes.shape** (?,) 2.3 - Non-max suppression Even after filtering by thresholding over the classes scores, you still end up a lot of overlapping boxes. A second filter for selecting the right boxes is called non-maximum suppression (NMS). **Figure 7** : In this example, the model has predicted 3 cars, but it's actually 3 predictions of the same car. Running non-max suppression (NMS) will select only the most accurate (highest probabiliy) one of the 3 boxes. Non-max suppression uses the very important function called **"Intersection over Union"**, or IoU. **Figure 8** : Definition of "Intersection over Union". **Exercise**: Implement iou(). Some hints:- In this exercise only, we define a box using its two corners (upper left and lower right): (x1, y1, x2, y2) rather than the midpoint and height/width.- To calculate the area of a rectangle you need to multiply its height (y2 - y1) by its width (x2 - x1)- You'll also need to find the coordinates (xi1, yi1, xi2, yi2) of the intersection of two boxes. Remember that: - xi1 = maximum of the x1 coordinates of the two boxes - yi1 = maximum of the y1 coordinates of the two boxes - xi2 = minimum of the x2 coordinates of the two boxes - yi2 = minimum of the y2 coordinates of the two boxes In this code, we use the convention that (0,0) is the top-left corner of an image, (1,0) is the upper-right corner, and (1,1) the lower-right corner. ###Code # GRADED FUNCTION: iou def iou(box1, box2): """Implement the intersection over union (IoU) between box1 and box2 Arguments: box1 -- first box, list object with coordinates (x1, y1, x2, y2) box2 -- second box, list object with coordinates (x1, y1, x2, y2) """ # Calculate the (y1, x1, y2, x2) coordinates of the intersection of box1 and box2. Calculate its Area. 
### START CODE HERE ### (≈ 5 lines) xi1 = np.max([box1[0], box2[0]]) yi1 = np.max([box1[1], box2[1]]) xi2 = np.min([box1[2], box2[2]]) yi2 = np.min([box1[3], box2[3]]) inter_area = (xi2 - xi1) * (yi2 - yi1) ### END CODE HERE ### # Calculate the Union area by using Formula: Union(A,B) = A + B - Inter(A,B) ### START CODE HERE ### (≈ 3 lines) box1_area = (box1[2] - box1[0]) * (box1[3] - box1[1]) box2_area = (box2[2] - box2[0]) * (box2[3] - box2[1]) union_area = box1_area + box2_area - inter_area ### END CODE HERE ### # compute the IoU ### START CODE HERE ### (≈ 1 line) iou = inter_area / union_area ### END CODE HERE ### return iou box1 = (2, 1, 4, 3) box2 = (1, 2, 3, 4) print("iou = " + str(iou(box1, box2))) ###Output iou = 0.142857142857 ###Markdown **Expected Output**: **iou = ** 0.14285714285714285 You are now ready to implement non-max suppression. The key steps are: 1. Select the box that has the highest score.2. Compute its overlap with all other boxes, and remove boxes that overlap it more than `iou_threshold`.3. Go back to step 1 and iterate until there's no more boxes with a lower score than the current selected box.This will remove all boxes that have a large overlap with the selected boxes. Only the "best" boxes remain.**Exercise**: Implement yolo_non_max_suppression() using TensorFlow. TensorFlow has two built-in functions that are used to implement non-max suppression (so you don't actually need to use your `iou()` implementation):- [tf.image.non_max_suppression()](https://www.tensorflow.org/api_docs/python/tf/image/non_max_suppression)- [K.gather()](https://www.tensorflow.org/api_docs/python/tf/gather) ###Code # GRADED FUNCTION: yolo_non_max_suppression def yolo_non_max_suppression(scores, boxes, classes, max_boxes = 10, iou_threshold = 0.5): """ Applies Non-max suppression (NMS) to set of boxes Arguments: scores -- tensor of shape (None,), output of yolo_filter_boxes() boxes -- tensor of shape (None, 4), output of yolo_filter_boxes() that have been scaled to the image size (see later) classes -- tensor of shape (None,), output of yolo_filter_boxes() max_boxes -- integer, maximum number of predicted boxes you'd like iou_threshold -- real value, "intersection over union" threshold used for NMS filtering Returns: scores -- tensor of shape (, None), predicted score for each box boxes -- tensor of shape (4, None), predicted box coordinates classes -- tensor of shape (, None), predicted class for each box Note: The "None" dimension of the output tensors has obviously to be less than max_boxes. Note also that this function will transpose the shapes of scores, boxes, classes. This is made for convenience. 
""" max_boxes_tensor = K.variable(max_boxes, dtype='int32') # tensor to be used in tf.image.non_max_suppression() K.get_session().run(tf.variables_initializer([max_boxes_tensor])) # initialize variable max_boxes_tensor # Use c to get the list of indices corresponding to boxes you keep ### START CODE HERE ### (≈ 1 line) nms_indices = tf.image.non_max_suppression(boxes=boxes, scores=scores, max_output_size=max_boxes, iou_threshold=iou_threshold) ### END CODE HERE ### # Use K.gather() to select only nms_indices from scores, boxes and classes ### START CODE HERE ### (≈ 3 lines) scores = K.gather(scores, nms_indices) boxes = K.gather(boxes, nms_indices) classes = K.gather(classes, nms_indices) ### END CODE HERE ### return scores, boxes, classes with tf.Session() as test_b: scores = tf.random_normal([54,], mean=1, stddev=4, seed = 1) boxes = tf.random_normal([54, 4], mean=1, stddev=4, seed = 1) classes = tf.random_normal([54,], mean=1, stddev=4, seed = 1) scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes) print("scores[2] = " + str(scores[2].eval())) print("boxes[2] = " + str(boxes[2].eval())) print("classes[2] = " + str(classes[2].eval())) print("scores.shape = " + str(scores.eval().shape)) print("boxes.shape = " + str(boxes.eval().shape)) print("classes.shape = " + str(classes.eval().shape)) ###Output scores[2] = 6.9384 boxes[2] = [-5.299932 3.13798141 4.45036697 0.95942086] classes[2] = -2.24527 scores.shape = (10,) boxes.shape = (10, 4) classes.shape = (10,) ###Markdown **Expected Output**: **scores[2]** 6.9384 **boxes[2]** [-5.299932 3.13798141 4.45036697 0.95942086] **classes[2]** -2.24527 **scores.shape** (10,) **boxes.shape** (10, 4) **classes.shape** (10,) 2.4 Wrapping up the filteringIt's time to implement a function taking the output of the deep CNN (the 19x19x5x85 dimensional encoding) and filtering through all the boxes using the functions you've just implemented. **Exercise**: Implement `yolo_eval()` which takes the output of the YOLO encoding and filters the boxes using score threshold and NMS. There's just one last implementational detail you have to know. There're a few ways of representing boxes, such as via their corners or via their midpoint and height/width. YOLO converts between a few such formats at different times, using the following functions (which we have provided): ```pythonboxes = yolo_boxes_to_corners(box_xy, box_wh) ```which converts the yolo box coordinates (x,y,w,h) to box corners' coordinates (x1, y1, x2, y2) to fit the input of `yolo_filter_boxes````pythonboxes = scale_boxes(boxes, image_shape)```YOLO's network was trained to run on 608x608 images. If you are testing this data on a different size image--for example, the car detection dataset had 720x1280 images--this step rescales the boxes so that they can be plotted on top of the original 720x1280 image. Don't worry about these two functions; we'll show you where they need to be called. ###Code # GRADED FUNCTION: yolo_eval def yolo_eval(yolo_outputs, image_shape = (720., 1280.), max_boxes=10, score_threshold=.6, iou_threshold=.5): """ Converts the output of YOLO encoding (a lot of boxes) to your predicted boxes along with their scores, box coordinates and classes. 
Arguments: yolo_outputs -- output of the encoding model (for image_shape of (608, 608, 3)), contains 4 tensors: box_confidence: tensor of shape (None, 19, 19, 5, 1) box_xy: tensor of shape (None, 19, 19, 5, 2) box_wh: tensor of shape (None, 19, 19, 5, 2) box_class_probs: tensor of shape (None, 19, 19, 5, 80) image_shape -- tensor of shape (2,) containing the input shape, in this notebook we use (608., 608.) (has to be float32 dtype) max_boxes -- integer, maximum number of predicted boxes you'd like score_threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box iou_threshold -- real value, "intersection over union" threshold used for NMS filtering Returns: scores -- tensor of shape (None, ), predicted score for each box boxes -- tensor of shape (None, 4), predicted box coordinates classes -- tensor of shape (None,), predicted class for each box """ ### START CODE HERE ### # Retrieve outputs of the YOLO model (≈1 line) box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs # Convert boxes to be ready for filtering functions boxes = yolo_boxes_to_corners(box_xy, box_wh) # Use one of the functions you've implemented to perform Score-filtering with a threshold of score_threshold (≈1 line) scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=score_threshold) # Scale boxes back to original image shape. boxes = scale_boxes(boxes, image_shape) # Use one of the functions you've implemented to perform Non-max suppression with a threshold of iou_threshold (≈1 line) scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes) ### END CODE HERE ### return scores, boxes, classes with tf.Session() as test_b: yolo_outputs = (tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1), tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1), tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1), tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1)) scores, boxes, classes = yolo_eval(yolo_outputs) print("scores[2] = " + str(scores[2].eval())) print("boxes[2] = " + str(boxes[2].eval())) print("classes[2] = " + str(classes[2].eval())) print("scores.shape = " + str(scores.eval().shape)) print("boxes.shape = " + str(boxes.eval().shape)) print("classes.shape = " + str(classes.eval().shape)) ###Output (19, 19, 5) (19, 19, 5, 4) (19, 19, 5) scores[2] = 138.791 boxes[2] = [ 1292.32971191 -278.52166748 3876.98925781 -835.56494141] classes[2] = 54 scores.shape = (10,) boxes.shape = (10, 4) classes.shape = (10,) ###Markdown **Expected Output**: **scores[2]** 138.791 **boxes[2]** [ 1292.32971191 -278.52166748 3876.98925781 -835.56494141] **classes[2]** 54 **scores.shape** (10,) **boxes.shape** (10, 4) **classes.shape** (10,) **Summary for YOLO**:- Input image (608, 608, 3)- The input image goes through a CNN, resulting in a (19,19,5,85) dimensional output. - After flattening the last two dimensions, the output is a volume of shape (19, 19, 425): - Each cell in a 19x19 grid over the input image gives 425 numbers. - 425 = 5 x 85 because each cell contains predictions for 5 boxes, corresponding to 5 anchor boxes, as seen in lecture. 
- 85 = 5 + 80 where 5 is because $(p_c, b_x, b_y, b_h, b_w)$ has 5 numbers, and and 80 is the number of classes we'd like to detect- You then select only few boxes based on: - Score-thresholding: throw away boxes that have detected a class with a score less than the threshold - Non-max suppression: Compute the Intersection over Union and avoid selecting overlapping boxes- This gives you YOLO's final output. 3 - Test YOLO pretrained model on images In this part, you are going to use a pretrained model and test it on the car detection dataset. As usual, you start by **creating a session to start your graph**. Run the following cell. ###Code sess = K.get_session() ###Output _____no_output_____ ###Markdown 3.1 - Defining classes, anchors and image shape. Recall that we are trying to detect 80 classes, and are using 5 anchor boxes. We have gathered the information about the 80 classes and 5 boxes in two files "coco_classes.txt" and "yolo_anchors.txt". Let's load these quantities into the model by running the next cell. The car detection dataset has 720x1280 images, which we've pre-processed into 608x608 images. ###Code class_names = read_classes("model_data/coco_classes.txt") anchors = read_anchors("model_data/yolo_anchors.txt") image_shape = (720., 1280.) ###Output _____no_output_____ ###Markdown 3.2 - Loading a pretrained modelTraining a YOLO model takes a very long time and requires a fairly large dataset of labelled bounding boxes for a large range of target classes. You are going to load an existing pretrained Keras YOLO model stored in "yolo.h5". (These weights come from the official YOLO website, and were converted using a function written by Allan Zelener. References are at the end of this notebook. Technically, these are the parameters from the "YOLOv2" model, but we will more simply refer to it as "YOLO" in this notebook.) Run the cell below to load the model from this file. ###Code yolo_model = load_model("model_data/yolo.h5") ###Output /opt/conda/lib/python3.6/site-packages/keras/models.py:251: UserWarning: No training configuration found in save file: the model was *not* compiled. Compile it manually. warnings.warn('No training configuration found in save file: ' ###Markdown This loads the weights of a trained YOLO model. Here's a summary of the layers your model contains. 
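As a quick sanity check before scanning the full summary, you can confirm that the model's input and output tensors match the (m, 608, 608, 3) → (m, 19, 19, 425) encoding described above. A minimal sketch, assuming `yolo_model` is the Keras model loaded in the previous cell:

```python
# Keras models expose their symbolic input/output shapes directly.
print(yolo_model.input_shape)   # expected: (None, 608, 608, 3)
print(yolo_model.output_shape)  # expected: (None, 19, 19, 425), i.e. 5 anchors x 85 numbers per cell
```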
###Code yolo_model.summary() ###Output ____________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ==================================================================================================== input_1 (InputLayer) (None, 608, 608, 3) 0 ____________________________________________________________________________________________________ conv2d_1 (Conv2D) (None, 608, 608, 32) 864 input_1[0][0] ____________________________________________________________________________________________________ batch_normalization_1 (BatchNorm (None, 608, 608, 32) 128 conv2d_1[0][0] ____________________________________________________________________________________________________ leaky_re_lu_1 (LeakyReLU) (None, 608, 608, 32) 0 batch_normalization_1[0][0] ____________________________________________________________________________________________________ max_pooling2d_1 (MaxPooling2D) (None, 304, 304, 32) 0 leaky_re_lu_1[0][0] ____________________________________________________________________________________________________ conv2d_2 (Conv2D) (None, 304, 304, 64) 18432 max_pooling2d_1[0][0] ____________________________________________________________________________________________________ batch_normalization_2 (BatchNorm (None, 304, 304, 64) 256 conv2d_2[0][0] ____________________________________________________________________________________________________ leaky_re_lu_2 (LeakyReLU) (None, 304, 304, 64) 0 batch_normalization_2[0][0] ____________________________________________________________________________________________________ max_pooling2d_2 (MaxPooling2D) (None, 152, 152, 64) 0 leaky_re_lu_2[0][0] ____________________________________________________________________________________________________ conv2d_3 (Conv2D) (None, 152, 152, 128) 73728 max_pooling2d_2[0][0] ____________________________________________________________________________________________________ batch_normalization_3 (BatchNorm (None, 152, 152, 128) 512 conv2d_3[0][0] ____________________________________________________________________________________________________ leaky_re_lu_3 (LeakyReLU) (None, 152, 152, 128) 0 batch_normalization_3[0][0] ____________________________________________________________________________________________________ conv2d_4 (Conv2D) (None, 152, 152, 64) 8192 leaky_re_lu_3[0][0] ____________________________________________________________________________________________________ batch_normalization_4 (BatchNorm (None, 152, 152, 64) 256 conv2d_4[0][0] ____________________________________________________________________________________________________ leaky_re_lu_4 (LeakyReLU) (None, 152, 152, 64) 0 batch_normalization_4[0][0] ____________________________________________________________________________________________________ conv2d_5 (Conv2D) (None, 152, 152, 128) 73728 leaky_re_lu_4[0][0] ____________________________________________________________________________________________________ batch_normalization_5 (BatchNorm (None, 152, 152, 128) 512 conv2d_5[0][0] ____________________________________________________________________________________________________ leaky_re_lu_5 (LeakyReLU) (None, 152, 152, 128) 0 batch_normalization_5[0][0] ____________________________________________________________________________________________________ max_pooling2d_3 (MaxPooling2D) (None, 76, 76, 128) 0 leaky_re_lu_5[0][0] ____________________________________________________________________________________________________ 
conv2d_6 (Conv2D) (None, 76, 76, 256) 294912 max_pooling2d_3[0][0] ____________________________________________________________________________________________________ batch_normalization_6 (BatchNorm (None, 76, 76, 256) 1024 conv2d_6[0][0] ____________________________________________________________________________________________________ leaky_re_lu_6 (LeakyReLU) (None, 76, 76, 256) 0 batch_normalization_6[0][0] ____________________________________________________________________________________________________ conv2d_7 (Conv2D) (None, 76, 76, 128) 32768 leaky_re_lu_6[0][0] ____________________________________________________________________________________________________ batch_normalization_7 (BatchNorm (None, 76, 76, 128) 512 conv2d_7[0][0] ____________________________________________________________________________________________________ leaky_re_lu_7 (LeakyReLU) (None, 76, 76, 128) 0 batch_normalization_7[0][0] ____________________________________________________________________________________________________ conv2d_8 (Conv2D) (None, 76, 76, 256) 294912 leaky_re_lu_7[0][0] ____________________________________________________________________________________________________ batch_normalization_8 (BatchNorm (None, 76, 76, 256) 1024 conv2d_8[0][0] ____________________________________________________________________________________________________ leaky_re_lu_8 (LeakyReLU) (None, 76, 76, 256) 0 batch_normalization_8[0][0] ____________________________________________________________________________________________________ max_pooling2d_4 (MaxPooling2D) (None, 38, 38, 256) 0 leaky_re_lu_8[0][0] ____________________________________________________________________________________________________ conv2d_9 (Conv2D) (None, 38, 38, 512) 1179648 max_pooling2d_4[0][0] ____________________________________________________________________________________________________ batch_normalization_9 (BatchNorm (None, 38, 38, 512) 2048 conv2d_9[0][0] ____________________________________________________________________________________________________ leaky_re_lu_9 (LeakyReLU) (None, 38, 38, 512) 0 batch_normalization_9[0][0] ____________________________________________________________________________________________________ conv2d_10 (Conv2D) (None, 38, 38, 256) 131072 leaky_re_lu_9[0][0] ____________________________________________________________________________________________________ batch_normalization_10 (BatchNor (None, 38, 38, 256) 1024 conv2d_10[0][0] ____________________________________________________________________________________________________ leaky_re_lu_10 (LeakyReLU) (None, 38, 38, 256) 0 batch_normalization_10[0][0] ____________________________________________________________________________________________________ conv2d_11 (Conv2D) (None, 38, 38, 512) 1179648 leaky_re_lu_10[0][0] ____________________________________________________________________________________________________ batch_normalization_11 (BatchNor (None, 38, 38, 512) 2048 conv2d_11[0][0] ____________________________________________________________________________________________________ leaky_re_lu_11 (LeakyReLU) (None, 38, 38, 512) 0 batch_normalization_11[0][0] ____________________________________________________________________________________________________ conv2d_12 (Conv2D) (None, 38, 38, 256) 131072 leaky_re_lu_11[0][0] ____________________________________________________________________________________________________ batch_normalization_12 (BatchNor (None, 38, 38, 256) 1024 conv2d_12[0][0] 
____________________________________________________________________________________________________ leaky_re_lu_12 (LeakyReLU) (None, 38, 38, 256) 0 batch_normalization_12[0][0] ____________________________________________________________________________________________________ conv2d_13 (Conv2D) (None, 38, 38, 512) 1179648 leaky_re_lu_12[0][0] ____________________________________________________________________________________________________ batch_normalization_13 (BatchNor (None, 38, 38, 512) 2048 conv2d_13[0][0] ____________________________________________________________________________________________________ leaky_re_lu_13 (LeakyReLU) (None, 38, 38, 512) 0 batch_normalization_13[0][0] ____________________________________________________________________________________________________ max_pooling2d_5 (MaxPooling2D) (None, 19, 19, 512) 0 leaky_re_lu_13[0][0] ____________________________________________________________________________________________________ conv2d_14 (Conv2D) (None, 19, 19, 1024) 4718592 max_pooling2d_5[0][0] ____________________________________________________________________________________________________ batch_normalization_14 (BatchNor (None, 19, 19, 1024) 4096 conv2d_14[0][0] ____________________________________________________________________________________________________ leaky_re_lu_14 (LeakyReLU) (None, 19, 19, 1024) 0 batch_normalization_14[0][0] ____________________________________________________________________________________________________ conv2d_15 (Conv2D) (None, 19, 19, 512) 524288 leaky_re_lu_14[0][0] ____________________________________________________________________________________________________ batch_normalization_15 (BatchNor (None, 19, 19, 512) 2048 conv2d_15[0][0] ____________________________________________________________________________________________________ leaky_re_lu_15 (LeakyReLU) (None, 19, 19, 512) 0 batch_normalization_15[0][0] ____________________________________________________________________________________________________ conv2d_16 (Conv2D) (None, 19, 19, 1024) 4718592 leaky_re_lu_15[0][0] ____________________________________________________________________________________________________ batch_normalization_16 (BatchNor (None, 19, 19, 1024) 4096 conv2d_16[0][0] ____________________________________________________________________________________________________ leaky_re_lu_16 (LeakyReLU) (None, 19, 19, 1024) 0 batch_normalization_16[0][0] ____________________________________________________________________________________________________ conv2d_17 (Conv2D) (None, 19, 19, 512) 524288 leaky_re_lu_16[0][0] ____________________________________________________________________________________________________ batch_normalization_17 (BatchNor (None, 19, 19, 512) 2048 conv2d_17[0][0] ____________________________________________________________________________________________________ leaky_re_lu_17 (LeakyReLU) (None, 19, 19, 512) 0 batch_normalization_17[0][0] ____________________________________________________________________________________________________ conv2d_18 (Conv2D) (None, 19, 19, 1024) 4718592 leaky_re_lu_17[0][0] ____________________________________________________________________________________________________ batch_normalization_18 (BatchNor (None, 19, 19, 1024) 4096 conv2d_18[0][0] ____________________________________________________________________________________________________ leaky_re_lu_18 (LeakyReLU) (None, 19, 19, 1024) 0 batch_normalization_18[0][0] 
____________________________________________________________________________________________________ conv2d_19 (Conv2D) (None, 19, 19, 1024) 9437184 leaky_re_lu_18[0][0] ____________________________________________________________________________________________________ batch_normalization_19 (BatchNor (None, 19, 19, 1024) 4096 conv2d_19[0][0] ____________________________________________________________________________________________________ conv2d_21 (Conv2D) (None, 38, 38, 64) 32768 leaky_re_lu_13[0][0] ____________________________________________________________________________________________________ leaky_re_lu_19 (LeakyReLU) (None, 19, 19, 1024) 0 batch_normalization_19[0][0] ____________________________________________________________________________________________________ batch_normalization_21 (BatchNor (None, 38, 38, 64) 256 conv2d_21[0][0] ____________________________________________________________________________________________________ conv2d_20 (Conv2D) (None, 19, 19, 1024) 9437184 leaky_re_lu_19[0][0] ____________________________________________________________________________________________________ leaky_re_lu_21 (LeakyReLU) (None, 38, 38, 64) 0 batch_normalization_21[0][0] ____________________________________________________________________________________________________ batch_normalization_20 (BatchNor (None, 19, 19, 1024) 4096 conv2d_20[0][0] ____________________________________________________________________________________________________ space_to_depth_x2 (Lambda) (None, 19, 19, 256) 0 leaky_re_lu_21[0][0] ____________________________________________________________________________________________________ leaky_re_lu_20 (LeakyReLU) (None, 19, 19, 1024) 0 batch_normalization_20[0][0] ____________________________________________________________________________________________________ concatenate_1 (Concatenate) (None, 19, 19, 1280) 0 space_to_depth_x2[0][0] leaky_re_lu_20[0][0] ____________________________________________________________________________________________________ conv2d_22 (Conv2D) (None, 19, 19, 1024) 11796480 concatenate_1[0][0] ____________________________________________________________________________________________________ batch_normalization_22 (BatchNor (None, 19, 19, 1024) 4096 conv2d_22[0][0] ____________________________________________________________________________________________________ leaky_re_lu_22 (LeakyReLU) (None, 19, 19, 1024) 0 batch_normalization_22[0][0] ____________________________________________________________________________________________________ conv2d_23 (Conv2D) (None, 19, 19, 425) 435625 leaky_re_lu_22[0][0] ==================================================================================================== Total params: 50,983,561 Trainable params: 50,962,889 Non-trainable params: 20,672 ____________________________________________________________________________________________________ ###Markdown **Note**: On some computers, you may see a warning message from Keras. Don't worry about it if you do--it is fine.**Reminder**: this model converts a preprocessed batch of input images (shape: (m, 608, 608, 3)) into a tensor of shape (m, 19, 19, 5, 85) as explained in Figure (2). 3.3 - Convert output of the model to usable bounding box tensorsThe output of `yolo_model` is a (m, 19, 19, 5, 85) tensor that needs to pass through non-trivial processing and conversion. The following cell does that for you. 
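Roughly speaking, the first part of that processing is just a reshape of the last axis from 425 into 5 × 85; the rest — applying the appropriate activations and decoding the box coordinates against the anchor sizes — is what `yolo_head` handles for you. A toy sketch of the reshape step alone, on dummy data (not part of the graph you are building):

```python
import numpy as np

raw = np.random.randn(1, 19, 19, 425)    # stand-in for the raw network output
encoded = raw.reshape(1, 19, 19, 5, 85)  # 5 anchor boxes, 85 numbers each
print(encoded.shape)                     # (1, 19, 19, 5, 85)
```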
###Code yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names)) ###Output _____no_output_____ ###Markdown You added `yolo_outputs` to your graph. This set of 4 tensors is ready to be used as input by your `yolo_eval` function. 3.4 - Filtering boxes`yolo_outputs` gave you all the predicted boxes of `yolo_model` in the correct format. You're now ready to perform filtering and select only the best boxes. Lets now call `yolo_eval`, which you had previously implemented, to do this. ###Code scores, boxes, classes = yolo_eval(yolo_outputs, image_shape) ###Output (?, ?, ?, 5) (?, ?, ?, 5, 4) (?, ?, ?, 5) ###Markdown 3.5 - Run the graph on an imageLet the fun begin. You have created a (`sess`) graph that can be summarized as follows:1. yolo_model.input is given to `yolo_model`. The model is used to compute the output yolo_model.output 2. yolo_model.output is processed by `yolo_head`. It gives you yolo_outputs 3. yolo_outputs goes through a filtering function, `yolo_eval`. It outputs your predictions: scores, boxes, classes **Exercise**: Implement predict() which runs the graph to test YOLO on an image.You will need to run a TensorFlow session, to have it compute `scores, boxes, classes`.The code below also uses the following function:```pythonimage, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))```which outputs:- image: a python (PIL) representation of your image used for drawing boxes. You won't need to use it.- image_data: a numpy-array representing the image. This will be the input to the CNN.**Important note**: when a model uses BatchNorm (as is the case in YOLO), you will need to pass an additional placeholder in the feed_dict {K.learning_phase(): 0}. ###Code def predict(sess, image_file): """ Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the preditions. Arguments: sess -- your tensorflow/Keras session containing the YOLO graph image_file -- name of an image stored in the "images" folder. Returns: out_scores -- tensor of shape (None, ), scores of the predicted boxes out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes out_classes -- tensor of shape (None, ), class index of the predicted boxes Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes. """ # Preprocess your image image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608)) # Run the session with the correct tensors and choose the correct placeholders in the feed_dict. # You'll need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0}) ### START CODE HERE ### (≈ 1 line) out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict={yolo_model.input: image_data, K.learning_phase(): 0}) ### END CODE HERE ### # Print predictions info print('Found {} boxes for {}'.format(len(out_boxes), image_file)) # Generate colors for drawing bounding boxes. colors = generate_colors(class_names) # Draw bounding boxes on the image file draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors) # Save the predicted bounding box on the image image.save(os.path.join("out", image_file), quality=90) # Display the results in the notebook output_image = scipy.misc.imread(os.path.join("out", image_file)) imshow(output_image) return out_scores, out_boxes, out_classes ###Output _____no_output_____ ###Markdown Run the following cell on the "test.jpg" image to verify that your function is correct. 
###Code out_scores, out_boxes, out_classes = predict(sess, "test.jpg") ###Output Found 7 boxes for test.jpg car 0.60 (925, 285) (1045, 374) car 0.66 (706, 279) (786, 350) bus 0.67 (5, 266) (220, 407) car 0.70 (947, 324) (1280, 705) car 0.74 (159, 303) (346, 440) car 0.80 (761, 282) (942, 412) car 0.89 (367, 300) (745, 648)
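###Markdown If you want to run the detector over a whole folder of frames instead of a single file, the `predict` function above can simply be called in a loop. A minimal sketch, assuming the frames sit in the notebook's "images" folder (the `.jpg` filter is an illustrative assumption):

```python
import os

# Reuse the session, graph and predict() defined above for every jpg in the folder.
for fname in sorted(os.listdir("images")):
    if fname.lower().endswith(".jpg"):
        out_scores, out_boxes, out_classes = predict(sess, fname)
```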
SQLite3-with-pandas.ipynb
###Markdown SQLite with pandas *SQLite* is a database engine that stores data in single files that can be easily shared. It is supported by many programming languages, such as python, that provides a library to access SQLite databases.We see how to use such a library, called *sqlite3*, to create, update, and query databases. In particular, we see how the *pandas* package simplifies working with SQLite databases Import libraries ###Code import pandas as pd import sqlite3 ###Output _____no_output_____ ###Markdown In order to work with a *SQLite* database from python, we have to make a *connection* using the **connect** function which returns a *connection* object. Below, we specify the (path to the) database's name, *iTunes*, in our case. Note that if the database does not exist, the connect function creates the empty database. Otherwise, a connection would be made to the existing database. ###Code # connection to a database db = sqlite3.connect('iTunes.db') ###Output _____no_output_____ ###Markdown Then, we create a *cursor* object that allows python to execute SQL queries. ###Code # create cursor to execute SQL queries cur = db.cursor() ###Output _____no_output_____ ###Markdown A very simple music database The picture below shows the database that we are going to create. It contains $4$ tables, *track*, *album*, *genre*, and *artist*. Each of them contains a proper primary key. The *track* table also contains a foreign key, *genre_id*, pointing to *genre* table, and *album_id*, pointing to *album* table. The *album* table, in turn, also contains a foreign key, *artist_id*, to the *artist* table. Create tablesThe first step is create the tables above.SQL queries are executed using *execute* from the cursor object.Moreover, the database is modified only when the modification is committed using *commit* from the database object. ###Code # artist table: # - a primary key # - name of artist cur.execute("""CREATE TABLE IF NOT EXISTS artist ( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, name TEXT )""" ) db.commit() # genre table: # - a primary key # - name of genre cur.execute("""CREATE TABLE IF NOT EXISTS genre ( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, name TEXT )""" ) db.commit() # album table: # - a primary key # - a foreign key pointing to the artist table # - name of the album cur.execute("""CREATE TABLE IF NOT EXISTS album ( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, artist_id INTEGER, title TEXT )""" ) db.commit() # track table: # - a primary key # - a foreign key pointing to the album table # - a foreign key pointing to the genre table # - track title, a rating, the length, the reproduction cur.execute("""CREATE TABLE IF NOT EXISTS track ( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, album_id INTEGER, genre_id INTEGER, title TEXT, len INTEGER, rating INTEGER, repr INTEGER )""" ) db.commit() ###Output _____no_output_____ ###Markdown SQL queries using pandaspandas provides the **read_sql_query** function which permits to execute SQL queries and returns pandas *DataFrames*. Of course, this has some advantages, such as automatically reading the names of the headers from the table as well as visualise data easily through a dataframe. Let us double check that our tables have been created: ###Code pd.read_sql_query("SELECT * FROM sqlite_master WHERE type='table'",db) ###Output _____no_output_____ ###Markdown Verify that tables are still empty. 
###Code pd.read_sql_query("SELECT * FROM track", db) ###Output _____no_output_____ ###Markdown Fill tablesLet us now populate the tables. Populate the artist table ###Code values = zip(['Led Zeppelin','Genesis','Metallica','AC/DC','Pink Floyd','The Cure','The Smiths']) for artistname in values: cur.execute("INSERT INTO artist (name) VALUES (?)",artistname) #endfor db.commit() # double check pd.read_sql_query("SELECT * FROM artist", db) ###Output _____no_output_____ ###Markdown Populate the genre table ###Code values = zip(['Rock','Metal','Pop','Progressive rock','Classic','Alternative rock']) for genre in values: cur.execute("INSERT INTO genre (name) VALUES (?)",genre) #endfor db.commit() # double check pd.read_sql_query("SELECT * FROM genre", db) ###Output _____no_output_____ ###Markdown Populate the album tableIn this table, we have to fill the foreign key column *artist_id*. The numbers here have to be equal to the corresponding primary key of the artist table. ###Code cur.execute("""INSERT INTO album (title,artist_id) VALUES ('Who Made Who',4), ('Physical Graffiti',1), ('The dark side of the moon',5), ('Ride the Lightning',3), ('Selling England by the pound',2), ('The Queen Is Dead',7), ('Wish',6) """) db.commit() # double check pd.read_sql_query("SELECT * FROM album", db) ###Output _____no_output_____ ###Markdown For example, notice that (foreign key) *artist_id* for "Who made who" album is $4$ and this is equal to (the primary key) *id* in the artist table for AC/DC. Populate the track tableIn this table, we have two foreign keys, *album_id* and *genre_id*. ###Code cur.execute("""INSERT INTO track (title,album_id,genre_id,len,rating,repr) VALUES ('Time',3,1,401,5,27), ('Firth of Fifth',5,1,521,5,18), ('The Rover',2,1,498,5,22), ('For Whom The Bell Tolls',4,1,390,4,11), ('You Shook Me All Night Long',1,1,330,5,15), ('The Boy with the Thorn in His Side',6,6,210,4,14), ('Friday I''m in Love',7,3,218,5,25) """) db.commit() # double check pd.read_sql_query("SELECT * FROM track", db) ###Output _____no_output_____ ###Markdown For example, notice that (the foreign key) *album_id* for "Time" song is $3$ and this is equal to (the primary key) *id* in the album table for "The dark side of the moon". Also (the foreign key) *genre_id* is $1$ and this is equal to (the primary key) *id* in the genre table for "Rock". 
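Hard-coding the foreign keys works for a toy database like this one, but it becomes error-prone as the tables grow. A safer pattern is to look an id up by name with a parameterized query before using it; a minimal sketch (the helper function is purely illustrative and not part of the schema above):

```python
def get_artist_id(cursor, artist_name):
    # The ? placeholder lets sqlite3 handle quoting and escaping safely.
    cursor.execute("SELECT id FROM artist WHERE name = ?", (artist_name,))
    row = cursor.fetchone()
    return row[0] if row is not None else None

print(get_artist_id(cur, 'Pink Floyd'))  # should print 5, given the insertion order above
```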
Querying the database (using JOIN) Return **the name and genre** of all songs present in the database ###Code pd.read_sql_query("SELECT track.title, genre.name FROM track JOIN genre ON track.genre_id = genre.id", db) ###Output _____no_output_____ ###Markdown Return **the tracks and the albums** of all songs present in the database ###Code pd.read_sql_query("""SELECT track.title, album.title FROM track JOIN album JOIN genre ON track.genre_id = genre.id AND track.album_id = album.id """,db) ###Output _____no_output_____ ###Markdown Return **the tracks, the artists, the albums, and the genres** of all songs ###Code pd.read_sql_query("""SELECT track.title, artist.name, album.title, genre.name FROM track JOIN artist JOIN album JOIN genre ON track.genre_id = genre.id AND track.album_id = album.id AND album.artist_id = artist.id """, db) ###Output _____no_output_____ ###Markdown Use AS ###Code pd.read_sql_query("""SELECT t.title AS Song, art.name AS Artist, album.title AS Album, genre.name AS Genre FROM track AS t JOIN artist AS art JOIN album JOIN genre ON t.genre_id = genre.id AND t.album_id = album.id AND album.artist_id = art.id """, db) ###Output _____no_output_____ ###Markdown Alter the table and insert new data We want to be more precise about generes. For example, we see that "For Whom The Bell Tolls" is not properly rock but metal and "Firth of Fifth" is Progressive rock. We have to update the associated (foreign key) *genre_id* in the track table. ###Code cur.execute("UPDATE track SET genre_id=2 WHERE title='For Whom The Bell Tolls'") cur.execute("UPDATE track SET genre_id=4 WHERE title='Firth of Fifth'") db.commit() # show result pd.read_sql_query("""SELECT track.title, artist.name, album.title, genre.name FROM track JOIN artist JOIN album JOIN genre ON track.genre_id = genre.id AND track.album_id = album.id AND album.artist_id = artist.id """, db) ###Output _____no_output_____ ###Markdown Now we want to add a new artist, one of its songs, the corresponding album, and the genre. In particular, we add a Michael Jackson's song. We need to add a new row in *artist* table, in *album* table, and *track* table, since the genre pop is already in. 
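The cells below do this step by step, writing the new primary keys in by hand. If you prefer not to track those ids yourself, `sqlite3` exposes the id generated by the last `INSERT` through `cursor.lastrowid`; a sketch of that pattern, shown for illustration only (the notebook continues with the explicit ids below):

```python
# Insert the artist, then reuse the auto-generated primary key for the album.
cur.execute("INSERT INTO artist (name) VALUES (?)", ('Michael Jackson',))
new_artist_id = cur.lastrowid
cur.execute("INSERT INTO album (title, artist_id) VALUES (?, ?)", ('Thriller', new_artist_id))
db.commit()
```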
###Code cur.execute("INSERT INTO artist (name) VALUES ('Michael Jackson')") db.commit() pd.read_sql_query("SELECT * FROM artist",db) cur.execute("INSERT INTO album (title,artist_id) VALUES ('Thriller',8)") db.commit() pd.read_sql_query("SELECT * FROM album",db) cur.execute("""INSERT INTO track (title,album_id,genre_id,len,rating,repr) VALUES ('Billie Jean',8,3,294,4,24)""") db.commit() pd.read_sql_query("SELECT * FROM track",db) # Let us see show everything pd.read_sql_query("""SELECT t.title AS Song, art.name AS Artist, album.title AS Album, genre.name AS Genre FROM track AS t JOIN artist AS art JOIN album JOIN genre ON t.genre_id = genre.id AND t.album_id = album.id AND album.artist_id = art.id """, db) ###Output _____no_output_____ ###Markdown Further queries Find songs by genre ###Code # Find all Rock songs pd.read_sql_query("""SELECT track.title AS Song, artist.name AS Artist, album.title AS Album, genre.name AS Genre, track.rating AS Rating, track.repr AS Reproductions FROM track JOIN artist JOIN album JOIN genre ON track.genre_id = genre.id AND track.album_id = album.id AND album.artist_id = artist.id WHERE genre.name = 'Rock' """, db) ###Output _____no_output_____ ###Markdown Find songs by artist ###Code #Find all Michael Jackson's songs pd.read_sql_query("""SELECT track.title AS Song, artist.name AS Artist, album.title AS Album, genre.name AS Genre, track.rating AS Rating, track.repr AS Reproductions FROM track JOIN artist JOIN album JOIN genre ON track.genre_id = genre.id AND track.album_id = album.id AND album.artist_id = artist.id WHERE Artist = 'Michael Jackson' """, db) ###Output _____no_output_____ ###Markdown Find songs of different genres ###Code # Find all Pop *AND* Metal songs pd.read_sql_query("""SELECT track.title AS Song, artist.name AS Artist, album.title AS Album, genre.name AS Genre, track.rating AS Rating, track.repr AS Reproductions FROM track JOIN artist JOIN album JOIN genre ON track.genre_id = genre.id AND track.album_id = album.id AND album.artist_id = artist.id WHERE genre.name = 'Pop' OR genre.name = 'Metal' """, db) ###Output _____no_output_____ ###Markdown Constraints ###Code # Find all Pop and Rock songs in the database whose reproductions are >= 23 pd.read_sql_query("""SELECT track.title AS Song, artist.name AS Artist, album.title AS Album, genre.name AS Genre, track.rating AS Rating, track.repr AS Reproductions FROM track JOIN artist JOIN album JOIN genre ON track.genre_id = genre.id AND track.album_id = album.id AND album.artist_id = artist.id AND track.repr >= 23 WHERE genre.name = 'Pop' OR genre.name = 'Rock' """, db) ###Output _____no_output_____ ###Markdown Close the cursor and the connection ###Code cur.close() db.close() ###Output _____no_output_____
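###Markdown Closing the cursor and the connection by hand works, but it is easy to forget when a query raises an exception half-way through. A sketch of a more defensive pattern using `contextlib.closing`, which guarantees `close()` is called even if an error occurs (the file name is the one used above):

```python
import sqlite3
from contextlib import closing

import pandas as pd

# The connection is closed automatically when the with-block exits,
# whether or not the query inside raised an exception.
with closing(sqlite3.connect('iTunes.db')) as db:
    artists = pd.read_sql_query("SELECT * FROM artist", db)

print(artists.head())
```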
Classification/Logistic_regression.ipynb
###Markdown Logistic Regression Importing the libraries ###Code import numpy as np import matplotlib.pyplot as plt import pandas as pd ###Output _____no_output_____ ###Markdown Importing the dataset ###Code dataset = pd.read_csv('Social_Network_Ads.csv') X = dataset.iloc[:, :-1].values y = dataset.iloc[:, -1].values dataset.head() ###Output _____no_output_____ ###Markdown Splitting the dataset into the Training set and Test set ###Code from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0) print(X_train) print(y_train) print(X_test) print(y_test) ###Output [0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 1 0 1 0 0 0 0 0 1 1 0 0 0 0 0 0 1 0 0 0 0 1 0 0 1 0 1 1 0 0 0 1 1 0 0 1 0 0 1 0 1 0 1 0 0 0 0 1 0 0 1 0 0 0 0 1 1 1 0 0 0 1 1 0 1 1 0 0 1 0 0 0 1 0 1 1 1] ###Markdown Feature Scaling Since, the data range of both features is largely different and we wouldnt want one feature to over-shadow another one. ###Code from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) ###Output _____no_output_____ ###Markdown fit transform for the training data as we are gonna learn the scaling parameters (sigma, mu), while just transform the test data as it will only use the previously learned parameters and test data will be considered as a surprise set. ###Code print(X_train) print(X_test) ###Output [[-0.80480212 0.50496393] [-0.01254409 -0.5677824 ] [-0.30964085 0.1570462 ] [-0.80480212 0.27301877] [-0.30964085 -0.5677824 ] [-1.10189888 -1.43757673] [-0.70576986 -1.58254245] [-0.21060859 2.15757314] [-1.99318916 -0.04590581] [ 0.8787462 -0.77073441] [-0.80480212 -0.59677555] [-1.00286662 -0.42281668] [-0.11157634 -0.42281668] [ 0.08648817 0.21503249] [-1.79512465 0.47597078] [-0.60673761 1.37475825] [-0.11157634 0.21503249] [-1.89415691 0.44697764] [ 1.67100423 1.75166912] [-0.30964085 -1.37959044] [-0.30964085 -0.65476184] [ 0.8787462 2.15757314] [ 0.28455268 -0.53878926] [ 0.8787462 1.02684052] [-1.49802789 -1.20563157] [ 1.07681071 2.07059371] [-1.00286662 0.50496393] [-0.90383437 0.30201192] [-0.11157634 -0.21986468] [-0.60673761 0.47597078] [-1.6960924 0.53395707] [-0.11157634 0.27301877] [ 1.86906873 -0.27785096] [-0.11157634 -0.48080297] [-1.39899564 -0.33583725] [-1.99318916 -0.50979612] [-1.59706014 0.33100506] [-0.4086731 -0.77073441] [-0.70576986 -1.03167271] [ 1.07681071 -0.97368642] [-1.10189888 0.53395707] [ 0.28455268 -0.50979612] [-1.10189888 0.41798449] [-0.30964085 -1.43757673] [ 0.48261718 1.22979253] [-1.10189888 -0.33583725] [-0.11157634 0.30201192] [ 1.37390747 0.59194336] [-1.20093113 -1.14764529] [ 1.07681071 0.47597078] [ 1.86906873 1.51972397] [-0.4086731 -1.29261101] [-0.30964085 -0.3648304 ] [-0.4086731 1.31677196] [ 2.06713324 0.53395707] [ 0.68068169 -1.089659 ] [-0.90383437 0.38899135] [-1.20093113 0.30201192] [ 1.07681071 -1.20563157] [-1.49802789 -1.43757673] [-0.60673761 -1.49556302] [ 2.1661655 -0.79972756] [-1.89415691 0.18603934] [-0.21060859 0.85288166] [-1.89415691 -1.26361786] [ 2.1661655 0.38899135] [-1.39899564 0.56295021] [-1.10189888 -0.33583725] [ 0.18552042 -0.65476184] [ 0.38358493 0.01208048] [-0.60673761 2.331532 ] [-0.30964085 0.21503249] [-1.59706014 -0.19087153] [ 0.68068169 -1.37959044] [-1.10189888 0.56295021] [-1.99318916 0.35999821] [ 0.38358493 0.27301877] [ 0.18552042 -0.27785096] [ 1.47293972 -1.03167271] [ 0.8787462 1.08482681] [ 1.96810099 2.15757314] [ 2.06713324 0.38899135] 
[-1.39899564 -0.42281668] [-1.20093113 -1.00267957] [ 1.96810099 -0.91570013] [ 0.38358493 0.30201192] [ 0.18552042 0.1570462 ] [ 2.06713324 1.75166912] [ 0.77971394 -0.8287207 ] [ 0.28455268 -0.27785096] [ 0.38358493 -0.16187839] [-0.11157634 2.21555943] [-1.49802789 -0.62576869] [-1.29996338 -1.06066585] [-1.39899564 0.41798449] [-1.10189888 0.76590222] [-1.49802789 -0.19087153] [ 0.97777845 -1.06066585] [ 0.97777845 0.59194336] [ 0.38358493 0.99784738]] ###Markdown Training the Logistic Regression model on the Training set ###Code from sklearn.linear_model import LogisticRegression classifier = LogisticRegression(random_state = 0) classifier.fit(X_train, y_train) ###Output _____no_output_____ ###Markdown Predicting a new result Let's try to predict whether a person with estimated salary Rs. 87000/- and 30 yrs of age, will buy the product in the ad or not. ###Code print(classifier.predict(sc.transform([[30,87000]]))) ###Output [0] ###Markdown Predicting the Test set results predicting on X_test(which is transformed) using the classifier.predict converting y_pred and y_test into 2D arrays by reshaping them as per their lengths and specifying column 1. Can also use .reshape(-1,1) ###Code y_pred = classifier.predict(X_test) print(np.concatenate((y_pred.reshape(-1,1), y_test.reshape(len(y_test),1)),1)) ###Output [[0 0] [0 0] [0 0] [0 0] [0 0] [0 0] [0 0] [1 1] [0 0] [1 0] [0 0] [0 0] [0 0] [0 0] [0 0] [0 0] [0 0] [0 0] [1 1] [0 0] [0 0] [1 1] [0 0] [1 1] [0 0] [1 1] [0 0] [0 0] [0 0] [0 0] [0 0] [0 1] [1 1] [0 0] [0 0] [0 0] [0 0] [0 0] [0 0] [1 1] [0 0] [0 0] [0 0] [0 0] [1 1] [0 0] [0 0] [1 1] [0 0] [1 1] [1 1] [0 0] [0 0] [0 0] [1 1] [0 1] [0 0] [0 0] [0 1] [0 0] [0 0] [1 1] [0 0] [0 1] [0 0] [1 1] [0 0] [0 0] [0 0] [0 0] [1 1] [0 0] [0 0] [0 1] [0 0] [0 0] [1 0] [0 0] [1 1] [1 1] [1 1] [1 0] [0 0] [0 0] [1 1] [1 1] [0 0] [1 1] [0 1] [0 0] [0 0] [1 1] [0 0] [0 0] [0 0] [0 1] [0 0] [0 1] [1 1] [1 1]] ###Markdown Making the Confusion Matrix As visible above, we can see some wrong predictions, so lets get a confusion matrix to clearly understand how many TP AND TNs do we have. 
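For a binary problem with labels 0/1, scikit-learn lays the matrix out as `[[TN, FP], [FN, TP]]`, so the four counts can also be unpacked directly. A small sketch, assuming `y_test` and `y_pred` from the cells above:

```python
from sklearn.metrics import confusion_matrix

# ravel() flattens the 2x2 matrix into (tn, fp, fn, tp) for binary labels 0/1.
tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()
print("TN:", tn, "FP:", fp, "FN:", fn, "TP:", tp)
```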
###Code from sklearn.metrics import confusion_matrix, accuracy_score cm = confusion_matrix(y_test, y_pred) print(cm) accuracy_score(y_test, y_pred) ###Output [[65 3] [ 8 24]] ###Markdown Visualising the Training set results ###Code from matplotlib.colors import ListedColormap X_set, y_set = sc.inverse_transform(X_train), y_train X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 10, stop = X_set[:, 0].max() + 10, step = 0.25), np.arange(start = X_set[:, 1].min() - 1000, stop = X_set[:, 1].max() + 1000, step = 0.25)) plt.contourf(X1, X2, classifier.predict(sc.transform(np.array([X1.ravel(), X2.ravel()]).T)).reshape(X1.shape), alpha = 0.75, cmap = ListedColormap(('red', 'green'))) plt.xlim(X1.min(), X1.max()) plt.ylim(X2.min(), X2.max()) for i, j in enumerate(np.unique(y_set)): plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], color = ListedColormap(('red', 'green'))(i), label = j) plt.title('Logistic Regression (Training set)') plt.xlabel('Age') plt.ylabel('Estimated Salary') plt.legend() plt.show() ###Output _____no_output_____ ###Markdown Visualising the Test set results ###Code from matplotlib.colors import ListedColormap X_set, y_set = sc.inverse_transform(X_test), y_test X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 10, stop = X_set[:, 0].max() + 10, step = 0.25), np.arange(start = X_set[:, 1].min() - 1000, stop = X_set[:, 1].max() + 1000, step = 0.25)) plt.contourf(X1, X2, classifier.predict(sc.transform(np.array([X1.ravel(), X2.ravel()]).T)).reshape(X1.shape), alpha = 0.75, cmap = ListedColormap(('red', 'green'))) plt.xlim(X1.min(), X1.max()) plt.ylim(X2.min(), X2.max()) for i, j in enumerate(np.unique(y_set)): plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap(('red', 'green'))(i), label = j) plt.title('Logistic Regression (Test set)') plt.xlabel('Age') plt.ylabel('Estimated Salary') plt.legend() plt.show() ###Output _____no_output_____ ###Markdown Logistic Regression Importing the libraries ###Code import numpy as np import matplotlib.pyplot as plt import pandas as pd ###Output _____no_output_____ ###Markdown Importing the dataset ###Code dataset = pd.read_csv('Social_Network_Ads.csv') x = dataset.iloc[:, :-1].values y = dataset.iloc[:, -1].values ###Output _____no_output_____ ###Markdown Splitting the dataset into the Training set and Test set ###Code from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.25, random_state = 0) print(x_train) print(y_train) print(x_test) print(y_test) ###Output [0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 1 0 1 0 0 0 0 0 1 1 0 0 0 0 0 0 1 0 0 0 0 1 0 0 1 0 1 1 0 0 0 1 1 0 0 1 0 0 1 0 1 0 1 0 0 0 0 1 0 0 1 0 0 0 0 1 1 1 0 0 0 1 1 0 1 1 0 0 1 0 0 0 1 0 1 1 1] ###Markdown Feature Scaling ###Code from sklearn.preprocessing import StandardScaler sc = StandardScaler() x_train = sc.fit_transform(x_train) x_test = sc.transform(x_test) print(x_train) print(x_test) ###Output [[-0.80480212 0.50496393] [-0.01254409 -0.5677824 ] [-0.30964085 0.1570462 ] [-0.80480212 0.27301877] [-0.30964085 -0.5677824 ] [-1.10189888 -1.43757673] [-0.70576986 -1.58254245] [-0.21060859 2.15757314] [-1.99318916 -0.04590581] [ 0.8787462 -0.77073441] [-0.80480212 -0.59677555] [-1.00286662 -0.42281668] [-0.11157634 -0.42281668] [ 0.08648817 0.21503249] [-1.79512465 0.47597078] [-0.60673761 1.37475825] [-0.11157634 0.21503249] [-1.89415691 0.44697764] [ 1.67100423 1.75166912] [-0.30964085 -1.37959044] [-0.30964085 -0.65476184] [ 0.8787462 2.15757314] [ 0.28455268 
-0.53878926] [ 0.8787462 1.02684052] [-1.49802789 -1.20563157] [ 1.07681071 2.07059371] [-1.00286662 0.50496393] [-0.90383437 0.30201192] [-0.11157634 -0.21986468] [-0.60673761 0.47597078] [-1.6960924 0.53395707] [-0.11157634 0.27301877] [ 1.86906873 -0.27785096] [-0.11157634 -0.48080297] [-1.39899564 -0.33583725] [-1.99318916 -0.50979612] [-1.59706014 0.33100506] [-0.4086731 -0.77073441] [-0.70576986 -1.03167271] [ 1.07681071 -0.97368642] [-1.10189888 0.53395707] [ 0.28455268 -0.50979612] [-1.10189888 0.41798449] [-0.30964085 -1.43757673] [ 0.48261718 1.22979253] [-1.10189888 -0.33583725] [-0.11157634 0.30201192] [ 1.37390747 0.59194336] [-1.20093113 -1.14764529] [ 1.07681071 0.47597078] [ 1.86906873 1.51972397] [-0.4086731 -1.29261101] [-0.30964085 -0.3648304 ] [-0.4086731 1.31677196] [ 2.06713324 0.53395707] [ 0.68068169 -1.089659 ] [-0.90383437 0.38899135] [-1.20093113 0.30201192] [ 1.07681071 -1.20563157] [-1.49802789 -1.43757673] [-0.60673761 -1.49556302] [ 2.1661655 -0.79972756] [-1.89415691 0.18603934] [-0.21060859 0.85288166] [-1.89415691 -1.26361786] [ 2.1661655 0.38899135] [-1.39899564 0.56295021] [-1.10189888 -0.33583725] [ 0.18552042 -0.65476184] [ 0.38358493 0.01208048] [-0.60673761 2.331532 ] [-0.30964085 0.21503249] [-1.59706014 -0.19087153] [ 0.68068169 -1.37959044] [-1.10189888 0.56295021] [-1.99318916 0.35999821] [ 0.38358493 0.27301877] [ 0.18552042 -0.27785096] [ 1.47293972 -1.03167271] [ 0.8787462 1.08482681] [ 1.96810099 2.15757314] [ 2.06713324 0.38899135] [-1.39899564 -0.42281668] [-1.20093113 -1.00267957] [ 1.96810099 -0.91570013] [ 0.38358493 0.30201192] [ 0.18552042 0.1570462 ] [ 2.06713324 1.75166912] [ 0.77971394 -0.8287207 ] [ 0.28455268 -0.27785096] [ 0.38358493 -0.16187839] [-0.11157634 2.21555943] [-1.49802789 -0.62576869] [-1.29996338 -1.06066585] [-1.39899564 0.41798449] [-1.10189888 0.76590222] [-1.49802789 -0.19087153] [ 0.97777845 -1.06066585] [ 0.97777845 0.59194336] [ 0.38358493 0.99784738]] ###Markdown Training the Logistic Regression model on the Training set ###Code from sklearn.linear_model import LogisticRegression classifier = LogisticRegression(random_state = 0) classifier.fit(x_train, y_train) ###Output _____no_output_____ ###Markdown Predicting a new result ###Code print(classifier.predict(sc.transform([[30, 87000]]))) ###Output [0] ###Markdown Predicting the Test set results ###Code y_pred = classifier.predict(x_test) print(np.concatenate((y_pred.reshape(len(y_pred), 1), y_test.reshape(len(y_test), 1)), 1)) ###Output [[0 0] [0 0] [0 0] [0 0] [0 0] [0 0] [0 0] [1 1] [0 0] [1 0] [0 0] [0 0] [0 0] [0 0] [0 0] [0 0] [0 0] [0 0] [1 1] [0 0] [0 0] [1 1] [0 0] [1 1] [0 0] [1 1] [0 0] [0 0] [0 0] [0 0] [0 0] [0 1] [1 1] [0 0] [0 0] [0 0] [0 0] [0 0] [0 0] [1 1] [0 0] [0 0] [0 0] [0 0] [1 1] [0 0] [0 0] [1 1] [0 0] [1 1] [1 1] [0 0] [0 0] [0 0] [1 1] [0 1] [0 0] [0 0] [0 1] [0 0] [0 0] [1 1] [0 0] [0 1] [0 0] [1 1] [0 0] [0 0] [0 0] [0 0] [1 1] [0 0] [0 0] [0 1] [0 0] [0 0] [1 0] [0 0] [1 1] [1 1] [1 1] [1 0] [0 0] [0 0] [1 1] [1 1] [0 0] [1 1] [0 1] [0 0] [0 0] [1 1] [0 0] [0 0] [0 0] [0 1] [0 0] [0 1] [1 1] [1 1]] ###Markdown Making the Confusion Matrix ###Code from sklearn.metrics import confusion_matrix, accuracy_score cm = confusion_matrix(y_test, y_pred) print(cm) accuracy_score(y_test, y_pred) ###Output [[65 3] [ 8 24]] ###Markdown Visualising the Training set results ###Code from matplotlib.colors import ListedColormap X_set, y_set = sc.inverse_transform(x_train), y_train X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 10, stop = 
X_set[:, 0].max() + 10, step = 0.25), np.arange(start = X_set[:, 1].min() - 1000, stop = X_set[:, 1].max() + 1000, step = 0.25)) plt.contourf(X1, X2, classifier.predict(sc.transform(np.array([X1.ravel(), X2.ravel()]).T)).reshape(X1.shape), alpha = 0.75, cmap = ListedColormap(('red', 'green'))) plt.xlim(X1.min(), X1.max()) plt.ylim(X2.min(), X2.max()) for i, j in enumerate(np.unique(y_set)): plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap(('red', 'green'))(i), label = j) plt.title('Logistic Regression (Training set)') plt.xlabel('Age') plt.ylabel('Estimated Salary') plt.legend() plt.show() ###Output *c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points. *c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points. ###Markdown Visualising the Test set results ###Code from matplotlib.colors import ListedColormap X_set, y_set = sc.inverse_transform(x_test), y_test X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 10, stop = X_set[:, 0].max() + 10, step = 0.25), np.arange(start = X_set[:, 1].min() - 1000, stop = X_set[:, 1].max() + 1000, step = 0.25)) plt.contourf(X1, X2, classifier.predict(sc.transform(np.array([X1.ravel(), X2.ravel()]).T)).reshape(X1.shape), alpha = 0.75, cmap = ListedColormap(('red', 'green'))) plt.xlim(X1.min(), X1.max()) plt.ylim(X2.min(), X2.max()) for i, j in enumerate(np.unique(y_set)): plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap(('red', 'green'))(i), label = j) plt.title('Logistic Regression (Test set)') plt.xlabel('Age') plt.ylabel('Estimated Salary') plt.legend() plt.show() ###Output *c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points. *c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points.
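###Markdown The warnings printed above come from passing a single RGB tuple through the `c` argument of `plt.scatter`; matplotlib itself suggests switching to the `color` keyword. A minimal sketch of the change inside the plotting loop (same data, only the keyword differs):

```python
# Using color= instead of c= removes the RGB/RGBA ambiguity warning.
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                color=ListedColormap(('red', 'green'))(i), label=j)
```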
code_notebooks/1.1-quantecon/pandas/matplotlib.ipynb
###Markdown Intermediate Plotting**Prerequisites**- [Introduction](intro.ipynb) **Outcomes**- Be able to build visualizations with multiple subplots - Add plot elements using data from multiple DataFrames - Understand the relationship between the matplotlib Figure and Axes objects - Customize fonts, legends, axis labels, tick labels, titles, and more! - Save figures to files to add to presentation, email, or use outside of a Jupyter notebook **Data**- iPhone announcement dates - Apple stock price ###Code # Uncomment following line to install on colab #! pip install qeds import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib import matplotlib.transforms as transforms import quandl quandl.ApiConfig.api_key = os.environ.get("QUANDL_AUTH", "Dn6BtVoBhzuKTuyo6hbp") %matplotlib inline # activate plot theme import qeds qeds.themes.mpl_style(); ###Output _____no_output_____ ###Markdown Outline- [Intermediate Plotting](Intermediate-Plotting) - [Introduction](Introduction) - [The Want Operator: Replicate a Professional Figure](The-Want-Operator:-Replicate-a-Professional-Figure) - [Data](Data) - [Warmup](Warmup) - [Data Cleaning](Data-Cleaning) - [Constructing the Plot](Constructing-the-Plot) - [Saving the Figure](Saving-the-Figure) - [Exercises](Exercises) IntroductionWe have already seen a few examples of basic visualizations createdusing the `.plot` method for a DataFrame.When we use the `.plot` method, pandas uses a package calledmatplotlib that actually creates the visualization.In this lecture, we will dive deeper into the customization options inthe DataFrame `.plot` method as well as look under the hood at how touse matplotlib directly to unlock unlimited control over our figures. The Want Operator: Replicate a Professional FigureVisualization is a complex subject.[Manybooks](https://www.amazon.com/s/ref=nb_sb_noss?url=search-alias%3Dstripbooks&field-keywords=data+visualization&rh=n%3A283155%2Ck%3Adata+visualization)have been written on the subject.We cannot hope to convey all of what is possible in a short lecture, sowe thought we’d try something fun instead.Our goal in this lecture is to show off some of the common – and somenot-so-common – matplotlib capabilities to try to re-create thisstriking figure from a Financial Times[article](https://www.ft.com/content/4743ce96-e4bf-11e7-97e2-916d4fbac0da).The figure shows how we can see the anticipation of and response to iPhoneannouncements in Apple stock share prices. Our goal is to replicate the top portion of this figure in this lecture.**Disclaimer**: Many tools you will see in this lecture will be“more advanced” than what you typically use when customizing a plot.Don’t (please!) try to memorize the commands used here – the purpose ofthe lecture is to show what is possible and expose you a variety of themethods you can use. DataLet’s get the data.First, we create a Series containing the date of each iPhoneannouncement that appears in the FT original chart.As there have only been 11 of them and we couldn’t find this “dataset”online anywhere, we will do this by hand. ###Code announcement_dates = pd.Series( [ "First iPhone", "3G", "3GS", "4", "4S", "5", "5S/5C", "6/6 Plus", "6S/6S Plus", "7/7 Plus", "8/8 Plus/X" ], index=pd.to_datetime([ "Jan. 9, 2007", "Jun. 9, 2008", "Jun. 8, 2009", "Jan. 11, 2011", "Oct. 4, 2011", "Sep. 12, 2012", "Sep. 10, 2013", "Sep. 9, 2014", "Sep. 9, 2015", "Sep. 7, 2016", "Sep. 
12, 2017" ]), name="Model" ) announcement_dates ###Output _____no_output_____ ###Markdown Then, let’s grab Apple’s stock price data from quandl, starting a fewweeks before the first announcement. ###Code aapl = quandl.get("WIKI/AAPL", start_date="2006-12-25") aapl.head() ###Output _____no_output_____ ###Markdown WarmupMatplotlib figures are composed two main types of Python objects:1. `Figure`: represents the entirety of the visualization 1. `Axes`: A (potentially full) subset of the figure on which things are drawn Most of the time, we will customize our plots by calling methods on anAxes object.However, things like setting a title over the entire plot or saving theplot to a file on your computer require methods on a `Figure`.Let’s start by getting our hands dirty and practicing using theseobjects.> See exercise 1 in the [*exercise list*](exerciselist-0)You should have seen that the object returned by the `.plot` method isa matplotlib Axes.As mentioned above, we can control most aspects of a plot by callingmethods on an Axes.Let’s see some examples. ###Code # plot the Adjusted open to account for stock split ax = aapl["Adj. Open"].plot() # get the figure so we can re-display the plot after making changes fig = ax.get_figure() # set the title ax.set_title("AAPL Adjusted opening price") fig ax.set_ylim(0, 200) fig ax.set_yticks([0, 50, 100, 150, 200]) fig ###Output _____no_output_____ ###Markdown We can also create a Figure and Axes beforehand and then tell pandas toplot a DataFrame or Series’ data on the axis.We typically use the `plt.subplots` function to create the Figure andAxes.Below, we use this function to create a Figure that is 10 inches wide by6 inches tall and filled with a one by two grid of Axes objects. ###Code fig2, axs2 = plt.subplots(1, 2, figsize=(10, 6)) print("type(fig2): ", type(fig2)) print("type(axs): ", type(axs2)) print("axs2.shape: ", axs2.shape) ###Output type(fig2): <class 'matplotlib.figure.Figure'> type(axs): <class 'numpy.ndarray'> axs2.shape: (2,) ###Markdown We can plot from our DataFrame directly on our Axes objects by settingthe `ax` argument when calling `.plot`. ###Code aapl[["Adj. Low", "Adj. High"]].plot(ax=axs2[0]) aapl[["Low", "High"]].plot(ax=axs2[1]) fig2 ###Output _____no_output_____ ###Markdown > See exercise 2 in the [*exercise list*](exerciselist-0)> See exercise 3 in the [*exercise list*](exerciselist-0) Data CleaningLet’s continue on our path to recreating the Financial Timesvisualization from above.Before we can actually make the plot, we first have to clean the data.Looking at our goal, we will need share price three days before andafter each announcement.We will also need to normalize the share price to be 100 on the day ofthe announcement and scale the neighboring days accordingly. 
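Before building the helper, it may help to see what a custom business-day offset does on its own. This is a small illustrative check, not part of the original lecture; it only assumes that pandas is imported as `pd` as above: ###Code
from pandas.tseries.holiday import USFederalHolidayCalendar
from pandas.tseries.offsets import CustomBusinessDay

bday_us = CustomBusinessDay(calendar=USFederalHolidayCalendar())

# Friday January 5, 2007 plus one US business day lands on Monday January 8, 2007:
# weekends (and US federal holidays) are skipped automatically.
next_bday = pd.to_datetime("2007-01-05") + 1 * bday_us
###Output _____no_output_____
###Markdown With that behavior in mind, the next cell builds the window of business days around each announcement date.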
###Code from pandas.tseries.holiday import USFederalHolidayCalendar from pandas.tseries.offsets import CustomBusinessDay bday_us = CustomBusinessDay(calendar=USFederalHolidayCalendar()) def neighbor_dates(date, nbefore=3, nafter=3): # Make sure the date is a datetime date = pd.to_datetime(date) # Create a list of business days before_and_after = [date + i*bday_us for i in range(-nbefore, nafter+1)] return before_and_after dates = [] for ann_date in announcement_dates.index: dates.extend(neighbor_dates(ann_date)) dates = pd.Series(dates) # Index into our DataFrame using the new dates prices = aapl.loc[dates] prices.head() ###Output _____no_output_____ ###Markdown We now want to bring information on iPhone models into the DataFrame.We do this by:- Joining on the announcement date. This will introduce a new column named `Model` which has a value in the announcement date but has 3 `NaN` above and below each announcement date (a total of 66 `NaN`) - Using the methods `ffill` and `bfill`, we can replace these `NaN`s with the corresponding model names. - `prices.ffill(limit=3)` will fill the three days *after* the announcement with the model name (down to 33 Nan) - `prices.bfill(limit=3)` will fill the three days *before* the announcement with the model name (no more `NaN`) ###Code prices = prices.join(announcement_dates) print(prices["Model"].isnull().sum()) prices["Model"].head(7) prices = prices.ffill(limit=3) print(prices["Model"].isnull().sum()) prices["Model"].head(7) prices = prices.bfill(limit=3) print(prices["Model"].isnull().sum()) prices["Model"].head(7) ###Output 0 ###Markdown Success!Now for the second part of the cleaning: normalize the share price oneach announcement date to 100 and scale all neighbors accordingly. ###Code def scale_by_middle(df): # How many rows N = df.shape[0] # Divide by middle row and scale to 100 # Note: N // 2 is modulus division meaning that it is # rounded to nearest whole number) out = (df["Open"] / df.iloc[N // 2]["Open"]) * 100 # We don't want to keep actual dates, but rather the number # of days before or after the announcment. Let's set that # as the index. Note the +1 because range excludes upper limit out.index = list(range(-(N//2), N//2+1)) # also change the name of this series out.name = "DeltaDays" return out to_plot = prices.groupby("Model").apply(scale_by_middle).T to_plot ###Output _____no_output_____ ###Markdown Re-order the columns. ###Code to_plot = to_plot[announcement_dates.values] to_plot ###Output _____no_output_____ ###Markdown Constructing the PlotNow that we have cleaned up the data, let’s construct the plot.We do this by using the DataFrame `.plot` method and then using thematplotlib methods and functions to fine tune our plot, changing onefeature or set of features at a time.To prepare our use of the plot method, we will need to set up some datafor what color each line should be, as well as where to draw the tickmarks on the vertical axis. ###Code # colors background = tuple(np.array([253, 238, 222]) / 255) blue = tuple(np.array([20, 64, 134]) / 255) pink = tuple(np.array([232, 75, 126]) / 255) def get_color(x): if "S" in x: return pink else: return blue colors = announcement_dates.map(get_color).values # yticks yticks = [90, 95, 100, 105, 110, 115] ###Output _____no_output_____ ###Markdown Below, we construct the basic plot using `to_plot.plot`.Notice that we have specified a few options as keyword arguments to ourfunction. 
###Code # construct figure and Axes objects fig, axs = plt.subplots(1, 11, sharey=True, figsize=(14, 5)) # We can pass our array of Axes and `subplots=True` # because we have one Axes per column to_plot.plot( ax=axs, subplots=True, legend=False, yticks=yticks, xticks=[-3, 3], color=colors, linewidth=3, fontsize=12 ); ###Output _____no_output_____ ###Markdown > See exercise 4 in the [*exercise list*](exerciselist-0) Subplot Spacing: `fig.tight_layout`That figure has the basic lines we are after, but is quite ugly whencompared to the FT figure we are trying to produce.Let’s refine the plot one step at a time.First, notice how the `-3` and `3` labels are running into eachother?This commonly happens in figures with many subplots.The function `fig.tight_layout()` will fix these problems – as wellas most other subplot spacing issues.We *almost always* call this method when building a plot with multiplesubplots. ###Code # add some spacing around subplots fig.tight_layout() fig ###Output _____no_output_____ ###Markdown Properties of the FigureNow, let’s make the background of our figure match the background of theFT post.To do this, we will use the `fig.set_facecolor` method. ###Code # set background color fig.set_facecolor(background) fig ###Output _____no_output_____ ###Markdown Properties of an `Axes`Notice that this worked for the figure as a whole, but not for any ofthe Axes.To fix this, we will need to call `.set_facecolor` on each Axes.While we are doing that, let’s fix up a number of other things about theAxes:- Add Axes titles - Remove the x axis titles - Remove tick marks on y axis - Add a “faint” version of the line from the first subplot - Remove x axis tick labels - Make x axis ticks longer and semi-transparent - Make sure all Axes have same y limits - Remove the spines (the border on each Axes) - Add a white circle to (0, 100) on each Axes ###Code # For each Axes... do the following for i in range(announcement_dates.shape[0]): ax = axs[i] # add faint blue line representing impact of original iPhone announcement to_plot["First iPhone"].plot(ax=ax, color=blue, alpha=0.2, linewidth=3) # add a title ti = str(announcement_dates.index[i].year) + "\n" + announcement_dates.iloc[i] + "\n" ax.set_title(ti) # set background color of plotting area ax.set_facecolor(background) # remove xlabels ax.set_xlabel("") # turn of tick marks ax.tick_params(which="both", left=False, labelbottom=False) # make x ticks longer and semi-transparent ax.tick_params(axis="x", length=7.0, color=(0, 0, 0, 0.4)) # set limits on vertical axis ax.set_ylim((yticks[0], yticks[-1])) # add a white circle at 0, 100 ax.plot(0, 100, 'o', markeredgecolor=blue, markersize=8, color="white", zorder=10) # remove border around each subplot for direction in ["top", "right", "left", "bottom"]: ax.spines[direction].set_visible(False) fig ###Output _____no_output_____ ###Markdown > See exercise 5 in the [*exercise list*](exerciselist-0)Let’s continue and add tick labels to the right of the far rightAxes. ###Code # add tick labels to right of iPhone 8/X announcement axs[-1].tick_params(labelright=True, labelsize=12) axs[-1] fig ###Output _____no_output_____ ###Markdown We can also add tick labels for the x-axis ticks on the 1st and 6thplots. 
###Code for ax in axs[[0, 5]]: ax.tick_params(labelbottom=True) ax.set_xticklabels(["3 days\nbefore", "3 days\nafter"]) # need to make these tick labels centered at tick, # instead of the default of right aligned for label in ax.xaxis.get_ticklabels(): label.set_horizontalalignment("center") fig ###Output _____no_output_____ ###Markdown Transforms and LinesNow we would like to add a horizontal line that lines up with eachvertical tick label (the numbers from 90 to 115) and runs across theentire figure.This is actually harder than it sounds because most of the “drawing”capabilities of matplotlib are built around drawing on a single Axes andwe want to draw across 11 of them.However, as we promised above, anything *is* possible and we will showyou how to do it.When matplotlib draws any data – be it a line, circle, rectangle, orother – it must know what *coordinate system* to use.We typically think about drawing things in the *data’s* coordinatesystem (remember above how we added a white circle at (0, 100)).However, we might also want to draw using two other coordinate systems:- Figure: the bottom left of the figure is (0, 0) and top right is (1, 1) - Axes: The bottom left of an Axes is (0, 0) and top right is (1, 1) For our application, we would like to use the figure’s coordinate systemin the `x` dimension (going across the plot), but the data’scoordinate system in the `y` dimension (so we make sure to put thelines at each of our `yticks`).Luckily for us, matplotlib provides a way to use *exactly* that coordinatesystem. ###Code # create a transform that... trans = transforms.blended_transform_factory( fig.transFigure, # goes across whole figure in x direction axs[0].transData # goes up with the y data in the first axis ) ###Output _____no_output_____ ###Markdown We can now use `trans` to draw lines where the `x` values will mapfrom (0, 1) in the Figure coordinates and the `y` values will go from(90, 115) on the data coordinates. ###Code for y in yticks: l = plt.Line2D( # x values found by trial and error [0.04, 0.985], [y, y], transform=trans, color="black", alpha=0.4, linewidth=0.5, zorder=0.1 ) if y == 100: l.set_linewidth(1.5) fig.lines.append(l) fig ###Output _____no_output_____ ###Markdown Now, we need to a add vertical line from the (0, 90) to (0, 100) on thefirst axis so we can label the center of each line as the announcementdate.We will split this line in two to leave room for the text `Announced`to be added soon.To add the lines, we will use the data coordinate system from the firstAxes. ###Code for y in ([90, 91.5], [93, 100]): l = plt.Line2D( [0, 0], y, transform=axs[0].transData, color="black", alpha=0.5, linewidth=1.5, zorder=0.1 ) fig.lines.append(l) fig ###Output _____no_output_____ ###Markdown AnnotationsThe last step on our journey is to add annotations that mark Tim Cook’sfirst announcement as CEO, the lackluster market response to `S` modelannouncements, and a label showing that the white dot on each subplot isassociated with the announcement date.Adding text to figures is always a bit verbose, so don’t get too scaredby what is happening here. 
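If it helps, here is the call pattern in isolation. This is a standalone toy example, not part of the figure we are building, and it shows the three pieces every `annotate` call needs: the text, the point being annotated (`xy`), and where the text itself goes (`xytext`): ###Code
toy_fig, toy_ax = plt.subplots()
toy_ax.plot([0, 1, 2, 3], [0, 1, 4, 9])
toy_ax.annotate(
    "a point of interest",            # the text to draw
    xy=(2, 4),                        # the data point being annotated
    xytext=(0.5, 6),                  # where the text is placed
    arrowprops={"arrowstyle": "->"},  # draw an arrow from the text back to xy
)
###Output _____no_output_____
###Markdown Now for the actual annotations on the iPhone figure.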
###Code axs[5].annotate( "Tim Cook's first iPhone\nannouncement as Apple's CEO", xy=(0.2, 99.5), xycoords="data", xytext=(-2, 93), annotation_clip=False, horizontalalignment="right", arrowprops={ "arrowstyle": "-|>", "connectionstyle": "angle3,angleA=0,angleB=110", "color": "black" }, fontsize=12, ) fig for ann in axs[8].texts: ann.remove() axs[8].annotate( "Hardware upgrade 'S' models\nunderwhelm the market", xy=(-5, 99.5), xycoords="data", xytext=(-12, 92), annotation_clip=False, horizontalalignment="left", arrowprops={"visible": False}, fontsize=12, fontweight="semibold", color=pink, ) fig axs[0].annotate( "Announced", xy=(0, 99.5), xycoords="data", xytext=(0, 92), annotation_clip=False, horizontalalignment="center", arrowprops={"visible": False}, fontsize=12, ) fig ###Output _____no_output_____ ###Markdown Saving the FigureNow that we have a finished product we are happy with, let’s save it toa file on our computer using the `fig.savefig` function. ###Code fig.savefig("aapl_iPhone_annoucements.png", dpi=400, bbox_inches="tight", facecolor=background) ###Output _____no_output_____ ###Markdown Here, we asked matplotlib to save our figure in the `png` format, with400 dots per inch (`dpi`, meaning each inch has a 400 by 400 set ofcolored points).The `bbox_inches` command is needed here to make sure pandas doesn’tchop off any of our Axes titles or tick labels.The `facecolor` argument was necessary because matplotlibwill save figures with a transparent background by default (meaning the backgroundis see-through so it “adopts” the background color of whatever website,document, or presentation is is placed in).We could have chosen a different file format. ###Code fig.savefig("aapl_iPhone_annoucements.jpeg", dpi=400, bbox_inches="tight", facecolor=background) # dpi not needed as pdf is a "vector" format that effectively has an infinite dpi fig.savefig("aapl_iPhone_annoucements.pdf", bbox_inches="tight", facecolor=background) # svg is also a vector format fig.savefig("aapl_iPhone_annoucements.svg", bbox_inches="tight", facecolor=background) ###Output _____no_output_____ ###Markdown SummaryPhew, we made it!We ended up writing quite a bit of code to get the figure to look*exactly* how we wanted it to look.Typically, our plotting code will be much more concise and simplebecause we don’t usually require the same standards for aestheticproperties as professional journalists do. Exercises**Exercise 1****Exercise:** Using the `.plot` method, plot the opening share pricefor Apple's stock.What `type` of object is returned from that method?What methods does this object have? ###Code # make plot here # explore methods here ###Output _____no_output_____
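###Markdown One possible sketch for exercise 1, assuming the `aapl` DataFrame downloaded from quandl earlier in the lecture is still in memory (the exact class name printed for the Axes varies across matplotlib versions): ###Code
# make the plot
ax = aapl["Adj. Open"].plot()
ax.set_title("AAPL adjusted opening price")

# explore the returned object
print(type(ax))                                             # a matplotlib Axes subclass
print([m for m in dir(ax) if m.startswith("set_")][:10])    # a few of its setter methods
###Output _____no_output_____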
padchest-covid/datasets.ipynb
###Markdown Generate dataset [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/BIMCV-CSUSP/BIMCV-COVID-19/blob/master/padchest-covid/datasets.ipynb)WIP This code is intended to generate a subset of the [padchest dataset](http://bimcv.cipf.es/bimcv-projects/padchest/) to train a model for detecting Coronavirus 2019 (COVID-19) in Chest Radiograph Images. The [PADCHEST_chest_x_ray_images_labels_160K_01.02.19.csv file](https://drive.google.com/file/d/1tBW4EB5DfHdRikHbUotjmGKO5Kos6tRC/view?usp=sharing) ###Code import csv from IPython.display import HTML, display ###Output _____no_output_____ ###Markdown Load the padchest dataset tables ###Code with open("./PADCHEST_chest_x_ray_images_labels_160K_01.02.19.csv") as f: r = csv.DictReader(f) data = [l for l in r] headers = data[0].keys() display(HTML( '<table><tr><th>{}</th></tr><tr>{}</tr></table>'.format( '</th><th>'.join(str(h) for h in headers), '</tr><tr>'.join( '<td>{}</td>'.format('</td><td>'.join(str(row[k]) for k in headers)) for row in data[:3]) ) )) ###Output _____no_output_____ ###Markdown Split the dataset in 5 groups depending if they have some keywords in their labels ###Code normals = [l for l in data if "'normal'" in l["Labels"]] pneumonia = [l for l in data if "pneumonia" in l["Labels"] and "infiltrates" not in l["Labels"]] infiltrates = [l for l in data if "infiltrates" in l["Labels"] and "pneumonia" not in l["Labels"]] p_i = [l for l in data if "infiltrates" in l["Labels"] and "pneumonia" in l["Labels"]] not_pneumonia = [l for l in data if "infiltrates" not in l["Labels"] if "pneumonia" not in l["Labels"] and "unchanged" not in l["Labels"]] ###Output _____no_output_____ ###Markdown And split them again in sex for detecting balancing issues ###Code normals_m = [l for l in normals if l["PatientSex_DICOM"] == 'M'] normals_f = [l for l in normals if l["PatientSex_DICOM"] == 'F'] pneumonia_m = [l for l in pneumonia if l["PatientSex_DICOM"] == 'M'] pneumonia_f = [l for l in pneumonia if l["PatientSex_DICOM"] == 'F'] not_pneumonia_m = [l for l in not_pneumonia if l["PatientSex_DICOM"] == 'M'] not_pneumonia_f = [l for l in not_pneumonia if l["PatientSex_DICOM"] == 'F'] infiltrates_m = [l for l in infiltrates if l["PatientSex_DICOM"] == 'M'] infiltrates_f = [l for l in infiltrates if l["PatientSex_DICOM"] == 'F'] p_i_m = [l for l in p_i if l["PatientSex_DICOM"] == 'M'] p_i_f = [l for l in p_i if l["PatientSex_DICOM"] == 'F'] values = [["", "Masculine", "Femenine", "Total" ], ["Normals", len(normals_m), len(normals_f), len(normals)], ["Pneumonia", len(pneumonia_m), len(pneumonia_f), len(pneumonia)], ["Infiltrates", len(infiltrates_m), len(infiltrates_f), len(infiltrates)], ["Pneumonia and infiltrates", len(p_i_m), len(p_i_f), len(p_i)], ["Not pneumonia nor infiltrates", len(not_pneumonia_m), len(not_pneumonia_f), len(not_pneumonia)]] display(HTML( '<table><tr>{}</tr></table>'.format( '</tr><tr>'.join( '<td>{}</td>'.format('</td><td>'.join(str(_) for _ in row)) for row in values) ) )) ###Output _____no_output_____ ###Markdown Store the generated subgroups in diferents files in case we need them ###Code datasets = [{"filename":"neumo_normals_m.tsv","data":normals_m},{"filename":"neumo_normals_f.tsv","data":normals_f}, {"filename":"neumo_pneumonia_m.tsv","data":pneumonia_m},{"filename":"neumo_pneumonia_f.tsv","data":pneumonia_f}, {"filename":"neumo_infiltrates_m.tsv","data":pneumonia_m},{"filename":"neumo_infiltrates_f.tsv","data":pneumonia_f}, 
{"filename":"neumo_pneumonia_infiltrates_m.tsv","data":pneumonia_m},{"filename":"neumo_pneumonia_infiltrates_f.tsv","data":pneumonia_f}, {"filename":"neumo_not_pneumonia_m.tsv","data":not_pneumonia_m},{"filename":"neumo_not_pneumonia_f.tsv","data":not_pneumonia_f}] header = ["ImageID","StudyDate_DICOM","StudyID","PatientID","PatientBirth","PatientSex_DICOM","ViewPosition_DICOM","Projection","MethodProjection","Pediatric","Modality_DICOM","Manufacturer_DICOM","PhotometricInterpretation_DICOM","PixelRepresentation_DICOM","PixelAspectRatio_DICOM","SpatialResolution_DICOM","BitsStored_DICOM","WindowCenter_DICOM","WindowWidth_DICOM","Rows_DICOM","Columns_DICOM","XRayTubeCurrent_DICOM","Exposure_DICOM","ExposureInuAs_DICOM","ExposureTime","RelativeXRayExposure_DICOM","Labels"] for d in datasets: with open(d["filename"], "w") as f: w = csv.DictWriter(f, header, delimiter='\t', extrasaction='ignore') w.writeheader() for l in d["data"]: w.writerow(l) ###Output _____no_output_____ ###Markdown Generate the dataset. As we have about 13k images with findings that can be caused by covid19, we fill the rest of the dataset with the same number of images from the normal group and other findings group in a 1:1 ratio. ###Code header2 = header+['group'] with open("neumo_dataset.tsv", "w") as f: w = csv.DictWriter(f, header2, delimiter='\t', extrasaction='ignore') w.writeheader() for l in pneumonia: w.writerow({**l,**{"group":'N'}}) for l in infiltrates: w.writerow({**l,**{"group":'I'}}) for l in p_i: w.writerow({**l,**{"group":'NI'}}) for l in normals[:6505]: w.writerow({**l,**{"group":'C'}}) for l in not_pneumonia[:6505]: w.writerow({**l,**{"group":'C'}}) ###Output _____no_output_____ ###Markdown Generate dataset [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/BIMCV-CSUSP/BIMCV-COVID-19/blob/master/padchest-covid/datasets.ipynb)WIP This code is intended to generate a subset of the [padchest dataset](http://bimcv.cipf.es/bimcv-projects/padchest/) to train a model for detecting Coronavirus 2019 (COVID-19) in Chest Radiograph Images. 
The [PADCHEST_chest_x_ray_images_labels_160K_01.02.19.csv file](https://drive.google.com/file/d/1tBW4EB5DfHdRikHbUotjmGKO5Kos6tRC/view?usp=sharing) ###Code import csv from IPython.display import HTML, display ###Output _____no_output_____ ###Markdown Load the padchest dataset tables ###Code with open("./PADCHEST_chest_x_ray_images_labels_160K_01.02.19.csv") as f: r = csv.DictReader(f) data = [l for l in r] headers = data[0].keys() display(HTML( '<table><tr><th>{}</th></tr><tr>{}</tr></table>'.format( '</th><th>'.join(str(h) for h in headers), '</tr><tr>'.join( '<td>{}</td>'.format('</td><td>'.join(str(row[k]) for k in headers)) for row in data[:3]) ) )) ###Output _____no_output_____ ###Markdown Split the dataset in 5 groups depending if they have some keywords in their labels ###Code normals = [l for l in data if "'normal'" in l["Labels"]] pneumonia = [l for l in data if "pneumonia" in l["Labels"] and "infiltrates" not in l["Labels"]] infiltrates = [l for l in data if "infiltrates" in l["Labels"] and "pneumonia" not in l["Labels"]] p_i = [l for l in data if "infiltrates" in l["Labels"] and "pneumonia" in l["Labels"]] not_pneumonia = [l for l in data if "infiltrates" not in l["Labels"] if "pneumonia" not in l["Labels"] and "unchanged" not in l["Labels"]] ###Output _____no_output_____ ###Markdown And split them again in sex for detecting balancing issues ###Code normals_m = [l for l in normals if l["PatientSex_DICOM"] == 'M'] normals_f = [l for l in normals if l["PatientSex_DICOM"] == 'F'] pneumonia_m = [l for l in pneumonia if l["PatientSex_DICOM"] == 'M'] pneumonia_f = [l for l in pneumonia if l["PatientSex_DICOM"] == 'F'] not_pneumonia_m = [l for l in not_pneumonia if l["PatientSex_DICOM"] == 'M'] not_pneumonia_f = [l for l in not_pneumonia if l["PatientSex_DICOM"] == 'F'] infiltrates_m = [l for l in infiltrates if l["PatientSex_DICOM"] == 'M'] infiltrates_f = [l for l in infiltrates if l["PatientSex_DICOM"] == 'F'] p_i_m = [l for l in p_i if l["PatientSex_DICOM"] == 'M'] p_i_f = [l for l in p_i if l["PatientSex_DICOM"] == 'F'] values = [["", "Masculine", "Femenine", "Total" ], ["Normals", len(normals_m), len(normals_f), len(normals)], ["Pneumonia", len(pneumonia_m), len(pneumonia_f), len(pneumonia)], ["Infiltrates", len(infiltrates_m), len(infiltrates_f), len(infiltrates)], ["Pneumonia and infiltrates", len(p_i_m), len(p_i_f), len(p_i)], ["Not pneumonia nor infiltrates", len(not_pneumonia_m), len(not_pneumonia_f), len(not_pneumonia)]] display(HTML( '<table><tr>{}</tr></table>'.format( '</tr><tr>'.join( '<td>{}</td>'.format('</td><td>'.join(str(_) for _ in row)) for row in values) ) )) ###Output _____no_output_____ ###Markdown Store the generated subgroups in diferents files in case we need them ###Code datasets = [{"filename":"neumo_normals_m.tsv","data":normals_m},{"filename":"neumo_normals_f.tsv","data":normals_f}, {"filename":"neumo_pneumonia_m.tsv","data":pneumonia_m},{"filename":"neumo_pneumonia_f.tsv","data":pneumonia_f}, {"filename":"neumo_infiltrates_m.tsv","data":pneumonia_m},{"filename":"neumo_infiltrates_f.tsv","data":pneumonia_f}, {"filename":"neumo_pneumonia_infiltrates_m.tsv","data":pneumonia_m},{"filename":"neumo_pneumonia_infiltrates_f.tsv","data":pneumonia_f}, {"filename":"neumo_not_pneumonia_m.tsv","data":not_pneumonia_m},{"filename":"neumo_not_pneumonia_f.tsv","data":not_pneumonia_f}] header = 
["ImageID","StudyDate_DICOM","StudyID","PatientID","PatientBirth","PatientSex_DICOM","ViewPosition_DICOM","Projection","MethodProjection","Pediatric","Modality_DICOM","Manufacturer_DICOM","PhotometricInterpretation_DICOM","PixelRepresentation_DICOM","PixelAspectRatio_DICOM","SpatialResolution_DICOM","BitsStored_DICOM","WindowCenter_DICOM","WindowWidth_DICOM","Rows_DICOM","Columns_DICOM","XRayTubeCurrent_DICOM","Exposure_DICOM","ExposureInuAs_DICOM","ExposureTime","RelativeXRayExposure_DICOM","Labels"] for d in datasets: with open(d["filename"], "w") as f: w = csv.DictWriter(f, header, delimiter='\t', extrasaction='ignore') w.writeheader() for l in d["data"]: w.writerow(l) ###Output _____no_output_____ ###Markdown Generate the dataset. As we have about 13k images with findings that can be caused by covid19, we fill the rest of the dataset with the same number of images from the normal group and other findings group in a 1:1 ratio. ###Code header2 = header+['group'] with open("neumo_dataset.tsv", "w") as f: w = csv.DictWriter(f, header2, delimiter='\t', extrasaction='ignore') w.writeheader() for l in pneumonia: w.writerow({**l,**{"group":'N'}}) for l in infiltrates: w.writerow({**l,**{"group":'I'}}) for l in p_i: w.writerow({**l,**{"group":'NI'}}) for l in normals[:6505]: w.writerow({**l,**{"group":'C'}}) for l in not_pneumonia[:6505]: w.writerow({**l,**{"group":'C'}}) ###Output _____no_output_____
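###Markdown One detail worth flagging in the `datasets` list above: the `neumo_infiltrates_*.tsv` and `neumo_pneumonia_infiltrates_*.tsv` entries reuse `pneumonia_m` / `pneumonia_f`, even though `infiltrates_m`, `infiltrates_f`, `p_i_m` and `p_i_f` are computed earlier and never written out. This looks like a copy-paste slip; a corrected mapping would presumably be: ###Code
datasets = [
    {"filename": "neumo_normals_m.tsv", "data": normals_m},
    {"filename": "neumo_normals_f.tsv", "data": normals_f},
    {"filename": "neumo_pneumonia_m.tsv", "data": pneumonia_m},
    {"filename": "neumo_pneumonia_f.tsv", "data": pneumonia_f},
    {"filename": "neumo_infiltrates_m.tsv", "data": infiltrates_m},
    {"filename": "neumo_infiltrates_f.tsv", "data": infiltrates_f},
    {"filename": "neumo_pneumonia_infiltrates_m.tsv", "data": p_i_m},
    {"filename": "neumo_pneumonia_infiltrates_f.tsv", "data": p_i_f},
    {"filename": "neumo_not_pneumonia_m.tsv", "data": not_pneumonia_m},
    {"filename": "neumo_not_pneumonia_f.tsv", "data": not_pneumonia_f},
]
###Output _____no_output_____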
Models and Tags Analysis notebooks/Logistic regression 4 grams model.ipynb
###Markdown Logistic regression model with 4 gram featurization ###Code import pandas as pd import numpy as np import re from tqdm import tqdm import warnings warnings.filterwarnings("ignore") import matplotlib.pyplot as plt import seaborn as sns from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from wordcloud import WordCloud from nltk.corpus import stopwords from nltk.tokenize import word_tokenize from nltk.stem.snowball import SnowballStemmer from sklearn.model_selection import train_test_split from sklearn.multiclass import OneVsRestClassifier from sklearn.linear_model import SGDClassifier from sklearn.linear_model import LogisticRegression from sklearn import metrics from sklearn.metrics import f1_score,precision_score,recall_score %%time ## sample_500k is a sample from main dataset preprocess_data = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Stack overflow Tag /preprocessed_3title_100k.csv") preprocess_data.head() preprocess_data.head() def text_splitter(text): return text.split() # binary='true' will give a binary vectorizer tag_vectorizer = CountVectorizer(tokenizer = text_splitter, binary=True) multi_label_y = tag_vectorizer.fit_transform(preprocess_data['tags'].values.astype(str)) # make sum column wise tag_column_sum = multi_label_y.sum(axis=0).tolist()[0] # To select n number of top tags def select_top_tags(n): # To get sotred list (means: tags appear in maximum number of questions come first) # top 10: [3711, 15246, 22934, 15324, 1054, 15713, 3720, 24481, 14905, 1897] sorted_tags = sorted(range(len(tag_column_sum)), key=lambda i: tag_column_sum[i], reverse=True) # With this line of code we get tags in our columns which are come in most of the questions # we will get shape: (999999, n) multi_label_n_y = multi_label_y[:,sorted_tags[:n]] return multi_label_n_y # def questions_covered_fn(n): multi_label_n_y = select_top_tags(n) # This line will give us row wise sum of each row [[1, 2], [[3], # [4, 3]] to [7]] row_sum_array = multi_label_n_y.sum(axis=1) # Counts the number of non-zero values in the array return (np.count_nonzero(row_sum_array==0)) # With this code we checking how much percent questions are explained by how many tags # Here we are starting from 500 because we think top 500 are most important tags we can't skip them questions_covered=[] total_tags=multi_label_y.shape[1] total_qs=preprocess_data.shape[0] for i in range(500, total_tags, 100): questions_covered.append(np.round(((total_qs-questions_covered_fn(i))/total_qs)*100,3)) multi_label_n_y = select_top_tags(500) print("number of questions that are not covered :", questions_covered_fn(5500),"out of ", total_qs) print("Number of tags in sample :", multi_label_y.shape[1]) print("number of tags taken :", multi_label_n_y.shape[1],"-->",round((multi_label_n_y.shape[1]/multi_label_y.shape[1]),3)*100,"%") total_size=preprocess_data.shape[0] train_size=int(0.80*total_size) x_train=preprocess_data.head(train_size) x_test=preprocess_data.tail(total_size - train_size) y_train = multi_label_n_y[0:train_size,:] y_test = multi_label_n_y[train_size:total_size,:] %%time # To get new features with tfidf technique get 200000 features with upto 3-grams vectorizer = TfidfVectorizer(min_df=0.00009, max_features=200000, smooth_idf=True, norm="l2", tokenizer = text_splitter, sublinear_tf=False, ngram_range=(1,4)) # Apply this vectorizer only on question data column x_train_multi_label = vectorizer.fit_transform(x_train['question']) x_test_multi_label = 
vectorizer.transform(x_test['question']) # Now check data shapes after featurization print("Dimensions of train data X:",x_train_multi_label.shape, "Y :",y_train.shape) print("Dimensions of test data X:",x_test_multi_label.shape,"Y:",y_test.shape) from joblib import dump dump(vectorizer, '/content/drive/MyDrive/Colab Notebooks/Stack overflow Tag /stackoverflow_tfidf_vectorizer_logistic_regression__4grams_100k.pkl') classifier = OneVsRestClassifier(LogisticRegression(penalty='l1',solver='liblinear'), n_jobs=-1) import time start = time.time() classifier.fit(x_train_multi_label, y_train) print("Time it takes to run this :",(time.time()-start)/60,"minutes") dump(classifier, '/content/drive/MyDrive/Colab Notebooks/Stack overflow Tag /stackoverflow_model_logistic_regression_4grams_100k.pkl') predictions = classifier.predict(x_test_multi_label) print("accuracy :",metrics.accuracy_score(y_test,predictions)) print("macro f1 score :",metrics.f1_score(y_test, predictions, average = 'macro')) print("micro f1 scoore :",metrics.f1_score(y_test, predictions, average = 'micro')) print("hamming loss :",metrics.hamming_loss(y_test,predictions)) report = metrics.classification_report(y_test, predictions, output_dict=True) report_df = pd.DataFrame(report).transpose() report_df.to_csv("/content/report_4grams_100k.csv") ###Output _____no_output_____
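###Markdown As a follow-up, the two artifacts saved above with `dump()` can be reloaded to tag an unseen question. A minimal inference sketch is below; the question text is made up, and mapping the predicted column indices back to tag names would additionally require persisting the `CountVectorizer` vocabulary and the top-500 tag ordering, which this notebook does not save: ###Code
from joblib import load

vec = load('/content/drive/MyDrive/Colab Notebooks/Stack overflow Tag /stackoverflow_tfidf_vectorizer_logistic_regression__4grams_100k.pkl')
clf = load('/content/drive/MyDrive/Colab Notebooks/Stack overflow Tag /stackoverflow_model_logistic_regression_4grams_100k.pkl')

new_question = ["how do i merge two pandas dataframes on multiple columns"]
x_new = vec.transform(new_question)   # sparse tf-idf row with up to 200000 features
y_new = clf.predict(x_new)            # binary indicator over the 500 selected tags
print(y_new.nonzero()[1])             # column indices of the predicted tags
###Output _____no_output_____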
PY0101EN_3_2_Loops.ipynb
###Markdown Loops in Python Welcome! This notebook will teach you about the loops in the Python Programming Language. By the end of this lab, you'll know how to use the loop statements in Python, including for loop, and while loop. Table of Contents Loops Range What is for loop? What is while loop? Quiz on Loops Estimated time needed: 20 min Loops Range Sometimes, you might want to repeat a given operation many times. Repeated executions like this are performed by loops. We will look at two types of loops, for loops and while loops.Before we discuss loops lets discuss the range object. It is helpful to think of the range object as an ordered list. For now, let's look at the simplest case. If we would like to generate a sequence that contains three elements ordered from 0 to 2 we simply use the following command: ###Code # Use the range range(3) for i in range(3): print(i) ###Output 0 1 2 ###Markdown What is for loop? The for loop enables you to execute a code block multiple times. For example, you would use this if you would like to print out every element in a list. Let's try to use a for loop to print all the years presented in the list dates: This can be done as follows: ###Code # For loop example dates = [1982,1980,1973] N = len(dates) for i in range(N): print(dates[i]) ###Output 1982 1980 1973 ###Markdown The code in the indent is executed N times, each time the value of i is increased by 1 for every execution. The statement executed is to print out the value in the list at index i as shown here: In this example we can print out a sequence of numbers from 0 to 7: ###Code # Example of for loop for i in range(0, 8): print(i) ###Output 0 1 2 3 4 5 6 7 ###Markdown In Python we can directly access the elements in the list as follows: ###Code # Exmaple of for loop, loop through list for year in dates: print(year) ###Output 1982 1980 1973 ###Markdown For each iteration, the value of the variable years behaves like the value of dates[i] in the first example: We can change the elements in a list: ###Code # Use for loop to change the elements in list squares = ['red', 'yellow', 'green', 'purple', 'blue'] for i in range(0, 5): print("Before square ", i, 'is', squares[i]) squares[i] = 'weight' print("After square ", i, 'is', squares[i]) ###Output Before square 0 is red After square 0 is weight Before square 1 is yellow After square 1 is weight Before square 2 is green After square 2 is weight Before square 3 is purple After square 3 is weight Before square 4 is blue After square 4 is weight ###Markdown We can access the index and the elements of a list as follows: ###Code # Loop through the list and iterate on both index and element value squares=['red', 'yellow', 'green', 'purple', 'blue'] for i, square in enumerate(squares): print(i, square) ###Output 0 red 1 yellow 2 green 3 purple 4 blue ###Markdown What is while loop? As you can see, the for loop is used for a controlled flow of repetition. However, what if we don't know when we want to stop the loop? What if we want to keep executing a code block until a certain condition is met? The while loop exists as a tool for repeated execution based on a condition. The code block will keep being executed until the given logical condition returns a **False** boolean value. Let’s say we would like to iterate through list dates and stop at the year 1973, then print out the number of iterations. 
This can be done with the following block of code: ###Code # While Loop Example dates = [1982, 1980, 1973, 2000] i = 0 year = 0 while(year != 1973): year = dates[i] i = i + 1 print(year) print("It took ", i ,"repetitions to get out of loop.") ###Output 1982 1980 1973 It took 3 repetitions to get out of loop. ###Markdown A while loop iterates merely until the condition in the argument is not met, as shown in the following figure: Quiz on Loops Write a for loop the prints out all the element between -5 and 5 using the range function. ###Code # Write your code below and press Shift+Enter to execute for i in range(-5, 5): print(i) ###Output -5 -4 -3 -2 -1 0 1 2 3 4 ###Markdown Double-click __here__ for the solution.<!-- for i in range(-5, 6): print(i)--> Print the elements of the following list:Genres=[ 'rock', 'R&B', 'Soundtrack', 'R&B', 'soul', 'pop']Make sure you follow Python conventions. ###Code # Write your code below and press Shift+Enter to execute Genres = ['rock', 'R&B', 'Soundtrack', 'R&B', 'soul', 'pop'] Genres ###Output _____no_output_____ ###Markdown Double-click __here__ for the solution.<!-- Genres = ['rock', 'R&B', 'Soundtrack', 'R&B', 'soul', 'pop']for Genre in Genres: print(Genre)--> Write a for loop that prints out the following list: squares=['red', 'yellow', 'green', 'purple', 'blue'] ###Code # Write your code below and press Shift+Enter to execute squares = [ 'red', 'yellow', 'green', 'purple', 'blue' ] for i,name in enumerate(squares): print(i+1, name) ###Output 1 red 2 yellow 3 green 4 purple 5 blue ###Markdown Double-click __here__ for the solution.<!-- squares=['red', 'yellow', 'green', 'purple', 'blue']for square in squares: print(square) --> Write a while loop to display the values of the Rating of an album playlist stored in the list PlayListRatings. If the score is less than 6, exit the loop. The list PlayListRatings is given by: PlayListRatings = [10, 9.5, 10, 8, 7.5, 5, 10, 10] ###Code # Write your code below and press Shift+Enter to execute playListRatings = [10, 9.5, 10, 8, 7.5, 5, 10, 10] for i in playListRatings: if i < 6: break print(i) ###Output 10 9.5 10 8 7.5 ###Markdown Double-click __here__ for the solution.<!-- PlayListRatings = [10, 9.5, 10, 8, 7.5, 5, 10, 10]i = 1Rating = PlayListRatings[0]while(Rating >= 6): print(Rating) Rating = PlayListRatings[i] i = i + 1 --> Write a while loop to copy the strings 'orange' of the list squares to the list new_squares. Stop and exit the loop if the value on the list is not 'orange': ###Code # Write your code below and press Shift+Enter to execute squares = ['orange', 'orange', 'purple', 'blue ', 'orange'] new_squares = [] i = 0 while (squares[i] == 'orange'): new_squares = squares[i] i+=1 print(new_squares) print(new_squares,'the loop is repeat : ', i) ###Output orange orange orange the loop is repeat : 2 ###Markdown Loops in Python Welcome! This notebook will teach you about the loops in the Python Programming Language. By the end of this lab, you'll know how to use the loop statements in Python, including for loop, and while loop. Table of Contents Loops Range What is for loop? What is while loop? Quiz on Loops Estimated time needed: 20 min Loops Range Sometimes, you might want to repeat a given operation many times. Repeated executions like this are performed by loops. We will look at two types of loops, for loops and while loops.Before we discuss loops lets discuss the range object. It is helpful to think of the range object as an ordered list. For now, let's look at the simplest case. 
If we would like to generate a sequence that contains three elements ordered from 0 to 2 we simply use the following command: ###Code # Use the range range(3) for i in range(3): print(i) ###Output 0 1 2 ###Markdown What is for loop? The for loop enables you to execute a code block multiple times. For example, you would use this if you would like to print out every element in a list. Let's try to use a for loop to print all the years presented in the list dates: This can be done as follows: ###Code # For loop example dates = [1982,1980,1973] N = len(dates) for i in range(N): print(dates[i]) ###Output 1982 1980 1973 ###Markdown The code in the indent is executed N times, each time the value of i is increased by 1 for every execution. The statement executed is to print out the value in the list at index i as shown here: In this example we can print out a sequence of numbers from 0 to 7: ###Code # Example of for loop for i in range(0, 8): print(i) ###Output 0 1 2 3 4 5 6 7 ###Markdown In Python we can directly access the elements in the list as follows: ###Code # Exmaple of for loop, loop through list for year in dates: print(year) ###Output 1982 1980 1973 ###Markdown For each iteration, the value of the variable years behaves like the value of dates[i] in the first example: We can change the elements in a list: ###Code # Use for loop to change the elements in list squares = ['red', 'yellow', 'green', 'purple', 'blue'] for i in range(0, 5): print("Before square ", i, 'is', squares[i]) squares[i] = 'weight' print("After square ", i, 'is', squares[i]) ###Output Before square 0 is red After square 0 is weight Before square 1 is yellow After square 1 is weight Before square 2 is green After square 2 is weight Before square 3 is purple After square 3 is weight Before square 4 is blue After square 4 is weight ###Markdown We can access the index and the elements of a list as follows: ###Code # Loop through the list and iterate on both index and element value squares=['red', 'yellow', 'green', 'purple', 'blue'] for i, square in enumerate(squares): print(i, square) ###Output 0 red 1 yellow 2 green 3 purple 4 blue ###Markdown What is while loop? As you can see, the for loop is used for a controlled flow of repetition. However, what if we don't know when we want to stop the loop? What if we want to keep executing a code block until a certain condition is met? The while loop exists as a tool for repeated execution based on a condition. The code block will keep being executed until the given logical condition returns a **False** boolean value. Let’s say we would like to iterate through list dates and stop at the year 1973, then print out the number of iterations. This can be done with the following block of code: ###Code # While Loop Example dates = [1982, 1980, 1973, 2000] i = 0 year = 0 while(year != 1973): year = dates[i] i = i + 1 print(year) print("It took ", i ,"repetitions to get out of loop.") ###Output 1982 1980 1973 It took 3 repetitions to get out of loop. ###Markdown A while loop iterates merely until the condition in the argument is not met, as shown in the following figure: Quiz on Loops Write a for loop the prints out all the element between -5 and 5 using the range function. 
###Code # Write your code below and press Shift+Enter to execute for i in range(-5, 5): print(i) ###Output -5 -4 -3 -2 -1 0 1 2 3 4 ###Markdown Double-click __here__ for the solution.<!-- for i in range(-5, 6): print(i)--> Print the elements of the following list:Genres=[ 'rock', 'R&B', 'Soundtrack', 'R&B', 'soul', 'pop']Make sure you follow Python conventions. ###Code # Write your code below and press Shift+Enter to execute Genres = ['rock', 'R&B', 'Soundtrack', 'R&B', 'soul', 'pop'] Genres ###Output _____no_output_____ ###Markdown Double-click __here__ for the solution.<!-- Genres = ['rock', 'R&B', 'Soundtrack', 'R&B', 'soul', 'pop']for Genre in Genres: print(Genre)--> Write a for loop that prints out the following list: squares=['red', 'yellow', 'green', 'purple', 'blue'] ###Code # Write your code below and press Shift+Enter to execute squares = [ 'red', 'yellow', 'green', 'purple', 'blue' ] for i,name in enumerate(squares): print(i+1, name) ###Output 1 red 2 yellow 3 green 4 purple 5 blue ###Markdown Double-click __here__ for the solution.<!-- squares=['red', 'yellow', 'green', 'purple', 'blue']for square in squares: print(square) --> Write a while loop to display the values of the Rating of an album playlist stored in the list PlayListRatings. If the score is less than 6, exit the loop. The list PlayListRatings is given by: PlayListRatings = [10, 9.5, 10, 8, 7.5, 5, 10, 10] ###Code # Write your code below and press Shift+Enter to execute playListRatings = [10, 9.5, 10, 8, 7.5, 5, 10, 10] for i in playListRatings: if i < 6: break print(i) ###Output 10 9.5 10 8 7.5 ###Markdown Double-click __here__ for the solution.<!-- PlayListRatings = [10, 9.5, 10, 8, 7.5, 5, 10, 10]i = 1Rating = PlayListRatings[0]while(Rating >= 6): print(Rating) Rating = PlayListRatings[i] i = i + 1 --> Write a while loop to copy the strings 'orange' of the list squares to the list new_squares. Stop and exit the loop if the value on the list is not 'orange': ###Code # Write your code below and press Shift+Enter to execute squares = ['orange', 'orange', 'purple', 'blue ', 'orange'] new_squares = [] i = 0 while (squares[i] == 'orange'): new_squares = squares[i] i+=1 print(new_squares) print(new_squares,'the loop is repeat : ', i) ###Output orange orange orange the loop is repeat : 2
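###Markdown The attempt above re-assigns `new_squares = squares[i]` on every pass, so it ends up holding a single string rather than building up a list. One way to actually copy the leading 'orange' strings into `new_squares`, with a bounds check so the loop also terminates if every element were 'orange': ###Code
squares = ['orange', 'orange', 'purple', 'blue ', 'orange']
new_squares = []
i = 0
while i < len(squares) and squares[i] == 'orange':
    new_squares.append(squares[i])   # append, instead of overwriting the variable
    i = i + 1
print(new_squares)
print("the loop ran", i, "times")
###Output ['orange', 'orange'] the loop ran 2 times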
Phase_1/ds-data_visualization-main/Matplotlib_Applied.ipynb
###Markdown Matplotlib Applied **Aim: SWBAT create a figure with 4 subplots of varying graph types.** ###Code import matplotlib.pyplot as plt import numpy as np from numpy.random import seed, randint seed(100) # Create Figure and Subplots fig, axes = plt.subplots(2,2, figsize=(10,6), sharex=True, sharey=True, dpi=100) # Define the colors and markers to use colors = {0:'g', 1:'b', 2:'r', 3:'y'} markers = {0:'o', 1:'x', 2:'*', 3:'p'} # Plot each axes for i, ax in enumerate(axes.ravel()): #enumerate is grabbing the index(i) and then attaching the axes value (from the array hash)? #Doesn't do anything at the time, but gives you each individual element as you call it ax.plot(sorted(randint(0,10,10)), sorted(randint(0,10,10)), marker=markers[i], color=colors[i]) ax.set_title('Ax: ' + str(i)) ax.yaxis.set_ticks_position('right') plt.suptitle('Four Subplots in One Figure', verticalalignment='bottom', fontsize=16) plt.tight_layout() # plt.show() ###Output _____no_output_____ ###Markdown Go through and play with the code above to try answer the questions below:- What do you think `sharex` and `sharey` do?- What does the `dpi` argument control?- What does `numpy.ravel()` do, and why do they call it here?- What does `yaxis.set_ticks_position()` do?- How do they use the `colors` and `markers` dictionaries? Your turn:- Create a figure that has 4 sub plots on it.- Plot 1: a line blue graph (`.plot()`) using data `x` and `y`- Plot 2: a scatter plot (`.scatter()`) using data `x2` and `y2` with red markers that are non-filled circles.- Plot 3: a plot that has both a line graph (x and y data) and a scatterplot (x2, y2) that only use 1 y axis- plot 4: a plot that is similiar to plot3 except the scatterplot has it own axis on the right hand side. - Put titles on each subplot.- Create a title for the entire figure.- Save figure as png. 
###Code from numpy.random import seed, randint seed(100) x = sorted(randint(0,10,10)) x2 = sorted(randint(0,20,10)) y = sorted(randint(0,10,10)) y2 = sorted(randint(0,20,10)) fig, axes = plt.subplots(2,2, figsize=(10,6)) ax2 = axes[1,1].twinx() # instantiate a second axes that shares the same x-axis ax2.scatter(x2,y2, edgecolor='r', facecolor='none') axes[0,0].plot(x,y, color='b') axes[0,1].scatter(x2,y2, edgecolor='r', facecolor='none') axes[1,0].plot(x,y, color='b') axes[1,0].scatter(x2,y2, edgecolor='r', facecolor='none') axes[1,1].plot(x,y, color='b') axes[1,1].scatter(x2,y2, edgecolor='r', facecolor='none') fig ###Output _____no_output_____ ###Markdown Great tutorial on matplotlibhttps://www.machinelearningplus.com/plots/matplotlib-tutorial-complete-guide-python-plot-examples/ ###Code fig ###Output _____no_output_____ ###Markdown Matplotlib Applied **Aim: SWBAT create a figure with 4 subplots of varying graph types.** ###Code import matplotlib.pyplot as plt import numpy as np from numpy.random import seed, randint seed(100) # Create Figure and Subplots fig, axes = plt.subplots(2,2, figsize=(10,6), sharex=True, sharey=True, dpi=100) # Define the colors and markers to use colors = {0:'g', 1:'b', 2:'r', 3:'y'} markers = {0:'o', 1:'x', 2:'*', 3:'p'} # Plot each axes for i, ax in enumerate(axes.ravel()): ax.plot(sorted(randint(0,10,10)), sorted(randint(0,10,10)), marker=markers[i], color=colors[i]) ax.set_title('Ax: ' + str(i)) ax.yaxis.set_ticks_position('right') plt.suptitle('Four Subplots in One Figure', verticalalignment='bottom', fontsize=16) plt.tight_layout() # plt.show() ###Output _____no_output_____ ###Markdown Go through and play with the code above to try answer the questions below:- What do you think `sharex` and `sharey` do?- What does the `dpi` argument control?- What does `numpy.ravel()` do, and why do they call it here?- What does `yaxis.set_ticks_position()` do?- How do they use the `colors` and `markers` dictionaries? Your turn:- Create a figure that has 4 sub plots on it.- Plot 1: a line blue graph (`.plot()`) using data `x` and `y`- Plot 2: a scatter plot (`.scatter()`) using data `x2` and `y2` with red markers that are non-filled circles.- Plot 3: a plot that has both a line graph (x and y data) and a scatterplot (x2, y2) that only use 1 y axis- plot 4: a plot that is similiar to plot3 except the scatterplot has it own axis on the right hand side. - Put titles on each subplot.- Create a title for the entire figure.- Save figure as png. 
###Code from numpy.random import seed, randint seed(100) x = sorted(randint(0,10,10)) x2 = sorted(randint(0,20,10)) y = sorted(randint(0,10,10)) y2 = sorted(randint(0,20,10)) ###Output _____no_output_____ ###Markdown Great tutorial on matplotlibhttps://www.machinelearningplus.com/plots/matplotlib-tutorial-complete-guide-python-plot-examples/ ###Code fig, ax = plt.subplots(2,2, figsize=(10,6)) ax[0,0].plot(x,y, color='b') ax[0,1].scatter(x2,y2, facecolors='none', edgecolors='r' ) ax[1,0].plot(x,y, color='b') ax[1,0].scatter(x2,y2, facecolors='none', edgecolors='r' ) ax[1,1].plot(x,y, color='b') ax2=ax[1,1].twinx() ax2.scatter(x2,y2, facecolors='none', edgecolors='r' ) fig ###Output _____no_output_____ ###Markdown Matplotlib Applied **Aim: SWBAT create a figure with 4 subplots of varying graph types.** ###Code import matplotlib.pyplot as plt import numpy as np from numpy.random import seed, randint seed(100) # Create Figure and Subplots fig, axes = plt.subplots(2,2, figsize=(10,6), sharex=True, sharey=True, dpi=100) # Define the colors and markers to use colors = {0:'g', 1:'b', 2:'r', 3:'y'} markers = {0:'o', 1:'x', 2:'*', 3:'p'} # Plot each axes for i, ax in enumerate(axes.ravel()): ax.plot(sorted(randint(0,10,10)), sorted(randint(0,10,10)), marker=markers[i], color=colors[i]) ax.set_title('Ax: ' + str(i)) ax.yaxis.set_ticks_position('right') plt.suptitle('Four Subplots in One Figure', verticalalignment='bottom', fontsize=16) plt.tight_layout() # plt.show() axes ###Output _____no_output_____ ###Markdown Go through and play with the code above to try answer the questions below:- What do you think `sharex` and `sharey` do?- What does the `dpi` argument control?- What does `numpy.ravel()` do, and why do they call it here?- What does `yaxis.set_ticks_position()` do?- How do they use the `colors` and `markers` dictionaries? Your turn:- Create a figure that has 4 sub plots on it.- Plot 1: a line blue graph (`.plot()`) using data `x` and `y`- Plot 2: a scatter plot (`.scatter()`) using data `x2` and `y2` with red markers that are non-filled circles.- Plot 3: a plot that has both a line graph (x and y data) and a scatterplot (x2, y2) that only use 1 y axis- plot 4: a plot that is similiar to plot3 except the scatterplot has it own axis on the right hand side. - Put titles on each subplot.- Create a title for the entire figure.- Save figure as png. 
###Code from numpy.random import seed, randint seed(100) x = sorted(randint(0,10,10)) x2 = sorted(randint(0,20,10)) y = sorted(randint(0,10,10)) y2 = sorted(randint(0,20,10)) ###Output _____no_output_____ ###Markdown Great tutorial on matplotlibhttps://www.machinelearningplus.com/plots/matplotlib-tutorial-complete-guide-python-plot-examples/ ###Code fig ###Output _____no_output_____ ###Markdown Matplotlib Applied **Aim: SWBAT create a figure with 4 subplots of varying graph types.** ###Code import matplotlib.pyplot as plt from matplotlib.lines import Line2D import numpy as np from numpy.random import seed, randint seed(100) # Create Figure and Subplots fig, axes = plt.subplots(2,3, figsize=(9,9), sharex=True, sharey=True, dpi=100) # Define the colors and markers to use colors = {0:(0.1,0.1,0.1), 1:(0.1,0.1,0.3), 2:(0.1,0.1,0.5), 3:(0.1,0.1,0.7), 4:(0.1,0.1,0.9), 5:(0.8,0.1,0.7)} markers = {0:'o', 1:'^', 2:'s', 3:'p', 4: 'x', 5: '*'} # Plot each axes for i, ax in enumerate(axes.ravel()): ax.plot(sorted(randint(0,10,15)), sorted(randint(0,10,15)), marker=markers[i], color=colors[i], linestyle = "None") ax.set_title('Ax: ' + str(i+1)) ax.yaxis.set_ticks_position('left') ax.xaxis.set_ticks_position('bottom') plt.suptitle('Four Subplots in One Figure', verticalalignment='center', fontsize=14) plt.tight_layout() #plt.show() ###Output _____no_output_____ ###Markdown Go through and play with the code above to try answer the questions below:- What do you think `sharex` and `sharey` do?- What does the `dpi` argument control?- What does `numpy.ravel()` do, and why do they call it here?- What does `yaxis.set_ticks_position()` do?- How do they use the `colors` and `markers` dictionaries? Your turn:- Create a figure that has 4 sub plots on it.- Plot 1: a line blue graph (`.plot()`) using data `x` and `y`- Plot 2: a scatter plot (`.scatter()`) using data `x2` and `y2` with red markers that are non-filled circles.- Plot 3: a plot that has both a line graph (x and y data) and a scatterplot (x2, y2) that only use 1 y axis- plot 4: a plot that is similiar to plot3 except the scatterplot has it own axis on the right hand side. - Put titles on each subplot.- Create a title for the entire figure.- Save figure as png. 
###Code x = sorted(randint(0,10,10)) x2 = sorted(randint(0,20,10)) y = sorted(randint(0,10,10)) y2 = sorted(randint(0,20,10)) from numpy.random import seed, randint seed(100) fig, axes = plt.subplots(2,2, figsize=(20,20), dpi =60) # Define the colors and markers to use colors = {0:'b', 1:'r'} markers = {0:'o', 1:'^', 2:'s', 3:'p', 4: 'x', 5: '*'} # Plot each axes axes[0,0].plot(x, y, color='b', markersize=10) axes[0,1].scatter(x2, y2, color='r',facecolors='none') axes[1,0].plot(x, y, color='b') axes[1,0].scatter(x2, y2,color='r', facecolors='none') axes[1,1].plot(x, y, color='b') ax2=axes[1,1].twinx() ax2.scatter(x2, y2,color='r', facecolors='none') plt.suptitle('Four Subplots in One Figure', verticalalignment='center', fontsize=14) #plt.show() ###Output _____no_output_____ ###Markdown Great tutorial on matplotlibhttps://www.machinelearningplus.com/plots/matplotlib-tutorial-complete-guide-python-plot-examples/ ###Code fig ###Output _____no_output_____ ###Markdown Matplotlib Applied **Aim: SWBAT create a figure with 4 subplots of varying graph types.** ###Code import matplotlib.pyplot as plt import numpy as np from numpy.random import seed, randint seed(100) # Create Figure and Subplots fig, axes = plt.subplots(2,2, figsize=(10,6), sharex=True, sharey=True, dpi=100) # Define the colors and markers to use colors = {0:'g', 1:'b', 2:'r', 3:'y'} markers = {0:'o', 1:'x', 2:'*', 3:'p'} # Plot each axes for i, ax in enumerate(axes.ravel()): ax.plot(sorted(randint(0,10,10)), sorted(randint(0,10,10)), marker=markers[i], color=colors[i]) ax.set_title('Ax: ' + str(i)) ax.yaxis.set_ticks_position('right') plt.suptitle('Four Subplots in One Figure', verticalalignment='bottom', fontsize=16) plt.tight_layout() # plt.show() ###Output _____no_output_____ ###Markdown Go through and play with the code above to try answer the questions below:- What do you think `sharex` and `sharey` do?- What does the `dpi` argument control?- What does `numpy.ravel()` do, and why do they call it here?- What does `yaxis.set_ticks_position()` do?- How do they use the `colors` and `markers` dictionaries? Your turn:- Create a figure that has 4 sub plots on it.- Plot 1: a line blue graph (`.plot()`) using data `x` and `y`- Plot 2: a scatter plot (`.scatter()`) using data `x2` and `y2` with red markers that are non-filled circles.- Plot 3: a plot that has both a line graph (x and y data) and a scatterplot (x2, y2) that only use 1 y axis- plot 4: a plot that is similiar to plot3 except the scatterplot has it own axis on the right hand side. - Put titles on each subplot.- Create a title for the entire figure.- Save figure as png. 
###Code from numpy.random import seed, randint seed(100) x = sorted(randint(0,10,10)) x2 = sorted(randint(0,20,10)) y = sorted(randint(0,10,10)) y2 = sorted(randint(0,20,10)) ###Output _____no_output_____ ###Markdown Great tutorial on matplotlibhttps://www.machinelearningplus.com/plots/matplotlib-tutorial-complete-guide-python-plot-examples/ ###Code fig ###Output _____no_output_____ ###Markdown Matplotlib Applied **Aim: SWBAT create a figure with 4 subplots of varying graph types.** ###Code import matplotlib.pyplot as plt import numpy as np from numpy.random import seed, randint seed(100) # Create Figure and Subplots fig, axes = plt.subplots(2,2, figsize=(10,6), sharex=True, sharey=True, dpi=100) # Define the colors and markers to use colors = {0:'g', 1:'b', 2:'r', 3:'y'} markers = {0:'o', 1:'x', 2:'*', 3:'p'} # Plot each axes for i, ax in enumerate(axes.ravel()): ax.plot(sorted(randint(0,10,10)), sorted(randint(0,10,10)), marker=markers[i], color=colors[i]) ax.set_title('Ax: ' + str(i)) ax.yaxis.set_ticks_position('right') plt.suptitle('Four Subplots in One Figure', verticalalignment='bottom', fontsize=16) plt.tight_layout() # plt.show() ###Output _____no_output_____ ###Markdown Go through and play with the code above to try answer the questions below:- What do you think `sharex` and `sharey` do?- What does the `dpi` argument control?- What does `numpy.ravel()` do, and why do they call it here?- What does `yaxis.set_ticks_position()` do?- How do they use the `colors` and `markers` dictionaries? Your turn:- Create a figure that has 4 sub plots on it.- Plot 1: a line blue graph (`.plot()`) using data `x` and `y`- Plot 2: a scatter plot (`.scatter()`) using data `x2` and `y2` with red markers that are non-filled circles.- Plot 3: a plot that has both a line graph (x and y data) and a scatterplot (x2, y2) that only use 1 y axis- plot 4: a plot that is similiar to plot3 except the scatterplot has it own axis on the right hand side. - Put titles on each subplot.- Create a title for the entire figure.- Save figure as png. ###Code from numpy.random import seed, randint seed(100) x = sorted(randint(0,10,10)) x2 = sorted(randint(0,20,10)) y = sorted(randint(0,10,10)) y2 = sorted(randint(0,20,10)) fig, axes = plt.subplots(2,2, figsize=(10,6)) axes[0,0].plot(x, y, color='b', marker=None) axes[0,1].scatter(x2, y2, color='r', marker= 'o') axes[1,0].plot(x,y, color='b') axes[1,0].scatter(x2, y2, color='r', marker='o') axes[1,1].plot(x,y, color='b') ax2 = axes[1,1].twinx() ax2.scatter(x2, y2, color='r', marker='o') fig ###Output _____no_output_____ ###Markdown Great tutorial on matplotlibhttps://www.machinelearningplus.com/plots/matplotlib-tutorial-complete-guide-python-plot-examples/ ###Code fig ###Output _____no_output_____
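###Markdown For reference, here is one possible solution sketch that hits every requirement in the exercise list (per-subplot titles, a figure-level title, and saving to png). It reuses the same `x`, `y`, `x2`, `y2` construction from the cells above: ###Code
import matplotlib.pyplot as plt
from numpy.random import seed, randint

seed(100)
x = sorted(randint(0, 10, 10))
x2 = sorted(randint(0, 20, 10))
y = sorted(randint(0, 10, 10))
y2 = sorted(randint(0, 20, 10))

fig, axes = plt.subplots(2, 2, figsize=(10, 6))

# Plot 1: blue line graph
axes[0, 0].plot(x, y, color='b')
axes[0, 0].set_title('Line plot')

# Plot 2: scatter with unfilled red circles
axes[0, 1].scatter(x2, y2, edgecolors='r', facecolors='none')
axes[0, 1].set_title('Scatter plot')

# Plot 3: line and scatter sharing one y-axis
axes[1, 0].plot(x, y, color='b')
axes[1, 0].scatter(x2, y2, edgecolors='r', facecolors='none')
axes[1, 0].set_title('Line + scatter, shared y-axis')

# Plot 4: same as plot 3, but the scatter gets its own right-hand axis
axes[1, 1].plot(x, y, color='b')
ax2 = axes[1, 1].twinx()
ax2.scatter(x2, y2, edgecolors='r', facecolors='none')
axes[1, 1].set_title('Line + scatter, twin y-axes')

fig.suptitle('Four Subplots in One Figure')
fig.tight_layout()
fig.savefig('four_subplots.png', dpi=150)
###Output _____no_output_____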
nbs/22_01_04__Do_Offsets_Suck.ipynb
###Markdown [01/04/22] Do Offsets Suck? ###Code import matplotlib.pyplot as plt import numpy as np import pandas as pd from pathlib import Path import seaborn as sns plt.style.use('default') sns.set_theme( style='ticks', font_scale=1.2, rc={ 'axes.linewidth': '0.8', 'axes.grid': True, 'figure.constrained_layout.use': True, 'grid.linewidth': '0.8', 'legend.edgecolor': '1.0', 'legend.fontsize': 'small', 'legend.title_fontsize': 'small', 'xtick.major.width': '0.8', 'ytick.major.width': '0.8' }, ) exp_meta_paths = [ # Path(f'/home/mansheej/open_lth_data/lottery_78a119e24960764e0de0964887d2597f/'), # 50k x 2 @ 0.4 # Path(f'/home/mansheej/open_lth_data/lottery_acab023c0d3d306bb3ac850a9a4eeaf0/'), # 50k x 1 @ 0.2 Path(f'/home/mansheej/open_lth_data/lottery_b9105738f8034a3990d5962154252ff1/'), # 12.8k R x 4 @ 0.4 Path(f'/home/mansheej/open_lth_data/lottery_b8b68299cccbaa6d57152991ae1c7694/'), # 12.8k @ 3 x 4 @ 0.4 Path(f'/home/mansheej/open_lth_data/lottery_104ece4c18ea87469bdebd2865113e21/'), # 12.8k + 12400 @ 3 x 4 @ 0.4 Path(f'/home/mansheej/open_lth_data/lottery_e38b7fa264c20fff39689c38547e1008/'), # 12.8k + 37200 @ 3 x 4 @ 0.4 ] exp_paths = [[emp / f'replicate_{i}' for i in range(1, 5)] for emp in exp_meta_paths] plt.figure(figsize=(8.4, 4.8)) ls = [] for i, eps in enumerate(exp_paths): num_levels = 15 acc_run_level = [] for p in eps: acc_level = [] for l in range(num_levels + 1): df = pd.read_csv(p / f'level_{l}/main/logger', header=None) acc_level.append(df[2].iloc[-2]) acc_run_level.append(acc_level) acc_run_level = np.array(acc_run_level) x = np.arange(16) ys = acc_run_level y_mean, y_std = ys.mean(0), ys.std(0) c = f'C{i}' l = plt.plot(x, y_mean, c=c, alpha=0.9, linewidth=2) ls.append(l[0]) plt.fill_between(x, y_mean + y_std, y_mean - y_std, color=c, alpha=0.3) plt.legend( ls, [ # '2 Passes, All 50000 Examples, 782 Steps', # '1 Pass, All 50000 Examples, 391 Steps', '4 Passes, 12800 Random Examples, 400 Steps', '4 Passes, 12800 Smallest Scores w/ Offset 0 at Epoch 3, 400 Steps', '4 Passes, 12800 Smallest Scores w/ Offset 12400 at Epoch 3, 400 Steps', '4 Passes, 12800 Smallest Scores w/ Offset 37200 at Epoch 3, 400 Steps', ], ) plt.xlim(0, 15) plt.ylim(0.835, 0.925) plt.xticks(np.arange(0, 16, 2), [f'{f*100:.1f}' for f in 0.8**np.arange(0, 16, 2)]) plt.xlabel('% Weights Remaining') plt.ylabel('Test Accuracy') plt.title('CIFAR10 ResNet20: Ordering Test, LR 0.4 Only') sns.despine() plt.savefig('/home/mansheej/open_lth/figs/0020.svg') plt.show() ###Output _____no_output_____
notebooks/sets.ipynb
###Markdown Sets[![nbviewer](https://raw.githubusercontent.com/jupyter/design/master/logos/Badges/nbviewer_badge.svg)](https://nbviewer.org/github/matyama/pfds/blob/main/notebooks/sets.ipynb)| instance | persistence | empty | member | insert ||:------------------:|:-----------:|:-----:|:---------:|:---------:|| Binary Search Tree | ephemeral | O(1) | O(n) | O(n) || Red-Black Tree | ephemeral | O(1) | O(log(n)) | O(log(n)) | ###Code class Set s where -- | Construct new (empty) set empty :: Ord a => s a -- | Check whether a set contains given item member :: Ord a => a -> s a -> Bool -- | Add new item to a set while maintaining the item uniqueness property insert :: Ord a => a -> s a -> s a ###Output _____no_output_____ ###Markdown Unbalanced SetImplementation of an unbalanced set via a *Binary Search Tree (BST)*. ###Code data Tree a = Empty | Node (Tree a) a (Tree a) instance Set Tree where -- | Construct an empty set in O(1). empty = Empty -- | Check whether this set contains given item. -- | Since the underlying BST may not be balanced, this function may take O(n) steps in the worst case. member _ Empty = False member x (Node a y b) = case (compare x y) of EQ -> True LT -> member x a GT -> member x b -- | Add new item to this set if it's not present yet. -- | Similarly to 'member', for an unbalanced instance this may take up to O(n) steps. insert x Empty = Node Empty x Empty insert x s @ (Node a y b) = case (compare x y) of EQ -> s LT -> Node (insert x a) y b GT -> Node a y (insert x b) ###Output _____no_output_____ ###Markdown Balanced SetImplementation of a balanced set via a [Red-Black Tree](https://en.wikipedia.org/wiki/Red%E2%80%93black_tree) without any fancy optimizations. Specifically, in `ins` (e.g. for the left child) dosn't have to check for all the red-red violations in `balance` (actually it does not have to check the color of any node not on the search path). ###Code data Color = R | B data Tree a = Empty | Node Color (Tree a) a (Tree a) -- | Re-balance and locally repair the RBT color property by pushing -- | one of two consecutive red nodes with a black parent up the path to the root. balance :: Color -> Tree a -> a -> Tree a -> Tree a balance B (Node R (Node R a x b) y c) z d = Node R (Node B a x b) y (Node B c z d) balance B (Node R a x (Node R b y c)) z d = Node R (Node B a x b) y (Node B c z d) balance B a x (Node R (Node R b y c) z d) = Node R (Node B a x b) y (Node B c z d) balance B a x (Node R b y (Node R c z d)) = Node R (Node B a x b) y (Node B c z d) balance color a x b = Node color a x b instance Set Tree where -- | Construct an empty set in O(1). empty = Empty -- | Check whether this set contains given item. -- | Since the underlying RBT is balanced, this function takes O(log(n)) steps in the worst case. member _ Empty = False member x (Node _ a y b) = case (compare x y) of EQ -> True LT -> member x a GT -> member x b -- | Add new item to this set if it's not present yet. -- | -- | Call to 'insert' takes at most O(log(n)) steps because the tree is kept balanced by -- | 'balance' when backing up after adding new node and the fact that in a RB tree the deepest -- | leaf is at most twice as far from the root as the shallowest leaf is. insert x Empty = Node R Empty x Empty insert x s = let (Node _ a y b) = ins s in Node B a y b where ins Empty = Node R Empty x Empty ins s @ (Node color a y b) = case (compare x y) of EQ -> s LT -> balance color (ins a) y b GT -> balance color a y (ins b) ###Output _____no_output_____
UniFiCourseSpring2020/jupyter-notebooks.ipynb
###Markdown <img src="http://www.cerm.unifi.it/chianti/images/logo%20unifi_positivo.jpg" alt="UniFI logo" style="float: left; width: 20%; height: 20%;"> Massimo Nocentini, PhD.February 7, 2020: initAbstractA (very concise) introduction to the Python ecosystem. ###Code __AUTHORS__ = {'am': ("Andrea Marino", "[email protected]",), 'mn': ("Massimo Nocentini", "[email protected]", "https://github.com/massimo-nocentini/",)} __KEYWORDS__ = ['Python', 'Jupyter', 'notebooks', 'keynote',] ###Output _____no_output_____ ###Markdown IPython: Beyond Normal PythonThere are many options for development environments for Python, and I'm often asked which one I use in my own work.My answer sometimes surprises people: my preferred environment is [IPython](http://ipython.org/) plus a text editor (in my case, Emacs or Atom depending on my mood).IPython (short for *Interactive Python*) was started in 2001 by Fernando Perez as an enhanced Python interpreter, and has since grown into a project aiming to provide, in Perez's words, "Tools for the entire life cycle of research computing."If Python is the engine of our data science task, you might think of IPython as the interactive control panel. As well as being a useful interactive interface to Python, IPython also provides a number of useful syntactic additions to the language; we'll cover the most useful of these additions here.In addition, IPython is closely tied with the [Jupyter project](http://jupyter.org), which provides a browser-based notebook that is useful for development, collaboration, sharing, and even publication of data science results.The IPython notebook is actually a special case of the broader Jupyter notebook structure, which encompasses notebooks for Julia, R, and other programming languages.As an example of the usefulness of the notebook format, look no further than the page you are reading: the entire manuscript for this book was composed as a set of IPython notebooks. IPython is about using Python effectively for interactive scientific and data-intensive computing.This chapter will start by stepping through some of the IPython features that are useful to the practice of data science, focusing especially on the syntax it offers beyond the standard features of Python.Next, we will go into a bit more depth on some of the more useful "magic commands" that can speed-up common tasks in creating and using data science code.Finally, we will touch on some of the features of the notebook that make it useful in understanding data and sharing results. Shell or Notebook?There are two primary means of using IPython that we'll discuss in this chapter: the IPython shell and the IPython notebook.The bulk of the material in this chapter is relevant to both, and the examples will switch between them depending on what is most convenient.In the few sections that are relevant to just one or the other, we will explicitly state that fact.Before we start, some words on how to launch the IPython shell and IPython notebook. 
Launching the IPython ShellThis chapter, like most of this book, is not designed to be absorbed passively.I recommend that as you read through it, you follow along and experiment with the tools and syntax we cover: the muscle-memory you build through doing this will be far more useful than the simple act of reading about it.Start by launching the IPython interpreter by typing **``ipython``** on the command-line; alternatively, if you've installed a distribution like Anaconda or EPD, there may be a launcher specific to your system (we'll discuss this more fully in [Help and Documentation in IPython](01.01-Help-And-Documentation.ipynb)).Once you do this, you should see a prompt like the following:```IPython 4.0.1 -- An enhanced Interactive Python.? -> Introduction and overview of IPython's features.%quickref -> Quick reference.help -> Python's own help system.object? -> Details about 'object', use 'object??' for extra details.In [1]:```With that, you're ready to follow along. Launching the Jupyter NotebookThe Jupyter notebook is a browser-based graphical interface to the IPython shell, and builds on it a rich set of dynamic display capabilities.As well as executing Python/IPython statements, the notebook allows the user to include formatted text, static and dynamic visualizations, mathematical equations, JavaScript widgets, and much more.Furthermore, these documents can be saved in a way that lets other people open them and execute the code on their own systems.Though the IPython notebook is viewed and edited through your web browser window, it must connect to a running Python process in order to execute code.This process (known as a "kernel") can be started by running the following command in your system shell:```$ jupyter notebook``` This command will launch a local web server that will be visible to your browser.It immediately spits out a log showing what it is doing; that log will look something like this:```$ jupyter notebook[NotebookApp] Serving notebooks from local directory: /Users/jakevdp/PythonDataScienceHandbook[NotebookApp] 0 active kernels [NotebookApp] The IPython Notebook is running at: http://localhost:8888/[NotebookApp] Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).```Upon issuing the command, your default browser should automatically open and navigate to the listed local URL;the exact address will depend on your system.If the browser does not open automatically, you can open a window and manually open this address (*http://localhost:8888/* in this example). Help and Documentation in IPythonIf you read no other section in this chapter, read this one: I find the tools discussed here to be the most transformative contributions of IPython to my daily workflow.When a technologically-minded person is asked to help a friend, family member, or colleague with a computer problem, most of the time it's less a matter of knowing the answer as much as knowing how to quickly find an unknown answer.In data science it's the same: searchable web resources such as online documentation, mailing-list threads, and StackOverflow answers contain a wealth of information, even (especially?) 
if it is a topic you've found yourself searching before.*Being an effective practitioner of data science is less about memorizing the tool or command you should use for every possible situation, and more about learning to effectively find the information you don't know, whether through a web search engine or another means.* One of the most useful functions of IPython/Jupyter is to shorten the gap between the user and the type of documentation and search that will help them do their work effectively.While web searches still play a role in answering complicated questions, an amazing amount of information can be found through IPython alone.Some examples of the questions IPython can help answer in a few keystrokes:- How do I call this function? What arguments and options does it have?- What does the source code of this Python object look like?- What is in this package I imported? What attributes or methods does this object have?Here we'll discuss IPython's tools to quickly access this information, namely the ``?`` character to explore documentation, the ``??`` characters to explore source code, and the Tab key for auto-completion. Accessing Documentation with ``?``The Python language and its data science ecosystem is built with the user in mind, and one big part of that is access to documentation.Every Python object contains the reference to a string, known as a *doc string*, which in most cases will contain a concise summary of the object and how to use it.Python has a built-in ``help()`` function that can access this information and prints the results.For example, to see the documentation of the built-in ``len`` function, you can do the following: ###Code help(len) ###Output Help on built-in function len in module builtins: len(obj, /) Return the number of items in a container. ###Markdown This notation works for just about anything, including object methods: ###Code L = [1, 2, 3] help(L.insert) ###Output Help on built-in function insert: insert(index, object, /) method of builtins.list instance Insert object before index. ###Markdown or even objects themselves, with the documentation from their type: ###Code help(L) ###Output Help on list object: class list(object) | list(iterable=(), /) | | Built-in mutable sequence. | | If no argument is given, the constructor creates a new empty list. | The argument must be an iterable if specified. | | Methods defined here: | | __add__(self, value, /) | Return self+value. | | __contains__(self, key, /) | Return key in self. | | __delitem__(self, key, /) | Delete self[key]. | | __eq__(self, value, /) | Return self==value. | | __ge__(self, value, /) | Return self>=value. | | __getattribute__(self, name, /) | Return getattr(self, name). | | __getitem__(...) | x.__getitem__(y) <==> x[y] | | __gt__(self, value, /) | Return self>value. | | __iadd__(self, value, /) | Implement self+=value. | | __imul__(self, value, /) | Implement self*=value. | | __init__(self, /, *args, **kwargs) | Initialize self. See help(type(self)) for accurate signature. | | __iter__(self, /) | Implement iter(self). | | __le__(self, value, /) | Return self<=value. | | __len__(self, /) | Return len(self). | | __lt__(self, value, /) | Return self<value. | | __mul__(self, value, /) | Return self*value. | | __ne__(self, value, /) | Return self!=value. | | __repr__(self, /) | Return repr(self). | | __reversed__(self, /) | Return a reverse iterator over the list. | | __rmul__(self, value, /) | Return value*self. | | __setitem__(self, key, value, /) | Set self[key] to value. 
| | __sizeof__(self, /) | Return the size of the list in memory, in bytes. | | append(self, object, /) | Append object to the end of the list. | | clear(self, /) | Remove all items from list. | | copy(self, /) | Return a shallow copy of the list. | | count(self, value, /) | Return number of occurrences of value. | | extend(self, iterable, /) | Extend list by appending elements from the iterable. | | index(self, value, start=0, stop=9223372036854775807, /) | Return first index of value. | | Raises ValueError if the value is not present. | | insert(self, index, object, /) | Insert object before index. | | pop(self, index=-1, /) | Remove and return item at index (default last). | | Raises IndexError if list is empty or index is out of range. | | remove(self, value, /) | Remove first occurrence of value. | | Raises ValueError if the value is not present. | | reverse(self, /) | Reverse *IN PLACE*. | | sort(self, /, *, key=None, reverse=False) | Stable sort *IN PLACE*. | | ---------------------------------------------------------------------- | Static methods defined here: | | __new__(*args, **kwargs) from builtins.type | Create and return a new object. See help(type) for accurate signature. | | ---------------------------------------------------------------------- | Data and other attributes defined here: | | __hash__ = None ###Markdown Importantly, this will even work for functions or other objects you create yourself!Here we'll define a small function with a docstring: ###Code def square(a): """Return the square of a.""" return a ** 2 ###Output _____no_output_____ ###Markdown Note that to create a docstring for our function, we simply placed a string literal in the first line.Because doc strings are usually multiple lines, by convention we used Python's triple-quote notation for multi-line strings. ###Code help(square) ###Output Help on function square in module __main__: square(a) Return the square of a. ###Markdown This quick access to documentation via docstrings is one reason you should get in the habit of always adding such inline documentation to the code you write! Accessing Source Code with ``??``Because the Python language is so easily readable, another level of insight can usually be gained by reading the source code of the object you're curious about.IPython provides a shortcut to the source code with the double question mark (``??``):```ipythonIn [8]: square??Type: functionString form: Definition: square(a)Source:def square(a): "Return the square of a" return a ** 2```For simple functions like this, the double question-mark can give quick insight into the under-the-hood details. If you play with this much, you'll notice that sometimes the ``??`` suffix doesn't display any source code: this is generally because the object in question is not implemented in Python, but in C or some other compiled extension language.If this is the case, the ``??`` suffix gives the same output as the ``?`` suffix.You'll find this particularly with many of Python's built-in objects and types, for example ``len`` from above:```ipythonIn [9]: len??Type: builtin_function_or_methodString form: Namespace: Python builtinDocstring:len(object) -> integerReturn the number of items of a sequence or mapping.```Using ``?`` and/or ``??`` gives a powerful and quick interface for finding information about what any Python function or module does. 
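###Markdown The same information is also reachable from plain Python, without the ``?``/``??`` shortcuts, through the ``__doc__`` attribute and the standard-library ``inspect`` module. A small sketch using the ``square`` function defined above (like ``??``, the source lookup only works for objects written in Python): ###Code
import inspect

print(square.__doc__)             # the raw docstring
print(inspect.getdoc(square))     # cleaned-up docstring, close to what ? shows
print(inspect.getsource(square))  # source text, close to what ?? shows
###Output _____no_output_____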
Exploring Modules with Tab-CompletionIPython's other useful interface is the use of the tab key for auto-completion and exploration of the contents of objects, modules, and name-spaces.In the examples that follow, we'll use ```` to indicate when the Tab key should be pressed. Tab-completion of object contentsEvery Python object has various attributes and methods associated with it.Like with the ``help`` function discussed before, Python has a built-in ``dir`` function that returns a list of these, but the tab-completion interface is much easier to use in practice.To see a list of all available attributes of an object, you can type the name of the object followed by a period ("``.``") character and the Tab key:```ipythonIn [10]: L.L.append L.copy L.extend L.insert L.remove L.sort L.clear L.count L.index L.pop L.reverse ```To narrow-down the list, you can type the first character or several characters of the name, and the Tab key will find the matching attributes and methods:```ipythonIn [10]: L.cL.clear L.copy L.count In [10]: L.coL.copy L.count ``` If there is only a single option, pressing the Tab key will complete the line for you.For example, the following will instantly be replaced with ``L.count``:```ipythonIn [10]: L.cou```Though Python has no strictly-enforced distinction between public/external attributes and private/internal attributes, by convention a preceding underscore is used to denote such methods.For clarity, these private methods and special methods are omitted from the list by default, but it's possible to list them by explicitly typing the underscore:```ipythonIn [10]: L._L.__add__ L.__gt__ L.__reduce__L.__class__ L.__hash__ L.__reduce_ex__```For brevity, we've only shown the first couple lines of the output.Most of these are Python's special double-underscore methods (often nicknamed "dunder" methods). Tab completion when importingTab completion is also useful when importing objects from packages.Here we'll use it to find all possible imports in the ``itertools`` package that start with ``co``:```In [10]: from itertools import cocombinations compresscombinations_with_replacement count```Similarly, you can use tab-completion to see which imports are available on your system (this will change depending on which third-party scripts and modules are visible to your Python session):```In [10]: import Display all 399 possibilities? (y or n)Crypto dis py_compileCython distutils pyclbr... ... ...difflib pwd zmqIn [10]: import hhashlib hmac http heapq html husl ```(Note that for brevity, I did not print here all 399 importable packages and modules on my system.) 
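###Markdown Tab-completion is interactive, but the ``dir`` function mentioned above gives a scriptable approximation of the same listings; a quick sketch that reproduces the ``L.co<TAB>`` example: ###Code
L = [1, 2, 3]

# Public attributes starting with 'co' -- what L.co<TAB> would suggest
print([name for name in dir(L) if name.startswith('co')])

# The double-underscore ("dunder") names show up in dir() as well
print([name for name in dir(L) if name.startswith('__')][:5])
###Output _____no_output_____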
Beyond tab completion: wildcard matchingTab completion is useful if you know the first few characters of the object or attribute you're looking for, but is little help if you'd like to match characters at the middle or end of the word.For this use-case, IPython provides a means of wildcard matching for names using the ``*`` character.For example, we can use this to list every object in the namespace that ends with ``Warning``:```ipythonIn [10]: *Warning?BytesWarning RuntimeWarningDeprecationWarning SyntaxWarningFutureWarning UnicodeWarningImportWarning UserWarningPendingDeprecationWarning WarningResourceWarning``` Notice that the ``*`` character matches any string, including the empty string.Similarly, suppose we are looking for a string method that contains the word ``find`` somewhere in its name.We can search for it this way:```ipythonIn [10]: str.*find*?str.findstr.rfind```I find this type of flexible wildcard search can be very useful for finding a particular command when getting to know a new package or reacquainting myself with a familiar one. IPython Magic CommandsThe previous two sections showed how IPython lets you use and explore Python efficiently and interactively.Here we'll begin discussing some of the enhancements that IPython adds on top of the normal Python syntax.These are known in IPython as *magic commands*, and are prefixed by the ``%`` character.These magic commands are designed to succinctly solve various common problems in standard data analysis.Magic commands come in two flavors: *line magics*, which are denoted by a single ``%`` prefix and operate on a single line of input, and *cell magics*, which are denoted by a double ``%%`` prefix and operate on multiple lines of input.We'll demonstrate and discuss a few brief examples here, and come back to more focused discussion of several useful magic commands later in the chapter. Running External Code: ``%run``As you begin developing more extensive code, you will likely find yourself working in both IPython for interactive exploration, as well as a text editor to store code that you want to reuse.Rather than running this code in a new window, it can be convenient to run it within your IPython session.This can be done with the ``%run`` magic.For example, imagine you've created a ``myscript.py`` file with the following contents: ###Code %%bash cat my-script.py ###Output def square(x): """square a number""" return x ** 2 for N in range(1, 4): print(N, "squared is", square(N)) ###Markdown You can execute this from your IPython session as follows: ###Code %run my-script.py ###Output 1 squared is 1 2 squared is 4 3 squared is 9 ###Markdown Note also that after you've run this script, any functions defined within it are available for use in your IPython session: ###Code square(5) ###Output _____no_output_____ ###Markdown There are several options to fine-tune how your code is run; you can see the documentation in the normal way, by typing **``%run?``** in the IPython interpreter. Timing Code Execution: ``%timeit``Another example of a useful magic function is ``%timeit``, which will automatically determine the execution time of the single-line Python statement that follows it.For example, we may want to check the performance of a list comprehension: ###Code %timeit L = [n ** 2 for n in range(1000)] ###Output 307 µs ± 262 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each) ###Markdown The benefit of ``%timeit`` is that for short commands it will automatically perform multiple runs in order to attain more robust results. 
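###Markdown For reference, a similar measurement can be taken outside IPython with the standard-library ``timeit`` module, which is essentially what ``%timeit`` wraps; a minimal sketch, where the repetition count of 1000 is an arbitrary choice: ###Code
import timeit

# Time the same list comprehension 1000 times and report the per-loop average
elapsed = timeit.timeit('[n ** 2 for n in range(1000)]', number=1000)
print(f"{elapsed / 1000 * 1e6:.1f} µs per loop (average over 1000 loops)")
###Output _____no_output_____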
For multi line statements, adding a second ``%`` sign will turn this into a cell magic that can handle multiple lines of input.For example, here's the equivalent construction with a ``for``-loop: ###Code %%timeit L = [] for n in range(1000): L.append(n ** 2) ###Output 346 µs ± 351 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each) ###Markdown We can immediately see that list comprehensions are about 10% faster than the equivalent ``for``-loop construction in this case. Help on Magic Functions: ``?``, ``%magic``, and ``%lsmagic``Like normal Python functions, IPython magic functions have docstrings, and this usefuldocumentation can be accessed in the standard manner.So, for example, to read the documentation of the ``%timeit`` magic simply type this:```ipythonIn [10]: %timeit?```Documentation for other functions can be accessed similarly.To access a general description of available magic functions, including some examples, you can type this:```ipythonIn [11]: %magic```For a quick and simple list of all available magic functions, type this:```ipythonIn [12]: %lsmagic```Finally, I'll mention that it is quite straightforward to define your own magic functions if you wish. Input and Output HistoryPreviously we saw that the IPython shell allows you to access previous commands with the up and down arrow keys, or equivalently the Ctrl-p/Ctrl-n shortcuts.Additionally, in both the shell and the notebook, IPython exposes several ways to obtain the output of previous commands, as well as string versions of the commands themselves.We'll explore those here. IPython's ``In`` and ``Out`` ObjectsBy now I imagine you're quite familiar with the ``In [1]:``/``Out[1]:`` style prompts used by IPython.But it turns out that these are not just pretty decoration: they give a clue as to how you can access previous inputs and outputs in your current session.Imagine you start a session that looks like this: ###Code import math math.sin(2) math.cos(2) ###Output _____no_output_____ ###Markdown We've imported the built-in ``math`` package, then computed the sine and the cosine of the number 2.These inputs and outputs are displayed in the shell with ``In``/``Out`` labels, but there's more–IPython actually creates some Python variables called ``In`` and ``Out`` that are automatically updated to reflect this history: ###Code print(str(In)[:1000]) print(str(Out)[:1000]) ###Output {29: 25, 30: 0.9092974268256817, 31: -0.4161468365471424, 32: -0.4161468365471424, 33: 0.9092974268256817, 49: False, 51: False, 61: ['images', 'introduction.ipynb', 'introduction.slides.html', 'jupyter-notebooks.ipynb', 'jupyter-notebooks.slides.html', 'Makefile', 'my-script.py', 'notebooks.ipynb'], 62: ['/home/mn/Developer/working-copies/pythons/on-python/UniFiCourseSpring2020'], 63: <class 'IPython.utils.text.SList'>, 71: '[\'\', \'__AUTHORS__ = {\\\'am\\\': ("Andrea Marino", \\n "[email protected]",),\\n', 72: '[\'\', \'__AUTHORS__ = {\\\'am\\\': ("Andrea Marino", \\n "[email protected]",),\\n \\\'mn\\\': ("Massimo Nocentini", \\n "[email protected]", \\n "https://github.com/massimo-nocentini/",)}\\n\\n__KEYWORDS__ = [\\\'Python\\\', \\\'Jupyter\\\', \\\'notebooks\\\', \\\'keynote\\\',]\', "outline = []\\noutline.append(\'Hello!\')\\noutline.append(\'Python\')\\noutl ###Markdown The ``In`` object is a list, which keeps track of the commands in order (the first item in the list is a place-holder so that ``In[1]`` can refer to the first command): ###Code print(In[1]) import math ###Output __AUTHORS__ = {'am': ("Andrea Marino", "[email 
protected]",), 'mn': ("Massimo Nocentini", "[email protected]", "https://github.com/massimo-nocentini/",)} __KEYWORDS__ = ['Python', 'Jupyter', 'notebooks', 'keynote',] ###Markdown The ``Out`` object is not a list but a dictionary mapping input numbers to their outputs (if any): ###Code print(Out[29]) ###Output 25 ###Markdown Note that not all operations have outputs: for example, ``import`` statements and ``print`` statements don't affect the output.The latter may be surprising, but makes sense if you consider that ``print`` is a function that returns ``None``; for brevity, any command that returns ``None`` is not added to ``Out``. Where this can be useful is if you want to interact with past results.For example, let's check the sum of ``sin(2) ** 2`` and ``cos(2) ** 2`` using the previously-computed results: ###Code Out[2] ** 2 + Out[3] ** 2 ###Output _____no_output_____ ###Markdown The result is ``1.0`` as we'd expect from the well-known trigonometric identity.In this case, using these previous results probably is not necessary, but it can become very handy if you execute a very expensive computation and want to reuse the result! Underscore Shortcuts and Previous OutputsThe standard Python shell contains just one simple shortcut for accessing previous output; the variable ``_`` (i.e., a single underscore) is kept updated with the previous output; this works in IPython as well: ###Code print(_) ###Output {29: 25, 30: 0.9092974268256817, 31: -0.4161468365471424, 32: -0.4161468365471424, 33: 0.9092974268256817, 49: False, 51: False, 61: ['images', 'introduction.ipynb', 'introduction.slides.html', 'jupyter-notebooks.ipynb', 'jupyter-notebooks.slides.html', 'Makefile', 'my-script.py', 'notebooks.ipynb'], 62: ['/home/mn/Developer/working-copies/pythons/on-python/UniFiCourseSpring2020'], 63: <class 'IPython.utils.text.SList'>, 71: '[\'\', \'__AUTHORS__ = {\\\'am\\\': ("Andrea Marino", \\n "[email protected]",),\\n', 72: '[\'\', \'__AUTHORS__ = {\\\'am\\\': ("Andrea Marino", \\n "[email protected]",),\\n \\\'mn\\\': ("Massimo Nocentini", \\n "[email protected]", \\n "https://github.com/massimo-nocentini/",)}\\n\\n__KEYWORDS__ = [\\\'Python\\\', \\\'Jupyter\\\', \\\'notebooks\\\', \\\'keynote\\\',]\', "outline = []\\noutline.append(\'Hello!\')\\noutline.append(\'Python\')\\noutl ###Markdown But IPython takes this a bit further—you can use a double underscore to access the second-to-last output, and a triple underscore to access the third-to-last output (skipping any commands with no output): ###Code print(__) print(___) ###Output -0.4161468365471424 0.9092974268256817 ###Markdown IPython stops there: more than three underscores starts to get a bit hard to count, and at that point it's easier to refer to the output by line number. 
There is one more shortcut we should mention, however–a shorthand for ``Out[X]`` is ``_X`` (i.e., a single underscore followed by the line number): ###Code Out[2], _2 ###Output _____no_output_____ ###Markdown Suppressing OutputSometimes you might wish to suppress the output of a statement (this is perhaps most common with the plotting commands that we'll explore in [Introduction to Matplotlib](04.00-Introduction-To-Matplotlib.ipynb)).Or maybe the command you're executing produces a result that you'd prefer not like to store in your output history, perhaps so that it can be deallocated when other references are removed.The easiest way to suppress the output of a command is to add a semicolon to the end omf the line: ###Code math.sin(2) + math.cos(2); ###Output _____no_output_____ ###Markdown Note that the result is computed silently, and the output is neither displayed on the screen or stored in the ``Out`` dictionary: ###Code 14 in Out ###Output _____no_output_____ ###Markdown Related Magic CommandsFor accessing a batch of previous inputs at once, the ``%history`` magic command is very helpful.Here is how you can print the first four inputs: ###Code %history -n 1-3 ###Output 1: __AUTHORS__ = {'am': ("Andrea Marino", "[email protected]",), 'mn': ("Massimo Nocentini", "[email protected]", "https://github.com/massimo-nocentini/",)} __KEYWORDS__ = ['Python', 'Jupyter', 'notebooks', 'keynote',] 2: outline = [] outline.append('Hello!') outline.append('Python') outline.append('Whys and refs') outline.append('On the shoulders of giants') outline.append('Set the env up') outline.append('Notebooks') outline.append('Course agenda') 3: help(len) ###Markdown As usual, you can type ``%history?`` for more information and a description of options available.Other similar magic commands are ``%rerun`` (which will re-execute some portion of the command history) and ``%save`` (which saves some set of the command history to a file).For more information, I suggest exploring these using the ``?`` help functionality discussed in [Help and Documentation in IPython](01.01-Help-And-Documentation.ipynb). IPython and Shell CommandsWhen working interactively with the standard Python interpreter, one of the frustrations is the need to switch between multiple windows to access Python tools and system command-line tools.IPython bridges this gap, and gives you a syntax for executing shell commands directly from within the IPython terminal.The magic happens with the exclamation point: anything appearing after ``!`` on a line will be executed not by the Python kernel, but by the system command-line.The following assumes you're on a Unix-like system, such as Linux or Mac OSX.Some of the examples that follow will fail on Windows, which uses a different type of shell by default (though with the 2016 announcement of native Bash shells on Windows, soon this may no longer be an issue!).If you're unfamiliar with shell commands, I'd suggest reviewing the [Shell Tutorial](http://swcarpentry.github.io/shell-novice/) put together by the always excellent Software Carpentry Foundation. 
Quick Introduction to the ShellA full intro to using the shell/terminal/command-line is well beyond the scope of this chapter, but for the uninitiated we will offer a quick introduction here.The shell is a way to interact textually with your computer.Ever since the mid 1980s, when Microsoft and Apple introduced the first versions of their now ubiquitous graphical operating systems, most computer users have interacted with their operating system through familiar clicking of menus and drag-and-drop movements.But operating systems existed long before these graphical user interfaces, and were primarily controlled through sequences of text input: at the prompt, the user would type a command, and the computer would do what the user told it to.Those early prompt systems are the precursors of the shells and terminals that most active data scientists still use today. Someone unfamiliar with the shell might ask why you would bother with this, when many results can be accomplished by simply clicking on icons and menus.A shell user might reply with another question: why hunt icons and click menus when you can accomplish things much more easily by typing?While it might sound like a typical tech preference impasse, when moving beyond basic tasks it quickly becomes clear that the shell offers much more control of advanced tasks, though admittedly the learning curve can intimidate the average computer user.As an example, here is a sample of a Linux/OSX shell session where a user explores, creates, and modifies directories and files on their system (``osx:~ $`` is the prompt, and everything after the ``$`` sign is the typed command; text that is preceded by a ```` is meant just as description, rather than something you would actually type in):```bashosx:~ $ echo "hello world" echo is like Python's print functionhello worldosx:~ $ pwd pwd = print working directory/home/jake this is the "path" that we're sitting inosx:~ $ ls ls = list working directory contentsnotebooks projects osx:~ $ cd projects/ cd = change directoryosx:projects $ pwd/home/jake/projects``` ```bashosx:projects $ lsdatasci_book mpld3 myproject.txtosx:projects $ mkdir myproject mkdir = make new directoryosx:projects $ cd myproject/osx:myproject $ mv ../myproject.txt ./ mv = move file. Here we're moving the file myproject.txt from one directory up (../) to the current directory (./)osx:myproject $ lsmyproject.txt```Notice that all of this is just a compact way to do familiar operations (navigating a directory structure, creating a directory, moving a file, etc.) by typing commands rather than clicking icons and menus.Note that with just a few commands (``pwd``, ``ls``, ``cd``, ``mkdir``, and ``cp``) you can do many of the most common file operations.It's when you go beyond these basics that the shell approach becomes really powerful. 
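###Markdown For comparison, the shell commands above (``pwd``, ``ls``, ``cd``, ``mkdir``, ``mv``) all have plain-Python counterparts in the standard library. A rough sketch follows; the directory and file names are made up, so the lines that would change the filesystem are left commented out: ###Code
import os
import shutil
from pathlib import Path

print(os.getcwd())      # like pwd
print(os.listdir('.'))  # like ls

# Commented out because the paths are illustrative only:
# os.chdir('projects')                           # like cd projects/
# Path('myproject').mkdir(exist_ok=True)         # like mkdir myproject
# shutil.move('../myproject.txt', 'myproject/')  # like mv ../myproject.txt ./myproject/
###Output _____no_output_____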
Shell Commands in IPythonAny command that works at the command-line can be used in IPython by prefixing it with the ``!`` character.For example, the ``ls``, ``pwd``, and ``echo`` commands can be run as follows: ###Code !ls !pwd !echo "printing from the shell" ###Output printing from the shell ###Markdown Passing Values to and from the ShellShell commands can not only be called from IPython, but can also be made to interact with the IPython namespace.For example, you can save the output of any shell command to a Python list using the assignment operator: ###Code contents = !ls contents directory = !pwd directory ###Output _____no_output_____ ###Markdown Note that these results are not returned as lists, but as a special shell return type defined in IPython: ###Code type(directory) ###Output _____no_output_____ ###Markdown This looks and acts a lot like a Python list, but has additional functionality, such asthe ``grep`` and ``fields`` methods and the ``s``, ``n``, and ``p`` properties that allow you to search, filter, and display the results in convenient ways.For more information on these, you can use IPython's built-in help features. Communication in the other direction–passing Python variables into the shell–is possible using the ``{varname}`` syntax: ###Code message = "hello from Python" !echo {message} ###Output hello from Python ###Markdown The curly braces contain the variable name, which is replaced by the variable's contents in the shell command. Errors and DebuggingCode development and data analysis always require a bit of trial and error, and IPython contains tools to streamline this process.This section will briefly cover some options for controlling Python's exception reporting, followed by exploring tools for debugging errors in code. Controlling Exceptions: ``%xmode``Most of the time when a Python script fails, it will raise an Exception.When the interpreter hits one of these exceptions, information about the cause of the error can be found in the *traceback*, which can be accessed from within Python.With the ``%xmode`` magic function, IPython allows you to control the amount of information printed when the exception is raised.Consider the following code: ###Code def func1(a, b): return a / b def func2(x): a = x b = x - 1 return func1(a, b) func2(1) ###Output _____no_output_____ ###Markdown Calling ``func2`` results in an error, and reading the printed trace lets us see exactly what happened.By default, this trace includes several lines showing the context of each step that led to the error.Using the ``%xmode`` magic function (short for *Exception mode*), we can change what information is printed.``%xmode`` takes a single argument, the mode, and there are three possibilities: ``Plain``, ``Context``, and ``Verbose``.The default is ``Context``, and gives output like that just shown before.``Plain`` is more compact and gives less information: ###Code %xmode Plain func2(1) ###Output _____no_output_____ ###Markdown The ``Verbose`` mode adds some extra information, including the arguments to any functions that are called: ###Code %xmode Verbose func2(1) ###Output _____no_output_____ ###Markdown This extra information can help narrow-in on why the exception is being raised.So why not use the ``Verbose`` mode all the time?As code gets complicated, this kind of traceback can get extremely long.Depending on the context, sometimes the brevity of ``Default`` mode is easier to work with. 
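###Markdown Outside IPython, a comparable level of control over how much of a trace gets printed is available from the standard-library ``traceback`` module; a small sketch reusing ``func2`` from above (the ``limit=1`` choice is arbitrary): ###Code
import traceback

try:
    func2(1)
except ZeroDivisionError:
    # limit=1 trims the printed trace to a single stack entry
    traceback.print_exc(limit=1)
###Output _____no_output_____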
Debugging: When Reading Tracebacks Is Not EnoughThe standard Python tool for interactive debugging is ``pdb``, the Python debugger.This debugger lets the user step through the code line by line in order to see what might be causing a more difficult error.The IPython-enhanced version of this is ``ipdb``, the IPython debugger.There are many ways to launch and use both these debuggers; we won't cover them fully here.Refer to the online documentation of these two utilities to learn more.In IPython, perhaps the most convenient interface to debugging is the ``%debug`` magic command.If you call it after hitting an exception, it will automatically open an interactive debugging prompt at the point of the exception.The ``ipdb`` prompt lets you explore the current state of the stack, explore the available variables, and even run Python commands!Let's look at the most recent exception, then do some basic tasks–print the values of ``a`` and ``b``, and type ``quit`` to quit the debugging session: ###Code %debug ###Output > <ipython-input-77-586ccabd0db3>(2)func1()  1 def func1(a, b): ----> 2  return a / b  3   4 def func2(x):  5  a = x  ipdb> print(a) 1 ipdb> print(b) 0 ipdb> quit ###Markdown The interactive debugger allows much more than this, though–we can even step up and down through the stack and explore the values of variables there: ###Code %debug ###Output > <ipython-input-77-586ccabd0db3>(2)func1()  1 def func1(a, b): ----> 2  return a / b  3   4 def func2(x):  5  a = x  ipdb> up > <ipython-input-77-586ccabd0db3>(7)func2()  3   4 def func2(x):  5  a = x  6  b = x - 1 ----> 7  return func1(a, b)  ipdb> print(x) 1 ipdb> up > <ipython-input-82-7cb498ea7ed1>(1)<module>() ----> 1 func2(1)  ipdb> down > <ipython-input-77-586ccabd0db3>(7)func2()  3   4 def func2(x):  5  a = x  6  b = x - 1 ----> 7  return func1(a, b)  ipdb> quit ###Markdown This allows you to quickly find out not only what caused the error, but what function calls led up to the error. If you'd like the debugger to launch automatically whenever an exception is raised, you can use the ``%pdb`` magic function to turn on this automatic behavior: ###Code %xmode Plain %pdb on func2(1) ###Output Exception reporting mode: Plain Automatic pdb calling has been turned ON ###Markdown Finally, if you have a script that you'd like to run from the beginning in interactive mode, you can run it with the command ``%run -d``, and use the ``next`` command to step through the lines of code interactively. Partial list of debugging commandsThere are many more available commands for interactive debugging than we've listed here; the following table contains a description of some of the more common and useful ones:| Command | Description ||-----------------|-------------------------------------------------------------|| ``list`` | Show the current location in the file || ``h(elp)`` | Show a list of commands, or find help on a specific command || ``q(uit)`` | Quit the debugger and the program || ``c(ontinue)`` | Quit the debugger, continue in the program || ``n(ext)`` | Go to the next step of the program || ```` | Repeat the previous command || ``p(rint)`` | Print variables || ``s(tep)`` | Step into a subroutine || ``r(eturn)`` | Return out of a subroutine |For more information, use the ``help`` command in the debugger, or take a look at ``ipdb``'s [online documentation](https://github.com/gotcha/ipdb). 
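###Markdown The debugger can also be entered without waiting for an exception: since Python 3.7 the built-in ``breakpoint()`` drops into ``pdb`` (or another debugger selected via the ``PYTHONBREAKPOINT`` environment variable) wherever it is placed. A sketch follows; ``fragile_division`` is a made-up example function, and the calls are commented out so the cell does not block waiting for input: ###Code
import pdb

def fragile_division(a, b):
    breakpoint()  # pauses here in the debugger; type 'c' to continue or 'q' to quit
    return a / b

# fragile_division(1, 0)
# pdb.post_mortem()  # call inside an except block to inspect the active traceback
###Output _____no_output_____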
Profiling and Timing CodeIn the process of developing code and creating data processing pipelines, there are often trade-offs you can make between various implementations.Early in developing your algorithm, it can be counterproductive to worry about such things. As Donald Knuth famously quipped, "We should forget about small efficiencies, say about 97% of the time: premature optimization is the root of all evil."But once you have your code working, it can be useful to dig into its efficiency a bit.Sometimes it's useful to check the execution time of a given command or set of commands; other times it's useful to dig into a multiline process and determine where the bottleneck lies in some complicated series of operations.IPython provides access to a wide array of functionality for this kind of timing and profiling of code.Here we'll discuss the following IPython magic commands:- ``%time``: Time the execution of a single statement- ``%timeit``: Time repeated execution of a single statement for more accuracy- ``%prun``: Run code with the profiler- ``%lprun``: Run code with the line-by-line profiler- ``%memit``: Measure the memory use of a single statement- ``%mprun``: Run code with the line-by-line memory profilerThe last four commands are not bundled with IPython–you'll need to get the ``line_profiler`` and ``memory_profiler`` extensions, which we will discuss in the following sections. Timing Code Snippets: ``%timeit`` and ``%time``We saw the ``%timeit`` line-magic and ``%%timeit`` cell-magic in the introduction to magic functions in [IPython Magic Commands](01.03-Magic-Commands.ipynb); it can be used to time the repeated execution of snippets of code: ###Code %timeit sum(range(100)) ###Output 1.3 µs ± 7.05 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each) ###Markdown Note that because this operation is so fast, ``%timeit`` automatically does a large number of repetitions. For slower commands, ``%timeit`` will automatically adjust and perform fewer repetitions: ###Code %%timeit total = 0 for i in range(1000): for j in range(1000): total += i * (-1) ** j ###Output 368 ms ± 3.52 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) ###Markdown Sometimes repeating an operation is not the best option.For example, if we have a list that we'd like to sort, we might be misled by a repeated operation.Sorting a pre-sorted list is much faster than sorting an unsorted list, so the repetition will skew the result: ###Code import random L = [random.random() for i in range(100000)] %timeit L.sort() ###Output 693 µs ± 39.3 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) ###Markdown For this, the ``%time`` magic function may be a better choice. 
It also is a good choice for longer-running commands, when short, system-related delays are unlikely to affect the result.Let's time the sorting of an unsorted and a presorted list: ###Code import random L = [random.random() for i in range(100000)] print("sorting an unsorted list:") %time L.sort() print("sorting an already sorted list:") %time L.sort() ###Output sorting an already sorted list: CPU times: user 1.19 ms, sys: 19 µs, total: 1.21 ms Wall time: 1.22 ms ###Markdown Notice how much faster the presorted list is to sort, but notice also how much longer the timing takes with ``%time`` versus ``%timeit``, even for the presorted list!This is a result of the fact that ``%timeit`` does some clever things under the hood to prevent system calls from interfering with the timing.For example, it prevents cleanup of unused Python objects (known as *garbage collection*) which might otherwise affect the timing.For this reason, ``%timeit`` results are usually noticeably faster than ``%time`` results. For ``%time`` as with ``%timeit``, using the double-percent-sign cell magic syntax allows timing of multiline scripts: ###Code %%time total = 0 for i in range(1000): for j in range(1000): total += i * (-1) ** j ###Output CPU times: user 465 ms, sys: 0 ns, total: 465 ms Wall time: 464 ms ###Markdown For more information on ``%time`` and ``%timeit``, as well as their available options, use the IPython help functionality (i.e., type ``%time?`` at the IPython prompt). Profiling Full Scripts: ``%prun``A program is made of many single statements, and sometimes timing these statements in context is more important than timing them on their own.Python contains a built-in code profiler (which you can read about in the Python documentation), but IPython offers a much more convenient way to use this profiler, in the form of the magic function ``%prun``.By way of example, we'll define a simple function that does some calculations: ###Code def sum_of_lists(N): total = 0 for i in range(5): L = [j ^ (j >> i) for j in range(N)] total += sum(L) return total ###Output _____no_output_____ ###Markdown Now we can call ``%prun`` with a function call to see the profiled results: ###Code %prun sum_of_lists(1000000) ###Output ###Markdown ```14 function calls in 0.705 seconds Ordered by: internal time ncalls tottime percall cumtime percall filename:lineno(function) 5 0.614 0.123 0.614 0.123 :4() 5 0.043 0.009 0.043 0.009 {built-in method builtins.sum} 1 0.036 0.036 0.693 0.693 :1(sum_of_lists) 1 0.012 0.012 0.705 0.705 :1() 1 0.000 0.000 0.705 0.705 {built-in method builtins.exec} 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}``` The result is a table that indicates, in order of total time on each function call, where the execution is spending the most time. In this case, the bulk of execution time is in the list comprehension inside ``sum_of_lists``.From here, we could start thinking about what changes we might make to improve the performance in the algorithm.For more information on ``%prun``, as well as its available options, use the IPython help functionality (i.e., type ``%prun?`` at the IPython prompt). 
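###Markdown The built-in profiler mentioned above can also be driven directly, without the ``%prun`` magic, through the standard-library ``cProfile`` and ``pstats`` modules. A minimal sketch; the smaller argument and the top-5 cutoff are arbitrary choices to keep the run short: ###Code
import cProfile
import pstats

profiler = cProfile.Profile()
profiler.enable()
sum_of_lists(100000)
profiler.disable()

# Print the five most expensive entries, sorted by internal time
pstats.Stats(profiler).sort_stats('tottime').print_stats(5)
###Output _____no_output_____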
Line-By-Line Profiling with ``%lprun``
The function-by-function profiling of ``%prun`` is useful, but sometimes it's more convenient to have a line-by-line profile report.
This is not built into Python or IPython, but there is a ``line_profiler`` package available for installation that can do this.
Start by using Python's packaging tool, ``pip``, to install the ``line_profiler`` package:
```
$ pip install line_profiler
```
Next, you can use IPython to load the ``line_profiler`` IPython extension, offered as part of this package: ###Code %load_ext line_profiler ###Output _____no_output_____ ###Markdown Now the ``%lprun`` command will do a line-by-line profiling of any function–in this case, we need to tell it explicitly which functions we're interested in profiling: ###Code %lprun -f sum_of_lists sum_of_lists(5000) ###Output _____no_output_____ ###Markdown ```
Timer unit: 1e-06 s

Total time: 0.006239 s
File: /home/mn/Developer/working-copies/pythons/on-python/UniFiCourseSpring2020/mprun_demo.py
Function: sum_of_lists at line 1

Line #  Hits    Time    Per Hit  % Time  Line Contents
==============================================================
   1                                     def sum_of_lists(N):
   2    1       2.0     2.0      0.0         total = 0
   3    6       6.0     1.0      0.1         for i in range(5):
   4    5       5869.0  1173.8   94.1            L = [j ^ (j >> i) for j in range(N)]
   5    5       218.0   43.6     3.5             total += sum(L)
   6    5       144.0   28.8     2.3             del L # remove reference to L
   7    1       0.0     0.0      0.0         return total
```
The result is a table that reports, line by line, how many times each statement ran and what fraction of the total time it consumed.
Here, too, the bulk of the execution time is spent in the list comprehension inside ``sum_of_lists``, on line 4.
From here, we could start thinking about what changes we might make to improve the performance in the algorithm.
For more information on ``%lprun``, as well as its available options, use the IPython help functionality (i.e., type ``%lprun?`` at the IPython prompt). Profiling Memory Use: ``%memit`` and ``%mprun``
Another aspect of profiling is the amount of memory an operation uses.
This can be evaluated with another IPython extension, the ``memory_profiler``.
As with the ``line_profiler``, we start by ``pip``-installing the extension:
```
$ pip install memory_profiler
```
Then we can use IPython to load the extension: ###Code %load_ext memory_profiler ###Output _____no_output_____ ###Markdown The memory profiler extension contains two useful magic functions: the ``%memit`` magic (which offers a memory-measuring equivalent of ``%timeit``) and the ``%mprun`` function (which offers a memory-measuring equivalent of ``%lprun``).
The ``%memit`` function can be used rather simply: ###Code %memit sum_of_lists(1000000) ###Output peak memory: 106.86 MiB, increment: 28.78 MiB ###Markdown We see that this function uses about 100 MB of memory.
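###Markdown If installing ``memory_profiler`` is not an option, the standard-library ``tracemalloc`` module gives a rough view of the same question. Note that it traces Python-level allocations rather than process memory, so its numbers will not match ``%memit`` exactly; a sketch: ###Code
import tracemalloc

tracemalloc.start()
sum_of_lists(1000000)
current, peak = tracemalloc.get_traced_memory()
tracemalloc.stop()
print(f"peak traced allocations: {peak / 1024**2:.1f} MiB")
###Output _____no_output_____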
For a line-by-line description of memory use, we can use the ``%mprun`` magic.Unfortunately, this magic works only for functions defined in separate modules rather than the notebook itself, so we'll start by using the ``%%file`` magic to create a simple module called ``mprun_demo.py``, which contains our ``sum_of_lists`` function, with one addition that will make our memory profiling results more clear: ###Code %%file mprun_demo.py def sum_of_lists(N): total = 0 for i in range(5): L = [j ^ (j >> i) for j in range(N)] total += sum(L) del L # remove reference to L return total ###Output Overwriting mprun_demo.py ###Markdown We can now import the new version of this function and run the memory line profiler: ###Code from mprun_demo import sum_of_lists %mprun -f sum_of_lists sum_of_lists(1000000) ###Output *** KeyboardInterrupt exception caught in code being profiled.
timit_model_bidir_MFCC-without_silences.ipynb
###Markdown Prepare data ###Code from os.path import join from pathlib import Path import matplotlib.pyplot as plt import numpy as np import torch import torch.nn.functional as F import pytorch_lightning as pl import pandas as pd import seaborn as sns from torch import nn from torch.utils.data import random_split from torch.utils.data import DataLoader from pytorch_lightning.metrics.functional import accuracy, confusion_matrix from pytorch_lightning.callbacks.early_stopping import EarlyStopping from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint from audio_loader.features.raw_audio import WindowedAudio from audio_loader.features.mfcc import WindowedMFCC from audio_loader.ground_truth.timit import TimitGroundTruth from audio_loader.samplers.dynamic_sampler import DynamicSamplerFromGt from audio_loader.dl_frontends.pytorch.fill_ram import get_dataset_dynamic_size ###Output _____no_output_____ ###Markdown Dataloader lightning ###Code class TimitMFCCDataModule(pl.LightningDataModule): def __init__(self, data_dir, batch_size): super().__init__() self.data_dir = data_dir self.batch_size = batch_size def prepare_data(self): self.timit_gt = TimitGroundTruth(self.data_dir, phon_class="phon_class2", with_silences=False) self.mfcc_feature_processor = WindowedMFCC(400, 160, 16000, 13, delta_orders=[1, 2], delta_width=9) self.mfcc_sampler = DynamicSamplerFromGt([self.mfcc_feature_processor], self.timit_gt) self.original_train_dataset, self.collate_func = get_dataset_dynamic_size(self.mfcc_sampler, "train") self.test_dataset, self.collate_func = get_dataset_dynamic_size(self.mfcc_sampler, "test") def setup(self, stage=None): if stage == 'fit' or stage is None: self.val_nb_samples = round(len(self.original_train_dataset)/100) self.train_nb_samples = len(self.original_train_dataset) - self.val_nb_samples self.train_dataset, self.val_dataset = random_split( self.original_train_dataset, [self.train_nb_samples, self.val_nb_samples] ) if stage == 'test' or stage is None: return def train_dataloader(self): print(f"Train number of examples: {len(self.train_dataset)}\n") return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True, collate_fn=self.collate_func, drop_last=True) def val_dataloader(self): #return DataLoader(self.val_dataset, batch_size=self.batch_size, shuffle=False, # collate_fn=self.collate_func, # drop_last=False) return DataLoader(self.test_dataset, self.batch_size , shuffle=False, collate_fn=self.collate_func, drop_last=False) def test_dataloader(self): return DataLoader(self.test_dataset, self.batch_size , shuffle=False, collate_fn=self.collate_func, drop_last=False) # the data mfcc_timit = TimitMFCCDataModule(join(Path.home(), "data/kaggle_TIMIT"), 16) ###Output _____no_output_____ ###Markdown Model definition ###Code class lit_mfcc_model(pl.LightningModule): def __init__(self, feature_size, labels): """Init all parameters. 
feature_size: int size of the expected features for the forward step labels: list Ground truth labels to display in the confusion matrix """ super().__init__() self.feature_size = feature_size self.labels = labels self.layer_1_grus = nn.GRU( feature_size, 550, 5, bidirectional=True, batch_first=True, dropout=0.2 ) self.bn_fwd = nn.BatchNorm1d(550) self.bn_bwd = nn.BatchNorm1d(550) self.layer_2_dense = torch.nn.Linear(1100, 128) self.bn_layer_2 = nn.BatchNorm1d(128) self.act_layer_2 = nn.LeakyReLU(0.1) # in pytorch kaldi it is softmax self.layer_3_dense = torch.nn.Linear(128, 58) def forward(self, x): """Forward of the model over the data.""" batch_size = x.batch_sizes[0] # shape: (num_layers*directions, batch_size, hidden_size?) h_0 = torch.zeros(5*2, batch_size, 550, device="cuda") output, h_n = self.layer_1_grus(x, h_0) fwd_h = h_n.view(5, 2, batch_size, 550)[-1, 0] bwd_h = h_n.view(5, 2, batch_size, 550)[-1, 1] fwd_h = self.bn_fwd(fwd_h.view(batch_size, 550)) bwd_h = self.bn_bwd(bwd_h.view(batch_size, 550)) h = torch.cat((fwd_h, bwd_h), 1) dense1 = self.bn_layer_2(self.act_layer_2(self.layer_2_dense(h))) return self.layer_3_dense(dense1) def configure_optimizers(self): optimizer = torch.optim.Adam(self.parameters(), lr=0.0004) return optimizer def training_step(self, batch, batch_idx): x, y = batch y_hat = self(x.to('cuda')) _, y = torch.stack(y).max(dim=1) loss = F.cross_entropy(y_hat, y) self.log('train_loss', loss) return loss def validation_step(self, batch, batch_idx): x, y = batch y_hat = self(x.to('cuda')) _, target = torch.stack(y).max(dim=1) _, pred = y_hat.max(dim=1) val_loss = F.cross_entropy(y_hat, target) return {'loss': val_loss, 'preds_strat1': pred, 'target': target} def validation_epoch_end(self, outputs): preds = torch.cat([tmp['preds_strat1'] for tmp in outputs]) targets = torch.cat([tmp['target'] for tmp in outputs]) # simple metrics self.log('val_loss', torch.stack([tmp['loss'] for tmp in outputs]).mean()) self.log('val_accuracy', accuracy(preds, targets)) def test_step(self, batch, batch_idx): x, y = batch y_hat = self(x.to('cuda')) _, target = torch.stack(y).max(dim=1) _, pred = y_hat.max(dim=1) test_loss = F.cross_entropy(y_hat, target) return {'loss': test_loss, 'preds_strat1': pred, 'target': target} def test_epoch_end(self, outputs): preds = torch.cat([tmp['preds_strat1'] for tmp in outputs]) targets = torch.cat([tmp['target'] for tmp in outputs]) # simple metrics self.log('test_loss', torch.stack([tmp['loss'] for tmp in outputs]).mean()) self.log('test_accuracy', accuracy(preds, targets)) # confusion matrix num_classes = len(self.labels) cm = confusion_matrix(preds, targets, num_classes=num_classes).cpu().numpy() normalized_cm = np.around(cm/cm.sum(axis=1)[:, None], 3)*100 # normalize by line df_cm = pd.DataFrame(normalized_cm, index=self.labels, columns=self.labels) plt.figure(figsize = (15,12)) fig_ = sns.heatmap(df_cm, annot=True, cmap='Spectral').get_figure() plt.close(fig_) self.logger.experiment.add_figure(f"Test - Confusion matrix", fig_, self.current_epoch) # get labels mfcc_timit.prepare_data() keys_timit = [i for i in range(mfcc_timit.timit_gt.phon_size)] labels = [mfcc_timit.timit_gt.index2phn[i] for i in keys_timit] # init model model = lit_mfcc_model(13*3, labels) # MFCC + delta+ deltas deltas model.to('cuda') ###Output _____no_output_____ ###Markdown Train model ###Code # trainer definition trainer = pl.Trainer( callbacks=[ EarlyStopping(monitor='val_loss', patience=10, mode="min") ], checkpoint_callback=ModelCheckpoint(save_top_k=5, 
monitor="val_loss", mode="min"), progress_bar_refresh_rate=1000, gpus=1, auto_select_gpus=True, precision=16, max_epochs=100 ) trainer.fit(model, mfcc_timit) trainer.test(model, mfcc_timit.test_dataloader()) ###Output _____no_output_____
docs/team-projects/Summer-2021/B1-Team1-US-Traffic-Accident-Analysis.ipynb
###Markdown US Traffic Accident Analysis Team Member: Yinghao Wang, Keshuo Liu, Yu Shu, Zhenyang Gai, Simeng Li, Kratik Gupta Problem Definition: Our goal is to find out the factors that influence the occurrence of the number of accidents happening in the United States. We will take factors such as location, weather, and daytime into consideration and use Tableau to provide geographical related visualization. We will make analysis through a large-scale dataset to increase the accuracy of detecting relationships between factors and accident rate. We aim at providing suggestions to DMV and drivers about safe driving. Data Source Link: 1. "US Traffic Accident": A Countrywide Traffic Accident Dataset (2016 - 2020). https://www.kaggle.com/sobhanmoosavi/us-accidents2. "US Population": United States Census Bureau. https://www.census.gov/ Data Cleaning Process: On Kaggle we found a countrywide car accident dataset, which covers 49 states of the USA. The accident data are collected from February 2016 to Dec 2020, using multiple APIs that provide streaming traffic incident (or event) data. These APIs broadcast traffic data captured by a variety of entities, such as the US and state departments of transportation, law enforcement agencies, traffic cameras, and traffic sensors within the road-networks. Currently, there are about 3 million accident records in this dataset; therefore, datasets need to be cleaned before conducting analysis. With available dataset, we selected records in each year to see how number of accidents changes in 5 years. In the us accident table, each year contains a great number of data. There is more than 3 million accident records. Considering efficiency when performing analysis on datasets and limitations in terms of data size and budget on Google Cloud Platform, 10% of the data in each year is randomly sampled and unioned into one table.Below is an example of random selection process for 2017 datasets: ###Code %%bigquery SELECT * FROM `ba775-project-team1.dataset_demo.us_traffic_accidents` WHERE rand() <= 0.1 AND extract(year from start_time) = 2017 LIMIT 5; ###Output Query complete after 0.00s: 100%|██████████| 2/2 [00:00<00:00, 689.34query/s] Downloading: 100%|██████████| 5/5 [00:01<00:00, 3.29rows/s] ###Markdown After the first step of selecting sample data, some invalid rows were observed. Furthermore, we don't need all 47 columns from the orignal dataset. For example we don't need "country" column since all records happened only in the United States. We also need to exclude some invalid data when the state name is null or weather condition is unknown if we want to process related analysis.The filtering process is presented below: ###Code %%bigquery select ID, Severity, State, Start_Time as Time, EXTRACT(month from Start_Time) Month, EXTRACT(hour FROM Start_Time) Hour, FORMAT_DATE('%A', EXTRACT(date FROM Start_Time)) AS Weekday, Weather_Condition as Weather, Temperature_F_ as Temp, Visibility_mi_ as Visibility, Precipitation_in_ as Preciputation, Railway, Station, Traffic_Signal from `ba775-project-team1.dataset_demo.us_traffic_accidents` where Weather_Condition <> 'nan' and Visibility_mi_ >= 0 and Precipitation_in_ >= 0 LIMIT 5; ###Output Query complete after 0.00s: 100%|██████████| 1/1 [00:00<00:00, 651.59query/s] Downloading: 100%|██████████| 5/5 [00:01<00:00, 3.12rows/s] ###Markdown Note that for all cleaning and filtering processes, a total of around 5000 accidents were filtered, which accounts for 0.17% of our observing dataset. 
With such small amount of data eliminated, we can still produce reliable output since it does not influence the analysis output significantlly. Analysis Topics To achieve our objective and get into project proposal, we chose aspects in time of a day, weekday, location (in State and some particular places) and weather to observe underlying affects on traffic accidents and corresponding severity. All data processing performed on Google Big Query. Location: **Which state has the highest number of traffic accidents happened?** We analyzed the data to find which state of the country has the highest accidents happened. We found out that California is the state where most of accidents occured at, and number of accidents happended in CA is doubled than the second highest state Florida. ###Code %%bigquery SELECT State, count(ID) as num_of_accidents FROM `ba775-project-team1.dataset_demo.sample_table_wpopulation` GROUP BY State ORDER BY num_of_accidents DESC LIMIT 5; ###Output Query complete after 0.00s: 100%|██████████| 1/1 [00:00<00:00, 1001.03query/s] Downloading: 100%|██████████| 5/5 [00:01<00:00, 2.76rows/s] ###Markdown Since state's population varies from each other, the number of private motor vehicle ownership would have an impact on number of accidents records. If we want to factor out state population to normalize the count, and generating new calculation field as accidents per million. California is not the state having the highest traffic accident number. We can see California now has a same accidents_per_million value with Minnesota! ###Code %%bigquery SELECT State, count(ID) as num_of_accidents, population, cast(count(ID)/population*1000000 as INTEGER) accidents_per_million FROM `ba775-project-team1.dataset_demo.sample_table_wpopulation` GROUP BY State, population ORDER BY accidents_per_million DESC limit 5; ###Output Query complete after 0.00s: 100%|██████████| 1/1 [00:00<00:00, 764.27query/s] Downloading: 100%|██████████| 5/5 [00:01<00:00, 3.25rows/s] ###Markdown **Which state is more prone to have more serious traffic accidents?** The distribution changes again when we take level of severity into account. We can observe that several states such as Montana, South and North Dakota do not have level-1 severity reocrds. The number of serious accidents happened in Florida, California and New York is larger than that of other states. ###Code %%bigquery SELECT State, count(ID) as num_of_accidents FROM `ba775-project-team1.dataset_demo.sample_table_wpopulation` WHERE Severity=4 GROUP BY State ORDER BY num_of_accidents DESC LIMIT 5; ###Output Query complete after 0.00s: 100%|██████████| 3/3 [00:00<00:00, 1618.80query/s] Downloading: 100%|██████████| 5/5 [00:01<00:00, 3.07rows/s] ###Markdown **What characteristics were associated with more traffic accidents?** By comparing the number of accidents that happened around the location factors between 2016 and 2020, it is clear that traffic signals were the most frequent spots. Junctions and Crossings were the next. 
###Code %%bigquery SELECT Sum(case when (Amenity=TRUE and Amenity IS NOT NULL) then 1 else 0 end) Amenity, Sum(case when (Bump=TRUE and Bump IS NOT NULL) then 1 else 0 end) Bump, Sum(case when (Crossing=TRUE and Crossing IS NOT NULL) then 1 else 0 end) Crossing, Sum(case when (Give_Way=TRUE and Give_Way IS NOT NULL) then 1 else 0 end) Give_Way, Sum(case when (Junction=TRUE and Junction IS NOT NULL) then 1 else 0 end)Junction, Sum(case when (Railway=TRUE and Railway IS NOT NULL) then 1 else 0 end) Railway, Sum(case when (Roundabout=TRUE and Roundabout IS NOT NULL) then 1 else 0 end) Roundabout, Sum(case when (Station=TRUE and Station IS NOT NULL) then 1 else 0 end) Station, Sum(case when (Stop=TRUE and Stop IS NOT NULL) then 1 else 0 end) Stop, Sum(case when (Traffic_Calming=TRUE and Traffic_Calming IS NOT NULL) then 1 else 0 end) Traffic_Calming, Sum(case when (Traffic_Signal=TRUE and Traffic_Signal IS NOT NULL) then 1 else 0 end) Traffic_Signal, Sum(case when (Turning_Loop=TRUE and Turning_Loop IS NOT NULL) then 1 else 0 end) Turning_Loop from `ba775-project-team1.dataset_demo.sample_table_wpopulation`; ###Output Query complete after 0.00s: 100%|██████████| 2/2 [00:00<00:00, 1038.32query/s] Downloading: 100%|██████████| 1/1 [00:01<00:00, 1.55s/rows] ###Markdown **Since around traffic signals were the most frequent traffic accident spots as shown above, when did traffic accidents occur more often around traffic signals?** From the sampled data, traffic accidents occurred more often around traffic signals during the morning peak and noon break periods. ###Code %%bigquery SELECT Weekday, Hour, Traffic_Signal, ROUND(COUNT(ID)/SUM(COUNT(ID)) OVER(PARTITION BY Weekday, Hour) * 100,2) Percentage FROM `ba775-project-team1.dataset_demo.sample_table_wpopulation` GROUP BY Weekday, Traffic_Signal, Hour ORDER BY Traffic_Signal DESC, Percentage DESC LIMIT 6 ###Output Query complete after 0.00s: 100%|██████████| 6/6 [00:00<00:00, 2810.57query/s] Downloading: 100%|██████████| 6/6 [00:01<00:00, 3.77rows/s] ###Markdown Weather Conditions: In this section, we will discuss the frequency of traffic accidents occurance on different weather conditions. **In which weather condition doest the traffic accident occure more?** The top 5 weather conditions that most of traffic accidents occured are, fair, cloudy, most cloudy, partly cloudy and light rain. These 5 conditions represent over 80% of traffic accidents. Nearly half of the accidents happened in fair days, others were in cloudy, rainy or snowy days. Out of which cloudy occupies the highest portion. ###Code %%bigquery SELECT Weather_Condition, COUNT(ID) as number, ROUND( COUNT(ID)/(SELECT COUNT(ID) FROM `ba775-project-team1.dataset_demo.sample_table` ),6) AS percentage FROM `ba775-project-team1.dataset_demo.sample_table` GROUP BY Weather_Condition ORDER BY number desc LIMIT 10 %%bigquery select SUM(percentage) FROM ( SELECT Weather_Condition, COUNT(ID) as number, ROUND( COUNT(ID)/(SELECT COUNT(ID) FROM `ba775-project-team1.dataset_demo.sample_table` ),6) AS percentage FROM `ba775-project-team1.dataset_demo.sample_table` GROUP BY Weather_Condition ORDER BY number desc LIMIT 5 ) ###Output Query complete after 0.00s: 100%|██████████| 1/1 [00:00<00:00, 969.56query/s] Downloading: 100%|██████████| 1/1 [00:01<00:00, 1.31s/rows] ###Markdown **How severe the traffic accident is under different weather conditions?**Most of the traffic accidents happened in all kinds of weather conditions are in a severity of 2. 
###Code %%bigquery SELECT Severity,Weather_Condition, COUNT(ID) as number, ROUND( COUNT(ID)/(SELECT COUNT(ID) FROM `ba775-project-team1.dataset_demo.sample_table` ),6) AS percentage FROM `ba775-project-team1.dataset_demo.sample_table` GROUP BY Weather_Condition, Severity ORDER BY number DESC LIMIT 10 ###Output Query complete after 0.00s: 100%|██████████| 1/1 [00:00<00:00, 862.14query/s] Downloading: 100%|██████████| 10/10 [00:01<00:00, 7.31rows/s] ###Markdown We put several weather factors into consideration: visibility, precipitation, temperature and wind speed. **Take visibility distance as an factor first, do more accidents happen when visibility distance is shorter(<=5miles)?** ###Code %%bigquery SELECT Month, round(avg(Visibility_mi_),2) Visibility, count(*) num_of_accidents from `ba775-project-team1.dataset_demo.sample_table` GROUP BY Month ORDER BY Month ###Output Query complete after 0.00s: 100%|██████████| 3/3 [00:00<00:00, 1171.70query/s] Downloading: 100%|██████████| 12/12 [00:01<00:00, 7.62rows/s] ###Markdown There is no significant negative relationship between average visibility distance and num of accidents in different months. we can observe that when visibility distance is slightly shorter than 10 miles (very clear view), the number of accidents is higher than that of longer ones. Also, when visibility distance is lower than 8 miles, the portion of severity level 3 and 4 is larger than that of longer visibility distance. To further discover that whether low visibility distance (manually defined as <= 5 miles), we use the following charts: In this way, we can observe a clear and repeated pattern of accidents happened with low visibility distance. It seems that traffic accidents counts tend to be far more in the first and forth quarter of a year, when visibility distance is less than or equal to 5 miles. **Did temperature affect the number of traffic accidents and severity accordingly?** The number of traffic accidents increased at the end of the year when temperatures were low. Level-2 severity accidents accounted for most of these late-year accidents, and the proportions of other level severity decreased. ###Code %%bigquery SELECT Month, Temperature_F_ Temperature, Severity, COUNT(ID) Count FROM `ba775-project-team1.dataset_demo.sample_table_wpopulation` WHERE is_nan(Temperature_F_) = False GROUP BY Month, Severity, Temperature_F_ ORDER BY COUNT(ID) DESC, Severity DESC, Month LIMIT 5 ###Output Query complete after 0.00s: 100%|██████████| 1/1 [00:00<00:00, 956.73query/s] Downloading: 100%|██████████| 5/5 [00:01<00:00, 3.25rows/s] ###Markdown Time: **Does total number of accidents increase year by year?** The number of accidents in the U.S. has been increasing every year since 2016, and the number of traffic accidents happened in 2020 is twice than that happened in 2019. ###Code %%bigquery SELECT Year, COUNT(Year) AS Year_accidents_n FROM `ba775-project-team1.dataset_demo.sample_table_wpopulation` GROUP BY Year ORDER BY Year_accidents_n; ###Output Query complete after 0.00s: 100%|██████████| 3/3 [00:00<00:00, 1593.98query/s] Downloading: 100%|██████████| 5/5 [00:01<00:00, 3.18rows/s] ###Markdown **In which months do more traffic accidents happen? 
Does the pattern vary year from year?** In 2016, the most accidents occurred in December; In 2017, the most accidents in January;In 2018, the most accidents in November; In 2019, the most accidents in October; In 2020, the most accidents in December.This shows that the most accidents occur in the United States in the last quarter of the year. ###Code %%bigquery SELECT Year, SUM(CASE WHEN Month = 1 THEN 1 ELSE 0 END) AS Jan, SUM(CASE WHEN Month = 2 THEN 1 ELSE 0 END) AS Feb, SUM(CASE WHEN Month = 3 THEN 1 ELSE 0 END) AS Mar, SUM(CASE WHEN Month = 4 THEN 1 ELSE 0 END) AS Apr, SUM(CASE WHEN Month = 5 THEN 1 ELSE 0 END) AS May, SUM(CASE WHEN Month = 6 THEN 1 ELSE 0 END) AS Jun, SUM(CASE WHEN Month = 7 THEN 1 ELSE 0 END) AS Jul, SUM(CASE WHEN Month = 8 THEN 1 ELSE 0 END) AS Aug, SUM(CASE WHEN Month = 9 THEN 1 ELSE 0 END) AS Sep, SUM(CASE WHEN Month = 10 THEN 1 ELSE 0 END) AS Oct, SUM(CASE WHEN Month = 11 THEN 1 ELSE 0 END) AS Nov, SUM(CASE WHEN Month = 12 THEN 1 ELSE 0 END) AS Dec FROM `ba775-project-team1.dataset_demo.sample_table_wpopulation` GROUP BY Year ORDER BY Year; ###Output Query complete after 0.00s: 100%|██████████| 1/1 [00:00<00:00, 784.42query/s] Downloading: 100%|██████████| 5/5 [00:02<00:00, 1.82rows/s] ###Markdown **In which hour of a day do more traffic accidents happen? Is number of accidents related to morning and evening traffic peak?** The hours of the day from 7:00 a.m. to 8:00 a.m. and 4:00 p.m. to 5:00 p.m. belong to the time of day when there are more accidents. These time periods are with heavy traffic when people usually go to work and school. The government and police can enforce traffic regulation and road transportation in these specific time periods. ###Code %%bigquery SELECT Hour, Severity, COUNT(ID) AS Hour_accidents_n FROM `ba775-project-team1.dataset_demo.sample_table_wpopulation` GROUP BY Hour, Severity ORDER BY Hour; ###Output _____no_output_____ ###Markdown The severity of the accident was greatest at 5 p.m. The severity of accidents is higher during from 7:00 a.m. to 8:00 a.m. and 4:00 p.m. to 5:00 p.m. **Does the number of traffic accidents relate to day of week: more accidents happen in weekdays rather than weekends? and how is the severity distribution?** The number of accidents on weekends is far more less than that of weekdays. And Most of the traffic accidents with severity level 2 or 3 also occurred on weekdays. ###Code %%bigquery SELECT Weekday, Severity, COUNT(ID) AS Weekday_accidents_n FROM `ba775-project-team1.dataset_demo.sample_table_wpopulation` GROUP BY Weekday, Severity ORDER BY Weekday_accidents_n; ###Output _____no_output_____ ###Markdown Use Bigquery Machine Learning To Predict Level of Severity for Accidents To help the government and drivers to better utilitize our analysis, we have decided to predict the level of severity of traffic accidents with given weather conditions and time factors. **What are the factors that influence the level of severity of traffic accidents?** Now, we are trying to predict the level of severity of traffic accidents given some representative features. We select the sampled dataset with over 1,561 thousand of records as evaluating and training data.Since we want to predict the level of severity of traffic accidents at a given weather condition, at a certain place, and at a certain time of a year using Big Query machine learning, and the level of severity is a multi-class variable. We decided to use a Logistic Regression model with multi class option. 
We consider several factors listed below: Month: 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12Hour: 0-23Visibility: 0-10 milesPrecipitation: 0-24 inchesWind Speed: 0-150 mphTemperature: 0-174 °FRailway: 0(False), 1(True)Station: 0(False), 1(True)Traffic_Signal: 0(False), 1(True) We select **level of severity** to be the dependent variable, and other factors to be the independent ones. Excluding ID because ID is unique for each traffic accident record and this unwillingly influences the overall accuracy of prediction model. ###Code %%bigquery CREATE OR REPLACE MODEL `ba775-project-team1.dataset_demo.model` OPTIONS(model_type='logistic_reg', labels = ['Severity']) AS SELECT * EXCEPT(ID) from `ba775-project-team1.dataset_demo.data_model` ###Output Query complete after 0.00s: 100%|██████████| 3/3 [00:00<00:00, 1613.61query/s] ###Markdown **Evaluation of the logistic regression model**: ###Code %%bigquery SELECT * FROM ML.EVALUATE ( MODEL dataset_demo.model, (SELECT * EXCEPT(ID) FROM `ba775-project-team1.dataset_demo.data_model`) ) ###Output Query complete after 0.00s: 100%|██████████| 9/9 [00:00<00:00, 4928.03query/s] Downloading: 100%|██████████| 1/1 [00:01<00:00, 1.45s/rows] ###Markdown Now, we obtain the bigquery machine model and the evaluation results. The precision is about 0.28 and roc_auc is about 0.69. The higher the roc_auc score is, the better the performance of the model at distinguishing classes. **Using logistic regression model to make predictions on level of severity** (with existing data): ###Code %%bigquery CREATE OR REPLACE TABLE dataset_demo.severity_predictions AS SELECT predicted_Severity, predicted_Severity_probs[OFFSET(0)].prob, Severity, Visibility, Temperature, Precipitation, Wind_speed, Hour, Month, Railway, Traffic_Signal, Station FROM ML.PREDICT ( MODEL `dataset_demo.model`, (SELECT * FROM `ba775-project-team1.dataset_demo.data_model`) ) ORDER BY prob DESC ###Output Query complete after 0.00s: 100%|██████████| 9/9 [00:00<00:00, 5126.12query/s] ###Markdown The table created from the above query returns the result for the prediction of severity concluded by the factors we used as parameters defining the logistic regression, and the resulting table includes the probability of predicted severity to be true based on the selected parameters. The results we got as part of logistic regression is saved in severity_predictions. ###Code %%bigquery SELECT * FROM dataset_demo.severity_predictions LIMIT 10 ###Output Query complete after 0.00s: 100%|██████████| 1/1 [00:00<00:00, 445.30query/s] Downloading: 100%|██████████| 10/10 [00:01<00:00, 7.17rows/s] ###Markdown **Overall accuracy of the predictions on each level of severity**: We determined the accuracy of our predicted results. The logistic regression model returned us a table with a 78% accurately predicted level of severity. The dataset we used was clustered around severity levels of 2 and 3, providing very few data points within level 1 and level 4 severity, which makes it difficult for the training of the model and maybe the reason our model has predicted 0 entries to lie within level 1 and level 4. 
###Code %%bigquery SELECT COUNT(*) total_accidents, COUNTIF(Severity=1) actual_level1, COUNTIF(Severity=2) actual_level2, COUNTIF(Severity=3) actual_level3, COUNTIF(Severity=4) actual_level4, COUNTIF(Severity=1)/COUNT(*)*100 level1_rate_percent, COUNTIF(Severity=2)/COUNT(*)*100 level2_rate_percent, COUNTIF(Severity=3)/COUNT(*)*100 level3_rate_percent, COUNTIF(Severity=4)/COUNT(*)*100 level4_rate_percent, COUNTIF(predicted_Severity=1) predicted_level1, COUNTIF(predicted_Severity=2) predicted_level2, COUNTIF(predicted_Severity=3) predicted_level3, COUNTIF(predicted_Severity=4) predicted_level4, COUNTIF(Severity=1 AND predicted_Severity=1) true_predicted_level1, COUNTIF(Severity=2 AND predicted_Severity=2) true_predicted_level2, COUNTIF(Severity=3 AND predicted_Severity=3) true_predicted_level3, COUNTIF(Severity=4 AND predicted_Severity=4) true_predicted_level4, (COUNTIF(Severity=1 AND predicted_Severity=1) + COUNTIF(Severity=2 AND predicted_Severity=2) + COUNTIF(Severity=3 AND predicted_Severity=3) + COUNTIF(Severity=4 AND predicted_Severity=4)) /(COUNTIF(predicted_Severity=1)+COUNTIF(predicted_Severity=2)+COUNTIF(predicted_Severity=3)+ COUNTIF(predicted_Severity=4))*100 all_levels_rate_percent_predicted FROM `ba775-project-team1.dataset_demo.severity_predictions` ###Output Query complete after 0.00s: 100%|██████████| 2/2 [00:00<00:00, 883.01query/s] Downloading: 100%|██████████| 1/1 [00:01<00:00, 1.40s/rows] ###Markdown **Prediction on level of severity in unknown Future**: We used our model with some manually defined factors to foresee predicted level of severity. The logistic regression model analyzed the results based on the training data and gave us predicted results. We used following parameters:1) Month : March 2) Hour : 12 3) Visibility : 7 4) Precipitation : 0 in 5) Wind_speed : 10 mph 6) Temperature 17 C 7) Railway : False 8) Station : False 9) Traffic_Signal : True ###Code %%bigquery SELECT predicted_Severity, predicted_Severity_probs[OFFSET(0)].prob, # Severity, Visibility, Temperature, Precipitation, Wind_speed, Hour, Month, Traffic_Signal FROM ML.PREDICT ( MODEL `dataset_demo.model`, (SELECT 3 Month, 12 Hour, 7 Visibility, 0 Precipitation, 10 Wind_speed, 17 Temperature, 0 Railway,0 Station, 1 Traffic_Signal) ) ###Output Query complete after 0.00s: 100%|██████████| 7/7 [00:00<00:00, 4117.83query/s] Downloading: 100%|██████████| 1/1 [00:01<00:00, 1.39s/rows] ###Markdown Based on our sql logistic regression model, we predict that in March at 12pm, the level of severity of a traffic accident happens near a traffic signal with specific weather condition factors is 2, and the probability is 83.55%. Use Python Machine Learning To Predict Level of Severity for Accidents **CONCEPTS:**- **Why do we use Python machine learning based on python here?** Previously using sql logistic regression model to make predictions on the level of severity, but we find out that even if the overall model accuracy is 80%, there is NO CORRECT prediction on level 1 and 4.- **By working on machine learning, we can also anwser question below:**Which feature is more closely related to traffic accident? How to predict severity from this combination of important features?- **Why the output model is important?**Make sound driving advices to the situation may lead to serious accident.Help the Traffic Regulatory Bureau to effectively reduce the traffic accident rate. 
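Before switching to Python, one quick way to check the claim that the SQL model never predicts levels 1 and 4 is BigQuery ML's built-in per-class breakdown, `ML.CONFUSION_MATRIX` (a sketch that reuses the model and table names defined earlier): ###Code
%%bigquery
SELECT *
FROM ML.CONFUSION_MATRIX(
  MODEL `dataset_demo.model`,
  (SELECT * EXCEPT(ID) FROM `ba775-project-team1.dataset_demo.data_model`)
)
###Output _____no_output_____ ###Markdown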
STEP 1: **Processing Dataset with SQL** - **4 factors correspond to traffic accident:** Location/Time/Weather/Places- **12 important features:** -- Location: State -- Time: Month/Hour/Weekday -- Weather: Weather/Visibility/Precipitation/Windspead/Temperature -- Places: Crossing/Junction/Traffic_Signal- **For each severity level choose 10000 rows of data. The overall features data are 40000 rows.** ###Code %%bigquery (SELECT ID, Severity, State, extract(month from Start_Time) Month, extract(hour from Start_Time) Hour, FORMAT_DATE('%A', EXTRACT(date FROM Start_Time)) Weekday, Weather_Condition, Visibility_mi_ as Visibility, Precipitation_in_ as Preciputation, Wind_Speed_mph_ as Windspead, Temperature_F_ as Temperature, case when Crossing = True then 1 else 0 end Crossing, case when Junction = True then 1 else 0 end Junction, case when Traffic_Signal = True then 1 else 0 end Traffic_Signal FROM `ba775-project-team1.dataset_demo.us_traffic_accidents` WHERE Visibility_mi_ >=0 and Precipitation_in_>=0 and Temperature_F_>=0 and Wind_Speed_mph_>=0 and Severity = 1 limit 10000) union all (SELECT ID, Severity, State, extract(month from Start_Time) Month, extract(hour from Start_Time) Hour, FORMAT_DATE('%A', EXTRACT(date FROM Start_Time)) Weekday, Weather_Condition, Visibility_mi_ as Visibility, Precipitation_in_ as Preciputation, Wind_Speed_mph_ as Windspead, Temperature_F_ as Temperature, case when Crossing = True then 1 else 0 end Crossing, case when Junction = True then 1 else 0 end Junction, case when Traffic_Signal = True then 1 else 0 end Traffic_Signal FROM `ba775-project-team1.dataset_demo.us_traffic_accidents` WHERE Visibility_mi_ >=0 and Precipitation_in_>=0 and Temperature_F_>=0 and Wind_Speed_mph_>=0 and Severity = 2 limit 10000) union all (SELECT ID, Severity, State, extract(month from Start_Time) Month, extract(hour from Start_Time) Hour, FORMAT_DATE('%A', EXTRACT(date FROM Start_Time)) Weekday, Weather_Condition, Visibility_mi_ as Visibility, Precipitation_in_ as Preciputation, Wind_Speed_mph_ as Windspead, Temperature_F_ as Temperature, case when Crossing = True then 1 else 0 end Crossing, case when Junction = True then 1 else 0 end Junction, case when Traffic_Signal = True then 1 else 0 end Traffic_Signal FROM `ba775-project-team1.dataset_demo.us_traffic_accidents` WHERE Visibility_mi_ >=0 and Precipitation_in_>=0 and Temperature_F_>=0 and Wind_Speed_mph_>=0 and Severity = 3 limit 10000) union all (SELECT ID, Severity, State, extract(month from Start_Time) Month, extract(hour from Start_Time) Hour, FORMAT_DATE('%A', EXTRACT(date FROM Start_Time)) Weekday, Weather_Condition, Visibility_mi_ as Visibility, Precipitation_in_ as Preciputation, Wind_Speed_mph_ as Windspead, Temperature_F_ as Temperature, case when Crossing = True then 1 else 0 end Crossing, case when Junction = True then 1 else 0 end Junction, case when Traffic_Signal = True then 1 else 0 end Traffic_Signal FROM `ba775-project-team1.dataset_demo.us_traffic_accidents` WHERE Visibility_mi_ >=0 and Precipitation_in_>=0 and Temperature_F_>=0 and Wind_Speed_mph_>=0 and Severity = 4 limit 10000) ###Output Query complete after 0.00s: 100%|██████████| 6/6 [00:00<00:00, 2708.04query/s] Downloading: 100%|██████████| 40000/40000 [00:01<00:00, 35451.27rows/s] ###Markdown STEP 2: **Processing Dataset with Python** - **Use python process text type features into ID type features.**- **Use python process ID type features into ONE-HOT encoding features. 
(Why?)** -- The integer values have a natural ordered relationship between each other and machine learning algorithms may be able to understand and harness this relationship. -- A one-hot encoding can avoid the model to assume a natural ordering between categories which may result in poor performance or unexpected results.- **Split all data into 80% train dataset and 20% test dataset** ###Code import pandas as pd import random import numpy as np # col_types = {} features = ["State", "Month", "Hour", "Weekday", "Weather", "Visibility", "Preciputation", "Windspead", "Temperature", "Junction", "Crossing", "Traffic_Signal"] def open_file(filename, mode='r'): return open(filename, mode, encoding='utf-8', errors='ignore') def write_file(filename, content): open_file(filename, mode="w").write(content) # split features data into 80% train dataset and 20% test dataset def random_sample(filename): with open_file(filename) as f_: lines = f_.readlines() random.shuffle(lines) len_test = int(len(lines) * 0.2) lines_test = lines[0:len_test] lines_train = lines[len_test:] train_w = open_file("Untitled Folder/data/ft.train.txt", mode="w") test_w = open_file("Untitled Folder/data/ft.test.txt", mode="w") for i in lines_train: train_w.write(i) for j in lines_test: test_w.write(j) # turn the features into id def feature_to_id(cate_list): cates = list(set(cate_list)) cate_to_id = dict(zip(cates, range(len(cates)))) return cates, cate_to_id def process_all_data(): ft = pd.read_csv('Untitled Folder/data/all_data.txt', sep='\t') ft = ft.drop(["ID"], axis=1) for col_ in features: col_values = ft[col_].values col_values = ["NAN" if pd.isnull(c) else c for c in col_values] c_, word_to_id = feature_to_id(col_values) # a = [word_to_id[cv] for cv in col_values] a = to_categorical([word_to_id[cv] for cv in col_values], len(word_to_id)) ft = pd.concat([ft, pd.DataFrame(a)], axis=1) ft = ft.drop([col_], axis=1) severity = ft.pop('Severity') ft.insert(loc=ft.shape[1], column='severity', value=severity, allow_duplicates=False) print(ft.head()) print(ft.shape[0]) print(ft.shape[1]) ft.to_csv('Untitled Folder/data/ft.all.txt', sep='\t', header=False, index=False) # turn the feature id into one-hot encode def to_categorical(y, num_classes=None): y = np.array(y, dtype='int') input_shape = y.shape if input_shape and input_shape[-1] == 1 and len(input_shape) > 1: input_shape = tuple(input_shape[:-1]) y = y.ravel() if not num_classes: num_classes = np.max(y) + 1 n = y.shape[0] categorical = np.zeros((n, num_classes)) categorical[np.arange(n), y] = 1 output_shape = input_shape + (num_classes,) categorical = np.reshape(categorical, output_shape) return categorical.tolist() process_all_data() random_sample("Untitled Folder/data/ft.all.txt") ###Output 0 1 2 3 4 5 6 7 8 9 ... 272 273 274 0 \ 0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 0.0 0.0 1 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 0.0 0.0 2 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 0.0 1.0 3 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 0.0 1.0 4 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 
0.0 0.0 0.0 1.0 1 0 1 0 1 severity 0 1.0 1.0 0.0 1.0 0.0 2 1 1.0 1.0 0.0 1.0 0.0 2 2 0.0 1.0 0.0 1.0 0.0 2 3 0.0 1.0 0.0 1.0 0.0 2 4 0.0 1.0 0.0 1.0 0.0 2 [5 rows x 666 columns] 40000 666 ###Markdown STEP 3&4: **Training&Testing Models** ###Code import pickle from sklearn import svm, neural_network, linear_model, naive_bayes, neighbors, tree, ensemble, metrics def process_file(filename): conts, labs = [], [] with open_file(filename) as f_: for line in f_: cs = line.strip().split("\t") conts.append(cs[:-1]) labs.append(cs[-1]) print(np.array(conts).shape) return np.array(conts).astype("float"), np.array(labs).astype("int").tolist() def train(train_dir): train_feature, train_target = process_file(train_dir) print(np.array(train_feature).shape) print(np.array(train_target).shape) # train print("training...") model.fit(train_feature, train_target) def test(): test_feature, test_target = process_file("Untitled Folder/data/ft.test.txt") test_predict = model.predict(test_feature) # return predict classification # accuracy true_false = (test_predict == test_target) accuracy = np.count_nonzero(true_false) / float(len(test_target)) print() print("accuracy is %f" % accuracy) # precision recall f1-score print() print(metrics.classification_report(test_target, test_predict)) # Confusion Matrix print("Confusion Matrix...") print(metrics.confusion_matrix(test_target, test_predict)) ###Output _____no_output_____ ###Markdown - Model 1: **Random Forest** ###Code # ramdom forest model = ensemble.RandomForestClassifier() train("Untitled Folder/data/ft.train.txt") # print(model.feature_importances_) # only work for none one-hot and random forest test() ###Output (32000, 665) (32000, 665) (32000,) training... (8000, 665) accuracy is 0.732375 precision recall f1-score support 1 0.85 0.88 0.86 1976 2 0.67 0.79 0.72 2030 3 0.61 0.61 0.61 1984 4 0.82 0.66 0.73 2010 accuracy 0.73 8000 macro avg 0.74 0.73 0.73 8000 weighted avg 0.74 0.73 0.73 8000 Confusion Matrix... [[1729 21 74 152] [ 5 1595 406 24] [ 22 649 1201 112] [ 279 125 272 1334]] ###Markdown - Model 2: **Logistic Regression** ###Code # logistic regression model = linear_model.LogisticRegression(multi_class="multinomial", solver="lbfgs") train("Untitled Folder/data/ft.train.txt") test() ###Output (32000, 665) (32000, 665) (32000,) training... ###Markdown - Model 3: **Support Vector Machine (SVM)** ###Code # SVM model = svm.LinearSVC() train("Untitled Folder/data/ft.train.txt") test() ###Output (32000, 665) (32000, 665) (32000,) training... (8000, 665) accuracy is 0.699500 precision recall f1-score support 1 0.81 0.85 0.83 1976 2 0.65 0.78 0.71 2030 3 0.56 0.60 0.58 1984 4 0.83 0.57 0.67 2010 accuracy 0.70 8000 macro avg 0.71 0.70 0.70 8000 weighted avg 0.71 0.70 0.70 8000 Confusion Matrix... [[1675 27 125 149] [ 3 1586 434 7] [ 35 681 1197 71] [ 353 156 363 1138]] ###Markdown - Model 4: **Neural Network** ###Code # neural network model = neural_network.MLPClassifier(hidden_layer_sizes=(2048, 512), verbose=True, early_stopping=True) train("Untitled Folder/data/ft.train.txt") test() ###Output (32000, 665) (32000, 665) (32000,) training... 
Iteration 1, loss = 0.74865711 Validation score: 0.697187 Iteration 2, loss = 0.60890580 Validation score: 0.698750 Iteration 3, loss = 0.52429944 Validation score: 0.702187 Iteration 4, loss = 0.42935016 Validation score: 0.699063 Iteration 5, loss = 0.32662756 Validation score: 0.694688 Iteration 6, loss = 0.24469872 Validation score: 0.688438 Iteration 7, loss = 0.17598556 Validation score: 0.696250 Iteration 8, loss = 0.13310566 Validation score: 0.670000 Iteration 9, loss = 0.09692953 Validation score: 0.699063 Iteration 10, loss = 0.07682298 Validation score: 0.682187 Iteration 11, loss = 0.06248188 Validation score: 0.688438 Iteration 12, loss = 0.04879386 Validation score: 0.692187 Iteration 13, loss = 0.04217029 Validation score: 0.695000 Iteration 14, loss = 0.03824850 Validation score: 0.686875 Validation score did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. (8000, 665) accuracy is 0.704875 precision recall f1-score support 1 0.82 0.85 0.83 1976 2 0.67 0.74 0.71 2030 3 0.58 0.59 0.58 1984 4 0.77 0.64 0.70 2010 accuracy 0.70 8000 macro avg 0.71 0.71 0.70 8000 weighted avg 0.71 0.70 0.70 8000 Confusion Matrix... [[1680 20 84 192] [ 24 1508 459 39] [ 47 609 1164 164] [ 301 105 317 1287]]
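###Markdown The `import pickle` at the top of the STEP 3&4 cell is never used above; a minimal persistence sketch (the file name is arbitrary, and `model` holds whichever of the four classifiers was fitted last): ###Code
# sketch: save the last fitted classifier to disk and reload it later
with open("severity_model.pkl", "wb") as f_out:
    pickle.dump(model, f_out)

with open("severity_model.pkl", "rb") as f_in:
    loaded_model = pickle.load(f_in)
# loaded_model.predict(...) can then score new one-hot encoded feature rows
###Output _____no_output_____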
VSOA.ipynb
###Markdown Importing Gurobi ###Code from gurobipy import* model = Model("VSOA") ###Output Academic license - for non-commercial use only ###Markdown Sets ###Code I = {1} J = {1,2,3,4} R = {0,1,2,3} ###Output _____no_output_____ ###Markdown Parameter final weight of supplier j ###Code J,w = multidict({1:0.2273, 2:0.2274, 3:0.2474, 4:0.3049}) ###Output _____no_output_____ ###Markdown defective rate of item i offered by supplier j ###Code J,q = multidict({1:0.08, 2:0.04, 3:0.03, 4:0.01}) ###Output _____no_output_____ ###Markdown on-time delivery rate of item i offered by supplier j ###Code J,t = multidict({1:0.83, 2:0.83, 3:0.95, 4:0.86}) ###Output _____no_output_____ ###Markdown maximum supply capacity of item i offered by supplier j ###Code J,C = multidict({1:400, 2:700, 3:600, 4:500}) ###Output _____no_output_____ ###Markdown unit price of item i quoted by supplier j ###Code J,p = multidict({1:411, 2:555, 3:629, 4:728}) ###Output _____no_output_____ ###Markdown total demand of item i ###Code I,D = multidict({1:800}) ###Output _____no_output_____ ###Markdown buyer's maximum acceptable defective rate of item i ###Code I,Q = multidict({1:0.02}) ###Output _____no_output_____ ###Markdown buyer's minimum acceptable on-time delivery rate of item i ###Code I,T = multidict({1:0.9}) ###Output _____no_output_____ ###Markdown discount coefficient associated with interval r of supplier j's discount schedule ###Code d = {(1,1):0, (1,2):0.1, (1,3):0.2, (2,1):0, (2,2):0.1, (2,3):0.2, (3,1):0, (3,2):0.1, (3,3):0.2, (4,1):0, (4,2):0.1, (4,3):0.2} ###Output _____no_output_____ ###Markdown upper limit in interval r of supplier j's discount schedule ###Code b = {(1,0):0, (1,1):10000, (1,2):20000, (1,3):10000000, (2,0):0, (2,1):10000, (2,2):20000, (2,3):10000000, (3,0):0, (3,1):10000, (3,2):20000, (3,3):10000000, (4,0):0, (4,1):10000, (4,2):20000, (4,3):10000000} ###Output _____no_output_____ ###Markdown Decision Variable Binary integer variable, equal to 1 if business volume purchased from supplier j falls on the discount interval r ###Code y = {} for j in J: for r in R: y[j,r] = model.addVar(vtype="b", name="y(%s,%s)"%(j,r)) ###Output _____no_output_____ ###Markdown Units of item i to purchase from supplier j ###Code x = {} for i in I: for j in J: x[i,j] = model.addVar(lb=0, vtype="c", name="x(%s,%s)"%(i,j)) ###Output _____no_output_____ ###Markdown Business volume purchased from supplier j in discount interval r ###Code v = {} for j in J: for r in R: v[j,r] = model.addVar(vtype="c", name="v(%s,%s)"%(j,r)) ###Output _____no_output_____ ###Markdown Objective Function Maximizing total weighted quantity of purchasing ###Code model.setObjectiveN(-quicksum(w[j]*x[i,j] for i,j in x),0) ###Output _____no_output_____ ###Markdown Minimize the total purchase cost ###Code model.setObjectiveN(quicksum((1-d[j,r])*v[j,r] for j in J for r in range(1,4)),1) ###Output _____no_output_____ ###Markdown Minimize the number of defective items ###Code model.setObjectiveN(quicksum(q[j]*x[i,j] for i,j in x),2) ###Output _____no_output_____ ###Markdown Maximizing the number of items delivered on time ###Code model.setObjectiveN(-quicksum(t[j]*x[i,j] for i,j in x),3) model.ModelSense = GRB.MINIMIZE ###Output _____no_output_____ ###Markdown Constraints ###Code model.addConstr(quicksum(v[j,r] for r in range(1,4) for j in J) == quicksum(p[j]*x[1,j] for j in J)) ###Output _____no_output_____ ###Markdown Capacity constraint ###Code for j in J: for i in I: model.addConstr(x[i,j] <= C[j]) ###Output _____no_output_____ ###Markdown Discount 
constraint ###Code for j in J: model.addConstr(quicksum(y[j,r] for r in range(1,4)) <= 1) for j in J: for r in range(1,4): model.addConstr(b[j,r-1]*y[j,r] <= v[j,r]) for j in J: for r in range(1,4): model.addConstr(v[j,r] <= (b[j,r]-1)*y[j,r]) ###Output _____no_output_____ ###Markdown Demand constraint ###Code for i in I: model.addConstr(quicksum(x[i,j] for j in J) == D[i]) ###Output _____no_output_____ ###Markdown Quality constraint ###Code for i in I: model.addConstr(quicksum(x[i,j]*q[j] for j in J) <= D[i]*Q[i]) ###Output _____no_output_____ ###Markdown Delivery constraint ###Code for i in I: model.addConstr(quicksum((1-t[j])*x[i,j] for j in J) <= (1-T[i])*D[i]) ###Output _____no_output_____ ###Markdown Results ###Code model.optimize() ###Output Optimize a model with 36 rows, 36 columns and 88 nonzeros Variable types: 20 continuous, 16 integer (16 binary) Coefficient statistics: Matrix range [1e-02, 1e+07] Objective range [1e-02, 1e+00] Bounds range [1e+00, 1e+00] RHS range [1e+00, 8e+02] --------------------------------------------------------------------------- Multi-objectives: starting optimization with 4 objectives (1 combined) ... --------------------------------------------------------------------------- --------------------------------------------------------------------------- Multi-objectives: optimize objective 1 (weighted) ... --------------------------------------------------------------------------- Optimize a model with 36 rows, 36 columns and 88 nonzeros Variable types: 20 continuous, 16 integer (16 binary) Coefficient statistics: Matrix range [1e-02, 1e+07] Objective range [8e-01, 1e+00] Bounds range [1e+00, 1e+00] RHS range [1e+00, 8e+02] Presolve removed 13 rows and 13 columns Presolve time: 0.20s Presolved: 23 rows, 23 columns, 65 nonzeros Variable types: 15 continuous, 8 integer (8 binary) Root relaxation: objective 4.328397e+05, 4 iterations, 0.05 seconds Nodes | Current Node | Objective Bounds | Work Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time * 0 0 0 432839.71636 432839.716 0.00% - 0s Explored 0 nodes (4 simplex iterations) in 0.48 seconds Thread count was 4 (of 4 available processors) Solution count 1: 432840 No other solutions better than 432840 Optimal solution found (tolerance 1.00e-04) Best objective 4.328397163636e+05, best bound 4.328397163636e+05, gap 0.0000% --------------------------------------------------------------------------- Multi-objectives: solved in 0.52 seconds, solution count 1 ###Markdown To ensure that the status is optimal ###Code assert model.Status == GRB.Status.OPTIMAL ###Output _____no_output_____ ###Markdown Creating a query number of multiple objectives and number of solutions ###Code nSolutions = model.SolCount nObjectives = model.NumObj print('Problem has', nObjectives, 'objectives') print('Gurobi found', nSolutions, 'solutions') ###Output Problem has 4 objectives Gurobi found 1 solutions ###Markdown Value for each objective function ###Code solutions = [] #for s in range(nSolutions): # model.params.SolutionNumber = s # print('Solution', s, ':', end='') for o in range(nObjectives): model.params.ObjNumber = o print('Objective value ',o+1, ': ', model.ObjNVal, end='') solutions.append(model.getAttr('Xn',x)) solutions EPS = 0.00000001 for (i,j) in x: if x[i,j].X > EPS: print("Item %1s bought from supplier %1s is: %3s"%(i,j, x[i,j].X)) ###Output Item 1 bought from supplier 2 is: 24.2424242424242 Item 1 bought from supplier 3 is: 363.63636363636374 Item 1 bought from supplier 4 is: 412.12121212121207
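###Markdown Each objective above is passed to `setObjectiveN` with only an expression and an index, so Gurobi blends the four objectives with its default weights (the log reports "4 objectives (1 combined)"). A sketch of how an explicit trade-off could be stated instead, assuming gurobipy's documented `priority`/`weight`/`name` arguments: ###Code
# sketch: make total purchase cost the dominant objective (a higher priority wins
# in Gurobi's hierarchical mode), keeping the weighted-quantity objective secondary
model.setObjectiveN(quicksum((1-d[j,r])*v[j,r] for j in J for r in range(1,4)),
                    1, priority=2, weight=1.0, name="purchase_cost")
model.setObjectiveN(-quicksum(w[j]*x[i,j] for i,j in x),
                    0, priority=1, weight=1.0, name="weighted_quantity")
# the defective-rate and on-time objectives (indices 2 and 3) would be set the same way
model.optimize()
###Output _____no_output_____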
Algorithms - Bubble Sort, Selection Sort, Insertion Sort, Merge Sort.ipynb
###Markdown Bubble SortBubble sort is a very simple sorting algorithm with $O(n^2)$ complexity. It relies on doing multiple passes through a list, each time flipping adjacent elements if they are out of order. The passes become shorter and shorter because the end of the array is assured to be sorted. The algorithm can be stopped when there was a pass that had no exchanges. This is sometimes called "short bubble sort". ###Code from typing import List def make_test(): return [1,2,5,2,20,7,2,9,-3,7] def bubbleSort(nums: List[int]) -> List[int]: """bubble sort Algorithm""" comparisons = 0 insertions = 0 for pass_length in range(len(nums)-1,0,-1): exchanges = False for i in range(pass_length): if nums[i] > nums[i+1]: temp = nums[i] nums[i] = nums[i+1] nums[i+1] = temp insertions += 1 exchanges = True else: pass comparisons += 1 print(nums) if not exchanges: break print("\ninput length:",len(nums),"\nnumber of comparisons:",comparisons,"\nnumber of insertions:",insertions,"\n") return nums print("before:",make_test(),"\n") print("after:",bubbleSort(make_test())) ###Output before: [1, 2, 5, 2, 20, 7, 2, 9, -3, 7] [1, 2, 5, 2, 20, 7, 2, 9, -3, 7] [1, 2, 5, 2, 20, 7, 2, 9, -3, 7] [1, 2, 2, 5, 20, 7, 2, 9, -3, 7] [1, 2, 2, 5, 20, 7, 2, 9, -3, 7] [1, 2, 2, 5, 7, 20, 2, 9, -3, 7] [1, 2, 2, 5, 7, 2, 20, 9, -3, 7] [1, 2, 2, 5, 7, 2, 9, 20, -3, 7] [1, 2, 2, 5, 7, 2, 9, -3, 20, 7] [1, 2, 2, 5, 7, 2, 9, -3, 7, 20] [1, 2, 2, 5, 7, 2, 9, -3, 7, 20] [1, 2, 2, 5, 7, 2, 9, -3, 7, 20] [1, 2, 2, 5, 7, 2, 9, -3, 7, 20] [1, 2, 2, 5, 7, 2, 9, -3, 7, 20] [1, 2, 2, 5, 2, 7, 9, -3, 7, 20] [1, 2, 2, 5, 2, 7, 9, -3, 7, 20] [1, 2, 2, 5, 2, 7, -3, 9, 7, 20] [1, 2, 2, 5, 2, 7, -3, 7, 9, 20] [1, 2, 2, 5, 2, 7, -3, 7, 9, 20] [1, 2, 2, 5, 2, 7, -3, 7, 9, 20] [1, 2, 2, 5, 2, 7, -3, 7, 9, 20] [1, 2, 2, 2, 5, 7, -3, 7, 9, 20] [1, 2, 2, 2, 5, 7, -3, 7, 9, 20] [1, 2, 2, 2, 5, -3, 7, 7, 9, 20] [1, 2, 2, 2, 5, -3, 7, 7, 9, 20] [1, 2, 2, 2, 5, -3, 7, 7, 9, 20] [1, 2, 2, 2, 5, -3, 7, 7, 9, 20] [1, 2, 2, 2, 5, -3, 7, 7, 9, 20] [1, 2, 2, 2, 5, -3, 7, 7, 9, 20] [1, 2, 2, 2, -3, 5, 7, 7, 9, 20] [1, 2, 2, 2, -3, 5, 7, 7, 9, 20] [1, 2, 2, 2, -3, 5, 7, 7, 9, 20] [1, 2, 2, 2, -3, 5, 7, 7, 9, 20] [1, 2, 2, 2, -3, 5, 7, 7, 9, 20] [1, 2, 2, -3, 2, 5, 7, 7, 9, 20] [1, 2, 2, -3, 2, 5, 7, 7, 9, 20] [1, 2, 2, -3, 2, 5, 7, 7, 9, 20] [1, 2, 2, -3, 2, 5, 7, 7, 9, 20] [1, 2, -3, 2, 2, 5, 7, 7, 9, 20] [1, 2, -3, 2, 2, 5, 7, 7, 9, 20] [1, 2, -3, 2, 2, 5, 7, 7, 9, 20] [1, -3, 2, 2, 2, 5, 7, 7, 9, 20] [1, -3, 2, 2, 2, 5, 7, 7, 9, 20] [-3, 1, 2, 2, 2, 5, 7, 7, 9, 20] [-3, 1, 2, 2, 2, 5, 7, 7, 9, 20] [-3, 1, 2, 2, 2, 5, 7, 7, 9, 20] input length: 10 number of comparisons: 45 number of insertions: 16 after: [-3, 1, 2, 2, 2, 5, 7, 7, 9, 20] ###Markdown Selection SortSelection sort improves on bubble sort by only making a single comparison during each pass. It does so by looking for the maximum value on each pass, and placing it in the appropriate location in the array. The compliexity is still $O(n^2)$. 
###Code def selectionSort(nums: List[int]) -> List[int]: """selection sort""" comparisons = 0 insertions = 0 for pass_length in range(len(nums),0,-1): max_index = 0 for index in range(pass_length): if nums[index]>nums[max_index]: max_index = index else: pass comparisons+=1 x = nums[index] nums[index] = nums[max_index] nums[max_index] = x insertions += 1 print(nums) print("\ninput length:",len(nums),"\nnumber of comparisons:",comparisons,"\nnumber of insertions:",insertions,"\n") return nums print("before:",make_test(),"\n") print("after:",selectionSort(make_test())) ###Output before: [1, 2, 5, 2, 20, 7, 2, 9, -3, 7] [1, 2, 5, 2, 7, 7, 2, 9, -3, 20] [1, 2, 5, 2, 7, 7, 2, -3, 9, 20] [1, 2, 5, 2, -3, 7, 2, 7, 9, 20] [1, 2, 5, 2, -3, 2, 7, 7, 9, 20] [1, 2, 2, 2, -3, 5, 7, 7, 9, 20] [1, -3, 2, 2, 2, 5, 7, 7, 9, 20] [1, -3, 2, 2, 2, 5, 7, 7, 9, 20] [1, -3, 2, 2, 2, 5, 7, 7, 9, 20] [-3, 1, 2, 2, 2, 5, 7, 7, 9, 20] [-3, 1, 2, 2, 2, 5, 7, 7, 9, 20] input length: 10 number of comparisons: 55 number of insertions: 10 after: [-3, 1, 2, 2, 2, 5, 7, 7, 9, 20] ###Markdown Insertion Sort Insertion sort gets by with even fewer comparisons, but still has complexity $O(n^2)$. It works by moving larger elements backwards in the list until they are in their right place. It is called insertion sort because this is the same as creating a sorted list of increasing size at the beginning of the array, which elements from the unsorted segment of the array are inserted to. ###Code def insertionSort(nums: List[int]) -> List[int]: """insertion sort""" comparisons = 0 insertions = 0 for index in range(1,len(nums)): current_value = nums[index] position = index while position > 0 and nums[position-1] > current_value: nums[position] = nums[position-1] position -= 1 comparisons += 1 nums[position] = current_value insertions += 1 print(nums) print("\ninput length:",len(nums),"\nnumber of comparisons:",comparisons,"\nnumber of insertions:",insertions,"\n") return nums print("before:",make_test(),"\n") print("after:",insertionSort(make_test())) ###Output before: [1, 2, 5, 2, 20, 7, 2, 9, -3, 7] [1, 2, 5, 2, 20, 7, 2, 9, -3, 7] [1, 2, 5, 2, 20, 7, 2, 9, -3, 7] [1, 2, 2, 5, 20, 7, 2, 9, -3, 7] [1, 2, 2, 5, 20, 7, 2, 9, -3, 7] [1, 2, 2, 5, 7, 20, 2, 9, -3, 7] [1, 2, 2, 2, 5, 7, 20, 9, -3, 7] [1, 2, 2, 2, 5, 7, 9, 20, -3, 7] [-3, 1, 2, 2, 2, 5, 7, 9, 20, 7] [-3, 1, 2, 2, 2, 5, 7, 7, 9, 20] input length: 10 number of comparisons: 16 number of insertions: 9 after: [-3, 1, 2, 2, 2, 5, 7, 7, 9, 20] ###Markdown Further improvements: Shell Sort, which has complexity between $O(n)$ and $O(n^2)$ and uses insertion sort. Merge SortMerge sort can do it in $O(nlog(n))$! It relies on recursive divide and conquer: in the first step, the list is split into sublists, which are sorted. In the second step, the sorted sublists are merged in a way so that they remain sorted. The merging of sorted arrays takes $n$ comparisons, and this operation has to be done $log(n)$ times because of the partition into $L = 2^n \rightarrow n = log(L)/log(2)$ subarrays. ###Code def mergeSortedArrays(a,b): sortedArray = [] i,j = 0,0 while i < len(a) and j < len(b): if a[i] < b[j]: sortedArray.append(a[i]) i+=1 else: sortedArray.append(b[j]) j+=1 # append the leftovers (one of those will be empty.) 
if len(a[i:]) == 0 and len(b[j:]) != 0: sortedArray += b[j:] elif len(b[j:]) == 0 and len(a[i:]) != 0: sortedArray += a[i:] else: assert False return sortedArray def mergeSort(nums: List[int]) -> List[int]: if len(nums) == 1: return nums # already sorted else: middle_index = len(nums)//2 left = mergeSort(nums[:middle_index]) right = mergeSort(nums[middle_index:]) return mergeSortedArrays(left,right) print("before:",make_test(),"\n") print("after:",mergeSort(make_test())) ###Output before: [1, 2, 5, 2, 20, 7, 2, 9, -3, 7] after: [-3, 1, 2, 2, 2, 5, 7, 7, 9, 20] ###Markdown On a related note, a D.E. Shaw interview question: how to merge two already sorted arrays _in place_ so that the result is sorted. ###Code array1 = list(range(0,20)) array2 = list(range(0,25)) print(array1) print(array2) p1 = 0 p2 = 0 while p1 < len(array1) and p2 < len(array2): if array1[p1] < array2[p2]: p1+=1 else: x = array1[-1] array1[p1+1:] = array1[p1:-1] array1[p1] = array2[p2] array2[:p2+1] = [x]+array2[:p2] p2+=1 #print(p1,p2) #print(array1,array2) p1 = 0 while p1 < p2: if array2[p1] < array2[p2]: p1+=1 else: x = array2[p2] array2[p1+1:p2+1] = array2[p1:p2] array2[p1] = x p2+=1 #print(p1,p2) #print(array1,array2) print(array1 == sorted(array1)) print(array2 == sorted(array2)) print(array1) print(array2) ###Output [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] True True [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9] [10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 21, 22, 23, 24]
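###Markdown A quick sanity check comparing all four routines against Python's built-in `sorted` (a sketch; the bubble, selection and insertion implementations above print every pass, so the trials are kept small): ###Code
import random

def check_sorts(trials=3, size=12):
    """Compare the sorting functions defined above against sorted()."""
    for _ in range(trials):
        data = [random.randint(-50, 50) for _ in range(size)]
        expected = sorted(data)
        # each function gets its own copy because the first three sort in place
        assert mergeSort(list(data)) == expected
        assert bubbleSort(list(data)) == expected
        assert selectionSort(list(data)) == expected
        assert insertionSort(list(data)) == expected
    return "all four sorts agree with sorted()"

check_sorts()
###Output _____no_output_____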
survivor_league_optimise.ipynb
###Markdown TaskOptimise team picks for footballsurvivor.co.uk competiton. Game rules: pick a Premier League team each week that you expect to win their match. Start the season with 10 lives :* If team wins: drop 0 lives* If team draws: drop 1 life* If team loses: drop 2 lives**You can only pick each team once during the course of the season**If you run out of lives, you lose. If you survive, the winner is the person with the most remaining lives. ApproachApply linear sum assignment (a.k.a Hungarian algorithm) to select n teams over k weeks, such that a single team is assigned to exactly one week.The assignment will be made based on maximizing some value. In this case we will use a team's weekly win probability calculated by fivethirtyeight.com Standalone python script: `survivor_league_optimise.py` ###Code import json import survivor_league_optimise as optimise %matplotlib inline %load_ext autoreload %autoreload 2 ###Output _____no_output_____ ###Markdown [1] fivethirtyeight.com dataThe first step is to download and parse data from fivethirtyeight.com (which we will refer to as **fte**) to get a modelled probability of a team winning a match against a specific opponent, based on their recent performances (and other factors). The sum of these probabilities will be the value we are trying to maximize when we make our team picks.The **fte** data in nicely packaged in a json api which we will download and parse into a dataframe. ![title](fivethirtyeight.PNG) ###Code df_fte = optimise.get_fte_data(year=2017) df_fte.head() ###Output _____no_output_____ ###Markdown [2] footballsurvivor.co.uk dataNow that we have the probability of a team winning a particular match, we need to know which "gameweek"   that match belongs to, as our aim is to pick a single team for each gameweek. A gameweek is defined as a round of matches in which every team plays exactly once. To get this data we need to scrape footballsurvivor.co.uk (**fs**), which is slightly more convoluted as we first need to login to the website, whilst the data itself is embedded in the html.We create a `requests` session object to persist our authentication parameters after logging in, then use xpath to extract the gameweek definitions from the html.We will also return a boolean indicating whether a team was picked in that gameweek. At the start of the season this will be False for all records, but will be useful later on if we want to recalculate our picks, as we can easily exclude teams which have already been picked.![fs2](footballsurvivor.PNG) ###Code with open('fs_credentials.json', 'rb') as fs_cred_file: fs_credentials = json.load(fs_cred_file) # login to site fs_session = optimise.fs_session_login(fs_credentials) # get/parse html fs_league_url = 'https://footballsurvivor.co.uk/leagues/geo_punters_winner_takes_all/entries/70392/fixtures' df_fs = optimise.get_fs_data(fs_session, fs_league_url) df_fs.head() ###Output _____no_output_____ ###Markdown [3] Merging dataframes We now have a dataframe for each data source, so the next step is to merge them into a single dataframe. Whilst each dataframe has the same record unique "key" ('team', 'opp', 'loc'), the format of the team names for **fte** data is different to **fs**.We will firstly map the **fs** team names to their **fte** equivalents (as they are less verbose), then merge the dataframes. 
###Code df_merged = optimise.merge_fs_fte_data(df_fs, df_fte) df_merged.head() ###Output _____no_output_____ ###Markdown [4] Reshape to probability matrix Currently we have a long-form dataframe with one row per team/gameweek combination. We need to reshape it into a wide "matrix" with one row per gameweek and one column per team as this is the format required by the the `scipy` function we use to optimise our picks. The values of the matrix will be the win probability (p_win) of that team/gameweek combination. ###Code df_prob_matrix = optimise.get_probability_matrix(df_merged) df_prob_matrix.iloc[:5,:5] ###Output _____no_output_____ ###Markdown [5] Optimise picksThe final step is to apply `scipy.linear_sum_assigment` to our matrix which selects exactly one element from each row and each column, such that the sum of the selected elements are maximized.To recap, the footballsurvivor competition requires us to pick a single team for each gamweek, without repeating any team. As better teams will consistently have a higher win probability across all gameweeks, one approach could be to pick teams in order of probability, with the most likely winners picked first. However, once we have exhausted the all stronger picks, we will have fewer gameweeks left in which to try and pick a winning match for the weaker teams.The motivation for our chosen approach is that by trying to maximize the probability of winning for all teams, we will (on average) have a better chance of picking a winner each time. ###Code df_picks = optimise.optimise_picks(df_prob_matrix, value_label='p_win') df_picks.head() ###Output _____no_output_____ ###Markdown We now have out optimised picks!Plotting the entire probability matrix, with our selected picks highlighted ###Code optimise.plot_picks_heatmap(df_prob_matrix, df_picks, plot_size=(11,7)) ###Output _____no_output_____ ###Markdown Notice that some teams have not been picked at all, this is because the footballsurvivor league used in this example was started towards the end of the season, and so their are not enough gameweeks left to require all team to be picked.This is not a problem as the algorithm automatically handles a non-square matrix by only picking the teams which maximize the objective (sum of probabilities). Calculating the average probability for our optimised picks: ###Code df_picks.p_win.mean() ###Output _____no_output_____
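###Markdown For intuition, a toy version of the assignment step (a standalone sketch, assuming `numpy` and `scipy` are installed): `linear_sum_assignment` minimizes total cost, so the win probabilities are negated to turn pick selection into a minimization. ###Code
import numpy as np
from scipy.optimize import linear_sum_assignment

# toy matrix: rows are gameweeks, columns are teams, values are win probabilities
p_win_toy = np.array([
    [0.80, 0.55, 0.40],
    [0.30, 0.75, 0.60],
    [0.50, 0.45, 0.90],
])

# negate because linear_sum_assignment minimizes the summed cost
week_idx, team_idx = linear_sum_assignment(-p_win_toy)
for week, team in zip(week_idx, team_idx):
    print(f"gameweek {week}: pick team {team} (p_win={p_win_toy[week, team]:.2f})")
print("expected number of wins:", p_win_toy[week_idx, team_idx].sum())
###Output _____no_output_____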
notebooks/docs/notebooks/1-1-evaluating-pretrained-models.ipynb
###Markdown Environments ###Code import torch torch.__version__ torch.cuda.is_available() ###Output _____no_output_____
###Markdown Download the pretrained `en-fr` translation model: ```curl https://dl.fbaipublicfiles.com/fairseq/models/wmt14.v2.en-fr.fconv-py.tar.bz2 | tar xvjf -``` ###Code
%%bash tree pretrained_models/wmt14.v2.en-fr.fconv-py ###Output pretrained_models/wmt14.v2.en-fr.fconv-py ├── README.md ├── bpecodes ├── dict.en.txt ├── dict.fr.txt └── model.pt 0 directories, 5 files
###Markdown `win32` issue ```import win32file``` > DLL load fails. The fix is: ```pip install pywin32==228 --force-reinstall``` `fairseq-interactive` for `en-fr` ###Code
%%sh MODEL_DIR=pretrained_models/wmt14.v2.en-fr.fconv-py fairseq-interactive \ --path $MODEL_DIR/model.pt $MODEL_DIR \ --beam 5 --source-lang en --target-lang fr \ --tokenizer moses \ --bpe subword_nmt --bpe-codes $MODEL_DIR/bpecodes ###Output _____no_output_____
###Markdown Train the 1st model ###Code !pip install jupytext ###Output _____no_output_____
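###Markdown As an aside, fairseq also exposes pretrained translation models through `torch.hub`, which can be handier than the `fairseq-interactive` CLI for quick checks inside a notebook. The sketch below is an assumption based on fairseq's documented hub examples — the hub id `conv.wmt14.en-fr` and its keyword arguments are not taken from this notebook — and it needs fairseq installed plus a network connection for the first download. ###Code
# Hypothetical alternative to the CLI above: load a pretrained en-fr model
# via torch.hub. The hub id and keyword arguments are assumptions based on
# fairseq's documentation; the model is downloaded on first use.
import torch

en2fr = torch.hub.load(
    'pytorch/fairseq', 'conv.wmt14.en-fr',
    tokenizer='moses', bpe='subword_nmt',
)
en2fr.eval()
print(en2fr.translate('Hello world!'))
###Output _____no_output_____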
notebooks/testing_RF_contact400energy-PCA.ipynb
###Markdown mismo modelo, ahora con smote ###Code # forma 1 df_final['class'] = np.where((df_final['ddG_exp'] < -0.4) | (df_final['ddG_exp'] > 1),0,1) # forma 2 #df_final['class'] = np.where((df_final['ddG_exp'] < -0.4),1,0) print("Before OverSampling, counts of label '1': {}".format(sum(df_final['class'] == 1))) print("Before OverSampling, counts of label '0': {}".format(sum(df_final['class'] == 0))) df_final.loc[df_final['class'] == 0]['ddG_exp'].hist() X = df_final.drop('class',axis=1) y = df_final['class'] X_train, X_test, y_train, y_test = train_test_split(X, y,stratify = y,train_size=0.8,random_state=1212) #smote = ovs.SMOTE(random_state=1212,m_neighbors=10) smote = ovs.BorderlineSMOTE(random_state=1212,m_neighbors=12,kind='borderline-1') #adasyn = ovs.ADASYN(random_state=1212,n_neighbors=10) #X_train_re , y_train_re = adasyn.fit_sample(X_train,y_train) X_train_re , y_train_re = smote.fit_sample(X_train,y_train) # back to originalk shape and target X_train_normal = X_train_re[:,:-1] y_train_normal = X_train_re[:,-5] X_test_normal = X_test.iloc[:,:-1] y_test_normal = X_test.iloc[:,-5] X_train_re[:,-5] model = grid1.best_estimator_.fit(X_train_normal,y_train_normal) #print('CV test RMSE',np.sqrt(-grid.best_score_)) #print('CV train RMSE',np.sqrt(-grid.cv_results_['mean_train_score'].max())) y_test_pred = model.predict(X_test_normal.values) y_train_pred = model.predict(X_train_normal) print('Training score (r2): {}'.format(r2_score(y_train_normal, y_train_pred))) print('Test score (r2): {}'.format(r2_score(y_test_normal.values, y_test_pred))) print("\nRoot mean square error for test dataset: {}".format(np.round(np.sqrt(mean_squared_error(y_test_normal.values, y_test_pred)), 2))) print("Root mean square error for train dataset: {}".format(np.round(np.sqrt(mean_squared_error(y_train_normal, y_train_pred)), 2))) print("pearson corr: ",np.corrcoef(y_test_pred,y_test_normal.values)[0][1]) perror = PredictionError(model) perror.fit(X_train_normal, y_train_normal) # Fit the training data to the visualizer perror.score(X_test_normal.values, y_test_normal.values) # Evaluate the model on the test data g = perror.poof() visualizer = ResidualsPlot(model) visualizer.fit(X_train_normal, y_train_normal) # Fit the training data to the model visualizer.score(X_test_normal.values, y_test_normal.values) # Evaluate the model on the test data visualizer.poof() # Draw/show/poof the data ###Output _____no_output_____ ###Markdown Salvar modelo final, entrenado con el total de lso datos ###Code ABPRED_DIR = Path().cwd().parent DATA = ABPRED_DIR / "data" #dataframe final df_final = pd.read_csv(DATA/"../data/DF_features_400_2019.csv",index_col=0) # Quitar modelos por homologia deltraining set #df_final_onlyHM = df_final.loc[df_final.index.str.startswith("HM")] #df_final= df_final.loc[~df_final.index.str.startswith("HM")] index_ddg8 = (df_final['ddG(kcal/mol)']==8) df_final = df_final.loc[-index_ddg8] #testiar eliminando estructuras con ddg menor o igual a -4 kcal/mol , outliers index_ddg_4 = (df_final['ddG(kcal/mol)'] <= -4) df_final = df_final.loc[-index_ddg_4] pdb_names = df_final.index features_names = df_final.drop('ddG(kcal/mol)',axis=1).columns # forma 1 #df_final['class'] = np.where((df_final['ddG(kcal/mol)'] < -0.4) | (df_final['ddG(kcal/mol)'] > 2.4),0,1) # forma 2 df_final['class'] = np.where((df_final['ddG(kcal/mol)'] < 0),1,0) print("Before OverSampling, counts of label '1': {}".format(sum(df_final['class'] == 1))) print("Before OverSampling, counts of label '0': {}".format(sum(df_final['class'] == 
0))) X = df_final.drop('class',axis=1) y = df_final['class'] smote = ovs.SMOTE(random_state=12,m_neighbors=25) X_re , y__re = smote.fit_sample(X,y) # back to originalk shape and target X_normal = X_re[:,:-1] y_normal = X_re[:,-1] final_rf = grid1.best_estimator_.fit(X_normal,y_normal) # save final model joblib.dump(final_rf, 'RFmodel_400.smote.v1.pkl') rmse_test = np.round(np.sqrt(mean_squared_error(y_test, y_pred_test)), 3) df_pred = pd.DataFrame({"Predicted ddG(kcal/mol)": y_pred_test, "Actual ddG(kcal/mol)": y_test.values}) pearsonr_test = round(df_pred.corr().iloc[0,1],3) g = sns.regplot(x="Actual ddG(kcal/mol)", y="Predicted ddG(kcal/mol)",data=df_pred) plt.title("Predicted vs Experimental ddG (Independent set: 123 complexes)") plt.text(-2,3,"pearsonr = %s" %pearsonr_test) plt.text(4.5,-0.5,"RMSE = %s" %rmse_test) #plt.savefig("RFmodel_300_testfit.png",dpi=600) df_train_pred = pd.DataFrame({"Predicted ddG(kcal/mol)": y_train, "Actual ddG(kcal/mol)": y_pred_train}) pearsonr_train = round(df_train_pred.corr().iloc[0,1],3) rmse_train = np.round(np.sqrt(mean_squared_error(y_train, y_pred_train)), 3) g = sns.regplot(x="Actual ddG(kcal/mol)", y="Predicted ddG(kcal/mol)",data=df_train_pred) plt.text(-0.4,6.5,"pearsonr = %s" %pearsonr_train) plt.text(3.5,-2.5,"RMSE = %s" %rmse_train) plt.title("Predicted vs Experimental ddG (Train set: 492 complexes)") #plt.savefig("RFmodel_300_trainfit.png",dpi=600) importances = list(model.feature_importances_) feature_list = df_final.columns # List of tuples with variable and importance feature_importances = [(feature, round(importance, 4)) for feature, importance in zip(feature_list, importances)] # Sort the feature importances by most important first feature_importances = sorted(feature_importances, key = lambda x: x[1], reverse = True) # Print out the feature and importances [print('Variable: {:20} Importance: {}'.format(*pair)) for pair in feature_importances] idx_features = model.feature_importances_.argsort()[::-1] plt.figure(figsize=(15,4)) plt.bar(np.arange(X_train.shape[1]), model.feature_importances_[idx_features]) plt.xticks(range(len(feature_list)),feature_list[idx_features]) plt.autoscale(enable=True, axis='x', tight=True) plt.xlabel(u"Feature importance") #plt.savefig("RFmodel_300_50features",dpi=600,bbox_inches="tight") residual = y_test.values - y_pred_test plt.scatter(x=y_pred_test.T, y=residual.T) from sklearn.model_selection import learning_curve def learning_curves(estimator, features, target, cv): train_sizes, train_scores, validation_scores = learning_curve( estimator, features, target,train_sizes = np.linspace(.1, 1.0, 10), cv = cv, scoring = 'neg_mean_squared_error',n_jobs=-1) train_scores_mean = -train_scores.mean(axis = 1) validation_scores_mean = -validation_scores.mean(axis = 1) plt.plot(train_sizes, np.sqrt(train_scores_mean), label = 'Training error') plt.plot(train_sizes, np.sqrt(validation_scores_mean), label = 'Validation error') plt.ylabel('RMSE', fontsize = 14) plt.xlabel('Training set size', fontsize = 14) title = 'Learning curve' plt.title(title, fontsize = 18, y = 1.03) plt.legend() plt.ylim(0,3) learning_curves(model,X,y,5) #plt.savefig("RFmodel_300_learnincurve.png",dpi=600,bbox_inches="tight") ###Output _____no_output_____ ###Markdown Manuall CV with smote ###Code # forma 1 df_final['class'] = np.where((df_final['ddG_exp'] < -0.4) | (df_final['ddG_exp'] > 2.4),0,1) # forma 2 #df_final['class'] = np.where((df_final['ddG_exp'] < 0),1,0) print("Before OverSampling, counts of label '1': {}".format(sum(df_final['class'] 
== 1))) print("Before OverSampling, counts of label '0': {}".format(sum(df_final['class'] == 0))) X = df_final.drop('class',axis=1) y = df_final['class'] X_train, X_test, y_train, y_test = train_test_split(X, y,stratify = y,train_size=0.70,random_state=1212) RandomForestRegressor? {'randomforestregressor__bootstrap': True, 'randomforestregressor__max_depth': 2, 'randomforestregressor__max_features': 37, 'randomforestregressor__max_leaf_nodes': 12, 'randomforestregressor__min_samples_leaf': 10, 'randomforestregressor__min_samples_split': 2, 'randomforestregressor__n_estimators': 50, 'randomforestregressor__random_state': 1212} cv_splits = 10 cv_test_mse = np.zeros(cv_splits) cv_test_r2 = np.zeros(cv_splits) cv_train_mse = np.zeros(cv_splits) cv_train_r2 = np.zeros(cv_splits) kf = KFold(n_splits=cv_splits,random_state=1212) #kf = StratifiedKFold(n_splits=cv_splits,random_state=12) for i,(train_index, validation_index) in enumerate(kf.split(X_train,y_train)): X_train_cv, X_validation_cv = X_train.values[train_index], X_train.values[validation_index] y_train_cv, y_validation_cv = y_train.values[train_index], y_train.values[validation_index] # Oversampling smote = ovs.SMOTE(random_state=1212,m_neighbors=25) #smote = ovs.BorderlineSMOTE(random_state=12,m_neighbors=40,kind='borderline-1') #adasyn = ovs.ADASYN(random_state=12,n_neighbors=40) #X_train_ovs , y_train_ovs = adasyn.fit_sample(X_train_cv,y_train_cv) X_train_ovs , y_train_ovs = smote.fit_sample(X_train_cv,y_train_cv) # Back to normal dataset X_train_normal = X_train_ovs[:,:-1] y_train_normal = X_train_ovs[:,-1] X_validation_normal = X_validation_cv[:,:-1] y_validation_normal = X_validation_cv[:,-1] # Model model = RandomForestRegressor(n_estimators=50,max_depth=2,max_features=30,max_leaf_nodes=10, min_samples_leaf=2,min_samples_split=10,random_state=1212) model.fit(X_train_normal,y_train_normal) # Test y_test_pred = model.predict(X_validation_normal) y_train_pred = model.predict(X_train_normal) # Scoring test_mse = mean_squared_error(y_validation_normal,y_test_pred) test_r2 = r2_score(y_validation_normal,y_test_pred) cv_test_mse[i] = test_mse cv_test_r2[i] = test_r2 train_mse = mean_squared_error(y_train_normal,y_train_pred) train_r2 = r2_score(y_train_normal,y_train_pred) cv_train_mse[i] = train_mse cv_train_r2[i] = train_r2 print("Test scores") print("RMSE",np.sqrt(cv_test_mse.mean())) print("r2",cv_test_r2.mean()) print("") print("Train scores") print("RMSE",np.sqrt(cv_train_mse.mean())) print("r2",cv_train_r2.mean()) ###Output Test scores RMSE 1.1960745680680778 r2 0.30202439829554173 Train scores RMSE 1.1520470964345404 r2 0.5917545697264857
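###Markdown One detail worth highlighting in the loop above: the oversampler is fitted only on the training split of each fold, never on the validation split, which avoids leaking synthetic points into the evaluation. The minimal sketch below shows just the oversampling step on synthetic data (generated with `make_classification`, since the notebook's `df_final` is not reproduced here); note that newer versions of imbalanced-learn spell the method `fit_resample` rather than the older `fit_sample` used above. ###Code
# Minimal illustration of the oversampling step on synthetic data.
from collections import Counter

from imblearn.over_sampling import SMOTE
from sklearn.datasets import make_classification

X_toy, y_toy = make_classification(
    n_samples=500, n_features=10, weights=[0.9, 0.1], random_state=0
)
print('Before oversampling:', Counter(y_toy))

X_res, y_res = SMOTE(random_state=0).fit_resample(X_toy, y_toy)
print('After oversampling:', Counter(y_res))
###Output _____no_output_____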
data-science/K-means_Clustering.ipynb
###Markdown K-means clustering Authors Ndèye Gagnessiry Ndiaye and Christin Seifert LicenseThis work is licensed under the Creative Commons Attribution 3.0 Unported License https://creativecommons.org/licenses/by/3.0/ This notebook:- introduces k-means clustering using features from the Iris flower dataset ###Code import pandas as pd import numpy as np import pylab as plt import matplotlib.pyplot as plt from sklearn.cluster import KMeans import sklearn.metrics as sm ###Output _____no_output_____ ###Markdown We load the Iris flower data set. From the four measured features (e.g 'SepalLength','SepalWidth','PetalLength','PetalWidth'), two features were selected to perform k-means clustering : 'SepalLength' and 'PetalLength'. ###Code from sklearn import datasets iris = datasets.load_iris() #iris.data #iris.feature_names iris.target #iris.target_names x = pd.DataFrame(iris.data) x.columns = ['SepalLength','SepalWidth','PetalLength','PetalWidth'] y = pd.DataFrame(iris.target) y.columns = ['Targets'] iris = x[['SepalLength', 'PetalLength']] X= np.array ([[ 6,5], [ 6.2, 5.2], [ 5.8,4.8]]) model_1 = KMeans(n_clusters=3, random_state=42,max_iter=1,n_init=1, init = X ).fit(iris) centroids_1 = model_1.cluster_centers_ labels_1=(model_1.labels_) print(centroids_1) print(labels_1) model_10= KMeans(n_clusters=3, random_state=42,max_iter=10, n_init=1, init = X).fit(iris) centroids_10 = model_10.cluster_centers_ labels_10=(model_10.labels_) print(centroids_10) print(labels_10) model_11= KMeans(n_clusters=3, random_state=42,max_iter=11,n_init=1, init = X).fit(iris) centroids_max = model_11.cluster_centers_ labels_max=(model_11.labels_) print(centroids_max) print(labels_max) '''model_999= KMeans(n_clusters=3, random_state=42,max_iter=999).fit(iris) centroids_max = model.cluster_centers_ labels_max=(model.labels_) print(centroids_max) print(labels_max)''' ###Output _____no_output_____ ###Markdown The following plots show for each iteration (ie. iter=1; iter=10 ;iter= max) the cluster centroids(blue) and the target data points. Each cluster is distinguished by a different color. ###Code # Set the size of the plot plt.figure(figsize=(24,10)) # Create a colormap colormap = np.array(['red', 'lime', 'black']) #colormap = {0: 'r', 1: 'g', 2: 'b'} # Plot Original plt.subplot(1, 4, 1) plt.scatter(x.SepalLength, x.PetalLength, c="K", s=40) plt.scatter(X[:,0],X[:,1], c="b") plt.title('Initial centroids') # Plot the Models Classifications plt.subplot(1, 4, 2) plt.scatter(iris.SepalLength, iris.PetalLength, c=colormap[labels_1], s=40) plt.scatter(centroids_1[:,0],centroids_1[:,1], c="b") plt.title('K Mean Clustering(iter=1)') plt.subplot(1, 4, 3) plt.scatter(iris.SepalLength, iris.PetalLength, c=colormap[labels_10], s=40) plt.scatter(centroids_10[:,0],centroids_10[:,1], c="b") plt.title('K Mean Clustering (iter=10)') plt.subplot(1, 4, 4) plt.scatter(iris.SepalLength, iris.PetalLength, c=colormap[labels_max], s=40) plt.scatter(centroids_max[:,0],centroids_max[:,1], c="b") plt.title('K Mean Clustering (iter= MAX)') plt.show() ###Output _____no_output_____ ###Markdown We compute the confusion matrices for each iteration and calculate the purity metric. 
###Code def confusion(y,labels): cm = sm.confusion_matrix(y, labels) return cm # Confusion Matrix (iter=1) set_list = ["setosa","versicolor","virginica"] cluster_list = ["c1", "c2", "c3"] data = confusion(y, labels_1) pd.DataFrame(data,cluster_list, set_list) # Confusion Matrix (iter=10) set_list = ["setosa","versicolor","virginica"] cluster_list = ["c1", "c2", "c3"] data = confusion(y, labels_10) pd.DataFrame(data,cluster_list, set_list) # Confusion Matrix (iter=max) set_list = ["setosa","versicolor","virginica"] cluster_list = ["c1", "c2", "c3"] data = confusion(y, labels_max) pd.DataFrame(data,cluster_list, set_list) # Calculate purity of each confusion matrix def Purity(cm): M=[] S=0 for i in cm: k = max(i) M.append(k) for i in M: S+=i Purity=S/150 return Purity metric_list = ["iter= 1", "iter= 10", "iter= MAX"] set_list = ["Purity metric"] data = np.array([Purity(confusion(y, labels_1)),Purity(confusion(y, labels_10)),Purity(confusion(y, labels_max))]) pd.DataFrame(data,metric_list, set_list) ###Output _____no_output_____ ###Markdown We select all the four measured features (e.g 'SepalLength','SepalWidth','PetalLength','PetalWidth') for different values of k (e.g k=2, k=3, k=4, k=6) and without random state. We compute the confusion matrix for each k and calculate the purity. ###Code #k=2 , random-state= 0 model = KMeans(n_clusters=2,).fit(x) centroids = model.cluster_centers_ labels=(model.labels_) print(centroids) print(labels) #Confusion matrix set_list = ["setosa","versicolor","virginica"] cluster_list = ["c1", "c2", "c3"] data = confusion(y, labels) pd.DataFrame(data,set_list, cluster_list) print ("Purity(k=2)= %f " % Purity(confusion(y, labels))) #k=3 , random-state= 0 model = KMeans(n_clusters=3,).fit(x) centroids = model.cluster_centers_ labels=(model.labels_) print(centroids) print(labels) #Confusion matrix set_list = ["setosa","versicolor","virginica"] cluster_list = ["c1", "c2", "c3"] data = confusion(y, labels) pd.DataFrame(data,set_list, cluster_list) print ("Purity(k=3)= %f " % Purity(confusion(y, labels))) #k=4 , random-state= 0 model = KMeans(n_clusters=4,).fit(x) centroids = model.cluster_centers_ labels=(model.labels_) print(centroids) print(labels) # Confusion Matrix set_list = ["setosa","versicolor","virginica","undefined"] cluster_list = ["c1", "c2", "c3","c4"] data = confusion(y, labels) pd.DataFrame(data,set_list, cluster_list) print ("Purity(k=4)= %f " % Purity(confusion(y, labels))) #k=6 , random-state= 0 model = KMeans(n_clusters=6,).fit(x) centroids = model.cluster_centers_ labels=(model.labels_) print(centroids) print(labels) # Confusion Matrix set_list = ["setosa","versicolor","virginica","undefined_1","undefined_2","undefined_3"] cluster_list = ["c1", "c2", "c3","c4","c5","c6"] data = confusion(y, labels) pd.DataFrame(data,set_list, cluster_list) print ("Purity(k=6)= %f " % Purity(confusion(y, labels))) ###Output Purity(k=6)= 0.526667
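###Markdown A small, hedged variant of the `Purity` helper above: it reads the total number of samples from the confusion matrix instead of hard-coding 150, so the same function works for a dataset of any size. The example matrix below is made up purely to exercise the helper. ###Code
import numpy as np

def purity_from_confusion(cm):
    # Same logic as the Purity helper above, but the sample count is read
    # from the matrix instead of being hard-coded to 150.
    cm = np.asarray(cm)
    return cm.max(axis=1).sum() / cm.sum()

# Made-up 3x3 confusion matrix, just to exercise the helper.
example_cm = np.array([
    [50, 0, 0],
    [0, 47, 3],
    [0, 14, 36],
])
print(purity_from_confusion(example_cm))  # ~0.887
###Output _____no_output_____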
novel_compound_predictor/NNM_ContextualCentric_Temporal.ipynb
###Markdown Notebook dealing with the final preprocessing steps for the neural networks. ###Code import torch import torch.nn as nn import torchvision.transforms as transforms import torch.utils.data import pandas as pd from torch.utils import data from numpy import array from numpy import argmax import argparse from torch.autograd import Variable from torch import optim import numpy as np import os from sklearn.model_selection import train_test_split import logging import pickle as pkl import warnings pd.options.mode.chained_assignment = None os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"]="4" import glob import random random.seed(1991) #torch.set_default_tensor_type('torch.cuda.DoubleTensor') torch.manual_seed(1991) if not torch.cuda.is_available(): print("WARNING: You have a CUDA device, so you should probably run with --cuda") device = torch.device("cuda") heads=pd.read_pickle("/data/dharp/compounding/datasets/heads_CompoundCentric_DecadeCentric_300.pkl") heads.reset_index(inplace=True) heads=heads.drop(['decade'],axis=1).groupby(['head']).mean() heads.info() heads.head() modifiers=pd.read_pickle("/data/dharp/compounding/datasets/modifiers_CompoundCentric_DecadeCentric_300.pkl") modifiers.reset_index(inplace=True) modifiers=modifiers.drop(['decade'],axis=1).groupby(['modifier']).mean() modifiers.info() modifiers.head() novel_compounds_list = pkl.load( open( "/data/dharp/compounding/datasets/novel_compounds_list.pkl", "rb" ) ) m, h = zip(*novel_compounds_list) heads_list=list(set(h)) modifiers_list=list(set(m)) novel_compounds=pd.DataFrame(novel_compounds_list) novel_compounds.columns=['modifier','head'] positive_heads=pd.merge(novel_compounds.drop('modifier',axis=1),heads.reset_index(),on=["head"]) positive_modifiers=pd.merge(novel_compounds.drop('head',axis=1),modifiers.reset_index(),on=["modifier"]) #positive_df['Plausibility']=True positive_heads.head() positive_heads_tensor = torch.tensor(positive_heads.drop('head',axis=1).values) positive_heads_tensor.shape positive_modifiers_tensor = torch.tensor(positive_modifiers.drop('modifier',axis=1).values) positive_modifiers_tensor.shape positive_Y=torch.ones(positive_modifiers_tensor.shape[0]) positive_Y.shape positive_class=torch.cat((positive_modifiers_tensor, positive_heads_tensor), 1) positive_class.shape def neg_df_creator(file): pkl_file=pkl.load( open(file,'rb')) df=pd.DataFrame(pkl_file) df.columns=['modifier','head'] negative_heads=pd.merge(df.drop('modifier',axis=1),heads.reset_index(),on=["head"]) negative_modifiers=pd.merge(df.drop('head',axis=1),modifiers.reset_index(),on=["modifier"]) negative_heads_tensor = torch.tensor(negative_heads.drop('head',axis=1).values) negative_modifiers_tensor = torch.tensor(negative_modifiers.drop('modifier',axis=1).values) negative_Y=torch.zeros(negative_modifiers_tensor.shape[0]) negative_class=torch.cat((negative_modifiers_tensor, negative_heads_tensor), 1) return negative_class,negative_Y def tensor_joiner(files): tensor_list=[] for file in files: negative_class,negative_Y=neg_df_creator(file) X=torch.cat((positive_class, negative_class), 0) Y=torch.cat((positive_Y,negative_Y),0) tensor_list.append([X,Y]) return tensor_list corrupt_modifier_files=[] for file in glob.glob("/data/dharp/compounding/datasets/corrupt_modifier*"): corrupt_modifier_files.append(file) corrupt_modifiers=tensor_joiner(corrupt_modifier_files) corrupt_head_files=[] for file in glob.glob("/data/dharp/compounding/datasets/corrupt_head*"): corrupt_head_files.append(file) 
corrupt_heads=tensor_joiner(corrupt_head_files) input_size = 600 hidden_size = 300 num_classes = 2 num_epochs = 50 batch_size = 100 learning_rate = 0.001 class NeuralNet(nn.Module): def __init__(self, input_size, hidden_size, num_classes): super(NeuralNet, self).__init__() self.fc1 = nn.Linear(input_size, hidden_size) self.relu = nn.ReLU() self.fc2 = nn.Linear(hidden_size, num_classes,bias=False) def forward(self, x): out = self.fc1(x) out = self.relu(out) out = self.fc2(out) return out model = NeuralNet(input_size, hidden_size, num_classes).to(device) criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) def looper(datasets): total_accuracy=[] for dataset in datasets: X=dataset[0] Y=dataset[1] n = len(X) # how many total elements you have n_test = int( n * .2 ) # number of test/val elements n_train = n - n_test idx = list(range(n)) # indices to all elements random.shuffle(idx) # in-place shuffle the indices to facilitate random splitting train_idx = idx[:n_train] test_idx = idx[n_train:] trX=X[train_idx].float().to(device) teX=X[test_idx].float().to(device) trY=Y[train_idx].long().to(device) teY=Y[test_idx].long().to(device) model = NeuralNet(input_size, hidden_size, num_classes).to(device) criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) n_examples=trX.shape[0] for i in range(num_epochs): cost = 0. num_batches = n_examples // batch_size for k in range(num_batches): start, end = k * batch_size, (k + 1) * batch_size outputs = model(trX[start:end]) loss = criterion(outputs, trY[start:end]) optimizer.zero_grad() loss.backward() optimizer.step() #if (k+1) % 100 == 0: #print ('Epoch [{}/{}], Loss: {:.4f}'.format(i+1, num_epochs, loss.item())) with torch.no_grad(): correct=0 total=0 outputs = model(teX) _, predicted = torch.max(outputs.data, 1) total += teY.size(0) correct += (predicted == teY).sum().item() curr_acc=100 * correct / total print(curr_acc) total_accuracy.append(curr_acc) return total_accuracy cor_head_acc=looper(corrupt_heads) ###Output 83.2059056156077 82.82801652166272 82.704982863169 82.14254328148344 83.13560066789701 82.47649178310924 82.27436505844099 82.50285613850075 82.90710958783724 82.67861850777749 ###Markdown 82.9158977063010883.1268125494331782.5995254416029582.7664996924158583.1443887863608483.0565076017224782.9071095878372483.2234818525353782.6786185077774982.74892345548818 ###Code round(np.mean(cor_head_acc),2) round(np.std(cor_head_acc),2) cor_mod_acc=looper(corrupt_modifiers) ###Output 82.50285613850075 82.6610422708498 82.50285613850075 81.52737498901485 83.08287195711398 82.27436505844099 83.14438878636084 82.4677036646454 83.06529572018631 82.26557693997715 ###Markdown 82.8807452324457383.4080323402759582.2919412953686681.8788997275683282.7225591000966782.2128482291941282.4589155461815682.564372967747682.661042270849882.3007294138325 ###Code round(np.mean(cor_mod_acc),2) round(np.std(cor_mod_acc),2) ###Output _____no_output_____
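###Markdown A possible refinement of the manual batching inside `looper`: `torch.utils.data.TensorDataset` and `DataLoader` handle shuffling and the final partial batch automatically. The sketch below uses random stand-in tensors with the same shapes as `trX`/`trY` rather than the actual compound data. ###Code
# Sketch: the same batching as in looper(), but with TensorDataset/DataLoader.
import torch
from torch.utils.data import DataLoader, TensorDataset

X_demo = torch.randn(1000, 600)
y_demo = torch.randint(0, 2, (1000,))

loader = DataLoader(TensorDataset(X_demo, y_demo), batch_size=100, shuffle=True)

for batch_X, batch_y in loader:
    # outputs = model(batch_X.to(device)); loss = criterion(outputs, batch_y.to(device)); ...
    pass
###Output _____no_output_____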
docs/contents/tools/classes/pdbfixer_PDBFixer/to_molsysmt_MolSys.ipynb
###Markdown To molsysmt.MolSys ###Code from molsysmt.tools import pdbfixer_PDBFixer #pdbfixer_PDBFixer.to_molsysmt_MolSys(item) ###Output _____no_output_____
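###Markdown The conversion call above is left commented out. As a purely hypothetical usage sketch — the PDB id is arbitrary and the `pdbid` keyword is an assumption about the PDBFixer constructor — an `item` could be built with PDBFixer and handed to the converter as follows. ###Code
# Hypothetical sketch only: build a PDBFixer object and convert it.
# The conversion call mirrors the commented line above.
from pdbfixer import PDBFixer
from molsysmt.tools import pdbfixer_PDBFixer

item = PDBFixer(pdbid='1TCD')
molsys = pdbfixer_PDBFixer.to_molsysmt_MolSys(item)
print(type(molsys))
###Output _____no_output_____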
lesson_walkthrough.ipynb
###Markdown Lesson Walkthrough This notebook walks through the material for each of the lesson topics, in order, to ensure they run. This notebook is *not* the "source" for the lesson material, e.g. through `pandoc` conversion. It is rather an environment to quickly sketch out and test ideas for lesson development. Introduction (to the lesson overall) This lesson introduces MongoDB, a document-oriented database that departs from the relational nature of SQL. Because SQL is so well-established and prevalent as a database technology, the terms "relational database" and "SQL" are often synonymous. Furthermore, the term "NoSQL" has come to encompass a whole class of database technologies that are non-relational in nature. There are other "NoSQL" technologies such as key-value stores and column stores that differ from MongoDB's document/collection approach, but we will only cover MongoDB today. MongoDB trades off the strictness of SQL for a degree of simplicity and flexibility often desirable for scientists, who are often not certain about the best schema design for new/changing data sets. Whereas SQL lends itself well to enforcing a schema and thus ensuring data validation at the database level, a system like MongoDB does not require setting/migrating schemas to get started with data management. However, this means that application-level data validation (e.g. before adding to the database) is particularly important. MongoDB can pick up on implicit schema in your data through the creation of indexes, which will speed up queries significantly for large data sets. This lesson was originally developed at the [Lawrence Berkeley National Laboratory](http://lbl.gov), where many scientists use MongoDB servers hosted by [NERSC](http://nersc.gov) to [manage workflows](https://pythonhosted.org/FireWorks/) and [process data](https://pythonhosted.org/pymatgen-db/) on supercomputing clusters. The example data for this lesson is from the [Materials Project](https://materialsproject.org), which hosts computed information for tens of thousands of known and predicted inorganic crystalline compounds. Prerequisites Getting Ready Introduction/Setup I'd like at least two challenges per topic. A challenge should be a multiple-choice question (MCQ) or a Parson problem. Likely will be two MCQs. Come up with these first, then base the topic material on it. Connect to / Import into a Mock Database To accommodate folks who cannot get a real database server running locally on their system, and also to demonstrate the utility of "mocking" for testing real access patterns without an external dependency, we will use the `mongomock` Python library to walk through MongoDB ideas for the first few topics. If you have a Mongo server running, feel free to use `pymongo.MongoClient` instead. First, let's connect to the (mock) server and get a handle for our client. ###Code
import json from mongomock import MongoClient from pymongo import MongoClient def reset_materials(): client = MongoClient() db = client.swc db.materials.drop() with open('data/mongo-novice-materials.json') as f: db.materials.insert_many(json.load(f)) from mongomock import MongoClient from pymongo import MongoClient client = MongoClient() ###Output _____no_output_____
###Markdown A MongoDB instance can host multiple databases, which are created dynamically.
Here, we will supply a database name as an attribute of the client object, which will prompt MongoDB to create the database with that name if it doesn't exist. ###Code
# Refer to the Software Carpentry ("swc") database db = client.swc print(db) ###Output Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'swc')
###Markdown In Python, we can `print` something to look at its string representation, which is often designed to be human-readable and can give us an idea of what's going on. We see here that what we're calling `db` is a Database with the name "swc" that we're accessing through a Mongo client connected to localhost on port 27017. ###Code
print(db.materials) ###Output Collection(Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'swc'), 'materials')
###Markdown A MongoDB database is organized as a set of collections, each of which contains a set of documents. To first order, you can for now think of a collection as corresponding to a table in SQL and a collection document as corresponding to a table row in SQL. Just as with databases themselves, database collections are created dynamically in MongoDB. Above, we created a `materials` collection in our database simply by referring to it by name. Now, let's load data from a file and import it as documents into our collection: ###Code
import json ###Output _____no_output_____
###Markdown We first import the `json` module from the Python standard library. `JSON`, which stands for "JavaScript Object Notation", is a way to express simple data structures that is widely used in web-based applications. We'll go over the format in the next topic when we construct a document to insert into our collection, but for now let's focus on importing data that we're given. ###Code
db.materials.drop() with open('data/mongo-novice-materials.json') as f: db.materials.insert_many(json.load(f)) ###Output _____no_output_____
###Markdown We are using a Python context manager to open a file and ensure that it is closed when we are done processing the file contents. In this case, we use the `json` module to load the file contents as Python-native data structures, which we then hand off to the `insert_many` method of database collections to insert all of the loaded documents. ###Code
db.materials.count() ###Output _____no_output_____
###Markdown To confirm we have the data loaded, we use the `count()` method of a collection object and see that we have more than zero documents in our `materials` collection.
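###Markdown The same kind of check also works with a filter document; in newer versions of PyMongo, `count_documents` is the preferred, non-deprecated replacement for `count()`. This sketch reuses the `db` handle from above, and the `nelements` field it filters on appears later in this lesson. ###Code
# count() without arguments counts the whole collection; a filter document
# narrows it down. In newer PyMongo, count_documents() replaces the
# deprecated Collection.count().
print(db.materials.count_documents({}))                # every document
print(db.materials.count_documents({"nelements": 3}))  # only three-element materials
###Output _____no_output_____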
###Code with open('data/mongo-novice-materials.json') as f: dataset = json.load(f) print(type(dataset)) print(type(dataset[0])) db.publications.count() ###Output _____no_output_____ ###Markdown Insert Data ###Code !wget "https://raw.githubusercontent.com/materialsproject/pymatgen/master/pymatgen/core/periodic_table.json" -O data/periodic_table.json import json with open('data/periodic_table.json') as f: periodic_table = json.load(f) from operator import itemgetter def pluck(key, mappings): return map(itemgetter(key), mappings) db.elements.insert_many([v for v in periodic_table.values()]) def project_only(*keys): doc = {"_id": 0} doc.update({k: 1 for k in keys}) return doc periodic_table['Cu'] [d for d in db.elements.find({}, project_only('Name', 'Atomic no')).limit(3)] [d for d in db.materials.find({}, project_only('material_id', 'pretty_formula', 'nelements')).limit(3)] db.materials.find_one({"chemsys": "Na-O"}) from datetime import datetime material = {"fake": True, "elements": ["Na", "O"], "band_gap": 1.736, "last_updated": datetime.utcnow(), "spacegroup": {"crystal_system": "hexagonal", "number": 189}} result = db.materials.insert_one(material) result.inserted_id print(result.inserted_id.generation_time) from bson import json_util print(json_util.dumps(db.materials.find_one(result.inserted_id), indent=2)) result = db.materials.delete_many({"fake": True}) result.deleted_count ###Output _____no_output_____ ###Markdown Find data You can use the `find()` method to issue a query to retrieve data from a collection in MongoDB. All queries in MongoDB have the scope of a single collection.Queries can return all documents in a collection or only the documents that match a specified filter or criteria. You can specify the filter or criteria in a document and pass it as a parameter to the `find()` method.The `find()` method returns query results in a cursor, which is an iterable object that yields documents. Gotta catch 'em all To return all documents in a collection, call the `find()` method without a criteria document. ###Code cursor = db.materials.find() ###Output _____no_output_____ ###Markdown Let's iterate over the cursor and print a few material ids. 
###Code how_many = 5 counter = 0 for document in cursor: if counter < how_many: print(document['material_id']) counter += 1 else: break ###Output mp-568345 mp-12671 mp-1703 mp-5152 mp-569624 ###Markdown There's an easier way to limit how many documents are yielded by a cursor: ###Code for document in cursor.limit(5): print(document['material_id']) ###Output mp-552787 mp-188 mp-600216 mp-2310 mp-780541 ###Markdown Query by a top-level field The following operation finds documents whose **nelements** field equals **3**: ###Code cursor = db.materials.find({"nelements": 3}) ###Output _____no_output_____ ###Markdown Let's print ("pretty print", for nice indentation) a few of the results: ###Code from pprint import pprint for doc in cursor.limit(3): pprint(doc) ###Output {'_id': ObjectId('56ce4c367943f62692beb002'), 'chemsys': 'Er-O-S', 'elasticity': None, 'elements': ['Er', 'O', 'S'], 'material_id': 'mp-12671', 'nelements': 3, 'pretty_formula': 'Er2SO2', 'spacegroup': {'crystal_system': 'trigonal', 'hall': '-P 3 2=', 'number': 164, 'point_group': '-3m', 'source': 'spglib', 'symbol': 'P-3m1'}} {'_id': ObjectId('56ce4c367943f62692beb004'), 'chemsys': 'La-O-Si', 'elasticity': None, 'elements': ['La', 'O', 'Si'], 'material_id': 'mp-5152', 'nelements': 3, 'pretty_formula': 'La2SiO5', 'spacegroup': {'crystal_system': 'monoclinic', 'hall': '-P 2yab', 'number': 14, 'point_group': '2/m', 'source': 'spglib', 'symbol': 'P2_1/c'}} {'_id': ObjectId('56ce4c367943f62692beb007'), 'chemsys': 'Cl-Fe-O', 'elasticity': {'G_Reuss': 4.800058831100799, 'G_VRH': 15.695815587428928, 'G_Voigt': 26.591572343757058, 'K_Reuss': 12.632559688113227, 'K_VRH': 27.637901832532986, 'K_Voigt': 42.643243976952746, 'calculations': {'energy_cutoff': 700.0, 'kpoint_density': 7000, 'pseudopotentials': ['Fe_pv', 'Cl', 'O']}, 'elastic_anisotropy': 25.074876418379876, 'elastic_tensor': [[132.83779269421643, 4.717792594504168, 41.313046328714506, 0.0, 0.0, 0.0], [4.717792594504168, 13.477772725303467, 4.593045165520949, 0.0, 0.0, 0.0], [41.313046328714506, 4.593045165520949, 136.22586219557556, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.98697116, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 51.08350865666667, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 2.581534060000001]], 'poisson_ratio': 0.26124289904174286}, 'elements': ['Cl', 'Fe', 'O'], 'material_id': 'mp-552787', 'nelements': 3, 'pretty_formula': 'FeClO', 'spacegroup': {'crystal_system': 'orthorhombic', 'hall': 'P 2 2ab -1ab', 'number': 59, 'point_group': 'mmm', 'source': 'spglib', 'symbol': 'Pmmn'}} ###Markdown Projection to select fields That last query returned all fields for each document. We can use a projection, specified as JSON, to indicate which fields we want. The `_id` field is included by default -- we must be explicit if we don't want it returned. ###Code cursor = db.materials.find({"nelements": 3}, {"material_id": 1, "pretty_formula": 1, "_id": 0}) for doc in cursor.limit(3): pprint(doc) ###Output {'material_id': 'mp-12671', 'pretty_formula': 'Er2SO2'} {'material_id': 'mp-5152', 'pretty_formula': 'La2SiO5'} {'material_id': 'mp-552787', 'pretty_formula': 'FeClO'} ###Markdown Query by a field in an embedded document To specify a condition on a field within an embedded document, use dot notation. Dot notation requires quotes around the whole dotted field name. 
###Code cursor = db.materials.find({"spacegroup.crystal_system": "cubic"}) print(cursor.count()) ###Output 9408 ###Markdown Projection can take advantage of the same dot notation: ###Code cursor = db.materials.find({"nelements": 2}, {"spacegroup.crystal_system": 1, "elements": 1, "_id": 0}) for doc in cursor.limit(3): pprint(doc) ###Output {'elements': ['Yb', 'Zn'], 'spacegroup': {'crystal_system': 'cubic'}} {'elements': ['Cr', 'Hf'], 'spacegroup': {'crystal_system': 'hexagonal'}} {'elements': ['B', 'Lu'], 'spacegroup': {'crystal_system': 'cubic'}} ###Markdown Query by a field in an array How many materials in our collection contain iron? When a field is an array, testing for membership has the same form as testing for equality: ###Code db.materials.find({"elements": "Fe"}).count() ###Output _____no_output_____ ###Markdown If you supply an array as the value under test, we can see that four polymorphs of iron are present in our collection: ###Code db.materials.find({"elements": ["Fe"]}).count() ###Output _____no_output_____ ###Markdown Challenge: Dot Notation and Projections Which query below yields documents containing the crystal system and spacegroup number for all binary compounds? ###Code cursor = db.materials.find({"nelements": 2}, {"spacegroup": {"crystal_system": 1, "number": 1}}) for doc in cursor.limit(3): pprint(doc) cursor = db.materials.find({"nelements": 2}, {"spacegroup.crystal_system": 1, "spacegroup.number": 1}) for doc in cursor.limit(3): pprint(doc) ###Output {'_id': ObjectId('56ce4c367943f62692beb003'), 'spacegroup': {'crystal_system': 'cubic', 'number': 221}} {'_id': ObjectId('56ce4c367943f62692beb005'), 'spacegroup': {'crystal_system': 'hexagonal', 'number': 194}} {'_id': ObjectId('56ce4c367943f62692beb006'), 'spacegroup': {'crystal_system': 'cubic', 'number': 221}} ###Markdown Challenge: Combining Conditions Which query below returns the number of binary oxides (oxygen-containing, two-element materials) in our collection? 
###Code db.materials.find({"elements": "O"}).limit(2).count() db.materials.find({"elements": "O", "nelements": 2}).count() db.materials.find({"elements": ["O"], "nelements": 2}).count() ###Output _____no_output_____ ###Markdown Specify Conditions with Operators ###Code COMMON_PROJ = { "material_id": 1, "pretty_formula": 1, "_id": 0, } def print_a_few_for(query): proj = {k: 1 for k in query} proj.update(COMMON_PROJ) for doc in db.materials.find(query, proj).limit(5): pprint(doc) query = {"nelements": {"$gte": 3}} print_a_few_for(query) print_a_few_for({"nelements": {"$lt": 3}}) db.materials.find({"chemsys": "Fe-O", "spacegroup.crystal_system": "cubic"}).count() db.materials.find({ "$or": [{"nelements": 2}, {"nelements": 4}] }).count() print_a_few_for({"elasticity": {"$exists": True}}) print_a_few_for({"elasticity": {"$ne": None}}) ###Output {'elasticity': {'G_Reuss': 64.86603926197118, 'G_VRH': 66.1852057595004, 'G_Voigt': 67.50437225702963, 'K_Reuss': 189.54199905990907, 'K_VRH': 189.5741939942109, 'K_Voigt': 189.60638892851273, 'calculations': {'energy_cutoff': 700.0, 'kpoint_density': 7000, 'pseudopotentials': ['Hf_pv', 'Cr_pv']}, 'elastic_anisotropy': 0.2037075326032598, 'elastic_tensor': [[295.8188907773902, 133.82065045871653, 140.1170929005653, 0.0, 0.0, -0.036991826666666644], [133.82065045871653, 299.3255081549423, 141.6470726644416, 0.0, 0.0, -0.05824203666666673], [140.1170929005653, 141.6470726644416, 280.1434693768354, 0.0, 0.0, -0.0630635066666666], [0.0, 0.0, 0.0, 52.12082853999999, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 52.19335160333334, 0.0], [-0.036991826666666644, -0.05824203666666673, -0.0630635066666666, 0.0, 0.0, 79.97333038000001]], 'poisson_ratio': 0.34363428744148256}, 'material_id': 'mp-569624', 'pretty_formula': 'HfCr2'} {'elasticity': {'G_Reuss': 4.800058831100799, 'G_VRH': 15.695815587428928, 'G_Voigt': 26.591572343757058, 'K_Reuss': 12.632559688113227, 'K_VRH': 27.637901832532986, 'K_Voigt': 42.643243976952746, 'calculations': {'energy_cutoff': 700.0, 'kpoint_density': 7000, 'pseudopotentials': ['Fe_pv', 'Cl', 'O']}, 'elastic_anisotropy': 25.074876418379876, 'elastic_tensor': [[132.83779269421643, 4.717792594504168, 41.313046328714506, 0.0, 0.0, 0.0], [4.717792594504168, 13.477772725303467, 4.593045165520949, 0.0, 0.0, 0.0], [41.313046328714506, 4.593045165520949, 136.22586219557556, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.98697116, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 51.08350865666667, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 2.581534060000001]], 'poisson_ratio': 0.26124289904174286}, 'material_id': 'mp-552787', 'pretty_formula': 'FeClO'} {'elasticity': {'G_Reuss': 89.22439690238315, 'G_VRH': 91.1977479844793, 'G_Voigt': 93.17109906657547, 'K_Reuss': 225.23046094201425, 'K_VRH': 225.23046094240684, 'K_Voigt': 225.23046094279943, 'calculations': {'energy_cutoff': 700.0, 'kpoint_density': 7000, 'pseudopotentials': ['Al', 'Pt']}, 'elastic_anisotropy': 0.2211672088169152, 'elastic_tensor': [[319.43298108763224, 178.12963573334946, 178.129548438403, 0.0, 0.0, 0.0], [178.12963573334946, 319.43208297658407, 178.129473057102, 0.0, 0.0, 0.0], [178.129548438403, 178.129473057102, 319.43176996326986, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 108.18424078333338, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 108.18423757000005, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 108.18429138000002]], 'poisson_ratio': 0.32162138374300725}, 'material_id': 'mp-188', 'pretty_formula': 'AlPt3'} {'elasticity': {'G_Reuss': 36.0849960555562, 'G_VRH': 36.138594567747134, 'G_Voigt': 36.192193079938065, 'K_Reuss': 102.02676355863561, 'K_VRH': 
102.04618920065883, 'K_Voigt': 102.06561484268204, 'calculations': {'energy_cutoff': 700.0, 'kpoint_density': 7000, 'pseudopotentials': ['Cd', 'O']}, 'elastic_anisotropy': 0.015234201718120843, 'elastic_tensor': [[149.72856490003238, 76.18447389649583, 76.0971442247916, 0.0, 0.0, 0.0], [76.18447389649583, 155.32679754703915, 76.82214981963244, 0.0, 0.16773408205128196, 0.0], [76.0971442247916, 76.82214981963244, 155.32763525522702, 0.0, 0.11635495448717942, 0.0], [0.0, 0.0, 0.0, 34.600412473333336, 0.0, 0.0], [0.0, 0.16773408205128196, 0.11635495448717942, 0.0, 34.666930369230755, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 34.600545970000006]], 'poisson_ratio': 0.3416257412326539}, 'material_id': 'mp-2310', 'pretty_formula': 'CdO2'} {'elasticity': {'G_Reuss': 140.01490613546238, 'G_VRH': 142.6665329138408, 'G_Voigt': 145.31815969221918, 'K_Reuss': 136.562024021793, 'K_VRH': 136.97260811326998, 'K_Voigt': 137.38319220474696, 'calculations': {'energy_cutoff': 700.0, 'kpoint_density': 7000, 'pseudopotentials': ['Ta_pv', 'Be_sv']}, 'elastic_anisotropy': 0.19539490063027198, 'elastic_tensor': [[376.54891743906546, -7.386712349243056, 59.36280676664959, 0.0, 0.0, 0.0], [-7.386712349243056, 376.54900803035565, 59.36166355240516, 0.0, 0.0, 0.0], [59.36280676664959, 59.36166355240516, 260.67528843367813, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 149.2932377633333, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 149.29423271666673, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 127.19150933666668]], 'poisson_ratio': 0.11342874565238646}, 'material_id': 'mp-567842', 'pretty_formula': 'TaBe12'} ###Markdown Sorting ###Code import pymongo cursor = db.materials.find().sort([ ("borough", pymongo.ASCENDING), ("address.zipcode", pymongo.DESCENDING) ]) COMMON_PROJ = { "material_id": 1, "pretty_formula": 1, "spacegroup.number": 1, "_id": 0, } def cursor_with_minimal_projection(query): proj = {k: 1 for k in query} proj.update(COMMON_PROJ) cursor = db.materials.find(query, proj) return cursor def print_a_few_for(query): cursor = cursor_with_minimal_projection(query) for doc in cursor.limit(5): pprint(doc) filt = {"elasticity": {"$ne": None}} proj = {"elasticity.poisson_ratio": 1} proj.update(COMMON_PROJ) cursor = db.materials.find(filt, proj).sort("elasticity.poisson_ratio") for doc in cursor.limit(5): pprint(doc) cursor = db.materials.find(filt, proj).sort("elasticity.poisson_ratio", -1) for doc in cursor.limit(5): pprint(doc) cursor = db.materials.find(filt, proj).sort([ ("nelements", -1), ("elasticity.poisson_ratio", -1), ]) for doc in cursor.limit(5): pprint(doc) cursor = db.materials.find({ "nelements": {"$lt": 3} }, { "_id": 0, "nelements": 1, "elasticity.K_VRH": 1, "pretty_formula": 1, "spacegroup.crystal_system": 1, "material_id": 1, }) cursor = cursor.sort([("nelements", 1),("elasticity.K_VRH", -1)]) for doc in cursor.limit(5): pprint(doc) cursor = db.materials.find({ "nelements": {"$lt": 3} }, { "_id": 0, "nelements": 1, "elasticity.K_VRH": 1, "pretty_formula": 1, "spacegroup.crystal_system": 1, "material_id": 1, }, sort=[ ("nelements", 1), ("elasticity.K_VRH", -1) ]) for doc in cursor.limit(5): pprint(doc) systems = filter(None, db.materials.distinct("spacegroup.crystal_system")) for s in systems: materials = list(db.materials.find( {"spacegroup.crystal_system": s}, {"spacegroup.number": 1} ).sort("spacegroup.number")) print(s, materials[0]["spacegroup"]["number"], materials[-1]["spacegroup"]["number"]) ###Output cubic 195 230 trigonal 143 167 monoclinic 3 15 hexagonal 169 194 orthorhombic 16 74 tetragonal 75 142 triclinic 1 2 ###Markdown Update 
###Code import json db.elements.drop() with open('data/periodic_table.json') as f: for symbol, rest in json.load(f).items(): doc = {"Symbol": symbol} doc.update(rest) db.elements.insert_one(doc) halogens = ["F", "Cl", "Br", "I", "At"] def halide_systems(): for halogen in halogens: X = db.elements.find_one({"Symbol": halogen})["X"] for doc in db.elements.find({"X": {"$lt": X}}): yield "-".join(sorted([doc["Symbol"], halogen])) [s for s in list(halide_systems()) if 'Cu' in s] reset_materials() db.materials.find({"chemsys": "Cu-F"}).count() result = db.materials.update_one( {"chemsys": "Cu-F"}, { "$set": { "tags": ["halide"] }, "$currentDate": {"lastModified": True} } ) result.matched_count result.modified_count db.materials.find_one({"chemsys": "Cu-F"}, {"tags": 1, "lastModified": 1, "material_id": 1}) result = db.materials.update_one( {"material_id": "mp-1229"}, {"$set": {"elasticity.calculations.source": "Private communication"}} ) print(result.matched_count, result.modified_count) result = db.materials.update_many( {"chemsys": {"$in": list(halide_systems())}}, {"$addToSet": {"tags": "halide"}} ) result.matched_count, result.modified_count db.materials.find({"tags": "halide"}).count() result = db.materials.update_one( {"material_id": "mp-NaN"}, {"$set": {"elasticity.calculations.source": "Private communication"}} ) result.matched_count, result.modified_count result = db.materials.update_one( {"material_id": "mp-NaN"}, {"$set": {"elasticity.calculations.source": "Private communication"}}, upsert=True ) result.matched_count, result.modified_count, result.upserted_id db.materials.find_one({"material_id": "mp-NaN"}) ###Output _____no_output_____ ###Markdown Removing ###Code result = db.materials.delete_many({"material_id": "mp-NaN"}) result = db.materials.delete_many({"nelements": 1}) result.deleted_count result = db.materials.delete_many({}) result.deleted_count db.materials.count() db.materials.drop() ###Output _____no_output_____ ###Markdown Re(Importing) ###Code import json from mongomock import MongoClient #from pymongo import MongoClient def reset_materials(): client = MongoClient() db = client.swc db.materials.drop() with open('data/mongo-novice-materials.json') as f: db.materials.insert_many(json.load(f)) %timeit reset_materials() with open('data/materials.mongoimportable.json', 'w') as outfile: with open('data/mongo-novice-materials.json') as infile: docs = json.load(infile) outfile.writelines([json.dumps(doc)+'\n' for doc in docs]) db.materials.drop() !mongoimport --db swc --collection materials < data/materials.mongoimportable.json ###Output connected to: 127.0.0.1 2016-02-25T10:46:31.004-0800 53700 17900/second 2016-02-25T10:46:31.600-0800 check 9 66140 2016-02-25T10:46:31.607-0800 imported 66140 objects ###Markdown Aggregation ###Code cursor = db.materials.aggregate( [ {"$group": {"_id": "$nelements", "count": {"$sum": 1}}} ] ) for doc in cursor: print(doc) cursor = db.materials.aggregate( [ {"$match": {"elements": "O"}}, {"$group": {"_id": "$nelements", "count": {"$sum": 1}}} ] ) for doc in cursor: print(doc) cursor = db.materials.aggregate( [ {"$match": {"elements": "O"}}, {"$group": {"_id": "$nelements", "count": {"$sum": 1}}}, {"$sort": {"count": -1}} ] ) for doc in cursor: print(doc) db.materials.find_one() cursor = db.materials.aggregate( [ {"$group": { "_id": "$spacegroup.number", "count": {"$sum": 1}, "symbol": {"$first": "$spacegroup.symbol"}, }}, {"$sort": {"count": -1}}, {"$limit": 10} ] ) for doc in cursor: print(doc) def get_chemistry(doc): anion = db.elements.find( 
{"Symbol": {"$in": doc["elements"]}}, {"Symbol": 1, "X": 1, "_id": 0} ).sort([('X', -1)])[0]["Symbol"] if anion == "O": return "Oxide" elif anion == "S": return "Sulfide" elif anion == "F": return "Fluoride" elif anion == "Cl": return "Chloride" return None for doc in db.materials.find({"elements": {"$in": ["O", "S", "F", "Cl"]}}, {"elements": 1}): anion = get_chemistry(doc) db.materials.update_one({"_id": doc["_id"]}, {"$set": {"anion_chemistry": anion}}) cursor = db.materials.aggregate( [ {"$match": {"anion_chemistry": "Oxide"}}, {"$group": { "_id": "$spacegroup.number", "count": {"$sum": 1}, "symbol": {"$first": "$spacegroup.symbol"}, }}, {"$sort": {"count": -1}}, {"$limit": 10} ] ) for doc in cursor: print(doc) ###Output {'_id': 14, 'count': 4013, 'symbol': 'P2_1/c'} {'_id': 1, 'count': 3424, 'symbol': 'P1'} {'_id': 2, 'count': 3039, 'symbol': 'P-1'} {'_id': 15, 'count': 1856, 'symbol': 'C2/c'} {'_id': 62, 'count': 1702, 'symbol': 'Pmnb'} {'_id': 12, 'count': 1415, 'symbol': 'C2/m'} {'_id': 8, 'count': 975, 'symbol': 'Cm'} {'_id': 11, 'count': 877, 'symbol': 'P2_1/m'} {'_id': 4, 'count': 759, 'symbol': 'P2_1'} {'_id': 166, 'count': 606, 'symbol': 'R-3m'} ###Markdown Indexes ###Code %%timeit db.materials.distinct("spacegroup.crystal_system") db.materials.create_index([("spacegroup.crystal_system", 1)]) pprint(db.materials.index_information()) %%timeit db.materials.distinct("spacegroup.crystal_system") %timeit db.materials.find_one({"material_id": "mp-49"}, {"pretty_formula": 1, "_id": 0}) db.materials.create_index([("material_id", 1), ("pretty_formula", 1)]) %timeit db.materials.find_one({"material_id": "mp-49"}, {"pretty_formula": 1, "_id": 0}) db.materials.drop_index([("material_id", 1), ("pretty_formula", 1)]) db.materials.create_index([("pretty_formula", 1), ("material_id", 1)]) %timeit db.materials.find_one({"material_id": "mp-49"}, {"pretty_formula": 1, "_id": 0}) ###Output 100 loops, best of 3: 12.1 ms per loop
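###Markdown As an optional follow-up to the index timings above, the cursor's `explain()` method asks the server which plan — and therefore which index, if any — a query would use. The exact layout of the returned document varies between MongoDB versions, so treat this as a sketch. ###Code
# Optional check: ask the server how it plans to execute the query.
# The shape of the explain() output differs between MongoDB versions.
plan = db.materials.find({"material_id": "mp-49"}).explain()
print(plan.get("queryPlanner", {}).get("winningPlan"))
###Output _____no_output_____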
Coursera-lectures/BD_UCSD/big-data-4/notebooks/clustering.ipynb
###Markdown Dry Days ###Code utils.parallel_plot(P[P['relative_humidity'] < -0.5], P) ###Output _____no_output_____ ###Markdown Warm Days ###Code utils.parallel_plot(P[P['air_temp'] > 0.5], P) ###Output _____no_output_____ ###Markdown Cool Days ###Code utils.parallel_plot(P[(P['relative_humidity'] > 0.5) & (P['air_temp'] < 0.5)], P) ###Output _____no_output_____ ###Markdown Other Days ###Code utils.parallel_plot(P.iloc[[2]], P) ###Output _____no_output_____
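###Markdown `utils.parallel_plot` ships with the course materials. For reference, a roughly similar parallel-coordinates view can be produced with pandas' built-in helper; the cluster-centre values below are invented for illustration, and only the `air_temp` and `relative_humidity` feature names are taken from the cells above. ###Code
# Rough stand-in for utils.parallel_plot using pandas' built-in helper.
import pandas as pd
import matplotlib.pyplot as plt
from pandas.plotting import parallel_coordinates

centers = pd.DataFrame(
    [[0.8, -0.6], [-0.5, 0.9]],
    columns=['air_temp', 'relative_humidity'],
)
centers['cluster'] = ['0', '1']

parallel_coordinates(centers, 'cluster')
plt.show()
###Output _____no_output_____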
grafici/Prova_Mappa.ipynb
###Markdown https://medium.com/@ardito.bryan/creating-a-covid-19-map-of-italy-using-python-1f3182c1fd88 ###Code italy.plot(figsize=(10, 10)) italy2.plot(figsize=(10, 10)) vaccini = pd.read_csv('../dati/vaccini-summary-latest.csv') vaccini = vaccini.set_index('area') sum_ = vaccini.loc[['PAT', 'PAB']].sum().to_frame().transpose() sum_ = sum_.rename(index={0: 'TRE'}) vaccini.drop(['PAT', 'PAB'],inplace=True) vaccini = vaccini.append(sum_) vaccini = vaccini.sort_index() italy.sort_values(by = 'NOME_REG', inplace = True) italy.reset_index(drop=True, inplace = True) italy italy.index = vaccini.index italy italy = italy.merge(vaccini,left_index=True, right_index=True, how='right') italy['percentuale_somministrazione'] = italy['dosi_somministrate'] / italy['dosi_consegnate'] * 100 italy import matplotlib.colors as mcolors import matplotlib.cm as cm fig, ax = plt.subplots(1, figsize=(15, 15)) col = 'percentuale_somministrazione' ax.set_title('Percentuale somministrazioni per regione', fontdict={'fontsize':'25', 'fontweight':'3'}, color='Orange', fontfamily='monospace') normalize = mcolors.Normalize(vmin=italy['percentuale_somministrazione'].min(), vmax=italy['percentuale_somministrazione'].max()) #colormap = cm.jet scalarmappaple = cm.ScalarMappable(norm=normalize, cmap="Blues") #scalarmappaple.set_array(italy['percentuale_somministrazione']) italy.plot(ax=ax, column=col, cmap="Blues" # cmap=colormap, # label='ciao', # legend=True, # categorical=False # alpha = 1 ) plt.axis('off') plt.colorbar(scalarmappaple) vaccini emr = gpd.read_file('regions-with-provinces/emilia-romagna/emilia-romagna.shp') emr.plot(figsize=(10, 10)) emr['popolazione'] = 0 emr.loc[0, 'popolazione'] = 453317 emr.loc[1, 'popolazione'] = 531891 emr.loc[2, 'popolazione'] = 284329 emr.loc[3, 'popolazione'] = 398322 emr.loc[4, 'popolazione'] = 705545 emr.loc[5, 'popolazione'] = 1020096 emr.loc[6, 'popolazione'] = 343742 emr.loc[7, 'popolazione'] = 388863 emr.loc[8, 'popolazione'] = 339796 emr province = pd.read_csv('../../contagio/COVID-19/dati-province/dpc-covid19-ita-province.csv') province.data = pd.to_datetime(province.data) province.data = province.data.dt.strftime('%d/%m/%Y') province_grouped = province.groupby(province.denominazione_provincia) ferrara = province_grouped.get_group('Ferrara') bologna = province_grouped.get_group('Bologna') rimini = province_grouped.get_group('Rimini') ravenna = province_grouped.get_group('Ravenna') modena = province_grouped.get_group('Modena') forlì_cesena = province_grouped.get_group('Forlì-Cesena') piacenza = province_grouped.get_group('Piacenza') reggio_emilia = province_grouped.get_group("Reggio nell'Emilia") parma = province_grouped.get_group('Parma') parma from datetime import date, timedelta yesterday = (date.today() - timedelta(days=1)).strftime('%d/%m/%Y') today = date.today().strftime('%d/%m/%Y') province_grouped = province.groupby(province.data) province_oggi = province_grouped.get_group(yesterday) province_oggi province_gruppo = province_oggi.groupby(province.denominazione_regione) province_emr1 = province_gruppo.get_group('Emilia-Romagna') province_emr1 province_emr = province_emr1.set_index('denominazione_provincia') province_emr = province_emr.sort_index() emr.sort_values(by = 'NOME_PRO', inplace = True) emr.reset_index(drop=True, inplace = True) province_emr.drop(['Fuori Regione / Provincia Autonoma', 'In fase di definizione/aggiornamento'],inplace=True) emr.index = province_emr.index emr_def = emr.merge(province_emr,left_index=True, right_index=True, how='right') 
emr_def['per_100000'] = emr_def['totale_casi'] / emr['popolazione'] * 1000 emr_def fig, ax = plt.subplots(1, figsize=(15, 15)) col = 'per_100000' ax.set_title('Casi per provincia per 1000 abitanti', fontdict={'fontsize':'25', 'fontweight':'3'}, color='Orange', fontfamily='monospace') normalize = mcolors.Normalize(vmin=emr_def['per_100000'].min(), vmax=emr_def['per_100000'].max()) #colormap = cm.jet scalarmappaple = cm.ScalarMappable(norm=normalize, cmap="Blues") scalarmappaple.set_array(emr_def['per_100000']) emr_def.plot(ax=ax, column=col, cmap="Blues" # cmap=colormap, # label='ciao', # legend=True, # categorical=False # alpha = 1 ) plt.axis('off') cbar = plt.colorbar(scalarmappaple, shrink=0.3) cbar.ax.tick_params(labelsize=11) ax.text(1, 0, 'Grafico di Aidin Attar - Fonte dati: https://github.com/italia/covid19-opendata-vaccini', transform=ax.transAxes, fontsize=8, color='black', alpha=0.8, ha='right', va='bottom', rotation='0') #print(italy) plt.savefig('emr.png', dpi = 300) ###Output _____no_output_____
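###Markdown One thing to watch in the cell above: the new column is named `per_100000`, but the value is computed as cases per 1,000 inhabitants (which matches the plot title "Casi per provincia per 1000 abitanti"). If a true per-100,000 figure is wanted, a sketch like the following — reusing `emr_def` and `emr` from the cells above — makes the naming explicit. ###Code
# Sketch: an explicitly named per-1,000 column plus a true per-100,000 variant.
emr_def['casi_per_1000'] = emr_def['totale_casi'] / emr['popolazione'] * 1000
emr_def['casi_per_100000'] = emr_def['totale_casi'] / emr['popolazione'] * 100000
emr_def[['totale_casi', 'casi_per_1000', 'casi_per_100000']].head()
###Output _____no_output_____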
demo_notebook.ipynb
###Markdown Trace Demo This notebook uses the trace package to run through multiple example calculations. The following are included: 1. A toy example 2. Verifying the lensmaker's equation 3. Calculating the RMS of a plano-convex lens 4. Optimizing a lens 5. Modeling and optimizing a telescope. The idea is to demonstrate increasingly advanced usage of trace. **NOTICE:** Each section in the notebook can be run independently, but *must be run in order*! ###Code
import numpy as np import matplotlib.pyplot as plt import trace as tr # Can be fun for interactive 3D graphics # %matplotlib notebook ###Output _____no_output_____
###Markdown Basic Usage (Toy Example) The following demonstrates the basic principles of trace. ###Code
# Make a scene and include a lens scene = tr.Scene() # Convex at origin, pointing upwards lens = tr.SphereLens(.1, 0.8, 1, pos=tr.pos(0, 0, 0), axis=tr.vec(0, 0, 1), n=2.0) lamp = tr.DenseSource(pos=tr.pos(0, 0, 2), k=tr.vec(0, 0, -1), radius=0.8) # Add the elements to the simulation and run (notice that many elements can be added at once) scene.add(lens, lamp) scene.trace() # Show result tr.graphics.render_3d(scene, extend=25) plt.show() ###Output _____no_output_____
###Markdown Verifying the Lensmaker's Equation Here we use trace to run a simulation which verifies the [lensmaker's equation](https://en.m.wikipedia.org/wiki/Focal_length#General_optical_systems), which relates the geometry of a lens to its focal length: $$ \frac{1}{f} = (n-1)\left(\frac{1}{R_1}-\frac{1}{R_2}+\frac{(n-1)d}{n R_1 R_2}\right). $$ Given $f$, the distance from the surface of the lens to the focal point is given by: $$ \text{F} = f\left(1-\frac{(n-1) d}{n R_1}\right). $$ We will find that the simulation agrees with this equation. ###Code
# Calculate the focal length using the lensmaker's equation N = 2 R1 = 10 R2 = -R1 d = 4 distance = 1 / (N-1) / (1/R1 - 1/R2 + (N-1)*d/N/R1/R2) * (1 - (N-1)*d/N/R1) print("Distance to surface: {}".format(distance)) # Assemble the scene using trace and calculate # the distance to the surface scene = tr.Scene(n=1) lens_f = tr.SphereLens(1/R1, 6, 4, pos=tr.pos(-d/2,0,0), axis=tr.vec(-1,0,0), n=N) lens_b = tr.SphereLens(1/R2, 6, 0, pos=tr.pos(+d/2,0,0), axis=tr.vec(-1,0,0), n=1) ray = tr.Ray(tr.pos(-10, 0.001, 0.001), k=tr.vec(1,0,0)) scene.add(lens_f, lens_b, ray) scene.trace() _, focal_pt = ray.intersect_axis(tr.pos(0,0,0), tr.vec(1,0,0)) # The distance to the surface is the focal point - extent of lens in positive x print("Distance to surface: {}".format(focal_pt[0] - d/2)) ax = tr.graphics.render_3d(scene, extend=10, labels=False).axes[0] ax.scatter(*focal_pt, label="Focal Point Simulation", marker="x", color="tab:red", s=50) ax.scatter(distance+d/2, 0, 0, label="Focal Point Equation", marker="x", color="tab:orange", s=50) ax.legend() plt.show() ###Output Distance to surface: 4.4444441851639755
###Markdown Modelling a plano-convex singlet lens This example demonstrates how the trace package can be used to complete task 15 in the laboratory manual. Calculating the RMS for the curved surface facing the beam is done in great detail, with plots of all intermediate steps. The other way around is done in a single cell, showing only a plot of the final scene and result.
###Code # Start by preparing a scene scene = tr.Scene() # Pano-convex lens (all lengths in mm) # This lens faces the source with the curved surface lens_front = tr.SphereLens(0.02, 6, 5, pos=tr.pos(0,0,0), axis=tr.vec(1,0,0), n=1.5168) lens_back = tr.PlaneLens(pos=tr.pos(-5,-6,-6), normal=tr.vec(1,0,0), width=tr.vec(0,12,0), height=tr.vec(0,0,12), n=1) lamp = tr.DenseSource(pos=tr.pos(25,0,0), k=tr.vec(-1,0,0), radius=5, density=1) scene.add(lens_front, lens_back, lamp) # Render the current scene, to validate the # placement of objects is correct scene.trace() tr.graphics.render_3d(scene, extend=130) plt.show() # Find focal point using single ray scene = scene.reset() focus_ray = tr.Ray(tr.pos(8,0.1,0.1), k=tr.vec(-1,0,0)) scene.add(focus_ray) scene.trace_ray(focus_ray) pt_ray, pt_axi = focus_ray.intersect_axis(tr.pos(0,0,0), tr.vec(1,0,0)) print("Focal Point: {}".format(pt_axi)) # Display the focal point fig = tr.graphics.render_3d(scene, extend=130) fig.axes[0].scatter(*pt_axi, color="red", marker="x", s=200, label="Focal Point") fig.legend() plt.show() # Now that we know the focal point, place a screen there and record the rms scene = scene.reset() # Note: This automagically removes the focus ray screen = tr.Screen(pos=pt_axi-tr.vec(0,5,5), normal=tr.vec(1,0,0), width=tr.vec(0,10,0), height=tr.vec(0,0,10)) scene.add(screen) scene.trace() # Calculate RMS from focal point print("RMS: {}mm".format(screen.RMS(pt_axi))) # Make nice output graphics tr.graphics.render_3d(scene) ax = tr.graphics.render_2d(screen).axes[0] ax.set_title("Focal Plane") ax.plot(pt_axi[1], pt_axi[2], "xr", label="Focal Point", markersize=14) ax.set_xlabel("y in mm") ax.set_ylabel("z in mm") ax.legend() plt.show() # Without running through all the visuals, do the same trace from the other side scene = tr.Scene() # Pano-convex lens (all lengths in mm) # This lens faces the source with the flat surface lens_front = tr.SphereLens(-0.02, 6, 0, pos=tr.pos(-5,0,0), axis=tr.vec(1,0,0), n=1) lens_back = tr.PlaneLens(pos=tr.pos(0,-6,-6), normal=tr.vec(1,0,0), width=tr.vec(0,12,0), height=tr.vec(0,0,12), n=1.5168) scene.add(lens_front, lens_back, lamp) # Note: Reuse lamp from above # Find focal point focus_ray = tr.Ray(tr.pos(8,0.1,0.1), k=tr.vec(-1,0,0)) scene.add(focus_ray) scene.trace_ray(focus_ray) _, pt_axi = focus_ray.intersect_axis(tr.pos(0,0,0), tr.vec(1,0,0)) print("Focal Point: {}".format(pt_axi)) # Insert screen at focal point scene = scene.reset() screen = tr.Screen(pos=pt_axi-tr.vec(0,5,5), normal=tr.vec(1,0,0), width=tr.vec(0,10,0), height=tr.vec(0,0,10)) scene.add(screen) scene.trace() # Calculate RMS from focal point print("RMS: {}mm".format(screen.RMS(pt_axi))) # Make nice output graphics tr.graphics.render_3d(scene, labels=False) ax = tr.graphics.render_2d(screen).axes[0] ax.set_title("Focal Plane") ax.plot(pt_axi[1], pt_axi[2], "xr", label="Focal Point", markersize=14) ax.set_xlabel("y in mm") ax.set_ylabel("z in mm") ax.legend() plt.show() ###Output Focal Point: [-101.74833565 0. 0. ] RMS: 0.030094346151877782mm ###Markdown ResultThis example should demonstrate the ease of use provided by trace, as well as how this lens has a much worse performace the 'wrong' way around.Numerical results for RMS of the plano-convex lens:1. Curved surface facing beam: ${RMS}_\text{c} = 0.00756\text{mm}$2. Flat surface facing beam: ${RMS}_\text{f} = 0.03009\text{mm}$The RMS when facing the beam with the flat surface is more than $30\times$ greater! Lens optimizationThis optimizes the design of a lens using trace. 
This is the last task in the laboratory manual. Volatile typesGeometry is immutable. This has several advantages, such as making interactive sessions like the one abovemore safe and allowing very efficient implementations of things like `Scene.reset()`, which creates a newscene, but is able to share all geometry objects.Hovever, creating an new object every time we want to update the curvature of a lens is rather costly. To that end, `trace` introduces volatile types. These are less safe and are not supported in operations like `Scene.reset()`, butallow for efficient in-place variation of geometry attributes. (Note that there is a special VolatileScene, that canreset itself in-place, even if containing volatile geometry.)To create a volatile type, any variable parameters must be passed as `Variable(initial_value)`. Before this object canbe used, it must be marked as volatile by calling `obj = make_volatile(obj)`. Once a volatile type has been marked volatile, the associated variables may be altered at any time by calling `variable.set(new_value)`. Note, that thisdoes not perform runtime-checks on the new values.(This functionality is implemented in `trace.optim`) ###Code # We will need a numeric function optimizer for this, like the ones provided by scipy import scipy.optimize as sp_optim # Next, we need to defined a function that calculates the RMS based on the lens curvatures # Our lens is 5 mm deep and has initial curvatures of 0.02 fit_scene = tr.VolatileScene(n=1) curvature_f = tr.Variable(0.02) curvature_b = tr.Variable(-0.02) fit_lens_f = tr.make_volatile(tr.SphereLens(curvature_f, 6, 5, pos=tr.pos(0,0,0), axis=tr.vec(1,0,0), n=1.5168)) fit_lens_b = tr.make_volatile(tr.SphereLens(curvature_b, 6, 0, pos=tr.pos(-5,0,0), axis=tr.vec(1,0,0), n=1)) fit_lamp = tr.DenseSource(radius=5, pos=tr.pos(4,0,0), k=tr.vec(-1,0,0), density=0.8) fit_screen = tr.Screen(pos=tr.pos(-50,0,0)-tr.vec(0,5,5), normal=tr.vec(1,0,0), width=tr.vec(0,10,0), height=tr.vec(0,0,10)) fit_scene.add(fit_lens_f, fit_lens_b, fit_lamp, fit_screen) def fn_to_min(x): # Step 0: Reset the scene and apply variables # (this is different from resetting before, since we use a VolatileScene) fit_scene.reset() curvature_f.set(x[0]) curvature_b.set(x[1]) # Step 1: Trace rays and report RMS fit_scene.trace() return fit_screen.RMS(tr.pos(-50,0,0)) # Let's see if our setup is working ... print("Initial RMS: {}".format(fn_to_min([0.02, -0.02]))) tr.graphics.render_3d(fit_scene, labels=False) tr.graphics.render_2d(fit_screen) plt.show() # Now we use scipy to minimize fn_to_min result = sp_optim.minimize(fn_to_min, [0.02, -0.02], bounds=[(1e-10, 0.1), (-0.1, -1e-10)]) print(result) # Finally, we can render the resulting image print("Optimized RMS: {}".format(fn_to_min(result.x))) tr.graphics.render_3d(fit_scene, labels=False) tr.graphics.render_2d(fit_screen) plt.show() ###Output Optimized RMS: 0.008512098549243199 ###Markdown ResultThis example should demonstrate the principles of using trace to optimize a set of variable geometry parameters.Using a function minimizer, we found a configuration that has a focal point in the desired plane with${RMS} = 0.00851\text{mm}$. Modelling a telescopeThis example uses trace to model a simple telescope, which uses one collimating mirror, a second mirror to redirectthe beam and a lens to restore the rays being parallel. 
Essentially, the setup will shrink a big beam into a smallerone.Trace is used to model this telescope and to adjust the properties of the terminating lens to achieve a parallelbeam at the end. ###Code # As per usual, we start with a scene scene = tr.VolatileScene() # We will choose a refractive index and curvature to get a nice even picture # (just to demonstrate, that we can vary any parameter that is a scalar) ref_index = tr.Variable(15) curv_eyepiece = tr.Variable(0.02) curv_eyepiece_neg = tr.Variable(-0.02) # Place mirrors and lenses to assemble our telescope big_mirror = tr.SphereMirror(-0.035, 6, 1, pos=tr.pos(0,0,0), axis=tr.vec(0,-1,0)) small_mirror = tr.PlaneMirror(pos=tr.pos(-1,-10,-1), width=tr.vec(2,0,0), height=tr.vec(0,-2,2), normal=tr.vec(0,1,1)) eyepiece_b = tr.make_volatile(tr.SphereLens(curv_eyepiece, 2, 1, pos=tr.pos(0,-10.8,6), axis=tr.vec(0,0,-1), n=ref_index)) eyepiece_t = tr.make_volatile(tr.SphereLens(curv_eyepiece_neg, 2, 1, pos=tr.pos(0,-10.8,7), axis=tr.vec(0,0,-1), n=1)) screen = tr.Screen(pos=tr.pos(-2,-12.8,10), width=tr.vec(4,0,0), height=tr.vec(0,4,0), normal=tr.vec(0,0,-1)) the_universe_or_something = tr.DenseSource(pos=tr.pos(0,-20,0), k=tr.vec(0,1,0), radius=5.5, density=1) scene.add(big_mirror, small_mirror, eyepiece_b, eyepiece_t, screen) scene.add(the_universe_or_something) # We will get parallel beams on the screen, by minimizing # the sum of the differences in k-vectors def min_max_difference(x): # Step 0: Reset the scene and apply variables # (this is different from resetting before, since we use a VolatileScene) scene.reset() ref_index.set(x[0]) curv_eyepiece.set(x[1]) curv_eyepiece_neg.set(-x[1]) # Step 1: Trace rays scene.trace() # Step 2: We want our beam to be nice and parallel return sum([abs(ray.k[0]) + abs(ray.k[1]) for ray in screen.hits]) import scipy.optimize as sp_optim # in case, this example is run on its own result = sp_optim.minimize( min_max_difference, [15, 0.02], method="SLSQP", bounds=[(1, 5), (0.01, 1.0)], options={"maxiter": 200}) print(result) # Render the result scene.reset() scene.trace() tr.graphics.render_3d(scene, extend=0, labels=False) ax = tr.graphics.render_2d(screen).axes[0] ax.set_title("Image at eyepiece") plt.show() ###Output _____no_output_____ ###Markdown ResultThis example demonstrates a more complex scene. The principle of optimizing given geometry parameters is the sameas in the previous example.The final image output shows a nice (appriximately) even patter on the output plane. Chromatic aberrationThis uses trace to render the familiar prism graphic. It also demonstrates how to model materials for which therefractive index depends on the ray in some way. Variable refractive indicesTo model a material with variable refractive index, a callable object can be passed instead of a number. The objectshould accept a `Ray` or `None` and return a number (the refractive index). For indices that depend on the wavelength,a decorator is available (`tr.materials.accept_ray`). When using this decorator, your function is always directlypassed the wavelength.Trace also provides some common glass materials from `tr.materials`:- BK7- BAF10- BAK1- FK51A(**Note:** More types are provided by `tr.materials.online(type_str)`, and a list of available types can be found in`tr.materials.online_types`. These types are downloaded from an online-source and as such may be _unavailable_ or_erroneous_ at any time and without warning.) 
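###Markdown As a small illustration of the callable-index mechanism described above (an added sketch, not part of the original notebook), a wavelength-dependent material can be defined with the `tr.materials.accept_ray` decorator mentioned in the text. The Cauchy coefficients are illustrative only, and the wavelength is assumed to arrive in metres, consistent with the prism cell that follows. ###Code
import trace as tr

# Simple two-term Cauchy dispersion: n(lambda) = A + B / lambda^2.
# The (made-up) coefficients give n ~ 1.52 at 500 nm, roughly crown-glass-like.
@tr.materials.accept_ray
def cauchy_glass(wavelength):
    A = 1.5046
    B = 4.2e-15   # m^2, assuming the wavelength is passed in metres
    return A + B / wavelength**2

# The callable can then be passed wherever a refractive index is expected,
# e.g. n=cauchy_glass when building a PlaneLens, just like tr.materials.BK7 below.
###Output _____no_output_____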
###Code # We need a scene with light rays at different frequencies scene = tr.Scene() # We will use BK7 glass glass = tr.materials.BK7 # Build a 2D prism ln, lx, ly = tr.vec(-1,0,1), tr.vec(0,2,0), tr.vec(+1,0,1) rn, rx, ry = tr.vec(+1,0,1), tr.vec(0,2,0), tr.vec(-1,0,1) prism_left = tr.PlaneLens(pos=tr.vec(-2,0,0), normal=ln, width=2*lx, height=2*ly, n=glass) prism_right = tr.PlaneLens(pos=tr.vec(2,0,0), normal=rn, width=2*rx, height=2*ry, n=1.3) scene.add(prism_left, prism_right) # Now we want to add a range of rays scene = scene.reset() for i in range(0, 10): wavelen=400e-9 + i * 300e-9 / 9 scene.add(tr.Ray( origin=tr.pos(-2,1,1), k=tr.vec(1,0,.47), frequency=tr.materials.c/wavelen )) scene.trace() # Since we care about the ray wavelengths in this simulation, we # use the 'chromatic' flag of render_3d to tell trace that we want # rays to be rendered in the color corresponding to their wavelength tr.graphics.render_3d(scene, labels=None, extend=5, chromatic=True) plt.show() ###Output _____no_output_____ ###Markdown Demo notebook This notebook just demostrates how to use my solution to solve abstract reasoning tasks ###Code %load_ext autoreload %autoreload 2 import os from src.predictors import * from src.submission_utils import run_parallel TRAIN_PATH = 'data/training' VAL_PATH = 'data/evaluation' train_files = [x for x in os.listdir(TRAIN_PATH) if x[-4:]=='json'] valid_files = [x for x in os.listdir(VAL_PATH) if x[-4:]=='json'] predictors =[ Fill({}), Pattern({}), Colors({}), EliminateColor({}), EliminateDuplicates({}), ConnectDotsAllColors({}), ReconstructMosaic({}), ReconstructMosaicRRExtract({}), FillLines({}), ] preprocess_params = [ "initial", ] color_params = ["coverage", "unique", "corners", "top", "grid"] result = run_parallel(train_files, TRAIN_PATH, predictors, preprocess_params, color_params, timeout = 1200, processes = 20, max_memory_by_process = 1.4e+10 ) len(set([x['output_id'][:-2] for x in result])) result = run_parallel(valid_files, VAL_PATH, predictors, preprocess_params, color_params, timeout = 1200, processes = 20, max_memory_by_process = 1.4e+10 ) len(set([x['output_id'][:-2] for x in result])) ###Output _____no_output_____ ###Markdown CNNGeometric demo notebookThis notebook shows how to run a trained model on a given image pair Imports ###Code from __future__ import print_function, division import os import argparse import torch import torch.nn as nn from torch.utils.data import Dataset, DataLoader from model.cnn_geometric_model import CNNGeometric from data.pf_dataset import PFDataset from data.download_datasets import download_PF_willow from image.normalization import NormalizeImageDict, normalize_image from util.torch_util import BatchTensorToVars, str_to_bool from geotnf.transformation import GeometricTnf from geotnf.point_tnf import * import matplotlib.pyplot as plt from skimage import io import warnings from torchvision.transforms import Normalize from collections import OrderedDict warnings.filterwarnings('ignore') ###Output _____no_output_____ ###Markdown Parameters ###Code feature_extraction_cnn = 'resnet101' if feature_extraction_cnn=='vgg': model_aff_path = 'trained_models/best_pascal_checkpoint_adam_affine_grid_loss.pth.tar' model_tps_path = 'trained_models/best_pascal_checkpoint_adam_tps_grid_loss.pth.tar' elif feature_extraction_cnn=='resnet101': model_aff_path = 'trained_models/best_pascal_checkpoint_adam_affine_grid_loss_resnet_random.pth.tar' model_tps_path = 'trained_models/best_pascal_checkpoint_adam_tps_grid_loss_resnet_random.pth.tar' 
source_image_path='datasets/PF-dataset/duck(S)/060_0036.png' target_image_path='datasets/PF-dataset/duck(S)/060_0013.png' ###Output _____no_output_____ ###Markdown Load models ###Code use_cuda = torch.cuda.is_available() do_aff = not model_aff_path=='' do_tps = not model_tps_path=='' # Create model print('Creating CNN model...') if do_aff: # Setting the Output Dimension to 6 for the Affine Geometric Model model_aff = CNNGeometric(use_cuda=use_cuda, output_dim=6, feature_extraction_cnn=feature_extraction_cnn) if do_tps: # Setting the Output Dimension to 18 for the tps Geometric Model model_tps = CNNGeometric(use_cuda=use_cuda,output_dim=18, feature_extraction_cnn=feature_extraction_cnn) # Load trained weights print('Loading trained model weights...') if do_aff: checkpoint = torch.load(model_aff_path, map_location=lambda storage, loc: storage) checkpoint['state_dict'] = OrderedDict([(k.replace('vgg', 'model'), v) for k, v in checkpoint['state_dict'].items()]) model_aff.load_state_dict(checkpoint['state_dict']) if do_tps: checkpoint = torch.load(model_tps_path, map_location=lambda storage, loc: storage) checkpoint['state_dict'] = OrderedDict([(k.replace('vgg', 'model'), v) for k, v in checkpoint['state_dict'].items()]) model_tps.load_state_dict(checkpoint['state_dict']) ###Output Creating CNN model... Loading trained model weights... ###Markdown Create image transformers ###Code tpsTnf = GeometricTnf(geometric_model='tps', use_cuda=use_cuda) affTnf = GeometricTnf(geometric_model='affine', use_cuda=use_cuda) ###Output _____no_output_____ ###Markdown Load and preprocess images ###Code resizeCNN = GeometricTnf(out_h=240, out_w=240, use_cuda = False) normalizeTnf = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) def preprocess_image(image): # convert to torch Variable image = np.expand_dims(image.transpose((2,0,1)),0) image = torch.Tensor(image.astype(np.float32)/255.0) image_var = Variable(image,requires_grad=False) # Resize image using bilinear sampling with identity affine tnf image_var = resizeCNN(image_var) # Normalize image image_var = normalize_image(image_var) return image_var source_image = io.imread(source_image_path) target_image = io.imread(target_image_path) source_image_var = preprocess_image(source_image) target_image_var = preprocess_image(target_image) if use_cuda: source_image_var = source_image_var.cuda() target_image_var = target_image_var.cuda() batch = {'source_image': source_image_var, 'target_image':target_image_var} resizeTgt = GeometricTnf(out_h=target_image.shape[0], out_w=target_image.shape[1], use_cuda = use_cuda) ###Output _____no_output_____ ###Markdown Evaluate model ###Code if do_aff: model_aff.eval() if do_tps: model_tps.eval() # Evaluate models if do_aff: theta_aff=model_aff(batch) warped_image_aff = affTnf(batch['source_image'],theta_aff.view(-1,2,3)) if do_tps: theta_tps=model_tps(batch) warped_image_tps = tpsTnf(batch['source_image'],theta_tps) if do_aff and do_tps: theta_aff_tps=model_tps({'source_image': warped_image_aff, 'target_image': batch['target_image']}) warped_image_aff_tps = tpsTnf(warped_image_aff,theta_aff_tps) ###Output _____no_output_____ ###Markdown Process result ###Code # Un-normalize images and convert to numpy if do_aff: warped_image_aff_np = normalize_image(resizeTgt(warped_image_aff),forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy() if do_tps: warped_image_tps_np = normalize_image(resizeTgt(warped_image_tps),forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy() if do_aff and 
do_tps: warped_image_aff_tps_np = normalize_image(resizeTgt(warped_image_aff_tps),forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy() ###Output _____no_output_____ ###Markdown Display ###Code N_subplots = 2+int(do_aff)+int(do_tps)+int(do_aff and do_tps) fig, axs = plt.subplots(1,N_subplots) axs[0].imshow(source_image) axs[0].set_title('src') axs[1].imshow(target_image) axs[1].set_title('tgt') subplot_idx = 2 if do_aff: axs[subplot_idx].imshow(warped_image_aff_np) axs[subplot_idx].set_title('aff') subplot_idx +=1 if do_tps: axs[subplot_idx].imshow(warped_image_tps_np) axs[subplot_idx].set_title('tps') subplot_idx +=1 if do_aff and do_tps: axs[subplot_idx].imshow(warped_image_aff_tps_np) axs[subplot_idx].set_title('aff+tps') for i in range(N_subplots): axs[i].axis('off') fig.set_dpi(150) plt.show() ###Output _____no_output_____ ###Markdown Core usage and comparison to pandas ###Code x_in = np.random.rand(1000,10000) window_length = 1001 # must be odd integer percentile = 10 # 0-100 initialize_rp() # not necessary, just used to compile all the jit functions and store in cache tic = time() # This is the core function. Note that the rolling percentile is calculated along dim 1 (rows), and that edges are currently set to NaNs. x_out_rp = rolling_percentile(x_in, win_len=window_length, ptile=percentile) print(f'rolling_percentile computation time: {time() - tic} seconds') x_in_df = pd.DataFrame(x_in) tic = time() # This is pandas' version. Note that these settings are intended to match how 'rolling_percentile' works. x_out_pd = x_in_df.rolling(window=window_length, center=True, axis=1).quantile(quantile=(percentile/100)) print(f'pandas computation time: {time() - tic} seconds') x_out_pd = np.array(x_out_pd) win_half_len = window_length//2 outputs_equivalent_check = np.allclose(x_out_rp[:,win_half_len:-(win_half_len+1)], x_out_pd[:,win_half_len:-(win_half_len+1)]) print(f'Outputs from rolling_percentile and pandas are exactly the same: {outputs_equivalent_check}') plt.figure() plt.plot(x_out_rp[0]) plt.plot(x_out_pd[0]) ###Output _____no_output_____ ###Markdown Benchmarking ###Code from blist import sortedlist as blist_sortedlist from sortedcontainers import SortedList as sortcont_SortedList from bisect import bisect_left, insort from blist import blist import rolling_quantiles as rq x_in = np.random.rand(5000, 108000) ptile = 10 wins_toTest = 2**np.arange(1,15)+1 wins_toTest = [3, 31, 301, 3001, 18001] print(f'window lengths to test: {wins_toTest}') # driver code for some of the functions below # driver for: # - sortedcontainers' SortedList # - blist's sortedlist def rp_fun_sl_type1(x_in, win_len, ptile, sortedListObj): win_len_half = int(win_len/2) win_len_half idx_ptile = int(win_len * (ptile/100)) # initialize output out_ptile = np.empty_like(x_in) out_ptile[:] = np.nan for jj in range(x_in.shape[0]): x_win_sorted = sortedListObj(x_in[jj][0:win_len]) for ii in range(win_len_half, x_in.shape[1] - win_len_half-1): out_ptile[jj][ii] = x_win_sorted[idx_ptile] # centered rolling window idx_new = ii + win_len_half + 1 val_new = x_in[jj, idx_new] idx_old = ii-win_len_half val_old = x_in[jj, idx_old] x_win_sorted.remove(val_old) x_win_sorted.add(val_new) return out_ptile # driver for: # - blist' blist # - list def rp_fun_sl_type2(x_in, win_len, ptile, sortedListObj): win_len_half = int(win_len/2) win_len_half idx_ptile = int(win_len * (ptile/100)) # initialize output out_ptile = np.empty_like(x_in) out_ptile[:] = np.nan for jj in range(x_in.shape[0]): x_win_sorted = 
sortedListObj(np.sort(x_in[jj][0:win_len])) for ii in range(win_len_half, x_in.shape[1] - win_len_half-1): out_ptile[jj][ii] = x_win_sorted[idx_ptile] # centered rolling window idx_new = ii + win_len_half + 1 val_new = x_in[jj, idx_new] idx_old = ii-win_len_half val_old = x_in[jj, idx_old] del x_win_sorted[bisect_left(x_win_sorted, val_old)] insort(x_win_sorted, val_new) return out_ptile import multiprocessing as mp from functools import partial def rq_driver(x_in, win_len, ptile=10): pipe = rq.Pipeline( rq.LowPass(window=win_len, quantile=(ptile/100)) ) return pipe.feed(x_in) def multiprocessing_pool_along_axis(x_in, function, axis=0, **kwargs): pool = mp.Pool(processes=None) if axis==0: results = pool.map(partial(function , **kwargs), [x_in[ii] for ii in np.arange(x_in.shape[0])]) pool.close() pool.join() return np.row_stack(results) elif axis==1: results = pool.map(partial(function , **kwargs), [x_in[:,ii] for ii in np.arange(x_in.shape[1])]) pool.close() pool.join() return np.column_stack(results) def rp_rq(x_in, win_len, ptile=10): output_rq = multiprocessing_pool_along_axis(x_in, rq_driver, axis=0, **{'win_len': win_len , 'ptile': ptile} ) def rp_mine(x_in, win_len, ptile=10): return rolling_percentile(x_in, win_len=win_len, ptile=ptile) def rp_pandas(x_in, win_len, ptile=10): x_in_df = pd.DataFrame(x_in) x_out_pd = x_in_df.rolling(window=win_len, center=True, axis=1).quantile(quantile=(ptile/100)) return np.array(x_out_pd) # def rp_rq_forLoop(x_in, win_len, ptile=10): # pipe = rq.Pipeline( rq.LowPass(window=win_len, quantile=(ptile/100)) ) # return np.array([pipe.feed(x_in[ii]) for ii in range(x_in.shape[0])]) def rp_sortedcontainers(x_in, win_len, ptile=10): return rp_fun_sl_type1(x_in, win_len=win_len, ptile=ptile, sortedListObj=sortcont_SortedList) def rp_blist_sortedlist(x_in, win_len, ptile=10): return rp_fun_sl_type1(x_in, win_len=win_len, ptile=ptile, sortedListObj=blist_sortedlist) def rp_blist(x_in, win_len, ptile=10): return rp_fun_sl_type2(x_in, win_len=win_len, ptile=ptile, sortedListObj=blist) def rp_list(x_in, win_len, ptile=10): return rp_fun_sl_type2(x_in, win_len=win_len, ptile=ptile, sortedListObj=list) rp_functions_dict = { 'mine': rp_mine, # very fast for small win_len (<10,000), slow for large win_len (>10,000) 'rolling_quantiles': rp_rq, # fast for all win_len 'pandas': rp_pandas, # medium for all win_len # 'sortedcontainers': rp_sortedcontainers, # slow for small win_len, fast for win_len>100,000 # 'blist_sortedlist': rp_blist_sortedlist, # slow # 'blist': rp_blist, # slow # 'list': rp_list, # slow } rp_functions = list(rp_functions_dict.items()) out_times = np.zeros((len(wins_toTest), len(rp_functions)), dtype='float64') for ii, win_len in enumerate(wins_toTest): for jj, rp_fun in enumerate(rp_functions): print(f'win_len: {win_len}, running: {rp_fun[0]}') tic = time() out_ptile = rp_fun[1](x_in, win_len=win_len, ptile=ptile) out_times[ii,jj] = time() - tic %matplotlib notebook n_toShow = 4 plt.figure() plt.plot(wins_toTest, out_times[:,:n_toShow]); plt.xscale('log') plt.yscale('log') plt.xlabel('window length') plt.ylabel('time in (s)') plt.legend(list(rp_functions_dict.keys())[:n_toShow]) %matplotlib notebook n_toShow = 4 plt.figure() plt.plot(wins_toTest, out_times[:,:n_toShow]); plt.xscale('log') plt.yscale('log') plt.xlabel('window length') plt.ylabel('time in (s)') plt.legend(list(rp_functions_dict.keys())[:n_toShow]) %matplotlib notebook n_toShow = 4 plt.figure() plt.plot(wins_toTest, out_times[:,:n_toShow]); plt.xscale('log') plt.yscale('log') 
plt.xlabel('window length') plt.ylabel('time in (s)') plt.legend(list(rp_functions_dict.keys())[:n_toShow]) ###Output _____no_output_____ ###Markdown CNNGeometric demo notebookThis notebook shows how to run a trained model on a given image pair Imports ###Code from __future__ import print_function, division import os import argparse import torch import torch.nn as nn from torch.utils.data import Dataset, DataLoader from model.cnn_geometric_model import CNNGeometric from data.pf_dataset import PFDataset from data.download_datasets import download_PF_willow from image.normalization import NormalizeImageDict, normalize_image from util.torch_util import BatchTensorToVars, str_to_bool from geotnf.transformation import GeometricTnf from geotnf.point_tnf import * import matplotlib.pyplot as plt from skimage import io import warnings from torchvision.transforms import Normalize from collections import OrderedDict warnings.filterwarnings('ignore') ###Output _____no_output_____ ###Markdown Parameters ###Code feature_extraction_cnn = 'resnet101' if feature_extraction_cnn=='vgg': model_aff_path = 'trained_models/best_pascal_checkpoint_adam_affine_grid_loss.pth.tar' model_tps_path = 'trained_models/best_pascal_checkpoint_adam_tps_grid_loss.pth.tar' elif feature_extraction_cnn=='resnet101': model_aff_path = 'trained_models/best_pascal_checkpoint_adam_affine_grid_loss_resnet_random.pth.tar' model_tps_path = 'trained_models/best_pascal_checkpoint_adam_tps_grid_loss_resnet_random.pth.tar' source_image_path='datasets/PF-dataset/duck(S)/060_0036.png' target_image_path='datasets/PF-dataset/duck(S)/060_0013.png' ###Output _____no_output_____ ###Markdown Load models ###Code use_cuda = torch.cuda.is_available() do_aff = not model_aff_path=='' do_tps = not model_tps_path=='' # Create model print('Creating CNN model...') if do_aff: model_aff = CNNGeometric(use_cuda=use_cuda,geometric_model='affine',feature_extraction_cnn=feature_extraction_cnn) if do_tps: model_tps = CNNGeometric(use_cuda=use_cuda,geometric_model='tps',feature_extraction_cnn=feature_extraction_cnn) # Load trained weights print('Loading trained model weights...') if do_aff: checkpoint = torch.load(model_aff_path, map_location=lambda storage, loc: storage) checkpoint['state_dict'] = OrderedDict([(k.replace('vgg', 'model'), v) for k, v in checkpoint['state_dict'].items()]) model_aff.load_state_dict(checkpoint['state_dict']) if do_tps: checkpoint = torch.load(model_tps_path, map_location=lambda storage, loc: storage) checkpoint['state_dict'] = OrderedDict([(k.replace('vgg', 'model'), v) for k, v in checkpoint['state_dict'].items()]) model_tps.load_state_dict(checkpoint['state_dict']) ###Output Creating CNN model... Loading trained model weights... 
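###Markdown The checkpoint loading above works by renaming state-dict keys from the old `vgg` prefix to `model`. A small stand-alone helper (added here as a sketch, not part of the original notebook) makes that step reusable: ###Code
from collections import OrderedDict

def remap_state_dict(state_dict, old='vgg', new='model'):
    """Rename every occurrence of `old` in the checkpoint keys to `new`."""
    return OrderedDict((k.replace(old, new), v) for k, v in state_dict.items())

# Equivalent to the inline expressions above, e.g.:
# checkpoint['state_dict'] = remap_state_dict(checkpoint['state_dict'])
###Output _____no_output_____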
###Markdown Create image transformers ###Code tpsTnf = GeometricTnf(geometric_model='tps', use_cuda=use_cuda) affTnf = GeometricTnf(geometric_model='affine', use_cuda=use_cuda) ###Output _____no_output_____ ###Markdown Load and preprocess images ###Code resizeCNN = GeometricTnf(out_h=240, out_w=240, use_cuda = False) normalizeTnf = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) def preprocess_image(image): # convert to torch Variable image = np.expand_dims(image.transpose((2,0,1)),0) image = torch.Tensor(image.astype(np.float32)/255.0) image_var = Variable(image,requires_grad=False) # Resize image using bilinear sampling with identity affine tnf image_var = resizeCNN(image_var) # Normalize image image_var = normalize_image(image_var) return image_var source_image = io.imread(source_image_path) target_image = io.imread(target_image_path) source_image_var = preprocess_image(source_image) target_image_var = preprocess_image(target_image) if use_cuda: source_image_var = source_image_var.cuda() target_image_var = target_image_var.cuda() batch = {'source_image': source_image_var, 'target_image':target_image_var} resizeTgt = GeometricTnf(out_h=target_image.shape[0], out_w=target_image.shape[1], use_cuda = use_cuda) ###Output _____no_output_____ ###Markdown Evaluate model ###Code if do_aff: model_aff.eval() if do_tps: model_tps.eval() # Evaluate models if do_aff: theta_aff=model_aff(batch) warped_image_aff = affTnf(batch['source_image'],theta_aff.view(-1,2,3)) if do_tps: theta_tps=model_tps(batch) warped_image_tps = tpsTnf(batch['source_image'],theta_tps) if do_aff and do_tps: theta_aff_tps=model_tps({'source_image': warped_image_aff, 'target_image': batch['target_image']}) warped_image_aff_tps = tpsTnf(warped_image_aff,theta_aff_tps) ###Output _____no_output_____ ###Markdown Process result ###Code # Un-normalize images and convert to numpy if do_aff: warped_image_aff_np = normalize_image(resizeTgt(warped_image_aff),forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy() if do_tps: warped_image_tps_np = normalize_image(resizeTgt(warped_image_tps),forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy() if do_aff and do_tps: warped_image_aff_tps_np = normalize_image(resizeTgt(warped_image_aff_tps),forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy() ###Output _____no_output_____ ###Markdown Display ###Code N_subplots = 2+int(do_aff)+int(do_tps)+int(do_aff and do_tps) fig, axs = plt.subplots(1,N_subplots) axs[0].imshow(source_image) axs[0].set_title('src') axs[1].imshow(target_image) axs[1].set_title('tgt') subplot_idx = 2 if do_aff: axs[subplot_idx].imshow(warped_image_aff_np) axs[subplot_idx].set_title('aff') subplot_idx +=1 if do_tps: axs[subplot_idx].imshow(warped_image_tps_np) axs[subplot_idx].set_title('tps') subplot_idx +=1 if do_aff and do_tps: axs[subplot_idx].imshow(warped_image_aff_tps_np) axs[subplot_idx].set_title('aff+tps') for i in range(N_subplots): axs[i].axis('off') fig.set_dpi(150) plt.show() ###Output _____no_output_____ ###Markdown ###Code import gzip, zlib import shutil import urllib.request from io import StringIO #get external libary to work with tweetids !git clone https://github.com/SocialMediaLab/Tweets_Sampling_Toolkit.git # navigate to the working directory %cd Tweets_Sampling_Toolkit # install packages requirements (This is an optional step) !pip install -r requirements.txt # Import Tweets Sampling Library import tweets_sampling # load and unpack an external gzip'ed CSV file with Tweet IDs 
gz_file = "https://figshare.com/ndownloader/files/31249681" data = urllib.request.urlopen(gz_file) data_obj = data.read() bytes_data = gzip.decompress(data_obj) out_file = 'sample1.csv' data_obj = data.read() with open(out_file, 'wb') as f: f.write(bytes_data) # Load a local CSV file # The entire file will not be read, instead, its length will be # calculated based on its length ifm = tweets_sampling.id_file_manager(out_file) ifm.id_count # Create a sample containing 20% of our original file # Alternatively, you can use sample_mode="absolute" to create a # sample with (for example) 3000 IDs percent_sample = ifm.get_random_sample( 0.2, 'percent_sample.csv', sample_mode='percent' ) percent_sample.id_count # Split the large file into 5 subsets # Each ID from the original subset will be present in one of the # five files pages = ifm.get_page_samples(5, 'pages.csv') # Print each resulting file's name and ID count for page in pages: print(f'{page.file_name}: {page.id_count}') # Comparing Files # We will create two samples to compare to each other a = ifm.get_random_sample(0.2, 'percent_sample1.csv', sample_mode='percent') b = ifm.get_random_sample(0.2, 'percent_sample2.csv', sample_mode='percent') # Get all of tweet ids that are in both a and b # One of the files will be automatically sorted to allow a # binary search algorithm to check for overlap intersection = a.get_intersection(b, 'intersection.csv') intersection.id_count #Difference # Get all of the IDs that are in a, but not b difference = a.get_difference(b, 'difference.csv') difference.id_count # Get all of the files that are in either a or b union = a.get_union(b, 'union.csv') union.id_count ###Output Creating union file Sorting file (Step 1) ###Markdown AWSomeA clean wrapper over boto3 inspired by the user friendly aws cli. SessionsAll actions take an optional session parameter. If none is specified it will create one with whatever default configuration you have in ~/.aws/ directory.Since AWSome encourages writing your script once and running it in different environments, it isn't advisable to have a default session because there is the risk that you might run your script accidentally in your real S3. On the other hand it's not too clean or practical to give every action a session and you would have to remember to create new sessions and give your script the right ones.The recommended way is to write your script without worrying about sessions and use one of our context managers to provide it explicitly without changing your code. ###Code from awsome import s3 # Default session buckets = s3.ls() # Explicitly defined session session = boto3.session.Session( aws_access_key_id='XXXXXX', aws_secret_access_key='XXXXXX', region_name='eu-west-1' ) keys = s3.ls('s3://bucket/key', session=session) # Explicitly defined session with context manager from awsome.playground import boto3_session session = boto3.session.Session( aws_access_key_id='XXXXXX', aws_secret_access_key='XXXXXX', region_name='eu-west-1' ) with boto3_session(session): buckets = s3.ls() # Profile defined in ~/.aws/credentials from awsome.playground import aws_profile with aws_profile(profile='myprofile'): buckets = s3.ls() ###Output _____no_output_____ ###Markdown Example usage of AWSome We will rename all the files from a bucket with a foo/bar/ prefix and copy them to another bucket. 
###Code import pprint from awsome import s3 from awsome.playground import s3_sandbox, dry_run, aws_profile, create_mock_keys pp = pprint.PrettyPrinter(indent=4) ###Output _____no_output_____ ###Markdown Intervention script We create the function that will do the renaming/copy intervention. ###Code def intervention(): keys = s3.ls('s3://testbucket/foo/bar/') for key in keys: new_key = key.replace('customers', 'clients') # Rename all objects with the same key s3.move_key(from_bucket='testbucket', from_key=key, to_bucket='testbucket', to_key=new_key) # Move all objects to the production bucket with the same key s3.copy_key(from_bucket='testbucket', from_key=new_key, to_bucket='prodbucket') ###Output _____no_output_____ ###Markdown Testing the script Before running the script in production we should do a few tests to make sure it's doing what we think it is. Creating test data We will create some dummy files that mimic the structure of the files in our real aws instance to test the intervention on them. ###Code def create_test_data(): for i in range(1, 6): # Files we will be changing key = f'foo/bar/customers_{i}.csv' s3.upload_string(data='some data', bucket='testbucket', key=key) # Files we want untouched key = f'foo/baz/companies_{i}.csv' s3.upload_string(data='some data', bucket='testbucket', key=key) ###Output _____no_output_____ ###Markdown Checking the test data Let's check that the test data is created correctly. Of course we don't want to actually create those dummy files in aws. Instead we use the s3_sandbox context manager that provides a moto s3 instance where we can run our tests as they would run in a real S3 instance. We just need to pass it the name of the buckets it needs to create. ###Code with s3_sandbox(['testbucket', 'prodbucket']): create_test_data() print('\nTest bucket:') pp.pprint(s3.ls('s3://testbucket/', recursive=True)) print('\nProd bucket:') pp.pprint(s3.ls('s3://prodbucket/', recursive=True)) ###Output echo 'some data' | aws s3 cp - s3://testbucket/foo/bar/customers_1.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/baz/companies_1.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/bar/customers_2.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/baz/companies_2.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/bar/customers_3.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/baz/companies_3.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/bar/customers_4.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/baz/companies_4.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/bar/customers_5.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/baz/companies_5.csv Test bucket: aws s3 ls --recursive s3://testbucket/ [ 'foo/bar/customers_1.csv', 'foo/bar/customers_2.csv', 'foo/bar/customers_3.csv', 'foo/bar/customers_4.csv', 'foo/bar/customers_5.csv', 'foo/baz/companies_1.csv', 'foo/baz/companies_2.csv', 'foo/baz/companies_3.csv', 'foo/baz/companies_4.csv', 'foo/baz/companies_5.csv'] Prod bucket: aws s3 ls --recursive s3://prodbucket/ [] ###Markdown We can see that the sample data has been created correctly. Checking the intervention script To make sure that the script does what we want it to we will execute it with a dry run. 
This means that we won't actually execute the commands, just print the equivalent aws cli commands so we can visually inspect them.One exception is that we don't want to patch the ls function (it doesn't change S3 so it is reasonable not to patch it) because we depend on its output to generate the rest of the commands. We will need set patch_ls to false.The dry_run can only be executed inside a sandbox unless you provide the argument safe=False. ###Code with s3_sandbox(['testbucket', 'prodbucket']): create_test_data() print('\n') with dry_run(patch_ls=False): intervention() ###Output echo 'some data' | aws s3 cp - s3://testbucket/foo/bar/customers_1.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/baz/companies_1.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/bar/customers_2.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/baz/companies_2.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/bar/customers_3.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/baz/companies_3.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/bar/customers_4.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/baz/companies_4.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/bar/customers_5.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/baz/companies_5.csv aws s3 ls s3://testbucket/foo/bar/ aws s3 cp s3://testbucket/foo/bar/customers_2.csv s3://testbucket/foo/bar/clients_2.csv aws s3 rm s3://testbucket/foo/bar/customers_2.csv aws s3 cp s3://testbucket/foo/bar/clients_2.csv s3://prodbucket/foo/bar/clients_2.csv aws s3 cp s3://testbucket/foo/bar/customers_4.csv s3://testbucket/foo/bar/clients_4.csv aws s3 rm s3://testbucket/foo/bar/customers_4.csv aws s3 cp s3://testbucket/foo/bar/clients_4.csv s3://prodbucket/foo/bar/clients_4.csv aws s3 cp s3://testbucket/foo/bar/customers_3.csv s3://testbucket/foo/bar/clients_3.csv aws s3 rm s3://testbucket/foo/bar/customers_3.csv aws s3 cp s3://testbucket/foo/bar/clients_3.csv s3://prodbucket/foo/bar/clients_3.csv aws s3 cp s3://testbucket/foo/bar/customers_5.csv s3://testbucket/foo/bar/clients_5.csv aws s3 rm s3://testbucket/foo/bar/customers_5.csv aws s3 cp s3://testbucket/foo/bar/clients_5.csv s3://prodbucket/foo/bar/clients_5.csv aws s3 cp s3://testbucket/foo/bar/customers_1.csv s3://testbucket/foo/bar/clients_1.csv aws s3 rm s3://testbucket/foo/bar/customers_1.csv aws s3 cp s3://testbucket/foo/bar/clients_1.csv s3://prodbucket/foo/bar/clients_1.csv ###Markdown Executing the intervention script in a sandbox This is where it gets interesting. We have inspected the dry run and everything looks reasonable, but you can never be too careful. To make sure we get it right we will execute the real script inside a moto S3 sandbox. 
###Code with s3_sandbox(['testbucket', 'prodbucket']): create_test_data() print('\nTest bucket before:') pp.pprint(s3.ls('s3://testbucket/', recursive=True)) print('\nProd bucket before:') pp.pprint(s3.ls('s3://prodbucket/', recursive=True)) print('\n\nStarting intervention:') intervention() print('Ending intervention:') print('\n\nTest bucket after:') pp.pprint(s3.ls('s3://testbucket/', recursive=True)) print('\nProd bucket after:') pp.pprint(s3.ls('s3://prodbucket/', recursive=True)) ###Output echo 'some data' | aws s3 cp - s3://testbucket/foo/bar/customers_1.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/baz/companies_1.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/bar/customers_2.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/baz/companies_2.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/bar/customers_3.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/baz/companies_3.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/bar/customers_4.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/baz/companies_4.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/bar/customers_5.csv echo 'some data' | aws s3 cp - s3://testbucket/foo/baz/companies_5.csv Test bucket before: aws s3 ls --recursive s3://testbucket/ [ 'foo/bar/customers_1.csv', 'foo/bar/customers_2.csv', 'foo/bar/customers_3.csv', 'foo/bar/customers_4.csv', 'foo/bar/customers_5.csv', 'foo/baz/companies_1.csv', 'foo/baz/companies_2.csv', 'foo/baz/companies_3.csv', 'foo/baz/companies_4.csv', 'foo/baz/companies_5.csv'] Prod bucket before: aws s3 ls --recursive s3://prodbucket/ [] Starting intervention: aws s3 ls s3://testbucket/foo/bar/ aws s3 cp s3://testbucket/foo/bar/customers_2.csv s3://testbucket/foo/bar/clients_2.csv aws s3 rm s3://testbucket/foo/bar/customers_2.csv aws s3 cp s3://testbucket/foo/bar/clients_2.csv s3://prodbucket/foo/bar/clients_2.csv aws s3 cp s3://testbucket/foo/bar/customers_4.csv s3://testbucket/foo/bar/clients_4.csv aws s3 rm s3://testbucket/foo/bar/customers_4.csv aws s3 cp s3://testbucket/foo/bar/clients_4.csv s3://prodbucket/foo/bar/clients_4.csv aws s3 cp s3://testbucket/foo/bar/customers_3.csv s3://testbucket/foo/bar/clients_3.csv aws s3 rm s3://testbucket/foo/bar/customers_3.csv aws s3 cp s3://testbucket/foo/bar/clients_3.csv s3://prodbucket/foo/bar/clients_3.csv aws s3 cp s3://testbucket/foo/bar/customers_5.csv s3://testbucket/foo/bar/clients_5.csv aws s3 rm s3://testbucket/foo/bar/customers_5.csv aws s3 cp s3://testbucket/foo/bar/clients_5.csv s3://prodbucket/foo/bar/clients_5.csv aws s3 cp s3://testbucket/foo/bar/customers_1.csv s3://testbucket/foo/bar/clients_1.csv aws s3 rm s3://testbucket/foo/bar/customers_1.csv aws s3 cp s3://testbucket/foo/bar/clients_1.csv s3://prodbucket/foo/bar/clients_1.csv Ending intervention: Test bucket after: aws s3 ls --recursive s3://testbucket/ [ 'foo/bar/clients_1.csv', 'foo/bar/clients_2.csv', 'foo/bar/clients_3.csv', 'foo/bar/clients_4.csv', 'foo/bar/clients_5.csv', 'foo/baz/companies_1.csv', 'foo/baz/companies_2.csv', 'foo/baz/companies_3.csv', 'foo/baz/companies_4.csv', 'foo/baz/companies_5.csv'] Prod bucket after: aws s3 ls --recursive s3://prodbucket/ [ 'foo/bar/clients_1.csv', 'foo/bar/clients_2.csv', 'foo/bar/clients_3.csv', 'foo/bar/clients_4.csv', 'foo/bar/clients_5.csv'] ###Markdown Finally we have succesfully validated our script, and we can rest assured that it will do what we intend it to. Creating mock keysSay you want to replicate your keys from your real bucket into your sandbox. 
There is an easy way to do that.First we need to read the keys from your buckets: ###Code with aws_profile(profile='myprofile'): test_keys = s3.ls('s3://test-bucket/', recursive=True) prod_keys = s3.ls('s3://production-bucket/', recursive=True) ###Output _____no_output_____ ###Markdown Next we can use the function create_mock_keys to populate those keys with random (short) data, therefore recreating the same keys that we have in our real buckets. This can help us be even more sure that the script runs correctly.We can rest assured that create_mock_keys will not write any data to our real S3 because it only works inside a sandbox. ###Code from awsome.playground import s3_sandbox with s3_sandbox(buckets=['test-bucket', 'production-bucket']): create_mock_keys('test-bucket', test_keys) create_mock_keys('production-bucket', prod_keys) intervention() pp.pprint(s3.ls('s3://test-bucket/.../', recursive=True)) print('\n') pp.pprint(s3.ls('s3://prod-bucket/.../', recursive=True)) ###Output _____no_output_____ ###Markdown Empirical Reliability Predict demonstration Demonstration of the empirical reliability predict procedure for a tidal turbine pitch system (a particularly failure critical area). ###Code import scipy.stats as ss import matplotlib.pyplot as plt import pymc3 as pm import numpy as np # set font plt.rcParams["font.family"] = "Times New Roman" import classes_funcs as cf ###Output _____no_output_____ ###Markdown ____ Component designs Electro-mechanical pitch system consists of dynamic seal, roller bearing unit, gearbox and electric motor. Each component requires specific design information, influence factors and uncertainty levels. ###Code brg_design_params = {'cw': 0.1, 'FR': 20, 'n': 2, 'd': 750, 'd1': 850, 'v0': 100, 'lambda_base': 0.02} brg_influence_factors = {'Cr':1, 'Ct':1, 'Csf':1.3, 'Cc':1} brg_inf_factor_uncertainty = {'Cr_unc':0.1, 'Ct_unc':0.1, 'Csf_unc':0.3, 'Cc_unc':0.1, 'Cv_unc':0.1, 'Ccw_unc':0.1, 'lambda_base_unc':0.3} seal_design_params = {'diam': 22, # inches 'Tr': 140, # rated temp fahrenheit 'To': 122, # op temp fahrenheit 'f': 15, # surface finish is micro_inch RMS 'E': 0.7, # Youngs Modulus MPa 'C': 1, # Contact pressure MPa 'lambda_base': 0.02} # failures/yr seal_influence_factors = {'Cp':0.25, # fluid pressure 'Cq':4.2, # allowable leakage 'Cv':1, # fluid viscosity 'Cn':3.5, # contaminant influence factor 'Cpv':0.5} # pressure velocity seal_inf_factor_uncertainty = {'Cp_unc':0.1, 'Cq_unc':0.1, 'Ch_unc':0.1, 'Cf_unc':0.1, 'Cv_unc':0.1, 'Ct_unc':0.1, 'Cn_unc':0.3, 'Cpv_unc':0.1, 'lambda_base_unc':0.1} gb_design_params = {'V_ratio': 0.5, # ratio of operating speed to design speed 'L_ratio': 0.5, # ratio of operating load to design load 'Ae': 0.3, # gear misallignment deg 'Vl': 150, # rated lubricant viscosity 'V0': 103, # operating lub viscosity 'lambda_base': 0.2} # failures/year gb_influence_factors = {'Cv':0.53, # lubrication deviation factor (same as value for brg because its the same lub system) 'Cgt':1, 'Cgv':1} gb_inf_factor_uncertainty = {'Cgs_unc':0.1, 'Cgp_unc':0.2, 'Cga_unc':0.1, 'Cv_unc':0.1, 'Cgt_unc':0.1, 'Cgv_unc':0.1, 'lambda_base_unc':0.2} em_design_params = {'op_time': 0.3, # percentage of time that PS is operating 0.3 = 30% 'Vd': 0.05, # voltage tolerance (how much over rated - 5% according to US Mil) 'temp': 50, # operating temp Celsius 'lambda_base': (2.7/114.2), # EM overall base failure rate 2.7/mill hrs operating from US Mil handbook (for DC motor) 'lambda_win_base':(40/114.2), # winding base failure rate 40/mill hrs operating from US Mil 
handbook 'lambda_bs' :(3.2/114.2), # brushes failure rate 3.2/mill hrs operating from US Mil handbook 'lambda_stat' :(0.001/114.2), # stator housing failure rate 0.001/mill hrs operating from US Mil handbook 'lambda_arm' :(0.0061/114.2)} # armature shaft failure rate (correction factors all = 1) em_influence_factors = {'Csf':2, # service factor for motor base failure rate 'Calt':1} # altitude factor - subsea so is =1 em_inf_factor_uncertainty = {'lambda_win_base_unc':0.2, 'lambda_bs_unc':0.1, 'lambda_stat_unc':0.1, 'lambda_arm_unc':0.1, 'lambda_base_unc':0.1} ###Output _____no_output_____ ###Markdown Object creation Objects are created for each component. ###Code brg1 = cf.bearing(brg_design_params,brg_influence_factors,brg_inf_factor_uncertainty) brg2 = cf.bearing(brg_design_params,brg_influence_factors,brg_inf_factor_uncertainty) brg3 = cf.bearing(brg_design_params,brg_influence_factors,brg_inf_factor_uncertainty) seal1 = cf.seal(seal_design_params,seal_influence_factors,seal_inf_factor_uncertainty) seal2 = cf.seal(seal_design_params,seal_influence_factors,seal_inf_factor_uncertainty) seal3 = cf.seal(seal_design_params,seal_influence_factors,seal_inf_factor_uncertainty) gb = cf.gearbox(gb_design_params,gb_influence_factors,gb_inf_factor_uncertainty) em = cf.electric_motor(em_design_params,em_influence_factors,em_inf_factor_uncertainty) ###Output _____no_output_____ ###Markdown Failure rate model ###Code def monte_carlo(): component_list = brg1,brg2,brg3,seal1,seal2,seal3,gb,em rvs = [component_list[i].random_variates(1) for i in range(len(component_list))] # list comprehension for component rvs sum_rvs = [sum(i) for i in zip(*rvs)] # sum list of lists return sum_rvs ps_rvs = [monte_carlo() for i in range(1,1000)] ###Output _____no_output_____ ###Markdown Priors ###Code # beta and lambda prior distributions ps_shape,ps_loc,ps_scale=ss.lognorm.fit(ps_rvs) ps_pdf = ss.lognorm.pdf(ps_rvs,s=ps_shape,loc=ps_loc,scale=ps_scale) betas = [0.437,0.807,0.815,0.460,0.228,0.573,0.760,0.535,0.257,0.269,0.465,0.397,0.569,0.347,0.412,0.470,0.442,0.675] beta_shape,beta_loc,beta_scale = ss.lognorm.fit(betas) beta_rvs = ss.lognorm.rvs(s=beta_shape,scale=beta_scale,loc=beta_loc,size=1000) beta_pdf = ss.lognorm.pdf(x=beta_rvs,s=beta_shape,scale=beta_scale,loc=beta_loc) fig,ax = plt.subplots() _=ax.scatter(ps_rvs,ps_pdf,color='black',s=1,label='PS Prior') _=ax.scatter(beta_rvs,beta_pdf,color='grey',s=1,label='β Prior') _=ax.set(ylabel='Density',xlabel='λ (failures/yr)') _=ax.grid() _=ax.legend() #_=plt.savefig(fname='prior_dists') ps_std = ss.lognorm.std(s=ps_shape,loc=ps_loc,scale=ps_scale) # std of ps failure rate dist ps_mean = ss.lognorm.mean(s=ps_shape,loc=ps_loc,scale=ps_scale) # expected value of ps failure rate dist beta_std = ss.lognorm.std(s=beta_shape,scale=beta_scale,loc=beta_loc) # std of ps failure rate dist beta_mean = ss.lognorm.mean(s=beta_shape,scale=beta_scale,loc=beta_loc) # expected value of ps failure rate dist ps_mean,beta_mean ###Output _____no_output_____ ###Markdown Likelihood ###Code wt_data = ss.weibull_min.rvs(c=0.61, loc=0, scale=0.5, size=10) # likelihood (observed data eg wind failure data) # fit dist to observed data wt_shape,wt_loc,wt_scale = ss.weibull_min.fit(wt_data,loc=0) wt_rvs = ss.weibull_min.rvs(c=wt_shape,scale=wt_scale, loc=wt_loc,size=1000) wt_sf = ss.weibull_min.sf(wt_rvs,c=wt_shape,loc=0, scale=wt_scale) fig,ax = plt.subplots() #_=ax.scatter(np.sort(wt_data), range(len(wt_data)), lw=1,c='r',s=10,label='Failures') _=ax.plot(np.sort(wt_rvs), 
np.sort(-wt_sf)*-1,label='Likelihood',c='k') _=ax.set(xlim=[0,3],ylim=[0,1],ylabel='R(t)',xlabel='Time (yrs)') _=ax.grid() _=ax.legend() _=plt.savefig(fname='likelihood_wbl') ###Output _____no_output_____ ###Markdown Bayesian update ###Code with pm.Model() as model: rate = pm.Lognormal('Rate',mu=ps_mean,sd=ps_std) shape = pm.Lognormal('Shape',mu=beta_mean,sd=beta_std) obs = pm.Weibull('obs',alpha=shape,beta=1/rate,observed=wt_data) # beta = scale = 1/rate, alpha = shape trace = pm.sample(10000,live_plot=True) _=pm.traceplot(trace) _=pm.plot_posterior(trace) #12plt.savefig(fname='posteriors') pm.summary(trace) pm.waic(trace,model) pm.find_MAP(model=model) ###Output logp = -474.64, ||grad|| = 0.15484: 100%|█████████████████████████████████████████████| 14/14 [00:00<00:00, 466.22it/s] ###Markdown Updated Reliability ###Code # use updated parameter values to estimate reliability rel_update_rvs = ss.weibull_min.rvs(size=1000,c=0.61,scale=1/1.97,loc=0) # rvs generation with new parameter values upd_shape,upd_scale,upd_loc = ss.weibull_min.fit(rel_update_rvs) # weibull fit upd_sf = ss.weibull_min.sf(rel_update_rvs,upd_shape,upd_scale,upd_loc) prior_rel_rvs = ss.weibull_min.rvs(c=beta_mean,scale=1/ps_mean,loc=0,size=1000) prior_rel_sf = ss.weibull_min.sf(prior_rel_rvs,c=beta_mean,scale=1/ps_mean,loc=0) fig,ax = plt.subplots() _=ax.plot(np.sort(rel_update_rvs), np.sort(-upd_sf)*-1,label='Updated',ls='--',c='k') _=ax.plot(np.sort(prior_rel_rvs),np.sort(-prior_rel_sf)*-1,label='Prior',c='k') _=ax.set(xlim=[0,3],ylim=[0,1],ylabel='R(t)',xlabel='Time (yrs)') _=ax.grid() _=ax.legend() #_=plt.savefig(fname='updated_reliability') ###Output _____no_output_____ ###Markdown WeakAlign demo notebookThis notebook shows how to run a trained model on a given image pair Imports ###Code from __future__ import print_function, division import os from os.path import exists import argparse import torch import torch.nn as nn from torch.utils.data import Dataset, DataLoader from model.cnn_geometric_model import CNNGeometric, TwoStageCNNGeometric from data.pf_dataset import PFDataset from data.download_datasets import download_PF_pascal from image.normalization import NormalizeImageDict, normalize_image from util.torch_util import BatchTensorToVars, str_to_bool from geotnf.transformation import GeometricTnf from geotnf.point_tnf import * import matplotlib.pyplot as plt from skimage import io import warnings from torchvision.transforms import Normalize from collections import OrderedDict import torch.nn.functional as F warnings.filterwarnings('ignore') from model.loss import TransformedGridLoss, WeakInlierCount, TwoStageWeakInlierCount ###Output _____no_output_____ ###Markdown Parameters ###Code # Select one of the following models: # cnngeo_vgg16, cnngeo_resnet101, proposed_resnet101 model_selection = 'proposed_resnet101' model_aff_path = '' model_tps_path = '' model_aff_tps_path = '' if model_selection=='cnngeo_vgg16': model_aff_path = 'trained_models/trained_models/cnngeo_vgg16_affine.pth.tar' model_tps_path = 'trained_models/trained_models/cnngeo_vgg16_tps.pth.tar' feature_extraction_cnn = 'vgg' elif model_selection=='cnngeo_resnet101': model_aff_path = 'trained_models/trained_models/cnngeo_resnet101_affine.pth.tar' model_tps_path = 'trained_models/trained_models/cnngeo_resnet101_tps.pth.tar' feature_extraction_cnn = 'resnet101' elif model_selection=='proposed_resnet101': model_aff_tps_path = 'trained_models/weakalign_resnet101_affine_tps.pth.tar' feature_extraction_cnn = 'resnet101' # 
source_image_path='datasets/proposal-flow-pascal/PF-dataset-PASCAL/JPEGImages/2008_006325.jpg' # target_image_path='datasets/proposal-flow-pascal/PF-dataset-PASCAL/JPEGImages/2010_004954.jpg' source_image_path='datasets/3.JPEG' target_image_path='datasets/4.JPEG' if not exists(source_image_path): download_PF_pascal('datasets/proposal-flow-pascal/') ###Output _____no_output_____ ###Markdown Load models ###Code use_cuda = torch.cuda.is_available() model = TwoStageCNNGeometric(use_cuda=use_cuda, return_correlation=False, feature_extraction_cnn=feature_extraction_cnn) # load pre-trained model if model_aff_tps_path!='': checkpoint = torch.load(model_aff_tps_path, map_location=lambda storage, loc: storage) checkpoint['state_dict'] = OrderedDict([(k.replace('vgg', 'model'), v) for k, v in checkpoint['state_dict'].items()]) for name, param in model.FeatureExtraction.state_dict().items(): model.FeatureExtraction.state_dict()[name].copy_(checkpoint['state_dict']['FeatureExtraction.' + name]) for name, param in model.FeatureRegression.state_dict().items(): model.FeatureRegression.state_dict()[name].copy_(checkpoint['state_dict']['FeatureRegression.' + name]) for name, param in model.FeatureRegression2.state_dict().items(): model.FeatureRegression2.state_dict()[name].copy_(checkpoint['state_dict']['FeatureRegression2.' + name]) else: checkpoint_aff = torch.load(model_aff_path, map_location=lambda storage, loc: storage) checkpoint_aff['state_dict'] = OrderedDict([(k.replace('vgg', 'model'), v) for k, v in checkpoint_aff['state_dict'].items()]) for name, param in model.FeatureExtraction.state_dict().items(): model.FeatureExtraction.state_dict()[name].copy_(checkpoint_aff['state_dict']['FeatureExtraction.' + name]) for name, param in model.FeatureRegression.state_dict().items(): model.FeatureRegression.state_dict()[name].copy_(checkpoint_aff['state_dict']['FeatureRegression.' + name]) checkpoint_tps = torch.load(model_tps_path, map_location=lambda storage, loc: storage) checkpoint_tps['state_dict'] = OrderedDict([(k.replace('vgg', 'model'), v) for k, v in checkpoint_tps['state_dict'].items()]) for name, param in model.FeatureRegression2.state_dict().items(): model.FeatureRegression2.state_dict()[name].copy_(checkpoint_tps['state_dict']['FeatureRegression.' 
+ name]) ###Output _____no_output_____ ###Markdown Create image transformers ###Code tpsTnf = GeometricTnf(geometric_model='tps', use_cuda=use_cuda) affTnf = GeometricTnf(geometric_model='affine', use_cuda=use_cuda) ###Output _____no_output_____ ###Markdown Load and preprocess images ###Code resizeCNN = GeometricTnf(out_h=240, out_w=240, use_cuda = False) normalizeTnf = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) def preprocess_image(image): # convert to torch Variable image = np.expand_dims(image.transpose((2,0,1)),0) image = torch.Tensor(image.astype(np.float32)/255.0) image_var = Variable(image,requires_grad=False) # Resize image using bilinear sampling with identity affine tnf image_var = resizeCNN(image_var) # Normalize image image_var = normalize_image(image_var) return image_var source_image = io.imread(source_image_path) target_image = io.imread(target_image_path) source_image_var = preprocess_image(source_image) target_image_var = preprocess_image(target_image) if use_cuda: source_image_var = source_image_var.cuda() target_image_var = target_image_var.cuda() batch = {'source_image': source_image_var, 'target_image':target_image_var} resizeTgt = GeometricTnf(out_h=target_image.shape[0], out_w=target_image.shape[1], use_cuda = use_cuda) ###Output _____no_output_____ ###Markdown Evaluate model ###Code model.eval() # Evaluate model theta_aff,theta_aff_tps=model(batch) ###Output _____no_output_____ ###Markdown Compute warped images ###Code def affTpsTnf(source_image, theta_aff, theta_aff_tps, use_cuda=use_cuda): tpstnf = GeometricTnf(geometric_model = 'tps',use_cuda=use_cuda) sampling_grid = tpstnf(image_batch=source_image, theta_batch=theta_aff_tps, return_sampling_grid=True)[1] X = sampling_grid[:,:,:,0].unsqueeze(3) Y = sampling_grid[:,:,:,1].unsqueeze(3) Xp = X*theta_aff[:,0].unsqueeze(1).unsqueeze(2)+Y*theta_aff[:,1].unsqueeze(1).unsqueeze(2)+theta_aff[:,2].unsqueeze(1).unsqueeze(2) Yp = X*theta_aff[:,3].unsqueeze(1).unsqueeze(2)+Y*theta_aff[:,4].unsqueeze(1).unsqueeze(2)+theta_aff[:,5].unsqueeze(1).unsqueeze(2) sg = torch.cat((Xp,Yp),3) warped_image_batch = F.grid_sample(source_image, sg) return warped_image_batch warped_image_aff = affTnf(batch['source_image'],theta_aff.view(-1,2,3)) warped_image_aff_tps = affTpsTnf(batch['source_image'],theta_aff,theta_aff_tps) from image.normalization_omniglot import NormalizeImageDict, normalize_image import cv2 img = cv2.imread('0966_02.png') img.shape def preprocess_image(image): # convert to torch Variable image = np.expand_dims(image.transpose((2,0,1)),0) image = torch.Tensor(image.astype(np.float32)/255.0) image_var = Variable(image,requires_grad=False) # Resize image using bilinear sampling with identity affine tnf image_var = resizeCNN(image_var) # Normalize image image_var = normalize_image(image_var) return image_var x = preprocess_image(img) x.shape ###Output _____no_output_____ ###Markdown Display ###Code # Un-normalize images and convert to numpy warped_image_aff_np = normalize_image(resizeTgt(warped_image_aff),forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy() warped_image_aff_tps_np = normalize_image(resizeTgt(warped_image_aff_tps),forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy() N_subplots = 4 fig, axs = plt.subplots(1,N_subplots) axs[0].imshow(source_image) axs[0].set_title('src') axs[1].imshow(target_image) axs[1].set_title('tgt') axs[2].imshow(warped_image_aff_np) axs[2].set_title('aff') axs[3].imshow(warped_image_aff_tps_np) axs[3].set_title('aff+tps') 
for i in range(N_subplots): axs[i].axis('off') fig.set_dpi(150) plt.show() ###Output _____no_output_____ ###Markdown SIRAH Candidate Pipeline Demo NotebookThis notebook demostrates how to set up and run the SIRAH candidate pipeline and desribes its basic features by Mi DaiApril 13, 2020 **0. prerequisites** **0.0 Before you start, follow the instructions under README [here](https://github.com/mi-dai/sirahtargetspipeline-to-select-sirah-target) to set up conda enviroment and install other required packages** **0.1 Credentials**The following credentials need to be obtained and set up in order to use all functions of the pipeline- **[alerce]**- **[lasair]** - **[TNS api key]**- **[SDSS CasJobs]** In the directory of this notebook`cp -r credentials_template/ credentials/` Then fill in the credential info for each file. **0.2 local GLADE database**I have converted the downloaded GLADE catalogue into an sqlite3 database for easy query. For now you can grab the compiled database [here](https://www.dropbox.com/s/aib3ze9vaxmknp7/glade_v23.db?dl=0), and put it into a directory named `db/` in the directory of this notebook (Later I will make sure the code that generates the db works so that you can make your own) **0.3 The following cells set up autoreload and import necessary packages for this notebook** ###Code %load_ext autoreload %autoreload 2 import pandas as pd import numpy as np import matplotlib.pyplot as plt import os ###Output _____no_output_____ ###Markdown **0.4 Define the follow environment variables if you haven't done so** ###Code os.environ['SFD_DIR'] = '/home/mi/sfddata-master' #modify this to point to the dust map downloaded from https://github.com/kbarbary/sfddata os.environ['SIRAHPIPE_DIR'] = os.getcwd() #Or your sirah_target_pipe dir if you are running in other directories ###Output _____no_output_____ ###Markdown **1. Using the pipeline** **1.1 import the SIRAHPipe module first** ###Code from pipeline import SIRAHPipe ###Output _____no_output_____ ###Markdown **1.2 initialize the pipeline** This cell defines the pipeline and chooses the brokers, crossmatch calalogues, and the selection cuts **Currently implemented brokers:** [alerce](http://alerce.science/), [lasair](https://lasair.roe.ac.uk/streams/),[tns](https://wis-tns.weizmann.ac.il/) **Crossmatch catalogues:** [sdss](https://skyserver.sdss.org/CasJobs/SubmitJob.aspx),[ned](https://ned.ipac.caltech.edu/forms/nnd.html),[glade](http://glade.elte.hu/) **Selection cuts:**zcut,ztflimcut,hostdistcut,olddetectioncut,magzcut,rbcut (see 1.4 running the pipeline for descriptions on the cuts)For this demo we select all of the above options: ###Code pipe = SIRAHPipe(brokers=['alerce','lasair','tns'],xmatch_catalogues=['sdss','ned','glade'], selection_cuts=['zcut','ztflimcut','hostdistcut','olddetectioncut','magzcut','rbcut']) ###Output Setting up SkyServer... Setting up local db [db/glade_v23.db] Brokers to query: ['alerce', 'lasair', 'tns'] Crossmatch catalogues: ['sdss', 'ned', 'glade'] Cuts to apply: ['zcut', 'ztflimcut', 'hostdistcut', 'olddetectioncut', 'magzcut', 'rbcut'] ###Markdown **1.3 using the realtime mode**Let's record current date and time. The pipeline can run in realtime or non-realtime mode. if `realtime==True`, the sql query includes conditions on the latest magnitude provided by the brokers; if `realtime==False`, the latest magnitudes are calculated offline by querying all the detections before the specific date. This is to mimic realtime query but can be used for an earlier date for testing and comparing. 
Note that this may not produce the exact results as the real time mode as the broker databases may change. For this demo we set `realtime = True` ###Code from astropy.time import Time from datetime import datetime print(Time(datetime.today()),'mjd=',Time(datetime.today()).mjd) realtime = True ###Output 2020-04-22 18:13:58.102328 mjd= 58961.75970026116 ###Markdown **1.4 running the pipeline** This is the main part of the pipeline that many options can be specified. Here I list some useful ones (and their default values): - **[query options]** - **mjdstart, mjdend:** mjd range to query - **gmax(=20.), rmax(=20.), gmin(=16.), rmin(=16.):** max/min magnitude ranges to query (ZTF specifically, for objects on TNS that are not ZTF the bands are hard code to be g at the moment) - **qlim:** number of objects to query for brokers (this is a universal number set for all brokers but currently not applied to tns) - **skip_ztf(=True):** set to True to skip importing TNS objects that has internal ZTF names (since it's already in the Alerce/Lasair queries) - **use_sherlock(=True):** include Lasair's sherlock classification and crossmatch results in query, default is True. This can be set to False in case sherlock is not available or for testing purpose - **[selection cut options]** - **[zcut] zlow(=0.016), zhigh(=0.08), zerrlim(=0.02):** limit on redshift and redshift error, for specz the zerr is currently set to 0.001. Increase the number to include photoz results - **[magzcut] dmag_min(=1.), dmag_max(=4.5), magabs=(-19.1):** cut on the mag difference from a given absolute magnitude (peak mag for example) - this can be translated as phase cut if the sedmodel is known (see mag/z plot below). - **[rbcut] rb(=0.5):** real/bogus score for ZTF objects (higher rb value is more likely to be real) - **[hostdistcut] dist_min(=2.), dist_max(=None):** distance to host (in arcsec) - **[ztflimcut] maglim(=19.8), nobs(=1):** set mag lim for objects that has <= nobs detections. This is used to cut on single detections closer to the (ZTF) limiting mag - **[olddetectioncut] dayslim(=5), fromday(=None):** Remove objects whose latest detection is > *dayslim* days from *fromday* (These are potentially too old)- **[other options]** - **[magz plot options]** - **sedmodel=('salt2'):** sedmodel to generate the mag vs z lines. This can be any sncosmo model listed [here](https://sncosmo.readthedocs.io/en/v2.1.x/source-list.html) - **sedmodel_pardict=(None):** dictionary of parameters to set for the sedmodel`pipe.run()` **runs the pipeline and applies the selection cuts defined in 1.2 initialize the pipeline, and makes a mag/z plot for the objects that passed all cuts. This may take a while (a few minutes to about less than an hour) depending on how large the query is and how many objects to be crossmatched.** ###Code import time today = Time(datetime.today()).mjd start = time.time() pipe.run(mjdstart=today-10,mjdend=today,qlim=100,zerrlim=0.01,dmag_max=4.5,gmax=22,rmax=22,realtime=realtime,skip_ztf=True,use_sherlock=True) # pipe.run(mjdstart=today-10,mjdend=today,qlim=100,zerrlim=0.01,dmag_max=4.5,gmax=22,rmax=22,realtime=realtime, # skip_ztf=True,use_sherlock=True,sedmodel='s11-2005hl',magabs=-18) end = time.time() print("Time used: {:.2f} mins".format((end - start)/60.)) ###Output queryresult size: 100 queryresult size: 100 queryresult size: 166 Table [sncoor] uploaded successfully. Query Job is submitted. JobID=47354091 Job 47354091 is finished Cross-matching GLADE catalog using astropy module search_around_sky Done. 
Time=0.021483612060546876 minutes Cross match NED for 0.01 < sdss_photoz < 0.08, num = 2 Table [sncoor] uploaded successfully. Query Job is submitted. JobID=47354099 Job 47354099 is finished Cross-matching GLADE catalog using astropy module search_around_sky Done. Time=0.023793903986612956 minutes Selecting 0.016 < z < 0.080 and zerr < 0.010 Number fails cut [flag_zcut]: 779/912 Cut on magnitude lim for nobs <= 1: maglim = 19.8 Number fails cut [flag_ztflimcut]: 14/912 Cut on distance to host: 2 < dist < None (Arcsec) Number fails cut [flag_hostdistcut]: 113/912 Cut on days since last detection from 2020-04-22 18:16:02.215: dt < 5 Number fails cut [flag_olddetectioncut]: 459/912 ###Markdown **1.5 Miscellaneous features**Here I describe some miscellaneous features that may be useful in analyzing the query results 1.5.1 *SIRAHPipe.results*After the pipeline runs, all the query results, including the selection cut flags, are saved in `SIRAHPipe.results` ###Code pipe.results.head() ###Output _____no_output_____ ###Markdown 1.5.2 the *SIRAHPipe.MakeCuts* module- **(re)applying cuts using** ***SIRAHPipe.MakeCuts.[Cutname]*** The `SIRAHPipe.MakeCuts` module contains all the selection cut functions that can be reapplied after the pipeline runs to change the selection criteria. e.g. We can reapply the redshift cut with narrower `zerrlim=0.005` or apply new cuts that are not selected in the initialization phase - **make mag/z plot using** ***SIRAHPipe.MakeCuts.plot()***- **return a pandas DataFrame for objects that passed all cuts using** ***SIRAHPipe.MakeCuts.aftercuts()*** Note that `SIRAHPipe.MakeCuts.aftercuts()` returns all entries passing the cuts so there can be multiple entries for the same object. You may use `pd.DataFrame.sort_values()` and `pd.DataFrame.drop_duplicates()` to select unique results or define your own ranking ###Code ## apply additional cut here pipe.MakeCuts.zcut(zerrlim=0.005) # pipe.MakeCuts.olddetectioncut() pipe.MakeCuts.magzcut(dmag_max=3.,dmag_min=0.,magabs=-18) # pipe.MakeCuts.ztflimcut() pipe.MakeCuts.plot(magabs=-18,magz_plot=True,sedmodel='snana-2004fe',phase=[-10,-5,0]) # pipe.MakeCuts.plot(magabs=-19,magz_plot=True) plt.show() df = pipe.MakeCuts.aftercuts() cols = ['xmatch_rank','xmatch_db','distance'] sort_cols = [x for x in cols if x in df.columns] df = df.sort_values(sort_cols).drop_duplicates('oid') cols = ['oid','nobs','firstmjd','lastmjd','gmaglatest','rmaglatest','classearly','classification','distance','separationArcsec','dmag_g','dmag_r', 'z','zerr','xmatch_rank','objID','xmatch_objid','xmatch_table_name'] cols_exist = [x for x in cols if x in df.columns] df[cols_exist].head() ###Output Selecting 0.016 < z < 0.080 and zerr < 0.005 Selecting candidates based on mag vs z: 0.00 < dmag (from max) < 3.00 Plotting MakeCuts... ###Markdown **2. 
Making plots for selected candidates** The main function for making light curve and phase estimate plots is *utils.gen_plots* `gen_plots()` requires a pandas DataFrame as input that comes from running the pipeline or a self-defined `pd.DataFrame` that has the required columns as the pipeline results - set **interactive = True** to make interactive plots in jupyter notebook (this doesn't seem to work with jupyter lab) if interactive=True, the image will be an interactive Aladin widget; otherwise the image will be retrieved from the PS1 image server- set **savepdf = True** to out pdfs currently the pdfs are plotted in the order of decreasing phase estimation**Here are some useful *gen_plots* options:** - **magabs(=-19.1):** For Ia, - **extra_lc(=False):** set to *True* is extra photometry is available. The photometry need to be placed in `data/extra_photometry` as `[objectname].txt`- **update_lc_prediction(=False):** replot the lc prediction- **last_detection_max(=5):** don't include objects with last detection that is >5 days old- **source(='ztf'):** provide correct photometry format ('ztf' or 'tns')- **broker(='alerce'):** for ztf objects only, broker to query for light curve points ('alerce' or 'lasair')- **plot_ylim(=(21,15)):** ylims for the light curve and mag/z plots- **ps1_image_size(=320):** image size for PS1 images (in pixels), actual image size is 0.25 arcsec/pixel ###Code from utils import * import matplotlib.pyplot as plt import os from PyPDF2 import PdfFileMerger # %matplotlib inline interactive = False savepdf = True querydate = date.today() if not os.path.isdir('demo'): os.mkdir('demo') if savepdf: pdf_file = 'demo/Candidates_{}.pdf'.format(querydate.strftime("%m-%d-%Y")) folder = 'demo/{}'.format(querydate.strftime("%m-%d-%Y")) if not os.path.isdir(folder): os.mkdir(folder) pdflist = [] orderlist = [] # display(target[target.oid=='ZTF20aamfpft']) for i,row in df[0:2].iterrows(): if savepdf: f = '{}/{}.pdf'.format(folder,row.oid) else: f = None source = 'tns' if row['Broker'] == 'tns' else 'ztf' res = gen_plots(row,interactive=interactive,pdf_file=f,source=source,plot_ylim=(22,15),broker='lasair', sedmodel='salt2',magabs=-19.1) if savepdf and not res['too_old']: pdflist.append(f) orderlist.append(res['phase_tuesday']) if savepdf: idx_ordered = np.argsort(orderlist) merger = PdfFileMerger() for pdf in np.array(pdflist)[idx_ordered]: merger.append(pdf) merger.write(pdf_file) merger.close() ###Output ZTF20aaurfhs: z=0.0352 +/- 0.0010 ra = 17:01:22.507 dec = +20:18:58.35 mwebv=0.058 ###Markdown NDBC Data Buoy Class DemoSome simple examples of use cases for the NDBC DataBuoy class Import Class and Printer ###Code from NDBC.NDBC import DataBuoy import pprint printer = pprint.PrettyPrinter(indent=4) ###Output _____no_output_____ ###Markdown Instantiate Buoy Object - Get Station Metadata---This data is scraped from the station website. In order to ensure no data is lost, all the data scraped is retained in original string format ###Code n42 = DataBuoy(46042) n42.get_station_metadata() printer.pprint(n42.station_info) ###Output { 'Air temp height': '3.7 m above site elevation', 'Anemometer height': '4.1 m above site elevation', 'Barometer elevation': '2.7 m above mean sea level', 'Sea temp depth': '1.5 m below water line', 'Site elevation': 'sea level', 'Watch circle radius': '1793 yards', 'Water depth': '1645.9 m', 'lat': '36.785 N', 'lon': '122.398 W'} ###Markdown Get Most Recent Monthly Standard Meteorology Data---Standard meteorological data is fetched from QC'd .txt files. 
Data is loaded into a pandas DataFrame and metadata associated with the file (headers) is stored as the measured units. Data can also be accessed using dot notation for known data packages (new v.1.1.0)To faciliate rapid iterations, simple getter methods have been added to allow dot notation access.To see a list of known data packages that can be downloaded by the DataBuoy class, simply review the DATA_PACKAGES constant ###Code n42.get_data() # printer.pprint(n42.data) # Using @property function for dot notation printer.pprint(n42.stdmet) # Reviewing known data packages printer.pprint(n42.DATA_PACKAGES) ###Output Apr 2021 not available. Mar 2021 not available. ###Markdown Get Specific time Period---The `get_data()` method supports providing lists of months (for current 12 month period) and years. Additionally you can specify the data package (_see above output_) as well as whether or not to use the datetime values as the index of the dataframe. ###Code n42.get_data(months=[1,2], data_type='swden', datetime_index=True) printer.pprint(n42.swden) ###Output .0200 .0325 .0375 .0425 .0475 .0525 .0575 .0625 \ 2021-01-01 00:40:00 0.0 0.0 0.0 0.00 1.17 9.11 24.25 24.95 2021-01-01 01:40:00 0.0 0.0 0.0 0.00 0.00 13.76 26.55 22.40 2021-01-01 02:40:00 0.0 0.0 0.0 0.00 0.93 4.40 16.03 33.95 2021-01-01 03:40:00 0.0 0.0 0.0 0.07 1.14 6.95 27.94 45.68 2021-01-01 04:40:00 0.0 0.0 0.0 0.00 0.76 3.64 11.23 18.23 ... ... ... ... ... ... ... ... ... 2021-02-28 19:40:00 0.0 0.0 0.0 0.00 0.00 0.00 0.06 0.25 2021-02-28 20:40:00 0.0 0.0 0.0 0.02 0.05 0.08 0.24 1.02 2021-02-28 21:40:00 0.0 0.0 0.0 0.00 0.00 0.15 0.30 0.36 2021-02-28 22:40:00 0.0 0.0 0.0 0.00 0.01 0.09 0.10 0.32 2021-02-28 23:40:00 0.0 0.0 0.0 0.00 0.00 0.00 0.18 0.25 .0675 .0725 ... .3300 .3400 .3500 .3650 .3850 \ 2021-01-01 00:40:00 15.84 20.44 ... 0.20 0.13 0.07 0.06 0.05 2021-01-01 01:40:00 24.12 30.09 ... 0.16 0.06 0.16 0.06 0.03 2021-01-01 02:40:00 41.48 38.02 ... 0.14 0.14 0.10 0.07 0.05 2021-01-01 03:40:00 41.92 30.11 ... 0.09 0.08 0.15 0.09 0.04 2021-01-01 04:40:00 29.84 27.19 ... 0.16 0.08 0.08 0.07 0.08 ... ... ... ... ... ... ... ... ... 2021-02-28 19:40:00 1.42 2.50 ... 0.08 0.10 0.04 0.02 0.02 2021-02-28 20:40:00 3.97 4.97 ... 0.06 0.05 0.10 0.04 0.03 2021-02-28 21:40:00 1.63 4.18 ... 0.08 0.04 0.05 0.06 0.02 2021-02-28 22:40:00 2.84 3.82 ... 0.05 0.05 0.08 0.06 0.03 2021-02-28 23:40:00 1.78 3.97 ... 0.08 0.08 0.06 0.03 0.02 .4050 .4250 .4450 .4650 .4850 2021-01-01 00:40:00 0.03 0.01 0.01 0.00 0.0 2021-01-01 01:40:00 0.05 0.02 0.01 0.00 0.0 2021-01-01 02:40:00 0.03 0.02 0.01 0.00 0.0 2021-01-01 03:40:00 0.02 0.01 0.00 0.01 0.0 2021-01-01 04:40:00 0.05 0.01 0.01 0.00 0.0 ... ... ... ... ... ... 2021-02-28 19:40:00 0.03 0.01 0.00 0.00 0.0 2021-02-28 20:40:00 0.01 0.01 0.00 0.00 0.0 2021-02-28 21:40:00 0.01 0.01 0.00 0.00 0.0 2021-02-28 22:40:00 0.01 0.01 0.00 0.00 0.0 2021-02-28 23:40:00 0.01 0.01 0.00 0.00 0.0 [4239 rows x 47 columns] ###Markdown Usage demo of *2D Lookup Table Generator*Language: Python3Author: Neilay KhasnabishDate: 17/10/2021 1. Use main script ###Code ''' 2D Lookup Table Generator Author : Neilay Khasnabish Date : 17/10/2021 Description : This script analyzes data, generates 2D lookup table, and compare outputs. 
This script generates 2D lookup table which is compatible with Simulink 2D lookup table ''' # Python libraries used import pandas as pd import numpy as np import matplotlib.pyplot as plt from matplotlib import cm from matplotlib.ticker import LinearLocator import seaborn from mpl_toolkits.mplot3d import Axes3D from scipy.interpolate import griddata from scipy.interpolate import LinearNDInterpolator from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import AdaBoostRegressor # Lookup table generator class class lookup_table_generator : def __init__(self, dataMeasurementIP, dataMeasurementOP, tableAxesValues, dimensionTable = 2) : self.dimensionTable = dimensionTable self.dataMeasurementIP = dataMeasurementIP self.dataMeasurementOP = dataMeasurementOP self.tableAxesValues = tableAxesValues def checkData(self) : if self.dimensionTable != 2 : print('Error: Dimention of lookup table must be 2') else : dataMeasurementIP = self.dataMeasurementIP dataMeasurementOP = self.dataMeasurementOP tableAxesValues = self.tableAxesValues if dataMeasurementIP.isnull().values.any() or dataMeasurementOP.isnull().values.any() : print('Warning : Measurement data had NULL or undefined values') dataMeasurementIP = dataMeasurementIP.interpolate() dataMeasurementOP = dataMeasurementOP.interpolate() else : # Pandas to numpy array inputData = dataMeasurementIP.to_numpy() columnName = dataMeasurementIP.columns axesVal = tableAxesValues.to_numpy() outputData = dataMeasurementOP.to_numpy() # Plausibility check [rI, cI] = np.shape(inputData) [rO, cO] = np.shape(outputData) [rV, cV] = np.shape(axesVal) if cI == cV : if self.dimensionTable == cV : # Check if data is sparse seaborn.set(style='whitegrid') columnNames = dataMeasurementIP.columns seaborn.scatterplot(x=columnNames[0], y=columnNames[1], data=dataMeasurementIP) plt.title('Quality of data is good if the complete grid is covered by the data points') else : print('Error : Dimension of look up table must be same as columns of input data') else : print('Error : Number of axes of lookup table must be same as columns of input data') def gridInterpolator(self, method = 'nearest') : if self.dimensionTable != 2 : print('Error: Dimention of lookup table must be 2') else : dataMeasurementIP = self.dataMeasurementIP dataMeasurementOP = self.dataMeasurementOP tableAxesValues = self.tableAxesValues if dataMeasurementIP.isnull().values.any() or dataMeasurementOP.isnull().values.any() : print('Warning : Measurement data had NULL or undefined values') dataMeasurementIP = dataMeasurementIP.interpolate() dataMeasurementOP = dataMeasurementOP.interpolate() else : # Pandas to numpy array inputData = dataMeasurementIP.to_numpy() columnName = dataMeasurementIP.columns axesVal = tableAxesValues.to_numpy() outputData = dataMeasurementOP.to_numpy() # Plausibility check [rI, cI] = np.shape(inputData) [rO, cO] = np.shape(outputData) [rV, cV] = np.shape(axesVal) if cI == cV : if self.dimensionTable == cV : xAxis = axesVal[:,0] # Row : Input 1 yAxis = axesVal[:,1] # Column : Input 2 # 2D interpolation : Griddata grid_x, grid_y = np.meshgrid(xAxis, yAxis) grid_z0 = griddata(inputData, outputData, (grid_x, grid_y), method=method) reshapedOP = grid_z0.reshape(int(len(xAxis)), int(len(yAxis))) reshapedOP = np.transpose(reshapedOP) # Calculating the ouputs from look up table input1 = inputData[:,0] # Row input2 = inputData[:,1] # Col opCalc = [] for i in range(len(input1)) : # Input1 and Input2 length are same dummyErr = 10000000000000000000 pos1 = 0 # Find the nearest match of input 
1 for j1 in range(len(xAxis)) : temp = np.abs(input1[i] - xAxis[j1]) if temp < dummyErr : pos1 = j1 dummyErr = temp dummyErr = 10000000000000000000 pos2 = 0 # Find the nearest match of input 2 for j2 in range(len(yAxis)) : temp = np.abs(input2[i] - yAxis[j2]) if temp < dummyErr : pos2 = j2 dummyErr = temp # Nearest neighbour interpolation opCalc.append(reshapedOP[int(pos1), int(pos2)]) maeErr = np.mean(np.abs(np.concatenate(outputData) - opCalc)) maeErr = "{:.2f}".format(maeErr) # Surfaceplot fig = plt.figure() ax = fig.gca(projection='3d') surf = ax.plot_surface(grid_x, grid_y, reshapedOP, cmap=cm.coolwarm, linewidth=0, antialiased=False) plt.xlabel(columnName[0]) plt.ylabel(columnName[1]) fig.colorbar(surf, shrink=0.8, aspect=5) strTitle = ' '.join([str(method), ' | Mean Absolute Error: ', str(maeErr), ' unit']) plt.title(strTitle) plt.show() # Series plot plt.figure() plt.plot(outputData, 'g') plt.plot(opCalc, 'r') plt.xlabel('Samples') plt.ylabel('Magnitude') plt.legend('Measurement data', 'Output of lookup table') strTitle = ' '.join(['Validation by Nearest Neighbor Interp.', ' | MAE: ', str(maeErr), ' unit']) plt.title(strTitle) plt.show() # Store result to text file fileNew = open("gridDataLookUpTable.txt","w") fileNew.write('Input1 : \n') np.savetxt(fileNew, (np.transpose(xAxis)), fmt="%f") fileNew.write('\nInput2 : \n') np.savetxt(fileNew, (np.transpose(yAxis)), fmt="%f") fileNew.write('\nGenerated lookup table : \n') fileNew.write('[') np.savetxt(fileNew, (reshapedOP), fmt="%f") fileNew.write(']') fileNew.close() else : print('Error : Dimension of lookup table must be same as columns of input data') else : print('Error : Number of axes of lookup table must be same as columns of input data') def linearInterpolator(self) : if self.dimensionTable != 2 : print('Error: Dimention of lookup table must be 2') else : dataMeasurementIP = self.dataMeasurementIP dataMeasurementOP = self.dataMeasurementOP tableAxesValues = self.tableAxesValues if dataMeasurementIP.isnull().values.any() or dataMeasurementOP.isnull().values.any() : print('Warning : Measurement data had NULL or undefined values') dataMeasurementIP = dataMeasurementIP.interpolate() dataMeasurementOP = dataMeasurementOP.interpolate() else : # Pandas to numpy array inputData = dataMeasurementIP.to_numpy() columnName = dataMeasurementIP.columns axesVal = tableAxesValues.to_numpy() outputData = dataMeasurementOP.to_numpy() # Plausibility check [rI, cI] = np.shape(inputData) [rO, cO] = np.shape(outputData) [rV, cV] = np.shape(axesVal) if cI == cV : if self.dimensionTable == cV : xAxis = axesVal[:,0] # Row : Input 1 yAxis = axesVal[:,1] # Column : Input 2 # 2D interpolation grid_x, grid_y = np.meshgrid(xAxis, yAxis) interp = LinearNDInterpolator(list(zip(inputData[:,0], inputData[:,1])), outputData) grid_z0 = interp(grid_x, grid_y) reshapedOP = grid_z0.reshape(int(len(xAxis)), int(len(yAxis))) reshapedOP = np.transpose(reshapedOP) # Calculating the ouputs from look up table input1 = inputData[:,0] # Row input2 = inputData[:,1] # Col opCalc = [] for i in range(len(input1)) : # Input1 and Input2 length are same dummyErr = 10000000000000000000 pos1 = 0 # Find the nearest match of input 1 for j1 in range(len(xAxis)) : temp = np.abs(input1[i] - xAxis[j1]) if temp < dummyErr : pos1 = j1 dummyErr = temp dummyErr = 10000000000000000000 pos2 = 0 # Find the nearest match of input 2 for j2 in range(len(yAxis)) : temp = np.abs(input2[i] - yAxis[j2]) if temp < dummyErr : pos2 = j2 dummyErr = temp # Nearest neighbour interpolation 
opCalc.append(reshapedOP[int(pos1), int(pos2)]) maeErr = np.mean(np.abs(np.concatenate(outputData) - opCalc)) maeErr = "{:.2f}".format(maeErr) # Surfaceplot fig = plt.figure() ax = fig.gca(projection='3d') surf = ax.plot_surface(grid_x, grid_y, reshapedOP, cmap=cm.coolwarm, linewidth=0, antialiased=False) plt.xlabel(columnName[0]) plt.ylabel(columnName[1]) fig.colorbar(surf, shrink=0.8, aspect=5) strTitle = ' '.join([str('Linear Interp.'), ' | Mean Absolute Error: ', str(maeErr), ' unit']) plt.title(strTitle) plt.show() # Series plot plt.figure() plt.plot(outputData, 'g') plt.plot(opCalc, 'r') plt.xlabel('Samples') plt.ylabel('Magnitude') plt.legend('Measurement data', 'Output of lookup table') strTitle = ' '.join(['Validation by Nearest Neighbor Interp.', ' | MAE: ', str(maeErr), ' unit']) plt.title(strTitle) plt.show() # Store result to text file fileNew = open("linearLookUpTable.txt","w") fileNew.write('Input1 : \n') np.savetxt(fileNew, (np.transpose(xAxis)), fmt="%f") fileNew.write('\nInput2 : \n') np.savetxt(fileNew, (np.transpose(yAxis)), fmt="%f") fileNew.write('\nGenerated lookup table : \n') fileNew.write('[') np.savetxt(fileNew, (reshapedOP), fmt="%f") fileNew.write(']') fileNew.close() else : print('Error : Dimension of lookup table must be same as columns of input data') else : print('Error : Number of axes of lookup table must be same as columns of input data') def dtInterpolator(self, treeDepth = 10, estimatorSize = 500, learnRate = 0.001) : if self.dimensionTable != 2 : print('Error: Dimention of lookup table must be 2') else : dataMeasurementIP = self.dataMeasurementIP dataMeasurementOP = self.dataMeasurementOP tableAxesValues = self.tableAxesValues if dataMeasurementIP.isnull().values.any() or dataMeasurementOP.isnull().values.any() : print('Warning : Measurement data had NULL or undefined values') dataMeasurementIP = dataMeasurementIP.interpolate() dataMeasurementOP = dataMeasurementOP.interpolate() else : # Pandas to numpy array inputData = dataMeasurementIP.to_numpy() columnName = dataMeasurementIP.columns axesVal = tableAxesValues.to_numpy() outputData = dataMeasurementOP.to_numpy() # Plausibility check [rI, cI] = np.shape(inputData) [rO, cO] = np.shape(outputData) [rV, cV] = np.shape(axesVal) if cI == cV : if self.dimensionTable == cV : xAxis = axesVal[:,0] # Row : Input 1 yAxis = axesVal[:,1] # Column : Input 2 grid_x, grid_y = np.meshgrid(xAxis, yAxis) # Train Decision tree print('Training started') mdl = DecisionTreeRegressor(max_depth=treeDepth) regrMdl = AdaBoostRegressor(base_estimator=mdl, n_estimators=estimatorSize, learning_rate=learnRate) regrMdl.fit(inputData, np.ravel(outputData)) # Create table reshapedOP = np.zeros((int(len(xAxis)), int(len(yAxis)))) print('Generation started') for i1 in range(len(xAxis)) : for j1 in range(len(xAxis)) : Ip = np.array([xAxis[i1], yAxis[j1]]) Ip = Ip.reshape(1,2) reshapedOP[i1, j1] = regrMdl.predict(Ip) print('All done') # Calculating the ouputs from look up table input1 = inputData[:,0] # Row input2 = inputData[:,1] # Col opCalc = [] for i in range(len(input1)) : # Input1 and Input2 length are same dummyErr = 10000000000000000000 pos1 = 0 # Find the nearest match of xAxis for j1 in range(len(xAxis)) : temp = np.abs(input1[i] - xAxis[j1]) if temp < dummyErr : pos1 = j1 dummyErr = temp dummyErr = 10000000000000000000 pos2 = 0 # Find the nearest match of yAxis for j2 in range(len(yAxis)) : temp = np.abs(input2[i] - yAxis[j2]) if temp < dummyErr : pos2 = j2 dummyErr = temp # Nearest neighbour interpolation 
opCalc.append(reshapedOP[int(pos1), int(pos2)]) maeErr = np.mean(np.abs(np.concatenate(outputData) - opCalc)) maeErr = "{:.2f}".format(maeErr) # Surfaceplot fig = plt.figure() ax = fig.gca(projection='3d') surf = ax.plot_surface(grid_x, grid_y, reshapedOP, cmap=cm.coolwarm, linewidth=0, antialiased=False) plt.xlabel(columnName[0]) plt.ylabel(columnName[1]) fig.colorbar(surf, shrink=0.8, aspect=5) strTitle = ' '.join([str('Decision Tree Interp.'), ' | Mean Absolute Error: ', str(maeErr), ' unit']) plt.title(strTitle) plt.show() # Series plot plt.figure() plt.plot(outputData, 'g') plt.plot(opCalc, 'r') plt.xlabel('Samples') plt.ylabel('Magnitude') plt.legend('Measurement data', 'Output of lookup table') strTitle = ' '.join(['Validation by Nearest Neighbor Interp.', ' | MAE: ', str(maeErr), ' unit']) plt.title(strTitle) plt.show() # Store result to text file fileNew = open("decisionTreeLookUpTable.txt","w") fileNew.write('Input1 : \n') np.savetxt(fileNew, (np.transpose(xAxis)), fmt="%f") fileNew.write('\nInput2 : \n') np.savetxt(fileNew, (np.transpose(yAxis)), fmt="%f") fileNew.write('\nGenerated lookup table : \n') fileNew.write('[') np.savetxt(fileNew, (reshapedOP), fmt="%f") fileNew.write(']') fileNew.close() else : print('Error : Dimension of lookup table must be same as columns of input data') else : print('Error : Number of axes of lookup table must be same as columns of input data') ###Output _____no_output_____ ###Markdown 2. Read data from *data.xlsx* ###Code fileAddr = 'https://raw.githubusercontent.com/neilay-khasnabish/2D_Look_Up_Table_Generator/main/Data.xlsx' # Format of measurement input data dataMeasurementIP = pd.read_excel(fileAddr, 'ActualDataInput') # Format of measurement output data dataMeasurementOP = pd.read_excel(fileAddr, 'ActualDataOutput') # Format of Input1 and Input2 axes values tableAxesValues = pd.read_excel(fileAddr, 'LookupTableAxes') ###Output _____no_output_____ ###Markdown Please have a look at the *Data.xlsx* file attached to the Github repo to understand the format of inputs 3. Create class ###Code lookup_table_class = lookup_table_generator(dataMeasurementIP, dataMeasurementOP, tableAxesValues) ###Output _____no_output_____ ###Markdown 4. Check quality of data ###Code lookup_table_class.checkData() ###Output _____no_output_____ ###Markdown **Explanation of the above image**: If the data points are spread over the entire grid, the quality of data is good. If the data is spread diagonally or does not cover the entire range, the quality of data is not good to create the 2D lookup table. The above figure is an example of good data. The blue dots are the data points. 5. Generate lookup table with different algorithms ###Code # Grid based method : 'nearest', 'linear', 'cubic' lookup_table_class.gridInterpolator(method = 'nearest') ###Output _____no_output_____ ###Markdown **Explanation of the above image**: Nearest neighbour approach fits the look up table. The MAE (Mean Absolute Error) tells the quality of the fit. ###Code # Grid based method : 'nearest', 'linear', 'cubic' lookup_table_class.gridInterpolator(method = 'cubic') ###Output /usr/local/lib/python2.7/dist-packages/matplotlib/colors.py:504: RuntimeWarning: invalid value encountered in less xa[xa < 0] = -1 ###Markdown **Explanation of the above image**: Cubic approach does not fit the look up table. The MAE (Mean Absolute Error) tells the quality of the fit. 
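The MAE quoted in each figure title comes from the validation loop inside the class: every measured input pair is snapped to the nearest breakpoint on each axis and the corresponding table entry is compared with the measured output. A vectorised sketch of the same check is shown below, assuming a table already produced by one of the interpolators; the helper names (`nearest_lookup`, `table_mae`) are illustrative and not part of the class. ###Code
import numpy as np

def nearest_lookup(xAxis, yAxis, tableVals, in1, in2):
    # snap each measured input pair to its closest breakpoints and read the table entry
    xAxis, yAxis = np.asarray(xAxis), np.asarray(yAxis)
    i = np.argmin(np.abs(xAxis[None, :] - np.asarray(in1)[:, None]), axis=1)
    j = np.argmin(np.abs(yAxis[None, :] - np.asarray(in2)[:, None]), axis=1)
    return np.asarray(tableVals)[i, j]

def table_mae(xAxis, yAxis, tableVals, in1, in2, measured_out):
    # mean absolute error between measurements and nearest-neighbour table lookups
    looked_up = nearest_lookup(xAxis, yAxis, tableVals, in1, in2)
    return np.mean(np.abs(np.ravel(measured_out) - looked_up))
###Output _____no_output_____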
###Code # The arguments are: depth of the tree, number of estimators, and learning rate # The arguments are the tuning parameters of the Decision Tree Regressor # These tuning parameters can be adjusted to avoid overfit and underfit lookup_table_class.dtInterpolator(50, 250, 0.001) ###Output Training started Generation started All done ###Markdown **Explanation of the above image**: Decision tree based approach fits the look up table. The MAE (Mean Absolute Error) tells the quality of the fit. 6. Other ways to give inputs ###Code # Input and output measurement data can be read as pandas dataframe # Format of measurement input data read in terms of pandas dataframe from Excel or CSV # dataMeasurementIP = pd.read_excel(fileName, 'ActualDataInput') # Format of measurement output data read in terms of pandas dataframe from Excel or CSV # dataMeasurementOP = pd.read_excel(fileName, 'ActualDataOutput') # Format of Input1 and Input2 axes values read in terms of pandas dataframe from Excel or CSV tableAxesValues = pd.read_excel(fileAddr, 'LookupTableAxes') # Another way to generate the Input1 and Input2 axes values (points where interpolation to be made) Input1 = np.linspace(0, 1600, num=100) Input2 = np.linspace(0, 100, num=100) # Length of Input1 and Input2 must be same for Grid based interpolation print(len(Input1), len(Input2)) # Create pandas dataframe data = {'Input1' : Input1, 'Input2' : Input2} tableAxesValues = pd.DataFrame(data = data) print(tableAxesValues.head()) lookup_table_class = lookup_table_generator(dataMeasurementIP, dataMeasurementOP, tableAxesValues) ###Output (100, 100) Input1 Input2 0 0.000000 0.000000 1 16.161616 1.010101 2 32.323232 2.020202 3 48.484848 3.030303 4 64.646465 4.040404 ###Markdown Let us check the interpolation with the Neirest Neighbor approach ###Code lookup_table_class.gridInterpolator(method = 'nearest') ###Output _____no_output_____ ###Markdown **Explanation of the above image**: With this new axis points, we can see difference with the previusly generated graph using the same algorithm. The MAE (Mean Absolute Error) is different. 
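How finely the axes are sampled is effectively another tuning knob: more breakpoints give a finer table (and usually a lower MAE) at the cost of a larger table in the target software. A quick way to explore that trade-off with the same measurement data is sketched below; the point counts and axis ranges are illustrative. ###Code
import numpy as np
import pandas as pd

# regenerate the nearest-neighbour table at a few axis resolutions and compare the MAE in the figure titles
for n_points in (20, 50, 100):
    axes = pd.DataFrame({'Input1': np.linspace(0, 1600, num=n_points),
                         'Input2': np.linspace(0, 100, num=n_points)})
    ltg = lookup_table_generator(dataMeasurementIP, dataMeasurementOP, axes)
    ltg.gridInterpolator(method='nearest')
###Output _____no_output_____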
###Code lookup_table_class.gridInterpolator(method = 'cubic') ###Output _____no_output_____ ###Markdown ISO 281 Bearing Life Calculation Example___ Calculate L10 life (rolling contact fatigue) using ISO 281 method - Fatigue Life estimation Standard Libraries ###Code import numpy as np import pandas as pd import matplotlib.pyplot as plt import math import glob import os import scipy.stats as ss # fonts and warnings pd.options.mode.chained_assignment = None # off with setwithcopy plt.rcParams["font.family"] = "Times New Roman" font = {'weight' : 'bold', 'size' : 14} import matplotlib matplotlib.rc('font', **font) ###Output _____no_output_____ ###Markdown Classes module ###Code import classes_funcs as cf ###Output _____no_output_____ ###Markdown ___ Program Run Choose file locations for loading data and column headers ###Code file_location_fld = r' file_location_ebb = r cols = 'Time','PS deg','Fxy','Fz','My' ###Output _____no_output_____ ###Markdown Bearing design parameters ###Code brg_design_parameter_desc = pd.DataFrame({'Parameter':['i','z','dw','alpha','dp','Pu','kind','H'], 'Unit':['-','-','mm','degrees','mm','N','-','mm'],'Description':['Number of rows','Number of elements in a row','Individual brg diameter', 'Contact angle','Pitch diameter','Fatigue limit','Type', 'Height of inidividual unit']}) brg_design_parameter_desc ###Output _____no_output_____ ###Markdown Bearing designs ###Code skf_8110 = cf.brg_design(i=1,z=50,dw=77.5,alpha=60,D=1465,H=75,d=1270,pu=620000,kind='roller',ca_manuf=3190000) # create skf BT8110 bearing object skf_e168 = cf.brg_design(i=1,z=40,dw=77.5,alpha=60,D=1270,H=67,d=1016,pu=465000,kind='roller',ca_manuf=2750000) # create skf EE168400 bearing object brgs = skf_e168 ###Output _____no_output_____ ###Markdown Combine ebb and flood load cases with duty cycles (time spent at each load case) ###Code fld_load_cases = cf.load_case_comb(file_location_fld,cols,brg_p=brgs.p,brg_dp=brgs.dp()) # flood load cases for each brg ebb_load_cases = cf.load_case_comb(file_location_ebb,cols,brg_p=brgs.p,brg_dp=brgs.dp()) # ebb load cases for each brg duty_cycles = [0.1224,0.1672,0.1672,0.1582,0.1582,0.1194,0.0806,0.0179,0.009,0.2189,0.2139,0.1244,0.1592,0.1194,0.0796,0.0299,0.0547] t_profiles = cf.tidal_profile_comb(duty_cycles,[fld_load_cases.lc_df(),ebb_load_cases.lc_df()],brgs.p,brgs.Ca_rot(),brgs.z) ###Output _____no_output_____ ###Markdown Fatigue life ###Code lifes = cf.life_calcs(t_profiles.Ca_osc(),t_profiles.dyn_equiv_osc(),'roller',brgs.dp(),'high cleanliness',brgs.pu,rel_level=90,use_ISO_correction='Yes') l10_osc = lifes.L10_mill_osc() l10_hr = lifes.L10_hrs(t_profiles.N_opm_ave()) # correct l10 values for intermittent operation (ps does not operate continuously) T_op = 0.22 # percentage LISO_osc = l10_osc/T_op LISO_hr = l10_hr/T_op LISO_hr ###Output _____no_output_____ ###Markdown Reliability ###Code # reliability functions plotted using estimated beta and calculate LISO values as parameters of Weibull distribution fig,(ax,ax2)=plt.subplots(1,2,figsize=(14,4)) wbl_params_bt1 = pd.DataFrame({'Beta':[0.8,1,1.2],'Eta':[734585,418515,287624]}) time = np.linspace(1,1500000,1000) labels_bt = 'β = 0.8, η = 734,585','β = 1, η = 418,515','β = 1.2, η = 287,624' ls = '-','--',':' for i in range(len(wbl_params_bt1)): ax.plot(time,ss.weibull_min.sf(time,c=wbl_params_bt1.iloc[i,0],scale=wbl_params_bt1.iloc[i,1],loc=0),label=labels_bt[i],c='k',ls=ls[i]) _=ax.set(xlabel='Time (hrs)',ylabel='R(t)',title='BT1') _=ax.grid() _=ax.legend() wbl_params_ee1 = 
pd.DataFrame({'Beta':[0.8,1,1.2],'Eta':[274193,156216,107359]}) time = np.linspace(1,500000,1000) labels_ee = 'β = 0.8, η = 274,193','β = 1, η = 156,216','β = 1.2, η = 107,359' for i in range(len(wbl_params_ee1)): ax2.plot(time,ss.weibull_min.sf(time,c=wbl_params_ee1.iloc[i,0],scale=wbl_params_ee1.iloc[i,1],loc=0),label=labels_ee[i],c='k',ls=ls[i]) _=ax2.set(xlabel='Time (hrs)',ylabel='R(t)',title='EE168') _=ax2.grid() _=ax2.legend() #_=plt.savefig(fname='pof_rel_func') bt1_df = pd.DataFrame({'Time':time,'R(t)':ss.weibull_min.sf(time,c=wbl_params_bt1.iloc[i,0],scale=wbl_params_bt1.iloc[i,1],loc=0)}) bt1_df[bt1_df['R(t)']>=0.9] ###Output _____no_output_____ ###Markdown Plots___ ###Code fld_data = fld_load_cases.load_data()[0] fig,ax = plt.subplots(figsize=(14,4)) _=ax.plot(range(len(fld_data)),fld_data['My'],label='My',c='k') _=ax.plot(range(len(fld_data)),fld_data['Fxy'],label='Fr',c='k',ls='--') _=ax.plot(range(len(fld_data)),fld_data['Fz'],label='Fa',c='k',ls=':') _=ax.legend(loc=1) _=ax.set(ylabel='Force (N)',xlabel='Time (s)') _=ax.grid() #_=plt.savefig(fname='blade_root_forces') rolling = fld_load_cases.load_data()[5].rolling(window=10).mean() fig,ax = plt.subplots(figsize=(14,4)) _=ax.plot(rolling['My'],c='k',label='My') #_=ax.plot(rolling['Fxy'],c='k',label='Fxy',ls=':') ax2=ax.twinx() _=ax2.plot(rolling['PS deg'],c='k',ls='--',label='Blade angle') _=ax.set(ylabel='Force (N)',xlabel='Time (s)') _=ax2.set(ylabel='Angle (deg)') _=ax.legend() _=ax2.legend() #_=plt.savefig(fname='angle_my') fld_my = pd.concat([fld_load_cases.load_data()[i]['My'] for i in range(len(fld_load_cases.load_data()))]) ebb_my = pd.concat([ebb_load_cases.load_data()[i]['My'] for i in range(len(ebb_load_cases.load_data()))]) fig,(ax1,ax2) = plt.subplots(1,2,figsize=(14,4),sharey=True) _=ax1.hist(fld_my,bins=190,color='skyblue',ec='darkblue') _=ax1.set(xlabel='My (N)',ylabel='Frequency',title='Flood',xlim=[100000,2400000]) _=ax2.hist(ebb_my,bins=150,color='skyblue',ec='darkblue') _=ax2.set(xlabel='My (N)',title='Ebb',xlim=[100000,2400000]) _=ax1.grid() _=ax2.grid() _=plt.savefig(fname='My_hists') ###Output _____no_output_____ ###Markdown LISO equation Sensitivity analysis (bearing design and turbine params) ###Code def LISO(aiso,Ca,theta_e,z,p,Pe,T_op): # calculate L10 values (for sensitivity analysis) return aiso*(((Ca*((180/theta_e)**(3/10))*z**(0.033))/Pe)**p)*(1/T_op) def min_max(x): # min max normalisation (scales x between 0 and 1) return (x-min(x))/(max(x)-min(x)) aiso_vals = np.linspace(0.05,0.15,10) L_ISO_aiso = [LISO(aiso_vals[i],Ca=skf_8110.Ca_rot(),theta_e=t_profiles.theta_equiv(), z=skf_8110.z,p=skf_8110.p,Pe=t_profiles.dyn_equiv_osc(),T_op=T_op) for i in range(len(aiso_vals))] Ca_vals = np.linspace(1000000,10000000,10) L_ISO_Ca = [LISO(aiso=0.1,Ca=Ca_vals[i],theta_e=t_profiles.theta_equiv(), z=skf_8110.z,p=skf_8110.p,Pe=t_profiles.dyn_equiv_osc(),T_op=T_op) for i in range(len(Ca_vals))] theta_e_vals = np.linspace(1,20,20) L_ISO_theta = [LISO(aiso=0.1,Ca=skf_8110.Ca_rot(),theta_e=theta_e_vals[i], z=skf_8110.z,p=skf_8110.p,Pe=t_profiles.dyn_equiv_osc(),T_op=T_op) for i in range(len(theta_e_vals))] z_vals = np.linspace(20,60,40) L_ISO_z = [LISO(aiso=0.1,Ca=skf_8110.Ca_rot(),theta_e=t_profiles.theta_equiv(), z=z_vals[i],p=skf_8110.p,Pe=t_profiles.dyn_equiv_osc(),T_op=T_op) for i in range(len(z_vals))] p_vals = np.linspace(1,5,20) L_ISO_p = [LISO(aiso=0.1,Ca=skf_8110.Ca_rot(),theta_e=t_profiles.theta_equiv(), z=skf_8110.z,p=p_vals[i],Pe=t_profiles.dyn_equiv_osc(),T_op=T_op) for i in range(len(p_vals))] 
Pe_vals = np.linspace(1000000,10000000,100) L_ISO_Pe = [LISO(aiso=0.1,Ca=skf_8110.Ca_rot(),theta_e=t_profiles.theta_equiv(), z=skf_8110.z,p=skf_8110.p,Pe=Pe_vals[i],T_op=T_op) for i in range(len(Pe_vals))] Top_vals = np.linspace(0.1,0.8,20) L_ISO_Top = [LISO(aiso=0.1,Ca=skf_8110.Ca_rot(),theta_e=t_profiles.theta_equiv(), z=skf_8110.z,p=skf_8110.p,Pe=t_profiles.dyn_equiv_osc(),T_op=Top_vals[i]) for i in range(len(Top_vals))] # plot fig,(ax1,ax2) = plt.subplots(1,2,figsize=(14,4),sharey=True) # bearing parameters _=ax1.plot(min_max(aiso_vals),min_max(L_ISO_aiso),label='a,iso',c='k',ls='-') _=ax1.plot(min_max(Ca_vals),min_max(L_ISO_Ca),label='Ca',c='k',ls=':') _=ax1.plot(min_max(z_vals),min_max(L_ISO_z),label='z',c='k',ls='--') _=ax1.plot(min_max(p_vals),min_max(L_ISO_p),label='p',c='k',ls='-.') _=ax1.legend() _=ax1.grid() _=ax1.set(ylabel='Normalised Liso (-)',xlabel='Normalised bearing parameter (-)') # Turbine parameters _=ax2.plot(min_max(theta_e_vals),min_max(L_ISO_theta),label='θe',c='k',ls='-') _=ax2.plot(min_max(Pe_vals),min_max(L_ISO_Pe),label='Pe',c='k',ls=':') _=ax2.plot(min_max(Top_vals),min_max(L_ISO_Top),label='T,op',c='k',ls='--') _=ax2.legend() _=ax2.grid() _=ax2.set(xlabel='Normalised turbine parameter (-)') #_=plt.savefig(fname='L10_sensitivity') ###Output _____no_output_____ ###Markdown Low-dimensions ###Code algo_list = ['CMA', 'NoisyBandit', 'NoisyOnePlusOne', 'PSO', 'RandomSearch', 'SPSA', 'TBPSA'] func_list = ['rosenbrock', 'sphere4', 'rastrigin', 'griewank', 'deceptivepath'] dim_list = [2, 3, 5, 8] eps_list = [0.5, 0.10, 0.05, 0.03, 0.02, 0.01, 0] log_list = [False] EVAL_BUDGET = 1000 #CREATE A NEW FILE IF CHANGING THE NUMBER OF EVALUATIONS saved_file = "results-low-dim.pkl" #File to read from where previous expts were stored # or new file to write to save_interval = 600 initials = [0.0, 5.0, -5.0, 23.46, -23.46] run_exp(algo_list, func_list, dim_list, eps_list, log_list, EVAL_BUDGET, saved_file, 'pkl', initials, save_interval) exp_df = pd.read_pickle('results-low-dim.pkl') stripped_expt_df = exp_df.drop(columns = ['exp_data', 'min_params', 'f_min', 'time']) results_summary(stripped_expt_df) ###Output _____no_output_____ ###Markdown Plotting low-dimensions ###Code eval_budget = 1000 filter_func = lambda z: ((z['func'] in ['rastrigin']) and (z['dim'] == 5) and (z['log'] in [False]) and (z['algo'] in ['CMA', 'NoisyBandit', 'NoisyOnePlusOne', 'PSO', 'RandomSearch', 'SPSA', 'TBPSA']) and (z['starter'] in [5.0]) and (z['noise_level'] in [0.03])) use_tex = False fig_test = plot_regular(exp_df, filter_func, use_tex, plot_evals = eval_budget, y_field = 'f_min', logplot='y') ###Output _____no_output_____ ###Markdown Medium Dimensions Medium Dimensions Part 1 ###Code algo_list = ['CMA', 'NoisyBandit', 'NoisyOnePlusOne', 'PSO', 'RandomSearch', 'SPSA', 'TBPSA'] func_list = ['rosenbrock', 'sphere4', 'rastrigin', 'griewank', 'deceptivepath'] dim_list = [12, 15, 20] eps_list = [0.5, 0.10, 0.05, 0.03, 0.02, 0.01, 0] log_list = [False] EVAL_BUDGET = 1000 #CREATE A NEW FILE IF CHANGING THE NUMBER OF EVALUATIONS saved_file = "results-mid-dim-1.pkl" #File to read from where previous expts were stored # or new file to write to save_interval = 600 initials = [0.0, 5.0, -5.0, 23.46, -23.46] ###Output _____no_output_____ ###Markdown Medium Dimensions Part 2 ###Code algo_list = ['CMA', 'NoisyBandit', 'NoisyOnePlusOne', 'PSO', 'RandomSearch', 'SPSA', 'TBPSA'] func_list = ['rosenbrock', 'sphere4', 'rastrigin', 'griewank', 'deceptivepath'] dim_list = [30, 50, 100] eps_list = [0.5, 0.10, 0.05, 
0.03, 0.02, 0.01, 0] log_list = [False] EVAL_BUDGET = 1000 #CREATE A NEW FILE IF CHANGING THE NUMBER OF EVALUATIONS saved_file = "results-mid-dim-2.pkl" #File to read from where previous expts were stored # or new file to write to save_interval = 600 initials = [0.0, 5.0, -5.0, 23.46, -23.46] ###Output _____no_output_____ ###Markdown High Dimensions Part 1 ###Code algo_list = ['CMA', 'NoisyBandit', 'NoisyOnePlusOne', 'PSO', 'RandomSearch', 'SPSA', 'TBPSA'] func_list = ['rosenbrock', 'sphere4', 'rastrigin', 'griewank', 'deceptivepath'] dim_list = [100, 200] eps_list = [0.5, 0.10, 0.05, 0.03, 0.02, 0.01, 0] log_list = [False] EVAL_BUDGET = 1000 #CREATE A NEW FILE IF CHANGING THE NUMBER OF EVALUATIONS saved_file = "results-high-dim-1.pkl" #File to read from where previous expts were stored # or new file to write to save_interval = 600 initials = [0.0, 5.0, -5.0, 23.46, -23.46] ###Output _____no_output_____ ###Markdown Part 2 ###Code algo_list = ['CMA', 'NoisyBandit', 'NoisyOnePlusOne', 'PSO', 'RandomSearch', 'SPSA', 'TBPSA'] func_list = ['rosenbrock', 'sphere4', 'rastrigin', 'griewank', 'deceptivepath'] dim_list = [500, 1000] eps_list = [0.5, 0.10, 0.05, 0.03, 0.02, 0.01, 0] log_list = [False] EVAL_BUDGET = 1000 #CREATE A NEW FILE IF CHANGING THE NUMBER OF EVALUATIONS saved_file = "results-high-dim-2.pkl" #File to read from where previous expts were stored # or new file to write to save_interval = 600 initials = [0.0, 5.0, -5.0, 23.46, -23.46] ###Output _____no_output_____ ###Markdown Plotting Evaluations to a specified Goal ###Code shortlisted_algos = ['CMA','NoisyOnePlusOne','TBPSA','PSO'] test_func = 'rosenbrock' dim_list = [2, 3, 5, 8, 10] noise_level = 0.03 logify_flag = False goalX = 1e-5 initial_vals = [5.0] plot_evalstoX(exp_df, shortlisted_algos, test_func, dim_list, logify_flag, noise_level, initial_vals, goalX) ###Output _____no_output_____ ###Markdown A simple demo of the cleaned dataPlease check the readme if you are interested in getting the data used in the analysis. 
###Code import pandas as pd import numpy as np from pprint import pprint pd.set_option('display.max_columns', 100) pd.set_option('display.width', 200) pd.set_option('max_colwidth', 50) ## Custom Package import custom_script from custom_script.data_cleaning import * data_file = 'gameRecordsDataAnon_16-01-2021.json' df = data_pipeline_df(data_file) df.head() df.shape df.iloc[0] ###Output _____no_output_____ ###Markdown CNNGeometric demo notebookThis notebook shows how to run a trained model on a given image pair Imports ###Code from __future__ import print_function, division import os import argparse import torch import torch.nn as nn from torch.utils.data import Dataset, DataLoader from model.cnn_geometric_model import CNNGeometric from data.pf_dataset import PFDataset from data.download_datasets import download_PF_willow from image.normalization import NormalizeImageDict, normalize_image from util.torch_util import BatchTensorToVars, str_to_bool from geotnf.transformation import GeometricTnf from geotnf.point_tnf import * import matplotlib.pyplot as plt from skimage import io import warnings from torchvision.transforms import Normalize from collections import OrderedDict warnings.filterwarnings('ignore') ###Output _____no_output_____ ###Markdown Parameters ###Code feature_extraction_cnn = 'vgg' if feature_extraction_cnn=='vgg': model_aff_path = 'trained_models/best_streetview_checkpoint_adam_affine_grid_loss_PAMI.pth.tar' model_tps_path = 'trained_models/best_streetview_checkpoint_adam_tps_grid_loss_PAMI.pth.tar' elif feature_extraction_cnn=='resnet101': model_aff_path = 'trained_models/best_pascal_checkpoint_adam_affine_grid_loss_resnet_random.pth.tar' model_tps_path = 'trained_models/best_pascal_checkpoint_adam_tps_grid_loss_resnet_random.pth.tar' source_image_path='datasets/PF-dataset/duck(S)/060_0036.png' target_image_path='datasets/PF-dataset/duck(S)/060_0013.png' ###Output _____no_output_____ ###Markdown Load models ###Code use_cuda = torch.cuda.is_available() do_aff = not model_aff_path=='' do_tps = not model_tps_path=='' # Create model print('Creating CNN model...') if do_aff: model_aff = CNNGeometric(use_cuda=use_cuda,output_dim = 6,feature_extraction_cnn=feature_extraction_cnn) if do_tps: model_tps = CNNGeometric(use_cuda=use_cuda,output_dim = 18,feature_extraction_cnn=feature_extraction_cnn) # Load trained weights print('Loading trained model weights...') if do_aff: checkpoint = torch.load(model_aff_path, map_location=lambda storage, loc: storage) checkpoint['state_dict'] = OrderedDict([(k.replace('vgg', 'model'), v) for k, v in checkpoint['state_dict'].items()]) model_aff.load_state_dict(checkpoint['state_dict']) if do_tps: checkpoint = torch.load(model_tps_path, map_location=lambda storage, loc: storage) checkpoint['state_dict'] = OrderedDict([(k.replace('vgg', 'model'), v) for k, v in checkpoint['state_dict'].items()]) model_tps.load_state_dict(checkpoint['state_dict']) ###Output Creating CNN model... Loading trained model weights... 
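###Markdown The two regressors above differ only in the size of the parameter vector they predict: 6 numbers for an affine transform (reshaped to a 2x3 matrix) and 18 for the thin-plate-spline control points. A quick sanity check, sketched below, is to warp a dummy batch with the identity affine parameters and confirm it comes back essentially unchanged; `theta_identity`, `sanity_tnf` and the random batch are illustrative names, not part of the original demo. ###Code
# identity affine [[1,0,0],[0,1,0]] flattened to the 6-dim theta predicted by model_aff
theta_identity = torch.Tensor([1, 0, 0, 0, 1, 0]).view(1, 2, 3)
dummy_batch = torch.rand(1, 3, 240, 240)  # stand-in for a preprocessed image batch
if use_cuda:
    theta_identity = theta_identity.cuda()
    dummy_batch = dummy_batch.cuda()

sanity_tnf = GeometricTnf(geometric_model='affine', use_cuda=use_cuda)
warped = sanity_tnf(Variable(dummy_batch), Variable(theta_identity))
print((warped.data - dummy_batch).abs().max())  # expected to be close to zero
###Output _____no_output_____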
###Markdown Create image transformers ###Code tpsTnf = GeometricTnf(geometric_model='tps', use_cuda=use_cuda) affTnf = GeometricTnf(geometric_model='affine', use_cuda=use_cuda) ###Output _____no_output_____ ###Markdown Load and preprocess images ###Code resizeCNN = GeometricTnf(out_h=240, out_w=240, use_cuda = False) normalizeTnf = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) def preprocess_image(image): # convert to torch Variable image = np.expand_dims(image.transpose((2,0,1)),0) image = torch.Tensor(image.astype(np.float32)/255.0) image_var = Variable(image,requires_grad=False) # Resize image using bilinear sampling with identity affine tnf image_var = resizeCNN(image_var) # Normalize image image_var = normalize_image(image_var) return image_var source_image = io.imread(source_image_path) target_image = io.imread(target_image_path) source_image_var = preprocess_image(source_image) target_image_var = preprocess_image(target_image) if use_cuda: source_image_var = source_image_var.cuda() target_image_var = target_image_var.cuda() batch = {'source_image': source_image_var, 'target_image':target_image_var} resizeTgt = GeometricTnf(out_h=target_image.shape[0], out_w=target_image.shape[1], use_cuda = use_cuda) ###Output _____no_output_____ ###Markdown Evaluate model ###Code if do_aff: model_aff.eval() if do_tps: model_tps.eval() # Evaluate models if do_aff: theta_aff=model_aff(batch) warped_image_aff = affTnf(batch['source_image'],theta_aff.view(-1,2,3)) if do_tps: theta_tps=model_tps(batch) warped_image_tps = tpsTnf(batch['source_image'],theta_tps) if do_aff and do_tps: theta_aff_tps=model_tps({'source_image': warped_image_aff, 'target_image': batch['target_image']}) warped_image_aff_tps = tpsTnf(warped_image_aff,theta_aff_tps) ###Output _____no_output_____ ###Markdown Process result ###Code # Un-normalize images and convert to numpy if do_aff: warped_image_aff_np = normalize_image(resizeTgt(warped_image_aff),forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy() if do_tps: warped_image_tps_np = normalize_image(resizeTgt(warped_image_tps),forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy() if do_aff and do_tps: warped_image_aff_tps_np = normalize_image(resizeTgt(warped_image_aff_tps),forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy() ###Output _____no_output_____ ###Markdown Display ###Code N_subplots = 2+int(do_aff)+int(do_tps)+int(do_aff and do_tps) fig, axs = plt.subplots(1,N_subplots) axs[0].imshow(source_image) axs[0].set_title('src') axs[1].imshow(target_image) axs[1].set_title('tgt') subplot_idx = 2 if do_aff: axs[subplot_idx].imshow(warped_image_aff_np) axs[subplot_idx].set_title('aff') subplot_idx +=1 if do_tps: axs[subplot_idx].imshow(warped_image_tps_np) axs[subplot_idx].set_title('tps') subplot_idx +=1 if do_aff and do_tps: axs[subplot_idx].imshow(warped_image_aff_tps_np) axs[subplot_idx].set_title('aff+tps') for i in range(N_subplots): axs[i].axis('off') fig.set_dpi(150) plt.show() ###Output _____no_output_____
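###Markdown Beyond plotting, the un-normalised numpy arrays can be written straight to disk with the already-imported `skimage.io`, which is handy when comparing runs. A small sketch follows; the output filenames are arbitrary. ###Code
# the warped results are float arrays roughly in [0, 1]; convert to uint8 before saving
to_save = {}
if do_aff:
    to_save['warped_aff.png'] = warped_image_aff_np
if do_tps:
    to_save['warped_tps.png'] = warped_image_tps_np
if do_aff and do_tps:
    to_save['warped_aff_tps.png'] = warped_image_aff_tps_np
for fname, img in to_save.items():
    io.imsave(fname, (np.clip(img, 0, 1) * 255).astype(np.uint8))
###Output _____no_output_____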
1-DataModeling/P1_PostG/etl.ipynb
###Markdown ETL ProcessesUse this notebook to develop the ETL process for each of your tables before completing the `etl.py` file to load the whole datasets. ###Code import os import glob import psycopg2 import pandas as pd from sql_queries import * conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=student password=student") cur = conn.cursor() def get_files(filepath): all_files = [] for root, dirs, files in os.walk(filepath): files = glob.glob(os.path.join(root,'*.json')) for f in files : all_files.append(os.path.abspath(f)) return all_files ###Output _____no_output_____ ###Markdown Process `song_data`In this first part, you'll perform ETL on the first dataset, `song_data`, to create the `songs` and `artists` dimensional tables.Let's perform ETL on a single song file and load a single record into each table to start.- Use the `get_files` function provided above to get a list of all song JSON files in `data/song_data`- Select the first song in this list- Read the song file and view the data ###Code song_files = get_files('data/song_data') filepath = song_files[0] df = pd.read_json(filepath,lines=True) df.head() ###Output _____no_output_____ ###Markdown 1: `songs` Table Extract Data for Songs Table- Select columns for song ID, title, artist ID, year, and duration- Use `df.values` to select just the values from the dataframe- Index to select the first (only) record in the dataframe- Convert the array to a list and set it to `song_data` ###Code song_data=df[['song_id','title','artist_id','year','duration']].values[0].tolist() song_data ###Output _____no_output_____ ###Markdown Insert Record into Song TableImplement the `song_table_insert` query in `sql_queries.py` and run the cell below to insert a record for this song into the `songs` table. Remember to run `create_tables.py` before running the cell below to ensure you've created/resetted the `songs` table in the sparkify database. ###Code cur.execute(song_table_insert, song_data) conn.commit() df ###Output _____no_output_____ ###Markdown Run `test.ipynb` to see if you've successfully added a record to this table. 2: `artists` Table Extract Data for Artists Table- Select columns for artist ID, name, location, latitude, and longitude- Use `df.values` to select just the values from the dataframe- Index to select the first (only) record in the dataframe- Convert the array to a list and set it to `artist_data` ###Code artist_data = df[['artist_id','artist_name','artist_location','artist_latitude','artist_longitude']].values[0].tolist() artist_data ###Output _____no_output_____ ###Markdown Insert Record into Artist TableImplement the `artist_table_insert` query in `sql_queries.py` and run the cell below to insert a record for this song's artist into the `artists` table. Remember to run `create_tables.py` before running the cell below to ensure you've created/resetted the `artists` table in the sparkify database. ###Code cur.execute(artist_table_insert, artist_data) conn.commit() ###Output _____no_output_____ ###Markdown Run `test.ipynb` to see if you've successfully added a record to this table. 
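For reference, the two insert statements used above live in `sql_queries.py`, which is not reproduced in this notebook. One possible shape for them is sketched below; the column names follow the extraction order used above, while the `ON CONFLICT` clauses are an assumption about how duplicates should be handled. ###Code
# illustrative versions of the queries referenced above (not necessarily the project's exact SQL)
song_table_insert = """
    INSERT INTO songs (song_id, title, artist_id, year, duration)
    VALUES (%s, %s, %s, %s, %s)
    ON CONFLICT (song_id) DO NOTHING;
"""

artist_table_insert = """
    INSERT INTO artists (artist_id, name, location, latitude, longitude)
    VALUES (%s, %s, %s, %s, %s)
    ON CONFLICT (artist_id) DO NOTHING;
"""
###Output _____no_output_____ ###Markdown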
Process `log_data`In this part, you'll perform ETL on the second dataset, `log_data`, to create the `time` and `users` dimensional tables, as well as the `songplays` fact table.Let's perform ETL on a single log file and load a single record into each table.- Use the `get_files` function provided above to get a list of all log JSON files in `data/log_data`- Select the first log file in this list- Read the log file and view the data ###Code log_files = get_files('data/log_data') filepath = log_files[0] df = pd.read_json(filepath,lines=True) df.head() ###Output _____no_output_____ ###Markdown 3: `time` Table Extract Data for Time Table- Filter records by `NextSong` action- Convert the `ts` timestamp column to datetime - Hint: the current timestamp is in milliseconds- Extract the timestamp, hour, day, week of year, month, year, and weekday from the `ts` column and set `time_data` to a list containing these values in order - Hint: use pandas' [`dt` attribute](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.dt.html) to access easily datetimelike properties.- Specify labels for these columns and set to `column_labels`- Create a dataframe, `time_df,` containing the time data for this file by combining `column_labels` and `time_data` into a dictionary and converting this into a dataframe ###Code df = df[df['page'] == 'NextSong'] df.head() # Extract Time Column [0] df[['ts']].values.tolist()[0] df['ts'] =pd.to_datetime(df['ts'],unit='ms') df['ts'].head() t = pd.Series(df['ts'],index=df.index) tm = pd.to_datetime(t,unit='ms') td = [] for data in tm: td.append([data,data.hour,data.day,data.weekofyear,data.month,data.year,data.dayofweek]) time_data = [[data,data.hour,data.day,data.weekofyear,data.month,data.year,data.dayofweek] for data in tm] time_df = pd.DataFrame(time_data) conn.close() time_df t = pd.Series(df['ts'],index=df.index) tm = pd.to_datetime(t,unit='ms') type(tm) t = pd.Timestamp(tm,unit='ms') for x in t: print(pd.Timestamp(x,unit='ms')) td = [] for tm in t: td.append([tm,tm.hour,tm.day,tm.weekofyear,tm.month,tm.year,tm.day_name()]) # insert time data records time_data = ([tm,tm.hour,tm.day,tm.week,tm.month,tm.year,tm.dayofweek] for tm in t) column_labels = ('start_time','hour','day','week','month','year','weekday') time_df = pd.DataFrame(time_data,columns=column_labels) for i, row in time_df.iterrows(): print(row) ts =pd.to_datetime(df['ts'],unit='ms') ts.dt.timestamp t = pd.Series(df['ts'],index=df.index) t.dt.timestamp t.dt.time df['ts'] time_data = ([val,val.hour,val.day,val.week,val.month,val.year,val.dayofweek] for val in df['ts']) column_labels = ('start_time','hour','day','week','month','year','weekday') time_df = pd.DataFrame(time_data,columns = column_labels) time_df.head() ###Output _____no_output_____ ###Markdown Insert Records into Time TableImplement the `time_table_insert` query in `sql_queries.py` and run the cell below to insert records for the timestamps in this log file into the `time` table. Remember to run `create_tables.py` before running the cell below to ensure you've created/resetted the `time` table in the sparkify database. ###Code for i, row in time_df.iterrows(): cur.execute(time_table_insert, list(row)) conn.commit() ###Output _____no_output_____ ###Markdown Run `test.ipynb` to see if you've successfully added records to this table. 
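The `iterrows()` loop above is fine for a single demo log file; for the full dataset the same insert can be batched with `psycopg2.extras.execute_values`, as sketched below. This assumes the `time` table uses the same seven column names as `column_labels`, and the plain-int casts are only there because some psycopg2 setups cannot adapt numpy integer types. ###Code
import numpy as np
from psycopg2.extras import execute_values

# batch-insert all rows of time_df in one round trip instead of one execute() per row
rows = [tuple(int(v) if isinstance(v, np.integer) else v for v in rec)
        for rec in time_df.itertuples(index=False, name=None)]
execute_values(
    cur,
    "INSERT INTO time (start_time, hour, day, week, month, year, weekday) VALUES %s ON CONFLICT DO NOTHING",
    rows,
)
conn.commit()
###Output _____no_output_____ ###Markdown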
4: `users` Table Extract Data for Users Table- Select columns for user ID, first name, last name, gender and level and set to `user_df` ###Code df.head(3) user_df = df[['userId','firstName','lastName','gender','level']] user_df.head() ###Output _____no_output_____ ###Markdown Insert Records into Users TableImplement the `user_table_insert` query in `sql_queries.py` and run the cell below to insert records for the users in this log file into the `users` table. Remember to run `create_tables.py` before running the cell below to ensure you've created/resetted the `users` table in the sparkify database. ###Code for i, row in user_df.iterrows(): cur.execute(user_table_insert, row) conn.commit() ###Output _____no_output_____ ###Markdown Run `test.ipynb` to see if you've successfully added records to this table. 5: `songplays` Table Extract Data and Songplays TableThis one is a little more complicated since information from the songs table, artists table, and original log file are all needed for the `songplays` table. Since the log file does not specify an ID for either the song or the artist, you'll need to get the song ID and artist ID by querying the songs and artists tables to find matches based on song title, artist name, and song duration time.- Implement the `song_select` query in `sql_queries.py` to find the song ID and artist ID based on the title, artist name, and duration of a song.- Select the timestamp, user ID, level, song ID, artist ID, session ID, location, and user agent and set to `songplay_data` Insert Records into Songplays Table- Implement the `songplay_table_insert` query and run the cell below to insert records for the songplay actions in this log file into the `songplays` table. Remember to run `create_tables.py` before running the cell below to ensure you've created/resetted the `songplays` table in the sparkify database. ###Code df.head(2) for index, row in df.iterrows(): # get songid and artistid from song and artist tables cur.execute(song_select, (row.song, row.artist, row.length)) results = cur.fetchone() if results: songid, artistid = results else: songid, artistid = None, None # insert songplay record songplay_data = (row.ts,row.userId,row.level,songid,artistid,row.sessionId,row.location,row.userAgent) cur.execute(songplay_table_insert, songplay_data) conn.commit() ###Output _____no_output_____ ###Markdown Run `test.ipynb` to see if you've successfully added records to this table. Close Connection to Sparkify Database ###Code conn.close() ###Output _____no_output_____
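###Markdown With the single-file logic worked out, `etl.py` mainly needs a driver that walks a directory and applies a per-file function. The sketch below is one possible shape for that driver, not the finished script; `process_data` is an illustrative name, the per-file helpers in the usage comment are placeholders for functions built from the steps in this notebook, and a fresh database connection is assumed since the one above has just been closed. ###Code
def process_data(cur, conn, filepath, func):
    """Apply func(cur, datafile) to every JSON file under filepath, committing as we go."""
    all_files = get_files(filepath)  # reuses the helper defined at the top of this notebook
    print('{} files found in {}'.format(len(all_files), filepath))
    for i, datafile in enumerate(all_files, 1):
        func(cur, datafile)
        conn.commit()
        print('{}/{} files processed.'.format(i, len(all_files)))

# Usage sketch (placeholder helper names):
# process_data(cur, conn, filepath='data/song_data', func=process_song_file)
# process_data(cur, conn, filepath='data/log_data', func=process_log_file)
###Output _____no_output_____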
Hw-3.ipynb
###Markdown 1)Write a jupyter notebook to perform Bisection Search root finding. Numerically find the two roots of the function:Use a tolerance of 1.0e-6 for the allowed deviation of f(x) from 0.2) Given your starting guesses for the bracketing values around the roots, how many iterations does your method take to converge?3) Have your notebook make a plot of f(x) vs. x as a line, and indicated with differently colored points your initial bracketing values and the roots. In the plot, use limits of x=[0,3] and y=[-0.5, 2.1]. Add a horizontal line at z=0. Plot f(x) at a 1000 evenly spaced values of x=[0,3].4) Create an issue for your repository and tag your TA using “@zbriesem“ or “@adwasser“. For instance, “Please grade my homework, @zbriesem.”. CLEAR ALL THE CELLS BEFORE YOU COMMIT THE NOTEBOOK.5) Your TA will clone your code and email you commented version of the code and a grade. To get the full grade possible, all the notebooks will need to run to completion without errors and produce the requested plots.6) Call the repository “astr-119-hw-3” and the notebook “hw-3.ipynb”. ###Code %matplotlib inline import numpy as np import matplotlib.pyplot as plt ###Output _____no_output_____ ###Markdown Define a function for which we'd like to find the roots ###Code def function_for_roots(x): a=1.01 b=-3.04 c=2.07 return a*x**2+b*x+c#get the roots of ax^2 +bx + c ###Output _____no_output_____ ###Markdown We need a function to check whether our initial values are valid ###Code def check_initial_values(f,x_min,x_max,tol): #check our initial guesses y_min=f(x_min) y_max=f(x_max) #check that x_min and x_max contain a zero crossing if(y_min*y_max>=0.0): print("No zero crossing found in the range = ",x_min,x_max) s="f(%f) = %f, f(%f) = %f"%(x_min,y_min,x_max,y_max) print(s) return 0 #if x_min is a root, then return flag == 1 if(np.fabs(y_min)<tol): return 1 #if x_max is a root, then return flag == 2 if(np.fabs(y_max)<tol): return 2 #if we reach this point, the bracket is valid #and we will return 3 return 3 ###Output _____no_output_____ ###Markdown Now we will define the main work function that actually performs the iterative search ###Code def bisection_root_finding(f,x_min_start,x_max_start,tol): #this function uses bisection search to find a root x_min=x_min_start#minimum x in bracket x_max=x_max_start#maximum x in bracket x_mid=0.0#mid point y_min=f(x_min)#function value at x_min y_max=f(x_max)#function value at x_max y_mid=0.0#function value at mid point imax=10000#set a maximum number of iterations i=0#iteration counter #check the initial values flag=check_initial_values(f,x_min,x_max,tol) if(flag==0): print("Error in bisection_root_finding().") raise ValueError('Initial values invalid',x_min,x_max) elif(flag==1): #lucky guess return x_min elif(flag==2): #another lucky guess return x_max #if we reach here, the we need to conduct the search #set a flag flag=1 #enter a while loop while(flag): x_mid=0.5*(x_min+x_max)#mid point y_mid=f(x_mid)#function value at x_mid #check if x_mid is a root if(np.fabs(y_mid)<tol): flag=0 else: #x_mid is not a root #if the product of the function at the midpoint #and at one of the end points is greater than #zero, replace this end point if(f(x_min)*f(x_mid)>0): #replace x_min with x_mid x_min=x_mid else: #replace x_max with x_mid x_max=x_mid #print out the iteration print(x_min,f(x_min),x_max,f(x_max)) #count the iteratin i+=1 #if we have exceeded the max number #of iterations, exit if(i>=imax): print("Exceeded max number of iterations = ",i) s="Min bracket f(%f) = 
%f"%(x_min,f(x_min)) print(s) s="Max bracket f(%f) = %f"%(x_max,f(x_max)) print(s) s="Mid bracket f(%f) = %f"%(x_mid,f(x_mid)) print(s) raise StopIteration('Stopping iterations after ',i) #we are done! print('The numer of iterations required to find the root: ',i) return x_mid ###Output _____no_output_____ ###Markdown Perform the search ###Code x_min=0.2 x_max=1.7 tolerance =1.0e-6 #print the initial guess print(x_min,function_for_roots(x_min)) print(x_max,function_for_roots(x_max)) x_root=bisection_root_finding(function_for_roots,x_min,x_max,tolerance) y_root=function_for_roots(x_root) s="Root found with y(%f) = %f"%(x_root,y_root) print(s) xVals=np.linspace(0,3,1000) yVals=function_for_roots(xVals) def zeroFunc(x):#takes in x and returns zero. return 0*x#because I'm lazy and this seems the most conveninet way to instantiate an array of 1000 zeroes z=zeroFunc(xVals) fig=plt.figure(figsize=(8,8)) plt.plot(xVals,yVals,label=r'$y(x) = 1.01x^2-3.04x+2.07$') plt.plot(xVals,z,label=r'$y(x) = 0$') plt.plot(0.2,function_for_roots(0.2),'o',label="Upper Bracket")#Plots Upper bracket as a point plt.plot(1.7, function_for_roots(1.7),'o', label="Lower Bracket")#plots lower bracket as a point plt.plot(x_root,y_root,'o',label="Root")#plots the root as a point plt.xlim([0,3]) plt.ylim([-.5,2.1]) plt.legend(loc=1,framealpha=.05) ###Output _____no_output_____
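###Markdown As a quick cross-check on the search above (not part of the assignment), bisection halves the bracket each iteration, so shrinking a starting width of `x_max - x_min` down to `tol` takes roughly `log2((x_max - x_min)/tol)` iterations; the quadratic formula also gives the roots directly. Note that the code above actually stops when `|f(x_mid)| < tol` rather than when the bracket is that narrow, so this is only a rough estimate. ###Code
# Expected iteration count for a bracket of width x_max - x_min = 1.5 and tol = 1.0e-6.
n_expected = np.ceil(np.log2((x_max - x_min) / tolerance))
print("Bisection needs roughly %d iterations to shrink the bracket to tol." % n_expected)

# Analytic roots of 1.01 x^2 - 3.04 x + 2.07 from the quadratic formula, for comparison.
a, b, c = 1.01, -3.04, 2.07
disc = np.sqrt(b**2 - 4*a*c)
print("Analytic roots:", (-b - disc) / (2*a), (-b + disc) / (2*a))
###Output _____no_output_____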
Car Evaluation.ipynb
###Markdown Comparison versus Acceptibility ###Code sns.countplot(x='Price',hue = 'Acceptibility',data=df) sns.countplot(x='Maint',hue='Acceptibility',data=df) sns.countplot(x='Doors',hue='Acceptibility',data=df) sns.countplot(x='Luggage',hue='Acceptibility',data=df) sns.countplot(x='Safety',hue='Acceptibility',data=df) ###Output _____no_output_____ ###Markdown Label Encoding Changing the string values to float values. 1. For acceptibility ###Code df.Acceptibility.replace(('unacc','acc','good','vgood'),(0,1,2,3),inplace=True) df.head(5) ###Output _____no_output_____ ###Markdown 2. For Lugagge boot ###Code df.Luggage.replace(('small','med','big'),(0,1,2),inplace=True) df.head(5) ###Output _____no_output_____ ###Markdown 3.For Safety ###Code df.Safety.replace(('low','med','high'),(0,1,2),inplace=True) df.head(5) ###Output _____no_output_____ ###Markdown 4.For Maintainence ###Code df.Maint.replace(('low','med','high','vhigh'),(0,1,2,3),inplace=True) df.head(5) ###Output _____no_output_____ ###Markdown 5. For Price ###Code df.Price.replace(('low','med','high','vhigh'),(0,1,2,3),inplace=True) df.head(5) ###Output _____no_output_____ ###Markdown 6. For Persons ###Code df.Persons.replace(('more'),5,inplace=True) df.head(15) ###Output _____no_output_____ ###Markdown 7. For Doors ###Code df.Doors.replace(('5more'),5,inplace=True) df.head(10) ###Output _____no_output_____ ###Markdown New DataIn case of price maintainence and safety.0 - low1 - med2 - high3 - very highIn case of acceptibility0 - unacceptable1 - acceptable2 - good 3 - very goodAnd in case of Luggage Boot 0 - small1 - Medium2 - Big ###Code df.corr() sns.heatmap(df.corr(), annot=True) ###Output _____no_output_____ ###Markdown Splitting datasets into independent and dependent variables ###Code x = df.iloc[:,:6] y = df.iloc[:,6] print("Shape of x:-",x.shape) print("Shape of y:-",y.shape) ###Output Shape of x:- (1728, 6) Shape of y:- (1728,) ###Markdown Training Data ###Code from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.30, random_state = 1) print("Shape of x Test",x_test.shape) print("Shape of x Train",x_train.shape) print("Shape of y Test",y_train.shape) print("Shape of y train",y_test.shape) from sklearn.preprocessing import StandardScaler sc = StandardScaler() x_train = sc.fit_transform(x_train) x_test = sc.transform(x_test) ###Output /Users/vaibhav/anaconda3/lib/python3.7/site-packages/sklearn/preprocessing/data.py:645: DataConversionWarning: Data with input dtype int64, object were all converted to float64 by StandardScaler. return self.partial_fit(X, y) /Users/vaibhav/anaconda3/lib/python3.7/site-packages/sklearn/base.py:464: DataConversionWarning: Data with input dtype int64, object were all converted to float64 by StandardScaler. return self.fit(X, **fit_params).transform(X) /Users/vaibhav/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:6: DataConversionWarning: Data with input dtype int64, object were all converted to float64 by StandardScaler. 
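###Markdown The `DataConversionWarning` messages above appear because the `Doors` and `Persons` columns still have object dtype after the string replacements (their remaining values are numeric strings), so `StandardScaler` has to convert mixed types on the fly. One way to avoid this, sketched below as an alternative to the split-and-scale cells above, is to cast the whole feature matrix to float first. ###Code
# Alternative to the cells above: make every feature column numeric before splitting/scaling.
x = x.astype(float)

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.30, random_state=1)

sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)
###Output _____no_output_____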
###Markdown Modelling KNeighbours Classification ###Code from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import confusion_matrix ###Output _____no_output_____ ###Markdown Creating a model ###Code knn = KNeighborsClassifier(n_neighbors=3) ###Output _____no_output_____ ###Markdown Feeding the training data into the model ###Code knn.fit(x_train, y_train) ###Output _____no_output_____ ###Markdown Predicting the value for x_test ###Code prediction = knn.predict(x_test) ###Output _____no_output_____ ###Markdown Finding the training and testing accuracy ###Code print("Training Accuracy:",knn.score(x_train,y_train)) print("Testing Accuracy:",knn.score(x_test,y_test)) ###Output Testing Accuracy: 0.953757225433526 ###Markdown Printing the confusion matrix ###Code cm = confusion_matrix(y_test, prediction) print(cm) ###Output [[358 10 0 0] [ 9 102 0 0] [ 1 2 19 0] [ 0 2 0 16]] ###Markdown Car Evaluation ProblemCar Evaluation Dataset: http://archive.ics.uci.edu/ml/datasets/Car+EvaluationPurpose: Provide an estimate for the acceptability of a car's price based on 6 fatures. ###Code import numpy as np import pandas as pd from sklearn import svm from sklearn.preprocessing import LabelEncoder from sklearn.utils import shuffle from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import cross_val_score from sklearn.model_selection import GridSearchCV from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import AdaBoostClassifier import matplotlib.pyplot as plt import seaborn as sns ###Output _____no_output_____ ###Markdown 1. Import Dataset ###Code #Car Evaluation Dataset: http://archive.ics.uci.edu/ml/datasets/Car+Evaluation #Purpose: Provide an estimate for the acceptability of a car's price based on 6 fatures: headers = ["Buying Price", "Maintanence", "#Doors", "#Persons", "Trunk Size", "Safety", "Acceptability"] #data = np.loadtxt('./datasets/carPrice/car_data.csv', delimiter = ',', dtype = str) data = pd.read_csv('./datasets/CAR/car.data.csv', header = None, names = headers) #print(data[:5])#Data preview data.head()#Data preview #Checking what data types we have in the dataset print(data.dtypes) #Result shows that they are all string objects #Check for any Null values in the Dataset data[data.isnull().any(axis=1)] #Result shows that there are no null values #Ordinal encoding using SKlearn's label encoder function lb_Make = LabelEncoder() data["Buying Price"] = lb_Make.fit_transform(data["Buying Price"]) data["Maintanence"] = lb_Make.fit_transform(data["Maintanence"]) data["#Doors"] = lb_Make.fit_transform(data["#Doors"]) data["#Persons"] = lb_Make.fit_transform(data["#Persons"]) data["Trunk Size"] = lb_Make.fit_transform(data["Trunk Size"]) data["Safety"] = lb_Make.fit_transform(data["Safety"]) data["Acceptability"] = lb_Make.fit_transform(data["Acceptability"]) data.head() #Convert Data to a numpy Array data = data.values #Relabel the last column as either acceptable or unacceptable to convert this problem into a classification problem data[data[:,6] == 0,6] = 1 data[data[:,6] == 1,6] = 1 data[data[:,6] == 3,6] = 1 data[data[:,6] == 2,6] = 0 print(data[0:5,:])#Preview #Data splitting partitionVal = 0.8 breakNum = int(partitionVal*len(data)) X_train_val = data[:breakNum,:6] Y_train_val = data[:breakNum,6] X_test = data[breakNum:,:6] Y_test = data[breakNum:,6] print(X_train_val.shape, X_test.shape, Y_train_val.shape, Y_test.shape) ###Output (1382, 6) (346, 6) (1382,) (346,) ###Markdown 
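One caveat about the encoding step above before declaring the classifiers: `LabelEncoder` assigns integer codes in alphabetical order, so ordered categories such as low < med < high < vhigh end up as high=0, low=1, med=2, vhigh=3, which scrambles their natural order. If that order matters to a model, `OrdinalEncoder` with explicit categories is one alternative; the sketch below is illustrative only, and `raw_df` is a placeholder for the dataframe before encoding. ###Code
from sklearn.preprocessing import OrdinalEncoder

# Explicit category order, lowest to highest, for the buying-price and maintenance columns.
price_order = ['low', 'med', 'high', 'vhigh']
encoder = OrdinalEncoder(categories=[price_order, price_order])
# encoded = encoder.fit_transform(raw_df[['Buying Price', 'Maintanence']])  # raw_df: data before encoding
###Output _____no_output_____
###Markdown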
2. Declare classifier functions ###Code global count count = 0 def draw_heatmap(acc, acc_desc, C_list, character): global count plt.figure(figsize = (2,4)) ax = sns.heatmap(acc, annot=True, fmt='.3f', yticklabels=C_list, xticklabels=[]) ax.collections[0].colorbar.set_label("accuracy") ax.set(ylabel='$' + character + '$') plt.title(acc_desc + ' w.r.t $' + character + '$') sns.set_style("whitegrid", {'axes.grid' : False}) plt.savefig('./Results/carEvaluation/' + str(count) + '.jpg', bbox_inches = 'tight') plt.show() count+=1 def svm_func(): #SVM binary classification classifier = svm.SVC(kernel = 'linear') C_list = [10**-5, 10**-4, 10**-3, 10**-2, 10**-1, 1] # Different C to try. parameters = {'C': C_list} clf = GridSearchCV(classifier, parameters, return_train_score = 'true', cv=5)#Perform a grid Search to identify the best C clf.fit(X_train_val, Y_train_val)#fit the classifier with the training data #Extract the training and validation accuracies and plot them as heat maps tovisualize the best C parameter train_acc = clf.cv_results_['mean_train_score'] draw_heatmap(train_acc.reshape(-1,1), 'train accuracy', C_list, 'C') val_acc = clf.cv_results_['mean_test_score'] draw_heatmap(val_acc.reshape(-1,1), 'val accuracy', C_list, 'C') #Find the optimal C parameter and use that to redefine the classifier optimal_classifier = svm.SVC(kernel = 'linear', C = clf.best_params_['C'] ) for i,j in enumerate(C_list): if j == clf.best_params_['C']: best_train_acc = train_acc[i] #Find test accuracy optimal_classifier.fit(X_train_val, Y_train_val) test_acc = optimal_classifier.score(X_test, Y_test) return test_acc, best_train_acc, clf.best_params_['C'] def svm_func_boost(): classifier = svm.SVC(kernel = 'linear') C_list = [10**-5, 10**-4, 10**-3, 10**-2, 10**-1, 1] # Different C to try. 
parameters = {'C': C_list} clf = GridSearchCV(classifier, parameters, return_train_score = 'true', cv=5)#Perform a grid Search to identify the best C clf.fit(X_train_val, Y_train_val)#fit the classifier with the training data optimal_classifier = svm.SVC(kernel = 'linear', C = clf.best_params_['C'] ) boostedSVM = AdaBoostClassifier(optimal_classifier, algorithm = "SAMME") boostedSVM.fit(X_train_val, Y_train_val) test_acc = boostedSVM.score(X_test,Y_test) for i,j in enumerate(C_list): if j == clf.best_params_['C']: best_train_acc = train_acc[i] return test_acc, best_train_acc, clf.best_params_['C'] def decision_Tree(): #Decision Tree Classifier D_list = np.array([1, 2, 3, 4, 5]) parameters = {'max_depth':D_list} classifier_grid = GridSearchCV(DecisionTreeClassifier(criterion="entropy"), parameters, cv=5, return_train_score=True) classifier_grid.fit(X_train_val, Y_train_val) #plot heatmaps draw_heatmap(classifier_grid.cv_results_['mean_train_score'].reshape(5,1), 'DT train accuracy', D_list, 'D') draw_heatmap(classifier_grid.cv_results_['mean_test_score'].reshape(5,1), 'DT val accuracy', D_list, 'D') #train/test with best parameter D_star = classifier_grid.best_params_['max_depth'] classifier_test = DecisionTreeClassifier(max_depth=D_star, criterion="entropy") classifier_test.fit(X_train_val, Y_train_val) Desicion_test_acc = classifier_test.score(X_test,Y_test) train_acc = classifier_grid.cv_results_['mean_train_score'] for i,j in enumerate(D_list): if j == D_star: best_train_acc = train_acc[i] return Desicion_test_acc, best_train_acc, D_star def decision_tree_boost(): D_list = np.array([1, 2, 3, 4, 5]) parameters = {'max_depth':D_list} classifier_grid = GridSearchCV(DecisionTreeClassifier(criterion="entropy"), parameters, cv=5, return_train_score=True) classifier_grid.fit(X_train_val, Y_train_val) D_star = classifier_grid.best_params_['max_depth'] classifier_test = DecisionTreeClassifier(max_depth=D_star, criterion="entropy") boostedSVM = AdaBoostClassifier(classifier_test, algorithm = "SAMME") boostedSVM.fit(X_train_val, Y_train_val) test_acc = boostedSVM.score(X_test,Y_test) train_acc = classifier_grid.cv_results_['mean_train_score'] for i,j in enumerate(D_list): if j == D_star: best_train_acc = train_acc[i] return test_acc, best_train_acc, D_star def rand_Forest(): #random forest classifier D_list = np.array([1, 2, 3, 4, 5]) parameters = {'max_depth':D_list} classifier_grid = GridSearchCV(RandomForestClassifier(criterion="entropy"), parameters, cv=5, return_train_score=True) classifier_grid.fit(X_train_val, Y_train_val) #plot heatmaps draw_heatmap(classifier_grid.cv_results_['mean_train_score'].reshape(5,1), 'RF train accuracy', D_list, 'K') draw_heatmap(classifier_grid.cv_results_['mean_test_score'].reshape(5,1), 'RF val accuracy', D_list, 'K') #train/test with best parameter D_star = classifier_grid.best_params_['max_depth'] classifier_test1 = RandomForestClassifier(max_depth=D_star, criterion="entropy") classifier_test1.fit(X_train_val, Y_train_val) randForest_acc = classifier_test1.score(X_test,Y_test) train_acc = classifier_grid.cv_results_['mean_train_score'] for i,j in enumerate(D_list): if j == D_star: best_train_acc = train_acc[i] return randForest_acc, best_train_acc, D_star def random_forest_boost(): D_list = np.array([1, 2, 3, 4, 5]) parameters = {'max_depth':D_list} classifier_grid = GridSearchCV(RandomForestClassifier(criterion="entropy"), parameters, cv=5, return_train_score=True) classifier_grid.fit(X_train_val, Y_train_val) D_star = 
classifier_grid.best_params_['max_depth'] classifier_test1 = RandomForestClassifier(max_depth=D_star, criterion="entropy") boostedSVM = AdaBoostClassifier(classifier_test1, algorithm = "SAMME") boostedSVM.fit(X_train_val, Y_train_val) test_acc = boostedSVM.score(X_test,Y_test) train_acc = classifier_grid.cv_results_['mean_train_score'] for i,j in enumerate(D_list): if j == D_star: best_train_acc = train_acc[i] return test_acc, best_train_acc, D_star #knn classifier def knn_classifier(): k_list = np.array([1, 2, 3, 4, 5, 6]) parameters = {'n_neighbors':k_list} classifier_grid = GridSearchCV(KNeighborsClassifier(), parameters, cv=5, return_train_score=True) classifier_grid.fit(X_train_val, Y_train_val) #plot heatmaps draw_heatmap(classifier_grid.cv_results_['mean_train_score'].reshape(6,1), 'KNN train accuracy', k_list, 'K') draw_heatmap(classifier_grid.cv_results_['mean_test_score'].reshape(6,1), 'KNN val accuracy', k_list, 'K') #train/test with best parameter k_star = classifier_grid.best_params_['n_neighbors'] classifier_test2 = KNeighborsClassifier(n_neighbors=k_star) classifier_test2.fit(X_train_val,Y_train_val) knn_acc = classifier_test2.score(X_test,Y_test) train_acc = classifier_grid.cv_results_['mean_train_score'] for i,j in enumerate(k_list): if j == k_star: best_train_acc = train_acc[i] return knn_acc, best_train_acc, k_star def knn_boost(): k_list = np.array([1, 2, 3, 4, 5, 6]) parameters = {'n_neighbors':k_list} classifier_grid = GridSearchCV(KNeighborsClassifier(), parameters, cv=5, return_train_score=True) classifier_grid.fit(X_train_val, Y_train_val) k_star = classifier_grid.best_params_['n_neighbors'] classifier_test2 = KNeighborsClassifier(n_neighbors=k_star) boostedSVM = AdaBoostClassifier(classifier_test2, algorithm = "SAMME") boostedSVM.fit(X_train_val, Y_train_val) test_acc = boostedSVM.score(X_test,Y_test) train_acc = classifier_grid.cv_results_['mean_train_score'] for i,j in enumerate(k_list): if j == k_star: best_train_acc = train_acc[i] return test_acc, best_train_acc, k_star ###Output _____no_output_____ ###Markdown 3. 
Classification ###Code partitionVal = [0.8,0.5,0.2] result_table = np.zeros((3,7)) result_table1 = np.zeros((3,7)) result_table2 = np.zeros((3,7)) for i, partition in enumerate(partitionVal): print("Partition: ", partition) knn_test_acc = [] rand_forest_test_acc = [] decision_tree_test_acc = [] svm_test_acc = [] knn_boosted_test_acc = [] rand_forest_boosted_test_acc = [] decision_tree_boosted_test_acc = [] svm_boosted_test_acc = [] NUM_TRIALS = 3 for trial in range(NUM_TRIALS): np.random.shuffle(data) #shuffle data breakNum = int(partition*len(data)) #Find the point where to partition the data X_train_val = data[:breakNum,:6] Y_train_val = data[:breakNum,6] X_test = data[breakNum:,:6] Y_test = data[breakNum:,6] #classifiers = ["SVM", "Decision Tree", "Random Forest", "KNN"] test_acc,best_train0,C0 = svm_func() svm_test_acc.append(test_acc)#call the svm classifier test_acc,best_train1,C1 = knn_classifier() knn_test_acc.append(test_acc)#call the knn classifier test_acc,best_train2,C2 = decision_Tree() decision_tree_test_acc.append(test_acc)#call the Decision Tree classifier test_acc,best_train3,C3 = rand_Forest() rand_forest_test_acc.append(test_acc)#call the Random Forest classifier #knn_boosted_test_acc.append(knn_boost()) test_acc,best_train4,C4 = svm_func_boost() svm_boosted_test_acc.append(test_acc)#call the svm boosted classifier test_acc,best_train5,C5 = decision_tree_boost() decision_tree_boosted_test_acc.append(test_acc) test_acc,best_train6,C6 = random_forest_boost() rand_forest_boosted_test_acc.append(test_acc) result_table[i, 0] = sum(svm_test_acc)/NUM_TRIALS result_table[i, 1] = sum(knn_test_acc)/NUM_TRIALS result_table[i, 2] = sum(decision_tree_test_acc)/NUM_TRIALS result_table[i, 3] = sum(rand_forest_boosted_test_acc)/NUM_TRIALS result_table[i, 4] = sum(svm_boosted_test_acc)/NUM_TRIALS result_table[i, 5] = sum(decision_tree_boosted_test_acc)/NUM_TRIALS result_table[i, 6] = sum(rand_forest_boosted_test_acc)/NUM_TRIALS result_table1[i, 0] = best_train0 result_table1[i, 1] = best_train1 result_table1[i, 2] = best_train2 result_table1[i, 3] = best_train3 result_table1[i, 4] = best_train4 result_table1[i, 5] = best_train5 result_table1[i, 6] = best_train6 result_table2[i, 0] = C0 result_table2[i, 1] = C1 result_table2[i, 2] = C2 result_table2[i, 3] = C3 result_table2[i, 4] = C4 result_table2[i, 5] = C5 result_table2[i, 6] = C6 #Average all test accuracies for all 3 trials print("Test Accuracy Average for knn = ", sum(knn_test_acc)/NUM_TRIALS) #print("Test Accuracy Average for Boosted knn = ", sum(knn_boosted_test_acc)/NUM_TRIALS) print("Test Accuracy Average for Random Forest = ", sum(rand_forest_test_acc)/NUM_TRIALS) print("Test Accuracy Average for Boosted Random Forest = ", sum(rand_forest_boosted_test_acc)/NUM_TRIALS) print("Test Accuracy Average for Decision Tree = ", sum(decision_tree_test_acc)/NUM_TRIALS) print("Test Accuracy Average for Boosted Decision Tree = ", sum(decision_tree_boosted_test_acc)/NUM_TRIALS) print("Test Accuracy Average for SVM = ", sum(svm_test_acc)/NUM_TRIALS) print("Test Accuracy Average for Boosted SVM = ", sum(svm_boosted_test_acc)/NUM_TRIALS) #y-axis: partition #x-axis: classifier #print(result_table) print(result_table1) print(result_table2) ###Output [[ 0.73317339 0.97576101 0.93704745 0.93469369 0.72608696 0.93704745 0.93324785] [ 0.73495353 0.96006709 0.9435747 0.93807877 0.73478261 0.9435747 0.93546925] [ 0.71884288 0.95001782 0.95362269 0.95583869 0.68115942 0.95362269 0.95146717]] [[ 1.00000000e-01 5.00000000e+00 5.00000000e+00 5.00000000e+00 
1.00000000e-01 5.00000000e+00 5.00000000e+00] [ 1.00000000e+00 5.00000000e+00 5.00000000e+00 5.00000000e+00 1.00000000e+00 5.00000000e+00 5.00000000e+00] [ 1.00000000e-05 3.00000000e+00 5.00000000e+00 5.00000000e+00 1.00000000e-05 5.00000000e+00 5.00000000e+00]]
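###Markdown The raw matrices printed above are hard to read without labels. The sketch below wraps them in labelled dataframes, using the classifier ordering from the loop above (SVM, KNN, decision tree, random forest, then the boosted variants) and the three train/test partitions. Note that in the loop, `result_table[i, 3]` is filled from `rand_forest_boosted_test_acc` rather than `rand_forest_test_acc`, which looks like a copy-paste slip worth double-checking before labelling that column "Random Forest". ###Code
classifiers = ['SVM', 'KNN', 'Decision Tree', 'Random Forest',
               'Boosted SVM', 'Boosted Decision Tree', 'Boosted Random Forest']
splits = ['80% train', '50% train', '20% train']

test_acc_df = pd.DataFrame(result_table, index=splits, columns=classifiers)
train_acc_df = pd.DataFrame(result_table1, index=splits, columns=classifiers)
best_param_df = pd.DataFrame(result_table2, index=splits, columns=classifiers)
print(test_acc_df.round(3))
###Output _____no_output_____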
in_progress/Tutorial-Start_to_Finish-GiRaFFE_NRPy-1D_tests-staggered.ipynb
###Markdown window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'UA-59152712-8'); Start-to-Finish Example: `GiRaFFE_NRPy` 1D tests Author: Patrick Nelson Adapted from [Start-to-Finish Example: Head-On Black Hole Collision](../Tutorial-Start_to_Finish-BSSNCurvilinear-Two_BHs_Collide.ipynb) This module implements a basic GRFFE code to evolve one-dimensional GRFFE waves. NRPy+ Source Code for this module: * [GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Exact_Wald.py](../../edit/in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Exact_Wald.py) [\[**tutorial**\]](Tutorial-GiRaFFEfood_NRPy_Exact_Wald.ipynb) Generates Exact Wald initial data* [GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Aligned_Rotator.py](../../edit/in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Aligned_Rotator.py) [\[**tutorial**\]](Tutorial-GiRaFFEfood_NRPy_Aligned_Rotator.ipynb) Generates Aligned Rotator initial data* [GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_1D_tests.py](../../edit/in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_1D_tests.py) [\[**tutorial**\]](Tutorial-GiRaFFEfood_NRPy_1D_tests.ipynb) Generates Alfv&eacute;n Wave initial data.* [GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Afield_flux.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Afield_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy_staggered-Afield_flux.ipynb) Generates the expressions to find the flux term of the induction equation.* [GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_A2B.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_A2B.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy_staggered-A2B.ipynb) Generates the driver to compute the magnetic field from the vector potential/* [GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-BCs.ipynb) Generates the code to apply boundary conditions to the vector potential, scalar potential, and three-velocity.* [GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-C2P_P2C.ipynb) Generates the conservative-to-primitive and primitive-to-conservative solvers.* [GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Metric_Face_Values.ipynb) Generates code to interpolate metric gridfunctions to cell faces.* [GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-PPM.ipynb) Genearates code to reconstruct primitive variables on cell faces.* [GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Source_Terms.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Source_Terms.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy_staggered-Source_Terms.ipynb) Generates the expressions to find the flux term of the Poynting flux evolution equation.* [GiRaFFE_NRPy/Stilde_flux.py](../../edit/in_progress/GiRaFFE_NRPy/Stilde_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Stilde_flux.ipynb) Generates the expressions to find the flux term of the Poynting flux evolution equation.* [../GRFFE/equations.py](../../edit/GRFFE/equations.py) [\[**tutorial**\]](../Tutorial-GRFFE_Equations-Cartesian.ipynb) Generates code necessary to compute the source terms.* [../GRHD/equations.py](../../edit/GRHD/equations.py) [\[**tutorial**\]](../Tutorial-GRHD_Equations-Cartesian.ipynb) Generates code necessary to compute the source terms.Here we use NRPy+ to generate the C 
source code necessary to set up initial data for an Alfv&eacute;n wave (see [the original GiRaFFE paper](https://arxiv.org/pdf/1704.00599.pdf)). Then we use it to generate the RHS expressions for [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html) time integration based on the [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4). Table of Contents$$\label{toc}$$This notebook is organized as follows1. [Step 1](initializenrpy): Set core NRPy+ parameters for numerical grids1. [Step 2](grffe): Output C code for GRFFE evolution 1. [Step 2.a](mol): Output macros for Method of Lines timestepping1. [Step 3](gf_id): Import `GiRaFFEfood_NRPy` initial data modules1. [Step 4](cparams): Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h`1. [Step 5](mainc): `GiRaFFE_NRPy_standalone.c`: The Main C Code Step 1: Set up core functions and parameters for solving GRFFE equations \[Back to [top](toc)\]$$\label{setup}$$ ###Code import shutil, os, sys # Standard Python modules for multiplatform OS-level functions # First, we'll add the parent directory to the list of directories Python will check for modules. nrpy_dir_path = os.path.join("..") if nrpy_dir_path not in sys.path: sys.path.append(nrpy_dir_path) # Step P1: Import needed NRPy+ core modules: from outputC import outCfunction, lhrh # NRPy+: Core C code output module import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends import finite_difference as fin # NRPy+: Finite difference C code generation module import NRPy_param_funcs as par # NRPy+: Parameter interface import grid as gri # NRPy+: Functions having to do with numerical grids import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface # Step P2: Create C code output directory: Ccodesdir = os.path.join("GiRaFFE_staggered_standalone_Ccodes/") # First remove C code output directory if it exists # Courtesy https://stackoverflow.com/questions/303200/how-do-i-remove-delete-a-folder-that-is-not-empty # !rm -r ScalarWaveCurvilinear_Playground_Ccodes shutil.rmtree(Ccodesdir, ignore_errors=True) # Then create a fresh directory cmd.mkdir(Ccodesdir) # Step P3: Create executable output directory: outdir = os.path.join(Ccodesdir,"output/") cmd.mkdir(outdir) # Step P5: Set timestepping algorithm (we adopt the Method of Lines) REAL = "double" # Best to use double here. default_CFL_FACTOR= 0.5 # (GETS OVERWRITTEN WHEN EXECUTED.) In pure axisymmetry (symmetry_axes = 2 below) 1.0 works fine. Otherwise 0.5 or lower. # Step P6: Set the finite differencing order to 2. par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",2) thismodule = "Start_to_Finish-GiRaFFE_NRPy-1D_tests" TINYDOUBLE = par.Cparameters("REAL", thismodule, "TINYDOUBLE", 1e-100) import GiRaFFE_NRPy.GiRaFFE_NRPy_Main_Driver_staggered as md # par.set_paramsvals_value("GiRaFFE_NRPy.GiRaFFE_NRPy_C2P_P2C::enforce_speed_limit_StildeD = False") par.set_paramsvals_value("GiRaFFE_NRPy.GiRaFFE_NRPy_C2P_P2C::enforce_current_sheet_prescription = False") ###Output _____no_output_____ ###Markdown Step 2: Output C code for GRFFE evolution \[Back to [top](toc)\]$$\label{grffe}$$We will first write the C codes needed for GRFFE evolution. 
We have already written a module to generate all these codes and call the functions in the appropriate order, so we will import that here. We will take the slightly unusual step of doing this before we generate the initial data functions because the main driver module will register all the gridfunctions we need. It will also generate functions that, in addition to their normal spot in the MoL timestepping, will need to be called during the initial data step to make sure all the variables are appropriately filled in. All of this is handled with a single call to `GiRaFFE_NRPy_Main_Driver_generate_all()`, which will register gridfunctions, write all the C code kernels, and write the C code functions to call those. ###Code md.GiRaFFE_NRPy_Main_Driver_generate_all(Ccodesdir) ###Output Output C function calculate_StildeD0_source_term() to file GiRaFFE_staggered_standalone_Ccodes/RHSs/calculate_StildeD0_source_term.h Output C function calculate_StildeD1_source_term() to file GiRaFFE_staggered_standalone_Ccodes/RHSs/calculate_StildeD1_source_term.h Output C function calculate_StildeD2_source_term() to file GiRaFFE_staggered_standalone_Ccodes/RHSs/calculate_StildeD2_source_term.h Output C function calculate_Stilde_rhsD() to file GiRaFFE_staggered_standalone_Ccodes/RHSs/calculate_Stilde_rhsD.h Output C function GiRaFFE_NRPy_cons_to_prims() to file GiRaFFE_staggered_standalone_Ccodes/C2P/GiRaFFE_NRPy_cons_to_prims.h Output C function GiRaFFE_NRPy_prims_to_cons() to file GiRaFFE_staggered_standalone_Ccodes/C2P/GiRaFFE_NRPy_prims_to_cons.h Output C function Workaround_ADM_to_BSSN() to file GiRaFFE_staggered_standalone_Ccodes/Workaround_ADM_to_BSSN.h ###Markdown Step 2.a: Output macros for Method of Lines timestepping \[Back to [top](toc)\]$$\label{mol}$$Now, we generate the code to implement the method of lines using the fourth-order Runge-Kutta algorithm. ###Code RK_method = "RK4" # Step 3: Generate Runge-Kutta-based (RK-based) timestepping code. # As described above the Table of Contents, this is a 3-step process: # 3.A: Evaluate RHSs (RHS_string) # 3.B: Apply boundary conditions (post_RHS_string, pt 1) import MoLtimestepping.C_Code_Generation as MoL from MoLtimestepping.RK_Butcher_Table_Dictionary import Butcher_dict RK_order = Butcher_dict[RK_method][1] cmd.mkdir(os.path.join(Ccodesdir,"MoLtimestepping/")) MoL.MoL_C_Code_Generation(RK_method, RHS_string = """ GiRaFFE_NRPy_RHSs(&params,auxevol_gfs,RK_INPUT_GFS,RK_OUTPUT_GFS);""", post_RHS_string = """ GiRaFFE_NRPy_post_step(&params,xx,auxevol_gfs,RK_OUTPUT_GFS,n+1);\n""", outdir = os.path.join(Ccodesdir,"MoLtimestepping/")) ###Output _____no_output_____ ###Markdown Step 3: Import `GiRaFFEfood_NRPy` initial data modules \[Back to [top](toc)\]$$\label{gf_id}$$With the preliminaries out of the way, we will write the C functions to set up initial data. There are two categories of initial data that must be set: the spacetime metric variables, and the GRFFE plasma variables. We will set up the spacetime first. ###Code # There are several initial data routines we need to test. We'll control which one we use with a string option initial_data = "AlfvenWave" # Valid options: "ExactWald", "AlignedRotator", "AlfvenWave", "FastWave" spacetime = "flat" # Valid options: "ShiftedKerrSchild", "flat" if spacetime == "ShiftedKerrSchild": # Exact Wald is more complicated. We'll need the Shifted Kerr Schild metric in Cartesian coordinates. 
import BSSN.ShiftedKerrSchild as sks sks.ShiftedKerrSchild(True) import reference_metric as rfm par.set_parval_from_str("reference_metric::CoordSystem","Cartesian") rfm.reference_metric() # Use the Jacobian matrix to transform the vectors to Cartesian coordinates. drrefmetric__dx_0UDmatrix = sp.Matrix([[sp.diff(rfm.xxSph[0],rfm.xx[0]), sp.diff(rfm.xxSph[0],rfm.xx[1]), sp.diff(rfm.xxSph[0],rfm.xx[2])], [sp.diff(rfm.xxSph[1],rfm.xx[0]), sp.diff(rfm.xxSph[1],rfm.xx[1]), sp.diff(rfm.xxSph[1],rfm.xx[2])], [sp.diff(rfm.xxSph[2],rfm.xx[0]), sp.diff(rfm.xxSph[2],rfm.xx[1]), sp.diff(rfm.xxSph[2],rfm.xx[2])]]) dx__drrefmetric_0UDmatrix = drrefmetric__dx_0UDmatrix.inv() gammaDD = ixp.zerorank2() for i in range(3): for j in range(3): for k in range(3): for l in range(3): gammaDD[i][j] += drrefmetric__dx_0UDmatrix[(k,i)]*drrefmetric__dx_0UDmatrix[(l,j)]*sks.gammaSphDD[k][l].subs(sks.r,rfm.xxSph[0]).subs(sks.th,rfm.xxSph[1]) betaU = ixp.zerorank1() for i in range(3): for j in range(3): betaU[i] += dx__drrefmetric_0UDmatrix[(i,j)]*sks.betaSphU[j].subs(sks.r,rfm.xxSph[0]).subs(sks.th,rfm.xxSph[1]) alpha = sks.alphaSph.subs(sks.r,rfm.xxSph[0]).subs(sks.th,rfm.xxSph[1]) # Description and options for this initial data desc = "Generate a spinning black hole with Shifted Kerr Schild metric." loopopts_id ="AllPoints,Read_xxs" elif spacetime == "flat": gammaDD = ixp.zerorank2(DIM=3) for i in range(3): for j in range(3): if i==j: gammaDD[i][j] = sp.sympify(1) # else: leave as zero betaU = ixp.zerorank1() # All should be 0 alpha = sp.sympify(1) # Description and options for this initial data desc = "Generate a flat spacetime metric." loopopts_id ="AllPoints" # we don't need to read coordinates for flat spacetime. # For testing: Also set inverse metric: gammaUU, unused_gammaDET = ixp.symm_matrix_inverter3x3(gammaDD) name = "set_initial_spacetime_metric_data" values_to_print = [ lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD00"),rhs=gammaDD[0][0]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD01"),rhs=gammaDD[0][1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD02"),rhs=gammaDD[0][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD11"),rhs=gammaDD[1][1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD12"),rhs=gammaDD[1][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD22"),rhs=gammaDD[2][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU0"),rhs=betaU[0]), lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU1"),rhs=betaU[1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU2"),rhs=betaU[2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","alpha"),rhs=alpha), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaUU00"),rhs=gammaUU[0][0]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaUU01"),rhs=gammaUU[0][1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaUU02"),rhs=gammaUU[0][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaUU11"),rhs=gammaUU[1][1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaUU12"),rhs=gammaUU[1][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaUU22"),rhs=gammaUU[2][2]) ] outCfunction( outfile = os.path.join(Ccodesdir,name+".h"), desc=desc, name=name, params ="const paramstruct *params,REAL *xx[3],REAL *auxevol_gfs", body = fin.FD_outputC("returnstring",values_to_print,params="outCverbose=False").replace("IDX4","IDX4S"), loopopts = loopopts_id) ###Output Output C function set_initial_spacetime_metric_data() to file GiRaFFE_staggered_standalone_Ccodes/set_initial_spacetime_metric_data.h ###Markdown Now, we will write out the initial data function for the GRFFE variables. 
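###Markdown Before doing so, an optional sanity check (not part of the original workflow) is sketched below: it verifies symbolically that the inverse metric `gammaUU` computed above satisfies $\gamma^{ik}\gamma_{kj} = \delta^i_j$. This is immediate for the flat metric used here; for Shifted Kerr-Schild the simplification can be slow. ###Code
# Optional check that gammaUU is the matrix inverse of gammaDD, term by term.
for i in range(3):
    for j in range(3):
        sum_ij = sp.sympify(0)
        for k in range(3):
            sum_ij += gammaUU[i][k]*gammaDD[k][j]
        expected = sp.sympify(1) if i == j else sp.sympify(0)
        assert sp.simplify(sum_ij - expected) == 0, (i, j)
print("gammaUU is consistent with gammaDD.")
###Output _____no_output_____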
###Code if initial_data=="AlfvenWave": import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_1D_tests as gid gid.GiRaFFEfood_NRPy_1D_tests(stagger = True) desc = "Generate Alfven wave 1D initial test data for GiRaFFEfood_NRPy." elif initial_data=="FastWave": import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_1D_tests_fast_wave as gid gid.GiRaFFEfood_NRPy_1D_tests_fast_wave() desc = "Generate fast wave 1D initial test data for GiRaFFEfood_NRPy." elif initial_data=="AlignedRotator": import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_Aligned_Rotator as gid gid.GiRaFFEfood_NRPy_Aligned_Rotator() desc = "Generate aligned rotator initial test data for GiRaFFEfood_NRPy." elif initial_data=="ExactWald": import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_Exact_Wald as gid M,r0 = sp.symbols("M r0") gid.GiRaFFEfood_NRPy_Exact_Wald(gammaDD,M,r0) desc = "Generate exact Wald initial test data for GiRaFFEfood_NRPy." else: print("Unsupported Initial Data string "+initial_data+"! Supported ID: AlfvenWave, FastWave, AlignedRotator, or ExactWald") name = "initial_data" values_to_print = [\ lhrh(lhs=gri.gfaccess("out_gfs","AD0"),rhs=gid.AD[0]),\ lhrh(lhs=gri.gfaccess("out_gfs","AD1"),rhs=gid.AD[1]),\ lhrh(lhs=gri.gfaccess("out_gfs","AD2"),rhs=gid.AD[2]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU0"),rhs=gid.ValenciavU[0]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU1"),rhs=gid.ValenciavU[1]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU2"),rhs=gid.ValenciavU[2]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","BU0"),rhs=gid.BU[0]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","BU1"),rhs=gid.BU[1]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","BU2"),rhs=gid.BU[2]),\ lhrh(lhs=gri.gfaccess("out_gfs","psi6Phi"),rhs=sp.sympify(0))\ ] outCfunction( outfile = os.path.join(Ccodesdir,name+".h"), desc=desc, name=name, params ="const paramstruct *params,REAL *xx[3],REAL *auxevol_gfs,REAL *out_gfs", body = fin.FD_outputC("returnstring",values_to_print,params="outCverbose=False").replace("IDX4","IDX4S"), loopopts ="AllPoints,Read_xxs") ###Output Output C function initial_data() to file GiRaFFE_staggered_standalone_Ccodes/initial_data.h ###Markdown Step 4: Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h` \[Back to [top](toc)\]$$\label{cparams}$$Based on declared NRPy+ Cparameters, first we generate `declare_Cparameters_struct.h`, `set_Cparameters_default.h`, and `set_Cparameters[-SIMD].h`.Then we output `free_parameters.h`, which sets initial data parameters, as well as grid domain & reference metric parameters, applying `domain_size` and `sinh_width`/`SymTP_bScale` (if applicable) as set above ###Code # Step 3.e: Output C codes needed for declaring and setting Cparameters; also set free_parameters.h # Step 3.e.i: Generate declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h par.generate_Cparameters_Ccodes(os.path.join(Ccodesdir)) # Step 3.e.ii: Set free_parameters.h with open(os.path.join(Ccodesdir,"free_parameters.h"),"w") as file: file.write("""// Override parameter defaults with values based on command line arguments and NGHOSTS. 
params.Nxx0 = atoi(argv[1]); params.Nxx1 = atoi(argv[2]); params.Nxx2 = atoi(argv[3]); params.Nxx_plus_2NGHOSTS0 = params.Nxx0 + 2*NGHOSTS; params.Nxx_plus_2NGHOSTS1 = params.Nxx1 + 2*NGHOSTS; params.Nxx_plus_2NGHOSTS2 = params.Nxx2 + 2*NGHOSTS; // Step 0d: Set up space and time coordinates // Step 0d.i: Declare \Delta x^i=dxx{0,1,2} and invdxx{0,1,2}, as well as xxmin[3] and xxmax[3]: const REAL xxmin[3] = {-1.5,-0.1,-0.1}; const REAL xxmax[3] = { 1.5, 0.1, 0.1}; //const REAL xxmin[3] = {-1.5,-1.5,-1.5}; //const REAL xxmax[3] = { 1.5, 1.5, 1.5}; params.dxx0 = (xxmax[0] - xxmin[0]) / ((REAL)params.Nxx0+1); params.dxx1 = (xxmax[1] - xxmin[1]) / ((REAL)params.Nxx1+1); params.dxx2 = (xxmax[2] - xxmin[2]) / ((REAL)params.Nxx2+1); printf("dxx0,dxx1,dxx2 = %.5e,%.5e,%.5e\\n",params.dxx0,params.dxx1,params.dxx2); params.invdx0 = 1.0 / params.dxx0; params.invdx1 = 1.0 / params.dxx1; params.invdx2 = 1.0 / params.dxx2; const int poison_grids = 0; // Standard GRFFE parameters: params.GAMMA_SPEED_LIMIT = 2000.0; params.diss_strength = 0.1; """) if initial_data=="ExactWald": with open(os.path.join(out_dir,"free_parameters.h"),"a") as file: file.write("""params.r0 = 0.4; params.a = 0.0; """) ###Output _____no_output_____ ###Markdown Step 4: Set up boundary condition functions for chosen singular, curvilinear coordinate system \[Back to [top](toc)\]$$\label{bc_functs}$$Next apply singular, curvilinear coordinate boundary conditions [as documented in the corresponding NRPy+ tutorial notebook](Tutorial-Start_to_Finish-Curvilinear_BCs.ipynb)...But, for the moment, we're actually just using this because it writes the file `gridfunction_defines.h`. ###Code import CurviBoundaryConditions.CurviBoundaryConditions as cbcs cbcs.Set_up_CurviBoundaryConditions(os.path.join(Ccodesdir,"boundary_conditions/"),Cparamspath=os.path.join("../"),enable_copy_of_static_Ccodes=False) ###Output Wrote to file "GiRaFFE_staggered_standalone_Ccodes/boundary_conditions/parity_conditions_symbolic_dot_products.h" Evolved parity: ( AD0:1, AD1:2, AD2:3, StildeD0:1, StildeD1:2, StildeD2:3, psi6Phi:0 ) AuxEvol parity: ( BU0:1, BU1:2, BU2:3, B_lU0:1, B_lU1:2, B_lU2:3, B_rU0:1, B_rU1:2, B_rU2:3, BstaggerU0:1, BstaggerU1:2, BstaggerU2:3, Bstagger_lU0:1, Bstagger_lU1:2, Bstagger_lU2:3, Bstagger_rU0:1, Bstagger_rU1:2, Bstagger_rU2:3, Stilde_flux_HLLED0:1, Stilde_flux_HLLED1:2, Stilde_flux_HLLED2:3, ValenciavU0:1, ValenciavU1:2, ValenciavU2:3, Valenciav_lU0:1, Valenciav_lU1:2, Valenciav_lU2:3, Valenciav_llU0:1, Valenciav_llU1:2, Valenciav_llU2:3, Valenciav_lrU0:1, Valenciav_lrU1:2, Valenciav_lrU2:3, Valenciav_rU0:1, Valenciav_rU1:2, Valenciav_rU2:3, Valenciav_rlU0:1, Valenciav_rlU1:2, Valenciav_rlU2:3, Valenciav_rrU0:1, Valenciav_rrU1:2, Valenciav_rrU2:3, alpha:0, alpha_face:0, betaU0:1, betaU1:2, betaU2:3, beta_faceU0:1, beta_faceU1:2, beta_faceU2:3, cmax_x:0, cmax_y:0, cmax_z:0, cmin_x:0, cmin_y:0, cmin_z:0, gammaDD00:4, gammaDD01:5, gammaDD02:6, gammaDD11:7, gammaDD12:8, gammaDD22:9, gammaUU00:4, gammaUU01:5, gammaUU02:6, gammaUU11:7, gammaUU12:8, gammaUU22:9, gamma_faceDD00:4, gamma_faceDD01:5, gamma_faceDD02:6, gamma_faceDD11:7, gamma_faceDD12:8, gamma_faceDD22:9, gamma_faceUU00:4, gamma_faceUU01:5, gamma_faceUU02:6, gamma_faceUU11:7, gamma_faceUU12:8, gamma_faceUU22:9, phi:0, phi_face:0, psi6_temp:0, psi6center:0 ) Wrote to file "GiRaFFE_staggered_standalone_Ccodes/boundary_conditions/EigenCoord_Cart_to_xx.h" ###Markdown Step 5: `GiRaFFE_NRPy_standalone.c`: The Main C Code \[Back to [top](toc)\]$$\label{mainc}$$ ###Code # Part P0: Define 
REAL, set the number of ghost cells NGHOSTS (from NRPy+'s FD_CENTDERIVS_ORDER), # and set the CFL_FACTOR (which can be overwritten at the command line) with open(os.path.join(Ccodesdir,"GiRaFFE_NRPy_REAL__NGHOSTS__CFL_FACTOR.h"), "w") as file: file.write(""" // Part P0.a: Set the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER #define NGHOSTS """+str(3)+""" #define NGHOSTS_A2B """+str(2)+""" // Part P0.b: Set the numerical precision (REAL) to double, ensuring all floating point // numbers are stored to at least ~16 significant digits #define REAL """+REAL+""" // Part P0.c: Set the CFL Factor. Can be overwritten at command line. REAL CFL_FACTOR = """+str(default_CFL_FACTOR)+";") %%writefile $Ccodesdir/GiRaFFE_NRPy_standalone.c // Step P0: Define REAL and NGHOSTS; and declare CFL_FACTOR. This header is generated in NRPy+. #include "GiRaFFE_NRPy_REAL__NGHOSTS__CFL_FACTOR.h" #include "declare_Cparameters_struct.h" const int NSKIP_1D_OUTPUT = 1; // Step P1: Import needed header files #include "stdio.h" #include "stdlib.h" #include "math.h" #include "time.h" #include "stdint.h" // Needed for Windows GCC 6.x compatibility #ifndef M_PI #define M_PI 3.141592653589793238462643383279502884L #endif #ifndef M_SQRT1_2 #define M_SQRT1_2 0.707106781186547524400844362104849039L #endif // Step P2: Declare the IDX4S(gf,i,j,k) macro, which enables us to store 4-dimensions of // data in a 1D array. In this case, consecutive values of "i" // (all other indices held to a fixed value) are consecutive in memory, where // consecutive values of "j" (fixing all other indices) are separated by // Nxx_plus_2NGHOSTS0 elements in memory. Similarly, consecutive values of // "k" are separated by Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1 in memory, etc. #define IDX4S(g,i,j,k) \ ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) + Nxx_plus_2NGHOSTS2 * (g) ) ) ) #define IDX4ptS(g,idx) ( (idx) + (Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2) * (g) ) #define IDX3S(i,j,k) ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) ) ) ) #define LOOP_REGION(i0min,i0max, i1min,i1max, i2min,i2max) \ for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) #define LOOP_ALL_GFS_GPS(ii) _Pragma("omp parallel for") \ for(int (ii)=0;(ii)<Nxx_plus_2NGHOSTS_tot*NUM_EVOL_GFS;(ii)++) // Step P3: Set gridfunction macros #include "boundary_conditions/gridfunction_defines.h" // Step P4: Include the RHS, BC, and primitive recovery functions #include "GiRaFFE_NRPy_Main_Driver.h" // Step P5: Include the initial data functions #include "set_initial_spacetime_metric_data.h" #include "initial_data.h" // main() function: // Step 0: Read command-line input, set up grid structure, allocate memory for gridfunctions, set up coordinates // Step 1: Set up scalar wave initial data // Step 2: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm, // applying quadratic extrapolation outer boundary conditions. // Step 3: Output relative error between numerical and exact solution. 
// Step 4: Free all allocated memory int main(int argc, const char *argv[]) { paramstruct params; #include "set_Cparameters_default.h" // Step 0a: Read command-line input, error out if nonconformant if(argc != 4 || atoi(argv[1]) < NGHOSTS || atoi(argv[2]) < NGHOSTS || atoi(argv[3]) < NGHOSTS) { printf("Error: Expected three command-line arguments: ./GiRaFFE_NRPy_standalone [Nx] [Ny] [Nz],\n"); printf("where Nx is the number of grid points in the x direction, and so forth.\n"); printf("Nx,Ny,Nz MUST BE larger than NGHOSTS (= %d)\n",NGHOSTS); exit(1); } // Step 0c: Set free parameters, overwriting Cparameters defaults // by hand or with command-line input, as desired. #include "free_parameters.h" #include "set_Cparameters-nopointer.h" // ... and then set up the numerical grid structure in time: const REAL t_final = 0.5; const REAL CFL_FACTOR = 0.5; // Set the CFL Factor // Step 0c: Allocate memory for gridfunctions const int Nxx_plus_2NGHOSTS_tot = Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2; // Step 0k: Allocate memory for gridfunctions #include "MoLtimestepping/RK_Allocate_Memory.h" REAL *restrict auxevol_gfs = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot); REAL *evol_gfs_exact = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot); REAL *auxevol_gfs_exact = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot); // For debugging, it can be useful to set everything to NaN initially. if(poison_grids) { for(int ii=0;ii<NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) { y_n_gfs[ii] = 1.0/0.0; y_nplus1_running_total_gfs[ii] = 1.0/0.0; //k_odd_gfs[ii] = 1.0/0.0; //k_even_gfs[ii] = 1.0/0.0; diagnostic_output_gfs[ii] = 1.0/0.0; evol_gfs_exact[ii] = 1.0/0.0; } for(int ii=0;ii<NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) { auxevol_gfs[ii] = 1.0/0.0; auxevol_gfs_exact[ii] = 1.0/0.0; } } // Step 0d: Set up coordinates: Set dx, and then dt based on dx_min and CFL condition // This is probably already defined above, but just in case... #ifndef MIN #define MIN(A, B) ( ((A) < (B)) ? (A) : (B) ) #endif REAL dt = CFL_FACTOR * MIN(dxx0,MIN(dxx1,dxx2)); // CFL condition int Nt = (int)(t_final / dt + 0.5); // The number of points in time. //Add 0.5 to account for C rounding down integers. 
// Step 0e: Set up cell-centered Cartesian coordinate grids REAL *xx[3]; xx[0] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS0); xx[1] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS1); xx[2] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS2); for(int j=0;j<Nxx_plus_2NGHOSTS0;j++) xx[0][j] = xxmin[0] + (j-NGHOSTS+1)*dxx0; for(int j=0;j<Nxx_plus_2NGHOSTS1;j++) xx[1][j] = xxmin[1] + (j-NGHOSTS+1)*dxx1; for(int j=0;j<Nxx_plus_2NGHOSTS2;j++) xx[2][j] = xxmin[2] + (j-NGHOSTS+1)*dxx2; // Step 1: Set up initial data to be exact solution at time=0: REAL time = 0.0; set_initial_spacetime_metric_data(&params,xx,auxevol_gfs); initial_data(&params,xx,auxevol_gfs,y_n_gfs); /* // Code to perturb the initial data: for(int ii=0;ii<NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) { y_n_gfs[ii] += 1.0e-15; y_nplus1_running_total_gfs[ii] += 1.0e-15; //k_odd_gfs[ii] = 1.0/0.0; //k_even_gfs[ii] = 1.0/0.0; diagnostic_output_gfs[ii] += 1.0e-15; evol_gfs_exact[ii] += 1.0e-15; } for(int ii=0;ii<NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) { auxevol_gfs[ii] += 1.0e-15; auxevol_gfs_exact[ii] += 1.0e-15; } */ // Fill in the remaining quantities //apply_bcs_potential(&params,y_n_gfs); GiRaFFE_compute_B_and_Bstagger_from_A(&params, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD00GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD01GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD02GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD11GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD12GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD22GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*PSI6_TEMPGF, /* Temporary storage,overwritten */ y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD0GF, y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD1GF, y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD2GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU0GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU1GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU2GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU0GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU1GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU2GF); //override_BU_with_old_GiRaFFE(&params,auxevol_gfs,0); GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs,y_n_gfs); //apply_bcs_velocity(&params,auxevol_gfs); // Extra stack, useful for debugging: GiRaFFE_NRPy_cons_to_prims(&params,xx,auxevol_gfs,y_n_gfs); //GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs,y_n_gfs); //GiRaFFE_NRPy_cons_to_prims(&params,xx,auxevol_gfs,y_n_gfs); //GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs,y_n_gfs); //GiRaFFE_NRPy_cons_to_prims(&params,xx,auxevol_gfs,y_n_gfs); for(int n=0;n<=Nt;n++) { // Main loop to progress forward in time. //for(int n=0;n<=1;n++) { // Main loop to progress forward in time. // Step 1a: Set current time to correct value & compute exact solution time = ((REAL)n)*dt; /* Step 2: Validation: Output relative error between numerical and exact solution, */ if((n)%NSKIP_1D_OUTPUT ==0) { // Step 2c: Output relative error between exact & numerical at center of grid. 
const int i0mid=Nxx_plus_2NGHOSTS0/2; const int i1mid=Nxx_plus_2NGHOSTS1/2; const int i2mid=Nxx_plus_2NGHOSTS2/2; char filename[100]; sprintf(filename,"out%d-%08d.txt",Nxx0,n); FILE *out2D = fopen(filename, "w"); for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) { const int idx = IDX3S(i0,i1mid,i2mid); fprintf(out2D,"%.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e\n", xx[0][i0], auxevol_gfs[IDX4ptS(BU0GF,idx)],auxevol_gfs[IDX4ptS(BU1GF,idx)],auxevol_gfs[IDX4ptS(BU2GF,idx)], y_n_gfs[IDX4ptS(AD0GF,idx)],y_n_gfs[IDX4ptS(AD1GF,idx)],y_n_gfs[IDX4ptS(AD2GF,idx)], y_n_gfs[IDX4ptS(STILDED0GF,idx)],y_n_gfs[IDX4ptS(STILDED1GF,idx)],y_n_gfs[IDX4ptS(STILDED2GF,idx)], auxevol_gfs[IDX4ptS(VALENCIAVU0GF,idx)],auxevol_gfs[IDX4ptS(VALENCIAVU1GF,idx)],auxevol_gfs[IDX4ptS(VALENCIAVU2GF,idx)], y_n_gfs[IDX4ptS(PSI6PHIGF,idx)]); } fclose(out2D); // For convergence testing, we'll shift the grid x -> x-1 and output initial data again, giving the exact solution. LOOP_REGION(0,Nxx_plus_2NGHOSTS0,0,1,0,1) { xx[0][i0] += -mu_AW*time; //xx[0][i0] += -time; } set_initial_spacetime_metric_data(&params,xx,auxevol_gfs_exact); initial_data(&params,xx,auxevol_gfs_exact,evol_gfs_exact); // Fill in the remaining quantities //driver_A_to_B(&params,evol_gfs_exact,auxevol_gfs_exact); GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs_exact,evol_gfs_exact); // And now, we'll set the grid back to rights. LOOP_REGION(0,Nxx_plus_2NGHOSTS0,0,1,0,1) { xx[0][i0] -= -mu_AW*time; //xx[0][i0] -= -time; } sprintf(filename,"out%d-%08d_exact.txt",Nxx0,n); FILE *out2D_exact = fopen(filename, "w"); for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) { const int idx = IDX3S(i0,i1mid,i2mid); fprintf(out2D_exact,"%.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e\n", xx[0][i0], auxevol_gfs_exact[IDX4ptS(BU0GF,idx)],auxevol_gfs_exact[IDX4ptS(BU1GF,idx)],auxevol_gfs_exact[IDX4ptS(BU2GF,idx)], evol_gfs_exact[IDX4ptS(AD0GF,idx)],evol_gfs_exact[IDX4ptS(AD1GF,idx)],evol_gfs_exact[IDX4ptS(AD2GF,idx)], evol_gfs_exact[IDX4ptS(STILDED0GF,idx)],evol_gfs_exact[IDX4ptS(STILDED1GF,idx)],evol_gfs_exact[IDX4ptS(STILDED2GF,idx)], auxevol_gfs_exact[IDX4ptS(VALENCIAVU0GF,idx)],auxevol_gfs_exact[IDX4ptS(VALENCIAVU1GF,idx)],auxevol_gfs_exact[IDX4ptS(VALENCIAVU2GF,idx)], evol_gfs_exact[IDX4ptS(PSI6PHIGF,idx)]); } fclose(out2D_exact); } // Step 3: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm, // applying quadratic extrapolation outer boundary conditions. // Step 3.b: Step forward one timestep (t -> t+dt) in time using // chosen RK-like MoL timestepping algorithm #include "MoLtimestepping/RK_MoL.h" } // End main loop to progress forward in time. 
// Step 4: Free all allocated memory #include "MoLtimestepping/RK_Free_Memory.h" free(auxevol_gfs); free(auxevol_gfs_exact); free(evol_gfs_exact); for(int i=0;i<3;i++) free(xx[i]); return 0; } cmd.C_compile(os.path.join(Ccodesdir,"GiRaFFE_NRPy_standalone.c"), os.path.join(Ccodesdir,"output","GiRaFFE_NRPy_standalone"),compile_mode="safe") # !gcc -g -O2 -fopenmp GiRaFFE_standalone_Ccodes/GiRaFFE_NRPy_standalone.c -o GiRaFFE_NRPy_standalone -lm # Change to output directory os.chdir(outdir) # Clean up existing output files cmd.delete_existing_files("out*.txt") cmd.delete_existing_files("out*.png") # cmd.Execute(os.path.join(Ccodesdir,"output","GiRaFFE_NRPy_standalone"), "640 16 16", os.path.join(outdir,"out640.txt")) # !taskset -c 0-7 ./GiRaFFE_NRPy_standalone 119 7 7 cmd.Execute("GiRaFFE_NRPy_standalone", "119 7 7","out119.txt") # cmd.Execute("GiRaFFE_NRPy_standalone", "119 119 119","out119.txt") # cmd.Execute("GiRaFFE_NRPy_standalone", "239 15 15","out239.txt") # !OMP_NUM_THREADS=1 valgrind --track-origins=yes -v ./GiRaFFE_NRPy_standalone 1280 32 32 # Return to root directory os.chdir(os.path.join("../../")) ###Output Compiling executable... (EXEC): Executing `gcc -O2 -g -fopenmp GiRaFFE_staggered_standalone_Ccodes/GiRaFFE_NRPy_standalone.c -o GiRaFFE_staggered_standalone_Ccodes/output/GiRaFFE_NRPy_standalone -lm`... (BENCH): Finished executing in 1.6124725341796875 seconds. Finished compilation. (EXEC): Executing `taskset -c 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 ./GiRaFFE_NRPy_standalone 119 7 7`... (BENCH): Finished executing in 1.0101072788238525 seconds. ###Markdown Now, we will load the data generated by the simulation and plot it in order to test for convergence. ###Code import numpy as np import matplotlib.pyplot as plt # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 labels = ["x","BU0","BU1","BU2","AD0","AD1","AD2","StildeD0","StildeD1","StildeD2","ValenciavU0","ValenciavU1","ValenciavU2", "psi6Phi"] Data_numer = np.loadtxt(os.path.join(Ccodesdir,"output","out119-00000040.txt")) # Data_num_2 = np.loadtxt(os.path.join(Ccodesdir,"output","out239-00000080.txt")) # Data_old = np.loadtxt("/home/penelson/OldCactus/Cactus/exe/ABE-GiRaFFEfood_1D_AlfvenWave/giraffe-grmhd_primitives_bi.x.asc") # Data_o_2 = np.loadtxt("/home/penelson/OldCactus/Cactus/exe/ABE-GiRaFFEfood_1D_AlfvenWave_2/giraffe-grmhd_primitives_bi.x.asc") # Data_numer = Data_old[5000:5125,11:15] # The column range is chosen for compatibility with the plotting script. # Data_num_2 = Data_o_2[19600:19845,11:15] # The column range is chosen for compatibility with the plotting script. 
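###Markdown Before plotting, a reminder of what is being measured: with a second-order scheme, doubling the resolution should cut the error against the exact solution by roughly a factor of $2^2$, so the observed order can be estimated as $\log_2(|e_{\rm coarse}|/|e_{\rm fine}|)$. The helper below is a small sketch of that estimate; the array names in the usage comment are placeholders, not variables defined in this notebook. ###Code
import numpy as np

def observed_order(err_coarse, err_fine, refinement_factor=2.0):
    """Estimate the observed convergence order from errors at two resolutions."""
    return np.log(np.abs(err_coarse)/np.abs(err_fine))/np.log(refinement_factor)

# Usage sketch, assuming errors sampled at the same physical points:
# order = observed_order(numer_lo - exact_lo, numer_hi - exact_hi)
###Output _____no_output_____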
Data_exact = np.loadtxt(os.path.join(Ccodesdir,"output","out119-00000040_exact.txt")) # Data_exa_2 = np.loadtxt(os.path.join(Ccodesdir,"output","out239-00000080_exact.txt")) predicted_order = 2.0 column = 3 plt.figure() # # plt.plot(Data_exact[2:-2,0],np.log2(np.absolute((Data_numer[2:-2,column]-Data_exact[2:-2,column])/\ # # (Data_num_2[2:-2:2,column]-Data_exa_2[2:-2:2,column]))),'.') plt.plot(Data_exact[:,0],Data_exact[:,column],label="Exact") plt.plot(Data_exact[:,0],Data_numer[:,column],'.',label="Approximate") # plt.plot(Data_exact[:,0],Data_exact[:,column]-Data_numer[:,column]) # plt.xlim(-0.0,1.0) # # plt.ylim(-1.0,5.0) # # plt.ylim(-0.0005,0.0005) plt.xlabel(labels[0]) plt.ylabel(labels[column]) # plt.legend() plt.show() # Plotting scripts for comparison with original GiRaFFE: # old_files = ["", # "giraffe-grmhd_primitives_bi.x.asc","giraffe-grmhd_primitives_bi.x.asc","giraffe-grmhd_primitives_bi.x.asc", # "giraffe-em_ax.x.asc","giraffe-em_ay.x.asc","giraffe-em_az.x.asc", # # "cell_centered_Ai.txt","cell_centered_Ai.txt","cell_centered_Ai.txt", # "giraffe-grmhd_conservatives.x.asc","giraffe-grmhd_conservatives.x.asc","giraffe-grmhd_conservatives.x.asc", # "giraffe-grmhd_primitives_allbutbi.x.asc","giraffe-grmhd_primitives_allbutbi.x.asc","giraffe-grmhd_primitives_allbutbi.x.asc", # "giraffe-em_psi6phi.x.asc"] # column = 5 # column_old = [0,12,13,14,12,12,12,12,13,14,12,13,14,12] # # old_path = "/home/penelson/OldCactus/Cactus/exe/ABE-GiRaFFEfood_1D_AlfvenWave" # old_path = "C:\\Users\\Patrick\\Documents\\nrpytutorial\\in_progress\\ABE-GiRaFFEfood_1D_AlfvenWave\\" # # perturb_path = "C:\\Users\\Patrick\\Documents\\nrpytutorial\\in_progress\\ABE-GiRaFFEfood_1D_AlfvenWave_perturb\\" # new_path = os.path.join(Ccodesdir,"output") # data_old = np.loadtxt(os.path.join(old_path,old_files[column])) # # data_per = np.loadtxt(os.path.join(perturb_path,old_files[column])) # n=1 # data_old = data_old[n*125:n*125+125,:]# Select only the nth timestep # data_new = np.loadtxt(os.path.join(new_path,"out119-00000001.txt")) # deltaA_old = data_old[125:250,:] - data_old[0:125,:] # data_new_t0 = np.loadtxt(os.path.join(new_path,"out119-00000000.txt")) # deltaA_new = data_new[:,:] - data_new_t0[:,:] # plt.figure() # plt.plot(data_new[3:-3,0],data_new[3:-3,column]-data_old[3:-3,column_old[column]]) # plt.plot(data_new[3:-3,0],data_per[3:-3,column_old[column]]-data_old[3:-3,column_old[column]]) # For perturbation testing! 
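# A minimal quantitative check to accompany the plot above (a sketch; it assumes only the Nx=119
# run has been made, as in this notebook). With a second run at Nx=239 (whose loads are left
# commented out above), log2 of the ratio of the two errors should approach predicted_order.
interior_error = Data_numer[3:-3,column] - Data_exact[3:-3,column]
print("RMS error in %s at Nx=119: %.3e" % (labels[column], np.sqrt(np.mean(interior_error**2))))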
# plt.plot(data_new[:,0],data_old[:,column_old[column]]) # plt.plot(data_new[:,0],data_new[:,column],'.') # plt.plot(data_new[:,0],data_new[:,column]-((3*np.sin(5*np.pi*data_new[:,0]/np.sqrt(1 - (-0.5)**2))/20 + 23/20)*(data_new[:,0]/2 + np.sqrt(1 - (-0.5)**2)/20 + np.absolute(data_new[:,0] + np.sqrt(1 - (-0.5)**2)/10)/2)*(-1e-100/2 + data_new[:,0]/2 - np.sqrt(1 - (-0.5)**2)/20 - np.absolute(-1e-100 + data_new[:,0] - np.sqrt(1 - (-0.5)**2)/10)/2)/((-1e-100 + data_new[:,0] - np.sqrt(1 - (-0.5)**2)/10)*(1e-100 + data_new[:,0] + np.sqrt(1 - (-0.5)**2)/10)) + 13*(data_new[:,0]/2 - np.sqrt(1 - (-0.5)**2)/20 + np.absolute(data_new[:,0] - np.sqrt(1 - (-0.5)**2)/10)/2)/(10*(1e-100 + data_new[:,0] - np.sqrt(1 - (-0.5)**2)/10)) + (-1e-100/2 + data_new[:,0]/2 + np.sqrt(1 - (-0.5)**2)/20 - np.absolute(-1e-100 + data_new[:,0] + np.sqrt(1 - (-0.5)**2)/10)/2)/(-1e-100 + data_new[:,0] + np.sqrt(1 - (-0.5)**2)/10))/np.sqrt(1 - (-0.5)**2)) # plt.plot(data_new[1:,0]-(data_new[0,0]-data_new[1,0])/2.0,(data_new[0:-1,column]+data_new[1:,column])/2,'.',label="GiRaFFE_NRPy+injected BU") # plt.plot(data_new[1:,0]-(data_new[0,0]-data_new[1,0])/2.0,data_old[1:,column_old[column]],label="old GiRaFFE") # -(data_old[0,9]-data_old[1,9])/2.0 # plt.plot(data_new[3:-3,0],deltaA_new[3:-3,column],'.') # plt.plot(data_new[3:-3,0],deltaA_old[3:-3,column_old[column]]-deltaA_new[3:-3,column]) # plt.xlim(-0.1,0.1) # plt.ylim(-0.2,0.2) # plt.legend() # plt.show() # print(np.argmin(deltaA_old[3:-3,column_old[column]]-deltaA_new[3:-3,column])) def SDA(a,b): return 1.0-np.log10(2.0*np.abs(a-b)/(np.abs(a)+np.abs(b))+1.0e-15) Data_stable = np.loadtxt(os.path.join(Ccodesdir,"..","out119-00000040_stable.txt")) numbers_to_check = Data_numer[:,column]-Data_stable[:,column] for index in range(len(numbers_to_check)): # print(SDA(Data_numer[index,column],Data_stable[index,column])) if SDA(Data_numer[index,column],Data_stable[index,column])<10: print("Error: number of SDAs too low: "+str(SDA(Data_numer[index,column],Data_stable[index,column]))) sys.exit(1) ###Output _____no_output_____ ###Markdown This code will create an animation of the wave over time. 
###Code # import matplotlib.pyplot as plt from matplotlib.pyplot import savefig from IPython.display import HTML import matplotlib.image as mgimg import glob import sys from matplotlib import animation cmd.delete_existing_files("out119-00*.png") globby = glob.glob(os.path.join(Ccodesdir,'output','out119-00*.txt')) file_list = [] for x in sorted(globby): file_list.append(x) number_of_files = int(len(file_list)/2) for timestep in range(number_of_files): fig = plt.figure() numer_filename = file_list[2*timestep] exact_filename = file_list[2*timestep+1] Numer = np.loadtxt(numer_filename) Exact = np.loadtxt(exact_filename) plt.title("Alfven Wave") plt.xlabel("x") plt.ylabel("BU2") plt.xlim(-0.5,0.5) plt.ylim(1.0,1.7) plt.plot(Numer[3:-3,0],Numer[3:-3,3],'.',label="Numerical") plt.plot(Exact[3:-3,0],Exact[3:-3,3],label="Exact") plt.legend() savefig(numer_filename+".png",dpi=150) plt.close(fig) sys.stdout.write("%c[2K" % 27) sys.stdout.write("Processing file "+numer_filename+"\r") sys.stdout.flush() ## VISUALIZATION ANIMATION, PART 2: Combine PNGs to generate movie ## # https://stackoverflow.com/questions/14908576/how-to-remove-frame-from-matplotlib-pyplot-figure-vs-matplotlib-figure-frame # https://stackoverflow.com/questions/23176161/animating-pngs-in-matplotlib-using-artistanimation # !rm -f GiRaFFE_NRPy-1D_tests.mp4 cmd.delete_existing_files("GiRaFFE_NRPy-1D_tests.mp4") fig = plt.figure(frameon=False) ax = fig.add_axes([0, 0, 1, 1]) ax.axis('off') myimages = [] for i in range(number_of_files): img = mgimg.imread(file_list[2*i]+".png") imgplot = plt.imshow(img) myimages.append([imgplot]) ani = animation.ArtistAnimation(fig, myimages, interval=100, repeat_delay=1000) plt.close() ani.save('GiRaFFE_NRPy-1D_tests.mp4', fps=5,dpi=150) %%HTML <video width="480" height="360" controls> <source src="GiRaFFE_NRPy-1D_tests.mp4" type="video/mp4"> </video> import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-GiRaFFE_NRPy_Main_Driver",location_of_template_file=os.path.join("..")) ###Output Created Tutorial-GiRaFFE_NRPy_Main_Driver.tex, and compiled LaTeX file to PDF file Tutorial-GiRaFFE_NRPy_Main_Driver.pdf ###Markdown window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'UA-59152712-8'); Start-to-Finish Example: $\text{GiRaFFE_HO}$ 1D tests Author: Patrick Nelson Adapted from [Start-to-Finish Example: Head-On Black Hole Collision](../Tutorial-Start_to_Finish-BSSNCurvilinear-Two_BHs_Collide.ipynb) This module implements a basic GRFFE code to evolve one-dimensional GRFFE waves. 
NRPy+ Source Code for this module: * [GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Exact_Wald.py](../../edit/in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Exact_Wald.py) [\[**tutorial**\]](Tutorial-GiRaFFEfood_NRPy_Exact_Wald.ipynb) Generates Exact Wald initial data* [GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Aligned_Rotator.py](../../edit/in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Aligned_Rotator.py) [\[**tutorial**\]](Tutorial-GiRaFFEfood_NRPy_Aligned_Rotator.ipynb) Generates Aligned Rotator initial data* [GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_1D_tests.py](../../edit/in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_1D_tests.py) [\[**tutorial**\]](Tutorial-GiRaFFEfood_NRPy_1D_tests.ipynb) Generates Alfv&eacute;n Wave initial data.* [GiRaFFE_NRPy/Afield_flux.py](../../edit/in_progress/GiRaFFE_NRPy/Afield_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Afield_flux.ipynb) Generates the expressions to find the flux term of the induction equation.* [GiRaFFE_NRPy/GiRaFFE_NRPy_A2B.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_A2B.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Afield_flux.ipynb) Generates the driver to compute the magnetic field from the vector potential/* [GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-BCs.ipynb) Generates the code to apply boundary conditions to the vector potential, scalar potential, and three-velocity.* [GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-C2P_P2C.ipynb) Generates the conservative-to-primitive and primitive-to-conservative solvers.* [GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Metric_Face_Values.ipynb) Generates code to interpolate metric gridfunctions to cell faces.* [GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-PPM.ipynb) Genearates code to reconstruct primitive variables on cell faces.* [GiRaFFE_NRPy/Stilde_flux.py](../../edit/in_progress/GiRaFFE_NRPy/Stilde_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Stilde_flux.ipynb) Generates the expressions to find the flux term of the Poynting flux evolution equation.* [../GRFFE/equations.py](../../edit/GRFFE/equations.py) [\[**tutorial**\]](../Tutorial-GRFFE_Equations-Cartesian.ipynb) Generates code necessary to compute the source terms.* [../GRHD/equations.py](../../edit/GRHD/equations.py) [\[**tutorial**\]](../Tutorial-GRHD_Equations-Cartesian.ipynb) Generates code necessary to compute the source terms.Here we use NRPy+ to generate the C source code necessary to set up initial data for an Alfv&eacute;n wave (see [the original GiRaFFE paper](https://arxiv.org/pdf/1704.00599.pdf)). Then we use it to generate the RHS expressions for [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html) time integration based on the [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4).The entire algorithm is outlined below, with NRPy+-based components highlighted in green.1. Allocate memory for gridfunctions, including temporary storage for the RK4 time integration.1. (**Step 2** below) Set gridfunction values to initial data (**[documented in previous module](Tutorial-GiRaFFEfood_HO_1D_tests.ipynb)**).1. 
Evolve the initial data forward in time using RK4 time integration. At each RK4 substep, do the following: 1. (**Step 3A** below) Evaluate GRFFE RHS expressions. 1. (**Step 4** below) Apply singular, curvilinear coordinate boundary conditions [*a la* the SENR/NRPy+ paper](https://arxiv.org/abs/1712.07658)1. (**Step 3B** below) At the end of each iteration in time, output the FFE variables. (This is in Step 3B, because Step 4 requires that *all* gridfunctions be defined.)1. Repeat above steps at two numerical resolutions to confirm convergence to the expected value. Table of Contents$$\label{toc}$$This notebook is organized as follows1. [Step 1](initializenrpy): Set core NRPy+ parameters for numerical grids1. [Step 2](grffe): Output C code for GRFFE evolution1. [Step 3](gf_id): Import `GiRaFFEfood_NRPy` initial data modules1. [Step 4](cparams): Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h`1. [Step 5](mainc): `GiRaFFE_NRPy_standalone.c`: The Main C Code Step 1: Set up core functions and parameters for solving GRFFE equations \[Back to [top](toc)\]$$\label{setup}$$ ###Code import shutil, os, sys # Standard Python modules for multiplatform OS-level functions # First, we'll add the parent directory to the list of directories Python will check for modules. nrpy_dir_path = os.path.join("..") if nrpy_dir_path not in sys.path: sys.path.append(nrpy_dir_path) # Step P1: Import needed NRPy+ core modules: from outputC import outCfunction, lhrh # NRPy+: Core C code output module import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends import finite_difference as fin # NRPy+: Finite difference C code generation module import NRPy_param_funcs as par # NRPy+: Parameter interface import grid as gri # NRPy+: Functions having to do with numerical grids import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface # Step P2: Create C code output directory: Ccodesdir = os.path.join("GiRaFFE_staggered_standalone_Ccodes/") # First remove C code output directory if it exists # Courtesy https://stackoverflow.com/questions/303200/how-do-i-remove-delete-a-folder-that-is-not-empty # !rm -r ScalarWaveCurvilinear_Playground_Ccodes shutil.rmtree(Ccodesdir, ignore_errors=True) # Then create a fresh directory cmd.mkdir(Ccodesdir) # Step P3: Create executable output directory: outdir = os.path.join(Ccodesdir,"output/") cmd.mkdir(Ccodesdir) cmd.mkdir(outdir) # Step P5: Set timestepping algorithm (we adopt the Method of Lines) REAL = "double" # Best to use double here. default_CFL_FACTOR= 0.5 # (GETS OVERWRITTEN WHEN EXECUTED.) In pure axisymmetry (symmetry_axes = 2 below) 1.0 works fine. Otherwise 0.5 or lower. # Step P6: Set the finite differencing order to 2. par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",2) thismodule = "Start_to_Finish-GiRaFFE_NRPy-1D_tests" TINYDOUBLE = par.Cparameters("REAL", thismodule, "TINYDOUBLE", 1e-100) import GiRaFFE_NRPy.GiRaFFE_NRPy_Main_Driver_staggered as md # par.set_paramsvals_value("GiRaFFE_NRPy.GiRaFFE_NRPy_C2P_P2C::enforce_speed_limit_StildeD = False") par.set_paramsvals_value("GiRaFFE_NRPy.GiRaFFE_NRPy_C2P_P2C::enforce_current_sheet_prescription = False") ###Output _____no_output_____ ###Markdown Step 2: Output C code for GRFFE evolution \[Back to [top](toc)\]$$\label{grffe}$$We will first write the the C codes needed for GRFFE evolution. 
We have already written a module to generate all these codes in order and call the functions in the appropriate order, so we will import that here. We will take the slightly unusual step of doing this before we generate the initial data functions because the main driver module will register all the gridfunctions we need. We will also include the file early on, because it will generate functions that, in addition to their normal spot in the MoL timestepping, will need to be called during the initial data step to make sure all the variables are appropriately filled in. ###Code md.GiRaFFE_NRPy_Main_Driver_generate_all(Ccodesdir) RK_method = "Euler" # Step 3: Generate Runge-Kutta-based (RK-based) timestepping code. # As described above the Table of Contents, this is a 3-step process: # 3.A: Evaluate RHSs (RHS_string) # 3.B: Apply boundary conditions (post_RHS_string, pt 1) import MoLtimestepping.C_Code_Generation as MoL from MoLtimestepping.RK_Butcher_Table_Dictionary import Butcher_dict RK_order = Butcher_dict[RK_method][1] cmd.mkdir(os.path.join(Ccodesdir,"MoLtimestepping/")) MoL.MoL_C_Code_Generation(RK_method, RHS_string = """ GiRaFFE_NRPy_RHSs(&params,auxevol_gfs,RK_INPUT_GFS,RK_OUTPUT_GFS);""", post_RHS_string = """ GiRaFFE_NRPy_post_step(&params,xx,auxevol_gfs,RK_OUTPUT_GFS,n+1);\n""", outdir = os.path.join(Ccodesdir,"MoLtimestepping/")) shutil.copy("Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.h",os.path.join(Ccodesdir,"RHSs")) shutil.copy("A_i_rhs_no_gauge_terms.h",os.path.join(Ccodesdir,"RHSs")) cmd.mkdir(os.path.join(Ccodesdir,"A2B/")) shutil.copy("compute_B_and_Bstagger_from_A.h",os.path.join(Ccodesdir,"A2B")) ###Output Output C function calculate_StildeD0_source_term() to file GiRaFFE_staggered_standalone_Ccodes/RHSs\calculate_StildeD0_source_term.h Output C function calculate_StildeD1_source_term() to file GiRaFFE_staggered_standalone_Ccodes/RHSs\calculate_StildeD1_source_term.h Output C function calculate_StildeD2_source_term() to file GiRaFFE_staggered_standalone_Ccodes/RHSs\calculate_StildeD2_source_term.h Output C function calculate_Stilde_flux_D0_right() to file GiRaFFE_staggered_standalone_Ccodes/RHSs\calculate_Stilde_flux_D0_right.h Output C function calculate_Stilde_flux_D0_left() to file GiRaFFE_staggered_standalone_Ccodes/RHSs\calculate_Stilde_flux_D0_left.h Output C function calculate_Stilde_flux_D1_right() to file GiRaFFE_staggered_standalone_Ccodes/RHSs\calculate_Stilde_flux_D1_right.h Output C function calculate_Stilde_flux_D1_left() to file GiRaFFE_staggered_standalone_Ccodes/RHSs\calculate_Stilde_flux_D1_left.h Output C function calculate_Stilde_flux_D2_right() to file GiRaFFE_staggered_standalone_Ccodes/RHSs\calculate_Stilde_flux_D2_right.h Output C function calculate_Stilde_flux_D2_left() to file GiRaFFE_staggered_standalone_Ccodes/RHSs\calculate_Stilde_flux_D2_left.h Output C function GiRaFFE_NRPy_cons_to_prims() to file GiRaFFE_staggered_standalone_Ccodes/C2P\GiRaFFE_NRPy_cons_to_prims.h Output C function GiRaFFE_NRPy_prims_to_cons() to file GiRaFFE_staggered_standalone_Ccodes/C2P\GiRaFFE_NRPy_prims_to_cons.h ###Markdown Step 3: Import `GiRaFFEfood_NRPy` initial data modules \[Back to [top](toc)\]$$\label{gf_id}$$With the preliminaries out of the way, we will write the C functions to set up initial data. There are two categories of initial data that must be set: the spacetime metric variables, and the GRFFE plasma variables. We will set up the spacetime first. ###Code # There are several initial data routines we need to test. 
We'll control which one we use with a string option initial_data = "AlfvenWave" # Valid options: "ExactWald", "AlignedRotator", "AlfvenWave", "FastWave" spacetime = "flat" # Valid options: "ShiftedKerrSchild", "flat" if spacetime == "ShiftedKerrSchild": # Exact Wald is more complicated. We'll need the Shifted Kerr Schild metric in Cartesian coordinates. import BSSN.ShiftedKerrSchild as sks sks.ShiftedKerrSchild(True) import reference_metric as rfm par.set_parval_from_str("reference_metric::CoordSystem","Cartesian") rfm.reference_metric() # Use the Jacobian matrix to transform the vectors to Cartesian coordinates. drrefmetric__dx_0UDmatrix = sp.Matrix([[sp.diff(rfm.xxSph[0],rfm.xx[0]), sp.diff(rfm.xxSph[0],rfm.xx[1]), sp.diff(rfm.xxSph[0],rfm.xx[2])], [sp.diff(rfm.xxSph[1],rfm.xx[0]), sp.diff(rfm.xxSph[1],rfm.xx[1]), sp.diff(rfm.xxSph[1],rfm.xx[2])], [sp.diff(rfm.xxSph[2],rfm.xx[0]), sp.diff(rfm.xxSph[2],rfm.xx[1]), sp.diff(rfm.xxSph[2],rfm.xx[2])]]) dx__drrefmetric_0UDmatrix = drrefmetric__dx_0UDmatrix.inv() gammaDD = ixp.zerorank2() for i in range(3): for j in range(3): for k in range(3): for l in range(3): gammaDD[i][j] += drrefmetric__dx_0UDmatrix[(k,i)]*drrefmetric__dx_0UDmatrix[(l,j)]*sks.gammaSphDD[k][l].subs(sks.r,rfm.xxSph[0]).subs(sks.th,rfm.xxSph[1]) betaU = ixp.zerorank1() for i in range(3): for j in range(3): betaU[i] += dx__drrefmetric_0UDmatrix[(i,j)]*sks.betaSphU[j].subs(sks.r,rfm.xxSph[0]).subs(sks.th,rfm.xxSph[1]) alpha = sks.alphaSph.subs(sks.r,rfm.xxSph[0]).subs(sks.th,rfm.xxSph[1]) # Description and options for this initial data desc = "Generate a spinning black hole with Shifted Kerr Schild metric." loopopts_id ="AllPoints,Read_xxs" elif spacetime == "flat": gammaDD = ixp.zerorank2(DIM=3) for i in range(3): for j in range(3): if i==j: gammaDD[i][j] = sp.sympify(1) # else: leave as zero betaU = ixp.zerorank1() # All should be 0 alpha = sp.sympify(1) # Description and options for this initial data desc = "Generate a flat spacetime metric." loopopts_id ="AllPoints" # we don't need to read coordinates for flat spacetime. name = "set_initial_spacetime_metric_data" values_to_print = [\ lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD00"),rhs=gammaDD[0][0]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD01"),rhs=gammaDD[0][1]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD02"),rhs=gammaDD[0][2]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD11"),rhs=gammaDD[1][1]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD12"),rhs=gammaDD[1][2]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD22"),rhs=gammaDD[2][2]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU0"),rhs=betaU[0]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU1"),rhs=betaU[1]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU2"),rhs=betaU[2]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","alpha"),rhs=alpha),\ ] outCfunction( outfile = os.path.join(Ccodesdir,name+".h"), desc=desc, name=name, params ="const paramstruct *params,REAL *xx[3],REAL *auxevol_gfs", body = fin.FD_outputC("returnstring",values_to_print,params="outCverbose=False").replace("IDX4","IDX4S"), loopopts = loopopts_id) ###Output Output C function set_initial_spacetime_metric_data() to file GiRaFFE_staggered_standalone_Ccodes/set_initial_spacetime_metric_data.h ###Markdown Now, we will write out the initial data function for the GRFFE variables. ###Code if initial_data=="AlfvenWave": import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_1D_tests as gid gid.GiRaFFEfood_NRPy_1D_tests(stagger = True) desc = "Generate Alfven wave 1D initial test data for GiRaFFEfood_NRPy." 
elif initial_data=="FastWave": import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_1D_tests_fast_wave as gid gid.GiRaFFEfood_NRPy_1D_tests_fast_wave() desc = "Generate fast wave 1D initial test data for GiRaFFEfood_NRPy." elif initial_data=="AlignedRotator": import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_Aligned_Rotator as gid gid.GiRaFFEfood_NRPy_Aligned_Rotator() desc = "Generate aligned rotator initial test data for GiRaFFEfood_NRPy." elif initial_data=="ExactWald": import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_Exact_Wald as gid M,r0 = sp.symbols("M r0") gid.GiRaFFEfood_NRPy_Exact_Wald(gammaDD,M,r0) desc = "Generate exact Wald initial test data for GiRaFFEfood_NRPy." else: print("Unsupported Initial Data string "+initial_data+"! Supported ID: AlfvenWave, FastWave, AlignedRotator, or ExactWald") name = "initial_data" values_to_print = [\ lhrh(lhs=gri.gfaccess("out_gfs","AD0"),rhs=gid.AD[0]),\ lhrh(lhs=gri.gfaccess("out_gfs","AD1"),rhs=gid.AD[1]),\ lhrh(lhs=gri.gfaccess("out_gfs","AD2"),rhs=gid.AD[2]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU0"),rhs=gid.ValenciavU[0]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU1"),rhs=gid.ValenciavU[1]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU2"),rhs=gid.ValenciavU[2]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","BU0"),rhs=gid.BU[0]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","BU1"),rhs=gid.BU[1]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","BU2"),rhs=gid.BU[2]),\ lhrh(lhs=gri.gfaccess("out_gfs","psi6Phi"),rhs=sp.sympify(0))\ ] outCfunction( outfile = os.path.join(Ccodesdir,name+".h"), desc=desc, name=name, params ="const paramstruct *params,REAL *xx[3],REAL *auxevol_gfs,REAL *out_gfs", body = fin.FD_outputC("returnstring",values_to_print,params="outCverbose=False").replace("IDX4","IDX4S"), loopopts ="AllPoints,Read_xxs") ###Output Output C function initial_data() to file GiRaFFE_staggered_standalone_Ccodes/initial_data.h ###Markdown Step 4: Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h` \[Back to [top](toc)\]$$\label{cparams}$$Based on declared NRPy+ Cparameters, first we generate `declare_Cparameters_struct.h`, `set_Cparameters_default.h`, and `set_Cparameters[-SIMD].h`.Then we output `free_parameters.h`, which sets initial data parameters, as well as grid domain & reference metric parameters, applying `domain_size` and `sinh_width`/`SymTP_bScale` (if applicable) as set above ###Code # Step 3.e: Output C codes needed for declaring and setting Cparameters; also set free_parameters.h # Step 3.e.i: Generate declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h par.generate_Cparameters_Ccodes(os.path.join(Ccodesdir)) # Step 3.e.ii: Set free_parameters.h with open(os.path.join(Ccodesdir,"free_parameters.h"),"w") as file: file.write("""// Override parameter defaults with values based on command line arguments and NGHOSTS. 
params.Nxx0 = atoi(argv[1]); params.Nxx1 = atoi(argv[2]); params.Nxx2 = atoi(argv[3]); params.Nxx_plus_2NGHOSTS0 = params.Nxx0 + 2*NGHOSTS; params.Nxx_plus_2NGHOSTS1 = params.Nxx1 + 2*NGHOSTS; params.Nxx_plus_2NGHOSTS2 = params.Nxx2 + 2*NGHOSTS; // Step 0d: Set up space and time coordinates // Step 0d.i: Declare \Delta x^i=dxx{0,1,2} and invdxx{0,1,2}, as well as xxmin[3] and xxmax[3]: const REAL xxmin[3] = {-1.5,-0.1,-0.1}; const REAL xxmax[3] = { 1.5, 0.1, 0.1}; //const REAL xxmin[3] = {-1.5,-1.5,-1.5}; //const REAL xxmax[3] = { 1.5, 1.5, 1.5}; params.dxx0 = (xxmax[0] - xxmin[0]) / ((REAL)params.Nxx0+1); params.dxx1 = (xxmax[1] - xxmin[1]) / ((REAL)params.Nxx1+1); params.dxx2 = (xxmax[2] - xxmin[2]) / ((REAL)params.Nxx2+1); printf("dxx0,dxx1,dxx2 = %.5e,%.5e,%.5e\\n",params.dxx0,params.dxx1,params.dxx2); params.invdx0 = 1.0 / params.dxx0; params.invdx1 = 1.0 / params.dxx1; params.invdx2 = 1.0 / params.dxx2; const int poison_grids = 0; // Standard GRFFE parameters: params.GAMMA_SPEED_LIMIT = 2000.0; params.diss_strength = 0.1; """) if initial_data=="ExactWald": with open(os.path.join(out_dir,"free_parameters.h"),"a") as file: file.write("""params.r0 = 0.4; params.a = 0.0; """) ###Output _____no_output_____ ###Markdown Step 4: Set up boundary condition functions for chosen singular, curvilinear coordinate system \[Back to [top](toc)\]$$\label{bc_functs}$$Next apply singular, curvilinear coordinate boundary conditions [as documented in the corresponding NRPy+ tutorial notebook](Tutorial-Start_to_Finish-Curvilinear_BCs.ipynb)...But, for the moment, we're actually just using this because it writes the file `gridfunction_defines.h`. ###Code import CurviBoundaryConditions.CurviBoundaryConditions as cbcs cbcs.Set_up_CurviBoundaryConditions(os.path.join(Ccodesdir,"boundary_conditions/"),Cparamspath=os.path.join("../"),enable_copy_of_static_Ccodes=False) ###Output Wrote to file "GiRaFFE_staggered_standalone_Ccodes/boundary_conditions/parity_conditions_symbolic_dot_products.h" Evolved parity: ( AD0:1, AD1:2, AD2:3, StildeD0:1, StildeD1:2, StildeD2:3, psi6Phi:0 ) AuxEvol parity: ( BU0:1, BU1:2, BU2:3, B_lU0:1, B_lU1:2, B_lU2:3, B_rU0:1, B_rU1:2, B_rU2:3, BstaggerU0:1, BstaggerU1:2, BstaggerU2:3, Bstagger_lU0:1, Bstagger_lU1:2, Bstagger_lU2:3, Bstagger_rU0:1, Bstagger_rU1:2, Bstagger_rU2:3, ValenciavU0:1, ValenciavU1:2, ValenciavU2:3, Valenciav_lU0:1, Valenciav_lU1:2, Valenciav_lU2:3, Valenciav_llU0:1, Valenciav_llU1:2, Valenciav_llU2:3, Valenciav_lrU0:1, Valenciav_lrU1:2, Valenciav_lrU2:3, Valenciav_rU0:1, Valenciav_rU1:2, Valenciav_rU2:3, Valenciav_rlU0:1, Valenciav_rlU1:2, Valenciav_rlU2:3, Valenciav_rrU0:1, Valenciav_rrU1:2, Valenciav_rrU2:3, alpha:0, alpha_face:0, betaU0:1, betaU1:2, betaU2:3, beta_faceU0:1, beta_faceU1:2, beta_faceU2:3, cmax_x:0, cmax_y:0, cmax_z:0, cmin_x:0, cmin_y:0, cmin_z:0, gammaDD00:4, gammaDD01:5, gammaDD02:6, gammaDD11:7, gammaDD12:8, gammaDD22:9, gamma_faceDD00:4, gamma_faceDD01:5, gamma_faceDD02:6, gamma_faceDD11:7, gamma_faceDD12:8, gamma_faceDD22:9, psi6_temp:0, psi6center:0 ) Wrote to file "GiRaFFE_staggered_standalone_Ccodes/boundary_conditions/EigenCoord_Cart_to_xx.h" ###Markdown Step 5: `GiRaFFE_NRPy_standalone.c`: The Main C Code \[Back to [top](toc)\]$$\label{mainc}$$ ###Code # Part P0: Define REAL, set the number of ghost cells NGHOSTS (from NRPy+'s FD_CENTDERIVS_ORDER), # and set the CFL_FACTOR (which can be overwritten at the command line) with open(os.path.join(Ccodesdir,"GiRaFFE_NRPy_REAL__NGHOSTS__CFL_FACTOR.h"), "w") as file: file.write(""" // Part 
P0.a: Set the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER #define NGHOSTS """+str(3)+""" #define NGHOSTS_A2B """+str(2)+""" // Part P0.b: Set the numerical precision (REAL) to double, ensuring all floating point // numbers are stored to at least ~16 significant digits #define REAL """+REAL+""" // Part P0.c: Set the CFL Factor. Can be overwritten at command line. REAL CFL_FACTOR = """+str(default_CFL_FACTOR)+";") %%writefile $Ccodesdir/GiRaFFE_NRPy_standalone.c // Step P0: Define REAL and NGHOSTS; and declare CFL_FACTOR. This header is generated in NRPy+. #include "GiRaFFE_NRPy_REAL__NGHOSTS__CFL_FACTOR.h" #include "declare_Cparameters_struct.h" const int NSKIP_1D_OUTPUT = 1; // Step P1: Import needed header files #include "stdio.h" #include "stdlib.h" #include "math.h" #include "time.h" #include "stdint.h" // Needed for Windows GCC 6.x compatibility #ifndef M_PI #define M_PI 3.141592653589793238462643383279502884L #endif #ifndef M_SQRT1_2 #define M_SQRT1_2 0.707106781186547524400844362104849039L #endif // Step P2: Declare the IDX4S(gf,i,j,k) macro, which enables us to store 4-dimensions of // data in a 1D array. In this case, consecutive values of "i" // (all other indices held to a fixed value) are consecutive in memory, where // consecutive values of "j" (fixing all other indices) are separated by // Nxx_plus_2NGHOSTS0 elements in memory. Similarly, consecutive values of // "k" are separated by Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1 in memory, etc. #define IDX4S(g,i,j,k) \ ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) + Nxx_plus_2NGHOSTS2 * (g) ) ) ) #define IDX4ptS(g,idx) ( (idx) + (Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2) * (g) ) #define IDX3S(i,j,k) ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) ) ) ) #define LOOP_REGION(i0min,i0max, i1min,i1max, i2min,i2max) \ for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) #define LOOP_ALL_GFS_GPS(ii) _Pragma("omp parallel for") \ for(int (ii)=0;(ii)<Nxx_plus_2NGHOSTS_tot*NUM_EVOL_GFS;(ii)++) // Step P3: Set gridfunction macros #include "boundary_conditions/gridfunction_defines.h" // Step P4: Include the RHS, BC, and primitive recovery functions #include "GiRaFFE_NRPy_Main_Driver.h" // Step P5: Include the initial data functions #include "set_initial_spacetime_metric_data.h" #include "initial_data.h" // main() function: // Step 0: Read command-line input, set up grid structure, allocate memory for gridfunctions, set up coordinates // Step 1: Set up scalar wave initial data // Step 2: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm, // applying quadratic extrapolation outer boundary conditions. // Step 3: Output relative error between numerical and exact solution. // Step 4: Free all allocated memory int main(int argc, const char *argv[]) { paramstruct params; #include "set_Cparameters_default.h" // Step 0a: Read command-line input, error out if nonconformant if(argc != 4 || atoi(argv[1]) < NGHOSTS || atoi(argv[2]) < NGHOSTS || atoi(argv[3]) < NGHOSTS) { printf("Error: Expected three command-line arguments: ./GiRaFFE_NRPy_standalone [Nx] [Ny] [Nz],\n"); printf("where Nx is the number of grid points in the x direction, and so forth.\n"); printf("Nx,Ny,Nz MUST BE larger than NGHOSTS (= %d)\n",NGHOSTS); exit(1); } // Step 0c: Set free parameters, overwriting Cparameters defaults // by hand or with command-line input, as desired. #include "free_parameters.h" #include "set_Cparameters-nopointer.h" // ... 
and then set up the numerical grid structure in time: const REAL t_final = 0.5; const REAL CFL_FACTOR = 0.5; // Set the CFL Factor // Step 0c: Allocate memory for gridfunctions const int Nxx_plus_2NGHOSTS_tot = Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2; // Step 0k: Allocate memory for gridfunctions #include "MoLtimestepping/RK_Allocate_Memory.h" REAL *restrict auxevol_gfs = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot); REAL *evol_gfs_exact = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot); REAL *auxevol_gfs_exact = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot); // For debugging, it can be useful to set everything to NaN initially. if(poison_grids) { for(int ii=0;ii<NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) { y_n_gfs[ii] = 1.0/0.0; y_nplus1_running_total_gfs[ii] = 1.0/0.0; //k_odd_gfs[ii] = 1.0/0.0; //k_even_gfs[ii] = 1.0/0.0; diagnostic_output_gfs[ii] = 1.0/0.0; evol_gfs_exact[ii] = 1.0/0.0; } for(int ii=0;ii<NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) { auxevol_gfs[ii] = 1.0/0.0; auxevol_gfs_exact[ii] = 1.0/0.0; } } // Step 0d: Set up coordinates: Set dx, and then dt based on dx_min and CFL condition // This is probably already defined above, but just in case... #ifndef MIN #define MIN(A, B) ( ((A) < (B)) ? (A) : (B) ) #endif REAL dt = CFL_FACTOR * MIN(dxx0,MIN(dxx1,dxx2)); // CFL condition int Nt = (int)(t_final / dt + 0.5); // The number of points in time. //Add 0.5 to account for C rounding down integers. // Step 0e: Set up cell-centered Cartesian coordinate grids REAL *xx[3]; xx[0] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS0); xx[1] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS1); xx[2] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS2); for(int j=0;j<Nxx_plus_2NGHOSTS0;j++) xx[0][j] = xxmin[0] + (j-NGHOSTS+1)*dxx0; for(int j=0;j<Nxx_plus_2NGHOSTS1;j++) xx[1][j] = xxmin[1] + (j-NGHOSTS+1)*dxx1; for(int j=0;j<Nxx_plus_2NGHOSTS2;j++) xx[2][j] = xxmin[2] + (j-NGHOSTS+1)*dxx2; // Step 1: Set up initial data to be exact solution at time=0: REAL time = 0.0; set_initial_spacetime_metric_data(&params,xx,auxevol_gfs); initial_data(&params,xx,auxevol_gfs,y_n_gfs); // Code to perturb the initial data: for(int ii=0;ii<NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) { y_n_gfs[ii] += 1.0e-15; y_nplus1_running_total_gfs[ii] += 1.0e-15; //k_odd_gfs[ii] = 1.0/0.0; //k_even_gfs[ii] = 1.0/0.0; diagnostic_output_gfs[ii] += 1.0e-15; evol_gfs_exact[ii] += 1.0e-15; } for(int ii=0;ii<NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) { auxevol_gfs[ii] += 1.0e-15; auxevol_gfs_exact[ii] += 1.0e-15; } // Fill in the remaining quantities //apply_bcs_potential(&params,y_n_gfs); GiRaFFE_compute_B_and_Bstagger_from_A(&params, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD00GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD01GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD02GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD11GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD12GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD22GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*PSI6_TEMPGF, /* Temporary storage,overwritten */ y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD0GF, y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD1GF, y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD2GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU0GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU1GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU2GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU0GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU1GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU2GF); //override_BU_with_old_GiRaFFE(&params,auxevol_gfs,0); 
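    // With A_i set and both the cell-centered (BU) and staggered (BstaggerU) magnetic fields
    // computed above, fill in the conservative variables StildeD from the primitives: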
GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs,y_n_gfs); //apply_bcs_velocity(&params,auxevol_gfs); // Extra stack, useful for debugging: GiRaFFE_NRPy_cons_to_prims(&params,xx,auxevol_gfs,y_n_gfs); //GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs,y_n_gfs); //GiRaFFE_NRPy_cons_to_prims(&params,xx,auxevol_gfs,y_n_gfs); //GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs,y_n_gfs); //GiRaFFE_NRPy_cons_to_prims(&params,xx,auxevol_gfs,y_n_gfs); for(int n=0;n<=Nt;n++) { // Main loop to progress forward in time. //for(int n=0;n<=1;n++) { // Main loop to progress forward in time. // Step 1a: Set current time to correct value & compute exact solution time = ((REAL)n)*dt; /* Step 2: Validation: Output relative error between numerical and exact solution, */ if((n)%NSKIP_1D_OUTPUT ==0) { // Step 2c: Output relative error between exact & numerical at center of grid. const int i0mid=Nxx_plus_2NGHOSTS0/2; const int i1mid=Nxx_plus_2NGHOSTS1/2; const int i2mid=Nxx_plus_2NGHOSTS2/2; char filename[100]; sprintf(filename,"out%d-%08d.txt",Nxx0,n); FILE *out2D = fopen(filename, "w"); for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) { const int idx = IDX3S(i0,i1mid,i2mid); fprintf(out2D,"%.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e\n", xx[0][i0], auxevol_gfs[IDX4ptS(BU0GF,idx)],auxevol_gfs[IDX4ptS(BU1GF,idx)],auxevol_gfs[IDX4ptS(BU2GF,idx)], y_n_gfs[IDX4ptS(AD0GF,idx)],y_n_gfs[IDX4ptS(AD1GF,idx)],y_n_gfs[IDX4ptS(AD2GF,idx)], y_n_gfs[IDX4ptS(STILDED0GF,idx)],y_n_gfs[IDX4ptS(STILDED1GF,idx)],y_n_gfs[IDX4ptS(STILDED2GF,idx)], auxevol_gfs[IDX4ptS(VALENCIAVU0GF,idx)],auxevol_gfs[IDX4ptS(VALENCIAVU1GF,idx)],auxevol_gfs[IDX4ptS(VALENCIAVU2GF,idx)], y_n_gfs[IDX4ptS(PSI6PHIGF,idx)]); } fclose(out2D); // For convergence testing, we'll shift the grid x -> x-1 and output initial data again, giving the exact solution. LOOP_REGION(0,Nxx_plus_2NGHOSTS0,0,1,0,1) { //xx[0][i0] += -mu_AW*time; xx[0][i0] += -time; } set_initial_spacetime_metric_data(&params,xx,auxevol_gfs_exact); initial_data(&params,xx,auxevol_gfs_exact,evol_gfs_exact); // Fill in the remaining quantities //driver_A_to_B(&params,evol_gfs_exact,auxevol_gfs_exact); GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs_exact,evol_gfs_exact); // And now, we'll set the grid back to rights. LOOP_REGION(0,Nxx_plus_2NGHOSTS0,0,1,0,1) { //xx[0][i0] -= -mu_AW*time; xx[0][i0] -= -time; } sprintf(filename,"out%d-%08d_exact.txt",Nxx0,n); FILE *out2D_exact = fopen(filename, "w"); for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) { const int idx = IDX3S(i0,i1mid,i2mid); fprintf(out2D_exact,"%.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e\n", xx[0][i0], auxevol_gfs_exact[IDX4ptS(BU0GF,idx)],auxevol_gfs_exact[IDX4ptS(BU1GF,idx)],auxevol_gfs_exact[IDX4ptS(BU2GF,idx)], evol_gfs_exact[IDX4ptS(AD0GF,idx)],evol_gfs_exact[IDX4ptS(AD1GF,idx)],evol_gfs_exact[IDX4ptS(AD2GF,idx)], evol_gfs_exact[IDX4ptS(STILDED0GF,idx)],evol_gfs_exact[IDX4ptS(STILDED1GF,idx)],evol_gfs_exact[IDX4ptS(STILDED2GF,idx)], auxevol_gfs_exact[IDX4ptS(VALENCIAVU0GF,idx)],auxevol_gfs_exact[IDX4ptS(VALENCIAVU1GF,idx)],auxevol_gfs_exact[IDX4ptS(VALENCIAVU2GF,idx)], evol_gfs_exact[IDX4ptS(PSI6PHIGF,idx)]); } fclose(out2D_exact); } // Step 3: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm, // applying quadratic extrapolation outer boundary conditions. 
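    // (RK_MoL.h below expands into the substeps of the chosen MoL/RK scheme; each substep
    //  evaluates the right-hand sides via GiRaFFE_NRPy_RHSs() and then calls
    //  GiRaFFE_NRPy_post_step(), exactly as specified when the MoL C code was generated
    //  earlier in this notebook.)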
// Step 3.b: Step forward one timestep (t -> t+dt) in time using // chosen RK-like MoL timestepping algorithm #include "MoLtimestepping/RK_MoL.h" } // End main loop to progress forward in time. // Step 4: Free all allocated memory #include "MoLtimestepping/RK_Free_Memory.h" free(auxevol_gfs); free(auxevol_gfs_exact); free(evol_gfs_exact); for(int i=0;i<3;i++) free(xx[i]); return 0; } cmd.C_compile(os.path.join(Ccodesdir,"GiRaFFE_NRPy_standalone.c"), os.path.join(Ccodesdir,"output","GiRaFFE_NRPy_standalone"),compile_mode="safe") # !gcc -g -O2 -fopenmp GiRaFFE_standalone_Ccodes/GiRaFFE_NRPy_standalone.c -o GiRaFFE_NRPy_standalone -lm # Change to output directory os.chdir(outdir) # Clean up existing output files cmd.delete_existing_files("out*.txt") cmd.delete_existing_files("out*.png") # cmd.Execute(os.path.join(Ccodesdir,"output","GiRaFFE_NRPy_standalone"), "640 16 16", os.path.join(outdir,"out640.txt")) cmd.Execute("GiRaFFE_NRPy_standalone", "119 7 7","out119.txt") # cmd.Execute("GiRaFFE_NRPy_standalone", "119 119 119","out119.txt") # cmd.Execute("GiRaFFE_NRPy_standalone", "239 15 15","out239.txt") # !OMP_NUM_THREADS=1 valgrind --track-origins=yes -v ./GiRaFFE_NRPy_standalone 1280 32 32 # Return to root directory os.chdir(os.path.join("../../")) ###Output Compiling executable... (EXEC): Executing `gcc -O2 -g -fopenmp GiRaFFE_staggered_standalone_Ccodes/GiRaFFE_NRPy_standalone.c -o GiRaFFE_staggered_standalone_Ccodes/output\GiRaFFE_NRPy_standalone.exe -lm`... (BENCH): Finished executing in 19.296103715896606 seconds. Finished compilation. (EXEC): Executing `cmd /c GiRaFFE_NRPy_standalone 119 7 7`... (BENCH): Finished executing in 4.64326548576355 seconds. ###Markdown Now, we will load the data generated by the simulation and plot it in order to test for convergence. ###Code import numpy as np import matplotlib.pyplot as plt # Data_numer = np.loadtxt(os.path.join(Ccodesdir,"output","out119-00000001.txt")) # # Data_num_2 = np.loadtxt(os.path.join(Ccodesdir,"output","out239-00000080.txt")) # # Data_old = np.loadtxt("/home/penelson/OldCactus/Cactus/exe/ABE-GiRaFFEfood_1D_AlfvenWave/giraffe-grmhd_primitives_bi.x.asc") # # Data_o_2 = np.loadtxt("/home/penelson/OldCactus/Cactus/exe/ABE-GiRaFFEfood_1D_AlfvenWave_2/giraffe-grmhd_primitives_bi.x.asc") # # Data_numer = Data_old[5000:5125,11:15] # The column range is chosen for compatibility with the plotting script. # # Data_num_2 = Data_o_2[19600:19845,11:15] # The column range is chosen for compatibility with the plotting script. 
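# (In this cell the convergence-test loads above and below are left commented out. The active code
#  loads the new GiRaFFE_NRPy output as data_new -- which is what gets plotted -- and output from
#  the original GiRaFFE code as data_old, for optional comparison via the commented-out plot lines.)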
# Data_exact = np.loadtxt(os.path.join(Ccodesdir,"output","out119-00000001_exact.txt")) # # Data_exa_2 = np.loadtxt(os.path.join(Ccodesdir,"output","out239-00000080_exact.txt")) # predicted_order = 2.0 # column = 5 # # # plt.plot(Data_exact[2:-2,0],np.log2(np.absolute((Data_numer[2:-2,column]-Data_exact[2:-2,column])/\ # # # (Data_num_2[2:-2:2,column]-Data_exa_2[2:-2:2,column]))),'.') # plt.plot(Data_exact[:,0],Data_exact[:,column]) # plt.plot(Data_exact[:,0],Data_numer[:,column],'.') # # plt.xlim(-0.0,1.0) # # # plt.ylim(-1.0,5.0) # # # plt.ylim(-0.0005,0.0005) # # plt.xlabel("x") # # plt.ylabel("BU2") # plt.show() # data_list = Data_exact[:,column]-Data_numer[:,column] # print(np.sqrt(np.sum(np.square(data_list))/len(data_list))) # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 labels = ["x","BU0","BU1","BU2","AD0","AD1","AD2","StildeD0","StildeD1","StildeD2","ValenciavU0","ValenciavU1","ValenciavU2", "psi6Phi"] old_files = ["", "giraffe-grmhd_primitives_bi.x.asc","giraffe-grmhd_primitives_bi.x.asc","giraffe-grmhd_primitives_bi.x.asc", "giraffe-em_ax.x.asc","giraffe-em_ay.x.asc","giraffe-em_az.x.asc", # "cell_centered_Ai.txt","cell_centered_Ai.txt","cell_centered_Ai.txt", "giraffe-grmhd_conservatives.x.asc","giraffe-grmhd_conservatives.x.asc","giraffe-grmhd_conservatives.x.asc", "giraffe-grmhd_primitives_allbutbi.x.asc","giraffe-grmhd_primitives_allbutbi.x.asc","giraffe-grmhd_primitives_allbutbi.x.asc", "giraffe-em_psi6phi.x.asc"] column = 3 column_old = [0,12,13,14,12,12,12,12,13,14,12,13,14,12] # old_path = "/home/penelson/OldCactus/Cactus/exe/ABE-GiRaFFEfood_1D_AlfvenWave" old_path = "C:\\Users\\Patrick\\Documents\\nrpytutorial\\in_progress\\ABE-GiRaFFEfood_1D_AlfvenWave_rand\\" # perturb_path = "C:\\Users\\Patrick\\Documents\\nrpytutorial\\in_progress\\ABE-GiRaFFEfood_1D_AlfvenWave_perturb\\" new_path = os.path.join(Ccodesdir,"output") data_old = np.loadtxt(os.path.join(old_path,old_files[column])) # data_per = np.loadtxt(os.path.join(perturb_path,old_files[column])) n=2 data_old = data_old[n*125:n*125+125,:]# Select only the nth timestep data_new = np.loadtxt(os.path.join(new_path,"out119-00000002.txt")) # deltaA_old = data_old[125:250,:] - data_old[0:125,:] # data_new_t0 = np.loadtxt(os.path.join(new_path,"out119-00000000.txt")) # deltaA_new = data_new[:,:] - data_new_t0[:,:] plt.figure() # plt.plot(data_new[:,0],data_new[:,column]-data_old[:,column_old[column]]) # plt.plot(data_new[3:-3,0],data_per[3:-3,column_old[column]]-data_old[3:-3,column_old[column]]) # For perturbation testing! 
# plt.plot(data_new[:,0],data_old[:,column_old[column]]) plt.plot(data_new[:,0],data_new[:,column],'.') # plt.plot(data_new[:,0],data_new[:,column]-((3*np.sin(5*np.pi*data_new[:,0]/np.sqrt(1 - (-0.5)**2))/20 + 23/20)*(data_new[:,0]/2 + np.sqrt(1 - (-0.5)**2)/20 + np.absolute(data_new[:,0] + np.sqrt(1 - (-0.5)**2)/10)/2)*(-1e-100/2 + data_new[:,0]/2 - np.sqrt(1 - (-0.5)**2)/20 - np.absolute(-1e-100 + data_new[:,0] - np.sqrt(1 - (-0.5)**2)/10)/2)/((-1e-100 + data_new[:,0] - np.sqrt(1 - (-0.5)**2)/10)*(1e-100 + data_new[:,0] + np.sqrt(1 - (-0.5)**2)/10)) + 13*(data_new[:,0]/2 - np.sqrt(1 - (-0.5)**2)/20 + np.absolute(data_new[:,0] - np.sqrt(1 - (-0.5)**2)/10)/2)/(10*(1e-100 + data_new[:,0] - np.sqrt(1 - (-0.5)**2)/10)) + (-1e-100/2 + data_new[:,0]/2 + np.sqrt(1 - (-0.5)**2)/20 - np.absolute(-1e-100 + data_new[:,0] + np.sqrt(1 - (-0.5)**2)/10)/2)/(-1e-100 + data_new[:,0] + np.sqrt(1 - (-0.5)**2)/10))/np.sqrt(1 - (-0.5)**2)) # plt.plot(data_new[1:,0]-(data_new[0,0]-data_new[1,0])/2.0,(data_new[0:-1,column]+data_new[1:,column])/2,'.',label="GiRaFFE_NRPy+injected BU") # plt.plot(data_new[1:,0]-(data_new[0,0]-data_new[1,0])/2.0,data_old[1:,column_old[column]],label="old GiRaFFE") # -(data_old[0,9]-data_old[1,9])/2.0 # plt.plot(data_new[3:-3,0],deltaA_new[3:-3,column],'.') # plt.plot(data_new[3:-3,0],deltaA_old[3:-3,column_old[column]]-deltaA_new[3:-3,column]) # plt.xlim(-0.1,0.1) # plt.ylim(-0.2,0.2) # plt.legend() plt.xlabel(labels[0]) plt.ylabel(labels[column]) plt.show() # print(np.argmin(deltaA_old[3:-3,column_old[column]]-deltaA_new[3:-3,column])) ###Output _____no_output_____ ###Markdown This code will create an animation of the wave over time. ###Code # import matplotlib.pyplot as plt from matplotlib.pyplot import savefig from IPython.display import HTML import matplotlib.image as mgimg import glob import sys from matplotlib import animation cmd.delete_existing_files("out119-00*.png") globby = glob.glob(os.path.join(Ccodesdir,'output','out119-00*.txt')) file_list = [] for x in sorted(globby): file_list.append(x) number_of_files = int(len(file_list)/2) for timestep in range(number_of_files): fig = plt.figure() numer_filename = file_list[2*timestep] exact_filename = file_list[2*timestep+1] Numer = np.loadtxt(numer_filename) Exact = np.loadtxt(exact_filename) plt.title("Alfven Wave") plt.xlabel("x") plt.ylabel("BU2") plt.xlim(-0.5,0.5) plt.ylim(1.0,1.7) plt.plot(Numer[3:-3,0],Numer[3:-3,3],'.',label="Numerical") plt.plot(Exact[3:-3,0],Exact[3:-3,3],label="Exact") plt.legend() savefig(numer_filename+".png",dpi=150) plt.close(fig) sys.stdout.write("%c[2K" % 27) sys.stdout.write("Processing file "+numer_filename+"\r") sys.stdout.flush() ## VISUALIZATION ANIMATION, PART 2: Combine PNGs to generate movie ## # https://stackoverflow.com/questions/14908576/how-to-remove-frame-from-matplotlib-pyplot-figure-vs-matplotlib-figure-frame # https://stackoverflow.com/questions/23176161/animating-pngs-in-matplotlib-using-artistanimation # !rm -f GiRaFFE_NRPy-1D_tests.mp4 cmd.delete_existing_files("GiRaFFE_NRPy-1D_tests.mp4") fig = plt.figure(frameon=False) ax = fig.add_axes([0, 0, 1, 1]) ax.axis('off') myimages = [] for i in range(number_of_files): img = mgimg.imread(file_list[2*i]+".png") imgplot = plt.imshow(img) myimages.append([imgplot]) ani = animation.ArtistAnimation(fig, myimages, interval=100, repeat_delay=1000) plt.close() ani.save('GiRaFFE_NRPy-1D_tests.mp4', fps=5,dpi=150) %%HTML <video width="480" height="360" controls> <source src="GiRaFFE_NRPy-1D_tests.mp4" type="video/mp4"> </video> ###Output 
_____no_output_____ ###Markdown window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'UA-59152712-8'); Start-to-Finish Example: `GiRaFFE_NRPy` 1D tests Authors: Patrick Nelson & Terrence Pierre Jacques Adapted from [Start-to-Finish Example: Head-On Black Hole Collision](../Tutorial-Start_to_Finish-BSSNCurvilinear-Two_BHs_Collide.ipynb) This module compiles and runs code tests for all 1D initial data options available in GiRaFFE-NRPy+, evolving one-dimensional GRFFE waves. NRPy+ Source Code for this module: * Main python module for all 1D initial data: [GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_1D_tests.py](../../edit/in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_1D_tests.py) __Options:__ 1. [Fast Wave](Tutorial-GiRaFFEfood_NRPy_1D_tests-fast_wave.ipynb) 1. [Alfven Wave](Tutorial-GiRaFFEfood_NRPy_1D_alfven_wave.ipynb) 1. [Degenerate Alfven Wave](Tutorial-GiRaFFEfood_NRPy_1D_tests-degen_Alfven_wave.ipynb) 1. [Three Alfven Waves](Tutorial-GiRaFFEfood_NRPy_1D_tests-three_waves.ipynb) 1. [FFE Breakdown](Tutorial-GiRaFFEfood_NRPy_1D_tests-FFE_breakdown.ipynb)* [GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Afield_flux.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Afield_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy_staggered-Afield_flux.ipynb) Generates the expressions to find the flux term of the induction equation.* [GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_A2B.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_A2B.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy_staggered-A2B.ipynb) Generates the driver to compute the magnetic field from the vector potential/* [GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-BCs.ipynb) Generates the code to apply boundary conditions to the vector potential, scalar potential, and three-velocity.* [GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-C2P_P2C.ipynb) Generates the conservative-to-primitive and primitive-to-conservative solvers.* [GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Metric_Face_Values.ipynb) Generates code to interpolate metric gridfunctions to cell faces.* [GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-PPM.ipynb) Genearates code to reconstruct primitive variables on cell faces.* [GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Source_Terms.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Source_Terms.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy_staggered-Source_Terms.ipynb) Generates the expressions to find the flux term of the Poynting flux evolution equation.* [GiRaFFE_NRPy/Stilde_flux.py](../../edit/in_progress/GiRaFFE_NRPy/Stilde_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Stilde_flux.ipynb) Generates the expressions to find the flux term of the Poynting flux evolution equation.* [../GRFFE/equations.py](../../edit/GRFFE/equations.py) [\[**tutorial**\]](../Tutorial-GRFFE_Equations-Cartesian.ipynb) Generates code necessary to compute the source terms.* [../GRHD/equations.py](../../edit/GRHD/equations.py) [\[**tutorial**\]](../Tutorial-GRHD_Equations-Cartesian.ipynb) Generates code necessary to compute the source terms.Here we use NRPy+ to generate the C source code necessary to set 
up initial data for an Alfv&eacute;n wave (see [the original GiRaFFE paper](https://arxiv.org/pdf/1704.00599.pdf)). Then we use it to generate the RHS expressions for [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html) time integration based on the [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4). Table of Contents$$\label{toc}$$This notebook is organized as follows1. [Step 1](initializenrpy): Set core NRPy+ parameters for numerical grids1. [Step 2](grffe): Output C code for GRFFE evolution 1. [Step 2.a](mol): Output macros for Method of Lines timestepping1. [Step 3](gf_id): Import `GiRaFFEfood_NRPy` initial data modules1. [Step 4](cparams): Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h`1. [Step 5](mainc): `GiRaFFE_NRPy_standalone.c`: The Main C Code1. [Step 6](compileexec): Compile and execute C codes1. [Step 7](plots): Data Visualization1. [Step 8](latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file Step 1: Set up core functions and parameters for solving GRFFE equations \[Back to [top](toc)\]$$\label{setup}$$ ###Code import shutil, os, sys # Standard Python modules for multiplatform OS-level functions # First, we'll add the parent directory to the list of directories Python will check for modules. nrpy_dir_path = os.path.join("..") if nrpy_dir_path not in sys.path: sys.path.append(nrpy_dir_path) # Step P1: Import needed NRPy+ core modules: from outputC import outCfunction, lhrh # NRPy+: Core C code output module import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends import finite_difference as fin # NRPy+: Finite difference C code generation module import NRPy_param_funcs as par # NRPy+: Parameter interface import grid as gri # NRPy+: Functions having to do with numerical grids import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface # Step P2: Create C code output directory: Ccodesdir = os.path.join("GiRaFFE_staggered_1D_Tests_standalone_Ccodes/") # First remove C code output directory if it exists # Courtesy https://stackoverflow.com/questions/303200/how-do-i-remove-delete-a-folder-that-is-not-empty # !rm -r ScalarWaveCurvilinear_Playground_Ccodes shutil.rmtree(Ccodesdir, ignore_errors=True) # Then create a fresh directory cmd.mkdir(Ccodesdir) # Step P3: Create executable output directory: outdir = os.path.join(Ccodesdir,"output/") cmd.mkdir(outdir) # Step P5: Set timestepping algorithm (we adopt the Method of Lines) REAL = "double" # Best to use double here. default_CFL_FACTOR= 0.5 # (GETS OVERWRITTEN WHEN EXECUTED.) In pure axisymmetry (symmetry_axes = 2 below) 1.0 works fine. Otherwise 0.5 or lower. # Step P6: Set the finite differencing order to 2. 
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",4) thismodule = "Start_to_Finish-GiRaFFE_NRPy-1D_tests" TINYDOUBLE = par.Cparameters("REAL", thismodule, "TINYDOUBLE", 1e-100) import GiRaFFE_NRPy.GiRaFFE_NRPy_Main_Driver_staggered as md # par.set_paramsvals_value("GiRaFFE_NRPy.GiRaFFE_NRPy_C2P_P2C::enforce_speed_limit_StildeD = False") par.set_paramsvals_value("GiRaFFE_NRPy.GiRaFFE_NRPy_C2P_P2C::enforce_current_sheet_prescription = False") ###Output _____no_output_____ ###Markdown Step 2: Output C code for GRFFE evolution \[Back to [top](toc)\]$$\label{grffe}$$We will first write the C codes needed for GRFFE evolution. We have already written a module to generate all these codes and call the functions in the appropriate order, so we will import that here. We will take the slightly unusual step of doing this before we generate the initial data functions because the main driver module will register all the gridfunctions we need. It will also generate functions that, in addition to their normal spot in the MoL timestepping, will need to be called during the initial data step to make sure all the variables are appropriately filled in. All of this is handled with a single call to `GiRaFFE_NRPy_Main_Driver_generate_all()`, which will register gridfunctions, write all the C code kernels, and write the C code functions to call those. ###Code md.GiRaFFE_NRPy_Main_Driver_generate_all(Ccodesdir) ###Output Output C function calculate_StildeD0_source_term() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/RHSs/calculate_StildeD0_source_term.h Output C function calculate_StildeD1_source_term() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/RHSs/calculate_StildeD1_source_term.h Output C function calculate_StildeD2_source_term() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/RHSs/calculate_StildeD2_source_term.h Output C function calculate_Stilde_rhsD() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/RHSs/calculate_Stilde_rhsD.h Output C function GiRaFFE_NRPy_cons_to_prims() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/C2P/GiRaFFE_NRPy_cons_to_prims.h Output C function GiRaFFE_NRPy_prims_to_cons() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/C2P/GiRaFFE_NRPy_prims_to_cons.h ###Markdown Step 2.a: Output macros for Method of Lines timestepping \[Back to [top](toc)\]$$\label{mol}$$Now, we generate the code to implement the method of lines using the fourth-order Runge-Kutta algorithm. ###Code RK_method = "RK4" # Step 3: Generate Runge-Kutta-based (RK-based) timestepping code. # As described above the Table of Contents, this is a 3-step process: # 3.A: Evaluate RHSs (RHS_string) # 3.B: Apply boundary conditions (post_RHS_string, pt 1) import MoLtimestepping.C_Code_Generation as MoL from MoLtimestepping.RK_Butcher_Table_Dictionary import Butcher_dict RK_order = Butcher_dict[RK_method][1] cmd.mkdir(os.path.join(Ccodesdir,"MoLtimestepping/")) MoL.MoL_C_Code_Generation(RK_method, RHS_string = """ GiRaFFE_NRPy_RHSs(&params,auxevol_gfs,RK_INPUT_GFS,RK_OUTPUT_GFS);""", post_RHS_string = """ GiRaFFE_NRPy_post_step(&params,xx,auxevol_gfs,RK_OUTPUT_GFS,n+1);\n""", outdir = os.path.join(Ccodesdir,"MoLtimestepping/")) ###Output _____no_output_____ ###Markdown Step 3: Import `GiRaFFEfood_NRPy` initial data modules \[Back to [top](toc)\]$$\label{gf_id}$$With the preliminaries out of the way, we will write the C functions to set up initial data. There are two categories of initial data that must be set: the spacetime metric variables, and the GRFFE plasma variables. 
We will set up the spacetime first, namely the Minkowski spacetime. ###Code gammaDD = ixp.zerorank2(DIM=3) for i in range(3): for j in range(3): if i==j: gammaDD[i][j] = sp.sympify(1) # else: leave as zero betaU = ixp.zerorank1() # All should be 0 alpha = sp.sympify(1) # Description and options for this initial data desc = "Generate a flat spacetime metric." loopopts_id ="AllPoints" # we don't need to read coordinates for flat spacetime. # For testing: Also set inverse metric: gammaUU, unused_gammaDET = ixp.symm_matrix_inverter3x3(gammaDD) name = "set_initial_spacetime_metric_data" values_to_print = [ lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD00"),rhs=gammaDD[0][0]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD01"),rhs=gammaDD[0][1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD02"),rhs=gammaDD[0][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD11"),rhs=gammaDD[1][1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD12"),rhs=gammaDD[1][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD22"),rhs=gammaDD[2][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU0"),rhs=betaU[0]), lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU1"),rhs=betaU[1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU2"),rhs=betaU[2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","alpha"),rhs=alpha) ] outCfunction( outfile = os.path.join(Ccodesdir,name+".h"), desc=desc, name=name, params ="const paramstruct *params,REAL *xx[3],REAL *auxevol_gfs", body = fin.FD_outputC("returnstring",values_to_print,params="outCverbose=False"), loopopts = loopopts_id) ###Output Output C function set_initial_spacetime_metric_data() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/set_initial_spacetime_metric_data.h ###Markdown Now, we will write out the initials data function for the GRFFE variables. ###Code initial_data_dir = os.path.join(Ccodesdir,"InitialData/") cmd.mkdir(initial_data_dir) ID_opts = ["AlfvenWave", "ThreeAlfvenWaves", "DegenAlfvenWave", "FastWave", "FFEBD"] import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy as gid for initial_data in ID_opts: if initial_data=="AlfvenWave": gid.GiRaFFEfood_NRPy_generate_initial_data(ID_type = initial_data, stagger_enable = True) desc = "Generate Alfven wave 1D initial data for GiRaFFEfood_NRPy." elif initial_data=="ThreeAlfvenWaves": gid.GiRaFFEfood_NRPy_generate_initial_data(ID_type = "ThreeWaves", stagger_enable = True) desc = "Generate three Alfven wave 1D initial data for GiRaFFEfood_NRPy." elif initial_data=="DegenAlfvenWave": gid.GiRaFFEfood_NRPy_generate_initial_data(ID_type = initial_data, stagger_enable = True) desc = "Generate degenerate Alfven wave 1D initial data for GiRaFFEfood_NRPy." elif initial_data=="FastWave": gid.GiRaFFEfood_NRPy_generate_initial_data(ID_type = initial_data, stagger_enable = True) desc = "Generate fast wave 1D initial data for GiRaFFEfood_NRPy." elif initial_data=="FFEBD": gid.GiRaFFEfood_NRPy_generate_initial_data(ID_type = "FFE_Breakdown", stagger_enable = True) desc = "Generate FFE breakdown 1D initial data for GiRaFFEfood_NRPy." 
name = initial_data values_to_print = [ lhrh(lhs=gri.gfaccess("out_gfs","AD0"),rhs=gid.AD[0]), lhrh(lhs=gri.gfaccess("out_gfs","AD1"),rhs=gid.AD[1]), lhrh(lhs=gri.gfaccess("out_gfs","AD2"),rhs=gid.AD[2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU0"),rhs=gid.ValenciavU[0]), lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU1"),rhs=gid.ValenciavU[1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU2"),rhs=gid.ValenciavU[2]), # lhrh(lhs=gri.gfaccess("auxevol_gfs","BU0"),rhs=gid.BU[0]), # lhrh(lhs=gri.gfaccess("auxevol_gfs","BU1"),rhs=gid.BU[1]), # lhrh(lhs=gri.gfaccess("auxevol_gfs","BU2"),rhs=gid.BU[2]), lhrh(lhs=gri.gfaccess("out_gfs","psi6Phi"),rhs=sp.sympify(0)) ] outCfunction( outfile = os.path.join(initial_data_dir,name+".c"), desc=desc, name=name, params ="const paramstruct *params, REAL *xx[3], REAL *auxevol_gfs, REAL *out_gfs", body = fin.FD_outputC("returnstring",values_to_print,params="outCverbose=False"), rel_path_to_Cparams='../', loopopts ="AllPoints,Read_xxs") inital_data_body = """ const char *option1 = "AlfvenWave"; const char *option2 = "ThreeAlfvenWaves"; const char *option3 = "DegenAlfvenWave"; const char *option4 = "FastWave"; const char *option5 = "FFEBD"; if (strcmp(initial_data_option, option1) == 0) { AlfvenWave(params, xx, auxevol_gfs, out_gfs); } else if (strcmp(initial_data_option, option2) == 0) { ThreeAlfvenWaves(params, xx, auxevol_gfs, out_gfs); } else if (strcmp(initial_data_option, option3) == 0) { DegenAlfvenWave(params, xx, auxevol_gfs, out_gfs); } else if (strcmp(initial_data_option, option4) == 0) { FastWave(params, xx, auxevol_gfs, out_gfs); } else if (strcmp(initial_data_option, option5) == 0) { FFEBD(params, xx, auxevol_gfs, out_gfs); } else { printf("ERROR: Invalid choice of initial data."); exit(1); } """ name = "initial_data" desc = "Main initial data function." 
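# The dispatcher below #includes each of the five generated .c files and selects the
# requested routine with strcmp(); an unrecognized string prints an error message and exits.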
includes = ["AlfvenWave.c", "ThreeAlfvenWaves.c", "DegenAlfvenWave.c", "FastWave.c", "FFEBD.c"] outCfunction( outfile = os.path.join(initial_data_dir,name+".h"), desc=desc, name=name, params ="const char *initial_data_option, const paramstruct *restrict params,REAL *xx[3],REAL *restrict auxevol_gfs,REAL *restrict out_gfs", body = inital_data_body, includes = includes, prefunc="#include <string.h>", rel_path_to_Cparams='../', loopopts ="") ###Output Output C function AlfvenWave() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/AlfvenWave.c Output C function ThreeAlfvenWaves() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/ThreeAlfvenWaves.c Output C function DegenAlfvenWave() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/DegenAlfvenWave.c Output C function FastWave() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/FastWave.c Output C function FFEBD() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/FFEBD.c Output C function initial_data() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/initial_data.h ###Markdown Step 4: Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h` \[Back to [top](toc)\]$$\label{cparams}$$Based on declared NRPy+ Cparameters, first we generate `declare_Cparameters_struct.h`, `set_Cparameters_default.h`, and `set_Cparameters[-SIMD].h`.Then we output `free_parameters.h`, which sets initial data parameters, as well as grid domain & reference metric parameters, applying `domain_size` and `sinh_width`/`SymTP_bScale` (if applicable) as set above ###Code # Step 3.e: Output C codes needed for declaring and setting Cparameters; also set free_parameters.h # Step 3.e.i: Generate declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h par.generate_Cparameters_Ccodes(os.path.join(Ccodesdir)) # Step 3.e.ii: Set free_parameters.h with open(os.path.join(Ccodesdir,"free_parameters.h"),"w") as file: file.write("""// Override parameter defaults with values based on command line arguments and NGHOSTS. params.Nxx0 = atoi(argv[1]); params.Nxx1 = atoi(argv[2]); params.Nxx2 = atoi(argv[3]); params.Nxx_plus_2NGHOSTS0 = params.Nxx0 + 2*NGHOSTS; params.Nxx_plus_2NGHOSTS1 = params.Nxx1 + 2*NGHOSTS; params.Nxx_plus_2NGHOSTS2 = params.Nxx2 + 2*NGHOSTS; // Step 0d: Set up space and time coordinates // Step 0d.i: Declare \Delta x^i=dxx{0,1,2} and invdxx{0,1,2}, as well as xxmin[3] and xxmax[3]: const REAL xxmin[3] = {-1.3255,-0.085,-0.085}; const REAL xxmax[3] = { 1.6745, 0.115, 0.115}; params.dxx0 = (xxmax[0] - xxmin[0]) / ((REAL)params.Nxx0+1); params.dxx1 = (xxmax[1] - xxmin[1]) / ((REAL)params.Nxx1+1); params.dxx2 = (xxmax[2] - xxmin[2]) / ((REAL)params.Nxx2+1); printf("dxx0,dxx1,dxx2 = %.5e,%.5e,%.5e\\n",params.dxx0,params.dxx1,params.dxx2); params.invdx0 = 1.0 / params.dxx0; params.invdx1 = 1.0 / params.dxx1; params.invdx2 = 1.0 / params.dxx2; const int poison_grids = 0; // Standard GRFFE parameters: params.GAMMA_SPEED_LIMIT = 2000.0; params.diss_strength = 0.1; """) ###Output _____no_output_____ ###Markdown Step 4: Set up boundary condition functions for chosen singular, curvilinear coordinate system \[Back to [top](toc)\]$$\label{bc_functs}$$Next apply singular, curvilinear coordinate boundary conditions [as documented in the corresponding NRPy+ tutorial notebook](Tutorial-Start_to_Finish-Curvilinear_BCs.ipynb)...But, for the moment, we're actually just using this because it writes the file `gridfunction_defines.h`. 
###Code import CurviBoundaryConditions.CurviBoundaryConditions as cbcs cbcs.Set_up_CurviBoundaryConditions(os.path.join(Ccodesdir,"boundary_conditions/"),Cparamspath=os.path.join("../"),enable_copy_of_static_Ccodes=False) ###Output Wrote to file "GiRaFFE_staggered_1D_Tests_standalone_Ccodes/boundary_conditions/parity_conditions_symbolic_dot_products.h" Evolved parity: ( AD0:1, AD1:2, AD2:3, StildeD0:1, StildeD1:2, StildeD2:3, psi6Phi:0 ) AuxEvol parity: ( BU0:1, BU1:2, BU2:3, B_lU0:1, B_lU1:2, B_lU2:3, B_rU0:1, B_rU1:2, B_rU2:3, BstaggerU0:1, BstaggerU1:2, BstaggerU2:3, Bstagger_lU0:1, Bstagger_lU1:2, Bstagger_lU2:3, Bstagger_rU0:1, Bstagger_rU1:2, Bstagger_rU2:3, Stilde_flux_HLLED0:1, Stilde_flux_HLLED1:2, Stilde_flux_HLLED2:3, ValenciavU0:1, ValenciavU1:2, ValenciavU2:3, Valenciav_lU0:1, Valenciav_lU1:2, Valenciav_lU2:3, Valenciav_llU0:1, Valenciav_llU1:2, Valenciav_llU2:3, Valenciav_lrU0:1, Valenciav_lrU1:2, Valenciav_lrU2:3, Valenciav_rU0:1, Valenciav_rU1:2, Valenciav_rU2:3, Valenciav_rlU0:1, Valenciav_rlU1:2, Valenciav_rlU2:3, Valenciav_rrU0:1, Valenciav_rrU1:2, Valenciav_rrU2:3, alpha:0, alpha_face:0, betaU0:1, betaU1:2, betaU2:3, beta_faceU0:1, beta_faceU1:2, beta_faceU2:3, cmax_x:0, cmax_y:0, cmax_z:0, cmin_x:0, cmin_y:0, cmin_z:0, gammaDD00:4, gammaDD01:5, gammaDD02:6, gammaDD11:7, gammaDD12:8, gammaDD22:9, gamma_faceDD00:4, gamma_faceDD01:5, gamma_faceDD02:6, gamma_faceDD11:7, gamma_faceDD12:8, gamma_faceDD22:9, psi6_temp:0, psi6center:0 ) Wrote to file "GiRaFFE_staggered_1D_Tests_standalone_Ccodes/boundary_conditions/EigenCoord_Cart_to_xx.h" ###Markdown Step 5: `GiRaFFE_NRPy_standalone.c`: The Main C Code \[Back to [top](toc)\]$$\label{mainc}$$ ###Code # Part P0: Define REAL, set the number of ghost cells NGHOSTS (from NRPy+'s FD_CENTDERIVS_ORDER), # and set the CFL_FACTOR (which can be overwritten at the command line) with open(os.path.join(Ccodesdir,"GiRaFFE_NRPy_REAL__NGHOSTS__CFL_FACTOR.h"), "w") as file: file.write(""" // Part P0.a: Set the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER #define NGHOSTS """+str(3)+""" #define NGHOSTS_A2B """+str(2)+""" // Part P0.b: Set the numerical precision (REAL) to double, ensuring all floating point // numbers are stored to at least ~16 significant digits #define REAL """+REAL+""" // Part P0.c: Set the CFL Factor. Can be overwritten at command line. REAL CFL_FACTOR = """+str(default_CFL_FACTOR)+";") %%writefile $Ccodesdir/GiRaFFE_NRPy_standalone.c // Step P0: Define REAL and NGHOSTS; and declare CFL_FACTOR. This header is generated in NRPy+. #include "GiRaFFE_NRPy_REAL__NGHOSTS__CFL_FACTOR.h" #include "declare_Cparameters_struct.h" const int NSKIP_1D_OUTPUT = 1; // Step P1: Import needed header files #include "stdio.h" #include "stdlib.h" #include "math.h" #include "time.h" #include "stdint.h" // Needed for Windows GCC 6.x compatibility #ifndef M_PI #define M_PI 3.141592653589793238462643383279502884L #endif #ifndef M_SQRT1_2 #define M_SQRT1_2 0.707106781186547524400844362104849039L #endif // Step P2: Declare the IDX4S(gf,i,j,k) macro, which enables us to store 4-dimensions of // data in a 1D array. In this case, consecutive values of "i" // (all other indices held to a fixed value) are consecutive in memory, where // consecutive values of "j" (fixing all other indices) are separated by // Nxx_plus_2NGHOSTS0 elements in memory. Similarly, consecutive values of // "k" are separated by Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1 in memory, etc. 
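// For example, on the 299x4x4 grid run later in this notebook, Nxx_plus_2NGHOSTS0 = 305 and
// Nxx_plus_2NGHOSTS1 = Nxx_plus_2NGHOSTS2 = 10, so gridfunction g at point (i,j,k) sits at
// flat index i + 305*( j + 10*( k + 10*g ) ).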
#define IDX4S(g,i,j,k) \ ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) + Nxx_plus_2NGHOSTS2 * (g) ) ) ) #define IDX4ptS(g,idx) ( (idx) + (Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2) * (g) ) #define IDX3S(i,j,k) ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) ) ) ) #define LOOP_REGION(i0min,i0max, i1min,i1max, i2min,i2max) \ for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) #define LOOP_ALL_GFS_GPS(ii) _Pragma("omp parallel for") \ for(int (ii)=0;(ii)<Nxx_plus_2NGHOSTS_tot*NUM_EVOL_GFS;(ii)++) // Step P3: Set gridfunction macros #include "boundary_conditions/gridfunction_defines.h" // Step P4: Include the RHS, BC, and primitive recovery functions #include "GiRaFFE_NRPy_Main_Driver.h" // Step P5: Include the initial data functions #include "set_initial_spacetime_metric_data.h" #include "InitialData/initial_data.h" // main() function: // Step 0: Read command-line input, set up grid structure, allocate memory for gridfunctions, set up coordinates // Step 1: Set up scalar wave initial data // Step 2: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm, // applying quadratic extrapolation outer boundary conditions. // Step 3: Output relative error between numerical and exact solution. // Step 4: Free all allocated memory int main(int argc, const char *argv[]) { paramstruct params; #include "set_Cparameters_default.h" // Step 0a: Read command-line input, error out if nonconformant if(argc != 5 || atoi(argv[1]) < NGHOSTS || atoi(argv[2]) < NGHOSTS || atoi(argv[3]) < NGHOSTS) { printf("Error: Expected three command-line arguments: ./GiRaFFE_NRPy_standalone [Nx] [Ny] [Nz],\n"); printf("where Nx is the number of grid points in the x direction, and so forth.\n"); printf("Nx,Ny,Nz MUST BE larger than NGHOSTS (= %d)\n",NGHOSTS); exit(1); } // Step 0c: Set free parameters, overwriting Cparameters defaults // by hand or with command-line input, as desired. #include "free_parameters.h" #include "set_Cparameters-nopointer.h" // ... and then set up the numerical grid structure in time: const REAL t_final = 2.0; const REAL CFL_FACTOR = 0.5; // Set the CFL Factor // Step 0c: Allocate memory for gridfunctions const int Nxx_plus_2NGHOSTS_tot = Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2; // Step 0k: Allocate memory for gridfunctions #include "MoLtimestepping/RK_Allocate_Memory.h" REAL *restrict auxevol_gfs = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot); REAL *evol_gfs_exact = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot); REAL *auxevol_gfs_exact = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot); // For debugging, it can be useful to set everything to NaN initially. if(poison_grids) { for(int ii=0;ii<NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) { y_n_gfs[ii] = 1.0/0.0; y_nplus1_running_total_gfs[ii] = 1.0/0.0; //k_odd_gfs[ii] = 1.0/0.0; //k_even_gfs[ii] = 1.0/0.0; diagnostic_output_gfs[ii] = 1.0/0.0; evol_gfs_exact[ii] = 1.0/0.0; } for(int ii=0;ii<NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) { auxevol_gfs[ii] = 1.0/0.0; auxevol_gfs_exact[ii] = 1.0/0.0; } } // Step 0d: Set up coordinates: Set dx, and then dt based on dx_min and CFL condition // This is probably already defined above, but just in case... #ifndef MIN #define MIN(A, B) ( ((A) < (B)) ? (A) : (B) ) #endif REAL dt = CFL_FACTOR * MIN(dxx0,MIN(dxx1,dxx2)); // CFL condition int Nt = (int)(t_final / dt + 0.5); // The number of points in time. 
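// (Concretely: with t_final = 2.0, CFL_FACTOR = 0.5, and the 299x4x4 grid run below,
//  dt = 0.5*0.01 = 0.005 and Nt = 400.)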
//Add 0.5 to account for C rounding down integers. // Step 0e: Set up cell-centered Cartesian coordinate grids REAL *xx[3]; xx[0] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS0); xx[1] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS1); xx[2] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS2); for(int j=0;j<Nxx_plus_2NGHOSTS0;j++) xx[0][j] = xxmin[0] + (j-NGHOSTS+1)*dxx0; for(int j=0;j<Nxx_plus_2NGHOSTS1;j++) xx[1][j] = xxmin[1] + (j-NGHOSTS+1)*dxx1; for(int j=0;j<Nxx_plus_2NGHOSTS2;j++) xx[2][j] = xxmin[2] + (j-NGHOSTS+1)*dxx2; // Step 1: Set up initial data to be exact solution at time=0: REAL time = 0.0; set_initial_spacetime_metric_data(&params, xx, auxevol_gfs); const char *initial_data_option = argv[4]; initial_data(initial_data_option, &params, xx, auxevol_gfs, y_n_gfs); // Fill in the remaining quantities GiRaFFE_compute_B_and_Bstagger_from_A(&params, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD00GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD01GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD02GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD11GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD12GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD22GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*PSI6_TEMPGF, /* Temporary storage,overwritten */ y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD0GF, y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD1GF, y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD2GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU0GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU1GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU2GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU0GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU1GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU2GF); //override_BU_with_old_GiRaFFE(&params,auxevol_gfs,0); GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs,y_n_gfs); // Extra stack, useful for debugging: GiRaFFE_NRPy_cons_to_prims(&params,xx,auxevol_gfs,y_n_gfs); for(int n=0;n<=Nt;n++) { // Main loop to progress forward in time. //for(int n=0;n<=1;n++) { // Main loop to progress forward in time. // Step 1a: Set current time to correct value & compute exact solution time = ((REAL)n)*dt; /* Step 2: Validation: Output relative error between numerical and exact solution, */ //if(time == 0.0 || time == 0.5 || time == 1.0 || time == 2.0 || time == 0.02 || time == 0.56) { if(1) { // Step 2c: Output relative error between exact & numerical at center of grid. const int i0mid=Nxx_plus_2NGHOSTS0/2; const int i1mid=Nxx_plus_2NGHOSTS1/2; const int i2mid=Nxx_plus_2NGHOSTS2/2; char filename[100]; sprintf(filename,"out%d__%s-%08d.txt", Nxx0, initial_data_option, n); FILE *out2D = fopen(filename, "w"); for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) { const int idx = IDX3S(i0,i1mid,i2mid); fprintf(out2D,"%.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e\n", xx[0][i0], auxevol_gfs[IDX4ptS(BU0GF,idx)],auxevol_gfs[IDX4ptS(BU1GF,idx)],auxevol_gfs[IDX4ptS(BU2GF,idx)], y_n_gfs[IDX4ptS(AD0GF,idx)],y_n_gfs[IDX4ptS(AD1GF,idx)],y_n_gfs[IDX4ptS(AD2GF,idx)], y_n_gfs[IDX4ptS(STILDED0GF,idx)],y_n_gfs[IDX4ptS(STILDED1GF,idx)],y_n_gfs[IDX4ptS(STILDED2GF,idx)], auxevol_gfs[IDX4ptS(VALENCIAVU0GF,idx)],auxevol_gfs[IDX4ptS(VALENCIAVU1GF,idx)],auxevol_gfs[IDX4ptS(VALENCIAVU2GF,idx)], y_n_gfs[IDX4ptS(PSI6PHIGF,idx)], time); } fclose(out2D); // For convergence testing, we'll shift the grid x -> x-1 and output initial data again, giving the exact solution. 
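// (The shift applied below is by -mu_AW*time, the Alfven-wave speed times the elapsed time,
//  so re-evaluating the t=0 initial data on the shifted grid reproduces the exact Alfven-wave
//  solution at time t; the grid is restored immediately afterward.)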
LOOP_REGION(0,Nxx_plus_2NGHOSTS0,0,1,0,1) { xx[0][i0] += -mu_AW*time; //xx[0][i0] += -time; } set_initial_spacetime_metric_data(&params,xx,auxevol_gfs_exact); initial_data(initial_data_option, &params,xx,auxevol_gfs_exact,evol_gfs_exact); // Fill in the remaining quantities GiRaFFE_compute_B_and_Bstagger_from_A(&params, auxevol_gfs_exact+Nxx_plus_2NGHOSTS_tot*GAMMADD00GF, auxevol_gfs_exact+Nxx_plus_2NGHOSTS_tot*GAMMADD01GF, auxevol_gfs_exact+Nxx_plus_2NGHOSTS_tot*GAMMADD02GF, auxevol_gfs_exact+Nxx_plus_2NGHOSTS_tot*GAMMADD11GF, auxevol_gfs_exact+Nxx_plus_2NGHOSTS_tot*GAMMADD12GF, auxevol_gfs_exact+Nxx_plus_2NGHOSTS_tot*GAMMADD22GF, auxevol_gfs_exact+Nxx_plus_2NGHOSTS_tot*PSI6_TEMPGF, /* Temporary storage,overwritten */ evol_gfs_exact+Nxx_plus_2NGHOSTS_tot*AD0GF, evol_gfs_exact+Nxx_plus_2NGHOSTS_tot*AD1GF, evol_gfs_exact+Nxx_plus_2NGHOSTS_tot*AD2GF, auxevol_gfs_exact+Nxx_plus_2NGHOSTS_tot*BU0GF, auxevol_gfs_exact+Nxx_plus_2NGHOSTS_tot*BU1GF, auxevol_gfs_exact+Nxx_plus_2NGHOSTS_tot*BU2GF, auxevol_gfs_exact+Nxx_plus_2NGHOSTS_tot*BSTAGGERU0GF, auxevol_gfs_exact+Nxx_plus_2NGHOSTS_tot*BSTAGGERU1GF, auxevol_gfs_exact+Nxx_plus_2NGHOSTS_tot*BSTAGGERU2GF); GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs_exact,evol_gfs_exact); // And now, we'll set the grid back to rights. LOOP_REGION(0,Nxx_plus_2NGHOSTS0,0,1,0,1) { xx[0][i0] -= -mu_AW*time; //xx[0][i0] -= -time; } sprintf(filename,"out%d-%08d_exact.txt",Nxx0,n); FILE *out2D_exact = fopen(filename, "w"); for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) { const int idx = IDX3S(i0,i1mid,i2mid); fprintf(out2D_exact,"%.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e\n", xx[0][i0], auxevol_gfs_exact[IDX4ptS(BU0GF,idx)],auxevol_gfs_exact[IDX4ptS(BU1GF,idx)],auxevol_gfs_exact[IDX4ptS(BU2GF,idx)], evol_gfs_exact[IDX4ptS(AD0GF,idx)],evol_gfs_exact[IDX4ptS(AD1GF,idx)],evol_gfs_exact[IDX4ptS(AD2GF,idx)], evol_gfs_exact[IDX4ptS(STILDED0GF,idx)],evol_gfs_exact[IDX4ptS(STILDED1GF,idx)],evol_gfs_exact[IDX4ptS(STILDED2GF,idx)], auxevol_gfs_exact[IDX4ptS(VALENCIAVU0GF,idx)],auxevol_gfs_exact[IDX4ptS(VALENCIAVU1GF,idx)],auxevol_gfs_exact[IDX4ptS(VALENCIAVU2GF,idx)], evol_gfs_exact[IDX4ptS(PSI6PHIGF,idx)]); } fclose(out2D_exact); } // Step 3: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm, // applying quadratic extrapolation outer boundary conditions. // Step 3.b: Step forward one timestep (t -> t+dt) in time using // chosen RK-like MoL timestepping algorithm #include "MoLtimestepping/RK_MoL.h" } // End main loop to progress forward in time. // Step 4: Free all allocated memory #include "MoLtimestepping/RK_Free_Memory.h" free(auxevol_gfs); free(auxevol_gfs_exact); free(evol_gfs_exact); for(int i=0;i<3;i++) free(xx[i]); return 0; } ###Output Writing GiRaFFE_staggered_1D_Tests_standalone_Ccodes//GiRaFFE_NRPy_standalone.c ###Markdown Step 6: Compile generated C codes & perform GRFFE simulations \[Back to [top](toc)\]$$\label{compileexec}$$To aid in the cross-platform-compatible (with Windows, MacOS, & Linux) compilation and execution, we make use of `cmdline_helper` [(**Tutorial**)](Tutorial-cmdline_helper.ipynb). 
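If you would like to see what that helper amounts to here, the short sketch below mirrors the gcc invocation and the command-line arguments that appear in the output of the next cell. It is only an illustration (it assumes a Unix-like system with an OpenMP-capable gcc on the path and reuses the `Ccodesdir` variable defined earlier in this notebook), not a replacement for `cmd.C_compile`/`cmd.Execute`.
###Code
# Illustrative only: hand-rolled equivalent of cmd.C_compile + cmd.Execute, using the same
# gcc flags and command-line arguments shown in the next cell's output (assumes gcc with OpenMP).
import os, subprocess
src = os.path.join(Ccodesdir, "GiRaFFE_NRPy_standalone.c")
exe = os.path.abspath(os.path.join(Ccodesdir, "output", "GiRaFFE_NRPy_standalone"))
subprocess.run(["gcc", "-std=gnu99", "-Ofast", "-fopenmp", "-march=native", "-funroll-loops",
                src, "-o", exe, "-lm"], check=True)
# Run from the output/ directory so the out299__*.txt data files land there,
# just as they do when cmd.Execute is called below.
subprocess.run([exe, "299", "4", "4", "AlfvenWave"], check=True, cwd=os.path.dirname(exe))
###Output
_____no_output_____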
###Code cmd.C_compile(os.path.join(Ccodesdir,"GiRaFFE_NRPy_standalone.c"), os.path.join(Ccodesdir,"output","GiRaFFE_NRPy_standalone"),compile_mode="optimized") # Change to output directory os.chdir(outdir) # Clean up existing output files cmd.delete_existing_files("out*.txt") cmd.delete_existing_files("out*.png") # ID options are: "AlfvenWave", "ThreeAlfvenWaves", "DegenAlfvenWave", "FastWave", "FFEBD" # for opt in ID_opts: opt = "AlfvenWave" cmd.Execute("GiRaFFE_NRPy_standalone", "299 4 4 "+opt, "out_298"+opt+".txt") # cmd.Execute("GiRaFFE_NRPy_standalone", "1280 9 9 "+opt, "out_1280"+opt+".txt") # cmd.Execute("GiRaFFE_NRPy_standalone", "1280 32 32 "+opt, "out_"+opt+".txt") # cmd.Execute("GiRaFFE_NRPy_standalone", "149 9 9 AlfvenWave","out149.txt") # Return to root directory os.chdir(os.path.join("../../")) ###Output Compiling executable... (EXEC): Executing `gcc -std=gnu99 -Ofast -fopenmp -march=native -funroll-loops GiRaFFE_staggered_1D_Tests_standalone_Ccodes/GiRaFFE_NRPy_standalone.c -o GiRaFFE_staggered_1D_Tests_standalone_Ccodes/output/GiRaFFE_NRPy_standalone -lm`... (BENCH): Finished executing in 3.2181918621063232 seconds. Finished compilation. (EXEC): Executing `taskset -c 0,1,2,3 ./GiRaFFE_NRPy_standalone 299 4 4 AlfvenWave`... (BENCH): Finished executing in 18.254799842834473 seconds. ###Markdown Step 7: Data Visualization \[Back to [top](toc)\]$$\label{plots}$$Now we plot the data and recreate figure 1 from the [GiRaFFE paper](https://arxiv.org/pdf/1704.00599.pdf). We reconstruct the electric field via$$E_i = -\epsilon_{ijk}v^j B^k$$the `calc_E` function below. We also calculate the FFE condition $B^2 - E^2$ below using the `calc_Bsquared_minus_Esquared` function. ###Code eDDD = ixp.LeviCivitaSymbol_dim3_rank3() def calc_E(data): VU0 = data[:, 10] VU1 = data[:, 11] VU2 = data[:, 12] BU0 = data[:, 1] BU1 = data[:, 2] BU2 = data[:, 3] VU = [VU0, VU1, VU2] BU = [BU0, BU1, BU2] ED = np.zeros((VU0.size, 3)) for i in range(3): for j in range(3): for k in range(3): ED[:,i] = ED[:,i] - eDDD[i][j][k]*VU[j]*BU[k] return ED def calc_Bsquared_minus_Esquared(data): EU = calc_E(data) BU0 = data[:, 1] BU1 = data[:, 2] BU2 = data[:, 3] return (BU0**2 + BU1**2 + BU2**2) - (EU[:,0]**2 + EU[:,1]**2 + EU[:,2]**2) import numpy as np import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import matplotlib as mpl # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 labels = ["x","BU0","BU1","BU2","AD0","AD1","AD2","StildeD0","StildeD1","StildeD2","ValenciavU0","ValenciavU1","ValenciavU2", "psi6Phi"] fig = plt.figure(figsize=(6, 15)) # spec = mpl.gridspec.GridSpec(ncols=6, nrows=2,wspace=0.65, hspace=0.4) # 6 columns evenly divides both 2 & 3 # ax1 = fig.add_subplot(spec[0,0:2]) # row 0 with axes spanning 2 cols on evens # ax2 = fig.add_subplot(spec[0,2:4]) # ax3 = fig.add_subplot(spec[0,4:]) # ax4 = fig.add_subplot(spec[1,1:3]) # row 0 with axes spanning 2 cols on odds # ax5 = fig.add_subplot(spec[1,3:5]) gs = gridspec.GridSpec(nrows=5, ncols=1, hspace=0.5) ax1 = fig.add_subplot(gs[0, 0]) ax2 = fig.add_subplot(gs[1, 0]) ax3 = fig.add_subplot(gs[2, 0]) ax4 = fig.add_subplot(gs[3, 0]) ax5 = fig.add_subplot(gs[4, 0]) Data_num_Fast_A = np.loadtxt(os.path.join(Ccodesdir,"output","out299__FastWave-00000000.txt")) Data_num_Fast_B = np.loadtxt(os.path.join(Ccodesdir,"output","out299__FastWave-00000100.txt")) E_Fast_A = calc_E(Data_num_Fast_A) E_Fast_B = calc_E(Data_num_Fast_B) ax1.scatter(Data_num_Fast_A[:,0], np.abs(E_Fast_A[:,2]), s=1,label = 't = 0') ax1.plot(Data_num_Fast_B[:,0], np.abs(E_Fast_B[:,2]), 
'k-', label = 't = 0.5') ax1.set_xlim(-0.5, 1.5) ax1.set_ylim(0.6) ax1.text(0.95, 0.01, 'Fast Wave', verticalalignment='bottom', horizontalalignment='right', transform=ax1.transAxes, color='black', fontsize=14) ax1.set_xlabel('x') ax1.set_ylabel(r'$|E^z|$') ax1.legend() Data_num_Alf_A = np.loadtxt(os.path.join(Ccodesdir,"output","out299__AlfvenWave-00000000.txt")) Data_num_Alf_B = np.loadtxt(os.path.join(Ccodesdir,"output","out299__AlfvenWave-00000400.txt")) ax2.scatter(Data_num_Alf_A[:,0], Data_num_Alf_A[:,3], s=1, label = 't = 0') ax2.plot(Data_num_Alf_B[:,0], Data_num_Alf_B[:,3], 'k-', label = 't = 2.0') ax2.set_xlim(-1.5, 1.5) ax2.set_ylim(1.1) ax2.text(0.95, 0.01, 'Alfven Wave', verticalalignment='bottom', horizontalalignment='right', transform=ax2.transAxes, color='black', fontsize=14) ax2.set_xlabel('x') ax2.set_ylabel(r'$B^z$') ax2.legend(loc='center right') Data_num_DegenAlf_A = np.loadtxt(os.path.join(Ccodesdir,"output","out299__DegenAlfvenWave-00000000.txt")) Data_num_DegenAlf_B = np.loadtxt(os.path.join(Ccodesdir,"output","out299__DegenAlfvenWave-00000200.txt")) E_DegenAlf_A = calc_E(Data_num_DegenAlf_A) E_DegenAlf_B = calc_E(Data_num_DegenAlf_B) ax3.scatter(Data_num_DegenAlf_A[:,0], E_DegenAlf_A[:,1], s=1, label = 't = 0') ax3.plot(Data_num_DegenAlf_B[:,0], E_DegenAlf_B[:,1], 'k-', label = 't = 1.0') ax3.set_xlim(-1.5, 1.5) ax3.set_ylim(-1.35) ax3.text(0.95, 0.01, 'Deg. Alfven Wave', verticalalignment='bottom', horizontalalignment='right', transform=ax3.transAxes, color='black', fontsize=14) ax3.set_xlabel('x') ax3.set_ylabel(r'$E^y$') ax3.legend() # Data_num_ThreeAlf_A = np.loadtxt(os.path.join(Ccodesdir,"output","out149__ThreeAlfvenWaves-00000000.txt")) Data_num_ThreeAlf_B = np.loadtxt(os.path.join(Ccodesdir,"output","out299__ThreeAlfvenWaves-00000112.txt")) # ax2.plot(Data_num_ThreeAlf_A[:,0], Data_num_ThreeAlf_A[:,2], 'k-') ax4.scatter(Data_num_ThreeAlf_B[:,0], Data_num_ThreeAlf_B[:,2], s=1, label = 't = 0.56') ax4.set_xlim(-1.0, 1.0) # ax4.set_ylim() ax4.text(0.95, 0.01, 'Three Waves', verticalalignment='bottom', horizontalalignment='right', transform=ax4.transAxes, color='black', fontsize=14) ax4.set_xlabel('x') ax4.set_ylabel(r'$B^y$') ax4.legend(loc='center') Data_num_FFEBD_A = np.loadtxt(os.path.join(Ccodesdir,"output","out299__FFEBD-00000000.txt")) Data_num_FFEBD_B = np.loadtxt(os.path.join(Ccodesdir,"output","out299__FFEBD-00000004.txt")) B2mE2_A = calc_Bsquared_minus_Esquared(Data_num_FFEBD_A) B2mE2_B = calc_Bsquared_minus_Esquared(Data_num_FFEBD_B) ax5.scatter(Data_num_FFEBD_A[:,0], B2mE2_A, s=1, label = 't = 0') ax5.plot(Data_num_FFEBD_B[:,0], B2mE2_B, 'k-', label = 't = 0.02') ax5.set_xlim(-0.4, 0.6) ax5.text(0.95, 0.01, 'FFE Breakdown', verticalalignment='bottom', horizontalalignment='right', transform=ax5.transAxes, color='black', fontsize=14) ax5.set_xlabel('x') ax5.set_ylabel(r'$B^2 - E^2$') ax5.legend() plt.savefig(os.path.join(Ccodesdir,"output","NRPy-GiRaFFE"), dpi=800, bbox_inches="tight") plt.close(fig) img1 = plt.imread(os.path.join(Ccodesdir,"output","NRPy-GiRaFFE.png")) img2 = plt.imread(os.path.join("GiRaFFE_NRPy/example_par_files/figure1_GiRaFFE_paper.png")) NUM_ROWS = 1 IMGs_IN_ROW = 2 f, ax = plt.subplots(NUM_ROWS, IMGs_IN_ROW, figsize=(28,18)) plt.subplots_adjust(wspace=0.05) plt.axis('off') ax[0].imshow(img1) ax[1].imshow(img2) ax[0].set_title('image 1') ax[1].set_title('image 2') # title = 'side by side view of images' # f.suptitle(title, fontsize=16) plt.tight_layout() # plt.xticks([]) # plt.yticks([]) plt.show() # import matplotlib.pyplot as 
plt from matplotlib.pyplot import savefig from IPython.display import HTML import matplotlib.image as mgimg import glob import sys from matplotlib import animation
cmd.delete_existing_files(os.path.join(Ccodesdir,'output',"out299*.png"))
globby = glob.glob(os.path.join(Ccodesdir,'output','out299-*.txt'))
file_list_exact = []
for x in sorted(globby): file_list_exact.append(x)
globby = glob.glob(os.path.join(Ccodesdir,'output','out299__AlfvenWave*.txt'))
file_list_approx = []
for x in sorted(globby): file_list_approx.append(x)
number_of_files = int(len(file_list_exact))
for timestep in range(number_of_files): fig = plt.figure() numer_filename = file_list_approx[timestep] exact_filename = file_list_exact[timestep] Numer = np.loadtxt(numer_filename) Exact = np.loadtxt(exact_filename) plt.title("Alfvén Wave") plt.xlabel("x") plt.ylabel("$B^z$") # plt.xlim(-0.5,0.5) # plt.ylim(1.0,1.7) plt.plot(Numer[:,0],Numer[:,3],'.',label="Numerical") plt.plot(Exact[:,0],Exact[:,3],label="Exact") plt.legend() savefig(numer_filename+".png",dpi=150) plt.close(fig) sys.stdout.write("%c[2K" % 27) sys.stdout.write("Processing file "+numer_filename+"\r") sys.stdout.flush()
## VISUALIZATION ANIMATION, PART 2: Combine PNGs to generate movie ##
# https://stackoverflow.com/questions/14908576/how-to-remove-frame-from-matplotlib-pyplot-figure-vs-matplotlib-figure-frame
# https://stackoverflow.com/questions/23176161/animating-pngs-in-matplotlib-using-artistanimation
# !rm -f GiRaFFE_NRPy-1D_tests.mp4
cmd.delete_existing_files("GiRaFFE_NRPy-1D_tests.mp4")
fig = plt.figure(frameon=False) ax = fig.add_axes([0, 0, 1, 1]) ax.axis('off') myimages = [] from math import floor for i in range(floor(number_of_files/6)): img = mgimg.imread(file_list_approx[6*i]+".png") imgplot = plt.imshow(img) myimages.append([imgplot]) ani = animation.ArtistAnimation(fig, myimages, interval=100, repeat_delay=1000) plt.close() ani.save('GiRaFFE_NRPy-1D_tests.mp4', fps=5,dpi=150)
%%HTML <video width="480" height="360" controls> <source src="GiRaFFE_NRPy-1D_tests.mp4" type="video/mp4"> </video>
###Output
_____no_output_____
###Markdown
Step 8: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename[Tutorial-Start_to_Finish-GiRaFFE_NRPy-1D_tests-staggered.pdf](Tutorial-Start_to_Finish-GiRaFFE_NRPy-1D_tests-staggered.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
###Code
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-Start_to_Finish-GiRaFFE_NRPy-1D_tests-staggered",location_of_template_file=os.path.join(".."))
###Output
_____no_output_____
###Markdown
Start-to-Finish Example: $\text{GiRaFFE_HO}$ 1D tests Author: Patrick Nelson Adapted from [Start-to-Finish Example: Head-On Black Hole Collision](../Tutorial-Start_to_Finish-BSSNCurvilinear-Two_BHs_Collide.ipynb) This module implements a basic GRFFE code to evolve one-dimensional GRFFE waves. 
NRPy+ Source Code for this module: * [GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Exact_Wald.py](../../edit/in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Exact_Wald.py) [\[**tutorial**\]](Tutorial-GiRaFFEfood_NRPy_Exact_Wald.ipynb) Generates Exact Wald initial data* [GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Aligned_Rotator.py](../../edit/in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Aligned_Rotator.py) [\[**tutorial**\]](Tutorial-GiRaFFEfood_NRPy_Aligned_Rotator.ipynb) Generates Aligned Rotator initial data* [GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_1D_tests.py](../../edit/in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_1D_tests.py) [\[**tutorial**\]](Tutorial-GiRaFFEfood_NRPy_1D_tests.ipynb) Generates Alfv&eacute;n Wave initial data.* [GiRaFFE_NRPy/Afield_flux.py](../../edit/in_progress/GiRaFFE_NRPy/Afield_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Afield_flux.ipynb) Generates the expressions to find the flux term of the induction equation.* [GiRaFFE_NRPy/GiRaFFE_NRPy_A2B.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_A2B.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Afield_flux.ipynb) Generates the driver to compute the magnetic field from the vector potential/* [GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-BCs.ipynb) Generates the code to apply boundary conditions to the vector potential, scalar potential, and three-velocity.* [GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-C2P_P2C.ipynb) Generates the conservative-to-primitive and primitive-to-conservative solvers.* [GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Metric_Face_Values.ipynb) Generates code to interpolate metric gridfunctions to cell faces.* [GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-PPM.ipynb) Genearates code to reconstruct primitive variables on cell faces.* [GiRaFFE_NRPy/Stilde_flux.py](../../edit/in_progress/GiRaFFE_NRPy/Stilde_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Stilde_flux.ipynb) Generates the expressions to find the flux term of the Poynting flux evolution equation.* [../GRFFE/equations.py](../../edit/GRFFE/equations.py) [\[**tutorial**\]](../Tutorial-GRFFE_Equations-Cartesian.ipynb) Generates code necessary to compute the source terms.* [../GRHD/equations.py](../../edit/GRHD/equations.py) [\[**tutorial**\]](../Tutorial-GRHD_Equations-Cartesian.ipynb) Generates code necessary to compute the source terms.Here we use NRPy+ to generate the C source code necessary to set up initial data for an Alfv&eacute;n wave (see [the original GiRaFFE paper](https://arxiv.org/pdf/1704.00599.pdf)). Then we use it to generate the RHS expressions for [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html) time integration based on the [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4).The entire algorithm is outlined below, with NRPy+-based components highlighted in green.1. Allocate memory for gridfunctions, including temporary storage for the RK4 time integration.1. (**Step 2** below) Set gridfunction values to initial data (**[documented in previous module](Tutorial-GiRaFFEfood_HO_1D_tests.ipynb)**).1. 
Evolve the initial data forward in time using RK4 time integration. At each RK4 substep, do the following: 1. (**Step 3A** below) Evaluate GRFFE RHS expressions. 1. (**Step 4** below) Apply singular, curvilinear coordinate boundary conditions [*a la* the SENR/NRPy+ paper](https://arxiv.org/abs/1712.07658)1. (**Step 3B** below) At the end of each iteration in time, output the FFE variables. (This is in Step 3B, because Step 4 requires that *all* gridfunctions be defined.)1. Repeat above steps at two numerical resolutions to confirm convergence to the expected value. Table of Contents$$\label{toc}$$This notebook is organized as follows1. [Step 1](initializenrpy): Set core NRPy+ parameters for numerical grids1. [Step 2](grffe): Output C code for GRFFE evolution1. [Step 3](gf_id): Import `GiRaFFEfood_NRPy` initial data modules1. [Step 4](cparams): Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h`1. [Step 5](mainc): `GiRaFFE_NRPy_standalone.c`: The Main C Code Step 1: Set up core functions and parameters for solving GRFFE equations \[Back to [top](toc)\]$$\label{setup}$$ ###Code import shutil, os, sys # Standard Python modules for multiplatform OS-level functions # First, we'll add the parent directory to the list of directories Python will check for modules. nrpy_dir_path = os.path.join("..") if nrpy_dir_path not in sys.path: sys.path.append(nrpy_dir_path) # Step P1: Import needed NRPy+ core modules: from outputC import outCfunction, lhrh # NRPy+: Core C code output module import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends import finite_difference as fin # NRPy+: Finite difference C code generation module import NRPy_param_funcs as par # NRPy+: Parameter interface import grid as gri # NRPy+: Functions having to do with numerical grids import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface # Step P2: Create C code output directory: Ccodesdir = os.path.join("GiRaFFE_staggered_standalone_Ccodes/") # First remove C code output directory if it exists # Courtesy https://stackoverflow.com/questions/303200/how-do-i-remove-delete-a-folder-that-is-not-empty # !rm -r ScalarWaveCurvilinear_Playground_Ccodes shutil.rmtree(Ccodesdir, ignore_errors=True) # Then create a fresh directory cmd.mkdir(Ccodesdir) # Step P3: Create executable output directory: outdir = os.path.join(Ccodesdir,"output/") cmd.mkdir(Ccodesdir) cmd.mkdir(outdir) # Step P5: Set timestepping algorithm (we adopt the Method of Lines) REAL = "double" # Best to use double here. default_CFL_FACTOR= 0.5 # (GETS OVERWRITTEN WHEN EXECUTED.) In pure axisymmetry (symmetry_axes = 2 below) 1.0 works fine. Otherwise 0.5 or lower. # Step P6: Set the finite differencing order to 2. par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",2) thismodule = "Start_to_Finish-GiRaFFE_NRPy-1D_tests" TINYDOUBLE = par.Cparameters("REAL", thismodule, "TINYDOUBLE", 1e-100) import GiRaFFE_NRPy.GiRaFFE_NRPy_Main_Driver_staggered as md # par.set_paramsvals_value("GiRaFFE_NRPy.GiRaFFE_NRPy_C2P_P2C::enforce_speed_limit_StildeD = False") par.set_paramsvals_value("GiRaFFE_NRPy.GiRaFFE_NRPy_C2P_P2C::enforce_current_sheet_prescription = False") ###Output _____no_output_____ ###Markdown Step 2: Output C code for GRFFE evolution \[Back to [top](toc)\]$$\label{grffe}$$We will first write the the C codes needed for GRFFE evolution. 
We have already written a module to generate all these codes in order and call the functions in the appropriate order, so we will import that here. We will take the slightly unusual step of doing this before we generate the initial data functions because the main driver module will register all the gridfunctions we need. We will also include the file early on, because it will generate functions that, in addition to their normal spot in the MoL timestepping, will need to be called during the initial data step to make sure all the variables are appropriately filled in. ###Code md.GiRaFFE_NRPy_Main_Driver_generate_all(Ccodesdir) RK_method = "Euler" # Step 3: Generate Runge-Kutta-based (RK-based) timestepping code. # As described above the Table of Contents, this is a 3-step process: # 3.A: Evaluate RHSs (RHS_string) # 3.B: Apply boundary conditions (post_RHS_string, pt 1) import MoLtimestepping.C_Code_Generation as MoL from MoLtimestepping.RK_Butcher_Table_Dictionary import Butcher_dict RK_order = Butcher_dict[RK_method][1] cmd.mkdir(os.path.join(Ccodesdir,"MoLtimestepping/")) MoL.MoL_C_Code_Generation(RK_method, RHS_string = """ GiRaFFE_NRPy_RHSs(&params,auxevol_gfs,RK_INPUT_GFS,RK_OUTPUT_GFS);""", post_RHS_string = """ GiRaFFE_NRPy_post_step(&params,xx,auxevol_gfs,RK_OUTPUT_GFS,n+1);\n""", outdir = os.path.join(Ccodesdir,"MoLtimestepping/")) shutil.copy("Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.h",os.path.join(Ccodesdir,"RHSs")) shutil.copy("A_i_rhs_no_gauge_terms.h",os.path.join(Ccodesdir,"RHSs")) cmd.mkdir(os.path.join(Ccodesdir,"A2B/")) shutil.copy("compute_B_and_Bstagger_from_A.h",os.path.join(Ccodesdir,"A2B")) ###Output Output C function calculate_StildeD0_source_term() to file GiRaFFE_staggered_standalone_Ccodes/RHSs\calculate_StildeD0_source_term.h Output C function calculate_StildeD1_source_term() to file GiRaFFE_staggered_standalone_Ccodes/RHSs\calculate_StildeD1_source_term.h Output C function calculate_StildeD2_source_term() to file GiRaFFE_staggered_standalone_Ccodes/RHSs\calculate_StildeD2_source_term.h Output C function calculate_Stilde_flux_D0_right() to file GiRaFFE_staggered_standalone_Ccodes/RHSs\calculate_Stilde_flux_D0_right.h Output C function calculate_Stilde_flux_D0_left() to file GiRaFFE_staggered_standalone_Ccodes/RHSs\calculate_Stilde_flux_D0_left.h Output C function calculate_Stilde_flux_D1_right() to file GiRaFFE_staggered_standalone_Ccodes/RHSs\calculate_Stilde_flux_D1_right.h Output C function calculate_Stilde_flux_D1_left() to file GiRaFFE_staggered_standalone_Ccodes/RHSs\calculate_Stilde_flux_D1_left.h Output C function calculate_Stilde_flux_D2_right() to file GiRaFFE_staggered_standalone_Ccodes/RHSs\calculate_Stilde_flux_D2_right.h Output C function calculate_Stilde_flux_D2_left() to file GiRaFFE_staggered_standalone_Ccodes/RHSs\calculate_Stilde_flux_D2_left.h Output C function GiRaFFE_NRPy_cons_to_prims() to file GiRaFFE_staggered_standalone_Ccodes/C2P\GiRaFFE_NRPy_cons_to_prims.h Output C function GiRaFFE_NRPy_prims_to_cons() to file GiRaFFE_staggered_standalone_Ccodes/C2P\GiRaFFE_NRPy_prims_to_cons.h ###Markdown Step 3: Import `GiRaFFEfood_NRPy` initial data modules \[Back to [top](toc)\]$$\label{gf_id}$$With the preliminaries out of the way, we will write the C functions to set up initial data. There are two categories of initial data that must be set: the spacetime metric variables, and the GRFFE plasma variables. We will set up the spacetime first. ###Code # There are several initial data routines we need to test. 
We'll control which one we use with a string option initial_data = "AlfvenWave" # Valid options: "ExactWald", "AlignedRotator", "AlfvenWave", "FastWave" spacetime = "flat" # Valid options: "ShiftedKerrSchild", "flat" if spacetime == "ShiftedKerrSchild": # Exact Wald is more complicated. We'll need the Shifted Kerr Schild metric in Cartesian coordinates. import BSSN.ShiftedKerrSchild as sks sks.ShiftedKerrSchild(True) import reference_metric as rfm par.set_parval_from_str("reference_metric::CoordSystem","Cartesian") rfm.reference_metric() # Use the Jacobian matrix to transform the vectors to Cartesian coordinates. drrefmetric__dx_0UDmatrix = sp.Matrix([[sp.diff(rfm.xxSph[0],rfm.xx[0]), sp.diff(rfm.xxSph[0],rfm.xx[1]), sp.diff(rfm.xxSph[0],rfm.xx[2])], [sp.diff(rfm.xxSph[1],rfm.xx[0]), sp.diff(rfm.xxSph[1],rfm.xx[1]), sp.diff(rfm.xxSph[1],rfm.xx[2])], [sp.diff(rfm.xxSph[2],rfm.xx[0]), sp.diff(rfm.xxSph[2],rfm.xx[1]), sp.diff(rfm.xxSph[2],rfm.xx[2])]]) dx__drrefmetric_0UDmatrix = drrefmetric__dx_0UDmatrix.inv() gammaDD = ixp.zerorank2() for i in range(3): for j in range(3): for k in range(3): for l in range(3): gammaDD[i][j] += drrefmetric__dx_0UDmatrix[(k,i)]*drrefmetric__dx_0UDmatrix[(l,j)]*sks.gammaSphDD[k][l].subs(sks.r,rfm.xxSph[0]).subs(sks.th,rfm.xxSph[1]) betaU = ixp.zerorank1() for i in range(3): for j in range(3): betaU[i] += dx__drrefmetric_0UDmatrix[(i,j)]*sks.betaSphU[j].subs(sks.r,rfm.xxSph[0]).subs(sks.th,rfm.xxSph[1]) alpha = sks.alphaSph.subs(sks.r,rfm.xxSph[0]).subs(sks.th,rfm.xxSph[1]) # Description and options for this initial data desc = "Generate a spinning black hole with Shifted Kerr Schild metric." loopopts_id ="AllPoints,Read_xxs" elif spacetime == "flat": gammaDD = ixp.zerorank2(DIM=3) for i in range(3): for j in range(3): if i==j: gammaDD[i][j] = sp.sympify(1) # else: leave as zero betaU = ixp.zerorank1() # All should be 0 alpha = sp.sympify(1) # Description and options for this initial data desc = "Generate a flat spacetime metric." loopopts_id ="AllPoints" # we don't need to read coordinates for flat spacetime. name = "set_initial_spacetime_metric_data" values_to_print = [\ lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD00"),rhs=gammaDD[0][0]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD01"),rhs=gammaDD[0][1]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD02"),rhs=gammaDD[0][2]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD11"),rhs=gammaDD[1][1]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD12"),rhs=gammaDD[1][2]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD22"),rhs=gammaDD[2][2]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU0"),rhs=betaU[0]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU1"),rhs=betaU[1]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU2"),rhs=betaU[2]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","alpha"),rhs=alpha),\ ] outCfunction( outfile = os.path.join(Ccodesdir,name+".h"), desc=desc, name=name, params ="const paramstruct *params,REAL *xx[3],REAL *auxevol_gfs", body = fin.FD_outputC("returnstring",values_to_print,params="outCverbose=False").replace("IDX4","IDX4S"), loopopts = loopopts_id) ###Output Output C function set_initial_spacetime_metric_data() to file GiRaFFE_staggered_standalone_Ccodes/set_initial_spacetime_metric_data.h ###Markdown Now, we will write out the initial data function for the GRFFE variables. ###Code if initial_data=="AlfvenWave": import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_1D_tests as gid gid.GiRaFFEfood_NRPy_1D_tests(stagger = True) desc = "Generate Alfven wave 1D initial test data for GiRaFFEfood_NRPy." 
elif initial_data=="FastWave": import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_1D_tests_fast_wave as gid gid.GiRaFFEfood_NRPy_1D_tests_fast_wave() desc = "Generate fast wave 1D initial test data for GiRaFFEfood_NRPy." elif initial_data=="AlignedRotator": import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_Aligned_Rotator as gid gid.GiRaFFEfood_NRPy_Aligned_Rotator() desc = "Generate aligned rotator initial test data for GiRaFFEfood_NRPy." elif initial_data=="ExactWald": import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_Exact_Wald as gid M,r0 = sp.symbols("M r0") gid.GiRaFFEfood_NRPy_Exact_Wald(gammaDD,M,r0) desc = "Generate exact Wald initial test data for GiRaFFEfood_NRPy." else: print("Unsupported Initial Data string "+initial_data+"! Supported ID: AlfvenWave, FastWave, AlignedRotator, or ExactWald") name = "initial_data" values_to_print = [\ lhrh(lhs=gri.gfaccess("out_gfs","AD0"),rhs=gid.AD[0]),\ lhrh(lhs=gri.gfaccess("out_gfs","AD1"),rhs=gid.AD[1]),\ lhrh(lhs=gri.gfaccess("out_gfs","AD2"),rhs=gid.AD[2]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU0"),rhs=gid.ValenciavU[0]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU1"),rhs=gid.ValenciavU[1]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU2"),rhs=gid.ValenciavU[2]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","BU0"),rhs=gid.BU[0]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","BU1"),rhs=gid.BU[1]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","BU2"),rhs=gid.BU[2]),\ lhrh(lhs=gri.gfaccess("out_gfs","psi6Phi"),rhs=sp.sympify(0))\ ] outCfunction( outfile = os.path.join(Ccodesdir,name+".h"), desc=desc, name=name, params ="const paramstruct *params,REAL *xx[3],REAL *auxevol_gfs,REAL *out_gfs", body = fin.FD_outputC("returnstring",values_to_print,params="outCverbose=False").replace("IDX4","IDX4S"), loopopts ="AllPoints,Read_xxs") ###Output Output C function initial_data() to file GiRaFFE_staggered_standalone_Ccodes/initial_data.h ###Markdown Step 4: Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h` \[Back to [top](toc)\]$$\label{cparams}$$Based on declared NRPy+ Cparameters, first we generate `declare_Cparameters_struct.h`, `set_Cparameters_default.h`, and `set_Cparameters[-SIMD].h`.Then we output `free_parameters.h`, which sets initial data parameters, as well as grid domain & reference metric parameters, applying `domain_size` and `sinh_width`/`SymTP_bScale` (if applicable) as set above ###Code # Step 3.e: Output C codes needed for declaring and setting Cparameters; also set free_parameters.h # Step 3.e.i: Generate declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h par.generate_Cparameters_Ccodes(os.path.join(Ccodesdir)) # Step 3.e.ii: Set free_parameters.h with open(os.path.join(Ccodesdir,"free_parameters.h"),"w") as file: file.write("""// Override parameter defaults with values based on command line arguments and NGHOSTS. 
params.Nxx0 = atoi(argv[1]); params.Nxx1 = atoi(argv[2]); params.Nxx2 = atoi(argv[3]); params.Nxx_plus_2NGHOSTS0 = params.Nxx0 + 2*NGHOSTS; params.Nxx_plus_2NGHOSTS1 = params.Nxx1 + 2*NGHOSTS; params.Nxx_plus_2NGHOSTS2 = params.Nxx2 + 2*NGHOSTS; // Step 0d: Set up space and time coordinates // Step 0d.i: Declare \Delta x^i=dxx{0,1,2} and invdxx{0,1,2}, as well as xxmin[3] and xxmax[3]: const REAL xxmin[3] = {-1.5,-0.1,-0.1}; const REAL xxmax[3] = { 1.5, 0.1, 0.1}; //const REAL xxmin[3] = {-1.5,-1.5,-1.5}; //const REAL xxmax[3] = { 1.5, 1.5, 1.5}; params.dxx0 = (xxmax[0] - xxmin[0]) / ((REAL)params.Nxx0+1); params.dxx1 = (xxmax[1] - xxmin[1]) / ((REAL)params.Nxx1+1); params.dxx2 = (xxmax[2] - xxmin[2]) / ((REAL)params.Nxx2+1); printf("dxx0,dxx1,dxx2 = %.5e,%.5e,%.5e\\n",params.dxx0,params.dxx1,params.dxx2); params.invdx0 = 1.0 / params.dxx0; params.invdx1 = 1.0 / params.dxx1; params.invdx2 = 1.0 / params.dxx2; const int poison_grids = 0; // Standard GRFFE parameters: params.GAMMA_SPEED_LIMIT = 2000.0; params.diss_strength = 0.1; """) if initial_data=="ExactWald": with open(os.path.join(out_dir,"free_parameters.h"),"a") as file: file.write("""params.r0 = 0.4; params.a = 0.0; """) ###Output _____no_output_____ ###Markdown Step 4: Set up boundary condition functions for chosen singular, curvilinear coordinate system \[Back to [top](toc)\]$$\label{bc_functs}$$Next apply singular, curvilinear coordinate boundary conditions [as documented in the corresponding NRPy+ tutorial notebook](Tutorial-Start_to_Finish-Curvilinear_BCs.ipynb)...But, for the moment, we're actually just using this because it writes the file `gridfunction_defines.h`. ###Code import CurviBoundaryConditions.CurviBoundaryConditions as cbcs cbcs.Set_up_CurviBoundaryConditions(os.path.join(Ccodesdir,"boundary_conditions/"),Cparamspath=os.path.join("../"),enable_copy_of_static_Ccodes=False) ###Output Wrote to file "GiRaFFE_staggered_standalone_Ccodes/boundary_conditions/parity_conditions_symbolic_dot_products.h" Evolved parity: ( AD0:1, AD1:2, AD2:3, StildeD0:1, StildeD1:2, StildeD2:3, psi6Phi:0 ) AuxEvol parity: ( BU0:1, BU1:2, BU2:3, B_lU0:1, B_lU1:2, B_lU2:3, B_rU0:1, B_rU1:2, B_rU2:3, BstaggerU0:1, BstaggerU1:2, BstaggerU2:3, Bstagger_lU0:1, Bstagger_lU1:2, Bstagger_lU2:3, Bstagger_rU0:1, Bstagger_rU1:2, Bstagger_rU2:3, ValenciavU0:1, ValenciavU1:2, ValenciavU2:3, Valenciav_lU0:1, Valenciav_lU1:2, Valenciav_lU2:3, Valenciav_llU0:1, Valenciav_llU1:2, Valenciav_llU2:3, Valenciav_lrU0:1, Valenciav_lrU1:2, Valenciav_lrU2:3, Valenciav_rU0:1, Valenciav_rU1:2, Valenciav_rU2:3, Valenciav_rlU0:1, Valenciav_rlU1:2, Valenciav_rlU2:3, Valenciav_rrU0:1, Valenciav_rrU1:2, Valenciav_rrU2:3, alpha:0, alpha_face:0, betaU0:1, betaU1:2, betaU2:3, beta_faceU0:1, beta_faceU1:2, beta_faceU2:3, cmax_x:0, cmax_y:0, cmax_z:0, cmin_x:0, cmin_y:0, cmin_z:0, gammaDD00:4, gammaDD01:5, gammaDD02:6, gammaDD11:7, gammaDD12:8, gammaDD22:9, gamma_faceDD00:4, gamma_faceDD01:5, gamma_faceDD02:6, gamma_faceDD11:7, gamma_faceDD12:8, gamma_faceDD22:9, psi6_temp:0, psi6center:0 ) Wrote to file "GiRaFFE_staggered_standalone_Ccodes/boundary_conditions/EigenCoord_Cart_to_xx.h" ###Markdown Step 5: `GiRaFFE_NRPy_standalone.c`: The Main C Code \[Back to [top](toc)\]$$\label{mainc}$$ ###Code # Part P0: Define REAL, set the number of ghost cells NGHOSTS (from NRPy+'s FD_CENTDERIVS_ORDER), # and set the CFL_FACTOR (which can be overwritten at the command line) with open(os.path.join(Ccodesdir,"GiRaFFE_NRPy_REAL__NGHOSTS__CFL_FACTOR.h"), "w") as file: file.write(""" // Part 
P0.a: Set the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER #define NGHOSTS """+str(3)+""" #define NGHOSTS_A2B """+str(2)+""" // Part P0.b: Set the numerical precision (REAL) to double, ensuring all floating point // numbers are stored to at least ~16 significant digits #define REAL """+REAL+""" // Part P0.c: Set the CFL Factor. Can be overwritten at command line. REAL CFL_FACTOR = """+str(default_CFL_FACTOR)+";") %%writefile $Ccodesdir/GiRaFFE_NRPy_standalone.c // Step P0: Define REAL and NGHOSTS; and declare CFL_FACTOR. This header is generated in NRPy+. #include "GiRaFFE_NRPy_REAL__NGHOSTS__CFL_FACTOR.h" #include "declare_Cparameters_struct.h" const int NSKIP_1D_OUTPUT = 1; // Step P1: Import needed header files #include "stdio.h" #include "stdlib.h" #include "math.h" #include "time.h" #include "stdint.h" // Needed for Windows GCC 6.x compatibility #ifndef M_PI #define M_PI 3.141592653589793238462643383279502884L #endif #ifndef M_SQRT1_2 #define M_SQRT1_2 0.707106781186547524400844362104849039L #endif // Step P2: Declare the IDX4S(gf,i,j,k) macro, which enables us to store 4-dimensions of // data in a 1D array. In this case, consecutive values of "i" // (all other indices held to a fixed value) are consecutive in memory, where // consecutive values of "j" (fixing all other indices) are separated by // Nxx_plus_2NGHOSTS0 elements in memory. Similarly, consecutive values of // "k" are separated by Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1 in memory, etc. #define IDX4S(g,i,j,k) \ ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) + Nxx_plus_2NGHOSTS2 * (g) ) ) ) #define IDX4ptS(g,idx) ( (idx) + (Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2) * (g) ) #define IDX3S(i,j,k) ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) ) ) ) #define LOOP_REGION(i0min,i0max, i1min,i1max, i2min,i2max) \ for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) #define LOOP_ALL_GFS_GPS(ii) _Pragma("omp parallel for") \ for(int (ii)=0;(ii)<Nxx_plus_2NGHOSTS_tot*NUM_EVOL_GFS;(ii)++) // Step P3: Set gridfunction macros #include "boundary_conditions/gridfunction_defines.h" // Step P4: Include the RHS, BC, and primitive recovery functions #include "GiRaFFE_NRPy_Main_Driver.h" // Step P5: Include the initial data functions #include "set_initial_spacetime_metric_data.h" #include "initial_data.h" // main() function: // Step 0: Read command-line input, set up grid structure, allocate memory for gridfunctions, set up coordinates // Step 1: Set up scalar wave initial data // Step 2: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm, // applying quadratic extrapolation outer boundary conditions. // Step 3: Output relative error between numerical and exact solution. // Step 4: Free all allocated memory int main(int argc, const char *argv[]) { paramstruct params; #include "set_Cparameters_default.h" // Step 0a: Read command-line input, error out if nonconformant if(argc != 4 || atoi(argv[1]) < NGHOSTS || atoi(argv[2]) < NGHOSTS || atoi(argv[3]) < NGHOSTS) { printf("Error: Expected three command-line arguments: ./GiRaFFE_NRPy_standalone [Nx] [Ny] [Nz],\n"); printf("where Nx is the number of grid points in the x direction, and so forth.\n"); printf("Nx,Ny,Nz MUST BE larger than NGHOSTS (= %d)\n",NGHOSTS); exit(1); } // Step 0c: Set free parameters, overwriting Cparameters defaults // by hand or with command-line input, as desired. #include "free_parameters.h" #include "set_Cparameters-nopointer.h" // ... 
and then set up the numerical grid structure in time: const REAL t_final = 0.5; const REAL CFL_FACTOR = 0.5; // Set the CFL Factor // Step 0c: Allocate memory for gridfunctions const int Nxx_plus_2NGHOSTS_tot = Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2; // Step 0k: Allocate memory for gridfunctions #include "MoLtimestepping/RK_Allocate_Memory.h" REAL *restrict auxevol_gfs = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot); REAL *evol_gfs_exact = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot); REAL *auxevol_gfs_exact = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot); // For debugging, it can be useful to set everything to NaN initially. if(poison_grids) { for(int ii=0;ii<NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) { y_n_gfs[ii] = 1.0/0.0; y_nplus1_running_total_gfs[ii] = 1.0/0.0; //k_odd_gfs[ii] = 1.0/0.0; //k_even_gfs[ii] = 1.0/0.0; diagnostic_output_gfs[ii] = 1.0/0.0; evol_gfs_exact[ii] = 1.0/0.0; } for(int ii=0;ii<NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) { auxevol_gfs[ii] = 1.0/0.0; auxevol_gfs_exact[ii] = 1.0/0.0; } } // Step 0d: Set up coordinates: Set dx, and then dt based on dx_min and CFL condition // This is probably already defined above, but just in case... #ifndef MIN #define MIN(A, B) ( ((A) < (B)) ? (A) : (B) ) #endif REAL dt = CFL_FACTOR * MIN(dxx0,MIN(dxx1,dxx2)); // CFL condition int Nt = (int)(t_final / dt + 0.5); // The number of points in time. //Add 0.5 to account for C rounding down integers. // Step 0e: Set up cell-centered Cartesian coordinate grids REAL *xx[3]; xx[0] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS0); xx[1] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS1); xx[2] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS2); for(int j=0;j<Nxx_plus_2NGHOSTS0;j++) xx[0][j] = xxmin[0] + (j-NGHOSTS+1)*dxx0; for(int j=0;j<Nxx_plus_2NGHOSTS1;j++) xx[1][j] = xxmin[1] + (j-NGHOSTS+1)*dxx1; for(int j=0;j<Nxx_plus_2NGHOSTS2;j++) xx[2][j] = xxmin[2] + (j-NGHOSTS+1)*dxx2; // Step 1: Set up initial data to be exact solution at time=0: REAL time = 0.0; set_initial_spacetime_metric_data(&params,xx,auxevol_gfs); initial_data(&params,xx,auxevol_gfs,y_n_gfs); // Code to perturb the initial data: for(int ii=0;ii<NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) { y_n_gfs[ii] += 1.0e-15; y_nplus1_running_total_gfs[ii] += 1.0e-15; //k_odd_gfs[ii] = 1.0/0.0; //k_even_gfs[ii] = 1.0/0.0; diagnostic_output_gfs[ii] += 1.0e-15; evol_gfs_exact[ii] += 1.0e-15; } for(int ii=0;ii<NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) { auxevol_gfs[ii] += 1.0e-15; auxevol_gfs_exact[ii] += 1.0e-15; } // Fill in the remaining quantities //apply_bcs_potential(&params,y_n_gfs); GiRaFFE_compute_B_and_Bstagger_from_A(&params, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD00GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD01GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD02GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD11GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD12GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD22GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*PSI6_TEMPGF, /* Temporary storage,overwritten */ y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD0GF, y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD1GF, y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD2GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU0GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU1GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU2GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU0GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU1GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU2GF); //override_BU_with_old_GiRaFFE(&params,auxevol_gfs,0); 
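// Build the conserved variables StildeD from the primitives (Valencia velocity and magnetic field),
// then immediately run the conservative-to-primitive solver so that primitives and conservatives
// begin the evolution in a mutually consistent state.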
GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs,y_n_gfs); //apply_bcs_velocity(&params,auxevol_gfs); // Extra stack, useful for debugging: GiRaFFE_NRPy_cons_to_prims(&params,xx,auxevol_gfs,y_n_gfs); //GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs,y_n_gfs); //GiRaFFE_NRPy_cons_to_prims(&params,xx,auxevol_gfs,y_n_gfs); //GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs,y_n_gfs); //GiRaFFE_NRPy_cons_to_prims(&params,xx,auxevol_gfs,y_n_gfs); for(int n=0;n<=Nt;n++) { // Main loop to progress forward in time. //for(int n=0;n<=1;n++) { // Main loop to progress forward in time. // Step 1a: Set current time to correct value & compute exact solution time = ((REAL)n)*dt; /* Step 2: Validation: Output relative error between numerical and exact solution, */ if((n)%NSKIP_1D_OUTPUT ==0) { // Step 2c: Output relative error between exact & numerical at center of grid. const int i0mid=Nxx_plus_2NGHOSTS0/2; const int i1mid=Nxx_plus_2NGHOSTS1/2; const int i2mid=Nxx_plus_2NGHOSTS2/2; char filename[100]; sprintf(filename,"out%d-%08d.txt",Nxx0,n); FILE *out2D = fopen(filename, "w"); for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) { const int idx = IDX3S(i0,i1mid,i2mid); fprintf(out2D,"%.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e\n", xx[0][i0], auxevol_gfs[IDX4ptS(BU0GF,idx)],auxevol_gfs[IDX4ptS(BU1GF,idx)],auxevol_gfs[IDX4ptS(BU2GF,idx)], y_n_gfs[IDX4ptS(AD0GF,idx)],y_n_gfs[IDX4ptS(AD1GF,idx)],y_n_gfs[IDX4ptS(AD2GF,idx)], y_n_gfs[IDX4ptS(STILDED0GF,idx)],y_n_gfs[IDX4ptS(STILDED1GF,idx)],y_n_gfs[IDX4ptS(STILDED2GF,idx)], auxevol_gfs[IDX4ptS(VALENCIAVU0GF,idx)],auxevol_gfs[IDX4ptS(VALENCIAVU1GF,idx)],auxevol_gfs[IDX4ptS(VALENCIAVU2GF,idx)], y_n_gfs[IDX4ptS(PSI6PHIGF,idx)]); } fclose(out2D); // For convergence testing, we'll shift the grid x -> x-1 and output initial data again, giving the exact solution. LOOP_REGION(0,Nxx_plus_2NGHOSTS0,0,1,0,1) { //xx[0][i0] += -mu_AW*time; xx[0][i0] += -time; } set_initial_spacetime_metric_data(&params,xx,auxevol_gfs_exact); initial_data(&params,xx,auxevol_gfs_exact,evol_gfs_exact); // Fill in the remaining quantities //driver_A_to_B(&params,evol_gfs_exact,auxevol_gfs_exact); GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs_exact,evol_gfs_exact); // And now, we'll set the grid back to rights. LOOP_REGION(0,Nxx_plus_2NGHOSTS0,0,1,0,1) { //xx[0][i0] -= -mu_AW*time; xx[0][i0] -= -time; } sprintf(filename,"out%d-%08d_exact.txt",Nxx0,n); FILE *out2D_exact = fopen(filename, "w"); for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) { const int idx = IDX3S(i0,i1mid,i2mid); fprintf(out2D_exact,"%.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e\n", xx[0][i0], auxevol_gfs_exact[IDX4ptS(BU0GF,idx)],auxevol_gfs_exact[IDX4ptS(BU1GF,idx)],auxevol_gfs_exact[IDX4ptS(BU2GF,idx)], evol_gfs_exact[IDX4ptS(AD0GF,idx)],evol_gfs_exact[IDX4ptS(AD1GF,idx)],evol_gfs_exact[IDX4ptS(AD2GF,idx)], evol_gfs_exact[IDX4ptS(STILDED0GF,idx)],evol_gfs_exact[IDX4ptS(STILDED1GF,idx)],evol_gfs_exact[IDX4ptS(STILDED2GF,idx)], auxevol_gfs_exact[IDX4ptS(VALENCIAVU0GF,idx)],auxevol_gfs_exact[IDX4ptS(VALENCIAVU1GF,idx)],auxevol_gfs_exact[IDX4ptS(VALENCIAVU2GF,idx)], evol_gfs_exact[IDX4ptS(PSI6PHIGF,idx)]); } fclose(out2D_exact); } // Step 3: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm, // applying quadratic extrapolation outer boundary conditions. 
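    //          (The RK_MoL.h file included in Step 3.b below is generated earlier by the MoL
    //           code-generation cell; schematically, each RK substage calls GiRaFFE_NRPy_RHSs()
    //           to evaluate the right-hand sides and GiRaFFE_NRPy_post_step() to apply boundary
    //           conditions and conservative-to-primitive recovery.)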
// Step 3.b: Step forward one timestep (t -> t+dt) in time using // chosen RK-like MoL timestepping algorithm #include "MoLtimestepping/RK_MoL.h" } // End main loop to progress forward in time. // Step 4: Free all allocated memory #include "MoLtimestepping/RK_Free_Memory.h" free(auxevol_gfs); free(auxevol_gfs_exact); free(evol_gfs_exact); for(int i=0;i<3;i++) free(xx[i]); return 0; } cmd.C_compile(os.path.join(Ccodesdir,"GiRaFFE_NRPy_standalone.c"), os.path.join(Ccodesdir,"output","GiRaFFE_NRPy_standalone"),compile_mode="safe") # !gcc -g -O2 -fopenmp GiRaFFE_standalone_Ccodes/GiRaFFE_NRPy_standalone.c -o GiRaFFE_NRPy_standalone -lm # Change to output directory os.chdir(outdir) # Clean up existing output files cmd.delete_existing_files("out*.txt") cmd.delete_existing_files("out*.png") # cmd.Execute(os.path.join(Ccodesdir,"output","GiRaFFE_NRPy_standalone"), "640 16 16", os.path.join(outdir,"out640.txt")) cmd.Execute("GiRaFFE_NRPy_standalone", "119 7 7","out119.txt") # cmd.Execute("GiRaFFE_NRPy_standalone", "119 119 119","out119.txt") # cmd.Execute("GiRaFFE_NRPy_standalone", "239 15 15","out239.txt") # !OMP_NUM_THREADS=1 valgrind --track-origins=yes -v ./GiRaFFE_NRPy_standalone 1280 32 32 # Return to root directory os.chdir(os.path.join("../../")) ###Output Compiling executable... (EXEC): Executing `gcc -O2 -g -fopenmp GiRaFFE_staggered_standalone_Ccodes/GiRaFFE_NRPy_standalone.c -o GiRaFFE_staggered_standalone_Ccodes/output\GiRaFFE_NRPy_standalone.exe -lm`... (BENCH): Finished executing in 9.460079193115234 seconds. Finished compilation. (EXEC): Executing `cmd /c GiRaFFE_NRPy_standalone 119 7 7`... (BENCH): Finished executing in 4.665024042129517 seconds. ###Markdown Now, we will load the data generated by the simulation and plot it in order to test for convergence. ###Code import numpy as np import matplotlib.pyplot as plt # Data_numer = np.loadtxt(os.path.join(Ccodesdir,"output","out119-00000001.txt")) # # Data_num_2 = np.loadtxt(os.path.join(Ccodesdir,"output","out239-00000080.txt")) # # Data_old = np.loadtxt("/home/penelson/OldCactus/Cactus/exe/ABE-GiRaFFEfood_1D_AlfvenWave/giraffe-grmhd_primitives_bi.x.asc") # # Data_o_2 = np.loadtxt("/home/penelson/OldCactus/Cactus/exe/ABE-GiRaFFEfood_1D_AlfvenWave_2/giraffe-grmhd_primitives_bi.x.asc") # # Data_numer = Data_old[5000:5125,11:15] # The column range is chosen for compatibility with the plotting script. # # Data_num_2 = Data_o_2[19600:19845,11:15] # The column range is chosen for compatibility with the plotting script. 
# Data_exact = np.loadtxt(os.path.join(Ccodesdir,"output","out119-00000001_exact.txt")) # # Data_exa_2 = np.loadtxt(os.path.join(Ccodesdir,"output","out239-00000080_exact.txt")) # predicted_order = 2.0 # column = 5 # # # plt.plot(Data_exact[2:-2,0],np.log2(np.absolute((Data_numer[2:-2,column]-Data_exact[2:-2,column])/\ # # # (Data_num_2[2:-2:2,column]-Data_exa_2[2:-2:2,column]))),'.') # plt.plot(Data_exact[:,0],Data_exact[:,column]) # plt.plot(Data_exact[:,0],Data_numer[:,column],'.') # # plt.xlim(-0.0,1.0) # # # plt.ylim(-1.0,5.0) # # # plt.ylim(-0.0005,0.0005) # # plt.xlabel("x") # # plt.ylabel("BU2") # plt.show() # data_list = Data_exact[:,column]-Data_numer[:,column] # print(np.sqrt(np.sum(np.square(data_list))/len(data_list))) # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 labels = ["x","BU0","BU1","BU2","AD0","AD1","AD2","StildeD0","StildeD1","StildeD2","ValenciavU0","ValenciavU1","ValenciavU2", "psi6Phi"] old_files = ["", "giraffe-grmhd_primitives_bi.x.asc","giraffe-grmhd_primitives_bi.x.asc","giraffe-grmhd_primitives_bi.x.asc", "giraffe-em_ax.x.asc","giraffe-em_ay.x.asc","giraffe-em_az.x.asc", # "cell_centered_Ai.txt","cell_centered_Ai.txt","cell_centered_Ai.txt", "giraffe-grmhd_conservatives.x.asc","giraffe-grmhd_conservatives.x.asc","giraffe-grmhd_conservatives.x.asc", "giraffe-grmhd_primitives_allbutbi.x.asc","giraffe-grmhd_primitives_allbutbi.x.asc","giraffe-grmhd_primitives_allbutbi.x.asc", "giraffe-em_psi6phi.x.asc"] column = 3 column_old = [0,12,13,14,12,12,12,12,13,14,12,13,14,12] # old_path = "/home/penelson/OldCactus/Cactus/exe/ABE-GiRaFFEfood_1D_AlfvenWave" old_path = "C:\\Users\\Patrick\\Documents\\nrpytutorial\\in_progress\\ABE-GiRaFFEfood_1D_AlfvenWave\\" # perturb_path = "C:\\Users\\Patrick\\Documents\\nrpytutorial\\in_progress\\ABE-GiRaFFEfood_1D_AlfvenWave_perturb\\" new_path = os.path.join(Ccodesdir,"output") data_old = np.loadtxt(os.path.join(old_path,old_files[column])) # data_per = np.loadtxt(os.path.join(perturb_path,old_files[column])) n=2 data_old = data_old[n*125:n*125+125,:]# Select only the nth timestep data_new = np.loadtxt(os.path.join(new_path,"out119-00000002.txt")) # deltaA_old = data_old[125:250,:] - data_old[0:125,:] # data_new_t0 = np.loadtxt(os.path.join(new_path,"out119-00000000.txt")) # deltaA_new = data_new[:,:] - data_new_t0[:,:] plt.figure() # plt.plot(data_new[:,0],data_new[:,column]-data_old[:,column_old[column]]) # plt.plot(data_new[3:-3,0],data_per[3:-3,column_old[column]]-data_old[3:-3,column_old[column]]) # For perturbation testing! 
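# A minimal sketch of the convergence measurement that the commented-out code above is aiming at:
# given errors from a coarse run and from a run with half the grid spacing (subsampled onto the
# coarse grid points, e.g. with [::2]), the observed order of convergence is
#     p = log2( |error_coarse| / |error_fine| ).
def observed_convergence_order(numer_coarse, exact_coarse, numer_fine_subsampled, exact_fine_subsampled):
    err_coarse = np.abs(numer_coarse - exact_coarse)
    err_fine   = np.abs(numer_fine_subsampled - exact_fine_subsampled)
    # Compare RMS errors; pointwise ratios can also be plotted, as in the commented code above.
    rms = lambda e: np.sqrt(np.mean(np.square(e)))
    return np.log2(rms(err_coarse) / rms(err_fine))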
plt.plot(data_new[:,0],data_old[:,column_old[column]]) plt.plot(data_new[:,0],data_new[:,column],'.') # plt.plot(data_new[:,0],data_new[:,column]-((3*np.sin(5*np.pi*data_new[:,0]/np.sqrt(1 - (-0.5)**2))/20 + 23/20)*(data_new[:,0]/2 + np.sqrt(1 - (-0.5)**2)/20 + np.absolute(data_new[:,0] + np.sqrt(1 - (-0.5)**2)/10)/2)*(-1e-100/2 + data_new[:,0]/2 - np.sqrt(1 - (-0.5)**2)/20 - np.absolute(-1e-100 + data_new[:,0] - np.sqrt(1 - (-0.5)**2)/10)/2)/((-1e-100 + data_new[:,0] - np.sqrt(1 - (-0.5)**2)/10)*(1e-100 + data_new[:,0] + np.sqrt(1 - (-0.5)**2)/10)) + 13*(data_new[:,0]/2 - np.sqrt(1 - (-0.5)**2)/20 + np.absolute(data_new[:,0] - np.sqrt(1 - (-0.5)**2)/10)/2)/(10*(1e-100 + data_new[:,0] - np.sqrt(1 - (-0.5)**2)/10)) + (-1e-100/2 + data_new[:,0]/2 + np.sqrt(1 - (-0.5)**2)/20 - np.absolute(-1e-100 + data_new[:,0] + np.sqrt(1 - (-0.5)**2)/10)/2)/(-1e-100 + data_new[:,0] + np.sqrt(1 - (-0.5)**2)/10))/np.sqrt(1 - (-0.5)**2)) # plt.plot(data_new[1:,0]-(data_new[0,0]-data_new[1,0])/2.0,(data_new[0:-1,column]+data_new[1:,column])/2,'.',label="GiRaFFE_NRPy+injected BU") # plt.plot(data_new[1:,0]-(data_new[0,0]-data_new[1,0])/2.0,data_old[1:,column_old[column]],label="old GiRaFFE") # -(data_old[0,9]-data_old[1,9])/2.0 # plt.plot(data_new[3:-3,0],deltaA_new[3:-3,column],'.') # plt.plot(data_new[3:-3,0],deltaA_old[3:-3,column_old[column]]-deltaA_new[3:-3,column]) # plt.xlim(-0.1,0.1) # plt.ylim(-0.2,0.2) # plt.legend() plt.xlabel(labels[0]) plt.ylabel(labels[column]) plt.show() # print(np.argmin(deltaA_old[3:-3,column_old[column]]-deltaA_new[3:-3,column])) ###Output _____no_output_____ ###Markdown This code will create an animation of the wave over time. ###Code # import matplotlib.pyplot as plt from matplotlib.pyplot import savefig from IPython.display import HTML import matplotlib.image as mgimg import glob import sys from matplotlib import animation cmd.delete_existing_files("out119-00*.png") globby = glob.glob(os.path.join(Ccodesdir,'output','out119-00*.txt')) file_list = [] for x in sorted(globby): file_list.append(x) number_of_files = int(len(file_list)/2) for timestep in range(number_of_files): fig = plt.figure() numer_filename = file_list[2*timestep] exact_filename = file_list[2*timestep+1] Numer = np.loadtxt(numer_filename) Exact = np.loadtxt(exact_filename) plt.title("Alfven Wave") plt.xlabel("x") plt.ylabel("BU2") plt.xlim(-0.5,0.5) plt.ylim(1.0,1.7) plt.plot(Numer[3:-3,0],Numer[3:-3,3],'.',label="Numerical") plt.plot(Exact[3:-3,0],Exact[3:-3,3],label="Exact") plt.legend() savefig(numer_filename+".png",dpi=150) plt.close(fig) sys.stdout.write("%c[2K" % 27) sys.stdout.write("Processing file "+numer_filename+"\r") sys.stdout.flush() ## VISUALIZATION ANIMATION, PART 2: Combine PNGs to generate movie ## # https://stackoverflow.com/questions/14908576/how-to-remove-frame-from-matplotlib-pyplot-figure-vs-matplotlib-figure-frame # https://stackoverflow.com/questions/23176161/animating-pngs-in-matplotlib-using-artistanimation # !rm -f GiRaFFE_NRPy-1D_tests.mp4 cmd.delete_existing_files("GiRaFFE_NRPy-1D_tests.mp4") fig = plt.figure(frameon=False) ax = fig.add_axes([0, 0, 1, 1]) ax.axis('off') myimages = [] for i in range(number_of_files): img = mgimg.imread(file_list[2*i]+".png") imgplot = plt.imshow(img) myimages.append([imgplot]) ani = animation.ArtistAnimation(fig, myimages, interval=100, repeat_delay=1000) plt.close() ani.save('GiRaFFE_NRPy-1D_tests.mp4', fps=5,dpi=150) %%HTML <video width="480" height="360" controls> <source src="GiRaFFE_NRPy-1D_tests.mp4" type="video/mp4"> </video> ###Output 
_____no_output_____ ###Markdown window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'UA-59152712-8'); Start-to-Finish Example: `GiRaFFE_NRPy` 1D tests Authors: Patrick Nelson & Terrence Pierre Jacques Adapted from [Start-to-Finish Example: Head-On Black Hole Collision](../Tutorial-Start_to_Finish-BSSNCurvilinear-Two_BHs_Collide.ipynb) This module compiles and runs code tests for all 1D initial data options available in GiRaFFE-NRPy+, evolving one-dimensional GRFFE waves. NRPy+ Source Code for this module: * Main python module for all 1D initial data: [GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_1D_tests.py](../../edit/in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_1D_tests.py) __Options:__ 1. [Fast Wave](Tutorial-GiRaFFEfood_NRPy_1D_tests-fast_wave.ipynb) 1. [Alfven Wave](Tutorial-GiRaFFEfood_NRPy_1D_alfven_wave.ipynb) 1. [Degenerate Alfven Wave](Tutorial-GiRaFFEfood_NRPy_1D_tests-degen_Alfven_wave.ipynb) 1. [Three Alfven Waves](Tutorial-GiRaFFEfood_NRPy_1D_tests-three_waves.ipynb) 1. [FFE Breakdown](Tutorial-GiRaFFEfood_NRPy_1D_tests-FFE_breakdown.ipynb)* [GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Afield_flux.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Afield_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy_staggered-Afield_flux.ipynb) Generates the expressions to find the flux term of the induction equation.* [GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_A2B.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_A2B.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy_staggered-A2B.ipynb) Generates the driver to compute the magnetic field from the vector potential/* [GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-BCs.ipynb) Generates the code to apply boundary conditions to the vector potential, scalar potential, and three-velocity.* [GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-C2P_P2C.ipynb) Generates the conservative-to-primitive and primitive-to-conservative solvers.* [GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Metric_Face_Values.ipynb) Generates code to interpolate metric gridfunctions to cell faces.* [GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-PPM.ipynb) Genearates code to reconstruct primitive variables on cell faces.* [GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Source_Terms.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Source_Terms.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy_staggered-Source_Terms.ipynb) Generates the expressions to find the flux term of the Poynting flux evolution equation.* [GiRaFFE_NRPy/Stilde_flux.py](../../edit/in_progress/GiRaFFE_NRPy/Stilde_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Stilde_flux.ipynb) Generates the expressions to find the flux term of the Poynting flux evolution equation.* [../GRFFE/equations.py](../../edit/GRFFE/equations.py) [\[**tutorial**\]](../Tutorial-GRFFE_Equations-Cartesian.ipynb) Generates code necessary to compute the source terms.* [../GRHD/equations.py](../../edit/GRHD/equations.py) [\[**tutorial**\]](../Tutorial-GRHD_Equations-Cartesian.ipynb) Generates code necessary to compute the source terms.Here we use NRPy+ to generate the C source code necessary to set 
up initial data for an Alfv&eacute;n wave (see [the original GiRaFFE paper](https://arxiv.org/pdf/1704.00599.pdf)). Then we use it to generate the RHS expressions for [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html) time integration based on the [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4). Table of Contents$$\label{toc}$$This notebook is organized as follows1. [Step 1](initializenrpy): Set core NRPy+ parameters for numerical grids1. [Step 2](grffe): Output C code for GRFFE evolution 1. [Step 2.a](mol): Output macros for Method of Lines timestepping1. [Step 3](gf_id): Import `GiRaFFEfood_NRPy` initial data modules1. [Step 4](cparams): Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h`1. [Step 5](mainc): `GiRaFFE_NRPy_standalone.c`: The Main C Code1. [Step 6](compileexec): Compile and execute C codes1. [Step 7](plots): Data Visualization1. [Step 8](latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file Step 1: Set up core functions and parameters for solving GRFFE equations \[Back to [top](toc)\]$$\label{setup}$$ ###Code import shutil, os, sys # Standard Python modules for multiplatform OS-level functions # First, we'll add the parent directory to the list of directories Python will check for modules. nrpy_dir_path = os.path.join("..") if nrpy_dir_path not in sys.path: sys.path.append(nrpy_dir_path) # Step P1: Import needed NRPy+ core modules: from outputC import outCfunction, lhrh # NRPy+: Core C code output module import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends import finite_difference as fin # NRPy+: Finite difference C code generation module import NRPy_param_funcs as par # NRPy+: Parameter interface import grid as gri # NRPy+: Functions having to do with numerical grids import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface # Step P2: Create C code output directory: Ccodesdir = os.path.join("GiRaFFE_staggered_1D_Tests_standalone_Ccodes/") # First remove C code output directory if it exists # Courtesy https://stackoverflow.com/questions/303200/how-do-i-remove-delete-a-folder-that-is-not-empty # !rm -r ScalarWaveCurvilinear_Playground_Ccodes shutil.rmtree(Ccodesdir, ignore_errors=True) # Then create a fresh directory cmd.mkdir(Ccodesdir) # Step P3: Create executable output directory: outdir = os.path.join(Ccodesdir,"output/") cmd.mkdir(outdir) # Step P5: Set timestepping algorithm (we adopt the Method of Lines) REAL = "double" # Best to use double here. default_CFL_FACTOR= 0.5 # (GETS OVERWRITTEN WHEN EXECUTED.) In pure axisymmetry (symmetry_axes = 2 below) 1.0 works fine. Otherwise 0.5 or lower. # Step P6: Set the finite differencing order to 2. 
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",4) thismodule = "Start_to_Finish-GiRaFFE_NRPy-1D_tests" TINYDOUBLE = par.Cparameters("REAL", thismodule, "TINYDOUBLE", 1e-100) import GiRaFFE_NRPy.GiRaFFE_NRPy_Main_Driver_staggered as md # par.set_paramsvals_value("GiRaFFE_NRPy.GiRaFFE_NRPy_C2P_P2C::enforce_speed_limit_StildeD = False") par.set_paramsvals_value("GiRaFFE_NRPy.GiRaFFE_NRPy_C2P_P2C::enforce_current_sheet_prescription = False") ###Output _____no_output_____ ###Markdown Step 2: Output C code for GRFFE evolution \[Back to [top](toc)\]$$\label{grffe}$$We will first write the C codes needed for GRFFE evolution. We have already written a module to generate all these codes and call the functions in the appropriate order, so we will import that here. We will take the slightly unusual step of doing this before we generate the initial data functions because the main driver module will register all the gridfunctions we need. It will also generate functions that, in addition to their normal spot in the MoL timestepping, will need to be called during the initial data step to make sure all the variables are appropriately filled in. All of this is handled with a single call to `GiRaFFE_NRPy_Main_Driver_generate_all()`, which will register gridfunctions, write all the C code kernels, and write the C code functions to call those. ###Code md.GiRaFFE_NRPy_Main_Driver_generate_all(Ccodesdir) ###Output Output C function calculate_StildeD0_source_term() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/RHSs/calculate_StildeD0_source_term.h Output C function calculate_StildeD1_source_term() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/RHSs/calculate_StildeD1_source_term.h Output C function calculate_StildeD2_source_term() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/RHSs/calculate_StildeD2_source_term.h Output C function calculate_Stilde_rhsD() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/RHSs/calculate_Stilde_rhsD.h Output C function GiRaFFE_NRPy_cons_to_prims() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/C2P/GiRaFFE_NRPy_cons_to_prims.h Output C function GiRaFFE_NRPy_prims_to_cons() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/C2P/GiRaFFE_NRPy_prims_to_cons.h ###Markdown Step 2.a: Output macros for Method of Lines timestepping \[Back to [top](toc)\]$$\label{mol}$$Now, we generate the code to implement the method of lines using the fourth-order Runge-Kutta algorithm. ###Code RK_method = "RK4" # Step 3: Generate Runge-Kutta-based (RK-based) timestepping code. # As described above the Table of Contents, this is a 3-step process: # 3.A: Evaluate RHSs (RHS_string) # 3.B: Apply boundary conditions (post_RHS_string, pt 1) import MoLtimestepping.C_Code_Generation as MoL from MoLtimestepping.RK_Butcher_Table_Dictionary import Butcher_dict RK_order = Butcher_dict[RK_method][1] cmd.mkdir(os.path.join(Ccodesdir,"MoLtimestepping/")) MoL.MoL_C_Code_Generation(RK_method, RHS_string = """ GiRaFFE_NRPy_RHSs(&params,auxevol_gfs,RK_INPUT_GFS,RK_OUTPUT_GFS);""", post_RHS_string = """ GiRaFFE_NRPy_post_step(&params,xx,auxevol_gfs,RK_OUTPUT_GFS,n+1);\n""", outdir = os.path.join(Ccodesdir,"MoLtimestepping/")) ###Output _____no_output_____ ###Markdown Step 3: Import `GiRaFFEfood_NRPy` initial data modules \[Back to [top](toc)\]$$\label{gf_id}$$With the preliminaries out of the way, we will write the C functions to set up initial data. There are two categories of initial data that must be set: the spacetime metric variables, and the GRFFE plasma variables. 
We will set up the spacetime first, namely the Minkowski spacetime. ###Code gammaDD = ixp.zerorank2(DIM=3) for i in range(3): for j in range(3): if i==j: gammaDD[i][j] = sp.sympify(1) # else: leave as zero betaU = ixp.zerorank1() # All should be 0 alpha = sp.sympify(1) # Description and options for this initial data desc = "Generate a flat spacetime metric." loopopts_id ="AllPoints" # we don't need to read coordinates for flat spacetime. # For testing: Also set inverse metric: gammaUU, unused_gammaDET = ixp.symm_matrix_inverter3x3(gammaDD) name = "set_initial_spacetime_metric_data" values_to_print = [ lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD00"),rhs=gammaDD[0][0]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD01"),rhs=gammaDD[0][1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD02"),rhs=gammaDD[0][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD11"),rhs=gammaDD[1][1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD12"),rhs=gammaDD[1][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD22"),rhs=gammaDD[2][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU0"),rhs=betaU[0]), lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU1"),rhs=betaU[1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU2"),rhs=betaU[2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","alpha"),rhs=alpha) ] outCfunction( outfile = os.path.join(Ccodesdir,name+".h"), desc=desc, name=name, params ="const paramstruct *params,REAL *xx[3],REAL *auxevol_gfs", body = fin.FD_outputC("returnstring",values_to_print,params="outCverbose=False").replace("IDX4","IDX4S"), loopopts = loopopts_id) ###Output Output C function set_initial_spacetime_metric_data() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/set_initial_spacetime_metric_data.h ###Markdown Now, we will write out the initials data function for the GRFFE variables. ###Code initial_data_dir = os.path.join(Ccodesdir,"InitialData/") cmd.mkdir(initial_data_dir) ID_opts = ["AlfvenWave", "ThreeAlfvenWaves", "DegenAlfvenWave", "FastWave", "FFEBD"] for initial_data in ID_opts: if initial_data=="AlfvenWave": import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_1D_tests as gid gid.GiRaFFEfood_NRPy_1D_tests(stagger = True) desc = "Generate Alfven wave 1D initial data for GiRaFFEfood_NRPy." elif initial_data=="ThreeAlfvenWaves": import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_1D_tests_three_waves as gid gid.GiRaFFEfood_NRPy_1D_tests_three_waves(stagger = True) desc = "Generate three Alfven wave 1D initial data for GiRaFFEfood_NRPy." elif initial_data=="DegenAlfvenWave": import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_1D_tests_degen_Alfven_wave as gid gid.GiRaFFEfood_NRPy_1D_tests_degen_Alfven_wave(stagger = True) desc = "Generate degenerate Alfven wave 1D initial data for GiRaFFEfood_NRPy." elif initial_data=="FastWave": import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_1D_tests_fast_wave as gid gid.GiRaFFEfood_NRPy_1D_tests_fast_wave(stagger = True) desc = "Generate fast wave 1D initial data for GiRaFFEfood_NRPy." elif initial_data=="FFEBD": import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_1D_tests_FFE_breakdown as gid gid.GiRaFFEfood_NRPy_1D_tests_FFE_breakdown(stagger = True) desc = "Generate FFE breakdown 1D initial data for GiRaFFEfood_NRPy." 
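    # Whichever branch ran above, the imported module (gid) now holds symbolic expressions for
    # A_i, the Valencia 3-velocity, and B^i; the block below writes one C function per
    # initial-data option from those expressions, with psi6Phi initialized to zero.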
name = initial_data values_to_print = [\ lhrh(lhs=gri.gfaccess("out_gfs","AD0"),rhs=gid.AD[0]),\ lhrh(lhs=gri.gfaccess("out_gfs","AD1"),rhs=gid.AD[1]),\ lhrh(lhs=gri.gfaccess("out_gfs","AD2"),rhs=gid.AD[2]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU0"),rhs=gid.ValenciavU[0]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU1"),rhs=gid.ValenciavU[1]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU2"),rhs=gid.ValenciavU[2]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","BU0"),rhs=gid.BU[0]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","BU1"),rhs=gid.BU[1]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","BU2"),rhs=gid.BU[2]),\ lhrh(lhs=gri.gfaccess("out_gfs","psi6Phi"),rhs=sp.sympify(0))\ ] outCfunction( outfile = os.path.join(initial_data_dir,name+".c"), desc=desc, name=name, params ="const paramstruct *params, REAL *xx[3], REAL *auxevol_gfs, REAL *out_gfs", body = fin.FD_outputC("returnstring",values_to_print,params="outCverbose=False").replace("IDX4","IDX4S"), rel_path_to_Cparams='../', loopopts ="AllPoints,Read_xxs") inital_data_body = """ const char *option1 = "AlfvenWave"; const char *option2 = "ThreeAlfvenWaves"; const char *option3 = "DegenAlfvenWave"; const char *option4 = "FastWave"; const char *option5 = "FFEBD"; if (strcmp(initial_data_option, option1) == 0) { AlfvenWave(params, xx, auxevol_gfs, out_gfs); } else if (strcmp(initial_data_option, option2) == 0) { ThreeAlfvenWaves(params, xx, auxevol_gfs, out_gfs); } else if (strcmp(initial_data_option, option3) == 0) { DegenAlfvenWave(params, xx, auxevol_gfs, out_gfs); } else if (strcmp(initial_data_option, option4) == 0) { FastWave(params, xx, auxevol_gfs, out_gfs); } else if (strcmp(initial_data_option, option5) == 0) { FFEBD(params, xx, auxevol_gfs, out_gfs); } else { printf("ERROR: Invalid choice of initial data."); exit(1); } """ name = "initial_data" desc = "Main initial data function." 
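# The initial_data() wrapper written below simply string-compares its first argument against the
# five options and dispatches to the matching generated C function (see inital_data_body above).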
includes = ["AlfvenWave.c", "ThreeAlfvenWaves.c", "DegenAlfvenWave.c", "FastWave.c", "FFEBD.c"] outCfunction( outfile = os.path.join(initial_data_dir,name+".h"), desc=desc, name=name, params ="const char *initial_data_option, const paramstruct *restrict params,REAL *xx[3],REAL *restrict auxevol_gfs,REAL *restrict out_gfs", body = inital_data_body, includes = includes, prefunc="#include <string.h>", rel_path_to_Cparams='../', loopopts ="") ###Output Output C function AlfvenWave() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/AlfvenWave.c Output C function ThreeAlfvenWaves() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/ThreeAlfvenWaves.c Output C function DegenAlfvenWave() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/DegenAlfvenWave.c Output C function FastWave() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/FastWave.c Output C function FFEBD() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/FFEBD.c Output C function initial_data() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/initial_data.h ###Markdown Step 4: Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h` \[Back to [top](toc)\]$$\label{cparams}$$Based on declared NRPy+ Cparameters, first we generate `declare_Cparameters_struct.h`, `set_Cparameters_default.h`, and `set_Cparameters[-SIMD].h`.Then we output `free_parameters.h`, which sets initial data parameters, as well as grid domain & reference metric parameters, applying `domain_size` and `sinh_width`/`SymTP_bScale` (if applicable) as set above ###Code # Step 3.e: Output C codes needed for declaring and setting Cparameters; also set free_parameters.h # Step 3.e.i: Generate declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h par.generate_Cparameters_Ccodes(os.path.join(Ccodesdir)) # Step 3.e.ii: Set free_parameters.h with open(os.path.join(Ccodesdir,"free_parameters.h"),"w") as file: file.write("""// Override parameter defaults with values based on command line arguments and NGHOSTS. params.Nxx0 = atoi(argv[1]); params.Nxx1 = atoi(argv[2]); params.Nxx2 = atoi(argv[3]); params.Nxx_plus_2NGHOSTS0 = params.Nxx0 + 2*NGHOSTS; params.Nxx_plus_2NGHOSTS1 = params.Nxx1 + 2*NGHOSTS; params.Nxx_plus_2NGHOSTS2 = params.Nxx2 + 2*NGHOSTS; // Step 0d: Set up space and time coordinates // Step 0d.i: Declare \Delta x^i=dxx{0,1,2} and invdxx{0,1,2}, as well as xxmin[3] and xxmax[3]: const REAL xxmin[3] = {-1.3255,-0.085,-0.085}; const REAL xxmax[3] = { 1.6745, 0.115, 0.115}; params.dxx0 = (xxmax[0] - xxmin[0]) / ((REAL)params.Nxx0+1); params.dxx1 = (xxmax[1] - xxmin[1]) / ((REAL)params.Nxx1+1); params.dxx2 = (xxmax[2] - xxmin[2]) / ((REAL)params.Nxx2+1); printf("dxx0,dxx1,dxx2 = %.5e,%.5e,%.5e\\n",params.dxx0,params.dxx1,params.dxx2); params.invdx0 = 1.0 / params.dxx0; params.invdx1 = 1.0 / params.dxx1; params.invdx2 = 1.0 / params.dxx2; const int poison_grids = 0; // Standard GRFFE parameters: params.GAMMA_SPEED_LIMIT = 2000.0; params.diss_strength = 0.1; """) ###Output _____no_output_____ ###Markdown Step 4: Set up boundary condition functions for chosen singular, curvilinear coordinate system \[Back to [top](toc)\]$$\label{bc_functs}$$Next apply singular, curvilinear coordinate boundary conditions [as documented in the corresponding NRPy+ tutorial notebook](Tutorial-Start_to_Finish-Curvilinear_BCs.ipynb)...But, for the moment, we're actually just using this because it writes the file `gridfunction_defines.h`. 
###Code import CurviBoundaryConditions.CurviBoundaryConditions as cbcs cbcs.Set_up_CurviBoundaryConditions(os.path.join(Ccodesdir,"boundary_conditions/"),Cparamspath=os.path.join("../"),enable_copy_of_static_Ccodes=False) ###Output Wrote to file "GiRaFFE_staggered_1D_Tests_standalone_Ccodes/boundary_conditions/parity_conditions_symbolic_dot_products.h" Evolved parity: ( AD0:1, AD1:2, AD2:3, StildeD0:1, StildeD1:2, StildeD2:3, psi6Phi:0 ) AuxEvol parity: ( BU0:1, BU1:2, BU2:3, B_lU0:1, B_lU1:2, B_lU2:3, B_rU0:1, B_rU1:2, B_rU2:3, BstaggerU0:1, BstaggerU1:2, BstaggerU2:3, Bstagger_lU0:1, Bstagger_lU1:2, Bstagger_lU2:3, Bstagger_rU0:1, Bstagger_rU1:2, Bstagger_rU2:3, Stilde_flux_HLLED0:1, Stilde_flux_HLLED1:2, Stilde_flux_HLLED2:3, ValenciavU0:1, ValenciavU1:2, ValenciavU2:3, Valenciav_lU0:1, Valenciav_lU1:2, Valenciav_lU2:3, Valenciav_llU0:1, Valenciav_llU1:2, Valenciav_llU2:3, Valenciav_lrU0:1, Valenciav_lrU1:2, Valenciav_lrU2:3, Valenciav_rU0:1, Valenciav_rU1:2, Valenciav_rU2:3, Valenciav_rlU0:1, Valenciav_rlU1:2, Valenciav_rlU2:3, Valenciav_rrU0:1, Valenciav_rrU1:2, Valenciav_rrU2:3, alpha:0, alpha_face:0, betaU0:1, betaU1:2, betaU2:3, beta_faceU0:1, beta_faceU1:2, beta_faceU2:3, cmax_x:0, cmax_y:0, cmax_z:0, cmin_x:0, cmin_y:0, cmin_z:0, gammaDD00:4, gammaDD01:5, gammaDD02:6, gammaDD11:7, gammaDD12:8, gammaDD22:9, gamma_faceDD00:4, gamma_faceDD01:5, gamma_faceDD02:6, gamma_faceDD11:7, gamma_faceDD12:8, gamma_faceDD22:9, psi6_temp:0, psi6center:0 ) Wrote to file "GiRaFFE_staggered_1D_Tests_standalone_Ccodes/boundary_conditions/EigenCoord_Cart_to_xx.h" ###Markdown Step 5: `GiRaFFE_NRPy_standalone.c`: The Main C Code \[Back to [top](toc)\]$$\label{mainc}$$ ###Code # Part P0: Define REAL, set the number of ghost cells NGHOSTS (from NRPy+'s FD_CENTDERIVS_ORDER), # and set the CFL_FACTOR (which can be overwritten at the command line) with open(os.path.join(Ccodesdir,"GiRaFFE_NRPy_REAL__NGHOSTS__CFL_FACTOR.h"), "w") as file: file.write(""" // Part P0.a: Set the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER #define NGHOSTS """+str(3)+""" #define NGHOSTS_A2B """+str(2)+""" // Part P0.b: Set the numerical precision (REAL) to double, ensuring all floating point // numbers are stored to at least ~16 significant digits #define REAL """+REAL+""" // Part P0.c: Set the CFL Factor. Can be overwritten at command line. REAL CFL_FACTOR = """+str(default_CFL_FACTOR)+";") %%writefile $Ccodesdir/GiRaFFE_NRPy_standalone.c // Step P0: Define REAL and NGHOSTS; and declare CFL_FACTOR. This header is generated in NRPy+. #include "GiRaFFE_NRPy_REAL__NGHOSTS__CFL_FACTOR.h" #include "declare_Cparameters_struct.h" const int NSKIP_1D_OUTPUT = 1; // Step P1: Import needed header files #include "stdio.h" #include "stdlib.h" #include "math.h" #include "time.h" #include "stdint.h" // Needed for Windows GCC 6.x compatibility #ifndef M_PI #define M_PI 3.141592653589793238462643383279502884L #endif #ifndef M_SQRT1_2 #define M_SQRT1_2 0.707106781186547524400844362104849039L #endif // Step P2: Declare the IDX4S(gf,i,j,k) macro, which enables us to store 4-dimensions of // data in a 1D array. In this case, consecutive values of "i" // (all other indices held to a fixed value) are consecutive in memory, where // consecutive values of "j" (fixing all other indices) are separated by // Nxx_plus_2NGHOSTS0 elements in memory. Similarly, consecutive values of // "k" are separated by Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1 in memory, etc. 
#define IDX4S(g,i,j,k) \
( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) + Nxx_plus_2NGHOSTS2 * (g) ) ) )
#define IDX4ptS(g,idx) ( (idx) + (Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2) * (g) )
#define IDX3S(i,j,k) ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) ) ) )
#define LOOP_REGION(i0min,i0max, i1min,i1max, i2min,i2max) \
  for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++)
#define LOOP_ALL_GFS_GPS(ii) _Pragma("omp parallel for") \
  for(int (ii)=0;(ii)<Nxx_plus_2NGHOSTS_tot*NUM_EVOL_GFS;(ii)++)

// Step P3: Set gridfunction macros
#include "boundary_conditions/gridfunction_defines.h"

// Step P4: Include the RHS, BC, and primitive recovery functions
#include "GiRaFFE_NRPy_Main_Driver.h"

// Step P5: Include the initial data functions
#include "set_initial_spacetime_metric_data.h"
#include "InitialData/initial_data.h"

// main() function:
// Step 0: Read command-line input, set up grid structure, allocate memory for gridfunctions, set up coordinates
// Step 1: Set up scalar wave initial data
// Step 2: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm,
//         applying quadratic extrapolation outer boundary conditions.
// Step 3: Output relative error between numerical and exact solution.
// Step 4: Free all allocated memory
int main(int argc, const char *argv[]) {
    paramstruct params;
#include "set_Cparameters_default.h"

    // Step 0a: Read command-line input, error out if nonconformant
    if(argc != 5 || atoi(argv[1]) < NGHOSTS || atoi(argv[2]) < NGHOSTS || atoi(argv[3]) < NGHOSTS) {
        printf("Error: Expected four command-line arguments: ./GiRaFFE_NRPy_standalone [Nx] [Ny] [Nz] [initial_data_option],\n");
        printf("where Nx is the number of grid points in the x direction, and so forth,\n");
        printf("and [initial_data_option] is one of AlfvenWave, ThreeAlfvenWaves, DegenAlfvenWave, FastWave, or FFEBD.\n");
        printf("Nx,Ny,Nz MUST BE larger than NGHOSTS (= %d)\n",NGHOSTS);
        exit(1);
    }
    // Step 0c: Set free parameters, overwriting Cparameters defaults
    //          by hand or with command-line input, as desired.
#include "free_parameters.h"
#include "set_Cparameters-nopointer.h"

    // ... and then set up the numerical grid structure in time:
    const REAL t_final = 2.0;
    const REAL CFL_FACTOR = 0.5; // Set the CFL Factor

    // Step 0c: Allocate memory for gridfunctions
    const int Nxx_plus_2NGHOSTS_tot = Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2;
    // Step 0k: Allocate memory for gridfunctions
#include "MoLtimestepping/RK_Allocate_Memory.h"
    REAL *restrict auxevol_gfs = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot);
    REAL *evol_gfs_exact = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot);
    REAL *auxevol_gfs_exact = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot);

    // For debugging, it can be useful to set everything to NaN initially.
    if(poison_grids) {
        for(int ii=0;ii<NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) {
            y_n_gfs[ii] = 1.0/0.0;
            y_nplus1_running_total_gfs[ii] = 1.0/0.0;
            //k_odd_gfs[ii] = 1.0/0.0;
            //k_even_gfs[ii] = 1.0/0.0;
            diagnostic_output_gfs[ii] = 1.0/0.0;
            evol_gfs_exact[ii] = 1.0/0.0;
        }
        for(int ii=0;ii<NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) {
            auxevol_gfs[ii] = 1.0/0.0;
            auxevol_gfs_exact[ii] = 1.0/0.0;
        }
    }

    // Step 0d: Set up coordinates: Set dx, and then dt based on dx_min and CFL condition
    // This is probably already defined above, but just in case...
#ifndef MIN
#define MIN(A, B) ( ((A) < (B)) ? (A) : (B) )
#endif
    REAL dt = CFL_FACTOR * MIN(dxx0,MIN(dxx1,dxx2)); // CFL condition

    int Nt = (int)(t_final / dt + 0.5); // The number of points in time.
//Add 0.5 to account for C rounding down integers. // Step 0e: Set up cell-centered Cartesian coordinate grids REAL *xx[3]; xx[0] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS0); xx[1] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS1); xx[2] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS2); for(int j=0;j<Nxx_plus_2NGHOSTS0;j++) xx[0][j] = xxmin[0] + (j-NGHOSTS+1)*dxx0; for(int j=0;j<Nxx_plus_2NGHOSTS1;j++) xx[1][j] = xxmin[1] + (j-NGHOSTS+1)*dxx1; for(int j=0;j<Nxx_plus_2NGHOSTS2;j++) xx[2][j] = xxmin[2] + (j-NGHOSTS+1)*dxx2; // Step 1: Set up initial data to be exact solution at time=0: REAL time = 0.0; set_initial_spacetime_metric_data(&params, xx, auxevol_gfs); const char *initial_data_option = argv[4]; initial_data(initial_data_option, &params, xx, auxevol_gfs, y_n_gfs); // Fill in the remaining quantities GiRaFFE_compute_B_and_Bstagger_from_A(&params, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD00GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD01GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD02GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD11GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD12GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD22GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*PSI6_TEMPGF, /* Temporary storage,overwritten */ y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD0GF, y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD1GF, y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD2GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU0GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU1GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU2GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU0GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU1GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU2GF); //override_BU_with_old_GiRaFFE(&params,auxevol_gfs,0); GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs,y_n_gfs); // Extra stack, useful for debugging: GiRaFFE_NRPy_cons_to_prims(&params,xx,auxevol_gfs,y_n_gfs); for(int n=0;n<=Nt;n++) { // Main loop to progress forward in time. //for(int n=0;n<=1;n++) { // Main loop to progress forward in time. // Step 1a: Set current time to correct value & compute exact solution time = ((REAL)n)*dt; /* Step 2: Validation: Output relative error between numerical and exact solution, */ if(time == 0.0 || time == 0.5 || time == 1.0 || time == 2.0 || time == 0.02 || time == 0.56) { // Step 2c: Output relative error between exact & numerical at center of grid. const int i0mid=Nxx_plus_2NGHOSTS0/2; const int i1mid=Nxx_plus_2NGHOSTS1/2; const int i2mid=Nxx_plus_2NGHOSTS2/2; char filename[100]; sprintf(filename,"out%d__%s-%08d.txt", Nxx0, initial_data_option, n); FILE *out2D = fopen(filename, "w"); for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) { const int idx = IDX3S(i0,i1mid,i2mid); fprintf(out2D,"%.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e\n", xx[0][i0], auxevol_gfs[IDX4ptS(BU0GF,idx)],auxevol_gfs[IDX4ptS(BU1GF,idx)],auxevol_gfs[IDX4ptS(BU2GF,idx)], y_n_gfs[IDX4ptS(AD0GF,idx)],y_n_gfs[IDX4ptS(AD1GF,idx)],y_n_gfs[IDX4ptS(AD2GF,idx)], y_n_gfs[IDX4ptS(STILDED0GF,idx)],y_n_gfs[IDX4ptS(STILDED1GF,idx)],y_n_gfs[IDX4ptS(STILDED2GF,idx)], auxevol_gfs[IDX4ptS(VALENCIAVU0GF,idx)],auxevol_gfs[IDX4ptS(VALENCIAVU1GF,idx)],auxevol_gfs[IDX4ptS(VALENCIAVU2GF,idx)], y_n_gfs[IDX4ptS(PSI6PHIGF,idx)], time); } fclose(out2D); // For convergence testing, we'll shift the grid x -> x-1 and output initial data again, giving the exact solution. 
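      // Concretely: below we shift x -> x - mu_AW*t (mu_AW being the wave speed set by the
      // initial data), re-evaluate the initial data on the shifted coordinates to serve as the
      // reference "exact" solution, convert it to conservatives, and then undo the shift once
      // the exact-solution file has been written.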
LOOP_REGION(0,Nxx_plus_2NGHOSTS0,0,1,0,1) { xx[0][i0] += -mu_AW*time; //xx[0][i0] += -time; } set_initial_spacetime_metric_data(&params,xx,auxevol_gfs_exact); initial_data(initial_data_option, &params,xx,auxevol_gfs_exact,evol_gfs_exact); // Fill in the remaining quantities //driver_A_to_B(&params,evol_gfs_exact,auxevol_gfs_exact); GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs_exact,evol_gfs_exact); // And now, we'll set the grid back to rights. LOOP_REGION(0,Nxx_plus_2NGHOSTS0,0,1,0,1) { xx[0][i0] -= -mu_AW*time; //xx[0][i0] -= -time; } sprintf(filename,"out%d-%08d_exact.txt",Nxx0,n); FILE *out2D_exact = fopen(filename, "w"); for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) { const int idx = IDX3S(i0,i1mid,i2mid); fprintf(out2D_exact,"%.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e\n", xx[0][i0], auxevol_gfs_exact[IDX4ptS(BU0GF,idx)],auxevol_gfs_exact[IDX4ptS(BU1GF,idx)],auxevol_gfs_exact[IDX4ptS(BU2GF,idx)], evol_gfs_exact[IDX4ptS(AD0GF,idx)],evol_gfs_exact[IDX4ptS(AD1GF,idx)],evol_gfs_exact[IDX4ptS(AD2GF,idx)], evol_gfs_exact[IDX4ptS(STILDED0GF,idx)],evol_gfs_exact[IDX4ptS(STILDED1GF,idx)],evol_gfs_exact[IDX4ptS(STILDED2GF,idx)], auxevol_gfs_exact[IDX4ptS(VALENCIAVU0GF,idx)],auxevol_gfs_exact[IDX4ptS(VALENCIAVU1GF,idx)],auxevol_gfs_exact[IDX4ptS(VALENCIAVU2GF,idx)], evol_gfs_exact[IDX4ptS(PSI6PHIGF,idx)]); } fclose(out2D_exact); } // Step 3: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm, // applying quadratic extrapolation outer boundary conditions. // Step 3.b: Step forward one timestep (t -> t+dt) in time using // chosen RK-like MoL timestepping algorithm #include "MoLtimestepping/RK_MoL.h" } // End main loop to progress forward in time. // Step 4: Free all allocated memory #include "MoLtimestepping/RK_Free_Memory.h" free(auxevol_gfs); free(auxevol_gfs_exact); free(evol_gfs_exact); for(int i=0;i<3;i++) free(xx[i]); return 0; } ###Output Writing GiRaFFE_staggered_1D_Tests_standalone_Ccodes//GiRaFFE_NRPy_standalone.c ###Markdown Step 6: Compile generated C codes & perform GRFFE simulations \[Back to [top](toc)\]$$\label{compileexec}$$To aid in the cross-platform-compatible (with Windows, MacOS, & Linux) compilation and execution, we make use of `cmdline_helper` [(**Tutorial**)](Tutorial-cmdline_helper.ipynb). ###Code cmd.C_compile(os.path.join(Ccodesdir,"GiRaFFE_NRPy_standalone.c"), os.path.join(Ccodesdir,"output","GiRaFFE_NRPy_standalone"),compile_mode="optimized") # Change to output directory os.chdir(outdir) # Clean up existing output files cmd.delete_existing_files("out*.txt") cmd.delete_existing_files("out*.png") # ID options are: "AlfvenWave", "ThreeAlfvenWaves", "DegenAlfvenWave", "FastWave", "FFEBD" for opt in ID_opts: cmd.Execute("GiRaFFE_NRPy_standalone", "299 4 4 "+opt, "out_298"+opt+".txt") # cmd.Execute("GiRaFFE_NRPy_standalone", "1280 9 9 "+opt, "out_1280"+opt+".txt") # cmd.Execute("GiRaFFE_NRPy_standalone", "1280 32 32 "+opt, "out_"+opt+".txt") # cmd.Execute("GiRaFFE_NRPy_standalone", "149 9 9 AlfvenWave","out149.txt") # Return to root directory os.chdir(os.path.join("../../")) ###Output Compiling executable... (EXEC): Executing `gcc -Ofast -fopenmp -march=native -funroll-loops GiRaFFE_staggered_1D_Tests_standalone_Ccodes/GiRaFFE_NRPy_standalone.c -o GiRaFFE_staggered_1D_Tests_standalone_Ccodes/output/GiRaFFE_NRPy_standalone -lm`... (BENCH): Finished executing in 2.2119667530059814 seconds. Finished compilation. 
(EXEC): Executing `taskset -c 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 ./GiRaFFE_NRPy_standalone 299 4 4 AlfvenWave`... (BENCH): Finished executing in 7.819005966186523 seconds. (EXEC): Executing `taskset -c 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 ./GiRaFFE_NRPy_standalone 299 4 4 ThreeAlfvenWaves`... (BENCH): Finished executing in 7.619282245635986 seconds. (EXEC): Executing `taskset -c 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 ./GiRaFFE_NRPy_standalone 299 4 4 DegenAlfvenWave`... (BENCH): Finished executing in 7.620393991470337 seconds. (EXEC): Executing `taskset -c 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 ./GiRaFFE_NRPy_standalone 299 4 4 FastWave`... (BENCH): Finished executing in 7.823338985443115 seconds. (EXEC): Executing `taskset -c 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 ./GiRaFFE_NRPy_standalone 299 4 4 FFEBD`... (BENCH): Finished executing in 8.019806623458862 seconds. ###Markdown Step 7: Data Visualization \[Back to [top](toc)\]$$\label{plots}$$Now we plot the data and recreate figure 1 from the [GiRaFFE paper](https://arxiv.org/pdf/1704.00599.pdf). We reconstruct the electric field via$$E_i = -\epsilon_{ijk}v^j B^k$$the `calc_E` function below. We also calculate the FFE condition $B^2 - E^2$ below using the `calc_Bsquared_minus_Esquared` function. ###Code eDDD = ixp.LeviCivitaSymbol_dim3_rank3() def calc_E(data): VU0 = data[:, 10] VU1 = data[:, 11] VU2 = data[:, 12] BU0 = data[:, 1] BU1 = data[:, 2] BU2 = data[:, 3] VU = [VU0, VU1, VU2] BU = [BU0, BU1, BU2] ED = np.zeros((VU0.size, 3)) for i in range(3): for j in range(3): for k in range(3): ED[:,i] = ED[:,i] - eDDD[i][j][k]*VU[j]*BU[k] return ED def calc_Bsquared_minus_Esquared(data): EU = calc_E(data) BU0 = data[:, 1] BU1 = data[:, 2] BU2 = data[:, 3] return (BU0**2 + BU1**2 + BU2**2) - (EU[:,0]**2 + EU[:,1]**2 + EU[:,2]**2) import numpy as np import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import matplotlib as mpl # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 labels = ["x","BU0","BU1","BU2","AD0","AD1","AD2","StildeD0","StildeD1","StildeD2","ValenciavU0","ValenciavU1","ValenciavU2", "psi6Phi"] fig = plt.figure(figsize=(6, 15)) # spec = mpl.gridspec.GridSpec(ncols=6, nrows=2,wspace=0.65, hspace=0.4) # 6 columns evenly divides both 2 & 3 # ax1 = fig.add_subplot(spec[0,0:2]) # row 0 with axes spanning 2 cols on evens # ax2 = fig.add_subplot(spec[0,2:4]) # ax3 = fig.add_subplot(spec[0,4:]) # ax4 = fig.add_subplot(spec[1,1:3]) # row 0 with axes spanning 2 cols on odds # ax5 = fig.add_subplot(spec[1,3:5]) gs = gridspec.GridSpec(nrows=5, ncols=1, hspace=0.5) ax1 = fig.add_subplot(gs[0, 0]) ax2 = fig.add_subplot(gs[1, 0]) ax3 = fig.add_subplot(gs[2, 0]) ax4 = fig.add_subplot(gs[3, 0]) ax5 = fig.add_subplot(gs[4, 0]) Data_num_Fast_A = np.loadtxt(os.path.join(Ccodesdir,"output","out299__FastWave-00000000.txt")) Data_num_Fast_B = np.loadtxt(os.path.join(Ccodesdir,"output","out299__FastWave-00000100.txt")) E_Fast_A = calc_E(Data_num_Fast_A) E_Fast_B = calc_E(Data_num_Fast_B) ax1.scatter(Data_num_Fast_A[:,0], np.abs(E_Fast_A[:,2]), s=1,label = 't = 0') ax1.plot(Data_num_Fast_B[:,0], np.abs(E_Fast_B[:,2]), 'k-', label = 't = 0.5') ax1.set_xlim(-0.5, 1.5) ax1.set_ylim(0.6) ax1.text(0.95, 0.01, 'Fast Wave', verticalalignment='bottom', horizontalalignment='right', transform=ax1.transAxes, color='black', fontsize=14) ax1.set_xlabel('x') ax1.set_ylabel(r'$|E^z|$') ax1.legend() Data_num_Alf_A = np.loadtxt(os.path.join(Ccodesdir,"output","out299__AlfvenWave-00000000.txt")) Data_num_Alf_B = 
np.loadtxt(os.path.join(Ccodesdir,"output","out299__AlfvenWave-00000400.txt")) ax2.scatter(Data_num_Alf_A[:,0], Data_num_Alf_A[:,3], s=1, label = 't = 0') ax2.plot(Data_num_Alf_B[:,0], Data_num_Alf_B[:,3], 'k-', label = 't = 2.0') ax2.set_xlim(-1.5, 1.5) ax2.set_ylim(1.1) ax2.text(0.95, 0.01, 'Alfven Wave', verticalalignment='bottom', horizontalalignment='right', transform=ax2.transAxes, color='black', fontsize=14) ax2.set_xlabel('x') ax2.set_ylabel(r'$B^z$') ax2.legend(loc='center right') Data_num_DegenAlf_A = np.loadtxt(os.path.join(Ccodesdir,"output","out299__DegenAlfvenWave-00000000.txt")) Data_num_DegenAlf_B = np.loadtxt(os.path.join(Ccodesdir,"output","out299__DegenAlfvenWave-00000200.txt")) E_DegenAlf_A = calc_E(Data_num_DegenAlf_A) E_DegenAlf_B = calc_E(Data_num_DegenAlf_B) ax3.scatter(Data_num_DegenAlf_A[:,0], E_DegenAlf_A[:,1], s=1, label = 't = 0') ax3.plot(Data_num_DegenAlf_B[:,0], E_DegenAlf_B[:,1], 'k-', label = 't = 1.0') ax3.set_xlim(-1.5, 1.5) ax3.set_ylim(-1.35) ax3.text(0.95, 0.01, 'Deg. Alfven Wave', verticalalignment='bottom', horizontalalignment='right', transform=ax3.transAxes, color='black', fontsize=14) ax3.set_xlabel('x') ax3.set_ylabel(r'$E^y$') ax3.legend() # Data_num_ThreeAlf_A = np.loadtxt(os.path.join(Ccodesdir,"output","out149__ThreeAlfvenWaves-00000000.txt")) Data_num_ThreeAlf_B = np.loadtxt(os.path.join(Ccodesdir,"output","out299__ThreeAlfvenWaves-00000112.txt")) # ax2.plot(Data_num_ThreeAlf_A[:,0], Data_num_ThreeAlf_A[:,2], 'k-') ax4.scatter(Data_num_ThreeAlf_B[:,0], Data_num_ThreeAlf_B[:,2], s=1, label = 't = 0.56') ax4.set_xlim(-1.0, 1.0) # ax4.set_ylim() ax4.text(0.95, 0.01, 'Three Waves', verticalalignment='bottom', horizontalalignment='right', transform=ax4.transAxes, color='black', fontsize=14) ax4.set_xlabel('x') ax4.set_ylabel(r'$B^y$') ax4.legend(loc='center') Data_num_FFEBD_A = np.loadtxt(os.path.join(Ccodesdir,"output","out299__FFEBD-00000000.txt")) Data_num_FFEBD_B = np.loadtxt(os.path.join(Ccodesdir,"output","out299__FFEBD-00000004.txt")) B2mE2_A = calc_Bsquared_minus_Esquared(Data_num_FFEBD_A) B2mE2_B = calc_Bsquared_minus_Esquared(Data_num_FFEBD_B) ax5.scatter(Data_num_FFEBD_A[:,0], B2mE2_A, s=1, label = 't = 0') ax5.plot(Data_num_FFEBD_B[:,0], B2mE2_B, 'k-', label = 't = 0.02') ax5.set_xlim(-0.4, 0.6) ax5.text(0.95, 0.01, 'FFE Breakdown', verticalalignment='bottom', horizontalalignment='right', transform=ax5.transAxes, color='black', fontsize=14) ax5.set_xlabel('x') ax5.set_ylabel(r'$B^2 - E^2$') ax5.legend() plt.savefig(os.path.join(Ccodesdir,"output","NRPy-GiRaFFE"), dpi=800, bbox_inches="tight") plt.close(fig) img1 = plt.imread(os.path.join(Ccodesdir,"output","NRPy-GiRaFFE.png")) img2 = plt.imread(os.path.join("GiRaFFE_NRPy/example_par_files/figure1_GiRaFFE_paper.png")) NUM_ROWS = 1 IMGs_IN_ROW = 2 f, ax = plt.subplots(NUM_ROWS, IMGs_IN_ROW, figsize=(28,18)) plt.subplots_adjust(wspace=0.05) plt.axis('off') ax[0].imshow(img1) ax[1].imshow(img2) ax[0].set_title('image 1') ax[1].set_title('image 2') # title = 'side by side view of images' # f.suptitle(title, fontsize=16) plt.tight_layout() # plt.xticks([]) # plt.yticks([]) plt.show() ###Output _____no_output_____ ###Markdown Step 8: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. 
After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename[Tutorial-Start_to_Finish-GiRaFFE_NRPy-1D_tests-staggered.pdf](Tutorial-Start_to_Finish-GiRaFFE_NRPy-1D_tests-staggered.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.) ###Code import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-Start_to_Finish-GiRaFFE_NRPy-1D_tests-staggered",location_of_template_file=os.path.join("..")) ###Output Created Tutorial-Start_to_Finish-GiRaFFE_NRPy-1D_tests-staggered.tex, and compiled LaTeX file to PDF file Tutorial-Start_to_Finish- GiRaFFE_NRPy-1D_tests-staggered.pdf ###Markdown window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'UA-59152712-8'); Start-to-Finish Example: `GiRaFFE_NRPy` 1D tests Authors: Patrick Nelson & Terrence Pierre Jacques Adapted from [Start-to-Finish Example: Head-On Black Hole Collision](../Tutorial-Start_to_Finish-BSSNCurvilinear-Two_BHs_Collide.ipynb) This module compiles and runs code tests for all 1D initial data options available in GiRaFFE-NRPy+, evolving one-dimensional GRFFE waves. NRPy+ Source Code for this module: * Main python module for all 1D initial data: [GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_1D_tests.py](../../edit/in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_1D_tests.py) __Options:__ 1. [Fast Wave](Tutorial-GiRaFFEfood_NRPy_1D_tests-fast_wave.ipynb) 1. [Alfven Wave](Tutorial-GiRaFFEfood_NRPy_1D_alfven_wave.ipynb) 1. [Degenerate Alfven Wave](Tutorial-GiRaFFEfood_NRPy_1D_tests-degen_Alfven_wave.ipynb) 1. [Three Alfven Waves](Tutorial-GiRaFFEfood_NRPy_1D_tests-three_waves.ipynb) 1. 
[FFE Breakdown](Tutorial-GiRaFFEfood_NRPy_1D_tests-FFE_breakdown.ipynb)* [GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Afield_flux.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Afield_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy_staggered-Afield_flux.ipynb) Generates the expressions to find the flux term of the induction equation.* [GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_A2B.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_A2B.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy_staggered-A2B.ipynb) Generates the driver to compute the magnetic field from the vector potential/* [GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-BCs.ipynb) Generates the code to apply boundary conditions to the vector potential, scalar potential, and three-velocity.* [GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-C2P_P2C.ipynb) Generates the conservative-to-primitive and primitive-to-conservative solvers.* [GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Metric_Face_Values.ipynb) Generates code to interpolate metric gridfunctions to cell faces.* [GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-PPM.ipynb) Genearates code to reconstruct primitive variables on cell faces.* [GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Source_Terms.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Source_Terms.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy_staggered-Source_Terms.ipynb) Generates the expressions to find the flux term of the Poynting flux evolution equation.* [GiRaFFE_NRPy/Stilde_flux.py](../../edit/in_progress/GiRaFFE_NRPy/Stilde_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Stilde_flux.ipynb) Generates the expressions to find the flux term of the Poynting flux evolution equation.* [../GRFFE/equations.py](../../edit/GRFFE/equations.py) [\[**tutorial**\]](../Tutorial-GRFFE_Equations-Cartesian.ipynb) Generates code necessary to compute the source terms.* [../GRHD/equations.py](../../edit/GRHD/equations.py) [\[**tutorial**\]](../Tutorial-GRHD_Equations-Cartesian.ipynb) Generates code necessary to compute the source terms.Here we use NRPy+ to generate the C source code necessary to set up initial data for an Alfv&eacute;n wave (see [the original GiRaFFE paper](https://arxiv.org/pdf/1704.00599.pdf)). Then we use it to generate the RHS expressions for [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html) time integration based on the [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4). Table of Contents$$\label{toc}$$This notebook is organized as follows1. [Step 1](initializenrpy): Set core NRPy+ parameters for numerical grids1. [Step 2](grffe): Output C code for GRFFE evolution 1. [Step 2.a](mol): Output macros for Method of Lines timestepping1. [Step 3](gf_id): Import `GiRaFFEfood_NRPy` initial data modules1. [Step 4](cparams): Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h`1. [Step 5](mainc): `GiRaFFE_NRPy_standalone.c`: The Main C Code1. [Step 6](compileexec): Compile and execute C codes1. [Step 7](plots): Data Visualization1. 
[Step 8](latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file Step 1: Set up core functions and parameters for solving GRFFE equations \[Back to [top](toc)\]$$\label{setup}$$ ###Code import shutil, os, sys # Standard Python modules for multiplatform OS-level functions # First, we'll add the parent directory to the list of directories Python will check for modules. nrpy_dir_path = os.path.join("..") if nrpy_dir_path not in sys.path: sys.path.append(nrpy_dir_path) # Step P1: Import needed NRPy+ core modules: from outputC import outCfunction, lhrh # NRPy+: Core C code output module import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends import finite_difference as fin # NRPy+: Finite difference C code generation module import NRPy_param_funcs as par # NRPy+: Parameter interface import grid as gri # NRPy+: Functions having to do with numerical grids import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface # Step P2: Create C code output directory: Ccodesdir = os.path.join("GiRaFFE_staggered_1D_Tests_standalone_Ccodes/") # First remove C code output directory if it exists # Courtesy https://stackoverflow.com/questions/303200/how-do-i-remove-delete-a-folder-that-is-not-empty # !rm -r ScalarWaveCurvilinear_Playground_Ccodes shutil.rmtree(Ccodesdir, ignore_errors=True) # Then create a fresh directory cmd.mkdir(Ccodesdir) # Step P3: Create executable output directory: outdir = os.path.join(Ccodesdir,"output/") cmd.mkdir(outdir) # Step P5: Set timestepping algorithm (we adopt the Method of Lines) REAL = "double" # Best to use double here. default_CFL_FACTOR= 0.5 # (GETS OVERWRITTEN WHEN EXECUTED.) In pure axisymmetry (symmetry_axes = 2 below) 1.0 works fine. Otherwise 0.5 or lower. # Step P6: Set the finite differencing order to 2. par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",4) thismodule = "Start_to_Finish-GiRaFFE_NRPy-1D_tests" TINYDOUBLE = par.Cparameters("REAL", thismodule, "TINYDOUBLE", 1e-100) import GiRaFFE_NRPy.GiRaFFE_NRPy_Main_Driver_staggered as md # par.set_paramsvals_value("GiRaFFE_NRPy.GiRaFFE_NRPy_C2P_P2C::enforce_speed_limit_StildeD = False") par.set_paramsvals_value("GiRaFFE_NRPy.GiRaFFE_NRPy_C2P_P2C::enforce_current_sheet_prescription = False") ###Output _____no_output_____ ###Markdown Step 2: Output C code for GRFFE evolution \[Back to [top](toc)\]$$\label{grffe}$$We will first write the C codes needed for GRFFE evolution. We have already written a module to generate all these codes and call the functions in the appropriate order, so we will import that here. We will take the slightly unusual step of doing this before we generate the initial data functions because the main driver module will register all the gridfunctions we need. It will also generate functions that, in addition to their normal spot in the MoL timestepping, will need to be called during the initial data step to make sure all the variables are appropriately filled in. All of this is handled with a single call to `GiRaFFE_NRPy_Main_Driver_generate_all()`, which will register gridfunctions, write all the C code kernels, and write the C code functions to call those. 
###Code md.GiRaFFE_NRPy_Main_Driver_generate_all(Ccodesdir) ###Output Output C function calculate_StildeD0_source_term() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/RHSs/calculate_StildeD0_source_term.h Output C function calculate_StildeD1_source_term() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/RHSs/calculate_StildeD1_source_term.h Output C function calculate_StildeD2_source_term() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/RHSs/calculate_StildeD2_source_term.h Output C function calculate_Stilde_rhsD() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/RHSs/calculate_Stilde_rhsD.h Output C function GiRaFFE_NRPy_cons_to_prims() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/C2P/GiRaFFE_NRPy_cons_to_prims.h Output C function GiRaFFE_NRPy_prims_to_cons() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/C2P/GiRaFFE_NRPy_prims_to_cons.h ###Markdown Step 2.a: Output macros for Method of Lines timestepping \[Back to [top](toc)\]$$\label{mol}$$Now, we generate the code to implement the method of lines using the fourth-order Runge-Kutta algorithm. ###Code RK_method = "RK4" # Step 3: Generate Runge-Kutta-based (RK-based) timestepping code. # As described above the Table of Contents, this is a 3-step process: # 3.A: Evaluate RHSs (RHS_string) # 3.B: Apply boundary conditions (post_RHS_string, pt 1) import MoLtimestepping.C_Code_Generation as MoL from MoLtimestepping.RK_Butcher_Table_Dictionary import Butcher_dict RK_order = Butcher_dict[RK_method][1] cmd.mkdir(os.path.join(Ccodesdir,"MoLtimestepping/")) MoL.MoL_C_Code_Generation(RK_method, RHS_string = """ GiRaFFE_NRPy_RHSs(&params,auxevol_gfs,RK_INPUT_GFS,RK_OUTPUT_GFS);""", post_RHS_string = """ GiRaFFE_NRPy_post_step(&params,xx,auxevol_gfs,RK_OUTPUT_GFS,n+1);\n""", outdir = os.path.join(Ccodesdir,"MoLtimestepping/")) ###Output _____no_output_____ ###Markdown Step 3: Import `GiRaFFEfood_NRPy` initial data modules \[Back to [top](toc)\]$$\label{gf_id}$$With the preliminaries out of the way, we will write the C functions to set up initial data. There are two categories of initial data that must be set: the spacetime metric variables, and the GRFFE plasma variables. We will set up the spacetime first, namely the Minkowski spacetime. ###Code gammaDD = ixp.zerorank2(DIM=3) for i in range(3): for j in range(3): if i==j: gammaDD[i][j] = sp.sympify(1) # else: leave as zero betaU = ixp.zerorank1() # All should be 0 alpha = sp.sympify(1) # Description and options for this initial data desc = "Generate a flat spacetime metric." loopopts_id ="AllPoints" # we don't need to read coordinates for flat spacetime. 
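# Optional quick check on the flat metric just constructed (a sketch; sp and gammaDD are defined above):
# det(gammaDD) = 1 for the identity 3-metric, so sqrt(gamma) = psi^6 = 1 here and the densitized
# conservative variables (e.g. StildeD) coincide with their undensitized counterparts.
assert sp.Matrix(gammaDD).det() == 1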
# For testing: Also set inverse metric: gammaUU, unused_gammaDET = ixp.symm_matrix_inverter3x3(gammaDD) name = "set_initial_spacetime_metric_data" values_to_print = [ lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD00"),rhs=gammaDD[0][0]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD01"),rhs=gammaDD[0][1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD02"),rhs=gammaDD[0][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD11"),rhs=gammaDD[1][1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD12"),rhs=gammaDD[1][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD22"),rhs=gammaDD[2][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU0"),rhs=betaU[0]), lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU1"),rhs=betaU[1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU2"),rhs=betaU[2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","alpha"),rhs=alpha) ] outCfunction( outfile = os.path.join(Ccodesdir,name+".h"), desc=desc, name=name, params ="const paramstruct *params,REAL *xx[3],REAL *auxevol_gfs", body = fin.FD_outputC("returnstring",values_to_print,params="outCverbose=False"), loopopts = loopopts_id) ###Output Output C function set_initial_spacetime_metric_data() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/set_initial_spacetime_metric_data.h ###Markdown Now, we will write out the initials data function for the GRFFE variables. ###Code initial_data_dir = os.path.join(Ccodesdir,"InitialData/") cmd.mkdir(initial_data_dir) ID_opts = ["AlfvenWave", "ThreeAlfvenWaves", "DegenAlfvenWave", "FastWave", "FFEBD"] import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy as gid for initial_data in ID_opts: if initial_data=="AlfvenWave": gid.GiRaFFEfood_NRPy_generate_initial_data(ID_type = initial_data, stagger_enable = True) desc = "Generate Alfven wave 1D initial data for GiRaFFEfood_NRPy." elif initial_data=="ThreeAlfvenWaves": gid.GiRaFFEfood_NRPy_generate_initial_data(ID_type = "ThreeWaves", stagger_enable = True) desc = "Generate three Alfven wave 1D initial data for GiRaFFEfood_NRPy." elif initial_data=="DegenAlfvenWave": gid.GiRaFFEfood_NRPy_generate_initial_data(ID_type = initial_data, stagger_enable = True) desc = "Generate degenerate Alfven wave 1D initial data for GiRaFFEfood_NRPy." elif initial_data=="FastWave": gid.GiRaFFEfood_NRPy_generate_initial_data(ID_type = initial_data, stagger_enable = True) desc = "Generate fast wave 1D initial data for GiRaFFEfood_NRPy." elif initial_data=="FFEBD": gid.GiRaFFEfood_NRPy_generate_initial_data(ID_type = "FFE_Breakdown", stagger_enable = True) desc = "Generate FFE breakdown 1D initial data for GiRaFFEfood_NRPy." 
name = initial_data values_to_print = [ lhrh(lhs=gri.gfaccess("out_gfs","AD0"),rhs=gid.AD[0]), lhrh(lhs=gri.gfaccess("out_gfs","AD1"),rhs=gid.AD[1]), lhrh(lhs=gri.gfaccess("out_gfs","AD2"),rhs=gid.AD[2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU0"),rhs=gid.ValenciavU[0]), lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU1"),rhs=gid.ValenciavU[1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU2"),rhs=gid.ValenciavU[2]), # lhrh(lhs=gri.gfaccess("auxevol_gfs","BU0"),rhs=gid.BU[0]), # lhrh(lhs=gri.gfaccess("auxevol_gfs","BU1"),rhs=gid.BU[1]), # lhrh(lhs=gri.gfaccess("auxevol_gfs","BU2"),rhs=gid.BU[2]), lhrh(lhs=gri.gfaccess("out_gfs","psi6Phi"),rhs=sp.sympify(0)) ] outCfunction( outfile = os.path.join(initial_data_dir,name+".c"), desc=desc, name=name, params ="const paramstruct *params, REAL *xx[3], REAL *auxevol_gfs, REAL *out_gfs", body = fin.FD_outputC("returnstring",values_to_print,params="outCverbose=False"), rel_path_to_Cparams='../', loopopts ="AllPoints,Read_xxs") inital_data_body = """ const char *option1 = "AlfvenWave"; const char *option2 = "ThreeAlfvenWaves"; const char *option3 = "DegenAlfvenWave"; const char *option4 = "FastWave"; const char *option5 = "FFEBD"; if (strcmp(initial_data_option, option1) == 0) { AlfvenWave(params, xx, auxevol_gfs, out_gfs); } else if (strcmp(initial_data_option, option2) == 0) { ThreeAlfvenWaves(params, xx, auxevol_gfs, out_gfs); } else if (strcmp(initial_data_option, option3) == 0) { DegenAlfvenWave(params, xx, auxevol_gfs, out_gfs); } else if (strcmp(initial_data_option, option4) == 0) { FastWave(params, xx, auxevol_gfs, out_gfs); } else if (strcmp(initial_data_option, option5) == 0) { FFEBD(params, xx, auxevol_gfs, out_gfs); } else { printf("ERROR: Invalid choice of initial data."); exit(1); } """ name = "initial_data" desc = "Main initial data function." 
includes = ["AlfvenWave.c", "ThreeAlfvenWaves.c", "DegenAlfvenWave.c", "FastWave.c", "FFEBD.c"] outCfunction( outfile = os.path.join(initial_data_dir,name+".h"), desc=desc, name=name, params ="const char *initial_data_option, const paramstruct *restrict params,REAL *xx[3],REAL *restrict auxevol_gfs,REAL *restrict out_gfs", body = inital_data_body, includes = includes, prefunc="#include <string.h>", rel_path_to_Cparams='../', loopopts ="") ###Output Output C function AlfvenWave() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/AlfvenWave.c Output C function ThreeAlfvenWaves() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/ThreeAlfvenWaves.c Output C function DegenAlfvenWave() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/DegenAlfvenWave.c Output C function FastWave() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/FastWave.c Output C function FFEBD() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/FFEBD.c Output C function initial_data() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/initial_data.h ###Markdown Step 4: Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h` \[Back to [top](toc)\]$$\label{cparams}$$Based on declared NRPy+ Cparameters, first we generate `declare_Cparameters_struct.h`, `set_Cparameters_default.h`, and `set_Cparameters[-SIMD].h`.Then we output `free_parameters.h`, which sets initial data parameters, as well as grid domain & reference metric parameters, applying `domain_size` and `sinh_width`/`SymTP_bScale` (if applicable) as set above ###Code # Step 3.e: Output C codes needed for declaring and setting Cparameters; also set free_parameters.h # Step 3.e.i: Generate declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h par.generate_Cparameters_Ccodes(os.path.join(Ccodesdir)) # Step 3.e.ii: Set free_parameters.h with open(os.path.join(Ccodesdir,"free_parameters.h"),"w") as file: file.write("""// Override parameter defaults with values based on command line arguments and NGHOSTS. params.Nxx0 = atoi(argv[1]); params.Nxx1 = atoi(argv[2]); params.Nxx2 = atoi(argv[3]); params.Nxx_plus_2NGHOSTS0 = params.Nxx0 + 2*NGHOSTS; params.Nxx_plus_2NGHOSTS1 = params.Nxx1 + 2*NGHOSTS; params.Nxx_plus_2NGHOSTS2 = params.Nxx2 + 2*NGHOSTS; // Step 0d: Set up space and time coordinates // Step 0d.i: Declare \Delta x^i=dxx{0,1,2} and invdxx{0,1,2}, as well as xxmin[3] and xxmax[3]: const REAL xxmin[3] = {-1.3255,-0.085,-0.085}; const REAL xxmax[3] = { 1.6745, 0.115, 0.115}; params.dxx0 = (xxmax[0] - xxmin[0]) / ((REAL)params.Nxx0+1); params.dxx1 = (xxmax[1] - xxmin[1]) / ((REAL)params.Nxx1+1); params.dxx2 = (xxmax[2] - xxmin[2]) / ((REAL)params.Nxx2+1); printf("dxx0,dxx1,dxx2 = %.5e,%.5e,%.5e\\n",params.dxx0,params.dxx1,params.dxx2); params.invdx0 = 1.0 / params.dxx0; params.invdx1 = 1.0 / params.dxx1; params.invdx2 = 1.0 / params.dxx2; const int poison_grids = 0; // Standard GRFFE parameters: params.GAMMA_SPEED_LIMIT = 2000.0; params.diss_strength = 0.1; """) ###Output _____no_output_____ ###Markdown Step 4: Set up boundary condition functions for chosen singular, curvilinear coordinate system \[Back to [top](toc)\]$$\label{bc_functs}$$Next apply singular, curvilinear coordinate boundary conditions [as documented in the corresponding NRPy+ tutorial notebook](Tutorial-Start_to_Finish-Curvilinear_BCs.ipynb)...But, for the moment, we're actually just using this because it writes the file `gridfunction_defines.h`. 
###Code import CurviBoundaryConditions.CurviBoundaryConditions as cbcs cbcs.Set_up_CurviBoundaryConditions(os.path.join(Ccodesdir,"boundary_conditions/"),Cparamspath=os.path.join("../"),enable_copy_of_static_Ccodes=False) ###Output Wrote to file "GiRaFFE_staggered_1D_Tests_standalone_Ccodes/boundary_conditions/parity_conditions_symbolic_dot_products.h" Evolved parity: ( AD0:1, AD1:2, AD2:3, StildeD0:1, StildeD1:2, StildeD2:3, psi6Phi:0 ) AuxEvol parity: ( BU0:1, BU1:2, BU2:3, B_lU0:1, B_lU1:2, B_lU2:3, B_rU0:1, B_rU1:2, B_rU2:3, BstaggerU0:1, BstaggerU1:2, BstaggerU2:3, Bstagger_lU0:1, Bstagger_lU1:2, Bstagger_lU2:3, Bstagger_rU0:1, Bstagger_rU1:2, Bstagger_rU2:3, Stilde_flux_HLLED0:1, Stilde_flux_HLLED1:2, Stilde_flux_HLLED2:3, ValenciavU0:1, ValenciavU1:2, ValenciavU2:3, Valenciav_lU0:1, Valenciav_lU1:2, Valenciav_lU2:3, Valenciav_llU0:1, Valenciav_llU1:2, Valenciav_llU2:3, Valenciav_lrU0:1, Valenciav_lrU1:2, Valenciav_lrU2:3, Valenciav_rU0:1, Valenciav_rU1:2, Valenciav_rU2:3, Valenciav_rlU0:1, Valenciav_rlU1:2, Valenciav_rlU2:3, Valenciav_rrU0:1, Valenciav_rrU1:2, Valenciav_rrU2:3, alpha:0, alpha_face:0, betaU0:1, betaU1:2, betaU2:3, beta_faceU0:1, beta_faceU1:2, beta_faceU2:3, cmax_x:0, cmax_y:0, cmax_z:0, cmin_x:0, cmin_y:0, cmin_z:0, gammaDD00:4, gammaDD01:5, gammaDD02:6, gammaDD11:7, gammaDD12:8, gammaDD22:9, gamma_faceDD00:4, gamma_faceDD01:5, gamma_faceDD02:6, gamma_faceDD11:7, gamma_faceDD12:8, gamma_faceDD22:9, psi6_temp:0, psi6center:0 ) Wrote to file "GiRaFFE_staggered_1D_Tests_standalone_Ccodes/boundary_conditions/EigenCoord_Cart_to_xx.h" ###Markdown Step 5: `GiRaFFE_NRPy_standalone.c`: The Main C Code \[Back to [top](toc)\]$$\label{mainc}$$ ###Code # Part P0: Define REAL, set the number of ghost cells NGHOSTS (from NRPy+'s FD_CENTDERIVS_ORDER), # and set the CFL_FACTOR (which can be overwritten at the command line) with open(os.path.join(Ccodesdir,"GiRaFFE_NRPy_REAL__NGHOSTS__CFL_FACTOR.h"), "w") as file: file.write(""" // Part P0.a: Set the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER #define NGHOSTS """+str(3)+""" #define NGHOSTS_A2B """+str(2)+""" // Part P0.b: Set the numerical precision (REAL) to double, ensuring all floating point // numbers are stored to at least ~16 significant digits #define REAL """+REAL+""" // Part P0.c: Set the CFL Factor. Can be overwritten at command line. REAL CFL_FACTOR = """+str(default_CFL_FACTOR)+";") %%writefile $Ccodesdir/GiRaFFE_NRPy_standalone.c // Step P0: Define REAL and NGHOSTS; and declare CFL_FACTOR. This header is generated in NRPy+. #include "GiRaFFE_NRPy_REAL__NGHOSTS__CFL_FACTOR.h" #include "declare_Cparameters_struct.h" const int NSKIP_1D_OUTPUT = 1; // Step P1: Import needed header files #include "stdio.h" #include "stdlib.h" #include "math.h" #include "time.h" #include "stdint.h" // Needed for Windows GCC 6.x compatibility #ifndef M_PI #define M_PI 3.141592653589793238462643383279502884L #endif #ifndef M_SQRT1_2 #define M_SQRT1_2 0.707106781186547524400844362104849039L #endif // Step P2: Declare the IDX4S(gf,i,j,k) macro, which enables us to store 4-dimensions of // data in a 1D array. In this case, consecutive values of "i" // (all other indices held to a fixed value) are consecutive in memory, where // consecutive values of "j" (fixing all other indices) are separated by // Nxx_plus_2NGHOSTS0 elements in memory. Similarly, consecutive values of // "k" are separated by Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1 in memory, etc. 
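// As a concrete illustration (using the grid actually run later in this notebook:
// Nxx0=299, Nxx1=Nxx2=4 with NGHOSTS=3, so Nxx_plus_2NGHOSTS0=305 and Nxx_plus_2NGHOSTS1=Nxx_plus_2NGHOSTS2=10),
//   IDX3S(i0,i1,i2)    = i0 + 305*( i1 + 10*i2 ), and
//   IDX4S(gf,i0,i1,i2) = IDX3S(i0,i1,i2) + 305*10*10*gf,
// i.e. each gridfunction occupies one contiguous block of
// Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2 REALs -- exactly the offset that
// IDX4ptS(gf,idx) applies.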
#define IDX4S(g,i,j,k) \ ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) + Nxx_plus_2NGHOSTS2 * (g) ) ) ) #define IDX4ptS(g,idx) ( (idx) + (Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2) * (g) ) #define IDX3S(i,j,k) ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) ) ) ) #define LOOP_REGION(i0min,i0max, i1min,i1max, i2min,i2max) \ for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) #define LOOP_ALL_GFS_GPS(ii) _Pragma("omp parallel for") \ for(int (ii)=0;(ii)<Nxx_plus_2NGHOSTS_tot*NUM_EVOL_GFS;(ii)++) // Step P3: Set gridfunction macros #include "boundary_conditions/gridfunction_defines.h" // Step P4: Include the RHS, BC, and primitive recovery functions #include "GiRaFFE_NRPy_Main_Driver.h" // Step P5: Include the initial data functions #include "set_initial_spacetime_metric_data.h" #include "InitialData/initial_data.h" // main() function: // Step 0: Read command-line input, set up grid structure, allocate memory for gridfunctions, set up coordinates // Step 1: Set up scalar wave initial data // Step 2: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm, // applying quadratic extrapolation outer boundary conditions. // Step 3: Output relative error between numerical and exact solution. // Step 4: Free all allocated memory int main(int argc, const char *argv[]) { paramstruct params; #include "set_Cparameters_default.h" // Step 0a: Read command-line input, error out if nonconformant if(argc != 5 || atoi(argv[1]) < NGHOSTS || atoi(argv[2]) < NGHOSTS || atoi(argv[3]) < NGHOSTS) { printf("Error: Expected four command-line arguments: ./GiRaFFE_NRPy_standalone [Nx] [Ny] [Nz] [initial_data_option],\n"); printf("where Nx is the number of grid points in the x direction, and so forth.\n"); printf("Nx,Ny,Nz MUST BE larger than NGHOSTS (= %d)\n",NGHOSTS); exit(1); } // Step 0c: Set free parameters, overwriting Cparameters defaults // by hand or with command-line input, as desired. #include "free_parameters.h" #include "set_Cparameters-nopointer.h" // ... and then set up the numerical grid structure in time: const REAL t_final = 2.0; const REAL CFL_FACTOR = 0.5; // Set the CFL Factor // Step 0c: Allocate memory for gridfunctions const int Nxx_plus_2NGHOSTS_tot = Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2; // Step 0k: Allocate memory for gridfunctions #include "MoLtimestepping/RK_Allocate_Memory.h" REAL *restrict auxevol_gfs = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot); REAL *evol_gfs_exact = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot); REAL *auxevol_gfs_exact = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot); // For debugging, it can be useful to set everything to NaN initially. if(poison_grids) { for(int ii=0;ii<NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) { y_n_gfs[ii] = 1.0/0.0; y_nplus1_running_total_gfs[ii] = 1.0/0.0; //k_odd_gfs[ii] = 1.0/0.0; //k_even_gfs[ii] = 1.0/0.0; diagnostic_output_gfs[ii] = 1.0/0.0; evol_gfs_exact[ii] = 1.0/0.0; } for(int ii=0;ii<NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) { auxevol_gfs[ii] = 1.0/0.0; auxevol_gfs_exact[ii] = 1.0/0.0; } } // Step 0d: Set up coordinates: Set dx, and then dt based on dx_min and CFL condition // This is probably already defined above, but just in case... #ifndef MIN #define MIN(A, B) ( ((A) < (B)) ? (A) : (B) ) #endif REAL dt = CFL_FACTOR * MIN(dxx0,MIN(dxx1,dxx2)); // CFL condition int Nt = (int)(t_final / dt + 0.5); // The number of points in time. 
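// (Worked example, for reference only: with the grid used for the runs in this notebook,
//  Nxx0=299 and Nxx1=Nxx2=4, free_parameters.h gives dxx0 = 3.0/300 = 0.01 and
//  dxx1 = dxx2 = 0.2/5 = 0.04, so dt = 0.5*0.01 = 0.005 and t_final = 2.0 yields Nt = 400;
//  thus output iteration 00000400 corresponds to t = 2.0 and 00000100 to t = 0.5 in the
//  out299__*.txt files written below.)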
//Add 0.5 to account for C rounding down integers. // Step 0e: Set up cell-centered Cartesian coordinate grids REAL *xx[3]; xx[0] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS0); xx[1] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS1); xx[2] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS2); for(int j=0;j<Nxx_plus_2NGHOSTS0;j++) xx[0][j] = xxmin[0] + (j-NGHOSTS+1)*dxx0; for(int j=0;j<Nxx_plus_2NGHOSTS1;j++) xx[1][j] = xxmin[1] + (j-NGHOSTS+1)*dxx1; for(int j=0;j<Nxx_plus_2NGHOSTS2;j++) xx[2][j] = xxmin[2] + (j-NGHOSTS+1)*dxx2; // Step 1: Set up initial data to be exact solution at time=0: REAL time = 0.0; set_initial_spacetime_metric_data(&params, xx, auxevol_gfs); const char *initial_data_option = argv[4]; initial_data(initial_data_option, &params, xx, auxevol_gfs, y_n_gfs); // Fill in the remaining quantities GiRaFFE_compute_B_and_Bstagger_from_A(&params, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD00GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD01GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD02GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD11GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD12GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD22GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*PSI6_TEMPGF, /* Temporary storage,overwritten */ y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD0GF, y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD1GF, y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD2GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU0GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU1GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU2GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU0GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU1GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU2GF); //override_BU_with_old_GiRaFFE(&params,auxevol_gfs,0); GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs,y_n_gfs); // Extra stack, useful for debugging: GiRaFFE_NRPy_cons_to_prims(&params,xx,auxevol_gfs,y_n_gfs); for(int n=0;n<=Nt;n++) { // Main loop to progress forward in time. //for(int n=0;n<=1;n++) { // Main loop to progress forward in time. // Step 1a: Set current time to correct value & compute exact solution time = ((REAL)n)*dt; /* Step 2: Validation: Output relative error between numerical and exact solution, */ if(time == 0.0 || time == 0.5 || time == 1.0 || time == 2.0 || time == 0.02 || time == 0.56) { // Step 2c: Output relative error between exact & numerical at center of grid. const int i0mid=Nxx_plus_2NGHOSTS0/2; const int i1mid=Nxx_plus_2NGHOSTS1/2; const int i2mid=Nxx_plus_2NGHOSTS2/2; char filename[100]; sprintf(filename,"out%d__%s-%08d.txt", Nxx0, initial_data_option, n); FILE *out2D = fopen(filename, "w"); for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) { const int idx = IDX3S(i0,i1mid,i2mid); fprintf(out2D,"%.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e\n", xx[0][i0], auxevol_gfs[IDX4ptS(BU0GF,idx)],auxevol_gfs[IDX4ptS(BU1GF,idx)],auxevol_gfs[IDX4ptS(BU2GF,idx)], y_n_gfs[IDX4ptS(AD0GF,idx)],y_n_gfs[IDX4ptS(AD1GF,idx)],y_n_gfs[IDX4ptS(AD2GF,idx)], y_n_gfs[IDX4ptS(STILDED0GF,idx)],y_n_gfs[IDX4ptS(STILDED1GF,idx)],y_n_gfs[IDX4ptS(STILDED2GF,idx)], auxevol_gfs[IDX4ptS(VALENCIAVU0GF,idx)],auxevol_gfs[IDX4ptS(VALENCIAVU1GF,idx)],auxevol_gfs[IDX4ptS(VALENCIAVU2GF,idx)], y_n_gfs[IDX4ptS(PSI6PHIGF,idx)], time); } fclose(out2D); // For convergence testing, we'll shift the grid x -> x-1 and output initial data again, giving the exact solution. 
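// (More precisely, the loop below shifts x by -mu_AW*time rather than by -1: mu_AW is the
//  wave-speed parameter of the Alfven-wave initial data, so evaluating the initial profile
//  at x - mu_AW*t gives the advected "exact" reference solution. This simple shift is only
//  strictly meaningful for the single-wave tests.)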
LOOP_REGION(0,Nxx_plus_2NGHOSTS0,0,1,0,1) { xx[0][i0] += -mu_AW*time; //xx[0][i0] += -time; } set_initial_spacetime_metric_data(&params,xx,auxevol_gfs_exact); initial_data(initial_data_option, &params,xx,auxevol_gfs_exact,evol_gfs_exact); // Fill in the remaining quantities //driver_A_to_B(&params,evol_gfs_exact,auxevol_gfs_exact); GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs_exact,evol_gfs_exact); // And now, we'll set the grid back to rights. LOOP_REGION(0,Nxx_plus_2NGHOSTS0,0,1,0,1) { xx[0][i0] -= -mu_AW*time; //xx[0][i0] -= -time; } sprintf(filename,"out%d-%08d_exact.txt",Nxx0,n); FILE *out2D_exact = fopen(filename, "w"); for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) { const int idx = IDX3S(i0,i1mid,i2mid); fprintf(out2D_exact,"%.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e\n", xx[0][i0], auxevol_gfs_exact[IDX4ptS(BU0GF,idx)],auxevol_gfs_exact[IDX4ptS(BU1GF,idx)],auxevol_gfs_exact[IDX4ptS(BU2GF,idx)], evol_gfs_exact[IDX4ptS(AD0GF,idx)],evol_gfs_exact[IDX4ptS(AD1GF,idx)],evol_gfs_exact[IDX4ptS(AD2GF,idx)], evol_gfs_exact[IDX4ptS(STILDED0GF,idx)],evol_gfs_exact[IDX4ptS(STILDED1GF,idx)],evol_gfs_exact[IDX4ptS(STILDED2GF,idx)], auxevol_gfs_exact[IDX4ptS(VALENCIAVU0GF,idx)],auxevol_gfs_exact[IDX4ptS(VALENCIAVU1GF,idx)],auxevol_gfs_exact[IDX4ptS(VALENCIAVU2GF,idx)], evol_gfs_exact[IDX4ptS(PSI6PHIGF,idx)]); } fclose(out2D_exact); } // Step 3: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm, // applying quadratic extrapolation outer boundary conditions. // Step 3.b: Step forward one timestep (t -> t+dt) in time using // chosen RK-like MoL timestepping algorithm #include "MoLtimestepping/RK_MoL.h" } // End main loop to progress forward in time. // Step 4: Free all allocated memory #include "MoLtimestepping/RK_Free_Memory.h" free(auxevol_gfs); free(auxevol_gfs_exact); free(evol_gfs_exact); for(int i=0;i<3;i++) free(xx[i]); return 0; } ###Output Writing GiRaFFE_staggered_1D_Tests_standalone_Ccodes//GiRaFFE_NRPy_standalone.c ###Markdown Step 6: Compile generated C codes & perform GRFFE simulations \[Back to [top](toc)\]$$\label{compileexec}$$To aid in the cross-platform-compatible (with Windows, MacOS, & Linux) compilation and execution, we make use of `cmdline_helper` [(**Tutorial**)](Tutorial-cmdline_helper.ipynb). ###Code cmd.C_compile(os.path.join(Ccodesdir,"GiRaFFE_NRPy_standalone.c"), os.path.join(Ccodesdir,"output","GiRaFFE_NRPy_standalone"),compile_mode="optimized") # Change to output directory os.chdir(outdir) # Clean up existing output files cmd.delete_existing_files("out*.txt") cmd.delete_existing_files("out*.png") # ID options are: "AlfvenWave", "ThreeAlfvenWaves", "DegenAlfvenWave", "FastWave", "FFEBD" for opt in ID_opts: cmd.Execute("GiRaFFE_NRPy_standalone", "299 4 4 "+opt, "out_298"+opt+".txt") # cmd.Execute("GiRaFFE_NRPy_standalone", "1280 9 9 "+opt, "out_1280"+opt+".txt") # cmd.Execute("GiRaFFE_NRPy_standalone", "1280 32 32 "+opt, "out_"+opt+".txt") # cmd.Execute("GiRaFFE_NRPy_standalone", "149 9 9 AlfvenWave","out149.txt") # Return to root directory os.chdir(os.path.join("../../")) ###Output Compiling executable... (EXEC): Executing `gcc -std=gnu99 -Ofast -fopenmp -march=native -funroll-loops GiRaFFE_staggered_1D_Tests_standalone_Ccodes/GiRaFFE_NRPy_standalone.c -o GiRaFFE_staggered_1D_Tests_standalone_Ccodes/output/GiRaFFE_NRPy_standalone -lm`... (BENCH): Finished executing in 3.0177741050720215 seconds. Finished compilation. 
(EXEC): Executing `taskset -c 0,1,2,3 ./GiRaFFE_NRPy_standalone 299 4 4 AlfvenWave`... (BENCH): Finished executing in 15.846468687057495 seconds. (EXEC): Executing `taskset -c 0,1,2,3 ./GiRaFFE_NRPy_standalone 299 4 4 ThreeAlfvenWaves`... (BENCH): Finished executing in 15.84378170967102 seconds. (EXEC): Executing `taskset -c 0,1,2,3 ./GiRaFFE_NRPy_standalone 299 4 4 DegenAlfvenWave`... (BENCH): Finished executing in 15.844474077224731 seconds. (EXEC): Executing `taskset -c 0,1,2,3 ./GiRaFFE_NRPy_standalone 299 4 4 FastWave`... (BENCH): Finished executing in 15.850006580352783 seconds. (EXEC): Executing `taskset -c 0,1,2,3 ./GiRaFFE_NRPy_standalone 299 4 4 FFEBD`... (BENCH): Finished executing in 16.047165870666504 seconds. ###Markdown Step 7: Data Visualization \[Back to [top](toc)\]$$\label{plots}$$Now we plot the data and recreate figure 1 from the [GiRaFFE paper](https://arxiv.org/pdf/1704.00599.pdf). We reconstruct the electric field via$$E_i = -\epsilon_{ijk}v^j B^k$$the `calc_E` function below. We also calculate the FFE condition $B^2 - E^2$ below using the `calc_Bsquared_minus_Esquared` function. ###Code eDDD = ixp.LeviCivitaSymbol_dim3_rank3() def calc_E(data): VU0 = data[:, 10] VU1 = data[:, 11] VU2 = data[:, 12] BU0 = data[:, 1] BU1 = data[:, 2] BU2 = data[:, 3] VU = [VU0, VU1, VU2] BU = [BU0, BU1, BU2] ED = np.zeros((VU0.size, 3)) for i in range(3): for j in range(3): for k in range(3): ED[:,i] = ED[:,i] - eDDD[i][j][k]*VU[j]*BU[k] return ED def calc_Bsquared_minus_Esquared(data): EU = calc_E(data) BU0 = data[:, 1] BU1 = data[:, 2] BU2 = data[:, 3] return (BU0**2 + BU1**2 + BU2**2) - (EU[:,0]**2 + EU[:,1]**2 + EU[:,2]**2) import numpy as np import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import matplotlib as mpl # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 labels = ["x","BU0","BU1","BU2","AD0","AD1","AD2","StildeD0","StildeD1","StildeD2","ValenciavU0","ValenciavU1","ValenciavU2", "psi6Phi"] fig = plt.figure(figsize=(6, 15)) # spec = mpl.gridspec.GridSpec(ncols=6, nrows=2,wspace=0.65, hspace=0.4) # 6 columns evenly divides both 2 & 3 # ax1 = fig.add_subplot(spec[0,0:2]) # row 0 with axes spanning 2 cols on evens # ax2 = fig.add_subplot(spec[0,2:4]) # ax3 = fig.add_subplot(spec[0,4:]) # ax4 = fig.add_subplot(spec[1,1:3]) # row 0 with axes spanning 2 cols on odds # ax5 = fig.add_subplot(spec[1,3:5]) gs = gridspec.GridSpec(nrows=5, ncols=1, hspace=0.5) ax1 = fig.add_subplot(gs[0, 0]) ax2 = fig.add_subplot(gs[1, 0]) ax3 = fig.add_subplot(gs[2, 0]) ax4 = fig.add_subplot(gs[3, 0]) ax5 = fig.add_subplot(gs[4, 0]) Data_num_Fast_A = np.loadtxt(os.path.join(Ccodesdir,"output","out299__FastWave-00000000.txt")) Data_num_Fast_B = np.loadtxt(os.path.join(Ccodesdir,"output","out299__FastWave-00000100.txt")) E_Fast_A = calc_E(Data_num_Fast_A) E_Fast_B = calc_E(Data_num_Fast_B) ax1.scatter(Data_num_Fast_A[:,0], np.abs(E_Fast_A[:,2]), s=1,label = 't = 0') ax1.plot(Data_num_Fast_B[:,0], np.abs(E_Fast_B[:,2]), 'k-', label = 't = 0.5') ax1.set_xlim(-0.5, 1.5) ax1.set_ylim(0.6) ax1.text(0.95, 0.01, 'Fast Wave', verticalalignment='bottom', horizontalalignment='right', transform=ax1.transAxes, color='black', fontsize=14) ax1.set_xlabel('x') ax1.set_ylabel(r'$|E^z|$') ax1.legend() Data_num_Alf_A = np.loadtxt(os.path.join(Ccodesdir,"output","out299__AlfvenWave-00000000.txt")) Data_num_Alf_B = np.loadtxt(os.path.join(Ccodesdir,"output","out299__AlfvenWave-00000400.txt")) ax2.scatter(Data_num_Alf_A[:,0], Data_num_Alf_A[:,3], s=1, label = 't = 0') ax2.plot(Data_num_Alf_B[:,0], 
Data_num_Alf_B[:,3], 'k-', label = 't = 2.0') ax2.set_xlim(-1.5, 1.5) ax2.set_ylim(1.1) ax2.text(0.95, 0.01, 'Alfven Wave', verticalalignment='bottom', horizontalalignment='right', transform=ax2.transAxes, color='black', fontsize=14) ax2.set_xlabel('x') ax2.set_ylabel(r'$B^z$') ax2.legend(loc='center right') Data_num_DegenAlf_A = np.loadtxt(os.path.join(Ccodesdir,"output","out299__DegenAlfvenWave-00000000.txt")) Data_num_DegenAlf_B = np.loadtxt(os.path.join(Ccodesdir,"output","out299__DegenAlfvenWave-00000200.txt")) E_DegenAlf_A = calc_E(Data_num_DegenAlf_A) E_DegenAlf_B = calc_E(Data_num_DegenAlf_B) ax3.scatter(Data_num_DegenAlf_A[:,0], E_DegenAlf_A[:,1], s=1, label = 't = 0') ax3.plot(Data_num_DegenAlf_B[:,0], E_DegenAlf_B[:,1], 'k-', label = 't = 1.0') ax3.set_xlim(-1.5, 1.5) ax3.set_ylim(-1.35) ax3.text(0.95, 0.01, 'Deg. Alfven Wave', verticalalignment='bottom', horizontalalignment='right', transform=ax3.transAxes, color='black', fontsize=14) ax3.set_xlabel('x') ax3.set_ylabel(r'$E^y$') ax3.legend() # Data_num_ThreeAlf_A = np.loadtxt(os.path.join(Ccodesdir,"output","out149__ThreeAlfvenWaves-00000000.txt")) Data_num_ThreeAlf_B = np.loadtxt(os.path.join(Ccodesdir,"output","out299__ThreeAlfvenWaves-00000112.txt")) # ax2.plot(Data_num_ThreeAlf_A[:,0], Data_num_ThreeAlf_A[:,2], 'k-') ax4.scatter(Data_num_ThreeAlf_B[:,0], Data_num_ThreeAlf_B[:,2], s=1, label = 't = 0.56') ax4.set_xlim(-1.0, 1.0) # ax4.set_ylim() ax4.text(0.95, 0.01, 'Three Waves', verticalalignment='bottom', horizontalalignment='right', transform=ax4.transAxes, color='black', fontsize=14) ax4.set_xlabel('x') ax4.set_ylabel(r'$B^y$') ax4.legend(loc='center') Data_num_FFEBD_A = np.loadtxt(os.path.join(Ccodesdir,"output","out299__FFEBD-00000000.txt")) Data_num_FFEBD_B = np.loadtxt(os.path.join(Ccodesdir,"output","out299__FFEBD-00000004.txt")) B2mE2_A = calc_Bsquared_minus_Esquared(Data_num_FFEBD_A) B2mE2_B = calc_Bsquared_minus_Esquared(Data_num_FFEBD_B) ax5.scatter(Data_num_FFEBD_A[:,0], B2mE2_A, s=1, label = 't = 0') ax5.plot(Data_num_FFEBD_B[:,0], B2mE2_B, 'k-', label = 't = 0.02') ax5.set_xlim(-0.4, 0.6) ax5.text(0.95, 0.01, 'FFE Breakdown', verticalalignment='bottom', horizontalalignment='right', transform=ax5.transAxes, color='black', fontsize=14) ax5.set_xlabel('x') ax5.set_ylabel(r'$B^2 - E^2$') ax5.legend() plt.savefig(os.path.join(Ccodesdir,"output","NRPy-GiRaFFE"), dpi=800, bbox_inches="tight") plt.close(fig) img1 = plt.imread(os.path.join(Ccodesdir,"output","NRPy-GiRaFFE.png")) img2 = plt.imread(os.path.join("GiRaFFE_NRPy/example_par_files/figure1_GiRaFFE_paper.png")) NUM_ROWS = 1 IMGs_IN_ROW = 2 f, ax = plt.subplots(NUM_ROWS, IMGs_IN_ROW, figsize=(28,18)) plt.subplots_adjust(wspace=0.05) plt.axis('off') ax[0].imshow(img1) ax[1].imshow(img2) ax[0].set_title('image 1') ax[1].set_title('image 2') # title = 'side by side view of images' # f.suptitle(title, fontsize=16) plt.tight_layout() # plt.xticks([]) # plt.yticks([]) plt.show() ###Output _____no_output_____ ###Markdown Step 8: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. 
After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename [Tutorial-Start_to_Finish-GiRaFFE_NRPy-1D_tests-staggered.pdf](Tutorial-Start_to_Finish-GiRaFFE_NRPy-1D_tests-staggered.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.) ###Code import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-Start_to_Finish-GiRaFFE_NRPy-1D_tests-staggered",location_of_template_file=os.path.join("..")) ###Output Created Tutorial-Start_to_Finish-GiRaFFE_NRPy-1D_tests-staggered.tex, and compiled LaTeX file to PDF file Tutorial-Start_to_Finish- GiRaFFE_NRPy-1D_tests-staggered.pdf ###Markdown Start-to-Finish Example: `GiRaFFE_NRPy` 1D tests Author: Patrick Nelson Adapted from [Start-to-Finish Example: Head-On Black Hole Collision](../Tutorial-Start_to_Finish-BSSNCurvilinear-Two_BHs_Collide.ipynb) This module implements a basic GRFFE code to evolve one-dimensional GRFFE waves. NRPy+ Source Code for this module: * [GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Exact_Wald.py](../../edit/in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Exact_Wald.py) [\[**tutorial**\]](Tutorial-GiRaFFEfood_NRPy_Exact_Wald.ipynb) Generates Exact Wald initial data* [GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Aligned_Rotator.py](../../edit/in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Aligned_Rotator.py) [\[**tutorial**\]](Tutorial-GiRaFFEfood_NRPy_Aligned_Rotator.ipynb) Generates Aligned Rotator initial data* [GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_1D_tests.py](../../edit/in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_1D_tests.py) [\[**tutorial**\]](Tutorial-GiRaFFEfood_NRPy_1D_tests.ipynb) Generates Alfv&eacute;n Wave initial data.* [GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Afield_flux.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Afield_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy_staggered-Afield_flux.ipynb) Generates the expressions to find the flux term of the induction equation.* [GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_A2B.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_A2B.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy_staggered-A2B.ipynb) Generates the driver to compute the magnetic field from the vector potential.* [GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-BCs.ipynb) Generates the code to apply boundary conditions to the vector potential, scalar potential, and three-velocity.* [GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-C2P_P2C.ipynb) Generates the conservative-to-primitive and primitive-to-conservative solvers.* [GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Metric_Face_Values.ipynb) Generates code to interpolate metric gridfunctions to cell faces.* [GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-PPM.ipynb) Generates code to reconstruct primitive variables on cell faces.* [GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Source_Terms.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Source_Terms.py) 
[\[**tutorial**\]](Tutorial-GiRaFFE_NRPy_staggered-Source_Terms.ipynb) Generates the expressions to find the flux term of the Poynting flux evolution equation.* [GiRaFFE_NRPy/Stilde_flux.py](../../edit/in_progress/GiRaFFE_NRPy/Stilde_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Stilde_flux.ipynb) Generates the expressions to find the flux term of the Poynting flux evolution equation.* [../GRFFE/equations.py](../../edit/GRFFE/equations.py) [\[**tutorial**\]](../Tutorial-GRFFE_Equations-Cartesian.ipynb) Generates code necessary to compute the source terms.* [../GRHD/equations.py](../../edit/GRHD/equations.py) [\[**tutorial**\]](../Tutorial-GRHD_Equations-Cartesian.ipynb) Generates code necessary to compute the source terms.Here we use NRPy+ to generate the C source code necessary to set up initial data for an Alfv&eacute;n wave (see [the original GiRaFFE paper](https://arxiv.org/pdf/1704.00599.pdf)). Then we use it to generate the RHS expressions for [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html) time integration based on the [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4). Table of Contents$$\label{toc}$$This notebook is organized as follows1. [Step 1](initializenrpy): Set core NRPy+ parameters for numerical grids1. [Step 2](grffe): Output C code for GRFFE evolution 1. [Step 2.a](mol): Output macros for Method of Lines timestepping1. [Step 3](gf_id): Import `GiRaFFEfood_NRPy` initial data modules1. [Step 4](cparams): Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h`1. [Step 5](mainc): `GiRaFFE_NRPy_standalone.c`: The Main C Code Step 1: Set up core functions and parameters for solving GRFFE equations \[Back to [top](toc)\]$$\label{setup}$$ ###Code import shutil, os, sys # Standard Python modules for multiplatform OS-level functions # First, we'll add the parent directory to the list of directories Python will check for modules. nrpy_dir_path = os.path.join("..") if nrpy_dir_path not in sys.path: sys.path.append(nrpy_dir_path) # Step P1: Import needed NRPy+ core modules: from outputC import outCfunction, lhrh # NRPy+: Core C code output module import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends import finite_difference as fin # NRPy+: Finite difference C code generation module import NRPy_param_funcs as par # NRPy+: Parameter interface import grid as gri # NRPy+: Functions having to do with numerical grids import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface # Step P2: Create C code output directory: Ccodesdir = os.path.join("GiRaFFE_staggered_standalone_Ccodes/") # First remove C code output directory if it exists # Courtesy https://stackoverflow.com/questions/303200/how-do-i-remove-delete-a-folder-that-is-not-empty # !rm -r ScalarWaveCurvilinear_Playground_Ccodes shutil.rmtree(Ccodesdir, ignore_errors=True) # Then create a fresh directory cmd.mkdir(Ccodesdir) # Step P3: Create executable output directory: outdir = os.path.join(Ccodesdir,"output/") cmd.mkdir(outdir) # Step P5: Set timestepping algorithm (we adopt the Method of Lines) REAL = "double" # Best to use double here. default_CFL_FACTOR= 0.5 # (GETS OVERWRITTEN WHEN EXECUTED.) In pure axisymmetry (symmetry_axes = 2 below) 1.0 works fine. Otherwise 0.5 or lower. # Step P6: Set the finite differencing order to 2. 
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",2) thismodule = "Start_to_Finish-GiRaFFE_NRPy-1D_tests" TINYDOUBLE = par.Cparameters("REAL", thismodule, "TINYDOUBLE", 1e-100) import GiRaFFE_NRPy.GiRaFFE_NRPy_Main_Driver_staggered as md # par.set_paramsvals_value("GiRaFFE_NRPy.GiRaFFE_NRPy_C2P_P2C::enforce_speed_limit_StildeD = False") par.set_paramsvals_value("GiRaFFE_NRPy.GiRaFFE_NRPy_C2P_P2C::enforce_current_sheet_prescription = False") ###Output _____no_output_____ ###Markdown Step 2: Output C code for GRFFE evolution \[Back to [top](toc)\]$$\label{grffe}$$We will first write the C codes needed for GRFFE evolution. We have already written a module to generate all these codes and call the functions in the appropriate order, so we will import that here. We will take the slightly unusual step of doing this before we generate the initial data functions because the main driver module will register all the gridfunctions we need. It will also generate functions that, in addition to their normal spot in the MoL timestepping, will need to be called during the initial data step to make sure all the variables are appropriately filled in. All of this is handled with a single call to `GiRaFFE_NRPy_Main_Driver_generate_all()`, which will register gridfunctions, write all the C code kernels, and write the C code functions to call those. ###Code md.GiRaFFE_NRPy_Main_Driver_generate_all(Ccodesdir) ###Output Output C function calculate_StildeD0_source_term() to file GiRaFFE_staggered_standalone_Ccodes/RHSs/calculate_StildeD0_source_term.h Output C function calculate_StildeD1_source_term() to file GiRaFFE_staggered_standalone_Ccodes/RHSs/calculate_StildeD1_source_term.h Output C function calculate_StildeD2_source_term() to file GiRaFFE_staggered_standalone_Ccodes/RHSs/calculate_StildeD2_source_term.h Output C function calculate_Stilde_rhsD() to file GiRaFFE_staggered_standalone_Ccodes/RHSs/calculate_Stilde_rhsD.h Output C function GiRaFFE_NRPy_cons_to_prims() to file GiRaFFE_staggered_standalone_Ccodes/C2P/GiRaFFE_NRPy_cons_to_prims.h Output C function GiRaFFE_NRPy_prims_to_cons() to file GiRaFFE_staggered_standalone_Ccodes/C2P/GiRaFFE_NRPy_prims_to_cons.h Output C function Workaround_ADM_to_BSSN() to file GiRaFFE_staggered_standalone_Ccodes/Workaround_ADM_to_BSSN.h ###Markdown Step 2.a: Output macros for Method of Lines timestepping \[Back to [top](toc)\]$$\label{mol}$$Now, we generate the code to implement the method of lines using the fourth-order Runge-Kutta algorithm. ###Code RK_method = "RK4" # Step 3: Generate Runge-Kutta-based (RK-based) timestepping code. # As described above the Table of Contents, this is a 3-step process: # 3.A: Evaluate RHSs (RHS_string) # 3.B: Apply boundary conditions (post_RHS_string, pt 1) import MoLtimestepping.C_Code_Generation as MoL from MoLtimestepping.RK_Butcher_Table_Dictionary import Butcher_dict RK_order = Butcher_dict[RK_method][1] cmd.mkdir(os.path.join(Ccodesdir,"MoLtimestepping/")) MoL.MoL_C_Code_Generation(RK_method, RHS_string = """ GiRaFFE_NRPy_RHSs(&params,auxevol_gfs,RK_INPUT_GFS,RK_OUTPUT_GFS);""", post_RHS_string = """ GiRaFFE_NRPy_post_step(&params,xx,auxevol_gfs,RK_OUTPUT_GFS,n+1);\n""", outdir = os.path.join(Ccodesdir,"MoLtimestepping/")) ###Output _____no_output_____ ###Markdown Step 3: Import `GiRaFFEfood_NRPy` initial data modules \[Back to [top](toc)\]$$\label{gf_id}$$With the preliminaries out of the way, we will write the C functions to set up initial data. 
There are two categories of initial data that must be set: the spacetime metric variables, and the GRFFE plasma variables. We will set up the spacetime first. ###Code # There are several initial data routines we need to test. We'll control which one we use with a string option initial_data = "AlfvenWave" # Valid options: "ExactWald", "AlignedRotator", "AlfvenWave", "FastWave" spacetime = "flat" # Valid options: "ShiftedKerrSchild", "flat" if spacetime == "ShiftedKerrSchild": # Exact Wald is more complicated. We'll need the Shifted Kerr Schild metric in Cartesian coordinates. import BSSN.ShiftedKerrSchild as sks sks.ShiftedKerrSchild(True) import reference_metric as rfm par.set_parval_from_str("reference_metric::CoordSystem","Cartesian") rfm.reference_metric() # Use the Jacobian matrix to transform the vectors to Cartesian coordinates. drrefmetric__dx_0UDmatrix = sp.Matrix([[sp.diff(rfm.xxSph[0],rfm.xx[0]), sp.diff(rfm.xxSph[0],rfm.xx[1]), sp.diff(rfm.xxSph[0],rfm.xx[2])], [sp.diff(rfm.xxSph[1],rfm.xx[0]), sp.diff(rfm.xxSph[1],rfm.xx[1]), sp.diff(rfm.xxSph[1],rfm.xx[2])], [sp.diff(rfm.xxSph[2],rfm.xx[0]), sp.diff(rfm.xxSph[2],rfm.xx[1]), sp.diff(rfm.xxSph[2],rfm.xx[2])]]) dx__drrefmetric_0UDmatrix = drrefmetric__dx_0UDmatrix.inv() gammaDD = ixp.zerorank2() for i in range(3): for j in range(3): for k in range(3): for l in range(3): gammaDD[i][j] += drrefmetric__dx_0UDmatrix[(k,i)]*drrefmetric__dx_0UDmatrix[(l,j)]*sks.gammaSphDD[k][l].subs(sks.r,rfm.xxSph[0]).subs(sks.th,rfm.xxSph[1]) betaU = ixp.zerorank1() for i in range(3): for j in range(3): betaU[i] += dx__drrefmetric_0UDmatrix[(i,j)]*sks.betaSphU[j].subs(sks.r,rfm.xxSph[0]).subs(sks.th,rfm.xxSph[1]) alpha = sks.alphaSph.subs(sks.r,rfm.xxSph[0]).subs(sks.th,rfm.xxSph[1]) # Description and options for this initial data desc = "Generate a spinning black hole with Shifted Kerr Schild metric." loopopts_id ="AllPoints,Read_xxs" elif spacetime == "flat": gammaDD = ixp.zerorank2(DIM=3) for i in range(3): for j in range(3): if i==j: gammaDD[i][j] = sp.sympify(1) # else: leave as zero betaU = ixp.zerorank1() # All should be 0 alpha = sp.sympify(1) # Description and options for this initial data desc = "Generate a flat spacetime metric." loopopts_id ="AllPoints" # we don't need to read coordinates for flat spacetime. 
# For testing: Also set inverse metric: gammaUU, unused_gammaDET = ixp.symm_matrix_inverter3x3(gammaDD) name = "set_initial_spacetime_metric_data" values_to_print = [ lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD00"),rhs=gammaDD[0][0]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD01"),rhs=gammaDD[0][1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD02"),rhs=gammaDD[0][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD11"),rhs=gammaDD[1][1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD12"),rhs=gammaDD[1][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD22"),rhs=gammaDD[2][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU0"),rhs=betaU[0]), lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU1"),rhs=betaU[1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU2"),rhs=betaU[2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","alpha"),rhs=alpha), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaUU00"),rhs=gammaUU[0][0]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaUU01"),rhs=gammaUU[0][1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaUU02"),rhs=gammaUU[0][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaUU11"),rhs=gammaUU[1][1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaUU12"),rhs=gammaUU[1][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaUU22"),rhs=gammaUU[2][2]) ] outCfunction( outfile = os.path.join(Ccodesdir,name+".h"), desc=desc, name=name, params ="const paramstruct *params,REAL *xx[3],REAL *auxevol_gfs", body = fin.FD_outputC("returnstring",values_to_print,params="outCverbose=False").replace("IDX4","IDX4S"), loopopts = loopopts_id) ###Output Output C function set_initial_spacetime_metric_data() to file GiRaFFE_staggered_standalone_Ccodes/set_initial_spacetime_metric_data.h ###Markdown Now, we will write out the initial data function for the GRFFE variables. ###Code if initial_data=="AlfvenWave": import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_1D_tests as gid gid.GiRaFFEfood_NRPy_1D_tests(stagger = True) desc = "Generate Alfven wave 1D initial test data for GiRaFFEfood_NRPy." elif initial_data=="FastWave": import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_1D_tests_fast_wave as gid gid.GiRaFFEfood_NRPy_1D_tests_fast_wave() desc = "Generate fast wave 1D initial test data for GiRaFFEfood_NRPy." elif initial_data=="AlignedRotator": import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_Aligned_Rotator as gid gid.GiRaFFEfood_NRPy_Aligned_Rotator() desc = "Generate aligned rotator initial test data for GiRaFFEfood_NRPy." elif initial_data=="ExactWald": import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_Exact_Wald as gid M,r0 = sp.symbols("M r0") gid.GiRaFFEfood_NRPy_Exact_Wald(gammaDD,M,r0) desc = "Generate exact Wald initial test data for GiRaFFEfood_NRPy." else: print("Unsupported Initial Data string "+initial_data+"! 
Supported ID: AlfvenWave, FastWave, AlignedRotator, or ExactWald") name = "initial_data" values_to_print = [\ lhrh(lhs=gri.gfaccess("out_gfs","AD0"),rhs=gid.AD[0]),\ lhrh(lhs=gri.gfaccess("out_gfs","AD1"),rhs=gid.AD[1]),\ lhrh(lhs=gri.gfaccess("out_gfs","AD2"),rhs=gid.AD[2]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU0"),rhs=gid.ValenciavU[0]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU1"),rhs=gid.ValenciavU[1]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU2"),rhs=gid.ValenciavU[2]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","BU0"),rhs=gid.BU[0]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","BU1"),rhs=gid.BU[1]),\ lhrh(lhs=gri.gfaccess("auxevol_gfs","BU2"),rhs=gid.BU[2]),\ lhrh(lhs=gri.gfaccess("out_gfs","psi6Phi"),rhs=sp.sympify(0))\ ] outCfunction( outfile = os.path.join(Ccodesdir,name+".h"), desc=desc, name=name, params ="const paramstruct *params,REAL *xx[3],REAL *auxevol_gfs,REAL *out_gfs", body = fin.FD_outputC("returnstring",values_to_print,params="outCverbose=False").replace("IDX4","IDX4S"), loopopts ="AllPoints,Read_xxs") ###Output Output C function initial_data() to file GiRaFFE_staggered_standalone_Ccodes/initial_data.h ###Markdown Step 4: Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h` \[Back to [top](toc)\]$$\label{cparams}$$Based on declared NRPy+ Cparameters, first we generate `declare_Cparameters_struct.h`, `set_Cparameters_default.h`, and `set_Cparameters[-SIMD].h`.Then we output `free_parameters.h`, which sets initial data parameters, as well as grid domain & reference metric parameters, applying `domain_size` and `sinh_width`/`SymTP_bScale` (if applicable) as set above ###Code # Step 3.e: Output C codes needed for declaring and setting Cparameters; also set free_parameters.h # Step 3.e.i: Generate declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h par.generate_Cparameters_Ccodes(os.path.join(Ccodesdir)) # Step 3.e.ii: Set free_parameters.h with open(os.path.join(Ccodesdir,"free_parameters.h"),"w") as file: file.write("""// Override parameter defaults with values based on command line arguments and NGHOSTS. 
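// (For these 1D tests the grid is long in x and only a few cells wide in y and z --
//  e.g. Nx=299, Ny=Nz=4 is the kind of choice used elsewhere in this document; each of
//  Nx, Ny, Nz must still be at least NGHOSTS, which main() enforces.)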
params.Nxx0 = atoi(argv[1]); params.Nxx1 = atoi(argv[2]); params.Nxx2 = atoi(argv[3]); params.Nxx_plus_2NGHOSTS0 = params.Nxx0 + 2*NGHOSTS; params.Nxx_plus_2NGHOSTS1 = params.Nxx1 + 2*NGHOSTS; params.Nxx_plus_2NGHOSTS2 = params.Nxx2 + 2*NGHOSTS; // Step 0d: Set up space and time coordinates // Step 0d.i: Declare \Delta x^i=dxx{0,1,2} and invdxx{0,1,2}, as well as xxmin[3] and xxmax[3]: const REAL xxmin[3] = {-1.5,-0.1,-0.1}; const REAL xxmax[3] = { 1.5, 0.1, 0.1}; //const REAL xxmin[3] = {-1.5,-1.5,-1.5}; //const REAL xxmax[3] = { 1.5, 1.5, 1.5}; params.dxx0 = (xxmax[0] - xxmin[0]) / ((REAL)params.Nxx0+1); params.dxx1 = (xxmax[1] - xxmin[1]) / ((REAL)params.Nxx1+1); params.dxx2 = (xxmax[2] - xxmin[2]) / ((REAL)params.Nxx2+1); printf("dxx0,dxx1,dxx2 = %.5e,%.5e,%.5e\\n",params.dxx0,params.dxx1,params.dxx2); params.invdx0 = 1.0 / params.dxx0; params.invdx1 = 1.0 / params.dxx1; params.invdx2 = 1.0 / params.dxx2; const int poison_grids = 0; // Standard GRFFE parameters: params.GAMMA_SPEED_LIMIT = 2000.0; params.diss_strength = 0.1; """) if initial_data=="ExactWald": with open(os.path.join(out_dir,"free_parameters.h"),"a") as file: file.write("""params.r0 = 0.4; params.a = 0.0; """) ###Output _____no_output_____ ###Markdown Step 4: Set up boundary condition functions for chosen singular, curvilinear coordinate system \[Back to [top](toc)\]$$\label{bc_functs}$$Next apply singular, curvilinear coordinate boundary conditions [as documented in the corresponding NRPy+ tutorial notebook](Tutorial-Start_to_Finish-Curvilinear_BCs.ipynb)...But, for the moment, we're actually just using this because it writes the file `gridfunction_defines.h`. ###Code import CurviBoundaryConditions.CurviBoundaryConditions as cbcs cbcs.Set_up_CurviBoundaryConditions(os.path.join(Ccodesdir,"boundary_conditions/"),Cparamspath=os.path.join("../"),enable_copy_of_static_Ccodes=False) ###Output Wrote to file "GiRaFFE_staggered_standalone_Ccodes/boundary_conditions/parity_conditions_symbolic_dot_products.h" Evolved parity: ( AD0:1, AD1:2, AD2:3, StildeD0:1, StildeD1:2, StildeD2:3, psi6Phi:0 ) AuxEvol parity: ( BU0:1, BU1:2, BU2:3, B_lU0:1, B_lU1:2, B_lU2:3, B_rU0:1, B_rU1:2, B_rU2:3, BstaggerU0:1, BstaggerU1:2, BstaggerU2:3, Bstagger_lU0:1, Bstagger_lU1:2, Bstagger_lU2:3, Bstagger_rU0:1, Bstagger_rU1:2, Bstagger_rU2:3, Stilde_flux_HLLED0:1, Stilde_flux_HLLED1:2, Stilde_flux_HLLED2:3, ValenciavU0:1, ValenciavU1:2, ValenciavU2:3, Valenciav_lU0:1, Valenciav_lU1:2, Valenciav_lU2:3, Valenciav_llU0:1, Valenciav_llU1:2, Valenciav_llU2:3, Valenciav_lrU0:1, Valenciav_lrU1:2, Valenciav_lrU2:3, Valenciav_rU0:1, Valenciav_rU1:2, Valenciav_rU2:3, Valenciav_rlU0:1, Valenciav_rlU1:2, Valenciav_rlU2:3, Valenciav_rrU0:1, Valenciav_rrU1:2, Valenciav_rrU2:3, alpha:0, alpha_face:0, betaU0:1, betaU1:2, betaU2:3, beta_faceU0:1, beta_faceU1:2, beta_faceU2:3, cmax_x:0, cmax_y:0, cmax_z:0, cmin_x:0, cmin_y:0, cmin_z:0, gammaDD00:4, gammaDD01:5, gammaDD02:6, gammaDD11:7, gammaDD12:8, gammaDD22:9, gammaUU00:4, gammaUU01:5, gammaUU02:6, gammaUU11:7, gammaUU12:8, gammaUU22:9, gamma_faceDD00:4, gamma_faceDD01:5, gamma_faceDD02:6, gamma_faceDD11:7, gamma_faceDD12:8, gamma_faceDD22:9, gamma_faceUU00:4, gamma_faceUU01:5, gamma_faceUU02:6, gamma_faceUU11:7, gamma_faceUU12:8, gamma_faceUU22:9, phi:0, phi_face:0, psi6_temp:0, psi6center:0 ) Wrote to file "GiRaFFE_staggered_standalone_Ccodes/boundary_conditions/EigenCoord_Cart_to_xx.h" ###Markdown Step 5: `GiRaFFE_NRPy_standalone.c`: The Main C Code \[Back to [top](toc)\]$$\label{mainc}$$ ###Code # Part P0: Define 
REAL, set the number of ghost cells NGHOSTS (from NRPy+'s FD_CENTDERIVS_ORDER), # and set the CFL_FACTOR (which can be overwritten at the command line) with open(os.path.join(Ccodesdir,"GiRaFFE_NRPy_REAL__NGHOSTS__CFL_FACTOR.h"), "w") as file: file.write(""" // Part P0.a: Set the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER #define NGHOSTS """+str(3)+""" #define NGHOSTS_A2B """+str(2)+""" // Part P0.b: Set the numerical precision (REAL) to double, ensuring all floating point // numbers are stored to at least ~16 significant digits #define REAL """+REAL+""" // Part P0.c: Set the CFL Factor. Can be overwritten at command line. REAL CFL_FACTOR = """+str(default_CFL_FACTOR)+";") %%writefile $Ccodesdir/GiRaFFE_NRPy_standalone.c // Step P0: Define REAL and NGHOSTS; and declare CFL_FACTOR. This header is generated in NRPy+. #include "GiRaFFE_NRPy_REAL__NGHOSTS__CFL_FACTOR.h" #include "declare_Cparameters_struct.h" const int NSKIP_1D_OUTPUT = 1; // Step P1: Import needed header files #include "stdio.h" #include "stdlib.h" #include "math.h" #include "time.h" #include "stdint.h" // Needed for Windows GCC 6.x compatibility #ifndef M_PI #define M_PI 3.141592653589793238462643383279502884L #endif #ifndef M_SQRT1_2 #define M_SQRT1_2 0.707106781186547524400844362104849039L #endif // Step P2: Declare the IDX4S(gf,i,j,k) macro, which enables us to store 4-dimensions of // data in a 1D array. In this case, consecutive values of "i" // (all other indices held to a fixed value) are consecutive in memory, where // consecutive values of "j" (fixing all other indices) are separated by // Nxx_plus_2NGHOSTS0 elements in memory. Similarly, consecutive values of // "k" are separated by Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1 in memory, etc. #define IDX4S(g,i,j,k) \ ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) + Nxx_plus_2NGHOSTS2 * (g) ) ) ) #define IDX4ptS(g,idx) ( (idx) + (Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2) * (g) ) #define IDX3S(i,j,k) ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) ) ) ) #define LOOP_REGION(i0min,i0max, i1min,i1max, i2min,i2max) \ for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) #define LOOP_ALL_GFS_GPS(ii) _Pragma("omp parallel for") \ for(int (ii)=0;(ii)<Nxx_plus_2NGHOSTS_tot*NUM_EVOL_GFS;(ii)++) // Step P3: Set gridfunction macros #include "boundary_conditions/gridfunction_defines.h" // Step P4: Include the RHS, BC, and primitive recovery functions #include "GiRaFFE_NRPy_Main_Driver.h" // Step P5: Include the initial data functions #include "set_initial_spacetime_metric_data.h" #include "initial_data.h" // main() function: // Step 0: Read command-line input, set up grid structure, allocate memory for gridfunctions, set up coordinates // Step 1: Set up scalar wave initial data // Step 2: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm, // applying quadratic extrapolation outer boundary conditions. // Step 3: Output relative error between numerical and exact solution. 
// Step 4: Free all allocated memory int main(int argc, const char *argv[]) { paramstruct params; #include "set_Cparameters_default.h" // Step 0a: Read command-line input, error out if nonconformant if(argc != 4 || atoi(argv[1]) < NGHOSTS || atoi(argv[2]) < NGHOSTS || atoi(argv[3]) < NGHOSTS) { printf("Error: Expected three command-line arguments: ./GiRaFFE_NRPy_standalone [Nx] [Ny] [Nz],\n"); printf("where Nx is the number of grid points in the x direction, and so forth.\n"); printf("Nx,Ny,Nz MUST BE larger than NGHOSTS (= %d)\n",NGHOSTS); exit(1); } // Step 0c: Set free parameters, overwriting Cparameters defaults // by hand or with command-line input, as desired. #include "free_parameters.h" #include "set_Cparameters-nopointer.h" // ... and then set up the numerical grid structure in time: const REAL t_final = 0.5; const REAL CFL_FACTOR = 0.5; // Set the CFL Factor // Step 0c: Allocate memory for gridfunctions const int Nxx_plus_2NGHOSTS_tot = Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2; // Step 0k: Allocate memory for gridfunctions #include "MoLtimestepping/RK_Allocate_Memory.h" REAL *restrict auxevol_gfs = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot); REAL *evol_gfs_exact = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot); REAL *auxevol_gfs_exact = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot); // For debugging, it can be useful to set everything to NaN initially. if(poison_grids) { for(int ii=0;ii<NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) { y_n_gfs[ii] = 1.0/0.0; y_nplus1_running_total_gfs[ii] = 1.0/0.0; //k_odd_gfs[ii] = 1.0/0.0; //k_even_gfs[ii] = 1.0/0.0; diagnostic_output_gfs[ii] = 1.0/0.0; evol_gfs_exact[ii] = 1.0/0.0; } for(int ii=0;ii<NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) { auxevol_gfs[ii] = 1.0/0.0; auxevol_gfs_exact[ii] = 1.0/0.0; } } // Step 0d: Set up coordinates: Set dx, and then dt based on dx_min and CFL condition // This is probably already defined above, but just in case... #ifndef MIN #define MIN(A, B) ( ((A) < (B)) ? (A) : (B) ) #endif REAL dt = CFL_FACTOR * MIN(dxx0,MIN(dxx1,dxx2)); // CFL condition int Nt = (int)(t_final / dt + 0.5); // The number of points in time. //Add 0.5 to account for C rounding down integers. 
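// (Note: the const REAL CFL_FACTOR defined just above shadows the global CFL_FACTOR declared
//  in GiRaFFE_NRPy_REAL__NGHOSTS__CFL_FACTOR.h, so the "can be overwritten at command line"
//  remark there does not apply inside this main(). Illustrative numbers only: Nx=299, Ny=Nz=4
//  gives dxx0 = 3.0/300 = 0.01 and dxx1 = dxx2 = 0.2/5 = 0.04, hence dt = 0.005 and,
//  with t_final = 0.5, Nt = 100.)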
// Step 0e: Set up cell-centered Cartesian coordinate grids REAL *xx[3]; xx[0] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS0); xx[1] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS1); xx[2] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS2); for(int j=0;j<Nxx_plus_2NGHOSTS0;j++) xx[0][j] = xxmin[0] + (j-NGHOSTS+1)*dxx0; for(int j=0;j<Nxx_plus_2NGHOSTS1;j++) xx[1][j] = xxmin[1] + (j-NGHOSTS+1)*dxx1; for(int j=0;j<Nxx_plus_2NGHOSTS2;j++) xx[2][j] = xxmin[2] + (j-NGHOSTS+1)*dxx2; // Step 1: Set up initial data to be exact solution at time=0: REAL time = 0.0; set_initial_spacetime_metric_data(&params,xx,auxevol_gfs); initial_data(&params,xx,auxevol_gfs,y_n_gfs); /* // Code to perturb the initial data: for(int ii=0;ii<NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) { y_n_gfs[ii] += 1.0e-15; y_nplus1_running_total_gfs[ii] += 1.0e-15; //k_odd_gfs[ii] = 1.0/0.0; //k_even_gfs[ii] = 1.0/0.0; diagnostic_output_gfs[ii] += 1.0e-15; evol_gfs_exact[ii] += 1.0e-15; } for(int ii=0;ii<NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) { auxevol_gfs[ii] += 1.0e-15; auxevol_gfs_exact[ii] += 1.0e-15; } */ // Fill in the remaining quantities GiRaFFE_compute_B_and_Bstagger_from_A(&params, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD00GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD01GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD02GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD11GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD12GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD22GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*PSI6_TEMPGF, /* Temporary storage,overwritten */ y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD0GF, y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD1GF, y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD2GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU0GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU1GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU2GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU0GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU1GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU2GF); //override_BU_with_old_GiRaFFE(&params,auxevol_gfs,0); GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs,y_n_gfs); // Extra stack, useful for debugging: GiRaFFE_NRPy_cons_to_prims(&params,xx,auxevol_gfs,y_n_gfs); //GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs,y_n_gfs); //GiRaFFE_NRPy_cons_to_prims(&params,xx,auxevol_gfs,y_n_gfs); //GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs,y_n_gfs); //GiRaFFE_NRPy_cons_to_prims(&params,xx,auxevol_gfs,y_n_gfs); for(int n=0;n<=Nt;n++) { // Main loop to progress forward in time. //for(int n=0;n<=1;n++) { // Main loop to progress forward in time. // Step 1a: Set current time to correct value & compute exact solution time = ((REAL)n)*dt; /* Step 2: Validation: Output relative error between numerical and exact solution, */ if((n)%NSKIP_1D_OUTPUT ==0) { // Step 2c: Output relative error between exact & numerical at center of grid. 
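      // (Each row written below holds 14 columns: x, B^i (BU0-BU2), A_i (AD0-AD2),
      //  Stilde_i (StildeD0-StildeD2), the Valencia 3-velocity (ValenciavU0-ValenciavU2), and psi6Phi.)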
const int i0mid=Nxx_plus_2NGHOSTS0/2; const int i1mid=Nxx_plus_2NGHOSTS1/2; const int i2mid=Nxx_plus_2NGHOSTS2/2; char filename[100]; sprintf(filename,"out%d-%08d.txt",Nxx0,n); FILE *out2D = fopen(filename, "w"); for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) { const int idx = IDX3S(i0,i1mid,i2mid); fprintf(out2D,"%.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e\n", xx[0][i0], auxevol_gfs[IDX4ptS(BU0GF,idx)],auxevol_gfs[IDX4ptS(BU1GF,idx)],auxevol_gfs[IDX4ptS(BU2GF,idx)], y_n_gfs[IDX4ptS(AD0GF,idx)],y_n_gfs[IDX4ptS(AD1GF,idx)],y_n_gfs[IDX4ptS(AD2GF,idx)], y_n_gfs[IDX4ptS(STILDED0GF,idx)],y_n_gfs[IDX4ptS(STILDED1GF,idx)],y_n_gfs[IDX4ptS(STILDED2GF,idx)], auxevol_gfs[IDX4ptS(VALENCIAVU0GF,idx)],auxevol_gfs[IDX4ptS(VALENCIAVU1GF,idx)],auxevol_gfs[IDX4ptS(VALENCIAVU2GF,idx)], y_n_gfs[IDX4ptS(PSI6PHIGF,idx)]); } fclose(out2D); // For convergence testing, we'll shift the grid x -> x-1 and output initial data again, giving the exact solution. LOOP_REGION(0,Nxx_plus_2NGHOSTS0,0,1,0,1) { xx[0][i0] += -mu_AW*time; //xx[0][i0] += -time; } set_initial_spacetime_metric_data(&params,xx,auxevol_gfs_exact); initial_data(&params,xx,auxevol_gfs_exact,evol_gfs_exact); // Fill in the remaining quantities //driver_A_to_B(&params,evol_gfs_exact,auxevol_gfs_exact); GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs_exact,evol_gfs_exact); // And now, we'll set the grid back to rights. LOOP_REGION(0,Nxx_plus_2NGHOSTS0,0,1,0,1) { xx[0][i0] -= -mu_AW*time; //xx[0][i0] -= -time; } sprintf(filename,"out%d-%08d_exact.txt",Nxx0,n); FILE *out2D_exact = fopen(filename, "w"); for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) { const int idx = IDX3S(i0,i1mid,i2mid); fprintf(out2D_exact,"%.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e\n", xx[0][i0], auxevol_gfs_exact[IDX4ptS(BU0GF,idx)],auxevol_gfs_exact[IDX4ptS(BU1GF,idx)],auxevol_gfs_exact[IDX4ptS(BU2GF,idx)], evol_gfs_exact[IDX4ptS(AD0GF,idx)],evol_gfs_exact[IDX4ptS(AD1GF,idx)],evol_gfs_exact[IDX4ptS(AD2GF,idx)], evol_gfs_exact[IDX4ptS(STILDED0GF,idx)],evol_gfs_exact[IDX4ptS(STILDED1GF,idx)],evol_gfs_exact[IDX4ptS(STILDED2GF,idx)], auxevol_gfs_exact[IDX4ptS(VALENCIAVU0GF,idx)],auxevol_gfs_exact[IDX4ptS(VALENCIAVU1GF,idx)],auxevol_gfs_exact[IDX4ptS(VALENCIAVU2GF,idx)], evol_gfs_exact[IDX4ptS(PSI6PHIGF,idx)]); } fclose(out2D_exact); } // Step 3: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm, // applying quadratic extrapolation outer boundary conditions. // Step 3.b: Step forward one timestep (t -> t+dt) in time using // chosen RK-like MoL timestepping algorithm #include "MoLtimestepping/RK_MoL.h" } // End main loop to progress forward in time. 
// Step 4: Free all allocated memory #include "MoLtimestepping/RK_Free_Memory.h" free(auxevol_gfs); free(auxevol_gfs_exact); free(evol_gfs_exact); for(int i=0;i<3;i++) free(xx[i]); return 0; } cmd.C_compile(os.path.join(Ccodesdir,"GiRaFFE_NRPy_standalone.c"), os.path.join(Ccodesdir,"output","GiRaFFE_NRPy_standalone"),compile_mode="safe") # !gcc -g -O2 -fopenmp GiRaFFE_standalone_Ccodes/GiRaFFE_NRPy_standalone.c -o GiRaFFE_NRPy_standalone -lm # Change to output directory os.chdir(outdir) # Clean up existing output files cmd.delete_existing_files("out*.txt") cmd.delete_existing_files("out*.png") # cmd.Execute(os.path.join(Ccodesdir,"output","GiRaFFE_NRPy_standalone"), "640 16 16", os.path.join(outdir,"out640.txt")) # !taskset -c 0-7 ./GiRaFFE_NRPy_standalone 119 7 7 cmd.Execute("GiRaFFE_NRPy_standalone", "119 7 7","out119.txt") # cmd.Execute("GiRaFFE_NRPy_standalone", "119 119 119","out119.txt") # cmd.Execute("GiRaFFE_NRPy_standalone", "239 15 15","out239.txt") # !OMP_NUM_THREADS=1 valgrind --track-origins=yes -v ./GiRaFFE_NRPy_standalone 1280 32 32 # Return to root directory os.chdir(os.path.join("../../")) ###Output Compiling executable... (EXEC): Executing `gcc -O2 -g -fopenmp GiRaFFE_staggered_standalone_Ccodes/GiRaFFE_NRPy_standalone.c -o GiRaFFE_staggered_standalone_Ccodes/output/GiRaFFE_NRPy_standalone -lm`... (BENCH): Finished executing in 1.6124725341796875 seconds. Finished compilation. (EXEC): Executing `taskset -c 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 ./GiRaFFE_NRPy_standalone 119 7 7`... (BENCH): Finished executing in 1.0101072788238525 seconds. ###Markdown Now, we will load the data generated by the simulation and plot it in order to test for convergence. ###Code import numpy as np import matplotlib.pyplot as plt # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 labels = ["x","BU0","BU1","BU2","AD0","AD1","AD2","StildeD0","StildeD1","StildeD2","ValenciavU0","ValenciavU1","ValenciavU2", "psi6Phi"] Data_numer = np.loadtxt(os.path.join(Ccodesdir,"output","out119-00000040.txt")) # Data_num_2 = np.loadtxt(os.path.join(Ccodesdir,"output","out239-00000080.txt")) # Data_old = np.loadtxt("/home/penelson/OldCactus/Cactus/exe/ABE-GiRaFFEfood_1D_AlfvenWave/giraffe-grmhd_primitives_bi.x.asc") # Data_o_2 = np.loadtxt("/home/penelson/OldCactus/Cactus/exe/ABE-GiRaFFEfood_1D_AlfvenWave_2/giraffe-grmhd_primitives_bi.x.asc") # Data_numer = Data_old[5000:5125,11:15] # The column range is chosen for compatibility with the plotting script. # Data_num_2 = Data_o_2[19600:19845,11:15] # The column range is chosen for compatibility with the plotting script. 
Data_exact = np.loadtxt(os.path.join(Ccodesdir,"output","out119-00000040_exact.txt")) # Data_exa_2 = np.loadtxt(os.path.join(Ccodesdir,"output","out239-00000080_exact.txt")) predicted_order = 2.0 column = 3 plt.figure() # # plt.plot(Data_exact[2:-2,0],np.log2(np.absolute((Data_numer[2:-2,column]-Data_exact[2:-2,column])/\ # # (Data_num_2[2:-2:2,column]-Data_exa_2[2:-2:2,column]))),'.') plt.plot(Data_exact[:,0],Data_exact[:,column],label="Exact") plt.plot(Data_exact[:,0],Data_numer[:,column],'.',label="Approximate") # plt.plot(Data_exact[:,0],Data_exact[:,column]-Data_numer[:,column]) # plt.xlim(-0.0,1.0) # # plt.ylim(-1.0,5.0) # # plt.ylim(-0.0005,0.0005) plt.xlabel(labels[0]) plt.ylabel(labels[column]) # plt.legend() plt.show() # Plotting scripts for comparison with original GiRaFFE: # old_files = ["", # "giraffe-grmhd_primitives_bi.x.asc","giraffe-grmhd_primitives_bi.x.asc","giraffe-grmhd_primitives_bi.x.asc", # "giraffe-em_ax.x.asc","giraffe-em_ay.x.asc","giraffe-em_az.x.asc", # # "cell_centered_Ai.txt","cell_centered_Ai.txt","cell_centered_Ai.txt", # "giraffe-grmhd_conservatives.x.asc","giraffe-grmhd_conservatives.x.asc","giraffe-grmhd_conservatives.x.asc", # "giraffe-grmhd_primitives_allbutbi.x.asc","giraffe-grmhd_primitives_allbutbi.x.asc","giraffe-grmhd_primitives_allbutbi.x.asc", # "giraffe-em_psi6phi.x.asc"] # column = 5 # column_old = [0,12,13,14,12,12,12,12,13,14,12,13,14,12] # # old_path = "/home/penelson/OldCactus/Cactus/exe/ABE-GiRaFFEfood_1D_AlfvenWave" # old_path = "C:\\Users\\Patrick\\Documents\\nrpytutorial\\in_progress\\ABE-GiRaFFEfood_1D_AlfvenWave\\" # # perturb_path = "C:\\Users\\Patrick\\Documents\\nrpytutorial\\in_progress\\ABE-GiRaFFEfood_1D_AlfvenWave_perturb\\" # new_path = os.path.join(Ccodesdir,"output") # data_old = np.loadtxt(os.path.join(old_path,old_files[column])) # # data_per = np.loadtxt(os.path.join(perturb_path,old_files[column])) # n=1 # data_old = data_old[n*125:n*125+125,:]# Select only the nth timestep # data_new = np.loadtxt(os.path.join(new_path,"out119-00000001.txt")) # deltaA_old = data_old[125:250,:] - data_old[0:125,:] # data_new_t0 = np.loadtxt(os.path.join(new_path,"out119-00000000.txt")) # deltaA_new = data_new[:,:] - data_new_t0[:,:] # plt.figure() # plt.plot(data_new[3:-3,0],data_new[3:-3,column]-data_old[3:-3,column_old[column]]) # plt.plot(data_new[3:-3,0],data_per[3:-3,column_old[column]]-data_old[3:-3,column_old[column]]) # For perturbation testing! 
# plt.plot(data_new[:,0],data_old[:,column_old[column]]) # plt.plot(data_new[:,0],data_new[:,column],'.') # plt.plot(data_new[:,0],data_new[:,column]-((3*np.sin(5*np.pi*data_new[:,0]/np.sqrt(1 - (-0.5)**2))/20 + 23/20)*(data_new[:,0]/2 + np.sqrt(1 - (-0.5)**2)/20 + np.absolute(data_new[:,0] + np.sqrt(1 - (-0.5)**2)/10)/2)*(-1e-100/2 + data_new[:,0]/2 - np.sqrt(1 - (-0.5)**2)/20 - np.absolute(-1e-100 + data_new[:,0] - np.sqrt(1 - (-0.5)**2)/10)/2)/((-1e-100 + data_new[:,0] - np.sqrt(1 - (-0.5)**2)/10)*(1e-100 + data_new[:,0] + np.sqrt(1 - (-0.5)**2)/10)) + 13*(data_new[:,0]/2 - np.sqrt(1 - (-0.5)**2)/20 + np.absolute(data_new[:,0] - np.sqrt(1 - (-0.5)**2)/10)/2)/(10*(1e-100 + data_new[:,0] - np.sqrt(1 - (-0.5)**2)/10)) + (-1e-100/2 + data_new[:,0]/2 + np.sqrt(1 - (-0.5)**2)/20 - np.absolute(-1e-100 + data_new[:,0] + np.sqrt(1 - (-0.5)**2)/10)/2)/(-1e-100 + data_new[:,0] + np.sqrt(1 - (-0.5)**2)/10))/np.sqrt(1 - (-0.5)**2)) # plt.plot(data_new[1:,0]-(data_new[0,0]-data_new[1,0])/2.0,(data_new[0:-1,column]+data_new[1:,column])/2,'.',label="GiRaFFE_NRPy+injected BU") # plt.plot(data_new[1:,0]-(data_new[0,0]-data_new[1,0])/2.0,data_old[1:,column_old[column]],label="old GiRaFFE") # -(data_old[0,9]-data_old[1,9])/2.0 # plt.plot(data_new[3:-3,0],deltaA_new[3:-3,column],'.') # plt.plot(data_new[3:-3,0],deltaA_old[3:-3,column_old[column]]-deltaA_new[3:-3,column]) # plt.xlim(-0.1,0.1) # plt.ylim(-0.2,0.2) # plt.legend() # plt.show() # print(np.argmin(deltaA_old[3:-3,column_old[column]]-deltaA_new[3:-3,column])) def SDA(a,b): return 1.0-np.log10(2.0*np.abs(a-b)/(np.abs(a)+np.abs(b))+1.0e-15) Data_stable = np.loadtxt(os.path.join(Ccodesdir,"..","out119-00000040_stable.txt")) numbers_to_check = Data_numer[:,column]-Data_stable[:,column] for index in range(len(numbers_to_check)): # print(SDA(Data_numer[index,column],Data_stable[index,column])) if SDA(Data_numer[index,column],Data_stable[index,column])<10: print("Error: number of SDAs too low: "+str(SDA(Data_numer[index,column],Data_stable[index,column]))) sys.exit(1) ###Output _____no_output_____ ###Markdown This code will create an animation of the wave over time. 
###Code # import matplotlib.pyplot as plt from matplotlib.pyplot import savefig from IPython.display import HTML import matplotlib.image as mgimg import glob import sys from matplotlib import animation cmd.delete_existing_files("out119-00*.png") globby = glob.glob(os.path.join(Ccodesdir,'output','out119-00*.txt')) file_list = [] for x in sorted(globby): file_list.append(x) number_of_files = int(len(file_list)/2) for timestep in range(number_of_files): fig = plt.figure() numer_filename = file_list[2*timestep] exact_filename = file_list[2*timestep+1] Numer = np.loadtxt(numer_filename) Exact = np.loadtxt(exact_filename) plt.title("Alfven Wave") plt.xlabel("x") plt.ylabel("BU2") plt.xlim(-0.5,0.5) plt.ylim(1.0,1.7) plt.plot(Numer[3:-3,0],Numer[3:-3,3],'.',label="Numerical") plt.plot(Exact[3:-3,0],Exact[3:-3,3],label="Exact") plt.legend() savefig(numer_filename+".png",dpi=150) plt.close(fig) sys.stdout.write("%c[2K" % 27) sys.stdout.write("Processing file "+numer_filename+"\r") sys.stdout.flush() ## VISUALIZATION ANIMATION, PART 2: Combine PNGs to generate movie ## # https://stackoverflow.com/questions/14908576/how-to-remove-frame-from-matplotlib-pyplot-figure-vs-matplotlib-figure-frame # https://stackoverflow.com/questions/23176161/animating-pngs-in-matplotlib-using-artistanimation # !rm -f GiRaFFE_NRPy-1D_tests.mp4 cmd.delete_existing_files("GiRaFFE_NRPy-1D_tests.mp4") fig = plt.figure(frameon=False) ax = fig.add_axes([0, 0, 1, 1]) ax.axis('off') myimages = [] for i in range(number_of_files): img = mgimg.imread(file_list[2*i]+".png") imgplot = plt.imshow(img) myimages.append([imgplot]) ani = animation.ArtistAnimation(fig, myimages, interval=100, repeat_delay=1000) plt.close() ani.save('GiRaFFE_NRPy-1D_tests.mp4', fps=5,dpi=150) %%HTML <video width="480" height="360" controls> <source src="GiRaFFE_NRPy-1D_tests.mp4" type="video/mp4"> </video> import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-GiRaFFE_NRPy_Main_Driver",location_of_template_file=os.path.join("..")) ###Output Created Tutorial-GiRaFFE_NRPy_Main_Driver.tex, and compiled LaTeX file to PDF file Tutorial-GiRaFFE_NRPy_Main_Driver.pdf ###Markdown window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'UA-59152712-8'); Start-to-Finish Example: `GiRaFFE_NRPy` 1D tests Authors: Patrick Nelson & Terrence Pierre Jacques Adapted from [Start-to-Finish Example: Head-On Black Hole Collision](../Tutorial-Start_to_Finish-BSSNCurvilinear-Two_BHs_Collide.ipynb) This module compiles and runs code tests for all 1D initial data options available in GiRaFFE-NRPy+, evolving one-dimensional GRFFE waves. NRPy+ Source Code for this module: * Main python module for all 1D initial data: [GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_1D_tests.py](../../edit/in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_1D_tests.py) __Options:__ 1. [Fast Wave](Tutorial-GiRaFFEfood_NRPy_1D_tests-fast_wave.ipynb) 1. [Alfven Wave](Tutorial-GiRaFFEfood_NRPy_1D_alfven_wave.ipynb) 1. [Degenerate Alfven Wave](Tutorial-GiRaFFEfood_NRPy_1D_tests-degen_Alfven_wave.ipynb) 1. [Three Alfven Waves](Tutorial-GiRaFFEfood_NRPy_1D_tests-three_waves.ipynb) 1. 
[FFE Breakdown](Tutorial-GiRaFFEfood_NRPy_1D_tests-FFE_breakdown.ipynb)* [GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Afield_flux.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Afield_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy_staggered-Afield_flux.ipynb) Generates the expressions to find the flux term of the induction equation.* [GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_A2B.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_A2B.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy_staggered-A2B.ipynb) Generates the driver to compute the magnetic field from the vector potential/* [GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_BCs.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-BCs.ipynb) Generates the code to apply boundary conditions to the vector potential, scalar potential, and three-velocity.* [GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_C2P_P2C.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-C2P_P2C.ipynb) Generates the conservative-to-primitive and primitive-to-conservative solvers.* [GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_Metric_Face_Values.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Metric_Face_Values.ipynb) Generates code to interpolate metric gridfunctions to cell faces.* [GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-PPM.ipynb) Genearates code to reconstruct primitive variables on cell faces.* [GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Source_Terms.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_staggered_Source_Terms.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy_staggered-Source_Terms.ipynb) Generates the expressions to find the flux term of the Poynting flux evolution equation.* [GiRaFFE_NRPy/Stilde_flux.py](../../edit/in_progress/GiRaFFE_NRPy/Stilde_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Stilde_flux.ipynb) Generates the expressions to find the flux term of the Poynting flux evolution equation.* [../GRFFE/equations.py](../../edit/GRFFE/equations.py) [\[**tutorial**\]](../Tutorial-GRFFE_Equations-Cartesian.ipynb) Generates code necessary to compute the source terms.* [../GRHD/equations.py](../../edit/GRHD/equations.py) [\[**tutorial**\]](../Tutorial-GRHD_Equations-Cartesian.ipynb) Generates code necessary to compute the source terms.Here we use NRPy+ to generate the C source code necessary to set up initial data for an Alfv&eacute;n wave (see [the original GiRaFFE paper](https://arxiv.org/pdf/1704.00599.pdf)). Then we use it to generate the RHS expressions for [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html) time integration based on the [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4). Table of Contents$$\label{toc}$$This notebook is organized as follows1. [Step 1](initializenrpy): Set core NRPy+ parameters for numerical grids1. [Step 2](grffe): Output C code for GRFFE evolution 1. [Step 2.a](mol): Output macros for Method of Lines timestepping1. [Step 3](gf_id): Import `GiRaFFEfood_NRPy` initial data modules1. [Step 4](cparams): Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h`1. [Step 5](mainc): `GiRaFFE_NRPy_standalone.c`: The Main C Code1. [Step 6](compileexec): Compile and execute C codes1. [Step 7](plots): Data Visualization1. 
[Step 8](latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file Step 1: Set up core functions and parameters for solving GRFFE equations \[Back to [top](toc)\]$$\label{setup}$$ ###Code import shutil, os, sys # Standard Python modules for multiplatform OS-level functions # First, we'll add the parent directory to the list of directories Python will check for modules. nrpy_dir_path = os.path.join("..") if nrpy_dir_path not in sys.path: sys.path.append(nrpy_dir_path) # Step P1: Import needed NRPy+ core modules: from outputC import outCfunction, lhrh # NRPy+: Core C code output module import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends import finite_difference as fin # NRPy+: Finite difference C code generation module import NRPy_param_funcs as par # NRPy+: Parameter interface import grid as gri # NRPy+: Functions having to do with numerical grids import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface # Step P2: Create C code output directory: Ccodesdir = os.path.join("GiRaFFE_staggered_1D_Tests_standalone_Ccodes/") # First remove C code output directory if it exists # Courtesy https://stackoverflow.com/questions/303200/how-do-i-remove-delete-a-folder-that-is-not-empty # !rm -r ScalarWaveCurvilinear_Playground_Ccodes shutil.rmtree(Ccodesdir, ignore_errors=True) # Then create a fresh directory cmd.mkdir(Ccodesdir) # Step P3: Create executable output directory: outdir = os.path.join(Ccodesdir,"output/") cmd.mkdir(outdir) # Step P5: Set timestepping algorithm (we adopt the Method of Lines) REAL = "double" # Best to use double here. default_CFL_FACTOR= 0.5 # (GETS OVERWRITTEN WHEN EXECUTED.) In pure axisymmetry (symmetry_axes = 2 below) 1.0 works fine. Otherwise 0.5 or lower. # Step P6: Set the finite differencing order to 2. par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",4) thismodule = "Start_to_Finish-GiRaFFE_NRPy-1D_tests" TINYDOUBLE = par.Cparameters("REAL", thismodule, "TINYDOUBLE", 1e-100) import GiRaFFE_NRPy.GiRaFFE_NRPy_Main_Driver_staggered as md # par.set_paramsvals_value("GiRaFFE_NRPy.GiRaFFE_NRPy_C2P_P2C::enforce_speed_limit_StildeD = False") par.set_paramsvals_value("GiRaFFE_NRPy.GiRaFFE_NRPy_C2P_P2C::enforce_current_sheet_prescription = False") ###Output _____no_output_____ ###Markdown Step 2: Output C code for GRFFE evolution \[Back to [top](toc)\]$$\label{grffe}$$We will first write the C codes needed for GRFFE evolution. We have already written a module to generate all these codes and call the functions in the appropriate order, so we will import that here. We will take the slightly unusual step of doing this before we generate the initial data functions because the main driver module will register all the gridfunctions we need. It will also generate functions that, in addition to their normal spot in the MoL timestepping, will need to be called during the initial data step to make sure all the variables are appropriately filled in. All of this is handled with a single call to `GiRaFFE_NRPy_Main_Driver_generate_all()`, which will register gridfunctions, write all the C code kernels, and write the C code functions to call those. 
###Code md.GiRaFFE_NRPy_Main_Driver_generate_all(Ccodesdir) ###Output Output C function calculate_StildeD0_source_term() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/RHSs/calculate_StildeD0_source_term.h Output C function calculate_StildeD1_source_term() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/RHSs/calculate_StildeD1_source_term.h Output C function calculate_StildeD2_source_term() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/RHSs/calculate_StildeD2_source_term.h Output C function calculate_Stilde_rhsD() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/RHSs/calculate_Stilde_rhsD.h Output C function GiRaFFE_NRPy_cons_to_prims() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/C2P/GiRaFFE_NRPy_cons_to_prims.h Output C function GiRaFFE_NRPy_prims_to_cons() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/C2P/GiRaFFE_NRPy_prims_to_cons.h ###Markdown Step 2.a: Output macros for Method of Lines timestepping \[Back to [top](toc)\]$$\label{mol}$$Now, we generate the code to implement the method of lines using the fourth-order Runge-Kutta algorithm. ###Code RK_method = "RK4" # Step 3: Generate Runge-Kutta-based (RK-based) timestepping code. # As described above the Table of Contents, this is a 3-step process: # 3.A: Evaluate RHSs (RHS_string) # 3.B: Apply boundary conditions (post_RHS_string, pt 1) import MoLtimestepping.C_Code_Generation as MoL from MoLtimestepping.RK_Butcher_Table_Dictionary import Butcher_dict RK_order = Butcher_dict[RK_method][1] cmd.mkdir(os.path.join(Ccodesdir,"MoLtimestepping/")) MoL.MoL_C_Code_Generation(RK_method, RHS_string = """ GiRaFFE_NRPy_RHSs(&params,auxevol_gfs,RK_INPUT_GFS,RK_OUTPUT_GFS);""", post_RHS_string = """ GiRaFFE_NRPy_post_step(&params,xx,auxevol_gfs,RK_OUTPUT_GFS,n+1);\n""", outdir = os.path.join(Ccodesdir,"MoLtimestepping/")) ###Output _____no_output_____ ###Markdown Step 3: Import `GiRaFFEfood_NRPy` initial data modules \[Back to [top](toc)\]$$\label{gf_id}$$With the preliminaries out of the way, we will write the C functions to set up initial data. There are two categories of initial data that must be set: the spacetime metric variables, and the GRFFE plasma variables. We will set up the spacetime first, namely the Minkowski spacetime. ###Code gammaDD = ixp.zerorank2(DIM=3) for i in range(3): for j in range(3): if i==j: gammaDD[i][j] = sp.sympify(1) # else: leave as zero betaU = ixp.zerorank1() # All should be 0 alpha = sp.sympify(1) # Description and options for this initial data desc = "Generate a flat spacetime metric." loopopts_id ="AllPoints" # we don't need to read coordinates for flat spacetime. 
# For testing: Also set inverse metric: gammaUU, unused_gammaDET = ixp.symm_matrix_inverter3x3(gammaDD) name = "set_initial_spacetime_metric_data" values_to_print = [ lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD00"),rhs=gammaDD[0][0]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD01"),rhs=gammaDD[0][1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD02"),rhs=gammaDD[0][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD11"),rhs=gammaDD[1][1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD12"),rhs=gammaDD[1][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD22"),rhs=gammaDD[2][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU0"),rhs=betaU[0]), lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU1"),rhs=betaU[1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU2"),rhs=betaU[2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","alpha"),rhs=alpha) ] outCfunction( outfile = os.path.join(Ccodesdir,name+".h"), desc=desc, name=name, params ="const paramstruct *params,REAL *xx[3],REAL *auxevol_gfs", body = fin.FD_outputC("returnstring",values_to_print,params="outCverbose=False"), loopopts = loopopts_id) ###Output Output C function set_initial_spacetime_metric_data() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/set_initial_spacetime_metric_data.h ###Markdown Now, we will write out the initials data function for the GRFFE variables. ###Code initial_data_dir = os.path.join(Ccodesdir,"InitialData/") cmd.mkdir(initial_data_dir) ID_opts = ["AlfvenWave", "ThreeAlfvenWaves", "DegenAlfvenWave", "FastWave", "FFEBD"] import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy as gid for initial_data in ID_opts: if initial_data=="AlfvenWave": gid.GiRaFFEfood_NRPy_generate_initial_data(ID_type = initial_data, stagger_enable = True) desc = "Generate Alfven wave 1D initial data for GiRaFFEfood_NRPy." elif initial_data=="ThreeAlfvenWaves": gid.GiRaFFEfood_NRPy_generate_initial_data(ID_type = "ThreeWaves", stagger_enable = True) desc = "Generate three Alfven wave 1D initial data for GiRaFFEfood_NRPy." elif initial_data=="DegenAlfvenWave": gid.GiRaFFEfood_NRPy_generate_initial_data(ID_type = initial_data, stagger_enable = True) desc = "Generate degenerate Alfven wave 1D initial data for GiRaFFEfood_NRPy." elif initial_data=="FastWave": gid.GiRaFFEfood_NRPy_generate_initial_data(ID_type = initial_data, stagger_enable = True) desc = "Generate fast wave 1D initial data for GiRaFFEfood_NRPy." elif initial_data=="FFEBD": gid.GiRaFFEfood_NRPy_generate_initial_data(ID_type = "FFE_Breakdown", stagger_enable = True) desc = "Generate FFE breakdown 1D initial data for GiRaFFEfood_NRPy." 
name = initial_data values_to_print = [ lhrh(lhs=gri.gfaccess("out_gfs","AD0"),rhs=gid.AD[0]), lhrh(lhs=gri.gfaccess("out_gfs","AD1"),rhs=gid.AD[1]), lhrh(lhs=gri.gfaccess("out_gfs","AD2"),rhs=gid.AD[2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU0"),rhs=gid.ValenciavU[0]), lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU1"),rhs=gid.ValenciavU[1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","ValenciavU2"),rhs=gid.ValenciavU[2]), # lhrh(lhs=gri.gfaccess("auxevol_gfs","BU0"),rhs=gid.BU[0]), # lhrh(lhs=gri.gfaccess("auxevol_gfs","BU1"),rhs=gid.BU[1]), # lhrh(lhs=gri.gfaccess("auxevol_gfs","BU2"),rhs=gid.BU[2]), lhrh(lhs=gri.gfaccess("out_gfs","psi6Phi"),rhs=sp.sympify(0)) ] outCfunction( outfile = os.path.join(initial_data_dir,name+".c"), desc=desc, name=name, params ="const paramstruct *params, REAL *xx[3], REAL *auxevol_gfs, REAL *out_gfs", body = fin.FD_outputC("returnstring",values_to_print,params="outCverbose=False"), rel_path_to_Cparams='../', loopopts ="AllPoints,Read_xxs") inital_data_body = """ const char *option1 = "AlfvenWave"; const char *option2 = "ThreeAlfvenWaves"; const char *option3 = "DegenAlfvenWave"; const char *option4 = "FastWave"; const char *option5 = "FFEBD"; if (strcmp(initial_data_option, option1) == 0) { AlfvenWave(params, xx, auxevol_gfs, out_gfs); } else if (strcmp(initial_data_option, option2) == 0) { ThreeAlfvenWaves(params, xx, auxevol_gfs, out_gfs); } else if (strcmp(initial_data_option, option3) == 0) { DegenAlfvenWave(params, xx, auxevol_gfs, out_gfs); } else if (strcmp(initial_data_option, option4) == 0) { FastWave(params, xx, auxevol_gfs, out_gfs); } else if (strcmp(initial_data_option, option5) == 0) { FFEBD(params, xx, auxevol_gfs, out_gfs); } else { printf("ERROR: Invalid choice of initial data."); exit(1); } """ name = "initial_data" desc = "Main initial data function." 
includes = ["AlfvenWave.c", "ThreeAlfvenWaves.c", "DegenAlfvenWave.c", "FastWave.c", "FFEBD.c"] outCfunction( outfile = os.path.join(initial_data_dir,name+".h"), desc=desc, name=name, params ="const char *initial_data_option, const paramstruct *restrict params,REAL *xx[3],REAL *restrict auxevol_gfs,REAL *restrict out_gfs", body = inital_data_body, includes = includes, prefunc="#include <string.h>", rel_path_to_Cparams='../', loopopts ="") ###Output Output C function AlfvenWave() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/AlfvenWave.c Output C function ThreeAlfvenWaves() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/ThreeAlfvenWaves.c Output C function DegenAlfvenWave() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/DegenAlfvenWave.c Output C function FastWave() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/FastWave.c Output C function FFEBD() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/FFEBD.c Output C function initial_data() to file GiRaFFE_staggered_1D_Tests_standalone_Ccodes/InitialData/initial_data.h ###Markdown Step 4: Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h` \[Back to [top](toc)\]$$\label{cparams}$$Based on declared NRPy+ Cparameters, first we generate `declare_Cparameters_struct.h`, `set_Cparameters_default.h`, and `set_Cparameters[-SIMD].h`.Then we output `free_parameters.h`, which sets initial data parameters, as well as grid domain & reference metric parameters, applying `domain_size` and `sinh_width`/`SymTP_bScale` (if applicable) as set above ###Code # Step 3.e: Output C codes needed for declaring and setting Cparameters; also set free_parameters.h # Step 3.e.i: Generate declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h par.generate_Cparameters_Ccodes(os.path.join(Ccodesdir)) # Step 3.e.ii: Set free_parameters.h with open(os.path.join(Ccodesdir,"free_parameters.h"),"w") as file: file.write("""// Override parameter defaults with values based on command line arguments and NGHOSTS. params.Nxx0 = atoi(argv[1]); params.Nxx1 = atoi(argv[2]); params.Nxx2 = atoi(argv[3]); params.Nxx_plus_2NGHOSTS0 = params.Nxx0 + 2*NGHOSTS; params.Nxx_plus_2NGHOSTS1 = params.Nxx1 + 2*NGHOSTS; params.Nxx_plus_2NGHOSTS2 = params.Nxx2 + 2*NGHOSTS; // Step 0d: Set up space and time coordinates // Step 0d.i: Declare \Delta x^i=dxx{0,1,2} and invdxx{0,1,2}, as well as xxmin[3] and xxmax[3]: const REAL xxmin[3] = {-1.3255,-0.085,-0.085}; const REAL xxmax[3] = { 1.6745, 0.115, 0.115}; params.dxx0 = (xxmax[0] - xxmin[0]) / ((REAL)params.Nxx0+1); params.dxx1 = (xxmax[1] - xxmin[1]) / ((REAL)params.Nxx1+1); params.dxx2 = (xxmax[2] - xxmin[2]) / ((REAL)params.Nxx2+1); printf("dxx0,dxx1,dxx2 = %.5e,%.5e,%.5e\\n",params.dxx0,params.dxx1,params.dxx2); params.invdx0 = 1.0 / params.dxx0; params.invdx1 = 1.0 / params.dxx1; params.invdx2 = 1.0 / params.dxx2; const int poison_grids = 0; // Standard GRFFE parameters: params.GAMMA_SPEED_LIMIT = 2000.0; params.diss_strength = 0.1; """) ###Output _____no_output_____ ###Markdown Step 4: Set up boundary condition functions for chosen singular, curvilinear coordinate system \[Back to [top](toc)\]$$\label{bc_functs}$$Next apply singular, curvilinear coordinate boundary conditions [as documented in the corresponding NRPy+ tutorial notebook](Tutorial-Start_to_Finish-Curvilinear_BCs.ipynb)...But, for the moment, we're actually just using this because it writes the file `gridfunction_defines.h`. 
###Code import CurviBoundaryConditions.CurviBoundaryConditions as cbcs cbcs.Set_up_CurviBoundaryConditions(os.path.join(Ccodesdir,"boundary_conditions/"),Cparamspath=os.path.join("../"),enable_copy_of_static_Ccodes=False) ###Output Wrote to file "GiRaFFE_staggered_1D_Tests_standalone_Ccodes/boundary_conditions/parity_conditions_symbolic_dot_products.h" Evolved parity: ( AD0:1, AD1:2, AD2:3, StildeD0:1, StildeD1:2, StildeD2:3, psi6Phi:0 ) AuxEvol parity: ( BU0:1, BU1:2, BU2:3, B_lU0:1, B_lU1:2, B_lU2:3, B_rU0:1, B_rU1:2, B_rU2:3, BstaggerU0:1, BstaggerU1:2, BstaggerU2:3, Bstagger_lU0:1, Bstagger_lU1:2, Bstagger_lU2:3, Bstagger_rU0:1, Bstagger_rU1:2, Bstagger_rU2:3, Stilde_flux_HLLED0:1, Stilde_flux_HLLED1:2, Stilde_flux_HLLED2:3, ValenciavU0:1, ValenciavU1:2, ValenciavU2:3, Valenciav_lU0:1, Valenciav_lU1:2, Valenciav_lU2:3, Valenciav_llU0:1, Valenciav_llU1:2, Valenciav_llU2:3, Valenciav_lrU0:1, Valenciav_lrU1:2, Valenciav_lrU2:3, Valenciav_rU0:1, Valenciav_rU1:2, Valenciav_rU2:3, Valenciav_rlU0:1, Valenciav_rlU1:2, Valenciav_rlU2:3, Valenciav_rrU0:1, Valenciav_rrU1:2, Valenciav_rrU2:3, alpha:0, alpha_face:0, betaU0:1, betaU1:2, betaU2:3, beta_faceU0:1, beta_faceU1:2, beta_faceU2:3, cmax_x:0, cmax_y:0, cmax_z:0, cmin_x:0, cmin_y:0, cmin_z:0, gammaDD00:4, gammaDD01:5, gammaDD02:6, gammaDD11:7, gammaDD12:8, gammaDD22:9, gamma_faceDD00:4, gamma_faceDD01:5, gamma_faceDD02:6, gamma_faceDD11:7, gamma_faceDD12:8, gamma_faceDD22:9, psi6_temp:0, psi6center:0 ) Wrote to file "GiRaFFE_staggered_1D_Tests_standalone_Ccodes/boundary_conditions/EigenCoord_Cart_to_xx.h" ###Markdown Step 5: `GiRaFFE_NRPy_standalone.c`: The Main C Code \[Back to [top](toc)\]$$\label{mainc}$$ ###Code # Part P0: Define REAL, set the number of ghost cells NGHOSTS (from NRPy+'s FD_CENTDERIVS_ORDER), # and set the CFL_FACTOR (which can be overwritten at the command line) with open(os.path.join(Ccodesdir,"GiRaFFE_NRPy_REAL__NGHOSTS__CFL_FACTOR.h"), "w") as file: file.write(""" // Part P0.a: Set the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER #define NGHOSTS """+str(3)+""" #define NGHOSTS_A2B """+str(2)+""" // Part P0.b: Set the numerical precision (REAL) to double, ensuring all floating point // numbers are stored to at least ~16 significant digits #define REAL """+REAL+""" // Part P0.c: Set the CFL Factor. Can be overwritten at command line. REAL CFL_FACTOR = """+str(default_CFL_FACTOR)+";") %%writefile $Ccodesdir/GiRaFFE_NRPy_standalone.c // Step P0: Define REAL and NGHOSTS; and declare CFL_FACTOR. This header is generated in NRPy+. #include "GiRaFFE_NRPy_REAL__NGHOSTS__CFL_FACTOR.h" #include "declare_Cparameters_struct.h" const int NSKIP_1D_OUTPUT = 1; // Step P1: Import needed header files #include "stdio.h" #include "stdlib.h" #include "math.h" #include "time.h" #include "stdint.h" // Needed for Windows GCC 6.x compatibility #ifndef M_PI #define M_PI 3.141592653589793238462643383279502884L #endif #ifndef M_SQRT1_2 #define M_SQRT1_2 0.707106781186547524400844362104849039L #endif // Step P2: Declare the IDX4S(gf,i,j,k) macro, which enables us to store 4-dimensions of // data in a 1D array. In this case, consecutive values of "i" // (all other indices held to a fixed value) are consecutive in memory, where // consecutive values of "j" (fixing all other indices) are separated by // Nxx_plus_2NGHOSTS0 elements in memory. Similarly, consecutive values of // "k" are separated by Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1 in memory, etc. 
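// For example, on a hypothetical 10x10x10 grid (Nxx_plus_2NGHOSTS0 = Nxx_plus_2NGHOSTS1 = Nxx_plus_2NGHOSTS2 = 10),
//   IDX4S(1,2,3,4) = 2 + 10*(3 + 10*(4 + 10*1)) = 1432,
// i.e., gridfunction number 1 at grid point (i,j,k) = (2,3,4).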
#define IDX4S(g,i,j,k) \ ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) + Nxx_plus_2NGHOSTS2 * (g) ) ) ) #define IDX4ptS(g,idx) ( (idx) + (Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2) * (g) ) #define IDX3S(i,j,k) ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) ) ) ) #define LOOP_REGION(i0min,i0max, i1min,i1max, i2min,i2max) \ for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) #define LOOP_ALL_GFS_GPS(ii) _Pragma("omp parallel for") \ for(int (ii)=0;(ii)<Nxx_plus_2NGHOSTS_tot*NUM_EVOL_GFS;(ii)++) // Step P3: Set gridfunction macros #include "boundary_conditions/gridfunction_defines.h" // Step P4: Include the RHS, BC, and primitive recovery functions #include "GiRaFFE_NRPy_Main_Driver.h" // Step P5: Include the initial data functions #include "set_initial_spacetime_metric_data.h" #include "InitialData/initial_data.h" // main() function: // Step 0: Read command-line input, set up grid structure, allocate memory for gridfunctions, set up coordinates // Step 1: Set up scalar wave initial data // Step 2: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm, // applying quadratic extrapolation outer boundary conditions. // Step 3: Output relative error between numerical and exact solution. // Step 4: Free all allocated memory int main(int argc, const char *argv[]) { paramstruct params; #include "set_Cparameters_default.h" // Step 0a: Read command-line input, error out if nonconformant if(argc != 5 || atoi(argv[1]) < NGHOSTS || atoi(argv[2]) < NGHOSTS || atoi(argv[3]) < NGHOSTS) { printf("Error: Expected three command-line arguments: ./GiRaFFE_NRPy_standalone [Nx] [Ny] [Nz],\n"); printf("where Nx is the number of grid points in the x direction, and so forth.\n"); printf("Nx,Ny,Nz MUST BE larger than NGHOSTS (= %d)\n",NGHOSTS); exit(1); } // Step 0c: Set free parameters, overwriting Cparameters defaults // by hand or with command-line input, as desired. #include "free_parameters.h" #include "set_Cparameters-nopointer.h" // ... and then set up the numerical grid structure in time: const REAL t_final = 2.0; const REAL CFL_FACTOR = 0.5; // Set the CFL Factor // Step 0c: Allocate memory for gridfunctions const int Nxx_plus_2NGHOSTS_tot = Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2; // Step 0k: Allocate memory for gridfunctions #include "MoLtimestepping/RK_Allocate_Memory.h" REAL *restrict auxevol_gfs = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot); REAL *evol_gfs_exact = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot); REAL *auxevol_gfs_exact = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot); // For debugging, it can be useful to set everything to NaN initially. if(poison_grids) { for(int ii=0;ii<NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) { y_n_gfs[ii] = 1.0/0.0; y_nplus1_running_total_gfs[ii] = 1.0/0.0; //k_odd_gfs[ii] = 1.0/0.0; //k_even_gfs[ii] = 1.0/0.0; diagnostic_output_gfs[ii] = 1.0/0.0; evol_gfs_exact[ii] = 1.0/0.0; } for(int ii=0;ii<NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot;ii++) { auxevol_gfs[ii] = 1.0/0.0; auxevol_gfs_exact[ii] = 1.0/0.0; } } // Step 0d: Set up coordinates: Set dx, and then dt based on dx_min and CFL condition // This is probably already defined above, but just in case... #ifndef MIN #define MIN(A, B) ( ((A) < (B)) ? (A) : (B) ) #endif REAL dt = CFL_FACTOR * MIN(dxx0,MIN(dxx1,dxx2)); // CFL condition int Nt = (int)(t_final / dt + 0.5); // The number of points in time. 
//Add 0.5 to account for C rounding down integers. // Step 0e: Set up cell-centered Cartesian coordinate grids REAL *xx[3]; xx[0] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS0); xx[1] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS1); xx[2] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS2); for(int j=0;j<Nxx_plus_2NGHOSTS0;j++) xx[0][j] = xxmin[0] + (j-NGHOSTS+1)*dxx0; for(int j=0;j<Nxx_plus_2NGHOSTS1;j++) xx[1][j] = xxmin[1] + (j-NGHOSTS+1)*dxx1; for(int j=0;j<Nxx_plus_2NGHOSTS2;j++) xx[2][j] = xxmin[2] + (j-NGHOSTS+1)*dxx2; // Step 1: Set up initial data to be exact solution at time=0: REAL time = 0.0; set_initial_spacetime_metric_data(&params, xx, auxevol_gfs); const char *initial_data_option = argv[4]; initial_data(initial_data_option, &params, xx, auxevol_gfs, y_n_gfs); // Fill in the remaining quantities GiRaFFE_compute_B_and_Bstagger_from_A(&params, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD00GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD01GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD02GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD11GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD12GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*GAMMADD22GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*PSI6_TEMPGF, /* Temporary storage,overwritten */ y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD0GF, y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD1GF, y_n_gfs+Nxx_plus_2NGHOSTS_tot*AD2GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU0GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU1GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BU2GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU0GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU1GF, auxevol_gfs+Nxx_plus_2NGHOSTS_tot*BSTAGGERU2GF); //override_BU_with_old_GiRaFFE(&params,auxevol_gfs,0); GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs,y_n_gfs); // Extra stack, useful for debugging: GiRaFFE_NRPy_cons_to_prims(&params,xx,auxevol_gfs,y_n_gfs); for(int n=0;n<=Nt;n++) { // Main loop to progress forward in time. //for(int n=0;n<=1;n++) { // Main loop to progress forward in time. // Step 1a: Set current time to correct value & compute exact solution time = ((REAL)n)*dt; /* Step 2: Validation: Output relative error between numerical and exact solution, */ if(time == 0.0 || time == 0.5 || time == 1.0 || time == 2.0 || time == 0.02 || time == 0.56) { // Step 2c: Output relative error between exact & numerical at center of grid. const int i0mid=Nxx_plus_2NGHOSTS0/2; const int i1mid=Nxx_plus_2NGHOSTS1/2; const int i2mid=Nxx_plus_2NGHOSTS2/2; char filename[100]; sprintf(filename,"out%d__%s-%08d.txt", Nxx0, initial_data_option, n); FILE *out2D = fopen(filename, "w"); for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) { const int idx = IDX3S(i0,i1mid,i2mid); fprintf(out2D,"%.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e\n", xx[0][i0], auxevol_gfs[IDX4ptS(BU0GF,idx)],auxevol_gfs[IDX4ptS(BU1GF,idx)],auxevol_gfs[IDX4ptS(BU2GF,idx)], y_n_gfs[IDX4ptS(AD0GF,idx)],y_n_gfs[IDX4ptS(AD1GF,idx)],y_n_gfs[IDX4ptS(AD2GF,idx)], y_n_gfs[IDX4ptS(STILDED0GF,idx)],y_n_gfs[IDX4ptS(STILDED1GF,idx)],y_n_gfs[IDX4ptS(STILDED2GF,idx)], auxevol_gfs[IDX4ptS(VALENCIAVU0GF,idx)],auxevol_gfs[IDX4ptS(VALENCIAVU1GF,idx)],auxevol_gfs[IDX4ptS(VALENCIAVU2GF,idx)], y_n_gfs[IDX4ptS(PSI6PHIGF,idx)], time); } fclose(out2D); // For convergence testing, we'll shift the grid x -> x-1 and output initial data again, giving the exact solution. 
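      // (For the Alfven wave, the exact solution is the t=0 data advected at the wave speed mu_AW,
      //  so evaluating the initial data at x - mu_AW*time reproduces the exact solution at time t;
      //  the grid shift is undone again immediately after this output block.)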
LOOP_REGION(0,Nxx_plus_2NGHOSTS0,0,1,0,1) { xx[0][i0] += -mu_AW*time; //xx[0][i0] += -time; } set_initial_spacetime_metric_data(&params,xx,auxevol_gfs_exact); initial_data(initial_data_option, &params,xx,auxevol_gfs_exact,evol_gfs_exact); // Fill in the remaining quantities //driver_A_to_B(&params,evol_gfs_exact,auxevol_gfs_exact); GiRaFFE_NRPy_prims_to_cons(&params,auxevol_gfs_exact,evol_gfs_exact); // And now, we'll set the grid back to rights. LOOP_REGION(0,Nxx_plus_2NGHOSTS0,0,1,0,1) { xx[0][i0] -= -mu_AW*time; //xx[0][i0] -= -time; } sprintf(filename,"out%d-%08d_exact.txt",Nxx0,n); FILE *out2D_exact = fopen(filename, "w"); for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) { const int idx = IDX3S(i0,i1mid,i2mid); fprintf(out2D_exact,"%.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e %.16e\n", xx[0][i0], auxevol_gfs_exact[IDX4ptS(BU0GF,idx)],auxevol_gfs_exact[IDX4ptS(BU1GF,idx)],auxevol_gfs_exact[IDX4ptS(BU2GF,idx)], evol_gfs_exact[IDX4ptS(AD0GF,idx)],evol_gfs_exact[IDX4ptS(AD1GF,idx)],evol_gfs_exact[IDX4ptS(AD2GF,idx)], evol_gfs_exact[IDX4ptS(STILDED0GF,idx)],evol_gfs_exact[IDX4ptS(STILDED1GF,idx)],evol_gfs_exact[IDX4ptS(STILDED2GF,idx)], auxevol_gfs_exact[IDX4ptS(VALENCIAVU0GF,idx)],auxevol_gfs_exact[IDX4ptS(VALENCIAVU1GF,idx)],auxevol_gfs_exact[IDX4ptS(VALENCIAVU2GF,idx)], evol_gfs_exact[IDX4ptS(PSI6PHIGF,idx)]); } fclose(out2D_exact); } // Step 3: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm, // applying quadratic extrapolation outer boundary conditions. // Step 3.b: Step forward one timestep (t -> t+dt) in time using // chosen RK-like MoL timestepping algorithm #include "MoLtimestepping/RK_MoL.h" } // End main loop to progress forward in time. // Step 4: Free all allocated memory #include "MoLtimestepping/RK_Free_Memory.h" free(auxevol_gfs); free(auxevol_gfs_exact); free(evol_gfs_exact); for(int i=0;i<3;i++) free(xx[i]); return 0; } ###Output Writing GiRaFFE_staggered_1D_Tests_standalone_Ccodes//GiRaFFE_NRPy_standalone.c ###Markdown Step 6: Compile generated C codes & perform GRFFE simulations \[Back to [top](toc)\]$$\label{compileexec}$$To aid in the cross-platform-compatible (with Windows, MacOS, & Linux) compilation and execution, we make use of `cmdline_helper` [(**Tutorial**)](Tutorial-cmdline_helper.ipynb). ###Code cmd.C_compile(os.path.join(Ccodesdir,"GiRaFFE_NRPy_standalone.c"), os.path.join(Ccodesdir,"output","GiRaFFE_NRPy_standalone"),compile_mode="optimized") # Change to output directory os.chdir(outdir) # Clean up existing output files cmd.delete_existing_files("out*.txt") cmd.delete_existing_files("out*.png") # ID options are: "AlfvenWave", "ThreeAlfvenWaves", "DegenAlfvenWave", "FastWave", "FFEBD" for opt in ID_opts: cmd.Execute("GiRaFFE_NRPy_standalone", "299 4 4 "+opt, "out_298"+opt+".txt") # cmd.Execute("GiRaFFE_NRPy_standalone", "1280 9 9 "+opt, "out_1280"+opt+".txt") # cmd.Execute("GiRaFFE_NRPy_standalone", "1280 32 32 "+opt, "out_"+opt+".txt") # cmd.Execute("GiRaFFE_NRPy_standalone", "149 9 9 AlfvenWave","out149.txt") # Return to root directory os.chdir(os.path.join("../../")) ###Output Compiling executable... (EXEC): Executing `gcc -std=gnu99 -Ofast -fopenmp -march=native -funroll-loops GiRaFFE_staggered_1D_Tests_standalone_Ccodes/GiRaFFE_NRPy_standalone.c -o GiRaFFE_staggered_1D_Tests_standalone_Ccodes/output/GiRaFFE_NRPy_standalone -lm`... (BENCH): Finished executing in 3.215022563934326 seconds. Finished compilation. 
(EXEC): Executing `taskset -c 0,1,2,3 ./GiRaFFE_NRPy_standalone 299 4 4 AlfvenWave`... (BENCH): Finished executing in 17.259787559509277 seconds. (EXEC): Executing `taskset -c 0,1,2,3 ./GiRaFFE_NRPy_standalone 299 4 4 ThreeAlfvenWaves`... (BENCH): Finished executing in 17.4488844871521 seconds. (EXEC): Executing `taskset -c 0,1,2,3 ./GiRaFFE_NRPy_standalone 299 4 4 DegenAlfvenWave`... (BENCH): Finished executing in 17.24630904197693 seconds. (EXEC): Executing `taskset -c 0,1,2,3 ./GiRaFFE_NRPy_standalone 299 4 4 FastWave`... (BENCH): Finished executing in 17.657445907592773 seconds. (EXEC): Executing `taskset -c 0,1,2,3 ./GiRaFFE_NRPy_standalone 299 4 4 FFEBD`... (BENCH): Finished executing in 18.245166063308716 seconds. ###Markdown Step 7: Data Visualization \[Back to [top](toc)\]$$\label{plots}$$Now we plot the data and recreate figure 1 from the [GiRaFFE paper](https://arxiv.org/pdf/1704.00599.pdf). We reconstruct the electric field via$$E_i = -\epsilon_{ijk}v^j B^k$$the `calc_E` function below. We also calculate the FFE condition $B^2 - E^2$ below using the `calc_Bsquared_minus_Esquared` function. ###Code eDDD = ixp.LeviCivitaSymbol_dim3_rank3() def calc_E(data): VU0 = data[:, 10] VU1 = data[:, 11] VU2 = data[:, 12] BU0 = data[:, 1] BU1 = data[:, 2] BU2 = data[:, 3] VU = [VU0, VU1, VU2] BU = [BU0, BU1, BU2] ED = np.zeros((VU0.size, 3)) for i in range(3): for j in range(3): for k in range(3): ED[:,i] = ED[:,i] - eDDD[i][j][k]*VU[j]*BU[k] return ED def calc_Bsquared_minus_Esquared(data): EU = calc_E(data) BU0 = data[:, 1] BU1 = data[:, 2] BU2 = data[:, 3] return (BU0**2 + BU1**2 + BU2**2) - (EU[:,0]**2 + EU[:,1]**2 + EU[:,2]**2) import numpy as np import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import matplotlib as mpl # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 labels = ["x","BU0","BU1","BU2","AD0","AD1","AD2","StildeD0","StildeD1","StildeD2","ValenciavU0","ValenciavU1","ValenciavU2", "psi6Phi"] fig = plt.figure(figsize=(6, 15)) # spec = mpl.gridspec.GridSpec(ncols=6, nrows=2,wspace=0.65, hspace=0.4) # 6 columns evenly divides both 2 & 3 # ax1 = fig.add_subplot(spec[0,0:2]) # row 0 with axes spanning 2 cols on evens # ax2 = fig.add_subplot(spec[0,2:4]) # ax3 = fig.add_subplot(spec[0,4:]) # ax4 = fig.add_subplot(spec[1,1:3]) # row 0 with axes spanning 2 cols on odds # ax5 = fig.add_subplot(spec[1,3:5]) gs = gridspec.GridSpec(nrows=5, ncols=1, hspace=0.5) ax1 = fig.add_subplot(gs[0, 0]) ax2 = fig.add_subplot(gs[1, 0]) ax3 = fig.add_subplot(gs[2, 0]) ax4 = fig.add_subplot(gs[3, 0]) ax5 = fig.add_subplot(gs[4, 0]) Data_num_Fast_A = np.loadtxt(os.path.join(Ccodesdir,"output","out299__FastWave-00000000.txt")) Data_num_Fast_B = np.loadtxt(os.path.join(Ccodesdir,"output","out299__FastWave-00000100.txt")) E_Fast_A = calc_E(Data_num_Fast_A) E_Fast_B = calc_E(Data_num_Fast_B) ax1.scatter(Data_num_Fast_A[:,0], np.abs(E_Fast_A[:,2]), s=1,label = 't = 0') ax1.plot(Data_num_Fast_B[:,0], np.abs(E_Fast_B[:,2]), 'k-', label = 't = 0.5') ax1.set_xlim(-0.5, 1.5) ax1.set_ylim(0.6) ax1.text(0.95, 0.01, 'Fast Wave', verticalalignment='bottom', horizontalalignment='right', transform=ax1.transAxes, color='black', fontsize=14) ax1.set_xlabel('x') ax1.set_ylabel(r'$|E^z|$') ax1.legend() Data_num_Alf_A = np.loadtxt(os.path.join(Ccodesdir,"output","out299__AlfvenWave-00000000.txt")) Data_num_Alf_B = np.loadtxt(os.path.join(Ccodesdir,"output","out299__AlfvenWave-00000400.txt")) ax2.scatter(Data_num_Alf_A[:,0], Data_num_Alf_A[:,3], s=1, label = 't = 0') ax2.plot(Data_num_Alf_B[:,0], Data_num_Alf_B[:,3], 
'k-', label = 't = 2.0') ax2.set_xlim(-1.5, 1.5) ax2.set_ylim(1.1) ax2.text(0.95, 0.01, 'Alfven Wave', verticalalignment='bottom', horizontalalignment='right', transform=ax2.transAxes, color='black', fontsize=14) ax2.set_xlabel('x') ax2.set_ylabel(r'$B^z$') ax2.legend(loc='center right') Data_num_DegenAlf_A = np.loadtxt(os.path.join(Ccodesdir,"output","out299__DegenAlfvenWave-00000000.txt")) Data_num_DegenAlf_B = np.loadtxt(os.path.join(Ccodesdir,"output","out299__DegenAlfvenWave-00000200.txt")) E_DegenAlf_A = calc_E(Data_num_DegenAlf_A) E_DegenAlf_B = calc_E(Data_num_DegenAlf_B) ax3.scatter(Data_num_DegenAlf_A[:,0], E_DegenAlf_A[:,1], s=1, label = 't = 0') ax3.plot(Data_num_DegenAlf_B[:,0], E_DegenAlf_B[:,1], 'k-', label = 't = 1.0') ax3.set_xlim(-1.5, 1.5) ax3.set_ylim(-1.35) ax3.text(0.95, 0.01, 'Deg. Alfven Wave', verticalalignment='bottom', horizontalalignment='right', transform=ax3.transAxes, color='black', fontsize=14) ax3.set_xlabel('x') ax3.set_ylabel(r'$E^y$') ax3.legend() # Data_num_ThreeAlf_A = np.loadtxt(os.path.join(Ccodesdir,"output","out149__ThreeAlfvenWaves-00000000.txt")) Data_num_ThreeAlf_B = np.loadtxt(os.path.join(Ccodesdir,"output","out299__ThreeAlfvenWaves-00000112.txt")) # ax2.plot(Data_num_ThreeAlf_A[:,0], Data_num_ThreeAlf_A[:,2], 'k-') ax4.scatter(Data_num_ThreeAlf_B[:,0], Data_num_ThreeAlf_B[:,2], s=1, label = 't = 0.56') ax4.set_xlim(-1.0, 1.0) # ax4.set_ylim() ax4.text(0.95, 0.01, 'Three Waves', verticalalignment='bottom', horizontalalignment='right', transform=ax4.transAxes, color='black', fontsize=14) ax4.set_xlabel('x') ax4.set_ylabel(r'$B^y$') ax4.legend(loc='center') Data_num_FFEBD_A = np.loadtxt(os.path.join(Ccodesdir,"output","out299__FFEBD-00000000.txt")) Data_num_FFEBD_B = np.loadtxt(os.path.join(Ccodesdir,"output","out299__FFEBD-00000004.txt")) B2mE2_A = calc_Bsquared_minus_Esquared(Data_num_FFEBD_A) B2mE2_B = calc_Bsquared_minus_Esquared(Data_num_FFEBD_B) ax5.scatter(Data_num_FFEBD_A[:,0], B2mE2_A, s=1, label = 't = 0') ax5.plot(Data_num_FFEBD_B[:,0], B2mE2_B, 'k-', label = 't = 0.02') ax5.set_xlim(-0.4, 0.6) ax5.text(0.95, 0.01, 'FFE Breakdown', verticalalignment='bottom', horizontalalignment='right', transform=ax5.transAxes, color='black', fontsize=14) ax5.set_xlabel('x') ax5.set_ylabel(r'$B^2 - E^2$') ax5.legend() plt.savefig(os.path.join(Ccodesdir,"output","NRPy-GiRaFFE"), dpi=800, bbox_inches="tight") plt.close(fig) img1 = plt.imread(os.path.join(Ccodesdir,"output","NRPy-GiRaFFE.png")) img2 = plt.imread(os.path.join("GiRaFFE_NRPy/example_par_files/figure1_GiRaFFE_paper.png")) NUM_ROWS = 1 IMGs_IN_ROW = 2 f, ax = plt.subplots(NUM_ROWS, IMGs_IN_ROW, figsize=(28,18)) plt.subplots_adjust(wspace=0.05) plt.axis('off') ax[0].imshow(img1) ax[1].imshow(img2) ax[0].set_title('image 1') ax[1].set_title('image 2') # title = 'side by side view of images' # f.suptitle(title, fontsize=16) plt.tight_layout() # plt.xticks([]) # plt.yticks([]) plt.show() ###Output _____no_output_____ ###Markdown Step 8: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename[Tutorial-Start_to_Finish-GiRaFFE_NRPy-1D_tests-staggered.pdf](Tutorial-Start_to_Finish-GiRaFFE_NRPy-1D_tests-staggered.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.) 
###Code import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-Start_to_Finish-GiRaFFE_NRPy-1D_tests-staggered",location_of_template_file=os.path.join("..")) ###Output Created Tutorial-Start_to_Finish-GiRaFFE_NRPy-1D_tests-staggered.tex, and compiled LaTeX file to PDF file Tutorial-Start_to_Finish- GiRaFFE_NRPy-1D_tests-staggered.pdf
drafts/Week 3/Asnt3_Opt_JA.ipynb
###Markdown Practice Assignment: Understanding Distributions Through Sampling** *This assignment is optional, and I encourage you to share your solutions with me and your peers in the discussion forums!* **To complete this assignment, create a code cell that:* Creates a number of subplots using the `pyplot subplots` or `matplotlib gridspec` functionality.* Creates an animation, pulling between 100 and 1000 samples from each of the random variables (`x1`, `x2`, `x3`, `x4`) for each plot and plotting this as we did in the lecture on animation.* **Bonus:** Go above and beyond and "wow" your classmates (and me!) by looking into matplotlib widgets and adding a widget which allows for parameterization of the distributions behind the sampling animations.Tips:* Before you start, think about the different ways you can create this visualization to be as interesting and effective as possible.* Take a look at the histograms below to get an idea of what the random variables look like, as well as their positioning with respect to one another. This is just a guide, so be creative in how you lay things out!* Try to keep the length of your animation reasonable (roughly between 10 and 30 seconds). ###Code import matplotlib.pyplot as plt import numpy as np %matplotlib notebook # generate 4 random variables from the random, gamma, exponential, and uniform distributions x1 = np.random.normal(loc = 0.0, scale = 1, size = 10000) x2 = np.random.gamma(2, 1.5, size = 10000) x3 = np.random.exponential(2, size = 10000) x4 = np.random.uniform(low = -5, high = 5, size = 10000) # plot the histograms plt.figure(figsize=(9,3)) plt.hist(x1, normed=True, bins=20, alpha=0.5) plt.hist(x2, normed=True, bins=20, alpha=0.5) plt.hist(x3, normed=True, bins=20, alpha=0.5) plt.hist(x4, normed=True, bins=20, alpha=0.5); plt.axis([-7,21,0,0.6]) plt.text(x1.mean()-1.5, 0.5, 'x1\nNormal') plt.text(x2.mean()-1.5, 0.5, 'x2\nGamma') plt.text(x3.mean()-1.5, 0.5, 'x3\nExponential') plt.text(x4.mean()-1.5, 0.5, 'x4\nUniform') import matplotlib.gridspec as gridspec import matplotlib.animation as animation n = 30 bins = 60 fig = plt.figure() gspec = gridspec.GridSpec(1, 4) normdist = plt.subplot(gspec[0,0], title = 'Normal') gammdist = plt.subplot(gspec[0,1], sharey = normdist, title = 'Gamma') expodist = plt.subplot(gspec[0,2], sharey = normdist, title = 'Exponential') unifdist = plt.subplot(gspec[0,3], sharey = normdist, title = 'Uniform') # create the function that will do the plotting, where curr is the current frame def animate(curr): # check if animation is at the last frame, and if so, stop the animation a if curr == n: a.event_source.stop() plt.cla() normdist.hist(x1[:curr], bins = bins, color = 'cyan') normdist.set_ylim(0,10) gammdist.hist(x2[:curr], bins = bins, color = 'green') expodist.hist(x3[:curr], bins = bins, color = 'red') unifdist.hist(x4[:curr], bins = bins, color = 'orange') plt.suptitle('Distribution Plot: n = {}'.format(curr)) a = animation.FuncAnimation(fig, animate, interval=10) for plot in [gammdist, expodist, unifdist]: plt.setp(plot.get_yticklabels(), visible=False) ###Output _____no_output_____
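A minimal alternative sketch of the animation cell above, assuming the same `x1`–`x4` arrays from the earlier cell and an arbitrary frame count; it clears each subplot before redrawing, so the per-frame histograms do not stack on top of one another:

```python
import matplotlib.pyplot as plt
import matplotlib.animation as animation

n = 100  # number of frames (and samples revealed per frame)
fig, axes = plt.subplots(1, 4, figsize=(9, 3))
data = [x1, x2, x3, x4]          # assumed to exist from the cells above
titles = ['Normal', 'Gamma', 'Exponential', 'Uniform']

def animate(curr):
    if curr == n:
        anim.event_source.stop()
    for ax, d, title in zip(axes, data, titles):
        ax.clear()               # wipe this subplot before redrawing
        ax.hist(d[:curr], bins=20)
        ax.set_title(title)
    fig.suptitle('Sampling the distributions: n = {}'.format(curr))

anim = animation.FuncAnimation(fig, animate, frames=n + 1, interval=100)
```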
Jupyter/kRPC Docs Launch to Orbit.ipynb
###Markdown Launch to Orbit (from kRPC docs) ###Code import math import time import krpc turn_start_altitude = 250 turn_end_altitude = 45000 target_altitude = 150000 conn = krpc.connect(name='Launch into orbit') vessel = conn.space_center.active_vessel # Set up streams for telemetry ut = conn.add_stream(getattr, conn.space_center, 'ut') altitude = conn.add_stream(getattr, vessel.flight(), 'mean_altitude') apoapsis = conn.add_stream(getattr, vessel.orbit, 'apoapsis_altitude') stage_2_resources = vessel.resources_in_decouple_stage(stage=2, cumulative=False) srb_fuel = conn.add_stream(stage_2_resources.amount, 'SolidFuel') # Pre-launch setup vessel.control.sas = False vessel.control.rcs = False vessel.control.throttle = 1.0 # Countdown... print('3...') time.sleep(1) print('2...') time.sleep(1) print('1...') time.sleep(1) print('Launch!') # Activate the first stage vessel.control.activate_next_stage() vessel.auto_pilot.engage() vessel.auto_pilot.target_pitch_and_heading(90, 90) # Main ascent loop srbs_separated = False turn_angle = 0 while True: # Gravity turn if altitude() > turn_start_altitude and altitude() < turn_end_altitude: frac = ((altitude() - turn_start_altitude) / (turn_end_altitude - turn_start_altitude)) new_turn_angle = frac * 90 if abs(new_turn_angle - turn_angle) > 0.5: turn_angle = new_turn_angle vessel.auto_pilot.target_pitch_and_heading(90-turn_angle, 90) # Separate SRBs when finished if not srbs_separated: if srb_fuel() < 0.1: vessel.control.activate_next_stage() srbs_separated = True print('SRBs separated') # Decrease throttle when approaching target apoapsis if apoapsis() > target_altitude*0.9: print('Approaching target apoapsis') break # Disable engines when target apoapsis is reached vessel.control.throttle = 0.25 while apoapsis() < target_altitude: pass print('Target apoapsis reached') vessel.control.throttle = 0.0 # Wait until out of atmosphere print('Coasting out of atmosphere') while altitude() < 70500: pass # Plan circularization burn (using vis-viva equation) print('Planning circularization burn') mu = vessel.orbit.body.gravitational_parameter r = vessel.orbit.apoapsis a1 = vessel.orbit.semi_major_axis a2 = r v1 = math.sqrt(mu*((2./r)-(1./a1))) v2 = math.sqrt(mu*((2./r)-(1./a2))) delta_v = v2 - v1 node = vessel.control.add_node( ut() + vessel.orbit.time_to_apoapsis, prograde=delta_v) # Calculate burn time (using rocket equation) F = vessel.available_thrust Isp = vessel.specific_impulse * 9.82 m0 = vessel.mass m1 = m0 / math.exp(delta_v/Isp) flow_rate = F / Isp burn_time = (m0 - m1) / flow_rate # Orientate ship print('Orientating ship for circularization burn') vessel.auto_pilot.reference_frame = node.reference_frame vessel.auto_pilot.target_direction = (0, 1, 0) vessel.auto_pilot.wait() # Wait until burn print('Waiting until circularization burn') burn_ut = ut() + vessel.orbit.time_to_apoapsis - (burn_time/2.) lead_time = 5 conn.space_center.warp_to(burn_ut - lead_time) # Execute burn print('Ready to execute burn') time_to_apoapsis = conn.add_stream(getattr, vessel.orbit, 'time_to_apoapsis') while time_to_apoapsis() - (burn_time/2.) > 0: pass print('Executing burn') vessel.control.throttle = 1.0 time.sleep(burn_time - 0.1) print('Fine tuning') vessel.control.throttle = 0.05 remaining_burn = conn.add_stream(node.remaining_burn_vector, node.reference_frame) while remaining_burn()[1] > 0: pass vessel.control.throttle = 0.0 node.remove() print('Launch complete') ###Output Ready to execute burn Executing burn Fine tuning
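###Markdown The circularization delta-v and burn duration above are computed inline with the vis-viva and rocket equations. Below is a small stand-alone sketch of the same two calculations, with no kRPC connection required, so the arithmetic can be checked outside the game. The numeric values at the end are illustrative, assumed Kerbin-like figures, not outputs of the script above.
###Code
import math

def circularization_dv(mu, r_apoapsis, semi_major_axis):
    """Delta-v needed at apoapsis to circularize (vis-viva equation)."""
    v_now = math.sqrt(mu * ((2.0 / r_apoapsis) - (1.0 / semi_major_axis)))
    v_circ = math.sqrt(mu / r_apoapsis)
    return v_circ - v_now

def burn_duration(delta_v, thrust, isp, mass, g0=9.82):
    """Burn time from the Tsiolkovsky rocket equation with constant mass flow."""
    ve = isp * g0                      # effective exhaust velocity
    m_final = mass / math.exp(delta_v / ve)
    flow_rate = thrust / ve
    return (mass - m_final) / flow_rate

# Illustrative numbers only (assumed, not taken from the flight above)
mu = 3.5316e12                 # Kerbin gravitational parameter, m^3/s^2
r = 600000.0 + 150000.0        # body radius + 150 km apoapsis, in metres
a = 600000.0 + 110000.0        # assumed semi-major axis of the ascent orbit
dv = circularization_dv(mu, r, a)
print(dv, burn_duration(dv, thrust=200000.0, isp=345.0, mass=20000.0))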
action_recognition/kinetics_video_to_frame.ipynb
###Markdown Function to convert Video to Frames ###Code import os import cv2 def video_to_frames(video_file,dest_name): vidcap = cv2.VideoCapture(video_file) success,image = vidcap.read() count = 0 while success: file_name = "frame" + str(count) + ".jpeg" file_path = os.path.join(dest_name,file_name) cv2.imwrite(file_path,image) success,image = vidcap.read() count += 1 # may change depending upon the number of videos in each folder for a class max_video = 8 ###Output _____no_output_____ ###Markdown Iterate through each directory and do the conversion ###Code source_directory = '/content/drive/MyDrive/kinetics-downloader/validation_vids' dest_directory = '/content/drive/MyDrive/kinetics-downloader/NEW_PROCESSED' for filename in os.listdir(source_directory): video_path = os.path.join(source_directory,filename) if len(os.listdir(video_path))>max_video: class_path = os.path.join(dest_directory,filename) if not os.path.exists(class_path): os.makedirs(class_path) cnt = 1 for mp4 in os.listdir(video_path): frame_path = os.path.join(class_path,str(cnt)) if not os.path.exists(frame_path): os.makedirs(frame_path) video_to_frames(os.path.join(video_path,mp4),frame_path) cnt+=1 len_list = [] name_list = [] for filename in os.listdir(source_directory): video_path = os.path.join(source_directory,filename) if len(os.listdir(video_path))>=8: len_list.append(len(os.listdir(video_path))) name_list.append(filename) len(len_list) name_list import torch len_list = torch.tensor(len_list) torch.mode(len_list) ###Output /usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:2: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
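###Markdown Writing every frame of every clip can produce a very large number of images. The helper below is one possible variant, a sketch and not part of the original pipeline, that keeps only every `stride`-th frame and can cap the total per video; it takes the path to a single video file.
###Code
import os
import cv2

def video_to_sampled_frames(video_file, dest_dir, stride=5, max_frames=None):
    """Write every `stride`-th frame of one video into dest_dir; return the number kept."""
    os.makedirs(dest_dir, exist_ok=True)
    vidcap = cv2.VideoCapture(video_file)
    success, image = vidcap.read()
    read_idx, kept = 0, 0
    while success:
        if read_idx % stride == 0:
            cv2.imwrite(os.path.join(dest_dir, "frame{}.jpeg".format(kept)), image)
            kept += 1
            if max_frames is not None and kept >= max_frames:
                break
        success, image = vidcap.read()
        read_idx += 1
    vidcap.release()
    return kept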
LeNet-MNIST.ipynb
###Markdown Image Classification using LeNet CNN MNIST Dataset - Handwritten Digits (0-9)![MNIST Sample Data](images/MNISTExamples.png) ###Code # import tensorflow module. Check API version. import tensorflow as tf import numpy as np print (tf.__version__) # required for TF to run within docker using GPU (ignore otherwise) gpu = tf.config.experimental.list_physical_devices('GPU') tf.config.experimental.set_memory_growth(gpu[0], True) ###Output _____no_output_____ ###Markdown Load the data ###Code # grab the MNIST dataset (may take time the first time) print("[INFO] downloading MNIST...") (trainData, trainLabels), (testData, testLabels) = tf.keras.datasets.mnist.load_data() ###Output _____no_output_____ ###Markdown Prepare the data ###Code # parameters for MNIST data set num_classes = 10 image_width = 28 image_height = 28 image_channels = 1 # shape the input data using "channels last" ordering # num_samples x rows x columns x depth trainData = trainData.reshape( (trainData.shape[0], image_height, image_width, image_channels)) testData = testData.reshape( (testData.shape[0], image_height, image_width, image_channels)) # scale data to the range of [0.0, 1.0] trainData = trainData.astype("float32") / 255.0 testData = testData.astype("float32") / 255.0 # pad the data to 32X32 for use in LeNet5 network trainData = np.pad(trainData, ((0,0),(2,2),(2,2),(0,0)), 'constant') testData = np.pad(testData, ((0,0),(2,2),(2,2),(0,0)), 'constant') # display data dimentions print ("trainData:", trainData.shape) print ("trainLabels:", trainLabels.shape) print ("testData:", testData.shape) print ("testLabels:", testLabels.shape) # parameters for training data set num_classes = 10 image_width = 32 image_height = 32 image_channels = 1 ###Output _____no_output_____ ###Markdown Define Model![LeNet5 Model](images/LeNet5.jpg) ###Code # import the necessary packages from tensorflow.keras import backend from tensorflow.keras import models from tensorflow.keras import layers # define the model as a class class LeNet: # INPUT => CONV => TANH => AVG-POOL => CONV => TANH => AVG-POOL => FC => TANH => FC => TANH => FC => SMAX @staticmethod def init(numChannels, imgRows, imgCols, numClasses, weightsPath=None): # if we are using "channels first", update the input shape if backend.image_data_format() == "channels_first": inputShape = (numChannels, imgRows, imgCols) else: # "channels last" inputShape = (imgRows, imgCols, numChannels) # initialize the model model = models.Sequential() # define the first set of CONV => ACTIVATION => POOL layers model.add(layers.Conv2D(filters=6, kernel_size=(5, 5), strides=(1, 1), padding="valid", activation=tf.nn.tanh, input_shape=inputShape)) model.add(layers.AveragePooling2D(pool_size=(2, 2), strides=(2, 2))) # define the second set of CONV => ACTIVATION => POOL layers model.add(layers.Conv2D(filters=16, kernel_size=(5, 5), strides=(1, 1), padding="valid", activation=tf.nn.tanh)) model.add(layers.AveragePooling2D(pool_size=(2, 2), strides=(2, 2))) # flatten the convolution volume to fully connected layers model.add(layers.Flatten()) # define the first FC => ACTIVATION layers model.add(layers.Dense(units=120, activation=tf.nn.tanh)) # define the second FC => ACTIVATION layers model.add(layers.Dense(units=84, activation=tf.nn.tanh)) # lastly, define the soft-max classifier model.add(layers.Dense(units=numClasses, activation=tf.nn.softmax)) # if a weights path is supplied (inicating that the model was # pre-trained), then load the weights if weightsPath is not None: 
model.load_weights(weightsPath) # return the constructed network architecture return model ###Output _____no_output_____ ###Markdown Compile Model ###Code # initialize the model print("[INFO] compiling model...") model = LeNet.init(numChannels=image_channels, imgRows=image_height, imgCols=image_width, numClasses=num_classes, weightsPath=None) # compile the model model.compile(optimizer=tf.keras.optimizers.SGD(lr=0.01), # Stochastic Gradient Descent loss="sparse_categorical_crossentropy", metrics=["accuracy"]) # print model summary model.summary() ###Output _____no_output_____ ###Markdown Train Model ###Code # define callback function for training termination criteria #accuracy_cutoff = 0.99 class myCallback(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, logs=None): if(logs.get('accuracy') > 0.99): print("\nReached 99% accuracy so cancelling training!") self.model.stop_training = True # initialize training config batch_size = 128 epochs = 100 # run training print("[INFO] training...") history = model.fit(x=trainData, y=trainLabels, validation_data=(testData, testLabels), batch_size=batch_size, epochs=epochs, verbose=1, callbacks=[myCallback()]) ###Output _____no_output_____ ###Markdown Evaluate Training Performance Expected Output![accplot](images/accuracyLeNetMNIST.png) ![lossplot](images/lossLeNetMNIST.png) ###Code %matplotlib inline import matplotlib.pyplot as plt # retrieve a list of list results on training and test data sets for each training epoch acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(acc)) # get number of epochs # plot training and validation accuracy per epoch plt.plot(epochs, acc, label='train accuracy') plt.plot(epochs, val_acc, label='val accuracy') plt.xlabel('epochs') plt.ylabel('accuracy') plt.legend(loc="lower right") plt.title('Training and validation accuracy') plt.figure() # plot training and validation loss per epoch plt.plot(epochs, loss, label='train loss') plt.plot(epochs, val_loss, label='val loss') plt.xlabel('epochs') plt.ylabel('loss') plt.legend(loc="upper right") plt.title('Training and validation loss') # show the accuracy on the testing set print("[INFO] evaluating...") (loss, accuracy) = model.evaluate(testData, testLabels, batch_size=batch_size, verbose=1) print("[INFO] accuracy: {:.2f}%".format(accuracy * 100)) model.save_weights("weights/LeNetMNIST.temp.hdf5", overwrite=True) ###Output _____no_output_____ ###Markdown Evaluate Pre-trained Model ###Code # init model and load the model weights print("[INFO] compiling model...") model = LeNet.init(numChannels=image_channels, imgRows=image_height, imgCols=image_width, numClasses=num_classes, weightsPath="weights/LeNetMNIST.hdf5") # compile the model model.compile(optimizer=tf.keras.optimizers.SGD(lr=0.01), # Stochastic Gradient Descent loss="sparse_categorical_crossentropy", metrics=["accuracy"]) # show the accuracy on the testing set print("[INFO] evaluating...") batch_size = 128 (loss, accuracy) = model.evaluate(testData, testLabels, batch_size=batch_size, verbose=1) print("[INFO] accuracy: {:.2f}%".format(accuracy * 100)) ###Output _____no_output_____ ###Markdown Model Predictions ###Code %matplotlib inline import numpy as np import cv2 import matplotlib.pyplot as plt # set up matplotlib fig, and size it to fit 3x4 pics nrows = 3 ncols = 4 fig = plt.gcf() fig.set_size_inches(ncols*4, nrows*4) # randomly select a few testing digits num_predictions = 12 test_indices = 
np.random.choice(np.arange(0, len(testLabels)), size=(num_predictions,)) test_images = np.stack(([testData[i] for i in test_indices])) test_labels = np.stack(([testLabels[i] for i in test_indices])) # compute predictions predictions = model.predict(test_images) for i in range(num_predictions): # select the most probable class prediction = np.argmax(predictions[i]) # rescale the test image image = (test_images[i] * 255).astype("uint8") # resize the image from a 28 x 28 image to a 96 x 96 image so we can better see it image = cv2.resize(image, (96, 96), interpolation=cv2.INTER_CUBIC) # convert grayscale image to RGB color image = cv2.merge([image] * 3) # select prediction text color if prediction == test_labels[i]: rgb_color = (0, 255, 0) # green for correct predictions else: rgb_color = (255, 0, 0) # red for wrong predictions # show the image and prediction cv2.putText(image, str(prediction), (0, 18), cv2.FONT_HERSHEY_SIMPLEX, 0.75, rgb_color, 1) # set up subplot; subplot indices start at 1 sp = plt.subplot(nrows, ncols, i + 1, title="label: %s" % test_labels[i]) sp.axis('Off') # don't show axes (or gridlines) plt.imshow(image) # show figure matrix plt.show() ###Output _____no_output_____ ###Markdown MNIST Digits Classification (LeNet) ###Code import torch import numpy as np import torchvision from torch.autograd import Variable from torch.utils.data import DataLoader from torchvision import datasets,transforms import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import copy import time # Check availability of GPU use_gpu = torch.cuda.is_available() if use_gpu: pinMem = True # Flag for pinning GPU memory print('GPU is available!') else: pinMem = False ###Output _____no_output_____ ###Markdown Downloading datset ###Code apply_transform = transforms.Compose([transforms.Resize(32),transforms.ToTensor()]) trainLoader = torch.utils.data.DataLoader(datasets.MNIST('./MNIST/', train=True, download=True, transform = apply_transform), batch_size=64, shuffle=True, num_workers=1, pin_memory=pinMem) testLoader = torch.utils.data.DataLoader(datasets.MNIST('./MNIST/', train=False,transform=apply_transform), batch_size=64, shuffle=True, num_workers=1, pin_memory=pinMem) # Size of train and test datasets print('No. of samples in train set: '+str(len(trainLoader.dataset))) print('No. 
of samples in test set: '+str(len(testLoader.dataset))) ###Output _____no_output_____ ###Markdown Define network architecture ###Code class LeNet(nn.Module): def __init__(self): super(LeNet, self).__init__() self.conv1 = nn.Conv2d(1, 6, kernel_size=5) self.pool1 = nn.MaxPool2d(kernel_size=2,stride=2) self.conv2 = nn.Conv2d(6, 16, kernel_size=5) self.pool2 = nn.MaxPool2d(kernel_size=2,stride=2) self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(400, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x): x = F.relu(self.conv1(x)) x = self.pool1(x) x = F.relu(self.conv2_drop(self.conv2(x))) x = self.pool2(x) x = x.view(-1, 400) x = F.relu(self.fc1(x)) x = F.dropout(x, training=self.training) x = F.relu(self.fc2(x)) x = F.dropout(x, training=self.training) x = self.fc3(x) return F.log_softmax(x,dim=1) ###Output _____no_output_____ ###Markdown Initialize the network ###Code net = LeNet() print(net) if use_gpu: net = net.cuda() net = torch.nn.DataParallel(net) ###Output _____no_output_____ ###Markdown Total number of trainable parameters ###Code totalParams = 0 for params in net.parameters(): print(params.size()) totalParams += np.sum(np.prod(params.size())) print('Total number of parameters: '+str(totalParams)) ###Output _____no_output_____ ###Markdown Define loss function and optimizer ###Code criterion = nn.NLLLoss() # Negative Log-likelihood # optimizer = optim.SGD(net.parameters(), lr=1e-2, momentum=0.9) # Stochastic gradient descent with momentum optimizer = optim.Adam(net.parameters(), lr=1e-4) # Adam ###Output _____no_output_____ ###Markdown Train the network ###Code iterations = 5 trainLoss = [] testAcc = [] start = time.time() for epoch in range(iterations): epochStart = time.time() runningLoss = 0 net.train(True) # For training for data in trainLoader: inputs,labels = data # Wrap them in Variable if use_gpu: inputs, labels = Variable(inputs.cuda()), \ Variable(labels.cuda()) else: inputs, labels = Variable(inputs), Variable(labels) # Initialize gradients to zero optimizer.zero_grad() # Feed-forward input data through the network outputs = net(inputs) # Compute loss/error loss = criterion(outputs, labels) # Backpropagate loss and compute gradients loss.backward() # Update the network parameters optimizer.step() # Accumulate loss per batch runningLoss += loss.item() avgTrainLoss = runningLoss/60000.0 trainLoss.append(avgTrainLoss) # Evaluating performance on test set for each epoch net.train(False) # For testing [Affects batch-norm and dropout layers (if any)] running_correct = 0 for data in testLoader: inputs,labels = data # Wrap them in Variable if use_gpu: inputs = Variable(inputs.cuda()) outputs = net(inputs) _, predicted = torch.max(outputs.data, 1) predicted = predicted.cpu() else: inputs = Variable(inputs) outputs = net(inputs) _, predicted = torch.max(outputs.data, 1) running_correct += (predicted == labels).sum() avgTestAcc = running_correct.numpy()/10000.0 testAcc.append(avgTestAcc) epochEnd = time.time()-epochStart print('Iteration: {:.0f} /{:.0f} ; Training Loss: {:.6f} ; Testing Acc: {:.3f} ; Time consumed: {:.0f}m {:.0f}s '\ .format(epoch + 1,iterations,avgTrainLoss,avgTestAcc*100,epochEnd//60,epochEnd%60)) end = time.time()-start print('Training completed in {:.0f}m {:.0f}s'.format(end//60,end%60)) ###Output _____no_output_____
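###Markdown A short sketch of a possible follow-up cell: run the trained network once more over the test loader, report the final accuracy, and save the learned weights. It assumes `net`, `testLoader` and `use_gpu` from the cells above; the checkpoint filename is arbitrary.
###Code
net.train(False)  # evaluation mode (disables the dropout layers)
correct, total = 0, 0
with torch.no_grad():
    for inputs, labels in testLoader:
        if use_gpu:
            inputs, labels = inputs.cuda(), labels.cuda()
        outputs = net(inputs)
        _, predicted = torch.max(outputs, 1)
        correct += (predicted == labels).sum().item()
        total += labels.size(0)
print('Final test accuracy: {:.2f}%'.format(100.0 * correct / total))

# persist the trained weights for later reuse
torch.save(net.state_dict(), 'lenet_mnist.pth')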
Module-9-1-TensorFlow-crash_course/nbs/0.1-download_flowers_data.ipynb
###Markdown Introduction Download and prepare the flowers data set for later use with PyTorch and TensorFlow. Setup ###Code # This is a quick check of whether the notebook is currently running on Google Colaboratory, # as that makes some difference for the code below. if 'google.colab' in str(get_ipython()): print('The notebook is running on Colab. colab=True.') colab=True else: print('The notebook is not running on Colab. colab=False.') colab=False # Set to True if you're using Paperspace Gradient: gradient=False %matplotlib inline import matplotlib.pyplot as plt import os, shutil, random from PIL import Image from pathlib import Path import urllib.request if colab or gradient: !pip install -Uqq fastbook import fastbook fastbook.setup_book() from fastbook import * from fastcore.all import * NB_DIR = Path.cwd() if colab: DATADIR = Path('./gdrive/MyDrive/ColabData') elif gradient: DATADIR = Path('/storage') else: from fastcore.all import * NB_DIR = Path.cwd() DATADIR = Path('/home/alex/data/dat255') # Set this to where you want to store downloaded data DATADIR.mkdir(exist_ok=True) ###Output _____no_output_____ ###Markdown Download and extract ###Code url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz' if not os.path.isfile(DATADIR/'flower_photos.tgz'): fn, _ = urllib.request.urlretrieve(url, DATADIR/'flower_photos.tgz') shutil.unpack_archive(fn, extract_dir=DATADIR) path = DATADIR/'flower_photos' else: print(f"Already downloaded to {DATADIR}") fn = DATADIR/'flower_photos.tgz' path = DATADIR/'flower_photos' ###Output Already downloaded to /home/alex/data/dat255 ###Markdown Inspect ###Code path.ls() imgs = L(path.glob('*/*.jpg')) imgs ###Output _____no_output_____ ###Markdown Let's display some random images: ###Code for img in random.choices(imgs, k=3): print(img) im = Image.open(img) print(im.size) plt.imshow(im) plt.axis('off') plt.show() ###Output /home/alex/data/dat255/flower_photos/tulips/16303377824_6e9128b4bd.jpg (500, 333) ###Markdown Export path Save the path to a file. To be loaded in later notebooks. ###Code import pickle with open(DATADIR/'path.pkl', 'wb') as f: pickle.dump(path, f) ###Output _____no_output_____
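###Markdown A quick, optional sanity check on the extracted data: count the images in each class folder. This assumes `path` still points at the extracted `flower_photos` directory from the cells above.
###Code
from collections import Counter

# one entry per class folder, counted from the parent directory of each image
class_counts = Counter(p.parent.name for p in path.glob('*/*.jpg'))
for cls, n in sorted(class_counts.items()):
    print(f'{cls:<12} {n}')
print('total images:', sum(class_counts.values()))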
TRends/TReNDS Neuroimaging.ipynb
###Markdown This work uses **Rapids.AI** to leverage GPUs ###Code %%time import sys !cp ../input/rapids/rapids.0.13.0 /opt/conda/envs/rapids.tar.gz !cd /opt/conda/envs/ && tar -xzvf rapids.tar.gz > /dev/null sys.path = ["/opt/conda/envs/rapids/lib/python3.6/site-packages"] + sys.path sys.path = ["/opt/conda/envs/rapids/lib/python3.6"] + sys.path sys.path = ["/opt/conda/envs/rapids/lib"] + sys.path !cp /opt/conda/envs/rapids/lib/libxgboost.so /opt/conda/lib/ import matplotlib.pyplot as plt import seaborn as sns # to_drop_67auc=['IC_20', # 'IC_02', # 'IC_05', # 'IC_16', # 'IC_10', # 'IC_08', # 'CBN(4)_vs_CON(37)', # 'CBN(4)_vs_CON(38)', # 'SCN(99)_vs_SCN(98)', # 'DMN(23)_vs_CON(37)', # 'DMN(40)_vs_CON(48)', # 'DMN(17)_vs_DMN(40)', # 'DMN(17)_vs_CON(88)', # 'DMN(17)_vs_CON(33)', # 'CON(79)_vs_SMN(54)', # 'CON(55)_vs_SCN(45)', # 'CON(88)_vs_SMN(54)', # 'CON(83)_vs_CON(48)', # 'CON(83)_vs_CON(67)', # 'CON(83)_vs_CON(37)', # 'CON(83)_vs_CON(33)', # ] # to_drop=['IC_20', # 'IC_02', # 'IC_05', # 'IC_16', # 'IC_10', # 'IC_08', # 'CBN(4)_vs_CON(37)', # 'CBN(4)_vs_CON(38)', # 'SCN(99)_vs_SCN(98)', # ] # len(to_drop) !pip install fastai2>/dev/null !pip install fast_tabnet>/dev/null from fastai2.basics import * from fastai2.tabular.all import * from fast_tabnet.core import * import numpy as np import pandas as pd import matplotlib.pyplot as plt from tqdm.notebook import tqdm import gc fnc_df = pd.read_csv("../input/trends-assessment-prediction/fnc.csv") loading_df = pd.read_csv("../input/trends-assessment-prediction/loading.csv") labels_df = pd.read_csv("../input/trends-assessment-prediction/train_scores.csv") fnc_features, loading_features = list(fnc_df.columns[1:]), list(loading_df.columns[1:]) df = fnc_df.merge(loading_df, on="Id") labels_df["is_train"] = True df = df.merge(labels_df, on="Id", how="left") targets = ['age','domain1_var1','domain1_var2','domain2_var1', 'domain2_var2'] #imputing missing values in targets from sklearn.impute import KNNImputer imputer = KNNImputer(n_neighbors = 5, weights="distance") df[targets] = pd.DataFrame(imputer.fit_transform(df[targets]), columns = targets) test_df = df[df["is_train"] != True].copy() train_df = df[df["is_train"] == True].copy() train_df = train_df.drop(['is_train'], axis=1) test_df = test_df.drop(targets+['is_train'], axis=1) features=list(set(train_df.columns)-set(targets)-set(['Id'])) #train_df[loading_features]=train_df[loading_features].pow(2) train_df[fnc_features]=train_df[fnc_features].mul(1/600) # train_df[fnc_features]=train_df[fnc_features].pow(2) #test_df[loading_features]=test_df[loading_features].pow(2) test_df[fnc_features]=test_df[fnc_features].mul(1/600) # test_df[fnc_features]=test_df[fnc_features].pow(2) #-------Normalizing------------------------ # from sklearn.preprocessing import StandardScaler # scaler = StandardScaler() # train_df[features] = scaler.fit_transform(train_df[features],train_df[targets]) # test_df[features] = scaler.transform(test_df[features]) #---------------------------------------------------- to_drop=[ 'IC_05', 'IC_16', 'IC_10', ] train_df[to_drop]=train_df[to_drop].pow(2) test_df[to_drop]=test_df[to_drop].pow(2) print(train_df.shape,test_df.shape) print("Train and test dataframes contain Id column!!") # def trends_scorer_multitask_scoring(y_true,y_preds): # ''' # custom scoring function used for evaluation in this competition # ''' # y_true=torch.tensor(y_true,requires_grad=True) # y_preds=torch.tensor(y_preds,requires_grad=True) # inp,targ = flatten_check(y_true,y_preds) # w = torch.tensor([.3, .175, 
.175, .175, .175],requires_grad=True) # op = torch.mean(torch.matmul(torch.abs(y_true-y_preds),w/torch.mean(y_true,axis=0)),axis=0) # return op def my_metric_gpu(y_true, y_pred): import numpy as np y_true = y_true.cpu().detach().numpy() y_pred = y_pred.cpu().detach().numpy() return np.mean(np.sum(np.abs(y_true - y_pred), axis=0)/np.sum(y_true, axis=0)) def trends_scorer_multitask_scoring_gpu(y_true,y_preds): ''' custom scoring function used for evaluation in this competition ''' import numpy as np y_true = y_true.cpu().detach().numpy() y_preds= y_preds.cpu().detach().numpy() w = np.array([.175, .175]) op = np.mean(np.matmul(np.abs(y_true-y_preds),w/np.mean(y_true,axis=0)),axis=0) return op ###Output _____no_output_____ ###Markdown [Tweedie](https://towardsdatascience.com/insurance-risk-pricing-tweedie-approach-1d71207268fc) ###Code from fastai2.layers import L1LossFlat,MSELossFlat from torch.nn import SmoothL1Loss class SmoothMAELoss(torch.nn.Module): ''' For use with GPU only ''' def __init__(self,l1): super().__init__() self.l1=l1 def forward(self,y, y_hat): loss = (1-self.l1)*SmoothL1Loss()(y, y_hat) + self.l1*L1LossFlat()(y, y_hat) return loss def get_tabnet_data(df,train_val_idx,target_name): targets=['age','domain1_var1','domain1_var2','domain2_var1','domain2_var2'] features=list(set(df.columns)-set(targets)-set(["pred_age","pred_domain1_var1","pred_domain1_var2","pred_domain2_var1","pred_domain2_var2",'IC_20','IC_18','CBN(4)_vs_CON(37)','IC_02'])) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") to = TabularPandas( df=df, procs=[], cat_names=None, cont_names=features, y_names=target_name, y_block=TransformBlock(), splits=train_val_idx, do_setup=True, device=device, inplace=False, reduce_memory=True, ) return to,features,len(target_name) def get_model(emb_szs,dls,n_features,n_labels): model=TabNetModel( emb_szs, n_cont=n_features, out_sz=n_labels, embed_p=0.0, y_range=None, n_d=8, n_a=8, n_steps=2,#2-DO NOT CHANGE gamma=1.15, n_independent=0, n_shared=2, epsilon=1e-15, virtual_batch_size=128, momentum=0.25,#keep it small ) return model ###Output _____no_output_____ ###Markdown [fit_one_policy()--CNNfitting](https://atmamani.github.io/projects/dl/fastai/fastai-1/) [fit_one_policy()](https://iconof.com/1cycle-learning-rate-policy/)Cyclical learning rates, which practically eliminates the need to experimentally find the best values andschedule for the global learning rates. Instead of using a fixed, or a decreasing learning rate, the CLR method allows learning rate to continuously oscillate between reasonable minimum and maximum bounds.If the learning rate (LR) is too small, overfitting can occur. Large learning rates help to regularize the training but if the learning rate is too large, the training will diverge. Hence a grid search of short runs to find learning rates that converge or diverge is possible but there is an easier way. 
Cyclical learning rates (CLR) and the learning rate range test (LR range test) were first proposed by Smith (2015) and later updated in Smith (2017) as a recipe for choosing the learning rate.To use CLR, one specifies minimum and maximum learning rate boundaries and a stepsizeLR Finder trains the model with exponentially growing learning rates from start_lr to end_lr for num_it and stops in case of divergence (unless stop_div=False) then plots the losses vs the learning rates with a log scale.A good value for the learning rates is then either one tenth of the minimum before the divergence, orwhen the slope is the steepest 0.16355 for n_d=n_a=16 0.16256 for n_d=n_a=8 0.16231 for n_d=n_a=8 gamma=1.15 0.16114 for n_d=n_a=8 gamma=1.15 learn.fit_one_cycle(60,lr_max=0.3,div=25.0)learn.fit_one_cycle(60,lr_max=0.001,div=10.0) ###Code overal_score = 0.0 !rm -rf /kaggle/working/models from sklearn.model_selection import KFold from torch.nn import SmoothL1Loss NUM_FOLDS = 7 def my_metric(y_true, y_pred): return np.mean(np.sum(np.abs(y_true - y_pred), axis=0)/np.sum(y_true, axis=0)) kf = KFold(n_splits=NUM_FOLDS, shuffle=True, random_state=2019) for target,w in tqdm([('age',0.3),('domain2_var1',0.175),('domain2_var2',0.175),('domain1_var1',0.175),('domain1_var2',0.175)]): y_oof = np.zeros(train_df.shape[0]) y_test = np.zeros((test_df.shape[0], NUM_FOLDS)) print('*'*20,target,'*'*20) for i,(train_index, valid_index) in enumerate(kf.split(train_df, train_df)): print('>'*20,'Fold-',i+1) _, val_df = train_df.iloc[train_index], train_df.iloc[valid_index] #get data target_name=[] target_name.append(target) to,features,n_labels = get_tabnet_data(train_df,(list(train_index), list(valid_index)),target_name) dls = to.dataloaders(bs=512, path='/kaggle/working/') emb_szs = get_emb_sz(to) #get model model = get_model(emb_szs,dls,len(features),n_labels) opt_func = partial(Adam,lr=0.01,mom=0.9,sqr_mom=0.99,wd=0.01,eps=1e-5,decouple_wd=True) learn = None learn = Learner(dls, model, loss_func=SmoothL1Loss(), opt_func=opt_func, metrics=my_metric_gpu) learn.fit_one_cycle( 60, lr_max=0.3, div=25.0, div_final=1000000, pct_start=0.3, cbs=[EarlyStoppingCallback(min_delta=0.01,patience=20), SaveModelCallback(fname="model_{}".format(i+1),min_delta=0.01)] ) learn.load("model_{}".format(i+1)) learn.fit_one_cycle( 60, lr_max=0.001, div=10.0, div_final=1000000, pct_start=0.3, cbs=[EarlyStoppingCallback(min_delta=0.005,patience=20), SaveModelCallback(fname="model_{}".format(i+1),min_delta=0.005)] ) print("Best model:",learn.loss) print("len(features):",len(features)) #validation to_val = to.new(val_df[features]) to_val.process() val_dl = dls.valid.new(to_val) val_pred,_ = learn.get_preds(dl=val_dl) #prediction to_tst = to.new(test_df[features]) to_tst.process() tst_dl = dls.valid.new(to_tst) test_pred,_ = learn.get_preds(dl=tst_dl) val_pred=val_pred.reshape(-1,) y_oof[valid_index] = val_pred test_pred=test_pred.reshape(-1,) y_test[:, i] = test_pred train_df["pred_{}".format(target)] = y_oof test_df[target] = y_test.mean(axis=1) score = my_metric(train_df[train_df[target].notnull()][target].values, train_df[train_df[target].notnull()]["pred_{}".format(target)].values) print("="*20,target, np.round(score, 5)) print("-"*100) overal_score += w*score print("Overal score:", np.round(overal_score, 5)) 0.065836*2 output = test_df # sub_df = pd.melt(output[["Id", "age", "domain1_var1", "domain1_var2", "domain2_var1", "domain2_var2"]], id_vars=["Id"], value_name="Predicted") sub_df["Id"] = sub_df["Id"].astype("str") + "_" + 
sub_df["variable"].astype("str") sub_df = sub_df.drop("variable", axis=1).sort_values("Id") assert sub_df.shape[0] == test_df.shape[0]*5 sub_df.head(10) sub_df.to_csv("submission_tabnet_0.16057.csv", index=False) # !rm -rf /kaggle/working/models # from sklearn.model_selection import KFold # from torch.nn import SmoothL1Loss # NUM_FOLDS = 7 # kf = KFold(n_splits=NUM_FOLDS, shuffle=True, random_state=2019) # all_preds = [] # for i,(train_index, val_index) in enumerate(kf.split(train_df,train_df)): # print('fold-',i+1) # #get data # targets=['age','domain1_var1','domain1_var2','domain2_var1','domain2_var2'] # to,n_features,n_labels = get_tabnet_data(train_df,(list(train_index), list(val_index)),targets) # dls = to.dataloaders(bs=512, path='/kaggle/working/') # emb_szs = get_emb_sz(to) # #get model # model = get_model(emb_szs,dls,n_features,n_labels) # opt_func = partial(Adam,lr=0.01,mom=0.9,sqr_mom=0.99,wd=0.01,eps=1e-5,decouple_wd=True) # learn = Learner(dls, model, loss_func=SmoothMAELoss(l1=0.0), opt_func=opt_func, metrics=trends_scorer_multitask_scoring_gpu) # learn.fit_one_cycle( # 100, # lr_max=0.09, # div=25.0, # div_final=1000000, # pct_start=0.25, # cbs=[EarlyStoppingCallback(min_delta=0.01,patience=50), # SaveModelCallback(fname="model_{}".format(i+1),min_delta=0.01)] # ) # # learn.load("model_{}".format(i+1)) # # learn.fit_one_cycle( # # 50, # # lr_max=0.001, # # div=5.0, # # div_final=1000000, # # pct_start=0.5, # # cbs=[EarlyStoppingCallback(min_delta=0.01,patience=50), # # SaveModelCallback(fname="model_{}".format(i+1),min_delta=0.01)] # # ) # #predicting # learn.load("model_{}".format(i+1)) # print("Best model:",learn.loss) # to_tst = to.new(test_df) # to_tst.process() # tst_dl = dls.valid.new(to_tst) # tst_preds,_ = learn.get_preds(dl=tst_dl) # cb = None # all_preds.append(tst_preds) # # PREDICTING...... 
# p=sum(all_preds)/NUM_FOLDS # targets=['age','domain1_var1','domain1_var2', 'domain2_var1','domain2_var2'] # res = pd.DataFrame(np.array(p),columns=[targets]) # ids=pd.DataFrame(test_df.Id.values,columns=['Id']) # a=pd.concat([ids,res],axis=1) # b=a.iloc[:,0:6] # b.columns=['Id','age','domain1_var1','domain1_var2', 'domain2_var1','domain2_var2'] # b.head() ###Output _____no_output_____ ###Markdown 0.8.MSE+0.2MAE---->0.159,n_d=n_a=32, and then gradually reducing...val loss=56 gamma=2.15 ###Code # learn.fine_tune( # 100, # base_lr=0.002, # freeze_epochs=1, # pct_start=0.3, # div=5.0, # div_final=100000.0, # ) # from hyperopt import tpe # from hyperopt import STATUS_OK # from hyperopt import Trials # from hyperopt import hp # from hyperopt import fmin # from sklearn.linear_model import LogisticRegression # from sklearn.model_selection import cross_val_score # from sklearn.datasets import load_breast_cancer # import sys # if not sys.warnoptions: # import warnings # warnings.simplefilter("ignore") # N_FOLDS = 10 # MAX_EVALS = 200 # def objective(params, n_folds = N_FOLDS): # model=TabNetModel( # ems[0], # n_cont=1399, # out_sz=5, # embed_p=0.0, # y_range=None, # epsilon=1e-15, # virtual_batch_size=128, # **params # ) # opt_func = partial(Adam, wd=0.01, eps=1e-5) # learn = Learner(data[0], model, loss_func=SmoothMAELoss(l1=0.0), opt_func=opt_func, metrics=[trends_scorer_multitask_scoring_gpu]) # return {'loss':learn.loss,'params': params, 'status': STATUS_OK} # space = { # 'n_d' : hp.choice('n_d', range(2,64,1)), # 'n_a' : hp.choice('n_a', range(2,64,1)), # 'n_steps':hp.choice('n_steps', range(1,10,1)), # 'gamma': hp.uniform('gamma', 1, 5), # 'n_independent':hp.choice('n_independent', range(1,10,1)), # 'n_shared': hp.choice('n_shared', range(1,10,1)), # 'momentum' : hp.uniform('momentum', 0, 1) # } # # Algorithm # tpe_algorithm = tpe.suggest # # Trials object to track progress # bayes_trials = Trials() # # Optimize # best = fmin(fn = objective, space = space, algo = tpe.suggest, max_evals = MAX_EVALS, trials = bayes_trials) # best ###Output _____no_output_____ ###Markdown LGB [LGB optuna integration](https://medium.com/optuna/lightgbm-tuner-new-optuna-integration-for-hyperparameter-optimization-8b7095e99258) ###Code # import optuna.integration.lightgbm as lgb # import numpy as np # import pandas as pd # from sklearn.model_selection import KFold, train_test_split # from tqdm.notebook import tqdm # import gc # fnc_df = pd.read_csv("../input/trends-assessment-prediction/fnc.csv") # loading_df = pd.read_csv("../input/trends-assessment-prediction/loading.csv") # labels_df = pd.read_csv("../input/trends-assessment-prediction/train_scores.csv") # fnc_features, loading_features = list(fnc_df.columns[1:]), list(loading_df.columns[1:]) # df = fnc_df.merge(loading_df, on="Id") # labels_df["is_train"] = True # df = df.merge(labels_df, on="Id", how="left") # target_cols = ['age', 'domain1_var1', 'domain1_var2', 'domain2_var1', 'domain2_var2'] # #imputing missing values in targets # from sklearn.impute import KNNImputer # imputer = KNNImputer(n_neighbors = 5, weights="distance") # df[target_cols] = pd.DataFrame(imputer.fit_transform(df[target_cols]), columns = target_cols) # test_df = df[df["is_train"] != True].copy() # train_df = df[df["is_train"] == True].copy() # #y_train_df = train_df[target_cols] # train_df = train_df.drop(['is_train'], axis=1) # #train_df = train_df.drop(target_cols + ['is_train'], axis=1) # test_df = test_df.drop(target_cols+['is_train'], axis=1) # 
targets=['age','domain1_var1','domain1_var2', 'domain2_var1','domain2_var2'] # features=list(set(train_df.columns)-set(targets)-set(['Id'])) # train_df[features]=train_df[features].pow(2) # train_df[fnc_features]=train_df[fnc_features].mul(1/100) # train_df[fnc_features]=train_df[fnc_features].pow(2) # test_df[features]=test_df[features].pow(2) # test_df[fnc_features]=test_df[fnc_features].mul(1/100) # test_df[fnc_features]=test_df[fnc_features].pow(2) # print(train_df.shape,test_df.shape) # print("Train and test dataframes contain Id columns,too!!") # from sklearn.preprocessing import StandardScaler # scaler = StandardScaler() # train_df[features] = scaler.fit_transform(train_df[features],train_df[targets]) # test_df[features] = scaler.transform(test_df[features]) # X_train = train_df[features] # X_test = test_df[features] # y_train = train_df[targets] # print(X_train.shape,X_test.shape) # def my_metric(y_pred,train_data): # y_true = train_data.get_label() # print(len(y_true),len(y_pred)) # return np.mean(np.sum(np.abs(y_true - y_pred), axis=0)/np.sum(y_true, axis=0)) # X_tr, X_val, y_tr, y_val = train_test_split(X_train, y_train, test_size=0.2, shuffle=True, random_state=20) # train_data = lgb.Dataset(X_tr, label=y_tr['age']) # val_data = lgb.Dataset(X_val, label=y_val['age']) # params = { # 'objective':'fair', # 'metric':'l1', # 'boosting_type':'gbdt', # 'learning_rate':0.001, # 'tree_learner':'feature_parallel', # 'num_threads':4, # 'seed':0 # } # best_params, tuning_history = dict(), list() # model = lgb.train(params, # train_data, # num_boost_round=1000, # early_stopping_rounds=20, # valid_sets=[train_data,val_data], # verbose_eval=100, # learning_rates=lambda it: 0.01 * (0.8 ** it), # best_params=best_params, # tuning_history=tuning_history) # print("Best Params", best_params) ###Output _____no_output_____ ###Markdown For age--'objective': 'gamma' {'lambda_l1': 0.029688407904725312, 'lambda_l2': 4.927181117399353e-05, 'num_leaves': 101, 'feature_fraction': 0.9840000000000001, 'bagging_fraction': 1.0, 'bagging_freq': 0, 'min_child_samples': 20, 'objective': 'gamma', 'metric': 'l1', 'boosting_type': 'gbdt', 'learning_rate': 0.001, 'tree_learner': 'feature_parallel', 'num_threads': 8, 'seed': 0} For Domain1_var1--'objective': 'fair' {'lambda_l1': 0.0, 'lambda_l2': 0.0, 'num_leaves': 251, 'feature_fraction': 0.95, 'bagging_fraction': 0.9765733975192812, 'bagging_freq': 1, 'min_child_samples': 10, 'objective': 'fair', 'metric': 'l1', 'boosting_type': 'gbdt', 'learning_rate': 0.001, 'tree_learner': 'feature_parallel', 'num_threads': 4, 'seed': 0} For Domain1_var2--'objective': 'huber' {'lambda_l1': 7.733581684659643e-05, 'lambda_l2': 1.1878841440097718, 'num_leaves': 31, 'feature_fraction': 1.0, 'bagging_fraction': 1.0, 'bagging_freq': 0, 'min_child_samples': 25, 'objective': 'huber', 'metric': 'l1', 'boosting_type': 'gbdt', 'learning_rate': 0.01, 'tree_learner': 'feature_parallel', 'num_threads': 4, 'seed': 0} For Domain2_var1--'objective': 'huber' {'lambda_l1': 0.041395115988296434, 'lambda_l2': 0.00011959715500563623, 'num_leaves': 105, 'feature_fraction': 0.6, 'bagging_fraction': 0.5439884362351342, 'bagging_freq': 4, 'min_child_samples': 10, 'objective': 'huber', 'metric': 'l1', 'boosting_type': 'gbdt', 'learning_rate': 0.01, 'max_depth': -1, 'tree_learner': 'feature_parallel', 'num_threads': 8, 'seed': 0} For Domain2_var1--'objective': 'gamma' {'lambda_l1': 0.0, 'lambda_l2': 0.0, 'num_leaves': 53, 'feature_fraction': 0.584, 'bagging_fraction': 1.0, 'bagging_freq': 0, 
'min_child_samples': 20, 'objective': 'gamma', 'metric': 'l1', 'boosting_type': 'gbdt', 'learning_rate': 0.001, 'tree_learner': 'feature_parallel', 'num_threads': 4, 'seed': 0} For Domain2_var2--'objective': 'huber' {'lambda_l1': 9.606755708273219e-05, 'lambda_l2': 0.17107930638380894, 'num_leaves': 59, 'feature_fraction': 1.0, 'bagging_fraction': 1.0, 'bagging_freq': 0, 'min_child_samples': 20, 'objective': 'huber', 'metric': 'l1', 'boosting_type': 'gbdt', 'learning_rate': 0.01, 'max_depth': -1, 'tree_learner': 'feature_parallel', 'num_threads': 8, 'seed': 0} [why tree_learner=featureparallel](https://lightgbm.readthedocs.io/en/latest/Parallel-Learning-Guide.html) ###Code # params={ # 'age':{'lambda_l1': 0.029688407904725312, # 'lambda_l2': 4.927181117399353e-03, # 'num_leaves': 101, # 'feature_fraction': 0.90, # 'bagging_fraction': 1.0, # 'bagging_freq': 0, # 'min_child_samples': 20, # 'objective': 'fair', # 'metric': 'l1', # 'boosting_type': 'gbdt', # 'learning_rate': 0.001, # 'tree_learner': 'feature_parallel', # 'num_threads': 4, # 'seed': 0} # , # 'domain1_var1':{'lambda_l1': 0.0, # 'lambda_l2': 0.0, # 'num_leaves': 200, # 'feature_fraction': 0.95, # 'bagging_fraction': 0.9765733975192812, # 'bagging_freq': 1, # 'min_child_samples': 10, # 'objective': 'fair', # 'metric': 'l1', # 'boosting_type': 'gbdt', # 'learning_rate': 0.001, # 'tree_learner': 'feature_parallel', # 'num_threads': 4, # 'seed': 0} # , # 'domain1_var2':{'lambda_l1': 7.733581684659643e-05, # 'lambda_l2': 1.1878841440097718, # 'num_leaves': 31, # 'feature_fraction': 1.0, # 'bagging_fraction': 1.0, # 'bagging_freq': 0, # 'min_child_samples': 25, # 'objective': 'huber', # 'metric': 'l1', # 'boosting_type': 'gbdt', # 'learning_rate': 0.01, # 'tree_learner': 'feature_parallel', # 'num_threads': 4, # 'seed': 0} # , # 'domain2_var1':{'lambda_l1': 0.041395115988296434, # 'lambda_l2': 0.00011959715500563623, # 'num_leaves': 105, # 'feature_fraction': 0.6, # 'bagging_fraction': 0.5439884362351342, # 'bagging_freq': 4, # 'min_child_samples': 10, # 'objective': 'huber', # 'metric': 'l1', # 'boosting_type': 'gbdt', # 'learning_rate': 0.01, # 'max_depth': -1, # 'tree_learner': 'feature_parallel', # 'num_threads': 8, # 'seed': 0} # , # 'domain2_var2':{'lambda_l1': 9.606755708273219e-05, # 'lambda_l2': 0.17107930638380894, # 'num_leaves': 59, # 'feature_fraction': 1.0, # 'bagging_fraction': 1.0, # 'bagging_freq': 0, # 'min_child_samples': 20, # 'objective': 'huber', # 'metric': 'l1', # 'boosting_type': 'gbdt', # 'learning_rate': 0.01, # 'max_depth': -1, # 'tree_learner': 'feature_parallel', # 'num_threads': 8, # 'seed': 0} # } # # LightGBM--Cross Validation implementation # from sklearn.model_selection import KFold # import numpy as np # import pandas as pd # import matplotlib.pyplot as plt # from tqdm.notebook import tqdm # import gc # import lightgbm as lgb # fnc_df = pd.read_csv("../input/trends-assessment-prediction/fnc.csv") # loading_df = pd.read_csv("../input/trends-assessment-prediction/loading.csv") # labels_df = pd.read_csv("../input/trends-assessment-prediction/train_scores.csv") # fnc_features, loading_features = list(fnc_df.columns[1:]), list(loading_df.columns[1:]) # df = fnc_df.merge(loading_df, on="Id") # labels_df["is_train"] = True # df = df.merge(labels_df, on="Id", how="left") # target_cols = ['age', 'domain1_var1', 'domain1_var2', 'domain2_var1', 'domain2_var2'] # #imputing missing values in targets # from sklearn.impute import KNNImputer # imputer = KNNImputer(n_neighbors = 5, weights="distance") # 
df[target_cols] = pd.DataFrame(imputer.fit_transform(df[target_cols]), columns = target_cols) # test_df = df[df["is_train"] != True].copy() # train_df = df[df["is_train"] == True].copy() # train_df = train_df.drop(['is_train'], axis=1) # test_df = test_df.drop(target_cols+['is_train'], axis=1) # targets=['age','domain1_var1','domain1_var2', 'domain2_var1','domain2_var2'] # features=list(set(train_df.columns)-set(targets)-set(['Id'])) # train_df[features]=train_df[features].pow(2) # train_df[fnc_features]=train_df[fnc_features].mul(1/500) # train_df[fnc_features]=train_df[fnc_features].pow(2) # test_df[features]=test_df[features].pow(2) # test_df[fnc_features]=test_df[fnc_features].mul(1/500) # test_df[fnc_features]=test_df[fnc_features].pow(2) # #-------Normalizing------------------------ # from sklearn.preprocessing import StandardScaler # scaler = StandardScaler() # train_df[features] = scaler.fit_transform(train_df[features],train_df[targets]) # test_df[features] = scaler.transform(test_df[features]) # #---------------------------------------------------- # print(train_df.shape,test_df.shape) # print("Train and test dataframes contain Id column!!") # def my_metric(y_true, y_pred): # return np.mean(np.sum(np.abs(y_true - y_pred), axis=0)/np.sum(y_true, axis=0)) # NFOLDS = 5 # from sklearn.model_selection import KFold # kf = KFold(n_splits=NFOLDS, shuffle=True, random_state=0) # targets=['age','domain2_var1','domain2_var2', 'domain1_var1','domain1_var2'] # features=list(set(train_df.columns)-set(targets)-set(['Id'])) # overal_score = 0.0 # for target,w in tqdm([('age',0.3),('domain1_var1',0.175),('domain1_var2',0.175),('domain2_var1',0.175),('domain2_var2',0.175)]): # y_oof = np.zeros(train_df.shape[0]) # y_test = np.zeros((test_df.shape[0], NFOLDS)) # print('*'*20,target,'*'*20) # for i,(train_index, valid_index) in enumerate(kf.split(train_df, train_df)): # print('>'*20,'Fold-',i+1) # train,val = train_df.iloc[train_index],train_df.iloc[valid_index] # X_train = train[features] # y_train = train[target] # X_val = val[features] # y_val = val[target] # train_data = lgb.Dataset(X_train, label=y_train) # val_data = lgb.Dataset(X_val, label=y_val) # #create model # model = lgb.train(params[target], # train_data, # num_boost_round=10000, # early_stopping_rounds=20, # valid_sets=[train_data,val_data], # learning_rates=lambda it: 0.01 * (0.8 ** it), # verbose_eval=100) # val_pred = model.predict(X_val) # test_pred = model.predict(test_df[features]) # y_oof[valid_index] = val_pred # y_test[:, i] = test_pred # train_df["pred_{}".format(target)] = y_oof # test_df[target] = y_test.mean(axis=1) # score = my_metric(train_df[train_df[target].notnull()][target].values, train_df[train_df[target].notnull()]["pred_{}".format(target)].values) # print("="*20,target, np.round(score, 5)) # print("-"*100) # overal_score += w*score # print("Overal score:", np.round(overal_score, 5)) # X_train = df.drop(columns=['Id','age','domain1_var1','domain1_var2', 'domain2_var1','domain2_var2', 'is_train'],axis=1) # X_test = test_df.drop(columns=['Id','age','domain1_var1','domain1_var2', 'domain2_var1','domain2_var2', 'is_train'],axis=1) # y_train = df[['age','domain1_var1','domain1_var2', 'domain2_var1','domain2_var2']] # %%time # from sklearn.linear_model import MultiTaskElasticNetCV # cv_model = MultiTaskElasticNetCV(l1_ratio=[.1, .5, .7, .9, .95, .99, 1],cv=5,verbose=1,n_jobs=10) # cv_model.fit(train_df[3634:],y_train_df[3634:][['domain2_var1','domain2_var2']]) # #fitting multitask net with hyperparameters obtained from 
cross validation above # from sklearn.linear_model import MultiTaskElasticNet # model_d2 = MultiTaskElasticNet(alpha=cv_model.alpha_,l1_ratio=cv_model.l1_ratio_,random_state=0) # model_d2.fit(train_df[:3634],y_train_df[:3634][['domain2_var1','domain2_var2']]) ###Output _____no_output_____ ###Markdown clf.alpha_,clf.l1_ratio_ = (3.944212390476824e-05, 0.99) **MultiOutputRegressor** consists of fitting one estimator per target. This is a simple strategy for extending regressors that do not natively support multi-target regression, like SVR(). ###Code # def trends_scorer_multitask_scoring(estimator,X,y_true): # ''' # custom scoring function used for evaluation in this competition # ''' # import numpy as np # y_true = np.array(y_true) # y_preds=estimator.predict(X) # y_preds = np.array(y_preds) # w = np.array([.3, .175, .175, .175, .175]) # op = np.mean(np.matmul(np.abs(y_true-y_preds),w/np.mean(y_true,axis=0)),axis=0) # print(op) # return op # %%time # from sklearn.datasets import make_regression # from sklearn.multioutput import MultiOutputRegressor # from sklearn.ensemble import RandomForestRegressor # from sklearn.ensemble import GradientBoostingRegressor # m = MultiOutputRegressor(RandomForestRegressor(n_estimators=100,random_state=0)) # m.fit(X_train,y_train) # preds = m.predict(X_test) # test_df[['age','domain1_var1','domain1_var2', 'domain2_var1','domain2_var2']] = preds # test_df.drop(columns=["is_train"],inplace=True) # test_df.head() # from statsmodels.tsa.stattools import grangercausalitytests # grangercausalitytests(X_train, maxlag=20, verbose=False) # #### Matrix factorisation using SVD # X_train = np.asarray(X_train)#.to_gpu_matrix(),dtype=np.float32) # mean = np.mean(X_train, axis = 1) # sd = np.std(X_train, axis = 1) # X_train_norm = (X_train - mean.reshape(-1, 1))/sd.reshape(-1, 1) # from scipy.sparse.linalg import svds # latent_factors = 50 # U, sigma, V_T = svds(X_train_norm, k = latent_factors) # sigma = np.diag(sigma) # U.shape,sigma.shape,V_T.shape #-------------------------------------------------------------------------------------------------------- # from sklearn.decomposition import FactorAnalysis # transformer = FactorAnalysis(n_components=50, random_state=0) # X_transformed = transformer.fit_transform(X_train) # X_transformed.shape import os import pandas as pd import cudf import gc def get_train_test(fnc_file,loadings_file,lablels_file): ''' function to get training and test data sets Works with Rapids.ai ONLY ''' path = "../input/trends-assessment-prediction/" fnc_df = pd.read_csv(os.path.join(path,fnc_file)) loading_df = pd.read_csv(os.path.join(path,loadings_file)) fnc_features, loading_features = list(fnc_df.columns[1:]), list(loading_df.columns[1:]) df = fnc_df.merge(loading_df, on="Id") labels_df = pd.read_csv(os.path.join(path,lablels_file)) labels_df["is_train"] = True df = df.merge(labels_df, on="Id", how="left") test_df = df[df["is_train"] != True].copy() train_df = df[df["is_train"] == True].copy() train_df = train_df.drop(['is_train'], axis=1) target_cols = ['age', 'domain1_var1', 'domain1_var2', 'domain2_var1', 'domain2_var2'] test_df = test_df.drop(target_cols + ['is_train'], axis=1) features = loading_features + fnc_features #-----------------Normalizing------------------------ from sklearn.preprocessing import StandardScaler scaler = StandardScaler() train_df[features] = scaler.fit_transform(train_df[features],train_df[target_cols]) test_df[features] = scaler.transform(test_df[features]) #---------------------------------------------------- # 
Giving less importance to FNC features since they are easier to overfit due to high dimensionality. train_df[fnc_features] = train_df[fnc_features].mul(1/800) test_df[fnc_features] = test_df[fnc_features].mul(1/800) #imputing missing values in targets from sklearn.impute import KNNImputer imputer = KNNImputer(n_neighbors = 5, weights="distance") train_df = cudf.from_pandas(pd.DataFrame(imputer.fit_transform(train_df), columns = list(train_df.columns))) test_df = cudf.from_pandas(test_df)#necessary for casting to gpu matrix del df gc.collect() return train_df,test_df,features,target_cols import numpy as np from cuml import SVR from cuml import RandomForestRegressor from cuml import NearestNeighbors,KMeans,UMAP,Ridge,ElasticNet import cupy as cp from sklearn.model_selection import KFold def my_metric(y_true, y_pred): return np.mean(np.sum(np.abs(y_true - y_pred), axis=0)/np.sum(y_true, axis=0)) def cv_train_predict(df,test_df,features): ''' training with k-fold cross-validation ''' weights={}#Weights & other hyperparameters #[target,score_weight,SVR_penalty(C),ElasticNet_l1ratio,Blend_ElNet_weight,Blend_RandForest_weight] weights['age'] =["age", 0.3, 40, 0.8, 0.5, 0.3] weights['domain1_var1']=["domain1_var1",0.175, 8, 0.5, 0.6, 0.4] weights['domain1_var2']=["domain1_var2",0.175, 8, 0.5, 0.6, 0.4] weights['domain2_var1']=["domain2_var1",0.175, 10, 0.8, 0.8, 0.5] weights['domain2_var2']=["domain2_var2",0.175, 10, 0.5, 0.6, 0.5] NUM_FOLDS = 7 kf = KFold(n_splits=NUM_FOLDS, shuffle=True, random_state=0) overal_score = 0 for target,w,c,l1_ratio,el,rf in [weights['age'], weights['domain1_var1'], weights['domain1_var2'],weights['domain2_var1'],weights['domain2_var2']]: y_oof = np.zeros(df.shape[0]) y_test = np.zeros((test_df.shape[0], NUM_FOLDS)) for f, (train_ind, val_ind) in enumerate(kf.split(df,df)): train_df, val_df = df.iloc[train_ind], df.iloc[val_ind] #-------training,val,test data preparation for RandomForestRegressor since it operates on float32 X_train = np.array(train_df[features].to_gpu_matrix(),dtype=np.float32) y_train = np.array(train_df[[target]].to_gpu_matrix(),dtype=np.float32) X_val = np.array(val_df[features].to_gpu_matrix(),dtype=np.float32) y_val = np.array(val_df[[target]].to_gpu_matrix(),dtype=np.float32) X_test = np.array(test_df[features].to_gpu_matrix(),dtype=np.float32) #--------------------------------------------------------------------------------------- model = RandomForestRegressor(n_estimators=200,split_criterion=2,accuracy_metric=my_metric,bootstrap=True,seed=0) model.fit(X_train,y_train) model_1 = SVR(C=c, cache_size=3000.0) model_1.fit(train_df[features].values, train_df[target].values) model_2 = ElasticNet(alpha = 1,l1_ratio=l1_ratio) model_2.fit(train_df[features].values, train_df[target].values) val_pred_rf=model.predict(X_val) val_pred_1 = model_1.predict(val_df[features]) val_pred_2 = model_2.predict(val_df[features]) test_pred_rf=model.predict(X_test) test_pred_1 = model_1.predict(test_df[features]) test_pred_2 = model_2.predict(test_df[features]) #pred = Blended prediction(RandomForest + Blended prediction(ElasticNet & SVR)) val_pred = rf*val_pred_rf + cp.asnumpy((1-rf)*((1-el)*val_pred_1+el*val_pred_2)) #val_pred = cp.asnumpy(val_pred.values.flatten()) test_pred = rf*test_pred_rf + cp.asnumpy((1-rf)*((1-el)*test_pred_1+el*test_pred_2)) #test_pred = cp.asnumpy(test_pred.values.flatten()) y_oof[val_ind] = val_pred y_test[:, f] = test_pred df["pred_{}".format(target)] = y_oof test_df[target] = y_test.mean(axis=1) score = 
my_metric(df[df[target].notnull()][target].values, df[df[target].notnull()]["pred_{}".format(target)].values) print(target, np.round(score, 5)) print() overal_score += w*score print("Overal score:", np.round(overal_score, 5)) %%time df,test_df,features, targets = get_train_test("fnc.csv","loading.csv","train_scores.csv") print("training shape={0} | testing shape={1}".format(df.shape, test_df.shape)) print(type(df),type(test_df),'Id' in features,'Id' in df.columns,'Id' in test_df.columns) df[targets].isna().sum() %%time to_drop=['IC_20','IC_02','IC_05','IC_16','IC_10','IC_18'] features = list(set(features)-set(to_drop))#Id is not present in features print("After excluding features and Id, training shape={0} | testing shape={1}".format(df[features].shape, test_df[features].shape)) # cv_train_predict(df,test_df,features) ###Output _____no_output_____ ###Markdown age-0.15866 for el=0.4 c=10, l1_ratio=0.8 age 0.16002 for el=0.6 c=10, l1_ratio=0.8 age 0.16216 for el=0.6 c=100, l1_ratio=0.8 age 0.16444 for el=0.4 c=100, l1_ratio=0.8 ###Code ##### NUM_FOLDS = 7 su = 0 kf = KFold(n_splits=NUM_FOLDS, shuffle=True, random_state=0) for f,(train_ind, val_ind) in enumerate(kf.split(df,df)): model = SVR(C=12, kernel='rbf',cache_size=3000.0,epsilon=0.000001) train_df, val_df = df.iloc[train_ind], df.iloc[val_ind] model.fit(train_df[features].values, train_df['age'].values) # X_train = np.array(train_df[features].to_gpu_matrix(),dtype=np.float32) # y_train = np.array(train_df[['domain2_var1']].to_gpu_matrix(),dtype=np.float32) # X_val = np.array(val_df[features].to_gpu_matrix(),dtype=np.float32) # y_val = np.array(val_df[['domain2_var1']].to_gpu_matrix(),dtype=np.float32) # model = RandomForestRegressor(n_estimators=120,max_depth=30,split_criterion=2,max_leaves=100,accuracy_metric=my_metric,bootstrap=True,seed=0) # model.fit(X_train,y_train) m=my_metric(val_df['age'].values,model.predict(val_df[features].values)) print(m) su+=m print("Average: ",su/NUM_FOLDS) SVR? ###Output _____no_output_____ ###Markdown domain2_var1 0.18293 for 7-fold w=0.175, c=5, l1-ratio=0.2, el=0.5, rf=0 0.18301 for 7-fold w=0.175, c=5, l1-ratio=0.2, el=0.7, rf=0 0.18457 for 7-fold w=0.175, c=5, l1-ratio=0.2, el=0 , rf=0 model_1 = SVR(C=c, cache_size=3000.0) model_2 = ElasticNet(alpha = 1,l1_ratio=0.2) [("age", 100, weights['age'], 0.15), ("domain1_var1", 12, weights['domain1_var1'], 0.2), ("domain1_var2", 8, weights['domain1_var2'], 0.2), ("domain2_var1", 10, weights['domain2_var1'], 0.3), ("domain2_var2", 12, weights['domain2_var2'], 0.22)] age 0.14606domain1_var1 0.14598domain1_var2 0.14519domain2_var1 0.18153domain2_var2 0.17543Overal score: 0.15724 LB: 0.1604 ###Code # 510 Ids of site2 are known in test data, there are more...510 are not all of site2 Ids. # training data does not have site2 Ids # site2_df = cudf.read_csv("../input/trends-assessment-prediction/reveal_ID_site2.csv") # testdf_site2 = test_df[test_df['Id'].isin(list(site2_df['Id']))] # testdf_site2.shape # np.array(train_df[features].fillna(0).to_gpu_matrix(),dtype=np.float32) ###Output _____no_output_____ ###Markdown Competition metric:- $\text{score} = \sum_{j} w_j \left( \frac{\sum_i \lvert y_{j,i} - \hat{y}_{j,i} \rvert}{\sum_i \hat{y}_{j,i}} \right)$ Submissions are scored using - feature-weighted, - normalized, - absolute errors. j--->age/domain1_var1... i--->data instance $y_{j,i}$ is ith observation of jth feauture.... 
weights are [.3, .175, .175, .175, .175] RandomForest split_criterion: 0 for GINI, 1 for ENTROPY, 2 for MSE, or 3 for MAE ###Code # #SCRATCH-PAD # def metric(y_true, y_pred):#-----------------------------C-----H----A---N---G----E----S------ # import numpy as np # return np.mean(np.sum(np.abs(y_true - y_pred), axis=0)/np.sum(y_true, axis=0))#CHANGED y_true--->y_pred # features = loading_features + fnc_features # X = np.array(df[features].to_gpu_matrix(),dtype=np.float32)[:5000] # y = np.array(df[['domain2_var1']].to_gpu_matrix(),dtype=np.float32)[:5000] # #model = RandomForestRegressor(n_estimators=100,split_criterion=3,accuracy_metric=metric,seed=0,bootstrap=True) # model=MBSGDRegressor(loss='squared_loss',penalty='elasticnet',learning_rate='adaptive',n_iter_no_change=5,verbose=True) # model.fit(X,y) # X_test = np.array(df[features].to_gpu_matrix(),dtype=np.float32)[5000:] # print(X_test.shape) ###Output _____no_output_____ ###Markdown Manual tryouts to select hyperparams for RandomForestRegressor... MSE(split_criterion=2) for age:bootstrap=true,accuracy_metric=mse, metric=0.244 | bootstrap=false, metric=0.3185 MAE(split_criterion=3) for age:bootstrap=true,accuracy_metric=mse, metric=0.243 | bootstrap=false, metric=0.3066 MAE(split_criterion=3) for age:bootstrap=true,accuracy_metric=mean_ae, metric=0.244 MAE(split_criterion=3) for age:bootstrap=true,accuracy_metric=custom metric, metric=0.244-------------------------------------------------------------------------------MSE for domain1_var1:bootstrap=true,accuracy_metric=mse, metric=0.1628758 | bootstrap=false, metric=0.2282 MAE for domain1_var1:bootstrap=true,accuracy_metric=mse, metric=0.1616656 | bootstrap=false, metric=0.2212 -------------------------------------------------------------------------------MSE for domain1_var2:bootstrap=true, metric=0.1628758 | bootstrap=false, metric=0.2282 MAE for domain1_var2:bootstrap=true-->accuracy_metric=custom metric, metric=0.153| accuracy_metric="mean_ae", metric=0.152 -------------------------------------------------------------------------------MAE for domain2_var1:bootstrap=true-->accuracy_metric=custom metric, metric=0.18972| accuracy_metric="mean_ae", metric=0.18971 MAE for domain2_var2:bootstrap=true-->accuracy_metric=custom metric, metric=0.18| accuracy_metric="mean_ae", metric=0.18 MSE for domain2_var1,domain2_var2: not done Conclusion:bootstrap=True, accuracy_metric=mean_ae Submitting...... ###Code output = test_df # sub_df = pd.melt(output[["Id", "age", "domain1_var1", "domain1_var2", "domain2_var1", "domain2_var2"]], id_vars=["Id"], value_name="Predicted") sub_df["Id"] = sub_df["Id"].astype("str") + "_" + sub_df["variable"].astype("str") sub_df = sub_df.drop("variable", axis=1).sort_values("Id") assert sub_df.shape[0] == test_df.shape[0]*5 sub_df.head(10) sub_df.to_csv("submission.csv", index=False) ###Output _____no_output_____
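###Markdown The cells above describe the leaderboard score as a feature-weighted, normalized absolute error with target weights [.3, .175, .175, .175, .175]. As a quick standalone illustration of that formula (not part of the original pipeline: the helper name `weighted_nae` and the toy arrays below are made up, and the denominator follows `my_metric()` above, i.e. the sum of the true values): ###Code
import numpy as np

def weighted_nae(y_true, y_pred, weights=(0.3, 0.175, 0.175, 0.175, 0.175)):
    # per-target normalized absolute error, one column per target
    # (age, domain1_var1, domain1_var2, domain2_var1, domain2_var2)
    per_target = np.sum(np.abs(y_true - y_pred), axis=0) / np.sum(y_true, axis=0)
    # weighted sum over the five targets
    return float(np.dot(np.asarray(weights), per_target))

# tiny worked example with made-up numbers: every prediction is off by 1
y_true = np.array([[60.0, 50.0, 55.0, 48.0, 52.0],
                   [40.0, 45.0, 50.0, 47.0, 51.0]])
y_pred = y_true + 1.0
print(weighted_nae(y_true, y_pred))
###Output _____no_output_____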
docsrc/source/_static/Examples/Visualization/Python/Intro to Graphics.ipynb
###Markdown Intro to GraphicsThis notebook gives an overview on basic plots, charts, and graphs in Python. It requires knowledge of `pandas` `DataFrame`s covered in "Intro to Pandas and Table Visualization.ipynb". Required ImportsWe will need to import `pandas` as usual. But here since we will need to do plotting, there is a specific command we need to run at the beginning of the notebook: `%matplotlib inline`. This basically allows the graphics to be displayed in the notebook. The `matplotlib` part comes because `pandas` uses `matplotlib` under the hood to generate its plots. ###Code import pandas as pd %matplotlib inline ###Output _____no_output_____ ###Markdown Plotting with Pandas Let's quickly set up a `DataFrame` with some data so we can create some plots from it. ###Code df = pd.DataFrame() df['Values'] = [100.12, 110.5, 105.2, 106.7, 110.2, 98.5, 94.2, 91.1, 93.12, 94.02, 92.17] df['t'] = [i + 1 for i in range(11)] df ###Output _____no_output_____ ###Markdown We can immediately call `.plot` on the `DataFrame` and it will try to give us a reasonable plot. ###Code df.plot() ###Output _____no_output_____ ###Markdown Here we can see it gave us the values, but over the index and not over time. It also plotted time separately, when really we want to see the values versus time. By specifying `y` and `x`, we can fix this: ###Code df.plot(y='Values', x='t') ###Output _____no_output_____ ###Markdown Now we can indeed see the values plotted over time. This is a line plot. We could have also created it via `.plot.line()`: ###Code df.plot.line(y='Values', x='t') ###Output _____no_output_____ ###Markdown You can see the available plot types in `pandas` by doing `df.plot.` and then pressing tab. Below is just my version that allows that to stay in the completed notebook: ###Code [attr for attr in dir(df.plot) if not attr.startswith('_')] ###Output _____no_output_____ ###Markdown AreaLike a line graph, but colors area under each line. ###Code df.plot.area(y='Values', x='t') ###Output _____no_output_____ ###Markdown Bar Graphs`bar` for vertical and `barh` for horizontal: ###Code df.plot.bar(y='Values', x='t') df.plot.barh(y='Values', x='t') ###Output _____no_output_____ ###Markdown Box and Whisker PlotGives a good summary info of the distribution of the data. ###Code df.plot.box(y='Values', x='t') ###Output _____no_output_____ ###Markdown Density and HistogramThese both plot the frequency/probability of the various values. The `density` or `kde` plot is basically a smoothed histogram. ###Code df.plot.density(y='Values', x='t') df.plot.hist(y='Values', x='t') ###Output _____no_output_____ ###Markdown Pie ChartsWho doesn't love a good pie chart? ###Code df.plot.pie(y='Values', x='t') ###Output _____no_output_____ ###Markdown Scatter ChartLike a line chart, but not connected. ###Code df.plot.scatter(y='Values', x='t') ###Output _____no_output_____
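###Markdown The `df.plot` calls above accept the usual matplotlib keyword arguments, so titles, sizes and labels can be set in one call. A small follow-on sketch (the specific styling choices and the output filename are ours, not part of the original notebook): ###Code
# Customize the line plot and save it; df.plot returns a matplotlib Axes object.
ax = df.plot.line(y='Values', x='t', title='Values over time', figsize=(8, 4), grid=True)
ax.set_xlabel('t')
ax.set_ylabel('Values')
# The underlying figure can be written to disk if needed.
ax.get_figure().savefig('values_over_time.png', dpi=150)
###Output _____no_output_____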
Tesorflow.ipynb
###Markdown idx 2 numpy script==========Einlesen:Ausgeben: ###Code !pip install numpy !pip install matplotlib !pip install tensorflow import gzip import struct import numpy as np import tensorflow as tf from matplotlib import pyplot as plt bilder_idx = "train-images-idx3-ubyte.gz" bezeichnungen_idx = "train-labels-idx1-ubyte.gz" bild_nr = 4 with gzip.open(bilder_idx) as f: zero, data_type, dims = struct.unpack('>HBB', f.read(4)) shape = tuple(struct.unpack('>I', f.read(4))[0] for d in range(dims)) bilder = np.frombuffer(f.read(), dtype=np.uint8).reshape(shape) with gzip.open(bezeichnungen_idx) as f: zero, data_type, dims = struct.unpack('>HBB', f.read(4)) shape = tuple(struct.unpack('>I', f.read(4))[0] for d in range(dims)) bezeichnungen_int = np.frombuffer(f.read(), dtype=np.uint8).reshape(shape) # Bezeichnungen in one-hot Format umwandeln bezeichnungen = tf.one_hot(bezeichnungen_int, 10) fig = plt.figure() ax = fig.add_subplot(111) ax.imshow(bilder[bild_nr], cmap='gray') ax.set_title("".join(["Number: ", str(bezeichnungen_int[bild_nr])])) ###Output _____no_output_____ ###Markdown Training=======Listing aus iX Artikel ###Code with tf.name_scope('model'): x = tf.placeholder(tf.float32, [None, 784]) W = tf.Variable(tf.zeros([784, 10])) b = tf.Variable(tf.zeros([10])) y = tf.nn.softmax(tf.matmul(x, W) + b) with tf.name_scope('train'): y_ = tf.placeholder(tf.float32, [None, 10]) cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1])) train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy) with tf.Session() as session: init = tf.global_variables_initializer() session.run(init) for i in range(1000): batch_xs, batch_ys = mnist.train.next_batch(100) session.run(train_step, feed_dict={x: batch_xs, y_: batch_ys}) #Test correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) print(session.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})) ###Output _____no_output_____ ###Markdown Listing aus Tensorflow Tutorial mnist_softmax.py ###Code # Create the model x = tf.placeholder(tf.float32, [None, 784]) W = tf.Variable(tf.zeros([784, 10])) b = tf.Variable(tf.zeros([10])) y = tf.matmul(x, W) + b # Define loss and optimizer y_ = tf.placeholder(tf.float32, [None, 10]) # The raw formulation of cross-entropy, # # tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)), # reduction_indices=[1])) # # can be numerically unstable. # # So here we use tf.nn.softmax_cross_entropy_with_logits on the raw # outputs of 'y', and then average across the batch. cross_entropy = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)) train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy) sess = tf.InteractiveSession() tf.global_variables_initializer().run() # Train for _ in range(1000): batch_xs, batch_ys = mnist.train.next_batch(100) sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys}) # Test trained model correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})) ###Output _____no_output_____
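###Markdown The first training listing above feeds batches from `mnist.train.next_batch(100)`, but this notebook loads the idx files by hand into `bilder` and `bezeichnungen_int` and never defines a `mnist` object. A minimal batching helper over those arrays could bridge that gap; this is a hedged sketch only (the name `next_batch`, the flattening to 784 values and the scaling to [0, 1] are our assumptions, not taken from the original): ###Code
import numpy as np

def next_batch(batch_size, images=bilder, labels_int=bezeichnungen_int):
    # draw a random batch, flatten the 28x28 images to 784-vectors and scale to [0, 1]
    idx = np.random.choice(len(images), batch_size, replace=False)
    batch_x = images[idx].reshape(batch_size, 784).astype(np.float32) / 255.0
    # one-hot encode the integer labels, mirroring tf.one_hot(..., 10) above
    batch_y = np.eye(10, dtype=np.float32)[labels_int[idx]]
    return batch_x, batch_y

# usage: batch_xs, batch_ys = next_batch(100)
###Output _____no_output_____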
day5_assignment.ipynb
###Markdown Write a program to identify sub list [1,1,5] is there in the given list in the same order, if yes print“it’s a Match” if no then print “it’s Gone” in function.Example -Listy =[1,5,6,4,1,2,3,5] - it’s a MatchListy = [1,5,6,5,1,2,3.6] - it’s Gone ###Code length = int(input("Enter the length of list ")) test=[1,1,5] lst=[] for i in range (0,length): inp = int(input("Enter the number ")) lst.append(inp) j=0 for i in range (0,length): if(lst[i] == test[j]): j += 1 i += 1 else: i += 1 if(j == 3): print ("It's a match") else: print ("It's Gone") ###Output Enter the length of list 5 Enter the number 2 Enter the number 3 Enter the number 1 Enter the number 1 Enter the number 5 It's a match ###Markdown Make a Function for prime numbers and use Filter to filter out all the prime numbers from 1-2500 ###Code def isPrime(x): for n in range(2,x): if x%n==0: return False else: return True fltrObj=filter(isPrime, range(2500)) print ('Prime numbers between 1-10:', list(fltrObj)) ###Output Prime numbers between 1-10: [3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 87, 89, 91, 93, 95, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 137, 139, 141, 143, 145, 147, 149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 175, 177, 179, 181, 183, 185, 187, 189, 191, 193, 195, 197, 199, 201, 203, 205, 207, 209, 211, 213, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, 255, 257, 259, 261, 263, 265, 267, 269, 271, 273, 275, 277, 279, 281, 283, 285, 287, 289, 291, 293, 295, 297, 299, 301, 303, 305, 307, 309, 311, 313, 315, 317, 319, 321, 323, 325, 327, 329, 331, 333, 335, 337, 339, 341, 343, 345, 347, 349, 351, 353, 355, 357, 359, 361, 363, 365, 367, 369, 371, 373, 375, 377, 379, 381, 383, 385, 387, 389, 391, 393, 395, 397, 399, 401, 403, 405, 407, 409, 411, 413, 415, 417, 419, 421, 423, 425, 427, 429, 431, 433, 435, 437, 439, 441, 443, 445, 447, 449, 451, 453, 455, 457, 459, 461, 463, 465, 467, 469, 471, 473, 475, 477, 479, 481, 483, 485, 487, 489, 491, 493, 495, 497, 499, 501, 503, 505, 507, 509, 511, 513, 515, 517, 519, 521, 523, 525, 527, 529, 531, 533, 535, 537, 539, 541, 543, 545, 547, 549, 551, 553, 555, 557, 559, 561, 563, 565, 567, 569, 571, 573, 575, 577, 579, 581, 583, 585, 587, 589, 591, 593, 595, 597, 599, 601, 603, 605, 607, 609, 611, 613, 615, 617, 619, 621, 623, 625, 627, 629, 631, 633, 635, 637, 639, 641, 643, 645, 647, 649, 651, 653, 655, 657, 659, 661, 663, 665, 667, 669, 671, 673, 675, 677, 679, 681, 683, 685, 687, 689, 691, 693, 695, 697, 699, 701, 703, 705, 707, 709, 711, 713, 715, 717, 719, 721, 723, 725, 727, 729, 731, 733, 735, 737, 739, 741, 743, 745, 747, 749, 751, 753, 755, 757, 759, 761, 763, 765, 767, 769, 771, 773, 775, 777, 779, 781, 783, 785, 787, 789, 791, 793, 795, 797, 799, 801, 803, 805, 807, 809, 811, 813, 815, 817, 819, 821, 823, 825, 827, 829, 831, 833, 835, 837, 839, 841, 843, 845, 847, 849, 851, 853, 855, 857, 859, 861, 863, 865, 867, 869, 871, 873, 875, 877, 879, 881, 883, 885, 887, 889, 891, 893, 895, 897, 899, 901, 903, 905, 907, 909, 911, 913, 915, 917, 919, 921, 923, 925, 927, 929, 931, 933, 935, 937, 939, 941, 943, 945, 947, 949, 951, 953, 955, 957, 959, 961, 963, 965, 967, 969, 971, 973, 975, 977, 979, 981, 983, 985, 987, 989, 991, 993, 995, 997, 999, 1001, 1003, 1005, 1007, 1009, 1011, 1013, 1015, 1017, 1019, 1021, 1023, 1025, 1027, 
1029, 1031, 1033, 1035, 1037, 1039, 1041, 1043, 1045, 1047, 1049, 1051, 1053, 1055, 1057, 1059, 1061, 1063, 1065, 1067, 1069, 1071, 1073, 1075, 1077, 1079, 1081, 1083, 1085, 1087, 1089, 1091, 1093, 1095, 1097, 1099, 1101, 1103, 1105, 1107, 1109, 1111, 1113, 1115, 1117, 1119, 1121, 1123, 1125, 1127, 1129, 1131, 1133, 1135, 1137, 1139, 1141, 1143, 1145, 1147, 1149, 1151, 1153, 1155, 1157, 1159, 1161, 1163, 1165, 1167, 1169, 1171, 1173, 1175, 1177, 1179, 1181, 1183, 1185, 1187, 1189, 1191, 1193, 1195, 1197, 1199, 1201, 1203, 1205, 1207, 1209, 1211, 1213, 1215, 1217, 1219, 1221, 1223, 1225, 1227, 1229, 1231, 1233, 1235, 1237, 1239, 1241, 1243, 1245, 1247, 1249, 1251, 1253, 1255, 1257, 1259, 1261, 1263, 1265, 1267, 1269, 1271, 1273, 1275, 1277, 1279, 1281, 1283, 1285, 1287, 1289, 1291, 1293, 1295, 1297, 1299, 1301, 1303, 1305, 1307, 1309, 1311, 1313, 1315, 1317, 1319, 1321, 1323, 1325, 1327, 1329, 1331, 1333, 1335, 1337, 1339, 1341, 1343, 1345, 1347, 1349, 1351, 1353, 1355, 1357, 1359, 1361, 1363, 1365, 1367, 1369, 1371, 1373, 1375, 1377, 1379, 1381, 1383, 1385, 1387, 1389, 1391, 1393, 1395, 1397, 1399, 1401, 1403, 1405, 1407, 1409, 1411, 1413, 1415, 1417, 1419, 1421, 1423, 1425, 1427, 1429, 1431, 1433, 1435, 1437, 1439, 1441, 1443, 1445, 1447, 1449, 1451, 1453, 1455, 1457, 1459, 1461, 1463, 1465, 1467, 1469, 1471, 1473, 1475, 1477, 1479, 1481, 1483, 1485, 1487, 1489, 1491, 1493, 1495, 1497, 1499, 1501, 1503, 1505, 1507, 1509, 1511, 1513, 1515, 1517, 1519, 1521, 1523, 1525, 1527, 1529, 1531, 1533, 1535, 1537, 1539, 1541, 1543, 1545, 1547, 1549, 1551, 1553, 1555, 1557, 1559, 1561, 1563, 1565, 1567, 1569, 1571, 1573, 1575, 1577, 1579, 1581, 1583, 1585, 1587, 1589, 1591, 1593, 1595, 1597, 1599, 1601, 1603, 1605, 1607, 1609, 1611, 1613, 1615, 1617, 1619, 1621, 1623, 1625, 1627, 1629, 1631, 1633, 1635, 1637, 1639, 1641, 1643, 1645, 1647, 1649, 1651, 1653, 1655, 1657, 1659, 1661, 1663, 1665, 1667, 1669, 1671, 1673, 1675, 1677, 1679, 1681, 1683, 1685, 1687, 1689, 1691, 1693, 1695, 1697, 1699, 1701, 1703, 1705, 1707, 1709, 1711, 1713, 1715, 1717, 1719, 1721, 1723, 1725, 1727, 1729, 1731, 1733, 1735, 1737, 1739, 1741, 1743, 1745, 1747, 1749, 1751, 1753, 1755, 1757, 1759, 1761, 1763, 1765, 1767, 1769, 1771, 1773, 1775, 1777, 1779, 1781, 1783, 1785, 1787, 1789, 1791, 1793, 1795, 1797, 1799, 1801, 1803, 1805, 1807, 1809, 1811, 1813, 1815, 1817, 1819, 1821, 1823, 1825, 1827, 1829, 1831, 1833, 1835, 1837, 1839, 1841, 1843, 1845, 1847, 1849, 1851, 1853, 1855, 1857, 1859, 1861, 1863, 1865, 1867, 1869, 1871, 1873, 1875, 1877, 1879, 1881, 1883, 1885, 1887, 1889, 1891, 1893, 1895, 1897, 1899, 1901, 1903, 1905, 1907, 1909, 1911, 1913, 1915, 1917, 1919, 1921, 1923, 1925, 1927, 1929, 1931, 1933, 1935, 1937, 1939, 1941, 1943, 1945, 1947, 1949, 1951, 1953, 1955, 1957, 1959, 1961, 1963, 1965, 1967, 1969, 1971, 1973, 1975, 1977, 1979, 1981, 1983, 1985, 1987, 1989, 1991, 1993, 1995, 1997, 1999, 2001, 2003, 2005, 2007, 2009, 2011, 2013, 2015, 2017, 2019, 2021, 2023, 2025, 2027, 2029, 2031, 2033, 2035, 2037, 2039, 2041, 2043, 2045, 2047, 2049, 2051, 2053, 2055, 2057, 2059, 2061, 2063, 2065, 2067, 2069, 2071, 2073, 2075, 2077, 2079, 2081, 2083, 2085, 2087, 2089, 2091, 2093, 2095, 2097, 2099, 2101, 2103, 2105, 2107, 2109, 2111, 2113, 2115, 2117, 2119, 2121, 2123, 2125, 2127, 2129, 2131, 2133, 2135, 2137, 2139, 2141, 2143, 2145, 2147, 2149, 2151, 2153, 2155, 2157, 2159, 2161, 2163, 2165, 2167, 2169, 2171, 2173, 2175, 2177, 2179, 2181, 2183, 2185, 2187, 2189, 2191, 2193, 2195, 2197, 2199, 2201, 2203, 2205, 2207, 2209, 2211, 
2213, 2215, 2217, 2219, 2221, 2223, 2225, 2227, 2229, 2231, 2233, 2235, 2237, 2239, 2241, 2243, 2245, 2247, 2249, 2251, 2253, 2255, 2257, 2259, 2261, 2263, 2265, 2267, 2269, 2271, 2273, 2275, 2277, 2279, 2281, 2283, 2285, 2287, 2289, 2291, 2293, 2295, 2297, 2299, 2301, 2303, 2305, 2307, 2309, 2311, 2313, 2315, 2317, 2319, 2321, 2323, 2325, 2327, 2329, 2331, 2333, 2335, 2337, 2339, 2341, 2343, 2345, 2347, 2349, 2351, 2353, 2355, 2357, 2359, 2361, 2363, 2365, 2367, 2369, 2371, 2373, 2375, 2377, 2379, 2381, 2383, 2385, 2387, 2389, 2391, 2393, 2395, 2397, 2399, 2401, 2403, 2405, 2407, 2409, 2411, 2413, 2415, 2417, 2419, 2421, 2423, 2425, 2427, 2429, 2431, 2433, 2435, 2437, 2439, 2441, 2443, 2445, 2447, 2449, 2451, 2453, 2455, 2457, 2459, 2461, 2463, 2465, 2467, 2469, 2471, 2473, 2475, 2477, 2479, 2481, 2483, 2485, 2487, 2489, 2491, 2493, 2495, 2497, 2499] ###Markdown Make a Lambda function for capitalizing the whole sentence passed using arguments.And map all the sentences in the List, with the lambda functions ###Code full_name = lambda first,middle,last: f'Full name: {first.upper()} {middle.upper()} {last.upper()}' first=input("Enter your first name :") middle=input("Enter your middle name :") last=input("Enter your last name:") parameter = full_name(first,middle,last) print(parameter) filtered_result = map (lambda first,middle,last: f'Full name: {first.upper()} {middle.upper()} {last.upper()}', parameter) print(filtered_result) ###Output Enter your first name :rohan Enter your middle name :kumar Enter your last name:verma Full name: ROHAN KUMAR VERMA <map object at 0x0000026AF25F37F0>
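###Markdown A note on the prime exercise above: `isPrime` returns from inside the first loop pass, so every odd number from 3 upwards is reported as prime (which is what the long output shows), and the printed label says 1-10 while the filter runs over range(2500). A corrected sketch (the function name, sample sentences and print label below are ours): ###Code
def is_prime(x):
    if x < 2:
        return False
    for n in range(2, int(x ** 0.5) + 1):
        if x % n == 0:
            return False
    return True

print('Prime numbers between 1-2500:', list(filter(is_prime, range(1, 2501))))

# Lambda + map over a list of sentences, as the last exercise asks for:
sentences = ["hello world", "python is fun"]
print(list(map(lambda s: s.upper(), sentences)))
###Output _____no_output_____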
SymPy_Examples.ipynb
###Markdown SymPy Examples ###Code # import symbolic capability to Python from sympy import * # print things all pretty from sympy.abc import * init_printing() # Need to define variables as symbolic for sympy to use them. x, y= symbols("x, y", real = True) diff((3*x**4+5)**3,x) laplace_transform(sinh(b*t),t,s) laplace_transform? diff((5*x**3+2)**4,x) simplify(diff((3*x-2)/(2*x+1),x)) simplify(diff((1-x)/(2*x+1),x)) simplify(diff((0*x-2)/(3-x**2),x)) simplify(diff((-1)/(2*x**2+1)**3,x)) simplify(diff((x+1)**3/(x-1)**3,x)) simplify(diff(sqrt(6*x-2),x)) simplify(diff(2*x*sqrt(3*x-1),x)) diff(sqrt(x)+1/2/sqrt(x),x) simplify(diff(x**(5/2),x)) simplify(diff(sqrt(t+sqrt(t)),t)) simplify(diff((3-2*x+x**3)*(x**4+7),x)) simplify(diff((t**3+1)*(t**2+t+1),t)) (diff((-3*x**5+1)**4*(2-x**3)**5,x)) simplify(diff((t**2-5)/(t**2+5),t)) simplify(diff((x**3-2*x+5)/(x**2+4),x)) simplify(diff((2*x-7)**3/(x-1)**5,x)) simplify(diff(((2*t+1)/(2*t-1))**3,t)) a = simplify(diff((-3)/(3*x**5-2*x+7)**11,x)) a a == 33*(3*x**5-2*x+7)*(15*x**4-2)/(3*x**5-2*x+7)**22 ###Output _____no_output_____ ###Markdown but that was way too easy. We could compare them, just to be sure they are the same ###Code simplify(diff(y,x))==simplify(diff(ln(y),x)*y) ###Output _____no_output_____ ###Markdown Or plot this monstrosity ###Code plot((diff(y,x),(x,0.2,10)), (y, (x,0.5,10))) # To change colors # show = False delays the plot until we can set all of the parameters # legend turns on the legend and uses the labels we have later. p = plot((diff(y,x),(x,0.2,10)), (y, (x,0.5,10)), show = False, legend = True) p[0].line_color = 'blue' p[0].label = '$\\frac{dy}{dx}$' p[1].line_color = 'green' p[1].label = '$y$' p.show() ###Output _____no_output_____ ###Markdown What if we wanted to make a substitution? ###Code y.subs(x,alpha) ###Output _____no_output_____
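###Markdown One caveat worth adding to the `a == 33*(...)` cell above: in SymPy, `==` compares expression trees structurally, so two mathematically equivalent expressions written in different forms can still compare as unequal. The usual equivalence test is to simplify the difference (the expressions below are our own toy example, not from the original notebook): ###Code
lhs = (x + 1)**2
rhs = x**2 + 2*x + 1
print(lhs == rhs)                # False: different expression trees
print(simplify(lhs - rhs) == 0)  # True: mathematically the same
###Output _____no_output_____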
Deep_Learning/TensorFlow-Hvass-Labs/03B_Layers_API.ipynb
###Markdown TensorFlow Tutorial 03-B Layers APIThese lessons are adapted from [tutorials](https://github.com/Hvass-Labs/TensorFlow-Tutorials) by [Magnus Erik Hvass Pedersen](http://www.hvass-labs.org/) / [GitHub](https://github.com/Hvass-Labs/TensorFlow-Tutorials) / [Videos on YouTube](https://www.youtube.com/playlist?list=PL9Hr9sNUjfsmEu1ZniY0XpHSzl5uihcXZ) which are published under the [MIT License](https://github.com/Hvass-Labs/TensorFlow-Tutorials/blob/master/LICENSE) which allows very broad use for both academic and commercial purposes. IntroductionIt is important to use a builder API when constructing Neural Networks in TensorFlow because it makes it easier to implement and modify the source-code. This also lowers the risk of bugs.Many of the other tutorials used the TensorFlow builder API called PrettyTensor for easy construction of Neural Networks. But there are several other builder APIs available for TensorFlow. PrettyTensor was used in these tutorials, because at the time in mid-2016, PrettyTensor was the most complete and polished builder API available for TensorFlow. But PrettyTensor is only developed by a single person working at Google and although it has some unique and elegant features, it is possible that it may become deprecated in the future.This tutorial is about a small builder API that has recently been added to TensorFlow version 1.1. It is simply called *Layers* or the *Layers API* or by its Python name `tf.layers`. This builder API is automatically installed as part of TensorFlow, so you no longer have to install a separate Python package as was needed with PrettyTensor.This tutorial is very similar to Tutorial 03 on PrettyTensor and shows how to implement the same Convolutional Neural Network using the Layers API. It is recommended that you are familiar with Tutorial 02 on Convolutional Neural Networks. Flowchart The following chart shows roughly how the data flows in the Convolutional Neural Network that is implemented below. See Tutorial 02 for a more detailed description of convolution. ![Flowchart](images/02_network_flowchart.png) The input image is processed in the first convolutional layer using the filter-weights. This results in 16 new images, one for each filter in the convolutional layer. The images are also down-sampled using max-pooling so the image resolution is decreased from 28x28 to 14x14.These 16 smaller images are then processed in the second convolutional layer. We need filter-weights for each of these 16 channels, and we need filter-weights for each output channel of this layer. There are 36 output channels so there are a total of 16 x 36 = 576 filters in the second convolutional layer. The resulting images are also down-sampled using max-pooling to 7x7 pixels.The output of the second convolutional layer is 36 images of 7x7 pixels each. These are then flattened to a single vector of length 7 x 7 x 36 = 1764, which is used as the input to a fully-connected layer with 128 neurons (or elements). This feeds into another fully-connected layer with 10 neurons, one for each of the classes, which is used to determine the class of the image, that is, which number is depicted in the image.The convolutional filters are initially chosen at random, so the classification is done randomly. The error between the predicted and true class of the input image is measured as the so-called cross-entropy. 
The optimizer then automatically propagates this error back through the Convolutional Network using the chain-rule of differentiation and updates the filter-weights so as to improve the classification error. This is done iteratively thousands of times until the classification error is sufficiently low.These particular filter-weights and intermediate images are the results of one optimization run and may look different if you re-run this Notebook.Note that the computation in TensorFlow is actually done on a batch of images instead of a single image, which makes the computation more efficient. This means the flowchart actually has one more data-dimension when implemented in TensorFlow. Imports ###Code %matplotlib inline import matplotlib.pyplot as plt import tensorflow as tf import numpy as np from sklearn.metrics import confusion_matrix import math ###Output _____no_output_____ ###Markdown This was developed using Python 3.6 (Anaconda) and TensorFlow version: ###Code tf.__version__ ###Output _____no_output_____ ###Markdown Load Data The MNIST data-set is about 12 MB and will be downloaded automatically if it is not located in the given path. ###Code from tensorflow.examples.tutorials.mnist import input_data data = input_data.read_data_sets('data/MNIST/', one_hot=True) ###Output Extracting data/MNIST/train-images-idx3-ubyte.gz Extracting data/MNIST/train-labels-idx1-ubyte.gz Extracting data/MNIST/t10k-images-idx3-ubyte.gz Extracting data/MNIST/t10k-labels-idx1-ubyte.gz ###Markdown The MNIST data-set has now been loaded and consists of 70,000 images and associated labels (i.e. classifications of the images). The data-set is split into 3 mutually exclusive sub-sets. We will only use the training and test-sets in this tutorial. ###Code print("Size of:") print("- Training-set:\t\t{}".format(len(data.train.labels))) print("- Test-set:\t\t{}".format(len(data.test.labels))) print("- Validation-set:\t{}".format(len(data.validation.labels))) ###Output Size of: - Training-set: 55000 - Test-set: 10000 - Validation-set: 5000 ###Markdown The class-labels are One-Hot encoded, which means that each label is a vector with 10 elements, all of which are zero except for one element. The index of this one element is the class-number, that is, the digit shown in the associated image. We also need the class-numbers as integers for the test-set, so we calculate it now. ###Code data.test.cls = np.argmax(data.test.labels, axis=1) ###Output _____no_output_____ ###Markdown Data Dimensions The data dimensions are used in several places in the source-code below. They are defined once so we can use these variables instead of numbers throughout the source-code below. ###Code # We know that MNIST images are 28 pixels in each dimension. img_size = 28 # Images are stored in one-dimensional arrays of this length. img_size_flat = img_size * img_size # Tuple with height and width of images used to reshape arrays. img_shape = (img_size, img_size) # Number of colour channels for the images: 1 channel for gray-scale. num_channels = 1 # Number of classes, one class for each of 10 digits. num_classes = 10 ###Output _____no_output_____ ###Markdown Helper-function for plotting images Function used to plot 9 images in a 3x3 grid, and writing the true and predicted classes below each image. ###Code def plot_images(images, cls_true, cls_pred=None): assert len(images) == len(cls_true) == 9 # Create figure with 3x3 sub-plots. 
fig, axes = plt.subplots(3, 3) fig.subplots_adjust(hspace=0.3, wspace=0.3) for i, ax in enumerate(axes.flat): # Plot image. ax.imshow(images[i].reshape(img_shape), cmap='binary') # Show true and predicted classes. if cls_pred is None: xlabel = "True: {0}".format(cls_true[i]) else: xlabel = "True: {0}, Pred: {1}".format(cls_true[i], cls_pred[i]) # Show the classes as the label on the x-axis. ax.set_xlabel(xlabel) # Remove ticks from the plot. ax.set_xticks([]) ax.set_yticks([]) # Ensure the plot is shown correctly with multiple plots # in a single Notebook cell. plt.show() ###Output _____no_output_____ ###Markdown Plot a few images to see if data is correct ###Code # Get the first images from the test-set. images = data.test.images[0:9] # Get the true classes for those images. cls_true = data.test.cls[0:9] # Plot the images and labels using our helper-function above. plot_images(images=images, cls_true=cls_true) ###Output _____no_output_____ ###Markdown TensorFlow GraphThe entire purpose of TensorFlow is to have a so-called computational graph that can be executed much more efficiently than if the same calculations were to be performed directly in Python. TensorFlow can be more efficient than NumPy because TensorFlow knows the entire computation graph that must be executed, while NumPy only knows the computation of a single mathematical operation at a time.TensorFlow can also automatically calculate the gradients that are needed to optimize the variables of the graph so as to make the model perform better. This is because the graph is a combination of simple mathematical expressions so the gradient of the entire graph can be calculated using the chain-rule for derivatives.TensorFlow can also take advantage of multi-core CPUs as well as GPUs - and Google has even built special chips just for TensorFlow which are called TPUs (Tensor Processing Units) and are even faster than GPUs.A TensorFlow graph consists of the following parts which will be detailed below:* Placeholder variables used for inputting data to the graph.* Variables that are going to be optimized so as to make the convolutional network perform better.* The mathematical formulas for the convolutional neural network.* A so-called cost-measure or loss-function that can be used to guide the optimization of the variables.* An optimization method which updates the variables.In addition, the TensorFlow graph may also contain various debugging statements e.g. for logging data to be displayed using TensorBoard, which is not covered in this tutorial. Placeholder variables Placeholder variables serve as the input to the TensorFlow computational graph that we may change each time we execute the graph. We call this feeding the placeholder variables and it is demonstrated further below.First we define the placeholder variable for the input images. This allows us to change the images that are input to the TensorFlow graph. This is a so-called tensor, which just means that it is a multi-dimensional array. The data-type is set to `float32` and the shape is set to `[None, img_size_flat]`, where `None` means that the tensor may hold an arbitrary number of images with each image being a vector of length `img_size_flat`. ###Code x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x') ###Output _____no_output_____ ###Markdown The convolutional layers expect `x` to be encoded as a 4-dim tensor so we have to reshape it so its shape is instead `[num_images, img_height, img_width, num_channels]`. 
Note that `img_height == img_width == img_size` and `num_images` can be inferred automatically by using -1 for the size of the first dimension. So the reshape operation is: ###Code x_image = tf.reshape(x, [-1, img_size, img_size, num_channels]) ###Output _____no_output_____ ###Markdown Next we have the placeholder variable for the true labels associated with the images that were input in the placeholder variable `x`. The shape of this placeholder variable is `[None, num_classes]` which means it may hold an arbitrary number of labels and each label is a vector of length `num_classes` which is 10 in this case. ###Code y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true') ###Output _____no_output_____ ###Markdown We could also have a placeholder variable for the class-number, but we will instead calculate it using argmax. Note that this is a TensorFlow operator so nothing is calculated at this point. ###Code y_true_cls = tf.argmax(y_true, dimension=1) ###Output _____no_output_____ ###Markdown PrettyTensor ImplementationThis section shows the implementation of a Convolutional Neural Network using PrettyTensor taken from Tutorial 03 so it can be compared to the implementation using the Layers API below. This code has been enclosed in an `if False:` block so it does not run here.The basic idea is to wrap the input tensor `x_image` in a PrettyTensor object which has helper-functions for adding new computational layers so as to create an entire Convolutional Neural Network. This is a fairly simple and elegant syntax. ###Code if False: x_pretty = pt.wrap(x_image) with pt.defaults_scope(activation_fn=tf.nn.relu): y_pred, loss = x_pretty.\ conv2d(kernel=5, depth=16, name='layer_conv1').\ max_pool(kernel=2, stride=2).\ conv2d(kernel=5, depth=36, name='layer_conv2').\ max_pool(kernel=2, stride=2).\ flatten().\ fully_connected(size=128, name='layer_fc1').\ softmax_classifier(num_classes=num_classes, labels=y_true) ###Output _____no_output_____ ###Markdown Layers ImplementationWe now implement the same Convolutional Neural Network using the Layers API that is included in TensorFlow version 1.1. This requires more code than PrettyTensor, although a lot of the following are just comments.We use the `net`-variable to refer to the last layer while building the Neural Network. This makes it easy to add or remove layers in the code if you want to experiment. First we set the `net`-variable to the reshaped input image. ###Code net = x_image ###Output _____no_output_____ ###Markdown The input image is then input to the first convolutional layer, which has 16 filters each of size 5x5 pixels. The activation-function is the Rectified Linear Unit (ReLU) described in more detail in Tutorial 02. ###Code net = tf.layers.conv2d(inputs=net, name='layer_conv1', padding='same', filters=16, kernel_size=5, activation=tf.nn.relu) ###Output _____no_output_____ ###Markdown One of the advantages of constructing neural networks in this fashion, is that we can now easily pull out a reference to a layer. This was more complicated in PrettyTensor.Further below we want to plot the output of the first convolutional layer, so we create another variable for holding a reference to that layer. ###Code layer_conv1 = net ###Output _____no_output_____ ###Markdown We now do the max-pooling on the output of the convolutional layer. This was also described in more detail in Tutorial 02. 
###Code net = tf.layers.max_pooling2d(inputs=net, pool_size=2, strides=2) ###Output _____no_output_____ ###Markdown We now add the second convolutional layer which has 36 filters each with 5x5 pixels, and a ReLU activation function again. ###Code net = tf.layers.conv2d(inputs=net, name='layer_conv2', padding='same', filters=36, kernel_size=5, activation=tf.nn.relu) ###Output _____no_output_____ ###Markdown We also want to plot the output of this convolutional layer, so we keep a reference for later use. ###Code layer_conv2 = net ###Output _____no_output_____ ###Markdown The output of the second convolutional layer is also max-pooled for down-sampling the images. ###Code net = tf.layers.max_pooling2d(inputs=net, pool_size=2, strides=2) ###Output _____no_output_____ ###Markdown The tensors that are being output by this max-pooling are 4-rank, as can be seen from this: ###Code net ###Output _____no_output_____ ###Markdown Next we want to add fully-connected layers to the Neural Network, but these require 2-rank tensors as input, so we must first flatten the tensors.The `tf.layers` API was first located in `tf.contrib.layers` before it was moved into TensorFlow Core. But even though it has taken the TensorFlow developers a year to move these fairly simple functions, they have somehow forgotten to move the even simpler `flatten()` function. So we still need to use the one in `tf.contrib.layers`. ###Code net = tf.contrib.layers.flatten(net) # This should eventually be replaced by: # net = tf.layers.flatten(net) ###Output _____no_output_____ ###Markdown This has now flattened the data to a 2-rank tensor, as can be seen from this: ###Code net ###Output _____no_output_____ ###Markdown We can now add fully-connected layers to the neural network. These are called *dense* layers in the Layers API. ###Code net = tf.layers.dense(inputs=net, name='layer_fc1', units=128, activation=tf.nn.relu) ###Output _____no_output_____ ###Markdown We need the neural network to classify the input images into 10 different classes. So the final fully-connected layer has `num_classes=10` output neurons. ###Code net = tf.layers.dense(inputs=net, name='layer_fc_out', units=num_classes, activation=None) ###Output _____no_output_____ ###Markdown The output of the final fully-connected layer are sometimes called logits, so we have a convenience variable with that name. ###Code logits = net ###Output _____no_output_____ ###Markdown We use the softmax function to 'squash' the outputs so they are between zero and one, and so they sum to one. ###Code y_pred = tf.nn.softmax(logits=logits) ###Output _____no_output_____ ###Markdown This tells us how likely the neural network thinks the input image is of each possible class. The one that has the highest value is considered the most likely so its index is taken to be the class-number. ###Code y_pred_cls = tf.argmax(y_pred, dimension=1) ###Output _____no_output_____ ###Markdown We have now created the exact same Convolutional Neural Network in a few lines of code that required many complex lines of code in the direct TensorFlow implementation.The Layers API is perhaps not as elegant as PrettyTensor, but it has some other advantages, e.g. that we can more easily refer to intermediate layers, and it is also easier to construct neural networks with branches and multiple outputs using the Layers API. 
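###Markdown As an aside (not part of the original tutorial): the stack just built — conv 16@5x5, pool, conv 36@5x5, pool, flatten, dense 128, dense 10 — can be written compactly with `tf.keras`, which replaced `tf.layers` in TensorFlow 2. This sketch assumes a newer TensorFlow than the 1.1 used here and is only meant for comparison: ###Code
from tensorflow import keras

# Same architecture as the tf.layers version above, expressed as a Keras Sequential model.
keras_model = keras.Sequential([
    keras.layers.InputLayer(input_shape=(img_size, img_size, num_channels)),
    keras.layers.Conv2D(16, kernel_size=5, padding='same', activation='relu'),
    keras.layers.MaxPooling2D(pool_size=2, strides=2),
    keras.layers.Conv2D(36, kernel_size=5, padding='same', activation='relu'),
    keras.layers.MaxPooling2D(pool_size=2, strides=2),
    keras.layers.Flatten(),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(num_classes, activation='softmax'),
])
keras_model.summary()
###Output _____no_output_____ ###Markdown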
Loss-Function to be Optimized To make the model better at classifying the input images, we must somehow change the variables of the Convolutional Neural Network.The cross-entropy is a performance measure used in classification. The cross-entropy is a continuous function that is always positive and if the predicted output of the model exactly matches the desired output then the cross-entropy equals zero. The goal of optimization is therefore to minimize the cross-entropy so it gets as close to zero as possible by changing the variables of the model.TensorFlow has a function for calculating the cross-entropy, which uses the values of the `logits`-layer because it also calculates the softmax internally, so as to to improve numerical stability. ###Code cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=logits) ###Output _____no_output_____ ###Markdown We have now calculated the cross-entropy for each of the image classifications so we have a measure of how well the model performs on each image individually. But in order to use the cross-entropy to guide the optimization of the model's variables we need a single scalar value, so we simply take the average of the cross-entropy for all the image classifications. ###Code loss = tf.reduce_mean(cross_entropy) ###Output _____no_output_____ ###Markdown Optimization MethodNow that we have a cost measure that must be minimized, we can then create an optimizer. In this case it is the Adam optimizer with a learning-rate of 1e-4.Note that optimization is not performed at this point. In fact, nothing is calculated at all, we just add the optimizer-object to the TensorFlow graph for later execution. ###Code optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss) ###Output _____no_output_____ ###Markdown Classification AccuracyWe need to calculate the classification accuracy so we can report progress to the user.First we create a vector of booleans telling us whether the predicted class equals the true class of each image. ###Code correct_prediction = tf.equal(y_pred_cls, y_true_cls) ###Output _____no_output_____ ###Markdown The classification accuracy is calculated by first type-casting the vector of booleans to floats, so that False becomes 0 and True becomes 1, and then taking the average of these numbers. ###Code accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) ###Output _____no_output_____ ###Markdown Getting the WeightsFurther below, we want to plot the weights of the convolutional layers. In the TensorFlow implementation we had created the variables ourselves so we could just refer to them directly. But when the network is constructed using a builder API such as `tf.layers`, all the variables of the layers are created indirectly by the builder API. 
We therefore have to retrieve the variables from TensorFlow.First we need a list of the variable names in the TensorFlow graph: ###Code for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES): print(var) ###Output <tf.Variable 'layer_conv1/kernel:0' shape=(5, 5, 1, 16) dtype=float32_ref> <tf.Variable 'layer_conv1/bias:0' shape=(16,) dtype=float32_ref> <tf.Variable 'layer_conv2/kernel:0' shape=(5, 5, 16, 36) dtype=float32_ref> <tf.Variable 'layer_conv2/bias:0' shape=(36,) dtype=float32_ref> <tf.Variable 'layer_fc1/kernel:0' shape=(1764, 128) dtype=float32_ref> <tf.Variable 'layer_fc1/bias:0' shape=(128,) dtype=float32_ref> <tf.Variable 'layer_fc_out/kernel:0' shape=(128, 10) dtype=float32_ref> <tf.Variable 'layer_fc_out/bias:0' shape=(10,) dtype=float32_ref> <tf.Variable 'beta1_power:0' shape=() dtype=float32_ref> <tf.Variable 'beta2_power:0' shape=() dtype=float32_ref> <tf.Variable 'layer_conv1/kernel/Adam:0' shape=(5, 5, 1, 16) dtype=float32_ref> <tf.Variable 'layer_conv1/kernel/Adam_1:0' shape=(5, 5, 1, 16) dtype=float32_ref> <tf.Variable 'layer_conv1/bias/Adam:0' shape=(16,) dtype=float32_ref> <tf.Variable 'layer_conv1/bias/Adam_1:0' shape=(16,) dtype=float32_ref> <tf.Variable 'layer_conv2/kernel/Adam:0' shape=(5, 5, 16, 36) dtype=float32_ref> <tf.Variable 'layer_conv2/kernel/Adam_1:0' shape=(5, 5, 16, 36) dtype=float32_ref> <tf.Variable 'layer_conv2/bias/Adam:0' shape=(36,) dtype=float32_ref> <tf.Variable 'layer_conv2/bias/Adam_1:0' shape=(36,) dtype=float32_ref> <tf.Variable 'layer_fc1/kernel/Adam:0' shape=(1764, 128) dtype=float32_ref> <tf.Variable 'layer_fc1/kernel/Adam_1:0' shape=(1764, 128) dtype=float32_ref> <tf.Variable 'layer_fc1/bias/Adam:0' shape=(128,) dtype=float32_ref> <tf.Variable 'layer_fc1/bias/Adam_1:0' shape=(128,) dtype=float32_ref> <tf.Variable 'layer_fc_out/kernel/Adam:0' shape=(128, 10) dtype=float32_ref> <tf.Variable 'layer_fc_out/kernel/Adam_1:0' shape=(128, 10) dtype=float32_ref> <tf.Variable 'layer_fc_out/bias/Adam:0' shape=(10,) dtype=float32_ref> <tf.Variable 'layer_fc_out/bias/Adam_1:0' shape=(10,) dtype=float32_ref> ###Markdown Each of the convolutional layers has two variables. For the first convolutional layer they are named `layer_conv1/kernel:0` and `layer_conv1/bias:0`. The `kernel` variables are the ones we want to plot further below.It is somewhat awkward to get references to these variables, because we have to use the TensorFlow function `get_variable()` which was designed for another purpose; either creating a new variable or re-using an existing variable. The easiest thing is to make the following helper-function. ###Code def get_weights_variable(layer_name): # Retrieve an existing variable named 'kernel' in the scope # with the given layer_name. # This is awkward because the TensorFlow function was # really intended for another purpose. with tf.variable_scope(layer_name, reuse=True): variable = tf.get_variable('kernel') return variable ###Output _____no_output_____ ###Markdown Using this helper-function we can retrieve the variables. These are TensorFlow objects. In order to get the contents of the variables, you must do something like: `contents = session.run(weights_conv1)` as demonstrated further below. ###Code weights_conv1 = get_weights_variable(layer_name='layer_conv1') weights_conv2 = get_weights_variable(layer_name='layer_conv2') ###Output _____no_output_____ ###Markdown TensorFlow Run Create TensorFlow sessionOnce the TensorFlow graph has been created, we have to create a TensorFlow session which is used to execute the graph. 
###Code session = tf.Session() ###Output _____no_output_____ ###Markdown Initialize variablesThe variables for the TensorFlow graph must be initialized before we start optimizing them. ###Code session.run(tf.global_variables_initializer()) ###Output _____no_output_____ ###Markdown Helper-function to perform optimization iterations There are 55,000 images in the training-set. It takes a long time to calculate the gradient of the model using all these images. We therefore only use a small batch of images in each iteration of the optimizer.If your computer crashes or becomes very slow because you run out of RAM, then you may try and lower this number, but you may then need to do more optimization iterations. ###Code train_batch_size = 64 ###Output _____no_output_____ ###Markdown This function performs a number of optimization iterations so as to gradually improve the variables of the neural network layers. In each iteration, a new batch of data is selected from the training-set and then TensorFlow executes the optimizer using those training samples. The progress is printed every 100 iterations. ###Code # Counter for total number of iterations performed so far. total_iterations = 0 def optimize(num_iterations): # Ensure we update the global variable rather than a local copy. global total_iterations for i in range(total_iterations, total_iterations + num_iterations): # Get a batch of training examples. # x_batch now holds a batch of images and # y_true_batch are the true labels for those images. x_batch, y_true_batch = data.train.next_batch(train_batch_size) # Put the batch into a dict with the proper names # for placeholder variables in the TensorFlow graph. feed_dict_train = {x: x_batch, y_true: y_true_batch} # Run the optimizer using this batch of training data. # TensorFlow assigns the variables in feed_dict_train # to the placeholder variables and then runs the optimizer. session.run(optimizer, feed_dict=feed_dict_train) # Print status every 100 iterations. if i % 100 == 0: # Calculate the accuracy on the training-set. acc = session.run(accuracy, feed_dict=feed_dict_train) # Message for printing. msg = "Optimization Iteration: {0:>6}, Training Accuracy: {1:>6.1%}" # Print it. print(msg.format(i + 1, acc)) # Update the total number of iterations performed. total_iterations += num_iterations ###Output _____no_output_____ ###Markdown Helper-function to plot example errors Function for plotting examples of images from the test-set that have been mis-classified. ###Code def plot_example_errors(cls_pred, correct): # This function is called from print_test_accuracy() below. # cls_pred is an array of the predicted class-number for # all images in the test-set. # correct is a boolean array whether the predicted class # is equal to the true class for each image in the test-set. # Negate the boolean array. incorrect = (correct == False) # Get the images from the test-set that have been # incorrectly classified. images = data.test.images[incorrect] # Get the predicted classes for those images. cls_pred = cls_pred[incorrect] # Get the true classes for those images. cls_true = data.test.cls[incorrect] # Plot the first 9 images. plot_images(images=images[0:9], cls_true=cls_true[0:9], cls_pred=cls_pred[0:9]) ###Output _____no_output_____ ###Markdown Helper-function to plot confusion matrix ###Code def plot_confusion_matrix(cls_pred): # This is called from print_test_accuracy() below. # cls_pred is an array of the predicted class-number for # all images in the test-set. 
# Get the true classifications for the test-set. cls_true = data.test.cls # Get the confusion matrix using sklearn. cm = confusion_matrix(y_true=cls_true, y_pred=cls_pred) # Print the confusion matrix as text. print(cm) # Plot the confusion matrix as an image. plt.matshow(cm) # Make various adjustments to the plot. plt.colorbar() tick_marks = np.arange(num_classes) plt.xticks(tick_marks, range(num_classes)) plt.yticks(tick_marks, range(num_classes)) plt.xlabel('Predicted') plt.ylabel('True') # Ensure the plot is shown correctly with multiple plots # in a single Notebook cell. plt.show() ###Output _____no_output_____ ###Markdown Helper-function for showing the performance Below is a function for printing the classification accuracy on the test-set.It takes a while to compute the classification for all the images in the test-set, that's why the results are re-used by calling the above functions directly from this function, so the classifications don't have to be recalculated by each function.Note that this function can use a lot of computer memory, which is why the test-set is split into smaller batches. If you have little RAM in your computer and it crashes, then you can try and lower the batch-size. ###Code # Split the test-set into smaller batches of this size. test_batch_size = 256 def print_test_accuracy(show_example_errors=False, show_confusion_matrix=False): # Number of images in the test-set. num_test = len(data.test.images) # Allocate an array for the predicted classes which # will be calculated in batches and filled into this array. cls_pred = np.zeros(shape=num_test, dtype=np.int) # Now calculate the predicted classes for the batches. # We will just iterate through all the batches. # There might be a more clever and Pythonic way of doing this. # The starting index for the next batch is denoted i. i = 0 while i < num_test: # The ending index for the next batch is denoted j. j = min(i + test_batch_size, num_test) # Get the images from the test-set between index i and j. images = data.test.images[i:j, :] # Get the associated labels. labels = data.test.labels[i:j, :] # Create a feed-dict with these images and labels. feed_dict = {x: images, y_true: labels} # Calculate the predicted class using TensorFlow. cls_pred[i:j] = session.run(y_pred_cls, feed_dict=feed_dict) # Set the start-index for the next batch to the # end-index of the current batch. i = j # Convenience variable for the true class-numbers of the test-set. cls_true = data.test.cls # Create a boolean array whether each image is correctly classified. correct = (cls_true == cls_pred) # Calculate the number of correctly classified images. # When summing a boolean array, False means 0 and True means 1. correct_sum = correct.sum() # Classification accuracy is the number of correctly classified # images divided by the total number of images in the test-set. acc = float(correct_sum) / num_test # Print the accuracy. msg = "Accuracy on Test-Set: {0:.1%} ({1} / {2})" print(msg.format(acc, correct_sum, num_test)) # Plot some examples of mis-classifications, if desired. if show_example_errors: print("Example errors:") plot_example_errors(cls_pred=cls_pred, correct=correct) # Plot the confusion matrix, if desired. 
if show_confusion_matrix: print("Confusion Matrix:") plot_confusion_matrix(cls_pred=cls_pred) ###Output _____no_output_____ ###Markdown Performance before any optimizationThe accuracy on the test-set is very low because the variables for the neural network have only been initialized and not optimized at all, so it just classifies the images randomly. ###Code print_test_accuracy() ###Output Accuracy on Test-Set: 6.2% (621 / 10000) ###Markdown Performance after 1 optimization iterationThe classification accuracy does not improve much from just 1 optimization iteration, because the learning-rate for the optimizer is set very low. ###Code optimize(num_iterations=1) print_test_accuracy() ###Output Accuracy on Test-Set: 6.8% (675 / 10000) ###Markdown Performance after 100 optimization iterationsAfter 100 optimization iterations, the model has significantly improved its classification accuracy. ###Code %%time optimize(num_iterations=99) # We already performed 1 iteration above. print_test_accuracy(show_example_errors=True) ###Output Accuracy on Test-Set: 78.2% (7817 / 10000) Example errors: ###Markdown Performance after 1000 optimization iterationsAfter 1000 optimization iterations, the model has greatly increased its accuracy on the test-set to more than 90%. ###Code %%time optimize(num_iterations=900) # We performed 100 iterations above. print_test_accuracy(show_example_errors=True) ###Output Accuracy on Test-Set: 95.2% (9519 / 10000) Example errors: ###Markdown Performance after 10,000 optimization iterationsAfter 10,000 optimization iterations, the model has a classification accuracy on the test-set of about 99%. ###Code %%time optimize(num_iterations=9000) # We performed 1000 iterations above. print_test_accuracy(show_example_errors=True, show_confusion_matrix=True) ###Output Accuracy on Test-Set: 98.7% (9869 / 10000) Example errors: ###Markdown Visualization of Weights and Layers Helper-function for plotting convolutional weights ###Code def plot_conv_weights(weights, input_channel=0): # Assume weights are TensorFlow ops for 4-dim variables # e.g. weights_conv1 or weights_conv2. # Retrieve the values of the weight-variables from TensorFlow. # A feed-dict is not necessary because nothing is calculated. w = session.run(weights) # Get the lowest and highest values for the weights. # This is used to correct the colour intensity across # the images so they can be compared with each other. w_min = np.min(w) w_max = np.max(w) # Number of filters used in the conv. layer. num_filters = w.shape[3] # Number of grids to plot. # Rounded-up, square-root of the number of filters. num_grids = math.ceil(math.sqrt(num_filters)) # Create figure with a grid of sub-plots. fig, axes = plt.subplots(num_grids, num_grids) # Plot all the filter-weights. for i, ax in enumerate(axes.flat): # Only plot the valid filter-weights. if i<num_filters: # Get the weights for the i'th filter of the input channel. # See new_conv_layer() for details on the format # of this 4-dim tensor. img = w[:, :, input_channel, i] # Plot image. ax.imshow(img, vmin=w_min, vmax=w_max, interpolation='nearest', cmap='seismic') # Remove ticks from the plot. ax.set_xticks([]) ax.set_yticks([]) # Ensure the plot is shown correctly with multiple plots # in a single Notebook cell. 
plt.show() ###Output _____no_output_____ ###Markdown Helper-function for plotting the output of a convolutional layer ###Code def plot_conv_layer(layer, image): # Assume layer is a TensorFlow op that outputs a 4-dim tensor # which is the output of a convolutional layer, # e.g. layer_conv1 or layer_conv2. # Create a feed-dict containing just one image. # Note that we don't need to feed y_true because it is # not used in this calculation. feed_dict = {x: [image]} # Calculate and retrieve the output values of the layer # when inputting that image. values = session.run(layer, feed_dict=feed_dict) # Number of filters used in the conv. layer. num_filters = values.shape[3] # Number of grids to plot. # Rounded-up, square-root of the number of filters. num_grids = math.ceil(math.sqrt(num_filters)) # Create figure with a grid of sub-plots. fig, axes = plt.subplots(num_grids, num_grids) # Plot the output images of all the filters. for i, ax in enumerate(axes.flat): # Only plot the images for valid filters. if i<num_filters: # Get the output image of using the i'th filter. img = values[0, :, :, i] # Plot image. ax.imshow(img, interpolation='nearest', cmap='binary') # Remove ticks from the plot. ax.set_xticks([]) ax.set_yticks([]) # Ensure the plot is shown correctly with multiple plots # in a single Notebook cell. plt.show() ###Output _____no_output_____ ###Markdown Input ImagesHelper-function for plotting an image. ###Code def plot_image(image): plt.imshow(image.reshape(img_shape), interpolation='nearest', cmap='binary') plt.show() ###Output _____no_output_____ ###Markdown Plot an image from the test-set which will be used as an example below. ###Code image1 = data.test.images[0] plot_image(image1) ###Output _____no_output_____ ###Markdown Plot another example image from the test-set. ###Code image2 = data.test.images[13] plot_image(image2) ###Output _____no_output_____ ###Markdown Convolution Layer 1 Now plot the filter-weights for the first convolutional layer.Note that positive weights are red and negative weights are blue. ###Code plot_conv_weights(weights=weights_conv1) ###Output _____no_output_____ ###Markdown Applying each of these convolutional filters to the first input image gives the following output images, which are then used as input to the second convolutional layer. ###Code plot_conv_layer(layer=layer_conv1, image=image1) ###Output _____no_output_____ ###Markdown The following images are the results of applying the convolutional filters to the second image. ###Code plot_conv_layer(layer=layer_conv1, image=image2) ###Output _____no_output_____ ###Markdown Convolution Layer 2 Now plot the filter-weights for the second convolutional layer.There are 16 output channels from the first conv-layer, which means there are 16 input channels to the second conv-layer. The second conv-layer has a set of filter-weights for each of its input channels. We start by plotting the filter-weigths for the first channel.Note again that positive weights are red and negative weights are blue. ###Code plot_conv_weights(weights=weights_conv2, input_channel=0) ###Output _____no_output_____ ###Markdown There are 16 input channels to the second convolutional layer, so we can make another 15 plots of filter-weights like this. We just make one more with the filter-weights for the second channel. 
###Code plot_conv_weights(weights=weights_conv2, input_channel=1) ###Output _____no_output_____ ###Markdown It can be difficult to understand and keep track of how these filters are applied because of the high dimensionality.Applying these convolutional filters to the images that were ouput from the first conv-layer gives the following images.Note that these are down-sampled to 14 x 14 pixels which is half the resolution of the original input images, because the first convolutional layer was followed by a max-pooling layer with stride 2. Max-pooling is also done after the second convolutional layer, but we retrieve these images before that has been applied. ###Code plot_conv_layer(layer=layer_conv2, image=image1) ###Output _____no_output_____ ###Markdown And these are the results of applying the filter-weights to the second image. ###Code plot_conv_layer(layer=layer_conv2, image=image2) ###Output _____no_output_____ ###Markdown Close TensorFlow Session We are now done using TensorFlow, so we close the session to release its resources. ###Code # This has been commented out in case you want to modify and experiment # with the Notebook without having to restart it. session.close() ###Output _____no_output_____
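###Markdown
Note that once the session is closed the trained variable values are gone. If you want to reuse them without re-running the optimization, they could be saved with a `tf.train.Saver` before calling `session.close()`. The cell below is only a sketch (not executed here); it assumes TensorFlow 1.x is imported as `tf` earlier in the notebook and that it is run while the session is still open.
###Code
import os

# Hypothetical checkpoint directory -- adjust as needed.
save_dir = 'checkpoints/'
os.makedirs(save_dir, exist_ok=True)

# Saver for all variables in the default graph; save() must be called while the session is open.
saver = tf.train.Saver()
save_path = saver.save(session, os.path.join(save_dir, 'model.ckpt'))

# Later, the variables can be restored into a compatible graph with:
# saver.restore(session, save_path)
###Output
_____no_output_____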
notebooks/executed/037_afox_RunParcels_TS_MXL_Multiline_Randomvel_Papermill_executed_2019-11-19.ipynb
###Markdown Parcels Experiment:Expanding the polyline code to release particles at density based on local velocity normal to section._(Based on an experiment originally designed by Christina Schmidt.)__(Runs on GEOMAR Jupyter Server at https://schulung3.geomar.de/user/workshop007/lab)_ To do- Check/ask how OceanParcels deals with partial cells, if it does. - It doesn't. Does it matter? Technical preamble ###Code %matplotlib inline from parcels import ( AdvectionRK4_3D, ErrorCode, FieldSet, JITParticle, ParticleSet, Variable ) # from operator import attrgetter from datetime import datetime, timedelta import numpy as np from pathlib import Path import matplotlib.pyplot as plt import cmocean as co import pandas as pd import xarray as xr # import dask as dask ###Output INFO: Compiled ParcelsRandom ==> /tmp/parcels-62665/libparcels_random_780daf13-63d8-42b3-9158-a422c7f9e1f3.so ###Markdown Experiment settings (user input) ParametersThese can be set in papermill ###Code # OSNAP multiline details sectionPathname = '../data/external/' sectionFilename = 'osnap_pos_wp.txt' sectionname = 'osnap' # location of input data path_name = '/data/iAtlantic/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/' experiment_name = 'VIKING20X.L46-KKG36107B' data_resolution = '1m' w_name_extension = '_repaire_depthw_time' # location of mask data mask_path_name = '/data/iAtlantic/ocean-only/VIKING20X.L46-KKG36107B/nemo/suppl/' mesh_mask_filename = '1_mesh_mask.nc_notime_depthw' # location of output data outpath_name = '../data/raw/' year_prefix = 201 # this does from 2000 onwards # set line segment to use start_vertex = 4 end_vertex = 12 # experiment duration etc runtime_in_days = 10 dt_in_minutes = -10 # repeatdt = timedelta(days=3) # number of particles to track create_number_particles = 200000 # many will not be ocean points use_number_particles = 200000 min_release_depth = 0 max_release_depth = 1_000 # max current speed for particle selection max_current = 1.0 # set base release date and time t_0_str = '2010-01-16T12:00:00' t_start_str = '2016-01-16T12:00:00' # particle positions are stored every x hours outputdt_in_hours = 120 # select subdomain (to decrease needed resources) comment out to use whole domain # sd_i1, sd_i2 = 0, 2404 # western/eastern limit (indices not coordinates) # sd_j1, sd_j2 = 1200, 2499 # southern/northern limit (indices not coordinates) # sd_z1, sd_z2 = 0, 46 # how to initialize the random number generator # --> is set in next cell # RNG_seed = 123 use_dask_chunks = True # Parameters path_name = "/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/" data_resolution = "5d" w_name_extension = "" mask_path_name = "/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/suppl/" mesh_mask_filename = "1_mesh_mask.nc" year_prefix = "" runtime_in_days = 3650 create_number_particles = 4000000 use_number_particles = 4000000 max_release_depth = 1000 max_current = 2.0 t_0_str = "1980-01-03T12:00:00" t_start_str = "2019-11-19T12:00:00" use_dask_chunks = False ###Output _____no_output_____ ###Markdown Derived variables ###Code # times t_0 = datetime.fromisoformat(t_0_str) # using monthly mean fields. Check dates. 
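# Note: t_0 serves as the time origin -- particle release times are later computed
# as (t_start - t_0) in seconds, and the RNG seed below is the whole number of days
# between the two dates.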
t_start = datetime.fromisoformat(t_start_str) # RNG seed based on release day (days since 1980-01-03) RNG_seed = int((t_start - t_0).total_seconds() / (60*60*24)) # names of files to load fname_U = f'1_{experiment_name}_{data_resolution}_{year_prefix}*_grid_U.nc' fname_V = f'1_{experiment_name}_{data_resolution}_{year_prefix}*_grid_V.nc' fname_T = f'1_{experiment_name}_{data_resolution}_{year_prefix}*_grid_T.nc' fname_W = f'1_{experiment_name}_{data_resolution}_{year_prefix}*_grid_W.nc{w_name_extension}' sectionPath = Path(sectionPathname) data_path = Path(path_name) mask_path = Path(mask_path_name) outpath = Path(outpath_name) display(t_0) display(t_start) if dt_in_minutes > 0: direction = '_forwards_' else: direction = '_backward_' year_str = str(t_start.year) month_str = str(t_start.month).zfill(2) day_str = str(t_start.day).zfill(2) days = str(runtime_in_days) seed = str(RNG_seed) npart= str(use_number_particles) degree2km = 1.852*60.0 ###Output _____no_output_____ ###Markdown Construct input / output paths etc. ###Code mesh_mask = mask_path / mesh_mask_filename ###Output _____no_output_____ ###Markdown Load input datasets ###Code def fieldset_defintions( list_of_filenames_U, list_of_filenames_V, list_of_filenames_W, list_of_filenames_T, mesh_mask ): ds_mask = xr.open_dataset(mesh_mask) filenames = {'U': {'lon': (mesh_mask), 'lat': (mesh_mask), 'depth': list_of_filenames_W[0], 'data': list_of_filenames_U}, 'V': {'lon': (mesh_mask), 'lat': (mesh_mask), 'depth': list_of_filenames_W[0], 'data': list_of_filenames_V}, 'W': {'lon': (mesh_mask), 'lat': (mesh_mask), 'depth': list_of_filenames_W[0], 'data': list_of_filenames_W}, 'T': {'lon': (mesh_mask), 'lat': (mesh_mask), 'depth': list_of_filenames_W[0], 'data': list_of_filenames_T}, 'S': {'lon': (mesh_mask), 'lat': (mesh_mask), 'depth': list_of_filenames_W[0], 'data': list_of_filenames_T}, 'MXL': {'lon': (mesh_mask), 'lat': (mesh_mask), 'data': list_of_filenames_T} } variables = {'U': 'vozocrtx', 'V': 'vomecrty', 'W': 'vovecrtz', 'T': 'votemper', 'S': 'vosaline', 'MXL':'somxl010' } dimensions = {'U': {'lon': 'glamf', 'lat': 'gphif', 'depth': 'depthw', 'time': 'time_counter'}, # needs to be on f-nodes 'V': {'lon': 'glamf', 'lat': 'gphif', 'depth': 'depthw', 'time': 'time_counter'}, # needs to be on f-nodes 'W': {'lon': 'glamf', 'lat': 'gphif', 'depth': 'depthw', 'time': 'time_counter'}, # needs to be on f-nodes 'T': {'lon': 'glamf', 'lat': 'gphif', 'depth': 'depthw', 'time': 'time_counter'}, # needs to be on t-nodes 'S': {'lon': 'glamf', 'lat': 'gphif', 'depth': 'depthw', 'time': 'time_counter'}, # needs to be on t-nodes 'MXL': {'lon': 'glamf', 'lat': 'gphif', 'time': 'time_counter'}, # needs to be on t-nodes } # exclude the two grid cells at the edges of the nest as they contain 0 # and everything south of 20N indices = {'lon': range(2, ds_mask.x.size-2), 'lat': range(1132, ds_mask.y.size-2)} # indices = { # 'U': {'depth': range(sd_z1, sd_z2), 'lon': range(sd_i1, sd_i2), 'lat': range(sd_j1, sd_j2)}, # 'V': {'depth': range(sd_z1, sd_z2), 'lon': range(sd_i1, sd_i2), 'lat': range(sd_j1, sd_j2)}, # 'W': {'depth': range(sd_z1, sd_z2), 'lon': range(sd_i1, sd_i2), 'lat':range(sd_j1, sd_j2)}, # 'T': {'depth': range(sd_z1, sd_z2), 'lon': range(sd_i1, sd_i2), 'lat':range(sd_j1, sd_j2)}, # 'S': {'depth': range(sd_z1, sd_z2), 'lon': range(sd_i1, sd_i2), 'lat':range(sd_j1, sd_j2)} # } if use_dask_chunks: field_chunksizes = {'U': {'lon':('x', 1024), 'lat':('y',128), 'depth': ('depthw', 64), 'time': ('time_counter',3)}, # needs to be on f-nodes 'V': 
{'lon':('x', 1024), 'lat':('y',128), 'depth': ('depthw', 64), 'time': ('time_counter',3)}, # needs to be on f-nodes 'W': {'lon':('x', 1024), 'lat':('y',128), 'depth': ('depthw', 64), 'time': ('time_counter',3)}, # needs to be on f-nodes 'T': {'lon':('x', 1024), 'lat':('y',128), 'depth': ('depthw', 64), 'time': ('time_counter',3)}, # needs to be on t-nodes 'S': {'lon':('x', 1024), 'lat':('y',128), 'depth': ('depthw', 64), 'time': ('time_counter',3)}, # needs to be on t-nodes 'MXL': {'lon':('x', 1024), 'lat':('y',128), 'time': ('time_counter',3)}, # needs to be on t-nodes } else: field_chunksizes = None return FieldSet.from_nemo( filenames, variables, dimensions, indices=indices, chunksize=field_chunksizes, # = None for no chunking mesh='spherical', tracer_interp_method='cgrid_tracer' # ,time_periodic=time_loop_period # ,allow_time_extrapolation=True ) def create_fieldset( data_path=data_path, experiment_name=experiment_name, fname_U=fname_U, fname_V=fname_V, fname_W=fname_W, fname_T=fname_T, mesh_mask = mesh_mask ): files_U = list(sorted((data_path).glob(fname_U))) files_V = list(sorted((data_path).glob(fname_V))) files_W = list(sorted((data_path).glob(fname_W))) files_T = list(sorted((data_path).glob(fname_T))) print(files_U) fieldset = fieldset_defintions( files_U, files_V, files_W, files_T, mesh_mask) return fieldset fieldset = create_fieldset() ###Output [PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_19800101_19801231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_19810101_19811231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_19820101_19821231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_19830101_19831231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_19840101_19841231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_19850101_19851231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_19860101_19861231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_19870101_19871231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_19880101_19881231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_19890101_19891231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_19900101_19901231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_19910101_19911231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_19920101_19921231_grid_U.nc'), 
PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_19930101_19931231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_19940101_19941231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_19950101_19951231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_19960101_19961231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_19970101_19971231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_19980101_19981231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_19990101_19991231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_20000101_20001231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_20010101_20011231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_20020101_20021231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_20030101_20031231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_20040101_20041231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_20050101_20051231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_20060101_20061231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_20070101_20071231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_20080101_20081231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_20090101_20091231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_20100101_20101231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_20110101_20111231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_20120101_20121231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_20130101_20131231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_20140101_20141231_grid_U.nc'), 
PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_20150101_20151231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_20160101_20161231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_20170101_20171231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_20180101_20181231_grid_U.nc'), PosixPath('/gxfs_work1/geomar/smomw355/model_data/ocean-only/VIKING20X.L46-KKG36107B/nemo/output/1_VIKING20X.L46-KKG36107B_5d_20190101_20191231_grid_U.nc')] ###Markdown Create Virtual Particles add a couple of simple plotting routines ###Code def plot_section_sdist(): plt.figure(figsize=(10,5)) u = np.array([p.uvel for p in pset]) * degree2km * 1000.0 * np.cos(np.radians(pset.lat)) v = np.array([p.vvel for p in pset]) * degree2km * 1000.0 section_index = np.searchsorted(lonlat.lon,pset.lon)-1 u_normal = v * lonlatdiff.costheta[section_index].data - u * lonlatdiff.sintheta[section_index].data y = (pset.lat - lonlat.lat[section_index]) * degree2km x = (pset.lon - lonlat.lon[section_index]) * degree2km*np.cos(np.radians(lonlat2mean.lat[section_index+1].data)) dist = np.sqrt(x**2 + y**2) + lonlatdiff.length_west[section_index].data plt.scatter( dist, [p.depth for p in pset], 1, u_normal, cmap=co.cm.balance,vmin=-0.3,vmax=0.3 ) plt.ylim(1200,0) plt.colorbar(label = r'normal velocity [$\mathrm{m\ s}^{-1}$]') plt.xlabel('distance [km]') plt.ylabel('depth [m]') return def plot_section_lon(): plt.figure(figsize=(10,5)) u = np.array([p.uvel for p in pset]) * degree2km * 1000.0 * np.cos(np.radians(pset.lat)) v = np.array([p.vvel for p in pset]) * degree2km * 1000.0 section_index = np.searchsorted(lonlat.lon,pset.lon)-1 u_normal = v * lonlatdiff.costheta[section_index].data - u * lonlatdiff.sintheta[section_index].data plt.scatter( [p.lon for p in pset], [p.depth for p in pset], 1, u_normal, cmap=co.cm.balance,vmin=-0.3,vmax=0.3 ) plt.ylim(1200,0) plt.colorbar(label = r'normal velocity [$\mathrm{m\ s}^{-1}$]'); plt.xlabel('longitude [$\degree$E]') plt.ylabel('depth [m]') return class SampleParticle(JITParticle): """Add variables to the standard particle class. Particles will sample temperature and track the age of the particle. Particles also have a flag `alive` that is 1 if the particle is alive and 0 otherwise. Furthermore, we have a `speed_param` that scales the velocity with which particles can swim towards the surface. Note that we don't initialize temp from the actual data. This speeds up particle creation, but might render initial data point less useful. 
""" mxl = Variable('mxl', dtype=np.float32, initial=-100) temp = Variable('temp', dtype=np.float32, initial=-100) salt = Variable('salt', dtype=np.float32, initial=-100) uvel = Variable('uvel', dtype=np.float32, initial=0) vvel = Variable('vvel', dtype=np.float32, initial=0) # wvel = Variable('wvel', dtype=np.float32, initial=0) # alive = Variable('alive', dtype=np.int32, initial=1) # speed_param = Variable('speed_param', dtype=np.float32, initial=1) # age = Variable('age', dtype=np.int32, initial=0, to_write=True) ###Output _____no_output_____ ###Markdown Create a set of particles with random initial positionsWe seed the RNG to be reproducible (and to be able to quickly create a second equivalent experiment with differently chosen compatible initial positions), and create arrays of random starting times, lats, lons, depths, and speed parameters (see kernel definitions below for details).Initially create points on 'rectangle'. Land points are removed later in a OceanParcels 'run' with runtime and timedelta zero. First set up the piecewise section ###Code lonlat = xr.Dataset(pd.read_csv(sectionPath / sectionFilename,delim_whitespace=True)) lonlat.lon.attrs['long_name']='Longitude' lonlat.lat.attrs['long_name']='Latitude' lonlat.lon.attrs['standard_name']='longitude' lonlat.lat.attrs['standard_name']='latitude' lonlat.lon.attrs['units']='degrees_east' lonlat.lat.attrs['units']='degrees_north' lonlatdiff = lonlat.diff('dim_0') lonlat2mean= lonlat.rolling({'dim_0':2}).mean() lonlat.plot.scatter(x='lon',y='lat') lonlat2mean.plot.scatter(x='lon',y='lat') lonlatdiff = lonlatdiff.assign({'y':lonlatdiff['lat']*degree2km}) lonlatdiff = lonlatdiff.assign({'x':lonlatdiff['lon']*degree2km*np.cos(np.radians(lonlat2mean.lat.data[1:]))}) lonlatdiff=lonlatdiff.assign({'length':np.sqrt(lonlatdiff['x']**2+lonlatdiff['y']**2)}) lonlatdiff=lonlatdiff.assign({'length_west':lonlatdiff.length.sum() - np.cumsum(lonlatdiff.length[::-1])[::-1]}) lonlatdiff=lonlatdiff.assign({'costheta':lonlatdiff['x']/lonlatdiff['length']}) lonlatdiff=lonlatdiff.assign({'sintheta':lonlatdiff['y']/lonlatdiff['length']}) total_length = lonlatdiff.length.sum().data print(total_length) lonlatdiff.length.shape[0] ###Output _____no_output_____ ###Markdown Seed particles uniform random along OSNAP section ###Code np.random.seed(RNG_seed) # define time of release for each particle relative to t0 # can start each particle at a different time if required # here all start at time t_start. 
times = [] lons = [] lats = [] depths = [] # for subsect in range(lonlatdiff.length.shape[0]): for subsect in range(start_vertex,end_vertex): number_particles = int(create_number_particles*lonlatdiff.length[subsect]/total_length) time = np.zeros(number_particles) time += (t_start - t_0).total_seconds() # start along a line from west to east west_lat = lonlat.lat[subsect].data west_lon = lonlat.lon[subsect].data east_lat = lonlat.lat[subsect+1].data east_lon = lonlat.lon[subsect+1].data lon = np.random.uniform( low=west_lon, high = east_lon, size=time.shape ) lat = west_lat + ((lon - west_lon) * (east_lat - west_lat)/ (east_lon - west_lon)) # at depths from surface to max_release_depth depth = np.random.uniform( low=min_release_depth, high=max_release_depth, size=time.shape ) times.append(time) lons.append(lon) lats.append(lat) depths.append(depth) time = np.concatenate(times) lon = np.concatenate(lons) lat = np.concatenate(lats) depth = np.concatenate(depths) ###Output _____no_output_____ ###Markdown Build particle set ###Code %%time pset = ParticleSet( fieldset=fieldset, pclass=SampleParticle, lat=lat, lon=lon, # speed_param=speed_param, depth=depth, time=time # repeatdt = repeatdt ) print(f"Created {len(pset)} particles.") # display(pset[:5]) # display(pset[-5:]) ###Output Created 2643886 particles. ###Markdown Compose custom kernelWe'll create three additional kernels:- One Kernel adds velocity sampling- One Kernel adds temperature sampling- One kernel adds salinity samplingThen, we combine the builtin `AdvectionRK4_3D` kernel with these additional kernels. ###Code def velocity_sampling(particle, fieldset, time): '''Sample velocity.''' (particle.uvel,particle.vvel) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] def temperature_sampling(particle, fieldset, time): '''Sample temperature.''' particle.temp = fieldset.T[time, particle.depth, particle.lat, particle.lon] def salinity_sampling(particle, fieldset, time): '''Sample salinity.''' particle.salt = fieldset.S[time, particle.depth, particle.lat, particle.lon] def mxl_sampling(particle, fieldset, time): '''Sample mixed layer depth.''' particle.mxl = fieldset.MXL[time, particle.depth, particle.lat, particle.lon] custom_kernel = ( pset.Kernel(AdvectionRK4_3D) # + pset.Kernel(temperature_sensitivity) + pset.Kernel(temperature_sampling) + pset.Kernel(salinity_sampling) + pset.Kernel(velocity_sampling) + pset.Kernel(mxl_sampling) ) ###Output _____no_output_____ ###Markdown Be able to handle errors during integrationWe have restricted our domain so in principle, particles could reach undefined positions.In that case, we want to just delete the particle (without forgetting its history). ###Code def DeleteParticle(particle, fieldset, time): particle.delete() recovery_cases = { ErrorCode.ErrorOutOfBounds: DeleteParticle, ErrorCode.Error: DeleteParticle, ErrorCode.ErrorInterpolation: DeleteParticle } ###Output _____no_output_____ ###Markdown Run with runtime=0 to initialise fields ###Code %%time # with dask.config.set(**{'array.slicing.split_large_chunks': False}): pset.execute( custom_kernel, runtime=0, # dt=timedelta(minutes=0), # output_file=outputfile, recovery=recovery_cases ) plot_section_sdist() ###Output _____no_output_____ ###Markdown Trim unwanted points from ParticleSetUse initialised fields to remove land points. We test `temp == 0.0` (the mask value over land). 
###Code t = np.array([p.temp for p in pset]) # u = np.array([p.uvel for p in pset]) # v = np.array([p.vvel for p in pset]) pset.remove_indices(np.argwhere(t == 0).flatten()) # pset.remove(np.argwhere(x * y * z == 0).flatten()) print(len(pset)) plot_section_sdist() ###Output _____no_output_____ ###Markdown Test velocity normal to section Velocity conversions from degrees lat/lon per second to m/s ###Code u = np.array([p.uvel for p in pset]) v = np.array([p.vvel for p in pset]) u=u * degree2km * 1000.0 * np.cos(np.radians(pset.lat)) v=v * degree2km * 1000.0 ###Output _____no_output_____ ###Markdown normal velocities ###Code section_index = np.searchsorted(lonlat.lon,pset.lon)-1 u_normal = v * lonlatdiff.costheta[section_index].data - u * lonlatdiff.sintheta[section_index].data abs(u_normal).max() ###Output _____no_output_____ ###Markdown remove particles randomly with probability proportional to normal speed ###Code u_random = np.random.rand(len(u_normal))*max_current pset.remove_indices(np.argwhere(abs(u_normal) < u_random).flatten()) print(len(pset)) plot_section_sdist() ###Output _____no_output_____ ###Markdown Prepare outputWe define an output file and specify the desired output frequency. ###Code # output_filename = 'Parcels_IFFForwards_1m_June2016_2000.nc' npart = str(len(pset)) output_filename = 'tracks_randomvel_mxl_'+sectionname+direction+year_str+month_str+day_str+'_N'+npart+'_D'+days+'_Rnd'+ seed+'.nc' outfile = outpath / output_filename print(outfile) outputfile = pset.ParticleFile( name=outfile, outputdt=timedelta(hours=outputdt_in_hours) ) ###Output ../data/raw/tracks_randomvel_mxl_osnap_backward_20191119_N55395_D3650_Rnd14565.nc ###Markdown Execute the experimentWe'll evolve particles, log their positions and variables to the output buffer and finally export the output to a the file. 
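Note that `dt_in_minutes` is negative here, so the trajectories are integrated backward in time from the release date. With a runtime of 3650 days and positions written every 120 hours, each trajectory carries up to roughly 730 output records.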
Run the experiment ###Code %%time # with dask.config.set(**{'array.slicing.split_large_chunks': False}): pset.execute( custom_kernel, runtime=timedelta(days=runtime_in_days), dt=timedelta(minutes=dt_in_minutes), output_file=outputfile, recovery=recovery_cases ) # outputfile.export() outputfile.close() conda list pip list ###Output Package Version ----------------------------- -------------------------- alembic 1.5.5 ansiwrap 0.8.4 anyio 2.2.0 appdirs 1.4.4 argon2-cffi 20.1.0 asciitree 0.3.3 async-generator 1.10 attrs 20.3.0 Babel 2.9.0 backcall 0.2.0 backports.functools-lru-cache 1.6.1 basemap 1.2.1 black 20.8b1 bleach 3.3.0 blinker 1.4 blosc 1.10.2 bokeh 2.3.0 Bottleneck 1.3.2 brotlipy 0.7.0 cached-property 1.5.2 cachetools 4.2.1 Cartopy 0.18.0 certifi 2020.12.5 certipy 0.1.3 cffi 1.14.5 cftime 1.4.1 cgen 2020.1 chardet 4.0.0 click 7.1.2 click-plugins 1.1.1 cligj 0.7.1 cloudpickle 1.6.0 cmocean 2.0 colorcet 2.0.6 colorspacious 1.1.2 conda 4.9.2 conda-package-handling 1.7.2 cryptography 3.4.4 cycler 0.10.0 cytoolz 0.11.0 dask 2021.2.0 datashader 0.12.0 datashape 0.5.4 decorator 4.4.2 defusedxml 0.6.0 distributed 2021.2.0 entrypoints 0.3 fasteners 0.14.1 Fiona 1.8.18 fsspec 0.8.7 GDAL 3.2.1 geopandas 0.9.0 geoviews 0.0.0+g33876c88.gitarchive gsw 3.4.0 h5netcdf 0.10.0 h5py 3.1.0 HeapDict 1.0.1 holoviews 1.14.2 hvplot 0.7.1 idna 2.10 importlib-metadata 3.7.0 ipykernel 5.5.0 ipython 7.21.0 ipython-genutils 0.2.0 jedi 0.18.0 Jinja2 2.11.3 joblib 1.0.1 json5 0.9.5 jsonschema 3.2.0 jupyter-client 6.1.11 jupyter-core 4.7.1 jupyter-packaging 0.7.12 jupyter-server 1.4.1 jupyter-telemetry 0.1.0 jupyterhub 1.3.0 jupyterlab 3.0.9 jupyterlab-pygments 0.1.2 jupyterlab-server 2.3.0 kiwisolver 1.3.1 llvmlite 0.36.0 locket 0.2.0 Mako 1.1.4 mamba 0.7.14 Markdown 3.3.4 MarkupSafe 1.1.1 matplotlib 3.3.4 mistune 0.8.4 monotonic 1.5 msgpack 1.0.2 multipledispatch 0.6.0 munch 2.5.0 mypy-extensions 0.4.3 nbclassic 0.2.6 nbclient 0.5.3 nbconvert 6.0.7 nbformat 5.1.2 nest-asyncio 1.4.3 netCDF4 1.5.6 notebook 6.2.0 numba 0.53.0 numcodecs 0.7.3 numpy 1.20.1 oauthlib 3.0.1 olefile 0.46 packaging 20.9 pamela 1.0.0 pandas 1.2.3 pandocfilters 1.4.2 panel 0.11.0 papermill 2.3.3 param 1.10.1 parcels 2.2.2 parso 0.8.1 partd 1.1.0 pathspec 0.8.1 patsy 0.5.1 pexpect 4.8.0 pickleshare 0.7.5 Pillow 8.1.2 pip 21.0.1 progressbar2 3.53.1 prometheus-client 0.9.0 prompt-toolkit 3.0.16 psutil 5.8.0 ptyprocess 0.7.0 pycosat 0.6.3 pycparser 2.20 pyct 0.4.6 pycurl 7.43.0.6 Pygments 2.8.0 PyJWT 2.0.1 pymbolic 2020.1 pyOpenSSL 20.0.1 pyparsing 2.4.7 pyproj 3.0.1 PyQt5 5.12.3 PyQt5-sip 4.19.18 PyQtChart 5.12 PyQtWebEngine 5.12.1 pyrsistent 0.17.3 pyshp 2.1.3 PySocks 1.7.1 python-dateutil 2.8.1 python-editor 1.0.4 python-json-logger 2.0.1 python-utils 2.5.5 pytools 2021.2 pytz 2021.1 pyviz-comms 2.0.1 PyYAML 5.4.1 pyzmq 22.0.3 regex 2020.11.13 requests 2.25.1 Rtree 0.9.7 ruamel-yaml-conda 0.15.80 ruamel.yaml 0.16.12 ruamel.yaml.clib 0.2.2 scikit-learn 0.24.1 scipy 1.6.1 seaborn 0.11.1 seawater 3.3.4 Send2Trash 1.5.0 setuptools 49.6.0.post20210108 Shapely 1.7.1 six 1.15.0 sniffio 1.2.0 sortedcontainers 2.3.0 sparse 0.11.2 SQLAlchemy 1.3.23 statsmodels 0.12.2 tblib 1.6.0 tenacity 7.0.0 terminado 0.9.2 testpath 0.4.4 textwrap3 0.9.2 threadpoolctl 2.1.0 toml 0.10.2 toolz 0.11.1 tornado 6.1 tqdm 4.58.0 traitlets 5.0.5 typed-ast 1.4.2 typing-extensions 3.7.4.3 urllib3 1.26.3 wcwidth 0.2.5 webencodings 0.5.1 wheel 0.36.2 xarray 0.17.0 xhistogram 0.1.2 zarr 2.6.1 zict 2.0.0 zipp 3.4.0
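###Markdown
As a quick check of what was written, the track file can be opened with `xarray` (already imported as `xr` in the preamble). The cell below is only a sketch and assumes the default Parcels output layout (dimensions `traj`/`obs`, variables `lon`, `lat`, `z`, `time` plus the sampled fields such as `temp` and `salt`):
###Code
ds = xr.open_dataset(outfile)
print(ds)

# Scatter the first recorded position of every trajectory as a rough map of the release section.
plt.figure(figsize=(8, 4))
plt.scatter(ds.lon.isel(obs=0), ds.lat.isel(obs=0), s=0.5)
plt.xlabel('longitude [$\degree$E]')
plt.ylabel('latitude [$\degree$N]')
plt.show()
###Output
_____no_output_____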
tutorials/1-Introduction/FinRL_StockTrading_Fundamental.ipynb
###Markdown Automated stock trading using FinRL with financial dataTrained a Deep Reinforcement Learning model using FinRL and companies' financial ratio, and then backtested the model to examine how well-trained the model is* This Google Colabolatory notebook is based on the tutorial of FinRL: https://towardsdatascience.com/finrl-for-quantitative-finance-tutorial-for-multiple-stock-trading-7b00763b7530* This project is a final project of the almuni-mentored research project at Columbia University, Application of Reinforcement Learning to Finance, mentored by Bruce Yang from AI4Finance.* For more detailed explanation, please check out my Medium post: https://medium.com/@mariko.sawada1/automated-stock-trading-with-deep-reinforcement-learning-and-financial-data-a63286ccbe2b Content * [1. Problem Definition](0)* [2. Getting Started - Load Python packages](1) * [2.1. Install Packages](1.1) * [2.2. Check Additional Packages](1.2) * [2.3. Import Packages](1.3) * [2.4. Create Folders](1.4)* [3. Download Data](2)* [4. Preprocess fundamental Data](3) * [4-1 Import financial data](3.1) * [4-2 Specify items needed to calculate financial ratios](3.2) * [4-3 Calculate financial ratios](3.3) * [4-4 Deal with NAs and infinite values](3.4) * [4-5 Merge stock price data and ratios into one dataframe](3.5) * [4-6 Calculate market valuation ratios using daily stock price data](3.6)* [5.Build Environment](4) * [5.1. Training & Trade Data Split](4.1) * [5.2. User-defined Environment](4.2) * [5.3. Initialize Environment](4.3) * [6.Implement DRL Algorithms](5) * [7.Backtesting Performance](6) * [7.1. BackTestStats](6.1) * [7.2. BackTestPlot](6.2) * [7.3. Baseline Stats](6.3) * [7.3. Compare to Stock Market Index](6.4) Part 1. Problem Definition This problem is to design an automated trading solution for single stock trading. We model the stock trading process as a Markov Decision Process (MDP). We then formulate our trading goal as a maximization problem.The algorithm is trained using Deep Reinforcement Learning (DRL) algorithms and the components of the reinforcement learning environment are:* Action: The action space describes the allowed actions that the agent interacts with theenvironment. Normally, a ∈ A includes three actions: a ∈ {−1, 0, 1}, where −1, 0, 1 representselling, holding, and buying one stock. Also, an action can be carried upon multiple shares. We usean action space {−k, ..., −1, 0, 1, ..., k}, where k denotes the number of shares. For example, "Buy10 shares of AAPL" or "Sell 10 shares of AAPL" are 10 or −10, respectively* Reward function: r(s, a, s′) is the incentive mechanism for an agent to learn a better action. The change of the portfolio value when action a is taken at state s and arriving at new state s', i.e., r(s, a, s′) = v′ − v, where v′ and v represent the portfoliovalues at state s′ and s, respectively* State: The state space describes the observations that the agent receives from the environment. Just as a human trader needs to analyze various information before executing a trade, soour trading agent observes many different features to better learn in an interactive environment.* Environment: Dow 30 consituentsThe data of the single stock that we will be using for this case study is obtained from Yahoo Finance API. The data contains Open-High-Low-Close price and volume. Part 2. Load Python Packages 2.1. 
Install all the packages through FinRL library ###Code ## install finrl library %pip install git+https://github.com/AI4Finance-LLC/FinRL-Library.git ###Output Looking in indexes: https://mirror.sjtu.edu.cn/pypi/web/simple Collecting git+https://github.com/AI4Finance-LLC/FinRL-Library.git Cloning https://github.com/AI4Finance-LLC/FinRL-Library.git to /tmp/pip-req-build-ynujm_zf Running command git clone -q https://github.com/AI4Finance-LLC/FinRL-Library.git /tmp/pip-req-build-ynujm_zf Resolved https://github.com/AI4Finance-LLC/FinRL-Library.git to commit 44b82fb0c7a9b9aebc59265b0ff20295ef4119f3 Collecting pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2 Cloning https://github.com/quantopian/pyfolio.git to /tmp/pip-install-ja2vgv06/pyfolio_0da40ac97a354b908e4fcbde344ba2d7 Running command git clone -q https://github.com/quantopian/pyfolio.git /tmp/pip-install-ja2vgv06/pyfolio_0da40ac97a354b908e4fcbde344ba2d7 Resolved https://github.com/quantopian/pyfolio.git to commit 4b901f6d73aa02ceb6d04b7d83502e5c6f2e81aa Collecting elegantrl@ git+https://github.com/AI4Finance-Foundation/ElegantRL.git#egg=elegantrl Cloning https://github.com/AI4Finance-Foundation/ElegantRL.git to /tmp/pip-install-ja2vgv06/elegantrl_8779a3744a2946d3b2f8252427b25215 Running command git clone -q https://github.com/AI4Finance-Foundation/ElegantRL.git /tmp/pip-install-ja2vgv06/elegantrl_8779a3744a2946d3b2f8252427b25215 Resolved https://github.com/AI4Finance-Foundation/ElegantRL.git to commit fb35e25f01c50af61fa4697824025be50b2e53f1 Requirement already satisfied: numpy>=1.17.3 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (1.21.2) Requirement already satisfied: pandas>=1.1.5 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (1.3.5) Requirement already satisfied: stockstats>=0.4.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (0.4.1) Requirement already satisfied: yfinance in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (0.1.70) Requirement already satisfied: elegantrl in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (0.3.3) Requirement already satisfied: matplotlib in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (3.5.1) Requirement already satisfied: scikit-learn>=0.21.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (1.0.2) Requirement already satisfied: gym>=0.17 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (0.19.0) Requirement already satisfied: stable-baselines3[extra] in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (1.4.0) Requirement already satisfied: ray[default] in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (1.11.0) Requirement already satisfied: lz4 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (4.0.0) Requirement already satisfied: tensorboardX in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (2.5) Requirement already satisfied: gputil in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (1.4.0) Requirement already satisfied: exchange_calendars in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (3.6.1) Requirement already satisfied: alpaca_trade_api in 
/home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (1.2.3)
...
Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in
/home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.5) (1.8.1) Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.5) (0.4.6) Requirement already satisfied: werkzeug>=0.11.15 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.5) (2.0.3) Requirement already satisfied: tensorboard-data-server<0.7.0,>=0.6.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.5) (0.6.1) Requirement already satisfied: requests-oauthlib>=0.7.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.5) (1.3.1) Requirement already satisfied: oauthlib>=3.0.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.5) (3.2.0) Requirement already satisfied: psycopg2-binary in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from wrds->finrl==0.3.5) (2.9.3) Requirement already satisfied: mock in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from wrds->finrl==0.3.5) (4.0.3) Requirement already satisfied: multitasking>=0.0.7 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from yfinance->finrl==0.3.5) (0.0.10) Note: you may need to restart the kernel to use updated packages. ###Markdown 2.2. Check if the additional packages needed are present, if not install them. * Yahoo Finance API* pandas* numpy* matplotlib* stockstats* OpenAI gym* stable-baselines* tensorflow* pyfolio 2.3. Import Packages ###Code import pandas as pd import numpy as np import matplotlib import matplotlib.pyplot as plt # matplotlib.use('Agg') import datetime %matplotlib inline from finrl import config from finrl import config_tickers from finrl.finrl_meta.preprocessor.yahoodownloader import YahooDownloader from finrl.finrl_meta.preprocessor.preprocessors import FeatureEngineer, data_split from finrl.finrl_meta.env_stock_trading.env_stocktrading import StockTradingEnv from finrl.agents.stablebaselines3.models import DRLAgent from finrl.plot import backtest_stats, backtest_plot, get_daily_return, get_baseline from pprint import pprint import sys sys.path.append("../FinRL-Library") import itertools ###Output /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages/pyfolio/pos.py:27: UserWarning: Module "zipline.assets" not found; multipliers will not be applied to position notionals. 'Module "zipline.assets" not found; multipliers will not be applied' ###Markdown 2.4. Create Folders ###Code import os if not os.path.exists("./" + config.DATA_SAVE_DIR): os.makedirs("./" + config.DATA_SAVE_DIR) if not os.path.exists("./" + config.TRAINED_MODEL_DIR): os.makedirs("./" + config.TRAINED_MODEL_DIR) if not os.path.exists("./" + config.TENSORBOARD_LOG_DIR): os.makedirs("./" + config.TENSORBOARD_LOG_DIR) if not os.path.exists("./" + config.RESULTS_DIR): os.makedirs("./" + config.RESULTS_DIR) ###Output _____no_output_____ ###Markdown Part 3. Download Stock Data from Yahoo FinanceYahoo Finance is a website that provides stock data, financial news, financial reports, etc. 
All the data provided by Yahoo Finance is free.* FinRL uses a class **YahooDownloader** to fetch data from Yahoo Finance API* Call Limit: Using the Public API (without authentication), you are limited to 2,000 requests per hour per IP (or up to a total of 48,000 requests a day). -----class YahooDownloader: Provides methods for retrieving daily stock data from Yahoo Finance API Attributes ---------- start_date : str start date of the data (modified from config.py) end_date : str end date of the data (modified from config.py) ticker_list : list a list of stock tickers (modified from config.py) Methods ------- fetch_data() Fetches data from yahoo API ###Code print(config_tickers.DOW_30_TICKER) df = YahooDownloader(start_date = '2009-01-01', end_date = '2021-01-01', ticker_list = config_tickers.DOW_30_TICKER).fetch_data() df.shape df.head() df['date'] = pd.to_datetime(df['date'],format='%Y-%m-%d') df.sort_values(['date','tic'],ignore_index=True).head() ###Output _____no_output_____ ###Markdown Part 4: Preprocess fundamental data- Import finanical data downloaded from Compustat via WRDS(Wharton Research Data Service)- Preprocess the dataset and calculate financial ratios- Add those ratios to the price data preprocessed in Part 3- Calculate price-related ratios such as P/E and P/B 4-1 Import the financial data ###Code # Import fundamental data from my GitHub repository url = 'https://raw.githubusercontent.com/mariko-sawada/FinRL_with_fundamental_data/main/dow_30_fundamental_wrds.csv' fund = pd.read_csv(url) # Check the imported dataset fund.head() ###Output _____no_output_____ ###Markdown 4-2 Specify items needed to calculate financial ratios- To know more about the data description of the dataset, please check WRDS's website(https://wrds-www.wharton.upenn.edu/). Login will be required. ###Code # List items that are used to calculate financial ratios items = [ 'datadate', # Date 'tic', # Ticker 'oiadpq', # Quarterly operating income 'revtq', # Quartely revenue 'niq', # Quartely net income 'atq', # Total asset 'teqq', # Shareholder's equity 'epspiy', # EPS(Basic) incl. Extraordinary items 'ceqq', # Common Equity 'cshoq', # Common Shares Outstanding 'dvpspq', # Dividends per share 'actq', # Current assets 'lctq', # Current liabilities 'cheq', # Cash & Equivalent 'rectq', # Recievalbles 'cogsq', # Cost of Goods Sold 'invtq', # Inventories 'apq',# Account payable 'dlttq', # Long term debt 'dlcq', # Debt in current liabilites 'ltq' # Liabilities ] # Omit items that will not be used fund_data = fund[items] # Rename column names for the sake of readability fund_data = fund_data.rename(columns={ 'datadate':'date', # Date 'oiadpq':'op_inc_q', # Quarterly operating income 'revtq':'rev_q', # Quartely revenue 'niq':'net_inc_q', # Quartely net income 'atq':'tot_assets', # Assets 'teqq':'sh_equity', # Shareholder's equity 'epspiy':'eps_incl_ex', # EPS(Basic) incl. 
Extraordinary items 'ceqq':'com_eq', # Common Equity 'cshoq':'sh_outstanding', # Common Shares Outstanding 'dvpspq':'div_per_sh', # Dividends per share 'actq':'cur_assets', # Current assets 'lctq':'cur_liabilities', # Current liabilities 'cheq':'cash_eq', # Cash & Equivalent 'rectq':'receivables', # Receivalbles 'cogsq':'cogs_q', # Cost of Goods Sold 'invtq':'inventories', # Inventories 'apq': 'payables',# Account payable 'dlttq':'long_debt', # Long term debt 'dlcq':'short_debt', # Debt in current liabilites 'ltq':'tot_liabilities' # Liabilities }) # Check the data fund_data.head() ###Output _____no_output_____ ###Markdown 4-3 Calculate financial ratios- For items from Profit/Loss statements, we calculate LTM (Last Twelve Months) and use them to derive profitability related ratios such as Operating Maring and ROE. For items from balance sheets, we use the numbers on the day.- To check the definitions of the financial ratios calculated here, please refer to CFI's website: https://corporatefinanceinstitute.com/resources/knowledge/finance/financial-ratios/ ###Code # Calculate financial ratios date = pd.to_datetime(fund_data['date'],format='%Y%m%d') tic = fund_data['tic'].to_frame('tic') # Profitability ratios # Operating Margin OPM = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='OPM') for i in range(0, fund_data.shape[0]): if i-3 < 0: OPM[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: OPM.iloc[i] = np.nan else: OPM.iloc[i] = np.sum(fund_data['op_inc_q'].iloc[i-3:i])/np.sum(fund_data['rev_q'].iloc[i-3:i]) # Net Profit Margin NPM = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='NPM') for i in range(0, fund_data.shape[0]): if i-3 < 0: NPM[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: NPM.iloc[i] = np.nan else: NPM.iloc[i] = np.sum(fund_data['net_inc_q'].iloc[i-3:i])/np.sum(fund_data['rev_q'].iloc[i-3:i]) # Return On Assets ROA = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='ROA') for i in range(0, fund_data.shape[0]): if i-3 < 0: ROA[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: ROA.iloc[i] = np.nan else: ROA.iloc[i] = np.sum(fund_data['net_inc_q'].iloc[i-3:i])/fund_data['tot_assets'].iloc[i] # Return on Equity ROE = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='ROE') for i in range(0, fund_data.shape[0]): if i-3 < 0: ROE[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: ROE.iloc[i] = np.nan else: ROE.iloc[i] = np.sum(fund_data['net_inc_q'].iloc[i-3:i])/fund_data['sh_equity'].iloc[i] # For calculating valuation ratios in the next subpart, calculate per share items in advance # Earnings Per Share EPS = fund_data['eps_incl_ex'].to_frame('EPS') # Book Per Share BPS = (fund_data['com_eq']/fund_data['sh_outstanding']).to_frame('BPS') # Need to check units #Dividend Per Share DPS = fund_data['div_per_sh'].to_frame('DPS') # Liquidity ratios # Current ratio cur_ratio = (fund_data['cur_assets']/fund_data['cur_liabilities']).to_frame('cur_ratio') # Quick ratio quick_ratio = ((fund_data['cash_eq'] + fund_data['receivables'] )/fund_data['cur_liabilities']).to_frame('quick_ratio') # Cash ratio cash_ratio = (fund_data['cash_eq']/fund_data['cur_liabilities']).to_frame('cash_ratio') # Efficiency ratios # Inventory turnover ratio inv_turnover = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='inv_turnover') for i in range(0, fund_data.shape[0]): if i-3 < 0: inv_turnover[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: inv_turnover.iloc[i] = np.nan else: inv_turnover.iloc[i] = 
np.sum(fund_data['cogs_q'].iloc[i-3:i])/fund_data['inventories'].iloc[i]

# Receivables turnover ratio
acc_rec_turnover = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='acc_rec_turnover')
for i in range(0, fund_data.shape[0]):
    if i-3 < 0:
        acc_rec_turnover[i] = np.nan
    elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]:
        acc_rec_turnover.iloc[i] = np.nan
    else:
        acc_rec_turnover.iloc[i] = np.sum(fund_data['rev_q'].iloc[i-3:i])/fund_data['receivables'].iloc[i]

# Payables turnover ratio
acc_pay_turnover = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='acc_pay_turnover')
for i in range(0, fund_data.shape[0]):
    if i-3 < 0:
        acc_pay_turnover[i] = np.nan
    elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]:
        acc_pay_turnover.iloc[i] = np.nan
    else:
        acc_pay_turnover.iloc[i] = np.sum(fund_data['cogs_q'].iloc[i-3:i])/fund_data['payables'].iloc[i]

## Leverage financial ratios
# Debt ratio
debt_ratio = (fund_data['tot_liabilities']/fund_data['tot_assets']).to_frame('debt_ratio')

# Debt to Equity ratio
debt_to_equity = (fund_data['tot_liabilities']/fund_data['sh_equity']).to_frame('debt_to_equity')

# Create a dataframe that merges all the ratios
ratios = pd.concat([date,tic,OPM,NPM,ROA,ROE,EPS,BPS,DPS,
                    cur_ratio,quick_ratio,cash_ratio,inv_turnover,acc_rec_turnover,acc_pay_turnover,
                    debt_ratio,debt_to_equity], axis=1)

# Check the ratio data
ratios.head()

ratios.tail()
###Output
 _____no_output_____
###Markdown
4-4 Deal with NAs and infinite values
- We replace N/A and infinite values with zero so that they can be recognized as part of the state
###Code
# Replace NAs and infinite values with zero
final_ratios = ratios.copy()
final_ratios = final_ratios.fillna(0)
final_ratios = final_ratios.replace(np.inf,0)

final_ratios.head()

final_ratios.tail()
###Output
 _____no_output_____
###Markdown
4-5 Merge stock price data and ratios into one dataframe
- Merge the price dataframe preprocessed in Part 3 with the ratio dataframe created in this part
- Since prices are daily and the ratios are quarterly, the ratio columns contain NAs after the merge. We deal with this by backfilling the ratios.
###Code
list_ticker = df["tic"].unique().tolist()
list_date = list(pd.date_range(df['date'].min(),df['date'].max()))
combination = list(itertools.product(list_date,list_ticker))

# Merge stock price data and ratios into one dataframe
processed_full = pd.DataFrame(combination,columns=["date","tic"]).merge(df,on=["date","tic"],how="left")
processed_full = processed_full.merge(final_ratios,how='left',on=['date','tic'])
processed_full = processed_full.sort_values(['tic','date'])

# Backfill the ratio data to make them daily
processed_full = processed_full.bfill(axis='rows')
###Output
 _____no_output_____
###Markdown
4-6 Calculate market valuation ratios using daily stock price data
###Code
# Calculate P/E, P/B and dividend yield using the daily closing price
processed_full['PE'] = processed_full['close']/processed_full['EPS']
processed_full['PB'] = processed_full['close']/processed_full['BPS']
processed_full['Div_yield'] = processed_full['DPS']/processed_full['close']

# Drop the per-share items used for the above calculation
processed_full = processed_full.drop(columns=['day','EPS','BPS','DPS'])

# Replace NAs and infinite values with zero
processed_full = processed_full.copy()
processed_full = processed_full.fillna(0)
processed_full = processed_full.replace(np.inf,0)

# Check the final data
processed_full.sort_values(['date','tic'],ignore_index=True).head(10)
###Output
 _____no_output_____
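###Markdown
Before moving on to the trading environment, it can be useful to sanity-check the merged dataframe. The cell below is a minimal sketch rather than part of the original pipeline: it assumes the `processed_full` dataframe created above and uses an illustrative subset of the feature columns.
###Code
import numpy as np

# Optional sanity check (sketch): after the merge, backfill and cleaning steps there should be
# no NaN or non-finite values left in the columns that will feed the trading environment.
feature_cols = ['PE', 'PB', 'Div_yield', 'OPM', 'ROE', 'debt_ratio']  # illustrative subset
print("NaNs left:", processed_full[feature_cols].isna().any().any())
print("all values finite:", np.isfinite(processed_full[feature_cols].astype(float).to_numpy()).all())

# Each ticker should now cover the same daily calendar
processed_full.groupby('tic')['date'].agg(['min', 'max', 'count']).head()
###Output
 _____no_output_____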
###Markdown
Part 5. Design Environment
Considering the stochastic and interactive nature of automated stock trading, the financial task is modeled as a **Markov Decision Process (MDP)**. The training process involves observing the price change, taking an action, and computing the reward, so that the agent adjusts its strategy accordingly. By interacting with the environment, the trading agent derives a trading strategy that maximizes cumulative reward over time.

Our trading environments, based on the OpenAI Gym framework, simulate live stock markets with real market data, following the principle of time-driven simulation.

The action space describes the allowed actions through which the agent interacts with the environment. In the simplest case an action takes one of three values {-1, 0, 1}, where -1, 0, 1 represent selling, holding, and buying one share. An action can also be applied to multiple shares: we use an action space {-k, …, -1, 0, 1, …, k}, where k denotes the number of shares to buy and -k the number of shares to sell. For example, "Buy 10 shares of AAPL" and "Sell 10 shares of AAPL" correspond to 10 and -10, respectively. The continuous action space is normalized to [-1, 1], since the policy is parameterized by a Gaussian distribution, which should be normalized and symmetric.

5-1 Split data into training and trade dataset
- Training data split: 2009-01-01 to 2018-12-31
- Trade data split: 2019-01-01 to 2020-12-31
###Code
train = data_split(processed_full, '2009-01-01','2019-01-01')
trade = data_split(processed_full, '2019-01-01','2021-01-01')
# Check the length of the two datasets
print(len(train))
print(len(trade))

train.head()

trade.head()
###Output
 _____no_output_____
###Markdown
5-2 Set up the training environment
###Code
import gym
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from gym import spaces
from gym.utils import seeding
from stable_baselines3.common.vec_env import DummyVecEnv

matplotlib.use("Agg")

# from stable_baselines3.common import logger


class StockTradingEnv(gym.Env):
    """A stock trading environment for OpenAI gym"""

    metadata = {"render.modes": ["human"]}

    def __init__(
        self,
        df,
        stock_dim,
        hmax,
        initial_amount,
        buy_cost_pct,
        sell_cost_pct,
        reward_scaling,
        state_space,
        action_space,
        tech_indicator_list,
        turbulence_threshold=None,
        risk_indicator_col="turbulence",
        make_plots=False,
        print_verbosity=10,
        day=0,
        initial=True,
        previous_state=[],
        model_name="",
        mode="",
        iteration="",
    ):
        self.day = day
        self.df = df
        self.stock_dim = stock_dim
        self.hmax = hmax
        self.initial_amount = initial_amount
        self.buy_cost_pct = buy_cost_pct
        self.sell_cost_pct = sell_cost_pct
        self.reward_scaling = reward_scaling
        self.state_space = state_space
        self.action_space = action_space
        self.tech_indicator_list = tech_indicator_list
        self.action_space = spaces.Box(low=-1, high=1, shape=(self.action_space,))
        self.observation_space = spaces.Box(
            low=-np.inf, high=np.inf, shape=(self.state_space,)
        )
        self.data = self.df.loc[self.day, :]
        self.terminal = False
        self.make_plots = make_plots
        self.print_verbosity = print_verbosity
        self.turbulence_threshold = turbulence_threshold
        self.risk_indicator_col = risk_indicator_col
        self.initial = initial
        self.previous_state = previous_state
        self.model_name = model_name
        self.mode = mode
        self.iteration = iteration
        # initialize state
        self.state = self._initiate_state()

        # initialize reward
        self.reward = 0
        self.turbulence = 0
        self.cost = 0
        self.trades = 0
        self.episode = 0
        # memorize all the total balance change
        self.asset_memory = [self.initial_amount]
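        # Per-step histories: rewards, actions and dates are appended in step() and can be
        # exported afterwards via save_asset_memory() / save_action_memory().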
self.rewards_memory = [] self.actions_memory = [] self.date_memory = [self._get_date()] # self.reset() self._seed() def _sell_stock(self, index, action): def _do_sell_normal(): if self.state[index + 1] > 0: # Sell only if the price is > 0 (no missing data in this particular date) # perform sell action based on the sign of the action if self.state[index + self.stock_dim + 1] > 0: # Sell only if current asset is > 0 sell_num_shares = min( abs(action), self.state[index + self.stock_dim + 1] ) sell_amount = ( self.state[index + 1] * sell_num_shares * (1 - self.sell_cost_pct) ) # update balance self.state[0] += sell_amount self.state[index + self.stock_dim + 1] -= sell_num_shares self.cost += ( self.state[index + 1] * sell_num_shares * self.sell_cost_pct ) self.trades += 1 else: sell_num_shares = 0 else: sell_num_shares = 0 return sell_num_shares # perform sell action based on the sign of the action if self.turbulence_threshold is not None: if self.turbulence >= self.turbulence_threshold: if self.state[index + 1] > 0: # Sell only if the price is > 0 (no missing data in this particular date) # if turbulence goes over threshold, just clear out all positions if self.state[index + self.stock_dim + 1] > 0: # Sell only if current asset is > 0 sell_num_shares = self.state[index + self.stock_dim + 1] sell_amount = ( self.state[index + 1] * sell_num_shares * (1 - self.sell_cost_pct) ) # update balance self.state[0] += sell_amount self.state[index + self.stock_dim + 1] = 0 self.cost += ( self.state[index + 1] * sell_num_shares * self.sell_cost_pct ) self.trades += 1 else: sell_num_shares = 0 else: sell_num_shares = 0 else: sell_num_shares = _do_sell_normal() else: sell_num_shares = _do_sell_normal() return sell_num_shares def _buy_stock(self, index, action): def _do_buy(): if self.state[index + 1] > 0: # Buy only if the price is > 0 (no missing data in this particular date) available_amount = self.state[0] // self.state[index + 1] # print('available_amount:{}'.format(available_amount)) # update balance buy_num_shares = min(available_amount, action) buy_amount = ( self.state[index + 1] * buy_num_shares * (1 + self.buy_cost_pct) ) self.state[0] -= buy_amount self.state[index + self.stock_dim + 1] += buy_num_shares self.cost += self.state[index + 1] * buy_num_shares * self.buy_cost_pct self.trades += 1 else: buy_num_shares = 0 return buy_num_shares # perform buy action based on the sign of the action if self.turbulence_threshold is None: buy_num_shares = _do_buy() else: if self.turbulence < self.turbulence_threshold: buy_num_shares = _do_buy() else: buy_num_shares = 0 pass return buy_num_shares def _make_plot(self): plt.plot(self.asset_memory, "r") plt.savefig("results/account_value_trade_{}.png".format(self.episode)) plt.close() def step(self, actions): self.terminal = self.day >= len(self.df.index.unique()) - 1 if self.terminal: # print(f"Episode: {self.episode}") if self.make_plots: self._make_plot() end_total_asset = self.state[0] + sum( np.array(self.state[1 : (self.stock_dim + 1)]) * np.array(self.state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)]) ) df_total_value = pd.DataFrame(self.asset_memory) tot_reward = ( self.state[0] + sum( np.array(self.state[1 : (self.stock_dim + 1)]) * np.array( self.state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)] ) ) - self.initial_amount ) df_total_value.columns = ["account_value"] df_total_value["date"] = self.date_memory df_total_value["daily_return"] = df_total_value["account_value"].pct_change( 1 ) if df_total_value["daily_return"].std() != 0: sharpe = ( 
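                    # Annualized Sharpe ratio of the episode (risk-free rate taken as 0):
                    # sqrt(252 trading days) * mean(daily return) / std(daily return)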
(252 ** 0.5) * df_total_value["daily_return"].mean() / df_total_value["daily_return"].std() ) df_rewards = pd.DataFrame(self.rewards_memory) df_rewards.columns = ["account_rewards"] df_rewards["date"] = self.date_memory[:-1] if self.episode % self.print_verbosity == 0: print(f"day: {self.day}, episode: {self.episode}") print(f"begin_total_asset: {self.asset_memory[0]:0.2f}") print(f"end_total_asset: {end_total_asset:0.2f}") print(f"total_reward: {tot_reward:0.2f}") print(f"total_cost: {self.cost:0.2f}") print(f"total_trades: {self.trades}") if df_total_value["daily_return"].std() != 0: print(f"Sharpe: {sharpe:0.3f}") print("=================================") if (self.model_name != "") and (self.mode != ""): df_actions = self.save_action_memory() df_actions.to_csv( "results/actions_{}_{}_{}.csv".format( self.mode, self.model_name, self.iteration ) ) df_total_value.to_csv( "results/account_value_{}_{}_{}.csv".format( self.mode, self.model_name, self.iteration ), index=False, ) df_rewards.to_csv( "results/account_rewards_{}_{}_{}.csv".format( self.mode, self.model_name, self.iteration ), index=False, ) plt.plot(self.asset_memory, "r") plt.savefig( "results/account_value_{}_{}_{}.png".format( self.mode, self.model_name, self.iteration ), index=False, ) plt.close() # Add outputs to logger interface # logger.record("environment/portfolio_value", end_total_asset) # logger.record("environment/total_reward", tot_reward) # logger.record("environment/total_reward_pct", (tot_reward / (end_total_asset - tot_reward)) * 100) # logger.record("environment/total_cost", self.cost) # logger.record("environment/total_trades", self.trades) return self.state, self.reward, self.terminal, {} else: actions = actions * self.hmax # actions initially is scaled between 0 to 1 actions = actions.astype( int ) # convert into integer because we can't by fraction of shares if self.turbulence_threshold is not None: if self.turbulence >= self.turbulence_threshold: actions = np.array([-self.hmax] * self.stock_dim) begin_total_asset = self.state[0] + sum( np.array(self.state[1 : (self.stock_dim + 1)]) * np.array(self.state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)]) ) # print("begin_total_asset:{}".format(begin_total_asset)) argsort_actions = np.argsort(actions) sell_index = argsort_actions[: np.where(actions < 0)[0].shape[0]] buy_index = argsort_actions[::-1][: np.where(actions > 0)[0].shape[0]] for index in sell_index: # print(f"Num shares before: {self.state[index+self.stock_dim+1]}") # print(f'take sell action before : {actions[index]}') actions[index] = self._sell_stock(index, actions[index]) * (-1) # print(f'take sell action after : {actions[index]}') # print(f"Num shares after: {self.state[index+self.stock_dim+1]}") for index in buy_index: # print('take buy action: {}'.format(actions[index])) actions[index] = self._buy_stock(index, actions[index]) self.actions_memory.append(actions) # state: s -> s+1 self.day += 1 self.data = self.df.loc[self.day, :] if self.turbulence_threshold is not None: if len(self.df.tic.unique()) == 1: self.turbulence = self.data[self.risk_indicator_col] elif len(self.df.tic.unique()) > 1: self.turbulence = self.data[self.risk_indicator_col].values[0] self.state = self._update_state() end_total_asset = self.state[0] + sum( np.array(self.state[1 : (self.stock_dim + 1)]) * np.array(self.state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)]) ) self.asset_memory.append(end_total_asset) self.date_memory.append(self._get_date()) self.reward = end_total_asset - begin_total_asset 
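            # The one-step reward is the change in total portfolio value; the raw value is
            # logged in rewards_memory below and then multiplied by reward_scaling before
            # being returned to the agent.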
self.rewards_memory.append(self.reward) self.reward = self.reward * self.reward_scaling return self.state, self.reward, self.terminal, {} def reset(self): # initiate state self.state = self._initiate_state() if self.initial: self.asset_memory = [self.initial_amount] else: previous_total_asset = self.previous_state[0] + sum( np.array(self.state[1 : (self.stock_dim + 1)]) * np.array( self.previous_state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)] ) ) self.asset_memory = [previous_total_asset] self.day = 0 self.data = self.df.loc[self.day, :] self.turbulence = 0 self.cost = 0 self.trades = 0 self.terminal = False # self.iteration=self.iteration self.rewards_memory = [] self.actions_memory = [] self.date_memory = [self._get_date()] self.episode += 1 return self.state def render(self, mode="human", close=False): return self.state def _initiate_state(self): if self.initial: # For Initial State if len(self.df.tic.unique()) > 1: # for multiple stock state = ( [self.initial_amount] + self.data.close.values.tolist() + [0] * self.stock_dim + sum( [ self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [], ) ) else: # for single stock state = ( [self.initial_amount] + [self.data.close] + [0] * self.stock_dim + sum([[self.data[tech]] for tech in self.tech_indicator_list], []) ) else: # Using Previous State if len(self.df.tic.unique()) > 1: # for multiple stock state = ( [self.previous_state[0]] + self.data.close.values.tolist() + self.previous_state[ (self.stock_dim + 1) : (self.stock_dim * 2 + 1) ] + sum( [ self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [], ) ) else: # for single stock state = ( [self.previous_state[0]] + [self.data.close] + self.previous_state[ (self.stock_dim + 1) : (self.stock_dim * 2 + 1) ] + sum([[self.data[tech]] for tech in self.tech_indicator_list], []) ) return state def _update_state(self): if len(self.df.tic.unique()) > 1: # for multiple stock state = ( [self.state[0]] + self.data.close.values.tolist() + list(self.state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)]) + sum( [ self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [], ) ) else: # for single stock state = ( [self.state[0]] + [self.data.close] + list(self.state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)]) + sum([[self.data[tech]] for tech in self.tech_indicator_list], []) ) return state def _get_date(self): if len(self.df.tic.unique()) > 1: date = self.data.date.unique()[0] else: date = self.data.date return date def save_asset_memory(self): date_list = self.date_memory asset_list = self.asset_memory # print(len(date_list)) # print(len(asset_list)) df_account_value = pd.DataFrame( {"date": date_list, "account_value": asset_list} ) return df_account_value def save_action_memory(self): if len(self.df.tic.unique()) > 1: # date and close price length must match actions length date_list = self.date_memory[:-1] df_date = pd.DataFrame(date_list) df_date.columns = ["date"] action_list = self.actions_memory df_actions = pd.DataFrame(action_list) df_actions.columns = self.data.tic.values df_actions.index = df_date.date # df_actions = pd.DataFrame({'date':date_list,'actions':action_list}) else: date_list = self.date_memory[:-1] action_list = self.actions_memory df_actions = pd.DataFrame({"date": date_list, "actions": action_list}) return df_actions def _seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] def get_sb_env(self): e = DummyVecEnv([lambda: self]) obs = e.reset() return e, obs ratio_list = ['OPM', 
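              # The fundamental ratios computed in Part 4; they are passed to the environment
              # as tech_indicator_list, so each ratio becomes part of the observed state.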
'NPM','ROA', 'ROE', 'cur_ratio', 'quick_ratio', 'cash_ratio', 'inv_turnover','acc_rec_turnover', 'acc_pay_turnover', 'debt_ratio', 'debt_to_equity', 'PE', 'PB', 'Div_yield'] stock_dimension = len(train.tic.unique()) state_space = 1 + 2*stock_dimension + len(ratio_list)*stock_dimension print(f"Stock Dimension: {stock_dimension}, State Space: {state_space}") # Parameters for the environment env_kwargs = { "hmax": 100, "initial_amount": 1000000, "buy_cost_pct": 0.001, "sell_cost_pct": 0.001, "state_space": state_space, "stock_dim": stock_dimension, "tech_indicator_list": ratio_list, "action_space": stock_dimension, "reward_scaling": 1e-4 } #Establish the training environment using StockTradingEnv() class e_train_gym = StockTradingEnv(df = train, **env_kwargs) ###Output _____no_output_____ ###Markdown Environment for Training ###Code env_train, _ = e_train_gym.get_sb_env() print(type(env_train)) ###Output <class 'stable_baselines3.common.vec_env.dummy_vec_env.DummyVecEnv'> ###Markdown Part 6: Implement DRL Algorithms* The implementation of the DRL algorithms are based on **OpenAI Baselines** and **Stable Baselines**. Stable Baselines is a fork of OpenAI Baselines, with a major structural refactoring, and code cleanups.* FinRL library includes fine-tuned standard DRL algorithms, such as DQN, DDPG,Multi-Agent DDPG, PPO, SAC, A2C and TD3. We also allow users todesign their own DRL algorithms by adapting these DRL algorithms. ###Code # Set up the agent using DRLAgent() class using the environment created in the previous part agent = DRLAgent(env = env_train) ###Output _____no_output_____ ###Markdown Model Training: 5 models, A2C DDPG, PPO, TD3, SAC Model 1: A2C ###Code agent = DRLAgent(env = env_train) model_a2c = agent.get_model("a2c") trained_a2c = agent.train_model(model=model_a2c, tb_log_name='a2c', total_timesteps=100000) ###Output ----------------------------------------- | time/ | | | fps | 95 | | iterations | 100 | | time_elapsed | 5 | | total_timesteps | 500 | | train/ | | | entropy_loss | -42.6 | | explained_variance | 5.96e-08 | | learning_rate | 0.0007 | | n_updates | 99 | | policy_loss | 63.9 | | reward | -0.0025933678 | | std | 1 | | value_loss | 4.83 | ----------------------------------------- -------------------------------------- | time/ | | | fps | 103 | | iterations | 200 | | time_elapsed | 9 | | total_timesteps | 1000 | | train/ | | | entropy_loss | -42.7 | | explained_variance | -0.000537 | | learning_rate | 0.0007 | | n_updates | 199 | | policy_loss | -171 | | reward | -2.1574955 | | std | 1 | | value_loss | 25.8 | -------------------------------------- -------------------------------------- | time/ | | | fps | 107 | | iterations | 300 | | time_elapsed | 13 | | total_timesteps | 1500 | | train/ | | | entropy_loss | -42.7 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 299 | | policy_loss | -24.3 | | reward | 0.16899587 | | std | 1 | | value_loss | 0.982 | -------------------------------------- -------------------------------------- | time/ | | | fps | 110 | | iterations | 400 | | time_elapsed | 18 | | total_timesteps | 2000 | | train/ | | | entropy_loss | -42.6 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 399 | | policy_loss | 74.3 | | reward | -3.0512037 | | std | 1 | | value_loss | 4.96 | -------------------------------------- --------------------------------------- | time/ | | | fps | 112 | | iterations | 500 | | time_elapsed | 22 | | total_timesteps | 2500 | | train/ | | | entropy_loss | -42.6 | | explained_variance | 
-1.19e-07 | | learning_rate | 0.0007 | | n_updates | 499 | | policy_loss | 107 | | reward | -0.17085995 | | std | 1 | | value_loss | 7.74 | --------------------------------------- ------------------------------------ | time/ | | | fps | 113 | | iterations | 600 | | time_elapsed | 26 | | total_timesteps | 3000 | | train/ | | | entropy_loss | -42.7 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 599 | | policy_loss | 165 | | reward | 0.0 | | std | 1 | | value_loss | 13.9 | ------------------------------------ ------------------------------------- | time/ | | | fps | 113 | | iterations | 700 | | time_elapsed | 30 | | total_timesteps | 3500 | | train/ | | | entropy_loss | -42.7 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 699 | | policy_loss | -43.5 | | reward | 4.7688413 | | std | 1 | | value_loss | 11.4 | ------------------------------------- -------------------------------------- | time/ | | | fps | 114 | | iterations | 800 | | time_elapsed | 34 | | total_timesteps | 4000 | | train/ | | | entropy_loss | -42.7 | | explained_variance | 1.19e-07 | | learning_rate | 0.0007 | | n_updates | 799 | | policy_loss | 40.9 | | reward | -0.7754371 | | std | 1 | | value_loss | 1.12 | -------------------------------------- ------------------------------------- | time/ | | | fps | 114 | | iterations | 900 | | time_elapsed | 39 | | total_timesteps | 4500 | | train/ | | | entropy_loss | -42.7 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 899 | | policy_loss | 52.1 | | reward | 1.1952021 | | std | 1 | | value_loss | 3.82 | ------------------------------------- ----------------------------------------- | time/ | | | fps | 115 | | iterations | 1000 | | time_elapsed | 43 | | total_timesteps | 5000 | | train/ | | | entropy_loss | -42.7 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 999 | | policy_loss | -56.4 | | reward | -0.0004602166 | | std | 1 | | value_loss | 2.08 | ----------------------------------------- ------------------------------------- | time/ | | | fps | 115 | | iterations | 1100 | | time_elapsed | 47 | | total_timesteps | 5500 | | train/ | | | entropy_loss | -42.7 | | explained_variance | -2.38e-07 | | learning_rate | 0.0007 | | n_updates | 1099 | | policy_loss | -6.22 | | reward | -3.228336 | | std | 1.01 | | value_loss | 1.29 | ------------------------------------- ------------------------------------------ | time/ | | | fps | 115 | | iterations | 1200 | | time_elapsed | 51 | | total_timesteps | 6000 | | train/ | | | entropy_loss | -42.8 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 1199 | | policy_loss | -60.2 | | reward | -0.00017504377 | | std | 1.01 | | value_loss | 3.58 | ------------------------------------------ -------------------------------------- | time/ | | | fps | 115 | | iterations | 1300 | | time_elapsed | 56 | | total_timesteps | 6500 | | train/ | | | entropy_loss | -42.8 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 1299 | | policy_loss | -54.2 | | reward | -1.3630623 | | std | 1.01 | | value_loss | 3.67 | -------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 1400 | | time_elapsed | 60 | | total_timesteps | 7000 | | train/ | | | entropy_loss | -42.9 | | explained_variance | 1.45e-05 | | learning_rate | 0.0007 | | n_updates | 1399 | | policy_loss | 302 | | reward | 2.7465565 | | std | 1.01 | | value_loss | 97.4 | ------------------------------------- 
------------------------------------- | time/ | | | fps | 116 | | iterations | 1500 | | time_elapsed | 64 | | total_timesteps | 7500 | | train/ | | | entropy_loss | -42.9 | | explained_variance | -0.00238 | | learning_rate | 0.0007 | | n_updates | 1499 | | policy_loss | 128 | | reward | 2.0559413 | | std | 1.01 | | value_loss | 8.62 | ------------------------------------- ------------------------------------------ | time/ | | | fps | 116 | | iterations | 1600 | | time_elapsed | 68 | | total_timesteps | 8000 | | train/ | | | entropy_loss | -42.9 | | explained_variance | 1.19e-07 | | learning_rate | 0.0007 | | n_updates | 1599 | | policy_loss | -74.4 | | reward | -7.6194716e-05 | | std | 1.01 | | value_loss | 3.53 | ------------------------------------------ -------------------------------------- | time/ | | | fps | 116 | | iterations | 1700 | | time_elapsed | 72 | | total_timesteps | 8500 | | train/ | | | entropy_loss | -43 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 1699 | | policy_loss | -73.7 | | reward | 0.46339005 | | std | 1.02 | | value_loss | 3.12 | -------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 1800 | | time_elapsed | 77 | | total_timesteps | 9000 | | train/ | | | entropy_loss | -43.1 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 1799 | | policy_loss | -3.94 | | reward | -2.0293849 | | std | 1.02 | | value_loss | 2.51 | -------------------------------------- -------------------------------------- | time/ | | | fps | 117 | | iterations | 1900 | | time_elapsed | 81 | | total_timesteps | 9500 | | train/ | | | entropy_loss | -43.1 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 1899 | | policy_loss | -231 | | reward | -1.4921011 | | std | 1.02 | | value_loss | 29.5 | -------------------------------------- ------------------------------------- | time/ | | | fps | 117 | | iterations | 2000 | | time_elapsed | 85 | | total_timesteps | 10000 | | train/ | | | entropy_loss | -43.1 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 1999 | | policy_loss | -116 | | reward | 3.9353683 | | std | 1.02 | | value_loss | 34.4 | ------------------------------------- ---------------------------------------- | time/ | | | fps | 117 | | iterations | 2100 | | time_elapsed | 89 | | total_timesteps | 10500 | | train/ | | | entropy_loss | -43.1 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 2099 | | policy_loss | 45.2 | | reward | -0.004765156 | | std | 1.02 | | value_loss | 1.09 | ---------------------------------------- -------------------------------------- | time/ | | | fps | 117 | | iterations | 2200 | | time_elapsed | 93 | | total_timesteps | 11000 | | train/ | | | entropy_loss | -43.1 | | explained_variance | 5.96e-08 | | learning_rate | 0.0007 | | n_updates | 2199 | | policy_loss | 55.2 | | reward | -2.4592872 | | std | 1.02 | | value_loss | 2.92 | -------------------------------------- -------------------------------------- | time/ | | | fps | 117 | | iterations | 2300 | | time_elapsed | 97 | | total_timesteps | 11500 | | train/ | | | entropy_loss | -43.2 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 2299 | | policy_loss | -78.1 | | reward | -3.0025318 | | std | 1.02 | | value_loss | 3.59 | -------------------------------------- ------------------------------------ | time/ | | | fps | 117 | | iterations | 2400 | | time_elapsed | 102 | | total_timesteps | 12000 | | train/ | | | entropy_loss | 
-43.2 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 2399 | | policy_loss | 11.9 | | reward | 2.655691 | | std | 1.02 | | value_loss | 1.06 | ------------------------------------ ------------------------------------------ | time/ | | | fps | 117 | | iterations | 2500 | | time_elapsed | 106 | | total_timesteps | 12500 | | train/ | | | entropy_loss | -43.3 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 2499 | | policy_loss | 11.9 | | reward | -0.00059555296 | | std | 1.02 | | value_loss | 0.141 | ------------------------------------------ -------------------------------------- | time/ | | | fps | 116 | | iterations | 2600 | | time_elapsed | 111 | | total_timesteps | 13000 | | train/ | | | entropy_loss | -43.4 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 2599 | | policy_loss | -157 | | reward | -1.1569912 | | std | 1.03 | | value_loss | 23.5 | -------------------------------------- ------------------------------------------ | time/ | | | fps | 116 | | iterations | 2700 | | time_elapsed | 115 | | total_timesteps | 13500 | | train/ | | | entropy_loss | -43.4 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 2699 | | policy_loss | 66.1 | | reward | -0.00066900573 | | std | 1.03 | | value_loss | 5.08 | ------------------------------------------ ------------------------------------- | time/ | | | fps | 116 | | iterations | 2800 | | time_elapsed | 119 | | total_timesteps | 14000 | | train/ | | | entropy_loss | -43.4 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 2799 | | policy_loss | 45.6 | | reward | -0.809039 | | std | 1.03 | | value_loss | 1.73 | ------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 2900 | | time_elapsed | 124 | | total_timesteps | 14500 | | train/ | | | entropy_loss | -43.4 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 2899 | | policy_loss | -72.9 | | reward | -2.4910564 | | std | 1.03 | | value_loss | 4.82 | -------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 3000 | | time_elapsed | 128 | | total_timesteps | 15000 | | train/ | | | entropy_loss | -43.4 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 2999 | | policy_loss | -181 | | reward | -0.7101703 | | std | 1.03 | | value_loss | 22.4 | -------------------------------------- ---------------------------------------- | time/ | | | fps | 116 | | iterations | 3100 | | time_elapsed | 132 | | total_timesteps | 15500 | | train/ | | | entropy_loss | -43.4 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 3099 | | policy_loss | -60 | | reward | -0.004172944 | | std | 1.03 | | value_loss | 2.11 | ---------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 3200 | | time_elapsed | 136 | | total_timesteps | 16000 | | train/ | | | entropy_loss | -43.4 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 3199 | | policy_loss | 22.2 | | reward | -0.9293916 | | std | 1.03 | | value_loss | 2.59 | -------------------------------------- ---------------------------------------- | time/ | | | fps | 116 | | iterations | 3300 | | time_elapsed | 141 | | total_timesteps | 16500 | | train/ | | | entropy_loss | -43.5 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 3299 | | policy_loss | -44.3 | | reward | -0.097194135 | | std | 1.03 
| | value_loss | 1.52 | ---------------------------------------- --------------------------------------- | time/ | | | fps | 116 | | iterations | 3400 | | time_elapsed | 145 | | total_timesteps | 17000 | | train/ | | | entropy_loss | -43.5 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 3399 | | policy_loss | 58.4 | | reward | -0.06478584 | | std | 1.03 | | value_loss | 1.98 | --------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 3500 | | time_elapsed | 150 | | total_timesteps | 17500 | | train/ | | | entropy_loss | -43.4 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 3499 | | policy_loss | 14.2 | | reward | 0.53643763 | | std | 1.03 | | value_loss | 0.969 | -------------------------------------- ---------------------------------------- | time/ | | | fps | 116 | | iterations | 3600 | | time_elapsed | 154 | | total_timesteps | 18000 | | train/ | | | entropy_loss | -43.5 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 3599 | | policy_loss | -36.5 | | reward | -0.005426721 | | std | 1.03 | | value_loss | 1.59 | ---------------------------------------- --------------------------------------- | time/ | | | fps | 116 | | iterations | 3700 | | time_elapsed | 158 | | total_timesteps | 18500 | | train/ | | | entropy_loss | -43.6 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 3699 | | policy_loss | 36.6 | | reward | 0.027283445 | | std | 1.03 | | value_loss | 2.08 | --------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 3800 | | time_elapsed | 162 | | total_timesteps | 19000 | | train/ | | | entropy_loss | -43.7 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 3799 | | policy_loss | -2.93 | | reward | 0.38290653 | | std | 1.04 | | value_loss | 0.123 | -------------------------------------- --------------------------------------- | time/ | | | fps | 116 | | iterations | 3900 | | time_elapsed | 167 | | total_timesteps | 19500 | | train/ | | | entropy_loss | -43.7 | | explained_variance | -0.00293 | | learning_rate | 0.0007 | | n_updates | 3899 | | policy_loss | 47.5 | | reward | -0.75602734 | | std | 1.04 | | value_loss | 1.4 | --------------------------------------- ----------------------------------------- | time/ | | | fps | 116 | | iterations | 4000 | | time_elapsed | 171 | | total_timesteps | 20000 | | train/ | | | entropy_loss | -43.8 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 3999 | | policy_loss | -6.69 | | reward | -0.0017761731 | | std | 1.04 | | value_loss | 0.4 | ----------------------------------------- --------------------------------------- | time/ | | | fps | 116 | | iterations | 4100 | | time_elapsed | 176 | | total_timesteps | 20500 | | train/ | | | entropy_loss | -43.8 | | explained_variance | 0.00266 | | learning_rate | 0.0007 | | n_updates | 4099 | | policy_loss | 86 | | reward | -0.24046582 | | std | 1.04 | | value_loss | 5.17 | --------------------------------------- ------------------------------------------ | time/ | | | fps | 116 | | iterations | 4200 | | time_elapsed | 180 | | total_timesteps | 21000 | | train/ | | | entropy_loss | -43.8 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 4199 | | policy_loss | 159 | | reward | -4.6554425e-05 | | std | 1.04 | | value_loss | 19.3 | ------------------------------------------ -------------------------------------- | time/ 
| | | fps | 116 | | iterations | 4300 | | time_elapsed | 185 | | total_timesteps | 21500 | | train/ | | | entropy_loss | -43.7 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 4299 | | policy_loss | 43.8 | | reward | -2.8417304 | | std | 1.04 | | value_loss | 1.45 | -------------------------------------- ------------------------------------------ | time/ | | | fps | 116 | | iterations | 4400 | | time_elapsed | 189 | | total_timesteps | 22000 | | train/ | | | entropy_loss | -43.7 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 4399 | | policy_loss | 12.2 | | reward | -0.00078770204 | | std | 1.04 | | value_loss | 2.35 | ------------------------------------------ ---------------------------------------- | time/ | | | fps | 116 | | iterations | 4500 | | time_elapsed | 193 | | total_timesteps | 22500 | | train/ | | | entropy_loss | -43.8 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 4499 | | policy_loss | 19.3 | | reward | -0.012283918 | | std | 1.04 | | value_loss | 0.295 | ---------------------------------------- ------------------------------------------ | time/ | | | fps | 115 | | iterations | 4600 | | time_elapsed | 198 | | total_timesteps | 23000 | | train/ | | | entropy_loss | -43.8 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 4599 | | policy_loss | 74.3 | | reward | -0.00030953262 | | std | 1.04 | | value_loss | 3.06 | ------------------------------------------ ------------------------------------- | time/ | | | fps | 115 | | iterations | 4700 | | time_elapsed | 202 | | total_timesteps | 23500 | | train/ | | | entropy_loss | -43.9 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 4699 | | policy_loss | 60.3 | | reward | 0.6282372 | | std | 1.05 | | value_loss | 2.53 | ------------------------------------- -------------------------------------- | time/ | | | fps | 115 | | iterations | 4800 | | time_elapsed | 207 | | total_timesteps | 24000 | | train/ | | | entropy_loss | -43.9 | | explained_variance | 5.96e-08 | | learning_rate | 0.0007 | | n_updates | 4799 | | policy_loss | 3.01 | | reward | 0.32407507 | | std | 1.05 | | value_loss | 0.097 | -------------------------------------- -------------------------------------- | time/ | | | fps | 115 | | iterations | 4900 | | time_elapsed | 211 | | total_timesteps | 24500 | | train/ | | | entropy_loss | -43.9 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 4899 | | policy_loss | 225 | | reward | -2.2457426 | | std | 1.05 | | value_loss | 37.6 | -------------------------------------- ------------------------------------- | time/ | | | fps | 115 | | iterations | 5000 | | time_elapsed | 216 | | total_timesteps | 25000 | | train/ | | | entropy_loss | -43.9 | | explained_variance | 5.96e-08 | | learning_rate | 0.0007 | | n_updates | 4999 | | policy_loss | 4.28 | | reward | 1.4869773 | | std | 1.05 | | value_loss | 0.402 | ------------------------------------- ------------------------------------ | time/ | | | fps | 115 | | iterations | 5100 | | time_elapsed | 220 | | total_timesteps | 25500 | | train/ | | | entropy_loss | -44 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 5099 | | policy_loss | 108 | | reward | 0.0 | | std | 1.05 | | value_loss | 6.45 | ------------------------------------ -------------------------------------- | time/ | | | fps | 115 | | iterations | 5200 | | time_elapsed | 224 | | total_timesteps | 26000 | | train/ | | | entropy_loss | -43.9 | | 
explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 5199 | | policy_loss | 9.08 | | reward | 0.35885853 | | std | 1.05 | | value_loss | 0.301 | -------------------------------------- -------------------------------------- | time/ | | | fps | 115 | | iterations | 5300 | | time_elapsed | 229 | | total_timesteps | 26500 | | train/ | | | entropy_loss | -44 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 5299 | | policy_loss | 55.9 | | reward | -0.3154858 | | std | 1.05 | | value_loss | 1.78 | -------------------------------------- -------------------------------------- | time/ | | | fps | 115 | | iterations | 5400 | | time_elapsed | 233 | | total_timesteps | 27000 | | train/ | | | entropy_loss | -44 | | explained_variance | 1.19e-07 | | learning_rate | 0.0007 | | n_updates | 5399 | | policy_loss | 5 | | reward | 0.48877394 | | std | 1.05 | | value_loss | 0.485 | -------------------------------------- ----------------------------------------- | time/ | | | fps | 115 | | iterations | 5500 | | time_elapsed | 237 | | total_timesteps | 27500 | | train/ | | | entropy_loss | -44 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 5499 | | policy_loss | 102 | | reward | -0.0007418807 | | std | 1.05 | | value_loss | 6 | ----------------------------------------- -------------------------------------- | time/ | | | fps | 115 | | iterations | 5600 | | time_elapsed | 242 | | total_timesteps | 28000 | | train/ | | | entropy_loss | -44 | | explained_variance | -0.0266 | | learning_rate | 0.0007 | | n_updates | 5599 | | policy_loss | 176 | | reward | -0.6638814 | | std | 1.05 | | value_loss | 47.8 | -------------------------------------- ----------------------------------------- | time/ | | | fps | 115 | | iterations | 5700 | | time_elapsed | 246 | | total_timesteps | 28500 | | train/ | | | entropy_loss | -44 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 5699 | | policy_loss | -87.8 | | reward | -0.0019212682 | | std | 1.05 | | value_loss | 4.24 | ----------------------------------------- ------------------------------------- | time/ | | | fps | 115 | | iterations | 5800 | | time_elapsed | 250 | | total_timesteps | 29000 | | train/ | | | entropy_loss | -44 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 5799 | | policy_loss | 30.8 | | reward | 2.3667176 | | std | 1.05 | | value_loss | 0.814 | ------------------------------------- ------------------------------------------ | time/ | | | fps | 115 | | iterations | 5900 | | time_elapsed | 254 | | total_timesteps | 29500 | | train/ | | | entropy_loss | -44 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 5899 | | policy_loss | 61.9 | | reward | -0.00036922866 | | std | 1.05 | | value_loss | 2.45 | ------------------------------------------ -------------------------------------- | time/ | | | fps | 115 | | iterations | 6000 | | time_elapsed | 259 | | total_timesteps | 30000 | | train/ | | | entropy_loss | -44 | | explained_variance | 0.342 | | learning_rate | 0.0007 | | n_updates | 5999 | | policy_loss | -67.2 | | reward | -0.3978224 | | std | 1.05 | | value_loss | 2.18 | -------------------------------------- ----------------------------------------- | time/ | | | fps | 115 | | iterations | 6100 | | time_elapsed | 263 | | total_timesteps | 30500 | | train/ | | | entropy_loss | -44 | | explained_variance | -0.0674 | | learning_rate | 0.0007 | | n_updates | 6099 | | policy_loss | 102 | | reward | -0.0011483803 | | std | 1.05 | | value_loss | 6.82 
| ----------------------------------------- ------------------------------------- | time/ | | | fps | 115 | | iterations | 6200 | | time_elapsed | 267 | | total_timesteps | 31000 | | train/ | | | entropy_loss | -44 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 6199 | | policy_loss | 111 | | reward | -0.441761 | | std | 1.05 | | value_loss | 6.7 | ------------------------------------- ----------------------------------------- | time/ | | | fps | 115 | | iterations | 6300 | | time_elapsed | 271 | | total_timesteps | 31500 | | train/ | | | entropy_loss | -44 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 6299 | | policy_loss | 49 | | reward | -0.0017546773 | | std | 1.05 | | value_loss | 8.28 | ----------------------------------------- ------------------------------------ | time/ | | | fps | 115 | | iterations | 6400 | | time_elapsed | 276 | | total_timesteps | 32000 | | train/ | | | entropy_loss | -44.1 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 6399 | | policy_loss | -45.8 | | reward | 0.968053 | | std | 1.05 | | value_loss | 2.24 | ------------------------------------ -------------------------------------- | time/ | | | fps | 115 | | iterations | 6500 | | time_elapsed | 280 | | total_timesteps | 32500 | | train/ | | | entropy_loss | -44.1 | | explained_variance | 1.79e-07 | | learning_rate | 0.0007 | | n_updates | 6499 | | policy_loss | -192 | | reward | -0.6304483 | | std | 1.06 | | value_loss | 26.4 | -------------------------------------- day: 3651, episode: 10 begin_total_asset: 1000000.00 end_total_asset: 3389264.47 total_reward: 2389264.47 total_cost: 37026.78 total_trades: 64155 Sharpe: 0.621 ================================= -------------------------------------- | time/ | | | fps | 115 | | iterations | 6600 | | time_elapsed | 284 | | total_timesteps | 33000 | | train/ | | | entropy_loss | -44.1 | | explained_variance | 0.0293 | | learning_rate | 0.0007 | | n_updates | 6599 | | policy_loss | 3.26 | | reward | -1.1172065 | | std | 1.05 | | value_loss | 0.197 | -------------------------------------- --------------------------------------- | time/ | | | fps | 115 | | iterations | 6700 | | time_elapsed | 289 | | total_timesteps | 33500 | | train/ | | | entropy_loss | -44.2 | | explained_variance | 5.96e-08 | | learning_rate | 0.0007 | | n_updates | 6699 | | policy_loss | 59.6 | | reward | -0.57921684 | | std | 1.06 | | value_loss | 3.36 | --------------------------------------- ---------------------------------------- | time/ | | | fps | 115 | | iterations | 6800 | | time_elapsed | 293 | | total_timesteps | 34000 | | train/ | | | entropy_loss | -44.2 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 6799 | | policy_loss | 39 | | reward | -0.055463795 | | std | 1.06 | | value_loss | 1.07 | ---------------------------------------- -------------------------------------- | time/ | | | fps | 115 | | iterations | 6900 | | time_elapsed | 297 | | total_timesteps | 34500 | | train/ | | | entropy_loss | -44.2 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 6899 | | policy_loss | -4.67 | | reward | 0.24380073 | | std | 1.06 | | value_loss | 2.11 | -------------------------------------- ------------------------------------------ | time/ | | | fps | 115 | | iterations | 7000 | | time_elapsed | 302 | | total_timesteps | 35000 | | train/ | | | entropy_loss | -44.1 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 6999 | | policy_loss | 183 | | reward | 
-0.00011277113 | | std | 1.05 | | value_loss | 17.9 | ------------------------------------------ ------------------------------------- | time/ | | | fps | 115 | | iterations | 7100 | | time_elapsed | 306 | | total_timesteps | 35500 | | train/ | | | entropy_loss | -44.2 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 7099 | | policy_loss | 80.5 | | reward | 1.3815811 | | std | 1.06 | | value_loss | 5.57 | ------------------------------------- ----------------------------------------- | time/ | | | fps | 115 | | iterations | 7200 | | time_elapsed | 310 | | total_timesteps | 36000 | | train/ | | | entropy_loss | -44.2 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 7199 | | policy_loss | 95.9 | | reward | -3.144116e-06 | | std | 1.06 | | value_loss | 5.3 | ----------------------------------------- ------------------------------------- | time/ | | | fps | 115 | | iterations | 7300 | | time_elapsed | 314 | | total_timesteps | 36500 | | train/ | | | entropy_loss | -44.2 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 7299 | | policy_loss | -646 | | reward | 5.0602226 | | std | 1.06 | | value_loss | 320 | ------------------------------------- ----------------------------------------- | time/ | | | fps | 115 | | iterations | 7400 | | time_elapsed | 319 | | total_timesteps | 37000 | | train/ | | | entropy_loss | -44.2 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 7399 | | policy_loss | -15.7 | | reward | -0.0001164199 | | std | 1.06 | | value_loss | 0.363 | ----------------------------------------- -------------------------------------- | time/ | | | fps | 115 | | iterations | 7500 | | time_elapsed | 323 | | total_timesteps | 37500 | | train/ | | | entropy_loss | -44.3 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 7499 | | policy_loss | -218 | | reward | -1.7255528 | | std | 1.06 | | value_loss | 24.3 | -------------------------------------- ------------------------------------------- | time/ | | | fps | 115 | | iterations | 7600 | | time_elapsed | 327 | | total_timesteps | 38000 | | train/ | | | entropy_loss | -44.3 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 7599 | | policy_loss | -6.14 | | reward | -0.000104018785 | | std | 1.06 | | value_loss | 0.101 | ------------------------------------------- ------------------------------------- | time/ | | | fps | 115 | | iterations | 7700 | | time_elapsed | 331 | | total_timesteps | 38500 | | train/ | | | entropy_loss | -44.3 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 7699 | | policy_loss | 59.5 | | reward | 0.4927321 | | std | 1.06 | | value_loss | 2.36 | ------------------------------------- ------------------------------------- | time/ | | | fps | 115 | | iterations | 7800 | | time_elapsed | 336 | | total_timesteps | 39000 | | train/ | | | entropy_loss | -44.4 | | explained_variance | 1.19e-07 | | learning_rate | 0.0007 | | n_updates | 7799 | | policy_loss | -15.6 | | reward | 1.5719095 | | std | 1.06 | | value_loss | 0.427 | ------------------------------------- --------------------------------------- | time/ | | | fps | 115 | | iterations | 7900 | | time_elapsed | 340 | | total_timesteps | 39500 | | train/ | | | entropy_loss | -44.4 | | explained_variance | 1.79e-07 | | learning_rate | 0.0007 | | n_updates | 7899 | | policy_loss | -25.4 | | reward | -0.80941457 | | std | 1.07 | | value_loss | 0.735 | --------------------------------------- -------------------------------------- 
| time/ | | | fps | 116 | | iterations | 8000 | | time_elapsed | 344 | | total_timesteps | 40000 | | train/ | | | entropy_loss | -44.5 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 7999 | | policy_loss | 340 | | reward | -1.1565882 | | std | 1.07 | | value_loss | 88.3 | -------------------------------------- --------------------------------------- | time/ | | | fps | 116 | | iterations | 8100 | | time_elapsed | 349 | | total_timesteps | 40500 | | train/ | | | entropy_loss | -44.5 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 8099 | | policy_loss | -88.4 | | reward | -0.07625728 | | std | 1.07 | | value_loss | 4.96 | --------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 8200 | | time_elapsed | 353 | | total_timesteps | 41000 | | train/ | | | entropy_loss | -44.5 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 8199 | | policy_loss | -38.1 | | reward | 0.75147367 | | std | 1.07 | | value_loss | 1.55 | -------------------------------------- --------------------------------------- | time/ | | | fps | 116 | | iterations | 8300 | | time_elapsed | 357 | | total_timesteps | 41500 | | train/ | | | entropy_loss | -44.6 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 8299 | | policy_loss | 18 | | reward | -0.35836652 | | std | 1.07 | | value_loss | 0.404 | --------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 8400 | | time_elapsed | 361 | | total_timesteps | 42000 | | train/ | | | entropy_loss | -44.7 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 8399 | | policy_loss | 26.7 | | reward | 0.5091559 | | std | 1.07 | | value_loss | 0.767 | ------------------------------------- ----------------------------------------- | time/ | | | fps | 116 | | iterations | 8500 | | time_elapsed | 366 | | total_timesteps | 42500 | | train/ | | | entropy_loss | -44.7 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 8499 | | policy_loss | -22.2 | | reward | -0.0010097293 | | std | 1.07 | | value_loss | 2.03 | ----------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 8600 | | time_elapsed | 370 | | total_timesteps | 43000 | | train/ | | | entropy_loss | -44.6 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 8599 | | policy_loss | -222 | | reward | -1.5789497 | | std | 1.07 | | value_loss | 27 | -------------------------------------- ------------------------------------ | time/ | | | fps | 116 | | iterations | 8700 | | time_elapsed | 374 | | total_timesteps | 43500 | | train/ | | | entropy_loss | -44.7 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 8699 | | policy_loss | -194 | | reward | 0.0 | | std | 1.08 | | value_loss | 125 | ------------------------------------ --------------------------------------- | time/ | | | fps | 116 | | iterations | 8800 | | time_elapsed | 378 | | total_timesteps | 44000 | | train/ | | | entropy_loss | -44.7 | | explained_variance | 0.00142 | | learning_rate | 0.0007 | | n_updates | 8799 | | policy_loss | -68.1 | | reward | -0.01701872 | | std | 1.08 | | value_loss | 2.86 | --------------------------------------- ----------------------------------------- | time/ | | | fps | 116 | | iterations | 8900 | | time_elapsed | 383 | | total_timesteps | 44500 | | train/ | | | entropy_loss | -44.7 | | 
explained_variance | 1.19e-07 | | learning_rate | 0.0007 | | n_updates | 8899 | | policy_loss | 34.6 | | reward | -0.0006273153 | | std | 1.08 | | value_loss | 0.983 | ----------------------------------------- --------------------------------------- | time/ | | | fps | 116 | | iterations | 9000 | | time_elapsed | 387 | | total_timesteps | 45000 | | train/ | | | entropy_loss | -44.8 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 8999 | | policy_loss | 40.8 | | reward | -0.06860598 | | std | 1.08 | | value_loss | 1.97 | --------------------------------------- ------------------------------------ | time/ | | | fps | 116 | | iterations | 9100 | | time_elapsed | 391 | | total_timesteps | 45500 | | train/ | | | entropy_loss | -44.8 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 9099 | | policy_loss | 23.4 | | reward | 0.0 | | std | 1.08 | | value_loss | 0.466 | ------------------------------------ ------------------------------------ | time/ | | | fps | 116 | | iterations | 9200 | | time_elapsed | 395 | | total_timesteps | 46000 | | train/ | | | entropy_loss | -44.8 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 9199 | | policy_loss | -254 | | reward | 4.173793 | | std | 1.08 | | value_loss | 34.6 | ------------------------------------ ------------------------------------- | time/ | | | fps | 116 | | iterations | 9300 | | time_elapsed | 400 | | total_timesteps | 46500 | | train/ | | | entropy_loss | -44.8 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 9299 | | policy_loss | -97.8 | | reward | 2.3512838 | | std | 1.08 | | value_loss | 9.25 | ------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 9400 | | time_elapsed | 404 | | total_timesteps | 47000 | | train/ | | | entropy_loss | -44.8 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 9399 | | policy_loss | 109 | | reward | 1.1928157 | | std | 1.08 | | value_loss | 9.77 | ------------------------------------- ---------------------------------------- | time/ | | | fps | 116 | | iterations | 9500 | | time_elapsed | 408 | | total_timesteps | 47500 | | train/ | | | entropy_loss | -44.8 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 9499 | | policy_loss | -7.39 | | reward | -0.003675337 | | std | 1.08 | | value_loss | 0.172 | ---------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 9600 | | time_elapsed | 412 | | total_timesteps | 48000 | | train/ | | | entropy_loss | -44.8 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 9599 | | policy_loss | -129 | | reward | -0.6974016 | | std | 1.08 | | value_loss | 9.67 | -------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 9700 | | time_elapsed | 417 | | total_timesteps | 48500 | | train/ | | | entropy_loss | -44.8 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 9699 | | policy_loss | -57.6 | | reward | 2.1197095 | | std | 1.08 | | value_loss | 2.62 | ------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 9800 | | time_elapsed | 421 | | total_timesteps | 49000 | | train/ | | | entropy_loss | -44.9 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 9799 | | policy_loss | 88 | | reward | 1.5912246 | | std | 1.08 | | value_loss | 4.91 | 
------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 9900 | | time_elapsed | 425 | | total_timesteps | 49500 | | train/ | | | entropy_loss | -45 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 9899 | | policy_loss | 60.5 | | reward | 1.1694651 | | std | 1.09 | | value_loss | 2.71 | ------------------------------------- ------------------------------------ | time/ | | | fps | 116 | | iterations | 10000 | | time_elapsed | 429 | | total_timesteps | 50000 | | train/ | | | entropy_loss | -45 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 9999 | | policy_loss | -55.4 | | reward | 0.0 | | std | 1.09 | | value_loss | 1.86 | ------------------------------------ -------------------------------------- | time/ | | | fps | 116 | | iterations | 10100 | | time_elapsed | 433 | | total_timesteps | 50500 | | train/ | | | entropy_loss | -45.1 | | explained_variance | 1.19e-07 | | learning_rate | 0.0007 | | n_updates | 10099 | | policy_loss | -68.4 | | reward | -2.3534162 | | std | 1.09 | | value_loss | 3.09 | -------------------------------------- ------------------------------------ | time/ | | | fps | 116 | | iterations | 10200 | | time_elapsed | 438 | | total_timesteps | 51000 | | train/ | | | entropy_loss | -45.1 | | explained_variance | 1.19e-07 | | learning_rate | 0.0007 | | n_updates | 10199 | | policy_loss | 185 | | reward | 0.0 | | std | 1.09 | | value_loss | 15.2 | ------------------------------------ ------------------------------------- | time/ | | | fps | 116 | | iterations | 10300 | | time_elapsed | 442 | | total_timesteps | 51500 | | train/ | | | entropy_loss | -45.2 | | explained_variance | 1.19e-07 | | learning_rate | 0.0007 | | n_updates | 10299 | | policy_loss | 4.95 | | reward | 0.5778505 | | std | 1.09 | | value_loss | 0.168 | ------------------------------------- ----------------------------------------- | time/ | | | fps | 116 | | iterations | 10400 | | time_elapsed | 446 | | total_timesteps | 52000 | | train/ | | | entropy_loss | -45.3 | | explained_variance | 5.96e-08 | | learning_rate | 0.0007 | | n_updates | 10399 | | policy_loss | 34.2 | | reward | -0.0013078297 | | std | 1.1 | | value_loss | 0.852 | ----------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 10500 | | time_elapsed | 450 | | total_timesteps | 52500 | | train/ | | | entropy_loss | -45.3 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 10499 | | policy_loss | 29.1 | | reward | 0.26531887 | | std | 1.1 | | value_loss | 1.01 | -------------------------------------- ------------------------------------ | time/ | | | fps | 116 | | iterations | 10600 | | time_elapsed | 455 | | total_timesteps | 53000 | | train/ | | | entropy_loss | -45.3 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 10599 | | policy_loss | 104 | | reward | 0.0 | | std | 1.1 | | value_loss | 6.46 | ------------------------------------ ------------------------------------- | time/ | | | fps | 116 | | iterations | 10700 | | time_elapsed | 459 | | total_timesteps | 53500 | | train/ | | | entropy_loss | -45.3 | | explained_variance | 5.96e-08 | | learning_rate | 0.0007 | | n_updates | 10699 | | policy_loss | -267 | | reward | 1.9694797 | | std | 1.1 | | value_loss | 35.2 | ------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 10800 | | time_elapsed | 463 | | total_timesteps | 54000 | 
| train/ | | | entropy_loss | -45.2 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 10799 | | policy_loss | 107 | | reward | 0.42830834 | | std | 1.1 | | value_loss | 9.75 | -------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 10900 | | time_elapsed | 467 | | total_timesteps | 54500 | | train/ | | | entropy_loss | -45.3 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 10899 | | policy_loss | -308 | | reward | -8.225515 | | std | 1.1 | | value_loss | 53.5 | ------------------------------------- ------------------------------------------ | time/ | | | fps | 116 | | iterations | 11000 | | time_elapsed | 472 | | total_timesteps | 55000 | | train/ | | | entropy_loss | -45.3 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 10999 | | policy_loss | 37.6 | | reward | -2.8013346e-06 | | std | 1.1 | | value_loss | 1.17 | ------------------------------------------ ---------------------------------------- | time/ | | | fps | 116 | | iterations | 11100 | | time_elapsed | 476 | | total_timesteps | 55500 | | train/ | | | entropy_loss | -45.3 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 11099 | | policy_loss | -7.21 | | reward | 0.0042510955 | | std | 1.1 | | value_loss | 0.139 | ---------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 11200 | | time_elapsed | 480 | | total_timesteps | 56000 | | train/ | | | entropy_loss | -45.3 | | explained_variance | -2.38e-07 | | learning_rate | 0.0007 | | n_updates | 11199 | | policy_loss | 9.84 | | reward | 0.563495 | | std | 1.1 | | value_loss | 0.619 | ------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 11300 | | time_elapsed | 484 | | total_timesteps | 56500 | | train/ | | | entropy_loss | -45.3 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 11299 | | policy_loss | 114 | | reward | 1.1201094 | | std | 1.1 | | value_loss | 8.24 | ------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 11400 | | time_elapsed | 489 | | total_timesteps | 57000 | | train/ | | | entropy_loss | -45.3 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 11399 | | policy_loss | -54.6 | | reward | -7.3706326 | | std | 1.1 | | value_loss | 5.52 | -------------------------------------- ------------------------------------ | time/ | | | fps | 116 | | iterations | 11500 | | time_elapsed | 493 | | total_timesteps | 57500 | | train/ | | | entropy_loss | -45.4 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 11499 | | policy_loss | 23.8 | | reward | 0.0 | | std | 1.1 | | value_loss | 0.336 | ------------------------------------ ------------------------------------ | time/ | | | fps | 116 | | iterations | 11600 | | time_elapsed | 497 | | total_timesteps | 58000 | | train/ | | | entropy_loss | -45.4 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 11599 | | policy_loss | 305 | | reward | 4.001372 | | std | 1.1 | | value_loss | 56.5 | ------------------------------------ ------------------------------------ | time/ | | | fps | 116 | | iterations | 11700 | | time_elapsed | 501 | | total_timesteps | 58500 | | train/ | | | entropy_loss | -45.4 | | explained_variance | -3.52 | | learning_rate | 0.0007 | | n_updates | 11699 | | policy_loss | -47.2 | | reward | 4.020171 | 
| std | 1.1 | | value_loss | 2.55 | ------------------------------------ ------------------------------------- | time/ | | | fps | 116 | | iterations | 11800 | | time_elapsed | 505 | | total_timesteps | 59000 | | train/ | | | entropy_loss | -45.4 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 11799 | | policy_loss | -64.1 | | reward | 1.5797172 | | std | 1.1 | | value_loss | 4.44 | ------------------------------------- ------------------------------------ | time/ | | | fps | 116 | | iterations | 11900 | | time_elapsed | 510 | | total_timesteps | 59500 | | train/ | | | entropy_loss | -45.4 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 11899 | | policy_loss | 297 | | reward | 0.0 | | std | 1.1 | | value_loss | 52 | ------------------------------------ --------------------------------------- | time/ | | | fps | 116 | | iterations | 12000 | | time_elapsed | 514 | | total_timesteps | 60000 | | train/ | | | entropy_loss | -45.4 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 11999 | | policy_loss | -112 | | reward | -0.50271374 | | std | 1.1 | | value_loss | 8.61 | --------------------------------------- ---------------------------------------- | time/ | | | fps | 116 | | iterations | 12100 | | time_elapsed | 518 | | total_timesteps | 60500 | | train/ | | | entropy_loss | -45.4 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 12099 | | policy_loss | -27.9 | | reward | -0.000236683 | | std | 1.1 | | value_loss | 0.555 | ---------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 12200 | | time_elapsed | 522 | | total_timesteps | 61000 | | train/ | | | entropy_loss | -45.4 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 12199 | | policy_loss | -493 | | reward | -5.9762735 | | std | 1.1 | | value_loss | 146 | -------------------------------------- --------------------------------------- | time/ | | | fps | 116 | | iterations | 12300 | | time_elapsed | 527 | | total_timesteps | 61500 | | train/ | | | entropy_loss | -45.5 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 12299 | | policy_loss | 87.3 | | reward | -0.57379526 | | std | 1.1 | | value_loss | 6.86 | --------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 12400 | | time_elapsed | 531 | | total_timesteps | 62000 | | train/ | | | entropy_loss | -45.5 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 12399 | | policy_loss | 242 | | reward | -2.7739487 | | std | 1.1 | | value_loss | 60.8 | -------------------------------------- ------------------------------------------ | time/ | | | fps | 116 | | iterations | 12500 | | time_elapsed | 535 | | total_timesteps | 62500 | | train/ | | | entropy_loss | -45.5 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 12499 | | policy_loss | 44 | | reward | -0.00068280834 | | std | 1.1 | | value_loss | 2.97 | ------------------------------------------ ------------------------------------- | time/ | | | fps | 116 | | iterations | 12600 | | time_elapsed | 539 | | total_timesteps | 63000 | | train/ | | | entropy_loss | -45.5 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 12599 | | policy_loss | 286 | | reward | 1.2796046 | | std | 1.1 | | value_loss | 47.9 | ------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 12700 | | 
time_elapsed | 544 | | total_timesteps | 63500 | | train/ | | | entropy_loss | -45.5 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 12699 | | policy_loss | -39 | | reward | 1.9548224 | | std | 1.11 | | value_loss | 1.34 | ------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 12800 | | time_elapsed | 548 | | total_timesteps | 64000 | | train/ | | | entropy_loss | -45.6 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 12799 | | policy_loss | -37.1 | | reward | 2.5056832 | | std | 1.11 | | value_loss | 1.11 | ------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 12900 | | time_elapsed | 552 | | total_timesteps | 64500 | | train/ | | | entropy_loss | -45.7 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 12899 | | policy_loss | 343 | | reward | 1.0621004 | | std | 1.11 | | value_loss | 65.4 | ------------------------------------- ----------------------------------------- | time/ | | | fps | 116 | | iterations | 13000 | | time_elapsed | 556 | | total_timesteps | 65000 | | train/ | | | entropy_loss | -45.7 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 12999 | | policy_loss | 32.4 | | reward | -0.0010883237 | | std | 1.11 | | value_loss | 4.64 | ----------------------------------------- ------------------------------------ | time/ | | | fps | 116 | | iterations | 13100 | | time_elapsed | 560 | | total_timesteps | 65500 | | train/ | | | entropy_loss | -45.7 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 13099 | | policy_loss | 377 | | reward | 5.629558 | | std | 1.11 | | value_loss | 98 | ------------------------------------ --------------------------------------- | time/ | | | fps | 116 | | iterations | 13200 | | time_elapsed | 565 | | total_timesteps | 66000 | | train/ | | | entropy_loss | -45.7 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 13199 | | policy_loss | -5.61 | | reward | -0.68444264 | | std | 1.11 | | value_loss | 0.279 | --------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 13300 | | time_elapsed | 569 | | total_timesteps | 66500 | | train/ | | | entropy_loss | -45.7 | | explained_variance | 1.19e-07 | | learning_rate | 0.0007 | | n_updates | 13299 | | policy_loss | -24.7 | | reward | -0.1482064 | | std | 1.11 | | value_loss | 1.17 | -------------------------------------- ------------------------------------ | time/ | | | fps | 116 | | iterations | 13400 | | time_elapsed | 573 | | total_timesteps | 67000 | | train/ | | | entropy_loss | -45.7 | | explained_variance | 1.19e-07 | | learning_rate | 0.0007 | | n_updates | 13399 | | policy_loss | 41.4 | | reward | 0.0 | | std | 1.11 | | value_loss | 2.04 | ------------------------------------ ---------------------------------------- | time/ | | | fps | 116 | | iterations | 13500 | | time_elapsed | 577 | | total_timesteps | 67500 | | train/ | | | entropy_loss | -45.7 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 13499 | | policy_loss | 23 | | reward | -0.056845825 | | std | 1.11 | | value_loss | 0.956 | ---------------------------------------- ------------------------------------------ | time/ | | | fps | 116 | | iterations | 13600 | | time_elapsed | 582 | | total_timesteps | 68000 | | train/ | | | entropy_loss | -45.7 | | explained_variance | -2.38e-07 | | learning_rate | 0.0007 | | n_updates 
| 13599 | | policy_loss | -306 | | reward | -0.00051175256 | | std | 1.11 | | value_loss | 52.6 | ------------------------------------------ -------------------------------------- | time/ | | | fps | 116 | | iterations | 13700 | | time_elapsed | 586 | | total_timesteps | 68500 | | train/ | | | entropy_loss | -45.7 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 13699 | | policy_loss | -21.2 | | reward | -1.2614439 | | std | 1.11 | | value_loss | 4.28 | -------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 13800 | | time_elapsed | 590 | | total_timesteps | 69000 | | train/ | | | entropy_loss | -45.8 | | explained_variance | 5.96e-08 | | learning_rate | 0.0007 | | n_updates | 13799 | | policy_loss | -92.4 | | reward | 0.5540486 | | std | 1.12 | | value_loss | 17.6 | ------------------------------------- day: 3651, episode: 20 begin_total_asset: 1000000.00 end_total_asset: 4746174.96 total_reward: 3746174.96 total_cost: 7432.95 total_trades: 48850 Sharpe: 0.803 ================================= -------------------------------------- | time/ | | | fps | 116 | | iterations | 13900 | | time_elapsed | 594 | | total_timesteps | 69500 | | train/ | | | entropy_loss | -45.8 | | explained_variance | 0.383 | | learning_rate | 0.0007 | | n_updates | 13899 | | policy_loss | -99.4 | | reward | 0.44422933 | | std | 1.12 | | value_loss | 4.87 | -------------------------------------- ------------------------------------ | time/ | | | fps | 116 | | iterations | 14000 | | time_elapsed | 598 | | total_timesteps | 70000 | | train/ | | | entropy_loss | -45.8 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 13999 | | policy_loss | -61.4 | | reward | 0.0 | | std | 1.12 | | value_loss | 2.59 | ------------------------------------ ------------------------------------- | time/ | | | fps | 116 | | iterations | 14100 | | time_elapsed | 603 | | total_timesteps | 70500 | | train/ | | | entropy_loss | -45.8 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 14099 | | policy_loss | -50.6 | | reward | 0.9838486 | | std | 1.12 | | value_loss | 1.3 | ------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 14200 | | time_elapsed | 607 | | total_timesteps | 71000 | | train/ | | | entropy_loss | -45.8 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 14199 | | policy_loss | 26.3 | | reward | 2.9133487 | | std | 1.12 | | value_loss | 0.682 | ------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 14300 | | time_elapsed | 611 | | total_timesteps | 71500 | | train/ | | | entropy_loss | -45.9 | | explained_variance | 5.96e-08 | | learning_rate | 0.0007 | | n_updates | 14299 | | policy_loss | -91.8 | | reward | 0.7379675 | | std | 1.12 | | value_loss | 7.59 | ------------------------------------- --------------------------------------- | time/ | | | fps | 116 | | iterations | 14400 | | time_elapsed | 615 | | total_timesteps | 72000 | | train/ | | | entropy_loss | -45.8 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 14399 | | policy_loss | 173 | | reward | -0.49859428 | | std | 1.12 | | value_loss | 25.1 | --------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 14500 | | time_elapsed | 620 | | total_timesteps | 72500 | | train/ | | | entropy_loss | -45.8 | | explained_variance | -1.19e-07 
| | learning_rate | 0.0007 | | n_updates | 14499 | | policy_loss | 140 | | reward | 0.0 | | std | 1.12 | | value_loss | 11.1 | ------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 14600 | | time_elapsed | 624 | | total_timesteps | 73000 | | train/ | | | entropy_loss | -45.9 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 14599 | | policy_loss | -71 | | reward | -1.2649014 | | std | 1.12 | | value_loss | 25.4 | -------------------------------------- --------------------------------------- | time/ | | | fps | 116 | | iterations | 14700 | | time_elapsed | 628 | | total_timesteps | 73500 | | train/ | | | entropy_loss | -45.9 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 14699 | | policy_loss | -21.2 | | reward | -0.23925309 | | std | 1.12 | | value_loss | 0.272 | --------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 14800 | | time_elapsed | 632 | | total_timesteps | 74000 | | train/ | | | entropy_loss | -46 | | explained_variance | 1.79e-07 | | learning_rate | 0.0007 | | n_updates | 14799 | | policy_loss | 112 | | reward | -1.0476866 | | std | 1.12 | | value_loss | 11.3 | -------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 14900 | | time_elapsed | 637 | | total_timesteps | 74500 | | train/ | | | entropy_loss | -46 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 14899 | | policy_loss | -47.4 | | reward | 0.0 | | std | 1.13 | | value_loss | 1.43 | ------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 15000 | | time_elapsed | 641 | | total_timesteps | 75000 | | train/ | | | entropy_loss | -46 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 14999 | | policy_loss | 69.1 | | reward | -2.6626704 | | std | 1.13 | | value_loss | 3.56 | -------------------------------------- ------------------------------------------ | time/ | | | fps | 116 | | iterations | 15100 | | time_elapsed | 645 | | total_timesteps | 75500 | | train/ | | | entropy_loss | -46 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 15099 | | policy_loss | -116 | | reward | -0.00095138035 | | std | 1.13 | | value_loss | 7.88 | ------------------------------------------ ------------------------------------- | time/ | | | fps | 116 | | iterations | 15200 | | time_elapsed | 650 | | total_timesteps | 76000 | | train/ | | | entropy_loss | -46.1 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 15199 | | policy_loss | -32.1 | | reward | 0.3815689 | | std | 1.13 | | value_loss | 3.63 | ------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 15300 | | time_elapsed | 654 | | total_timesteps | 76500 | | train/ | | | entropy_loss | -46.1 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 15299 | | policy_loss | -181 | | reward | -8.710084 | | std | 1.13 | | value_loss | 24.2 | ------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 15400 | | time_elapsed | 658 | | total_timesteps | 77000 | | train/ | | | entropy_loss | -46.1 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 15399 | | policy_loss | -27.6 | | reward | 1.5701025 | | std | 1.13 | | value_loss | 1.08 | 
------------------------------------- ------------------------------------------ | time/ | | | fps | 116 | | iterations | 15500 | | time_elapsed | 662 | | total_timesteps | 77500 | | train/ | | | entropy_loss | -46.1 | | explained_variance | 5.96e-08 | | learning_rate | 0.0007 | | n_updates | 15499 | | policy_loss | -116 | | reward | -9.1413356e-05 | | std | 1.13 | | value_loss | 4.9 | ------------------------------------------ -------------------------------------- | time/ | | | fps | 116 | | iterations | 15600 | | time_elapsed | 667 | | total_timesteps | 78000 | | train/ | | | entropy_loss | -46.1 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 15599 | | policy_loss | 124 | | reward | 0.29586065 | | std | 1.13 | | value_loss | 7.62 | -------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 15700 | | time_elapsed | 671 | | total_timesteps | 78500 | | train/ | | | entropy_loss | -46.2 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 15699 | | policy_loss | 46.5 | | reward | 0.90381014 | | std | 1.13 | | value_loss | 2.67 | -------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 15800 | | time_elapsed | 675 | | total_timesteps | 79000 | | train/ | | | entropy_loss | -46.3 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 15799 | | policy_loss | -52.7 | | reward | 2.7351058 | | std | 1.13 | | value_loss | 1.41 | ------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 15900 | | time_elapsed | 679 | | total_timesteps | 79500 | | train/ | | | entropy_loss | -46.3 | | explained_variance | 1.19e-07 | | learning_rate | 0.0007 | | n_updates | 15899 | | policy_loss | 55.6 | | reward | -5.8838725 | | std | 1.14 | | value_loss | 2.14 | -------------------------------------- ------------------------------------------ | time/ | | | fps | 116 | | iterations | 16000 | | time_elapsed | 684 | | total_timesteps | 80000 | | train/ | | | entropy_loss | -46.2 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 15999 | | policy_loss | 448 | | reward | -2.6554717e-05 | | std | 1.13 | | value_loss | 87.7 | ------------------------------------------ --------------------------------------- | time/ | | | fps | 116 | | iterations | 16100 | | time_elapsed | 688 | | total_timesteps | 80500 | | train/ | | | entropy_loss | -46.3 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 16099 | | policy_loss | 55.9 | | reward | -0.43290994 | | std | 1.14 | | value_loss | 3.32 | --------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 16200 | | time_elapsed | 692 | | total_timesteps | 81000 | | train/ | | | entropy_loss | -46.4 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 16199 | | policy_loss | -28.4 | | reward | -1.3013879 | | std | 1.14 | | value_loss | 0.45 | -------------------------------------- --------------------------------------- | time/ | | | fps | 116 | | iterations | 16300 | | time_elapsed | 697 | | total_timesteps | 81500 | | train/ | | | entropy_loss | -46.4 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 16299 | | policy_loss | -15.8 | | reward | 0.093013614 | | std | 1.14 | | value_loss | 0.882 | --------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 16400 | | 
time_elapsed | 701 | | total_timesteps | 82000 | | train/ | | | entropy_loss | -46.5 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 16399 | | policy_loss | 14.9 | | reward | 0.0 | | std | 1.14 | | value_loss | 0.195 | ------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 16500 | | time_elapsed | 705 | | total_timesteps | 82500 | | train/ | | | entropy_loss | -46.5 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 16499 | | policy_loss | 120 | | reward | 3.5595112 | | std | 1.15 | | value_loss | 8.12 | ------------------------------------- ------------------------------------ | time/ | | | fps | 116 | | iterations | 16600 | | time_elapsed | 709 | | total_timesteps | 83000 | | train/ | | | entropy_loss | -46.5 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 16599 | | policy_loss | -136 | | reward | 0.0 | | std | 1.14 | | value_loss | 9.58 | ------------------------------------ -------------------------------------- | time/ | | | fps | 116 | | iterations | 16700 | | time_elapsed | 714 | | total_timesteps | 83500 | | train/ | | | entropy_loss | -46.5 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 16699 | | policy_loss | -276 | | reward | -0.7146839 | | std | 1.14 | | value_loss | 36.8 | -------------------------------------- ----------------------------------------- | time/ | | | fps | 116 | | iterations | 16800 | | time_elapsed | 718 | | total_timesteps | 84000 | | train/ | | | entropy_loss | -46.5 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 16799 | | policy_loss | 151 | | reward | -0.0031395215 | | std | 1.15 | | value_loss | 20.8 | ----------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 16900 | | time_elapsed | 722 | | total_timesteps | 84500 | | train/ | | | entropy_loss | -46.6 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 16899 | | policy_loss | -21.3 | | reward | -3.3128438 | | std | 1.15 | | value_loss | 0.608 | -------------------------------------- ------------------------------------------ | time/ | | | fps | 116 | | iterations | 17000 | | time_elapsed | 726 | | total_timesteps | 85000 | | train/ | | | entropy_loss | -46.6 | | explained_variance | 1.19e-07 | | learning_rate | 0.0007 | | n_updates | 16999 | | policy_loss | 96.8 | | reward | -0.00041326988 | | std | 1.15 | | value_loss | 5.91 | ------------------------------------------ -------------------------------------- | time/ | | | fps | 116 | | iterations | 17100 | | time_elapsed | 731 | | total_timesteps | 85500 | | train/ | | | entropy_loss | -46.5 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 17099 | | policy_loss | 63.8 | | reward | 0.07509284 | | std | 1.15 | | value_loss | 2.72 | -------------------------------------- --------------------------------------- | time/ | | | fps | 116 | | iterations | 17200 | | time_elapsed | 735 | | total_timesteps | 86000 | | train/ | | | entropy_loss | -46.5 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 17199 | | policy_loss | 13.6 | | reward | -0.51101655 | | std | 1.15 | | value_loss | 0.626 | --------------------------------------- ------------------------------------ | time/ | | | fps | 116 | | iterations | 17300 | | time_elapsed | 739 | | total_timesteps | 86500 | | train/ | | | entropy_loss | -46.5 | | explained_variance | 0 | | learning_rate | 0.0007 | | 
n_updates | 17299 | | policy_loss | -5.31 | | reward | 1.114724 | | std | 1.14 | | value_loss | 0.134 | ------------------------------------ --------------------------------------- | time/ | | | fps | 116 | | iterations | 17400 | | time_elapsed | 744 | | total_timesteps | 87000 | | train/ | | | entropy_loss | -46.6 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 17399 | | policy_loss | -42.1 | | reward | -0.81920934 | | std | 1.15 | | value_loss | 1.35 | --------------------------------------- ------------------------------------ | time/ | | | fps | 116 | | iterations | 17500 | | time_elapsed | 748 | | total_timesteps | 87500 | | train/ | | | entropy_loss | -46.6 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 17499 | | policy_loss | 69.8 | | reward | 0.0 | | std | 1.15 | | value_loss | 2.4 | ------------------------------------ ------------------------------------- | time/ | | | fps | 116 | | iterations | 17600 | | time_elapsed | 752 | | total_timesteps | 88000 | | train/ | | | entropy_loss | -46.6 | | explained_variance | 5.96e-08 | | learning_rate | 0.0007 | | n_updates | 17599 | | policy_loss | 11.5 | | reward | 1.0313632 | | std | 1.15 | | value_loss | 0.439 | ------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 17700 | | time_elapsed | 757 | | total_timesteps | 88500 | | train/ | | | entropy_loss | -46.7 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 17699 | | policy_loss | 115 | | reward | 0.59923035 | | std | 1.15 | | value_loss | 7.45 | -------------------------------------- --------------------------------------- | time/ | | | fps | 116 | | iterations | 17800 | | time_elapsed | 761 | | total_timesteps | 89000 | | train/ | | | entropy_loss | -46.8 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 17799 | | policy_loss | -111 | | reward | -0.12498487 | | std | 1.15 | | value_loss | 6.95 | --------------------------------------- ------------------------------------ | time/ | | | fps | 116 | | iterations | 17900 | | time_elapsed | 766 | | total_timesteps | 89500 | | train/ | | | entropy_loss | -46.8 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 17899 | | policy_loss | -42.7 | | reward | 0.0 | | std | 1.16 | | value_loss | 0.963 | ------------------------------------ -------------------------------------- | time/ | | | fps | 116 | | iterations | 18000 | | time_elapsed | 770 | | total_timesteps | 90000 | | train/ | | | entropy_loss | -46.9 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 17999 | | policy_loss | -274 | | reward | 0.98963755 | | std | 1.16 | | value_loss | 43.4 | -------------------------------------- ------------------------------------ | time/ | | | fps | 116 | | iterations | 18100 | | time_elapsed | 774 | | total_timesteps | 90500 | | train/ | | | entropy_loss | -46.8 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 18099 | | policy_loss | 215 | | reward | 0.0 | | std | 1.16 | | value_loss | 23 | ------------------------------------ ------------------------------------- | time/ | | | fps | 116 | | iterations | 18200 | | time_elapsed | 779 | | total_timesteps | 91000 | | train/ | | | entropy_loss | -46.8 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 18199 | | policy_loss | -301 | | reward | 2.7792392 | | std | 1.16 | | value_loss | 71.4 | ------------------------------------- ------------------------------------ | time/ | | 
| fps | 116 | | iterations | 18300 | | time_elapsed | 783 | | total_timesteps | 91500 | | train/ | | | entropy_loss | -46.9 | | explained_variance | -0.216 | | learning_rate | 0.0007 | | n_updates | 18299 | | policy_loss | 134 | | reward | 0.0 | | std | 1.16 | | value_loss | 8.96 | ------------------------------------ ------------------------------------- | time/ | | | fps | 116 | | iterations | 18400 | | time_elapsed | 787 | | total_timesteps | 92000 | | train/ | | | entropy_loss | -46.9 | | explained_variance | 5.96e-08 | | learning_rate | 0.0007 | | n_updates | 18399 | | policy_loss | -27.3 | | reward | 0.8572168 | | std | 1.16 | | value_loss | 0.713 | ------------------------------------- ------------------------------------------ | time/ | | | fps | 116 | | iterations | 18500 | | time_elapsed | 792 | | total_timesteps | 92500 | | train/ | | | entropy_loss | -47 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 18499 | | policy_loss | -92.7 | | reward | -1.0854058e-05 | | std | 1.16 | | value_loss | 4.5 | ------------------------------------------ ------------------------------------ | time/ | | | fps | 116 | | iterations | 18600 | | time_elapsed | 796 | | total_timesteps | 93000 | | train/ | | | entropy_loss | -47 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 18599 | | policy_loss | 28.6 | | reward | 0.126928 | | std | 1.16 | | value_loss | 0.825 | ------------------------------------ -------------------------------------- | time/ | | | fps | 116 | | iterations | 18700 | | time_elapsed | 800 | | total_timesteps | 93500 | | train/ | | | entropy_loss | -47.1 | | explained_variance | 5.96e-08 | | learning_rate | 0.0007 | | n_updates | 18699 | | policy_loss | -73.7 | | reward | -0.8160884 | | std | 1.17 | | value_loss | 3.9 | -------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 18800 | | time_elapsed | 805 | | total_timesteps | 94000 | | train/ | | | entropy_loss | -47.1 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 18799 | | policy_loss | -76 | | reward | 4.0295825 | | std | 1.17 | | value_loss | 4.75 | ------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 18900 | | time_elapsed | 809 | | total_timesteps | 94500 | | train/ | | | entropy_loss | -47.1 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 18899 | | policy_loss | 206 | | reward | 0.2775372 | | std | 1.17 | | value_loss | 23.8 | ------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 19000 | | time_elapsed | 813 | | total_timesteps | 95000 | | train/ | | | entropy_loss | -47.1 | | explained_variance | 0.375 | | learning_rate | 0.0007 | | n_updates | 18999 | | policy_loss | -52.9 | | reward | 0.10771098 | | std | 1.17 | | value_loss | 3.01 | -------------------------------------- -------------------------------------- | time/ | | | fps | 116 | | iterations | 19100 | | time_elapsed | 817 | | total_timesteps | 95500 | | train/ | | | entropy_loss | -47.1 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 19099 | | policy_loss | -23.3 | | reward | 0.56881994 | | std | 1.17 | | value_loss | 0.797 | -------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 19200 | | time_elapsed | 822 | | total_timesteps | 96000 | | train/ | | | entropy_loss | -47.2 | | 
explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 19199 | | policy_loss | -26 | | reward | 1.4480524 | | std | 1.17 | | value_loss | 1.43 | ------------------------------------- --------------------------------------- | time/ | | | fps | 116 | | iterations | 19300 | | time_elapsed | 826 | | total_timesteps | 96500 | | train/ | | | entropy_loss | -47.2 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 19299 | | policy_loss | -17.1 | | reward | -0.26339805 | | std | 1.17 | | value_loss | 0.236 | --------------------------------------- ------------------------------------ | time/ | | | fps | 116 | | iterations | 19400 | | time_elapsed | 830 | | total_timesteps | 97000 | | train/ | | | entropy_loss | -47.3 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 19399 | | policy_loss | -139 | | reward | 0.0 | | std | 1.17 | | value_loss | 11.6 | ------------------------------------ --------------------------------------- | time/ | | | fps | 116 | | iterations | 19500 | | time_elapsed | 835 | | total_timesteps | 97500 | | train/ | | | entropy_loss | -47.3 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 19499 | | policy_loss | -117 | | reward | -0.32186767 | | std | 1.17 | | value_loss | 16.1 | --------------------------------------- ------------------------------------ | time/ | | | fps | 116 | | iterations | 19600 | | time_elapsed | 839 | | total_timesteps | 98000 | | train/ | | | entropy_loss | -47.3 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 19599 | | policy_loss | 54.5 | | reward | 0.0 | | std | 1.18 | | value_loss | 5.75 | ------------------------------------ ------------------------------------- | time/ | | | fps | 116 | | iterations | 19700 | | time_elapsed | 843 | | total_timesteps | 98500 | | train/ | | | entropy_loss | -47.4 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 19699 | | policy_loss | 71.7 | | reward | 1.6004717 | | std | 1.18 | | value_loss | 7.57 | ------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 19800 | | time_elapsed | 848 | | total_timesteps | 99000 | | train/ | | | entropy_loss | -47.4 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 19799 | | policy_loss | 27.3 | | reward | 0.0 | | std | 1.18 | | value_loss | 0.457 | ------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 19900 | | time_elapsed | 852 | | total_timesteps | 99500 | | train/ | | | entropy_loss | -47.5 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 19899 | | policy_loss | -24.6 | | reward | 0.8102966 | | std | 1.18 | | value_loss | 0.672 | ------------------------------------- ------------------------------------ | time/ | | | fps | 116 | | iterations | 20000 | | time_elapsed | 856 | | total_timesteps | 100000 | | train/ | | | entropy_loss | -47.4 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 19999 | | policy_loss | -103 | | reward | 0.0 | | std | 1.18 | | value_loss | 5.25 | ------------------------------------ ###Markdown Model 2: DDPG ###Code agent = DRLAgent(env = env_train) model_ddpg = agent.get_model("ddpg") trained_ddpg = agent.train_model(model=model_ddpg, tb_log_name='ddpg', total_timesteps=50000) ###Output --------------------------------- | time/ | | | episodes | 4 | | fps | 89 | | time_elapsed | 163 | | total_timesteps | 14608 | | train/ | | | actor_loss | -51.5 | 
| critic_loss | 711 | | learning_rate | 0.001 | | n_updates | 10956 | | reward | 0.0 | --------------------------------- --------------------------------- | time/ | | | episodes | 8 | | fps | 84 | | time_elapsed | 346 | | total_timesteps | 29216 | | train/ | | | actor_loss | -16.1 | | critic_loss | 3.13 | | learning_rate | 0.001 | | n_updates | 25564 | | reward | 0.0 | --------------------------------- day: 3651, episode: 40 begin_total_asset: 1000000.00 end_total_asset: 3401133.75 total_reward: 2401133.75 total_cost: 1039.72 total_trades: 73035 Sharpe: 0.705 ================================= --------------------------------- | time/ | | | episodes | 12 | | fps | 83 | | time_elapsed | 526 | | total_timesteps | 43824 | | train/ | | | actor_loss | -10.4 | | critic_loss | 2.51 | | learning_rate | 0.001 | | n_updates | 40172 | | reward | 0.0 | ---------------------------------
###Markdown Model 3: PPO ###Code agent = DRLAgent(env = env_train) PPO_PARAMS = { "n_steps": 2048, "ent_coef": 0.01, "learning_rate": 0.00025, "batch_size": 128, } model_ppo = agent.get_model("ppo", model_kwargs = PPO_PARAMS) trained_ppo = agent.train_model(model=model_ppo, tb_log_name='ppo', total_timesteps=50000) ###Output _____no_output_____
###Markdown Model 4: TD3 ###Code agent = DRLAgent(env = env_train) TD3_PARAMS = {"batch_size": 100, "buffer_size": 1000000, "learning_rate": 0.001} model_td3 = agent.get_model("td3", model_kwargs = TD3_PARAMS) trained_td3 = agent.train_model(model=model_td3, tb_log_name='td3', total_timesteps=30000) ###Output _____no_output_____
###Markdown Model 5: SAC ###Code agent = DRLAgent(env = env_train) SAC_PARAMS = { "batch_size": 128, "buffer_size": 1000000, "learning_rate": 0.0001, "learning_starts": 100, "ent_coef": "auto_0.1", } model_sac = agent.get_model("sac", model_kwargs = SAC_PARAMS) trained_sac = agent.train_model(model=model_sac, tb_log_name='sac', total_timesteps=80000) ###Output _____no_output_____
###Markdown Trading Assume that we have $1,000,000 of initial capital on 2019-01-01. We use the DDPG model to trade the Dow Jones 30 stocks. Trade: The DRL model needs to be updated periodically in order to take full advantage of the data; ideally we retrain it yearly, quarterly, or monthly. We also need to tune the hyperparameters along the way. In this notebook I only use the in-sample data from 2009-01 to 2018-12 to tune the parameters once, so there is some alpha decay as the trading period extends. Numerous hyperparameters (e.g., the learning rate and the total number of samples to train on) influence the learning process and are usually determined by testing some variations. ###Code trade = data_split(processed_full, '2019-01-01','2021-01-01') e_trade_gym = StockTradingEnv(df = trade, **env_kwargs) # env_trade, obs_trade = e_trade_gym.get_sb_env() trade.head() df_account_value, df_actions = DRLAgent.DRL_prediction( model=trained_ddpg, environment = e_trade_gym) df_account_value.shape df_account_value.tail() df_actions.head() ###Output _____no_output_____
###Markdown Part 7: Backtest Our Strategy Backtesting plays a key role in evaluating the performance of a trading strategy. An automated backtesting tool is preferred because it reduces human error. We usually use the Quantopian pyfolio package to backtest our trading strategies. It is easy to use and consists of various individual plots that provide a comprehensive image of the performance of a trading strategy.
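For intuition about what these automated statistics capture, the headline numbers can also be recomputed by hand from the daily account values. The sketch below is not part of FinRL or pyfolio; `quick_backtest_stats` is a hypothetical helper, and it assumes `df_account_value` carries an `account_value` column as produced by `DRL_prediction` above, with roughly 252 trading days per year and a zero risk-free rate.

```python
import numpy as np
import pandas as pd

def quick_backtest_stats(account_value: pd.DataFrame) -> pd.Series:
    # Hypothetical helper, not a FinRL/pyfolio API: recomputes a few headline
    # statistics from a daily account-value series for illustration only.
    values = account_value["account_value"].astype(float)
    daily_returns = values.pct_change().dropna()

    cumulative_return = values.iloc[-1] / values.iloc[0] - 1
    annual_return = (1 + cumulative_return) ** (252 / len(daily_returns)) - 1
    annual_volatility = daily_returns.std() * np.sqrt(252)
    # Sharpe ratio annualized from daily returns, assuming a zero risk-free rate.
    sharpe_ratio = np.sqrt(252) * daily_returns.mean() / daily_returns.std()
    # Most negative peak-to-trough drop of the account value.
    max_drawdown = (values / values.cummax() - 1).min()

    return pd.Series({
        "Annual return": annual_return,
        "Cumulative returns": cumulative_return,
        "Annual volatility": annual_volatility,
        "Sharpe ratio": sharpe_ratio,
        "Max drawdown": max_drawdown,
    })

# Usage inside this notebook, once DRL_prediction has produced df_account_value:
# print(quick_backtest_stats(df_account_value))
```

pyfolio and `backtest_stats` report more metrics (Calmar, Omega, Sortino, tail ratio, etc.), but the quantities above are the ones most commonly compared against the DJIA baseline computed below.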
7.1 BackTestStats Pass in df_account_value; this information is stored in the env class. ###Code print("==============Get Backtest Results===========") now = datetime.datetime.now().strftime('%Y%m%d-%Hh%M') perf_stats_all = backtest_stats(account_value=df_account_value) perf_stats_all = pd.DataFrame(perf_stats_all) perf_stats_all.to_csv("./"+config.RESULTS_DIR+"/perf_stats_all_"+now+'.csv') #baseline stats print("==============Get Baseline Stats===========") baseline_df = get_baseline( ticker="^DJI", start = '2019-01-01', end = '2021-01-01') stats = backtest_stats(baseline_df, value_col_name = 'close') ###Output ==============Get Baseline Stats=========== [*********************100%***********************] 1 of 1 completed Shape of DataFrame: (506, 8) Annual return 0.144827 Cumulative returns 0.312037 Annual volatility 0.274346 Sharpe ratio 0.632258 Calmar ratio 0.390515 Stability 0.119309 Max drawdown -0.370862 Omega ratio 1.149712 Sortino ratio 0.871240 Skew NaN Kurtosis NaN Tail ratio 0.860739 Daily value at risk -0.033876 dtype: float64
###Markdown 7.2 BackTestPlot ###Code print("==============Compare to DJIA===========") %matplotlib inline # S&P 500: ^GSPC # Dow Jones Index: ^DJI # NASDAQ 100: ^NDX backtest_plot(df_account_value, baseline_ticker = '^DJI', baseline_start = '2019-01-01', baseline_end = '2021-01-01') ###Output ==============Compare to DJIA=========== [*********************100%***********************] 1 of 1 completed Shape of DataFrame: (506, 8)
###Markdown Automated stock trading using FinRL with financial data Trained a Deep Reinforcement Learning model using FinRL and companies' financial ratios, and then backtested the model to examine how well-trained the model is. * This Google Colaboratory notebook is based on the tutorial of FinRL: https://towardsdatascience.com/finrl-for-quantitative-finance-tutorial-for-multiple-stock-trading-7b00763b7530 * This is the final project of the alumni-mentored research project at Columbia University, Application of Reinforcement Learning to Finance, mentored by Bruce Yang from AI4Finance. * For a more detailed explanation, please check out my Medium post: https://medium.com/@mariko.sawada1/automated-stock-trading-with-deep-reinforcement-learning-and-financial-data-a63286ccbe2b Content * [1. Problem Definition](0) * [2. Getting Started - Load Python packages](1) * [2.1. Install Packages](1.1) * [2.2. Check Additional Packages](1.2) * [2.3. Import Packages](1.3) * [2.4. Create Folders](1.4) * [3. Download Data](2) * [4. Preprocess Fundamental Data](3) * [4-1 Import financial data](3.1) * [4-2 Specify items needed to calculate financial ratios](3.2) * [4-3 Calculate financial ratios](3.3) * [4-4 Deal with NAs and infinite values](3.4) * [4-5 Merge stock price data and ratios into one dataframe](3.5) * [4-6 Calculate market valuation ratios using daily stock price data](3.6) * [5. Build Environment](4) * [5.1. Training & Trade Data Split](4.1) * [5.2. User-defined Environment](4.2) * [5.3. Initialize Environment](4.3) * [6. Implement DRL Algorithms](5) * [7. Backtesting Performance](6) * [7.1. BackTestStats](6.1) * [7.2. BackTestPlot](6.2) * [7.3. Baseline Stats](6.3) * [7.4. Compare to Stock Market Index](6.4) Part 1. Problem Definition This problem is to design an automated trading solution for single stock trading. We model the stock trading process as a Markov Decision Process (MDP).
We then formulate our trading goal as a maximization problem. The algorithm is trained using Deep Reinforcement Learning (DRL) algorithms, and the components of the reinforcement learning environment are: * Action: The action space describes the allowed actions through which the agent interacts with the environment. Normally, a ∈ A includes three actions: a ∈ {−1, 0, 1}, where −1, 0, 1 represent selling, holding, and buying one stock. Also, an action can be carried out on multiple shares. We use an action space {−k, ..., −1, 0, 1, ..., k}, where k denotes the number of shares. For example, "Buy 10 shares of AAPL" or "Sell 10 shares of AAPL" are 10 or −10, respectively. * Reward function: r(s, a, s′) is the incentive mechanism for an agent to learn a better action. Here it is the change of the portfolio value when action a is taken at state s and we arrive at the new state s′, i.e., r(s, a, s′) = v′ − v, where v′ and v represent the portfolio values at states s′ and s, respectively. * State: The state space describes the observations that the agent receives from the environment. Just as a human trader needs to analyze various information before executing a trade, so our trading agent observes many different features to better learn in an interactive environment. * Environment: Dow 30 constituents. The data of the single stock that we will be using for this case study is obtained from the Yahoo Finance API. The data contains Open-High-Low-Close prices and volume. Part 2. Load Python Packages 2.1. Install all the packages through the FinRL library ###Code ## install finrl library %pip install git+https://github.com/AI4Finance-LLC/FinRL-Library.git ###Output Looking in indexes: https://mirror.sjtu.edu.cn/pypi/web/simple Collecting git+https://github.com/AI4Finance-LLC/FinRL-Library.git Cloning https://github.com/AI4Finance-LLC/FinRL-Library.git to /tmp/pip-req-build-ynujm_zf Running command git clone -q https://github.com/AI4Finance-LLC/FinRL-Library.git /tmp/pip-req-build-ynujm_zf Resolved https://github.com/AI4Finance-LLC/FinRL-Library.git to commit 44b82fb0c7a9b9aebc59265b0ff20295ef4119f3 Collecting pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2 Cloning https://github.com/quantopian/pyfolio.git to /tmp/pip-install-ja2vgv06/pyfolio_0da40ac97a354b908e4fcbde344ba2d7 Running command git clone -q https://github.com/quantopian/pyfolio.git /tmp/pip-install-ja2vgv06/pyfolio_0da40ac97a354b908e4fcbde344ba2d7 Resolved https://github.com/quantopian/pyfolio.git to commit 4b901f6d73aa02ceb6d04b7d83502e5c6f2e81aa Collecting elegantrl@ git+https://github.com/AI4Finance-Foundation/ElegantRL.git#egg=elegantrl Cloning https://github.com/AI4Finance-Foundation/ElegantRL.git to /tmp/pip-install-ja2vgv06/elegantrl_8779a3744a2946d3b2f8252427b25215 Running command git clone -q https://github.com/AI4Finance-Foundation/ElegantRL.git /tmp/pip-install-ja2vgv06/elegantrl_8779a3744a2946d3b2f8252427b25215 Resolved https://github.com/AI4Finance-Foundation/ElegantRL.git to commit fb35e25f01c50af61fa4697824025be50b2e53f1 Requirement already satisfied: numpy>=1.17.3 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (1.21.2) Requirement already satisfied: pandas>=1.1.5 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (1.3.5) Requirement already satisfied: stockstats>=0.4.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (0.4.1) Requirement already satisfied: yfinance in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from
finrl==0.3.5) (0.1.70) Requirement already satisfied: elegantrl in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (0.3.3) Requirement already satisfied: matplotlib in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (3.5.1) Requirement already satisfied: scikit-learn>=0.21.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (1.0.2) Requirement already satisfied: gym>=0.17 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (0.19.0) Requirement already satisfied: stable-baselines3[extra] in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (1.4.0) Requirement already satisfied: ray[default] in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (1.11.0) Requirement already satisfied: lz4 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (4.0.0) Requirement already satisfied: tensorboardX in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (2.5) Requirement already satisfied: gputil in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (1.4.0) Requirement already satisfied: exchange_calendars in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (3.6.1) Requirement already satisfied: alpaca_trade_api in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (1.2.3) Requirement already satisfied: ccxt>=1.66.32 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (1.72.98) Requirement already satisfied: jqdatasdk in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (1.8.10) Requirement already satisfied: wrds in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (3.1.1) Requirement already satisfied: pytest in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (7.1.1) Requirement already satisfied: setuptools==59.5.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (59.5.0) Requirement already satisfied: wheel>=0.33.6 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (0.37.1) Requirement already satisfied: pre-commit in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from finrl==0.3.5) (2.17.0) Requirement already satisfied: aiodns>=1.1.1 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ccxt>=1.66.32->finrl==0.3.5) (3.0.0) Requirement already satisfied: certifi>=2018.1.18 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ccxt>=1.66.32->finrl==0.3.5) (2021.10.8) Requirement already satisfied: yarl==1.7.2 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ccxt>=1.66.32->finrl==0.3.5) (1.7.2) Requirement already satisfied: cryptography>=2.6.1 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ccxt>=1.66.32->finrl==0.3.5) (36.0.2) Requirement already satisfied: requests>=2.18.4 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ccxt>=1.66.32->finrl==0.3.5) (2.27.1) Requirement already satisfied: aiohttp>=3.8 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ccxt>=1.66.32->finrl==0.3.5) (3.8.1) Requirement already satisfied: multidict>=4.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from 
yarl==1.7.2->ccxt>=1.66.32->finrl==0.3.5) (6.0.2) Requirement already satisfied: idna>=2.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from yarl==1.7.2->ccxt>=1.66.32->finrl==0.3.5) (3.3) Requirement already satisfied: typing-extensions>=3.7.4 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from yarl==1.7.2->ccxt>=1.66.32->finrl==0.3.5) (3.10.0.2) Requirement already satisfied: pycares>=4.0.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from aiodns>=1.1.1->ccxt>=1.66.32->finrl==0.3.5) (4.1.2) Requirement already satisfied: async-timeout<5.0,>=4.0.0a3 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from aiohttp>=3.8->ccxt>=1.66.32->finrl==0.3.5) (4.0.2) Requirement already satisfied: aiosignal>=1.1.2 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from aiohttp>=3.8->ccxt>=1.66.32->finrl==0.3.5) (1.2.0) Requirement already satisfied: charset-normalizer<3.0,>=2.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from aiohttp>=3.8->ccxt>=1.66.32->finrl==0.3.5) (2.0.12) Requirement already satisfied: asynctest==0.13.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from aiohttp>=3.8->ccxt>=1.66.32->finrl==0.3.5) (0.13.0) Requirement already satisfied: attrs>=17.3.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from aiohttp>=3.8->ccxt>=1.66.32->finrl==0.3.5) (21.4.0) Requirement already satisfied: frozenlist>=1.1.1 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from aiohttp>=3.8->ccxt>=1.66.32->finrl==0.3.5) (1.3.0) Requirement already satisfied: cffi>=1.12 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from cryptography>=2.6.1->ccxt>=1.66.32->finrl==0.3.5) (1.15.0) Requirement already satisfied: pycparser in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from cffi>=1.12->cryptography>=2.6.1->ccxt>=1.66.32->finrl==0.3.5) (2.21) Requirement already satisfied: cloudpickle<1.7.0,>=1.2.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from gym>=0.17->finrl==0.3.5) (1.6.0) Requirement already satisfied: python-dateutil>=2.7.3 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from pandas>=1.1.5->finrl==0.3.5) (2.8.2) Requirement already satisfied: pytz>=2017.3 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from pandas>=1.1.5->finrl==0.3.5) (2022.1) Requirement already satisfied: six>=1.5 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from python-dateutil>=2.7.3->pandas>=1.1.5->finrl==0.3.5) (1.16.0) Requirement already satisfied: urllib3<1.27,>=1.21.1 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from requests>=2.18.4->ccxt>=1.66.32->finrl==0.3.5) (1.26.9) Requirement already satisfied: joblib>=0.11 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from scikit-learn>=0.21.0->finrl==0.3.5) (1.1.0) Requirement already satisfied: threadpoolctl>=2.0.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from scikit-learn>=0.21.0->finrl==0.3.5) (3.1.0) Requirement already satisfied: scipy>=1.1.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from scikit-learn>=0.21.0->finrl==0.3.5) (1.7.3) Requirement already satisfied: websockets<10,>=8.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from alpaca_trade_api->finrl==0.3.5) (9.1) Requirement already satisfied: websocket-client<2,>=0.56.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages 
(from alpaca_trade_api->finrl==0.3.5) (1.3.1) Requirement already satisfied: msgpack==1.0.2 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from alpaca_trade_api->finrl==0.3.5) (1.0.2) Requirement already satisfied: pybullet in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from elegantrl@ git+https://github.com/AI4Finance-Foundation/ElegantRL.git#egg=elegantrl->finrl==0.3.5) (3.2.1) Requirement already satisfied: torch in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from elegantrl@ git+https://github.com/AI4Finance-Foundation/ElegantRL.git#egg=elegantrl->finrl==0.3.5) (1.10.2) Requirement already satisfied: opencv-python in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from elegantrl@ git+https://github.com/AI4Finance-Foundation/ElegantRL.git#egg=elegantrl->finrl==0.3.5) (4.5.5.64) Requirement already satisfied: box2d-py in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from elegantrl@ git+https://github.com/AI4Finance-Foundation/ElegantRL.git#egg=elegantrl->finrl==0.3.5) (2.3.8) Requirement already satisfied: korean-lunar-calendar in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from exchange_calendars->finrl==0.3.5) (0.2.1) Requirement already satisfied: pyluach in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from exchange_calendars->finrl==0.3.5) (1.4.0) Requirement already satisfied: toolz in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from exchange_calendars->finrl==0.3.5) (0.11.2) Requirement already satisfied: thriftpy2>=0.3.9 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from jqdatasdk->finrl==0.3.5) (0.4.14) Requirement already satisfied: pymysql>=0.7.6 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from jqdatasdk->finrl==0.3.5) (1.0.2) Requirement already satisfied: SQLAlchemy>=1.2.8 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from jqdatasdk->finrl==0.3.5) (1.4.32) Requirement already satisfied: greenlet!=0.4.17 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from SQLAlchemy>=1.2.8->jqdatasdk->finrl==0.3.5) (1.1.2) Requirement already satisfied: importlib-metadata in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from SQLAlchemy>=1.2.8->jqdatasdk->finrl==0.3.5) (4.11.3) Requirement already satisfied: ply<4.0,>=3.4 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from thriftpy2>=0.3.9->jqdatasdk->finrl==0.3.5) (3.11) Requirement already satisfied: zipp>=0.5 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from importlib-metadata->SQLAlchemy>=1.2.8->jqdatasdk->finrl==0.3.5) (3.7.0) Requirement already satisfied: pillow>=6.2.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from matplotlib->finrl==0.3.5) (9.0.1) Requirement already satisfied: pyparsing>=2.2.1 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from matplotlib->finrl==0.3.5) (3.0.7) Requirement already satisfied: fonttools>=4.22.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from matplotlib->finrl==0.3.5) (4.31.1) Requirement already satisfied: cycler>=0.10 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from matplotlib->finrl==0.3.5) (0.11.0) Requirement already satisfied: kiwisolver>=1.0.1 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from matplotlib->finrl==0.3.5) (1.4.0) Requirement already satisfied: packaging>=20.0 in 
/home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from matplotlib->finrl==0.3.5) (21.3) Requirement already satisfied: pyyaml>=5.1 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from pre-commit->finrl==0.3.5) (6.0) Requirement already satisfied: nodeenv>=0.11.1 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from pre-commit->finrl==0.3.5) (1.6.0) Requirement already satisfied: toml in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from pre-commit->finrl==0.3.5) (0.10.2) Requirement already satisfied: identify>=1.0.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from pre-commit->finrl==0.3.5) (2.4.12) Requirement already satisfied: cfgv>=2.0.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from pre-commit->finrl==0.3.5) (3.3.1) Requirement already satisfied: virtualenv>=20.0.8 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from pre-commit->finrl==0.3.5) (20.13.4) Requirement already satisfied: platformdirs<3,>=2 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from virtualenv>=20.0.8->pre-commit->finrl==0.3.5) (2.5.1) Requirement already satisfied: filelock<4,>=3.2 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from virtualenv>=20.0.8->pre-commit->finrl==0.3.5) (3.6.0) Requirement already satisfied: distlib<1,>=0.3.1 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from virtualenv>=20.0.8->pre-commit->finrl==0.3.5) (0.3.4) Requirement already satisfied: ipython>=3.2.3 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.5) (7.32.0) Requirement already satisfied: seaborn>=0.7.1 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.5) (0.11.2) Requirement already satisfied: empyrical>=0.5.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.5) (0.5.5) Requirement already satisfied: pandas-datareader>=0.2 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from empyrical>=0.5.0->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.5) (0.10.0) Requirement already satisfied: pygments in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.5) (2.11.2) Requirement already satisfied: jedi>=0.16 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.5) (0.18.1) Requirement already satisfied: decorator in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.5) (5.1.1) Requirement already satisfied: matplotlib-inline in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.5) (0.1.3) Requirement already satisfied: traitlets>=4.2 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.5) (5.1.1) Requirement 
already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.5) (3.0.28) Requirement already satisfied: pickleshare in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.5) (0.7.5) Requirement already satisfied: backcall in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.5) (0.2.0) Requirement already satisfied: pexpect>4.3 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.5) (4.8.0) Requirement already satisfied: parso<0.9.0,>=0.8.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from jedi>=0.16->ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.5) (0.8.3) Requirement already satisfied: lxml in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from pandas-datareader>=0.2->empyrical>=0.5.0->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.5) (4.8.0) Requirement already satisfied: ptyprocess>=0.5 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from pexpect>4.3->ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.5) (0.7.0) Requirement already satisfied: wcwidth in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.5) (0.2.5) Requirement already satisfied: tomli>=1.0.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from pytest->finrl==0.3.5) (2.0.1) Requirement already satisfied: py>=1.8.2 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from pytest->finrl==0.3.5) (1.11.0) Requirement already satisfied: pluggy<2.0,>=0.12 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from pytest->finrl==0.3.5) (1.0.0) Requirement already satisfied: iniconfig in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from pytest->finrl==0.3.5) (1.1.1) Requirement already satisfied: redis>=3.5.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ray[default]->finrl==0.3.5) (4.1.4) Requirement already satisfied: grpcio<=1.43.0,>=1.28.1 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ray[default]->finrl==0.3.5) (1.43.0) Requirement already satisfied: jsonschema in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ray[default]->finrl==0.3.5) (4.4.0) Requirement already satisfied: click>=7.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ray[default]->finrl==0.3.5) (8.0.4) Requirement already satisfied: protobuf>=3.15.3 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ray[default]->finrl==0.3.5) (3.19.4) Requirement already satisfied: colorful in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ray[default]->finrl==0.3.5) (0.5.4) Requirement already satisfied: smart-open in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from 
ray[default]->finrl==0.3.5) (5.2.1) Requirement already satisfied: gpustat>=1.0.0b1 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ray[default]->finrl==0.3.5) (1.0.0b1) Requirement already satisfied: aioredis<2 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ray[default]->finrl==0.3.5) (1.3.1) Requirement already satisfied: prometheus-client>=0.7.1 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ray[default]->finrl==0.3.5) (0.13.1) Requirement already satisfied: aiohttp-cors in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ray[default]->finrl==0.3.5) (0.7.0) Requirement already satisfied: py-spy>=0.2.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ray[default]->finrl==0.3.5) (0.3.11) Requirement already satisfied: opencensus in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ray[default]->finrl==0.3.5) (0.8.0) Requirement already satisfied: hiredis in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from aioredis<2->ray[default]->finrl==0.3.5) (2.0.0) Requirement already satisfied: nvidia-ml-py3>=7.352.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from gpustat>=1.0.0b1->ray[default]->finrl==0.3.5) (7.352.0) Requirement already satisfied: blessed>=1.17.1 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from gpustat>=1.0.0b1->ray[default]->finrl==0.3.5) (1.19.1) Requirement already satisfied: psutil in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from gpustat>=1.0.0b1->ray[default]->finrl==0.3.5) (5.9.0) Requirement already satisfied: deprecated>=1.2.3 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from redis>=3.5.0->ray[default]->finrl==0.3.5) (1.2.13) Requirement already satisfied: wrapt<2,>=1.10 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from deprecated>=1.2.3->redis>=3.5.0->ray[default]->finrl==0.3.5) (1.14.0) Requirement already satisfied: importlib-resources>=1.4.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from jsonschema->ray[default]->finrl==0.3.5) (5.4.0) Requirement already satisfied: pyrsistent!=0.17.0,!=0.17.1,!=0.17.2,>=0.14.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from jsonschema->ray[default]->finrl==0.3.5) (0.18.1) Requirement already satisfied: opencensus-context==0.1.2 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from opencensus->ray[default]->finrl==0.3.5) (0.1.2) Requirement already satisfied: google-api-core<3.0.0,>=1.0.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from opencensus->ray[default]->finrl==0.3.5) (2.7.1) Requirement already satisfied: google-auth<3.0dev,>=1.25.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.5) (2.6.2) Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.52.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.5) (1.56.0) Requirement already satisfied: pyasn1-modules>=0.2.1 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.5) (0.2.8) Requirement already satisfied: rsa<5,>=3.1.4 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from 
google-auth<3.0dev,>=1.25.0->google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.5) (4.8) Requirement already satisfied: cachetools<6.0,>=2.0.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from google-auth<3.0dev,>=1.25.0->google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.5) (5.0.0) Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<3.0dev,>=1.25.0->google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.5) (0.4.8) Requirement already satisfied: tabulate in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from ray[default]->finrl==0.3.5) (0.8.9) Requirement already satisfied: tensorboard>=2.2.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from stable-baselines3[extra]->finrl==0.3.5) (2.8.0) Requirement already satisfied: atari-py==0.2.6 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from stable-baselines3[extra]->finrl==0.3.5) (0.2.6) Requirement already satisfied: absl-py>=0.4 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.5) (1.0.0) Requirement already satisfied: markdown>=2.6.8 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.5) (3.3.6) Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.5) (1.8.1) Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.5) (0.4.6) Requirement already satisfied: werkzeug>=0.11.15 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.5) (2.0.3) Requirement already satisfied: tensorboard-data-server<0.7.0,>=0.6.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.5) (0.6.1) Requirement already satisfied: requests-oauthlib>=0.7.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.5) (1.3.1) Requirement already satisfied: oauthlib>=3.0.0 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.5) (3.2.0) Requirement already satisfied: psycopg2-binary in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from wrds->finrl==0.3.5) (2.9.3) Requirement already satisfied: mock in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from wrds->finrl==0.3.5) (4.0.3) Requirement already satisfied: multitasking>=0.0.7 in /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages (from yfinance->finrl==0.3.5) (0.0.10) Note: you may need to restart the kernel to use updated packages. ###Markdown 2.2. Check if the additional packages needed are present, if not install them. * Yahoo Finance API* pandas* numpy* matplotlib* stockstats* OpenAI gym* stable-baselines* tensorflow* pyfolio 2.3. 
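A minimal sketch of such a check is given below: it simply tries to import each package and reports anything missing. The module names used here are assumptions about how the packages above are imported (e.g. yfinance for the Yahoo Finance API, gym for OpenAI gym, stable_baselines3 for stable-baselines), so adjust the list as needed.
###Code
# Minimal dependency check (illustrative sketch): try to import each required
# package and report the ones that are missing. The module names are assumptions
# (e.g. the Yahoo Finance API is imported as `yfinance`).
import importlib

required = ['yfinance', 'pandas', 'numpy', 'matplotlib',
            'stockstats', 'gym', 'stable_baselines3', 'pyfolio']
missing = []
for name in required:
    try:
        importlib.import_module(name)
    except ImportError:
        missing.append(name)

print('Missing packages:', missing if missing else 'none')
# Anything reported as missing can then be installed, e.g. with `%pip install <package>`.
###Output
_____no_output_____
###Markdown 2.3.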
Import Packages ###Code import pandas as pd import numpy as np import matplotlib import matplotlib.pyplot as plt # matplotlib.use('Agg') import datetime %matplotlib inline from finrl import config from finrl import config_tickers from finrl.finrl_meta.preprocessor.yahoodownloader import YahooDownloader from finrl.finrl_meta.preprocessor.preprocessors import FeatureEngineer, data_split from finrl.finrl_meta.env_stock_trading.env_stocktrading import StockTradingEnv from finrl.agents.stablebaselines3.models import DRLAgent from finrl.plot import backtest_stats, backtest_plot, get_daily_return, get_baseline from pprint import pprint import sys sys.path.append("../FinRL-Library") import itertools ###Output /home/henry/miniconda3/envs/finrl/lib/python3.7/site-packages/pyfolio/pos.py:27: UserWarning: Module "zipline.assets" not found; multipliers will not be applied to position notionals. 'Module "zipline.assets" not found; multipliers will not be applied' ###Markdown 2.4. Create Folders ###Code import os if not os.path.exists("./" + config.DATA_SAVE_DIR): os.makedirs("./" + config.DATA_SAVE_DIR) if not os.path.exists("./" + config.TRAINED_MODEL_DIR): os.makedirs("./" + config.TRAINED_MODEL_DIR) if not os.path.exists("./" + config.TENSORBOARD_LOG_DIR): os.makedirs("./" + config.TENSORBOARD_LOG_DIR) if not os.path.exists("./" + config.RESULTS_DIR): os.makedirs("./" + config.RESULTS_DIR) ###Output _____no_output_____ ###Markdown Part 3. Download Stock Data from Yahoo FinanceYahoo Finance is a website that provides stock data, financial news, financial reports, etc. All the data provided by Yahoo Finance is free.* FinRL uses a class **YahooDownloader** to fetch data from Yahoo Finance API* Call Limit: Using the Public API (without authentication), you are limited to 2,000 requests per hour per IP (or up to a total of 48,000 requests a day). -----class YahooDownloader: Provides methods for retrieving daily stock data from Yahoo Finance API Attributes ---------- start_date : str start date of the data (modified from config.py) end_date : str end date of the data (modified from config.py) ticker_list : list a list of stock tickers (modified from config.py) Methods ------- fetch_data() Fetches data from yahoo API ###Code print(config_tickers.DOW_30_TICKER) df = YahooDownloader(start_date = '2009-01-01', end_date = '2021-01-01', ticker_list = config_tickers.DOW_30_TICKER).fetch_data() df.shape df.head() df['date'] = pd.to_datetime(df['date'],format='%Y-%m-%d') df.sort_values(['date','tic'],ignore_index=True).head() ###Output _____no_output_____ ###Markdown Part 4: Preprocess fundamental data- Import finanical data downloaded from Compustat via WRDS(Wharton Research Data Service)- Preprocess the dataset and calculate financial ratios- Add those ratios to the price data preprocessed in Part 3- Calculate price-related ratios such as P/E and P/B 4-1 Import the financial data ###Code # Import fundamental data from my GitHub repository url = 'https://raw.githubusercontent.com/mariko-sawada/FinRL_with_fundamental_data/main/dow_30_fundamental_wrds.csv' fund = pd.read_csv(url) # Check the imported dataset fund.head() ###Output _____no_output_____ ###Markdown 4-2 Specify items needed to calculate financial ratios- To know more about the data description of the dataset, please check WRDS's website(https://wrds-www.wharton.upenn.edu/). Login will be required. 
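Before selecting columns in the next cell, it can help to confirm that the imported dataset actually contains the required Compustat items and to inspect its coverage. The following is a minimal, optional sketch; `required_items` is an illustrative subset of the item list defined in the next cell, and it assumes the `fund` dataframe loaded in 4-1.
###Code
# Optional sanity check (illustrative sketch): verify that the fundamental
# dataset contains the Compustat items selected in the next cell, and inspect
# its coverage. Assumes the `fund` dataframe loaded in 4-1.
required_items = ['datadate', 'tic', 'oiadpq', 'revtq', 'niq', 'atq', 'teqq']
missing_cols = [c for c in required_items if c not in fund.columns]
print('Missing columns:', missing_cols if missing_cols else 'none')
print('Number of tickers:', fund['tic'].nunique())
print('Reporting dates from', fund['datadate'].min(), 'to', fund['datadate'].max())
###Output
_____no_output_____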
###Code
# List items that are used to calculate financial ratios
items = [
    'datadate', # Date
    'tic', # Ticker
    'oiadpq', # Quarterly operating income
    'revtq', # Quarterly revenue
    'niq', # Quarterly net income
    'atq', # Total asset
    'teqq', # Shareholder's equity
    'epspiy', # EPS (Basic) incl. Extraordinary items
    'ceqq', # Common Equity
    'cshoq', # Common Shares Outstanding
    'dvpspq', # Dividends per share
    'actq', # Current assets
    'lctq', # Current liabilities
    'cheq', # Cash & Equivalent
    'rectq', # Receivables
    'cogsq', # Cost of Goods Sold
    'invtq', # Inventories
    'apq', # Account payable
    'dlttq', # Long term debt
    'dlcq', # Debt in current liabilities
    'ltq' # Liabilities
]

# Omit items that will not be used
fund_data = fund[items]

# Rename column names for the sake of readability
fund_data = fund_data.rename(columns={
    'datadate':'date', # Date
    'oiadpq':'op_inc_q', # Quarterly operating income
    'revtq':'rev_q', # Quarterly revenue
    'niq':'net_inc_q', # Quarterly net income
    'atq':'tot_assets', # Assets
    'teqq':'sh_equity', # Shareholder's equity
    'epspiy':'eps_incl_ex', # EPS (Basic) incl. Extraordinary items
    'ceqq':'com_eq', # Common Equity
    'cshoq':'sh_outstanding', # Common Shares Outstanding
    'dvpspq':'div_per_sh', # Dividends per share
    'actq':'cur_assets', # Current assets
    'lctq':'cur_liabilities', # Current liabilities
    'cheq':'cash_eq', # Cash & Equivalent
    'rectq':'receivables', # Receivables
    'cogsq':'cogs_q', # Cost of Goods Sold
    'invtq':'inventories', # Inventories
    'apq': 'payables', # Account payable
    'dlttq':'long_debt', # Long term debt
    'dlcq':'short_debt', # Debt in current liabilities
    'ltq':'tot_liabilities' # Liabilities
})

# Check the data
fund_data.head()
###Output
_____no_output_____
###Markdown 4-3 Calculate financial ratios
- For items from Profit/Loss statements, we calculate LTM (Last Twelve Months) figures and use them to derive profitability-related ratios such as Operating Margin and ROE.
For items from balance sheets, we use the numbers on the day.- To check the definitions of the financial ratios calculated here, please refer to CFI's website: https://corporatefinanceinstitute.com/resources/knowledge/finance/financial-ratios/ ###Code # Calculate financial ratios date = pd.to_datetime(fund_data['date'],format='%Y%m%d') tic = fund_data['tic'].to_frame('tic') # Profitability ratios # Operating Margin OPM = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='OPM') for i in range(0, fund_data.shape[0]): if i-3 < 0: OPM[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: OPM.iloc[i] = np.nan else: OPM.iloc[i] = np.sum(fund_data['op_inc_q'].iloc[i-3:i])/np.sum(fund_data['rev_q'].iloc[i-3:i]) # Net Profit Margin NPM = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='NPM') for i in range(0, fund_data.shape[0]): if i-3 < 0: NPM[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: NPM.iloc[i] = np.nan else: NPM.iloc[i] = np.sum(fund_data['net_inc_q'].iloc[i-3:i])/np.sum(fund_data['rev_q'].iloc[i-3:i]) # Return On Assets ROA = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='ROA') for i in range(0, fund_data.shape[0]): if i-3 < 0: ROA[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: ROA.iloc[i] = np.nan else: ROA.iloc[i] = np.sum(fund_data['net_inc_q'].iloc[i-3:i])/fund_data['tot_assets'].iloc[i] # Return on Equity ROE = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='ROE') for i in range(0, fund_data.shape[0]): if i-3 < 0: ROE[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: ROE.iloc[i] = np.nan else: ROE.iloc[i] = np.sum(fund_data['net_inc_q'].iloc[i-3:i])/fund_data['sh_equity'].iloc[i] # For calculating valuation ratios in the next subpart, calculate per share items in advance # Earnings Per Share EPS = fund_data['eps_incl_ex'].to_frame('EPS') # Book Per Share BPS = (fund_data['com_eq']/fund_data['sh_outstanding']).to_frame('BPS') # Need to check units #Dividend Per Share DPS = fund_data['div_per_sh'].to_frame('DPS') # Liquidity ratios # Current ratio cur_ratio = (fund_data['cur_assets']/fund_data['cur_liabilities']).to_frame('cur_ratio') # Quick ratio quick_ratio = ((fund_data['cash_eq'] + fund_data['receivables'] )/fund_data['cur_liabilities']).to_frame('quick_ratio') # Cash ratio cash_ratio = (fund_data['cash_eq']/fund_data['cur_liabilities']).to_frame('cash_ratio') # Efficiency ratios # Inventory turnover ratio inv_turnover = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='inv_turnover') for i in range(0, fund_data.shape[0]): if i-3 < 0: inv_turnover[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: inv_turnover.iloc[i] = np.nan else: inv_turnover.iloc[i] = np.sum(fund_data['cogs_q'].iloc[i-3:i])/fund_data['inventories'].iloc[i] # Receivables turnover ratio acc_rec_turnover = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='acc_rec_turnover') for i in range(0, fund_data.shape[0]): if i-3 < 0: acc_rec_turnover[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: acc_rec_turnover.iloc[i] = np.nan else: acc_rec_turnover.iloc[i] = np.sum(fund_data['rev_q'].iloc[i-3:i])/fund_data['receivables'].iloc[i] # Payable turnover ratio acc_pay_turnover = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='acc_pay_turnover') for i in range(0, fund_data.shape[0]): if i-3 < 0: acc_pay_turnover[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: acc_pay_turnover.iloc[i] = np.nan else: acc_pay_turnover.iloc[i] = 
np.sum(fund_data['cogs_q'].iloc[i-3:i])/fund_data['payables'].iloc[i] ## Leverage financial ratios # Debt ratio debt_ratio = (fund_data['tot_liabilities']/fund_data['tot_assets']).to_frame('debt_ratio') # Debt to Equity ratio debt_to_equity = (fund_data['tot_liabilities']/fund_data['sh_equity']).to_frame('debt_to_equity') # Create a dataframe that merges all the ratios ratios = pd.concat([date,tic,OPM,NPM,ROA,ROE,EPS,BPS,DPS, cur_ratio,quick_ratio,cash_ratio,inv_turnover,acc_rec_turnover,acc_pay_turnover, debt_ratio,debt_to_equity], axis=1) # Check the ratio data ratios.head() ratios.tail() ###Output _____no_output_____ ###Markdown 4-4 Deal with NAs and infinite values- We replace N/A and infinite values with zero so that they can be recognized as a state ###Code # Replace NAs infinite values with zero final_ratios = ratios.copy() final_ratios = final_ratios.fillna(0) final_ratios = final_ratios.replace(np.inf,0) final_ratios.head() final_ratios.tail() ###Output _____no_output_____ ###Markdown 4-5 Merge stock price data and ratios into one dataframe- Merge the price dataframe preprocessed in Part 3 and the ratio dataframe created in this part- Since the prices are daily and ratios are quartely, we have NAs in the ratio columns after merging the two dataframes. We deal with this by backfilling the ratios. ###Code list_ticker = df["tic"].unique().tolist() list_date = list(pd.date_range(df['date'].min(),df['date'].max())) combination = list(itertools.product(list_date,list_ticker)) # Merge stock price data and ratios into one dataframe processed_full = pd.DataFrame(combination,columns=["date","tic"]).merge(df,on=["date","tic"],how="left") processed_full = processed_full.merge(final_ratios,how='left',on=['date','tic']) processed_full = processed_full.sort_values(['tic','date']) # Backfill the ratio data to make them daily processed_full = processed_full.bfill(axis='rows') ###Output _____no_output_____ ###Markdown 4-6 Calculate market valuation ratios using daily stock price data ###Code # Calculate P/E, P/B and dividend yield using daily closing price processed_full['PE'] = processed_full['close']/processed_full['EPS'] processed_full['PB'] = processed_full['close']/processed_full['BPS'] processed_full['Div_yield'] = processed_full['DPS']/processed_full['close'] # Drop per share items used for the above calculation processed_full = processed_full.drop(columns=['day','EPS','BPS','DPS']) # Replace NAs infinite values with zero processed_full = processed_full.copy() processed_full = processed_full.fillna(0) processed_full = processed_full.replace(np.inf,0) # Check the final data processed_full.sort_values(['date','tic'],ignore_index=True).head(10) ###Output _____no_output_____ ###Markdown Part 5. Design EnvironmentConsidering the stochastic and interactive nature of the automated stock trading tasks, a financial task is modeled as a **Markov Decision Process (MDP)** problem. The training process involves observing stock price change, taking an action and reward's calculation to have the agent adjusting its strategy accordingly. By interacting with the environment, the trading agent will derive a trading strategy with the maximized rewards as time proceeds.Our trading environments, based on OpenAI Gym framework, simulate live stock markets with real market data according to the principle of time-driven simulation.The action space describes the allowed actions that the agent interacts with the environment. 
Normally, action a includes three actions: {-1, 0, 1}, where -1, 0, 1 represent selling, holding, and buying one share. Also, an action can be carried upon multiple shares. We use an action space {-k,…,-1, 0, 1, …, k}, where k denotes the number of shares to buy and -k denotes the number of shares to sell. For example, "Buy 10 shares of AAPL" or "Sell 10 shares of AAPL" are 10 or -10, respectively. The continuous action space needs to be normalized to [-1, 1], since the policy is defined on a Gaussian distribution, which needs to be normalized and symmetric. 5-1 Split data into training and trade dataset- Training data split: 2009-01-01 to 2018-12-31- Trade data split: 2019-01-01 to 2020-09-30 ###Code train = data_split(processed_full, '2009-01-01','2019-01-01') trade = data_split(processed_full, '2019-01-01','2021-01-01') # Check the length of the two datasets print(len(train)) print(len(trade)) train.head() trade.head() ###Output _____no_output_____ ###Markdown 5-2 Set up the training environment ###Code import gym import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd from gym import spaces from gym.utils import seeding from stable_baselines3.common.vec_env import DummyVecEnv matplotlib.use("Agg") # from stable_baselines3.common import logger class StockTradingEnv(gym.Env): """A stock trading environment for OpenAI gym""" metadata = {"render.modes": ["human"]} def __init__( self, df, stock_dim, hmax, initial_amount, buy_cost_pct, sell_cost_pct, reward_scaling, state_space, action_space, tech_indicator_list, turbulence_threshold=None, risk_indicator_col="turbulence", make_plots=False, print_verbosity=10, day=0, initial=True, previous_state=[], model_name="", mode="", iteration="", ): self.day = day self.df = df self.stock_dim = stock_dim self.hmax = hmax self.initial_amount = initial_amount self.buy_cost_pct = buy_cost_pct self.sell_cost_pct = sell_cost_pct self.reward_scaling = reward_scaling self.state_space = state_space self.action_space = action_space self.tech_indicator_list = tech_indicator_list self.action_space = spaces.Box(low=-1, high=1, shape=(self.action_space,)) self.observation_space = spaces.Box( low=-np.inf, high=np.inf, shape=(self.state_space,) ) self.data = self.df.loc[self.day, :] self.terminal = False self.make_plots = make_plots self.print_verbosity = print_verbosity self.turbulence_threshold = turbulence_threshold self.risk_indicator_col = risk_indicator_col self.initial = initial self.previous_state = previous_state self.model_name = model_name self.mode = mode self.iteration = iteration # initalize state self.state = self._initiate_state() # initialize reward self.reward = 0 self.turbulence = 0 self.cost = 0 self.trades = 0 self.episode = 0 # memorize all the total balance change self.asset_memory = [self.initial_amount] self.rewards_memory = [] self.actions_memory = [] self.date_memory = [self._get_date()] # self.reset() self._seed() def _sell_stock(self, index, action): def _do_sell_normal(): if self.state[index + 1] > 0: # Sell only if the price is > 0 (no missing data in this particular date) # perform sell action based on the sign of the action if self.state[index + self.stock_dim + 1] > 0: # Sell only if current asset is > 0 sell_num_shares = min( abs(action), self.state[index + self.stock_dim + 1] ) sell_amount = ( self.state[index + 1] * sell_num_shares * (1 - self.sell_cost_pct) ) # update balance self.state[0] += sell_amount self.state[index + self.stock_dim + 1] -= sell_num_shares self.cost += ( self.state[index + 1] 
* sell_num_shares * self.sell_cost_pct ) self.trades += 1 else: sell_num_shares = 0 else: sell_num_shares = 0 return sell_num_shares # perform sell action based on the sign of the action if self.turbulence_threshold is not None: if self.turbulence >= self.turbulence_threshold: if self.state[index + 1] > 0: # Sell only if the price is > 0 (no missing data in this particular date) # if turbulence goes over threshold, just clear out all positions if self.state[index + self.stock_dim + 1] > 0: # Sell only if current asset is > 0 sell_num_shares = self.state[index + self.stock_dim + 1] sell_amount = ( self.state[index + 1] * sell_num_shares * (1 - self.sell_cost_pct) ) # update balance self.state[0] += sell_amount self.state[index + self.stock_dim + 1] = 0 self.cost += ( self.state[index + 1] * sell_num_shares * self.sell_cost_pct ) self.trades += 1 else: sell_num_shares = 0 else: sell_num_shares = 0 else: sell_num_shares = _do_sell_normal() else: sell_num_shares = _do_sell_normal() return sell_num_shares def _buy_stock(self, index, action): def _do_buy(): if self.state[index + 1] > 0: # Buy only if the price is > 0 (no missing data in this particular date) available_amount = self.state[0] // self.state[index + 1] # print('available_amount:{}'.format(available_amount)) # update balance buy_num_shares = min(available_amount, action) buy_amount = ( self.state[index + 1] * buy_num_shares * (1 + self.buy_cost_pct) ) self.state[0] -= buy_amount self.state[index + self.stock_dim + 1] += buy_num_shares self.cost += self.state[index + 1] * buy_num_shares * self.buy_cost_pct self.trades += 1 else: buy_num_shares = 0 return buy_num_shares # perform buy action based on the sign of the action if self.turbulence_threshold is None: buy_num_shares = _do_buy() else: if self.turbulence < self.turbulence_threshold: buy_num_shares = _do_buy() else: buy_num_shares = 0 pass return buy_num_shares def _make_plot(self): plt.plot(self.asset_memory, "r") plt.savefig("results/account_value_trade_{}.png".format(self.episode)) plt.close() def step(self, actions): self.terminal = self.day >= len(self.df.index.unique()) - 1 if self.terminal: # print(f"Episode: {self.episode}") if self.make_plots: self._make_plot() end_total_asset = self.state[0] + sum( np.array(self.state[1 : (self.stock_dim + 1)]) * np.array(self.state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)]) ) df_total_value = pd.DataFrame(self.asset_memory) tot_reward = ( self.state[0] + sum( np.array(self.state[1 : (self.stock_dim + 1)]) * np.array( self.state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)] ) ) - self.initial_amount ) df_total_value.columns = ["account_value"] df_total_value["date"] = self.date_memory df_total_value["daily_return"] = df_total_value["account_value"].pct_change( 1 ) if df_total_value["daily_return"].std() != 0: sharpe = ( (252 ** 0.5) * df_total_value["daily_return"].mean() / df_total_value["daily_return"].std() ) df_rewards = pd.DataFrame(self.rewards_memory) df_rewards.columns = ["account_rewards"] df_rewards["date"] = self.date_memory[:-1] if self.episode % self.print_verbosity == 0: print(f"day: {self.day}, episode: {self.episode}") print(f"begin_total_asset: {self.asset_memory[0]:0.2f}") print(f"end_total_asset: {end_total_asset:0.2f}") print(f"total_reward: {tot_reward:0.2f}") print(f"total_cost: {self.cost:0.2f}") print(f"total_trades: {self.trades}") if df_total_value["daily_return"].std() != 0: print(f"Sharpe: {sharpe:0.3f}") print("=================================") if (self.model_name != "") and (self.mode != ""): 
df_actions = self.save_action_memory() df_actions.to_csv( "results/actions_{}_{}_{}.csv".format( self.mode, self.model_name, self.iteration ) ) df_total_value.to_csv( "results/account_value_{}_{}_{}.csv".format( self.mode, self.model_name, self.iteration ), index=False, ) df_rewards.to_csv( "results/account_rewards_{}_{}_{}.csv".format( self.mode, self.model_name, self.iteration ), index=False, ) plt.plot(self.asset_memory, "r") plt.savefig( "results/account_value_{}_{}_{}.png".format( self.mode, self.model_name, self.iteration ), index=False, ) plt.close() # Add outputs to logger interface # logger.record("environment/portfolio_value", end_total_asset) # logger.record("environment/total_reward", tot_reward) # logger.record("environment/total_reward_pct", (tot_reward / (end_total_asset - tot_reward)) * 100) # logger.record("environment/total_cost", self.cost) # logger.record("environment/total_trades", self.trades) return self.state, self.reward, self.terminal, {} else: actions = actions * self.hmax # actions initially is scaled between 0 to 1 actions = actions.astype( int ) # convert into integer because we can't by fraction of shares if self.turbulence_threshold is not None: if self.turbulence >= self.turbulence_threshold: actions = np.array([-self.hmax] * self.stock_dim) begin_total_asset = self.state[0] + sum( np.array(self.state[1 : (self.stock_dim + 1)]) * np.array(self.state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)]) ) # print("begin_total_asset:{}".format(begin_total_asset)) argsort_actions = np.argsort(actions) sell_index = argsort_actions[: np.where(actions < 0)[0].shape[0]] buy_index = argsort_actions[::-1][: np.where(actions > 0)[0].shape[0]] for index in sell_index: # print(f"Num shares before: {self.state[index+self.stock_dim+1]}") # print(f'take sell action before : {actions[index]}') actions[index] = self._sell_stock(index, actions[index]) * (-1) # print(f'take sell action after : {actions[index]}') # print(f"Num shares after: {self.state[index+self.stock_dim+1]}") for index in buy_index: # print('take buy action: {}'.format(actions[index])) actions[index] = self._buy_stock(index, actions[index]) self.actions_memory.append(actions) # state: s -> s+1 self.day += 1 self.data = self.df.loc[self.day, :] if self.turbulence_threshold is not None: if len(self.df.tic.unique()) == 1: self.turbulence = self.data[self.risk_indicator_col] elif len(self.df.tic.unique()) > 1: self.turbulence = self.data[self.risk_indicator_col].values[0] self.state = self._update_state() end_total_asset = self.state[0] + sum( np.array(self.state[1 : (self.stock_dim + 1)]) * np.array(self.state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)]) ) self.asset_memory.append(end_total_asset) self.date_memory.append(self._get_date()) self.reward = end_total_asset - begin_total_asset self.rewards_memory.append(self.reward) self.reward = self.reward * self.reward_scaling return self.state, self.reward, self.terminal, {} def reset(self): # initiate state self.state = self._initiate_state() if self.initial: self.asset_memory = [self.initial_amount] else: previous_total_asset = self.previous_state[0] + sum( np.array(self.state[1 : (self.stock_dim + 1)]) * np.array( self.previous_state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)] ) ) self.asset_memory = [previous_total_asset] self.day = 0 self.data = self.df.loc[self.day, :] self.turbulence = 0 self.cost = 0 self.trades = 0 self.terminal = False # self.iteration=self.iteration self.rewards_memory = [] self.actions_memory = [] self.date_memory = 
[self._get_date()] self.episode += 1 return self.state def render(self, mode="human", close=False): return self.state def _initiate_state(self): if self.initial: # For Initial State if len(self.df.tic.unique()) > 1: # for multiple stock state = ( [self.initial_amount] + self.data.close.values.tolist() + [0] * self.stock_dim + sum( [ self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [], ) ) else: # for single stock state = ( [self.initial_amount] + [self.data.close] + [0] * self.stock_dim + sum([[self.data[tech]] for tech in self.tech_indicator_list], []) ) else: # Using Previous State if len(self.df.tic.unique()) > 1: # for multiple stock state = ( [self.previous_state[0]] + self.data.close.values.tolist() + self.previous_state[ (self.stock_dim + 1) : (self.stock_dim * 2 + 1) ] + sum( [ self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [], ) ) else: # for single stock state = ( [self.previous_state[0]] + [self.data.close] + self.previous_state[ (self.stock_dim + 1) : (self.stock_dim * 2 + 1) ] + sum([[self.data[tech]] for tech in self.tech_indicator_list], []) ) return state def _update_state(self): if len(self.df.tic.unique()) > 1: # for multiple stock state = ( [self.state[0]] + self.data.close.values.tolist() + list(self.state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)]) + sum( [ self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [], ) ) else: # for single stock state = ( [self.state[0]] + [self.data.close] + list(self.state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)]) + sum([[self.data[tech]] for tech in self.tech_indicator_list], []) ) return state def _get_date(self): if len(self.df.tic.unique()) > 1: date = self.data.date.unique()[0] else: date = self.data.date return date def save_asset_memory(self): date_list = self.date_memory asset_list = self.asset_memory # print(len(date_list)) # print(len(asset_list)) df_account_value = pd.DataFrame( {"date": date_list, "account_value": asset_list} ) return df_account_value def save_action_memory(self): if len(self.df.tic.unique()) > 1: # date and close price length must match actions length date_list = self.date_memory[:-1] df_date = pd.DataFrame(date_list) df_date.columns = ["date"] action_list = self.actions_memory df_actions = pd.DataFrame(action_list) df_actions.columns = self.data.tic.values df_actions.index = df_date.date # df_actions = pd.DataFrame({'date':date_list,'actions':action_list}) else: date_list = self.date_memory[:-1] action_list = self.actions_memory df_actions = pd.DataFrame({"date": date_list, "actions": action_list}) return df_actions def _seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] def get_sb_env(self): e = DummyVecEnv([lambda: self]) obs = e.reset() return e, obs ratio_list = ['OPM', 'NPM','ROA', 'ROE', 'cur_ratio', 'quick_ratio', 'cash_ratio', 'inv_turnover','acc_rec_turnover', 'acc_pay_turnover', 'debt_ratio', 'debt_to_equity', 'PE', 'PB', 'Div_yield'] stock_dimension = len(train.tic.unique()) state_space = 1 + 2*stock_dimension + len(ratio_list)*stock_dimension print(f"Stock Dimension: {stock_dimension}, State Space: {state_space}") # Parameters for the environment env_kwargs = { "hmax": 100, "initial_amount": 1000000, "buy_cost_pct": 0.001, "sell_cost_pct": 0.001, "state_space": state_space, "stock_dim": stock_dimension, "tech_indicator_list": ratio_list, "action_space": stock_dimension, "reward_scaling": 1e-4 } #Establish the training environment using StockTradingEnv() class e_train_gym = 
StockTradingEnv(df = train, **env_kwargs) ###Output _____no_output_____ ###Markdown Environment for Training ###Code env_train, _ = e_train_gym.get_sb_env() print(type(env_train)) ###Output <class 'stable_baselines3.common.vec_env.dummy_vec_env.DummyVecEnv'> ###Markdown Part 6: Implement DRL Algorithms* The implementation of the DRL algorithms are based on **OpenAI Baselines** and **Stable Baselines**. Stable Baselines is a fork of OpenAI Baselines, with a major structural refactoring, and code cleanups.* FinRL library includes fine-tuned standard DRL algorithms, such as DQN, DDPG,Multi-Agent DDPG, PPO, SAC, A2C and TD3. We also allow users todesign their own DRL algorithms by adapting these DRL algorithms. ###Code # Set up the agent using DRLAgent() class using the environment created in the previous part agent = DRLAgent(env = env_train) ###Output _____no_output_____ ###Markdown Model Training: 5 models, A2C DDPG, PPO, TD3, SAC Model 1: A2C ###Code agent = DRLAgent(env = env_train) model_a2c = agent.get_model("a2c") trained_a2c = agent.train_model(model=model_a2c, tb_log_name='a2c', total_timesteps=100000) ###Output ----------------------------------------- | time/ | | | fps | 95 | | iterations | 100 | | time_elapsed | 5 | | total_timesteps | 500 | | train/ | | | entropy_loss | -42.6 | | explained_variance | 5.96e-08 | | learning_rate | 0.0007 | | n_updates | 99 | | policy_loss | 63.9 | | reward | -0.0025933678 | | std | 1 | | value_loss | 4.83 | ----------------------------------------- -------------------------------------- | time/ | | | fps | 103 | | iterations | 200 | | time_elapsed | 9 | | total_timesteps | 1000 | | train/ | | | entropy_loss | -42.7 | | explained_variance | -0.000537 | | learning_rate | 0.0007 | | n_updates | 199 | | policy_loss | -171 | | reward | -2.1574955 | | std | 1 | | value_loss | 25.8 | -------------------------------------- -------------------------------------- | time/ | | | fps | 107 | | iterations | 300 | | time_elapsed | 13 | | total_timesteps | 1500 | | train/ | | | entropy_loss | -42.7 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 299 | | policy_loss | -24.3 | | reward | 0.16899587 | | std | 1 | | value_loss | 0.982 | -------------------------------------- -------------------------------------- | time/ | | | fps | 110 | | iterations | 400 | | time_elapsed | 18 | | total_timesteps | 2000 | | train/ | | | entropy_loss | -42.6 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 399 | | policy_loss | 74.3 | | reward | -3.0512037 | | std | 1 | | value_loss | 4.96 | -------------------------------------- --------------------------------------- | time/ | | | fps | 112 | | iterations | 500 | | time_elapsed | 22 | | total_timesteps | 2500 | | train/ | | | entropy_loss | -42.6 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 499 | | policy_loss | 107 | | reward | -0.17085995 | | std | 1 | | value_loss | 7.74 | --------------------------------------- ------------------------------------ | time/ | | | fps | 113 | | iterations | 600 | | time_elapsed | 26 | | total_timesteps | 3000 | | train/ | | | entropy_loss | -42.7 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 599 | | policy_loss | 165 | | reward | 0.0 | | std | 1 | | value_loss | 13.9 | ------------------------------------ ------------------------------------- | time/ | | | fps | 113 | | iterations | 700 | | time_elapsed | 30 | | total_timesteps | 3500 | | train/ | | | entropy_loss | -42.7 | | 
explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 699 | | policy_loss | -43.5 | | reward | 4.7688413 | | std | 1 | | value_loss | 11.4 | ------------------------------------- -------------------------------------- | time/ | | | fps | 114 | | iterations | 800 | | time_elapsed | 34 | | total_timesteps | 4000 | | train/ | | | entropy_loss | -42.7 | | explained_variance | 1.19e-07 | | learning_rate | 0.0007 | | n_updates | 799 | | policy_loss | 40.9 | | reward | -0.7754371 | | std | 1 | | value_loss | 1.12 | -------------------------------------- ------------------------------------- | time/ | | | fps | 114 | | iterations | 900 | | time_elapsed | 39 | | total_timesteps | 4500 | | train/ | | | entropy_loss | -42.7 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 899 | | policy_loss | 52.1 | | reward | 1.1952021 | | std | 1 | | value_loss | 3.82 | ------------------------------------- ----------------------------------------- | time/ | | | fps | 115 | | iterations | 1000 | | time_elapsed | 43 | | total_timesteps | 5000 | | train/ | | | entropy_loss | -42.7 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 999 | | policy_loss | -56.4 | | reward | -0.0004602166 | | std | 1 | | value_loss | 2.08 | ----------------------------------------- ------------------------------------- | time/ | | | fps | 115 | | iterations | 1100 | | time_elapsed | 47 | | total_timesteps | 5500 | | train/ | | | entropy_loss | -42.7 | | explained_variance | -2.38e-07 | | learning_rate | 0.0007 | | n_updates | 1099 | | policy_loss | -6.22 | | reward | -3.228336 | | std | 1.01 | | value_loss | 1.29 | ------------------------------------- ------------------------------------------ | time/ | | | fps | 115 | | iterations | 1200 | | time_elapsed | 51 | | total_timesteps | 6000 | | train/ | | | entropy_loss | -42.8 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 1199 | | policy_loss | -60.2 | | reward | -0.00017504377 | | std | 1.01 | | value_loss | 3.58 | ------------------------------------------ -------------------------------------- | time/ | | | fps | 115 | | iterations | 1300 | | time_elapsed | 56 | | total_timesteps | 6500 | | train/ | | | entropy_loss | -42.8 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 1299 | | policy_loss | -54.2 | | reward | -1.3630623 | | std | 1.01 | | value_loss | 3.67 | -------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 1400 | | time_elapsed | 60 | | total_timesteps | 7000 | | train/ | | | entropy_loss | -42.9 | | explained_variance | 1.45e-05 | | learning_rate | 0.0007 | | n_updates | 1399 | | policy_loss | 302 | | reward | 2.7465565 | | std | 1.01 | | value_loss | 97.4 | ------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 1500 | | time_elapsed | 64 | | total_timesteps | 7500 | | train/ | | | entropy_loss | -42.9 | | explained_variance | -0.00238 | | learning_rate | 0.0007 | | n_updates | 1499 | | policy_loss | 128 | | reward | 2.0559413 | | std | 1.01 | | value_loss | 8.62 | ------------------------------------- ------------------------------------------ | time/ | | | fps | 116 | | iterations | 1600 | | time_elapsed | 68 | | total_timesteps | 8000 | | train/ | | | entropy_loss | -42.9 | | explained_variance | 1.19e-07 | | learning_rate | 0.0007 | | n_updates | 1599 | | policy_loss | -74.4 | | reward | -7.6194716e-05 | | std | 1.01 | | value_loss | 3.53 | 
------------------------------------------
[Training log, condensed: the logger printed a metrics table every 100 iterations (500 timesteps) from iteration 1,700 (8,500 total timesteps) up to iteration 20,000 (100,000 total timesteps), where the log for this model ends. Throughput held at roughly 115-117 fps with learning_rate 0.0007 throughout; entropy_loss drifted from about -43 to -47.5 and the policy std from 1.02 to 1.18, explained_variance stayed near zero apart from occasional spikes, policy_loss and value_loss fluctuated widely between updates (roughly -646 to +448 and 0.1 to 320, respectively), and the logged reward oscillated around zero (roughly -8.7 to +5.6). The episode summaries emitted during the run are preserved below.]

day: 3651, episode: 10
begin_total_asset: 1000000.00
end_total_asset: 3389264.47
total_reward: 2389264.47
total_cost: 37026.78
total_trades: 64155
Sharpe: 0.621
=================================

day: 3651, episode: 20
begin_total_asset: 1000000.00
end_total_asset: 4746174.96
total_reward: 3746174.96
total_cost: 7432.95
total_trades: 48850
Sharpe: 0.803
=================================
explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 19399 | | policy_loss | -139 | | reward | 0.0 | | std | 1.17 | | value_loss | 11.6 | ------------------------------------ --------------------------------------- | time/ | | | fps | 116 | | iterations | 19500 | | time_elapsed | 835 | | total_timesteps | 97500 | | train/ | | | entropy_loss | -47.3 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 19499 | | policy_loss | -117 | | reward | -0.32186767 | | std | 1.17 | | value_loss | 16.1 | --------------------------------------- ------------------------------------ | time/ | | | fps | 116 | | iterations | 19600 | | time_elapsed | 839 | | total_timesteps | 98000 | | train/ | | | entropy_loss | -47.3 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 19599 | | policy_loss | 54.5 | | reward | 0.0 | | std | 1.18 | | value_loss | 5.75 | ------------------------------------ ------------------------------------- | time/ | | | fps | 116 | | iterations | 19700 | | time_elapsed | 843 | | total_timesteps | 98500 | | train/ | | | entropy_loss | -47.4 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 19699 | | policy_loss | 71.7 | | reward | 1.6004717 | | std | 1.18 | | value_loss | 7.57 | ------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 19800 | | time_elapsed | 848 | | total_timesteps | 99000 | | train/ | | | entropy_loss | -47.4 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 19799 | | policy_loss | 27.3 | | reward | 0.0 | | std | 1.18 | | value_loss | 0.457 | ------------------------------------- ------------------------------------- | time/ | | | fps | 116 | | iterations | 19900 | | time_elapsed | 852 | | total_timesteps | 99500 | | train/ | | | entropy_loss | -47.5 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 19899 | | policy_loss | -24.6 | | reward | 0.8102966 | | std | 1.18 | | value_loss | 0.672 | ------------------------------------- ------------------------------------ | time/ | | | fps | 116 | | iterations | 20000 | | time_elapsed | 856 | | total_timesteps | 100000 | | train/ | | | entropy_loss | -47.4 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 19999 | | policy_loss | -103 | | reward | 0.0 | | std | 1.18 | | value_loss | 5.25 | ------------------------------------ ###Markdown Model 2: DDPG ###Code agent = DRLAgent(env = env_train) model_ddpg = agent.get_model("ddpg") trained_ddpg = agent.train_model(model=model_ddpg, tb_log_name='ddpg', total_timesteps=50000) ###Output --------------------------------- | time/ | | | episodes | 4 | | fps | 89 | | time_elapsed | 163 | | total_timesteps | 14608 | | train/ | | | actor_loss | -51.5 | | critic_loss | 711 | | learning_rate | 0.001 | | n_updates | 10956 | | reward | 0.0 | --------------------------------- --------------------------------- | time/ | | | episodes | 8 | | fps | 84 | | time_elapsed | 346 | | total_timesteps | 29216 | | train/ | | | actor_loss | -16.1 | | critic_loss | 3.13 | | learning_rate | 0.001 | | n_updates | 25564 | | reward | 0.0 | --------------------------------- day: 3651, episode: 40 begin_total_asset: 1000000.00 end_total_asset: 3401133.75 total_reward: 2401133.75 total_cost: 1039.72 total_trades: 73035 Sharpe: 0.705 ================================= --------------------------------- | time/ | | | episodes | 12 | | fps | 83 | | time_elapsed | 526 | | total_timesteps | 43824 | | train/ | | | 
actor_loss | -10.4 | | critic_loss | 2.51 | | learning_rate | 0.001 | | n_updates | 40172 | | reward | 0.0 | --------------------------------- ###Markdown Model 3: PPO ###Code agent = DRLAgent(env = env_train) PPO_PARAMS = { "n_steps": 2048, "ent_coef": 0.01, "learning_rate": 0.00025, "batch_size": 128, } model_ppo = agent.get_model("ppo",model_kwargs = PPO_PARAMS) trained_ppo = agent.train_model(model=model_ppo, tb_log_name='ppo', total_timesteps=50000) ###Output _____no_output_____ ###Markdown Model 4: TD3 ###Code agent = DRLAgent(env = env_train) TD3_PARAMS = {"batch_size": 100, "buffer_size": 1000000, "learning_rate": 0.001} model_td3 = agent.get_model("td3",model_kwargs = TD3_PARAMS) trained_td3 = agent.train_model(model=model_td3, tb_log_name='td3', total_timesteps=30000) ###Output _____no_output_____ ###Markdown Model 5: SAC ###Code agent = DRLAgent(env = env_train) SAC_PARAMS = { "batch_size": 128, "buffer_size": 1000000, "learning_rate": 0.0001, "learning_starts": 100, "ent_coef": "auto_0.1", } model_sac = agent.get_model("sac",model_kwargs = SAC_PARAMS) trained_sac = agent.train_model(model=model_sac, tb_log_name='sac', total_timesteps=80000) ###Output _____no_output_____ ###Markdown TradingAssume that we have $1,000,000 initial capital at 2019-01-01. We use the DDPG model to trade Dow jones 30 stocks. TradeDRL model needs to update periodically in order to take full advantage of the data, ideally we need to retrain our model yearly, quarterly, or monthly. We also need to tune the parameters along the way, in this notebook I only use the in-sample data from 2009-01 to 2018-12 to tune the parameters once, so there is some alpha decay here as the length of trade date extends. Numerous hyperparameters – e.g. the learning rate, the total number of samples to train on – influence the learning process and are usually determined by testing some variations. ###Code trade = data_split(processed_full, '2019-01-01','2021-01-01') e_trade_gym = StockTradingEnv(df = trade, **env_kwargs) # env_trade, obs_trade = e_trade_gym.get_sb_env() trade.head() df_account_value, df_actions = DRLAgent.DRL_prediction( model=trained_ddpg, environment = e_trade_gym) df_account_value.shape df_account_value.tail() df_actions.head() ###Output _____no_output_____ ###Markdown Part 7: Backtest Our StrategyBacktesting plays a key role in evaluating the performance of a trading strategy. Automated backtesting tool is preferred because it reduces the human error. We usually use the Quantopian pyfolio package to backtest our trading strategies. It is easy to use and consists of various individual plots that provide a comprehensive image of the performance of a trading strategy. 
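Before handing df_account_value to the pyfolio-based helpers in the next subsections, a quick sanity check on the raw series can catch obvious problems such as a flat equity curve or missing dates. The cell below is a minimal sketch, assuming df_account_value carries the usual date and account_value columns returned by DRL_prediction; it reuses the same annualized Sharpe formula (252 trading days) that the trading environment prints during training, so the numbers should be broadly consistent with the perf_stats_all table produced by backtest_stats below. ###Code
# Sanity-check the account value series returned by DRL_prediction
daily_return = df_account_value["account_value"].pct_change(1).dropna()

# Cumulative return over the whole trade period
cumulative_return = (
    df_account_value["account_value"].iloc[-1]
    / df_account_value["account_value"].iloc[0]
    - 1
)

# Annualized Sharpe ratio, same formula as used inside StockTradingEnv
sharpe = (252 ** 0.5) * daily_return.mean() / daily_return.std()

print(f"Cumulative return: {cumulative_return:0.4f}")
print(f"Sharpe ratio: {sharpe:0.3f}")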
7.1 BackTestStatspass in df_account_value, this information is stored in env class ###Code print("==============Get Backtest Results===========") now = datetime.datetime.now().strftime('%Y%m%d-%Hh%M') perf_stats_all = backtest_stats(account_value=df_account_value) perf_stats_all = pd.DataFrame(perf_stats_all) perf_stats_all.to_csv("./"+config.RESULTS_DIR+"/perf_stats_all_"+now+'.csv') #baseline stats print("==============Get Baseline Stats===========") baseline_df = get_baseline( ticker="^DJI", start = '2019-01-01', end = '2021-01-01') stats = backtest_stats(baseline_df, value_col_name = 'close') ###Output ==============Get Baseline Stats=========== [*********************100%***********************] 1 of 1 completed Shape of DataFrame: (506, 8) Annual return 0.144827 Cumulative returns 0.312037 Annual volatility 0.274346 Sharpe ratio 0.632258 Calmar ratio 0.390515 Stability 0.119309 Max drawdown -0.370862 Omega ratio 1.149712 Sortino ratio 0.871240 Skew NaN Kurtosis NaN Tail ratio 0.860739 Daily value at risk -0.033876 dtype: float64 ###Markdown 7.2 BackTestPlot ###Code print("==============Compare to DJIA===========") %matplotlib inline # S&P 500: ^GSPC # Dow Jones Index: ^DJI # NASDAQ 100: ^NDX backtest_plot(df_account_value, baseline_ticker = '^DJI', baseline_start = '2019-01-01', baseline_end = '2021-01-01') ###Output ==============Compare to DJIA=========== [*********************100%***********************] 1 of 1 completed Shape of DataFrame: (506, 8) ###Markdown Automated stock trading using FinRL with financial dataTrained a Deep Reinforcement Learning model using FinRL and companies' financial ratio, and then backtested the model to examine how well-trained the model is* This Google Colabolatory notebook is based on the tutorial of FinRL: https://towardsdatascience.com/finrl-for-quantitative-finance-tutorial-for-multiple-stock-trading-7b00763b7530* This project is a final project of the almuni-mentored research project at Columbia University, Application of Reinforcement Learning to Finance, mentored by Bruce Yang from AI4Finance.* For more detailed explanation, please check out my Medium post: https://medium.com/@mariko.sawada1/automated-stock-trading-with-deep-reinforcement-learning-and-financial-data-a63286ccbe2b Content * [1. Problem Definition](0)* [2. Getting Started - Load Python packages](1) * [2.1. Install Packages](1.1) * [2.2. Check Additional Packages](1.2) * [2.3. Import Packages](1.3) * [2.4. Create Folders](1.4)* [3. Download Data](2)* [4. Preprocess fundamental Data](3) * [4-1 Import financial data](3.1) * [4-2 Specify items needed to calculate financial ratios](3.2) * [4-3 Calculate financial ratios](3.3) * [4-4 Deal with NAs and infinite values](3.4) * [4-5 Merge stock price data and ratios into one dataframe](3.5) * [4-6 Calculate market valuation ratios using daily stock price data](3.6)* [5.Build Environment](4) * [5.1. Training & Trade Data Split](4.1) * [5.2. User-defined Environment](4.2) * [5.3. Initialize Environment](4.3) * [6.Implement DRL Algorithms](5) * [7.Backtesting Performance](6) * [7.1. BackTestStats](6.1) * [7.2. BackTestPlot](6.2) * [7.3. Baseline Stats](6.3) * [7.3. Compare to Stock Market Index](6.4) Part 1. Problem Definition This problem is to design an automated trading solution for single stock trading. We model the stock trading process as a Markov Decision Process (MDP). 
We then formulate our trading goal as a maximization problem.The algorithm is trained using Deep Reinforcement Learning (DRL) algorithms and the components of the reinforcement learning environment are:* Action: The action space describes the allowed actions that the agent interacts with theenvironment. Normally, a ∈ A includes three actions: a ∈ {−1, 0, 1}, where −1, 0, 1 representselling, holding, and buying one stock. Also, an action can be carried upon multiple shares. We usean action space {−k, ..., −1, 0, 1, ..., k}, where k denotes the number of shares. For example, "Buy10 shares of AAPL" or "Sell 10 shares of AAPL" are 10 or −10, respectively* Reward function: r(s, a, s′) is the incentive mechanism for an agent to learn a better action. The change of the portfolio value when action a is taken at state s and arriving at new state s', i.e., r(s, a, s′) = v′ − v, where v′ and v represent the portfoliovalues at state s′ and s, respectively* State: The state space describes the observations that the agent receives from the environment. Just as a human trader needs to analyze various information before executing a trade, soour trading agent observes many different features to better learn in an interactive environment.* Environment: Dow 30 consituentsThe data of the single stock that we will be using for this case study is obtained from Yahoo Finance API. The data contains Open-High-Low-Close price and volume. Part 2. Load Python Packages 2.1. Install all the packages through FinRL library ###Code ## install finrl library !pip install git+https://github.com/AI4Finance-LLC/FinRL-Library.git ###Output Collecting git+https://github.com/AI4Finance-LLC/FinRL-Library.git Cloning https://github.com/AI4Finance-LLC/FinRL-Library.git to /tmp/pip-req-build-avwct7pb Running command git clone -q https://github.com/AI4Finance-LLC/FinRL-Library.git /tmp/pip-req-build-avwct7pb Collecting pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2 Cloning https://github.com/quantopian/pyfolio.git to /tmp/pip-install-sps4f25k/pyfolio_2efe9a99238a42588250d6733a01d260 Running command git clone -q https://github.com/quantopian/pyfolio.git /tmp/pip-install-sps4f25k/pyfolio_2efe9a99238a42588250d6733a01d260 Collecting elegantrl@ git+https://github.com/AI4Finance-Foundation/ElegantRL.git#egg=elegantrl Cloning https://github.com/AI4Finance-Foundation/ElegantRL.git to /tmp/pip-install-sps4f25k/elegantrl_28233bba5b454d3399006626d813b65b Running command git clone -q https://github.com/AI4Finance-Foundation/ElegantRL.git /tmp/pip-install-sps4f25k/elegantrl_28233bba5b454d3399006626d813b65b Requirement already satisfied: numpy>=1.17.3 in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.4) (1.19.5) Requirement already satisfied: pandas>=1.1.5 in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.4) (1.1.5) Collecting stockstats>=0.4.0 Downloading stockstats-0.4.1-py2.py3-none-any.whl (19 kB) Collecting yfinance Downloading yfinance-0.1.69-py2.py3-none-any.whl (26 kB) Collecting elegantrl Downloading elegantrl-0.3.3-py3-none-any.whl (234 kB)  |████████████████████████████████| 234 kB 26.4 MB/s [?25hRequirement already satisfied: matplotlib in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.4) (3.2.2) Requirement already satisfied: scikit-learn>=0.21.0 in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.4) (1.0.2) Requirement already satisfied: gym>=0.17 in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.4) (0.17.3) Collecting stable-baselines3[extra] Downloading 
stable_baselines3-1.3.0-py3-none-any.whl (174 kB)  |████████████████████████████████| 174 kB 73.9 MB/s [?25hCollecting ray[default] Downloading ray-1.9.2-cp37-cp37m-manylinux2014_x86_64.whl (57.6 MB)  |████████████████████████████████| 57.6 MB 1.2 MB/s [?25hCollecting lz4 Downloading lz4-3.1.10-cp37-cp37m-manylinux2010_x86_64.whl (1.8 MB)  |████████████████████████████████| 1.8 MB 63.9 MB/s [?25hCollecting tensorboardX Downloading tensorboardX-2.4.1-py2.py3-none-any.whl (124 kB)  |████████████████████████████████| 124 kB 83.4 MB/s [?25hCollecting gputil Downloading GPUtil-1.4.0.tar.gz (5.5 kB) Collecting exchange_calendars Downloading exchange_calendars-3.5.tar.gz (147 kB)  |████████████████████████████████| 147 kB 68.3 MB/s [?25hCollecting alpaca_trade_api Downloading alpaca_trade_api-1.4.3-py3-none-any.whl (36 kB) Collecting ccxt>=1.66.32 Downloading ccxt-1.67.31-py2.py3-none-any.whl (2.3 MB)  |████████████████████████████████| 2.3 MB 61.2 MB/s [?25hCollecting jqdatasdk Downloading jqdatasdk-1.8.10-py3-none-any.whl (153 kB)  |████████████████████████████████| 153 kB 76.3 MB/s [?25hCollecting wrds Downloading wrds-3.1.1-py3-none-any.whl (12 kB) Requirement already satisfied: pytest in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.4) (3.6.4) Requirement already satisfied: setuptools>=41.4.0 in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.4) (57.4.0) Requirement already satisfied: wheel>=0.33.6 in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.4) (0.37.1) Collecting pre-commit Downloading pre_commit-2.16.0-py2.py3-none-any.whl (191 kB)  |████████████████████████████████| 191 kB 75.1 MB/s [?25hCollecting pybullet Downloading pybullet-3.2.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl (90.8 MB)  |████████████████████████████████| 90.8 MB 298 bytes/s [?25hRequirement already satisfied: torch in /usr/local/lib/python3.7/dist-packages (from elegantrl@ git+https://github.com/AI4Finance-Foundation/ElegantRL.git#egg=elegantrl->finrl==0.3.4) (1.10.0+cu111) Requirement already satisfied: opencv-python in /usr/local/lib/python3.7/dist-packages (from elegantrl@ git+https://github.com/AI4Finance-Foundation/ElegantRL.git#egg=elegantrl->finrl==0.3.4) (4.1.2.30) Collecting box2d-py Downloading box2d_py-2.3.8-cp37-cp37m-manylinux1_x86_64.whl (448 kB)  |████████████████████████████████| 448 kB 60.4 MB/s [?25hRequirement already satisfied: ipython>=3.2.3 in /usr/local/lib/python3.7/dist-packages (from pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (5.5.0) Requirement already satisfied: pytz>=2014.10 in /usr/local/lib/python3.7/dist-packages (from pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (2018.9) Requirement already satisfied: scipy>=0.14.0 in /usr/local/lib/python3.7/dist-packages (from pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (1.4.1) Requirement already satisfied: seaborn>=0.7.1 in /usr/local/lib/python3.7/dist-packages (from pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (0.11.2) Collecting empyrical>=0.5.0 Downloading empyrical-0.5.5.tar.gz (52 kB)  |████████████████████████████████| 52 kB 2.1 MB/s [?25hRequirement already satisfied: requests>=2.18.4 in /usr/local/lib/python3.7/dist-packages (from ccxt>=1.66.32->finrl==0.3.4) (2.23.0) Collecting yarl==1.7.2 Downloading yarl-1.7.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (271 kB)  
|████████████████████████████████| 271 kB 75.7 MB/s [?25hCollecting aiohttp>=3.8 Downloading aiohttp-3.8.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (1.1 MB)  |████████████████████████████████| 1.1 MB 58.7 MB/s [?25hCollecting cryptography>=2.6.1 Downloading cryptography-36.0.1-cp36-abi3-manylinux_2_24_x86_64.whl (3.6 MB)  |████████████████████████████████| 3.6 MB 58.9 MB/s [?25hRequirement already satisfied: certifi>=2018.1.18 in /usr/local/lib/python3.7/dist-packages (from ccxt>=1.66.32->finrl==0.3.4) (2021.10.8) Collecting aiodns>=1.1.1 Downloading aiodns-3.0.0-py3-none-any.whl (5.0 kB) Collecting multidict>=4.0 Downloading multidict-5.2.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (160 kB)  |████████████████████████████████| 160 kB 67.6 MB/s [?25hRequirement already satisfied: idna>=2.0 in /usr/local/lib/python3.7/dist-packages (from yarl==1.7.2->ccxt>=1.66.32->finrl==0.3.4) (2.10) Requirement already satisfied: typing-extensions>=3.7.4 in /usr/local/lib/python3.7/dist-packages (from yarl==1.7.2->ccxt>=1.66.32->finrl==0.3.4) (3.10.0.2) Collecting pycares>=4.0.0 Downloading pycares-4.1.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (291 kB)  |████████████████████████████████| 291 kB 70.8 MB/s [?25hRequirement already satisfied: charset-normalizer<3.0,>=2.0 in /usr/local/lib/python3.7/dist-packages (from aiohttp>=3.8->ccxt>=1.66.32->finrl==0.3.4) (2.0.10) Collecting aiosignal>=1.1.2 Downloading aiosignal-1.2.0-py3-none-any.whl (8.2 kB) Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.7/dist-packages (from aiohttp>=3.8->ccxt>=1.66.32->finrl==0.3.4) (21.4.0) Collecting asynctest==0.13.0 Downloading asynctest-0.13.0-py3-none-any.whl (26 kB) Collecting async-timeout<5.0,>=4.0.0a3 Downloading async_timeout-4.0.2-py3-none-any.whl (5.8 kB) Collecting frozenlist>=1.1.1 Downloading frozenlist-1.2.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (192 kB)  |████████████████████████████████| 192 kB 73.4 MB/s [?25hRequirement already satisfied: cffi>=1.12 in /usr/local/lib/python3.7/dist-packages (from cryptography>=2.6.1->ccxt>=1.66.32->finrl==0.3.4) (1.15.0) Requirement already satisfied: pycparser in /usr/local/lib/python3.7/dist-packages (from cffi>=1.12->cryptography>=2.6.1->ccxt>=1.66.32->finrl==0.3.4) (2.21) Requirement already satisfied: pandas-datareader>=0.2 in /usr/local/lib/python3.7/dist-packages (from empyrical>=0.5.0->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (0.9.0) Requirement already satisfied: pyglet<=1.5.0,>=1.4.0 in /usr/local/lib/python3.7/dist-packages (from gym>=0.17->finrl==0.3.4) (1.5.0) Requirement already satisfied: cloudpickle<1.7.0,>=1.2.0 in /usr/local/lib/python3.7/dist-packages (from gym>=0.17->finrl==0.3.4) (1.3.0) Requirement already satisfied: pygments in /usr/local/lib/python3.7/dist-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (2.6.1) Requirement already satisfied: traitlets>=4.2 in /usr/local/lib/python3.7/dist-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (5.1.1) Requirement already satisfied: pexpect in /usr/local/lib/python3.7/dist-packages (from ipython>=3.2.3->pyfolio@ 
git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (4.8.0) Requirement already satisfied: decorator in /usr/local/lib/python3.7/dist-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (4.4.2) Requirement already satisfied: pickleshare in /usr/local/lib/python3.7/dist-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (0.7.5) Requirement already satisfied: simplegeneric>0.8 in /usr/local/lib/python3.7/dist-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (0.8.1) Requirement already satisfied: prompt-toolkit<2.0.0,>=1.0.4 in /usr/local/lib/python3.7/dist-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (1.0.18) Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib->finrl==0.3.4) (0.11.0) Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->finrl==0.3.4) (3.0.6) Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->finrl==0.3.4) (2.8.2) Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->finrl==0.3.4) (1.3.2) Requirement already satisfied: lxml in /usr/local/lib/python3.7/dist-packages (from pandas-datareader>=0.2->empyrical>=0.5.0->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (4.2.6) Requirement already satisfied: six>=1.9.0 in /usr/local/lib/python3.7/dist-packages (from prompt-toolkit<2.0.0,>=1.0.4->ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (1.15.0) Requirement already satisfied: wcwidth in /usr/local/lib/python3.7/dist-packages (from prompt-toolkit<2.0.0,>=1.0.4->ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (0.2.5) Requirement already satisfied: future in /usr/local/lib/python3.7/dist-packages (from pyglet<=1.5.0,>=1.4.0->gym>=0.17->finrl==0.3.4) (0.16.0) Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests>=2.18.4->ccxt>=1.66.32->finrl==0.3.4) (3.0.4) Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests>=2.18.4->ccxt>=1.66.32->finrl==0.3.4) (1.24.3) Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=0.21.0->finrl==0.3.4) (1.1.0) Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=0.21.0->finrl==0.3.4) (3.0.0) Collecting msgpack==1.0.2 Downloading msgpack-1.0.2-cp37-cp37m-manylinux1_x86_64.whl (273 kB)  |████████████████████████████████| 273 kB 73.9 MB/s [?25hCollecting websocket-client<2,>=0.56.0 Downloading websocket_client-1.2.3-py3-none-any.whl (53 kB)  |████████████████████████████████| 53 kB 2.8 MB/s [?25hCollecting alpaca_trade_api Downloading alpaca_trade_api-1.4.2-py3-none-any.whl (36 kB) Downloading alpaca_trade_api-1.4.1-py3-none-any.whl (36 kB) Downloading alpaca_trade_api-1.4.0-py3-none-any.whl (34 kB) Downloading alpaca_trade_api-1.3.0-py3-none-any.whl (43 kB)  |████████████████████████████████| 43 kB 2.3 MB/s [?25h Downloading 
alpaca_trade_api-1.2.3-py3-none-any.whl (40 kB)  |████████████████████████████████| 40 kB 7.3 MB/s [?25hCollecting websockets<10,>=8.0 Downloading websockets-9.1-cp37-cp37m-manylinux2010_x86_64.whl (103 kB)  |████████████████████████████████| 103 kB 77.3 MB/s [?25hCollecting pyluach Downloading pyluach-1.3.0-py3-none-any.whl (17 kB) Requirement already satisfied: toolz in /usr/local/lib/python3.7/dist-packages (from exchange_calendars->finrl==0.3.4) (0.11.2) Requirement already satisfied: korean_lunar_calendar in /usr/local/lib/python3.7/dist-packages (from exchange_calendars->finrl==0.3.4) (0.2.1) Collecting thriftpy2>=0.3.9 Downloading thriftpy2-0.4.14.tar.gz (361 kB)  |████████████████████████████████| 361 kB 78.3 MB/s [?25hCollecting pymysql>=0.7.6 Downloading PyMySQL-1.0.2-py3-none-any.whl (43 kB)  |████████████████████████████████| 43 kB 2.7 MB/s [?25hRequirement already satisfied: SQLAlchemy>=1.2.8 in /usr/local/lib/python3.7/dist-packages (from jqdatasdk->finrl==0.3.4) (1.4.29) Requirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from SQLAlchemy>=1.2.8->jqdatasdk->finrl==0.3.4) (4.10.0) Requirement already satisfied: greenlet!=0.4.17 in /usr/local/lib/python3.7/dist-packages (from SQLAlchemy>=1.2.8->jqdatasdk->finrl==0.3.4) (1.1.2) Collecting ply<4.0,>=3.4 Downloading ply-3.11-py2.py3-none-any.whl (49 kB)  |████████████████████████████████| 49 kB 7.6 MB/s [?25hRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata->SQLAlchemy>=1.2.8->jqdatasdk->finrl==0.3.4) (3.7.0) Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.7/dist-packages (from pexpect->ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (0.7.0) Collecting pyyaml>=5.1 Downloading PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (596 kB)  |████████████████████████████████| 596 kB 66.2 MB/s [?25hCollecting identify>=1.0.0 Downloading identify-2.4.3-py2.py3-none-any.whl (98 kB)  |████████████████████████████████| 98 kB 9.6 MB/s [?25hCollecting nodeenv>=0.11.1 Downloading nodeenv-1.6.0-py2.py3-none-any.whl (21 kB) Collecting virtualenv>=20.0.8 Downloading virtualenv-20.13.0-py2.py3-none-any.whl (6.5 MB)  |████████████████████████████████| 6.5 MB 50.1 MB/s [?25hRequirement already satisfied: toml in /usr/local/lib/python3.7/dist-packages (from pre-commit->finrl==0.3.4) (0.10.2) Collecting cfgv>=2.0.0 Downloading cfgv-3.3.1-py2.py3-none-any.whl (7.3 kB) Collecting platformdirs<3,>=2 Downloading platformdirs-2.4.1-py3-none-any.whl (14 kB) Collecting distlib<1,>=0.3.1 Downloading distlib-0.3.4-py2.py3-none-any.whl (461 kB)  |████████████████████████████████| 461 kB 65.2 MB/s [?25hRequirement already satisfied: filelock<4,>=3.2 in /usr/local/lib/python3.7/dist-packages (from virtualenv>=20.0.8->pre-commit->finrl==0.3.4) (3.4.2) Requirement already satisfied: pluggy<0.8,>=0.5 in /usr/local/lib/python3.7/dist-packages (from pytest->finrl==0.3.4) (0.7.1) Requirement already satisfied: more-itertools>=4.0.0 in /usr/local/lib/python3.7/dist-packages (from pytest->finrl==0.3.4) (8.12.0) Requirement already satisfied: atomicwrites>=1.0 in /usr/local/lib/python3.7/dist-packages (from pytest->finrl==0.3.4) (1.4.0) Requirement already satisfied: py>=1.5.0 in /usr/local/lib/python3.7/dist-packages (from pytest->finrl==0.3.4) (1.11.0) Collecting redis>=3.5.0 Downloading redis-4.1.0-py3-none-any.whl (171 kB)  
|████████████████████████████████| 171 kB 77.2 MB/s [?25hRequirement already satisfied: protobuf>=3.15.3 in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.4) (3.17.3) Requirement already satisfied: grpcio>=1.28.1 in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.4) (1.43.0) Requirement already satisfied: click>=7.0 in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.4) (7.1.2) Requirement already satisfied: jsonschema in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.4) (4.3.3) Requirement already satisfied: smart-open in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.4) (5.2.1) Collecting opencensus Downloading opencensus-0.8.0-py2.py3-none-any.whl (128 kB)  |████████████████████████████████| 128 kB 80.3 MB/s [?25hCollecting aiohttp-cors Downloading aiohttp_cors-0.7.0-py3-none-any.whl (27 kB) Collecting colorful Downloading colorful-0.5.4-py2.py3-none-any.whl (201 kB)  |████████████████████████████████| 201 kB 79.8 MB/s [?25hCollecting py-spy>=0.2.0 Downloading py_spy-0.3.11-py2.py3-none-manylinux_2_5_x86_64.manylinux1_x86_64.whl (3.0 MB)  |████████████████████████████████| 3.0 MB 61.5 MB/s [?25hRequirement already satisfied: prometheus-client>=0.7.1 in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.4) (0.12.0) Collecting gpustat>=1.0.0b1 Downloading gpustat-1.0.0b1.tar.gz (82 kB)  |████████████████████████████████| 82 kB 313 kB/s [?25hCollecting aioredis<2 Downloading aioredis-1.3.1-py3-none-any.whl (65 kB)  |████████████████████████████████| 65 kB 4.5 MB/s [?25hCollecting hiredis Downloading hiredis-2.0.0-cp37-cp37m-manylinux2010_x86_64.whl (85 kB)  |████████████████████████████████| 85 kB 4.7 MB/s [?25hRequirement already satisfied: nvidia-ml-py3>=7.352.0 in /usr/local/lib/python3.7/dist-packages (from gpustat>=1.0.0b1->ray[default]->finrl==0.3.4) (7.352.0) Requirement already satisfied: psutil in /usr/local/lib/python3.7/dist-packages (from gpustat>=1.0.0b1->ray[default]->finrl==0.3.4) (5.4.8) Collecting blessed>=1.17.1 Downloading blessed-1.19.0-py2.py3-none-any.whl (57 kB)  |████████████████████████████████| 57 kB 7.1 MB/s [?25hCollecting deprecated>=1.2.3 Downloading Deprecated-1.2.13-py2.py3-none-any.whl (9.6 kB) Requirement already satisfied: packaging>=21.3 in /usr/local/lib/python3.7/dist-packages (from redis>=3.5.0->ray[default]->finrl==0.3.4) (21.3) Requirement already satisfied: wrapt<2,>=1.10 in /usr/local/lib/python3.7/dist-packages (from deprecated>=1.2.3->redis>=3.5.0->ray[default]->finrl==0.3.4) (1.13.3) Requirement already satisfied: pyrsistent!=0.17.0,!=0.17.1,!=0.17.2,>=0.14.0 in /usr/local/lib/python3.7/dist-packages (from jsonschema->ray[default]->finrl==0.3.4) (0.18.0) Requirement already satisfied: importlib-resources>=1.4.0 in /usr/local/lib/python3.7/dist-packages (from jsonschema->ray[default]->finrl==0.3.4) (5.4.0) Requirement already satisfied: google-api-core<3.0.0,>=1.0.0 in /usr/local/lib/python3.7/dist-packages (from opencensus->ray[default]->finrl==0.3.4) (1.26.3) Collecting opencensus-context==0.1.2 Downloading opencensus_context-0.1.2-py2.py3-none-any.whl (4.4 kB) Requirement already satisfied: google-auth<2.0dev,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.4) (1.35.0) Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from 
google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.4) (1.54.0) Requirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from google-auth<2.0dev,>=1.21.1->google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.4) (4.2.4) Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.7/dist-packages (from google-auth<2.0dev,>=1.21.1->google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.4) (4.8) Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from google-auth<2.0dev,>=1.21.1->google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.4) (0.2.8) Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.7/dist-packages (from pyasn1-modules>=0.2.1->google-auth<2.0dev,>=1.21.1->google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.4) (0.4.8) Requirement already satisfied: tabulate in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.4) (0.8.9) Requirement already satisfied: pillow in /usr/local/lib/python3.7/dist-packages (from stable-baselines3[extra]->finrl==0.3.4) (7.1.2) Requirement already satisfied: tensorboard>=2.2.0 in /usr/local/lib/python3.7/dist-packages (from stable-baselines3[extra]->finrl==0.3.4) (2.7.0) Requirement already satisfied: atari-py~=0.2.0 in /usr/local/lib/python3.7/dist-packages (from stable-baselines3[extra]->finrl==0.3.4) (0.2.9) Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.4) (1.0.1) Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.4) (1.8.1) Requirement already satisfied: tensorboard-data-server<0.7.0,>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.4) (0.6.1) Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.4) (0.4.6) Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.4) (3.3.6) Requirement already satisfied: absl-py>=0.4 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.4) (0.12.0) Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.4) (1.3.0) Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.4) (3.1.1) Collecting psycopg2-binary Downloading psycopg2_binary-2.9.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.0 MB)  |████████████████████████████████| 3.0 MB 30.7 MB/s [?25hCollecting mock Downloading mock-4.0.3-py3-none-any.whl (28 kB) Requirement already satisfied: multitasking>=0.0.7 in /usr/local/lib/python3.7/dist-packages (from yfinance->finrl==0.3.4) (0.0.10) Collecting requests>=2.18.4 Downloading requests-2.27.1-py2.py3-none-any.whl (63 kB)  |████████████████████████████████| 63 kB 2.1 MB/s [?25hCollecting lxml Downloading 
lxml-4.7.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl (6.4 MB)  |████████████████████████████████| 6.4 MB 65.9 MB/s [?25hBuilding wheels for collected packages: finrl, elegantrl, pyfolio, empyrical, exchange-calendars, gputil, thriftpy2, gpustat Building wheel for finrl (setup.py) ... [?25l[?25hdone Created wheel for finrl: filename=finrl-0.3.4-py3-none-any.whl size=3885434 sha256=849cbf03841a5a022427fa9f806d06b3e94a6b1b24c083a41ea6f47445b938d8 Stored in directory: /tmp/pip-ephem-wheel-cache-pv4yf2kc/wheels/17/ff/bd/1bc602a0352762b0b24041b88536d803ae343ed0a711fcf55e Building wheel for elegantrl (setup.py) ... [?25l[?25hdone Created wheel for elegantrl: filename=elegantrl-0.3.3-py3-none-any.whl size=188472 sha256=8d5083e1d626c2ef812b814b40640e39b5018c862373497d500b55728f0fb8a3 Stored in directory: /tmp/pip-ephem-wheel-cache-pv4yf2kc/wheels/99/85/5e/86cb3a9f47adfca5e248295e93113e1b298d60883126d62c84 Building wheel for pyfolio (setup.py) ... [?25l[?25hdone Created wheel for pyfolio: filename=pyfolio-0.9.2+75.g4b901f6-py3-none-any.whl size=75774 sha256=c4497ace077e94d44cf5ac597e917a70823257583d7dac76152d009c98bda297 Stored in directory: /tmp/pip-ephem-wheel-cache-pv4yf2kc/wheels/ef/09/e5/2c1bf37c050d22557c080deb1be986d06424627c04aeca19b9 Building wheel for empyrical (setup.py) ... [?25l[?25hdone Created wheel for empyrical: filename=empyrical-0.5.5-py3-none-any.whl size=39780 sha256=ae4a57d940f20813f523a30c1de536b1f2531cac23a5de0b0bf37a9d3fa12015 Stored in directory: /root/.cache/pip/wheels/d9/91/4b/654fcff57477efcf149eaca236da2fce991526cbab431bf312 Building wheel for exchange-calendars (setup.py) ... [?25l[?25hdone Created wheel for exchange-calendars: filename=exchange_calendars-3.5-py3-none-any.whl size=179486 sha256=043fd7e4bfa797afe79a9eded1762c9d4543807b9c192f2d0d99df42c3c7b2ce Stored in directory: /root/.cache/pip/wheels/69/21/43/b6ae2605dd767f6cd5a5b0b70c93a9a75823e44b3ccb92bce7 Building wheel for gputil (setup.py) ... [?25l[?25hdone Created wheel for gputil: filename=GPUtil-1.4.0-py3-none-any.whl size=7411 sha256=7a9b6d994aaf299e7a3c73dbb8a98ede27aa9199340077bd7571f3f07d6d425b Stored in directory: /root/.cache/pip/wheels/6e/f8/83/534c52482d6da64622ddbf72cd93c35d2ef2881b78fd08ff0c Building wheel for thriftpy2 (setup.py) ... [?25l[?25hdone Created wheel for thriftpy2: filename=thriftpy2-0.4.14-cp37-cp37m-linux_x86_64.whl size=944197 sha256=e3ccabc57c0d1511e6e87f870d8941748f3af10e1e9b00010db3a0692d48165d Stored in directory: /root/.cache/pip/wheels/2a/f5/49/9c0d851aa64b58db72883cf9393cc824d536bdf13f5c83cff4 Building wheel for gpustat (setup.py) ... 
[?25l[?25hdone Created wheel for gpustat: filename=gpustat-1.0.0b1-py3-none-any.whl size=15979 sha256=c470a062e2c777d4121d6f451cf0224bc94b2ac7043e66724f194cc1bed87289 Stored in directory: /root/.cache/pip/wheels/1a/16/e2/3e2437fba4c4b6a97a97bd96fce5d14e66cff5c4966fb1cc8c Successfully built finrl elegantrl pyfolio empyrical exchange-calendars gputil thriftpy2 gpustat Installing collected packages: requests, multidict, frozenlist, yarl, lxml, deprecated, asynctest, async-timeout, aiosignal, redis, pyyaml, pycares, ply, platformdirs, opencensus-context, msgpack, hiredis, distlib, blessed, aiohttp, websockets, websocket-client, virtualenv, thriftpy2, tensorboardX, stable-baselines3, ray, pymysql, pyluach, pybullet, py-spy, psycopg2-binary, opencensus, nodeenv, mock, identify, gpustat, empyrical, cryptography, colorful, cfgv, box2d-py, aioredis, aiohttp-cors, aiodns, yfinance, wrds, stockstats, pyfolio, pre-commit, lz4, jqdatasdk, gputil, exchange-calendars, elegantrl, ccxt, alpaca-trade-api, finrl Attempting uninstall: requests Found existing installation: requests 2.23.0 Uninstalling requests-2.23.0: Successfully uninstalled requests-2.23.0 Attempting uninstall: lxml Found existing installation: lxml 4.2.6 Uninstalling lxml-4.2.6: Successfully uninstalled lxml-4.2.6 Attempting uninstall: pyyaml Found existing installation: PyYAML 3.13 Uninstalling PyYAML-3.13: Successfully uninstalled PyYAML-3.13 Attempting uninstall: msgpack Found existing installation: msgpack 1.0.3 Uninstalling msgpack-1.0.3: Successfully uninstalled msgpack-1.0.3 ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts. google-colab 1.0.0 requires requests~=2.23.0, but you have requests 2.27.1 which is incompatible. datascience 0.10.6 requires folium==0.2.1, but you have folium 0.8.3 which is incompatible. Successfully installed aiodns-3.0.0 aiohttp-3.8.1 aiohttp-cors-0.7.0 aioredis-1.3.1 aiosignal-1.2.0 alpaca-trade-api-1.2.3 async-timeout-4.0.2 asynctest-0.13.0 blessed-1.19.0 box2d-py-2.3.8 ccxt-1.67.31 cfgv-3.3.1 colorful-0.5.4 cryptography-36.0.1 deprecated-1.2.13 distlib-0.3.4 elegantrl-0.3.3 empyrical-0.5.5 exchange-calendars-3.5 finrl-0.3.4 frozenlist-1.2.0 gpustat-1.0.0b1 gputil-1.4.0 hiredis-2.0.0 identify-2.4.3 jqdatasdk-1.8.10 lxml-4.7.1 lz4-3.1.10 mock-4.0.3 msgpack-1.0.2 multidict-5.2.0 nodeenv-1.6.0 opencensus-0.8.0 opencensus-context-0.1.2 platformdirs-2.4.1 ply-3.11 pre-commit-2.16.0 psycopg2-binary-2.9.3 py-spy-0.3.11 pybullet-3.2.1 pycares-4.1.2 pyfolio-0.9.2+75.g4b901f6 pyluach-1.3.0 pymysql-1.0.2 pyyaml-6.0 ray-1.9.2 redis-4.1.0 requests-2.27.1 stable-baselines3-1.3.0 stockstats-0.4.1 tensorboardX-2.4.1 thriftpy2-0.4.14 virtualenv-20.13.0 websocket-client-1.2.3 websockets-9.1 wrds-3.1.1 yarl-1.7.2 yfinance-0.1.69 ###Markdown 2.2. Check if the additional packages needed are present, if not install them. * Yahoo Finance API* pandas* numpy* matplotlib* stockstats* OpenAI gym* stable-baselines* tensorflow* pyfolio 2.3. 
Import Packages ###Code import pandas as pd import numpy as np import matplotlib import matplotlib.pyplot as plt # matplotlib.use('Agg') import datetime %matplotlib inline from finrl import config from finrl import config_tickers from finrl.finrl_meta.preprocessor.yahoodownloader import YahooDownloader from finrl.finrl_meta.preprocessor.preprocessors import FeatureEngineer, data_split from finrl.finrl_meta.env_stock_trading.env_stocktrading import StockTradingEnv from finrl.agents.stablebaselines3.models import DRLAgent from finrl.plot import backtest_stats, backtest_plot, get_daily_return, get_baseline from pprint import pprint import sys sys.path.append("../FinRL-Library") import itertools ###Output /usr/local/lib/python3.7/dist-packages/pyfolio/pos.py:27: UserWarning: Module "zipline.assets" not found; multipliers will not be applied to position notionals. 'Module "zipline.assets" not found; multipliers will not be applied' ###Markdown 2.4. Create Folders ###Code import os if not os.path.exists("./" + config.DATA_SAVE_DIR): os.makedirs("./" + config.DATA_SAVE_DIR) if not os.path.exists("./" + config.TRAINED_MODEL_DIR): os.makedirs("./" + config.TRAINED_MODEL_DIR) if not os.path.exists("./" + config.TENSORBOARD_LOG_DIR): os.makedirs("./" + config.TENSORBOARD_LOG_DIR) if not os.path.exists("./" + config.RESULTS_DIR): os.makedirs("./" + config.RESULTS_DIR) ###Output _____no_output_____ ###Markdown Part 3. Download Stock Data from Yahoo FinanceYahoo Finance is a website that provides stock data, financial news, financial reports, etc. All the data provided by Yahoo Finance is free.* FinRL uses a class **YahooDownloader** to fetch data from Yahoo Finance API* Call Limit: Using the Public API (without authentication), you are limited to 2,000 requests per hour per IP (or up to a total of 48,000 requests a day). -----class YahooDownloader: Provides methods for retrieving daily stock data from Yahoo Finance API Attributes ---------- start_date : str start date of the data (modified from config.py) end_date : str end date of the data (modified from config.py) ticker_list : list a list of stock tickers (modified from config.py) Methods ------- fetch_data() Fetches data from yahoo API ###Code # from config.py start_date is a string config.START_DATE # from config.py end_date is a string config.END_DATE print(config_tickers.DOW_30_TICKER) df = YahooDownloader(start_date = '2009-01-01', end_date = '2021-01-01', ticker_list = config_tickers.DOW_30_TICKER).fetch_data() df.shape df.head() df['date'] = pd.to_datetime(df['date'],format='%Y-%m-%d') df.sort_values(['date','tic'],ignore_index=True).head() ###Output _____no_output_____ ###Markdown Part 4: Preprocess fundamental data- Import finanical data downloaded from Compustat via WRDS(Wharton Research Data Service)- Preprocess the dataset and calculate financial ratios- Add those ratios to the price data preprocessed in Part 3- Calculate price-related ratios such as P/E and P/B 4-1 Import the financial data ###Code # Import fundamental data from my GitHub repository url = 'https://raw.githubusercontent.com/mariko-sawada/FinRL_with_fundamental_data/main/dow_30_fundamental_wrds.csv' fund = pd.read_csv(url) # Check the imported dataset fund.head() ###Output _____no_output_____ ###Markdown 4-2 Specify items needed to calculate financial ratios- To know more about the data description of the dataset, please check WRDS's website(https://wrds-www.wharton.upenn.edu/). Login will be required. 
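Before narrowing the dataset down to the specific items used in the next cell, it is worth confirming that the raw Compustat extract actually contains them and checking how sparse the data is. The cell below is a minimal inspection sketch, assuming only the fund dataframe imported above together with its datadate and tic columns. ###Code
# Quick look at the raw fundamental data before selecting items
print(fund.shape)                                      # quarterly records x columns
print(fund['tic'].nunique())                           # number of tickers covered
print(fund['datadate'].min(), fund['datadate'].max())  # reporting date range

# Share of missing values per column, highest first
print(fund.isna().mean().sort_values(ascending=False).head(10))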
###Code # List items that are used to calculate financial ratios items = [ 'datadate', # Date 'tic', # Ticker 'oiadpq', # Quarterly operating income 'revtq', # Quartely revenue 'niq', # Quartely net income 'atq', # Total asset 'teqq', # Shareholder's equity 'epspiy', # EPS(Basic) incl. Extraordinary items 'ceqq', # Common Equity 'cshoq', # Common Shares Outstanding 'dvpspq', # Dividends per share 'actq', # Current assets 'lctq', # Current liabilities 'cheq', # Cash & Equivalent 'rectq', # Recievalbles 'cogsq', # Cost of Goods Sold 'invtq', # Inventories 'apq',# Account payable 'dlttq', # Long term debt 'dlcq', # Debt in current liabilites 'ltq' # Liabilities ] # Omit items that will not be used fund_data = fund[items] # Rename column names for the sake of readability fund_data = fund_data.rename(columns={ 'datadate':'date', # Date 'oiadpq':'op_inc_q', # Quarterly operating income 'revtq':'rev_q', # Quartely revenue 'niq':'net_inc_q', # Quartely net income 'atq':'tot_assets', # Assets 'teqq':'sh_equity', # Shareholder's equity 'epspiy':'eps_incl_ex', # EPS(Basic) incl. Extraordinary items 'ceqq':'com_eq', # Common Equity 'cshoq':'sh_outstanding', # Common Shares Outstanding 'dvpspq':'div_per_sh', # Dividends per share 'actq':'cur_assets', # Current assets 'lctq':'cur_liabilities', # Current liabilities 'cheq':'cash_eq', # Cash & Equivalent 'rectq':'receivables', # Receivalbles 'cogsq':'cogs_q', # Cost of Goods Sold 'invtq':'inventories', # Inventories 'apq': 'payables',# Account payable 'dlttq':'long_debt', # Long term debt 'dlcq':'short_debt', # Debt in current liabilites 'ltq':'tot_liabilities' # Liabilities }) # Check the data fund_data.head() ###Output _____no_output_____ ###Markdown 4-3 Calculate financial ratios- For items from Profit/Loss statements, we calculate LTM (Last Twelve Months) and use them to derive profitability related ratios such as Operating Maring and ROE. 
For items from balance sheets, we use the numbers on the day.- To check the definitions of the financial ratios calculated here, please refer to CFI's website: https://corporatefinanceinstitute.com/resources/knowledge/finance/financial-ratios/ ###Code # Calculate financial ratios date = pd.to_datetime(fund_data['date'],format='%Y%m%d') tic = fund_data['tic'].to_frame('tic') # Profitability ratios # Operating Margin OPM = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='OPM') for i in range(0, fund_data.shape[0]): if i-3 < 0: OPM[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: OPM.iloc[i] = np.nan else: OPM.iloc[i] = np.sum(fund_data['op_inc_q'].iloc[i-3:i])/np.sum(fund_data['rev_q'].iloc[i-3:i]) # Net Profit Margin NPM = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='NPM') for i in range(0, fund_data.shape[0]): if i-3 < 0: NPM[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: NPM.iloc[i] = np.nan else: NPM.iloc[i] = np.sum(fund_data['net_inc_q'].iloc[i-3:i])/np.sum(fund_data['rev_q'].iloc[i-3:i]) # Return On Assets ROA = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='ROA') for i in range(0, fund_data.shape[0]): if i-3 < 0: ROA[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: ROA.iloc[i] = np.nan else: ROA.iloc[i] = np.sum(fund_data['net_inc_q'].iloc[i-3:i])/fund_data['tot_assets'].iloc[i] # Return on Equity ROE = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='ROE') for i in range(0, fund_data.shape[0]): if i-3 < 0: ROE[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: ROE.iloc[i] = np.nan else: ROE.iloc[i] = np.sum(fund_data['net_inc_q'].iloc[i-3:i])/fund_data['sh_equity'].iloc[i] # For calculating valuation ratios in the next subpart, calculate per share items in advance # Earnings Per Share EPS = fund_data['eps_incl_ex'].to_frame('EPS') # Book Per Share BPS = (fund_data['com_eq']/fund_data['sh_outstanding']).to_frame('BPS') # Need to check units #Dividend Per Share DPS = fund_data['div_per_sh'].to_frame('DPS') # Liquidity ratios # Current ratio cur_ratio = (fund_data['cur_assets']/fund_data['cur_liabilities']).to_frame('cur_ratio') # Quick ratio quick_ratio = ((fund_data['cash_eq'] + fund_data['receivables'] )/fund_data['cur_liabilities']).to_frame('quick_ratio') # Cash ratio cash_ratio = (fund_data['cash_eq']/fund_data['cur_liabilities']).to_frame('cash_ratio') # Efficiency ratios # Inventory turnover ratio inv_turnover = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='inv_turnover') for i in range(0, fund_data.shape[0]): if i-3 < 0: inv_turnover[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: inv_turnover.iloc[i] = np.nan else: inv_turnover.iloc[i] = np.sum(fund_data['cogs_q'].iloc[i-3:i])/fund_data['inventories'].iloc[i] # Receivables turnover ratio acc_rec_turnover = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='acc_rec_turnover') for i in range(0, fund_data.shape[0]): if i-3 < 0: acc_rec_turnover[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: acc_rec_turnover.iloc[i] = np.nan else: acc_rec_turnover.iloc[i] = np.sum(fund_data['rev_q'].iloc[i-3:i])/fund_data['receivables'].iloc[i] # Payable turnover ratio acc_pay_turnover = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='acc_pay_turnover') for i in range(0, fund_data.shape[0]): if i-3 < 0: acc_pay_turnover[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: acc_pay_turnover.iloc[i] = np.nan else: acc_pay_turnover.iloc[i] = 
np.sum(fund_data['cogs_q'].iloc[i-3:i])/fund_data['payables'].iloc[i] ## Leverage financial ratios # Debt ratio debt_ratio = (fund_data['tot_liabilities']/fund_data['tot_assets']).to_frame('debt_ratio') # Debt to Equity ratio debt_to_equity = (fund_data['tot_liabilities']/fund_data['sh_equity']).to_frame('debt_to_equity') # Create a dataframe that merges all the ratios ratios = pd.concat([date,tic,OPM,NPM,ROA,ROE,EPS,BPS,DPS, cur_ratio,quick_ratio,cash_ratio,inv_turnover,acc_rec_turnover,acc_pay_turnover, debt_ratio,debt_to_equity], axis=1) # Check the ratio data ratios.head() ratios.tail() ###Output _____no_output_____ ###Markdown 4-4 Deal with NAs and infinite values- We replace N/A and infinite values with zero so that they can be recognized as a state ###Code # Replace NAs infinite values with zero final_ratios = ratios.copy() final_ratios = final_ratios.fillna(0) final_ratios = final_ratios.replace(np.inf,0) final_ratios.head() final_ratios.tail() ###Output _____no_output_____ ###Markdown 4-5 Merge stock price data and ratios into one dataframe- Merge the price dataframe preprocessed in Part 3 and the ratio dataframe created in this part- Since the prices are daily and ratios are quartely, we have NAs in the ratio columns after merging the two dataframes. We deal with this by backfilling the ratios. ###Code list_ticker = df["tic"].unique().tolist() list_date = list(pd.date_range(df['date'].min(),df['date'].max())) combination = list(itertools.product(list_date,list_ticker)) # Merge stock price data and ratios into one dataframe processed_full = pd.DataFrame(combination,columns=["date","tic"]).merge(df,on=["date","tic"],how="left") processed_full = processed_full.merge(final_ratios,how='left',on=['date','tic']) processed_full = processed_full.sort_values(['tic','date']) # Backfill the ratio data to make them daily processed_full = processed_full.bfill(axis='rows') ###Output _____no_output_____ ###Markdown 4-6 Calculate market valuation ratios using daily stock price data ###Code # Calculate P/E, P/B and dividend yield using daily closing price processed_full['PE'] = processed_full['close']/processed_full['EPS'] processed_full['PB'] = processed_full['close']/processed_full['BPS'] processed_full['Div_yield'] = processed_full['DPS']/processed_full['close'] # Drop per share items used for the above calculation processed_full = processed_full.drop(columns=['day','EPS','BPS','DPS']) # Replace NAs infinite values with zero processed_full = processed_full.copy() processed_full = processed_full.fillna(0) processed_full = processed_full.replace(np.inf,0) # Check the final data processed_full.sort_values(['date','tic'],ignore_index=True).head(10) ###Output _____no_output_____ ###Markdown Part 5. Design EnvironmentConsidering the stochastic and interactive nature of the automated stock trading tasks, a financial task is modeled as a **Markov Decision Process (MDP)** problem. The training process involves observing stock price change, taking an action and reward's calculation to have the agent adjusting its strategy accordingly. By interacting with the environment, the trading agent will derive a trading strategy with the maximized rewards as time proceeds.Our trading environments, based on OpenAI Gym framework, simulate live stock markets with real market data according to the principle of time-driven simulation.The action space describes the allowed actions that the agent interacts with the environment. 
Normally, action a includes three actions: {-1, 0, 1}, where -1, 0, 1 represent selling, holding, and buying one share. Also, an action can be carried upon multiple shares. We use an action space {-k,…,-1, 0, 1, …, k}, where k denotes the number of shares to buy and -k denotes the number of shares to sell. For example, "Buy 10 shares of AAPL" or "Sell 10 shares of AAPL" are 10 or -10, respectively. The continuous action space needs to be normalized to [-1, 1], since the policy is defined on a Gaussian distribution, which needs to be normalized and symmetric. 5-1 Split data into training and trade dataset- Training data split: 2009-01-01 to 2018-12-31- Trade data split: 2019-01-01 to 2020-09-30 ###Code train = data_split(processed_full, '2009-01-01','2019-01-01') trade = data_split(processed_full, '2019-01-01','2021-01-01') # Check the length of the two datasets print(len(train)) print(len(trade)) train.head() trade.head() ###Output _____no_output_____ ###Markdown 5-2 Set up the training environment ###Code import gym import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd from gym import spaces from gym.utils import seeding from stable_baselines3.common.vec_env import DummyVecEnv matplotlib.use("Agg") # from stable_baselines3.common import logger class StockTradingEnv(gym.Env): """A stock trading environment for OpenAI gym""" metadata = {"render.modes": ["human"]} def __init__( self, df, stock_dim, hmax, initial_amount, buy_cost_pct, sell_cost_pct, reward_scaling, state_space, action_space, tech_indicator_list, turbulence_threshold=None, risk_indicator_col="turbulence", make_plots=False, print_verbosity=10, day=0, initial=True, previous_state=[], model_name="", mode="", iteration="", ): self.day = day self.df = df self.stock_dim = stock_dim self.hmax = hmax self.initial_amount = initial_amount self.buy_cost_pct = buy_cost_pct self.sell_cost_pct = sell_cost_pct self.reward_scaling = reward_scaling self.state_space = state_space self.action_space = action_space self.tech_indicator_list = tech_indicator_list self.action_space = spaces.Box(low=-1, high=1, shape=(self.action_space,)) self.observation_space = spaces.Box( low=-np.inf, high=np.inf, shape=(self.state_space,) ) self.data = self.df.loc[self.day, :] self.terminal = False self.make_plots = make_plots self.print_verbosity = print_verbosity self.turbulence_threshold = turbulence_threshold self.risk_indicator_col = risk_indicator_col self.initial = initial self.previous_state = previous_state self.model_name = model_name self.mode = mode self.iteration = iteration # initalize state self.state = self._initiate_state() # initialize reward self.reward = 0 self.turbulence = 0 self.cost = 0 self.trades = 0 self.episode = 0 # memorize all the total balance change self.asset_memory = [self.initial_amount] self.rewards_memory = [] self.actions_memory = [] self.date_memory = [self._get_date()] # self.reset() self._seed() def _sell_stock(self, index, action): def _do_sell_normal(): if self.state[index + 1] > 0: # Sell only if the price is > 0 (no missing data in this particular date) # perform sell action based on the sign of the action if self.state[index + self.stock_dim + 1] > 0: # Sell only if current asset is > 0 sell_num_shares = min( abs(action), self.state[index + self.stock_dim + 1] ) sell_amount = ( self.state[index + 1] * sell_num_shares * (1 - self.sell_cost_pct) ) # update balance self.state[0] += sell_amount self.state[index + self.stock_dim + 1] -= sell_num_shares self.cost += ( self.state[index + 1] 
* sell_num_shares * self.sell_cost_pct ) self.trades += 1 else: sell_num_shares = 0 else: sell_num_shares = 0 return sell_num_shares # perform sell action based on the sign of the action if self.turbulence_threshold is not None: if self.turbulence >= self.turbulence_threshold: if self.state[index + 1] > 0: # Sell only if the price is > 0 (no missing data in this particular date) # if turbulence goes over threshold, just clear out all positions if self.state[index + self.stock_dim + 1] > 0: # Sell only if current asset is > 0 sell_num_shares = self.state[index + self.stock_dim + 1] sell_amount = ( self.state[index + 1] * sell_num_shares * (1 - self.sell_cost_pct) ) # update balance self.state[0] += sell_amount self.state[index + self.stock_dim + 1] = 0 self.cost += ( self.state[index + 1] * sell_num_shares * self.sell_cost_pct ) self.trades += 1 else: sell_num_shares = 0 else: sell_num_shares = 0 else: sell_num_shares = _do_sell_normal() else: sell_num_shares = _do_sell_normal() return sell_num_shares def _buy_stock(self, index, action): def _do_buy(): if self.state[index + 1] > 0: # Buy only if the price is > 0 (no missing data in this particular date) available_amount = self.state[0] // self.state[index + 1] # print('available_amount:{}'.format(available_amount)) # update balance buy_num_shares = min(available_amount, action) buy_amount = ( self.state[index + 1] * buy_num_shares * (1 + self.buy_cost_pct) ) self.state[0] -= buy_amount self.state[index + self.stock_dim + 1] += buy_num_shares self.cost += self.state[index + 1] * buy_num_shares * self.buy_cost_pct self.trades += 1 else: buy_num_shares = 0 return buy_num_shares # perform buy action based on the sign of the action if self.turbulence_threshold is None: buy_num_shares = _do_buy() else: if self.turbulence < self.turbulence_threshold: buy_num_shares = _do_buy() else: buy_num_shares = 0 pass return buy_num_shares def _make_plot(self): plt.plot(self.asset_memory, "r") plt.savefig("results/account_value_trade_{}.png".format(self.episode)) plt.close() def step(self, actions): self.terminal = self.day >= len(self.df.index.unique()) - 1 if self.terminal: # print(f"Episode: {self.episode}") if self.make_plots: self._make_plot() end_total_asset = self.state[0] + sum( np.array(self.state[1 : (self.stock_dim + 1)]) * np.array(self.state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)]) ) df_total_value = pd.DataFrame(self.asset_memory) tot_reward = ( self.state[0] + sum( np.array(self.state[1 : (self.stock_dim + 1)]) * np.array( self.state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)] ) ) - self.initial_amount ) df_total_value.columns = ["account_value"] df_total_value["date"] = self.date_memory df_total_value["daily_return"] = df_total_value["account_value"].pct_change( 1 ) if df_total_value["daily_return"].std() != 0: sharpe = ( (252 ** 0.5) * df_total_value["daily_return"].mean() / df_total_value["daily_return"].std() ) df_rewards = pd.DataFrame(self.rewards_memory) df_rewards.columns = ["account_rewards"] df_rewards["date"] = self.date_memory[:-1] if self.episode % self.print_verbosity == 0: print(f"day: {self.day}, episode: {self.episode}") print(f"begin_total_asset: {self.asset_memory[0]:0.2f}") print(f"end_total_asset: {end_total_asset:0.2f}") print(f"total_reward: {tot_reward:0.2f}") print(f"total_cost: {self.cost:0.2f}") print(f"total_trades: {self.trades}") if df_total_value["daily_return"].std() != 0: print(f"Sharpe: {sharpe:0.3f}") print("=================================") if (self.model_name != "") and (self.mode != ""): 
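# When a model name and mode are provided, persist the episode's actions, account values, and rewards to ./results for later inspection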
df_actions = self.save_action_memory() df_actions.to_csv( "results/actions_{}_{}_{}.csv".format( self.mode, self.model_name, self.iteration ) ) df_total_value.to_csv( "results/account_value_{}_{}_{}.csv".format( self.mode, self.model_name, self.iteration ), index=False, ) df_rewards.to_csv( "results/account_rewards_{}_{}_{}.csv".format( self.mode, self.model_name, self.iteration ), index=False, ) plt.plot(self.asset_memory, "r") plt.savefig( "results/account_value_{}_{}_{}.png".format( self.mode, self.model_name, self.iteration ), index=False, ) plt.close() # Add outputs to logger interface # logger.record("environment/portfolio_value", end_total_asset) # logger.record("environment/total_reward", tot_reward) # logger.record("environment/total_reward_pct", (tot_reward / (end_total_asset - tot_reward)) * 100) # logger.record("environment/total_cost", self.cost) # logger.record("environment/total_trades", self.trades) return self.state, self.reward, self.terminal, {} else: actions = actions * self.hmax # actions initially is scaled between 0 to 1 actions = actions.astype( int ) # convert into integer because we can't by fraction of shares if self.turbulence_threshold is not None: if self.turbulence >= self.turbulence_threshold: actions = np.array([-self.hmax] * self.stock_dim) begin_total_asset = self.state[0] + sum( np.array(self.state[1 : (self.stock_dim + 1)]) * np.array(self.state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)]) ) # print("begin_total_asset:{}".format(begin_total_asset)) argsort_actions = np.argsort(actions) sell_index = argsort_actions[: np.where(actions < 0)[0].shape[0]] buy_index = argsort_actions[::-1][: np.where(actions > 0)[0].shape[0]] for index in sell_index: # print(f"Num shares before: {self.state[index+self.stock_dim+1]}") # print(f'take sell action before : {actions[index]}') actions[index] = self._sell_stock(index, actions[index]) * (-1) # print(f'take sell action after : {actions[index]}') # print(f"Num shares after: {self.state[index+self.stock_dim+1]}") for index in buy_index: # print('take buy action: {}'.format(actions[index])) actions[index] = self._buy_stock(index, actions[index]) self.actions_memory.append(actions) # state: s -> s+1 self.day += 1 self.data = self.df.loc[self.day, :] if self.turbulence_threshold is not None: if len(self.df.tic.unique()) == 1: self.turbulence = self.data[self.risk_indicator_col] elif len(self.df.tic.unique()) > 1: self.turbulence = self.data[self.risk_indicator_col].values[0] self.state = self._update_state() end_total_asset = self.state[0] + sum( np.array(self.state[1 : (self.stock_dim + 1)]) * np.array(self.state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)]) ) self.asset_memory.append(end_total_asset) self.date_memory.append(self._get_date()) self.reward = end_total_asset - begin_total_asset self.rewards_memory.append(self.reward) self.reward = self.reward * self.reward_scaling return self.state, self.reward, self.terminal, {} def reset(self): # initiate state self.state = self._initiate_state() if self.initial: self.asset_memory = [self.initial_amount] else: previous_total_asset = self.previous_state[0] + sum( np.array(self.state[1 : (self.stock_dim + 1)]) * np.array( self.previous_state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)] ) ) self.asset_memory = [previous_total_asset] self.day = 0 self.data = self.df.loc[self.day, :] self.turbulence = 0 self.cost = 0 self.trades = 0 self.terminal = False # self.iteration=self.iteration self.rewards_memory = [] self.actions_memory = [] self.date_memory = 
[self._get_date()] self.episode += 1 return self.state def render(self, mode="human", close=False): return self.state def _initiate_state(self): if self.initial: # For Initial State if len(self.df.tic.unique()) > 1: # for multiple stock state = ( [self.initial_amount] + self.data.close.values.tolist() + [0] * self.stock_dim + sum( [ self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [], ) ) else: # for single stock state = ( [self.initial_amount] + [self.data.close] + [0] * self.stock_dim + sum([[self.data[tech]] for tech in self.tech_indicator_list], []) ) else: # Using Previous State if len(self.df.tic.unique()) > 1: # for multiple stock state = ( [self.previous_state[0]] + self.data.close.values.tolist() + self.previous_state[ (self.stock_dim + 1) : (self.stock_dim * 2 + 1) ] + sum( [ self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [], ) ) else: # for single stock state = ( [self.previous_state[0]] + [self.data.close] + self.previous_state[ (self.stock_dim + 1) : (self.stock_dim * 2 + 1) ] + sum([[self.data[tech]] for tech in self.tech_indicator_list], []) ) return state def _update_state(self): if len(self.df.tic.unique()) > 1: # for multiple stock state = ( [self.state[0]] + self.data.close.values.tolist() + list(self.state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)]) + sum( [ self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [], ) ) else: # for single stock state = ( [self.state[0]] + [self.data.close] + list(self.state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)]) + sum([[self.data[tech]] for tech in self.tech_indicator_list], []) ) return state def _get_date(self): if len(self.df.tic.unique()) > 1: date = self.data.date.unique()[0] else: date = self.data.date return date def save_asset_memory(self): date_list = self.date_memory asset_list = self.asset_memory # print(len(date_list)) # print(len(asset_list)) df_account_value = pd.DataFrame( {"date": date_list, "account_value": asset_list} ) return df_account_value def save_action_memory(self): if len(self.df.tic.unique()) > 1: # date and close price length must match actions length date_list = self.date_memory[:-1] df_date = pd.DataFrame(date_list) df_date.columns = ["date"] action_list = self.actions_memory df_actions = pd.DataFrame(action_list) df_actions.columns = self.data.tic.values df_actions.index = df_date.date # df_actions = pd.DataFrame({'date':date_list,'actions':action_list}) else: date_list = self.date_memory[:-1] action_list = self.actions_memory df_actions = pd.DataFrame({"date": date_list, "actions": action_list}) return df_actions def _seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] def get_sb_env(self): e = DummyVecEnv([lambda: self]) obs = e.reset() return e, obs ratio_list = ['OPM', 'NPM','ROA', 'ROE', 'cur_ratio', 'quick_ratio', 'cash_ratio', 'inv_turnover','acc_rec_turnover', 'acc_pay_turnover', 'debt_ratio', 'debt_to_equity', 'PE', 'PB', 'Div_yield'] stock_dimension = len(train.tic.unique()) state_space = 1 + 2*stock_dimension + len(ratio_list)*stock_dimension print(f"Stock Dimension: {stock_dimension}, State Space: {state_space}") # Parameters for the environment env_kwargs = { "hmax": 100, "initial_amount": 1000000, "buy_cost_pct": 0.001, "sell_cost_pct": 0.001, "state_space": state_space, "stock_dim": stock_dimension, "tech_indicator_list": ratio_list, "action_space": stock_dimension, "reward_scaling": 1e-4 } #Establish the training environment using StockTradingEnv() class e_train_gym = 
StockTradingEnv(df = train, **env_kwargs) ###Output _____no_output_____ ###Markdown Environment for Training ###Code env_train, _ = e_train_gym.get_sb_env() print(type(env_train)) ###Output <class 'stable_baselines3.common.vec_env.dummy_vec_env.DummyVecEnv'> ###Markdown Part 6: Implement DRL Algorithms* The implementation of the DRL algorithms are based on **OpenAI Baselines** and **Stable Baselines**. Stable Baselines is a fork of OpenAI Baselines, with a major structural refactoring, and code cleanups.* FinRL library includes fine-tuned standard DRL algorithms, such as DQN, DDPG,Multi-Agent DDPG, PPO, SAC, A2C and TD3. We also allow users todesign their own DRL algorithms by adapting these DRL algorithms. ###Code # Set up the agent using DRLAgent() class using the environment created in the previous part agent = DRLAgent(env = env_train) ###Output _____no_output_____ ###Markdown Model Training: 5 models, A2C DDPG, PPO, TD3, SAC Model 1: A2C ###Code agent = DRLAgent(env = env_train) model_a2c = agent.get_model("a2c") trained_a2c = agent.train_model(model=model_a2c, tb_log_name='a2c', total_timesteps=100000) ###Output ----------------------------------------- | time/ | | | fps | 85 | | iterations | 100 | | time_elapsed | 5 | | total_timesteps | 500 | | train/ | | | entropy_loss | -42.7 | | explained_variance | 0.00025 | | learning_rate | 0.0007 | | n_updates | 99 | | policy_loss | 72.9 | | reward | -0.0017323004 | | std | 1 | | value_loss | 5 | ----------------------------------------- ------------------------------------- | time/ | | | fps | 86 | | iterations | 200 | | time_elapsed | 11 | | total_timesteps | 1000 | | train/ | | | entropy_loss | -42.7 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 199 | | policy_loss | 33.4 | | reward | 1.0503633 | | std | 1 | | value_loss | 4.55 | ------------------------------------- ----------------------------------------- | time/ | | | fps | 87 | | iterations | 300 | | time_elapsed | 17 | | total_timesteps | 1500 | | train/ | | | entropy_loss | -42.7 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 299 | | policy_loss | 19.1 | | reward | -0.0030366322 | | std | 1.01 | | value_loss | 0.988 | ----------------------------------------- ------------------------------------- | time/ | | | fps | 87 | | iterations | 400 | | time_elapsed | 22 | | total_timesteps | 2000 | | train/ | | | entropy_loss | -42.8 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 399 | | policy_loss | 29.4 | | reward | 1.5995015 | | std | 1.01 | | value_loss | 1.41 | ------------------------------------- ###Markdown Model 2: DDPG ###Code agent = DRLAgent(env = env_train) model_ddpg = agent.get_model("ddpg") trained_ddpg = agent.train_model(model=model_ddpg, tb_log_name='ddpg', total_timesteps=50000) ###Output _____no_output_____ ###Markdown Model 3: PPO ###Code agent = DRLAgent(env = env_train) PPO_PARAMS = { "n_steps": 2048, "ent_coef": 0.01, "learning_rate": 0.00025, "batch_size": 128, } model_ppo = agent.get_model("ppo",model_kwargs = PPO_PARAMS) trained_ppo = agent.train_model(model=model_ppo, tb_log_name='ppo', total_timesteps=50000) ###Output _____no_output_____ ###Markdown Model 4: TD3 ###Code agent = DRLAgent(env = env_train) TD3_PARAMS = {"batch_size": 100, "buffer_size": 1000000, "learning_rate": 0.001} model_td3 = agent.get_model("td3",model_kwargs = TD3_PARAMS) trained_td3 = agent.train_model(model=model_td3, tb_log_name='td3', total_timesteps=30000) ###Output _____no_output_____ ###Markdown Model 5: 
SAC ###Code agent = DRLAgent(env = env_train) SAC_PARAMS = { "batch_size": 128, "buffer_size": 1000000, "learning_rate": 0.0001, "learning_starts": 100, "ent_coef": "auto_0.1", } model_sac = agent.get_model("sac",model_kwargs = SAC_PARAMS) trained_sac = agent.train_model(model=model_sac, tb_log_name='sac', total_timesteps=80000) ###Output _____no_output_____ ###Markdown TradingAssume that we have $1,000,000 initial capital at 2019-01-01. We use the DDPG model to trade Dow jones 30 stocks. TradeDRL model needs to update periodically in order to take full advantage of the data, ideally we need to retrain our model yearly, quarterly, or monthly. We also need to tune the parameters along the way, in this notebook I only use the in-sample data from 2009-01 to 2018-12 to tune the parameters once, so there is some alpha decay here as the length of trade date extends. Numerous hyperparameters – e.g. the learning rate, the total number of samples to train on – influence the learning process and are usually determined by testing some variations. ###Code trade = data_split(processed_full, '2019-01-01','2021-01-01') e_trade_gym = StockTradingEnv(df = trade, **env_kwargs) # env_trade, obs_trade = e_trade_gym.get_sb_env() trade.head() df_account_value, df_actions = DRLAgent.DRL_prediction( model=trained_ddpg, environment = e_trade_gym) df_account_value.shape df_account_value.tail() df_actions.head() ###Output _____no_output_____ ###Markdown Part 7: Backtest Our StrategyBacktesting plays a key role in evaluating the performance of a trading strategy. Automated backtesting tool is preferred because it reduces the human error. We usually use the Quantopian pyfolio package to backtest our trading strategies. It is easy to use and consists of various individual plots that provide a comprehensive image of the performance of a trading strategy. 
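As a quick cross-check before running the full pyfolio report, a few summary numbers can be computed directly from the account-value curve. The sketch below is a minimal example, assuming `df_account_value` from the prediction step above (with columns `date` and `account_value`); it mirrors the annualized Sharpe formula used inside the environment's `step()` method and is not a substitute for `backtest_stats`.
###Code
# Minimal sanity check on the account-value curve (assumes df_account_value is in memory)
daily_return = df_account_value["account_value"].pct_change().dropna()

# Cumulative return over the trade window
cum_return = (df_account_value["account_value"].iloc[-1]
              / df_account_value["account_value"].iloc[0]) - 1

# Annualized Sharpe ratio, same formula as used in StockTradingEnv.step()
sharpe = (252 ** 0.5) * daily_return.mean() / daily_return.std()

print(f"Cumulative return: {cum_return:.2%}")
print(f"Annualized Sharpe: {sharpe:.2f}")
###Output
_____no_output_____
###Markdown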
7.1 BackTestStatspass in df_account_value, this information is stored in env class ###Code print("==============Get Backtest Results===========") now = datetime.datetime.now().strftime('%Y%m%d-%Hh%M') perf_stats_all = backtest_stats(account_value=df_account_value) perf_stats_all = pd.DataFrame(perf_stats_all) perf_stats_all.to_csv("./"+config.RESULTS_DIR+"/perf_stats_all_"+now+'.csv') #baseline stats print("==============Get Baseline Stats===========") baseline_df = get_baseline( ticker="^DJI", start = '2019-01-01', end = '2021-01-01') stats = backtest_stats(baseline_df, value_col_name = 'close') ###Output _____no_output_____ ###Markdown 7.2 BackTestPlot ###Code print("==============Compare to DJIA===========") %matplotlib inline # S&P 500: ^GSPC # Dow Jones Index: ^DJI # NASDAQ 100: ^NDX backtest_plot(df_account_value, baseline_ticker = '^DJI', baseline_start = '2019-01-01', baseline_end = '2021-01-01') ###Output _____no_output_____ ###Markdown Automated stock trading using FinRL with financial dataTrained a Deep Reinforcement Learning model using FinRL and companies' financial ratio, and then backtested the model to examine how well-trained the model is* This Google Colabolatory notebook is based on the tutorial of FinRL: https://towardsdatascience.com/finrl-for-quantitative-finance-tutorial-for-multiple-stock-trading-7b00763b7530* This project is a final project of the almuni-mentored research project at Columbia University, Application of Reinforcement Learning to Finance, mentored by Bruce Yang from AI4Finance.* For more detailed explanation, please check out my Medium post: https://medium.com/@mariko.sawada1/automated-stock-trading-with-deep-reinforcement-learning-and-financial-data-a63286ccbe2b Content * [1. Problem Definition](0)* [2. Getting Started - Load Python packages](1) * [2.1. Install Packages](1.1) * [2.2. Check Additional Packages](1.2) * [2.3. Import Packages](1.3) * [2.4. Create Folders](1.4)* [3. Download Data](2)* [4. Preprocess fundamental Data](3) * [4-1 Import financial data](3.1) * [4-2 Specify items needed to calculate financial ratios](3.2) * [4-3 Calculate financial ratios](3.3) * [4-4 Deal with NAs and infinite values](3.4) * [4-5 Merge stock price data and ratios into one dataframe](3.5) * [4-6 Calculate market valuation ratios using daily stock price data](3.6)* [5.Build Environment](4) * [5.1. Training & Trade Data Split](4.1) * [5.2. User-defined Environment](4.2) * [5.3. Initialize Environment](4.3) * [6.Implement DRL Algorithms](5) * [7.Backtesting Performance](6) * [7.1. BackTestStats](6.1) * [7.2. BackTestPlot](6.2) * [7.3. Baseline Stats](6.3) * [7.3. Compare to Stock Market Index](6.4) Part 1. Problem Definition This problem is to design an automated trading solution for single stock trading. We model the stock trading process as a Markov Decision Process (MDP). We then formulate our trading goal as a maximization problem.The algorithm is trained using Deep Reinforcement Learning (DRL) algorithms and the components of the reinforcement learning environment are:* Action: The action space describes the allowed actions that the agent interacts with theenvironment. Normally, a ∈ A includes three actions: a ∈ {−1, 0, 1}, where −1, 0, 1 representselling, holding, and buying one stock. Also, an action can be carried upon multiple shares. We usean action space {−k, ..., −1, 0, 1, ..., k}, where k denotes the number of shares. 
For example, "Buy10 shares of AAPL" or "Sell 10 shares of AAPL" are 10 or −10, respectively* Reward function: r(s, a, s′) is the incentive mechanism for an agent to learn a better action. The change of the portfolio value when action a is taken at state s and arriving at new state s', i.e., r(s, a, s′) = v′ − v, where v′ and v represent the portfoliovalues at state s′ and s, respectively* State: The state space describes the observations that the agent receives from the environment. Just as a human trader needs to analyze various information before executing a trade, soour trading agent observes many different features to better learn in an interactive environment.* Environment: Dow 30 consituentsThe data of the single stock that we will be using for this case study is obtained from Yahoo Finance API. The data contains Open-High-Low-Close price and volume. Part 2. Load Python Packages 2.1. Install all the packages through FinRL library ###Code ## install finrl library !pip install git+https://github.com/AI4Finance-LLC/FinRL-Library.git ###Output Collecting git+https://github.com/AI4Finance-LLC/FinRL-Library.git Cloning https://github.com/AI4Finance-LLC/FinRL-Library.git to /tmp/pip-req-build-avwct7pb Running command git clone -q https://github.com/AI4Finance-LLC/FinRL-Library.git /tmp/pip-req-build-avwct7pb Collecting pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2 Cloning https://github.com/quantopian/pyfolio.git to /tmp/pip-install-sps4f25k/pyfolio_2efe9a99238a42588250d6733a01d260 Running command git clone -q https://github.com/quantopian/pyfolio.git /tmp/pip-install-sps4f25k/pyfolio_2efe9a99238a42588250d6733a01d260 Collecting elegantrl@ git+https://github.com/AI4Finance-Foundation/ElegantRL.git#egg=elegantrl Cloning https://github.com/AI4Finance-Foundation/ElegantRL.git to /tmp/pip-install-sps4f25k/elegantrl_28233bba5b454d3399006626d813b65b Running command git clone -q https://github.com/AI4Finance-Foundation/ElegantRL.git /tmp/pip-install-sps4f25k/elegantrl_28233bba5b454d3399006626d813b65b Requirement already satisfied: numpy>=1.17.3 in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.4) (1.19.5) Requirement already satisfied: pandas>=1.1.5 in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.4) (1.1.5) Collecting stockstats>=0.4.0 Downloading stockstats-0.4.1-py2.py3-none-any.whl (19 kB) Collecting yfinance Downloading yfinance-0.1.69-py2.py3-none-any.whl (26 kB) Collecting elegantrl Downloading elegantrl-0.3.3-py3-none-any.whl (234 kB)  |████████████████████████████████| 234 kB 26.4 MB/s [?25hRequirement already satisfied: matplotlib in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.4) (3.2.2) Requirement already satisfied: scikit-learn>=0.21.0 in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.4) (1.0.2) Requirement already satisfied: gym>=0.17 in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.4) (0.17.3) Collecting stable-baselines3[extra] Downloading stable_baselines3-1.3.0-py3-none-any.whl (174 kB)  |████████████████████████████████| 174 kB 73.9 MB/s [?25hCollecting ray[default] Downloading ray-1.9.2-cp37-cp37m-manylinux2014_x86_64.whl (57.6 MB)  |████████████████████████████████| 57.6 MB 1.2 MB/s [?25hCollecting lz4 Downloading lz4-3.1.10-cp37-cp37m-manylinux2010_x86_64.whl (1.8 MB)  |████████████████████████████████| 1.8 MB 63.9 MB/s [?25hCollecting tensorboardX Downloading tensorboardX-2.4.1-py2.py3-none-any.whl (124 kB)  |████████████████████████████████| 124 kB 83.4 MB/s [?25hCollecting gputil Downloading 
GPUtil-1.4.0.tar.gz (5.5 kB) Collecting exchange_calendars Downloading exchange_calendars-3.5.tar.gz (147 kB)  |████████████████████████████████| 147 kB 68.3 MB/s [?25hCollecting alpaca_trade_api Downloading alpaca_trade_api-1.4.3-py3-none-any.whl (36 kB) Collecting ccxt>=1.66.32 Downloading ccxt-1.67.31-py2.py3-none-any.whl (2.3 MB)  |████████████████████████████████| 2.3 MB 61.2 MB/s [?25hCollecting jqdatasdk Downloading jqdatasdk-1.8.10-py3-none-any.whl (153 kB)  |████████████████████████████████| 153 kB 76.3 MB/s [?25hCollecting wrds Downloading wrds-3.1.1-py3-none-any.whl (12 kB) Requirement already satisfied: pytest in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.4) (3.6.4) Requirement already satisfied: setuptools>=41.4.0 in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.4) (57.4.0) Requirement already satisfied: wheel>=0.33.6 in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.4) (0.37.1) Collecting pre-commit Downloading pre_commit-2.16.0-py2.py3-none-any.whl (191 kB)  |████████████████████████████████| 191 kB 75.1 MB/s [?25hCollecting pybullet Downloading pybullet-3.2.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl (90.8 MB)  |████████████████████████████████| 90.8 MB 298 bytes/s [?25hRequirement already satisfied: torch in /usr/local/lib/python3.7/dist-packages (from elegantrl@ git+https://github.com/AI4Finance-Foundation/ElegantRL.git#egg=elegantrl->finrl==0.3.4) (1.10.0+cu111) Requirement already satisfied: opencv-python in /usr/local/lib/python3.7/dist-packages (from elegantrl@ git+https://github.com/AI4Finance-Foundation/ElegantRL.git#egg=elegantrl->finrl==0.3.4) (4.1.2.30) Collecting box2d-py Downloading box2d_py-2.3.8-cp37-cp37m-manylinux1_x86_64.whl (448 kB)  |████████████████████████████████| 448 kB 60.4 MB/s [?25hRequirement already satisfied: ipython>=3.2.3 in /usr/local/lib/python3.7/dist-packages (from pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (5.5.0) Requirement already satisfied: pytz>=2014.10 in /usr/local/lib/python3.7/dist-packages (from pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (2018.9) Requirement already satisfied: scipy>=0.14.0 in /usr/local/lib/python3.7/dist-packages (from pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (1.4.1) Requirement already satisfied: seaborn>=0.7.1 in /usr/local/lib/python3.7/dist-packages (from pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (0.11.2) Collecting empyrical>=0.5.0 Downloading empyrical-0.5.5.tar.gz (52 kB)  |████████████████████████████████| 52 kB 2.1 MB/s [?25hRequirement already satisfied: requests>=2.18.4 in /usr/local/lib/python3.7/dist-packages (from ccxt>=1.66.32->finrl==0.3.4) (2.23.0) Collecting yarl==1.7.2 Downloading yarl-1.7.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (271 kB)  |████████████████████████████████| 271 kB 75.7 MB/s [?25hCollecting aiohttp>=3.8 Downloading aiohttp-3.8.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (1.1 MB)  |████████████████████████████████| 1.1 MB 58.7 MB/s [?25hCollecting cryptography>=2.6.1 Downloading cryptography-36.0.1-cp36-abi3-manylinux_2_24_x86_64.whl (3.6 MB)  |████████████████████████████████| 3.6 MB 58.9 MB/s [?25hRequirement already satisfied: certifi>=2018.1.18 in /usr/local/lib/python3.7/dist-packages (from ccxt>=1.66.32->finrl==0.3.4) (2021.10.8) 
Collecting aiodns>=1.1.1 Downloading aiodns-3.0.0-py3-none-any.whl (5.0 kB) Collecting multidict>=4.0 Downloading multidict-5.2.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (160 kB)  |████████████████████████████████| 160 kB 67.6 MB/s [?25hRequirement already satisfied: idna>=2.0 in /usr/local/lib/python3.7/dist-packages (from yarl==1.7.2->ccxt>=1.66.32->finrl==0.3.4) (2.10) Requirement already satisfied: typing-extensions>=3.7.4 in /usr/local/lib/python3.7/dist-packages (from yarl==1.7.2->ccxt>=1.66.32->finrl==0.3.4) (3.10.0.2) Collecting pycares>=4.0.0 Downloading pycares-4.1.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (291 kB)  |████████████████████████████████| 291 kB 70.8 MB/s [?25hRequirement already satisfied: charset-normalizer<3.0,>=2.0 in /usr/local/lib/python3.7/dist-packages (from aiohttp>=3.8->ccxt>=1.66.32->finrl==0.3.4) (2.0.10) Collecting aiosignal>=1.1.2 Downloading aiosignal-1.2.0-py3-none-any.whl (8.2 kB) Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.7/dist-packages (from aiohttp>=3.8->ccxt>=1.66.32->finrl==0.3.4) (21.4.0) Collecting asynctest==0.13.0 Downloading asynctest-0.13.0-py3-none-any.whl (26 kB) Collecting async-timeout<5.0,>=4.0.0a3 Downloading async_timeout-4.0.2-py3-none-any.whl (5.8 kB) Collecting frozenlist>=1.1.1 Downloading frozenlist-1.2.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (192 kB)  |████████████████████████████████| 192 kB 73.4 MB/s [?25hRequirement already satisfied: cffi>=1.12 in /usr/local/lib/python3.7/dist-packages (from cryptography>=2.6.1->ccxt>=1.66.32->finrl==0.3.4) (1.15.0) Requirement already satisfied: pycparser in /usr/local/lib/python3.7/dist-packages (from cffi>=1.12->cryptography>=2.6.1->ccxt>=1.66.32->finrl==0.3.4) (2.21) Requirement already satisfied: pandas-datareader>=0.2 in /usr/local/lib/python3.7/dist-packages (from empyrical>=0.5.0->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (0.9.0) Requirement already satisfied: pyglet<=1.5.0,>=1.4.0 in /usr/local/lib/python3.7/dist-packages (from gym>=0.17->finrl==0.3.4) (1.5.0) Requirement already satisfied: cloudpickle<1.7.0,>=1.2.0 in /usr/local/lib/python3.7/dist-packages (from gym>=0.17->finrl==0.3.4) (1.3.0) Requirement already satisfied: pygments in /usr/local/lib/python3.7/dist-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (2.6.1) Requirement already satisfied: traitlets>=4.2 in /usr/local/lib/python3.7/dist-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (5.1.1) Requirement already satisfied: pexpect in /usr/local/lib/python3.7/dist-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (4.8.0) Requirement already satisfied: decorator in /usr/local/lib/python3.7/dist-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (4.4.2) Requirement already satisfied: pickleshare in /usr/local/lib/python3.7/dist-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (0.7.5) Requirement already satisfied: simplegeneric>0.8 in /usr/local/lib/python3.7/dist-packages (from ipython>=3.2.3->pyfolio@ 
git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (0.8.1) Requirement already satisfied: prompt-toolkit<2.0.0,>=1.0.4 in /usr/local/lib/python3.7/dist-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (1.0.18) Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib->finrl==0.3.4) (0.11.0) Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->finrl==0.3.4) (3.0.6) Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->finrl==0.3.4) (2.8.2) Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->finrl==0.3.4) (1.3.2) Requirement already satisfied: lxml in /usr/local/lib/python3.7/dist-packages (from pandas-datareader>=0.2->empyrical>=0.5.0->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (4.2.6) Requirement already satisfied: six>=1.9.0 in /usr/local/lib/python3.7/dist-packages (from prompt-toolkit<2.0.0,>=1.0.4->ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (1.15.0) Requirement already satisfied: wcwidth in /usr/local/lib/python3.7/dist-packages (from prompt-toolkit<2.0.0,>=1.0.4->ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (0.2.5) Requirement already satisfied: future in /usr/local/lib/python3.7/dist-packages (from pyglet<=1.5.0,>=1.4.0->gym>=0.17->finrl==0.3.4) (0.16.0) Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests>=2.18.4->ccxt>=1.66.32->finrl==0.3.4) (3.0.4) Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests>=2.18.4->ccxt>=1.66.32->finrl==0.3.4) (1.24.3) Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=0.21.0->finrl==0.3.4) (1.1.0) Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=0.21.0->finrl==0.3.4) (3.0.0) Collecting msgpack==1.0.2 Downloading msgpack-1.0.2-cp37-cp37m-manylinux1_x86_64.whl (273 kB)  |████████████████████████████████| 273 kB 73.9 MB/s [?25hCollecting websocket-client<2,>=0.56.0 Downloading websocket_client-1.2.3-py3-none-any.whl (53 kB)  |████████████████████████████████| 53 kB 2.8 MB/s [?25hCollecting alpaca_trade_api Downloading alpaca_trade_api-1.4.2-py3-none-any.whl (36 kB) Downloading alpaca_trade_api-1.4.1-py3-none-any.whl (36 kB) Downloading alpaca_trade_api-1.4.0-py3-none-any.whl (34 kB) Downloading alpaca_trade_api-1.3.0-py3-none-any.whl (43 kB)  |████████████████████████████████| 43 kB 2.3 MB/s [?25h Downloading alpaca_trade_api-1.2.3-py3-none-any.whl (40 kB)  |████████████████████████████████| 40 kB 7.3 MB/s [?25hCollecting websockets<10,>=8.0 Downloading websockets-9.1-cp37-cp37m-manylinux2010_x86_64.whl (103 kB)  |████████████████████████████████| 103 kB 77.3 MB/s [?25hCollecting pyluach Downloading pyluach-1.3.0-py3-none-any.whl (17 kB) Requirement already satisfied: toolz in /usr/local/lib/python3.7/dist-packages (from exchange_calendars->finrl==0.3.4) (0.11.2) Requirement already satisfied: korean_lunar_calendar in /usr/local/lib/python3.7/dist-packages (from exchange_calendars->finrl==0.3.4) (0.2.1) Collecting 
thriftpy2>=0.3.9 Downloading thriftpy2-0.4.14.tar.gz (361 kB)  |████████████████████████████████| 361 kB 78.3 MB/s [?25hCollecting pymysql>=0.7.6 Downloading PyMySQL-1.0.2-py3-none-any.whl (43 kB)  |████████████████████████████████| 43 kB 2.7 MB/s [?25hRequirement already satisfied: SQLAlchemy>=1.2.8 in /usr/local/lib/python3.7/dist-packages (from jqdatasdk->finrl==0.3.4) (1.4.29) Requirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from SQLAlchemy>=1.2.8->jqdatasdk->finrl==0.3.4) (4.10.0) Requirement already satisfied: greenlet!=0.4.17 in /usr/local/lib/python3.7/dist-packages (from SQLAlchemy>=1.2.8->jqdatasdk->finrl==0.3.4) (1.1.2) Collecting ply<4.0,>=3.4 Downloading ply-3.11-py2.py3-none-any.whl (49 kB)  |████████████████████████████████| 49 kB 7.6 MB/s [?25hRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata->SQLAlchemy>=1.2.8->jqdatasdk->finrl==0.3.4) (3.7.0) Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.7/dist-packages (from pexpect->ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.4) (0.7.0) Collecting pyyaml>=5.1 Downloading PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (596 kB)  |████████████████████████████████| 596 kB 66.2 MB/s [?25hCollecting identify>=1.0.0 Downloading identify-2.4.3-py2.py3-none-any.whl (98 kB)  |████████████████████████████████| 98 kB 9.6 MB/s [?25hCollecting nodeenv>=0.11.1 Downloading nodeenv-1.6.0-py2.py3-none-any.whl (21 kB) Collecting virtualenv>=20.0.8 Downloading virtualenv-20.13.0-py2.py3-none-any.whl (6.5 MB)  |████████████████████████████████| 6.5 MB 50.1 MB/s [?25hRequirement already satisfied: toml in /usr/local/lib/python3.7/dist-packages (from pre-commit->finrl==0.3.4) (0.10.2) Collecting cfgv>=2.0.0 Downloading cfgv-3.3.1-py2.py3-none-any.whl (7.3 kB) Collecting platformdirs<3,>=2 Downloading platformdirs-2.4.1-py3-none-any.whl (14 kB) Collecting distlib<1,>=0.3.1 Downloading distlib-0.3.4-py2.py3-none-any.whl (461 kB)  |████████████████████████████████| 461 kB 65.2 MB/s [?25hRequirement already satisfied: filelock<4,>=3.2 in /usr/local/lib/python3.7/dist-packages (from virtualenv>=20.0.8->pre-commit->finrl==0.3.4) (3.4.2) Requirement already satisfied: pluggy<0.8,>=0.5 in /usr/local/lib/python3.7/dist-packages (from pytest->finrl==0.3.4) (0.7.1) Requirement already satisfied: more-itertools>=4.0.0 in /usr/local/lib/python3.7/dist-packages (from pytest->finrl==0.3.4) (8.12.0) Requirement already satisfied: atomicwrites>=1.0 in /usr/local/lib/python3.7/dist-packages (from pytest->finrl==0.3.4) (1.4.0) Requirement already satisfied: py>=1.5.0 in /usr/local/lib/python3.7/dist-packages (from pytest->finrl==0.3.4) (1.11.0) Collecting redis>=3.5.0 Downloading redis-4.1.0-py3-none-any.whl (171 kB)  |████████████████████████████████| 171 kB 77.2 MB/s [?25hRequirement already satisfied: protobuf>=3.15.3 in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.4) (3.17.3) Requirement already satisfied: grpcio>=1.28.1 in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.4) (1.43.0) Requirement already satisfied: click>=7.0 in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.4) (7.1.2) Requirement already satisfied: jsonschema in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.4) (4.3.3) Requirement already satisfied: smart-open in 
/usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.4) (5.2.1) Collecting opencensus Downloading opencensus-0.8.0-py2.py3-none-any.whl (128 kB)  |████████████████████████████████| 128 kB 80.3 MB/s [?25hCollecting aiohttp-cors Downloading aiohttp_cors-0.7.0-py3-none-any.whl (27 kB) Collecting colorful Downloading colorful-0.5.4-py2.py3-none-any.whl (201 kB)  |████████████████████████████████| 201 kB 79.8 MB/s [?25hCollecting py-spy>=0.2.0 Downloading py_spy-0.3.11-py2.py3-none-manylinux_2_5_x86_64.manylinux1_x86_64.whl (3.0 MB)  |████████████████████████████████| 3.0 MB 61.5 MB/s [?25hRequirement already satisfied: prometheus-client>=0.7.1 in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.4) (0.12.0) Collecting gpustat>=1.0.0b1 Downloading gpustat-1.0.0b1.tar.gz (82 kB)  |████████████████████████████████| 82 kB 313 kB/s [?25hCollecting aioredis<2 Downloading aioredis-1.3.1-py3-none-any.whl (65 kB)  |████████████████████████████████| 65 kB 4.5 MB/s [?25hCollecting hiredis Downloading hiredis-2.0.0-cp37-cp37m-manylinux2010_x86_64.whl (85 kB)  |████████████████████████████████| 85 kB 4.7 MB/s [?25hRequirement already satisfied: nvidia-ml-py3>=7.352.0 in /usr/local/lib/python3.7/dist-packages (from gpustat>=1.0.0b1->ray[default]->finrl==0.3.4) (7.352.0) Requirement already satisfied: psutil in /usr/local/lib/python3.7/dist-packages (from gpustat>=1.0.0b1->ray[default]->finrl==0.3.4) (5.4.8) Collecting blessed>=1.17.1 Downloading blessed-1.19.0-py2.py3-none-any.whl (57 kB)  |████████████████████████████████| 57 kB 7.1 MB/s [?25hCollecting deprecated>=1.2.3 Downloading Deprecated-1.2.13-py2.py3-none-any.whl (9.6 kB) Requirement already satisfied: packaging>=21.3 in /usr/local/lib/python3.7/dist-packages (from redis>=3.5.0->ray[default]->finrl==0.3.4) (21.3) Requirement already satisfied: wrapt<2,>=1.10 in /usr/local/lib/python3.7/dist-packages (from deprecated>=1.2.3->redis>=3.5.0->ray[default]->finrl==0.3.4) (1.13.3) Requirement already satisfied: pyrsistent!=0.17.0,!=0.17.1,!=0.17.2,>=0.14.0 in /usr/local/lib/python3.7/dist-packages (from jsonschema->ray[default]->finrl==0.3.4) (0.18.0) Requirement already satisfied: importlib-resources>=1.4.0 in /usr/local/lib/python3.7/dist-packages (from jsonschema->ray[default]->finrl==0.3.4) (5.4.0) Requirement already satisfied: google-api-core<3.0.0,>=1.0.0 in /usr/local/lib/python3.7/dist-packages (from opencensus->ray[default]->finrl==0.3.4) (1.26.3) Collecting opencensus-context==0.1.2 Downloading opencensus_context-0.1.2-py2.py3-none-any.whl (4.4 kB) Requirement already satisfied: google-auth<2.0dev,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.4) (1.35.0) Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.4) (1.54.0) Requirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from google-auth<2.0dev,>=1.21.1->google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.4) (4.2.4) Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.7/dist-packages (from google-auth<2.0dev,>=1.21.1->google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.4) (4.8) Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from 
google-auth<2.0dev,>=1.21.1->google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.4) (0.2.8) Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.7/dist-packages (from pyasn1-modules>=0.2.1->google-auth<2.0dev,>=1.21.1->google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.4) (0.4.8) Requirement already satisfied: tabulate in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.4) (0.8.9) Requirement already satisfied: pillow in /usr/local/lib/python3.7/dist-packages (from stable-baselines3[extra]->finrl==0.3.4) (7.1.2) Requirement already satisfied: tensorboard>=2.2.0 in /usr/local/lib/python3.7/dist-packages (from stable-baselines3[extra]->finrl==0.3.4) (2.7.0) Requirement already satisfied: atari-py~=0.2.0 in /usr/local/lib/python3.7/dist-packages (from stable-baselines3[extra]->finrl==0.3.4) (0.2.9) Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.4) (1.0.1) Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.4) (1.8.1) Requirement already satisfied: tensorboard-data-server<0.7.0,>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.4) (0.6.1) Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.4) (0.4.6) Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.4) (3.3.6) Requirement already satisfied: absl-py>=0.4 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.4) (0.12.0) Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.4) (1.3.0) Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.4) (3.1.1) Collecting psycopg2-binary Downloading psycopg2_binary-2.9.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.0 MB)  |████████████████████████████████| 3.0 MB 30.7 MB/s [?25hCollecting mock Downloading mock-4.0.3-py3-none-any.whl (28 kB) Requirement already satisfied: multitasking>=0.0.7 in /usr/local/lib/python3.7/dist-packages (from yfinance->finrl==0.3.4) (0.0.10) Collecting requests>=2.18.4 Downloading requests-2.27.1-py2.py3-none-any.whl (63 kB)  |████████████████████████████████| 63 kB 2.1 MB/s [?25hCollecting lxml Downloading lxml-4.7.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl (6.4 MB)  |████████████████████████████████| 6.4 MB 65.9 MB/s [?25hBuilding wheels for collected packages: finrl, elegantrl, pyfolio, empyrical, exchange-calendars, gputil, thriftpy2, gpustat Building wheel for finrl (setup.py) ... [?25l[?25hdone Created wheel for finrl: filename=finrl-0.3.4-py3-none-any.whl size=3885434 sha256=849cbf03841a5a022427fa9f806d06b3e94a6b1b24c083a41ea6f47445b938d8 Stored in directory: /tmp/pip-ephem-wheel-cache-pv4yf2kc/wheels/17/ff/bd/1bc602a0352762b0b24041b88536d803ae343ed0a711fcf55e Building wheel for elegantrl (setup.py) ... 
[?25l[?25hdone Created wheel for elegantrl: filename=elegantrl-0.3.3-py3-none-any.whl size=188472 sha256=8d5083e1d626c2ef812b814b40640e39b5018c862373497d500b55728f0fb8a3 Stored in directory: /tmp/pip-ephem-wheel-cache-pv4yf2kc/wheels/99/85/5e/86cb3a9f47adfca5e248295e93113e1b298d60883126d62c84 Building wheel for pyfolio (setup.py) ... [?25l[?25hdone Created wheel for pyfolio: filename=pyfolio-0.9.2+75.g4b901f6-py3-none-any.whl size=75774 sha256=c4497ace077e94d44cf5ac597e917a70823257583d7dac76152d009c98bda297 Stored in directory: /tmp/pip-ephem-wheel-cache-pv4yf2kc/wheels/ef/09/e5/2c1bf37c050d22557c080deb1be986d06424627c04aeca19b9 Building wheel for empyrical (setup.py) ... [?25l[?25hdone Created wheel for empyrical: filename=empyrical-0.5.5-py3-none-any.whl size=39780 sha256=ae4a57d940f20813f523a30c1de536b1f2531cac23a5de0b0bf37a9d3fa12015 Stored in directory: /root/.cache/pip/wheels/d9/91/4b/654fcff57477efcf149eaca236da2fce991526cbab431bf312 Building wheel for exchange-calendars (setup.py) ... [?25l[?25hdone Created wheel for exchange-calendars: filename=exchange_calendars-3.5-py3-none-any.whl size=179486 sha256=043fd7e4bfa797afe79a9eded1762c9d4543807b9c192f2d0d99df42c3c7b2ce Stored in directory: /root/.cache/pip/wheels/69/21/43/b6ae2605dd767f6cd5a5b0b70c93a9a75823e44b3ccb92bce7 Building wheel for gputil (setup.py) ... [?25l[?25hdone Created wheel for gputil: filename=GPUtil-1.4.0-py3-none-any.whl size=7411 sha256=7a9b6d994aaf299e7a3c73dbb8a98ede27aa9199340077bd7571f3f07d6d425b Stored in directory: /root/.cache/pip/wheels/6e/f8/83/534c52482d6da64622ddbf72cd93c35d2ef2881b78fd08ff0c Building wheel for thriftpy2 (setup.py) ... [?25l[?25hdone Created wheel for thriftpy2: filename=thriftpy2-0.4.14-cp37-cp37m-linux_x86_64.whl size=944197 sha256=e3ccabc57c0d1511e6e87f870d8941748f3af10e1e9b00010db3a0692d48165d Stored in directory: /root/.cache/pip/wheels/2a/f5/49/9c0d851aa64b58db72883cf9393cc824d536bdf13f5c83cff4 Building wheel for gpustat (setup.py) ... 
[?25l[?25hdone Created wheel for gpustat: filename=gpustat-1.0.0b1-py3-none-any.whl size=15979 sha256=c470a062e2c777d4121d6f451cf0224bc94b2ac7043e66724f194cc1bed87289 Stored in directory: /root/.cache/pip/wheels/1a/16/e2/3e2437fba4c4b6a97a97bd96fce5d14e66cff5c4966fb1cc8c Successfully built finrl elegantrl pyfolio empyrical exchange-calendars gputil thriftpy2 gpustat Installing collected packages: requests, multidict, frozenlist, yarl, lxml, deprecated, asynctest, async-timeout, aiosignal, redis, pyyaml, pycares, ply, platformdirs, opencensus-context, msgpack, hiredis, distlib, blessed, aiohttp, websockets, websocket-client, virtualenv, thriftpy2, tensorboardX, stable-baselines3, ray, pymysql, pyluach, pybullet, py-spy, psycopg2-binary, opencensus, nodeenv, mock, identify, gpustat, empyrical, cryptography, colorful, cfgv, box2d-py, aioredis, aiohttp-cors, aiodns, yfinance, wrds, stockstats, pyfolio, pre-commit, lz4, jqdatasdk, gputil, exchange-calendars, elegantrl, ccxt, alpaca-trade-api, finrl Attempting uninstall: requests Found existing installation: requests 2.23.0 Uninstalling requests-2.23.0: Successfully uninstalled requests-2.23.0 Attempting uninstall: lxml Found existing installation: lxml 4.2.6 Uninstalling lxml-4.2.6: Successfully uninstalled lxml-4.2.6 Attempting uninstall: pyyaml Found existing installation: PyYAML 3.13 Uninstalling PyYAML-3.13: Successfully uninstalled PyYAML-3.13 Attempting uninstall: msgpack Found existing installation: msgpack 1.0.3 Uninstalling msgpack-1.0.3: Successfully uninstalled msgpack-1.0.3 ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts. google-colab 1.0.0 requires requests~=2.23.0, but you have requests 2.27.1 which is incompatible. datascience 0.10.6 requires folium==0.2.1, but you have folium 0.8.3 which is incompatible. Successfully installed aiodns-3.0.0 aiohttp-3.8.1 aiohttp-cors-0.7.0 aioredis-1.3.1 aiosignal-1.2.0 alpaca-trade-api-1.2.3 async-timeout-4.0.2 asynctest-0.13.0 blessed-1.19.0 box2d-py-2.3.8 ccxt-1.67.31 cfgv-3.3.1 colorful-0.5.4 cryptography-36.0.1 deprecated-1.2.13 distlib-0.3.4 elegantrl-0.3.3 empyrical-0.5.5 exchange-calendars-3.5 finrl-0.3.4 frozenlist-1.2.0 gpustat-1.0.0b1 gputil-1.4.0 hiredis-2.0.0 identify-2.4.3 jqdatasdk-1.8.10 lxml-4.7.1 lz4-3.1.10 mock-4.0.3 msgpack-1.0.2 multidict-5.2.0 nodeenv-1.6.0 opencensus-0.8.0 opencensus-context-0.1.2 platformdirs-2.4.1 ply-3.11 pre-commit-2.16.0 psycopg2-binary-2.9.3 py-spy-0.3.11 pybullet-3.2.1 pycares-4.1.2 pyfolio-0.9.2+75.g4b901f6 pyluach-1.3.0 pymysql-1.0.2 pyyaml-6.0 ray-1.9.2 redis-4.1.0 requests-2.27.1 stable-baselines3-1.3.0 stockstats-0.4.1 tensorboardX-2.4.1 thriftpy2-0.4.14 virtualenv-20.13.0 websocket-client-1.2.3 websockets-9.1 wrds-3.1.1 yarl-1.7.2 yfinance-0.1.69 ###Markdown 2.2. Check if the additional packages needed are present, if not install them. * Yahoo Finance API* pandas* numpy* matplotlib* stockstats* OpenAI gym* stable-baselines* tensorflow* pyfolio 2.3. 
Import Packages ###Code import pandas as pd import numpy as np import matplotlib import matplotlib.pyplot as plt # matplotlib.use('Agg') import datetime %matplotlib inline from finrl import config from finrl import config_tickers from finrl.finrl_meta.preprocessor.yahoodownloader import YahooDownloader from finrl.finrl_meta.preprocessor.preprocessors import FeatureEngineer, data_split from finrl.finrl_meta.env_stock_trading.env_stocktrading import StockTradingEnv from finrl.drl_agents.stablebaselines3.models import DRLAgent from finrl.plot import backtest_stats, backtest_plot, get_daily_return, get_baseline from pprint import pprint import sys sys.path.append("../FinRL-Library") import itertools ###Output /usr/local/lib/python3.7/dist-packages/pyfolio/pos.py:27: UserWarning: Module "zipline.assets" not found; multipliers will not be applied to position notionals. 'Module "zipline.assets" not found; multipliers will not be applied' ###Markdown 2.4. Create Folders ###Code import os if not os.path.exists("./" + config.DATA_SAVE_DIR): os.makedirs("./" + config.DATA_SAVE_DIR) if not os.path.exists("./" + config.TRAINED_MODEL_DIR): os.makedirs("./" + config.TRAINED_MODEL_DIR) if not os.path.exists("./" + config.TENSORBOARD_LOG_DIR): os.makedirs("./" + config.TENSORBOARD_LOG_DIR) if not os.path.exists("./" + config.RESULTS_DIR): os.makedirs("./" + config.RESULTS_DIR) ###Output _____no_output_____ ###Markdown Part 3. Download Stock Data from Yahoo FinanceYahoo Finance is a website that provides stock data, financial news, financial reports, etc. All the data provided by Yahoo Finance is free.* FinRL uses a class **YahooDownloader** to fetch data from Yahoo Finance API* Call Limit: Using the Public API (without authentication), you are limited to 2,000 requests per hour per IP (or up to a total of 48,000 requests a day). -----class YahooDownloader: Provides methods for retrieving daily stock data from Yahoo Finance API Attributes ---------- start_date : str start date of the data (modified from config.py) end_date : str end date of the data (modified from config.py) ticker_list : list a list of stock tickers (modified from config.py) Methods ------- fetch_data() Fetches data from yahoo API ###Code # from config.py start_date is a string config.START_DATE # from config.py end_date is a string config.END_DATE print(config_tickers.DOW_30_TICKER) df = YahooDownloader(start_date = '2009-01-01', end_date = '2021-01-01', ticker_list = config_tickers.DOW_30_TICKER).fetch_data() df.shape df.head() df['date'] = pd.to_datetime(df['date'],format='%Y-%m-%d') df.sort_values(['date','tic'],ignore_index=True).head() ###Output _____no_output_____ ###Markdown Part 4: Preprocess fundamental data- Import finanical data downloaded from Compustat via WRDS(Wharton Research Data Service)- Preprocess the dataset and calculate financial ratios- Add those ratios to the price data preprocessed in Part 3- Calculate price-related ratios such as P/E and P/B 4-1 Import the financial data ###Code # Import fundamental data from my GitHub repository url = 'https://raw.githubusercontent.com/mariko-sawada/FinRL_with_fundamental_data/main/dow_30_fundamental_wrds.csv' fund = pd.read_csv(url) # Check the imported dataset fund.head() ###Output _____no_output_____ ###Markdown 4-2 Specify items needed to calculate financial ratios- To know more about the data description of the dataset, please check WRDS's website(https://wrds-www.wharton.upenn.edu/). Login will be required. 
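Before subsetting, it can be worth confirming that the Compustat fields listed in the next cell are actually present in the downloaded dataframe. The check below is only a sketch, assuming `fund` from 4-1 is in memory; the column names used here are a hand-picked subset of the `items` list defined next.
###Code
# Hypothetical sanity check: confirm a few of the required Compustat columns exist in `fund`
required_cols = ['datadate', 'tic', 'oiadpq', 'revtq', 'niq', 'atq', 'teqq']
missing_cols = [col for col in required_cols if col not in fund.columns]
print('Missing columns:', missing_cols if missing_cols else 'none')
###Output
_____no_output_____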
###Code
# List items that are used to calculate financial ratios
items = [
    'datadate',  # Date
    'tic',       # Ticker
    'oiadpq',    # Quarterly operating income
    'revtq',     # Quarterly revenue
    'niq',       # Quarterly net income
    'atq',       # Total assets
    'teqq',      # Shareholders' equity
    'epspiy',    # EPS (Basic) incl. extraordinary items
    'ceqq',      # Common equity
    'cshoq',     # Common shares outstanding
    'dvpspq',    # Dividends per share
    'actq',      # Current assets
    'lctq',      # Current liabilities
    'cheq',      # Cash & equivalents
    'rectq',     # Receivables
    'cogsq',     # Cost of goods sold
    'invtq',     # Inventories
    'apq',       # Accounts payable
    'dlttq',     # Long-term debt
    'dlcq',      # Debt in current liabilities
    'ltq'        # Liabilities
]

# Omit items that will not be used
fund_data = fund[items]

# Rename column names for the sake of readability
fund_data = fund_data.rename(columns={
    'datadate': 'date',          # Date
    'oiadpq': 'op_inc_q',        # Quarterly operating income
    'revtq': 'rev_q',            # Quarterly revenue
    'niq': 'net_inc_q',          # Quarterly net income
    'atq': 'tot_assets',         # Total assets
    'teqq': 'sh_equity',         # Shareholders' equity
    'epspiy': 'eps_incl_ex',     # EPS (Basic) incl. extraordinary items
    'ceqq': 'com_eq',            # Common equity
    'cshoq': 'sh_outstanding',   # Common shares outstanding
    'dvpspq': 'div_per_sh',      # Dividends per share
    'actq': 'cur_assets',        # Current assets
    'lctq': 'cur_liabilities',   # Current liabilities
    'cheq': 'cash_eq',           # Cash & equivalents
    'rectq': 'receivables',      # Receivables
    'cogsq': 'cogs_q',           # Cost of goods sold
    'invtq': 'inventories',      # Inventories
    'apq': 'payables',           # Accounts payable
    'dlttq': 'long_debt',        # Long-term debt
    'dlcq': 'short_debt',        # Debt in current liabilities
    'ltq': 'tot_liabilities'     # Liabilities
})

# Check the data
fund_data.head()
###Output
_____no_output_____
###Markdown
4-3 Calculate financial ratios
- For items from the Profit/Loss statement, we calculate LTM (Last Twelve Months) figures and use them to derive profitability ratios such as Operating Margin and ROE.
For items from balance sheets, we use the numbers on the day.- To check the definitions of the financial ratios calculated here, please refer to CFI's website: https://corporatefinanceinstitute.com/resources/knowledge/finance/financial-ratios/ ###Code # Calculate financial ratios date = pd.to_datetime(fund_data['date'],format='%Y%m%d') tic = fund_data['tic'].to_frame('tic') # Profitability ratios # Operating Margin OPM = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='OPM') for i in range(0, fund_data.shape[0]): if i-3 < 0: OPM[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: OPM.iloc[i] = np.nan else: OPM.iloc[i] = np.sum(fund_data['op_inc_q'].iloc[i-3:i])/np.sum(fund_data['rev_q'].iloc[i-3:i]) # Net Profit Margin NPM = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='NPM') for i in range(0, fund_data.shape[0]): if i-3 < 0: NPM[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: NPM.iloc[i] = np.nan else: NPM.iloc[i] = np.sum(fund_data['net_inc_q'].iloc[i-3:i])/np.sum(fund_data['rev_q'].iloc[i-3:i]) # Return On Assets ROA = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='ROA') for i in range(0, fund_data.shape[0]): if i-3 < 0: ROA[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: ROA.iloc[i] = np.nan else: ROA.iloc[i] = np.sum(fund_data['net_inc_q'].iloc[i-3:i])/fund_data['tot_assets'].iloc[i] # Return on Equity ROE = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='ROE') for i in range(0, fund_data.shape[0]): if i-3 < 0: ROE[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: ROE.iloc[i] = np.nan else: ROE.iloc[i] = np.sum(fund_data['net_inc_q'].iloc[i-3:i])/fund_data['sh_equity'].iloc[i] # For calculating valuation ratios in the next subpart, calculate per share items in advance # Earnings Per Share EPS = fund_data['eps_incl_ex'].to_frame('EPS') # Book Per Share BPS = (fund_data['com_eq']/fund_data['sh_outstanding']).to_frame('BPS') # Need to check units #Dividend Per Share DPS = fund_data['div_per_sh'].to_frame('DPS') # Liquidity ratios # Current ratio cur_ratio = (fund_data['cur_assets']/fund_data['cur_liabilities']).to_frame('cur_ratio') # Quick ratio quick_ratio = ((fund_data['cash_eq'] + fund_data['receivables'] )/fund_data['cur_liabilities']).to_frame('quick_ratio') # Cash ratio cash_ratio = (fund_data['cash_eq']/fund_data['cur_liabilities']).to_frame('cash_ratio') # Efficiency ratios # Inventory turnover ratio inv_turnover = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='inv_turnover') for i in range(0, fund_data.shape[0]): if i-3 < 0: inv_turnover[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: inv_turnover.iloc[i] = np.nan else: inv_turnover.iloc[i] = np.sum(fund_data['cogs_q'].iloc[i-3:i])/fund_data['inventories'].iloc[i] # Receivables turnover ratio acc_rec_turnover = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='acc_rec_turnover') for i in range(0, fund_data.shape[0]): if i-3 < 0: acc_rec_turnover[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: acc_rec_turnover.iloc[i] = np.nan else: acc_rec_turnover.iloc[i] = np.sum(fund_data['rev_q'].iloc[i-3:i])/fund_data['receivables'].iloc[i] # Payable turnover ratio acc_pay_turnover = pd.Series(np.empty(fund_data.shape[0],dtype=object),name='acc_pay_turnover') for i in range(0, fund_data.shape[0]): if i-3 < 0: acc_pay_turnover[i] = np.nan elif fund_data.iloc[i,1] != fund_data.iloc[i-3,1]: acc_pay_turnover.iloc[i] = np.nan else: acc_pay_turnover.iloc[i] = 
np.sum(fund_data['cogs_q'].iloc[i-3:i])/fund_data['payables'].iloc[i] ## Leverage financial ratios # Debt ratio debt_ratio = (fund_data['tot_liabilities']/fund_data['tot_assets']).to_frame('debt_ratio') # Debt to Equity ratio debt_to_equity = (fund_data['tot_liabilities']/fund_data['sh_equity']).to_frame('debt_to_equity') # Create a dataframe that merges all the ratios ratios = pd.concat([date,tic,OPM,NPM,ROA,ROE,EPS,BPS,DPS, cur_ratio,quick_ratio,cash_ratio,inv_turnover,acc_rec_turnover,acc_pay_turnover, debt_ratio,debt_to_equity], axis=1) # Check the ratio data ratios.head() ratios.tail() ###Output _____no_output_____ ###Markdown 4-4 Deal with NAs and infinite values- We replace N/A and infinite values with zero so that they can be recognized as a state ###Code # Replace NAs infinite values with zero final_ratios = ratios.copy() final_ratios = final_ratios.fillna(0) final_ratios = final_ratios.replace(np.inf,0) final_ratios.head() final_ratios.tail() ###Output _____no_output_____ ###Markdown 4-5 Merge stock price data and ratios into one dataframe- Merge the price dataframe preprocessed in Part 3 and the ratio dataframe created in this part- Since the prices are daily and ratios are quartely, we have NAs in the ratio columns after merging the two dataframes. We deal with this by backfilling the ratios. ###Code list_ticker = df["tic"].unique().tolist() list_date = list(pd.date_range(df['date'].min(),df['date'].max())) combination = list(itertools.product(list_date,list_ticker)) # Merge stock price data and ratios into one dataframe processed_full = pd.DataFrame(combination,columns=["date","tic"]).merge(df,on=["date","tic"],how="left") processed_full = processed_full.merge(final_ratios,how='left',on=['date','tic']) processed_full = processed_full.sort_values(['tic','date']) # Backfill the ratio data to make them daily processed_full = processed_full.bfill(axis='rows') ###Output _____no_output_____ ###Markdown 4-6 Calculate market valuation ratios using daily stock price data ###Code # Calculate P/E, P/B and dividend yield using daily closing price processed_full['PE'] = processed_full['close']/processed_full['EPS'] processed_full['PB'] = processed_full['close']/processed_full['BPS'] processed_full['Div_yield'] = processed_full['DPS']/processed_full['close'] # Drop per share items used for the above calculation processed_full = processed_full.drop(columns=['day','EPS','BPS','DPS']) # Replace NAs infinite values with zero processed_full = processed_full.copy() processed_full = processed_full.fillna(0) processed_full = processed_full.replace(np.inf,0) # Check the final data processed_full.sort_values(['date','tic'],ignore_index=True).head(10) ###Output _____no_output_____ ###Markdown Part 5. Design EnvironmentConsidering the stochastic and interactive nature of the automated stock trading tasks, a financial task is modeled as a **Markov Decision Process (MDP)** problem. The training process involves observing stock price change, taking an action and reward's calculation to have the agent adjusting its strategy accordingly. By interacting with the environment, the trading agent will derive a trading strategy with the maximized rewards as time proceeds.Our trading environments, based on OpenAI Gym framework, simulate live stock markets with real market data according to the principle of time-driven simulation.The action space describes the allowed actions that the agent interacts with the environment. 
Normally, action a includes three actions: {-1, 0, 1}, where -1, 0, 1 represent selling, holding, and buying one share. Also, an action can be carried upon multiple shares. We use an action space {-k,…,-1, 0, 1, …, k}, where k denotes the number of shares to buy and -k denotes the number of shares to sell. For example, "Buy 10 shares of AAPL" or "Sell 10 shares of AAPL" are 10 or -10, respectively. The continuous action space needs to be normalized to [-1, 1], since the policy is defined on a Gaussian distribution, which needs to be normalized and symmetric. 5-1 Split data into training and trade dataset- Training data split: 2009-01-01 to 2018-12-31- Trade data split: 2019-01-01 to 2020-09-30 ###Code train = data_split(processed_full, '2009-01-01','2019-01-01') trade = data_split(processed_full, '2019-01-01','2021-01-01') # Check the length of the two datasets print(len(train)) print(len(trade)) train.head() trade.head() ###Output _____no_output_____ ###Markdown 5-2 Set up the training environment ###Code import gym import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd from gym import spaces from gym.utils import seeding from stable_baselines3.common.vec_env import DummyVecEnv matplotlib.use("Agg") # from stable_baselines3.common import logger class StockTradingEnv(gym.Env): """A stock trading environment for OpenAI gym""" metadata = {"render.modes": ["human"]} def __init__( self, df, stock_dim, hmax, initial_amount, buy_cost_pct, sell_cost_pct, reward_scaling, state_space, action_space, tech_indicator_list, turbulence_threshold=None, risk_indicator_col="turbulence", make_plots=False, print_verbosity=10, day=0, initial=True, previous_state=[], model_name="", mode="", iteration="", ): self.day = day self.df = df self.stock_dim = stock_dim self.hmax = hmax self.initial_amount = initial_amount self.buy_cost_pct = buy_cost_pct self.sell_cost_pct = sell_cost_pct self.reward_scaling = reward_scaling self.state_space = state_space self.action_space = action_space self.tech_indicator_list = tech_indicator_list self.action_space = spaces.Box(low=-1, high=1, shape=(self.action_space,)) self.observation_space = spaces.Box( low=-np.inf, high=np.inf, shape=(self.state_space,) ) self.data = self.df.loc[self.day, :] self.terminal = False self.make_plots = make_plots self.print_verbosity = print_verbosity self.turbulence_threshold = turbulence_threshold self.risk_indicator_col = risk_indicator_col self.initial = initial self.previous_state = previous_state self.model_name = model_name self.mode = mode self.iteration = iteration # initalize state self.state = self._initiate_state() # initialize reward self.reward = 0 self.turbulence = 0 self.cost = 0 self.trades = 0 self.episode = 0 # memorize all the total balance change self.asset_memory = [self.initial_amount] self.rewards_memory = [] self.actions_memory = [] self.date_memory = [self._get_date()] # self.reset() self._seed() def _sell_stock(self, index, action): def _do_sell_normal(): if self.state[index + 1] > 0: # Sell only if the price is > 0 (no missing data in this particular date) # perform sell action based on the sign of the action if self.state[index + self.stock_dim + 1] > 0: # Sell only if current asset is > 0 sell_num_shares = min( abs(action), self.state[index + self.stock_dim + 1] ) sell_amount = ( self.state[index + 1] * sell_num_shares * (1 - self.sell_cost_pct) ) # update balance self.state[0] += sell_amount self.state[index + self.stock_dim + 1] -= sell_num_shares self.cost += ( self.state[index + 1] 
* sell_num_shares * self.sell_cost_pct ) self.trades += 1 else: sell_num_shares = 0 else: sell_num_shares = 0 return sell_num_shares # perform sell action based on the sign of the action if self.turbulence_threshold is not None: if self.turbulence >= self.turbulence_threshold: if self.state[index + 1] > 0: # Sell only if the price is > 0 (no missing data in this particular date) # if turbulence goes over threshold, just clear out all positions if self.state[index + self.stock_dim + 1] > 0: # Sell only if current asset is > 0 sell_num_shares = self.state[index + self.stock_dim + 1] sell_amount = ( self.state[index + 1] * sell_num_shares * (1 - self.sell_cost_pct) ) # update balance self.state[0] += sell_amount self.state[index + self.stock_dim + 1] = 0 self.cost += ( self.state[index + 1] * sell_num_shares * self.sell_cost_pct ) self.trades += 1 else: sell_num_shares = 0 else: sell_num_shares = 0 else: sell_num_shares = _do_sell_normal() else: sell_num_shares = _do_sell_normal() return sell_num_shares def _buy_stock(self, index, action): def _do_buy(): if self.state[index + 1] > 0: # Buy only if the price is > 0 (no missing data in this particular date) available_amount = self.state[0] // self.state[index + 1] # print('available_amount:{}'.format(available_amount)) # update balance buy_num_shares = min(available_amount, action) buy_amount = ( self.state[index + 1] * buy_num_shares * (1 + self.buy_cost_pct) ) self.state[0] -= buy_amount self.state[index + self.stock_dim + 1] += buy_num_shares self.cost += self.state[index + 1] * buy_num_shares * self.buy_cost_pct self.trades += 1 else: buy_num_shares = 0 return buy_num_shares # perform buy action based on the sign of the action if self.turbulence_threshold is None: buy_num_shares = _do_buy() else: if self.turbulence < self.turbulence_threshold: buy_num_shares = _do_buy() else: buy_num_shares = 0 pass return buy_num_shares def _make_plot(self): plt.plot(self.asset_memory, "r") plt.savefig("results/account_value_trade_{}.png".format(self.episode)) plt.close() def step(self, actions): self.terminal = self.day >= len(self.df.index.unique()) - 1 if self.terminal: # print(f"Episode: {self.episode}") if self.make_plots: self._make_plot() end_total_asset = self.state[0] + sum( np.array(self.state[1 : (self.stock_dim + 1)]) * np.array(self.state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)]) ) df_total_value = pd.DataFrame(self.asset_memory) tot_reward = ( self.state[0] + sum( np.array(self.state[1 : (self.stock_dim + 1)]) * np.array( self.state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)] ) ) - self.initial_amount ) df_total_value.columns = ["account_value"] df_total_value["date"] = self.date_memory df_total_value["daily_return"] = df_total_value["account_value"].pct_change( 1 ) if df_total_value["daily_return"].std() != 0: sharpe = ( (252 ** 0.5) * df_total_value["daily_return"].mean() / df_total_value["daily_return"].std() ) df_rewards = pd.DataFrame(self.rewards_memory) df_rewards.columns = ["account_rewards"] df_rewards["date"] = self.date_memory[:-1] if self.episode % self.print_verbosity == 0: print(f"day: {self.day}, episode: {self.episode}") print(f"begin_total_asset: {self.asset_memory[0]:0.2f}") print(f"end_total_asset: {end_total_asset:0.2f}") print(f"total_reward: {tot_reward:0.2f}") print(f"total_cost: {self.cost:0.2f}") print(f"total_trades: {self.trades}") if df_total_value["daily_return"].std() != 0: print(f"Sharpe: {sharpe:0.3f}") print("=================================") if (self.model_name != "") and (self.mode != ""): 
df_actions = self.save_action_memory() df_actions.to_csv( "results/actions_{}_{}_{}.csv".format( self.mode, self.model_name, self.iteration ) ) df_total_value.to_csv( "results/account_value_{}_{}_{}.csv".format( self.mode, self.model_name, self.iteration ), index=False, ) df_rewards.to_csv( "results/account_rewards_{}_{}_{}.csv".format( self.mode, self.model_name, self.iteration ), index=False, ) plt.plot(self.asset_memory, "r") plt.savefig( "results/account_value_{}_{}_{}.png".format( self.mode, self.model_name, self.iteration ), index=False, ) plt.close() # Add outputs to logger interface # logger.record("environment/portfolio_value", end_total_asset) # logger.record("environment/total_reward", tot_reward) # logger.record("environment/total_reward_pct", (tot_reward / (end_total_asset - tot_reward)) * 100) # logger.record("environment/total_cost", self.cost) # logger.record("environment/total_trades", self.trades) return self.state, self.reward, self.terminal, {} else: actions = actions * self.hmax # actions initially is scaled between 0 to 1 actions = actions.astype( int ) # convert into integer because we can't by fraction of shares if self.turbulence_threshold is not None: if self.turbulence >= self.turbulence_threshold: actions = np.array([-self.hmax] * self.stock_dim) begin_total_asset = self.state[0] + sum( np.array(self.state[1 : (self.stock_dim + 1)]) * np.array(self.state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)]) ) # print("begin_total_asset:{}".format(begin_total_asset)) argsort_actions = np.argsort(actions) sell_index = argsort_actions[: np.where(actions < 0)[0].shape[0]] buy_index = argsort_actions[::-1][: np.where(actions > 0)[0].shape[0]] for index in sell_index: # print(f"Num shares before: {self.state[index+self.stock_dim+1]}") # print(f'take sell action before : {actions[index]}') actions[index] = self._sell_stock(index, actions[index]) * (-1) # print(f'take sell action after : {actions[index]}') # print(f"Num shares after: {self.state[index+self.stock_dim+1]}") for index in buy_index: # print('take buy action: {}'.format(actions[index])) actions[index] = self._buy_stock(index, actions[index]) self.actions_memory.append(actions) # state: s -> s+1 self.day += 1 self.data = self.df.loc[self.day, :] if self.turbulence_threshold is not None: if len(self.df.tic.unique()) == 1: self.turbulence = self.data[self.risk_indicator_col] elif len(self.df.tic.unique()) > 1: self.turbulence = self.data[self.risk_indicator_col].values[0] self.state = self._update_state() end_total_asset = self.state[0] + sum( np.array(self.state[1 : (self.stock_dim + 1)]) * np.array(self.state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)]) ) self.asset_memory.append(end_total_asset) self.date_memory.append(self._get_date()) self.reward = end_total_asset - begin_total_asset self.rewards_memory.append(self.reward) self.reward = self.reward * self.reward_scaling return self.state, self.reward, self.terminal, {} def reset(self): # initiate state self.state = self._initiate_state() if self.initial: self.asset_memory = [self.initial_amount] else: previous_total_asset = self.previous_state[0] + sum( np.array(self.state[1 : (self.stock_dim + 1)]) * np.array( self.previous_state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)] ) ) self.asset_memory = [previous_total_asset] self.day = 0 self.data = self.df.loc[self.day, :] self.turbulence = 0 self.cost = 0 self.trades = 0 self.terminal = False # self.iteration=self.iteration self.rewards_memory = [] self.actions_memory = [] self.date_memory = 
[self._get_date()] self.episode += 1 return self.state def render(self, mode="human", close=False): return self.state def _initiate_state(self): if self.initial: # For Initial State if len(self.df.tic.unique()) > 1: # for multiple stock state = ( [self.initial_amount] + self.data.close.values.tolist() + [0] * self.stock_dim + sum( [ self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [], ) ) else: # for single stock state = ( [self.initial_amount] + [self.data.close] + [0] * self.stock_dim + sum([[self.data[tech]] for tech in self.tech_indicator_list], []) ) else: # Using Previous State if len(self.df.tic.unique()) > 1: # for multiple stock state = ( [self.previous_state[0]] + self.data.close.values.tolist() + self.previous_state[ (self.stock_dim + 1) : (self.stock_dim * 2 + 1) ] + sum( [ self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [], ) ) else: # for single stock state = ( [self.previous_state[0]] + [self.data.close] + self.previous_state[ (self.stock_dim + 1) : (self.stock_dim * 2 + 1) ] + sum([[self.data[tech]] for tech in self.tech_indicator_list], []) ) return state def _update_state(self): if len(self.df.tic.unique()) > 1: # for multiple stock state = ( [self.state[0]] + self.data.close.values.tolist() + list(self.state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)]) + sum( [ self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [], ) ) else: # for single stock state = ( [self.state[0]] + [self.data.close] + list(self.state[(self.stock_dim + 1) : (self.stock_dim * 2 + 1)]) + sum([[self.data[tech]] for tech in self.tech_indicator_list], []) ) return state def _get_date(self): if len(self.df.tic.unique()) > 1: date = self.data.date.unique()[0] else: date = self.data.date return date def save_asset_memory(self): date_list = self.date_memory asset_list = self.asset_memory # print(len(date_list)) # print(len(asset_list)) df_account_value = pd.DataFrame( {"date": date_list, "account_value": asset_list} ) return df_account_value def save_action_memory(self): if len(self.df.tic.unique()) > 1: # date and close price length must match actions length date_list = self.date_memory[:-1] df_date = pd.DataFrame(date_list) df_date.columns = ["date"] action_list = self.actions_memory df_actions = pd.DataFrame(action_list) df_actions.columns = self.data.tic.values df_actions.index = df_date.date # df_actions = pd.DataFrame({'date':date_list,'actions':action_list}) else: date_list = self.date_memory[:-1] action_list = self.actions_memory df_actions = pd.DataFrame({"date": date_list, "actions": action_list}) return df_actions def _seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] def get_sb_env(self): e = DummyVecEnv([lambda: self]) obs = e.reset() return e, obs ratio_list = ['OPM', 'NPM','ROA', 'ROE', 'cur_ratio', 'quick_ratio', 'cash_ratio', 'inv_turnover','acc_rec_turnover', 'acc_pay_turnover', 'debt_ratio', 'debt_to_equity', 'PE', 'PB', 'Div_yield'] stock_dimension = len(train.tic.unique()) state_space = 1 + 2*stock_dimension + len(ratio_list)*stock_dimension print(f"Stock Dimension: {stock_dimension}, State Space: {state_space}") # Parameters for the environment env_kwargs = { "hmax": 100, "initial_amount": 1000000, "buy_cost_pct": 0.001, "sell_cost_pct": 0.001, "state_space": state_space, "stock_dim": stock_dimension, "tech_indicator_list": ratio_list, "action_space": stock_dimension, "reward_scaling": 1e-4 } #Establish the training environment using StockTradingEnv() class e_train_gym = 
StockTradingEnv(df = train, **env_kwargs) ###Output _____no_output_____ ###Markdown Environment for Training ###Code env_train, _ = e_train_gym.get_sb_env() print(type(env_train)) ###Output <class 'stable_baselines3.common.vec_env.dummy_vec_env.DummyVecEnv'> ###Markdown Part 6: Implement DRL Algorithms* The implementation of the DRL algorithms are based on **OpenAI Baselines** and **Stable Baselines**. Stable Baselines is a fork of OpenAI Baselines, with a major structural refactoring, and code cleanups.* FinRL library includes fine-tuned standard DRL algorithms, such as DQN, DDPG,Multi-Agent DDPG, PPO, SAC, A2C and TD3. We also allow users todesign their own DRL algorithms by adapting these DRL algorithms. ###Code # Set up the agent using DRLAgent() class using the environment created in the previous part agent = DRLAgent(env = env_train) ###Output _____no_output_____ ###Markdown Model Training: 5 models, A2C DDPG, PPO, TD3, SAC Model 1: A2C ###Code agent = DRLAgent(env = env_train) model_a2c = agent.get_model("a2c") trained_a2c = agent.train_model(model=model_a2c, tb_log_name='a2c', total_timesteps=100000) ###Output ----------------------------------------- | time/ | | | fps | 85 | | iterations | 100 | | time_elapsed | 5 | | total_timesteps | 500 | | train/ | | | entropy_loss | -42.7 | | explained_variance | 0.00025 | | learning_rate | 0.0007 | | n_updates | 99 | | policy_loss | 72.9 | | reward | -0.0017323004 | | std | 1 | | value_loss | 5 | ----------------------------------------- ------------------------------------- | time/ | | | fps | 86 | | iterations | 200 | | time_elapsed | 11 | | total_timesteps | 1000 | | train/ | | | entropy_loss | -42.7 | | explained_variance | -1.19e-07 | | learning_rate | 0.0007 | | n_updates | 199 | | policy_loss | 33.4 | | reward | 1.0503633 | | std | 1 | | value_loss | 4.55 | ------------------------------------- ----------------------------------------- | time/ | | | fps | 87 | | iterations | 300 | | time_elapsed | 17 | | total_timesteps | 1500 | | train/ | | | entropy_loss | -42.7 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 299 | | policy_loss | 19.1 | | reward | -0.0030366322 | | std | 1.01 | | value_loss | 0.988 | ----------------------------------------- ------------------------------------- | time/ | | | fps | 87 | | iterations | 400 | | time_elapsed | 22 | | total_timesteps | 2000 | | train/ | | | entropy_loss | -42.8 | | explained_variance | 0 | | learning_rate | 0.0007 | | n_updates | 399 | | policy_loss | 29.4 | | reward | 1.5995015 | | std | 1.01 | | value_loss | 1.41 | ------------------------------------- ###Markdown Model 2: DDPG ###Code agent = DRLAgent(env = env_train) model_ddpg = agent.get_model("ddpg") trained_ddpg = agent.train_model(model=model_ddpg, tb_log_name='ddpg', total_timesteps=50000) ###Output _____no_output_____ ###Markdown Model 3: PPO ###Code agent = DRLAgent(env = env_train) PPO_PARAMS = { "n_steps": 2048, "ent_coef": 0.01, "learning_rate": 0.00025, "batch_size": 128, } model_ppo = agent.get_model("ppo",model_kwargs = PPO_PARAMS) trained_ppo = agent.train_model(model=model_ppo, tb_log_name='ppo', total_timesteps=50000) ###Output _____no_output_____ ###Markdown Model 4: TD3 ###Code agent = DRLAgent(env = env_train) TD3_PARAMS = {"batch_size": 100, "buffer_size": 1000000, "learning_rate": 0.001} model_td3 = agent.get_model("td3",model_kwargs = TD3_PARAMS) trained_td3 = agent.train_model(model=model_td3, tb_log_name='td3', total_timesteps=30000) ###Output _____no_output_____ ###Markdown Model 5: 
SAC ###Code agent = DRLAgent(env = env_train) SAC_PARAMS = { "batch_size": 128, "buffer_size": 1000000, "learning_rate": 0.0001, "learning_starts": 100, "ent_coef": "auto_0.1", } model_sac = agent.get_model("sac",model_kwargs = SAC_PARAMS) trained_sac = agent.train_model(model=model_sac, tb_log_name='sac', total_timesteps=80000) ###Output _____no_output_____ ###Markdown TradingAssume that we have $1,000,000 initial capital at 2019-01-01. We use the DDPG model to trade Dow jones 30 stocks. TradeDRL model needs to update periodically in order to take full advantage of the data, ideally we need to retrain our model yearly, quarterly, or monthly. We also need to tune the parameters along the way, in this notebook I only use the in-sample data from 2009-01 to 2018-12 to tune the parameters once, so there is some alpha decay here as the length of trade date extends. Numerous hyperparameters – e.g. the learning rate, the total number of samples to train on – influence the learning process and are usually determined by testing some variations. ###Code trade = data_split(processed_full, '2019-01-01','2021-01-01') e_trade_gym = StockTradingEnv(df = trade, **env_kwargs) # env_trade, obs_trade = e_trade_gym.get_sb_env() trade.head() df_account_value, df_actions = DRLAgent.DRL_prediction( model=trained_ddpg, environment = e_trade_gym) df_account_value.shape df_account_value.tail() df_actions.head() ###Output _____no_output_____ ###Markdown Part 7: Backtest Our StrategyBacktesting plays a key role in evaluating the performance of a trading strategy. Automated backtesting tool is preferred because it reduces the human error. We usually use the Quantopian pyfolio package to backtest our trading strategies. It is easy to use and consists of various individual plots that provide a comprehensive image of the performance of a trading strategy. 7.1 BackTestStatspass in df_account_value, this information is stored in env class ###Code print("==============Get Backtest Results===========") now = datetime.datetime.now().strftime('%Y%m%d-%Hh%M') perf_stats_all = backtest_stats(account_value=df_account_value) perf_stats_all = pd.DataFrame(perf_stats_all) perf_stats_all.to_csv("./"+config.RESULTS_DIR+"/perf_stats_all_"+now+'.csv') #baseline stats print("==============Get Baseline Stats===========") baseline_df = get_baseline( ticker="^DJI", start = '2019-01-01', end = '2021-01-01') stats = backtest_stats(baseline_df, value_col_name = 'close') ###Output _____no_output_____ ###Markdown 7.2 BackTestPlot ###Code print("==============Compare to DJIA===========") %matplotlib inline # S&P 500: ^GSPC # Dow Jones Index: ^DJI # NASDAQ 100: ^NDX backtest_plot(df_account_value, baseline_ticker = '^DJI', baseline_start = '2019-01-01', baseline_end = '2021-01-01') ###Output _____no_output_____
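###Markdown As an optional cross-check on the numbers reported by backtest_stats, the cell below recomputes the cumulative return and the annualized Sharpe ratio directly from df_account_value and baseline_df. This is a minimal sketch, assuming the column names used above ('account_value' for the DRL account series, 'close' for the DJIA baseline). ###Code
import numpy as np
import pandas as pd

def manual_perf(values):
    # daily simple returns from a series of portfolio / index values
    values = np.asarray(values, dtype=float)
    rets = pd.Series(values).pct_change().dropna()
    cum_return = values[-1] / values[0] - 1
    sharpe = (252 ** 0.5) * rets.mean() / rets.std()
    return cum_return, sharpe

for name, series in [("DRL (DDPG)", df_account_value["account_value"]),
                     ("DJIA baseline", baseline_df["close"])]:
    cum_ret, sharpe = manual_perf(series)
    print(f"{name}: cumulative return = {cum_ret:.2%}, annualized Sharpe = {sharpe:.2f}")
###Output _____no_output_____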
R5_VARmodel.ipynb
###Markdown Vector autoregressive models (Vector AutoRegression, VAR)
A VAR model does more than produce forecasts: it can also be used to study how several time series interact.

When to use a VAR model
Suppose we have time series of personal consumption and personal income.
- If personal consumption rises, does personal income rise?
- If personal income rises, does personal consumption rise?
A VAR model lets us model exactly this kind of situation, which makes it very convenient:
- several time series can be modeled jointly
- it is useful for studying the influence the variables have on one another

Structure of a VAR model
Writing out the relationship between consumption and income gives the equations below. Note that **the other variable at the same time point** does not appear in the model.
$$ y_t = c_1 + \phi_{11}y_{t-1} + \phi_{12}x_{t-1} + \varepsilon_{1t} \\ x_t = c_2 + \phi_{21}y_{t-1} + \phi_{22}x_{t-1} + \varepsilon_{2t} \\$$
$$ \text{consumption}_{2001} = c_1 + \phi_{11}\,\text{consumption}_{2000} + \phi_{12}\,\text{income}_{2000} + \text{noise} \\ \text{income}_{2001} = c_2 + \phi_{21}\,\text{consumption}_{2000} + \phi_{22}\,\text{income}_{2000} + \text{noise} \\$$
A ***VAR(p)*** model with *n* variables and order *p* is written as
$$ \boldsymbol{y}_t = \boldsymbol{c} + \boldsymbol{\phi}_1\boldsymbol{y}_{t-1} + \cdots + \boldsymbol{\phi}_p\boldsymbol{y}_{t-p} + \boldsymbol{\varepsilon}_t$$
- $\boldsymbol{y_t}$ : an n x 1 vector
- $\boldsymbol{c}$ : an n x 1 vector
- $\boldsymbol{\phi_i}$ : an n x n matrix

Granger causality test
- It tests "whether having the other series improves forecast accuracy."
  - Here, it lets us judge whether including consumption improves the forecasts of income.
- Concretely, it tests "whether the forecast residuals decrease significantly when the other series is used."
- The Granger causality test can only be applied when the analysis is carried out on stationary data.

Impulse response functions
- A way to evaluate quantitatively, for example, how income reacts when consumption suddenly increases.
- A shock is applied to one variable and we simulate how long its effect persists.
- Because the noise terms at the same time point can be correlated, they are split into a correlated part and an independent part (**orthogonalized disturbances**).
- Computing the impulse response after orthogonalizing the residuals in this way is called the **orthogonalized impulse response function**. ###Code
# Install packages
# install.packages("fpp")
# install.packages("vars")

# Load packages
library(urca)
library(fpp)
library(vars)
library(ggplot2)
library(ggfortify)
library(repr)

# Plot options
options(repr.plot.width=14, repr.plot.height=6)

# Data for the analysis:
# the multivariate usconsumption data from the fpp package,
# quarterly growth rates of US consumption and income
head(usconsumption)

# Plot
autoplot(usconsumption, facets = T)

# Run ADF tests to confirm there is no unit root (i.e. stationarity)
# summary(ur.df(usconsumption[, "consumption"]), type="drift")
# summary(ur.df(usconsumption[, "income"]), type="drift")

autoplot(
  ccf(
    usconsumption[, "consumption"],
    usconsumption[, "income"],
    plot = F
  )
)
###Output Warning message:
"`mutate_()` is deprecated as of dplyr 0.7.0.
Please use `mutate()` instead.
See vignette('programming') for more help
This warning is displayed once every 8 hours.
Call `lifecycle::last_warnings()` to see where this warning was generated."
###Markdown VAR models in R
As with ARIMA models, the VAR order is chosen as the order that minimizes AIC. We use the VARselect function from the vars package to pick the order. ###Code
select_result <- VARselect(usconsumption, lag.max=10, type="const")
select_result
###Output _____no_output_____
###Markdown Looking at the AIC values, order 5 gives the smallest AIC. ###Code
select_result$selection[1]

var_bestorder <- VAR(
  y = usconsumption,
  type = "const",
  p = select_result$selection[1]
)

summary(var_bestorder)
###Output _____no_output_____
###Markdown Forecasting with the VAR model ###Code
predict(var_bestorder, n.ahead=4)

# Plot the forecasts up to 8 steps ahead
autoplot(
  predict(var_bestorder, n.ahead=8),
  ts.colour = 1,
  predict.colour = 1,
  predict.linetype = 'dashed'
)
###Output Warning message:
"`filter_()` is deprecated as of dplyr 0.7.0.
Please use `filter()` instead.
See vignette('programming') for more help
This warning is displayed once every 8 hours.
Call `lifecycle::last_warnings()` to see where this warning was generated."
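###Markdown As a reminder of what predict() computes for a fitted VAR(p), stated in the notation used above (this is the standard forecasting recursion, not something specific to this dataset): the h-step-ahead forecast is built from the estimated coefficients as
$$ \hat{\boldsymbol{y}}_{T+h|T} = \hat{\boldsymbol{c}} + \hat{\boldsymbol{\phi}}_1\hat{\boldsymbol{y}}_{T+h-1|T} + \cdots + \hat{\boldsymbol{\phi}}_p\hat{\boldsymbol{y}}_{T+h-p|T}, \qquad \hat{\boldsymbol{y}}_{T+j|T} = \boldsymbol{y}_{T+j} \;\; (j \le 0) $$
so forecasts more than one step ahead reuse the model's own earlier forecasts in place of unobserved future values.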
###Markdown Granger causality tests in R
We use the causality function from the vars package. ###Code
# Effect of income on consumption
causality(var_bestorder, cause = "income")
###Output _____no_output_____
###Markdown - The Granger part of the output shows the result of the Granger causality test. The p-value is 0.212, which is larger than 0.05, so we cannot claim that Granger causality is present.
- The Instant part is the result of the test for what is called Granger instantaneous causality. ###Code
# Effect of consumption on income
causality(var_bestorder, cause = "consumption")
###Output _____no_output_____
###Markdown This time both the Granger causality and the instantaneous causality are significant. Granger causality therefore runs in the direction consumption ⇒ income; in the opposite direction no significant Granger causality was found. Consumption and income at the same point in time appear to influence each other.

Impulse response functions in R
Granger causality runs in the direction consumption ⇒ income. By computing impulse response functions we can examine, when consumption suddenly increases, with how much lag income rises or falls. We use the irf function from the vars package. ###Code
# Impulse response function
irf_consumption <- irf(
  var_bestorder,
  impulse = "consumption",
  response = c("consumption", "income"),
  n.ahead = 12,
  boot = T
)

plot(irf_consumption)
###Output _____no_output_____
###Markdown - Although the response is traced out to three years (12 quarters), the effect of a consumption shock on income has largely disappeared after about two years.

Finally, we check how much of each variable's variation is driven by the other variable (**forecast error variance decomposition**). ###Code
plot(fevd(var_bestorder, n.ahead=12))
###Output _____no_output_____
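###Markdown For reference, the Granger test used above can be read as a comparison between a restricted model, in which income is explained only by its own lags, and an unrestricted model that also includes lags of consumption:
$$ \text{restricted: } x_t = c + \sum_{i=1}^{p} a_i x_{t-i} + e_t, \qquad \text{unrestricted: } x_t = c + \sum_{i=1}^{p} a_i x_{t-i} + \sum_{i=1}^{p} b_i y_{t-i} + e_t $$
$$ F = \frac{(SSR_r - SSR_u)/q}{SSR_u/(T-k)} $$
where $SSR_r$ and $SSR_u$ are the residual sums of squares of the two models, $q$ is the number of excluded lag coefficients, $T$ the sample size and $k$ the number of parameters in the unrestricted model. This is the textbook single-equation version; causality() carries the test out within the fitted VAR system, but the underlying question, whether the other variable's lags significantly reduce the forecast residuals, is the same.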
Adaptor_design/20200125-Design_amplifiers_DNA_100mer.ipynb
###Markdown assemble amplifiers ###Code new_primary_amplifiers, new_secondary_amplifiers = [], [] for _i in range(6): _p_seq_str = amplifier_table['Primary amplifier sequences '][_i] _s_seq_str = amplifier_table['Secondary amplifier sequences'][_i] # 1. extract 1st amplify seq _seqs, _cts = np.unique(_p_seq_str.split(' '), return_counts=True) _lens = np.array([len(_seq) for _seq in _seqs], dtype=np.int) _1st_seq = str(_seqs[np.where((_cts==5) * (_lens>10))[0]][0]) # 2. assemble primary amp seq _p_amp_seq = [ str(readout_sites[_i%3].reverse_complement().seq)] _p_amp_seq += [_1st_seq]*4 _p_amp_seq = ' '.join(_p_amp_seq) # generate the name _p_name = f"{readout_sites[_i%3].id}_4xbit{amplifier_table['Bit'][_i]}_primary" # assemble _p_amp = SeqRecord(Seq(_p_amp_seq), id=_p_name, description='', name='') new_primary_amplifiers.append(_p_amp) # 3. assemble secondary amp seq _s_amp_seq = [str(Seq(_1st_seq).reverse_complement()), 'AA'] _s_amp_seq += [str(readout_sites[_i%3].seq)]*4 _s_amp_seq = ' '.join(_s_amp_seq)[:-2] # generate the name _s_name = f"bit{amplifier_table['Bit'][_i]}rc_4x{readout_sites[_i%3].id}rc_secondary" # assemble _s_amp = SeqRecord(Seq(_s_amp_seq), id=_s_name, description='', name='') new_secondary_amplifiers.append(_s_amp) readout_folder = r'\\10.245.74.212\Chromatin_NAS_2\Libraries\Readouts' stv_readout_file = os.path.join(readout_folder, 'updated_Stvs.fasta') ndb_readout_file = os.path.join(readout_folder, 'updated_NDBs.fasta') kept_primary_amplifiers = library_tools.readouts.Screen_seqs_against_fasta(new_primary_amplifiers, stv_readout_file) kept_primary_amplifiers = library_tools.readouts.Screen_seqs_against_fasta(kept_primary_amplifiers, ndb_readout_file) kept_secondary_amplifiers = library_tools.readouts.Screen_seqs_against_fasta(new_secondary_amplifiers, stv_readout_file) kept_secondary_amplifiers = library_tools.readouts.Screen_seqs_against_fasta(kept_secondary_amplifiers, ndb_readout_file) print(new_primary_amplifiers[0].seq[:20].reverse_complement()) print(new_primary_amplifiers[0].seq) print(new_secondary_amplifiers[0].seq) kept_secondary_amplifiers[0].id print(kept_primary_amplifiers[0].seq) print(kept_secondary_amplifiers[0].seq) print(readout_sites[0].reverse_complement().seq) print(readout_sites[0].seq) # generate csv file to order in IDT import csv with open(os.path.join(amplifier_folder, '20210201-_primary_amplifiers-100mer.csv'), 'w') as csvfile: csvwriter = csv.writer(csvfile, delimiter=',', lineterminator='\n', quotechar='|', quoting=csv.QUOTE_MINIMAL) # write header _header = ['Name', 'Sequence', 'Scale', 'Purification'] csvwriter.writerow(_header) # write sequence for _amplifier in kept_primary_amplifiers: _info = [_amplifier.id, str(_amplifier.seq), '250nm', 'STD'] csvwriter.writerow(_info) # generate csv file to order in IDT import csv with open(os.path.join(amplifier_folder, '20210201-_secondary_amplifiers_100mer.csv'), 'w') as csvfile: csvwriter = csv.writer(csvfile, delimiter=',', lineterminator='\n', quotechar='|', quoting=csv.QUOTE_MINIMAL) # write header _header = ['Name', 'Sequence', 'Scale', 'Purification'] csvwriter.writerow(_header) # write sequence for _amplifier in kept_secondary_amplifiers: _info = [_amplifier.id, str(_amplifier.seq), '250nm', 'STD'] csvwriter.writerow(_info) ###Output _____no_output_____
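###Markdown A quick sanity check on the final oligos before ordering. This is a minimal sketch, assuming the spaces inside each assembled sequence are cosmetic separators only and that the intended oligo length is roughly 100 nt, as the output file names suggest. ###Code
for _amp in kept_primary_amplifiers + kept_secondary_amplifiers:
    _seq = str(_amp.seq).replace(' ', '').upper()  # drop the separator spaces used during assembly
    _gc = (_seq.count('G') + _seq.count('C')) / len(_seq)  # GC fraction of the ordered oligo
    _note = '' if abs(len(_seq) - 100) <= 5 else ' <-- check length'
    print(f"{_amp.id}\tlength={len(_seq)}\tGC={_gc:.2f}{_note}")
###Output _____no_output_____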
alternative_models/Simulating_proportional_mapping_of_S_and_partial_synchronization_of_G1_model-tau=2.ipynb
###Markdown Outgrowth simulationsProportional mapping of S and partial synchronization of G1 model ###Code # required libraries import numpy as np from scipy import stats import pandas as pd import os def cellsCycling(ts,cells,rl): # a cell divides when it completes its cell cycle, # meaning that the time remaining to cell division ('tr') reachs zero. cells_that_divide = [] for cell_id in cells.keys(): # go through each cell if not cells[cell_id]['dormant']: cells[cell_id]['tc'] += 1 # updates cell cycle position (simulation time dependent) if cells[cell_id]['td'] == cells[cell_id]['tc']: # checks if cell cycle is completed # if the cell completes its cell cycle cells_that_divide.append(cell_id) else: if cells[cell_id]['recruited']: cells[cell_id]['awakeningDelay'] -= 1 if cells[cell_id]['awakeningDelay'] == 0: cells[cell_id]['dormant'] = False if cells[cell_id]['position'] >= rl and ts < tau and not cells[cell_id]['recruited']: # signal pulse at time tau in lambda microns cells = cycleShortening(cells,cell_id) # cell recruitment cells = updatePositions(cells,cells_that_divide) # cell pushing mechanism cells = cellsDivision(cells,cells_that_divide) # cell division for cell_id in cells.keys(): cells = phaseIdentifier(cells,cell_id) # identifies in which phase the cell is return cells def cycleShortening(cells,cell_id): # cell cycle shortening implementation cycle_position = cells[cell_id]['tc'] cycle_length = cells[cell_id]['td'] g1_length = int(cycle_length*long_g1_proportion) g1_reduction = int(cycle_length*g1_reduction_proportion) s_length = int(cycle_length*long_s_proportion) s_reduction = int(cycle_length*s_reduction_proportion) g2m_length = int(cycle_length*long_g2m_proportion) if 0 <= cycle_position <= g1_reduction: # cell in the G1 skip # G1 skipping part 1 (partial synchronization implementation part 1) cells[cell_id]['tc'] = cycle_position-cycle_position elif g1_reduction < cycle_position <= g1_length: # cell in the rest of G1 # G1 skipping part 2 (partial synchronization implementation part 2) cells[cell_id]['tc'] = cycle_position-g1_reduction elif g1_length < cycle_position <= g1_length+s_length: # cell in S phase # S mapping (proportional mapping implementation) cells[cell_id]['tc'] = int((cycle_position-g1_length)*((s_length-s_reduction)/s_length)+(g1_length-g1_reduction)) elif g1_length+s_length < cycle_position <= g1_length+s_length+g2m_length+2: # cell in G2/M cells[cell_id]['tc'] = cycle_position-g1_reduction-s_reduction cells[cell_id]['td'] = cycle_length-g1_reduction-s_reduction cells[cell_id]['recruited'] = True cells[cell_id]['dormant'] = False return cells def updatePositions(cells,cells_that_divide): # cell pushing mechanism implementation movements = {} for cell_id in cells.keys(): cell_movement = 0 for divided_cell in cells_that_divide: if cells[cell_id]['position'] >= cells[divided_cell]['position']: cell_movement += 1 movements[cell_id] = cell_movement for cell_id in cells.keys(): cells[cell_id]['position'] = cells[cell_id]['position']+movements[cell_id]*cell_diameter return cells def cellsDivision(cells,cells_that_divide): # creates new cells based on mothers properties for cell_id in cells_that_divide: cells[cell_id]['tc'] = 0 daughter_id = len(cells) if cells[cell_id]['recruited']: # daughters of recruited cells are also recruited cells cells[cell_id]['td'] = lognormal(short_cycle_mean,short_cycle_std) cells[daughter_id] = {'tc':0, 'td':lognormal(short_cycle_mean,short_cycle_std), 'recruited':True, 'position':cells[cell_id]['position']-cell_diameter, 
'dormant':False, 'awakeningDelay':0, 'clone':cell_id} else: # daughters of non-recruited cells are also non-recruited cells cells[cell_id]['td'] = lognormal(long_cycle_mean,long_cycle_std) cells[daughter_id] = {'tc':0, 'td':lognormal(long_cycle_mean,long_cycle_std), 'recruited':False, 'position':cells[cell_id]['position']-cell_diameter, 'dormant':False, 'awakeningDelay':0, 'clone':cell_id} return cells def phaseIdentifier(cells,cell_id): # identifies the current cell phase cycle_position = cells[cell_id]['tc'] cycle_length = cells[cell_id]['td'] if cells[cell_id]['recruited']: g1_length = int(cycle_length*short_g1_proportion) s_length = int(cycle_length*short_s_proportion) g2m_length = int(cycle_length*short_g2m_proportion) if 0 <= cycle_position <= g1_length: # G1 phase cells[cell_id]['phase'] = "G1" elif g1_length < cycle_position <= g1_length+s_length: # S phase cells[cell_id]['phase'] = "S" elif g1_length+s_length < cycle_position <= g1_length+s_length+g2m_length+2: # G2/M phase cells[cell_id]['phase'] = "G2/M" else: g1_length = int(cycle_length*long_g1_proportion) s_length = int(cycle_length*long_s_proportion) g2m_length = int(cycle_length*long_g2m_proportion) if 0 <= cycle_position <= g1_length: # G1 phase cells[cell_id]['phase'] = "G1" elif g1_length < cycle_position <= g1_length+s_length: # S phase cells[cell_id]['phase'] = "S" elif g1_length+s_length < cycle_position <= g1_length+s_length+g2m_length+2: # G2/M phase cells[cell_id]['phase'] = "G2/M" if cells[cell_id]['dormant'] or cells[cell_id]['awakeningDelay'] > 0: cells[cell_id]['phase'] = "G0" return cells def tc_distribution(td): x = np.arange(0,td+1) fn = 2**(1-x*p/td) fn /= fn.sum() # normalization tc = np.random.choice(x, p=fn) return tc def lognormal(mu_x,dt_x,size=1,integer=True): # Draw one value (or more if size > 1) from a discretized lognormal distribution mu = np.log(mu_x**2/np.sqrt(mu_x**2+dt_x**2)) sigma = np.sqrt(np.log(1+dt_x**2/mu_x**2)) shape = sigma # Scipy's shape parameter scale = np.exp(mu) # Scipy's scale parameter distribution = stats.lognorm.rvs(scale=scale,s=shape,size=size) if len(distribution) == 1: if integer: return int(distribution[0]) else: return distribution[0] else: return distribution def run(): # simulation run # initial conditions cells = {} for cell_id in range(0,n0): cell_key = cell_id td = lognormal(long_cycle_mean,long_cycle_std) tc = tc_distribution(td) cells[cell_key] = {'td':td, # cell cycle length 'tc':tc, # cell cycle position 'position':(cell_key+1-n0)*cell_diameter, 'recruited':False, 'dormant':False, 'awakeningDelay':0, 'clone':n0-cell_key} g0_cells_number = int(n0*g0_prop) cells_df = pd.DataFrame.from_dict(cells, orient='index') g0_cells = cells_df[cells_df['tc'] <= long_g1].sample(g0_cells_number).index cells_df.loc[g0_cells,'dormant'] = True cells_df.loc[g0_cells,'awakeningDelay'] = awakeningDelay cells = cells_df.to_dict(orient='index') # time iteration simulation = {} # empty simulation output ts = 0 # simulation time = 0 for ts in range(0,steps): signal_pos = ts*(-l/tau) cells = cellsCycling(ts,cells,signal_pos) cells_df = pd.DataFrame.from_dict(cells, orient='index') simulation[ts] = cells_df return simulation # run parameters n0_mean,n0_std = 196,2 # n0 mean and standar deviation l_mean,l_std = 828,30 # lambda mean and standar deviation tau_mean = 2 steps = 1+24*8 # number of steps (in hours) np.random.seed(0) seeds_number = 100 # number of simulations # constants cell_diameter = 13.2 # cell diameter long_g1 = 152 # G1 length in long cycle long_s = 179 # S length in long 
cycle short_g1 = 22 # G1 length in short cycle short_s = 88 # S length in short cycle long_g2m = short_g2m = 9 # G2/M length in both, long and short cycle long_cycle_mean = long_g1+long_s+long_g2m # long cell cycle mean long_cycle_std = 32 # long cell cycle standar deviation short_cycle_mean = short_g1+short_s+short_g2m # short cell cycle mean short_cycle_std = 10 # short cell cycle standar deviation long_g1_proportion = long_g1/long_cycle_mean # G1 proportion in the long cell cycle long_s_proportion = long_s/long_cycle_mean # S proportion in the long cell cycle long_g2m_proportion = long_g2m/long_cycle_mean # G2/M proportion in the long cell cycle short_g1_proportion = short_g1/short_cycle_mean # G1 proportion in the short cell cycle short_s_proportion = short_s/short_cycle_mean # S proportion in the short cell cycle short_g2m_proportion = short_g2m/short_cycle_mean # G2/M proportion in the short cell cycle g1_reduction_proportion = (long_g1-short_g1)/long_cycle_mean # proportion of G1 reduction in the long cell cycle s_reduction_proportion = (long_s-short_s)/long_cycle_mean # proportion of S reduction in the long cell cycle g0_prop = 0.12 # G0 cells proportion awakeningDelay = 72 # G0 cells activation delay p = 2 # tc_distribution perturbation # directory name root = '../main/simulations/' model = 'outgrowth/' parameters = 'n0='+str(n0_mean)+'\n'+'l='+str(l_mean)+'\n'+'tau='+str(tau_mean)+'/' path = root+model+parameters if not os.path.isdir(path): os.makedirs(path) # simulations for seed in range(1,seeds_number+1): print('Runing seed number:',seed, end="\r", flush=True) # parameters drawing n0 = int(np.random.normal(n0_mean,n0_std)) l = int(np.random.normal(l_mean,l_std)) tau = tau_mean # simulation run simulation = run() # output file for each seed parameters = 'seed='+str(seed)+'_n0='+str(n0)+'_'+'l='+str(l)+'_'+'tau='+str(tau) data = pd.concat(simulation, names=['time','id']) outfile = open(path+parameters+'.csv', 'a') data.to_csv(outfile, sep=',') outfile.close() ###Output _____no_output_____
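###Markdown A minimal post-processing sketch, assuming at least one seed=1 output file exists under path (the exact file name varies because n0 and l are redrawn for every seed): it reads one per-seed CSV back and summarises, per time step, how many cells are recruited and how cells are distributed across cycle phases. ###Code
import glob
import pandas as pd

# read back one per-seed output file (any seed would do; seed 1 is used here)
example_file = sorted(glob.glob(path + 'seed=1_*.csv'))[0]
sim = pd.read_csv(example_file)

# make the 'recruited' flag boolean, whether it was read back as bool or as text
sim['recruited'] = sim['recruited'].astype(str) == 'True'

# recruited-cell counts and phase composition per time step
recruited_per_step = sim.groupby('time')['recruited'].sum()
phase_counts = sim.groupby(['time', 'phase']).size().unstack(fill_value=0)

print(recruited_per_step.tail())
print(phase_counts.tail())
###Output _____no_output_____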
pan/COURSE 1 Neural Networks and Deep Learning/Week 4/Building your Deep Neural Network - Step by Step/v5.ipynb
###Markdown Building your Deep Neural Network: Step by StepWelcome to your week 4 assignment (part 1 of 2)! You have previously trained a 2-layer Neural Network (with a single hidden layer). This week, you will build a deep neural network, with as many layers as you want!- In this notebook, you will implement all the functions required to build a deep neural network.- In the next assignment, you will use these functions to build a deep neural network for image classification.**After this assignment you will be able to:**- Use non-linear units like ReLU to improve your model- Build a deeper neural network (with more than 1 hidden layer)- Implement an easy-to-use neural network class**Notation**:- Superscript $[l]$ denotes a quantity associated with the $l^{th}$ layer. - Example: $a^{[L]}$ is the $L^{th}$ layer activation. $W^{[L]}$ and $b^{[L]}$ are the $L^{th}$ layer parameters.- Superscript $(i)$ denotes a quantity associated with the $i^{th}$ example. - Example: $x^{(i)}$ is the $i^{th}$ training example.- Lowerscript $i$ denotes the $i^{th}$ entry of a vector. - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the $l^{th}$ layer's activations).Let's get started! 1 - PackagesLet's first import all the packages that you will need during this assignment. - [numpy](www.numpy.org) is the main package for scientific computing with Python.- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.- dnn_utils provides some necessary functions for this notebook.- testCases provides some test cases to assess the correctness of your functions- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work. Please don't change the seed. ###Code import numpy as np import h5py import matplotlib.pyplot as plt from testCases_v3 import * from dnn_utils_v2 import sigmoid, sigmoid_backward, relu, relu_backward %matplotlib inline plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' %load_ext autoreload %autoreload 2 np.random.seed(1) ###Output _____no_output_____ ###Markdown 2 - Outline of the AssignmentTo build your neural network, you will be implementing several "helper functions". These helper functions will be used in the next assignment to build a two-layer neural network and an L-layer neural network. Each small helper function you will implement will have detailed instructions that will walk you through the necessary steps. Here is an outline of this assignment, you will:- Initialize the parameters for a two-layer network and for an $L$-layer neural network.- Implement the forward propagation module (shown in purple in the figure below). - Complete the LINEAR part of a layer's forward propagation step (resulting in $Z^{[l]}$). - We give you the ACTIVATION function (relu/sigmoid). - Combine the previous two steps into a new [LINEAR->ACTIVATION] forward function. - Stack the [LINEAR->RELU] forward function L-1 time (for layers 1 through L-1) and add a [LINEAR->SIGMOID] at the end (for the final layer $L$). This gives you a new L_model_forward function.- Compute the loss.- Implement the backward propagation module (denoted in red in the figure below). - Complete the LINEAR part of a layer's backward propagation step. - We give you the gradient of the ACTIVATE function (relu_backward/sigmoid_backward) - Combine the previous two steps into a new [LINEAR->ACTIVATION] backward function. 
- Stack [LINEAR->RELU] backward L-1 times and add [LINEAR->SIGMOID] backward in a new L_model_backward function- Finally update the parameters. **Figure 1****Note** that for every forward function, there is a corresponding backward function. That is why at every step of your forward module you will be storing some values in a cache. The cached values are useful for computing gradients. In the backpropagation module you will then use the cache to calculate the gradients. This assignment will show you exactly how to carry out each of these steps. 3 - InitializationYou will write two helper functions that will initialize the parameters for your model. The first function will be used to initialize parameters for a two layer model. The second one will generalize this initialization process to $L$ layers. 3.1 - 2-layer Neural Network**Exercise**: Create and initialize the parameters of the 2-layer neural network.**Instructions**:- The model's structure is: *LINEAR -> RELU -> LINEAR -> SIGMOID*. - Use random initialization for the weight matrices. Use `np.random.randn(shape)*0.01` with the correct shape.- Use zero initialization for the biases. Use `np.zeros(shape)`. ###Code # GRADED FUNCTION: initialize_parameters def initialize_parameters(n_x, n_h, n_y): """ Argument: n_x -- size of the input layer n_h -- size of the hidden layer n_y -- size of the output layer Returns: parameters -- python dictionary containing your parameters: W1 -- weight matrix of shape (n_h, n_x) b1 -- bias vector of shape (n_h, 1) W2 -- weight matrix of shape (n_y, n_h) b2 -- bias vector of shape (n_y, 1) """ np.random.seed(1) ### START CODE HERE ### (≈ 4 lines of code) W1 = np.random.randn(n_h,n_x)*0.01 b1 = np.zeros((n_h,1)) W2 = np.random.randn(n_y,n_h)*0.01 b2 = np.zeros((n_y,1)) ### END CODE HERE ### assert(W1.shape == (n_h, n_x)) assert(b1.shape == (n_h, 1)) assert(W2.shape == (n_y, n_h)) assert(b2.shape == (n_y, 1)) parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2} return parameters parameters = initialize_parameters(3,2,1) print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) ###Output W1 = [[ 0.01624345 -0.00611756 -0.00528172] [-0.01072969 0.00865408 -0.02301539]] b1 = [[0.] [0.]] W2 = [[ 0.01744812 -0.00761207]] b2 = [[0.]] ###Markdown **Expected output**: **W1** [[ 0.01624345 -0.00611756 -0.00528172] [-0.01072969 0.00865408 -0.02301539]] **b1** [[ 0.] [ 0.]] **W2** [[ 0.01744812 -0.00761207]] **b2** [[ 0.]] 3.2 - L-layer Neural NetworkThe initialization for a deeper L-layer neural network is more complicated because there are many more weight matrices and bias vectors. When completing the `initialize_parameters_deep`, you should make sure that your dimensions match between each layer. Recall that $n^{[l]}$ is the number of units in layer $l$. 
Thus for example if the size of our input $X$ is $(12288, 209)$ (with $m=209$ examples) then: **Shape of W** **Shape of b** **Activation** **Shape of Activation** **Layer 1** $(n^{[1]},12288)$ $(n^{[1]},1)$ $Z^{[1]} = W^{[1]} X + b^{[1]} $ $(n^{[1]},209)$ **Layer 2** $(n^{[2]}, n^{[1]})$ $(n^{[2]},1)$ $Z^{[2]} = W^{[2]} A^{[1]} + b^{[2]}$ $(n^{[2]}, 209)$ $\vdots$ $\vdots$ $\vdots$ $\vdots$ $\vdots$ **Layer L-1** $(n^{[L-1]}, n^{[L-2]})$ $(n^{[L-1]}, 1)$ $Z^{[L-1]} = W^{[L-1]} A^{[L-2]} + b^{[L-1]}$ $(n^{[L-1]}, 209)$ **Layer L** $(n^{[L]}, n^{[L-1]})$ $(n^{[L]}, 1)$ $Z^{[L]} = W^{[L]} A^{[L-1]} + b^{[L]}$ $(n^{[L]}, 209)$ Remember that when we compute $W X + b$ in python, it carries out broadcasting. For example, if: $$ W = \begin{bmatrix} j & k & l\\ m & n & o \\ p & q & r \end{bmatrix}\;\;\; X = \begin{bmatrix} a & b & c\\ d & e & f \\ g & h & i \end{bmatrix} \;\;\; b =\begin{bmatrix} s \\ t \\ u\end{bmatrix}\tag{2}$$Then $WX + b$ will be:$$ WX + b = \begin{bmatrix} (ja + kd + lg) + s & (jb + ke + lh) + s & (jc + kf + li)+ s\\ (ma + nd + og) + t & (mb + ne + oh) + t & (mc + nf + oi) + t\\ (pa + qd + rg) + u & (pb + qe + rh) + u & (pc + qf + ri)+ u\end{bmatrix}\tag{3} $$ **Exercise**: Implement initialization for an L-layer Neural Network. **Instructions**:- The model's structure is *[LINEAR -> RELU] $ \times$ (L-1) -> LINEAR -> SIGMOID*. I.e., it has $L-1$ layers using a ReLU activation function followed by an output layer with a sigmoid activation function.- Use random initialization for the weight matrices. Use `np.random.rand(shape) * 0.01`.- Use zeros initialization for the biases. Use `np.zeros(shape)`.- We will store $n^{[l]}$, the number of units in different layers, in a variable `layer_dims`. For example, the `layer_dims` for the "Planar Data classification model" from last week would have been [2,4,1]: There were two inputs, one hidden layer with 4 hidden units, and an output layer with 1 output unit. Thus means `W1`'s shape was (4,2), `b1` was (4,1), `W2` was (1,4) and `b2` was (1,1). Now you will generalize this to $L$ layers! - Here is the implementation for $L=1$ (one layer neural network). 
It should inspire you to implement the general case (L-layer neural network).```python if L == 1: parameters["W" + str(L)] = np.random.randn(layer_dims[1], layer_dims[0]) * 0.01 parameters["b" + str(L)] = np.zeros((layer_dims[1], 1))``` ###Code # GRADED FUNCTION: initialize_parameters_deep def initialize_parameters_deep(layer_dims): """ Arguments: layer_dims -- python array (list) containing the dimensions of each layer in our network Returns: parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL": Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1]) bl -- bias vector of shape (layer_dims[l], 1) """ np.random.seed(3) parameters = {} L = len(layer_dims) # number of layers in the network for l in range(1, L): ### START CODE HERE ### (≈ 2 lines of code) parameters['W' + str(l)] = np.random.randn(layer_dims[l],layer_dims[l-1])*0.01 parameters['b' + str(l)] = np.zeros((layer_dims[l],1)) ### END CODE HERE ### assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1])) assert(parameters['b' + str(l)].shape == (layer_dims[l], 1)) return parameters parameters = initialize_parameters_deep([5,4,3]) print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) ###Output W1 = [[ 0.01788628 0.0043651 0.00096497 -0.01863493 -0.00277388] [-0.00354759 -0.00082741 -0.00627001 -0.00043818 -0.00477218] [-0.01313865 0.00884622 0.00881318 0.01709573 0.00050034] [-0.00404677 -0.0054536 -0.01546477 0.00982367 -0.01101068]] b1 = [[0.] [0.] [0.] [0.]] W2 = [[-0.01185047 -0.0020565 0.01486148 0.00236716] [-0.01023785 -0.00712993 0.00625245 -0.00160513] [-0.00768836 -0.00230031 0.00745056 0.01976111]] b2 = [[0.] [0.] [0.]] ###Markdown **Expected output**: **W1** [[ 0.01788628 0.0043651 0.00096497 -0.01863493 -0.00277388] [-0.00354759 -0.00082741 -0.00627001 -0.00043818 -0.00477218] [-0.01313865 0.00884622 0.00881318 0.01709573 0.00050034] [-0.00404677 -0.0054536 -0.01546477 0.00982367 -0.01101068]] **b1** [[ 0.] [ 0.] [ 0.] [ 0.]] **W2** [[-0.01185047 -0.0020565 0.01486148 0.00236716] [-0.01023785 -0.00712993 0.00625245 -0.00160513] [-0.00768836 -0.00230031 0.00745056 0.01976111]] **b2** [[ 0.] [ 0.] [ 0.]] 4 - Forward propagation module 4.1 - Linear Forward Now that you have initialized your parameters, you will do the forward propagation module. You will start by implementing some basic functions that you will use later when implementing the model. You will complete three functions in this order:- LINEAR- LINEAR -> ACTIVATION where ACTIVATION will be either ReLU or Sigmoid. - [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID (whole model)The linear forward module (vectorized over all the examples) computes the following equations:$$Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}\tag{4}$$where $A^{[0]} = X$. **Exercise**: Build the linear part of forward propagation.**Reminder**:The mathematical representation of this unit is $Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}$. You may also find `np.dot()` useful. If your dimensions don't match, printing `W.shape` may help. ###Code # GRADED FUNCTION: linear_forward def linear_forward(A, W, b): """ Implement the linear part of a layer's forward propagation. 
Arguments: A -- activations from previous layer (or input data): (size of previous layer, number of examples) W -- weights matrix: numpy array of shape (size of current layer, size of previous layer) b -- bias vector, numpy array of shape (size of the current layer, 1) Returns: Z -- the input of the activation function, also called pre-activation parameter cache -- a python dictionary containing "A", "W" and "b" ; stored for computing the backward pass efficiently """ ### START CODE HERE ### (≈ 1 line of code) Z = np.dot(W,A)+b ### END CODE HERE ### assert(Z.shape == (W.shape[0], A.shape[1])) cache = (A, W, b) return Z, cache A, W, b = linear_forward_test_case() Z, linear_cache = linear_forward(A, W, b) print("Z = " + str(Z)) ###Output Z = [[ 3.26295337 -1.23429987]] ###Markdown **Expected output**: **Z** [[ 3.26295337 -1.23429987]] 4.2 - Linear-Activation ForwardIn this notebook, you will use two activation functions:- **Sigmoid**: $\sigma(Z) = \sigma(W A + b) = \frac{1}{ 1 + e^{-(W A + b)}}$. We have provided you with the `sigmoid` function. This function returns **two** items: the activation value "`a`" and a "`cache`" that contains "`Z`" (it's what we will feed in to the corresponding backward function). To use it you could just call: ``` pythonA, activation_cache = sigmoid(Z)```- **ReLU**: The mathematical formula for ReLu is $A = RELU(Z) = max(0, Z)$. We have provided you with the `relu` function. This function returns **two** items: the activation value "`A`" and a "`cache`" that contains "`Z`" (it's what we will feed in to the corresponding backward function). To use it you could just call:``` pythonA, activation_cache = relu(Z)``` For more convenience, you are going to group two functions (Linear and Activation) into one function (LINEAR->ACTIVATION). Hence, you will implement a function that does the LINEAR forward step followed by an ACTIVATION forward step.**Exercise**: Implement the forward propagation of the *LINEAR->ACTIVATION* layer. Mathematical relation is: $A^{[l]} = g(Z^{[l]}) = g(W^{[l]}A^{[l-1]} +b^{[l]})$ where the activation "g" can be sigmoid() or relu(). Use linear_forward() and the correct activation function. ###Code # GRADED FUNCTION: linear_activation_forward def linear_activation_forward(A_prev, W, b, activation): """ Implement the forward propagation for the LINEAR->ACTIVATION layer Arguments: A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples) W -- weights matrix: numpy array of shape (size of current layer, size of previous layer) b -- bias vector, numpy array of shape (size of the current layer, 1) activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu" Returns: A -- the output of the activation function, also called the post-activation value cache -- a python dictionary containing "linear_cache" and "activation_cache"; stored for computing the backward pass efficiently """ if activation == "sigmoid": # Inputs: "A_prev, W, b". Outputs: "A, activation_cache". ### START CODE HERE ### (≈ 2 lines of code) Z, linear_cache = linear_forward(A_prev,W,b) A, activation_cache = sigmoid(Z) ### END CODE HERE ### elif activation == "relu": # Inputs: "A_prev, W, b". Outputs: "A, activation_cache". 
### START CODE HERE ### (≈ 2 lines of code) Z, linear_cache = linear_forward(A_prev,W,b) A, activation_cache = relu(Z) ### END CODE HERE ### assert (A.shape == (W.shape[0], A_prev.shape[1])) cache = (linear_cache, activation_cache) return A, cache A_prev, W, b = linear_activation_forward_test_case() A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "sigmoid") print("With sigmoid: A = " + str(A)) A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "relu") print("With ReLU: A = " + str(A)) ###Output With sigmoid: A = [[0.96890023 0.11013289]] With ReLU: A = [[3.43896131 0. ]] ###Markdown **Expected output**: **With sigmoid: A ** [[ 0.96890023 0.11013289]] **With ReLU: A ** [[ 3.43896131 0. ]] **Note**: In deep learning, the "[LINEAR->ACTIVATION]" computation is counted as a single layer in the neural network, not two layers. d) L-Layer Model For even more convenience when implementing the $L$-layer Neural Net, you will need a function that replicates the previous one (`linear_activation_forward` with RELU) $L-1$ times, then follows that with one `linear_activation_forward` with SIGMOID. **Figure 2** : *[LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID* model**Exercise**: Implement the forward propagation of the above model.**Instruction**: In the code below, the variable `AL` will denote $A^{[L]} = \sigma(Z^{[L]}) = \sigma(W^{[L]} A^{[L-1]} + b^{[L]})$. (This is sometimes also called `Yhat`, i.e., this is $\hat{Y}$.) **Tips**:- Use the functions you had previously written - Use a for loop to replicate [LINEAR->RELU] (L-1) times- Don't forget to keep track of the caches in the "caches" list. To add a new value `c` to a `list`, you can use `list.append(c)`. ###Code # GRADED FUNCTION: L_model_forward def L_model_forward(X, parameters): """ Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation Arguments: X -- data, numpy array of shape (input size, number of examples) parameters -- output of initialize_parameters_deep() Returns: AL -- last post-activation value caches -- list of caches containing: every cache of linear_relu_forward() (there are L-1 of them, indexed from 0 to L-2) the cache of linear_sigmoid_forward() (there is one, indexed L-1) """ caches = [] A = X L = len(parameters) // 2 # number of layers in the neural network # Implement [LINEAR -> RELU]*(L-1). Add "cache" to the "caches" list. for l in range(1, L): A_prev = A ### START CODE HERE ### (≈ 2 lines of code) A, cache = linear_activation_forward(A_prev, parameters["W"+str(l)], parameters["b"+str(l)], activation = "relu") caches.append(cache) ### END CODE HERE ### # Implement LINEAR -> SIGMOID. Add "cache" to the "caches" list. ### START CODE HERE ### (≈ 2 lines of code) AL, cache = linear_activation_forward(A, parameters["W"+str(L)], parameters["b"+str(L)], activation = "sigmoid") caches.append(cache) ### END CODE HERE ### assert(AL.shape == (1,X.shape[1])) return AL, caches X, parameters = L_model_forward_test_case_2hidden() AL, caches = L_model_forward(X, parameters) print("AL = " + str(AL)) print("Length of caches list = " + str(len(caches))) ###Output AL = [[0.03921668 0.70498921 0.19734387 0.04728177]] Length of caches list = 3 ###Markdown **AL** [[ 0.03921668 0.70498921 0.19734387 0.04728177]] **Length of caches list ** 3 Great! Now you have a full forward propagation that takes the input X and outputs a row vector $A^{[L]}$ containing your predictions. It also records all intermediate values in "caches". 
Using $A^{[L]}$, you can compute the cost of your predictions. 5 - Cost functionNow you will implement forward and backward propagation. You need to compute the cost, because you want to check if your model is actually learning.**Exercise**: Compute the cross-entropy cost $J$, using the following formula: $$-\frac{1}{m} \sum\limits_{i = 1}^{m} (y^{(i)}\log\left(a^{[L] (i)}\right) + (1-y^{(i)})\log\left(1- a^{[L](i)}\right)) \tag{7}$$ ###Code # GRADED FUNCTION: compute_cost def compute_cost(AL, Y): """ Implement the cost function defined by equation (7). Arguments: AL -- probability vector corresponding to your label predictions, shape (1, number of examples) Y -- true "label" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples) Returns: cost -- cross-entropy cost """ m = Y.shape[1] # Compute loss from aL and y. ### START CODE HERE ### (≈ 1 lines of code) cost = -np.sum(Y*np.log(AL)+(1-Y)*np.log(1-AL))/m ### END CODE HERE ### cost = np.squeeze(cost) # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17). assert(cost.shape == ()) return cost Y, AL = compute_cost_test_case() print("cost = " + str(compute_cost(AL, Y))) ###Output cost = 0.414931599615397 ###Markdown **Expected Output**: **cost** 0.41493159961539694 6 - Backward propagation moduleJust like with forward propagation, you will implement helper functions for backpropagation. Remember that back propagation is used to calculate the gradient of the loss function with respect to the parameters. **Reminder**: **Figure 3** : Forward and Backward propagation for *LINEAR->RELU->LINEAR->SIGMOID* *The purple blocks represent the forward propagation, and the red blocks represent the backward propagation.* <!-- For those of you who are expert in calculus (you don't need to be to do this assignment), the chain rule of calculus can be used to derive the derivative of the loss $\mathcal{L}$ with respect to $z^{[1]}$ in a 2-layer network as follows:$$\frac{d \mathcal{L}(a^{[2]},y)}{{dz^{[1]}}} = \frac{d\mathcal{L}(a^{[2]},y)}{{da^{[2]}}}\frac{{da^{[2]}}}{{dz^{[2]}}}\frac{{dz^{[2]}}}{{da^{[1]}}}\frac{{da^{[1]}}}{{dz^{[1]}}} \tag{8} $$In order to calculate the gradient $dW^{[1]} = \frac{\partial L}{\partial W^{[1]}}$, you use the previous chain rule and you do $dW^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial W^{[1]}}$. During the backpropagation, at each step you multiply your current gradient by the gradient corresponding to the specific layer to get the gradient you wanted.Equivalently, in order to calculate the gradient $db^{[1]} = \frac{\partial L}{\partial b^{[1]}}$, you use the previous chain rule and you do $db^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial b^{[1]}}$.This is why we talk about **backpropagation**.!-->Now, similar to forward propagation, you are going to build the backward propagation in three steps:- LINEAR backward- LINEAR -> ACTIVATION backward where ACTIVATION computes the derivative of either the ReLU or sigmoid activation- [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID backward (whole model) 6.1 - Linear backwardFor layer $l$, the linear part is: $Z^{[l]} = W^{[l]} A^{[l-1]} + b^{[l]}$ (followed by an activation).Suppose you have already calculated the derivative $dZ^{[l]} = \frac{\partial \mathcal{L} }{\partial Z^{[l]}}$. You want to get $(dW^{[l]}, db^{[l]} dA^{[l-1]})$. 
**Figure 4** The three outputs $(dW^{[l]}, db^{[l]}, dA^{[l]})$ are computed using the input $dZ^{[l]}$.Here are the formulas you need:$$ dW^{[l]} = \frac{\partial \mathcal{L} }{\partial W^{[l]}} = \frac{1}{m} dZ^{[l]} A^{[l-1] T} \tag{8}$$$$ db^{[l]} = \frac{\partial \mathcal{L} }{\partial b^{[l]}} = \frac{1}{m} \sum_{i = 1}^{m} dZ^{[l](i)}\tag{9}$$$$ dA^{[l-1]} = \frac{\partial \mathcal{L} }{\partial A^{[l-1]}} = W^{[l] T} dZ^{[l]} \tag{10}$$ **Exercise**: Use the 3 formulas above to implement linear_backward(). ###Code # GRADED FUNCTION: linear_backward def linear_backward(dZ, cache): """ Implement the linear portion of backward propagation for a single layer (layer l) Arguments: dZ -- Gradient of the cost with respect to the linear output (of current layer l) cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer Returns: dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev dW -- Gradient of the cost with respect to W (current layer l), same shape as W db -- Gradient of the cost with respect to b (current layer l), same shape as b """ A_prev, W, b = cache m = A_prev.shape[1] ### START CODE HERE ### (≈ 3 lines of code) dW = np.dot(dZ,A_prev.T)/m db = np.sum(dZ,axis=1,keepdims=True)/m dA_prev = np.dot(W.T,dZ) ### END CODE HERE ### assert (dA_prev.shape == A_prev.shape) assert (dW.shape == W.shape) assert (db.shape == b.shape) return dA_prev, dW, db # Set up some test inputs dZ, linear_cache = linear_backward_test_case() dA_prev, dW, db = linear_backward(dZ, linear_cache) print ("dA_prev = "+ str(dA_prev)) print ("dW = " + str(dW)) print ("db = " + str(db)) ###Output dA_prev = [[ 0.51822968 -0.19517421] [-0.40506361 0.15255393] [ 2.37496825 -0.89445391]] dW = [[-0.10076895 1.40685096 1.64992505]] db = [[0.50629448]] ###Markdown **Expected Output**: **dA_prev** [[ 0.51822968 -0.19517421] [-0.40506361 0.15255393] [ 2.37496825 -0.89445391]] **dW** [[-0.10076895 1.40685096 1.64992505]] **db** [[ 0.50629448]] 6.2 - Linear-Activation backwardNext, you will create a function that merges the two helper functions: **`linear_backward`** and the backward step for the activation **`linear_activation_backward`**. To help you implement `linear_activation_backward`, we provided two backward functions:- **`sigmoid_backward`**: Implements the backward propagation for SIGMOID unit. You can call it as follows:```pythondZ = sigmoid_backward(dA, activation_cache)```- **`relu_backward`**: Implements the backward propagation for RELU unit. You can call it as follows:```pythondZ = relu_backward(dA, activation_cache)```If $g(.)$ is the activation function, `sigmoid_backward` and `relu_backward` compute $$dZ^{[l]} = dA^{[l]} * g'(Z^{[l]}) \tag{11}$$. **Exercise**: Implement the backpropagation for the *LINEAR->ACTIVATION* layer. ###Code # GRADED FUNCTION: linear_activation_backward def linear_activation_backward(dA, cache, activation): """ Implement the backward propagation for the LINEAR->ACTIVATION layer. 
Arguments: dA -- post-activation gradient for current layer l cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu" Returns: dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev dW -- Gradient of the cost with respect to W (current layer l), same shape as W db -- Gradient of the cost with respect to b (current layer l), same shape as b """ linear_cache, activation_cache = cache if activation == "relu": ### START CODE HERE ### (≈ 2 lines of code) #dZ = relu_backward(dA,cache) #❌ dZ = relu_backward(dA,activation_cache) #dA_prev, dW, db = linear_backward(dZ,cache) #❌ dA_prev, dW, db = linear_backward(dZ,linear_cache) ### END CODE HERE ### elif activation == "sigmoid": ### START CODE HERE ### (≈ 2 lines of code) #dZ = sigmoid_backward(dA,cache) #❌ dZ = sigmoid_backward(dA,activation_cache) #dA_prev, dW, db = linear_backward(dZ,cache) #❌ dA_prev, dW, db = linear_backward(dZ,linear_cache) ### END CODE HERE ### return dA_prev, dW, db AL, linear_activation_cache = linear_activation_backward_test_case() dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "sigmoid") print ("sigmoid:") print ("dA_prev = "+ str(dA_prev)) print ("dW = " + str(dW)) print ("db = " + str(db) + "\n") dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "relu") print ("relu:") print ("dA_prev = "+ str(dA_prev)) print ("dW = " + str(dW)) print ("db = " + str(db)) ###Output sigmoid: dA_prev = [[ 0.11017994 0.01105339] [ 0.09466817 0.00949723] [-0.05743092 -0.00576154]] dW = [[ 0.10266786 0.09778551 -0.01968084]] db = [[-0.05729622]] relu: dA_prev = [[ 0.44090989 0. ] [ 0.37883606 0. ] [-0.2298228 0. ]] dW = [[ 0.44513824 0.37371418 -0.10478989]] db = [[-0.20837892]] ###Markdown **Expected output with sigmoid:** dA_prev [[ 0.11017994 0.01105339] [ 0.09466817 0.00949723] [-0.05743092 -0.00576154]] dW [[ 0.10266786 0.09778551 -0.01968084]] db [[-0.05729622]] **Expected output with relu:** dA_prev [[ 0.44090989 0. ] [ 0.37883606 0. ] [-0.2298228 0. ]] dW [[ 0.44513824 0.37371418 -0.10478989]] db [[-0.20837892]] 6.3 - L-Model Backward Now you will implement the backward function for the whole network. Recall that when you implemented the `L_model_forward` function, at each iteration, you stored a cache which contains (X,W,b, and z). In the back propagation module, you will use those variables to compute the gradients. Therefore, in the `L_model_backward` function, you will iterate through all the hidden layers backward, starting from layer $L$. On each step, you will use the cached values for layer $l$ to backpropagate through layer $l$. Figure 5 below shows the backward pass. **Figure 5** : Backward pass ** Initializing backpropagation**:To backpropagate through this network, we know that the output is, $A^{[L]} = \sigma(Z^{[L]})$. Your code thus needs to compute `dAL` $= \frac{\partial \mathcal{L}}{\partial A^{[L]}}$.To do so, use this formula (derived using calculus which you don't need in-depth knowledge of):```pythondAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) derivative of cost with respect to AL```You can then use this post-activation gradient `dAL` to keep going backward. 
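As a quick numeric check of this initialization (a sketch with toy values, not part of the graded code):

```python
import numpy as np

# Toy values: two examples; AL holds predicted probabilities, Y the true labels
AL = np.array([[0.8, 0.1]])
Y  = np.array([[1.0, 0.0]])
dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
print(dAL)  # [[-1.25  1.1111...]] : negative where Y=1 (cost falls as AL moves toward 1),
            # positive where Y=0 (cost falls as AL moves toward 0)
```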
As seen in Figure 5, you can now feed in `dAL` into the LINEAR->SIGMOID backward function you implemented (which will use the cached values stored by the L_model_forward function). After that, you will have to use a `for` loop to iterate through all the other layers using the LINEAR->RELU backward function. You should store each dA, dW, and db in the grads dictionary. To do so, use this formula : $$grads["dW" + str(l)] = dW^{[l]}\tag{15} $$For example, for $l=3$ this would store $dW^{[l]}$ in `grads["dW3"]`.**Exercise**: Implement backpropagation for the *[LINEAR->RELU] $\times$ (L-1) -> LINEAR -> SIGMOID* model. ###Code # GRADED FUNCTION: L_model_backward

def L_model_backward(AL, Y, caches):
    """
    Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group

    Arguments:
    AL -- probability vector, output of the forward propagation (L_model_forward())
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat)
    caches -- list of caches containing:
                every cache of linear_activation_forward() with "relu" (it's caches[l], for l in range(L-1) i.e l = 0...L-2)
                the cache of linear_activation_forward() with "sigmoid" (it's caches[L-1])

    Returns:
    grads -- A dictionary with the gradients
             grads["dA" + str(l)] = ...
             grads["dW" + str(l)] = ...
             grads["db" + str(l)] = ...
    """
    grads = {}
    L = len(caches) # the number of layers
    m = AL.shape[1]
    Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL

    # Initializing the backpropagation
    ### START CODE HERE ### (1 line of code)
    #dAL = -np.divide(Y,AL)-np.divide(1-Y,1-AL) #❌
    dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))  # derivative of cost with respect to AL
    ### END CODE HERE ###

    # Lth layer (SIGMOID -> LINEAR) gradients. Inputs: "AL, Y, caches". Outputs: "grads["dAL"], grads["dWL"], grads["dbL"]
    ### START CODE HERE ### (approx. 2 lines)
    #current_cache = caches[L-1] #❌
    current_cache = caches[-1]
    grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL,current_cache,activation="sigmoid")
    ### END CODE HERE ###

    for l in reversed(range(L-1)):
        # lth layer: (RELU -> LINEAR) gradients.
        # Inputs: "grads["dA" + str(l + 2)], caches". Outputs: "grads["dA" + str(l + 1)] , grads["dW" + str(l + 1)] , grads["db" + str(l + 1)]
        ### START CODE HERE ### (approx. 5 lines)
        #current_cache = caches[l+1] #❌
        current_cache = caches[l]
        #dA_prev_temp, dW_temp, db_temp = linear_activation_backward(dAL,current_cache,activation="relu") #❌
        dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads["dA" + str(l + 2)],current_cache,activation="relu")
        grads["dA" + str(l + 1)] = dA_prev_temp
        grads["dW" + str(l + 1)] = dW_temp
        grads["db" + str(l + 1)] = db_temp
        ### END CODE HERE ###

    return grads

AL, Y_assess, caches = L_model_backward_test_case()
grads = L_model_backward(AL, Y_assess, caches)
print_grads(grads)
###Output
dW1 = [[ 0.41010002  0.07807203  0.13798444  0.10502167]
 [ 0.          0.          0.          0.        ]
 [ 0.05283652  0.01005865  0.01777766  0.0135308 ]]
db1 = [[-0.22007063]
 [ 0.        ]
 [-0.02835349]]
dA1 = [[ 0.12913162 -0.44014127]
 [-0.14175655  0.48317296]
 [ 0.01663708 -0.05670698]]
###Markdown **Expected Output** dW1 [[ 0.41010002 0.07807203 0.13798444 0.10502167] [ 0. 0. 0. 0. ] [ 0.05283652 0.01005865 0.01777766 0.0135308 ]] db1 [[-0.22007063] [ 0. ] [-0.02835349]] dA1 [[ 0.12913162 -0.44014127] [-0.14175655 0.48317296] [ 0.01663708 -0.05670698]] 6.4 - Update Parameters In this section you will update the parameters of the model, using gradient descent: $$ W^{[l]} = W^{[l]} - \alpha \text{ } dW^{[l]} \tag{16}$$$$ b^{[l]} = b^{[l]} - \alpha \text{ } db^{[l]} \tag{17}$$where $\alpha$ is the learning rate.
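For instance (a quick numeric illustration with made-up values), with $\alpha = 0.1$, $W^{[1]} = 0.5$ and $dW^{[1]} = 0.2$, equation (16) gives $W^{[1]} \leftarrow 0.5 - 0.1 \times 0.2 = 0.48$.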
After computing the updated parameters, store them in the parameters dictionary. **Exercise**: Implement `update_parameters()` to update your parameters using gradient descent.**Instructions**:Update parameters using gradient descent on every $W^{[l]}$ and $b^{[l]}$ for $l = 1, 2, ..., L$. ###Code # GRADED FUNCTION: update_parameters def update_parameters(parameters, grads, learning_rate): """ Update parameters using gradient descent Arguments: parameters -- python dictionary containing your parameters grads -- python dictionary containing your gradients, output of L_model_backward Returns: parameters -- python dictionary containing your updated parameters parameters["W" + str(l)] = ... parameters["b" + str(l)] = ... """ L = len(parameters) // 2 # number of layers in the neural network # Update rule for each parameter. Use a for loop. ### START CODE HERE ### (≈ 3 lines of code) for l in range(L): parameters["W"+str(l+1)]=parameters["W"+str(l+1)]-learning_rate*grads["dW"+str(l+1)] parameters["b"+str(l+1)]=parameters["b"+str(l+1)]-learning_rate*grads["db"+str(l+1)] ### END CODE HERE ### return parameters parameters, grads = update_parameters_test_case() parameters = update_parameters(parameters, grads, 0.1) print ("W1 = "+ str(parameters["W1"])) print ("b1 = "+ str(parameters["b1"])) print ("W2 = "+ str(parameters["W2"])) print ("b2 = "+ str(parameters["b2"])) ###Output W1 = [[-0.59562069 -0.09991781 -2.14584584 1.82662008] [-1.76569676 -0.80627147 0.51115557 -1.18258802] [-1.0535704 -0.86128581 0.68284052 2.20374577]] b1 = [[-0.04659241] [-1.28888275] [ 0.53405496]] W2 = [[-0.55569196 0.0354055 1.32964895]] b2 = [[-0.84610769]]
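With forward propagation, the cost, backward propagation, and the parameter update all in place, the pieces combine into a full model. Here is a minimal sketch of how they fit together (it assumes the `initialize_parameters_deep` function from earlier in the notebook; the hyperparameter defaults are only illustrative):

```python
def L_layer_model_sketch(X, Y, layers_dims, learning_rate=0.0075, num_iterations=2500, print_cost=False):
    parameters = initialize_parameters_deep(layers_dims)         # defined earlier in the notebook
    for i in range(num_iterations):
        AL, caches = L_model_forward(X, parameters)              # forward pass
        cost = compute_cost(AL, Y)                               # cross-entropy cost
        grads = L_model_backward(AL, Y, caches)                  # backward pass
        parameters = update_parameters(parameters, grads, learning_rate)
        if print_cost and i % 100 == 0:
            print("Cost after iteration {}: {}".format(i, cost))
    return parameters
```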
examples/metrics/tutorials/porosity_profiles.ipynb
###Markdown Obtaining the porosity profile along each principal axis
This example illustrates how to use the ``porosity_profile`` function to study the distribution of porosity in a sample. Start by importing the usual packages, and setting the matplotlib style to something more reasonable:
###Code
import numpy as np
import porespy as ps
import matplotlib.pyplot as plt
ps.visualization.set_mpl_style()
###Output
_____no_output_____
###Markdown
Here we generate a test image using the ``blobs`` function. We will create an image with different lengths in each direction since this highlights an important point later on when plotting the results.
###Code im = ps.generators.blobs(shape=[200, 300, 400]) fig, ax = plt.subplots() ax.imshow(im[:, :, 50]); ###Output _____no_output_____ ###Markdown All results in PoreSpy are reported in voxels, so it is up to the user to scale these values to the physical size of each voxel. Here we'll assume that each voxel corresponds to 5.9 microns. ###Code voxel_size = 5.9 # microns/voxel ###Output _____no_output_____ ###Markdown The porosity profile function is straight-forward. It only requires the binary image of the material, and a specification of along which axis to take the profile. Here we'll do all three directions: ###Code x_profile = ps.metrics.porosity_profile(im, 0) y_profile = ps.metrics.porosity_profile(im, 1) z_profile = ps.metrics.porosity_profile(im, 2) ###Output _____no_output_____ ###Markdown Now finally we can plot the profile in each direction using matplotlib: ###Code fig, ax = plt.subplots(figsize=(5, 4)) ax.plot(np.linspace(0, im.shape[0]*voxel_size, im.shape[0]), x_profile, 'b-', label='yz-plane', alpha=0.5) ax.plot(np.linspace(0, im.shape[1]*voxel_size, im.shape[1]), y_profile, 'r-', label='xz-plane', alpha=0.5) ax.plot(np.linspace(0, im.shape[2]*voxel_size, im.shape[2]), z_profile, 'g-', label='xy-plane', alpha=0.5) ax.set_ylim([0, 1]) ax.set_ylabel('Porosity of slice') ax.set_xlabel('Position of slice along given axis') ax.legend(); ###Output _____no_output_____ ###Markdown Note how each line ends at a different position on the x-axis. This is caused by the fact that image had different total length in each direction. We can also do the plots with each axis normalized by their total length: ###Code fig, ax = plt.subplots(figsize=(5, 4)) ax.plot(np.linspace(0, 1, im.shape[0]), x_profile, 'b-', label='yz-plane', alpha=0.5) ax.plot(np.linspace(0, 1, im.shape[1], im.shape[1]), y_profile, 'r-', label='xz-plane', alpha=0.5) ax.plot(np.linspace(0, 1, im.shape[2], im.shape[2]), z_profile, 'g-', label='xy-plane', alpha=0.5) ax.set_ylim([0, 1]) ax.set_ylabel('Porosity of slice') ax.set_xlabel('Fractional distance along given axis') ax.legend(); ###Output _____no_output_____
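As a quick sanity check (a sketch reusing `im` and the three profiles from above): because every slice along a given axis contains the same number of voxels, the mean of each profile should match the bulk porosity of the image.

```python
bulk_porosity = im.sum() / im.size   # fraction of pore (True) voxels in the whole image
print(bulk_porosity, x_profile.mean(), y_profile.mean(), z_profile.mean())
```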
Python API/WeatherPy.ipynb
###Markdown WeatherPy---- Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps. ###Code # Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import numpy as np import requests import datetime # Import API key from api_keys import api_key # Incorporated citipy to determine city based on latitude and longitude from citipy import citipy # Output File (CSV) output_data_file = "../output_data/cities.csv" # Range of latitudes and longitudes lat_range = (-90, 90) lng_range = (-180, 180) ###Output _____no_output_____ ###Markdown Generate Cities List ###Code # List for holding lat_lngs and cities lat_lngs = [] cities = [] # Create a set of random lat and lng combinations lats = np.random.uniform(low=-90.000, high=90.000, size=1500) lngs = np.random.uniform(low=-180.000, high=180.000, size=1500) lat_lngs = zip(lats, lngs) # Identify nearest city for each lat, lng combination for lat_lng in lat_lngs: city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name # If the city is unique, then add it to a our cities list if city not in cities: cities.append(city) # Print the city count to confirm sufficient count len(cities) ###Output _____no_output_____ ###Markdown Perform API Calls* Perform a weather check on each city using a series of successive API calls.* Include a print log of each city as it'sbeing processed (with the city number and city name). ###Code url = "http://api.openweathermap.org/data/2.5/weather?" params={ "units":"metric", "appid":api_key, } city_list=[] Cloudiness=[] Country=[] Date=[] Humidity=[] Lat=[] Lng=[] Max_Temp=[] Wind_Speed=[] #loop through cities and gather all data needed for charts counter=1 for city in cities: params["q"] = city response=requests.get(url, params=params) #check if city was found if response.status_code == 200: print(f"Getting record {counter} for city {city}") city_list.append(city) Cloudiness.append(response.json()['clouds']['all']) Country.append(response.json()['sys']['country']) Date.append(response.json()['dt']) Humidity.append(response.json()['main']['humidity']) Lat.append(response.json()['coord']['lat']) Lng.append(response.json()['coord']['lon']) Max_Temp.append(response.json()['main']['temp_max']) Wind_Speed.append(response.json()['wind']['speed']) counter=counter+1 print("______________________") else: print(f"{city} was not found, moving to next" ) print("______________________") print(f"Data Collection for {counter} cities is Complete") ###Output Getting record 1 for city samarai ______________________ Getting record 2 for city cape town ______________________ Getting record 3 for city iqaluit ______________________ Getting record 4 for city pontes e lacerda ______________________ Getting record 5 for city victoria ______________________ Getting record 6 for city hermanus ______________________ Getting record 7 for city geraldton ______________________ Getting record 8 for city albany ______________________ Getting record 9 for city mahebourg ______________________ Getting record 10 for city letnyaya stavka ______________________ Getting record 11 for city tiksi ______________________ Getting record 12 for city port alfred ______________________ Getting record 13 for city matara ______________________ Getting record 14 for city hobart ______________________ Getting record 15 for city ushuaia ______________________ Getting record 16 for city mataura ______________________ tsihombe was not found, moving to next 
______________________ Getting record 17 for city shubarkuduk ______________________ Getting record 18 for city rodino ______________________ Getting record 19 for city busselton ______________________ amderma was not found, moving to next ______________________ Getting record 20 for city san cristobal ______________________ galgani was not found, moving to next ______________________ Getting record 21 for city rikitea ______________________ Getting record 22 for city qaanaaq ______________________ Getting record 23 for city dikson ______________________ Getting record 24 for city bambous virieux ______________________ Getting record 25 for city biltine ______________________ inegol was not found, moving to next ______________________ Getting record 26 for city new norfolk ______________________ Getting record 27 for city launceston ______________________ qui nhon was not found, moving to next ______________________ samusu was not found, moving to next ______________________ Getting record 28 for city tasiilaq ______________________ mys shmidta was not found, moving to next ______________________ Getting record 29 for city rostovka ______________________ Getting record 30 for city chapais ______________________ Getting record 31 for city santa maria ______________________ Getting record 32 for city avarua ______________________ Getting record 33 for city uaua ______________________ yirol was not found, moving to next ______________________ Getting record 34 for city port blair ______________________ Getting record 35 for city dawlatabad ______________________ Getting record 36 for city vaini ______________________ Getting record 37 for city punta arenas ______________________ dzhusaly was not found, moving to next ______________________ Getting record 38 for city marsh harbour ______________________ Getting record 39 for city castro ______________________ barentsburg was not found, moving to next ______________________ Getting record 40 for city miri ______________________ Getting record 41 for city hilo ______________________ Getting record 42 for city salisbury ______________________ nizhneyansk was not found, moving to next ______________________ Getting record 43 for city mayuge ______________________ Getting record 44 for city portland ______________________ Getting record 45 for city mandalgovi ______________________ Getting record 46 for city cidreira ______________________ Getting record 47 for city honjo ______________________ Getting record 48 for city qiongshan ______________________ Getting record 49 for city butaritari ______________________ Getting record 50 for city severo-kurilsk ______________________ Getting record 51 for city atuona ______________________ Getting record 52 for city kahului ______________________ ambodifototra was not found, moving to next ______________________ Getting record 53 for city torbay ______________________ Getting record 54 for city altus ______________________ Getting record 55 for city chokurdakh ______________________ Getting record 56 for city codrington ______________________ Getting record 57 for city palmer ______________________ Getting record 58 for city vanimo ______________________ Getting record 59 for city saint-joseph ______________________ artyk was not found, moving to next ______________________ Getting record 60 for city aguas vermelhas ______________________ illoqqortoormiut was not found, moving to next ______________________ Getting record 61 for city luderitz ______________________ Getting record 62 for city saskylakh 
______________________ Getting record 63 for city pevek ______________________ Getting record 64 for city port elizabeth ______________________ Getting record 65 for city hasaki ______________________ Getting record 66 for city bahia blanca ______________________ Getting record 67 for city longyearbyen ______________________ Getting record 68 for city bluff ______________________ Getting record 69 for city la ronge ______________________ Getting record 70 for city upernavik ______________________ Getting record 71 for city canakkale ______________________ Getting record 72 for city beringovskiy ______________________ Getting record 73 for city kodiak ______________________ Getting record 74 for city fare ______________________ Getting record 75 for city yenagoa ______________________ Getting record 76 for city thompson ______________________ Getting record 77 for city fortuna ______________________ Getting record 78 for city gold coast ______________________ Getting record 79 for city paamiut ______________________ Getting record 80 for city sitka ______________________ Getting record 81 for city port hedland ______________________ balimo was not found, moving to next ______________________ Getting record 82 for city grindavik ______________________ Getting record 83 for city qingdao ______________________ Getting record 84 for city khatanga ______________________ Getting record 85 for city vila franca do campo ______________________ Getting record 86 for city plettenberg bay ______________________ Getting record 87 for city tuktoyaktuk ______________________ Getting record 88 for city springbok ______________________ Getting record 89 for city hirara ______________________ grand river south east was not found, moving to next ______________________ Getting record 90 for city saint-philippe ______________________ Getting record 91 for city san pedro ______________________ Getting record 92 for city arawa ______________________ taolanaro was not found, moving to next ______________________ Getting record 93 for city meulaboh ______________________ Getting record 94 for city chuy ______________________ Getting record 95 for city troitsko-pechorsk ______________________ Getting record 96 for city lovozero ______________________ Getting record 97 for city moron ______________________ Getting record 98 for city najran ______________________ Getting record 99 for city nizhniy kuranakh ______________________ toliary was not found, moving to next ______________________ attawapiskat was not found, moving to next ______________________ Getting record 100 for city jamestown ______________________ Getting record 101 for city baculin ______________________ Getting record 102 for city lazaro cardenas ______________________ Getting record 103 for city bredasdorp ______________________ Getting record 104 for city georgetown ______________________ Getting record 105 for city lompoc ______________________ bengkulu was not found, moving to next ______________________ Getting record 106 for city sao filipe ______________________ Getting record 107 for city benguela ______________________ Getting record 108 for city mabaruma ______________________ Getting record 109 for city sernur ______________________ Getting record 110 for city barrow ______________________ Getting record 111 for city ilulissat ______________________ Getting record 112 for city tongling ______________________ Getting record 113 for city champoton ______________________ Getting record 114 for city gat ______________________ Getting record 
115 for city gondanglegi ______________________ Getting record 116 for city bethel ______________________ Getting record 117 for city comodoro rivadavia ______________________ ###Markdown Convert Raw Data to DataFrame* Export the city data into a .csv.* Display the DataFrame ###Code print(len(city_list), len(Cloudiness), len(Country), len(Date), len(Humidity), len(Lat), len(Lng), len(Max_Temp), len(Wind_Speed)) data = pd.DataFrame({ "City":city_list, "Cloudiness":Cloudiness, "Country":Country, "Date":Date, "Humidity":Humidity, "Latitude":Lat, "Longitude":Lng, "Max Tempurature":Max_Temp, "Wind Speed":Wind_Speed, }) data.head() import time converted_date=[] for row in data["Date"]: converted_date = time.ctime(row) data["TimeStampt_Coverted"]=converted_date data.to_csv(output_data_file) data.head() data.plot.scatter("Latitude", "Max Tempurature") plt.grid() plt.title(f"Latitude vs Max Temperature on {converted_date}") plt.savefig("../output_data/LatVsMaxTemp.png") ###Output _____no_output_____ ###Markdown Plotting the Data* Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.* Save the plotted figures as .pngs. Latitude vs. Temperature Plot Latitude vs. Humidity Plot ###Code data.plot.scatter("Latitude", "Humidity") plt.grid() plt.title(f"Latitude vs Humidity on {converted_date}") plt.savefig("../output_data/LatVsHumidity.png") ###Output _____no_output_____ ###Markdown Latitude vs. Cloudiness Plot ###Code data.plot.scatter("Latitude", "Cloudiness") plt.grid() plt.title(f"Latitude vs Cloudiness on {converted_date}") plt.savefig("../output_data/LatVsCloudiness.png") ###Output _____no_output_____ ###Markdown Latitude vs. Wind Speed Plot ###Code data.plot.scatter("Latitude", "Wind Speed") plt.grid() plt.title(f"Latitude vs Wind Speed on {converted_date}") plt.savefig("../output_data/LatVsWindSpeed.png") ###Output _____no_output_____
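For reference, the request made for each city in the data-collection loop above can be wrapped into a small helper (a sketch; the function name is hypothetical and it reuses `url` and `params` from the cells above, parsing the same JSON fields):

```python
def fetch_city_weather(city):
    """Return the fields collected in the loop above, or None if the city is not found."""
    params["q"] = city
    response = requests.get(url, params=params)
    if response.status_code != 200:
        return None                       # city not found, as in the loop above
    payload = response.json()
    return {
        "lat": payload["coord"]["lat"],
        "lon": payload["coord"]["lon"],
        "max_temp": payload["main"]["temp_max"],
        "humidity": payload["main"]["humidity"],
        "cloudiness": payload["clouds"]["all"],
        "wind_speed": payload["wind"]["speed"],
        "country": payload["sys"]["country"],
        "date": payload["dt"],
    }
```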
.ipynb_checkpoints/create-graph-checkpoint.ipynb
###Markdown Information- Truc Huynh- AI Academy Project DescriptionThis exercise has two parts. First you will import and then create a network graph to represent the data. Next you will create a program that utilizes the data. Requirement: Do not use any additional Python libraries with the exception of matplotlib and networkx.- [matplotlib](https://matplotlib.org/stable/plot_types/index.html)- [networkx](https://networkx.org/documentation/stable/tutorial.htmlattributes)- [shortest path function & length using networkx](https://networkx.org/documentation/stable/reference/algorithms/shortest_paths.html) Part 1- Write Python code to complete the following:- Load in the attached plain text file and generate a unique class RoadDict that uses a dictionary representation to store all of the roads in the file. Each line has a starting city, ending city, and distance in miles.- Define a new CityGraph class that stores the road network using a networkx graph structure and use it to print a graph of the cities. Example output shown below in Figure 1. Part 2Using the data imported in Part 1, write Python code to complete the following: InputsAsk for a start and end city from the user. Outputs- Perform a breadth first search on the graph and return the path found (intermediate cities) along with the miles traversed.- Perform a depth first search on the graph and return the path found along with the miles traversed. ###Code # Required library import matplotlib.pyplot as plt import networkx as nx """ # openfile read input from textfile # In fact, I test the data on different jupiter notebook and come up with the shortest version """ def openfile(filename): with open(filename, "r") as file_reader: all_lines = file_reader.readlines() return all_lines """ create_data(data): take parameter data, remove all the newline, space and then convert each line to a tuple create RoadDict dictionary and list of tuple clean_data """ def create_data(data): clean_data = [] road_dict = {} count = 0 # Clean the data by using for loop for line in data: line = line.replace('\n', '').replace(' ', '').split(',') road_dict[count] = {'citi1': line[0], 'citi2': line[1], 'distance': line[2]} clean_data.append((line[0], line[1], float(line[2]))) count += 1 return clean_data, road_dict """ Simple get the input from user, and validation, to make sure it not crash the application """ def get_user_input(cities, purpose): user_input = input(f"Please enter the {purpose} city: ").capitalize() while user_input not in cities: cities_display(cities) user_input = input(f"Please enter the {purpose} city again: ").capitalize() return user_input """ Print out the cities in the list """ def cities_display(cities): print("Target city and Destination city must be:") for citi in cities: print(citi, end=', ') print('') ###Output _____no_output_____ ###Markdown Data Preparation: ###Code # Data Preparation: # create RoadDict as requirement and graph_data to feed in networkx to create graphs graph_data, road_dict= create_data(openfile("frenchcities.txt")) # create multi graph using networkx multi_graph = nx.MultiGraph() multi_graph.add_weighted_edges_from(graph_data) # Convert the graph to dictionary with weight multi_graph_dict = dict(multi_graph.degree(weight='weight')) # create the city list for validation only cities_list = list(multi_graph_dict) ###Output _____no_output_____ ###Markdown Data Validation ###Code for data in graph_data: print(data) for data in road_dict: print(f"{data} : {road_dict[data]}") ###Output 0 : {'citi1': 'Brest', 'citi2': 
'Rennes', 'distance': '244'} 1 : {'citi1': 'Rennes', 'citi2': 'Nantes', 'distance': '107'} 2 : {'citi1': 'Rennes', 'citi2': 'Paris', 'distance': '348'} 3 : {'citi1': 'Rennes', 'citi2': 'Caen', 'distance': '176'} 4 : {'citi1': 'Nantes', 'citi2': 'Limoges', 'distance': '329'} 5 : {'citi1': 'Nantes', 'citi2': 'Bordeaux', 'distance': '329'} 6 : {'citi1': 'Limoges', 'citi2': 'Paris', 'distance': '396'} 7 : {'citi1': 'Limoges', 'citi2': 'Lyon', 'distance': '389'} 8 : {'citi1': 'Limoges', 'citi2': 'Toulouse', 'distance': '313'} 9 : {'citi1': 'Limoges', 'citi2': 'Bordeaux', 'distance': '220'} 10 : {'citi1': 'Paris', 'citi2': 'Caen', 'distance': '241'} 11 : {'citi1': 'Paris', 'citi2': 'Calais', 'distance': '297'} 12 : {'citi1': 'Paris', 'citi2': 'Nancy', 'distance': '372'} 13 : {'citi1': 'Paris', 'citi2': 'Dijon', 'distance': '313'} 14 : {'citi1': 'Caen', 'citi2': 'Calais', 'distance': '120'} 15 : {'citi1': 'Calais', 'citi2': 'Nancy', 'distance': '534'} 16 : {'citi1': 'Nancy', 'citi2': 'Strasbourg', 'distance': '145'} 17 : {'citi1': 'Nancy', 'citi2': 'Dijon', 'distance': '201'} 18 : {'citi1': 'Dijon', 'citi2': 'Strasbourg', 'distance': '335'} 19 : {'citi1': 'Bordeaux', 'citi2': 'Toulouse', 'distance': '253'} 20 : {'citi1': 'Toulouse', 'citi2': 'Montpellier', 'distance': '240'} 21 : {'citi1': 'Montpellier', 'citi2': 'Avignon', 'distance': '121'} 22 : {'citi1': 'Dijon', 'citi2': 'Lyon', 'distance': '192'} 23 : {'citi1': 'Lyon', 'citi2': 'Grenoble', 'distance': '104'} 24 : {'citi1': 'Lyon', 'citi2': 'Avignon', 'distance': '216'} 25 : {'citi1': 'Avignon', 'citi2': 'Grenoble', 'distance': '227'} 26 : {'citi1': 'Avignon', 'citi2': 'Marseille', 'distance': '99'} 27 : {'citi1': 'Marseille', 'citi2': 'Nice', 'distance': '188'} ###Markdown Task 1: Draw Graph ###Code # Task 1: print out the data nx.draw(multi_graph, with_labels=True, font_weight='bold') plt.show() ###Output _____no_output_____ ###Markdown Task 2: ###Code # Task 2: cities_display(cities_list) target = get_user_input(cities_list, "target") destination = get_user_input(cities_list, "destination") # Using Kilometer because it is the standard measurement in France: # Searching using Dijkstra Algorithm (Bread First Search) print(f"BFS: Cities need to travel: {nx.dijkstra_path(multi_graph, target, destination)}, " f"total distance: {nx.dijkstra_path_length(multi_graph, target, destination)} Km") # Searching using Bellman Forf Algorithm (Depth First Search) print(f"DFS: Cities need to travel: {nx.bellman_ford_path(multi_graph, target, destination)}, " f"total distance: {nx.bellman_ford_path_length(multi_graph, target, destination)} Km") ###Output Target city and Destination city must be: Brest, Rennes, Nantes, Paris, Caen, Limoges, Bordeaux, Lyon, Toulouse, Calais, Nancy, Dijon, Strasbourg, Montpellier, Avignon, Grenoble, Marseille, Nice, Please enter the target city: nantes Please enter the destination city: nice BFS: Cities need to travel: ['Nantes', 'Limoges', 'Lyon', 'Avignon', 'Marseille', 'Nice'], total distance: 1221.0 Km DFS: Cities need to travel: ['Nantes', 'Limoges', 'Lyon', 'Avignon', 'Marseille', 'Nice'], total distance: 1221.0 Km
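Note that `nx.dijkstra_path` and `nx.bellman_ford_path` both return the shortest *weighted* path, which is why the two results above agree. A literal breadth-first search ignores edge weights when choosing the route; here is a minimal sketch of that variant (reusing `multi_graph`, `target`, and `destination` from above):

```python
# Unweighted breadth-first search: nx.shortest_path with no weight argument uses BFS
bfs_path = nx.shortest_path(multi_graph, target, destination)
bfs_km = sum(min(d['weight'] for d in multi_graph[u][v].values())
             for u, v in zip(bfs_path, bfs_path[1:]))
print(f"BFS (unweighted) path: {bfs_path}, total distance: {bfs_km} Km")
```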
isolating_factors/mnist.ipynb
###Markdown Copyright 2021 Google LLCLicensed under the Apache License, Version 2.0 (the "License");you may not use this file except in compliance with the License.You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0Unless required by applicable law or agreed to in writing, softwaredistributed under the License is distributed on an "AS IS" BASIS,WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.See the License for the specific language governing permissions andlimitations under the License. Approximate Bijective Correspondence (ABC) with MNISTABC seeks correspondence between input sets of data which have been grouped by inactive factor of variation. In the case of MNIST, the data has been grouped by digit class, leaving style as the active factor of variation to embed. ###Code #@title Imports import tensorflow as tf import numpy as np import matplotlib.pyplot as plt %matplotlib inline import tensorflow_datasets as tfds from matplotlib.offsetbox import OffsetImage, AnnotationBbox from sklearn.decomposition import PCA tfkl = tf.keras.layers # The number of images of a digit in each of the two sets # This impacts the level of detail to which the network is sensitive. You could # imagine finding correspondence between stacks of 4 would require coarser # details than stacks of 64. stack_size = 64 # The dimension of the embedding space num_latent_dims = 8 # The similarity type to use as the distance metric in embedding space # Available options: # l2 : Negative Euclidean distance # l2sq : Negative squared Euclidean distance # l1 : Neggative L1 distance ('Manhattan' distance) # linf : ord = inf distance (the max displacement along any coordinate), negated # cosine : cosine similarity, bounded between -1 and 1 similarity_type = 'l2sq' # The digit to withold during training test_digit = 9 optimizer_name = 'adam' lr = 1e-4 num_steps = 500 temperature = 1. # essentially ineffective unless using cosine similarity, just sets the length scale in embedding space imgs_to_plot = 400 # for displaying the embeddings via pca output_plots_during_training = True output_loss_every = 100 #@title Load MNIST into 10 digit-specific datasets dset = tfds.load('mnist', split='train') dset = dset.map(lambda example: (tf.cast(example['image'], tf.float32)/255., example['label']), num_parallel_calls=tf.data.experimental.AUTOTUNE) ds = [dset.filter(lambda x, y: y==i) for i in range(10)] ds = [d.map(lambda x, y: x).shuffle(1000).repeat().batch(stack_size) for d in ds] dset_test = tfds.load('mnist', split='test') dset_test = dset_test.map(lambda example: (tf.cast(example['image'], tf.float32)/255., example['label']), num_parallel_calls=tf.data.experimental.AUTOTUNE) ds_test = [dset_test.filter(lambda x, y: y==i) for i in range(10)] ds_test = [d.map(lambda x, y: x).batch(stack_size) for d in ds_test] # Combine stacks from different digits randomly (sometimes a digit is paired with itself but this does not derail training). # The shape of each element is [2, stack_size, 28, 28, 1]. combined_dset = tf.data.experimental.sample_from_datasets(ds[:test_digit]+ds[test_digit+1:]).batch(2) #@title The data is grouped by class label; this is all the supervision needed to learn about writing style. 
for d in ds: for img_stack in d.take(1): plt.figure(figsize=(9, 1)) for j in range(8): plt.subplot(1, 8, j+1) plt.imshow(img_stack[j, ..., 0], cmap='binary') plt.xticks([]); plt.yticks([]) plt.show() # The embedding model model = tf.keras.Sequential([ tfkl.Input(shape=(28, 28, 1)), tfkl.Conv2D(32, 3, activation='relu'), tfkl.Conv2D(32, 3, activation='relu'), tfkl.Conv2D(32, 3, activation='relu', strides=2), tfkl.Conv2D(32, 3, activation='relu'), tfkl.Conv2D(32, 3, activation='relu'), tfkl.Flatten(), tfkl.Dense(128, activation='relu'), tfkl.Dense(num_latent_dims, activation='linear'), ]) print(model.summary()) #@title Run PCA on the output of the untrained model (for comparison). plot_imgs = [] embs_pre = [] for img_stack in ds[test_digit].take(imgs_to_plot//stack_size): plot_imgs.append(img_stack) embs_pre.append(model(img_stack, training=False)) plot_imgs = tf.concat(plot_imgs, 0) embs_pre = tf.concat(embs_pre, 0) pca_pre = PCA(n_components=2) pca_pre.fit(embs_pre) print('PCA2 explained variance before training:', pca_pre.explained_variance_ratio_) t_pre = pca_pre.transform(embs_pre) #@title The heart of ABC: helper functions for computing the loss # Many were copied+modified from Dwibedi et al. (2019). @tf.function def pairwise_l2_distance(embs1, embs2): # embs are shape [stack_size, num_latent_dims] # returns shape [stack_size, stack_size] as the full matrix of distances btwn embs norm1 = tf.reduce_sum(tf.square(embs1), 1) norm1 = tf.reshape(norm1, [-1, 1]) norm2 = tf.reduce_sum(tf.square(embs2), 1) norm2 = tf.reshape(norm2, [1, -1]) dist = tf.maximum( norm1 + norm2 - 2.0 * tf.matmul(embs1, embs2, False, True), 0.0) return dist @tf.function def pairwise_l1_distance(embs1, embs2): ss2 = embs2.shape[0] embs1_tiled = tf.tile(tf.expand_dims(embs1, 1), [1, ss2, 1]) dist = tf.reduce_sum(tf.abs(embs1_tiled-embs2), -1) return dist @tf.function def pairwise_linf_distance(embs1, embs2): ss2 = embs2.shape[0] embs1_tiled = tf.tile(tf.expand_dims(embs1, 1), [1, ss2, 1]) dist = tf.reduce_max(tf.abs(embs1_tiled-embs2), -1) return dist @tf.function def get_scaled_similarity(embs1, embs2, similarity_type, temperature): if similarity_type == 'l2sq': similarity = -1.0 * pairwise_l2_distance(embs1, embs2) elif similarity_type == 'l2': similarity = -1.0 * tf.sqrt(pairwise_l2_distance(embs1, embs2) + eps) elif similarity_type == 'l1': similarity = -1.0 * pairwise_l1_distance(embs1, embs2) elif similarity_type == 'linf': similarity = -1.0 * pairwise_linf_distance(embs1, embs2) elif similarity_type == 'cosine': embs1, _ = tf.linalg.normalize(embs1, ord=2, axis=-1) embs2, _ = tf.linalg.normalize(embs2, ord=2, axis=-1) similarity = tf.matmul(embs1, embs2, transpose_b=True) else: raise ValueError('Unknown similarity type: {}'.format(similarity_type)) similarity /= temperature return similarity @tf.function def align_pair_of_sequences(embs1, embs2, similarity_type, temperature): # Creates a soft nearest neighbor for each emb1 out of the elements of embs2 ss1 = tf.shape(embs1)[0] sim_12 = get_scaled_similarity(embs1, embs2, similarity_type, temperature) softmaxed_sim_12 = tf.nn.softmax(sim_12, axis=1) nn_embs = tf.matmul(softmaxed_sim_12, embs2) sim_21 = get_scaled_similarity(nn_embs, embs1, similarity_type, temperature) loss = tf.keras.losses.sparse_categorical_crossentropy(tf.range(ss1), sim_21, from_logits=True) return tf.reduce_mean(loss) #@title display_pca_embs definition def display_pca_embs(model, plot_imgs, step, avg_loss): embs_intermed = [] for start_ind in range(0, len(plot_imgs), stack_size): 
embs_intermed.append(model(plot_imgs[start_ind:start_ind+stack_size], training=False)) embs_intermed = tf.concat(embs_intermed, 0) pca_intermed = PCA(n_components=2) pca_intermed.fit(embs_intermed) t_intermed = pca_intermed.transform(embs_intermed) plt.figure(figsize=(8, 8)) zoom_factor = 1. # scales the size of the individual digit images ax = plt.gca() for img_id, img in enumerate(plot_imgs): img = tf.concat([1-img, 1-img, 1-img, img], -1) im = OffsetImage(img, zoom=zoom_factor) ab = AnnotationBbox(im, t_intermed[img_id], frameon=False) ax.add_artist(ab) plt.scatter(t_intermed[img_id, 0], t_intermed[img_id, 1], s=0) # this is just so the axes bound the images plt.xlabel('PC0, var {:.3f}'.format(pca_intermed.explained_variance_ratio_[0]), fontsize=14.) plt.ylabel('PC1, var {:.3f}'.format(pca_intermed.explained_variance_ratio_[1]), fontsize=14.) plt.title('Step {}, ABC Loss = {:.3f}'.format(step, avg_loss), fontsize=16.) plt.show() return # Train the embedder opt = tf.keras.optimizers.get(optimizer_name) opt.lr = lr losses = [] for step, paired_stacks in enumerate(combined_dset.take(num_steps)): with tf.GradientTape() as tape: embs1 = model(paired_stacks[0], training=True) embs2 = model(paired_stacks[1], training=True) loss = align_pair_of_sequences(embs1, embs2, similarity_type, temperature) loss += align_pair_of_sequences(embs2, embs1, similarity_type, temperature) grads = tape.gradient(loss, model.trainable_variables) opt.apply_gradients(zip(grads, model.trainable_variables)) losses.append(loss.numpy()) if not step % output_loss_every: if output_plots_during_training: display_pca_embs(model, plot_imgs, step, np.average(losses[-output_loss_every:])) else: print('Step {} Loss: {:.2f}'.format(step, np.average(losses[-output_loss_every:]))) if output_plots_during_training: display_pca_embs(model, plot_imgs, step, np.average(losses[-output_loss_every:])) else: print('Step {} Loss: {:.2f}'.format(step, np.average(losses[-output_loss_every:]))) print('Training completed.') #@title Run PCA on the output of the trained model embs_post = [] for start_ind in range(0, len(plot_imgs), stack_size): if not start_ind: embs_post = model(plot_imgs[start_ind:start_ind+stack_size], training=False) else: embs_post = tf.concat([embs_post, model(plot_imgs[start_ind:start_ind+stack_size], training=False)], 0) pca_post = PCA(n_components=2) pca_post.fit(embs_post) print('PCA2 explained variance after training:', pca_post.explained_variance_ratio_) t_post = pca_post.transform(embs_post) #@title Compare the PCA embeddings before and after training. plt.figure(figsize=(16, 8)) zoom_factor = 1. # scales the size of the individual digit images plt.subplot(121) ax = plt.gca() for img_id, img in enumerate(plot_imgs): img = tf.concat([1-img, 1-img, 1-img, img], -1) im = OffsetImage(img, zoom=zoom_factor) ab = AnnotationBbox(im, t_pre[img_id], frameon=False) ax.add_artist(ab) plt.scatter(t_pre[img_id, 0], t_pre[img_id, 1], s=0) # this is just so the axes bound the images plt.xlabel('PC0, var = {:.3f}'.format(pca_pre.explained_variance_ratio_[0]), fontsize=14.) plt.ylabel('PC1, var = {:.3f}'.format(pca_pre.explained_variance_ratio_[1]), fontsize=14.) plt.title('Before training', fontsize=16.) 
plt.subplot(122) ax = plt.gca() for img_id, img in enumerate(plot_imgs): img = tf.concat([1-img, 1-img, 1-img, img], -1) im = OffsetImage(img, zoom=zoom_factor) ab = AnnotationBbox(im, t_post[img_id], frameon=False) ax.add_artist(ab) plt.scatter(t_post[img_id, 0], t_post[img_id, 1], s=0) plt.xlabel('PC0, var = {:.3f}'.format(pca_post.explained_variance_ratio_[0]), fontsize=14.) plt.ylabel('PC1, var = {:.3f}'.format(pca_post.explained_variance_ratio_[1]), fontsize=14.) plt.title('After training', fontsize=16.) plt.show() #@title Check out other digits (0s and 1s are often easier to decipher). imgs_to_plot = 200 digits_to_plot = [0, 1, 2] plt.figure(figsize=(18, 6)) for plot_id, digit_id in enumerate(digits_to_plot): imgs = [] embs = [] for ind, img_stack in enumerate(ds[digit_id].take(imgs_to_plot//stack_size)): if not ind: imgs = img_stack embs = model(img_stack, training=False) else: imgs = tf.concat([imgs, img_stack], 0) embs = tf.concat([embs, model(img_stack, training=False)], 0) t_post = pca_post.transform(embs) plt.subplot(1, 3, plot_id+1) ax = plt.gca() for img_id, img in enumerate(imgs): img = tf.concat([1-img, 1-img, 1-img, img], -1) im = OffsetImage(img, zoom=zoom_factor) ab = AnnotationBbox(im, t_post[img_id], frameon=False) ax.add_artist(ab) plt.scatter(t_post[img_id, 0], t_post[img_id, 1], s=0.) plt.show() # Perform retrieval using random test digits all_imgs = []; all_embs = [] num_images_to_use = 512 for d in ds_test: for stack_id, img_stack in enumerate(d.take(num_images_to_use//stack_size)): embs = model(img_stack, training=False) if stack_id: all_imgs[-1] = tf.concat([all_imgs[-1], img_stack], 0) all_embs[-1] = tf.concat([all_embs[-1], embs], 0) else: all_imgs.append(img_stack) all_embs.append(embs) # Group by the nearest example of each digit plt.figure(figsize=(10, 10)) for digit in range(10): template_img = all_imgs[digit][0] for other_digit in range(10): dists = pairwise_l2_distance(all_embs[digit][0:1], all_embs[other_digit]) # [1, stack_size] min_ind = tf.argmin(dists[0]) plt.subplot(10, 10, digit*10 + other_digit + 1) plt.imshow(all_imgs[other_digit][min_ind, ..., 0], cmap='binary') plt.xticks([]); plt.yticks([]) ax = plt.gca() plt.setp(ax.spines.values(), color='#ccdad1', linewidth=[0., 5.][digit==other_digit]) plt.show() ###Output _____no_output_____ ###Markdown Copyright 2021 Google LLCLicensed under the Apache License, Version 2.0 (the "License");you may not use this file except in compliance with the License.You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0Unless required by applicable law or agreed to in writing, softwaredistributed under the License is distributed on an "AS IS" BASIS,WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.See the License for the specific language governing permissions andlimitations under the License. Cycle Consistency across Sets (CCS) with MNISTCCS seeks to find correspondence between input sets of data which have been grouped by inactive factor of variation. In the case of MNIST, the data has been grouped by digit class leaving style as the active factor of variation to embed. 
###Code #@title Imports import tensorflow.compat.v2 as tf tf.enable_v2_behavior() import numpy as np import matplotlib.pyplot as plt %matplotlib inline import tensorflow_datasets as tfds from matplotlib.offsetbox import OffsetImage, AnnotationBbox from sklearn.decomposition import PCA tfkl = tf.keras.layers # The number of images of a digit in each of the two sets # This impacts the level of detail to which the network is sensitive. You could # imagine pairing up stacks of 4 would be require coarser details than stacks of 64. stack_size = 64 # The dimension of the embedding space -- though the embeddings end up occupying a # lower dimensional volume, this gives the network more freedom to work in num_latent_dims = 8 # The similarity type to use as the distance metric in embedding space # Available options: # l2 : Euclidean distance # l2sq : Squared Euclidean distance # l1 : L1 distance ('Manhattan' distance) # linf : ord = inf distance (the max displacement along any coordinate) # cosine : cosine similarity, bounded between -1 and 1 similarity_type = 'l2sq' # The digit to withold during training test_digit = 9 optimizer_name = 'adam' lr = 1e-4 num_steps = 1000 temperature = 1. ## essentially ineffective unless using cosine similarity, just sets the length scale in embedding space imgs_to_plot = 400 ## for displaying the embeddings via pca output_plots_during_training = True #@title Load MNIST into 10 digit-specific datasets dset = tfds.load('mnist', split='train') dset = dset.map(lambda example: (tf.cast(example['image'], tf.float32)/255., example['label']), num_parallel_calls=tf.data.experimental.AUTOTUNE) ds = [dset.filter(lambda x, y: y==i) for i in range(10)] ds = [d.map(lambda x, y: x).shuffle(1000).repeat().batch(stack_size) for d in ds] dset_test = tfds.load('mnist', split='test') dset_test = dset_test.map(lambda example: (tf.cast(example['image'], tf.float32)/255., example['label']), num_parallel_calls=tf.data.experimental.AUTOTUNE) ds_test = [dset_test.filter(lambda x, y: y==i) for i in range(10)] ds_test = [d.map(lambda x, y: x).batch(stack_size) for d in ds_test] ## Combine stacks from different digits randomly (sometimes a digit is paired with itself but not the end of the world) ## The shape of each element is [2, stack_size, 28, 28, 1] combined_dset = tf.data.experimental.sample_from_datasets(ds[:test_digit]+ds[test_digit+1:]).batch(2) #@title The data is grouped by class label; this is all the supervision needed to learn about writing style for d in ds: for img_stack in d.take(1): plt.figure(figsize=(9, 1)) for j in range(8): plt.subplot(1, 8, j+1) plt.imshow(img_stack[j, ..., 0], cmap='binary') plt.xticks([]); plt.yticks([]) plt.show() ## The embedding model model = tf.keras.Sequential([ tfkl.Input(shape=(28, 28, 1)), tfkl.Conv2D(32, 3, activation='relu'), tfkl.Conv2D(32, 3, activation='relu'), tfkl.Conv2D(32, 3, activation='relu', strides=2), tfkl.Conv2D(32, 3, activation='relu'), tfkl.Conv2D(32, 3, activation='relu'), tfkl.Flatten(), tfkl.Dense(128, activation='relu'), tfkl.Dense(num_latent_dims, activation='linear'), ]) print(model.summary()) #@title Run PCA on the output of the untrained model (for comparison) plot_imgs = [] embs_pre = [] for img_stack in ds[test_digit].take(imgs_to_plot//stack_size): plot_imgs.append(img_stack) embs_pre.append(model(img_stack, training=False)) plot_imgs = tf.concat(plot_imgs, 0) embs_pre = tf.concat(embs_pre, 0) pca_pre = PCA(n_components=2) pca_pre.fit(embs_pre) print('PCA2 explained variance before training:', 
pca_pre.explained_variance_ratio_) t_pre = pca_pre.transform(embs_pre) #@title The heart of CCS: helper functions for computing the loss ## Many were copied+modified from Dwibedi 2019 @tf.function def pairwise_l2_distance(embs1, embs2): ## embs are shape [stack_size, num_latent_dims] ## returns shape [stack_size, stack_size] as the full matrix of distances btwn embs norm1 = tf.reduce_sum(tf.square(embs1), 1) norm1 = tf.reshape(norm1, [-1, 1]) norm2 = tf.reduce_sum(tf.square(embs2), 1) norm2 = tf.reshape(norm2, [1, -1]) dist = tf.maximum( norm1 + norm2 - 2.0 * tf.matmul(embs1, embs2, False, True), 0.0) return dist @tf.function def pairwise_l1_distance(embs1, embs2): ss2 = embs2.shape[0] embs1_tiled = tf.tile(tf.expand_dims(embs1, 1), [1, ss2, 1]) dist = tf.reduce_sum(tf.abs(embs1_tiled-embs2), -1) return dist @tf.function def pairwise_linf_distance(embs1, embs2): ss2 = embs2.shape[0] embs1_tiled = tf.tile(tf.expand_dims(embs1, 1), [1, ss2, 1]) dist = tf.reduce_max(tf.abs(embs1_tiled-embs2), -1) return dist @tf.function def get_scaled_similarity(embs1, embs2, similarity_type, temperature): if similarity_type == 'l2sq': similarity = -1.0 * pairwise_l2_distance(embs1, embs2) elif similarity_type == 'l2': similarity = -1.0 * tf.sqrt(pairwise_l2_distance(embs1, embs2) + eps) elif similarity_type == 'l1': similarity = -1.0 * pairwise_l1_distance(embs1, embs2) elif similarity_type == 'linf': similarity = -1.0 * pairwise_linf_distance(embs1, embs2) elif similarity_type == 'cosine': embs1, _ = tf.linalg.normalize(embs1, ord=2, axis=-1) embs2, _ = tf.linalg.normalize(embs2, ord=2, axis=-1) similarity = tf.matmul(embs1, embs2, transpose_b=True) else: raise ValueError('Unknown similarity type: {}'.format(similarity_type)) similarity /= temperature return similarity @tf.function def align_pair_of_sequences(embs1, embs2, similarity_type, temperature): ## Cycles from embs1 to embs2 and back using the soft nearest neighbor ss1 = tf.shape(embs1)[0] sim_12 = get_scaled_similarity(embs1, embs2, similarity_type, temperature) softmaxed_sim_12 = tf.nn.softmax(sim_12, axis=1) nn_embs = tf.matmul(softmaxed_sim_12, embs2) sim_21 = get_scaled_similarity(nn_embs, embs1, similarity_type, temperature) logits = sim_21 labels = tf.one_hot(tf.range(ss1), ss1) return logits, labels @tf.function def classification_loss(logits, labels, top_k_num=0): ## A modified classification loss for coming back to yourself ## Where top_k_num can be used to consider only the top k matches of a stack ## might be useful in the case where there are frequent outliers labels = tf.stop_gradient(labels) losses = tf.keras.losses.categorical_crossentropy(y_true=labels, y_pred=logits, from_logits=True) if top_k_num == 0: loss = tf.reduce_mean(losses) else: loss = tf.reduce_mean(-tf.math.top_k(-losses, k=top_k_num)[0]) return loss #@title display_pca_embs definition def display_pca_embs(model, plot_imgs, step, avg_loss): embs_intermed = [] for start_ind in range(0, len(plot_imgs), stack_size): embs_intermed.append(model(plot_imgs[start_ind:start_ind+stack_size], training=False)) embs_intermed = tf.concat(embs_intermed, 0) pca_intermed = PCA(n_components=2) pca_intermed.fit(embs_intermed) t_intermed = pca_intermed.transform(embs_intermed) plt.figure(figsize=(8, 8)) zoom_factor = 1. 
## scales the size of the individual digit images ax = plt.gca() for img_id, img in enumerate(plot_imgs): img = tf.concat([1-img, 1-img, 1-img, img], -1) im = OffsetImage(img, zoom=zoom_factor) ab = AnnotationBbox(im, t_intermed[img_id], frameon=False) ax.add_artist(ab) plt.scatter(t_intermed[img_id, 0], t_intermed[img_id, 1], s=0) ## this is just so the axes bound the images plt.xlabel('PC0, var {:.3f}'.format(pca_intermed.explained_variance_ratio_[0]), fontsize=14.) plt.ylabel('PC1, var {:.3f}'.format(pca_intermed.explained_variance_ratio_[1]), fontsize=14.) plt.title('Step {}, CCS Loss = {:.3f}'.format(step, avg_loss), fontsize=16.) plt.show() return ## Train the embedder opt = tf.keras.optimizers.get(optimizer_name) opt.lr = lr output_loss_every = 100 losses = [] for step, paired_stacks in enumerate(combined_dset.take(num_steps)): with tf.GradientTape() as tape: embs1 = model(paired_stacks[0], training=True) embs2 = model(paired_stacks[1], training=True) logits, labels = align_pair_of_sequences(embs1, embs2, similarity_type, temperature) loss = classification_loss(logits, labels) logits, labels = align_pair_of_sequences(embs2, embs1, similarity_type, temperature) loss += classification_loss(logits, labels) grads = tape.gradient(loss, model.trainable_variables) opt.apply_gradients(zip(grads, model.trainable_variables)) losses.append(loss.numpy()) if not step % output_loss_every: if output_plots_during_training: display_pca_embs(model, plot_imgs, step, np.average(losses[-output_loss_every:])) else: print('Step {} Loss: {:.2f}'.format(step, np.average(losses[-output_loss_every:]))) if output_plots_during_training: display_pca_embs(model, plot_imgs, step, np.average(losses[-output_loss_every:])) else: print('Step {} Loss: {:.2f}'.format(step, np.average(losses[-output_loss_every:]))) print('Training completed.') #@title Run PCA on the output of the trained model embs_post = [] for start_ind in range(0, len(plot_imgs), stack_size): if not start_ind: embs_post = model(plot_imgs[start_ind:start_ind+stack_size], training=False) else: embs_post = tf.concat([embs_post, model(plot_imgs[start_ind:start_ind+stack_size], training=False)], 0) pca_post = PCA(n_components=2) pca_post.fit(embs_post) print('PCA2 explained variance after training:', pca_post.explained_variance_ratio_) t_post = pca_post.transform(embs_post) #@title Compare the PCA embeddings before and after training plt.figure(figsize=(16, 8)) zoom_factor = 1. ## scales the size of the individual digit images plt.subplot(121) ax = plt.gca() for img_id, img in enumerate(plot_imgs): img = tf.concat([1-img, 1-img, 1-img, img], -1) im = OffsetImage(img, zoom=zoom_factor) ab = AnnotationBbox(im, t_pre[img_id], frameon=False) ax.add_artist(ab) plt.scatter(t_pre[img_id, 0], t_pre[img_id, 1], s=0) ## this is just so the axes bound the images plt.xlabel('PC0, var = {:.3f}'.format(pca_pre.explained_variance_ratio_[0]), fontsize=14.) plt.ylabel('PC1, var = {:.3f}'.format(pca_pre.explained_variance_ratio_[1]), fontsize=14.) plt.title('Before training', fontsize=16.) plt.subplot(122) ax = plt.gca() for img_id, img in enumerate(plot_imgs): img = tf.concat([1-img, 1-img, 1-img, img], -1) im = OffsetImage(img, zoom=zoom_factor) ab = AnnotationBbox(im, t_post[img_id], frameon=False) ax.add_artist(ab) plt.scatter(t_post[img_id, 0], t_post[img_id, 1], s=0) plt.xlabel('PC0, var = {:.3f}'.format(pca_post.explained_variance_ratio_[0]), fontsize=14.) plt.ylabel('PC1, var = {:.3f}'.format(pca_post.explained_variance_ratio_[1]), fontsize=14.) 
plt.title('After training', fontsize=16.) plt.show() #@title Check out other digits (0s and 1s are often easier to decipher) imgs_to_plot = 200 digits_to_plot = [0, 1, 2] plt.figure(figsize=(18, 6)) for plot_id, digit_id in enumerate(digits_to_plot): imgs = [] embs = [] for ind, img_stack in enumerate(ds[digit_id].take(imgs_to_plot//stack_size)): if not ind: imgs = img_stack embs = model(img_stack, training=False) else: imgs = tf.concat([imgs, img_stack], 0) embs = tf.concat([embs, model(img_stack, training=False)], 0) t_post = pca_post.transform(embs) plt.subplot(1, 3, plot_id+1) ax = plt.gca() for img_id, img in enumerate(imgs): img = tf.concat([1-img, 1-img, 1-img, img], -1) im = OffsetImage(img, zoom=zoom_factor) ab = AnnotationBbox(im, t_post[img_id], frameon=False) ax.add_artist(ab) plt.scatter(t_post[img_id, 0], t_post[img_id, 1], s=0.) plt.show() ## Perform retrieval using random test digits all_imgs = []; all_embs = [] num_images_to_use = 512 for d in ds_test: for stack_id, img_stack in enumerate(d.take(num_images_to_use//stack_size)): embs = model(img_stack, training=False) if stack_id: all_imgs[-1] = tf.concat([all_imgs[-1], img_stack], 0) all_embs[-1] = tf.concat([all_embs[-1], embs], 0) else: all_imgs.append(img_stack) all_embs.append(embs) ## group by the nearest example of each digit plt.figure(figsize=(10, 10)) for digit in range(10): template_img = all_imgs[digit][0] for other_digit in range(10): dists = pairwise_l2_distance(all_embs[digit][0:1], all_embs[other_digit]) ## [1, stack_size] min_ind = tf.argmin(dists[0]) plt.subplot(10, 10, digit*10 + other_digit + 1) plt.imshow(all_imgs[other_digit][min_ind, ..., 0], cmap='binary') plt.xticks([]); plt.yticks([]) ax = plt.gca() plt.setp(ax.spines.values(), color='#ccdad1', linewidth=[0., 5.][digit==other_digit]) plt.show() ###Output _____no_output_____
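###Markdown Hedged aside: the cycle-back ("come back to yourself") loss above is built from TensorFlow ops. The cell below is a minimal NumPy-only sketch of the same soft-nearest-neighbour idea on toy arrays, useful as a sanity check of the algebra; every name and shape in it is illustrative and not taken from the notebook. ###Code
import numpy as np

rng = np.random.default_rng(0)
embs1 = rng.normal(size=(5, 3))   # a stack of 5 embeddings with 3 latent dims
embs2 = rng.normal(size=(5, 3))

def pairwise_l2sq(a, b):
    # squared Euclidean distance between every row of a and every row of b
    return ((a[:, None, :] - b[None, :, :]) ** 2).sum(-1)

def softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

# embs1 -> soft nearest neighbours in embs2 -> similarities back to embs1
sim_12 = -pairwise_l2sq(embs1, embs2)
nn_embs = softmax(sim_12, axis=1) @ embs2
logits = -pairwise_l2sq(nn_embs, embs1)

# cross-entropy against identity labels: each element should cycle back to itself
log_probs = np.log(softmax(logits, axis=1))
loss = -np.mean(np.diag(log_probs))
print('toy cycle-back loss:', round(float(loss), 4))
###Output
_____no_output_____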
notebooks/Modeling - 2.0 - Dummy Classifier.ipynb
###Markdown Import ###Code import dill import numpy as np import pandas as pd pd.set_option("display.max_columns", None) import plotly.graph_objects as go import matplotlib.pyplot as plt import seaborn as sns from sklearn.dummy import DummyClassifier from src.models.dummy_classifier import ModelDummyClassifier import src.models.performance_metrics as performance_metrics ###Output _____no_output_____ ###Markdown Dataset ###Code with open(path_dataset_train, "rb") as input_file: dataset_train = dill.load(input_file) ###Output _____no_output_____ ###Markdown Overall ###Code model = ModelDummyClassifier() model.version dataset_train = model.preprocessing_training(dataset_train) model.train(dataset_train) with open(f"models/{model.version}__model.pkl", "wb") as file: dill.dump(model, file) print(f"Saved at {file.name}") ###Output _____no_output_____ ###Markdown Validation results ###Code with open(path_dataset_valid, "rb") as input_file: dataset_valid = dill.load(input_file) y_valid = dataset_valid[model.vardict["target"]].copy() dataset_valid = model.preprocessing_inference(dataset_valid) predictions = model.predict(dataset=dataset_valid, target_present=False) predictions["y_true"] = y_valid.values.tolist() predictions binary_classification_results = performance_metrics.get_binary_classification_results( predictions, model_name=f"{model.version}_valid" ) binary_classification_results regression_results = performance_metrics.get_regression_results( predictions, model_name=f"{model.version}_valid" ) regression_results performance_metrics.plot_roc_auc_curve(predictions, model_name=f"{model.version}_valid") performance_metrics.plot_precision_recall_curve( predictions, binary_classification_results, model_name=f"{model.version}_valid" ) performance_metrics.plot_predictions(predictions, model_name=f"{model.version}_valid") ###Output _____no_output_____
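###Markdown Hedged aside: `ModelDummyClassifier` wraps a dummy baseline whose internals are not shown here. For orientation only, the cell below sketches what a plain scikit-learn `DummyClassifier` baseline looks like on synthetic data; the synthetic `X`/`y` and the `stratified` strategy are assumptions for illustration, not the project's actual configuration. ###Code
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import train_test_split

rng = np.random.RandomState(0)
X = rng.normal(size=(1000, 5))
y = (rng.rand(1000) < 0.3).astype(int)   # imbalanced, feature-independent target

X_tr, X_va, y_tr, y_va = train_test_split(X, y, random_state=0, stratify=y)

dummy = DummyClassifier(strategy="stratified", random_state=0)
dummy.fit(X_tr, y_tr)

proba = dummy.predict_proba(X_va)[:, 1]
print("accuracy:", accuracy_score(y_va, dummy.predict(X_va)))
print("ROC AUC :", roc_auc_score(y_va, proba))   # should hover around 0.5
###Output
_____no_output_____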
FinText_RNN.ipynb
###Markdown Aqui temos o download do FinText baseado no FastText (método Skip-gram). Para uso do FinText baseado no Word2Vec (método Skip-gram), basta que se altere a URL abaixo e as linhas de código que usem o arquivo baixado: ###Code !wget https://www.rahimikia.com/FinText/FinText_FastText_Skip-gram.zip !mkdir FinText_Word2Vec !unzip "/content/FinText_FastText_Skip-gram.zip" -d "/content/FinText_FastText/" import os import numpy as np from numpy.random import seed seed(42) rng = np.random.RandomState(42) import tensorflow tensorflow.random.set_seed(42) os.environ['TF_DETERMINISTIC_OPS'] = '1' !pip install keras -U !pip install tensorflow-addons tensorflow-determinism import pandas as pd from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences from sklearn.preprocessing import LabelEncoder from gensim.models import FastText from tensorflow_addons.optimizers import AdamW from tensorflow.keras.layers import Layer, Embedding, Input, Dropout, Bidirectional, LSTM, GRU, Dense, Conv1D, MaxPooling1D #from tensorflow.compat.v1.keras.layers import CuDNNGRU from tensorflow.keras.models import Model from tensorflow.keras.utils import plot_model from sklearn.model_selection import StratifiedKFold from sklearn.utils.class_weight import compute_class_weight from sklearn.metrics import confusion_matrix, matthews_corrcoef, f1_score, precision_score, recall_score, balanced_accuracy_score from imblearn.metrics import specificity_score data = pd.read_csv('/content/title-sentiment.csv',encoding='latin-1') data.title = data.title.astype(str) data.sentiment = data.sentiment.astype(str) data['title'] = data['title'].str.replace(r'[^\w\s]+', '') data['title'] = data['title'].str.replace('\s+', ' ', regex=True) X = data['title'].to_numpy() MAX_NB_WORDS = 12697 MAX_SEQUENCE_LENGTH = 225 tokenizer = Tokenizer(num_words=MAX_NB_WORDS,split=' ') tokenizer.fit_on_texts(X) X = tokenizer.texts_to_sequences(X) X = pad_sequences(X, maxlen=MAX_SEQUENCE_LENGTH) encoder = LabelEncoder() Y = encoder.fit_transform(data['sentiment']) encoder.classes_ vocab_size = len(tokenizer.word_index) + 1 print (vocab_size) FinText_FastText_skipgram = FastText.load('/content/FinText_FastText/FinText_FastText_Skip-gram/Word_Embedding_2000_2015') embedding_matrix = np.zeros((vocab_size, 300)) for word, i in tokenizer.word_index.items(): try: embedding_vector = FinText_FastText_skipgram[word] if embedding_vector is not None: embedding_matrix[i] = embedding_vector except: pass embedding_matrix embedding_matrix.shape embedding_layer = Embedding(vocab_size, 300, weights=[embedding_matrix], input_length=(MAX_SEQUENCE_LENGTH,), trainable=False) custom_adam = AdamW(weight_decay=0.0,learning_rate=1e-5, epsilon=1e-8) ###Output _____no_output_____ ###Markdown Definições para uso do mecanismo de Atenção nas RNNs ###Code import tensorflow.keras.backend as K from tensorflow.keras import regularizers, constraints, initializers def dot_product(x, kernel): """ Wrapper for dot product operation, in order to be compatible with both Theano and Tensorflow Args: x (): input kernel (): weights Returns: """ if K.backend() == 'tensorflow': return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1) else: return K.dot(x, kernel) class AttentionWithContext(Layer): """ Attention operation, with a context/query vector, for temporal data. Supports Masking. Follows the work of Yang et al. 
[https://www.cc.gatech.edu/~dyang888/docs/naacl16.pdf] "Hierarchical Attention Networks for Document Classification" by using a context vector to assist the attention # Input shape 3D tensor with shape: `(samples, steps, features)`. # Output shape 2D tensor with shape: `(samples, features)`. How to use: Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True. The dimensions are inferred based on the output shape of the RNN. Note: The layer has been tested with Keras 2.0.6 Example: model.add(LSTM(64, return_sequences=True)) model.add(AttentionWithContext()) # next add a Dense layer (for classification/regression) or whatever... """ def __init__(self, W_regularizer=None, u_regularizer=None, b_regularizer=None, W_constraint=None, u_constraint=None, b_constraint=None, bias=True, **kwargs): self.supports_masking = True self.init = initializers.get('glorot_uniform') self.W_regularizer = regularizers.get(W_regularizer) self.u_regularizer = regularizers.get(u_regularizer) self.b_regularizer = regularizers.get(b_regularizer) self.W_constraint = constraints.get(W_constraint) self.u_constraint = constraints.get(u_constraint) self.b_constraint = constraints.get(b_constraint) self.bias = bias super(AttentionWithContext, self).__init__(**kwargs) def build(self, input_shape): assert len(input_shape) == 3 self.W = self.add_weight(shape=(input_shape[-1], input_shape[-1],), initializer=self.init, name='{}_W'.format(self.name), regularizer=self.W_regularizer, constraint=self.W_constraint) if self.bias: self.b = self.add_weight(shape=(input_shape[-1],), initializer='zero', name='{}_b'.format(self.name), regularizer=self.b_regularizer, constraint=self.b_constraint) self.u = self.add_weight(shape=(input_shape[-1],), initializer=self.init, name='{}_u'.format(self.name), regularizer=self.u_regularizer, constraint=self.u_constraint) super(AttentionWithContext, self).build(input_shape) def compute_mask(self, input, input_mask=None): # do not pass the mask to the next layers return None def call(self, x, mask=None): uit = dot_product(x, self.W) if self.bias: uit += self.b uit = K.tanh(uit) ait = dot_product(uit, self.u) a = K.exp(ait) # apply mask after the exp. will be re-normalized next if mask is not None: # Cast the mask to floatX to avoid float64 upcasting in theano a *= K.cast(mask, K.floatx()) # in some cases especially in the early stages of training the sum may be almost zero # and this results in NaN's. A workaround is to add a very small positive number ε to the sum. 
# a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx()) a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx()) a = K.expand_dims(a) weighted_input = x * a return K.sum(weighted_input, axis=1) def compute_output_shape(self, input_shape): return input_shape[0], input_shape[-1] ###Output _____no_output_____ ###Markdown Definição da RNN (pode ser alterada para definir outras arquiteturas, como LSTMs bidirecionais, GRUs, Convnets, etc) ###Code def lstm_model(input_shape): X_indices = Input(input_shape) embeddings = embedding_layer(X_indices) #X = Dropout(0.5)(embeddings) X = LSTM(50, return_sequences=True)(embeddings) X = LSTM(50, return_sequences=True)(X) #X = LSTM(50, return_sequences=False)(X) X = AttentionWithContext()(X) X = Dense(3, activation='softmax')(X) model = Model(inputs=X_indices, outputs=X) model.summary() plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True) return model ###Output _____no_output_____ ###Markdown Treinamento da RNN com 10-fold cross-validation e obtenção dos valores de benchmarking do modelo ###Code bilstm_mcc = [] bilstm_f1 = [] bilstm_precision = [] bilstm_recall = [] bilstm_bacc = [] bilstm_spec = [] fold = 1 skf = StratifiedKFold(n_splits=10, random_state=rng, shuffle=True) for train_index, test_index in skf.split(X, Y): model_bilstm = lstm_model((MAX_SEQUENCE_LENGTH,)) model_bilstm.compile(optimizer=custom_adam,loss='sparse_categorical_crossentropy',metrics=['acc']) X_train, X_test = X[train_index], X[test_index] y_train, y_test = Y[train_index], Y[test_index] class_weights = compute_class_weight(class_weight = 'balanced', classes = np.unique(y_train), y = y_train) weight = {i : class_weights[i] for i in range(3)} model_bilstm.fit(X_train,y_train,epochs=10,verbose=1,batch_size=32, class_weight=weight) y_pred = model_bilstm.predict(X_test, batch_size=32) preds = np.argmax(y_pred, axis = 1) cnf_mtx = confusion_matrix(y_test, preds) print("Fold #%i Confusion Matrix:" % fold) print(cnf_mtx) bilstm_mcc.append(matthews_corrcoef(y_test, preds)) bilstm_f1.append(f1_score(y_test, preds, average='weighted')) bilstm_precision.append(precision_score(y_test, preds, average='weighted')) bilstm_recall.append(recall_score(y_test, preds, average='weighted')) bilstm_bacc.append(balanced_accuracy_score(y_test, preds)) bilstm_spec.append(specificity_score(y_test, preds, average='weighted')) fold += 1 print(f"Mean-MCC: {sum(bilstm_mcc) / len(bilstm_mcc):.4f}") print(f"Mean-F1: {sum(bilstm_f1) / len(bilstm_f1):.4f}") print(f"Mean-Precision: {sum(bilstm_precision) / len(bilstm_precision):.4f}") print(f"Mean-Recall: {sum(bilstm_recall) / len(bilstm_recall):.4f}") print(f"Mean-BACC: {sum(bilstm_bacc) / len(bilstm_bacc):.4f}") print(f"Mean-Specificity: {sum(bilstm_spec) / len(bilstm_spec):.4f}") ###Output _____no_output_____
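###Markdown Hedged aside: to make the `AttentionWithContext` pooling concrete, the cell below re-implements its forward pass (uit = tanh(xW + b), ait = uit·u, softmax weights, weighted sum over time) in plain NumPy on a single toy sequence. The random weights and shapes are illustrative only. ###Code
import numpy as np

rng = np.random.default_rng(42)
steps, features = 4, 6
x = rng.normal(size=(steps, features))    # one sequence of RNN hidden states
W = rng.normal(size=(features, features))
b = np.zeros(features)
u = rng.normal(size=features)             # learned context vector (random here)

uit = np.tanh(x @ W + b)
ait = uit @ u
a = np.exp(ait - ait.max())
a = a / a.sum()                           # attention weights over the time steps

pooled = (x * a[:, None]).sum(axis=0)     # weighted sum, shape (features,)
print("attention weights:", np.round(a, 3))
print("pooled vector shape:", pooled.shape)
###Output
_____no_output_____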
176.ipynb
###Markdown 读取数据 ###Code evaluation=pd.read_csv('submit.txt') product_info=pd.read_csv('product_info.txt') product_quantity=pd.read_csv('product_quantity.txt') ###Output _____no_output_____ ###Markdown 数据排序 ###Code product_quantity.sort_values(['product_id','product_date'],inplace=True) product_quantity ###Output _____no_output_____ ###Markdown 根据天整理数据 ###Code train_day=product_quantity.groupby(['product_id','product_date']).sum()['ciiquantity'].unstack()#.mean(axis=1) train_day ###Output _____no_output_____ ###Markdown 查看数据缺失情况 ###Code train_day.apply(lambda x: sum(x.isnull())).plot(figsize=(12,6)) ###Output _____no_output_____ ###Markdown 缺失数据的走势 ###Code train_day.sum().plot(figsize=(12,6)) # 为什么不填充?因为最后一天仍有1000家商铺信息缺失。即接下来的14个月,这样的情况仍会持续 ###Output _____no_output_____ ###Markdown 月度数据整理 ###Code product_quantity['product_month']=product_quantity['product_date'].apply(lambda x: x[:7]) train_month=product_quantity.groupby(['product_id','product_month']).sum()['ciiquantity'].unstack() train_month.sum().plot(figsize=(12,6)) train_month product_quantity['product_month']=product_quantity['product_date'].apply(lambda x: x[:7]) product_quantity=product_quantity[product_quantity.product_month>='2015-03'] train_month=product_quantity.groupby(['product_id','product_month']).sum()['ciiquantity'].unstack() train_month train_month.fillna(140,inplace=True) train_month.mean().plot(figsize=(10,6)) average_all=pd.DataFrame(train_month.mean(axis=1),columns=['average_all']).reset_index() average_all.shape submission=evaluation submission.shape col=['product_id','product_month','ciiquantity_month'] submission.columns=col out=pd.merge(submission,average_all,on='product_id',how='left').fillna(132) out.apply(lambda x: sum(x.isnull())) out.ciiquantity_month=out.average_all*0.9 out.drop(['average_all'],axis=1,inplace=True) out.to_csv('sub_average9months132fillna2.txt',index=False) out ###Output _____no_output_____
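###Markdown Hedged aside: the groupby/unstack pivot and per-product average baseline above can be illustrated on a tiny synthetic frame; the ids, dates and quantities below are made up and only mimic the `product_quantity` schema. ###Code
import pandas as pd

toy = pd.DataFrame({
    'product_id':   [1, 1, 1, 2, 2],
    'product_date': ['2015-03-01', '2015-03-15', '2015-04-02', '2015-03-07', '2015-04-20'],
    'ciiquantity':  [10, 5, 8, 3, 6],
})
toy['product_month'] = toy['product_date'].str[:7]

# pivot: one row per product, one column per month
monthly = toy.groupby(['product_id', 'product_month'])['ciiquantity'].sum().unstack()
print(monthly)

# per-product mean over the observed months, used as a naive forecast level
baseline = monthly.mean(axis=1).rename('average_all').reset_index()
print(baseline)
###Output
_____no_output_____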
Scrapping_Preprocessing_Data/7_Data_set_up_for_Training_Validation_And_Testing.ipynb
###Markdown Data Preparation for Traing Read Data Set ###Code import pandas as pd import seaborn as sns from bokeh.charts import Horizon, output_file, show import numpy as np import tensorflow as tf from tensorflow.python.ops import * import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline cd CSV_file_from_2007_to_2017/ Data = pd.read_csv('ItemListWithDummy.csv') Data.columns Data[1:35] del Data['Unnamed: 0'] #del Data['hour'] del Data['day'] del Data['daysInterval'] del Data['days_interval'] del Data['date'] del Data['time'] del Data['hour_interval'] del Data['NAME'] Data[1:4] ###Output _____no_output_____ ###Markdown Import Datasets For Only one Location ###Code DataKarpos = pd.read_csv('Target.csv') # Only Karpos Location is used here . Other locations are zero DataKarpos[1:4] DataKarpos.columns TargetKarpos = DataKarpos[['PM10', 'month','hour','PM10_null_pointers']].copy(deep=True) TargetKarpos.size #Total size print("Total Nan present in values for Location Karpos:");TargetKarpos.isnull().values.sum() # Null size print ("Percentage of Null Values : ");TargetKarpos.isnull().values.sum()/TargetKarpos.size *100 print ("Percentage of Non-null Values : ");(TargetKarpos.size-TargetKarpos.isnull().values.sum())/TargetKarpos.size *100 ###Output Percentage of Non-null Values : ###Markdown Pair plots And Fillna ###Code # Fill data sets with Null Data.fillna(0,inplace=True) #df[1].fillna(0, inplace=True) Data.isnull().values.sum() # Null size Data[1:5] g = sns.pairplot(Data, size=3, vars=["PM10", "O3","NO2","CO","PM25"]) plt.show() ###Output _____no_output_____ ###Markdown g = sns.pairplot(Data, size=3, vars=["CO"],hue='PM10')plt.show() ###Code g = sns.pairplot(Data, size=3, vars=["PM10","PM25"],kind="reg") plt.show() sns.jointplot(x="PM10", y="CO", data=Data); sns.jointplot(x="PM10", y="PM25", data=Data); sns.jointplot(x="PM10", y="NO2", data=Data); ###Output _____no_output_____
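###Markdown Hedged aside: the missing-value percentage and pairwise-relationship checks above can be reproduced on a small synthetic frame; the column names below reuse the pollutant names, but the values are random and purely illustrative. ###Code
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
toy = pd.DataFrame(rng.normal(size=(100, 3)), columns=['PM10', 'PM25', 'CO'])
toy.loc[rng.choice(100, 20, replace=False), 'PM10'] = np.nan   # inject some gaps

pct_missing = toy.isnull().mean() * 100
print("percent missing per column:")
print(pct_missing)

# pairwise linear relationships on the fully observed rows
print("correlation matrix:")
print(toy.dropna().corr())
###Output
_____no_output_____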
autompg_linearregression.ipynb
###Markdown 데이터 로딩 ###Code import pandas as pd df = pd.read_csv('./auto-mpg.csv', header=None) df.columns = ['mpg','cylinders','displacement','horsepower','weight', 'acceleration','model year','origin','name'] df.info() df[['horsepower','name']].describe(include='all') ###Output _____no_output_____ ###Markdown replace() ###Code df['horsepower'].value_counts() df['horsepower'].unique() df_horsepower = df['horsepower'].replace(to_replace='?', value=None, inplace=False) df_horsepower.unique() df_horsepower = df_horsepower.astype('float') df_horsepower.mean() df['horsepower'] = df_horsepower.fillna(104) df.info() df['name'].unique() df.head() ###Output _____no_output_____ ###Markdown 분류와 연속 컬럼 구분 ###Code df.head(8) ###Output _____no_output_____ ###Markdown check columns - 연속형 : displacement, horsepower, weight, acceleration, mpg- 분류형 : model year, name, cylinders, origin ###Code df['name'].value_counts() df['origin'].value_counts() df['mpg'].describe(include='all') df['mpg'].value_counts() ###Output _____no_output_____ ###Markdown 정규화 단계 ###Code Y = df['mpg'] X_contiuns = df[['displacement', 'horsepower', 'weight', 'acceleration']] X_category = df[['model year', 'cylinders', 'origin']] from sklearn import preprocessing scaler = preprocessing.StandardScaler() type(scaler) scaler.fit(X_contiuns) X = scaler.transform(X_contiuns) from sklearn.linear_model import LinearRegression lr = LinearRegression() type(lr) lr.fit(X,Y) lr.score(X,Y) import pickle pickle.dump(lr, open('./autompg_lr.pkl','wb')) ###Output _____no_output_____ ###Markdown 데이터 로딩 ###Code import pandas as pd df = pd.read_csv('./auto-mpg.csv', header=None) df.columns = ['mpg','cylinders','displacement','horsepower','weight','acceleration','model year','origin','name'] df.info() # df.describe(include='all') df[['horsepower','name']].describe(include='all') ###Output _____no_output_____ ###Markdown Replace() ###Code df['horsepower'].value_counts() # df['horsepower'].astype('float') df['horsepower'].unique() df_horsepower = df['horsepower'].replace(to_replace='?', value=None, inplace=False) df_horsepower.unique() # type(df['horsepower']) ###Output _____no_output_____ ###Markdown pandas.DataFrame.replacehttps://pandas.pydata.org/docs/reference/api/pandas.DataFrame.replace.html ###Code type(df_horsepower), df_horsepower.dtypes ###Output _____no_output_____ ###Markdown dtype('O') -> Object: 통계값 X ###Code df_horsepower = df_horsepower.astype('float') df_horsepower.mean() df['horsepower'] = df_horsepower.fillna(104) df.info() df['name'].unique() df.head() df['origin'] df.head() ###Output _____no_output_____ ###Markdown 분류와 연속 컬럼 구분 ###Code df.head(8) ###Output _____no_output_____ ###Markdown check columns- 연속형: displacement, horsepower, weight, acceleration, mpg- 분류형: model year, name, cylinders, origin ###Code df['name'].value_counts() df['origin'].value_counts() df['mpg'].describe(include='all') df['mpg'].value_counts() ###Output _____no_output_____ ###Markdown 정규화 단계 ###Code Y = df['mpg'] X_continues = df[['displacement', 'horsepower', 'weight', 'acceleration']] X_categories = df[['model year', 'cylinders', 'origin']] from sklearn import preprocessing scaler = preprocessing.StandardScaler() type(scaler) scaler.fit(X_continues) X = scaler.transform(X_continues) from sklearn.linear_model import LinearRegression lr = LinearRegression() type(lr) lr.fit(X, Y) lr.score(X,Y) ###Output _____no_output_____ ###Markdown 데이터 로딩 ###Code import pandas as pd df = pd.read_csv('./auto-mpg.csv', header=None) df.columns = 
['mpg','cylinders','displacement','horsepower','weight', 'acceleration','model year','origin','name'] df.info() df[['horsepower','name']].describe(include='all') ###Output _____no_output_____ ###Markdown replace() ###Code df['horsepower'].value_counts() df['horsepower'].unique() df_horsepower = df['horsepower'].replace(to_replace='?', value=None, inplace=False) df_horsepower.unique() df_horsepower = df_horsepower.astype('float') df_horsepower.mean() df['horsepower'] = df_horsepower.fillna(104) df.info() df['name'].unique() df.head() ###Output _____no_output_____ ###Markdown 분류와 연속 컬럼 구분 ###Code df.head(8) ###Output _____no_output_____ ###Markdown check columns - 연속형 : displacement, horsepower, weight, acceleration, mpg- 분류형 : model year, name, cylinders, origin ###Code df['name'].value_counts() df['origin'].value_counts() df['mpg'].describe(include='all') df['mpg'].value_counts() ###Output _____no_output_____ ###Markdown 정규화 단계 ###Code Y = df['mpg'] X_contiuns = df[['displacement', 'horsepower', 'weight', 'acceleration']] X_category = df[['model year', 'cylinders', 'origin']] from sklearn import preprocessing scaler = preprocessing.StandardScaler() type(scaler) scaler.fit(X_contiuns) X = scaler.transform(X_contiuns) from sklearn.linear_model import LinearRegression lr = LinearRegression() type(lr) lr.fit(X,Y) lr.score(X,Y) df.head(1) x_cusmter = scaler.transform([[307.0,130.0,3504.0,12.0]]) x_cusmter.shape lr.predict(x_cusmter) ###Output _____no_output_____ ###Markdown pickle ###Code import pickle pickle.dump(lr, open('./autompg_lr.pkl', 'wb')) !ls -l ./saves/autompg_lr.pkl pickle.load(open('./saves/autompg_lr.pkl', 'rb')) pickle.dump(scaler, open('./autompg_standardscaler.pkl', 'wb')) ###Output _____no_output_____ ###Markdown 데이터 로딩 ###Code import pandas as pd df = pd.read_csv('./auto-mpg.csv', header=None) df.columns = ['mpg','cylinders','displacement','horsepower','weight', 'acceleration','model year','origin','name'] df.info() df[['horsepower','name']].describe(include='all') ###Output _____no_output_____ ###Markdown replace() ###Code df['horsepower'].value_counts() df['horsepower'].unique() df_horsepower = df['horsepower'].replace(to_replace='?', value=None, inplace=False) df_horsepower.unique() df_horsepower = df_horsepower.astype('float') df_horsepower.mean() df['horsepower'] = df_horsepower.fillna(104) df.info() df['name'].unique() df.head() ###Output _____no_output_____ ###Markdown 분류와 연속 컬럼 구분 ###Code df.head(8) ###Output _____no_output_____ ###Markdown check columns - 연속형 : displacement, horsepower, weight, acceleration, mpg- 분류형 : model year, name, cylinders, origin ###Code df['name'].value_counts() df['origin'].value_counts() df['mpg'].describe(include='all') df['mpg'].value_counts() ###Output _____no_output_____ ###Markdown 정규화 단계 ###Code Y = df['mpg'] X_contiuns = df[['displacement', 'horsepower', 'weight', 'acceleration']] X_category = df[['model year', 'cylinders', 'origin']] from sklearn import preprocessing scaler = preprocessing.StandardScaler() type(scaler) scaler.fit(X_contiuns) X = scaler.transform(X_contiuns) from sklearn.linear_model import LinearRegression lr = LinearRegression() type(lr) lr.fit(X,Y) lr.score(X,Y) df.head(1) ###Output _____no_output_____ ###Markdown X_contiuns = df[['displacement', 'horsepower', 'weight', 'acceleration']] ###Code x_cusmter = scaler.transform([[307.0,130.0,3504.0,12.0]]) x_cusmter.shape lr.predict(x_cusmter) ###Output _____no_output_____ ###Markdown XGboost ###Code import xgboost as xgb model_xgb = xgb.XGBRegressor() 
model_xgb.fit(X, Y) model_xgb.score(X,Y) model_xgb.predict(x_cusmter) ###Output _____no_output_____ ###Markdown LightXGboost ###Code from lightgbm import LGBMRegressor model_lxgb = LGBMRegressor() model_lxgb.fit(X, Y) model_lxgb.score(X, Y) ###Output _____no_output_____ ###Markdown pickle ###Code import pickle pickle.dump(lr, open('./autompg_lr.pkl','wb')) ###Output _____no_output_____ ###Markdown ###Code !ls -l ./saves/autompg_lr.pkl pickle.load(open('./saves/autompg_lr.pkl', 'rb')) pickle.dump(scaler, open('./autompg_standardscaler.pkl','wb')) ###Output _____no_output_____ ###Markdown One hot encoding ###Code X_category.head(3) X_category['origin'].value_counts() # 1, 2, 3 #? | ? | ? # 1 | 0 | 0 -> 1 # 0 | 1 | 0 -> 2 # 0 | 0 | 1 _ 3 # data, prefix=None df_origin = pd.get_dummies(X_category['origin'], prefix='origin') df_cylinders = pd.get_dummies(X_category['cylinders'], prefix='cylinders') df_origin.shape, df_cylinders.shape X_contiuns.head(3) # X_contiuns + df_cylinders + df_origin # objs, axis=0 X = pd.concat([X_contiuns, df_cylinders, df_origin], axis='columns') from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(X, Y) x_train.shape, x_test.shape, y_train.shape, y_test.shape import xgboost xgb = xgboost.XGBRegressor() xgb xgb.fit(x_train, y_train) xgb.score(x_train, y_train) ###Output _____no_output_____ ###Markdown ###Code xgb.score(x_test, y_test) import pickle pickle.dump(scaler, open('./scaler_model.pkl','wb')) pickle.dump(xgb, open('./xgb_model.pkl','wb')) ###Output _____no_output_____ ###Markdown 데이터 로딩 ###Code import pandas as pd df = pd.read_csv('./auto-mpg.csv', header=None) df.info() df.columns = ['mpg','cylinders','displacement','horsepower','weight', 'acceleration','model year','origin','name'] df[['horsepower','name']].describe(include='all') df['horsepower'].value_counts() # df['horsepower'].astype('float') ###Output _____no_output_____ ###Markdown replace () ###Code df['horsepower'].unique() df_horsepower = df['horsepower'].replace(to_replace='?', value=None, inplace=False) df_horsepower.unique() df_horsepower = df['horsepower'].replace(to_replace='?', inplace=False) df_horsepower.unique() df_horsepower = df_horsepower.astype('float') df_horsepower.mean() df['horsepower'] = df_horsepower.fillna(104) df.info() df['name'].unique() df.head() ###Output _____no_output_____ ###Markdown 분류와 연속 컬럼 구분 check cloumns- 연속형 : displacement, horsepower, weight, acceleration(소수점 이하의 값 존재->category x) -> standard scaler, mpg- 분류형 : model year, name, cylinders, origin- 중립형 : df[' '].describe(), df[' '].value_counts()로 구분 ###Code df['mpg'].describe(include='all') df['mpg'].value_counts() df['cylinders'].describe() df['cylinders'].value_counts() df['origin'].describe() df['name'].value_counts() # category 값이 너무 작아서 사용하기 어려움 ###Output _____no_output_____ ###Markdown 정규화 단계 ###Code Y = df['mpg'] X_continuous = df[['displacement', 'horsepower', 'weight', 'acceleration']] X_catagory = df[['model year', 'cylinders', 'origin']] from sklearn import preprocessing scaler = preprocessing.StandardScaler() type(scaler) scaler.fit(X_continuous) X = scaler.transform(X_continuous) from sklearn.linear_model import LinearRegression lr = LinearRegression() type(lr) lr.fit(X,Y) lr.score(X,Y) ###Output _____no_output_____ ###Markdown ###Code import pandas as pd df = pd.read_csv('./auto-mpg.csv', header=None) df.columns = ['mpg','cylinders','displacement','horsepower','weight','acceleration','model year','origin','name'] df.info() df[['horsepower', 
'name']].describe(include='all') ###Output _____no_output_____ ###Markdown replace() ###Code df['horsepower'].value_counts() df['horsepower'].unique() df_horsepower = df['horsepower'].replace(to_replace='?', value=None, inplace=False) df_horsepower.unique() df_horsepower = df_horsepower.astype('float') df_horsepower.mean() df['horsepower'] = df_horsepower.fillna(104) df.info() df['name'].unique() df.head() ###Output _____no_output_____ ###Markdown 분류와 연속 컬럼 구분 ###Code df.head(8) ###Output _____no_output_____ ###Markdown X_contiuns = df[['displacement', 'horsepower', 'weight', 'acceleration']] ###Code x_custom = scaler.transform([[307.0,13.0,3504.0,12.0]]) x_custom.shape lr.predict(x_custom) ###Output _____no_output_____ ###Markdown Check Columns연속형 : displacement, horsepower, weight, acceleration, mpg 분류형 : model year, name, cylinders, origin ###Code df['name'].value_counts() df['origin'].value_counts() df['mpg'].describe(include='all') df['mpg'].value_counts() ###Output _____no_output_____ ###Markdown 정규화 단계 ###Code X_contiuns = df[['displacement', 'horsepower', 'weight', 'acceleration']] X_category = df[['model year', 'cylinders', 'origin']] Y = df['mpg'] from sklearn import preprocessing scaler = preprocessing.StandardScaler() type(scaler) scaler.fit(X_contiuns) X = scaler.transform(X_contiuns) from sklearn.linear_model import LinearRegression lr = LinearRegression() type(lr) lr.fit(X,Y) lr.score(X,Y) ###Output _____no_output_____ ###Markdown pickle ###Code import pickle pickle.dump(lr, open('./autompg_lr.pkl', 'wb')) !ls -l './saves/autompg_lr.pkl' pickle.load(open('./saves/autompg_lr.pkl', 'rb')) pickle.dump(scaler, open('./autompg_standardscaler.pkl', 'wb')) ###Output _____no_output_____ ###Markdown 데이터 로딩 ###Code import pandas as pd df = pd.read_csv('./auto-mpg_1.csv') df.info() df.columns = ['mpg','cylinders','displacement','horsepower','weight', 'acceleration','model year','origin','name'] df.info() df[['horsepower','name']].describe(include='all') ###Output _____no_output_____ ###Markdown replace() ###Code df['horsepower'].value_counts() df['horsepower'].unique() df_horsepower = df['horsepower'].replace(to_replace='?',value=None,inplace=False) type(df_horsepower) # df_horsepower = df['horsepower'].replace(to_replace='?',value=None , inplace=False) df_horsepower.unique() df_horsepower.unique() # df_horsepower = df['horsepower'].astype('float') # 데이터 형 변경 df_horsepower.unique() df['horsepower'] = df_horsepower.fillna(104) df.info() df['name'].unique() df['origin'].unique() ###Output _____no_output_____ ###Markdown 분류와 연속 컬럼 구분 ###Code df.head(8) ###Output _____no_output_____ ###Markdown check columns - DataFrame.head(5)- 연속형 : displacement , horsepower , weight , acceleration(소숫점으로 판단) , mpg -> Standard Scaler- 분류형 : model year , cylinders , origin -> One-hot Encoding / ( 제외 :name )- 중립형 : ###Code df.head(5) df['mpg'].describe(include='all') # 판단할수 없음 object 만 판단가능 df['mpg'].value_counts() # 소숫점이 있으면 연속형이라고 판단 가능 df['cylinders'].value_counts() # 개체 종류 수가 적음으로 분류라고 판단 df['origin'].value_counts() # 개체 종류 수가 적음으로 분류라고 판단 df['mpg'].describe(include='all') df['mpg'].value_counts() ###Output _____no_output_____ ###Markdown 정규화 단계https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html표준정규화 : z = ( x - u ) s z : Standard Division / s : 표준 편차 /x : Score() /u : Mean() / ###Code df['displacement'].describe() , df['horsepower'].describe() , df['weight'].describe(), df['acceleration'].describe() # df['acceleration'].describe(include='all') Y = df['mpg'] 
X_continus = df[['displacement' ,'horsepower', 'weight' , 'acceleration']] X_category = df[['model year', 'cylinders' , 'origin']] X_continus from sklearn import preprocessing scaler = preprocessing.StandardScaler() type(scaler) scaler.fit(X_continus) # 교육단계 X = scaler.transform(X_continus) from sklearn.linear_model import LinearRegression lr = LinearRegression() lr.fit(X,Y) lr.score(X,Y) ###Output _____no_output_____ ###Markdown predict로 전달 하기 predict로 전달 하기X_continus = df[['displacement','horsepower', 'weight','acceleration']] ###Code df.head(1) #scaler를 이미 해서 패턴을 만들어놨기 때문에 transform을 사용한다. x_cusmter = scaler.transform([[307.0,130.0,3504.0,12.0]]) x_cusmter.shape lr.predict(x_cusmter) ###Output _____no_output_____ ###Markdown Pickle ###Code import pickle !dir pickle.dump(lr, open('./auto-mpg_lr.pkl','wb')) # 확장자는 아무거나 해도 되지만 알수 있게 , pkl 통용 된다. pickle.load(open('./saves/auto-mpg_lr.pkl', 'rb')) pickle.dump(scaler,open('./autompg_standard_scaler.pkl','wb')) ###Output _____no_output_____ ###Markdown 데이터 로딩 ###Code import pandas as pd df = pd.read_csv('./auto-mpg.csv', header=None) df.info() df.columns = ['mpg','cylinders','displacement','horsepower','weight', 'acceleration','model year','origin','name'] df df.info() df[['horsepower','name']].describe(include='all') ###Output _____no_output_____ ###Markdown replace() ###Code df['horsepower'].value_counts() df['horsepower'].astype('float64') # 이렇게 하면 안된다는 것을 확인하는 차원에서 들어간 코드임 df['horsepower'].unique() df_horsepower = df['horsepower'].replace(to_replace='?', value=None, inplace=False) # ?를 None으로 바꾸면 통계값을 빼낼 수 있다. # type(df_horsepower) df_horsepower.unique() df_horsepower = df_horsepower.astype('float') df_horsepower.mean() df['horsepower'] = df_horsepower.fillna(104) df.info() df['name'].unique() ###Output _____no_output_____ ###Markdown 분류와 연속 컬럼 구분 ###Code df.head(8) # acceleration은 소숫점 이하의 값이 있으니 연속형이라고 볼 수 있다. ###Output _____no_output_____ ###Markdown check columns- 연속형: displacement, horsepower, weight, acceleration, mpg,- 분류형: model year, name, cylinders, origin- 중립형: ###Code df['name'].value_counts() df['mpg'].describe(include='all') df['mpg'].value_counts() # 소숫점 이하의 값이 있으니 연속형이라고 볼 수 있다. df['cylinders'].value_counts() df['origin'].value_counts() ###Output _____no_output_____ ###Markdown 정규화 단계x와 y형으로 나눈다. 
###Code Y = df['mpg'] X_continues = df[['displacement', 'horsepower', 'weight', 'acceleration']] X_category = df[['model year', 'cylinders', 'origin']] from sklearn import preprocessing scaler = preprocessing.StandardScaler() type(scaler) scaler.fit(X_continues) X = scaler.transform(X_continues) from sklearn.linear_model import LinearRegression lr = LinearRegression() type(lr) lr.fit(X,Y) lr.score(X,Y) import pickle pickle.dump(lr, open('./autompg_lr.pkl','wb')) ###Output _____no_output_____ ###Markdown 데이터 로딩 ###Code import pandas as pd pd.read_csv('./auto-mpg.csv', header=None) df = pd.read_csv('./auto-mpg.csv', header=None) df.columns = ['mpg','cylinders','displacement','horsepower','weight', 'acceleration','model year','origin','name'] df.info() df.describe(include='all') ###Output _____no_output_____ ###Markdown replace ###Code df[['horsepower','name']].describe(include='all') df['horsepower'].value_counts() # df['horsepower'].astype('float64') df['horsepower'].unique() df['horsepower'].replace(to_replace='?', value=None, inplace=False) df_horsepower = df['horsepower'].replace(to_replace='?', value=None, inplace=False) df_horsepower.unique() type(df['horsepower']) type(df_horsepower) df_horsepower = df_horsepower.astype('float') df_horsepower.mean() df['horsepower'] = df_horsepower.fillna(104) df.info() df['name'].unique() df.head() ###Output _____no_output_____ ###Markdown 분류와 연속컬럼 구분 ###Code df.head(8) ###Output _____no_output_____ ###Markdown check columns- 연속형 : displacement, horsepower, weight, acceleration, mpg- 분류형 : model year, name, cylinders, origin- 중립형 : ###Code df['name'].value_counts() # 카테고리에 너무 적어서 교육의 의미가 없음 df['mpg'].describe() df['mpg'].value_counts() df['cylinders'].describe() df['cylinders'].value_counts() df['origin'].value_counts() ###Output _____no_output_____ ###Markdown 정규화 단계 ###Code Y = df['mpg'] X_continus = df[['displacement','horsepower', 'weight','acceleration']] X_category = df[['model year','cylinders','origin']] from sklearn import preprocessing scaler = preprocessing.StandardScaler() type(scaler) scaler.fit(X_continus) X = scaler.transform(X_continus) from sklearn.linear_model import LinearRegression lr = LinearRegression() type(lr) lr.fit(X,Y) lr.score(X_continus,Y) df.head(1) ###Output _____no_output_____ ###Markdown X_continus = df[['displacement','horsepower', 'weight','acceleration']]순서에 맞추어 입력하여야 함 ###Code lr.predict([[307.0, 130.0, 3504.0, 12.0]]) scaler.transform([[307.0, 130.0, 3504.0, 12.0]]) x_customer = scaler.transform([[307.0, 130.0, 3504.0, 12.0]]) x_customer.shape lr.predict(x_customer) # Y를 예측하는 것 ###Output _____no_output_____ ###Markdown pickle ###Code import pickle pickle.dump(lr, open('./autompg_lr.pkl', 'wb')) !ls -l ./saves/autompg_lr.pkl pickle.load(open('./saves/autompg_lr.pkl', 'rb')) pickle.dump(scaler, open('./autompg_standardscaler.pkl', 'wb')) ###Output _____no_output_____ ###Markdown 데이터 로딩 ###Code import pandas as pd df = pd.read_csv('./auto-mpg.csv', header=None) df.columns = ['mpg','cylinders','displacement','horsepower','weight', 'acceleration','model year','origin','name'] df.info() df[['horsepower', 'name']].describe(include='all') ###Output _____no_output_____ ###Markdown replace() ###Code df[['horsepower']].value_counts() # df['horsepower'].astype('float') df['horsepower'].unique() df['horsepower'].replace() df_horsepower=df['horsepower'].replace(to_replace='?', value=None, inplace=False) df_horsepower.unique() df_horsepower = df_horsepower.astype('float') df_horsepower.mean() df['horsepower'] = 
df_horsepower.fillna(104) df.info() df['name'].unique df['origin'] df.head() ###Output _____no_output_____ ###Markdown 분류와 연속 컬럼 구분 ###Code df.head(8) ###Output _____no_output_____ ###Markdown check columns- 연속형 : displacement, horesepower, weight, acceleration- 분류형 : model year, name, mpg, cylinders, origin ###Code df['mpg'].describe() df['mpg'].value_counts() df['cylinders'].describe() df['cylinders'].value_counts() df['origin'].value_counts() ###Output _____no_output_____ ###Markdown 정규화 단계 ###Code Y = df['mpg'] X_continue = df[['displacement', 'horsepower', 'weight', 'acceleration']] X_category = df[['model year', 'cylinders', 'origin']] from sklearn import preprocessing scaler = preprocessing.StandardScaler() type(scaler) scaler.fit(X_continue) X = scaler.transform(X_continue) from sklearn.linear_model import LinearRegression lr = LinearRegression() type(lr) lr.fit(X, Y) lr.score(X,Y) ###Output _____no_output_____ ###Markdown 데이터 로딩 ###Code import pandas as pd df = pd.read_csv('./auto-mpg.csv', header=None) df.columns = ['mpg','cylinders','displacement','horsepower','weight', 'acceleration','model year','origin','name'] df.info() df[['horsepower','name']].describe(include='all') ###Output _____no_output_____ ###Markdown replace ###Code df['horsepower'].value_counts() #df['horsepower'].astype('float') df['horsepower'].unique() df_horsepower = df['horsepower'].replace(to_replace='?',value=None,inplace=False) df_horsepower.unique() #type(df['horsepower']) df_horsepower = df_horsepower.astype('float') df_horsepower.mean() df['horsepower'] = df_horsepower.fillna(104) df.info() df['name'].unique() df.head() ###Output _____no_output_____ ###Markdown 분류와 연속 컬럼 구분 ###Code df.head(8) ###Output _____no_output_____ ###Markdown check columns- 연속형 : displacement, horsepower, weight, acceleration, mpg- 분류형 : model year , name, cylinders, origin- 중립형 : ~~mpg~~, ~~cylinders~~, ~~origin~~ ###Code df['mpg'].describe(include='all') df['mpg'].value_counts() df['cylinders'].value_counts() df['origin'].value_counts() df['name'].value_counts() ###Output _____no_output_____ ###Markdown 정규화 단계 ###Code Y = df['mpg'] X_continus = df[['displacement', 'horsepower', 'weight', 'acceleration']] X_category = df[['model year','cylinders','origin']] from sklearn import preprocessing scaler = preprocessing.StandardScaler() type(scaler) scaler.fit(X_continus) X = scaler.transform(X_continus) from sklearn.linear_model import LinearRegression lr = LinearRegression() type(lr) lr.fit(X,Y) lr.score(X,Y) df.head(1) ###Output _____no_output_____ ###Markdown X_continus = df[['displacement', 'horsepower', 'weight', 'acceleration']] ###Code x_customer = scaler.transform([[307.0,130.0,3504.0,12.0]]) x_customer.shape lr.predict(x_customer) ###Output _____no_output_____ ###Markdown pickle ###Code import pickle pickle.dump(lr, open('./autompg_lr.pkl','wb')) !ls -l ./saves/autompg_lr.pkl pickle.load(open('./saves/autompg_lr.pkl','rb')) pickle.dump(scaler, open('./autompg_standardscaler.pkl','wb')) ###Output _____no_output_____
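###Markdown Hedged aside: the scaling, one-hot encoding and linear regression steps above can also be packaged in a single scikit-learn `Pipeline`/`ColumnTransformer`. The sketch below does this on a tiny made-up frame standing in for `df`; it is not the real auto-mpg data and is not claimed to reproduce the scores above. ###Code
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression

toy = pd.DataFrame({
    'displacement': [307.0, 350.0, 318.0, 304.0],
    'horsepower':   [130.0, 165.0, 150.0, 150.0],
    'weight':       [3504.0, 3693.0, 3436.0, 3433.0],
    'acceleration': [12.0, 11.5, 11.0, 12.0],
    'cylinders':    [8, 8, 8, 8],
    'origin':       [1, 1, 1, 1],
    'mpg':          [18.0, 15.0, 18.0, 16.0],
})

numeric = ['displacement', 'horsepower', 'weight', 'acceleration']
categorical = ['cylinders', 'origin']

pre = ColumnTransformer([
    ('num', StandardScaler(), numeric),
    ('cat', OneHotEncoder(handle_unknown='ignore'), categorical),
])
pipe = Pipeline([('pre', pre), ('lr', LinearRegression())])
pipe.fit(toy[numeric + categorical], toy['mpg'])
print('R^2 on the toy data:', pipe.score(toy[numeric + categorical], toy['mpg']))
###Output
_____no_output_____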
examples/reliability_analysis_example.ipynb
###Markdown Design Reliability Analysis using `duqo`TLDR; Following is an introduction to reliability analysis. Proceed to next cell if you are already familiar with it.Before conducting design reliability analysis, let us define it first. In engineering design, there are often various criteria to describe design safety and failure. One possible formulation is using limit state functions. A limit state function $g(x): \mathbb{R}^n \rightarrow \mathbb{R}$ is similar to a constraint, in that it defines the safe region using a limit state value, e.g. a design is safe, if $g(x) \geq 0$. Now consider that $x$ is a draw from random variable (for example due to tolerance fields of the design variables or epistemic uncertainies such as measurement errors). Let us denote this as $x \sim F_X$, where $F_X: \mathbb{R}^n \rightarrow \mathbb{R}$ is the joint CDF. In this case, $G = g(X)$ is also a random variable. Despite the deterministic relationship $g(\cdot)$, the distribution $F_G$ of $G$ may be unknown in a practical sense (intractable, to expensive to compute, etc.) Thus, the condition $g(x) \geq 0$ is now an event $\mathcal{F}: G \geq 0$, which is often called *failure*, with a probability $P(\mathcal{F}) = P(G < 0)$. Reliability analysis aims to estimate this probability. If $F_G$ is known, the failure probability can be computed directly. Since we assume it to be unknown but the input uncertainties $F_X$ to be known, the following multi-dimensional integral has to be solved for computing $$ P(\mathcal{F}) = \int \dots \int_{g(x_1, \dots, x_n) < 0} f_X(x_1, \dots, x_n) d x_1 \dots d x_n $$where $f_X$ is the joint PDF. Note that this integral can be very burdensome to compute depending on $f_x$ and $g$ as well as the number of dimensions $n$ and the failure probability $P(\mathcal{F})$ depending on the method. Thus, various methods for reliability analysis exist.`duqo` currently implements only two of them, directional sampling and monte carlo simulation. More methods such as subset simulation, importance sampling etc. are planned if there is community interest. Please make an issue (after checking that it does not already exist). ###Code # Imports from time import time import numpy as np import matplotlib.pyplot as plt from duqo.stoch import UniVar, MultiVar # We will use these to define the input CDF F_x from duqo.proba import MC, DS # These are the methods that we will test integrator_classes = [MC, DS] # Define two limit state functions def himmblau(x, gamma=1): if x.ndim < 2: x = x.reshape((1, -1)) res = ((x[:, 0] ** 2 + x[:, 1]) / 1.81 - 11) ** 2 res += ((x[:, 0] + x[:, 1] ** 2) / 1.81 - 7) ** 2 return res - 45 * gamma def tricky_cos(x): if x.ndim < 2: x = x.reshape((1, -1)) alpha = 1.475 return 7. - np.sum((x / alpha) ** 2 - 5 * np.cos(2 * np.pi * x / alpha), axis=1) # Plot over [-6, 6] for both variables def surface_plot(bounds, fun): grid = np.linspace(bounds[0], bounds[1], 64) X, Y = np.meshgrid(grid, grid) # Flatten to shape = (samples, dims) inps = np.c_[X.ravel(), Y.ravel()] Z = fun(inps).reshape(X.shape) fig, ax = plt.subplots(figsize=(12, 7)) ax.contourf(X, Y, Z) ax.contour(X, Y, Z, colors="r", levels=[0]) # The zero line ax.set_title(fun.__name__) return ax plot_bounds = [-6, 6] _ = surface_plot(plot_bounds, himmblau) _ = surface_plot(plot_bounds, tricky_cos) # Now define the input uncertainty # For simplicity, let us use independent standard normal variables. 
Other distributions as well as linear dependence # is also supported multivar_stdnorm = MultiVar([UniVar("norm", mean=0, std=1) for _ in range(2)]) # Now we are ready to rock some probabilities! # Test Himmblau constraints = [himmblau] for integrator_cls in integrator_classes: integrator = integrator_cls(multivar_stdnorm, constraints) t0 = time() fail_prob, _, _, _ = integrator.calc_fail_prob(prob_tol=1e-4, post_proc=False, verbose=0, converge=False) print(integrator_cls.__name__, f": {fail_prob:.2e} ({time() - t0:4f} s.)") # MC gets inefficient for small failure probabilities. for integrator_cls in integrator_classes: integrator = integrator_cls(multivar_stdnorm, constraints) t0 = time() # prob_tol is an estimation of the smallest failure probability of interest fail_prob, _, _, _ = integrator.calc_fail_prob(prob_tol=1e-6, post_proc=False, verbose=0, converge=False) print(integrator_cls.__name__, f": {fail_prob:.2e} ({time() - t0:4f} s.)") # Test Tricky constraints = [tricky_cos] for integrator_cls in integrator_classes: integrator = integrator_cls(multivar_stdnorm, constraints) t0 = time() fail_prob, _, _, _ = integrator.calc_fail_prob(post_proc=False, verbose=0) print(integrator_cls.__name__, f": {fail_prob:.2e} ({time() - t0:4f} s.)") # The difference is quite large due to original formulation of DS which is the default configuration. # To solve such highly multi modal problems, multi_region should be set to True, which inevitably will increase # the inference duration integrator = DS(multivar_stdnorm, constraints) t0 = time() fail_prob, _, _, _ = integrator.calc_fail_prob(post_proc=False, verbose=0, converge=False, multi_region=True) print(integrator_cls.__name__, f"() : {fail_prob:.2e} ({time() - t0:4f} s.)") # Let us visualize the difference using a plot ds1 = DS(multivar_stdnorm, constraints) ds2 = DS(multivar_stdnorm, constraints) _ = ds1.calc_fail_prob(post_proc=True, verbose=0, multi_region=True, num_parallel=1) _ = ds2.calc_fail_prob(post_proc=True, verbose=0, multi_region=False, num_parallel=1) ax = surface_plot(plot_bounds, tricky_cos) ax.scatter(ds1.x_lsf[:, 0], ds1.x_lsf[:, 1], color="k", label="multi_region=True") ax.scatter(ds2.x_lsf[:, 0], ds2.x_lsf[:, 1], color="w", label="multi_region=False") _ = ax.legend() # As you can see, multi_region=False only considers the first failure region in each direction, #while multi_region=True considers all of them within the search radius ###Output _____no_output_____ ###Markdown Extra: System reliabilityIn system reliability, the failure event is sometimes defined as $P(\mathcal{F}) = P(\min(\{g_{k}(x); \; k \in [1, n_g\}))$, where $n_g$ is the number of limit state conditions. This kind of probability can also be computed with `duqo` by passing a list of functions. ###Code # Test Both constraints = [tricky_cos, himmblau] for integrator_cls in [MC, DS]: integrator = integrator_cls(multivar_stdnorm, constraints) t0 = time() fail_prob, _, _, _ = integrator.calc_fail_prob(post_proc=False, verbose=0, multi_region=True) print(integrator_cls.__name__, f": {fail_prob:.2e} ({time() - t0:4f} s.)") # The only caveat is now you only get the system failure probability. Thus, reevaluation with indiviudal lsfs are # currently required to acquire individual failure probabilities ###Output MC : 1.43e-01 (0.014002 s.) DS : 1.39e-01 (1.541032 s.)
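###Markdown Hedged aside: a crude, library-free Monte Carlo estimate is a useful cross-check of the `duqo` results for the `himmblau` limit state (failure = g(x) < 0 under 2-D standard normal inputs). The sample size below is an arbitrary choice and the estimate will be noisy for very small failure probabilities. ###Code
import numpy as np

def himmblau(x, gamma=1):
    if x.ndim < 2:
        x = x.reshape((1, -1))
    res = ((x[:, 0] ** 2 + x[:, 1]) / 1.81 - 11) ** 2
    res += ((x[:, 0] + x[:, 1] ** 2) / 1.81 - 7) ** 2
    return res - 45 * gamma

rng = np.random.default_rng(0)
n = 1_000_000
x = rng.standard_normal((n, 2))
p_fail = np.mean(himmblau(x) < 0)
print(f"crude MC estimate: {p_fail:.2e} +/- {np.sqrt(p_fail * (1 - p_fail) / n):.1e}")
###Output
_____no_output_____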
week_5/practice_reinforce_pytorch_gpu.ipynb
###Markdown REINFORCE in PyTorchJust like we did before for Q-learning, this time we'll design a PyTorch network to learn `CartPole-v0` via policy gradient (REINFORCE).Most of the code in this notebook is taken from approximate Q-learning, so you'll find it more or less familiar and even simpler. ###Code import sys, os if 'google.colab' in sys.modules and not os.path.exists('.setup_complete'): !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/master/setup_colab.sh -O- | bash !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/grading.py -O ../grading.py !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/week5_policy_based/submit.py !touch .setup_complete # This code creates a virtual display to draw game images on. # It will have no effect if your machine has a monitor. if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0: !bash ../xvfb start os.environ['DISPLAY'] = ':1' import gym import numpy as np import matplotlib.pyplot as plt %matplotlib inline ###Output _____no_output_____ ###Markdown A caveat: with some versions of `pyglet`, the following cell may crash with `NameError: name 'base' is not defined`. The corresponding bug report is [here](https://github.com/pyglet/pyglet/issues/134). If you see this error, try restarting the kernel. ###Code env = gym.make("CartPole-v0") # gym compatibility: unwrap TimeLimit if hasattr(env, '_max_episode_steps'): env = env.env env.reset() n_actions = env.action_space.n state_dim = env.observation_space.shape plt.imshow(env.render("rgb_array")) ###Output _____no_output_____ ###Markdown Building the network for REINFORCE For REINFORCE algorithm, we'll need a model that predicts action probabilities given states.For numerical stability, please __do not include the softmax layer into your network architecture__.We'll use softmax or log-softmax where appropriate. ###Code import torch import torch.nn as nn # Build a simple neural network that predicts policy logits. # Keep it simple: CartPole isn't worth deep architectures. model = nn.Sequential( # <YOUR CODE: define a neural network that predicts policy logits> nn.Linear(in_features=4, out_features=256, bias=True), nn.ReLU(), nn.Linear(in_features=256, out_features=128, bias=True), nn.ReLU(), nn.Linear(in_features=128, out_features=n_actions, bias=True) ) cuda = torch.device("cuda" if torch.cuda.is_available() else "cpu") if cuda == 'cpu': print('cpu') else: n_gpu = torch.cuda.device_count() print(torch.cuda.get_device_name(0)) model.cuda() ###Output _____no_output_____ ###Markdown Predict function Note: output value of this function is not a torch tensor, it's a numpy array.So, here gradient calculation is not needed.Use [no_grad](https://pytorch.org/docs/stable/autograd.htmltorch.autograd.no_grad)to suppress gradient calculation.Also, `.detach()` (or legacy `.data` property) can be used instead, but there is a difference:With `.detach()` computational graph is built but then disconnected from a particular tensor,so `.detach()` should be used if that graph is needed for backprop via some other (not detached) tensor;In contrast, no graph is built by any operation in `no_grad()` context, thus it's preferable here. ###Code def predict_probs(states): """ Predict action probabilities given states. 
:param states: numpy array of shape [batch, state_shape] :returns: numpy array of shape [batch, n_actions] """ # convert states, compute logits, use softmax to get probability "<YOUR CODE>" n = states.shape[0] states = torch.tensor(states, dtype=torch.float32, device=cuda) qvalues = model(states) qvalues_proba = nn.functional.softmax(qvalues, dim=1) return qvalues_proba.cpu().detach().numpy() print(env.reset()) test_states = np.array([env.reset() for _ in range(5)]) test_probas = predict_probs(test_states) assert isinstance( test_probas, np.ndarray), "you must return np array and not %s" % type(test_probas) assert tuple(test_probas.shape) == ( test_states.shape[0], env.action_space.n), "wrong output shape: {}".format(np.shape(test_probas)) assert np.allclose(np.sum(test_probas, axis=1), 1), "probabilities do not sum to 1" ###Output _____no_output_____ ###Markdown Play the gameWe can now use our newly built agent to play the game. ###Code def generate_session(env, t_max=1000): """ play a full session with REINFORCE agent and train at the session end. returns sequences of states, actions andrewards """ # arrays to record session states, actions, rewards = [], [], [] s = env.reset() for t in range(t_max): # action probabilities array aka pi(a|s) action_probs = predict_probs(np.array([s]))[0] # Sample action with given probabilities. "<YOUR CODE>" a = np.random.choice((0, 1), p=action_probs) new_s, r, done, info = env.step(a) # record session history to train later states.append(s) actions.append(a) rewards.append(r) s = new_s if done: break return states, actions, rewards # test it states, actions, rewards = generate_session(env) ###Output _____no_output_____ ###Markdown Computing cumulative rewards$$\begin{align*}G_t &= r_t + \gamma r_{t + 1} + \gamma^2 r_{t + 2} + \ldots \\&= \sum_{i = t}^T \gamma^{i - t} r_i \\&= r_t + \gamma * G_{t + 1}\end{align*}$$ ###Code def get_cumulative_rewards(rewards, # rewards at each step gamma=0.99 # discount for reward ): """ take a list of immediate rewards r(s,a) for the whole session compute cumulative returns (a.k.a. G(s,a) in Sutton '16) G_t = r_t + gamma*r_{t+1} + gamma^2*r_{t+2} + ... The simple way to compute cumulative rewards is to iterate from last to first time tick and compute G_t = r_t + gamma*G_{t+1} recurrently You must return an array/list of cumulative rewards with as many elements as in the initial rewards. """ # <YOUR CODE> l = len(rewards) G = list(range(l)) G[l-1] = rewards[-1] for t in reversed(range(l-1)): G[t] = rewards[t] + gamma * G[t+1] return G # <YOUR CODE: array of cumulative rewards> get_cumulative_rewards(rewards) assert len(get_cumulative_rewards(list(range(100)))) == 100 assert np.allclose(get_cumulative_rewards([0, 0, 1, 0, 0, 1, 0], gamma=0.9), [ 1.40049, 1.5561, 1.729, 0.81, 0.9, 1.0, 0.0]) assert np.allclose(get_cumulative_rewards( [0, 0, 1, -2, 3, -4, 0], gamma=0.5), [0.0625, 0.125, 0.25, -1.5, 1.0, -4.0, 0.0]) assert np.allclose(get_cumulative_rewards( [0, 0, 1, 2, 3, 4, 0], gamma=0), [0, 0, 1, 2, 3, 4, 0]) print("looks good!") ###Output looks good! ###Markdown Loss function and updatesWe now need to define objective and update over policy gradient.Our objective function is$$ J \approx { 1 \over N } \sum_{s_i,a_i} G(s_i,a_i) $$REINFORCE defines a way to compute the gradient of the expected reward with respect to policy parameters. 
The formula is as follows:$$ \nabla_\theta \hat J(\theta) \approx { 1 \over N } \sum_{s_i, a_i} \nabla_\theta \log \pi_\theta (a_i \mid s_i) \cdot G_t(s_i, a_i) $$We can abuse PyTorch's capabilities for automatic differentiation by defining our objective function as follows:$$ \hat J(\theta) \approx { 1 \over N } \sum_{s_i, a_i} \log \pi_\theta (a_i \mid s_i) \cdot G_t(s_i, a_i) $$When you compute the gradient of that function with respect to network weights $\theta$, it will become exactly the policy gradient. ###Code def to_one_hot(y_tensor, ndims): """ helper: take an integer vector and convert it to 1-hot matrix. """ y_tensor = y_tensor.type(torch.cuda. LongTensor).view(-1, 1) y_one_hot = torch.zeros( y_tensor.size()[0], ndims, device=cuda).scatter_(1, y_tensor, 1) return y_one_hot # Your code: define optimizers optimizer = torch.optim.Adam(model.parameters(), 1e-3) def train_on_session(states, actions, rewards, gamma=0.99, entropy_coef=1e-2): """ Takes a sequence of states, actions and rewards produced by generate_session. Updates agent's weights by following the policy gradient above. Please use Adam optimizer with default parameters. Confer: https://fosterelli.co/entropy-loss-for-reinforcement-learning """ # cast everything into torch tensors states = torch.tensor(states, dtype=torch.float32, device=cuda) actions = torch.tensor(actions, dtype=torch.int32, device=cuda) cumulative_returns = np.array(get_cumulative_rewards(rewards, gamma)) cumulative_returns = torch.tensor(cumulative_returns, dtype=torch.float32, device=cuda) # predict logits, probas and log-probas using an agent. logits = model(states) probs = nn.functional.softmax(logits, -1) log_probs = nn.functional.log_softmax(logits, -1) assert all(isinstance(v, torch.Tensor) for v in [logits, probs, log_probs]), \ "please use compute using torch tensors and don't use predict_probs function" # select log-probabilities for chosen actions, log pi(a_i|s_i) log_probs_for_actions = torch.sum( log_probs * to_one_hot(actions, env.action_space.n), dim=1) # Compute loss here. Don't forgen entropy regularization with `entropy_coef` "<YOUR CODE>" entropy = -torch.sum(log_probs * probs).to(cuda) #print(entropy) "<YOUR CODE>" J_hat = torch.mean(log_probs_for_actions * cumulative_returns).to(cuda) # maximize J value, minimize entropy loss = - J_hat + entropy_coef * entropy # this implementation does not use baseline # Gradient descent step "<YOUR CODE>" optimizer.zero_grad() loss.backward() optimizer.step() # technical: return session rewards to print them later return np.sum(rewards) ###Output _____no_output_____ ###Markdown The actual training ###Code for i in range(100): rewards = [train_on_session(*generate_session(env)) for _ in range(100)] # generate new sessions print("mean reward:%.3f" % (np.mean(rewards))) if np.mean(rewards) > 300: print("You Win!") # but you can train even further break ###Output mean reward:46.640 mean reward:86.650 mean reward:52.640 mean reward:142.490 mean reward:188.770 mean reward:375.310 You Win! ###Markdown Results & video ###Code # Record sessions import gym.wrappers with gym.wrappers.Monitor(gym.make("CartPole-v0"), directory="videos", force=True) as env_monitor: sessions = [generate_session(env_monitor) for _ in range(100)] # Show video. This may not work in some setups. If it doesn't # work for you, you can download the videos and view them locally. 
from pathlib import Path from IPython.display import HTML video_names = sorted([s for s in Path('videos').iterdir() if s.suffix == '.mp4']) HTML(""" <video width="640" height="480" controls> <source src="{}" type="video/mp4"> </video> """.format(video_names[-1])) # You can also try other indices from submit import submit_cartpole submit_cartpole(generate_session, '[email protected]', 'aywdIFBlmbdYJSKd') ###Output Your average reward is 317.9 over 100 episodes Submitted to Coursera platform. See results on assignment page!
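###Markdown A note on the loss above: as the comment in `train_on_session` says, this implementation does not use a baseline. The sketch below is only a rough illustration (not part of the assignment) of the usual variance-reduction variant: subtract a baseline, here simply the mean return, from G_t before weighting the log-probabilities. Every name in it (`toy_policy`, the random states, actions and returns) is a made-up stand-in, and it runs on CPU. ###Code # rough sketch only: baseline-subtracted REINFORCE surrogate on fake data
import torch
import torch.nn as nn

torch.manual_seed(0)
toy_policy = nn.Linear(4, 2)             # stand-in for a policy network
toy_states = torch.randn(8, 4)           # fake batch of 4-dimensional states
toy_actions = torch.randint(0, 2, (8,))  # fake actions
toy_returns = torch.randn(8)             # fake cumulative returns G_t

log_probs = nn.functional.log_softmax(toy_policy(toy_states), dim=-1)
log_probs_for_actions = log_probs[torch.arange(8), toy_actions]

# a return baseline (here: the batch mean) is the standard variance-reduction trick
advantages = toy_returns - toy_returns.mean()
loss = -torch.mean(log_probs_for_actions * advantages)
loss.backward()
print(loss.item()) ###Output _____no_output_____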
FJLTDemo.ipynb
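###Markdown The cells in this notebook call `time`, NumPy, SciPy (`sparse`, `io`, a `dct`), Matplotlib and scikit-learn's random-projection helpers. A plausible import cell is given below; the exact module choices (e.g. `scipy.fftpack` vs. `scipy.fft` for `dct`) are inferred from the calls and may differ from the original environment. ###Code # imports assumed by the cells below (inferred from the function calls used)
import time
import numpy as np
import scipy.io as sio
from scipy import sparse
from scipy.fftpack import dct  # scipy.fft.dct would work as well
from matplotlib import pyplot as plt
from sklearn.random_projection import GaussianRandomProjection, johnson_lindenstrauss_min_dim ###Output _____no_output_____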
###Markdown In a nutshell, `fjlt_sfd` works best and it needs only numpy. Just use the code below as needed. Test different embedding methods ###Code def calc_dist(x1, x2): """ l2-distance between points x1 and x2. """ return np.linalg.norm(x1 - x2) def eval_dist(A, A_embedded): """ Calculate l2 distances between points (column vectors) for the original matrix A and the embedded matrix A_embedded. Calculate only for the pairs of points of adjacent indices (it's simple this way). """ N = A.shape[1] dist = np.zeros(N - 1) dist_embedded = np.zeros(N - 1) for i in range(N - 1): dist[i] = calc_dist(A[:, i], A[:, i + 1]) dist_embedded[i] = calc_dist(A_embedded[:, i], A_embedded[:, i + 1]) distortion = (dist - dist_embedded) / dist return distortion def test_embedding_method(A, k, method): """ Apply an embedding method. Check time and the distortion in l2 distances between points. """ start_time = time.time() A_embedded = method(A, k) print('Time: {} s'.format(time.time() - start_time)) distortion = eval_dist(A, A_embedded) dist_abs = np.abs(distortion) print('Mean absolute distortion: {}, std = {}'.format(dist_abs.mean(), dist_abs.std())) print('--------------------------') return distortion def gaussian_random_projection(A, k): """ Gaussian random projection from sklearn. """ transformer = GaussianRandomProjection(n_components=k) A_embedded = transformer.fit_transform(A.T).T return A_embedded # def fjlt_mathen(A, k): # """ # FJLT implementation taken from https://github.com/michaelmathen/FJLT. # Require the Fast Hadamard Transform package from https://github.com/nbarbey/fht. # Note: # I don't understand what q is, but apparently higher q = less distortion and more time. # """ # import fjlt # return fjlt.fjlt(A, k, q=0.0001) def fjlt_sfd(A, k): """ A variant of FJLT. See the following resources: - The review section (page 3) of https://arxiv.org/abs/1909.04801 - Page 1 of https://www.sketchingbigdata.org/fall17/lec/lec9.pdf Note: I name it sfd because the matrices are called S(ample), F(ourier transform), D(iagonal). """ d = A.shape[0] sign_vector = np.random.randint(0, 2, size=(d, 1)) * 2 - 1 idx = np.zeros(k, dtype=int) idx[1:] = np.random.choice(d - 1, k - 1, replace=False) + 1 DA = sign_vector * A FDA = np.fft.fft(DA, axis=0, norm='ortho') A_embedded = np.sqrt(d / k) * FDA[idx] return A_embedded def fjlt_dct(A, k): """ Like fjlt_sfd, but use DCT instead of FFT. """ d = A.shape[0] sign_vector = np.random.randint(0, 2, size=(d, 1)) * 2 - 1 idx = np.zeros(k, dtype=int) idx[1:] = np.random.choice(d - 1, k - 1, replace=False) + 1 DA = sign_vector * A FDA = dct(DA, axis=0, norm='ortho') A_embedded = np.sqrt(d / k) * FDA[idx] return A_embedded def sjlt_dense_rep(A, k): """ Yin et al. 2020 ESE: Extremely Sparse JL Transform. Note: The description in the paper embed each row of the data matrix. For comparison with other methods, I will embed columns instead. I use dense matrix representation here for prototyping. """ d = A.shape[0] h = np.random.choice(d, size=k) sigma = np.random.choice([-1, 1], size=d) R = np.zeros((d, k)) for j in range(k): R[h[j], j] = sigma[h[j]] A_sjlt = (np.sqrt(d / k) * A.T @ R).T return A_sjlt # Parameters d = 10000 # Dimension of each point N = 100 # Number of points k = johnson_lindenstrauss_min_dim(d, eps=0.15) # Dimension to embed to print('Embed {} dim -> {} dim'.format(d, k)) # Generate data matrix A. Each data point is a column vector. 
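# (shape (d, N): d = ambient dimension of each point, N = number of points to embed)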
A = np.random.rand(d, N) err_gauss = test_embedding_method(A, k, gaussian_random_projection) # err_mathen = test_embedding_method(A, k, fjlt_mathen) err_sfd = test_embedding_method(A, k, fjlt_sfd) err_dct = test_embedding_method(A, k, fjlt_dct) err_sjl = test_embedding_method(A, k, sjlt_dense_rep) plt.plot(err_gauss * 100, label='Gaussian random projection') # plt.plot(err_mathen * 100, label='Original FJLT (from Mathen)') # I didn't tune q carefully plt.plot(err_sfd * 100, label='Faster FJLT') plt.plot(err_dct * 100, label='Faster FJLT with DCT') plt.plot(err_sjl * 100, label='Sparse JLT') plt.xlabel('Index of comparison') plt.ylabel('Distortion (%)') plt.legend() plt.show() ###Output _____no_output_____ ###Markdown Find compression-error tradeoff of different methods ###Code def run_many_k(A, k_list, method): time_hist = np.zeros(len(k_list)) err_hist = np.zeros(len(k_list)) for i, k in enumerate(k_list): start_time = time.time() A_embedded = method(A, k) time_hist[i] = time.time() - start_time distortion = eval_dist(A, A_embedded) err_hist[i] = np.abs(distortion).mean() return time_hist, err_hist k_list = np.arange(1000, 10001, 1000) # [1000, 2000, ..., 10000] # time_mathen, errhist_mathen = run_many_k(A, k_list, fjlt_mathen) time_sfd, errhist_sfd = run_many_k(A, k_list, fjlt_sfd) time_sjl, errhist_sjl = run_many_k(A, k_list, sjlt_dense_rep) # plt.plot(k_list, time_mathen * 1e3, label='Mathen FJLT') plt.plot(k_list, time_sfd * 1e3, label='Faster FJLT') plt.plot(k_list, time_sjl * 1e3, label='Sparse JLT') plt.xlabel('Dimension of embedding') plt.ylabel('Time (ms)') plt.legend() plt.show() # plt.plot(k_list, errhist_mathen * 100, label='Mathen FJLT') plt.plot(k_list, errhist_sfd * 100, label='Faster FJLT') plt.plot(k_list, errhist_sjl * 100, label='Sparse JLT') plt.xlabel('Dimension of embedding') plt.ylabel('Absolute distortion (%)') plt.legend() plt.show() ###Output _____no_output_____ ###Markdown Sparse JL Transform ###Code def eval_dist_sparse(A, A_embedded, num_comparison=100): """ Calculate l2 distances between points (row vectors) for the original sparse matrix A and the sparse embedded matrix A_embedded. Calculate only for the pairs of points of adjacent indices (it's simple this way). """ assert A.shape[0] >= num_comparison dist = np.zeros(num_comparison - 1) dist_embedded = np.zeros(num_comparison - 1) for i in range(num_comparison - 1): dist[i] = (A[i] - A[i + 1]).power(2).sum() dist_embedded[i] = (A_embedded[i] - A_embedded[i + 1]).power(2).sum() distortion = (dist - dist_embedded) / dist return distortion, dist, dist_embedded def sjlt_sparse_rep(A, k): """ Yin et al. 2020 ESE: Extremely Sparse JL Transform. Implemented for scipy.sparse representation. """ d = A.shape[1] h = np.random.choice(d, size=k) sigma = np.random.choice([-1, 1], size=d) R = sparse.lil_matrix((d, k)) for j in range(k): R[h[j], j] = sigma[h[j]] R = R.tocsc() A_sjlt = (A.dot(R)).multiply(np.sqrt(d / k)) return A_sjlt # Parameters d = 10000 # Dimension of each point N = 100 # Number of points k = johnson_lindenstrauss_min_dim(d, eps=0.15) # Dimension to embed to print('Embed {} dim -> {} dim'.format(d, k)) # Generate data matrix A. Each data point is a row vector. 
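# (shape (N, d): points are rows here, since sjlt_sparse_rep right-multiplies A by a (d, k) projection matrix)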
A = sparse.csc_matrix(np.random.rand(N, d)) A_sjlt = sjlt_sparse_rep(A, k) distortion, dist, dist_embedded = eval_dist_sparse(A, A_sjlt) fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 4)) ax[0].plot(dist, label='Original distance') ax[0].plot(dist_embedded, label='Distance in embedded space') ax[0].legend() ax[1].plot(distortion * 100) ax[1].set_ylabel('Distortion (%)') fig.show() ###Output _____no_output_____ ###Markdown Test Sparse JLT with test matrices ###Code # File names A_dir = 'matrices/Trefethen_32768.mat' Mstar_dir = 'matrices/Trefethen_SSAI_32768.mat' # Load matrices A = sio.loadmat(A_dir, squeeze_me=True)['tref2'] Mstar = sio.loadmat(Mstar_dir, squeeze_me=True)['Mst'] # Parameters d = A.shape[0] k = johnson_lindenstrauss_min_dim(d, eps=0.1) print('Embed {} dim -> {} dim'.format(d, k)) # Calculate S Id = sparse.eye(d).tocsc() MA = Mstar.dot(A) S = Id.multiply(2) - MA - MA.transpose() # Project S SOmega = sjlt_sparse_rep(S, k) distortion, dist, dist_embedded = eval_dist_sparse(S, SOmega, num_comparison=100) fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 4)) ax[0].plot(dist, label='Original distance') ax[0].plot(dist_embedded, label='Distance in embedded space') ax[0].legend() ax[1].plot(distortion * 100) ax[1].set_ylabel('Distortion (%)') fig.show() ###Output _____no_output_____
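###Markdown A quick, self-contained sanity check of the sign-flip -> orthonormal FFT -> row-subsampling construction behind `fjlt_sfd`: embed two random vectors and compare their distance before and after. The dimensions and seed below are arbitrary choices, not values from the experiments above. ###Code # rough standalone check of the S(ample)-F(ourier)-D(iagonal) idea on two vectors
import numpy as np

rng = np.random.default_rng(0)
d, k = 8192, 1024
x, y = rng.standard_normal(d), rng.standard_normal(d)

signs = rng.choice([-1.0, 1.0], size=d)      # D: random sign flips
rows = rng.choice(d, size=k, replace=False)  # S: k sampled coordinates

def embed(v):
    # the orthonormal FFT preserves the norm; keeping k coordinates and
    # rescaling by sqrt(d / k) estimates it from a random subset
    return np.sqrt(d / k) * np.fft.fft(signs * v, norm='ortho')[rows]

original = np.linalg.norm(x - y)
embedded = np.linalg.norm(embed(x) - embed(y))
print(original, embedded, abs(original - embedded) / original) ###Output _____no_output_____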
SOM/Self-Organising-Map.ipynb
###Markdown Self-Organising Maps ###Code from jupyter_utils import progress from __future__ import division import numpy as np from matplotlib import pyplot as plt from matplotlib import patches as patches %matplotlib inline # 8 colours as initial test set #raw_data = np.array([[1, 0, 0], [0, 1, 0], # [0, 0.5, 0.25], [0, 0, 1], # [0, 0, 0.5], [1, 1, 0.2], # [1, 0.4, 0.25], [1, 0, 1]]).T * 255 # or use random colours raw_data = np.random.randint(0, 255, (3, 1000)) ###Output _____no_output_____ ###Markdown SOM Setup ###Code network_dimensions = np.array([30, 30]) n_iterations = 5000 init_learning_rate = 0.01 normalise_data = True # if True, assume all data on common scale # if False, normalise to [0 1] range along each column normalise_by_column = False # establish variables based on data m = raw_data.shape[0] n = raw_data.shape[1] # initial neighbourhood radius init_radius = max(network_dimensions[0], network_dimensions[1]) / 2 # radius decay parameter time_constant = n_iterations / np.log(init_radius) data = raw_data # check if data needs to be normalised if normalise_data: if normalise_by_column: # normalise along each column col_maxes = raw_data.max(axis=0) data = raw_data / col_maxes[np.newaxis, :] else: # normalise entire dataset data = raw_data / data.max() # setup random weights between 0 and 1 # weight matrix needs to be one m-dimensional vector for each neuron in the SOM net = np.random.random((network_dimensions[0], network_dimensions[1], m)) ###Output _____no_output_____ ###Markdown SOM Function Definitions ###Code def find_bmu(t, net, m): """ Find the best matching unit for a given vector, t, in the SOM Returns: a (bmu, bmu_idx) tuple where bmu is the high-dimensional BMU and bmu_idx is the index of this vector in the SOM """ bmu_idx = np.array([0, 0]) # set the initial minimum distance to a huge number min_dist = np.iinfo(np.int).max # calculate the high-dimensional distance between each neuron and the input for x in range(net.shape[0]): for y in range(net.shape[1]): w = net[x, y, :].reshape(m, 1) # don't bother with actual Euclidean distance, to avoid expensive sqrt operation sq_dist = np.sum((w - t) ** 2) if sq_dist < min_dist: min_dist = sq_dist bmu_idx = np.array([x, y]) # get vector corresponding to bmu_idx bmu = net[bmu_idx[0], bmu_idx[1], :].reshape(m, 1) # return the (bmu, bmu_idx) tuple return (bmu, bmu_idx) def decay_radius(initial_radius, i, time_constant): return initial_radius * np.exp(-i / time_constant) def decay_learning_rate(initial_learning_rate, i, n_iterations): return initial_learning_rate * np.exp(-i / n_iterations) def calculate_influence(distance, radius): return np.exp(-distance / (2* (radius**2))) ###Output _____no_output_____ ###Markdown SOM Learning ###Code for i in progress(range(n_iterations),every = 1, name='Iteration'): # select a training example at random t = data[:, np.random.randint(0, n)].reshape(np.array([m, 1])) # find its Best Matching Unit bmu, bmu_idx = find_bmu(t, net, m) # decay the SOM parameters r = decay_radius(init_radius, i, time_constant) l = decay_learning_rate(init_learning_rate, i, n_iterations) # now we know the BMU, update its weight vector to move closer to input # and move its neighbours in 2-D space closer # by a factor proportional to their 2-D distance from the BMU for x in range(net.shape[0]): for y in range(net.shape[1]): w = net[x, y, :].reshape(m, 1) # get the 2-D distance (again, not the actual Euclidean distance) w_dist = np.sum((np.array([x, y]) - bmu_idx) ** 2) # if the distance is within the current 
# neighbourhood radius
if w_dist <= r**2: # calculate the degree of influence (based on the 2-D distance) influence = calculate_influence(w_dist, r) # now update the neuron's weight using the formula: # new w = old w + (learning rate * influence * delta) # where delta = input vector (t) - old w new_w = w + (l * influence * (t - w)) # commit the new weight net[x, y, :] = new_w.reshape(1, 3) ###Output _____no_output_____ ###Markdown Plot the Colour Map ###Code fig = plt.figure() # setup axes ax = fig.add_subplot(111, aspect='equal') ax.set_xlim((0, net.shape[0]+1)) ax.set_ylim((0, net.shape[1]+1)) ax.set_title('Self-Organising Map after %d iterations' % n_iterations) # plot the rectangles for x in range(1, net.shape[0] + 1): for y in range(1, net.shape[1] + 1): ax.add_patch(patches.Rectangle((x-0.5, y-0.5), 1, 1, facecolor=net[x-1,y-1,:], edgecolor='none')) plt.show() ###Output _____no_output_____
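###Markdown The nested-loop `find_bmu` above is easy to read but scales poorly with map size. A possible vectorized equivalent is sketched below (it is not wired into the training loop above); the tiny random map at the end is only a self-check. ###Code # a vectorized alternative to find_bmu (sketch only; same inputs and outputs)
import numpy as np

def find_bmu_vectorized(t, net, m):
    # squared distance from every neuron's weight vector to t, computed at once
    sq_dists = np.sum((net - t.reshape(1, 1, m)) ** 2, axis=2)
    x, y = np.unravel_index(np.argmin(sq_dists), sq_dists.shape)
    bmu = net[x, y, :].reshape(m, 1)
    return bmu, np.array([x, y])

# quick self-check on a small random map
rng = np.random.default_rng(0)
toy_net = rng.random((5, 5, 3))
toy_t = rng.random((3, 1))
print(find_bmu_vectorized(toy_t, toy_net, 3)[1]) ###Output _____no_output_____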
Python Fundamentals/Module_4_1_Python_Fundamentals.ipynb
###Markdown 2-4.1 Intro Python Working with Files4.1 **File import in Jupyter Notebooks** 4.1 **File `open(`) and `.read()`** 4.2 File Read as a list with **`.readlines()`** 4.2 File Closing to free resources with **`.close()`** 4.3 File Read a line at a time with **`.readline()`** 4.3 Remove characters using **`.strip()`** 4.4 File **`.write()`** with **`.seek()`** 4.4 File append mode ----- > Student will be able to 4.1 **Import files in Jupyter Notebooks using the curl command ** 4.1 **`open()` and `.read()` local files in memory** 4.1 **`.read()` a specific number of characters** 4.2 Use **`.readlines()`** to read data from file as a list of lines 4.2 Use **`.close`** to free system resources 4.3 Use **`.readline()`** to read data from file a line at a time 4.3 Use **`.strip()`** to remove new line characters 4.4 **`.write()`** data to a new local file4.4 Use **`.seek()`** to set file read or write location 4.4 Use file append mode &nbsp; Concepts Import Files to Jupyter[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/850e52a7-2082-4cb7-926f-54bf2527cee0/Unit2_Section4.1a-Import_Files_to_Jupyter.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/850e52a7-2082-4cb7-926f-54bf2527cee0/Unit2_Section4.1a-Import_Files_to_Jupyter.vtt","srclang":"en","kind":"subtitles","label":"english"}]) curl imports files to Jupyter session from a web addressbelow is a code using curl to import poem1.txt, the code is in a command line interface syntax> `!curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/poem1.txt -o poem1.txt` The table explains each element of the command above | code | meaning ||-----|---|| **`!`** | runs command interface supporting **curl** | | **`curl`** | enables **curl** that can download files | | **`https://raw.githubusercontent.com/...`** | is the address for data file to import | | **`-o`** | tells **`curl`** write data to a file | | ** *`poem1.txt`* ** | name **`curl`** will give the file | &nbsp; Examples ###Code # [ ] review and run example !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/poem1.txt -o poem1.txt ###Output _____no_output_____ ###Markdown &nbsp; Concepts Opening a Local File in read mode>```pythonpoem_file = open('poem1.txt', 'r') ``` [![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/07e5f863-e416-4534-a45c-df50d0d3df33/Unit2_Section4.1b-Opening_Files_Read_Mode.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/07e5f863-e416-4534-a45c-df50d0d3df33/Unit2_Section4.1b-Opening_Files_Read_Mode.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Read mode &nbsp; `'r'`| MODE | Description ||:-------:|:--------------|| **'r'** | **read only mode** || 'w' | write - overwrites file with same name || 'r+' | read and write mode || 'a' | opens for appending to end of file | `open()` creates an object that can be addressed in python code &nbsp; Examples ###Code # [ ]Run to open 'he file i' memory as poem_file 
poem_file = open("poem1.txt", 'r') # [ ] run and review code to test if open worked # should display name='poem1.txt' and no errors poem_file ###Output _____no_output_____ ###Markdown &nbsp; Task 1 import and open a local file in read mode1. **Import a list of cities using curl** a. git the list from https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/cities b. name the list cities.txt 2. **open cities.txt in read mode using a variable = cities_file** 3. **test that cities_file opened cities.txt with a print statement** ###Code # [ ] import cities.txt # [ ] open cities.txt as cities_file # [ ] test cities.txt was opened ###Output _____no_output_____ ###Markdown &nbsp; Concepts Read a file using `.read()`[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c37a03df-7d95-4339-8fec-8f199b747a08/Unit2_Section4.1c-Reading_Files.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c37a03df-7d95-4339-8fec-8f199b747a08/Unit2_Section4.1c-Reading_Files.vtt","srclang":"en","kind":"subtitles","label":"english"}]) reading text```pythonpoem_contents = poem_file.read()``` `.read()` loads the content of the file into memory as a string, including formatting such as new line (`\n`) &nbsp; Examples examples expect that the cells that import and open of poem1.txt has been run without a read()Run the cells that import and open poem1.txt ###Code # [ ] review and run example poem_contents = poem_file.read() # [ ] review and run example # shows the file as a string with formatting characters such as "\n", output should be non-blank poem_contents # [ ] review and run example # since .read() loaded the file as a string it can be printed print(poem_contents) ###Output _____no_output_____ ###Markdown &nbsp; Task 2 read a file Read the file cities.text that was imported in task 11. **import cities.txt and open** a. ensure the code was created and run in **task 1** to import cities.txt b. create and run code to re-open cities.txt as cities_file 2. **read() cities_file into a variable called cities**3. Test the read() by displaying the string contained in cities4. 
Test the read() by printing the cities string ###Code # [ ] after import and open of cities.txt in task 1 # [ ] read cities_file as cities # [ ] display the string: cities # [ ] print the string: cities ###Output _____no_output_____ ###Markdown &nbsp; Concepts reading a file with `.read(n)` where n = number of characters to read[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/9960a973-339b-40e1-b5b3-1f7db661934e/Unit2_Section4.1d-Reading_Number_of_Characters.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/9960a973-339b-40e1-b5b3-1f7db661934e/Unit2_Section4.1d-Reading_Number_of_Characters.vtt","srclang":"en","kind":"subtitles","label":"english"}]) each time `poem_file.read(10)` runs, the next 10 characters are read.> **Note:** if .read(10) result is = '' &nbsp;(or empty string with no characters), it is likely that the end of the file has been reached. Perform a fresh **.open()** to reset read() to the beginning of the file. &nbsp; Examples examples expect that the cells that import and open of poem1.txt has been run without a read()Run the cell at the top of the notebook to ** import poem1.txt** each line is a different approach to reading and displaying 10 characters of the poem ###Code # [ ] review and run example to read poem1.txt 10 characters at a time poem_file = open('poem1.txt', 'r') poem_10char = poem_file.read(10) print(poem_10char) poem_10char # [ ] review and run example, + 10 more characters # reads and displays without storing in a variable poem_file.read(10) # [ ] review and run example, + 10 more characters # reads and stores in variable poem_parts poem_parts = poem_file.read(10) print(poem_parts) # [ ] REPEATEDLY RUN this cell, + 5 more characters each time run are appended using string addition # [ ] consider why no additional text displays after multiple runs poem_parts += poem_file.read(5) print(poem_parts) ###Output _____no_output_____ ###Markdown &nbsp; Task 3 digits of pi read a set number of digits with .read(n) import, open, read, print1. import digits_of_pi.txt located at https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/digits_of_pi 2. open as **digits_of_pi_text** 3. read()the first 4 characters of digits_of_pi_text into a variable called pi_digits 4. print pi_digits 5. add to pi_digits string with string addition a. add next 4 characters from digits_of_pi obtained from read() b. run the cell multiple times to get more digits of *pi* ###Code # [ ] digits of pi # 1. import digits_of_pi.txt # [ ] digits of pi # 2. open as digits_of_pi_text # 3. read() 4 char of digits_of_pi_text to pi_digits variable # 4. print pi_digits # [ ] digits of pi # 5. add to pi_digits string with string addition # a. add next 4 characters from digits_of_pi obtained from read() # b. 
run the cell multiple times to get more digits of *pi* ###Output _____no_output_____ ###Markdown &nbsp; Concept .read() returns a string These strings can be manipulated just like any other string[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/f7fb0136-24a3-4a0e-aff2-b1abc2f83029/Unit2_Section4.1e-Read_Returns_a_String.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/f7fb0136-24a3-4a0e-aff2-b1abc2f83029/Unit2_Section4.1e-Read_Returns_a_String.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Boolean tests such as:- .upper() - .title() - string slices, e.g.- `cities[3:9]` - etc.. and string methods can be performed such as:- .isdigit() - .isalpha() - etc... &nbsp; Examples examples expect that the cells that import has been run it may be necessary to run the cell to **import poem1.txt** at top of notebook ###Code # [ ] review and run example poem_file = open('poem1.txt', 'r') poem_part = poem_file.read(15).upper() print(poem_part) # [ ] review and run example poem_part = poem_file.read(6).title() print(poem_part) # [ ] review and run example poem_part = poem_file.read(6) print(poem_part) print(poem_part.isalpha(), "isalpha() because of `\\n`") poem_part # [ ] review and run example poem_file = open('poem1.txt', 'r') poem_text = poem_file.read() print(poem_text[8:26]) ###Output _____no_output_____ ###Markdown &nbsp; Task 4 City Initials Read the file cities.text that was imported in task 11. ensure the code was created and run in **task 1** to import cities.txt 2. create and run code to re-open cities.txt as cities_file 3. **`read()`** cities_file into a variable called cities 4. iterate through the characters in cities a. test if .isupper(), if True append the character to a string variable: initials c. else if (elif) character is "\n", if True append the "\n" to initials 5. 
print initials ###Code # [ ] compelete the task ###Output _____no_output_____ ###Markdown 2-4.1 Intro Python Working with Files4.1 **File import in Jupyter Notebooks** 4.1 **File `open(`) and `.read()`** 4.2 File Read as a list with **`.readlines()`** 4.2 File Closing to free resources with **`.close()`** 4.3 File Read a line at a time with **`.readline()`** 4.3 Remove characters using **`.strip()`** 4.4 File **`.write()`** with **`.seek()`** 4.4 File append mode ----- > Student will be able to 4.1 **Import files in Jupyter Notebooks using the curl command ** 4.1 **`open()` and `.read()` local files in memory** 4.1 **`.read()` a specific number of characters** 4.2 Use **`.readlines()`** to read data from file as a list of lines 4.2 Use **`.close`** to free system resources 4.3 Use **`.readline()`** to read data from file a line at a time 4.3 Use **`.strip()`** to remove new line characters 4.4 **`.write()`** data to a new local file4.4 Use **`.seek()`** to set file read or write location 4.4 Use file append mode &nbsp; Concepts Import Files to Jupyter[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/850e52a7-2082-4cb7-926f-54bf2527cee0/Unit2_Section4.1a-Import_Files_to_Jupyter.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/850e52a7-2082-4cb7-926f-54bf2527cee0/Unit2_Section4.1a-Import_Files_to_Jupyter.vtt","srclang":"en","kind":"subtitles","label":"english"}]) curl imports files to Jupyter session from a web addressbelow is a code using curl to import poem1.txt, the code is in a command line interface syntax> `!curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/poem1.txt -o poem1.txt` The table explains each element of the command above | code | meaning ||-----|---|| **`!`** | runs command interface supporting **curl** | | **`curl`** | enables **curl** that can download files | | **`https://raw.githubusercontent.com/...`** | is the address for data file to import | | **`-o`** | tells **`curl`** write data to a file | | ** *`poem1.txt`* ** | name **`curl`** will give the file | &nbsp; Examples ###Code # [ ] review and run example !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/poem1.txt -o poem1.txt ###Output % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 56 100 56 0 0 245 0 --:--:-- --:--:-- --:--:-- 245 ###Markdown &nbsp; Concepts Opening a Local File in read mode>```pythonpoem_file = open('poem1.txt', 'r') ``` [![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/07e5f863-e416-4534-a45c-df50d0d3df33/Unit2_Section4.1b-Opening_Files_Read_Mode.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/07e5f863-e416-4534-a45c-df50d0d3df33/Unit2_Section4.1b-Opening_Files_Read_Mode.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Read mode &nbsp; `'r'`| MODE | Description ||:-------:|:--------------|| **'r'** | **read only mode** || 'w' 
| write - overwrites file with same name || 'r+' | read and write mode || 'a' | opens for appending to end of file | `open()` creates an object that can be addressed in python code &nbsp; Examples ###Code # [ ]Run to open the file in memory as poem_file poem_file = open('poem1.txt', 'r') # [ ] run and review code to test if open worked # should display name='poem1.txt' and no errors poem_file ###Output _____no_output_____ ###Markdown &nbsp; Task 1 import and open a local file in read mode1. **Import a list of cities using curl** a. git the list from https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/cities b. name the list cities.txt 2. **open cities.txt in read mode using a variable = cities_file** 3. **test that cities_file opened cities.txt with a print statement** ###Code # [ ] import cities.txt !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/cities -o cities.txt cities = open("cities.txt", "r") # [ ] open cities.txt as cities_file # [ ] test cities.txt was opened cities_file = open("cities.txt", "r") cities_file print (cities_file) ###Output <_io.TextIOWrapper name='cities.txt' mode='r' encoding='cp1252'> ###Markdown &nbsp; Concepts Read a file using `.read()`[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c37a03df-7d95-4339-8fec-8f199b747a08/Unit2_Section4.1c-Reading_Files.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c37a03df-7d95-4339-8fec-8f199b747a08/Unit2_Section4.1c-Reading_Files.vtt","srclang":"en","kind":"subtitles","label":"english"}]) reading text```pythonpoem_contents = poem_file.read()``` `.read()` loads the content of the file into memory as a string, including formatting such as new line (`\n`) &nbsp; Examples examples expect that the cells that import and open of poem1.txt has been run without a read()Run the cells that import and open poem1.txt ###Code # [ ] review and run example poem_contents = poem_file.read() # [ ] review and run example # shows the file as a string with formatting characters such as "\n", output should be non-blank poem_contents # [ ] review and run example # since .read() loaded the file as a string it can be printed print(poem_contents) ###Output Loops I repeat loops loops loops I repeat until I break ###Markdown &nbsp; Task 2 read a file Read the file cities.text that was imported in task 11. **import cities.txt and open** a. ensure the code was created and run in **task 1** to import cities.txt b. create and run code to re-open cities.txt as cities_file 2. **read() cities_file into a variable called cities**3. Test the read() by displaying the string contained in cities4. 
Test the read() by printing the cities string ###Code # [ ] after import and open of cities.txt in task 1 # [ ] read cities_file as cities # [ ] display the string: cities cities_file = open("cities.txt", "r") cities = cities_file.read() cities # [ ] print the string: cities print (cities) ###Output Beijing Cairo London Nairobi New York City Sydney Tokyo ###Markdown &nbsp; Concepts reading a file with `.read(n)` where n = number of characters to read[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/9960a973-339b-40e1-b5b3-1f7db661934e/Unit2_Section4.1d-Reading_Number_of_Characters.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/9960a973-339b-40e1-b5b3-1f7db661934e/Unit2_Section4.1d-Reading_Number_of_Characters.vtt","srclang":"en","kind":"subtitles","label":"english"}]) each time `poem_file.read(10)` runs, the next 10 characters are read.> **Note:** if .read(10) result is = '' &nbsp;(or empty string with no characters), it is likely that the end of the file has been reached. Perform a fresh **.open()** to reset read() to the beginning of the file. &nbsp; Examples examples expect that the cells that import and open of poem1.txt has been run without a read()Run the cell at the top of the notebook to ** import poem1.txt** each line is a different approach to reading and displaying 10 characters of the poem ###Code # [ ] review and run example to read poem1.txt 10 characters at a time poem_file = open('poem1.txt', 'r') poem_10char = poem_file.read(10) print(poem_10char) poem_10char # [ ] review and run example, + 10 more characters # reads and displays without storing in a variable poem_file.read(10) # [ ] review and run example, + 10 more characters # reads and stores in variable poem_parts poem_parts = poem_file.read(10) print(poem_parts) # [ ] REPEATEDLY RUN this cell, + 5 more characters each time run are appended using string addition # [ ] consider why no additional text displays after multiple runs poem_parts += poem_file.read(5) print(poem_parts) ###Output loops loops I repeat until I break ###Markdown &nbsp; Task 3 digits of pi read a set number of digits with .read(n) import, open, read, print1. import digits_of_pi.txt located at https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/digits_of_pi 2. open as **digits_of_pi_text** 3. read()the first 4 characters of digits_of_pi_text into a variable called pi_digits 4. print pi_digits 5. add to pi_digits string with string addition a. add next 4 characters from digits_of_pi obtained from read() b. run the cell multiple times to get more digits of *pi* ###Code # [ ] digits of pi # 1. import digits_of_pi.txt !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/digits_of_pi -o digits_of_pi_text.txt # [ ] digits of pi # 2. open as digits_of_pi_text # 3. read() 4 char of digits_of_pi_text to pi_digits variable # 4. print pi_digits digits_of_pi_text = open ("digits_of_pi_text.txt", "r") pi_digits = digits_of_pi_text.read(4) print (pi_digits) # [ ] digits of pi # 5. add to pi_digits string with string addition # a. add next 4 characters from digits_of_pi obtained from read() # b. 
run the cell multiple times to get more digits of *pi* pi_digits += digits_of_pi_text.read(4) print (pi_digits) ###Output 3.141592653589793238462643383279 ###Markdown &nbsp; Concept .read() returns a string These strings can be manipulated just like any other string[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/f7fb0136-24a3-4a0e-aff2-b1abc2f83029/Unit2_Section4.1e-Read_Returns_a_String.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/f7fb0136-24a3-4a0e-aff2-b1abc2f83029/Unit2_Section4.1e-Read_Returns_a_String.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Boolean tests such as:- .upper() - .title() - string slices, e.g.- `cities[3:9]` - etc.. and string methods can be performed such as:- .isdigit() - .isalpha() - etc... &nbsp; Examples examples expect that the cells that import has been run it may be necessary to run the cell to **import poem1.txt** at top of notebook ###Code # [ ] review and run example poem_file = open('poem1.txt', 'r') poem_part = poem_file.read(15).upper() print(poem_part) # [ ] review and run example poem_part = poem_file.read(6).title() print(poem_part) # [ ] review and run example poem_part = poem_file.read(6) print(poem_part) print(poem_part.isalpha(), "isalpha() because of `\\n`") poem_part # [ ] review and run example poem_file = open('poem1.txt', 'r') poem_text = poem_file.read() print(poem_text[8:26]) ###Output repeat loops loops ###Markdown &nbsp; Task 4 City Initials Read the file cities.text that was imported in task 11. ensure the code was created and run in **task 1** to import cities.txt 2. create and run code to re-open cities.txt as cities_file 3. **`read()`** cities_file into a variable called cities 4. iterate through the characters in cities a. test if .isupper(), if True append the character to a string variable: initials c. else if (elif) character is "\n", if True append the "\n" to initials 5. 
print initials ###Code # [ ] compelete the task cities_file = open("cities.txt", "r") cities = cities_file.read() i = 0 initials = "" print (cities) while i < len(cities): if cities[i].isupper(): initials += cities[i] elif cities[i] == "\n": initials += "\n" else: pass i += 1 print (initials) ###Output Beijing Cairo London Nairobi New York City Sydney Tokyo B C L N NYC S T ###Markdown 2-4.1 Intro Python Working with Files4.1 **File import in Jupyter Notebooks** 4.1 **File `open(`) and `.read()`** 4.2 File Read as a list with **`.readlines()`** 4.2 File Closing to free resources with **`.close()`** 4.3 File Read a line at a time with **`.readline()`** 4.3 Remove characters using **`.strip()`** 4.4 File **`.write()`** with **`.seek()`** 4.4 File append mode ----- > Student will be able to 4.1 **Import files in Jupyter Notebooks using the curl command ** 4.1 **`open()` and `.read()` local files in memory** 4.1 **`.read()` a specific number of characters** 4.2 Use **`.readlines()`** to read data from file as a list of lines 4.2 Use **`.close`** to free system resources 4.3 Use **`.readline()`** to read data from file a line at a time 4.3 Use **`.strip()`** to remove new line characters 4.4 **`.write()`** data to a new local file4.4 Use **`.seek()`** to set file read or write location 4.4 Use file append mode &nbsp; Concepts Import Files to Jupyter[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/850e52a7-2082-4cb7-926f-54bf2527cee0/Unit2_Section4.1a-Import_Files_to_Jupyter.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/850e52a7-2082-4cb7-926f-54bf2527cee0/Unit2_Section4.1a-Import_Files_to_Jupyter.vtt","srclang":"en","kind":"subtitles","label":"english"}]) curl imports files to Jupyter session from a web addressbelow is a code using curl to import poem1.txt, the code is in a command line interface syntax> `!curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/poem1.txt -o poem1.txt` The table explains each element of the command above | code | meaning ||-----|---|| **`!`** | runs command interface supporting **curl** | | **`curl`** | enables **curl** that can download files | | **`https://raw.githubusercontent.com/...`** | is the address for data file to import | | **`-o`** | tells **`curl`** write data to a file | | ** *`poem1.txt`* ** | name **`curl`** will give the file | &nbsp; Examples ###Code # [ ] review and run example !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/poem1.txt -o poem1.txt ###Output % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 56 100 56 0 0 148 0 --:--:-- --:--:-- --:--:-- 148 ###Markdown &nbsp; Concepts Opening a Local File in read mode>```pythonpoem_file = open('poem1.txt', 'r') ``` [![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( 
http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/07e5f863-e416-4534-a45c-df50d0d3df33/Unit2_Section4.1b-Opening_Files_Read_Mode.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/07e5f863-e416-4534-a45c-df50d0d3df33/Unit2_Section4.1b-Opening_Files_Read_Mode.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Read mode &nbsp; `'r'`| MODE | Description ||:-------:|:--------------|| **'r'** | **read only mode** || 'w' | write - overwrites file with same name || 'r+' | read and write mode || 'a' | opens for appending to end of file | `open()` creates an object that can be addressed in python code &nbsp; Examples ###Code # [ ]Run to open the file in memory as poem_file poem_file = open('poem1.txt', 'r') # [ ] run and review code to test if open worked # should display name='poem1.txt' and no errors poem_file ###Output _____no_output_____ ###Markdown &nbsp; Task 1 import and open a local file in read mode1. **Import a list of cities using curl** a. git the list from https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/cities b. name the list cities.txt 2. **open cities.txt in read mode using a variable = cities_file** 3. **test that cities_file opened cities.txt with a print statement** ###Code # [ ] import cities.txt !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/cities -o cities.txt # [ ] open cities.txt as cities_file # [ ] test cities.txt was opened cities_file = open('cities.txt', 'r') cities_file ###Output _____no_output_____ ###Markdown &nbsp; Concepts Read a file using `.read()`[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c37a03df-7d95-4339-8fec-8f199b747a08/Unit2_Section4.1c-Reading_Files.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c37a03df-7d95-4339-8fec-8f199b747a08/Unit2_Section4.1c-Reading_Files.vtt","srclang":"en","kind":"subtitles","label":"english"}]) reading text```pythonpoem_contents = poem_file.read()``` `.read()` loads the content of the file into memory as a string, including formatting such as new line (`\n`) &nbsp; Examples examples expect that the cells that import and open of poem1.txt has been run without a read()Run the cells that import and open poem1.txt ###Code # [ ] review and run example poem_contents = poem_file.read() # [ ] review and run example # shows the file as a string with formatting characters such as "\n", output should be non-blank poem_contents # [ ] review and run example # since .read() loaded the file as a string it can be printed print(poem_contents) ###Output Loops I repeat loops loops loops I repeat until I break ###Markdown &nbsp; Task 2 read a file Read the file cities.text that was imported in task 11. **import cities.txt and open** a. ensure the code was created and run in **task 1** to import cities.txt b. create and run code to re-open cities.txt as cities_file 2. **read() cities_file into a variable called cities**3. Test the read() by displaying the string contained in cities4. 
Test the read() by printing the cities string ###Code # [ ] after import and open of cities.txt in task 1 # [ ] read cities_file as cities # [ ] display the string: cities !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/cities -o cities.txt cities_file = open('cities.txt', 'r') cities_file cities = cities_file.read() cities # [ ] print the string: cities print(cities) ###Output Beijing Cairo London Nairobi New York City Sydney Tokyo ###Markdown &nbsp; Concepts reading a file with `.read(n)` where n = number of characters to read[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/9960a973-339b-40e1-b5b3-1f7db661934e/Unit2_Section4.1d-Reading_Number_of_Characters.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/9960a973-339b-40e1-b5b3-1f7db661934e/Unit2_Section4.1d-Reading_Number_of_Characters.vtt","srclang":"en","kind":"subtitles","label":"english"}]) each time `poem_file.read(10)` runs, the next 10 characters are read.> **Note:** if .read(10) result is = '' &nbsp;(or empty string with no characters), it is likely that the end of the file has been reached. Perform a fresh **.open()** to reset read() to the beginning of the file. &nbsp; Examples examples expect that the cells that import and open of poem1.txt has been run without a read()Run the cell at the top of the notebook to ** import poem1.txt** each line is a different approach to reading and displaying 10 characters of the poem ###Code # [ ] review and run example to read poem1.txt 10 characters at a time poem_file = open('poem1.txt', 'r') poem_10char = poem_file.read(10) print(poem_10char) poem_10char # [ ] review and run example, + 10 more characters # reads and displays without storing in a variable poem_file.read(10) # [ ] review and run example, + 10 more characters # reads and stores in variable poem_parts poem_parts = poem_file.read(10) print(poem_parts) # [ ] REPEATEDLY RUN this cell, + 5 more characters each time run are appended using string addition # [ ] consider why no additional text displays after multiple runs poem_parts += poem_file.read(5) print(poem_parts) ###Output loops loops I repeat until I break ###Markdown &nbsp; Task 3 digits of pi read a set number of digits with .read(n) import, open, read, print1. import digits_of_pi.txt located at https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/digits_of_pi 2. open as **digits_of_pi_text** 3. read()the first 4 characters of digits_of_pi_text into a variable called pi_digits 4. print pi_digits 5. add to pi_digits string with string addition a. add next 4 characters from digits_of_pi obtained from read() b. run the cell multiple times to get more digits of *pi* ###Code # [ ] digits of pi # 1. import digits_of_pi.txt !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/digits_of_pi -o digits_of_pi_text.txt # [ ] digits of pi # 2. open as digits_of_pi_text # 3. read() 4 char of digits_of_pi_text to pi_digits variable # 4. print pi_digits digits_of_pi_text1 = open('digits_of_pi_text.txt', 'r') pi_digits = digits_of_pi_text1.read(4) print(pi_digits) # [ ] digits of pi # 5. add to pi_digits string with string addition # a. add next 4 characters from digits_of_pi obtained from read() # b. 
run the cell multiple times to get more digits of *pi* pi_digits = pi_digits + digits_of_pi_text1.read(4) + digits_of_pi_text1.read(4) print(pi_digits) ###Output 3.141592653589793238462643383279502884197169399375105820974944592307816406286208998628034825342117067982148086513282306647093844609550582231725359408128481117450284102701938521105559644622 ###Markdown &nbsp; Concept .read() returns a string These strings can be manipulated just like any other string[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/f7fb0136-24a3-4a0e-aff2-b1abc2f83029/Unit2_Section4.1e-Read_Returns_a_String.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/f7fb0136-24a3-4a0e-aff2-b1abc2f83029/Unit2_Section4.1e-Read_Returns_a_String.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Boolean tests such as:- .upper() - .title() - string slices, e.g.- `cities[3:9]` - etc.. and string methods can be performed such as:- .isdigit() - .isalpha() - etc... &nbsp; Examples examples expect that the cells that import has been run it may be necessary to run the cell to **import poem1.txt** at top of notebook ###Code # [ ] review and run example poem_file = open('poem1.txt', 'r') poem_part = poem_file.read(15).upper() print(poem_part) # [ ] review and run example poem_part = poem_file.read(6).title() print(poem_part) # [ ] review and run example poem_part = poem_file.read(6) print(poem_part) print(poem_part.isalpha(), "isalpha() because of `\\n`") poem_part # [ ] review and run example poem_file = open('poem1.txt', 'r') poem_text = poem_file.read() print(poem_text[8:26]) ###Output repeat loops loops ###Markdown &nbsp; Task 4 City Initials Read the file cities.text that was imported in task 11. ensure the code was created and run in **task 1** to import cities.txt 2. create and run code to re-open cities.txt as cities_file 3. **`read()`** cities_file into a variable called cities 4. iterate through the characters in cities a. test if .isupper(), if True append the character to a string variable: initials c. else if (elif) character is "\n", if True append the "\n" to initials 5. 
print initials ###Code # [ ] compelete the task !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/cities -o cities.txt cities_file = open('cities.txt', 'r') cities = cities_file.read() poop = cities.isupper() if poop == True: initials = cities print(initials) elif poop == False: initials = cities print(initials) ###Output Beijing Cairo London Nairobi New York City Sydney Tokyo ###Markdown 2-4.1 Intro Python Working with Files4.1 **File import in Jupyter Notebooks** 4.1 **File `open(`) and `.read()`** 4.2 File Read as a list with **`.readlines()`** 4.2 File Closing to free resources with **`.close()`** 4.3 File Read a line at a time with **`.readline()`** 4.3 Remove characters using **`.strip()`** 4.4 File **`.write()`** with **`.seek()`** 4.4 File append mode ----- > Student will be able to 4.1 **Import files in Jupyter Notebooks using the curl command ** 4.1 **`open()` and `.read()` local files in memory** 4.1 **`.read()` a specific number of characters** 4.2 Use **`.readlines()`** to read data from file as a list of lines 4.2 Use **`.close`** to free system resources 4.3 Use **`.readline()`** to read data from file a line at a time 4.3 Use **`.strip()`** to remove new line characters 4.4 **`.write()`** data to a new local file4.4 Use **`.seek()`** to set file read or write location 4.4 Use file append mode &nbsp; Concepts Import Files to Jupyter[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/850e52a7-2082-4cb7-926f-54bf2527cee0/Unit2_Section4.1a-Import_Files_to_Jupyter.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/850e52a7-2082-4cb7-926f-54bf2527cee0/Unit2_Section4.1a-Import_Files_to_Jupyter.vtt","srclang":"en","kind":"subtitles","label":"english"}]) curl imports files to Jupyter session from a web addressbelow is a code using curl to import poem1.txt, the code is in a command line interface syntax> `!curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/poem1.txt -o poem1.txt` The table explains each element of the command above | code | meaning ||-----|---|| **`!`** | runs command interface supporting **curl** | | **`curl`** | enables **curl** that can download files | | **`https://raw.githubusercontent.com/...`** | is the address for data file to import | | **`-o`** | tells **`curl`** write data to a file | | ** *`poem1.txt`* ** | name **`curl`** will give the file | &nbsp; Examples ###Code # [ ] review and run example !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/poem1.txt -o poem1.txt ###Output % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 56 100 56 0 0 77 0 --:--:-- --:--:-- --:--:-- 77 ###Markdown &nbsp; Concepts Opening a Local File in read mode>```pythonpoem_file = open('poem1.txt', 'r') ``` [![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( 
http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/07e5f863-e416-4534-a45c-df50d0d3df33/Unit2_Section4.1b-Opening_Files_Read_Mode.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/07e5f863-e416-4534-a45c-df50d0d3df33/Unit2_Section4.1b-Opening_Files_Read_Mode.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Read mode &nbsp; `'r'`| MODE | Description ||:-------:|:--------------|| **'r'** | **read only mode** || 'w' | write - overwrites file with same name || 'r+' | read and write mode || 'a' | opens for appending to end of file | `open()` creates an object that can be addressed in python code &nbsp; Examples ###Code # [ ]Run to open the file in memory as poem_file poem_file = open('poem1.txt', 'r') # [ ] run and review code to test if open worked # should display name='poem1.txt' and no errors poem_file ###Output _____no_output_____ ###Markdown &nbsp; Task 1 import and open a local file in read mode1. **Import a list of cities using curl** a. git the list from https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/cities b. name the list cities.txt 2. **open cities.txt in read mode using a variable = cities_file** 3. **test that cities_file opened cities.txt with a print statement** ###Code # [ ] import cities.txt !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/cities -o cities.txt # [ ] open cities.txt as cities_file # [ ] test cities.txt was opened cities_file = open('cities.txt', 'r') cities_file ###Output _____no_output_____ ###Markdown &nbsp; Concepts Read a file using `.read()`[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c37a03df-7d95-4339-8fec-8f199b747a08/Unit2_Section4.1c-Reading_Files.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c37a03df-7d95-4339-8fec-8f199b747a08/Unit2_Section4.1c-Reading_Files.vtt","srclang":"en","kind":"subtitles","label":"english"}]) reading text```pythonpoem_contents = poem_file.read()``` `.read()` loads the content of the file into memory as a string, including formatting such as new line (`\n`) &nbsp; Examples examples expect that the cells that import and open of poem1.txt has been run without a read()Run the cells that import and open poem1.txt ###Code # [ ] review and run example poem_contents = poem_file.read() # [ ] review and run example # shows the file as a string with formatting characters such as "\n", output should be non-blank poem_contents # [ ] review and run example # since .read() loaded the file as a string it can be printed print(poem_contents) ###Output Loops I repeat loops loops loops I repeat until I break ###Markdown &nbsp; Task 2 read a file Read the file cities.text that was imported in task 11. **import cities.txt and open** a. ensure the code was created and run in **task 1** to import cities.txt b. create and run code to re-open cities.txt as cities_file 2. **read() cities_file into a variable called cities**3. Test the read() by displaying the string contained in cities4. 
Test the read() by printing the cities string ###Code # [ ] after import and open of cities.txt in task 1 # [ ] read cities_file as cities # [ ] display the string: cities !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/cities -o cities.txt cities_file = open('cities.txt', 'r') cities_file cities = cities_file.read() cities # [ ] print the string: cities print(cities) ###Output Beijing Cairo London Nairobi New York City Sydney Tokyo ###Markdown &nbsp; Concepts reading a file with `.read(n)` where n = number of characters to read[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/9960a973-339b-40e1-b5b3-1f7db661934e/Unit2_Section4.1d-Reading_Number_of_Characters.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/9960a973-339b-40e1-b5b3-1f7db661934e/Unit2_Section4.1d-Reading_Number_of_Characters.vtt","srclang":"en","kind":"subtitles","label":"english"}]) each time `poem_file.read(10)` runs, the next 10 characters are read.> **Note:** if .read(10) result is = '' &nbsp;(or empty string with no characters), it is likely that the end of the file has been reached. Perform a fresh **.open()** to reset read() to the beginning of the file. &nbsp; Examples examples expect that the cells that import and open of poem1.txt has been run without a read()Run the cell at the top of the notebook to ** import poem1.txt** each line is a different approach to reading and displaying 10 characters of the poem ###Code # [ ] review and run example to read poem1.txt 10 characters at a time poem_file = open('poem1.txt', 'r') poem_10char = poem_file.read(10) print(poem_10char) poem_10char # [ ] review and run example, + 10 more characters # reads and displays without storing in a variable poem_file.read(10) # [ ] review and run example, + 10 more characters # reads and stores in variable poem_parts poem_parts = poem_file.read(10) print(poem_parts) # [ ] REPEATEDLY RUN this cell, + 5 more characters each time run are appended using string addition # [ ] consider why no additional text displays after multiple runs poem_parts += poem_file.read(5) print(poem_parts) ###Output repeat until I break ###Markdown &nbsp; Task 3 digits of pi read a set number of digits with .read(n) import, open, read, print1. import digits_of_pi.txt located at https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/digits_of_pi 2. open as **digits_of_pi_text** 3. read()the first 4 characters of digits_of_pi_text into a variable called pi_digits 4. print pi_digits 5. add to pi_digits string with string addition a. add next 4 characters from digits_of_pi obtained from read() b. run the cell multiple times to get more digits of *pi* ###Code # [ ] digits of pi # 1. import digits_of_pi.txt !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/digits_of_pi -o digits_of_pi.txt # [ ] digits of pi # 2. open as digits_of_pi_text # 3. read() 4 char of digits_of_pi_text to pi_digits variable # 4. print pi_digits digits_file = open("digits_of_pi.txt", "r") digits_file pi_digits = digits_file.read(4) print(pi_digits) pi_digits # [ ] digits of pi # 5. add to pi_digits string with string addition # a. add next 4 characters from digits_of_pi obtained from read() # b. 
run the cell multiple times to get more digits of *pi* digits_file.read(4) ###Output _____no_output_____ ###Markdown &nbsp; Concept .read() returns a string These strings can be manipulated just like any other string[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/f7fb0136-24a3-4a0e-aff2-b1abc2f83029/Unit2_Section4.1e-Read_Returns_a_String.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/f7fb0136-24a3-4a0e-aff2-b1abc2f83029/Unit2_Section4.1e-Read_Returns_a_String.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Boolean tests such as:- .upper() - .title() - string slices, e.g.- `cities[3:9]` - etc.. and string methods can be performed such as:- .isdigit() - .isalpha() - etc... &nbsp; Examples examples expect that the cells that import has been run it may be necessary to run the cell to **import poem1.txt** at top of notebook ###Code # [ ] review and run example poem_file = open('poem1.txt', 'r') poem_part = poem_file.read(15).upper() print(poem_part) # [ ] review and run example poem_part = poem_file.read(6).title() print(poem_part) # [ ] review and run example poem_part = poem_file.read(6) print(poem_part) print(poem_part.isalpha(), "isalpha() because of `\\n`") poem_part # [ ] review and run example poem_file = open('poem1.txt', 'r') poem_text = poem_file.read() print(poem_text[8:26]) ###Output repeat loops loops ###Markdown &nbsp; Task 4 City Initials Read the file cities.text that was imported in task 11. ensure the code was created and run in **task 1** to import cities.txt 2. create and run code to re-open cities.txt as cities_file 3. **`read()`** cities_file into a variable called cities 4. iterate through the characters in cities a. test if .isupper(), if True append the character to a string variable: initials c. else if (elif) character is "\n", if True append the "\n" to initials 5. 
print initials ###Code # [ ] compelete the task !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/cities -o cities.txt cities_file = open('cities.txt', 'r') cities_file cities = cities_file c = [] for city in cities: if cities.isupper() == True: c.append(cities) elif city("\n") == True: c.append(cities) else: pass print(c) ###Output % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 56 100 56 0 0 361 0 --:--:-- --:--:-- --:--:-- 361 ###Markdown 2-4.1 Intro Python Working with Files4.1 **File import in Jupyter Notebooks** 4.1 **File `open(`) and `.read()`** 4.2 File Read as a list with **`.readlines()`** 4.2 File Closing to free resources with **`.close()`** 4.3 File Read a line at a time with **`.readline()`** 4.3 Remove characters using **`.strip()`** 4.4 File **`.write()`** with **`.seek()`** 4.4 File append mode ----- > Student will be able to 4.1 **Import files in Jupyter Notebooks using the curl command ** 4.1 **`open()` and `.read()` local files in memory** 4.1 **`.read()` a specific number of characters** 4.2 Use **`.readlines()`** to read data from file as a list of lines 4.2 Use **`.close`** to free system resources 4.3 Use **`.readline()`** to read data from file a line at a time 4.3 Use **`.strip()`** to remove new line characters 4.4 **`.write()`** data to a new local file4.4 Use **`.seek()`** to set file read or write location 4.4 Use file append mode &nbsp; Concepts Import Files to Jupyter[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/850e52a7-2082-4cb7-926f-54bf2527cee0/Unit2_Section4.1a-Import_Files_to_Jupyter.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/850e52a7-2082-4cb7-926f-54bf2527cee0/Unit2_Section4.1a-Import_Files_to_Jupyter.vtt","srclang":"en","kind":"subtitles","label":"english"}]) curl imports files to Jupyter session from a web addressbelow is a code using curl to import poem1.txt, the code is in a command line interface syntax> `!curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/poem1.txt -o poem1.txt` The table explains each element of the command above | code | meaning ||-----|---|| **`!`** | runs command interface supporting **curl** | | **`curl`** | enables **curl** that can download files | | **`https://raw.githubusercontent.com/...`** | is the address for data file to import | | **`-o`** | tells **`curl`** write data to a file | | ** *`poem1.txt`* ** | name **`curl`** will give the file | &nbsp; Examples ###Code # [ ] review and run example !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/poem1.txt -o poem1.txt ###Output % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 56 100 56 0 0 756 0 --:--:-- --:--:-- --:--:-- 756 ###Markdown &nbsp; Concepts Opening a Local File in read mode>```pythonpoem_file = open('poem1.txt', 'r') ``` [![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( 
http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/07e5f863-e416-4534-a45c-df50d0d3df33/Unit2_Section4.1b-Opening_Files_Read_Mode.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/07e5f863-e416-4534-a45c-df50d0d3df33/Unit2_Section4.1b-Opening_Files_Read_Mode.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Read mode &nbsp; `'r'`| MODE | Description ||:-------:|:--------------|| **'r'** | **read only mode** || 'w' | write - overwrites file with same name || 'r+' | read and write mode || 'a' | opens for appending to end of file | `open()` creates an object that can be addressed in python code &nbsp; Examples ###Code # [ ]Run to open the file in memory as poem_file poem_file = open('poem1.txt', 'r') # [ ] run and review code to test if open worked # should display name='poem1.txt' and no errors poem_file ###Output _____no_output_____ ###Markdown &nbsp; Task 1 import and open a local file in read mode1. **Import a list of cities using curl** a. git the list from https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/cities b. name the list cities.txt 2. **open cities.txt in read mode using a variable = cities_file** 3. **test that cities_file opened cities.txt with a print statement** ###Code # [ ] import cities.txt !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/cities -o cities.txt # [ ] open cities.txt as cities_file # [ ] test cities.txt was opened cities_file = open('cities.txt', 'r') cities_file ###Output _____no_output_____ ###Markdown &nbsp; Concepts Read a file using `.read()`[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c37a03df-7d95-4339-8fec-8f199b747a08/Unit2_Section4.1c-Reading_Files.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c37a03df-7d95-4339-8fec-8f199b747a08/Unit2_Section4.1c-Reading_Files.vtt","srclang":"en","kind":"subtitles","label":"english"}]) reading text```pythonpoem_contents = poem_file.read()``` `.read()` loads the content of the file into memory as a string, including formatting such as new line (`\n`) &nbsp; Examples examples expect that the cells that import and open of poem1.txt has been run without a read()Run the cells that import and open poem1.txt ###Code # [ ] review and run example poem_contents = poem_file.read() # [ ] review and run example # shows the file as a string with formatting characters such as "\n", output should be non-blank poem_contents # [ ] review and run example # since .read() loaded the file as a string it can be printed print(poem_contents) ###Output Loops I repeat loops loops loops I repeat until I break ###Markdown &nbsp; Task 2 read a file Read the file cities.text that was imported in task 11. **import cities.txt and open** a. ensure the code was created and run in **task 1** to import cities.txt b. create and run code to re-open cities.txt as cities_file 2. **read() cities_file into a variable called cities**3. Test the read() by displaying the string contained in cities4. 
Test the read() by printing the cities string ###Code # [ ] after import and open of cities.txt in task 1 # [ ] read cities_file as cities # [ ] display the string: cities !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/cities -o cities.txt cities_file = open('cities.txt', 'r') cities_contents = cities_file.read() # [ ] print the string: cities print(cities_contents) ###Output Beijing Cairo London Nairobi New York City Sydney Tokyo ###Markdown &nbsp; Concepts reading a file with `.read(n)` where n = number of characters to read[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/9960a973-339b-40e1-b5b3-1f7db661934e/Unit2_Section4.1d-Reading_Number_of_Characters.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/9960a973-339b-40e1-b5b3-1f7db661934e/Unit2_Section4.1d-Reading_Number_of_Characters.vtt","srclang":"en","kind":"subtitles","label":"english"}]) each time `poem_file.read(10)` runs, the next 10 characters are read.> **Note:** if .read(10) result is = '' &nbsp;(or empty string with no characters), it is likely that the end of the file has been reached. Perform a fresh **.open()** to reset read() to the beginning of the file. &nbsp; Examples examples expect that the cells that import and open of poem1.txt has been run without a read()Run the cell at the top of the notebook to ** import poem1.txt** each line is a different approach to reading and displaying 10 characters of the poem ###Code # [ ] review and run example to read poem1.txt 10 characters at a time poem_file = open('poem1.txt', 'r') poem_10char = poem_file.read(10) print(poem_10char) poem_10char # [ ] review and run example, + 10 more characters # reads and displays without storing in a variable poem_file.read(10) # [ ] review and run example, + 10 more characters # reads and stores in variable poem_parts poem_parts = poem_file.read(10) print(poem_parts) # [ ] REPEATEDLY RUN this cell, + 5 more characters each time run are appended using string addition # [ ] consider why no additional text displays after multiple runs poem_parts += poem_file.read(5) print(poem_parts) ###Output loops loops I ###Markdown &nbsp; Task 3 digits of pi read a set number of digits with .read(n) import, open, read, print1. import digits_of_pi.txt located at https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/digits_of_pi 2. open as **digits_of_pi_text** 3. read()the first 4 characters of digits_of_pi_text into a variable called pi_digits 4. print pi_digits 5. add to pi_digits string with string addition a. add next 4 characters from digits_of_pi obtained from read() b. run the cell multiple times to get more digits of *pi* ###Code # [ ] digits of pi # 1. import digits_of_pi.txt !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/digits_of_pi # [ ] digits of pi # 2. open as digits_of_pi_text # 3. read() 4 char of digits_of_pi_text to pi_digits variable # 4. print pi_digits digits_of_pi_text = open('pi_digits', 'r') pi_digits = digits_of_pi_text.read(4) print(pi_digits) # [ ] digits of pi # 5. add to pi_digits string with string addition # a. add next 4 characters from digits_of_pi obtained from read() # b. 
run the cell multiple times to get more digits of *pi* digits_of_pi_text.read(4) ###Output _____no_output_____ ###Markdown &nbsp; Concept .read() returns a string These strings can be manipulated just like any other string[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/f7fb0136-24a3-4a0e-aff2-b1abc2f83029/Unit2_Section4.1e-Read_Returns_a_String.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/f7fb0136-24a3-4a0e-aff2-b1abc2f83029/Unit2_Section4.1e-Read_Returns_a_String.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Boolean tests such as:- .upper() - .title() - string slices, e.g.- `cities[3:9]` - etc.. and string methods can be performed such as:- .isdigit() - .isalpha() - etc... &nbsp; Examples examples expect that the cells that import has been run it may be necessary to run the cell to **import poem1.txt** at top of notebook ###Code # [ ] review and run example poem_file = open('poem1.txt', 'r') poem_part = poem_file.read(15).upper() print(poem_part) # [ ] review and run example poem_part = poem_file.read(6).title() print(poem_part) # [ ] review and run example poem_part = poem_file.read(6) print(poem_part) print(poem_part.isalpha(), "isalpha() because of `\\n`") poem_part # [ ] review and run example poem_file = open('poem1.txt', 'r') poem_text = poem_file.read() print(poem_text[8:26]) ###Output repeat loops loops ###Markdown &nbsp; Task 4 City Initials Read the file cities.text that was imported in task 11. ensure the code was created and run in **task 1** to import cities.txt 2. create and run code to re-open cities.txt as cities_file 3. **`read()`** cities_file into a variable called cities 4. iterate through the characters in cities a. test if .isupper(), if True append the character to a string variable: initials c. else if (elif) character is "\n", if True append the "\n" to initials 5. 
print initials ###Code # [ ] compelete the task !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/cities -o cities.txt cities_file = open('cities.txt', 'r') cities = cities_file.read() print(cities) ###Output % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 56 100 56 0 0 727 0 --:--:-- --:--:-- --:--:-- 727 Beijing Cairo London Nairobi New York City Sydney Tokyo ###Markdown 2-4.1 Intro Python Working with Files4.1 **File import in Jupyter Notebooks** 4.1 **File `open(`) and `.read()`** 4.2 File Read as a list with **`.readlines()`** 4.2 File Closing to free resources with **`.close()`** 4.3 File Read a line at a time with **`.readline()`** 4.3 Remove characters using **`.strip()`** 4.4 File **`.write()`** with **`.seek()`** 4.4 File append mode ----- > Student will be able to 4.1 **Import files in Jupyter Notebooks using the curl command ** 4.1 **`open()` and `.read()` local files in memory** 4.1 **`.read()` a specific number of characters** 4.2 Use **`.readlines()`** to read data from file as a list of lines 4.2 Use **`.close`** to free system resources 4.3 Use **`.readline()`** to read data from file a line at a time 4.3 Use **`.strip()`** to remove new line characters 4.4 **`.write()`** data to a new local file4.4 Use **`.seek()`** to set file read or write location 4.4 Use file append mode &nbsp; Concepts Import Files to Jupyter[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/850e52a7-2082-4cb7-926f-54bf2527cee0/Unit2_Section4.1a-Import_Files_to_Jupyter.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/850e52a7-2082-4cb7-926f-54bf2527cee0/Unit2_Section4.1a-Import_Files_to_Jupyter.vtt","srclang":"en","kind":"subtitles","label":"english"}]) curl imports files to Jupyter session from a web addressbelow is a code using curl to import poem1.txt, the code is in a command line interface syntax> `!curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/poem1.txt -o poem1.txt` The table explains each element of the command above | code | meaning ||-----|---|| **`!`** | runs command interface supporting **curl** | | **`curl`** | enables **curl** that can download files | | **`https://raw.githubusercontent.com/...`** | is the address for data file to import | | **`-o`** | tells **`curl`** write data to a file | | ** *`poem1.txt`* ** | name **`curl`** will give the file | &nbsp; Examples ###Code # [ ] review and run example !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/poem1.txt -o poem1.txt ###Output % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 56 100 56 0 0 290 0 --:--:-- --:--:-- --:--:-- 290 ###Markdown &nbsp; Concepts Opening a Local File in read mode>```pythonpoem_file = open('poem1.txt', 'r') ``` [![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( 
http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/07e5f863-e416-4534-a45c-df50d0d3df33/Unit2_Section4.1b-Opening_Files_Read_Mode.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/07e5f863-e416-4534-a45c-df50d0d3df33/Unit2_Section4.1b-Opening_Files_Read_Mode.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Read mode &nbsp; `'r'`| MODE | Description ||:-------:|:--------------|| **'r'** | **read only mode** || 'w' | write - overwrites file with same name || 'r+' | read and write mode || 'a' | opens for appending to end of file | `open()` creates an object that can be addressed in python code &nbsp; Examples ###Code # [ ]Run to open the file in memory as poem_file poem_file = open('poem1.txt', 'r') # [ ] run and review code to test if open worked # should display name='poem1.txt' and no errors poem_file ###Output _____no_output_____ ###Markdown &nbsp; Task 1 import and open a local file in read mode1. **Import a list of cities using curl** a. git the list from https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/cities b. name the list cities.txt 2. **open cities.txt in read mode using a variable = cities_file** 3. **test that cities_file opened cities.txt with a print statement** ###Code # [ ] import cities.txt !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/cities -o cities.txt # [ ] open cities.txt as cities_file # [ ] test cities.txt was opened cities_file = open('cities.txt', 'r') cities_file ###Output _____no_output_____ ###Markdown &nbsp; Concepts Read a file using `.read()`[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c37a03df-7d95-4339-8fec-8f199b747a08/Unit2_Section4.1c-Reading_Files.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c37a03df-7d95-4339-8fec-8f199b747a08/Unit2_Section4.1c-Reading_Files.vtt","srclang":"en","kind":"subtitles","label":"english"}]) reading text```pythonpoem_contents = poem_file.read()``` `.read()` loads the content of the file into memory as a string, including formatting such as new line (`\n`) &nbsp; Examples examples expect that the cells that import and open of poem1.txt has been run without a read()Run the cells that import and open poem1.txt ###Code # [ ] review and run example poem_contents = poem_file.read() # [ ] review and run example # shows the file as a string with formatting characters such as "\n", output should be non-blank poem_contents # [ ] review and run example # since .read() loaded the file as a string it can be printed print(poem_contents) ###Output Loops I repeat loops loops loops I repeat until I break ###Markdown &nbsp; Task 2 read a file Read the file cities.text that was imported in task 11. **import cities.txt and open** a. ensure the code was created and run in **task 1** to import cities.txt b. create and run code to re-open cities.txt as cities_file 2. **read() cities_file into a variable called cities**3. Test the read() by displaying the string contained in cities4. 
Test the read() by printing the cities string ###Code # [ ] after import and open of cities.txt in task 1 # [ ] read cities_file as cities # [ ] display the string: cities cities = cities_file.read() cities # [ ] print the string: cities print(cities) ###Output Beijing Cairo London Nairobi New York City Sydney Tokyo ###Markdown &nbsp; Concepts reading a file with `.read(n)` where n = number of characters to read[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/9960a973-339b-40e1-b5b3-1f7db661934e/Unit2_Section4.1d-Reading_Number_of_Characters.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/9960a973-339b-40e1-b5b3-1f7db661934e/Unit2_Section4.1d-Reading_Number_of_Characters.vtt","srclang":"en","kind":"subtitles","label":"english"}]) each time `poem_file.read(10)` runs, the next 10 characters are read.> **Note:** if .read(10) result is = '' &nbsp;(or empty string with no characters), it is likely that the end of the file has been reached. Perform a fresh **.open()** to reset read() to the beginning of the file. &nbsp; Examples examples expect that the cells that import and open of poem1.txt has been run without a read()Run the cell at the top of the notebook to ** import poem1.txt** each line is a different approach to reading and displaying 10 characters of the poem ###Code # [ ] review and run example to read poem1.txt 10 characters at a time poem_file = open('poem1.txt', 'r') poem_10char = poem_file.read(10) print(poem_10char) poem_10char # [ ] review and run example, + 10 more characters # reads and displays without storing in a variable poem_file.read(10) # [ ] review and run example, + 10 more characters # reads and stores in variable poem_parts poem_parts = poem_file.read(10) print(poem_parts) # [ ] REPEATEDLY RUN this cell, + 5 more characters each time run are appended using string addition # [ ] consider why no additional text displays after multiple runs poem_parts += poem_file.read(5) print(poem_parts) ###Output loops loops I repeat until I break ###Markdown &nbsp; Task 3 digits of pi read a set number of digits with .read(n) import, open, read, print1. import digits_of_pi.txt located at https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/digits_of_pi 2. open as **digits_of_pi_text** 3. read()the first 4 characters of digits_of_pi_text into a variable called pi_digits 4. print pi_digits 5. add to pi_digits string with string addition a. add next 4 characters from digits_of_pi obtained from read() b. run the cell multiple times to get more digits of *pi* ###Code # [ ] digits of pi # 1. import digits_of_pi.txt !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/digits_of_pi -o digits_of_pi.txt # [ ] digits of pi # 2. open as digits_of_pi_text # 3. read() 4 char of digits_of_pi_text to pi_digits variable # 4. print pi_digits digits_of_pi_text = open('digits_of_pi.txt', 'r') pi_digits = digits_of_pi_text.read(4) pi_digits # [ ] digits of pi # 5. add to pi_digits string with string addition # a. add next 4 characters from digits_of_pi obtained from read() # b. 
run the cell multiple times to get more digits of *pi* pi_digits += digits_of_pi_text.read(4) print(pi_digits) ###Output 3.14159265358979 ###Markdown &nbsp; Concept .read() returns a string These strings can be manipulated just like any other string[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/f7fb0136-24a3-4a0e-aff2-b1abc2f83029/Unit2_Section4.1e-Read_Returns_a_String.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/f7fb0136-24a3-4a0e-aff2-b1abc2f83029/Unit2_Section4.1e-Read_Returns_a_String.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Boolean tests such as:- .upper() - .title() - string slices, e.g.- `cities[3:9]` - etc.. and string methods can be performed such as:- .isdigit() - .isalpha() - etc... &nbsp; Examples examples expect that the cells that import has been run it may be necessary to run the cell to **import poem1.txt** at top of notebook ###Code # [ ] review and run example poem_file = open('poem1.txt', 'r') poem_part = poem_file.read(15).upper() print(poem_part) # [ ] review and run example poem_part = poem_file.read(6).title() print(poem_part) # [ ] review and run example poem_part = poem_file.read(6) print(poem_part) print(poem_part.isalpha(), "isalpha() because of `\\n`") poem_part # [ ] review and run example poem_file = open('poem1.txt', 'r') poem_text = poem_file.read() print(poem_text[8:26]) ###Output repeat loops loops ###Markdown &nbsp; Task 4 City Initials Read the file cities.text that was imported in task 11. ensure the code was created and run in **task 1** to import cities.txt 2. create and run code to re-open cities.txt as cities_file 3. **`read()`** cities_file into a variable called cities 4. iterate through the characters in cities a. test if .isupper(), if True append the character to a string variable: initials c. else if (elif) character is "\n", if True append the "\n" to initials 5. 
print initials ###Code # [ ] compelete the task cities_file = open('cities.txt', 'r') cities = cities_file.read() initials = "" for element in range(0, len(cities)): if cities[element].isupper(): initials += cities[element] elif cities[element] == "\n": initials +="\n" else: pass print(initials) ###Output B C L N NYC S T ###Markdown 2-4.1 Intro Python Working with Files4.1 **File import in Jupyter Notebooks** 4.1 **File `open(`) and `.read()`** 4.2 File Read as a list with **`.readlines()`** 4.2 File Closing to free resources with **`.close()`** 4.3 File Read a line at a time with **`.readline()`** 4.3 Remove characters using **`.strip()`** 4.4 File **`.write()`** with **`.seek()`** 4.4 File append mode ----- > Student will be able to 4.1 **Import files in Jupyter Notebooks using the curl command ** 4.1 **`open()` and `.read()` local files in memory** 4.1 **`.read()` a specific number of characters** 4.2 Use **`.readlines()`** to read data from file as a list of lines 4.2 Use **`.close`** to free system resources 4.3 Use **`.readline()`** to read data from file a line at a time 4.3 Use **`.strip()`** to remove new line characters 4.4 **`.write()`** data to a new local file4.4 Use **`.seek()`** to set file read or write location 4.4 Use file append mode &nbsp; Concepts Import Files to Jupyter[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/850e52a7-2082-4cb7-926f-54bf2527cee0/Unit2_Section4.1a-Import_Files_to_Jupyter.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/850e52a7-2082-4cb7-926f-54bf2527cee0/Unit2_Section4.1a-Import_Files_to_Jupyter.vtt","srclang":"en","kind":"subtitles","label":"english"}]) curl imports files to Jupyter session from a web addressbelow is a code using curl to import poem1.txt, the code is in a command line interface syntax> `!curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/poem1.txt -o poem1.txt` The table explains each element of the command above | code | meaning ||-----|---|| **`!`** | runs command interface supporting **curl** | | **`curl`** | enables **curl** that can download files | | **`https://raw.githubusercontent.com/...`** | is the address for data file to import | | **`-o`** | tells **`curl`** write data to a file | | ** *`poem1.txt`* ** | name **`curl`** will give the file | &nbsp; Examples ###Code # [ ] review and run example !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/poem1.txt -o poem1.txt ###Output % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 56 100 56 0 0 246 0 --:--:-- --:--:-- --:--:-- 245 ###Markdown &nbsp; Concepts Opening a Local File in read mode>```pythonpoem_file = open('poem1.txt', 'r') ``` [![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( 
http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/07e5f863-e416-4534-a45c-df50d0d3df33/Unit2_Section4.1b-Opening_Files_Read_Mode.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/07e5f863-e416-4534-a45c-df50d0d3df33/Unit2_Section4.1b-Opening_Files_Read_Mode.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Read mode &nbsp; `'r'`| MODE | Description ||:-------:|:--------------|| **'r'** | **read only mode** || 'w' | write - overwrites file with same name || 'r+' | read and write mode || 'a' | opens for appending to end of file | `open()` creates an object that can be addressed in python code &nbsp; Examples ###Code # [ ]Run to open the file in memory as poem_file poem_file = open('poem1.txt', 'r') # [ ] run and review code to test if open worked # should display name='poem1.txt' and no errors poem_file ###Output _____no_output_____ ###Markdown &nbsp; Task 1 import and open a local file in read mode1. **Import a list of cities using curl** a. git the list from https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/cities b. name the list cities.txt 2. **open cities.txt in read mode using a variable = cities_file** 3. **test that cities_file opened cities.txt with a print statement** ###Code # [ ] import cities.txt !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/cities -o cities.txt # [ ] open cities.txt as cities_file # [ ] test cities.txt was opened cities_file = open('cities.txt', 'r') cities_file ###Output _____no_output_____ ###Markdown &nbsp; Concepts Read a file using `.read()`[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c37a03df-7d95-4339-8fec-8f199b747a08/Unit2_Section4.1c-Reading_Files.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c37a03df-7d95-4339-8fec-8f199b747a08/Unit2_Section4.1c-Reading_Files.vtt","srclang":"en","kind":"subtitles","label":"english"}]) reading text```pythonpoem_contents = poem_file.read()``` `.read()` loads the content of the file into memory as a string, including formatting such as new line (`\n`) &nbsp; Examples examples expect that the cells that import and open of poem1.txt has been run without a read()Run the cells that import and open poem1.txt ###Code # [ ] review and run example poem_contents = poem_file.read() # [ ] review and run example # shows the file as a string with formatting characters such as "\n", output should be non-blank poem_contents # [ ] review and run example # since .read() loaded the file as a string it can be printed print(poem_contents) ###Output ###Markdown &nbsp; Task 2 read a file Read the file cities.text that was imported in task 11. **import cities.txt and open** a. ensure the code was created and run in **task 1** to import cities.txt b. create and run code to re-open cities.txt as cities_file 2. **read() cities_file into a variable called cities**3. Test the read() by displaying the string contained in cities4. 
Test the read() by printing the cities string ###Code # [ ] after import and open of cities.txt in task 1 # [ ] read cities_file as cities # [ ] display the string: cities cities = cities_file.read() cities # [ ] print the string: cities print(cities) ###Output Beijing Cairo London Nairobi New York City Sydney Tokyo ###Markdown &nbsp; Concepts reading a file with `.read(n)` where n = number of characters to read[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/9960a973-339b-40e1-b5b3-1f7db661934e/Unit2_Section4.1d-Reading_Number_of_Characters.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/9960a973-339b-40e1-b5b3-1f7db661934e/Unit2_Section4.1d-Reading_Number_of_Characters.vtt","srclang":"en","kind":"subtitles","label":"english"}]) each time `poem_file.read(10)` runs, the next 10 characters are read.> **Note:** if .read(10) result is = '' &nbsp;(or empty string with no characters), it is likely that the end of the file has been reached. Perform a fresh **.open()** to reset read() to the beginning of the file. &nbsp; Examples examples expect that the cells that import and open of poem1.txt has been run without a read()Run the cell at the top of the notebook to ** import poem1.txt** each line is a different approach to reading and displaying 10 characters of the poem ###Code # [ ] review and run example to read poem1.txt 10 characters at a time poem_file = open('poem1.txt', 'r') poem_10char = poem_file.read(10) print(poem_10char) poem_10char # [ ] review and run example, + 10 more characters # reads and displays without storing in a variable poem_file.read(10) # [ ] review and run example, + 10 more characters # reads and stores in variable poem_parts poem_parts = poem_file.read(10) print(poem_parts) # [ ] REPEATEDLY RUN this cell, + 5 more characters each time run are appended using string addition # [ ] consider why no additional text displays after multiple runs poem_parts += poem_file.read(5) print(poem_parts) ###Output loops loops I ###Markdown &nbsp; Task 3 digits of pi read a set number of digits with .read(n) import, open, read, print1. import digits_of_pi.txt located at https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/digits_of_pi 2. open as **digits_of_pi_text** 3. read()the first 4 characters of digits_of_pi_text into a variable called pi_digits 4. print pi_digits 5. add to pi_digits string with string addition a. add next 4 characters from digits_of_pi obtained from read() b. run the cell multiple times to get more digits of *pi* ###Code # [ ] digits of pi # 1. import digits_of_pi.txt !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/digits_of_pi -o digits_of_pi.txt # [ ] digits of pi # 2. open as digits_of_pi_text # 3. read() 4 char of digits_of_pi_text to pi_digits variable # 4. print pi_digits digits_of_pi_text = open('digits_of_pi.txt', 'r') pi_digits = digits_of_pi_text.read(4) print(pi_digits) # [ ] digits of pi # 5. add to pi_digits string with string addition # a. add next 4 characters from digits_of_pi obtained from read() # b. 
run the cell multiple times to get more digits of *pi* pi_digits += digits_of_pi_text.read(4) print(pi_digits) ###Output 3.141592 ###Markdown &nbsp; Concept .read() returns a string These strings can be manipulated just like any other string[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/f7fb0136-24a3-4a0e-aff2-b1abc2f83029/Unit2_Section4.1e-Read_Returns_a_String.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/f7fb0136-24a3-4a0e-aff2-b1abc2f83029/Unit2_Section4.1e-Read_Returns_a_String.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Boolean tests such as:- .upper() - .title() - string slices, e.g.- `cities[3:9]` - etc.. and string methods can be performed such as:- .isdigit() - .isalpha() - etc... &nbsp; Examples examples expect that the cells that import has been run it may be necessary to run the cell to **import poem1.txt** at top of notebook ###Code # [ ] review and run example poem_file = open('poem1.txt', 'r') poem_part = poem_file.read(15).upper() print(poem_part) # [ ] review and run example poem_part = poem_file.read(6).title() print(poem_part) # [ ] review and run example poem_part = poem_file.read(6) print(poem_part) print(poem_part.isalpha(), "isalpha() because of `\\n`") poem_part # [ ] review and run example poem_file = open('poem1.txt', 'r') poem_text = poem_file.read() print(poem_text[8:26]) ###Output repeat loops loops ###Markdown &nbsp; Task 4 City Initials Read the file cities.text that was imported in task 11. ensure the code was created and run in **task 1** to import cities.txt 2. create and run code to re-open cities.txt as cities_file 3. **`read()`** cities_file into a variable called cities 4. iterate through the characters in cities a. test if .isupper(), if True append the character to a string variable: initials c. else if (elif) character is "\n", if True append the "\n" to initials 5. 
print initials ###Code # [ ] compelete the task cities_file = open('cities.txt', 'r') cities = cities_file.read() cities initials = "" for char in cities: if char.isupper(): initials += char elif char == "\n": initials += char print(initials) initials ###Output B C L N NYC S T ###Markdown 2-4.1 Intro Python Working with Files4.1 **File import in Jupyter Notebooks** 4.1 **File `open(`) and `.read()`** 4.2 File Read as a list with **`.readlines()`** 4.2 File Closing to free resources with **`.close()`** 4.3 File Read a line at a time with **`.readline()`** 4.3 Remove characters using **`.strip()`** 4.4 File **`.write()`** with **`.seek()`** 4.4 File append mode ----- > Student will be able to 4.1 **Import files in Jupyter Notebooks using the curl command ** 4.1 **`open()` and `.read()` local files in memory** 4.1 **`.read()` a specific number of characters** 4.2 Use **`.readlines()`** to read data from file as a list of lines 4.2 Use **`.close`** to free system resources 4.3 Use **`.readline()`** to read data from file a line at a time 4.3 Use **`.strip()`** to remove new line characters 4.4 **`.write()`** data to a new local file4.4 Use **`.seek()`** to set file read or write location 4.4 Use file append mode &nbsp; Concepts Import Files to Jupyter[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/850e52a7-2082-4cb7-926f-54bf2527cee0/Unit2_Section4.1a-Import_Files_to_Jupyter.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/850e52a7-2082-4cb7-926f-54bf2527cee0/Unit2_Section4.1a-Import_Files_to_Jupyter.vtt","srclang":"en","kind":"subtitles","label":"english"}]) curl imports files to Jupyter session from a web addressbelow is a code using curl to import poem1.txt, the code is in a command line interface syntax> `!curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/poem1.txt -o poem1.txt` The table explains each element of the command above | code | meaning ||-----|---|| **`!`** | runs command interface supporting **curl** | | **`curl`** | enables **curl** that can download files | | **`https://raw.githubusercontent.com/...`** | is the address for data file to import | | **`-o`** | tells **`curl`** write data to a file | | ** *`poem1.txt`* ** | name **`curl`** will give the file | &nbsp; Examples ###Code # [ ] review and run example !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/poem1.txt -o poem1.txt ###Output _____no_output_____ ###Markdown &nbsp; Concepts Opening a Local File in read mode>```pythonpoem_file = open('poem1.txt', 'r') ``` [![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/07e5f863-e416-4534-a45c-df50d0d3df33/Unit2_Section4.1b-Opening_Files_Read_Mode.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/07e5f863-e416-4534-a45c-df50d0d3df33/Unit2_Section4.1b-Opening_Files_Read_Mode.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Read mode &nbsp; `'r'`| MODE | Description ||:-------:|:--------------|| **'r'** | 
**read only mode** || 'w' | write - overwrites file with same name || 'r+' | read and write mode || 'a' | opens for appending to end of file | `open()` creates an object that can be addressed in python code &nbsp; Examples ###Code # [ ]Run to open the file in memory as poem_file poem_file = open('poem1.txt', 'r') # [ ] run and review code to test if open worked # should display name='poem1.txt' and no errors poem_file ###Output _____no_output_____ ###Markdown &nbsp; Task 1 import and open a local file in read mode1. **Import a list of cities using curl** a. git the list from https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/cities b. name the list cities.txt 2. **open cities.txt in read mode using a variable = cities_file** 3. **test that cities_file opened cities.txt with a print statement** ###Code # [ ] import cities.txt # [ ] open cities.txt as cities_file # [ ] test cities.txt was opened ###Output _____no_output_____ ###Markdown &nbsp; Concepts Read a file using `.read()`[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c37a03df-7d95-4339-8fec-8f199b747a08/Unit2_Section4.1c-Reading_Files.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c37a03df-7d95-4339-8fec-8f199b747a08/Unit2_Section4.1c-Reading_Files.vtt","srclang":"en","kind":"subtitles","label":"english"}]) reading text```pythonpoem_contents = poem_file.read()``` `.read()` loads the content of the file into memory as a string, including formatting such as new line (`\n`) &nbsp; Examples examples expect that the cells that import and open of poem1.txt has been run without a read()Run the cells that import and open poem1.txt ###Code # [ ] review and run example poem_contents = poem_file.read() # [ ] review and run example # shows the file as a string with formatting characters such as "\n", output should be non-blank poem_contents # [ ] review and run example # since .read() loaded the file as a string it can be printed print(poem_contents) ###Output _____no_output_____ ###Markdown &nbsp; Task 2 read a file Read the file cities.text that was imported in task 11. **import cities.txt and open** a. ensure the code was created and run in **task 1** to import cities.txt b. create and run code to re-open cities.txt as cities_file 2. **read() cities_file into a variable called cities**3. Test the read() by displaying the string contained in cities4. 
Test the read() by printing the cities string ###Code # [ ] after import and open of cities.txt in task 1 # [ ] read cities_file as cities # [ ] display the string: cities # [ ] print the string: cities ###Output _____no_output_____ ###Markdown &nbsp; Concepts reading a file with `.read(n)` where n = number of characters to read[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/9960a973-339b-40e1-b5b3-1f7db661934e/Unit2_Section4.1d-Reading_Number_of_Characters.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/9960a973-339b-40e1-b5b3-1f7db661934e/Unit2_Section4.1d-Reading_Number_of_Characters.vtt","srclang":"en","kind":"subtitles","label":"english"}]) each time `poem_file.read(10)` runs, the next 10 characters are read.> **Note:** if .read(10) result is = '' &nbsp;(or empty string with no characters), it is likely that the end of the file has been reached. Perform a fresh **.open()** to reset read() to the beginning of the file. &nbsp; Examples examples expect that the cells that import and open of poem1.txt has been run without a read()Run the cell at the top of the notebook to ** import poem1.txt** each line is a different approach to reading and displaying 10 characters of the poem ###Code # [ ] review and run example to read poem1.txt 10 characters at a time poem_file = open('poem1.txt', 'r') poem_10char = poem_file.read(10) print(poem_10char) poem_10char # [ ] review and run example, + 10 more characters # reads and displays without storing in a variable poem_file.read(10) # [ ] review and run example, + 10 more characters # reads and stores in variable poem_parts poem_parts = poem_file.read(10) print(poem_parts) # [ ] REPEATEDLY RUN this cell, + 5 more characters each time run are appended using string addition # [ ] consider why no additional text displays after multiple runs poem_parts += poem_file.read(5) print(poem_parts) ###Output _____no_output_____ ###Markdown &nbsp; Task 3 digits of pi read a set number of digits with .read(n) import, open, read, print1. import digits_of_pi.txt located at https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/digits_of_pi 2. open as **digits_of_pi_text** 3. read()the first 4 characters of digits_of_pi_text into a variable called pi_digits 4. print pi_digits 5. add to pi_digits string with string addition a. add next 4 characters from digits_of_pi obtained from read() b. run the cell multiple times to get more digits of *pi* ###Code # [ ] digits of pi # 1. import digits_of_pi.txt # [ ] digits of pi # 2. open as digits_of_pi_text # 3. read() 4 char of digits_of_pi_text to pi_digits variable # 4. print pi_digits # [ ] digits of pi # 5. add to pi_digits string with string addition # a. add next 4 characters from digits_of_pi obtained from read() # b. 
run the cell multiple times to get more digits of *pi* ###Output _____no_output_____ ###Markdown &nbsp; Concept .read() returns a string These strings can be manipulated just like any other string[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/f7fb0136-24a3-4a0e-aff2-b1abc2f83029/Unit2_Section4.1e-Read_Returns_a_String.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/f7fb0136-24a3-4a0e-aff2-b1abc2f83029/Unit2_Section4.1e-Read_Returns_a_String.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Boolean tests such as:- .upper() - .title() - string slices, e.g.- `cities[3:9]` - etc.. and string methods can be performed such as:- .isdigit() - .isalpha() - etc... &nbsp; Examples examples expect that the cells that import has been run it may be necessary to run the cell to **import poem1.txt** at top of notebook ###Code # [ ] review and run example poem_file = open('poem1.txt', 'r') poem_part = poem_file.read(15).upper() print(poem_part) # [ ] review and run example poem_part = poem_file.read(6).title() print(poem_part) # [ ] review and run example poem_part = poem_file.read(6) print(poem_part) print(poem_part.isalpha(), "isalpha() because of `\\n`") poem_part # [ ] review and run example poem_file = open('poem1.txt', 'r') poem_text = poem_file.read() print(poem_text[8:26]) ###Output _____no_output_____ ###Markdown &nbsp; Task 4 City Initials Read the file cities.text that was imported in task 11. ensure the code was created and run in **task 1** to import cities.txt 2. create and run code to re-open cities.txt as cities_file 3. **`read()`** cities_file into a variable called cities 4. iterate through the characters in cities a. test if .isupper(), if True append the character to a string variable: initials c. else if (elif) character is "\n", if True append the "\n" to initials 5. 
print initials ###Code # [ ] compelete the task ###Output _____no_output_____ ###Markdown 2-4.1 Intro Python Working with Files4.1 **File import in Jupyter Notebooks** 4.1 **File `open(`) and `.read()`** 4.2 File Read as a list with **`.readlines()`** 4.2 File Closing to free resources with **`.close()`** 4.3 File Read a line at a time with **`.readline()`** 4.3 Remove characters using **`.strip()`** 4.4 File **`.write()`** with **`.seek()`** 4.4 File append mode ----- > Student will be able to 4.1 **Import files in Jupyter Notebooks using the curl command ** 4.1 **`open()` and `.read()` local files in memory** 4.1 **`.read()` a specific number of characters** 4.2 Use **`.readlines()`** to read data from file as a list of lines 4.2 Use **`.close`** to free system resources 4.3 Use **`.readline()`** to read data from file a line at a time 4.3 Use **`.strip()`** to remove new line characters 4.4 **`.write()`** data to a new local file4.4 Use **`.seek()`** to set file read or write location 4.4 Use file append mode &nbsp; Concepts Import Files to Jupyter[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/850e52a7-2082-4cb7-926f-54bf2527cee0/Unit2_Section4.1a-Import_Files_to_Jupyter.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/850e52a7-2082-4cb7-926f-54bf2527cee0/Unit2_Section4.1a-Import_Files_to_Jupyter.vtt","srclang":"en","kind":"subtitles","label":"english"}]) curl imports files to Jupyter session from a web addressbelow is a code using curl to import poem1.txt, the code is in a command line interface syntax> `!curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/poem1.txt -o poem1.txt` The table explains each element of the command above | code | meaning ||-----|---|| **`!`** | runs command interface supporting **curl** | | **`curl`** | enables **curl** that can download files | | **`https://raw.githubusercontent.com/...`** | is the address for data file to import | | **`-o`** | tells **`curl`** write data to a file | | ** *`poem1.txt`* ** | name **`curl`** will give the file | &nbsp; Examples ###Code # [ ] review and run example !curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/poem1.txt -o poem1.txt ###Output % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 56 100 56 0 0 240 0 --:--:-- --:--:-- --:--:-- 242 ###Markdown &nbsp; Concepts Opening a Local File in read mode>```pythonpoem_file = open('poem1.txt', 'r') ``` [![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/07e5f863-e416-4534-a45c-df50d0d3df33/Unit2_Section4.1b-Opening_Files_Read_Mode.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/07e5f863-e416-4534-a45c-df50d0d3df33/Unit2_Section4.1b-Opening_Files_Read_Mode.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Read mode &nbsp; `'r'`| MODE | Description ||:-------:|:--------------|| **'r'** | **read only mode** || 'w' 
| write - overwrites file with same name || 'r+' | read and write mode || 'a' | opens for appending to end of file | `open()` creates an object that can be addressed in python code &nbsp; Examples ###Code # [ ]Run to open the file in memory as poem_file poem_file = open('poem1.txt', 'r') # [ ] run and review code to test if open worked # should display name='poem1.txt' and no errors poem_file ###Output _____no_output_____ ###Markdown &nbsp; Task 1 import and open a local file in read mode1. **Import a list of cities using curl** a. git the list from https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/cities b. name the list cities.txt 2. **open cities.txt in read mode using a variable = cities_file** 3. **test that cities_file opened cities.txt with a print statement** ###Code # [ ] import cities.txt ! curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/cities -o cities.txt # [ ] open cities.txt as cities_file cities_file = open('cities.txt', 'r') # [ ] test cities.txt was opened cities_file ###Output _____no_output_____ ###Markdown &nbsp; Concepts Read a file using `.read()`[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c37a03df-7d95-4339-8fec-8f199b747a08/Unit2_Section4.1c-Reading_Files.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c37a03df-7d95-4339-8fec-8f199b747a08/Unit2_Section4.1c-Reading_Files.vtt","srclang":"en","kind":"subtitles","label":"english"}]) reading text```pythonpoem_contents = poem_file.read()``` `.read()` loads the content of the file into memory as a string, including formatting such as new line (`\n`) &nbsp; Examples examples expect that the cells that import and open of poem1.txt has been run without a read()Run the cells that import and open poem1.txt ###Code # [ ] review and run example poem_contents = poem_file.read() # [ ] review and run example # shows the file as a string with formatting characters such as "\n", output should be non-blank poem_contents # [ ] review and run example # since .read() loaded the file as a string it can be printed print(poem_contents) ###Output Loops I repeat loops loops loops I repeat until I break ###Markdown &nbsp; Task 2 read a file Read the file cities.text that was imported in task 11. **import cities.txt and open** a. ensure the code was created and run in **task 1** to import cities.txt b. create and run code to re-open cities.txt as cities_file 2. **read() cities_file into a variable called cities**3. Test the read() by displaying the string contained in cities4. 
Test the read() by printing the cities string ###Code # [ ] after import and open of cities.txt in task 1 # [ ] read cities_file as cities cities_contents = cities_file.read() # [ ] display the string: cities cities_contents # [ ] print the string: cities print(cities_contents) ###Output Beijing Cairo London Nairobi New York City Sydney Tokyo ###Markdown &nbsp; Concepts reading a file with `.read(n)` where n = number of characters to read[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/9960a973-339b-40e1-b5b3-1f7db661934e/Unit2_Section4.1d-Reading_Number_of_Characters.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/9960a973-339b-40e1-b5b3-1f7db661934e/Unit2_Section4.1d-Reading_Number_of_Characters.vtt","srclang":"en","kind":"subtitles","label":"english"}]) each time `poem_file.read(10)` runs, the next 10 characters are read.> **Note:** if .read(10) result is = '' &nbsp;(or empty string with no characters), it is likely that the end of the file has been reached. Perform a fresh **.open()** to reset read() to the beginning of the file. &nbsp; Examples examples expect that the cells that import and open of poem1.txt has been run without a read()Run the cell at the top of the notebook to ** import poem1.txt** each line is a different approach to reading and displaying 10 characters of the poem ###Code # [ ] review and run example to read poem1.txt 10 characters at a time poem_file = open('poem1.txt', 'r') poem_10char = poem_file.read(10) print(poem_10char) poem_10char # [ ] review and run example, + 10 more characters # reads and displays without storing in a variable poem_file.read(10) # [ ] review and run example, + 10 more characters # reads and stores in variable poem_parts poem_parts = poem_file.read(10) print(poem_parts) # [ ] REPEATEDLY RUN this cell, + 5 more characters each time run are appended using string addition # [ ] consider why no additional text displays after multiple runs poem_parts += poem_file.read(5) print(poem_parts) ###Output loops loops I ###Markdown &nbsp; Task 3 digits of pi read a set number of digits with .read(n) import, open, read, print1. import digits_of_pi.txt located at https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/digits_of_pi 2. open as **digits_of_pi_text** 3. read()the first 4 characters of digits_of_pi_text into a variable called pi_digits 4. print pi_digits 5. add to pi_digits string with string addition a. add next 4 characters from digits_of_pi obtained from read() b. run the cell multiple times to get more digits of *pi* ###Code # [ ] digits of pi # 1. import digits_of_pi.txt ! curl https://raw.githubusercontent.com/MicrosoftLearning/intropython/master/digits_of_pi -o digits_of_pi.txt # [ ] digits of pi # 2. open as digits_of_pi_text # 3. read() 4 char of digits_of_pi_text to pi_digits variable # 4. print pi_digits digits_file = open('digits_of_pi.txt', 'r') digits_4char = digits_file.read(4) print(digits_4char) # [ ] digits of pi # 5. add to pi_digits string with string addition # a. add next 4 characters from digits_of_pi obtained from read() # b. 
run the cell multiple times to get more digits of *pi* digits_parts = digits_file.read(4) print(digits_parts) digit_8char = digits_4char + digits_parts print(digit_8char) ###Output 6535 3.146535 ###Markdown &nbsp; Concept .read() returns a string These strings can be manipulated just like any other string[![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/f7fb0136-24a3-4a0e-aff2-b1abc2f83029/Unit2_Section4.1e-Read_Returns_a_String.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/f7fb0136-24a3-4a0e-aff2-b1abc2f83029/Unit2_Section4.1e-Read_Returns_a_String.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Boolean tests such as:- .upper() - .title() - string slices, e.g.- `cities[3:9]` - etc.. and string methods can be performed such as:- .isdigit() - .isalpha() - etc... &nbsp; Examples examples expect that the cells that import has been run it may be necessary to run the cell to **import poem1.txt** at top of notebook ###Code # [ ] review and run example poem_file = open('poem1.txt', 'r') poem_part = poem_file.read(15).upper() print(poem_part) # [ ] review and run example poem_part = poem_file.read(6).title() print(poem_part) # [ ] review and run example poem_part = poem_file.read(6) print(poem_part) print(poem_part.isalpha(), "isalpha() because of `\\n`") poem_part # [ ] review and run example poem_file = open('poem1.txt', 'r') poem_text = poem_file.read() print(poem_text[8:26]) ###Output repeat loops loops ###Markdown &nbsp; Task 4 City Initials Read the file cities.text that was imported in task 11. ensure the code was created and run in **task 1** to import cities.txt 2. create and run code to re-open cities.txt as cities_file 3. **`read()`** cities_file into a variable called cities 4. iterate through the characters in cities a. test if .isupper(), if True append the character to a string variable: initials c. else if (elif) character is "\n", if True append the "\n" to initials 5. print initials ###Code # [ ] compelete the task cities_file = open('cities.txt', 'r') cities = cities_file.read() cities initials = "" for char in cities: if char.isupper(): initials += char elif char == "\n": initials += char print(initials) initials ###Output B C L N NYC S T
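###Markdown &nbsp; The open/read pattern used in the tasks above can also be wrapped in a small function. This is a minimal sketch (not part of the original tasks): it assumes cities.txt has already been downloaded as in Task 1, and the function name `read_initials` is just for illustration. A `with` block closes the file automatically once reading is finished:
```python
# a minimal sketch, assuming cities.txt was downloaded in Task 1
def read_initials(file_name):
    # 'with' closes the file automatically when the block ends
    with open(file_name, 'r') as a_file:
        contents = a_file.read()
    initials = ""
    for char in contents:
        # keep capital letters and the newlines that separate the cities
        if char.isupper() or char == "\n":
            initials += char
    return initials

print(read_initials('cities.txt'))
```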
computation/01 - The Turing Machine.ipynb
###Markdown Turing machines Parts All Turing machines have the following parts Tape An **infinitely** long tape, like the memory: imagine `...BBBBBBBBBBBBBBBBBBB...` as a blank tape Head A `head` (say `^`) that points to a location/cell on the tape. Any write or read is done at the `head`, and the `head` can be moved right or left ```shell ...BBBBBBBBBBBBBBBBBBBBBB... ^ ``` Instructions An instruction has two parts to it 1. read the current value on the tape and store a new value (which could be the same old value) to the tape 2. move the `head` `right` or `left` by **1** e.g. Given the state below ``` ...BBBBBBBBBBBBB... ^ ``` and these instructions: ```python right right store 2 ``` the machine will compute the state as ``` Old: ...BBBBBBBBBBBBB... ^ Now: ...BBBBB2BBBBBB... ^ ``` **NOTE:** these aren't **real** Turing machine instructions ... so let's look at a simple Turing machine that flips `B -> X` and `X -> B` the X_B machine The X_B machine Goal Say a machine with only 2 cells on the tape, `BB`; we want to flip the first cell forever, i.e. `BB -> XB -> BB -> XB -> ... ` Initial attempt without any state Instructions may look like this to _start_ with ``` Start (s1) flip B B -> X, R | BB -> XB | ^ ^ move the head back B -> B, L | XB -> XB | ^ ^ flip X to B X -> B, R | XB -> BB | ^ ^ move the head back and back to the first state B -> B, L | BB -> BB | ^ ^ ``` State information The problem with the solution above is this: say the `head` reads `B`, what should it do? Should it execute the first instruction `B -> X, R` or the last one `B -> B, L`? To disambiguate, Turing introduces **state**, so that an instruction reads something like "given this `state`, when the `head` reads `X`, do `Y`". This turns the above set of instructions into ``` s1: BB ^ B, s1 -> X, R, s2 | BB -> XB | ^ ^ B, s2 -> B, L, s3 | XB -> XB | ^ ^ X, s3 -> B, R, s4 | XB -> BB | ^ ^ B, s4 -> B, L, s1 | BB -> BB | ^ ^ ``` This answers the question that confounded us above: which instruction to execute depends on the state the machine is in. So when the `head` reads `B`, the instruction it executes depends on the current state. Now let's translate all this into `python` code. 
###Code X_B = { ("B", "s1"): ("X", "R", "s2"), ("B", "s2"): ("B", "L", "s3"), ("X", "s3"): ("B", "R", "s4"), ("B", "s4"): ("B", "L", "s1"), } def print_state(tape, head, state): print(state.rjust(4), ":", "".join(tape)) print(" ", " " * head, "^") def execute_v0(instructions): # init the machine # forever # read the value at head # lookup instruction for value and state # set the value # move the head head = 0 # these are HACKS as we won't set the state list this and # the tape should be infinite state = "s1" tape = ['B', 'B'] for _ in range(9): print_state(tape, head, state) current_val = tape[head] lookup = (current_val, state) target_val, move_dir, target_state = instructions[lookup] tape[head] = target_val state = target_state head += 1 if move_dir == "R" else -1 execute_v0(X_B) ###Output s1 : BB ^ s2 : XB ^ s3 : XB ^ s4 : BB ^ s1 : BB ^ s2 : XB ^ s3 : XB ^ s4 : BB ^ s1 : BB ^ ###Markdown We are simplify the above a lot like below: ###Code def execute_terse(instructions): head, state, tape = 0, "s1", ['B', 'B'] for _ in range(9): print_state(tape, head, state) tape[head], move_dir, state = instructions[(tape[head], state)] head += 1 if move_dir == "R" else -1 execute_terse(X_B) ###Output s1 : BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB ^ s2 : XBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB ^ s3 : XBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB ^ s4 : BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB ^ s1 : BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB ^ s2 : XBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB ^ s3 : XBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB ^ s4 : BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB ^ s1 : BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB ^ ###Markdown The Adder program ###Code def execute(instructions): head, state, tape = 0, "s1", ['B'] * 32 # 22 the number sufficient to get the programs running fine for _ in range(22): print_state(tape, head, state) tape[head], move_dir, state = instructions[(tape[head], state)] head += 1 if move_dir == "R" else -1 # add (11+111)BBBBB to get (11111)BBBBB ADDER_INPUT = { # input ("B", "s1"): ("(", "R", "s2"), ("B", "s2"): ("1", "R", "s3"), ("B", "s3"): ("1", "R", "s4"), ("B", "s4"): ("+", "R", "s5"), ("B", "s5"): ("1", "R", "s6"), ("B", "s6"): ("1", "R", "s7"), ("B", "s7"): ("1", "R", "s8"), ("B", "s8"): (")", "R", "s9"), ("B", "s9"): ("B", "R", "s9"), } execute(ADDER_INPUT) # input and logic ADDER = { # input ("B", "s1"): ("(", "R", "s2"), ("B", "s2"): ("1", "R", "s3"), ("B", "s3"): ("1", "R", "s4"), ("B", "s4"): ("+", "R", "s5"), ("B", "s5"): ("1", "R", "s6"), ("B", "s6"): ("1", "R", "s7"), ("B", "s7"): ("1", "R", "s8"), ("B", "s8"): (")", "R", "s9"), # logic: go back until we find + ("B", "s9"): ("B", "L", "s10"), (")", "s10"): (")", "L", "s11"), ("1", "s11"): ("1", "L", "s11"), # loop back ("+", "s11"): ("1", "R", "s12"), ("1", "s12"): ("1", "R", "s12"), (")", "s12"): ("B", "L", "s13"), ("1", "s13"): (")", "R", "s14"), ("B", "s14"): ("B", "R", "s14"), } execute(ADDER) ###Output s1 : BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB ^ s2 : (BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB ^ s3 : (1BBBBBBBBBBBBBBBBBBBBBBBBBBBBBB ^ s4 : (11BBBBBBBBBBBBBBBBBBBBBBBBBBBBB ^ s5 : (11+BBBBBBBBBBBBBBBBBBBBBBBBBBBB ^ s6 : (11+1BBBBBBBBBBBBBBBBBBBBBBBBBBB ^ s7 : (11+11BBBBBBBBBBBBBBBBBBBBBBBBBB ^ s8 : (11+111BBBBBBBBBBBBBBBBBBBBBBBBB ^ s9 : (11+111)BBBBBBBBBBBBBBBBBBBBBBBB ^ s10 : (11+111)BBBBBBBBBBBBBBBBBBBBBBBB ^ s11 : (11+111)BBBBBBBBBBBBBBBBBBBBBBBB ^ s11 : (11+111)BBBBBBBBBBBBBBBBBBBBBBBB ^ s11 : (11+111)BBBBBBBBBBBBBBBBBBBBBBBB ^ s11 : (11+111)BBBBBBBBBBBBBBBBBBBBBBBB ^ s12 : (111111)BBBBBBBBBBBBBBBBBBBBBBBB ^ s12 : (111111)BBBBBBBBBBBBBBBBBBBBBBBB ^ s12 : 
(111111)BBBBBBBBBBBBBBBBBBBBBBBB ^ s12 : (111111)BBBBBBBBBBBBBBBBBBBBBBBB ^ s13 : (111111BBBBBBBBBBBBBBBBBBBBBBBBB ^ s14 : (11111)BBBBBBBBBBBBBBBBBBBBBBBBB ^ s14 : (11111)BBBBBBBBBBBBBBBBBBBBBBBBB ^ s14 : (11111)BBBBBBBBBBBBBBBBBBBBBBBBB ^
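###Markdown A possible refinement (a sketch, not part of the machines above; the names `execute_until_halt` and `X_B_HALTING` are made up for illustration): a real Turing machine stops by reaching a halting state rather than running for a fixed number of steps, and its tape is unbounded. A `defaultdict` gives a blank, unbounded tape, and adding a `halt` state lets the machine decide when to stop:
```python
from collections import defaultdict

def execute_until_halt(instructions, max_steps=100):
    head, state = 0, "s1"
    tape = defaultdict(lambda: "B")   # an unbounded tape, blank by default
    for _ in range(max_steps):        # safety cap: some machines never halt
        if state == "halt":
            break
        tape[head], move_dir, state = instructions[(tape[head], state)]
        head += 1 if move_dir == "R" else -1
    return tape, head, state

# the X_B machine, rewritten to stop after one full flip cycle
X_B_HALTING = {
    ("B", "s1"): ("X", "R", "s2"),
    ("B", "s2"): ("B", "L", "s3"),
    ("X", "s3"): ("B", "R", "s4"),
    ("B", "s4"): ("B", "L", "halt"),
}

tape, head, state = execute_until_halt(X_B_HALTING)
print(dict(tape), head, state)
```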
API statistics.ipynb
###Markdown PDBe API Training This interactive Python notebook will guide you through various ways of programmatically accessing Protein Data Bank in Europe (PDBe) data using the REST API. The REST API is a programmatic way to obtain information from the PDB and EMDB. You can access details about: * sample * experiment * models * compounds * cross-references * publications * quality * assemblies and more... For more information, visit https://www.ebi.ac.uk/pdbe/pdbe-rest-api Notebook 6 This notebook is the sixth in the training material series, and focuses on getting information for multiple PDB entries using the REST search API of PDBe. 1) Making imports and setting variables First, we import some packages that we will use, and set some variables. Note: the full list of valid URLs is available from https://www.ebi.ac.uk/pdbe/api/doc/ ###Code import requests # used for getting data from a URL from pprint import pprint # pretty print import matplotlib.pyplot as plt # plotting results import pandas as pd # used for turning results into mini databases # make graphs show on the page %matplotlib inline # use plotly and cufflinks to make interactive plots import cufflinks as cf from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot init_notebook_mode(connected=True) cf.go_offline() # settings for PDBe API base_url = "https://www.ebi.ac.uk/pdbe/" # the beginning of the URL for PDBe's API. api_base = base_url + "api/" search_url = base_url + 'search/pdb/select?' # the rest of the URL used for PDBe's search API. ###Output _____no_output_____ ###Markdown 2) a function to get data from the search API Let's start by defining a function that can be used to GET data from the PDBe search API. ###Code def make_request(search_term, number_of_rows=10): """ This function can make GET requests to the PDBe search API :param search_term: String, the formatted query string :param number_of_rows: Int, maximum number of results to return :return: JSON """ search_variables = '&wt=json&rows={}'.format(number_of_rows) url = search_url+search_term+search_variables print(url) response = requests.get(url) if response.status_code == 200: return response.json() else: print("[No data retrieved - %s] %s" % (response.status_code, response.text)) return {} ###Output _____no_output_____ ###Markdown 3) formatting the search terms This will allow us to use human-readable search terms; the function below turns them into a query string that the search API can handle. 
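For example, a dictionary of human-readable terms is turned into a Solr-style query string by the function defined in the next cell. A sketch of the expected behaviour (this exact string also appears in the request URLs printed later in this notebook):
```python
# a sketch of what format_search_terms (defined in the next cell) is expected to produce
search_terms = {"molecule_name": "Dihydrofolate reductase", "organism_name": "Human"}
# format_search_terms(search_terms) should print and return:
# 'q=molecule_name:"Dihydrofolate reductase" AND organism_name:Human'
```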
###Code def format_search_terms(search_terms, filter_terms=None): # print('formatting search terms: %s' % search_terms) search_string = '' filter_string = '' search_list = [] if isinstance(search_terms, dict): for key in search_terms: term = search_terms.get(key) if ' ' in term: if not '[' in term: if not '"' in term: term = '"{}"'.format(term) elif not "'" in term: term = "'{}'".format(term) search_list.append('{}:{}'.format(key, term)) search_string = ' AND '.join(search_list) else: if '&' in search_terms: search_string = search_terms.replace('&', ' AND ') else: search_string = search_terms if filter_terms: filter_string = '&fl={}'.format(','.join(filter_terms)) # print('formatted search terms: %s' % search_string) final_search_string = 'q={}{}'.format(search_string, filter_string) print(final_search_string) return final_search_string ###Output _____no_output_____ ###Markdown 4) Getting useful data out of the search This function will run the search and return a list of the results. ###Code def run_search(search_terms, filter_terms=None, number_of_rows=100): search_term = format_search_terms(search_terms, filter_terms) response = make_request(search_term, number_of_rows) results = response.get('response', {}).get('docs', []) print('Number of results for {}: {}'.format(','.join(search_terms.values()), len(results))) return results ###Output _____no_output_____ ###Markdown 5) running a search Now we are ready to actually run a search against the PDBe search API for entries containing human Dihydrofolate reductase. This will return a list of results (only 10 to start with). A list of search terms is available at: https://www.ebi.ac.uk/pdbe/api/doc/search.html This will return details of the human Dihydrofolate reductase structures in the PDB. The search terms are defined as a dictionary (a hash in other programming languages), e.g. {"molecule_name":"Dihydrofolate reductase"}. Here we are searching for molecules named Dihydrofolate reductase. If we search for two terms, i.e. molecule_name and organism_scientific_name, then we will get molecules that match both search terms. We will return the number of results for two searches. The first one will hit the limit of 100: there are more than 100 Dihydrofolate reductase structures, so we have to set the argument "number_of_rows" to a higher number, say 1000, to find all of them. ###Code print('1st search') search_terms = {"molecule_name":"Dihydrofolate reductase"} results = run_search(search_terms) results = run_search(search_terms, number_of_rows=1000) ###Output https://www.ebi.ac.uk/pdbe/search/pdb/select?q=molecule_name:"Dihydrofolate reductase"&wt=json&rows=1000 Number of results for Dihydrofolate reductase: 365 ###Markdown We will add organism_name of Human to the query to limit the results to only return those that are structures of Human Dihydrofolate reductase. ###Code print('2nd search') search_terms = {"molecule_name":"Dihydrofolate reductase", "organism_name":"Human" } results = run_search(search_terms) ###Output 2nd search https://www.ebi.ac.uk/pdbe/search/pdb/select?q=molecule_name:"Dihydrofolate reductase" AND organism_name:Human&wt=json&rows=100 Number of results for Dihydrofolate reductase,Human: 79 ###Markdown We will then look at one result in detail. We will print the data we have for the first result, which is the first item of the list "results", i.e. results[0]. We are using "pprint" (pretty print) rather than "print" to make the result easier to read. 
###Code pprint(results[0]) ###Output a': 100.967, 'q_nigli_cell_c': 56.376, 'q_nigli_cell_gamma': 100.967, 'q_nigli_cell_symmetry': 'R32', 'q_num_interacting_entity_id': [0], 'q_number_of_bound_entities': 2, 'q_number_of_bound_molecules': 2, 'q_number_of_copies': 1, 'q_number_of_models': 1, 'q_number_of_polymer_entities': 1, 'q_number_of_polymer_residues': 374, 'q_number_of_polymers': 1, 'q_number_of_protein_chains': 1, 'q_organism_name': ['Homo sapiens', 'Man', 'Homo Sapiens (Human)', 'Human', 'Homo Sapiens', 'Homo sapiens', 'Homo', 'Homininae', 'Hominidae', 'Primates', 'Mammalia', 'Chordata', 'Metazoa', 'Eukaryota'], 'q_organism_scientific_name': ['Homo sapiens'], 'q_organism_synonyms': ['Man', 'Homo Sapiens (Human)', 'Human', 'Homo Sapiens', 'Homo sapiens', 'Homo', 'Homininae', 'Hominidae', 'Primates', 'Mammalia', 'Chordata', 'Metazoa', 'Eukaryota'], 'q_overall_quality': -63.0, 'q_pdb_accession': '1s3w', 'q_pdb_format_compatible': 'Y', 'q_pdb_id': '1s3w', 'q_percent_solvent': 53.04, 'q_pfam': ['PF00186 : DHFR_1'], 'q_pfam_accession': ['PF00186'], 'q_pfam_clan': ['CL0387 : DHFred'], 'q_pfam_clan_name': ['DHFred'], 'q_pfam_description': ['Dihydrofolate reductase'], 'q_pfam_name': ['DHFR_1'], 'q_pivot_resolution': 1.9, 'q_polymer_length': 186, 'q_prefered_assembly_id': '1', 'q_primary_wavelength': 1.5418, 'q_processing_site': 'RCSB', 'q_pubmed_author_list': ['Cody V, Luft JR, Pangborn W, Gangjee A, Queener SF'], 'q_pubmed_authors': ['Cody V', 'Luft JR', 'Pangborn W', 'Gangjee A', 'Queener SF'], 'q_pubmed_id': '15039552', 'q_r_factor': 0.216, 'q_r_free': 0.216, 'q_r_work': [0.177], 'q_rank': ['species', 'genus', 'subfamily', 'family', 'order', 'class', 'phylum', 'kingdom', 'superkingdom', 'species', 'genus', 'family', 'order', 'class', 'phylum', 'superkingdom'], 'q_refinement_software': ['CNS'], 'q_release_date': '2004-03-30T01:00:00Z', 'q_release_year': 2004, 'q_resolution': 1.9, 'q_revision_date': '2011-07-13T01:00:00Z', 'q_revision_year': 2011, 'q_sample_preparation_method': ['Engineered'], 'q_scop_class': ['Alpha and beta proteins (a/b)'], 'q_scop_family': ['Dihydrofolate reductases'], 'q_scop_fold': ['Dihydrofolate reductase-like'], 'q_scop_superfamily': ['Dihydrofolate reductase-like'], 'q_seq_100_cluster_number': '33614', 'q_seq_100_cluster_rank': 36, 'q_seq_30_cluster_number': '12361', 'q_seq_30_cluster_rank': 60, 'q_seq_40_cluster_number': '23062', 'q_seq_40_cluster_rank': 60, 'q_seq_50_cluster_number': '32707', 'q_seq_50_cluster_rank': 60, 'q_seq_70_cluster_number': '28973', 'q_seq_70_cluster_rank': 60, 'q_seq_90_cluster_number': '43363', 'q_seq_90_cluster_rank': 55, 'q_seq_95_cluster_number': '26752', 'q_seq_95_cluster_rank': 49, 'q_spacegroup': 'H 3', 'q_status': 'REL', 'q_struct_asym_id': ['A'], 'q_structure_determination_method': ['FOURIER SYNTHESIS'], 'q_structure_solution_software': ['CNS'], 'q_superkingdom': ['Eukaryota'], 'q_tax_id': [9606], 'q_tax_query': [9606], 'q_title': 'Structure Determination of Tetrahydroquinazoline Antifoaltes in ' 'Complex with Human and Pneumocystis carinii Dihydrofolate ' 'Reductase: Correlations of Enzyme Selectivity and Stereochemistry', 'q_uniprot': ['P00374 : DYR_HUMAN'], 'q_uniprot_accession': ['P00374', 'P00374-2'], 'q_uniprot_accession_best': ['P00374'], 'q_uniprot_best': ['P00374 : DYR_HUMAN'], 'q_uniprot_coverage': [0.99], 'q_uniprot_features': ['Protein has possible alternate isoforms', 'DHFR', 'Protein has possible natural variant ', 'Nucleotide binding - NADP'], 'q_uniprot_id': ['DYR_HUMAN', 'DYR_HUMAN'], 'q_uniprot_id_best': 
['DYR_HUMAN'], 'q_uniprot_non_canonical': ['P00374-2 : DYR_HUMAN'], 'q_unp_count': 1, 'q_unp_nf90_accession': ['B0YJ76', 'A0A2K6C3Y8', 'P00374', 'A0A024RAQ3', 'S5WD14', 'S5VM81'], 'q_unp_nf90_id': ['B0YJ76_HUMAN', 'A0A2K6C3Y8_MACNE', 'DYR_HUMAN', 'A0A024RAQ3_HUMAN', 'S5WD14_SHISS', 'S5VM81_ECO57'], 'q_unp_nf90_organism': ['Homo sapiens (Human)', 'Macaca nemestrina (Pig-tailed macaque)', 'Homo sapiens (Human)', 'Homo sapiens (Human)', 'Shigella sonnei (strain Ss046)', 'Escherichia coli O157:H7'], 'q_unp_nf90_protein_name': ['Dihydrofolate reductase', 'DHFR domain-containing protein', 'Dihydrofolate reductase', 'Dihydrofolate reductase, isoform CRA_a', 'Trimethoprim resistant protein', 'Trimethoprim resistant protein'], 'q_unp_nf90_tax_id': ['9606', '9545', '9606', '9606', '300269', '83334'], 'r_factor': 0.216, 'r_free': 0.216, 'r_work': [0.177], 'rank': ['species', 'genus', 'subfamily', 'family', 'order', 'class', 'phylum', 'kingdom', 'superkingdom', 'species', 'genus', 'family', 'order', 'class', 'phylum', 'superkingdom'], 'refinement_software': ['CNS'], 'release_date': '2004-03-30T01:00:00Z', 'release_year': 2004, 'resolution': 1.9, 'revision_date': '2011-07-13T01:00:00Z', 'revision_year': 2011, 'sample_preparation_method': ['Engineered'], 'scop_class': ['Alpha and beta proteins (a/b)'], 'scop_family': ['Dihydrofolate reductases'], 'scop_fold': ['Dihydrofolate reductase-like'], 'scop_superfamily': ['Dihydrofolate reductase-like'], 'seq_100_cluster_number': '33614', 'seq_100_cluster_rank': 36, 'seq_30_cluster_number': '12361', 'seq_30_cluster_rank': 60, 'seq_40_cluster_number': '23062', 'seq_40_cluster_rank': 60, 'seq_50_cluster_number': '32707', 'seq_50_cluster_rank': 60, 'seq_70_cluster_number': '28973', 'seq_70_cluster_rank': 60, 'seq_90_cluster_number': '43363', 'seq_90_cluster_rank': 55, 'seq_95_cluster_number': '26752', 'seq_95_cluster_rank': 49, 'spacegroup': 'H 3', 'status': 'REL', 'struct_asym_id': ['A'], 'structure_determination_method': ['FOURIER SYNTHESIS'], 'structure_solution_software': ['CNS'], 'superkingdom': ['Eukaryota'], 't_abstracttext_unassigned': ['Structural data are reported for the first ' 'examples of the tetrahydroquinazoline ' 'antifolate ' '(6R,6S)-2,4-diamino-6-(1-indolinomethyl)-5,6,7,8-tetrahydroquinazoline ' '(1) and its trimethoxy analogue ' "(6R,6S)-2,4-diamino-6-(3',4',5'-trimethoxybenzyl)-5,6,7,8-tetrahydroquinazoline " '(2) as inhibitor complexes with dihydrofolate ' 'reductase (DHFR) from human (hDHFR) and ' 'Pneumocystis carinii (pcDHFR) sources. The ' 'indoline analogue (1) was crystallized as ' 'ternary complexes with NADPH and hDHFR (1.9 A ' 'resolution) and pcDHFR (2.3 A resolution), ' 'while the trimethoxy quinazoline analogue (2) ' 'was crystallized as a binary complex with ' 'hDHFR in two polymorphic rhombohedral R3 ' 'lattices: R3(1) to 1.8 A resolution and R3(2) ' 'to 2.0 A resolution. Structural analysis of ' 'these potent and selective DHFR-inhibitor ' 'complexes revealed preferential binding of the ' '6S-equatorial isomer in each structure. This ' 'configuration is similar to that of the ' 'natural tetrahydrofolate substrate; that is, ' '6S. These data also show that in both the ' 'hDHFR and pcDHFR ternary complexes with (1) ' 'the indoline ring is partially disordered, ' 'with two static conformations that differ ' 'between structures. These conformers also ' 'differ from that observed for the ' 'trimethoxybenzyl ring of tetrahydroquinazoline ' '(2). 
There is also a correlation between the ' 'disorder of the flexible loop 23 and the ' 'disorder of the cofactor nicotinamide ribose ' 'ring in the pcDHFR-NADPH-(1) ternary complex. ' 'Comparison of the Toxoplasma gondii DHFR ' '(tgDHFR) sequence with those of other DHFRs ' 'provides insight into the role of sequence and ' 'conformation in inhibitor-binding preferences ' 'which may aid in the design of novel ' 'antifolates with specific DHFR selectivity.'], 't_all_compound_names': ['Nicotinamide-adenine dinucleotide', 'NAP', 'NAP : NADP NICOTINAMIDE-ADENINE-DINUCLEOTIDE ' 'PHOSPHATE', 'TQT : ' '6-(OCTAHYDRO-1H-INDOL-1-YLMETHYL)DECAHYDROQUINAZOLINE-2,4-DIAMINE', "NAP : 2'-MONOPHOSPHOADENOSINE 5'-DIPHOSPHORIBOSE", 'TQT : ' '(6S)-6-(2,3,3a,4,5,6,7,7a-octahydroindol-1-ylmethyl)-1,2,3,4,4a,5,6,7,8,8a-decahydroquinazoline-2,4-diamine', 'NAP : ' '[(2R,3S,4R,5R)-5-(3-aminocarbonylpyridin-1-ium-1-yl)-3,4-dihydroxy-oxolan-2-yl]methyl ' '[[(2R,3R,4R,5R)-5-(6-aminopurin-9-yl)-3-hydroxy-4-phosphonooxy-oxolan-2-yl]methoxy-hydroxy-phosphoryl] ' 'phosphate', 'TQT : ' '(2R,4R,4aR,6S,8aS)-6-[(3aR,7aS)-octahydro-1H-indol-1-ylmethyl]decahydroquinazoline-2,4-diamine'], 't_all_enzyme_names': ['Oxidoreductases', 'Acting on the CH-NH group of donors', 'With NAD(+) or NADP(+) as acceptor', 'Dihydrofolate reductase', '1.5.1.3 : Dihydrofolate reductase', '5,6,7,8-tetrahydrofolate:NADP(+) oxidoreductase'], 't_all_go_terms': ['cytoplasm', 'mitochondrion', 'cytosol', 'folic acid binding', 'oxidoreductase activity', 'NADPH binding', 'RNA binding', 'sequence-specific mRNA binding', 'mRNA binding', 'dihydrofolate reductase activity', 'methotrexate binding', 'NADP binding', 'translation repressor activity, mRNA regulatory element ' 'binding', 'drug binding', 'one-carbon metabolic process', 'folic acid metabolic process', 'negative regulation of translation', 'response to methotrexate', 'regulation of removal of superoxide radicals', 'tetrahydrobiopterin biosynthetic process', 'tetrahydrofolate metabolic process', 'tetrahydrofolate biosynthetic process', 'regulation of transcription involved in G1/S transition ' 'of mitotic cell cycle', 'positive regulation of nitric-oxide synthase activity', 'oxidation-reduction process', 'axon regeneration', 'dihydrofolate metabolic process'], 't_all_sequence_family': ['IPR001796 : Dihydrofolate reductase domain', 'IPR017925 : Dihydrofolate reductase conserved site', 'IPR024072 : Dihydrofolate reductase-like domain ' 'superfamily', 'PF00186 : DHFR_1', 'CL0387 : DHFred'], 't_all_structure_family': ['3-Layer(aba) Sandwich', 'Alpha Beta', '3.40.430.10', 'Dihydrofolate Reductase, subunit A', 'Dihydrofolate Reductase, subunit A', 'Alpha and beta proteins (a/b)', 'Dihydrofolate reductases', 'Dihydrofolate reductase-like', 'Dihydrofolate reductase-like'], 't_citation_authors': ['Cody V', 'Luft JR', 'Pangborn W', 'Gangjee A', 'Queener SF'], 't_citation_title': ['Structure determination of tetrahydroquinazoline ' 'antifolates in complex with human and Pneumocystis ' 'carinii dihydrofolate reductase: correlations between ' 'enzyme selectivity and stereochemistry.'], 't_entry_authors': ['Cody V', 'Luft JR', 'Pangborn W', 'Gangjee A', 'Queener SF'], 't_entry_info': ['DENZO', 'SCALEPACK', 'Image plate', 'RIGAKU RAXIS IIC', 'X-ray diffraction', 'CNS', 'FOURIER SYNTHESIS', 'CNS'], 't_entry_title': ['Structure Determination of Tetrahydroquinazoline ' 'Antifoaltes in Complex with Human and Pneumocystis carinii ' 'Dihydrofolate Reductase: Correlations of Enzyme ' 'Selectivity and Stereochemistry'], 
't_expression_organism_name': ['Escherichia coli', 'Bacterium Coli', 'Enterococcus Coli', 'Bacterium 10a', 'Escherichia Coli', 'Escherichia Sp. 3_2_53faa', 'Escherichia/Shigella Coli', 'Ecolx', 'Bacterium E3', 'Bacterium Coli Commune', 'E. Coli', 'Bacillus Coli', 'Escherichia Sp. Mar', 'Escherichia coli', 'Escherichia', 'Enterobacteriaceae', 'Enterobacterales', 'Gammaproteobacteria', 'Proteobacteria', 'Bacteria'], 't_journal': ['Acta Crystallogr. D Biol. Crystallogr.'], 't_mesh_terms': ['Crystallography, X-Ray,Folic Acid ' 'Antagonists,Humans,Models, Molecular,Molecular ' 'Structure,Pneumocystis carinii,Protein Binding,Protein ' 'Conformation,Quinazolines,Stereoisomerism,Substrate ' 'Specificity,Tetrahydrofolate Dehydrogenase'], 't_molecule_info': ['protein structure', 'homo', 'monomer', 'Dihydrofolate reductase', 'Dihydrofolate reductase', 'protein structure', 'homo', 'monomer', 'DHFR', 'P00374', 'P00374-2', 'Protein has possible alternate isoforms', 'DHFR', 'Protein has possible natural variant ', 'Nucleotide binding - NADP', 'DYR_HUMAN', 'DYR_HUMAN'], 't_molecule_sequence': 'VGSLNCIVAVSQNMGIGKNGDLPWPPLRNEFRYFQRMTTTSSVEGKQNLVIMGKKTWFSIPEKNRPLKGRINLVLSRELKEPPQGAHFLSRSLDDALKLTEQPELANKVDMVWIVGGSSVYKEAMNHPGHLKLFVTRIMQDFESDTFFPEIDLEKYKLLPEYPGVLSDVQEEKGIKYKFEVYEKND', 't_organism_name': ['Homo sapiens', 'Man', 'Homo Sapiens (Human)', 'Human', 'Homo Sapiens', 'Homo sapiens', 'Homo', 'Homininae', 'Hominidae', 'Primates', 'Mammalia', 'Chordata', 'Metazoa', 'Eukaryota'], 'tax_id': [9606], 'tax_query': [9606], 'title': 'Structure Determination of Tetrahydroquinazoline Antifoaltes in ' 'Complex with Human and Pneumocystis carinii Dihydrofolate ' 'Reductase: Correlations of Enzyme Selectivity and Stereochemistry', 'uniprot': ['P00374 : DYR_HUMAN'], 'uniprot_accession': ['P00374', 'P00374-2'], 'uniprot_accession_best': ['P00374'], 'uniprot_best': ['P00374 : DYR_HUMAN'], 'uniprot_coverage': [0.99], 'uniprot_features': ['Protein has possible alternate isoforms', 'DHFR', 'Protein has possible natural variant ', 'Nucleotide binding - NADP'], 'uniprot_id': ['DYR_HUMAN', 'DYR_HUMAN'], 'uniprot_id_best': ['DYR_HUMAN'], 'uniprot_non_canonical': ['P00374-2 : DYR_HUMAN'], 'unp_count': 1, 'unp_nf90_accession': ['B0YJ76', 'A0A2K6C3Y8', 'P00374', 'A0A024RAQ3', 'S5WD14', 'S5VM81'], 'unp_nf90_id': ['B0YJ76_HUMAN', 'A0A2K6C3Y8_MACNE', 'DYR_HUMAN', 'A0A024RAQ3_HUMAN', 'S5WD14_SHISS', 'S5VM81_ECO57'], 'unp_nf90_organism': ['Homo sapiens (Human)', 'Macaca nemestrina (Pig-tailed macaque)', 'Homo sapiens (Human)', 'Homo sapiens (Human)', 'Shigella sonnei (strain Ss046)', 'Escherichia coli O157:H7'], 'unp_nf90_protein_name': ['Dihydrofolate reductase', 'DHFR domain-containing protein', 'Dihydrofolate reductase', 'Dihydrofolate reductase, isoform CRA_a', 'Trimethoprim resistant protein', 'Trimethoprim resistant protein'], 'unp_nf90_tax_id': ['9606', '9545', '9606', '9606', '300269', '83334']} ###Markdown As you can see we get lots of data back about the individual molecule we have searched for and the PDB entries in which it is contained. We can get the PDB ID and experimental method for this first row as follows. ###Code print(results[0].get('pdb_id')) print(results[0].get('experimental_method')) ###Output 1s3w ['X-ray diffraction'] ###Markdown We can restrict the results to only the information we want using a filter so its easier to see the information we want. 
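The filter terms are passed to the search API as the fl (field list) parameter built by format_search_terms above (e.g. &fl=pdb_id,resolution,release_year), so only those fields are returned for each entry. A minimal sketch reusing run_search from above (the field names here are just illustrative choices; the values returned depend on the entry):
```python
# a sketch: restrict each returned document to a few fields of interest
filter_terms = ['pdb_id', 'resolution', 'release_year']
results = run_search({"molecule_name": "Dihydrofolate reductase"}, filter_terms)
```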
###Code print('3rd search') range = "[2010 TO 2019]" range = range.strip('\"') print(range) search_terms = {"release_year":range, "status":"REL" } filter_terms = ['pdb_id', 'experimental_method', 'release_year', 'resolution', 'assembly_mol_wt', 'synchrotron_site', 'journal'] results = run_search(search_terms, filter_terms) pprint(results) ###Output 3rd search [2010 TO 2019] q=release_year:[2010 TO 2019] AND status:REL&fl=pdb_id,experimental_method,release_year,resolution,assembly_mol_wt,synchrotron_site,journal https://www.ebi.ac.uk/pdbe/search/pdb/select?q=release_year:[2010 TO 2019] AND status:REL&fl=pdb_id,experimental_method,release_year,resolution,assembly_mol_wt,synchrotron_site,journal&wt=json&rows=100 Number of results for [2010 TO 2019],REL: 100 [{'assembly_mol_wt': 2196.434, 'experimental_method': ['Electron Microscopy'], 'journal': 'Nat. Struct. Mol. Biol.', 'pdb_id': '5o2r', 'release_year': 2017, 'resolution': 3.4}, {'assembly_mol_wt': 28.444, 'experimental_method': ['X-ray diffraction'], 'journal': 'J. Med. Chem.', 'pdb_id': '4na8', 'release_year': 2014, 'resolution': 2.3}, {'assembly_mol_wt': 79.563, 'experimental_method': ['X-ray diffraction'], 'journal': 'J. Virol.', 'pdb_id': '4a8s', 'release_year': 2012, 'resolution': 2.9, 'synchrotron_site': ['ESRF']}, {'assembly_mol_wt': 2196.434, 'experimental_method': ['Electron Microscopy'], 'journal': 'Nat. Struct. Mol. Biol.', 'pdb_id': '5o2r', 'release_year': 2017, 'resolution': 3.4}, {'assembly_mol_wt': 113.636, 'experimental_method': ['X-ray diffraction'], 'journal': 'Proteins', 'pdb_id': '4fio', 'release_year': 2013, 'resolution': 1.37, 'synchrotron_site': ['Australian Synchrotron']}, {'assembly_mol_wt': 2217.997, 'experimental_method': ['X-ray diffraction'], 'journal': 'Proc. Natl. Acad. Sci. U.S.A.', 'pdb_id': '4lsk', 'release_year': 2014, 'resolution': 3.4800062, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 2196.434, 'experimental_method': ['Electron Microscopy'], 'journal': 'Nat. Struct. Mol. Biol.', 'pdb_id': '5o2r', 'release_year': 2017, 'resolution': 3.4}, {'assembly_mol_wt': 2196.434, 'experimental_method': ['Electron Microscopy'], 'journal': 'Nat. Struct. Mol. Biol.', 'pdb_id': '5o2r', 'release_year': 2017, 'resolution': 3.4}, {'assembly_mol_wt': 54.07, 'experimental_method': ['X-ray diffraction'], 'journal': 'To be published', 'pdb_id': '3rj8', 'release_year': 2012, 'resolution': 2.4, 'synchrotron_site': ['EMBL/DESY, Hamburg']}, {'assembly_mol_wt': 2217.997, 'experimental_method': ['X-ray diffraction'], 'journal': 'Proc. Natl. Acad. Sci. U.S.A.', 'pdb_id': '4lsk', 'release_year': 2014, 'resolution': 3.4800062, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 2217.997, 'experimental_method': ['X-ray diffraction'], 'journal': 'Proc. Natl. Acad. Sci. U.S.A.', 'pdb_id': '4lsk', 'release_year': 2014, 'resolution': 3.4800062, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 2281.701, 'experimental_method': ['X-ray diffraction'], 'journal': 'Cell Rep', 'pdb_id': '5dfe', 'release_year': 2016, 'resolution': 3.0999756, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 2196.434, 'experimental_method': ['Electron Microscopy'], 'journal': 'Nat. Struct. Mol. Biol.', 'pdb_id': '5o2r', 'release_year': 2017, 'resolution': 3.4}, {'assembly_mol_wt': 89.883, 'experimental_method': ['X-ray diffraction'], 'journal': 'Sci Adv', 'pdb_id': '4x9g', 'release_year': 2015, 'resolution': 3.403, 'synchrotron_site': ['BSRF']}, {'assembly_mol_wt': 2217.997, 'experimental_method': ['X-ray diffraction'], 'journal': 'Proc. Natl. Acad. Sci. 
U.S.A.', 'pdb_id': '4lsk', 'release_year': 2014, 'resolution': 3.4800062, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 2217.997, 'experimental_method': ['X-ray diffraction'], 'journal': 'Proc. Natl. Acad. Sci. U.S.A.', 'pdb_id': '4lsk', 'release_year': 2014, 'resolution': 3.4800062, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 2196.434, 'experimental_method': ['Electron Microscopy'], 'journal': 'Nat. Struct. Mol. Biol.', 'pdb_id': '5o2r', 'release_year': 2017, 'resolution': 3.4}, {'assembly_mol_wt': 2196.434, 'experimental_method': ['Electron Microscopy'], 'journal': 'Nat. Struct. Mol. Biol.', 'pdb_id': '5o2r', 'release_year': 2017, 'resolution': 3.4}, {'assembly_mol_wt': 35.494, 'experimental_method': ['X-ray diffraction'], 'journal': 'Biochem. Biophys. Res. Commun.', 'pdb_id': '4ouj', 'release_year': 2014, 'resolution': 1.46, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 2196.434, 'experimental_method': ['Electron Microscopy'], 'journal': 'Nat. Struct. Mol. Biol.', 'pdb_id': '5o2r', 'release_year': 2017, 'resolution': 3.4}, {'assembly_mol_wt': 2281.701, 'experimental_method': ['X-ray diffraction'], 'journal': 'Cell Rep', 'pdb_id': '5dfe', 'release_year': 2016, 'resolution': 3.0999756, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 3234.942, 'experimental_method': ['Electron Microscopy'], 'journal': 'Elife', 'pdb_id': '6mtb', 'release_year': 2018, 'resolution': 3.6}, {'assembly_mol_wt': 43.149, 'experimental_method': ['X-ray diffraction'], 'journal': 'Nat Commun', 'pdb_id': '5pi2', 'release_year': 2017, 'resolution': 1.52, 'synchrotron_site': ['Diamond']}, {'assembly_mol_wt': 2281.701, 'experimental_method': ['X-ray diffraction'], 'journal': 'Cell Rep', 'pdb_id': '5dfe', 'release_year': 2016, 'resolution': 3.0999756, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 2217.997, 'experimental_method': ['X-ray diffraction'], 'journal': 'Proc. Natl. Acad. Sci. U.S.A.', 'pdb_id': '4lsk', 'release_year': 2014, 'resolution': 3.4800062, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 2281.701, 'experimental_method': ['X-ray diffraction'], 'journal': 'Cell Rep', 'pdb_id': '5dfe', 'release_year': 2016, 'resolution': 3.0999756, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 2281.701, 'experimental_method': ['X-ray diffraction'], 'journal': 'Cell Rep', 'pdb_id': '5dfe', 'release_year': 2016, 'resolution': 3.0999756, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 144.662, 'experimental_method': ['X-ray diffraction'], 'journal': 'Int J Mol Sci', 'pdb_id': '5c2t', 'release_year': 2015, 'resolution': 2.75, 'synchrotron_site': ['SPring-8']}, {'assembly_mol_wt': 144.662, 'experimental_method': ['X-ray diffraction'], 'journal': 'Int J Mol Sci', 'pdb_id': '5c2t', 'release_year': 2015, 'resolution': 2.75, 'synchrotron_site': ['SPring-8']}, {'assembly_mol_wt': 22.411, 'experimental_method': ['X-ray diffraction'], 'journal': 'Acta Crystallogr. D Biol. 
Crystallogr.', 'pdb_id': '4ekh', 'release_year': 2012, 'resolution': 1.75, 'synchrotron_site': ['CHESS']}, {'assembly_mol_wt': 2281.701, 'experimental_method': ['X-ray diffraction'], 'journal': 'Cell Rep', 'pdb_id': '5dfe', 'release_year': 2016, 'resolution': 3.0999756, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 2281.701, 'experimental_method': ['X-ray diffraction'], 'journal': 'Cell Rep', 'pdb_id': '5dfe', 'release_year': 2016, 'resolution': 3.0999756, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 3234.942, 'experimental_method': ['Electron Microscopy'], 'journal': 'Elife', 'pdb_id': '6mtb', 'release_year': 2018, 'resolution': 3.6}, {'assembly_mol_wt': 3234.942, 'experimental_method': ['Electron Microscopy'], 'journal': 'Elife', 'pdb_id': '6mtb', 'release_year': 2018, 'resolution': 3.6}, {'assembly_mol_wt': 2281.701, 'experimental_method': ['X-ray diffraction'], 'journal': 'Cell Rep', 'pdb_id': '5dfe', 'release_year': 2016, 'resolution': 3.0999756, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 55.494, 'experimental_method': ['X-ray diffraction'], 'journal': 'J. Biol. Chem.', 'pdb_id': '5d0r', 'release_year': 2016, 'resolution': 2.24, 'synchrotron_site': ['BESSY']}, {'assembly_mol_wt': 3234.942, 'experimental_method': ['Electron Microscopy'], 'journal': 'Elife', 'pdb_id': '6mtb', 'release_year': 2018, 'resolution': 3.6}, {'assembly_mol_wt': 26.136, 'experimental_method': ['X-ray diffraction'], 'journal': 'ACS Chem. Biol.', 'pdb_id': '4eq1', 'release_year': 2013, 'resolution': 1.6, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 3192.521, 'experimental_method': ['Electron Microscopy'], 'journal': 'J. Mol. Biol.', 'pdb_id': '6gqb', 'release_year': 2018, 'resolution': 3.9}, {'assembly_mol_wt': 3234.942, 'experimental_method': ['Electron Microscopy'], 'journal': 'Elife', 'pdb_id': '6mtb', 'release_year': 2018, 'resolution': 3.6}, {'assembly_mol_wt': 65.143, 'experimental_method': ['X-ray diffraction'], 'journal': 'To be published', 'pdb_id': '5kwv', 'release_year': 2016, 'resolution': 2.25, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 67.087, 'experimental_method': ['X-ray diffraction'], 'journal': 'Nature', 'pdb_id': '4xv3', 'release_year': 2015, 'resolution': 2.8, 'synchrotron_site': ['ALS']}, {'assembly_mol_wt': 99.583, 'experimental_method': ['X-ray diffraction'], 'journal': 'PLoS ONE', 'pdb_id': '3ucm', 'release_year': 2011, 'resolution': 2.513, 'synchrotron_site': ['ESRF']}, {'assembly_mol_wt': 3234.942, 'experimental_method': ['Electron Microscopy'], 'journal': 'Elife', 'pdb_id': '6mtb', 'release_year': 2018, 'resolution': 3.6}, {'assembly_mol_wt': 31.516, 'experimental_method': ['X-ray diffraction'], 'journal': 'Antimicrob. Agents Chemother.', 'pdb_id': '4y0o', 'release_year': 2016, 'resolution': 2.37}, {'assembly_mol_wt': 317.148, 'experimental_method': ['X-ray diffraction'], 'journal': 'Nat. Struct. Mol. Biol.', 'pdb_id': '4hf5', 'release_year': 2013, 'resolution': 3.004, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 1967.09, 'experimental_method': ['Electron Microscopy'], 'journal': 'Nature', 'pdb_id': '6f38', 'release_year': 2018, 'resolution': 6.7}, {'assembly_mol_wt': 211.126, 'experimental_method': ['X-ray diffraction'], 'journal': 'J. Mol. 
Biol.', 'pdb_id': '4di4', 'release_year': 2012, 'resolution': 2.714, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 1967.09, 'experimental_method': ['Electron Microscopy'], 'journal': 'Nature', 'pdb_id': '6f38', 'release_year': 2018, 'resolution': 6.7}, {'assembly_mol_wt': 86.076, 'experimental_method': ['X-ray diffraction'], 'journal': 'J. Bacteriol.', 'pdb_id': '4uaq', 'release_year': 2015, 'resolution': 2.8, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 2928.071, 'experimental_method': ['Electron Microscopy'], 'journal': 'Cell', 'pdb_id': '6em5', 'release_year': 2017, 'resolution': 4.3}, {'assembly_mol_wt': 27.653, 'experimental_method': ['X-ray diffraction'], 'journal': 'Sci Rep', 'pdb_id': '4q8r', 'release_year': 2014, 'resolution': 1.65, 'synchrotron_site': ['ESRF']}, {'assembly_mol_wt': 12.118, 'experimental_method': ['X-ray diffraction'], 'journal': 'To be published', 'pdb_id': '3rov', 'release_year': 2012, 'resolution': 2.3}, {'assembly_mol_wt': 66.657, 'experimental_method': ['X-ray diffraction'], 'journal': 'Nat Commun', 'pdb_id': '6gvd', 'release_year': 2018, 'resolution': 1.22, 'synchrotron_site': ['Diamond']}, {'assembly_mol_wt': 54.361, 'experimental_method': ['X-ray diffraction'], 'journal': 'Chem. Commun. (Camb.)', 'pdb_id': '4dhu', 'release_year': 2013, 'resolution': 1.67}, {'assembly_mol_wt': 1967.09, 'experimental_method': ['Electron Microscopy'], 'journal': 'Nature', 'pdb_id': '6f38', 'release_year': 2018, 'resolution': 6.7}, {'assembly_mol_wt': 1967.09, 'experimental_method': ['Electron Microscopy'], 'journal': 'Nature', 'pdb_id': '6f38', 'release_year': 2018, 'resolution': 6.7}, {'assembly_mol_wt': 33.734, 'experimental_method': ['X-ray diffraction'], 'journal': 'Nat Commun', 'pdb_id': '4mql', 'release_year': 2013, 'resolution': 1.3, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 3234.942, 'experimental_method': ['Electron Microscopy'], 'journal': 'Elife', 'pdb_id': '6mtb', 'release_year': 2018, 'resolution': 3.6}, {'assembly_mol_wt': 172.203, 'experimental_method': ['X-ray diffraction'], 'journal': 'To be published', 'pdb_id': '4dyn', 'release_year': 2013, 'resolution': 2.4, 'synchrotron_site': ['SLS']}, {'assembly_mol_wt': 44.515, 'experimental_method': ['X-ray diffraction'], 'journal': 'Proteins', 'pdb_id': '4ooz', 'release_year': 2014, 'resolution': 2.6, 'synchrotron_site': ['PAL/PLS']}, {'assembly_mol_wt': 29.675, 'experimental_method': ['X-ray diffraction'], 'journal': 'To be published', 'pdb_id': '4zko', 'release_year': 2016, 'resolution': 1.29, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 2229.574, 'experimental_method': ['Electron Microscopy'], 'journal': 'Cell', 'pdb_id': '6ff4', 'release_year': 2018, 'resolution': 3.4}, {'assembly_mol_wt': 687.815, 'experimental_method': ['Electron Microscopy'], 'journal': 'Cell', 'pdb_id': '6osy', 'release_year': 2019, 'resolution': 4.3}, {'assembly_mol_wt': 5730.344, 'experimental_method': ['X-ray diffraction'], 'journal': 'J. 
Virol.', 'pdb_id': '5c9a', 'release_year': 2015, 'resolution': 2.7, 'synchrotron_site': ['Diamond']}, {'assembly_mol_wt': 762.725, 'experimental_method': ['X-ray diffraction'], 'journal': 'Nat Commun', 'pdb_id': '4tnh', 'release_year': 2014, 'resolution': 4.900007, 'synchrotron_site': ['SLAC LCLS']}, {'assembly_mol_wt': 2229.574, 'experimental_method': ['Electron Microscopy'], 'journal': 'Cell', 'pdb_id': '6ff4', 'release_year': 2018, 'resolution': 3.4}, {'assembly_mol_wt': 687.815, 'experimental_method': ['Electron Microscopy'], 'journal': 'Cell', 'pdb_id': '6osy', 'release_year': 2019, 'resolution': 4.3}, {'assembly_mol_wt': 56.286, 'experimental_method': ['X-ray diffraction'], 'journal': 'Front Immunol', 'pdb_id': '6s0b', 'release_year': 2019, 'resolution': 2.312, 'synchrotron_site': ['ESRF']}, {'assembly_mol_wt': 43.149, 'experimental_method': ['X-ray diffraction'], 'journal': 'Nat Commun', 'pdb_id': '5pm8', 'release_year': 2017, 'resolution': 1.54, 'synchrotron_site': ['Diamond']}, {'assembly_mol_wt': 24.576, 'experimental_method': ['X-ray diffraction'], 'journal': 'Bioorg. Med. Chem. Lett.', 'pdb_id': '5ctw', 'release_year': 2016, 'resolution': 1.48, 'synchrotron_site': ['Diamond']}, {'assembly_mol_wt': 11.5, 'experimental_method': ['X-ray diffraction'], 'journal': 'J. Inorg. Biochem.', 'pdb_id': '2wlb', 'release_year': 2010, 'resolution': 2.6}, {'assembly_mol_wt': 56.286, 'experimental_method': ['X-ray diffraction'], 'journal': 'Front Immunol', 'pdb_id': '6s0b', 'release_year': 2019, 'resolution': 2.312, 'synchrotron_site': ['ESRF']}, {'assembly_mol_wt': 65.022, 'experimental_method': ['X-ray diffraction'], 'journal': 'Proc. Natl. Acad. Sci. U.S.A.', 'pdb_id': '5ibc', 'release_year': 2016, 'resolution': 1.66, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 66.961, 'experimental_method': ['X-ray diffraction'], 'journal': 'Nat Commun', 'pdb_id': '6phf', 'release_year': 2019, 'resolution': 3.1, 'synchrotron_site': ['CLSI']}, {'assembly_mol_wt': 14.201, 'experimental_method': ['X-ray diffraction'], 'journal': 'To be published', 'pdb_id': '4h7r', 'release_year': 2014, 'resolution': 1.33, 'synchrotron_site': ['Diamond']}, {'assembly_mol_wt': 14.518, 'experimental_method': ['X-ray diffraction'], 'journal': 'Acta Crystallogr. D Biol. 
Crystallogr.', 'pdb_id': '4h8z', 'release_year': 2013, 'resolution': 1.1998, 'synchrotron_site': ['SSRL']}, {'assembly_mol_wt': 762.725, 'experimental_method': ['X-ray diffraction'], 'journal': 'Nat Commun', 'pdb_id': '4tnh', 'release_year': 2014, 'resolution': 4.900007, 'synchrotron_site': ['SLAC LCLS']}, {'assembly_mol_wt': 762.725, 'experimental_method': ['X-ray diffraction'], 'journal': 'Nat Commun', 'pdb_id': '4tnh', 'release_year': 2014, 'resolution': 4.900007, 'synchrotron_site': ['SLAC LCLS']}, {'assembly_mol_wt': 3492.785, 'experimental_method': ['Electron Microscopy'], 'journal': 'Nat Commun', 'pdb_id': '6jct', 'release_year': 2019, 'resolution': 3.18}, {'assembly_mol_wt': 762.725, 'experimental_method': ['X-ray diffraction'], 'journal': 'Nat Commun', 'pdb_id': '4tnh', 'release_year': 2014, 'resolution': 4.900007, 'synchrotron_site': ['SLAC LCLS']}, {'assembly_mol_wt': 42.66, 'experimental_method': ['X-ray diffraction'], 'journal': 'Acta Crystallogr D Struct Biol', 'pdb_id': '6nwo', 'release_year': 2019, 'resolution': 2.11, 'synchrotron_site': ['Australian Synchrotron']}, {'assembly_mol_wt': 28.05, 'experimental_method': ['X-ray diffraction'], 'journal': 'Rus.J.Bioorg.Chem.', 'pdb_id': '4n3d', 'release_year': 2014, 'resolution': 1.34, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 762.725, 'experimental_method': ['X-ray diffraction'], 'journal': 'Nat Commun', 'pdb_id': '4tnh', 'release_year': 2014, 'resolution': 4.900007, 'synchrotron_site': ['SLAC LCLS']}, {'assembly_mol_wt': 29.281, 'experimental_method': ['X-ray diffraction'], 'journal': 'Biochemistry', 'pdb_id': '5d2j', 'release_year': 2016, 'resolution': 1.718, 'synchrotron_site': ['LNLS']}, {'assembly_mol_wt': 762.725, 'experimental_method': ['X-ray diffraction'], 'journal': 'Nat Commun', 'pdb_id': '4tnh', 'release_year': 2014, 'resolution': 4.900007, 'synchrotron_site': ['SLAC LCLS']}, {'assembly_mol_wt': 66.961, 'experimental_method': ['X-ray diffraction'], 'journal': 'Nat Commun', 'pdb_id': '6phf', 'release_year': 2019, 'resolution': 3.1, 'synchrotron_site': ['CLSI']}, {'assembly_mol_wt': 112.383, 'experimental_method': ['X-ray diffraction'], 'journal': 'Microbiology (Reading, Engl.)', 'pdb_id': '6r1e', 'release_year': 2019, 'resolution': 2.6}, {'assembly_mol_wt': 55.788, 'experimental_method': ['X-ray diffraction'], 'journal': 'J. Biol. Chem.', 'pdb_id': '3zli', 'release_year': 2013, 'resolution': 1.8, 'synchrotron_site': ['Diamond']}, {'assembly_mol_wt': 92.203, 'experimental_method': ['X-ray diffraction'], 'journal': 'Biochemistry', 'pdb_id': '6mrh', 'release_year': 2019, 'resolution': 2.02, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 79.013, 'experimental_method': ['X-ray diffraction'], 'journal': 'Nucleic Acids Res.', 'pdb_id': '5o85', 'release_year': 2017, 'resolution': 3.4, 'synchrotron_site': ['SOLEIL']}, {'assembly_mol_wt': 38.67, 'experimental_method': ['X-ray diffraction'], 'journal': 'J. Biol. Chem.', 'pdb_id': '3i7z', 'release_year': 2010, 'resolution': 2.3}, {'assembly_mol_wt': 3741.132, 'experimental_method': ['Electron Microscopy'], 'journal': 'Cell', 'pdb_id': '6ff7', 'release_year': 2019, 'resolution': 4.5}, {'assembly_mol_wt': 19.151, 'experimental_method': ['X-ray diffraction'], 'journal': 'J. Mol. 
Biol.', 'pdb_id': '4e2u', 'release_year': 2012, 'resolution': 1.582, 'synchrotron_site': ['ESRF']}, {'assembly_mol_wt': 169.609, 'experimental_method': ['X-ray diffraction'], 'journal': 'Structure', 'pdb_id': '4fip', 'release_year': 2012, 'resolution': 2.686, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 29.675, 'experimental_method': ['X-ray diffraction'], 'journal': 'To be published', 'pdb_id': '4zko', 'release_year': 2016, 'resolution': 1.29, 'synchrotron_site': ['APS']}, {'assembly_mol_wt': 108.217, 'experimental_method': ['X-ray diffraction'], 'journal': 'To be published', 'pdb_id': '5i4u', 'release_year': 2017, 'resolution': 2.372, 'synchrotron_site': ['ALS']}, {'assembly_mol_wt': 45.06, 'experimental_method': ['X-ray diffraction'], 'journal': 'Proteomics', 'pdb_id': '6bxq', 'release_year': 2018, 'resolution': 1.58, 'synchrotron_site': ['Australian Synchrotron']}, {'assembly_mol_wt': 100.239, 'experimental_method': ['X-ray diffraction'], 'journal': 'J. Med. Chem.', 'pdb_id': '5gvp', 'release_year': 2017, 'resolution': 2.26, 'synchrotron_site': ['NSRRC']}, {'assembly_mol_wt': 35.331, 'experimental_method': ['X-ray diffraction'], 'journal': 'J. Med. Chem.', 'pdb_id': '5n2t', 'release_year': 2017, 'resolution': 1.379, 'synchrotron_site': ['BESSY']}] ###Markdown 6) Analysing and plotting the results We are going to use a Python package called Pandas to help us sort and visualise the resultsFirst we have to do a bit of housekeeping, some of the results are lists (a PDB entry can have more than one experimental method or organism for example) so we need to change them into strings so we can use them in a graph ###Code def change_lists_to_strings(results): """ input - list of results from search output - list of results with lists changed into strings """ for row in results: for data in row: if type(row[data]) == list: # if there are any numbers in the list change them into strings row[data] = [str(a) for a in row[data]] # unique and sort the list and then change the list into a string row[data] = ','.join(sorted(list(set(row[data])))) return results results = change_lists_to_strings(results) pprint(results) ###Output [{'assembly_mol_wt': 2196.434, 'experimental_method': 'Electron Microscopy', 'journal': 'Nat. Struct. Mol. Biol.', 'pdb_id': '5o2r', 'release_year': 2017, 'resolution': 3.4}, {'assembly_mol_wt': 28.444, 'experimental_method': 'X-ray diffraction', 'journal': 'J. Med. Chem.', 'pdb_id': '4na8', 'release_year': 2014, 'resolution': 2.3}, {'assembly_mol_wt': 79.563, 'experimental_method': 'X-ray diffraction', 'journal': 'J. Virol.', 'pdb_id': '4a8s', 'release_year': 2012, 'resolution': 2.9, 'synchrotron_site': 'ESRF'}, {'assembly_mol_wt': 2196.434, 'experimental_method': 'Electron Microscopy', 'journal': 'Nat. Struct. Mol. Biol.', 'pdb_id': '5o2r', 'release_year': 2017, 'resolution': 3.4}, {'assembly_mol_wt': 113.636, 'experimental_method': 'X-ray diffraction', 'journal': 'Proteins', 'pdb_id': '4fio', 'release_year': 2013, 'resolution': 1.37, 'synchrotron_site': 'Australian Synchrotron'}, {'assembly_mol_wt': 2217.997, 'experimental_method': 'X-ray diffraction', 'journal': 'Proc. Natl. Acad. Sci. U.S.A.', 'pdb_id': '4lsk', 'release_year': 2014, 'resolution': 3.4800062, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 2196.434, 'experimental_method': 'Electron Microscopy', 'journal': 'Nat. Struct. Mol. Biol.', 'pdb_id': '5o2r', 'release_year': 2017, 'resolution': 3.4}, {'assembly_mol_wt': 2196.434, 'experimental_method': 'Electron Microscopy', 'journal': 'Nat. Struct. Mol. 
Biol.', 'pdb_id': '5o2r', 'release_year': 2017, 'resolution': 3.4}, {'assembly_mol_wt': 54.07, 'experimental_method': 'X-ray diffraction', 'journal': 'To be published', 'pdb_id': '3rj8', 'release_year': 2012, 'resolution': 2.4, 'synchrotron_site': 'EMBL/DESY, Hamburg'}, {'assembly_mol_wt': 2217.997, 'experimental_method': 'X-ray diffraction', 'journal': 'Proc. Natl. Acad. Sci. U.S.A.', 'pdb_id': '4lsk', 'release_year': 2014, 'resolution': 3.4800062, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 2217.997, 'experimental_method': 'X-ray diffraction', 'journal': 'Proc. Natl. Acad. Sci. U.S.A.', 'pdb_id': '4lsk', 'release_year': 2014, 'resolution': 3.4800062, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 2281.701, 'experimental_method': 'X-ray diffraction', 'journal': 'Cell Rep', 'pdb_id': '5dfe', 'release_year': 2016, 'resolution': 3.0999756, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 2196.434, 'experimental_method': 'Electron Microscopy', 'journal': 'Nat. Struct. Mol. Biol.', 'pdb_id': '5o2r', 'release_year': 2017, 'resolution': 3.4}, {'assembly_mol_wt': 89.883, 'experimental_method': 'X-ray diffraction', 'journal': 'Sci Adv', 'pdb_id': '4x9g', 'release_year': 2015, 'resolution': 3.403, 'synchrotron_site': 'BSRF'}, {'assembly_mol_wt': 2217.997, 'experimental_method': 'X-ray diffraction', 'journal': 'Proc. Natl. Acad. Sci. U.S.A.', 'pdb_id': '4lsk', 'release_year': 2014, 'resolution': 3.4800062, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 2217.997, 'experimental_method': 'X-ray diffraction', 'journal': 'Proc. Natl. Acad. Sci. U.S.A.', 'pdb_id': '4lsk', 'release_year': 2014, 'resolution': 3.4800062, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 2196.434, 'experimental_method': 'Electron Microscopy', 'journal': 'Nat. Struct. Mol. Biol.', 'pdb_id': '5o2r', 'release_year': 2017, 'resolution': 3.4}, {'assembly_mol_wt': 2196.434, 'experimental_method': 'Electron Microscopy', 'journal': 'Nat. Struct. Mol. Biol.', 'pdb_id': '5o2r', 'release_year': 2017, 'resolution': 3.4}, {'assembly_mol_wt': 35.494, 'experimental_method': 'X-ray diffraction', 'journal': 'Biochem. Biophys. Res. Commun.', 'pdb_id': '4ouj', 'release_year': 2014, 'resolution': 1.46, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 2196.434, 'experimental_method': 'Electron Microscopy', 'journal': 'Nat. Struct. Mol. Biol.', 'pdb_id': '5o2r', 'release_year': 2017, 'resolution': 3.4}, {'assembly_mol_wt': 2281.701, 'experimental_method': 'X-ray diffraction', 'journal': 'Cell Rep', 'pdb_id': '5dfe', 'release_year': 2016, 'resolution': 3.0999756, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 3234.942, 'experimental_method': 'Electron Microscopy', 'journal': 'Elife', 'pdb_id': '6mtb', 'release_year': 2018, 'resolution': 3.6}, {'assembly_mol_wt': 43.149, 'experimental_method': 'X-ray diffraction', 'journal': 'Nat Commun', 'pdb_id': '5pi2', 'release_year': 2017, 'resolution': 1.52, 'synchrotron_site': 'Diamond'}, {'assembly_mol_wt': 2281.701, 'experimental_method': 'X-ray diffraction', 'journal': 'Cell Rep', 'pdb_id': '5dfe', 'release_year': 2016, 'resolution': 3.0999756, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 2217.997, 'experimental_method': 'X-ray diffraction', 'journal': 'Proc. Natl. Acad. Sci. 
U.S.A.', 'pdb_id': '4lsk', 'release_year': 2014, 'resolution': 3.4800062, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 2281.701, 'experimental_method': 'X-ray diffraction', 'journal': 'Cell Rep', 'pdb_id': '5dfe', 'release_year': 2016, 'resolution': 3.0999756, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 2281.701, 'experimental_method': 'X-ray diffraction', 'journal': 'Cell Rep', 'pdb_id': '5dfe', 'release_year': 2016, 'resolution': 3.0999756, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 144.662, 'experimental_method': 'X-ray diffraction', 'journal': 'Int J Mol Sci', 'pdb_id': '5c2t', 'release_year': 2015, 'resolution': 2.75, 'synchrotron_site': 'SPring-8'}, {'assembly_mol_wt': 144.662, 'experimental_method': 'X-ray diffraction', 'journal': 'Int J Mol Sci', 'pdb_id': '5c2t', 'release_year': 2015, 'resolution': 2.75, 'synchrotron_site': 'SPring-8'}, {'assembly_mol_wt': 22.411, 'experimental_method': 'X-ray diffraction', 'journal': 'Acta Crystallogr. D Biol. Crystallogr.', 'pdb_id': '4ekh', 'release_year': 2012, 'resolution': 1.75, 'synchrotron_site': 'CHESS'}, {'assembly_mol_wt': 2281.701, 'experimental_method': 'X-ray diffraction', 'journal': 'Cell Rep', 'pdb_id': '5dfe', 'release_year': 2016, 'resolution': 3.0999756, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 2281.701, 'experimental_method': 'X-ray diffraction', 'journal': 'Cell Rep', 'pdb_id': '5dfe', 'release_year': 2016, 'resolution': 3.0999756, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 3234.942, 'experimental_method': 'Electron Microscopy', 'journal': 'Elife', 'pdb_id': '6mtb', 'release_year': 2018, 'resolution': 3.6}, {'assembly_mol_wt': 3234.942, 'experimental_method': 'Electron Microscopy', 'journal': 'Elife', 'pdb_id': '6mtb', 'release_year': 2018, 'resolution': 3.6}, {'assembly_mol_wt': 2281.701, 'experimental_method': 'X-ray diffraction', 'journal': 'Cell Rep', 'pdb_id': '5dfe', 'release_year': 2016, 'resolution': 3.0999756, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 55.494, 'experimental_method': 'X-ray diffraction', 'journal': 'J. Biol. Chem.', 'pdb_id': '5d0r', 'release_year': 2016, 'resolution': 2.24, 'synchrotron_site': 'BESSY'}, {'assembly_mol_wt': 3234.942, 'experimental_method': 'Electron Microscopy', 'journal': 'Elife', 'pdb_id': '6mtb', 'release_year': 2018, 'resolution': 3.6}, {'assembly_mol_wt': 26.136, 'experimental_method': 'X-ray diffraction', 'journal': 'ACS Chem. Biol.', 'pdb_id': '4eq1', 'release_year': 2013, 'resolution': 1.6, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 3192.521, 'experimental_method': 'Electron Microscopy', 'journal': 'J. Mol. 
Biol.', 'pdb_id': '6gqb', 'release_year': 2018, 'resolution': 3.9}, {'assembly_mol_wt': 3234.942, 'experimental_method': 'Electron Microscopy', 'journal': 'Elife', 'pdb_id': '6mtb', 'release_year': 2018, 'resolution': 3.6}, {'assembly_mol_wt': 65.143, 'experimental_method': 'X-ray diffraction', 'journal': 'To be published', 'pdb_id': '5kwv', 'release_year': 2016, 'resolution': 2.25, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 67.087, 'experimental_method': 'X-ray diffraction', 'journal': 'Nature', 'pdb_id': '4xv3', 'release_year': 2015, 'resolution': 2.8, 'synchrotron_site': 'ALS'}, {'assembly_mol_wt': 99.583, 'experimental_method': 'X-ray diffraction', 'journal': 'PLoS ONE', 'pdb_id': '3ucm', 'release_year': 2011, 'resolution': 2.513, 'synchrotron_site': 'ESRF'}, {'assembly_mol_wt': 3234.942, 'experimental_method': 'Electron Microscopy', 'journal': 'Elife', 'pdb_id': '6mtb', 'release_year': 2018, 'resolution': 3.6}, {'assembly_mol_wt': 31.516, 'experimental_method': 'X-ray diffraction', 'journal': 'Antimicrob. Agents Chemother.', 'pdb_id': '4y0o', 'release_year': 2016, 'resolution': 2.37}, {'assembly_mol_wt': 317.148, 'experimental_method': 'X-ray diffraction', 'journal': 'Nat. Struct. Mol. Biol.', 'pdb_id': '4hf5', 'release_year': 2013, 'resolution': 3.004, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 1967.09, 'experimental_method': 'Electron Microscopy', 'journal': 'Nature', 'pdb_id': '6f38', 'release_year': 2018, 'resolution': 6.7}, {'assembly_mol_wt': 211.126, 'experimental_method': 'X-ray diffraction', 'journal': 'J. Mol. Biol.', 'pdb_id': '4di4', 'release_year': 2012, 'resolution': 2.714, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 1967.09, 'experimental_method': 'Electron Microscopy', 'journal': 'Nature', 'pdb_id': '6f38', 'release_year': 2018, 'resolution': 6.7}, {'assembly_mol_wt': 86.076, 'experimental_method': 'X-ray diffraction', 'journal': 'J. Bacteriol.', 'pdb_id': '4uaq', 'release_year': 2015, 'resolution': 2.8, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 2928.071, 'experimental_method': 'Electron Microscopy', 'journal': 'Cell', 'pdb_id': '6em5', 'release_year': 2017, 'resolution': 4.3}, {'assembly_mol_wt': 27.653, 'experimental_method': 'X-ray diffraction', 'journal': 'Sci Rep', 'pdb_id': '4q8r', 'release_year': 2014, 'resolution': 1.65, 'synchrotron_site': 'ESRF'}, {'assembly_mol_wt': 12.118, 'experimental_method': 'X-ray diffraction', 'journal': 'To be published', 'pdb_id': '3rov', 'release_year': 2012, 'resolution': 2.3}, {'assembly_mol_wt': 66.657, 'experimental_method': 'X-ray diffraction', 'journal': 'Nat Commun', 'pdb_id': '6gvd', 'release_year': 2018, 'resolution': 1.22, 'synchrotron_site': 'Diamond'}, {'assembly_mol_wt': 54.361, 'experimental_method': 'X-ray diffraction', 'journal': 'Chem. Commun. 
(Camb.)', 'pdb_id': '4dhu', 'release_year': 2013, 'resolution': 1.67}, {'assembly_mol_wt': 1967.09, 'experimental_method': 'Electron Microscopy', 'journal': 'Nature', 'pdb_id': '6f38', 'release_year': 2018, 'resolution': 6.7}, {'assembly_mol_wt': 1967.09, 'experimental_method': 'Electron Microscopy', 'journal': 'Nature', 'pdb_id': '6f38', 'release_year': 2018, 'resolution': 6.7}, {'assembly_mol_wt': 33.734, 'experimental_method': 'X-ray diffraction', 'journal': 'Nat Commun', 'pdb_id': '4mql', 'release_year': 2013, 'resolution': 1.3, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 3234.942, 'experimental_method': 'Electron Microscopy', 'journal': 'Elife', 'pdb_id': '6mtb', 'release_year': 2018, 'resolution': 3.6}, {'assembly_mol_wt': 172.203, 'experimental_method': 'X-ray diffraction', 'journal': 'To be published', 'pdb_id': '4dyn', 'release_year': 2013, 'resolution': 2.4, 'synchrotron_site': 'SLS'}, {'assembly_mol_wt': 44.515, 'experimental_method': 'X-ray diffraction', 'journal': 'Proteins', 'pdb_id': '4ooz', 'release_year': 2014, 'resolution': 2.6, 'synchrotron_site': 'PAL/PLS'}, {'assembly_mol_wt': 29.675, 'experimental_method': 'X-ray diffraction', 'journal': 'To be published', 'pdb_id': '4zko', 'release_year': 2016, 'resolution': 1.29, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 2229.574, 'experimental_method': 'Electron Microscopy', 'journal': 'Cell', 'pdb_id': '6ff4', 'release_year': 2018, 'resolution': 3.4}, {'assembly_mol_wt': 687.815, 'experimental_method': 'Electron Microscopy', 'journal': 'Cell', 'pdb_id': '6osy', 'release_year': 2019, 'resolution': 4.3}, {'assembly_mol_wt': 5730.344, 'experimental_method': 'X-ray diffraction', 'journal': 'J. Virol.', 'pdb_id': '5c9a', 'release_year': 2015, 'resolution': 2.7, 'synchrotron_site': 'Diamond'}, {'assembly_mol_wt': 762.725, 'experimental_method': 'X-ray diffraction', 'journal': 'Nat Commun', 'pdb_id': '4tnh', 'release_year': 2014, 'resolution': 4.900007, 'synchrotron_site': 'SLAC LCLS'}, {'assembly_mol_wt': 2229.574, 'experimental_method': 'Electron Microscopy', 'journal': 'Cell', 'pdb_id': '6ff4', 'release_year': 2018, 'resolution': 3.4}, {'assembly_mol_wt': 687.815, 'experimental_method': 'Electron Microscopy', 'journal': 'Cell', 'pdb_id': '6osy', 'release_year': 2019, 'resolution': 4.3}, {'assembly_mol_wt': 56.286, 'experimental_method': 'X-ray diffraction', 'journal': 'Front Immunol', 'pdb_id': '6s0b', 'release_year': 2019, 'resolution': 2.312, 'synchrotron_site': 'ESRF'}, {'assembly_mol_wt': 43.149, 'experimental_method': 'X-ray diffraction', 'journal': 'Nat Commun', 'pdb_id': '5pm8', 'release_year': 2017, 'resolution': 1.54, 'synchrotron_site': 'Diamond'}, {'assembly_mol_wt': 24.576, 'experimental_method': 'X-ray diffraction', 'journal': 'Bioorg. Med. Chem. Lett.', 'pdb_id': '5ctw', 'release_year': 2016, 'resolution': 1.48, 'synchrotron_site': 'Diamond'}, {'assembly_mol_wt': 11.5, 'experimental_method': 'X-ray diffraction', 'journal': 'J. Inorg. Biochem.', 'pdb_id': '2wlb', 'release_year': 2010, 'resolution': 2.6}, {'assembly_mol_wt': 56.286, 'experimental_method': 'X-ray diffraction', 'journal': 'Front Immunol', 'pdb_id': '6s0b', 'release_year': 2019, 'resolution': 2.312, 'synchrotron_site': 'ESRF'}, {'assembly_mol_wt': 65.022, 'experimental_method': 'X-ray diffraction', 'journal': 'Proc. Natl. Acad. Sci. 
U.S.A.', 'pdb_id': '5ibc', 'release_year': 2016, 'resolution': 1.66, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 66.961, 'experimental_method': 'X-ray diffraction', 'journal': 'Nat Commun', 'pdb_id': '6phf', 'release_year': 2019, 'resolution': 3.1, 'synchrotron_site': 'CLSI'}, {'assembly_mol_wt': 14.201, 'experimental_method': 'X-ray diffraction', 'journal': 'To be published', 'pdb_id': '4h7r', 'release_year': 2014, 'resolution': 1.33, 'synchrotron_site': 'Diamond'}, {'assembly_mol_wt': 14.518, 'experimental_method': 'X-ray diffraction', 'journal': 'Acta Crystallogr. D Biol. Crystallogr.', 'pdb_id': '4h8z', 'release_year': 2013, 'resolution': 1.1998, 'synchrotron_site': 'SSRL'}, {'assembly_mol_wt': 762.725, 'experimental_method': 'X-ray diffraction', 'journal': 'Nat Commun', 'pdb_id': '4tnh', 'release_year': 2014, 'resolution': 4.900007, 'synchrotron_site': 'SLAC LCLS'}, {'assembly_mol_wt': 762.725, 'experimental_method': 'X-ray diffraction', 'journal': 'Nat Commun', 'pdb_id': '4tnh', 'release_year': 2014, 'resolution': 4.900007, 'synchrotron_site': 'SLAC LCLS'}, {'assembly_mol_wt': 3492.785, 'experimental_method': 'Electron Microscopy', 'journal': 'Nat Commun', 'pdb_id': '6jct', 'release_year': 2019, 'resolution': 3.18}, {'assembly_mol_wt': 762.725, 'experimental_method': 'X-ray diffraction', 'journal': 'Nat Commun', 'pdb_id': '4tnh', 'release_year': 2014, 'resolution': 4.900007, 'synchrotron_site': 'SLAC LCLS'}, {'assembly_mol_wt': 42.66, 'experimental_method': 'X-ray diffraction', 'journal': 'Acta Crystallogr D Struct Biol', 'pdb_id': '6nwo', 'release_year': 2019, 'resolution': 2.11, 'synchrotron_site': 'Australian Synchrotron'}, {'assembly_mol_wt': 28.05, 'experimental_method': 'X-ray diffraction', 'journal': 'Rus.J.Bioorg.Chem.', 'pdb_id': '4n3d', 'release_year': 2014, 'resolution': 1.34, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 762.725, 'experimental_method': 'X-ray diffraction', 'journal': 'Nat Commun', 'pdb_id': '4tnh', 'release_year': 2014, 'resolution': 4.900007, 'synchrotron_site': 'SLAC LCLS'}, {'assembly_mol_wt': 29.281, 'experimental_method': 'X-ray diffraction', 'journal': 'Biochemistry', 'pdb_id': '5d2j', 'release_year': 2016, 'resolution': 1.718, 'synchrotron_site': 'LNLS'}, {'assembly_mol_wt': 762.725, 'experimental_method': 'X-ray diffraction', 'journal': 'Nat Commun', 'pdb_id': '4tnh', 'release_year': 2014, 'resolution': 4.900007, 'synchrotron_site': 'SLAC LCLS'}, {'assembly_mol_wt': 66.961, 'experimental_method': 'X-ray diffraction', 'journal': 'Nat Commun', 'pdb_id': '6phf', 'release_year': 2019, 'resolution': 3.1, 'synchrotron_site': 'CLSI'}, {'assembly_mol_wt': 112.383, 'experimental_method': 'X-ray diffraction', 'journal': 'Microbiology (Reading, Engl.)', 'pdb_id': '6r1e', 'release_year': 2019, 'resolution': 2.6}, {'assembly_mol_wt': 55.788, 'experimental_method': 'X-ray diffraction', 'journal': 'J. Biol. Chem.', 'pdb_id': '3zli', 'release_year': 2013, 'resolution': 1.8, 'synchrotron_site': 'Diamond'}, {'assembly_mol_wt': 92.203, 'experimental_method': 'X-ray diffraction', 'journal': 'Biochemistry', 'pdb_id': '6mrh', 'release_year': 2019, 'resolution': 2.02, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 79.013, 'experimental_method': 'X-ray diffraction', 'journal': 'Nucleic Acids Res.', 'pdb_id': '5o85', 'release_year': 2017, 'resolution': 3.4, 'synchrotron_site': 'SOLEIL'}, {'assembly_mol_wt': 38.67, 'experimental_method': 'X-ray diffraction', 'journal': 'J. Biol. 
Chem.', 'pdb_id': '3i7z', 'release_year': 2010, 'resolution': 2.3}, {'assembly_mol_wt': 3741.132, 'experimental_method': 'Electron Microscopy', 'journal': 'Cell', 'pdb_id': '6ff7', 'release_year': 2019, 'resolution': 4.5}, {'assembly_mol_wt': 19.151, 'experimental_method': 'X-ray diffraction', 'journal': 'J. Mol. Biol.', 'pdb_id': '4e2u', 'release_year': 2012, 'resolution': 1.582, 'synchrotron_site': 'ESRF'}, {'assembly_mol_wt': 169.609, 'experimental_method': 'X-ray diffraction', 'journal': 'Structure', 'pdb_id': '4fip', 'release_year': 2012, 'resolution': 2.686, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 29.675, 'experimental_method': 'X-ray diffraction', 'journal': 'To be published', 'pdb_id': '4zko', 'release_year': 2016, 'resolution': 1.29, 'synchrotron_site': 'APS'}, {'assembly_mol_wt': 108.217, 'experimental_method': 'X-ray diffraction', 'journal': 'To be published', 'pdb_id': '5i4u', 'release_year': 2017, 'resolution': 2.372, 'synchrotron_site': 'ALS'}, {'assembly_mol_wt': 45.06, 'experimental_method': 'X-ray diffraction', 'journal': 'Proteomics', 'pdb_id': '6bxq', 'release_year': 2018, 'resolution': 1.58, 'synchrotron_site': 'Australian Synchrotron'}, {'assembly_mol_wt': 100.239, 'experimental_method': 'X-ray diffraction', 'journal': 'J. Med. Chem.', 'pdb_id': '5gvp', 'release_year': 2017, 'resolution': 2.26, 'synchrotron_site': 'NSRRC'}, {'assembly_mol_wt': 35.331, 'experimental_method': 'X-ray diffraction', 'journal': 'J. Med. Chem.', 'pdb_id': '5n2t', 'release_year': 2017, 'resolution': 1.379, 'synchrotron_site': 'BESSY'}] ###Markdown Notice that the only thing that changed is ['X-ray diffraction'] is now 'X-ray diffraction' If we wanted to know the experimental methods used to determine structures of Human Dihydrofolate reductase we could loop through the results and count how many entries use each experimental method. We can use a Python package called Pandas to do this for us. It changes the results into a mini database - called a DataFrame. ###Code def pandas_dataset(list_of_results): results = change_lists_to_strings(list_of_results) # we have added our function to change lists to strings df = pd.DataFrame(list_of_results) return df df = pandas_dataset(list_of_results=results) print(df) ###Output assembly_mol_wt experimental_method \ 0 2196.434 Electron Microscopy 1 28.444 X-ray diffraction 2 79.563 X-ray diffraction 3 2196.434 Electron Microscopy 4 113.636 X-ray diffraction 5 2217.997 X-ray diffraction 6 2196.434 Electron Microscopy 7 2196.434 Electron Microscopy 8 54.070 X-ray diffraction 9 2217.997 X-ray diffraction 10 2217.997 X-ray diffraction 11 2281.701 X-ray diffraction 12 2196.434 Electron Microscopy 13 89.883 X-ray diffraction 14 2217.997 X-ray diffraction 15 2217.997 X-ray diffraction 16 2196.434 Electron Microscopy 17 2196.434 Electron Microscopy 18 35.494 X-ray diffraction 19 2196.434 Electron Microscopy 20 2281.701 X-ray diffraction 21 3234.942 Electron Microscopy 22 43.149 X-ray diffraction 23 2281.701 X-ray diffraction 24 2217.997 X-ray diffraction 25 2281.701 X-ray diffraction 26 2281.701 X-ray diffraction 27 144.662 X-ray diffraction 28 144.662 X-ray diffraction 29 22.411 X-ray diffraction .. ... ... 
70 24.576 X-ray diffraction 71 11.500 X-ray diffraction 72 56.286 X-ray diffraction 73 65.022 X-ray diffraction 74 66.961 X-ray diffraction 75 14.201 X-ray diffraction 76 14.518 X-ray diffraction 77 762.725 X-ray diffraction 78 762.725 X-ray diffraction 79 3492.785 Electron Microscopy 80 762.725 X-ray diffraction 81 42.660 X-ray diffraction 82 28.050 X-ray diffraction 83 762.725 X-ray diffraction 84 29.281 X-ray diffraction 85 762.725 X-ray diffraction 86 66.961 X-ray diffraction 87 112.383 X-ray diffraction 88 55.788 X-ray diffraction 89 92.203 X-ray diffraction 90 79.013 X-ray diffraction 91 38.670 X-ray diffraction 92 3741.132 Electron Microscopy 93 19.151 X-ray diffraction 94 169.609 X-ray diffraction 95 29.675 X-ray diffraction 96 108.217 X-ray diffraction 97 45.060 X-ray diffraction 98 100.239 X-ray diffraction 99 35.331 X-ray diffraction journal pdb_id release_year resolution \ 0 Nat. Struct. Mol. Biol. 5o2r 2017 3.400000 1 J. Med. Chem. 4na8 2014 2.300000 2 J. Virol. 4a8s 2012 2.900000 3 Nat. Struct. Mol. Biol. 5o2r 2017 3.400000 4 Proteins 4fio 2013 1.370000 5 Proc. Natl. Acad. Sci. U.S.A. 4lsk 2014 3.480006 6 Nat. Struct. Mol. Biol. 5o2r 2017 3.400000 7 Nat. Struct. Mol. Biol. 5o2r 2017 3.400000 8 To be published 3rj8 2012 2.400000 9 Proc. Natl. Acad. Sci. U.S.A. 4lsk 2014 3.480006 10 Proc. Natl. Acad. Sci. U.S.A. 4lsk 2014 3.480006 11 Cell Rep 5dfe 2016 3.099976 12 Nat. Struct. Mol. Biol. 5o2r 2017 3.400000 13 Sci Adv 4x9g 2015 3.403000 14 Proc. Natl. Acad. Sci. U.S.A. 4lsk 2014 3.480006 15 Proc. Natl. Acad. Sci. U.S.A. 4lsk 2014 3.480006 16 Nat. Struct. Mol. Biol. 5o2r 2017 3.400000 17 Nat. Struct. Mol. Biol. 5o2r 2017 3.400000 18 Biochem. Biophys. Res. Commun. 4ouj 2014 1.460000 19 Nat. Struct. Mol. Biol. 5o2r 2017 3.400000 20 Cell Rep 5dfe 2016 3.099976 21 Elife 6mtb 2018 3.600000 22 Nat Commun 5pi2 2017 1.520000 23 Cell Rep 5dfe 2016 3.099976 24 Proc. Natl. Acad. Sci. U.S.A. 4lsk 2014 3.480006 25 Cell Rep 5dfe 2016 3.099976 26 Cell Rep 5dfe 2016 3.099976 27 Int J Mol Sci 5c2t 2015 2.750000 28 Int J Mol Sci 5c2t 2015 2.750000 29 Acta Crystallogr. D Biol. Crystallogr. 4ekh 2012 1.750000 .. ... ... ... ... 70 Bioorg. Med. Chem. Lett. 5ctw 2016 1.480000 71 J. Inorg. Biochem. 2wlb 2010 2.600000 72 Front Immunol 6s0b 2019 2.312000 73 Proc. Natl. Acad. Sci. U.S.A. 5ibc 2016 1.660000 74 Nat Commun 6phf 2019 3.100000 75 To be published 4h7r 2014 1.330000 76 Acta Crystallogr. D Biol. Crystallogr. 4h8z 2013 1.199800 77 Nat Commun 4tnh 2014 4.900007 78 Nat Commun 4tnh 2014 4.900007 79 Nat Commun 6jct 2019 3.180000 80 Nat Commun 4tnh 2014 4.900007 81 Acta Crystallogr D Struct Biol 6nwo 2019 2.110000 82 Rus.J.Bioorg.Chem. 4n3d 2014 1.340000 83 Nat Commun 4tnh 2014 4.900007 84 Biochemistry 5d2j 2016 1.718000 85 Nat Commun 4tnh 2014 4.900007 86 Nat Commun 6phf 2019 3.100000 87 Microbiology (Reading, Engl.) 6r1e 2019 2.600000 88 J. Biol. Chem. 3zli 2013 1.800000 89 Biochemistry 6mrh 2019 2.020000 90 Nucleic Acids Res. 5o85 2017 3.400000 91 J. Biol. Chem. 3i7z 2010 2.300000 92 Cell 6ff7 2019 4.500000 93 J. Mol. Biol. 4e2u 2012 1.582000 94 Structure 4fip 2012 2.686000 95 To be published 4zko 2016 1.290000 96 To be published 5i4u 2017 2.372000 97 Proteomics 6bxq 2018 1.580000 98 J. Med. Chem. 5gvp 2017 2.260000 99 J. Med. Chem. 
5n2t 2017 1.379000 synchrotron_site 0 NaN 1 NaN 2 ESRF 3 NaN 4 Australian Synchrotron 5 APS 6 NaN 7 NaN 8 EMBL/DESY, Hamburg 9 APS 10 APS 11 APS 12 NaN 13 BSRF 14 APS 15 APS 16 NaN 17 NaN 18 APS 19 NaN 20 APS 21 NaN 22 Diamond 23 APS 24 APS 25 APS 26 APS 27 SPring-8 28 SPring-8 29 CHESS .. ... 70 Diamond 71 NaN 72 ESRF 73 APS 74 CLSI 75 Diamond 76 SSRL 77 SLAC LCLS 78 SLAC LCLS 79 NaN 80 SLAC LCLS 81 Australian Synchrotron 82 APS 83 SLAC LCLS 84 LNLS 85 SLAC LCLS 86 CLSI 87 NaN 88 Diamond 89 APS 90 SOLEIL 91 NaN 92 NaN 93 ESRF 94 APS 95 APS 96 ALS 97 Australian Synchrotron 98 NSRRC 99 BESSY [100 rows x 7 columns] ###Markdown We can use the this to count how many PDB codes there are for each experimental methodThis groups PDB IDs by experimental method and then counts the number of unique PDB IDs per method. ###Code ds = df.groupby('release_year')['pdb_id'].nunique() ds_exp = df.groupby(['release_year','experimental_method'])['pdb_id'].nunique() #Group X-ray entries: df_xray = df[df['experimental_method'] == 'X-ray diffraction'] xray_year_pdb = df_xray.groupby('release_year')['pdb_id'].nunique() #Group EM entries df_em = df[df['experimental_method'] == 'Electron Microscopy'] em_year_pdb = df_em.groupby('release_year')['pdb_id'].nunique() #By average resolution: #Xray entries: df_xray["resolution"].mean() xray_year = df_xray.groupby('release_year') xray_year_res = xray_year["resolution"].mean() #EM entries: df_em["resolution"].mean() em_year = df_em.groupby('release_year') em_year_res = em_year["resolution"].mean() print(xray_year_pdb) ###Output release_year 2010 2 2011 1 2012 7 2013 8 2014 8 2015 5 2016 8 2017 6 2018 2 2019 5 Name: pdb_id, dtype: int64 ###Markdown We can find which experimental method has the greatest (max) or lowest (min) number of entries. 
###Code dt = ds.max() print(dt) dt = ds.min() print(dt) ###Output 78 1 ###Markdown We can sort the results so its in decending order and then the first value is the experimental method with the highest number of results ###Code ds.sort_values(ascending=False).index[0] ###Output _____no_output_____ ###Markdown Or sort ascending so the experimental method with the lowest number of results is given ###Code ds.sort_values(ascending=True).index[0] ###Output _____no_output_____ ###Markdown Or we can then very easily plot these results as a bar chart ###Code ds.iplot(kind='bar') ###Output _____no_output_____ ###Markdown We will make this into two functions so we can resue them ###Code def pandas_count(list_of_results, column_to_group_by): df = pandas_dataset(list_of_results) ds = df.groupby(column_to_group_by)['pdb_id'].nunique() return ds def pandas_min_max(list_of_results, column_to_group_by, get_min=True): df = pandas_dataset(list_of_results) if get_min: ds = df.groupby(column_to_group_by)['pdb_id'].min() else: ds = df.groupby(column_to_group_by)['pdb_id'].max() return ds def pandas_plot(list_of_results, column_to_group_by, graph_type='bar'): ds = pandas_count(list_of_results=list_of_results, column_to_group_by=column_to_group_by) ds.iplot(kind=graph_type) ###Output _____no_output_____ ###Markdown One for counting the results ###Code pandas_count(list_of_results=results, column_to_group_by='experimental_method') ###Output _____no_output_____ ###Markdown One for getting min or max ###Code print('updated search') search_terms = {"molecule_name":"Dihydrofolate reductase", "organism_name":"Human" } filter_terms = ['pdb_id', 'resolution'] new_results = run_search(search_terms, filter_terms) pandas_min_max(list_of_results=new_results, column_to_group_by='resolution') ###Output updated search https://www.ebi.ac.uk/pdbe/search/pdb/select?q=molecule_name:"Dihydrofolate reductase" AND organism_name:Human&fl=pdb_id,resolution&wt=json&rows=100 Number of results for Dihydrofolate reductase,Human: 79 ###Markdown and one for plotting the results ###Code pandas_plot(list_of_results=results, column_to_group_by='experimental_method') ###Output _____no_output_____ ###Markdown Remember this only searched through the first 10 results.To increase the number of entries we have to run the search again, this time setting number_of_rows to a number in the function run_search. ###Code search_terms = {"molecule_name":"Dihydrofolate reductase", "organism_name":"Human" } results = run_search(search_terms, number_of_rows=10000) ###Output https://www.ebi.ac.uk/pdbe/search/pdb/select?q=molecule_name:"Dihydrofolate reductase" AND organism_name:Human&wt=json&rows=10000 Number of results for Dihydrofolate reductase,Human: 79 ###Markdown Then we can count the results using our pandas function above ###Code pandas_count(list_of_results=results, column_to_group_by='experimental_method') ###Output _____no_output_____ ###Markdown Changing the result so it groups by release year of the PDB entries. 
###Code pandas_count(list_of_results=results, column_to_group_by='release_year') ###Output _____no_output_____ ###Markdown And then plot the number of entries released per year ###Code pandas_plot(list_of_results=results, column_to_group_by='release_year') ###Output _____no_output_____ ###Markdown We can make this into a line graph ###Code pandas_plot(list_of_results=results, column_to_group_by='release_year', graph_type='line') ###Output _____no_output_____ ###Markdown Try changing the term you want to search for and see if you get interesting results. 7) searching for two terms at onceIt would be interesting to see how many PDB entries were solved by each experimental method per year. we can use the tag "release_year" to get the year of release of each entryWe have to define a new function to group entries by two terms.When we do the search we have to filter the results by the terms we want to plot otherwise it takes too long to run. ###Code search_terms = {"all_enzyme_names":"Lysozyme", } filter_results = ['beam_source_name','release_year', 'pdb_id'] results = run_search(search_terms, filter_results, number_of_rows=10000) ###Output https://www.ebi.ac.uk/pdbe/search/pdb/select?q=all_enzyme_names:Lysozyme&fl=beam_source_name,release_year,pdb_id&wt=json&rows=10000 Number of results for Lysozyme: 1865 ###Markdown This will take a while as it will return lots of results. We can then define a function to group the results by two terms. ###Code def pandas_plot_multi_groupby(results, first_column_to_group_by, second_column_to_group_by, y_axis='pdb_id', graph_type='line'): df = pandas_dataset(results) new_df = df.groupby([first_column_to_group_by, second_column_to_group_by]) ds = new_df.count().unstack().reset_index(first_column_to_group_by) ds.iplot(x=first_column_to_group_by, y=y_axis, kind=graph_type) def pandas_plot_multi_groupby_min(results, first_column_to_group_by, second_column_to_group_by, graph_type='line', use_min=False, use_max=False): df = pandas_dataset(results) new_df = df.groupby([first_column_to_group_by])[second_column_to_group_by] ds = None if use_min: ds = new_df.min() elif use_max: ds = new_df.max() else: print('specify either use_min or use_max') return None ds.plot(x=first_column_to_group_by, y=second_column_to_group_by, kind=graph_type) def pandas_box_plot(results, first_column_to_group_by, second_column_to_group_by): df = pandas_dataset(results) df.boxplot(column=second_column_to_group_by,by=first_column_to_group_by) pandas_plot_multi_groupby(results, 'release_year', 'beam_source_name') ###Output _____no_output_____
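###Markdown The two remaining helpers defined above, `pandas_plot_multi_groupby_min` and `pandas_box_plot`, need a numeric second column such as resolution, which was not included in the Lysozyme filter list. A minimal sketch of how they could be used, assuming the search is repeated with 'resolution' added to the filtered fields (the variable name `results_with_res` is only illustrative):
###Code
# Hypothetical follow-up search: keep the resolution field this time
search_terms = {"all_enzyme_names": "Lysozyme"}
filter_results = ['release_year', 'resolution', 'pdb_id']
results_with_res = run_search(search_terms, filter_results, number_of_rows=10000)

# Lowest (best) resolution obtained per release year, using the helper defined above
pandas_plot_multi_groupby_min(results_with_res, 'release_year', 'resolution', use_min=True)

# Spread of resolutions per release year as a box plot
pandas_box_plot(results_with_res, 'release_year', 'resolution')
###Output _____no_output_____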
old/bias_classifier_v2.ipynb
###Markdown Dataset Preparation ###Code import pandas as pd import seaborn as sns import numpy as np import nltk import re from nltk.stem.porter import PorterStemmer from sklearn.preprocessing import LabelEncoder train_df = pd.read_json("./data/bias_articles_train.json") dev_df = pd.read_json("./data/bias_articles_dev.json") test_df = pd.read_json("./data/bias_articles_test.json") # drop id=175 (all values are NaN) train_df = train_df.dropna().reset_index() def data_distribution(df): return sns.countplot(x='bias', data=df) # "Center" class proportion is almost 2:1 compared to the others. data_distribution(train_df) def data_cleaning(text): # Remove symbols and punctuations & apply loxer() to string formatted_text = re.sub(r"[^\w\s]", " ", text).lower() # Remove stopwords stopwords = set(nltk.corpus.stopwords.words('english')) words = [i for i in formatted_text.split() if not i in stopwords] # Stemming tokens word_stem = [PorterStemmer().stem(word) for word in words] return (" ".join(word_stem)) def column_concat(df, col1, col2): return (df[col1] + ' ' + df[col2]) train_df['title_body'] = [data_cleaning(i) for i in column_concat(train_df, 'title', 'body')] dev_df['title_body'] = [data_cleaning(i) for i in column_concat(dev_df, 'title', 'body')] test_df['title_body'] = [data_cleaning(i) for i in column_concat(test_df, 'title', 'body')] train_x, dev_x, test_x = train_df['title_body'], dev_df['title_body'], test_df['title_body'] train_y, dev_y = LabelEncoder().fit_transform(train_df['bias']), LabelEncoder().fit_transform(dev_df['bias']) ###Output _____no_output_____ ###Markdown Feature Engineering & Model Training & Fine Tuning ###Code from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer, TfidfTransformer from sklearn.model_selection import GridSearchCV from sklearn.ensemble import RandomForestClassifier from sklearn.naive_bayes import MultinomialNB from sklearn.svm import SVC from sklearn.pipeline import Pipeline from sklearn.model_selection import GridSearchCV from sklearn.metrics import accuracy_score, classification_report ###Output _____no_output_____ ###Markdown Naive Bayes Training ###Code NB_pipe = Pipeline([ ('cv', CountVectorizer()), ('tfidf', TfidfTransformer()), ('nb_clf', MultinomialNB())]) NB_pipe.fit(train_x, train_y) ###Output _____no_output_____ ###Markdown Fine tuning ###Code parameters = {'cv__max_df': (0.25, 0.5, 0.75, 1.0), 'cv__analyzer': ['word', 'char'], 'cv__ngram_range': [(1, 3), (2, 3), (1, 4)], 'tfidf__norm': ('l1', 'l2'), 'tfidf__use_idf': (True, False)} NV_gs = GridSearchCV(NB_pipe, parameters, n_jobs=-1, verbose=3) NV_gs = NV_gs.fit(train_x, train_y) NV_gs.best_params_ NV_gs.best_score_ ###Output _____no_output_____ ###Markdown Prediction & Evaluation (with best params) ###Code best_NB_pipe = Pipeline([ ('cv', CountVectorizer(analyzer='word', max_df=0.25, ngram_range=(1,3))), ('tfidf', TfidfTransformer(norm='l1', use_idf=True)), ('nb_clf', MultinomialNB())]) best_NB_pipe.fit(train_x, train_y) NB_pred = best_NB_pipe.predict(dev_x) print(NB_pred) print(classification_report(NB_pred, dev_y)) ###Output precision recall f1-score support 0 1.00 0.50 0.67 30 1 0.00 0.00 0.00 0 2 0.00 0.00 0.00 0 accuracy 0.50 30 macro avg 0.33 0.17 0.22 30 weighted avg 1.00 0.50 0.67 30 ###Markdown SVM Training ###Code SVM_pipe = Pipeline([ ('cv', CountVectorizer()), ('svm_clf', SVC(kernel='poly', degree=3))]) SVM_pipe.fit(train_x, train_y) ###Output _____no_output_____ ###Markdown Fine tuning ###Code parameters = {'cv__max_df': (0.5, 0.75), 'cv__analyzer': 
['word', 'char'], 'svm_clf__C': (0.5, 0.75, 1, 1.25, 1.5), 'svm_clf__class_weight': [None, 'balanced'], 'svm_clf__gamma': ['auto', 'scale'] } SVM_gs = GridSearchCV(SVM_pipe, parameters, n_jobs=-1, verbose=3) SVM_gs = SVM_gs.fit(train_x, train_y) SVM_gs.best_params_ SVM_gs.best_score_ ###Output _____no_output_____ ###Markdown Prediction & Evaluation (with best params) ###Code best_SVM_pipe = Pipeline([ ('cv', CountVectorizer(analyzer='char', max_df=0.75)), ('svm_clf', SVC(kernel='poly', degree=3, gamma='scale', C=1.25))]) best_SVM_pipe.fit(train_x, train_y) SVM_pred = best_SVM_pipe.predict(dev_x) print(SVM_pred) print(classification_report(SVM_pred, dev_y)) ###Output precision recall f1-score support 0 0.80 0.46 0.59 26 1 0.00 0.00 0.00 2 2 0.14 0.50 0.22 2 accuracy 0.43 30 macro avg 0.31 0.32 0.27 30 weighted avg 0.70 0.43 0.52 30 ###Markdown Random Forest Training (Random Forest on Count Vectors) ###Code RF_cv_pipe = Pipeline([ ('cv', CountVectorizer()), ('rf_clf', RandomForestClassifier())]) RF_cv_pipe.fit(train_x, train_y) ###Output _____no_output_____ ###Markdown Fine Tuning (Random Forest on Count Vectors) ###Code parameters = {'cv__max_df': (0.5, 0.75, 1.0), 'cv__analyzer': ['word', 'char'], 'rf_clf__max_features': ['auto', 'sqrt'], 'rf_clf__max_depth': (4,6,8,10), 'rf_clf__class_weight': [None, 'balanced'] } RF_cv_gs = GridSearchCV(RF_cv_pipe, parameters, n_jobs=-1, verbose=3) RF_cv_gs = RF_cv_gs.fit(train_x, train_y) RF_cv_gs.best_params_ RF_cv_gs.best_score_ ###Output _____no_output_____ ###Markdown Prediction & Evaluation (with best params - Random Forest on Count Vectors) ###Code best_RF_cv_pipe = Pipeline([ ('cv', CountVectorizer(max_df=0.75, analyzer='word')), ('rf_clf', RandomForestClassifier(class_weight='balanced', max_depth=10, max_features='auto'))]) best_RF_cv_pipe.fit(train_x, train_y) RF_cv_pred = best_RF_cv_pipe.predict(dev_x) print(RF_cv_pred) print(classification_report(RF_cv_pred, dev_y)) ###Output precision recall f1-score support 0 0.73 0.48 0.58 23 1 0.00 0.00 0.00 3 2 0.00 0.00 0.00 4 accuracy 0.37 30 macro avg 0.24 0.16 0.19 30 weighted avg 0.56 0.37 0.44 30 ###Markdown Training (Random Forest on TF IDF Vectors, Ngram Level) ###Code RF_tfidf_pipe = Pipeline([ ('tfidf', TfidfVectorizer(ngram_range=(2, 3), analyzer='word')), ('rf_clf', RandomForestClassifier())]) RF_tfidf_pipe.fit(train_x, train_y) ###Output _____no_output_____ ###Markdown Fine Tuning (Random Forest on TF IDF Vectors, Ngram Level) ###Code parameters = {'tfidf__max_df': (0.25, 0.5, 0.75), 'rf_clf__max_features': ['auto', 'sqrt'], 'rf_clf__class_weight': [None, 'balanced'], 'rf_clf__max_depth': (4,6,8,10,12) } RF_tfidf_gs = GridSearchCV(RF_tfidf_pipe, parameters, n_jobs=-1, verbose=3) RF_tfidf_gs= RF_tfidf_gs.fit(train_x, train_y) RF_tfidf_gs.best_params_ RF_tfidf_gs.best_score_ ###Output _____no_output_____ ###Markdown Prediction & Evaluation (with best params - Random Forest on TF IDF Vectors, Ngram Level)) ###Code best_RF_tfidf_pipe = Pipeline([ ('tfidf', TfidfVectorizer(max_df=0.75)), ('rf_clf', RandomForestClassifier(class_weight='balanced', max_features='auto', max_depth=12))]) best_RF_tfidf_pipe.fit(train_x, train_y) RF_tfidf_pred = best_RF_tfidf_pipe.predict(dev_x) print(RF_tfidf_pred) print(classification_report(RF_tfidf_pred, dev_y)) ###Output precision recall f1-score support 0 0.73 0.50 0.59 22 1 0.00 0.00 0.00 1 2 0.29 0.29 0.29 7 accuracy 0.43 30 macro avg 0.34 0.26 0.29 30 weighted avg 0.60 0.43 0.50 30 ###Markdown Test set label prediction ###Code best_RF_cv_pipe = Pipeline([ 
('cv', CountVectorizer(max_df=0.75, analyzer='word')), ('rf_clf', RandomForestClassifier(class_weight='balanced', max_depth=10, max_features='auto'))]) best_RF_cv_pipe.fit(train_x, train_y) RF_cv_pred = best_RF_cv_pipe.predict(test_x) print(RF_cv_pred) test_df['bias_predicted'] = RF_cv_pred test_df["bias_predicted"].replace({0: "Center", 2: "Right", 1: "Left"}, inplace=True) test_df test_df.to_csv('data/pred/bias_articles_test_pred_v2.csv', columns=['bias_predicted', 'id', 'title', 'body'], index=False) ###Output _____no_output_____
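###Markdown A possible follow-up, not part of the original workflow: persist the fitted pipeline so the test-set predictions can be reproduced later without re-running the grid searches. A minimal sketch using the standalone joblib package (assumed to be installed alongside scikit-learn); the file name is only an example.
###Code
import joblib

# Save the fitted count-vector Random Forest pipeline to disk (example file name)
joblib.dump(best_RF_cv_pipe, 'bias_rf_cv_pipeline.joblib')

# Reload it later and predict on the same cleaned test texts
reloaded_pipe = joblib.load('bias_rf_cv_pipeline.joblib')
reloaded_pred = reloaded_pipe.predict(test_x)
print(reloaded_pred[:10])
###Output _____no_output_____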
Section_3/Video_3_2.ipynb
###Markdown Convolutional Neural Networks ###Code from __future__ import print_function import argparse import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torchvision import datasets, transforms import numpy as np import matplotlib.pyplot as plt %matplotlib inline class CNN(nn.Module): def __init__(self): super(CNN, self).__init__() self.conv1 = nn.Conv2d(1, 20, 5, 1) self.conv2 = nn.Conv2d(20, 50, 5, 1) self.fc1 = nn.Linear(4*4*50, 500) self.fc2 = nn.Linear(500, 10) def forward(self, x): x = F.relu(self.conv1(x)) x = F.max_pool2d(x, 2, 2) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2, 2) x = x.view(-1, 4*4*50) x = F.relu(self.fc1(x)) x = self.fc2(x) return F.log_softmax(x, dim=1) def train(model, device, train_loader, optimizer, epoch): # sets the module in training mode model.train() for batch_idx, (data, target) in enumerate(train_loader): data = data.to(device) target = target.to(device) optimizer.zero_grad() output = model(data) loss = F.nll_loss(output, target) loss.backward() optimizer.step() # output if batch_idx % 100 == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.item())) def test(model, device, test_loader): # sets the model in evaluation mode model.eval() test_loss = 0 correct = 0 with torch.no_grad(): for data, target in test_loader: data = data.to(device) target = target.to(device) output = model(data) test_loss += F.nll_loss(output, target, reduction='sum').item() pred = output.max(1, keepdim=True)[1] correct += pred.eq(target.view_as(pred)).sum().item() test_loss /= len(test_loader.dataset) print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( test_loss, correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset))) torch.manual_seed(42) #sets the device device = torch.device("cuda" if torch.cuda.is_available() else "cpu") device # Data loading batch_size = 64 test_batch_size = 1000 train_loader = torch.utils.data.DataLoader( datasets.MNIST('../data', train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ])), batch_size=batch_size, shuffle=True ) test_loader = torch.utils.data.DataLoader( datasets.MNIST('../data', train=False, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ])), batch_size=test_batch_size, shuffle=True ) dataiter = iter(train_loader) images, labels = dataiter.next() images = images.numpy() plt.imshow(np.squeeze(images[0])) # Training the model epochs = 10 lr = 0.01 momentum = 0.5 model = CNN().to(device) optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum) for epoch in range(1, epochs + 1): train(model, device, train_loader, optimizer, epoch) test(model, device, test_loader) # Saving the model torch.save(model.state_dict(), "mnist.pt") ###Output _____no_output_____
examples/models/sklearn_iris_customdata/sklearn_iris_customdata.ipynb
###Markdown Scikit-Learn Iris Model using customData* Wrap a scikit-learn python model for use as a prediction microservice in seldon-core * Run locally on Docker to test * Deploy on seldon-core running on a Kubernetes cluster Dependencies* [s2i](https://github.com/openshift/source-to-image)* Seldon Core v1.0.3+ installed* `pip install sklearn seldon-core protobuf grpcio` Train locally ###Code import os import numpy as np from sklearn import datasets from sklearn.externals import joblib from sklearn.linear_model import LogisticRegression from sklearn.pipeline import Pipeline def main(): clf = LogisticRegression() p = Pipeline([("clf", clf)]) print("Training model...") p.fit(X, y) print("Model trained!") filename_p = "IrisClassifier.sav" print("Saving model in %s" % filename_p) joblib.dump(p, filename_p) print("Model saved!") if __name__ == "__main__": print("Loading iris data set...") iris = datasets.load_iris() X, y = iris.data, iris.target print("Dataset loaded!") main() ###Output _____no_output_____ ###Markdown Custom Protobuf SpecificationFirst, we'll need to define our custom protobuf specification so that it can be leveraged. ###Code %%writefile iris.proto syntax = "proto3"; package iris; message IrisPredictRequest { float sepal_length = 1; float sepal_width = 2; float petal_length = 3; float petal_width = 4; } message IrisPredictResponse { float setosa = 1; float versicolor = 2; float virginica = 3; } ###Output _____no_output_____ ###Markdown Custom Protobuf CompilationWe will need to compile our custom protobuf for python so that we can unpack the `customData` field passed to our `predict` method later on. ###Code !python -m grpc.tools.protoc --python_out=./ --proto_path=. iris.proto ###Output _____no_output_____ ###Markdown gRPC testWrap model using s2i ###Code !s2i build . 
seldonio/seldon-core-s2i-python37-ubi8:1.7.0-dev seldonio/sklearn-iris-customdata:0.1 ###Output _____no_output_____ ###Markdown Serve the model locally ###Code !docker run --name "iris_predictor" -d --rm -p 5000:5000 seldonio/sklearn-iris-customdata:0.1 ###Output _____no_output_____ ###Markdown Test using custom protobuf payload ###Code import grpc from iris_pb2 import IrisPredictRequest, IrisPredictResponse from seldon_core.proto import prediction_pb2, prediction_pb2_grpc channel = grpc.insecure_channel("localhost:5000") stub = prediction_pb2_grpc.ModelStub(channel) iris_request = IrisPredictRequest( sepal_length=7.233, sepal_width=4.652, petal_length=7.39, petal_width=0.324 ) seldon_request = prediction_pb2.SeldonMessage() seldon_request.customData.Pack(iris_request) response = stub.Predict(seldon_request) iris_response = IrisPredictResponse() response.customData.Unpack(iris_response) print(iris_response) ###Output _____no_output_____ ###Markdown Stop serving model ###Code !docker rm iris_predictor --force ###Output _____no_output_____ ###Markdown Setup Seldon CoreUse the [setup notebook](https://github.com/SeldonIO/seldon-core/blob/master/notebooks/seldon_core_setup.ipynb) to setup Seldon Core with an ingress - either Ambassador or IstioThen port-forward to that ingress on localhost:8003 in a separate terminal either with:* Ambassador: `kubectl port-forward $(kubectl get pods -n seldon -l app.kubernetes.io/name=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080`* Istio: `kubectl port-forward $(kubectl get pods -l istio=ingressgateway -n istio-system -o jsonpath='{.items[0].metadata.name}') -n istio-system 8003:80` ###Code !kubectl create namespace seldon !kubectl config set-context $(kubectl config current-context) --namespace=seldon ###Output _____no_output_____ ###Markdown Deploy your Seldon ModelWe first create a configuration file: ###Code %%writefile sklearn_iris_customdata_deployment.yaml apiVersion: machinelearning.seldon.io/v1 kind: SeldonDeployment metadata: name: seldon-deployment-example spec: name: sklearn-iris-deployment predictors: - componentSpecs: - spec: containers: - image: groszewn/sklearn-iris-customdata:0.1 imagePullPolicy: IfNotPresent name: sklearn-iris-classifier graph: children: [] endpoint: type: GRPC name: sklearn-iris-classifier type: MODEL name: sklearn-iris-predictor replicas: 1 ###Output _____no_output_____ ###Markdown Run the model in our clusterApply the Seldon Deployment configuration file we just created ###Code !kubectl create -f sklearn_iris_customdata_deployment.yaml ###Output _____no_output_____ ###Markdown Check that the model has been deployed ###Code !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=seldon-deployment-example -o jsonpath='{.items[0].metadata.name}') ###Output _____no_output_____ ###Markdown Test by sending prediction calls`IrisPredictRequest` sent via the `customData` field. 
###Code iris_request = IrisPredictRequest( sepal_length=7.233, sepal_width=4.652, petal_length=7.39, petal_width=0.324 ) seldon_request = prediction_pb2.SeldonMessage() seldon_request.customData.Pack(iris_request) channel = grpc.insecure_channel("localhost:8003") stub = prediction_pb2_grpc.SeldonStub(channel) metadata = [("seldon", "seldon-deployment-example"), ("namespace", "seldon")] response = stub.Predict(request=seldon_request, metadata=metadata) iris_response = IrisPredictResponse() response.customData.Unpack(iris_response) print(iris_response) ###Output _____no_output_____ ###Markdown Cleanup our deployment ###Code !kubectl delete -f sklearn_iris_customdata_deployment.yaml ###Output _____no_output_____ ###Markdown Scikit-Learn Iris Model using customData* Wrap a scikit-learn python model for use as a prediction microservice in seldon-core * Run locally on Docker to test * Deploy on seldon-core running on a Kubernetes cluster Dependencies* [s2i](https://github.com/openshift/source-to-image)* Seldon Core v1.0.3+ installed* `pip install sklearn seldon-core protobuf grpcio` Train locally ###Code import numpy as np import os from sklearn.linear_model import LogisticRegression from sklearn.pipeline import Pipeline from sklearn.externals import joblib from sklearn import datasets def main(): clf = LogisticRegression() p = Pipeline([('clf', clf)]) print('Training model...') p.fit(X, y) print('Model trained!') filename_p = 'IrisClassifier.sav' print('Saving model in %s' % filename_p) joblib.dump(p, filename_p) print('Model saved!') if __name__ == "__main__": print('Loading iris data set...') iris = datasets.load_iris() X, y = iris.data, iris.target print('Dataset loaded!') main() ###Output _____no_output_____ ###Markdown Custom Protobuf SpecificationFirst, we'll need to define our custom protobuf specification so that it can be leveraged. ###Code %%writefile iris.proto syntax = "proto3"; package iris; message IrisPredictRequest { float sepal_length = 1; float sepal_width = 2; float petal_length = 3; float petal_width = 4; } message IrisPredictResponse { float setosa = 1; float versicolor = 2; float virginica = 3; } ###Output _____no_output_____ ###Markdown Custom Protobuf CompilationWe will need to compile our custom protobuf for python so that we can unpack the `customData` field passed to our `predict` method later on. ###Code !python -m grpc.tools.protoc --python_out=./ --proto_path=. iris.proto ###Output _____no_output_____ ###Markdown gRPC testWrap model using s2i ###Code !s2i build . 
seldonio/seldon-core-s2i-python37:1.1.1-rc seldonio/sklearn-iris-customdata:0.1 ###Output _____no_output_____ ###Markdown Serve the model locally ###Code !docker run --name "iris_predictor" -d --rm -p 5000:5000 seldonio/sklearn-iris-customdata:0.1 ###Output _____no_output_____ ###Markdown Test using custom protobuf payload ###Code from iris_pb2 import IrisPredictRequest, IrisPredictResponse from seldon_core.proto import prediction_pb2, prediction_pb2_grpc import grpc channel = grpc.insecure_channel("localhost:5000") stub = prediction_pb2_grpc.ModelStub(channel) iris_request = IrisPredictRequest(sepal_length=7.233, sepal_width=4.652, petal_length=7.39, petal_width=0.324) seldon_request = prediction_pb2.SeldonMessage() seldon_request.customData.Pack(iris_request) response = stub.Predict(seldon_request) iris_response = IrisPredictResponse() response.customData.Unpack(iris_response) print(iris_response) ###Output _____no_output_____ ###Markdown Stop serving model ###Code !docker rm iris_predictor --force ###Output _____no_output_____ ###Markdown Setup Seldon CoreUse the [setup notebook](https://github.com/SeldonIO/seldon-core/blob/master/notebooks/seldon_core_setup.ipynb) to setup Seldon Core with an ingress - either Ambassador or IstioThen port-forward to that ingress on localhost:8003 in a separate terminal either with:* Ambassador: `kubectl port-forward $(kubectl get pods -n seldon -l app.kubernetes.io/name=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080`* Istio: `kubectl port-forward $(kubectl get pods -l istio=ingressgateway -n istio-system -o jsonpath='{.items[0].metadata.name}') -n istio-system 8003:80` ###Code !kubectl create namespace seldon !kubectl config set-context $(kubectl config current-context) --namespace=seldon ###Output _____no_output_____ ###Markdown Deploy your Seldon ModelWe first create a configuration file: ###Code %%writefile sklearn_iris_customdata_deployment.yaml apiVersion: machinelearning.seldon.io/v1 kind: SeldonDeployment metadata: name: seldon-deployment-example spec: name: sklearn-iris-deployment predictors: - componentSpecs: - spec: containers: - image: groszewn/sklearn-iris-customdata:0.1 imagePullPolicy: IfNotPresent name: sklearn-iris-classifier graph: children: [] endpoint: type: GRPC name: sklearn-iris-classifier type: MODEL name: sklearn-iris-predictor replicas: 1 ###Output _____no_output_____ ###Markdown Run the model in our clusterApply the Seldon Deployment configuration file we just created ###Code !kubectl create -f sklearn_iris_customdata_deployment.yaml ###Output _____no_output_____ ###Markdown Check that the model has been deployed ###Code !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=seldon-deployment-example -o jsonpath='{.items[0].metadata.name}') ###Output _____no_output_____ ###Markdown Test by sending prediction calls`IrisPredictRequest` sent via the `customData` field. 
###Code iris_request = IrisPredictRequest(sepal_length=7.233, sepal_width=4.652, petal_length=7.39, petal_width=0.324) seldon_request = prediction_pb2.SeldonMessage() seldon_request.customData.Pack(iris_request) channel = grpc.insecure_channel("localhost:8003") stub = prediction_pb2_grpc.SeldonStub(channel) metadata = [("seldon", "seldon-deployment-example"), ("namespace", "seldon")] response = stub.Predict(request=seldon_request, metadata=metadata) iris_response = IrisPredictResponse() response.customData.Unpack(iris_response) print(iris_response) ###Output _____no_output_____ ###Markdown Cleanup our deployment ###Code !kubectl delete -f sklearn_iris_customdata_deployment.yaml ###Output _____no_output_____ ###Markdown Scikit-Learn Iris Model using customData* Wrap a scikit-learn python model for use as a prediction microservice in seldon-core * Run locally on Docker to test * Deploy on seldon-core running on a Kubernetes cluster Dependencies* [s2i](https://github.com/openshift/source-to-image)* Seldon Core v1.0.3+ installed* `pip install sklearn seldon-core protobuf grpcio` Train locally ###Code import numpy as np import os from sklearn.linear_model import LogisticRegression from sklearn.pipeline import Pipeline from sklearn.externals import joblib from sklearn import datasets def main(): clf = LogisticRegression() p = Pipeline([('clf', clf)]) print('Training model...') p.fit(X, y) print('Model trained!') filename_p = 'IrisClassifier.sav' print('Saving model in %s' % filename_p) joblib.dump(p, filename_p) print('Model saved!') if __name__ == "__main__": print('Loading iris data set...') iris = datasets.load_iris() X, y = iris.data, iris.target print('Dataset loaded!') main() ###Output _____no_output_____ ###Markdown Custom Protobuf SpecificationFirst, we'll need to define our custom protobuf specification so that it can be leveraged. ###Code %%writefile iris.proto syntax = "proto3"; package iris; message IrisPredictRequest { float sepal_length = 1; float sepal_width = 2; float petal_length = 3; float petal_width = 4; } message IrisPredictResponse { float setosa = 1; float versicolor = 2; float virginica = 3; } ###Output _____no_output_____ ###Markdown Custom Protobuf CompilationWe will need to compile our custom protobuf for python so that we can unpack the `customData` field passed to our `predict` method later on. ###Code !python -m grpc.tools.protoc --python_out=./ --proto_path=. iris.proto ###Output _____no_output_____ ###Markdown gRPC testWrap model using s2i ###Code !s2i build . 
seldonio/seldon-core-s2i-python37:1.4.0 seldonio/sklearn-iris-customdata:0.1 ###Output _____no_output_____ ###Markdown Serve the model locally ###Code !docker run --name "iris_predictor" -d --rm -p 5000:5000 seldonio/sklearn-iris-customdata:0.1 ###Output _____no_output_____ ###Markdown Test using custom protobuf payload ###Code from iris_pb2 import IrisPredictRequest, IrisPredictResponse from seldon_core.proto import prediction_pb2, prediction_pb2_grpc import grpc channel = grpc.insecure_channel("localhost:5000") stub = prediction_pb2_grpc.ModelStub(channel) iris_request = IrisPredictRequest(sepal_length=7.233, sepal_width=4.652, petal_length=7.39, petal_width=0.324) seldon_request = prediction_pb2.SeldonMessage() seldon_request.customData.Pack(iris_request) response = stub.Predict(seldon_request) iris_response = IrisPredictResponse() response.customData.Unpack(iris_response) print(iris_response) ###Output _____no_output_____ ###Markdown Stop serving model ###Code !docker rm iris_predictor --force ###Output _____no_output_____ ###Markdown Setup Seldon CoreUse the [setup notebook](https://github.com/SeldonIO/seldon-core/blob/master/notebooks/seldon_core_setup.ipynb) to setup Seldon Core with an ingress - either Ambassador or IstioThen port-forward to that ingress on localhost:8003 in a separate terminal either with:* Ambassador: `kubectl port-forward $(kubectl get pods -n seldon -l app.kubernetes.io/name=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080`* Istio: `kubectl port-forward $(kubectl get pods -l istio=ingressgateway -n istio-system -o jsonpath='{.items[0].metadata.name}') -n istio-system 8003:80` ###Code !kubectl create namespace seldon !kubectl config set-context $(kubectl config current-context) --namespace=seldon ###Output _____no_output_____ ###Markdown Deploy your Seldon ModelWe first create a configuration file: ###Code %%writefile sklearn_iris_customdata_deployment.yaml apiVersion: machinelearning.seldon.io/v1 kind: SeldonDeployment metadata: name: seldon-deployment-example spec: name: sklearn-iris-deployment predictors: - componentSpecs: - spec: containers: - image: groszewn/sklearn-iris-customdata:0.1 imagePullPolicy: IfNotPresent name: sklearn-iris-classifier graph: children: [] endpoint: type: GRPC name: sklearn-iris-classifier type: MODEL name: sklearn-iris-predictor replicas: 1 ###Output _____no_output_____ ###Markdown Run the model in our clusterApply the Seldon Deployment configuration file we just created ###Code !kubectl create -f sklearn_iris_customdata_deployment.yaml ###Output _____no_output_____ ###Markdown Check that the model has been deployed ###Code !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=seldon-deployment-example -o jsonpath='{.items[0].metadata.name}') ###Output _____no_output_____ ###Markdown Test by sending prediction calls`IrisPredictRequest` sent via the `customData` field. 
###Code iris_request = IrisPredictRequest(sepal_length=7.233, sepal_width=4.652, petal_length=7.39, petal_width=0.324) seldon_request = prediction_pb2.SeldonMessage() seldon_request.customData.Pack(iris_request) channel = grpc.insecure_channel("localhost:8003") stub = prediction_pb2_grpc.SeldonStub(channel) metadata = [("seldon", "seldon-deployment-example"), ("namespace", "seldon")] response = stub.Predict(request=seldon_request, metadata=metadata) iris_response = IrisPredictResponse() response.customData.Unpack(iris_response) print(iris_response) ###Output _____no_output_____ ###Markdown Cleanup our deployment ###Code !kubectl delete -f sklearn_iris_customdata_deployment.yaml ###Output _____no_output_____ ###Markdown Scikit-Learn Iris Model using customData* Wrap a scikit-learn python model for use as a prediction microservice in seldon-core * Run locally on Docker to test * Deploy on seldon-core running on a Kubernetes cluster Dependencies* [s2i](https://github.com/openshift/source-to-image)* Seldon Core v1.0.3+ installed* `pip install sklearn seldon-core protobuf grpcio` Train locally ###Code import numpy as np import os from sklearn.linear_model import LogisticRegression from sklearn.pipeline import Pipeline from sklearn.externals import joblib from sklearn import datasets def main(): clf = LogisticRegression() p = Pipeline([('clf', clf)]) print('Training model...') p.fit(X, y) print('Model trained!') filename_p = 'IrisClassifier.sav' print('Saving model in %s' % filename_p) joblib.dump(p, filename_p) print('Model saved!') if __name__ == "__main__": print('Loading iris data set...') iris = datasets.load_iris() X, y = iris.data, iris.target print('Dataset loaded!') main() ###Output _____no_output_____ ###Markdown Custom Protobuf SpecificationFirst, we'll need to define our custom protobuf specification so that it can be leveraged. ###Code %%writefile iris.proto syntax = "proto3"; package iris; message IrisPredictRequest { float sepal_length = 1; float sepal_width = 2; float petal_length = 3; float petal_width = 4; } message IrisPredictResponse { float setosa = 1; float versicolor = 2; float virginica = 3; } ###Output _____no_output_____ ###Markdown Custom Protobuf CompilationWe will need to compile our custom protobuf for python so that we can unpack the `customData` field passed to our `predict` method later on. ###Code !python -m grpc.tools.protoc --python_out=./ --proto_path=. iris.proto ###Output _____no_output_____ ###Markdown gRPC testWrap model using s2i ###Code !s2i build . 
seldonio/seldon-core-s2i-python37:1.3.0-dev seldonio/sklearn-iris-customdata:0.1 ###Output _____no_output_____ ###Markdown Serve the model locally ###Code !docker run --name "iris_predictor" -d --rm -p 5000:5000 seldonio/sklearn-iris-customdata:0.1 ###Output _____no_output_____ ###Markdown Test using custom protobuf payload ###Code from iris_pb2 import IrisPredictRequest, IrisPredictResponse from seldon_core.proto import prediction_pb2, prediction_pb2_grpc import grpc channel = grpc.insecure_channel("localhost:5000") stub = prediction_pb2_grpc.ModelStub(channel) iris_request = IrisPredictRequest(sepal_length=7.233, sepal_width=4.652, petal_length=7.39, petal_width=0.324) seldon_request = prediction_pb2.SeldonMessage() seldon_request.customData.Pack(iris_request) response = stub.Predict(seldon_request) iris_response = IrisPredictResponse() response.customData.Unpack(iris_response) print(iris_response) ###Output _____no_output_____ ###Markdown Stop serving model ###Code !docker rm iris_predictor --force ###Output _____no_output_____ ###Markdown Setup Seldon CoreUse the [setup notebook](https://github.com/SeldonIO/seldon-core/blob/master/notebooks/seldon_core_setup.ipynb) to setup Seldon Core with an ingress - either Ambassador or IstioThen port-forward to that ingress on localhost:8003 in a separate terminal either with:* Ambassador: `kubectl port-forward $(kubectl get pods -n seldon -l app.kubernetes.io/name=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080`* Istio: `kubectl port-forward $(kubectl get pods -l istio=ingressgateway -n istio-system -o jsonpath='{.items[0].metadata.name}') -n istio-system 8003:80` ###Code !kubectl create namespace seldon !kubectl config set-context $(kubectl config current-context) --namespace=seldon ###Output _____no_output_____ ###Markdown Deploy your Seldon ModelWe first create a configuration file: ###Code %%writefile sklearn_iris_customdata_deployment.yaml apiVersion: machinelearning.seldon.io/v1 kind: SeldonDeployment metadata: name: seldon-deployment-example spec: name: sklearn-iris-deployment predictors: - componentSpecs: - spec: containers: - image: groszewn/sklearn-iris-customdata:0.1 imagePullPolicy: IfNotPresent name: sklearn-iris-classifier graph: children: [] endpoint: type: GRPC name: sklearn-iris-classifier type: MODEL name: sklearn-iris-predictor replicas: 1 ###Output _____no_output_____ ###Markdown Run the model in our clusterApply the Seldon Deployment configuration file we just created ###Code !kubectl create -f sklearn_iris_customdata_deployment.yaml ###Output _____no_output_____ ###Markdown Check that the model has been deployed ###Code !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=seldon-deployment-example -o jsonpath='{.items[0].metadata.name}') ###Output _____no_output_____ ###Markdown Test by sending prediction calls`IrisPredictRequest` sent via the `customData` field. 
###Code iris_request = IrisPredictRequest(sepal_length=7.233, sepal_width=4.652, petal_length=7.39, petal_width=0.324) seldon_request = prediction_pb2.SeldonMessage() seldon_request.customData.Pack(iris_request) channel = grpc.insecure_channel("localhost:8003") stub = prediction_pb2_grpc.SeldonStub(channel) metadata = [("seldon", "seldon-deployment-example"), ("namespace", "seldon")] response = stub.Predict(request=seldon_request, metadata=metadata) iris_response = IrisPredictResponse() response.customData.Unpack(iris_response) print(iris_response) ###Output _____no_output_____ ###Markdown Cleanup our deployment ###Code !kubectl delete -f sklearn_iris_customdata_deployment.yaml ###Output _____no_output_____ ###Markdown Scikit-Learn Iris Model using customData* Wrap a scikit-learn python model for use as a prediction microservice in seldon-core * Run locally on Docker to test * Deploy on seldon-core running on a Kubernetes cluster Dependencies* [s2i](https://github.com/openshift/source-to-image)* Seldon Core v1.0.3+ installed* `pip install sklearn seldon-core protobuf grpcio` Train locally ###Code import numpy as np import os from sklearn.linear_model import LogisticRegression from sklearn.pipeline import Pipeline from sklearn.externals import joblib from sklearn import datasets def main(): clf = LogisticRegression() p = Pipeline([('clf', clf)]) print('Training model...') p.fit(X, y) print('Model trained!') filename_p = 'IrisClassifier.sav' print('Saving model in %s' % filename_p) joblib.dump(p, filename_p) print('Model saved!') if __name__ == "__main__": print('Loading iris data set...') iris = datasets.load_iris() X, y = iris.data, iris.target print('Dataset loaded!') main() ###Output _____no_output_____ ###Markdown Custom Protobuf SpecificationFirst, we'll need to define our custom protobuf specification so that it can be leveraged. ###Code %%writefile iris.proto syntax = "proto3"; package iris; message IrisPredictRequest { float sepal_length = 1; float sepal_width = 2; float petal_length = 3; float petal_width = 4; } message IrisPredictResponse { float setosa = 1; float versicolor = 2; float virginica = 3; } ###Output _____no_output_____ ###Markdown Custom Protobuf CompilationWe will need to compile our custom protobuf for python so that we can unpack the `customData` field passed to our `predict` method later on. ###Code !python -m grpc.tools.protoc --python_out=./ --proto_path=. iris.proto ###Output _____no_output_____ ###Markdown gRPC testWrap model using s2i ###Code !s2i build . 
Data Science With Python/Data Manipulation with Pandas/Missing Values.ipynb
###Markdown
2. Handling missing values with functions
###Code
# drop NaN (Not a Number) values from the series
drop_s = sum_of_series.dropna()
drop_s

# fill NaN (Not a Number) values with zero (0)
fillna_s = sum_of_series.fillna(0)
fillna_s

# fill missing values with zero before performing the addition
fill_NaN_with_zeros_before_sum = first_series.add(secod_series, fill_value=0)
fill_NaN_with_zeros_before_sum
###Output
_____no_output_____
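###Markdown
Besides dropping rows and filling with a constant, pandas offers a few other standard strategies. A brief sketch reusing the `sum_of_series` object from above — which strategy is appropriate depends on the data, so these are illustrative only:
###Code
# fill missing values with the series mean instead of a constant
fill_with_mean = sum_of_series.fillna(sum_of_series.mean())

# carry the last valid observation forward
fill_forward = sum_of_series.fillna(method='ffill')

# linearly interpolate between the surrounding valid values
fill_interpolated = sum_of_series.interpolate()

fill_with_mean, fill_forward, fill_interpolated
###Output
_____no_output_____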
Numpy_tutorial/numpyPlot.ipynb
###Markdown numpy study ###Code import numpy as np import matplotlib.pyplot as plt x = np.arange(-10, 10, 0.1) plt.plot(x, np.e**(3 * x) - 6 * (np.e**(2 * x)) + 32, "r-", label="e^3x-6e^2x+32") plt.axis([-5,5,-20,50]); plt.grid() plt.xlabel("x axis") plt.ylabel("y axis") plt.legend() plt.show() N = 100 x = np.random.rand(N) y = np.random.rand(N) area = np.pi * (15 * np.random.rand(N))**2 # 0 to 15 point radiuses color = 2 * np.pi * np.random.rand(N) plt.xlabel("x axis") plt.ylabel("y axis") plt.scatter(x, y, s=area, c=color, alpha=0.3, cmap=plt.cm.hsv) plt.show() x = np.random.rand(30,1) plt.xlabel("x axis") plt.ylabel("y axis") plt.plot(x,'ro',label='random points') plt.legend() plt.show() n = np.random.rand(5) x = np.linspace(-0.75, 1., 100) fig, axes = plt.subplots(1, 4, figsize=(16,4)) axes[0].scatter(x, x + 0.25*np.random.randn(len(x))) axes[1].step(n, n**2, lw=2) axes[2].bar(n, n**2, align="center", width=0.1, alpha=0.5) axes[3].fill_between(x, 0, x**3 - x**2 + 0.2, color="green", alpha=0.5); plt.show() ###Output _____no_output_____
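###Markdown
A small aside on the first plot: `np.exp` expresses the exponential more directly than raising `np.e` to a power and draws the same curve. A minimal equivalent with the same styling as above:
###Code
import numpy as np
import matplotlib.pyplot as plt

x = np.arange(-10, 10, 0.1)
# same function as e^3x - 6e^2x + 32 above, written with np.exp
y = np.exp(3 * x) - 6 * np.exp(2 * x) + 32

plt.plot(x, y, "r-", label="e^3x-6e^2x+32")
plt.axis([-5, 5, -20, 50])
plt.grid()
plt.xlabel("x axis")
plt.ylabel("y axis")
plt.legend()
plt.show()
###Output
_____no_output_____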
docs/examples/nbs/interpolPlot.ipynb
###Markdown Interpolation GeoPlots> These plots are created to plot the results of interpolation over a space for a perticular timestamp. You need to provide shape files and a data frame containing the pollutant levels. The function internally uses `polire`, a library built for interpolation to interpolate the values for the whole region and then forms a contour map of the results. Standard libraries to be imported for usage ###Code #! /usr/bin/env python from geopandas import GeoDataFrame import pandas as pd import matplotlib.pyplot as plt from vayu import interpolPlot from sklearn.ensemble import RandomForestRegressor from polire.custom import CustomInterpolator """data files shape files: https://www2.census.gov/geo/tiger/TIGER2017/STATE/ data files: https://aqs.epa.gov/aqsweb/airdata/daily_44201_2019.zip https://openaq.org/#/countries/IN?_k=j7c7ka """ shapeFile = "../data/shapeFiles/tl_2017_us_state.shp" datafile = "../data/daily_44201_2018.csv" long = "Longitude" lat = "Latitude" pollutant = "Arithmetic Mean" df = pd.read_csv(datafile) shape_df = GeoDataFrame.from_file(shapeFile) shape_df.drop(shape_df.index[[34, 35, 36, 40, 41, 49, 31]], inplace=True) interpolPlot(df, shape_df, long, lat, pollutant, CustomInterpolator(RandomForestRegressor)) plt.show() ###Output /home/apoorv/Desktop/github/.env/lib/python3.6/site-packages/sklearn/ensemble/forest.py:245: FutureWarning: The default value of n_estimators will change from 10 in version 0.20 to 100 in 0.22. "10 in version 0.20 to 100 in 0.22.", FutureWarning) No handles with labels found to put in legend. ###Markdown Focusing on Califonria. ###Code shape_df = shape_df[13:14] ###Output _____no_output_____ ###Markdown We don't support `LinearSegmentedColormap` pass a `ListedColormap`. We will be demoing below how to convert from former to latter. ###Code ## This will not work # interpolPlot(df, shape_df, long, lat, pollutant, cmap='Greens') # plt.show() # Converting to ListedColormap import numpy as np from matplotlib import cm from matplotlib.colors import ListedColormap, LinearSegmentedColormap N = 258 cmap_colors = cm.get_cmap('Greens', N) newcolors = (cmap_colors(np.linspace(0, 1, N))[:, :3]).tolist() listedcmap = ListedColormap(newcolors, name='GreensListed', N=N) interpolPlot(df, shape_df, long, lat, pollutant, cmap=listedcmap, Interpolator = CustomInterpolator(RandomForestRegressor)) plt.show() ###Output /home/apoorv/Desktop/github/.env/lib/python3.6/site-packages/sklearn/ensemble/forest.py:245: FutureWarning: The default value of n_estimators will change from 10 in version 0.20 to 100 in 0.22. "10 in version 0.20 to 100 in 0.22.", FutureWarning) No handles with labels found to put in legend. ###Markdown DelhiLet's have a look at pollution data in delhi. 
###Code !ls "../data/shapeFiles/delhi" shapeFile = "../data/shapeFiles/delhi/wards delimited.shp" shape_df = GeoDataFrame.from_file(shapeFile) datafile = "../data/delhi_1_5_Aug.csv" df = pd.read_csv(datafile) df.head() locations = pd.unique(df.location) dff = [] for location in locations: mask = df['location'] == location mean = df[mask].value.mean() latitude = df[mask].latitude.mean() longitude = df[mask].longitude.mean() dff.append([latitude, longitude, mean]) df = pd.DataFrame(dff, columns=[lat, long, pollutant]) df.head() interpolPlot(df, shape_df, long, lat, pollutant, cmap=listedcmap, plot_train_points=True, markersize=5, Tcolor='orange', Interpolator = CustomInterpolator(RandomForestRegressor)) plt.show() ###Output /home/apoorv/Desktop/github/.env/lib/python3.6/site-packages/sklearn/ensemble/forest.py:245: FutureWarning: The default value of n_estimators will change from 10 in version 0.20 to 100 in 0.22. "10 in version 0.20 to 100 in 0.22.", FutureWarning) /home/apoorv/Desktop/github/.env/lib/python3.6/site-packages/geopandas/base.py:35: UserWarning: GeoSeries crs mismatch: {'init': 'epsg:4326'} and {'init': 'epsg:4269'} right.crs)) /home/apoorv/Desktop/github/.env/lib/python3.6/site-packages/geopandas/tools/sjoin.py:56: UserWarning: CRS of frames being joined does not match!({'init': 'epsg:4269'} != {'init': 'epsg:4326'}) '(%s != %s)' % (left_df.crs, right_df.crs))
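###Markdown
The loop above that averages latitude, longitude and value per station can also be written as a single `groupby` on the raw dataframe loaded from `delhi_1_5_Aug.csv`. A sketch — `df_raw` is re-read here only for illustration, and the lowercase column names are the ones used in the loop above:
###Code
# hypothetical groupby equivalent of the per-location aggregation loop
df_raw = pd.read_csv("../data/delhi_1_5_Aug.csv")
df_agg = (df_raw.groupby('location')[['latitude', 'longitude', 'value']]
                .mean()
                .reset_index(drop=True))
df_agg.columns = [lat, long, pollutant]  # rename to the names interpolPlot expects
df_agg.head()
###Output
_____no_output_____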
3. Daily vs. Weekly Rebalancing.ipynb
###Markdown Daily vs. Weekly Rebalancing By: Carl FartersonMay 8, 2020 IntroductionWhen it comes to rebalancing a portfolio, a crucial question to ask is, "how often should my portfolio rebalance?" One common theory is, "if trading fees and costs don't exist, the more rebalancing the better!"LOL.Unnecessary rebalancing can fuck up your portfolio.Unnecessary rebalancing can make a trading algorithm less effective.Unnecessary rebalancing is a common sin.I will convince you weekly rebalancing *may* be more effective than daily rebalancing.This is purely an example and should be treated as such. It's 2020 and anyone can build and deploy a trading algorithm from their home computer. Crazy times, huh? Take this into thought when designing your own trading algorithms. A trading algorithmthat's "perfect in theory" can also be "horrible in practice". Trading Algorithm usedOur trading algo uses three (hourly) moving averages.- 50- 100- 200- If 50 > 100 > 200, we have a "bullish" signal- If 50 < 100 < 200, we have a "bearish" signal- All other times, remain neutral Portfolio allocations- `bullish`: 90% Ethereum, 10% USD- `neutral`: 50% Ethereum, 50% USD- `bearish`: 10% Ethereum, 90% USD Rebalance Intervals- `D`: daily- `W`: weekly Question: which trading algorithm will have better performance? Daily or weekly? ###Code # imports import pandas as pd import numpy as np import matplotlib.pyplot as plt from datetime import datetime from FART.py.split_df import split_df from FART.py.simulate import simulate from FART.py.portfolio import Portfolio # Variables assets = ['ETH', 'USD'] # We'll use 180 day windows (6 months) with 120 day overlap (4 months) # Since our dataframe is in hours, multiply by 24 window_len = 24 * 180 overlap = 24 * 120 allocation = { 'bull': [0.90, 0.10], 'neutral': [0.50, 0.50], 'bear': [0.10, 0.90] } wiggle_room = 0.10 volatility_daily = [] volatility_weekly = [] drawdown_daily = [] drawdown_weekly = [] end_pct_daily = [] end_pct_weekly = [] loss_probability_daily = [] loss_probability_weekly = [] df_signals = pd.read_csv('/home/carter/Documents/token-sets/FART/backtests/signals.csv').to_dict(orient='records') # Helper functions def calc_volatility(array): # Determine 30-day volatility # Since we're using hourly data, 24*30 = 720 hours or 30 days volatility_lst = [] for i in range(0, len(array), 720): volatility = (max(array[i:i+720]) - min(array[i:i+720])) / min(array[i:i+720]) volatility_lst.append(volatility) return np.mean(volatility_lst)*100 def calc_drawdown(array): # At each price, compare the highest past price with the lowest future price # to determine the max potential drawdown drawdown = 0 for i in range(1, len(array)): prev_max = max(array[:i]) future_min = min(array[i:]) max_potential_drawdown = (prev_max - future_min) / prev_max if max_potential_drawdown > drawdown: drawdown = max_potential_drawdown return drawdown*100 def calc_end_pct(array): # Return % change between start and finish end_pct = (array[-1] - array[0]) / array[0] return end_pct*100 def calc_loss_probability(array): # Count how many times our array is worth less than the starting value loss_count = sum(np.less(array, array[0])) loss_probability = loss_count / len(array) return loss_probability*100 # Display charts fig = plt.figure(figsize=(20, 20)) for i, df in enumerate(split_df(df_signals, overlap, window_len)): daily, _, _ = simulate(assets, allocation, wiggle_room, df, 'daily') weekly, _, _ = simulate(assets, allocation, wiggle_room, df, 'weekly') # Append calculations 
volatility_daily.append(calc_volatility(daily)) volatility_weekly.append(calc_volatility(weekly)) drawdown_daily.append(calc_drawdown(daily)) drawdown_weekly.append(calc_drawdown(weekly)) end_pct_daily.append(calc_end_pct(daily)) end_pct_weekly.append(calc_end_pct(weekly)) loss_probability_daily.append(calc_loss_probability(daily)) loss_probability_weekly.append(calc_loss_probability(weekly)) df_plot = pd.DataFrame({'Daily Rebalanced Net Value ($)': daily, 'Weekly Rebalanced Net Value ($)': weekly}) ax = fig.add_subplot(5, 3, i+1) ax.plot(df_plot) ax.legend(df_plot.columns, loc='best') start, end = df[0]['date'][:df[0]['date'].find(' ')], df[-1]['date'][:df[-1]['date'].find(' ')] ax.set_title(f"{start} - {end}") ax.set_xticks([]) plt.show() # Summary df_volatility = pd.DataFrame({'Daily': volatility_daily, 'Weekly': volatility_weekly}) print(f'Average Volatility % over 30 days: \n{round(df_volatility.mean(), 2)}\n') df_drawdown = pd.DataFrame({'Daily': drawdown_daily, 'Weekly': drawdown_weekly}) print(f'Absolute Max Drawdown % over 6 months: \n{round(df_drawdown.max(), 2)}\n') print(f'Average Max Drawdown % over 6 months: \n{round(df_drawdown.mean(), 2)}\n') df_end_pct = pd.DataFrame({'Daily': end_pct_daily, 'Weekly': end_pct_weekly}) print(f'Average End Profit % over 6 months: \n{round(df_end_pct.mean(), 2)}\n') ###Output Average Volatility % over 30 days: Daily 27.70 Weekly 24.82 dtype: float64 Absolute Max Drawdown % over 6 months: Daily 58.21 Weekly 48.77 dtype: float64 Average Max Drawdown % over 6 months: Daily 38.90 Weekly 34.47 dtype: float64 Average End Profit % over 6 months: Daily 10.44 Weekly 16.13 dtype: float64
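###Markdown
The bull/bear rule described at the top of this notebook (50 > 100 > 200 hourly moving averages, and the mirror condition) is computed inside the imported `FART` helpers rather than shown here. A minimal sketch of that rule on an hourly price series — the `prices` input and the use of pandas rolling means are assumptions for illustration, not the library's actual implementation:
###Code
import numpy as np
import pandas as pd

def label_signals(prices):
    """Label each hour 'bull', 'bear' or 'neutral' from 50/100/200-hour moving averages."""
    ma50 = prices.rolling(50).mean()
    ma100 = prices.rolling(100).mean()
    ma200 = prices.rolling(200).mean()
    bull = (ma50 > ma100) & (ma100 > ma200)   # 50 > 100 > 200 -> bullish
    bear = (ma50 < ma100) & (ma100 < ma200)   # 50 < 100 < 200 -> bearish
    return pd.Series(np.select([bull, bear], ['bull', 'bear'], default='neutral'),
                     index=prices.index)
###Output
_____no_output_____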
SprintChallenge/DS_Unit_2_Sprint_Challenge_1.ipynb
###Markdown _Lambda School Data Science, Unit 2_ Regression & Classification Sprint ChallengeTo demonstrate mastery on your Sprint Challenge, do all the required, numbered instructions in this notebook.To earn a score of "3", also do all the stretch goals.You are permitted and encouraged to do as much data exploration as you want. Part 1, Classification- 1.1. Begin with baselines for classification- 1.2. Do train/test split. Arrange data into X features matrix and y target vector- 1.3. Use scikit-learn to fit a logistic regression model- 1.4. Report classification metric: accuracy Part 2, Regression- 2.1. Begin with baselines for regression- 2.2. Do train/validate/test split. - 2.3. Arrange data into X features matrix and y target vector- 2.4. Do one-hot encoding- 2.5. Use scikit-learn to fit a linear regression (or ridge regression) model- 2.6. Report validation MAE and $R^2$ Stretch Goals, Regression- Make visualizations to explore relationships between features and target- Try at least 3 feature combinations. You may select features manually, or automatically- Report validation MAE and $R^2$ for each feature combination you try- Report test MAE and $R^2$ for your final model- Print or plot the coefficients for the features in your model ###Code # If you're in Colab... import sys in_colab = 'google.colab' in sys.modules if in_colab: !pip install category_encoders==2.0.0 !pip install pandas-profiling==2.3.0 !pip install plotly==4.1.1 ###Output Collecting category_encoders==2.0.0 [?25l Downloading https://files.pythonhosted.org/packages/6e/a1/f7a22f144f33be78afeb06bfa78478e8284a64263a3c09b1ef54e673841e/category_encoders-2.0.0-py2.py3-none-any.whl (87kB)  |███▊ | 10kB 17.9MB/s eta 0:00:01  |███████▌ | 20kB 6.3MB/s eta 0:00:01  |███████████▏ | 30kB 8.8MB/s eta 0:00:01  |███████████████ | 40kB 5.6MB/s eta 0:00:01  |██████████████████▋ | 51kB 6.9MB/s eta 0:00:01  |██████████████████████▍ | 61kB 8.1MB/s eta 0:00:01  |██████████████████████████▏ | 71kB 9.3MB/s eta 0:00:01  |█████████████████████████████▉ | 81kB 10.3MB/s eta 0:00:01  |████████████████████████████████| 92kB 9.9MB/s [?25hRequirement already satisfied: numpy>=1.11.3 in /usr/local/lib/python3.6/dist-packages (from category_encoders==2.0.0) (1.16.5) Requirement already satisfied: scikit-learn>=0.20.0 in /usr/local/lib/python3.6/dist-packages (from category_encoders==2.0.0) (0.21.3) Requirement already satisfied: scipy>=0.19.0 in /usr/local/lib/python3.6/dist-packages (from category_encoders==2.0.0) (1.3.1) Requirement already satisfied: statsmodels>=0.6.1 in /usr/local/lib/python3.6/dist-packages (from category_encoders==2.0.0) (0.10.1) Requirement already satisfied: pandas>=0.21.1 in /usr/local/lib/python3.6/dist-packages (from category_encoders==2.0.0) (0.24.2) Requirement already satisfied: patsy>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from category_encoders==2.0.0) (0.5.1) Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn>=0.20.0->category_encoders==2.0.0) (0.13.2) Requirement already satisfied: python-dateutil>=2.5.0 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.21.1->category_encoders==2.0.0) (2.5.3) Requirement already satisfied: pytz>=2011k in /usr/local/lib/python3.6/dist-packages (from pandas>=0.21.1->category_encoders==2.0.0) (2018.9) Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from patsy>=0.4.1->category_encoders==2.0.0) (1.12.0) Installing collected packages: category-encoders Successfully installed 
in /usr/local/lib/python3.6/dist-packages (from astroid<3,>=2.3.0->pylint>=1.4.5->pytest-pylint>=0.13.0->phik>=0.9.8->pandas-profiling==2.3.0) (1.11.2) Building wheels for collected packages: pandas-profiling, htmlmin, confuse Building wheel for pandas-profiling (setup.py) ... [?25l[?25hdone Created wheel for pandas-profiling: filename=pandas_profiling-2.3.0-py2.py3-none-any.whl size=145035 sha256=8a7f9af4215c283f58fa5fbde8749ad60c00d6d9081200f895a9b6ebb621fbb8 Stored in directory: /root/.cache/pip/wheels/ce/c7/f1/dbfef4848ebb048cb1d4a22d1ed0c62d8ff2523747235e19fe Building wheel for htmlmin (setup.py) ... [?25l[?25hdone Created wheel for htmlmin: filename=htmlmin-0.1.12-cp36-none-any.whl size=27084 sha256=9bcbd5703035d13e0700f7155fc525266eda1dd9ec806e939f64508f2066af56 Stored in directory: /root/.cache/pip/wheels/43/07/ac/7c5a9d708d65247ac1f94066cf1db075540b85716c30255459 Building wheel for confuse (setup.py) ... [?25l[?25hdone Created wheel for confuse: filename=confuse-1.0.0-cp36-none-any.whl size=17486 sha256=ebc3e0d6dfe0d6d57090d1fb5fcbfae0e496fe499e767409c11b3f6377ad7a6b Stored in directory: /root/.cache/pip/wheels/b0/b2/96/2074eee7dbf7b7df69d004c9b6ac4e32dad04fb7666cf943bd Successfully built pandas-profiling htmlmin confuse ERROR: datascience 0.10.6 has requirement folium==0.2.1, but you'll have folium 0.8.3 which is incompatible. Installing collected packages: htmlmin, pluggy, pytest, lazy-object-proxy, typed-ast, astroid, mccabe, isort, pylint, pytest-pylint, phik, confuse, pandas-profiling Found existing installation: pluggy 0.7.1 Uninstalling pluggy-0.7.1: Successfully uninstalled pluggy-0.7.1 Found existing installation: pytest 3.6.4 Uninstalling pytest-3.6.4: Successfully uninstalled pytest-3.6.4 Found existing installation: pandas-profiling 1.4.1 Uninstalling pandas-profiling-1.4.1: Successfully uninstalled pandas-profiling-1.4.1 Successfully installed astroid-2.3.0 confuse-1.0.0 htmlmin-0.1.12 isort-4.3.21 lazy-object-proxy-1.4.2 mccabe-0.6.1 pandas-profiling-2.3.0 phik-0.9.8 pluggy-0.13.0 pylint-2.4.1 pytest-5.2.0 pytest-pylint-0.14.1 typed-ast-1.2.0 Requirement already satisfied: plotly==4.1.1 in /usr/local/lib/python3.6/dist-packages (4.1.1) Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from plotly==4.1.1) (1.12.0) Requirement already satisfied: retrying>=1.3.3 in /usr/local/lib/python3.6/dist-packages (from plotly==4.1.1) (1.3.3) ###Markdown Part 1, Classification: Predict Blood Donations 🚑Our dataset is from a mobile blood donation vehicle in Taiwan. The Blood Transfusion Service Center drives to different universities and collects blood as part of a blood drive.The goal is to predict whether the donor made a donation in March 2007, using information about each donor's history.Good data-driven systems for tracking and predicting donations and supply needs can improve the entire supply chain, making sure that more patients get the blood transfusions they need. ###Code import pandas as pd donors = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/blood-transfusion/transfusion.data') assert donors.shape == (748,5) donors = donors.rename(columns={ 'Recency (months)': 'months_since_last_donation', 'Frequency (times)': 'number_of_donations', 'Monetary (c.c. blood)': 'total_volume_donated', 'Time (months)': 'months_since_first_donation', 'whether he/she donated blood in March 2007': 'made_donation_in_march_2007' }) print(donors.shape) donors.head() ###Output (748, 5) ###Markdown 1.1. 
Begin with baselinesWhat accuracy score would you get here with a "majority class baseline"? (You don't need to split the data into train and test sets yet. You can answer this question either with a scikit-learn function or with a pandas function.) ###Code donors['made_donation_in_march_2007'].value_counts(normalize=True) mean_baseline = donors['made_donation_in_march_2007'].value_counts(normalize=True)[0] mean_baseline ###Output _____no_output_____ ###Markdown **I would get an accuracy score of 0.762032** 1.2. Do train/test split. Arrange data into X features matrix and y target vectorDo these steps in either order.Split randomly. Use scikit-learn's train/test split function. Include 75% of the data in the train set, and hold out 25% for the test set. ###Code target = 'made_donation_in_march_2007' features = donors.columns.drop(target) X_presplit = donors[features] y_presplit = donors[target] X_presplit.shape, y_presplit.shape # Split data from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X_presplit, y_presplit, train_size=0.8, test_size=0.2, random_state=42 ) X_train.shape, X_test.shape, y_train.shape, y_test.shape # Verify split train_percent = 598/748 test_percent = 150/748 print(train_percent) print(test_percent) ###Output 0.7994652406417112 0.20053475935828877 ###Markdown 1.3. Use scikit-learn to fit a logistic regression modelYou may use any number of features ###Code from sklearn.linear_model import LogisticRegression model = LogisticRegression(solver='liblinear') model.fit(X_train, y_train) y_pred = model.predict(X_test) ###Output _____no_output_____ ###Markdown 1.4. Report classification metric: accuracyWhat is your model's accuracy on the test set?Don't worry if your model doesn't beat the mean baseline. That's okay!_"The combination of some data and an aching desire for an answer does not ensure that a reasonable answer can be extracted from a given body of data."_ —[John Tukey](https://en.wikiquote.org/wiki/John_Tukey) ###Code from sklearn.metrics import accuracy_score # Close, but no cigar print(f'Accuracy Score: {accuracy_score(y_test, y_pred)}') print(f'Mean Baseline: {mean_baseline}') ###Output Accuracy Score: 0.76 Mean Baseline: 0.7620320855614974 ###Markdown Part 2, Regression: Predict home prices in Ames, Iowa 🏠You'll use historical housing data. There's a data dictionary at the bottom of the notebook. Run this code cell to load the dataset: ###Code import pandas as pd URL = 'https://drive.google.com/uc?export=download&id=1522WlEW6HFss36roD_Cd9nybqSuiVcCK' homes = pd.read_csv(URL) assert homes.shape == (2904, 47) ###Output _____no_output_____ ###Markdown 2.1. Begin with baselinesWhat is the Mean Absolute Error and R^2 score for a mean baseline? ###Code homes.columns.unique # Generate predictions based on mean baseline y_train = homes['SalePrice'] mean_baseline = y_train.mean() y_pred = [mean_baseline] * len(y_train) # Check Mean Absolute Error and Rˆ2 Score from sklearn.metrics import mean_absolute_error from sklearn.metrics import r2_score mae = mean_absolute_error(y_train, y_pred) r2 = r2_score(y_train, y_pred) print(f'Mean Absolute Error: {mae}') print(f'Rˆ2 Score: {r2}') ###Output Mean Absolute Error: 58149.92774120811 Rˆ2 Score: 0.0 ###Markdown 2.2. Do train/test splitTrain on houses sold in the years 2006 - 2008. (1,920 rows)Validate on house sold in 2009. (644 rows)Test on houses sold in 2010. 
(340 rows) ###Code homes['Yr_Sold'].unique() type(homes['Yr_Sold'].loc[0]) train = homes[homes['Yr_Sold'] <= 2008] val = homes[homes['Yr_Sold'] == 2009] test = homes[homes['Yr_Sold'] == 2010] print(train.shape, val.shape, test.shape) train.isnull().sum() ###Output _____no_output_____ ###Markdown 2.3. Arrange data into X features matrix and y target vectorSelect at least one numeric feature and at least one categorical feature.Otherwise, you many choose whichever features and however many you want. ###Code # Get an idea of what the features mean import pandas_profiling homes.profile_report() # Categorical feature = Neighborhood # Numeric features = Year_Built, Full_Bath, Overall_Cond, Overall_Qual target = 'SalePrice' features = ['Neighborhood', 'Year_Built', 'Full_Bath', 'Overall_Cond', 'Overall_Qual'] print(f'Number of features: {len(features)}') # Train X_train = train[features] y_train = train[target] # Validate X_val = val[features] y_val = val[target] # Test X_test = test[features] y_test = test[target] ###Output Number of features: 5 ###Markdown 2.4. Do one-hot encodingEncode your categorical feature(s). ###Code import category_encoders as ce # Create subset X_train_subset = X_train[features] X_val_subset = X_val[features] # Encode encoder = ce.OneHotEncoder(use_cat_names=True) X_train_encoded = encoder.fit_transform(X_train_subset) X_val_encoded = encoder.transform(X_val_subset) ###Output _____no_output_____ ###Markdown 2.5. Use scikit-learn to fit a linear regression (or ridge regression) modelFit your model. ###Code from sklearn.preprocessing import StandardScaler # Scale scaler = StandardScaler() X_train_scaled = scaler.fit_transform(X_train_encoded) X_val_scaled = scaler.transform(X_val_encoded) from sklearn.linear_model import LinearRegression # Fit model model = LinearRegression() model.fit(X_train_scaled, y_train); ###Output _____no_output_____ ###Markdown 2.6. Report validation MAE and $R^2$What is your model's Mean Absolute Error and $R^2$ score on the validation set? ###Code y_pred = model.predict(X_val_scaled) mae = mean_absolute_error(y_val, y_pred) r2 = r2_score(y_val, y_pred) print(f'Mean Absolute Error: {mae}') print(f'Rˆ2 Score: {r2}') ###Output Mean Absolute Error: 27427.170977266935 Rˆ2 Score: 0.7611909168380177 ###Markdown Stretch Goals, Regression- Make visualizations to explore relationships between features and target- Try at least 3 feature combinations. You may select features manually, or automatically- Report validation MAE and $R^2$ for each feature combination you try- Report test MAE and $R^2$ for your final model- Print or plot the coefficients for the features in your model ###Code X_train_encoded.columns.unique() %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns import plotly.express as px px.scatter(train, x='Neighborhood', y=target, title='Sale Price by Neighborhood') # Appears to be positive relationship px.scatter(train, x='Year_Built', y=target, title='Sale Price\nby Year Built', trendline='ols') plt.figure(figsize=[20,10]) sns.barplot(x='Full_Bath', y=target, data=train) plt.title('Sale Price\nby Number of Full Bathrooms'); plt.figure(figsize=[20,10]) sns.scatterplot(x='Overall_Cond', y=target, data=train) plt.title('Sale Price\nby Overall Condition'); # Appears to be positive relationship plt.figure(figsize=[20,10]) sns.scatterplot(x='Overall_Qual', y=target, data=train) plt.title('Sale Price\nby Overall Quality'); ###Output _____no_output_____
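###Markdown One of the stretch goals above asks to print or plot the coefficients for the features in the model. A minimal sketch of how that could be done with the objects already defined (the fitted `model` and the encoded training features) is shown below; the pandas Series formatting is an editorial illustration, not part of the original challenge solution. ###Code
# Sketch: pair each encoded feature name with its fitted coefficient
# (assumes `model` was fit on the scaled version of X_train_encoded)
import pandas as pd

coefficients = pd.Series(model.coef_, index=X_train_encoded.columns)
print(f'Intercept: {model.intercept_:.2f}')
print(coefficients.sort_values().to_string())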
docs/research/replications/Braverman_2010/braverman_2010.ipynb
###Markdown Reproducing Braverman and Shaffer 2010 using [*gamba*](https://github.com/gamba-dev/gamba)This notebook attempts to reproduce the two tables found in Braverman and Shaffer's 2010 paper on behavioural markers for high-risk internet gambling. To get started, download the data titled '*How Do Gamblers Start Gambling: Identifying Behavioural Markers for High-risk Internet Gambling*' through the link below - you'll need the text files under 'Raw Dataset 2' and 'Analytic Dataset';- [Data Download (thetransparencyproject.org)](http://www.thetransparencyproject.org/download_index.php)- [Original data description](http://www.thetransparencyproject.org/codebooks/Codebook_forHighRisk1.pdf)- [Original paper link](https://academic.oup.com/eurpub/article/22/2/273/508362)File names: **RawDataSet2_DailyAggregation.txt** and **AnalyticDataSet_HighRisk.txt** Data description above implies RawDataSet2 contains actual betting data for players for the duration of the study, when it appears to only include a maximum of 31 days of betting data. This means the AnalyticDataSet cannot be faithfully reproduced using the raw data alone as the analytic data incudes full-duration behavioural measures (see final cell). The `trajectory` measure calculated here disagrees with the analytic data set, it specifically shows more extreme values for the gradient of the stakes. This issue is described below.With the data downloaded, the first step is to import [*gamba*](https://github.com/gamba-dev/gamba), run the cell below to get started; ###Code import gamba as gb ###Output thank you for using gamba! ###Markdown With [*gamba*](https://github.com/gamba-dev/gamba) ready, we need to load in both the analytic and raw data sets from the link above - we need to recreate the analytical data set from the raw data; ###Code raw_data = gb.read_csv('RawDataSet2_DailyAggregation.txt', delimiter='\t', parse_dates=['TimeDATE']) analytic_data = gb.read_csv('AnalyticDataSet_HighRisk.txt', delimiter='\t') print('raw data loaded:', len(raw_data)) print('analytic data loaded:', len(analytic_data)) ###Output raw data loaded: 5161 analytic data loaded: 530 ###Markdown At this point, the data can be prepared for use in the gamba library. This can be done with the purpose-built `prepare_braverman_data` method in the `gamba.data` module; ###Code all_player_bets = gb.prepare_braverman_data('RawDataSet2_DailyAggregation.txt') ###Output _____no_output_____ ###Markdown Now for the start of the study's replication - we begin by calculating the measures reported in the paper which include **intensity**, **frequency**, **variability**, **trajectory**, **sum of stakes**, **total number of bets**, **average bet size**, **duration of account betting**, and the **net loss incurred** for each player. 
These are all included in the `calculate_braverman_measures` method in the `gamba.measures` module; ###Code measures = gb.calculate_braverman_measures(all_player_bets) # this method saves them to a file called 'gamba_braverman_measures.csv' measures.sort_values('player_id', inplace=True) # lets sort them by ID and display the first 3; display(measures.head(3)) ###Output 100%|██████████| 530/530 [00:03<00:00, 164.08it/s] ###Markdown As a sanity check, we can display the original measures calculated for the three players above (after renaming the columns to more intuitive ones); ###Code players = measures['player_id'].values[:3] # get only the first 3 values (those above) display(analytic_data.columns) display(analytic_data.head(3)) analytic_data['average_bet_size'] = analytic_data['p2sumstake'] / analytic_data['p2sumbet'] original_analysis = analytic_data[['UserID','p2bpd1m','p2totalactivedays1m','p2stakeSD1m','p2stakeSlope1m','p2sumstake','p2sumbet','average_bet_size','p2intvday','p2net']] original_analysis.columns = ['player_id','intensity','frequency','variability','trajectory','sum_of_stakes','total_num_bets','average_bet_size','duration','net_loss'] original_analysis.sort_values('player_id', inplace=True) # after changing the column names, sort them by player ID (as above) display(original_analysis.head(3)) ###Output _____no_output_____ ###Markdown This is a little puzzling as some of the measures align, yet others such as `total_num_bets` and `duration` appear to be underestimates compared to the original analysis, the `trajectory` measure also appears more extreme. To find out what's causing this difference, we can explore the duration of the data in the raw data set; ###Code raw_data = gb.read_csv('RawDataSet2_DailyAggregation.txt', delimiter='\t', parse_dates=['TimeDATE']) all_player_ids = set(list(raw_data['UserID'])) max_duration = 0 for player_id in all_player_ids: player_bets = raw_data[raw_data['UserID'] == player_id].copy() player_bets.rename(columns={'TimeDATE':'bet_time'}, inplace=True) duration = gb.duration(player_bets) if duration > max_duration: max_duration = duration print('unique players found:', len(all_player_ids)) print('maximum duration found:', max_duration) ###Output unique players found: 530 maximum duration found: 31 ###Markdown The raw data contains a maximum of 31 days of betting data per player, therefore the analytic data set cannot be *completely* reproduced using the raw data alone, hence the original analytic data will be taken forward as opposed to an exactly replicated data set.This means that as we cannot compute the measures exactly, the next best thing is to verify the accuracy of the clustering described in the paper, we can do this using the `k_means` functions from gamba's clustering module; This next cell aims to recreate the k-means method described on page 3 of the paper, under the heading *Statistical analysis*; ###Code standardised_measures_table = gb.standardise_measures_table(original_analysis) clustered_data = gb.k_means(standardised_measures_table, clusters=4, data_only=True) gb.describe_clusters(clustered_data) ###Output _____no_output_____
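###Markdown As an editorial cross-check (not part of the original replication), the four-cluster split above could also be computed directly with scikit-learn on the standardised table. The sketch below assumes `standardised_measures_table` is a pandas DataFrame whose non-`player_id` columns hold the standardised behavioural measures; the `random_state` and column handling are illustrative only. ###Code
# Sketch: cluster the standardised measures with scikit-learn and inspect cluster sizes
from sklearn.cluster import KMeans

features = standardised_measures_table.drop(columns=['player_id'], errors='ignore')
kmeans = KMeans(n_clusters=4, n_init=10, random_state=0).fit(features)

clustered = standardised_measures_table.copy()
clustered['cluster'] = kmeans.labels_
print(clustered['cluster'].value_counts())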
Instructions/Starter_Code/financial-planner.ipynb
###Markdown Unit 5 - Financial Planning ###Code # Initial imports import os import requests import pandas as pd from dotenv import load_dotenv import alpaca_trade_api as tradeapi from MCForecastTools import MCSimulation %matplotlib inline # Load .env enviroment variables load_dotenv() ###Output _____no_output_____ ###Markdown Part 1 - Personal Finance Planner Collect Crypto Prices Using the `requests` Library ###Code # Set current amount of crypto assets # YOUR CODE HERE! # Crypto API URLs btc_url = "https://api.alternative.me/v2/ticker/Bitcoin/?convert=CAD" eth_url = "https://api.alternative.me/v2/ticker/Ethereum/?convert=CAD" # Fetch current BTC price # YOUR CODE HERE! # Fetch current ETH price # YOUR CODE HERE! # Compute current value of my crpto # YOUR CODE HERE! # Print current crypto wallet balance print(f"The current value of your {my_btc} BTC is ${my_btc_value:0.2f}") print(f"The current value of your {my_eth} ETH is ${my_eth_value:0.2f}") ###Output The current value of your 1.2 BTC is $84236.41 The current value of your 5.3 ETH is $22171.16 ###Markdown Collect Investments Data Using Alpaca: `SPY` (stocks) and `AGG` (bonds) ###Code # Set current amount of shares my_agg = 200 my_spy = 50 # Set Alpaca API key and secret # YOUR CODE HERE! # Create the Alpaca API object # YOUR CODE HERE! # Format current date as ISO format # YOUR CODE HERE! # Set the tickers tickers = ["AGG", "SPY"] # Set timeframe to '1D' for Alpaca API timeframe = "1D" # Get current closing prices for SPY and AGG # (use a limit=1000 parameter to call the most recent 1000 days of data) # YOUR CODE HERE! # Preview DataFrame # YOUR CODE HERE! # Pick AGG and SPY close prices # YOUR CODE HERE! # Print AGG and SPY close prices print(f"Current AGG closing price: ${agg_close_price}") print(f"Current SPY closing price: ${spy_close_price}") # Compute the current value of shares # YOUR CODE HERE! # Print current value of shares print(f"The current value of your {my_spy} SPY shares is ${my_spy_value:0.2f}") print(f"The current value of your {my_agg} AGG shares is ${my_agg_value:0.2f}") ###Output The current value of your 50 SPY shares is $14136.50 The current value of your 200 AGG shares is $23380.00 ###Markdown Savings Health Analysis ###Code # Set monthly household income # YOUR CODE HERE! # Consolidate financial assets data # YOUR CODE HERE! # Create savings DataFrame # YOUR CODE HERE! # Display savings DataFrame display(df_savings) # Plot savings pie chart # YOUR CODE HERE! # Set ideal emergency fund emergency_fund = monthly_income * 3 # Calculate total amount of savings # YOUR CODE HERE! # Validate saving health # YOUR CODE HERE! ###Output Congratulations! You have enough money in your emergency fund. ###Markdown Part 2 - Retirement Planning Monte Carlo Simulation ###Code # Set start and end dates of five years back from today. # Sample results may vary from the solution based on the time frame chosen start_date = pd.Timestamp('2016-05-01', tz='America/New_York').isoformat() end_date = pd.Timestamp('2021-05-01', tz='America/New_York').isoformat() # Get 5 years' worth of historical data for SPY and AGG # (use a limit=1000 parameter to call the most recent 1000 days of data) # YOUR CODE HERE! # Display sample data df_stock_data.head() # Configuring a Monte Carlo simulation to forecast 30 years cumulative returns # YOUR CODE HERE! # Printing the simulation input data # YOUR CODE HERE! # Running a Monte Carlo simulation to forecast 30 years cumulative returns # YOUR CODE HERE! # Plot simulation outcomes # YOUR CODE HERE! 
# Plot probability distribution and confidence intervals # YOUR CODE HERE! ###Output _____no_output_____ ###Markdown Retirement Analysis ###Code # Fetch summary statistics from the Monte Carlo simulation results # YOUR CODE HERE! # Print summary statistics # YOUR CODE HERE! ###Output count 500.000000 mean 22.130012 std 16.088798 min 2.342293 25% 10.465170 50% 17.476764 75% 29.550007 max 104.663919 95% CI Lower 4.779747 95% CI Upper 64.053604 Name: 7560, dtype: float64 ###Markdown Calculate the expected portfolio return at the `95%` lower and upper confidence intervals based on a `$20,000` initial investment. ###Code # Set initial investment initial_investment = 20000 # Use the lower and upper `95%` confidence intervals to calculate the range of the possible outcomes of our $20,000 # YOUR CODE HERE! # Print results print(f"There is a 95% chance that an initial investment of ${initial_investment} in the portfolio" f" over the next 30 years will end within in the range of" f" ${ci_lower} and ${ci_upper}") ###Output There is a 95% chance that an initial investment of $20000 in the portfolio over the next 30 years will end within in the range of $95594.94 and $1281072.08 ###Markdown Calculate the expected portfolio return at the `95%` lower and upper confidence intervals based on a `50%` increase in the initial investment. ###Code # Set initial investment initial_investment = 20000 * 1.5 # Use the lower and upper `95%` confidence intervals to calculate the range of the possible outcomes of our $30,000 # YOUR CODE HERE! # Print results print(f"There is a 95% chance that an initial investment of ${initial_investment} in the portfolio" f" over the next 30 years will end within in the range of" f" ${ci_lower} and ${ci_upper}") ###Output There is a 95% chance that an initial investment of $30000.0 in the portfolio over the next 30 years will end within in the range of $143392.41 and $1921608.12 ###Markdown Optional Challenge - Early Retirement Five Years Retirement Option ###Code # Configuring a Monte Carlo simulation to forecast 5 years cumulative returns # YOUR CODE HERE! # Running a Monte Carlo simulation to forecast 5 years cumulative returns # YOUR CODE HERE! # Plot simulation outcomes # YOUR CODE HERE! # Plot probability distribution and confidence intervals # YOUR CODE HERE! # Fetch summary statistics from the Monte Carlo simulation results # YOUR CODE HERE! # Print summary statistics # YOUR CODE HERE! # Set initial investment # YOUR CODE HERE! # Use the lower and upper `95%` confidence intervals to calculate the range of the possible outcomes of our $60,000 # YOUR CODE HERE! # Print results print(f"There is a 95% chance that an initial investment of ${initial_investment} in the portfolio" f" over the next 5 years will end within in the range of" f" ${ci_lower_five} and ${ci_upper_five}") ###Output There is a 95% chance that an initial investment of $60000 in the portfolio over the next 5 years will end within in the range of $57091.37 and $95195.55 ###Markdown Ten Years Retirement Option ###Code # Configuring a Monte Carlo simulation to forecast 10 years cumulative returns # YOUR CODE HERE! # Running a Monte Carlo simulation to forecast 10 years cumulative returns # YOUR CODE HERE! # Plot simulation outcomes # YOUR CODE HERE! # Plot probability distribution and confidence intervals # YOUR CODE HERE! # Fetch summary statistics from the Monte Carlo simulation results # YOUR CODE HERE! # Print summary statistics # YOUR CODE HERE! # Set initial investment # YOUR CODE HERE! 
# Use the lower and upper `95%` confidence intervals to calculate the range of the possible outcomes of our $60,000 # YOUR CODE HERE! # Print results print(f"There is a 95% chance that an initial investment of ${initial_investment} in the portfolio" f" over the next 10 years will end within the range of" f" ${ci_lower_ten} and ${ci_upper_ten}") ###Output There is a 95% chance that an initial investment of $60000 in the portfolio over the next 10 years will end within the range of $62986.85 and $129596.12
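###Markdown For reference, the dollar ranges printed above can be derived from the Monte Carlo summary statistics (the Series with `95% CI Lower` and `95% CI Upper` entries shown earlier). The sketch below is an editorial illustration rather than the official starter-code solution; `summary_ten` is a placeholder name for whichever variable holds the ten-year summary Series. ###Code
# Sketch: scale the 95% confidence bounds of the simulated cumulative returns
# by the initial investment (assumes `summary_ten` is the ten-year summary Series)
initial_investment = 60000

ci_lower_ten = round(summary_ten["95% CI Lower"] * initial_investment, 2)
ci_upper_ten = round(summary_ten["95% CI Upper"] * initial_investment, 2)

print(f"There is a 95% chance that an initial investment of ${initial_investment}"
      f" will end within the range of ${ci_lower_ten} and ${ci_upper_ten} after 10 years.")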
Example/Grouped-Image-Inputs.ipynb
###Markdown Using GroupedInnotation on the inputsThis allows us to form a grid of input images. ###Code foodfns = sorted(os.listdir('./foods/')) foods = ['./foods/'+f for f in foodfns] # Create permuted datasets images = [] targets = np.zeros((len(foodfns), 4, 4), dtype='int') for view in range(4): images.append(foods[view:] + foods[:view]) image_ins = [ImageInnotation(images[i], None, name='Food '+str(i), width=300, height=250) for i in range(4)] bb_ins = [BoundingBoxInnotation(targets[i], name='bbs '+str(i), source='Food '+str(i), desc='Food Type '+str(i)) for i in range(4)] Innotater( [ GroupedInnotation(*image_ins[0:2]), GroupedInnotation(*image_ins[2:4]) ], bb_ins ) ###Output _____no_output_____
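###Markdown The cell above assumes the Innotater widgets and helper libraries have already been imported. If starting from a fresh kernel, imports along the following lines are needed; this is a hedged sketch that assumes the `jupyter_innotater` package, which provides these classes. ###Code
# Sketch: imports assumed by the cell above (jupyter_innotater provides the widget classes)
import os
import numpy as np
from jupyter_innotater import Innotater, ImageInnotation, BoundingBoxInnotation, GroupedInnotation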
SPRatings - Credit Classification.ipynb
###Markdown 1. Data Cleaning2. Feature Engineering3. Correlation Matrix4. top 20/25 parameters5. Machine Learning Models using LazyPredict6. Tablular Analysis of which model is best7. Graphs along the way 1. Data Cleaning -Convert Date column to desirable type - DoneCompany Name remove or keep? ###Code dummy = SPRatings.iloc[1,2] dum_year = int(str(dummy)[:4]) dum_day = int(str(dummy)[4:6]) dum_month = int(str(dummy)[6:]) print("dum_year:", dum_year, "\ndum_day:", dum_day, "\ndum_month", dum_month) import datetime SPRatings['Year'] = SPRatings.iloc[:,2] SPRatings['Month'] = SPRatings.iloc[:,2] SPRatings['Day'] = SPRatings.iloc[:,2] for i in range(len(SPRatings['Data Date'])): SPRatings['Year'][i] = int(str(SPRatings['Data Date'][i])[:4]) SPRatings['Month'][i] = int(str(SPRatings['Data Date'][i])[4:6]) SPRatings['Day'][i] = int(str(SPRatings['Data Date'][i])[6:]) cols=["Year","Month","Day"] SPRatings['Data Date'] = SPRatings[cols].apply(lambda x: '-'.join(x.values.astype(str)), axis="columns") SPRatings['Data Date'] = pd.to_datetime(SPRatings['Data Date']) SPRatings = SPRatings.drop(['Year','Month','Day'], axis=1) uni = len(pd.unique(SPRatings['S&P Domestic Long Term Issuer Credit Rating'])) uni ###Output _____no_output_____ ###Markdown Feature Engineering Ordinal Encoding for Credit Rating ###Code list123 = SPRatings['S&P Domestic Long Term Issuer Credit Rating'] list123 = np.unique(list123) list123 #import category_encoders as ce # create object of Ordinalencoding #encoder= ce.OrdinalEncoder(cols=['S&P Domestic Long Term Issuer Credit Rating'],return_df=True, # mapping=[{'col':'S&P Domestic Long Term Issuer Credit Rating', #'mapping':{'A':0,'A+':1, 'A-':2, 'AA':3, 'AA+':4, 'AA-':5, 'AAA':6, 'BB+':7, 'BBB':8, 'BBB+':9,'BBB-':10}}]) #SPRatings = encoder.fit_transform(SPRatings) SPRatings temp = SPRatings.iloc[1,1] SPRatings.dtypes scale_mapper = {'A':0,'A+':1, 'A-':2, 'AA':3, 'AA+':4, 'AA-':5, 'AAA':6, 'BB+':7, 'BBB':8, 'BBB+':9,'BBB-':10} SPRatings['scale'] = SPRatings['S&P Domestic Long Term Issuer Credit Rating'].replace(scale_mapper) SPRatings['S&P Domestic Long Term Issuer Credit Rating']=SPRatings['scale'] SPRatings ###Output _____no_output_____ ###Markdown Correlation Matrix ###Code #Correlation Matrix from matplotlib import pyplot as plt f, ax = plt.subplots(1, 1, figsize = (15, 10)) corr = SPRatings.corr() sns.heatmap(corr,yticklabels=False,xticklabels=False) df1 = SPRatings["S&P Domestic Long Term Issuer Credit Rating"] df2 = SPRatings.drop(['S&P Domestic Long Term Issuer Credit Rating'],axis=1) df1 correlation_KP = df2.corrwith(df1,axis=0) correlation_KP = correlation_KP.sort_values(ascending=False) correlation_KP_df = correlation_KP.to_frame() correlation_KP_df = correlation_KP_df.reset_index() correlation_KP_df.columns = ['Independent Variables','Rating Correlation'] correlation_KP_df_pos = correlation_KP_df[correlation_KP_df['Rating Correlation']>0] #not_needed = correlation_KP_df[correlation_KP_df['Rating Correlation']<0] #not_needed correlation_KP_df_pos correlation_kendall = df2.corrwith(df1,axis=0, method = 'spearman') correlation_kendall.sort_values(ascending=False) import plotly.graph_objects as go import pandas as pd fig = go.Figure(data=[go.Table( header=dict(values=['Independent Variables','Rating Correlation'], fill_color='white', align='center'), cells=dict(values=[correlation_KP_df['Independent Variables'],correlation_KP_df['Rating Correlation']], fill_color='lavender', align='left')) ]) fig.show() list_not_needed = not_needed["Independent Variables"].tolist() 
list_not_needed #SPRatings_cleaned = SPRatings.drop([list_not_needed],axis=1) cols_to_keep = correlation_KP_df_pos['Independent Variables'] ML_Ready_SPRating = pd.DataFrame() for col in cols_to_keep: if col in SPRatings.columns: ML_Ready_SPRating[col] = SPRatings[col] ML_Ready_SPRating = ML_Ready_SPRating.assign(Data_Date=SPRatings['Data Date'],Company_Name=SPRatings['Company Name'],Ticker_Symbol=SPRatings['Ticker Symbol'], Credit_Rating=SPRatings['S&P Domestic Long Term Issuer Credit Rating']) ML_Ready_SPRating ML_Ready_SPRating distinct_comp = ML_Ready_SPRating.Company_Name.unique() distinct_comp asd = distinct_comp[1] if ML_Ready_SPRating['Company_Name'] == asd: X = ML_Ready_SPRating.drop(["S&P Domestic Long Term Issuer Credit Rating"],axis=1) y = ML_Ready_SPRating["S&P Domestic Long Term Issuer Credit Rating"] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42) y_test.shape X = SPRatings_dummy.drop(["S&P Domestic Long Term Issuer Credit Rating"],axis=1) y = SPRatings_dummy["S&P Domestic Long Term Issuer Credit Rating"] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42) y_test.shape ML_Ready_SPRating = ML_Ready_SPRating[ML_Ready_SPRating['Company_Name'] != 'AFFILIATED MANAGERS GRP INC'] ML_Ready_SPRating for comp in distinct_comp: dum_set = ML_Ready_SPRating[ML_Ready_SPRating['Company_Name']==comp] X = dum_set y = dum_set["Credit_Rating"] #X_train_dum, X_test_dum, y_train_dum, y_test_dum = train_test_split(X, y, test_size=0.3,train_size=0.7, random_state=42) #X_train.append(X_train_dum) #y_train.append(y_train_dum) #X_test.append(X_test_dum) #y_test.append(y_test_dum) ML_Ready_SPRating.to_csv('qwer.csv') X = ML_Ready_SPRating.drop(['Credit_Rating'],axis=1) X = ML_Ready_SPRating.drop(['Data_Date'],axis=1) y = ML_Ready_SPRating.Credit_Rating X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=2) ###Output _____no_output_____ ###Markdown Machine Learning to classify and test ###Code #First Algo - Novel Approach - Logistic Regression from sklearn.linear_model import LogisticRegression lr_model = LogisticRegression() lr_model.fit(X_train,y_train) y_pred = lr_model.predict(x_test) #!pip install lazypredict from lazypredict.Supervised import LazyClassifier clf = LazyClassifier(verbose=0, ignore_warnings=True, custom_metric = None) models,predictions = clf.fit(X_train, X_test, y_train, y_test) print(models) ###Output 100%|██████████| 29/29 [00:17<00:00, 1.69it/s]
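###Markdown Two issues in the cells above are worth flagging: the logistic regression predicts on `x_test`, which is never defined (the split produced `X_test`), and the feature matrix is rebuilt twice, so it still contains the target and the identifier columns. A corrected, hedged evaluation sketch follows; the metric choices and `max_iter` value are illustrative rather than part of the original notebook. ###Code
# Sketch: rebuild the feature matrix without the target and identifier columns,
# then evaluate a logistic regression on the held-out split (X_test, not x_test)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import train_test_split

X = ML_Ready_SPRating.drop(['Credit_Rating', 'Data_Date', 'Company_Name', 'Ticker_Symbol'], axis=1)
y = ML_Ready_SPRating['Credit_Rating']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=2)

lr_model = LogisticRegression(max_iter=1000)
lr_model.fit(X_train, y_train)
y_pred = lr_model.predict(X_test)

print('Accuracy:', accuracy_score(y_test, y_pred))
print(classification_report(y_test, y_pred))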
C3_W4_Lab_3_Fairness_Indicators.ipynb
###Markdown Ungraded Lab: Fairness IndicatorsIn this colab notebook, you will use [Fairness Indicators](https://www.tensorflow.org/tfx/guide/fairness_indicators) to explore the `Smiling` attribute in a large-scale face image dataset. Fairness Indicators is a suite of tools built on top of [TensorFlow Model Analysis](https://www.tensorflow.org/tfx/model_analysis/get_started) that enable regular evaluation of fairness metrics in product pipelines. This [Introductory Video](https://www.youtube.com/watch?v=pHT-ImFXPQo) provides more details and context on the real-world scenario presented here, one of primary motivations for creating Fairness Indicators. This notebook will teach you to:* Train a simple neural network model to detect a person's smile in images using [TF Keras](https://www.tensorflow.org/guide/keras) and the [CelebFaces Attributes (CelebA)](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html) dataset.* Evaluate model performance against a commonly used fairness metric across age groups using Fairness Indicators.*Credits: Some of the code and discussions are taken from this [Tensorflow tutorial](https://colab.research.google.com/github/tensorflow/fairness-indicators/blob/master/g3doc/tutorials/Fairness_Indicators_TFCO_CelebA_Case_Study.ipynb).* Install Fairness IndicatorsThis will install all related libraries such as TFMA and TFDV. ###Code !pip install fairness-indicators ###Output _____no_output_____ ###Markdown *Note: In Google Colab, you need to restart the runtime at this point to finalize updating the packages you just installed. You can do so by clicking the `Restart Runtime` at the end of the output cell above (after installation), or by selecting `Runtime > Restart Runtime` in the Menu bar. **Please do not proceed to the next section without restarting.** You can also ignore the errors about version incompatibility of some of the bundled packages because we won't be using those in this notebook.* Import packagesNext, you will import the main packages and some utilities you will need in this notebook. Notice that you are not importing `fairness-indicators` directly. As mentioned in the intro, this suite of tools is built on top of TFMA so you can just import TFMA to access it. ###Code import tensorflow as tf import tensorflow_model_analysis as tfma import tensorflow_datasets as tfds from tensorflow import keras ###Output _____no_output_____ ###Markdown The code below should not produce any error. Otherwise, please restart the installation. ###Code print("TensorFlow " + tf.__version__) print("TFMA " + tfma.VERSION_STRING) ###Output _____no_output_____ ###Markdown Download and prepare the dataset[CelebA](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html) is a large-scale face attributes dataset with more than 200,000 celebrity images, each with 40 attribute annotations (such as hair type, fashion accessories, facial features, etc.) and 5 landmark locations (eyes, mouth and nose positions). For more details, you can read more in [this paper](https://liuziwei7.github.io/projects/FaceAttributes.html).With the permission of the owners, this dataset is stored on Google Cloud Storage (GCS) and mostly accessed via [TensorFlow Datasets(`tfds`)](https://www.tensorflow.org/datasets). To save on download time and disk space, you will use the GCS bucket specified below as your data directory. This already contains the TFRecords. If you want to download it to your workspace, you can pass a local directory to the `data_dir` argument. 
Just take note that it will take time to complete the download. ###Code # URI of the dataset in Google Cloud Storage GCS_BASE_DIR = "gs://celeb_a_dataset/" # Load the data using TFDS data, data_info = tfds.load("celeb_a", data_dir=GCS_BASE_DIR, with_info=True, builder_kwargs={'version':'2.0.0'}) ###Output _____no_output_____ ###Markdown You can preview some of the images in the dataset. ###Code # Take 6 examples and preview images fig = tfds.show_examples(data['train'].take(6), data_info) ###Output _____no_output_____ ###Markdown You can also view the dataset as a dataframe to preview the other attributes in tabular format. 
###Code # Define Constants ATTR_KEY = "attributes" IMAGE_KEY = "image" LABEL_KEY = "Smiling" GROUP_KEY = "Young" IMAGE_SIZE = 28 # Define Preprocessing Function def preprocess_input_dict(feat_dict): ''' Picks the attributes to study and resizes the images Args: feat_dict (dictionary): features from the dataset Returns: dictionary containing the resized image, label, and age group ''' # Separate out the image and target variable from the feature dictionary. image = feat_dict[IMAGE_KEY] label = feat_dict[ATTR_KEY][LABEL_KEY] group = feat_dict[ATTR_KEY][GROUP_KEY] # Resize and normalize image. image = tf.cast(image, tf.float32) image = tf.image.resize(image, [IMAGE_SIZE, IMAGE_SIZE]) image /= 255.0 # Cast label and group to float32. label = tf.cast(label, tf.float32) group = tf.cast(group, tf.float32) # Put the computed values in a dictionary feat_dict[IMAGE_KEY] = image feat_dict[ATTR_KEY][LABEL_KEY] = label feat_dict[ATTR_KEY][GROUP_KEY] = group return feat_dict # Define lambda functions to group features and labels for training and evaluation get_image_and_label = lambda feat_dict: (feat_dict[IMAGE_KEY], feat_dict[ATTR_KEY][LABEL_KEY]) get_image_label_and_group = lambda feat_dict: (feat_dict[IMAGE_KEY], feat_dict[ATTR_KEY][LABEL_KEY], feat_dict[ATTR_KEY][GROUP_KEY]) ###Output _____no_output_____ ###Markdown Prepare train and test splitsThis next helper function will help split, shuffle, batch and preprocess your training data. For this notebook, you will just develop a model that accepts the image as input and outputs the `Smiling` attribute (i.e. label). ###Code def celeb_a_train_data_wo_group(data, batch_size): ''' Args: data (TF dataset) - dataset to preprocess batch_size (int) - batch size Returns: Batches of preprocessed datasets containing tuples with (image, label) ''' celeb_a_train_data = data.shuffle(1024).repeat().batch(batch_size).map(preprocess_input_dict) return celeb_a_train_data.map(get_image_and_label) ###Output _____no_output_____ ###Markdown The `test` split does not need to be shuffled so you can just preprocess it like below. ###Code # Prepare test data celeb_a_test_data = data['test'].batch(1).map(preprocess_input_dict).map(get_image_label_and_group) ###Output _____no_output_____ ###Markdown As a sanity check, you can examine the contents of a one example in the test data. You should see that it is successfully reshaped and the pixels should be normalized. ###Code # Print info about the test data records for sample in celeb_a_test_data.take(1): print(f'Data type: {type(sample)}') print(f'Number of elements: {len(sample)}') print(f'Shape of 1st element: {sample[0].shape}') print(f'Shape of 2nd element: {sample[1].shape}') print(f'Shape of 3rd element: {sample[2].shape}') print(f'Contents: \n{sample}') ###Output _____no_output_____ ###Markdown Build a simple DNN ModelWith the dataset prepared, you will now assemble a simple `tf.keras.Sequential` model to classify your images. The model consists of:1. An input layer that represents the flattened 28x28x3 image.2. A fully connected layer with 64 units activated by a ReLU function.3. A single-unit readout layer to output real-scores instead of probabilities.You may be able to greatly improve model performance by adding some complexity (e.g., more densely-connected layers, exploring different activation functions, increasing image size), but that may distract from the goal of demonstrating how easy it is to apply the indicators when working with Keras. 
For that reason, you will first keep the model simple — but feel free to explore this space later. ###Code def create_model(): '''Builds the simple DNN binary classifier''' # Build the model using the Sequential API model = keras.Sequential([ keras.layers.Flatten(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='image'), keras.layers.Dense(64, activation='relu'), keras.layers.Dense(1, activation=None) ]) # Compile the model with hinge loss and binary accuracy metric model.compile( optimizer=tf.keras.optimizers.Adam(0.001), loss='hinge', metrics='binary_accuracy') return model ###Output _____no_output_____ ###Markdown Train & Evaluate ModelYou’re now ready to train your model. To cut back on the amount of execution time and memory, you will train the model by slicing the data into small batches with only a few repeated iterations. ###Code BATCH_SIZE = 32 # Build the model model = create_model() # Train the model model.fit(celeb_a_train_data_wo_group(data['train'], BATCH_SIZE), epochs=5, steps_per_epoch=1000) ###Output _____no_output_____ ###Markdown Evaluating the model on the test data should result in a final accuracy score of just over 85%. Not bad for a simple model with no fine tuning. ###Code # Evaluate trained model on the test data results = model.evaluate(celeb_a_test_data) ###Output _____no_output_____ ###Markdown You will then save the model so you can analyze it in the next section. ###Code # Define model directory MODEL_LOCATION = 'saved_model' # Save the model model.save(MODEL_LOCATION, save_format='tf') ###Output _____no_output_____ ###Markdown Model AnalysisAs you already know, it is usually not enough to just measure your model's performance on global metrics. For instance, performance evaluated across age groups may reveal some shortcomings.To explore this further, you will evaluate the model with Fairness Indicators via TFMA. In particular, you will see whether there is a significant gap in performance between "Young" and "Not Young" categories when evaluated on false positive rate (FPR).A false positive error occurs when the model incorrectly predicts the positive class. In this context, a false positive outcome occurs when the ground truth is an image of a celebrity 'Not Smiling' and the model predicts 'Smiling'. While this seems like a relatively mundane error, false positive errors can sometimes cause more problematic behaviors when deployed in a real world application. For instance, a false positive error in a spam classifier could cause a user to miss an important email.You will mostly follow the same steps as you did in the first ungraded lab of this week. Namely, you will:* Create a TFRecord of the test dataset.* Write an `EvalConfig` file* Create an `EvalSharedModel`* Define a `Schema` message* Run model analysis with TFMA Create TFRecordYou will need to serialize the preprocessed test dataset so it can be read by TFMA. We've provided a helper function to do just that. Notice that the age group feature is transformed into a string ('Young' or 'Not Young'). This will come in handy in the visualization so the tags are easier to interpret (compared to just 1 or 0). 
###Code # Define filename TFRECORD_FILE = 'celeb_a_test.tfrecord' def celeb_ds_to_tfrecord(dataset, tfrecord_file): ''' Helper function to convert a TF Dataset to TFRecord Args: dataset (TF Dataset) - dataset to save as TFRecord tfrecord_file (string) - filename to use when writing the TFRecord ''' # Initialize examples list examples = [] for row in dataset: # Get image, label, and group tensors image = row[0] label = row[1] group = row[2] # Flatten image image = tf.reshape(image, [-1]) # Instantiate Example output = tf.train.Example() # Assign features' numpy arrays to the Example feature values output.features.feature[IMAGE_KEY].float_list.value.extend(image.numpy().tolist()) output.features.feature[LABEL_KEY].float_list.value.append(label.numpy()) output.features.feature[GROUP_KEY].bytes_list.value.append(b"Young" if group.numpy() else b'Not Young') # Append to examples list examples.append(output) # Serialize examples and save as tfrecord with tf.io.TFRecordWriter(tfrecord_file) as writer: for example in examples: writer.write(example.SerializeToString()) # Use the helper function to serialize the test dataset celeb_ds_to_tfrecord(celeb_a_test_data, TFRECORD_FILE) ###Output _____no_output_____ ###Markdown Write EvalConfig fileNext, you will define the model, metrics, and slicing specs in an eval config file. As mentioned, you will slice the data across age groups to see if there is an underlying problem. For metrics, you will include the `FairnessIndicators` class. These are commonly-identified fairness metrics for binary and multiclass classifiers. Moreover, you will configure a list of thresholds. These will allow you to observe if the model predicts better when the threshold to determine between the two classes is changed (e.g. will the FPR be lower if the model predicts "Smiling" for outputs greater than 0.22?). ###Code # Import helper module from google.protobuf import text_format # Write EvalConfig string eval_config_pbtxt = """ model_specs { label_key: "%s" } metrics_specs { metrics { class_name: "FairnessIndicators" config: '{ "thresholds": [0.22, 0.5, 0.75] }' } metrics { class_name: "ExampleCount" } } slicing_specs {} slicing_specs { feature_keys: "%s" } """ % (LABEL_KEY, GROUP_KEY) # Parse as a Message eval_config = text_format.Parse(eval_config_pbtxt, tfma.EvalConfig()) ###Output _____no_output_____ ###Markdown Create EvalSharedModelThis will be identical to the command you ran in an earlier lab. This is needed so TFMA will know how to load and configure your model from disk. ###Code # Create EvalSharedModel eval_shared_model = tfma.default_eval_shared_model( eval_saved_model_path=MODEL_LOCATION, eval_config=eval_config) ###Output _____no_output_____ ###Markdown Create a SchemaThis is an additional step from your previous TFMA workflow. It is needed particularly because, unlike the TFMA ungraded lab, you didn't include a serving signature with the model. If you remember, the function called by that signature took care of parsing the tfrecords, converting them to the correct data type, and preprocessing. Since that part is not included in this lab, you will need to provide a schema so TFMA will know what data types are in the serialized examples when it parses the tfrecord into a dictionary of features. You will also need to define the dimensions of the image since that is expected by your model input. That is handled by the `tensor_representation_group` below. 
###Code from tensorflow_metadata.proto.v0 import schema_pb2 from google.protobuf import text_format # Define Schema message as string schema_pbtxt = """ tensor_representation_group { key: "" value { tensor_representation { key: "%s" value { dense_tensor { column_name: "%s" shape { dim { size: 28 } dim { size: 28 } dim { size: 3 } } } } } } } feature { name: "%s" type: FLOAT } feature { name: "%s" type: FLOAT } feature { name: "%s" type: BYTES } """ % (IMAGE_KEY, IMAGE_KEY, IMAGE_KEY, LABEL_KEY, GROUP_KEY) # Parse the schema string to a message schema = text_format.Parse(schema_pbtxt, schema_pb2.Schema()) ###Output _____no_output_____ ###Markdown Run TFMAYou will pass the objects you created in the previous sections to `tfma.run_model_analysis()`. As you've done previously, this will take care of loading the model and data, and computing the metrics on the data slices you specified. ###Code # Define output directory OUTPUT_PATH = 'tfma_output' # Run model analysis eval_results = tfma.run_model_analysis( eval_shared_model=eval_shared_model, eval_config=eval_config, data_location=TFRECORD_FILE, schema=schema, output_path=OUTPUT_PATH ) ###Output _____no_output_____ ###Markdown Now you can view the fairness metrics you specified. The FPR should already be selected and you can see that it is considerably higher for the `Not Young` age group. Try to explore the widget and see if you can make other findings. Here are some suggestions:* Toggle the threshold drop down and see how the FPR changes with different thresholds* Change the baseline to `Young: Young` so the percentage difference (in the table below the chart) will be measured against it.* Deselect the `Overall` slice so you can compare the two age groups side by side.* Select other metrics to display and observe their charts. ###Code # Visualize the fairness metrics tfma.addons.fairness.view.widget_view.render_fairness_indicator(eval_results) ###Output _____no_output_____ ###Markdown After studying the discrepancies in your predictions, you can then investigate why that happens and have a plan on remidiating it. Aside from changing your model architecture, you can also look first at your training data. `fairness-indicators` is also packaged with TFDV so you can use it to generate statistics from your data. Here is a short review on how to do that. ###Code import tensorflow_data_validation as tfdv # Define training directory TRAIN_DIR = f'{GCS_BASE_DIR}celeb_a/2.0.0/celeb_a-train.tfrecord*' # View tfrecord filenames in GCS !gsutil ls {TRAIN_DIR} # Filter features to observe stats_options = tfdv.StatsOptions(feature_allowlist=['attributes/Young']) # Compute the statistics statistics = tfdv.generate_statistics_from_tfrecord(TRAIN_DIR, stats_options=stats_options) # Visualize the statistics tfdv.visualize_statistics(statistics) ###Output _____no_output_____ ###Markdown Ungraded Lab: Fairness IndicatorsIn this colab notebook, you will use [Fairness Indicators](https://www.tensorflow.org/tfx/guide/fairness_indicators) to explore the `Smiling` attribute in a large-scale face image dataset. Fairness Indicators is a suite of tools built on top of [TensorFlow Model Analysis](https://www.tensorflow.org/tfx/model_analysis/get_started) that enable regular evaluation of fairness metrics in product pipelines. This [Introductory Video](https://www.youtube.com/watch?v=pHT-ImFXPQo) provides more details and context on the real-world scenario presented here, one of primary motivations for creating Fairness Indicators. 
This notebook will teach you to:* Train a simple neural network model to detect a person's smile in images using [TF Keras](https://www.tensorflow.org/guide/keras) and the [CelebFaces Attributes (CelebA)](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html) dataset.* Evaluate model performance against a commonly used fairness metric across age groups using Fairness Indicators.*Credits: Some of the code and discussions are taken from this [Tensorflow tutorial](https://colab.research.google.com/github/tensorflow/fairness-indicators/blob/master/g3doc/tutorials/Fairness_Indicators_TFCO_CelebA_Case_Study.ipynb).* Install Fairness IndicatorsThis will install all related libraries such as TFMA and TFDV. ###Code !pip install fairness-indicators ###Output _____no_output_____ ###Markdown *Note: In Google Colab, you need to restart the runtime at this point to finalize updating the packages you just installed. You can do so by clicking the `Restart Runtime` at the end of the output cell above (after installation), or by selecting `Runtime > Restart Runtime` in the Menu bar. **Please do not proceed to the next section without restarting.** You can also ignore the errors about version incompatibility of some of the bundled packages because we won't be using those in this notebook.* Import packagesNext, you will import the main packages and some utilities you will need in this notebook. Notice that you are not importing `fairness-indicators` directly. As mentioned in the intro, this suite of tools is built on top of TFMA so you can just import TFMA to access it. ###Code import tensorflow as tf import tensorflow_model_analysis as tfma import tensorflow_datasets as tfds from tensorflow import keras ###Output _____no_output_____ ###Markdown The code below should not produce any error. Otherwise, please restart the installation. ###Code print("TensorFlow " + tf.__version__) print("TFMA " + tfma.VERSION_STRING) ###Output _____no_output_____ ###Markdown Download and prepare the dataset[CelebA](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html) is a large-scale face attributes dataset with more than 200,000 celebrity images, each with 40 attribute annotations (such as hair type, fashion accessories, facial features, etc.) and 5 landmark locations (eyes, mouth and nose positions). For more details, you can read more in [this paper](https://liuziwei7.github.io/projects/FaceAttributes.html).With the permission of the owners, this dataset is stored on Google Cloud Storage (GCS) and mostly accessed via [TensorFlow Datasets(`tfds`)](https://www.tensorflow.org/datasets). To save on download time and disk space, you will use the GCS bucket specified below as your data directory. This already contains the TFRecords. If you want to download it to your workspace, you can pass a local directory to the `data_dir` argument. Just take note that it will take time to complete the download. ###Code # URI of the dataset in Goocle Cloud Storage GCS_BASE_DIR = "gs://celeb_a_dataset/" # Load the data using TFDS data, data_info = tfds.load("celeb_a", data_dir=GCS_BASE_DIR, with_info=True, builder_kwargs={'version':'2.0.0'}) ###Output _____no_output_____ ###Markdown You can preview some of the images in the dataset. ###Code # Take 6 examples and preview images fig = tfds.show_examples(data['train'].take(6), data_info) ###Output _____no_output_____ ###Markdown You can also view the dataset as a dataframe to preview the other attributes in tabular format. 
###Code # Take 4 examples as a dataframe df = tfds.as_dataframe(data['train'].take(4), data_info) # View the dataframe df.head() ###Output _____no_output_____ ###Markdown Let's list the column header so you can see the attribute names in the dataset. For this notebook, you will just examine the `attributes/Young` and `attributes/Smiling` features but feel free to pick other features once you've gone over the whole exercise. ###Code # List dataframe header df.columns ###Output _____no_output_____ ###Markdown In this notebook:* Your model will attempt to classify whether the subject of the image is smiling, as represented by the `Smiling` attribute*.* Images will be resized from 218x178 to 28x28 to reduce the execution time and memory when training.* Your model's performance will be evaluated across age groups, using the binary `Young` attribute. You will call this "age group" in this notebook.___* *While there is little information available about the labeling methodology for this dataset, you will assume that the "Smiling" attribute was determined by a pleased, kind, or amused expression on the subject's face. For the purpose of this example, you will take these labels as ground truth.* CaveatsBefore moving forward, there are several considerations to keep in mind when using CelebA:* Although, in principle, this notebook could use any dataset of face images, CelebA was chosen because it contains public domain images of public figures.* All of the attribute annotations in CelebA are operationalized as binary categories. For example, the `Young` attribute (as determined by the dataset labelers) is denoted as either present or absent in the image.* CelebA's categorizations do not reflect real human diversity of attributes.* For the purposes of this notebook, the feature containing the `Young` attribute is referred to as "age group". A `True` will put the image as a member of the `Young` age group and a `False` will put the image as a member of the `Not Young` age group. These are assumptions made as this information is not mentioned in the [original paper](http://openaccess.thecvf.com/content_iccv_2015/html/Liu_Deep_Learning_Face_ICCV_2015_paper.html).* As such, performance in the models trained in this notebook is tied to the ways the attributes have been operationalized and annotated by the authors of CelebA.* This model should not be used for commercial purposes as that would violate [CelebA's non-commercial research agreement](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html). Setting Up Input FunctionsNow, you will define the preprocessing functions to prepare your data as model inputs. These include resizing images, normalizing pixels, casting to the right data type, and grouping the features and labels. ###Code # Define Constants ATTR_KEY = "attributes" IMAGE_KEY = "image" LABEL_KEY = "Smiling" GROUP_KEY = "Young" IMAGE_SIZE = 28 # Define Preprocessing Function def preprocess_input_dict(feat_dict): ''' Picks the attributes to study and resizes the images Args: feat_dict (dictionary): features from the dataset Returns: dictionary containing the resized image, label, and age group ''' # Separate out the image and target variable from the feature dictionary. image = feat_dict[IMAGE_KEY] label = feat_dict[ATTR_KEY][LABEL_KEY] group = feat_dict[ATTR_KEY][GROUP_KEY] # Resize and normalize image. image = tf.cast(image, tf.float32) image = tf.image.resize(image, [IMAGE_SIZE, IMAGE_SIZE]) image /= 255.0 # Cast label and group to float32. 
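  # Note: the float32 cast below matters later on - the hinge loss used for
  # training expects float targets, and the serialized test set / TFMA schema
  # treat the label as a FLOAT feature (the age group is later rewritten as a
  # readable byte string for slicing).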
label = tf.cast(label, tf.float32) group = tf.cast(group, tf.float32) # Put the computed values in a dictionary feat_dict[IMAGE_KEY] = image feat_dict[ATTR_KEY][LABEL_KEY] = label feat_dict[ATTR_KEY][GROUP_KEY] = group return feat_dict # Define lambda functions to group features and labels for training and evaluation get_image_and_label = lambda feat_dict: (feat_dict[IMAGE_KEY], feat_dict[ATTR_KEY][LABEL_KEY]) get_image_label_and_group = lambda feat_dict: (feat_dict[IMAGE_KEY], feat_dict[ATTR_KEY][LABEL_KEY], feat_dict[ATTR_KEY][GROUP_KEY]) ###Output _____no_output_____ ###Markdown Prepare train and test splitsThis next helper function will help split, shuffle, batch and preprocess your training data. For this notebook, you will just develop a model that accepts the image as input and outputs the `Smiling` attribute (i.e. label). ###Code def celeb_a_train_data_wo_group(data, batch_size): ''' Args: data (TF dataset) - dataset to preprocess batch_size (int) - batch size Returns: Batches of preprocessed datasets containing tuples with (image, label) ''' celeb_a_train_data = data.shuffle(1024).repeat().batch(batch_size).map(preprocess_input_dict) return celeb_a_train_data.map(get_image_and_label) ###Output _____no_output_____ ###Markdown The `test` split does not need to be shuffled so you can just preprocess it like below. ###Code # Prepare test data celeb_a_test_data = data['test'].batch(1).map(preprocess_input_dict).map(get_image_label_and_group) ###Output _____no_output_____ ###Markdown As a sanity check, you can examine the contents of a one example in the test data. You should see that it is successfully reshaped and the pixels should be normalized. ###Code # Print info about the test data records for sample in celeb_a_test_data.take(1): print(f'Data type: {type(sample)}') print(f'Number of elements: {len(sample)}') print(f'Shape of 1st element: {sample[0].shape}') print(f'Shape of 2nd element: {sample[1].shape}') print(f'Shape of 3rd element: {sample[2].shape}') print(f'Contents: \n{sample}') ###Output _____no_output_____ ###Markdown Build a simple DNN ModelWith the dataset prepared, you will now assemble a simple `tf.keras.Sequential` model to classify your images. The model consists of:1. An input layer that represents the flattened 28x28x3 image.2. A fully connected layer with 64 units activated by a ReLU function.3. A single-unit readout layer to output real-scores instead of probabilities.You may be able to greatly improve model performance by adding some complexity (e.g., more densely-connected layers, exploring different activation functions, increasing image size), but that may distract from the goal of demonstrating how easy it is to apply the indicators when working with Keras. For that reason, you will first keep the model simple — but feel free to explore this space later. ###Code def create_model(): '''Builds the simple DNN binary classifier''' # Build the model using the Sequential API model = keras.Sequential([ keras.layers.Flatten(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='image'), keras.layers.Dense(64, activation='relu'), keras.layers.Dense(1, activation=None) ]) # Compile the model with hinge loss and binary accuracy metric model.compile( optimizer=tf.keras.optimizers.Adam(0.001), loss='hinge', metrics='binary_accuracy') return model ###Output _____no_output_____ ###Markdown Train & Evaluate ModelYou’re now ready to train your model. 
To cut back on the amount of execution time and memory, you will train the model by slicing the data into small batches with only a few repeated iterations. ###Code BATCH_SIZE = 32 # Build the model model = create_model() # Train the model model.fit(celeb_a_train_data_wo_group(data['train'], BATCH_SIZE), epochs=5, steps_per_epoch=1000) ###Output _____no_output_____ ###Markdown Evaluating the model on the test data should result in a final accuracy score of just over 85%. Not bad for a simple model with no fine tuning. ###Code # Evaluate trained model on the test data results = model.evaluate(celeb_a_test_data) ###Output _____no_output_____ ###Markdown You will then save the model so you can analyze it in the next section. ###Code # Define model directory MODEL_LOCATION = 'saved_model' # Save the model model.save(MODEL_LOCATION, save_format='tf') ###Output _____no_output_____ ###Markdown Model AnalysisAs you already know, it is usually not enough to just measure your model's performance on global metrics. For instance, performance evaluated across age groups may reveal some shortcomings.To explore this further, you will evaluate the model with Fairness Indicators via TFMA. In particular, you will see whether there is a significant gap in performance between "Young" and "Not Young" categories when evaluated on false positive rate (FPR).A false positive error occurs when the model incorrectly predicts the positive class. In this context, a false positive outcome occurs when the ground truth is an image of a celebrity 'Not Smiling' and the model predicts 'Smiling'. While this seems like a relatively mundane error, false positive errors can sometimes cause more problematic behaviors when deployed in a real world application. For instance, a false positive error in a spam classifier could cause a user to miss an important email.You will mostly follow the same steps as you did in the first ungraded lab of this week. Namely, you will:* Create a TFRecord of the test dataset.* Write an `EvalConfig` file* Create an `EvalSharedModel`* Define a `Schema` message* Run model analysis with TFMA Create TFRecordYou will need to serialize the preprocessed test dataset so it can be read by TFMA. We've provided a helper function to do just that. Notice that the age group feature is transformed into a string ('Young' or 'Not Young'). This will come in handy in the visualization so the tags are easier to interpret (compared to just 1 or 0). 
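###Markdown Before looking at the full helper below, here is a minimal sketch (illustration only, not required for the lab) of how a single record is packed into a `tf.train.Example` proto before being written to disk: ###Code
# Sketch: pack one test record into a tf.train.Example and inspect the result
for image_t, label_t, group_t in celeb_a_test_data.take(1):

    # Instantiate an empty Example
    example = tf.train.Example()

    # Flattened pixel values go into a float_list
    example.features.feature[IMAGE_KEY].float_list.value.extend(
        tf.reshape(image_t, [-1]).numpy().tolist())

    # The label stays numeric
    example.features.feature[LABEL_KEY].float_list.value.append(label_t.numpy().item())

    # The age group becomes a readable byte string so the slice names are easy to read
    example.features.feature[GROUP_KEY].bytes_list.value.append(
        b'Young' if group_t.numpy().item() else b'Not Young')

    # Show the first bytes of the serialized proto
    print(example.SerializeToString()[:80])
###Output _____no_output_____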
###Code # Define filename TFRECORD_FILE = 'celeb_a_test.tfrecord' def celeb_ds_to_tfrecord(dataset, tfrecord_file): ''' Helper function to convert a TF Dataset to TFRecord Args: dataset (TF Dataset) - dataset to save as TFRecord tfrecord_file (string) - filename to use when writing the TFRecord ''' # Initialize examples list examples = [] for row in dataset: # Get image, label, and group tensors image = row[0] label = row[1] group = row[2] # Flatten image image = tf.reshape(image, [-1]) # Instantiate Example output = tf.train.Example() # Assign features' numpy arrays to the Example feature values output.features.feature[IMAGE_KEY].float_list.value.extend(image.numpy().tolist()) output.features.feature[LABEL_KEY].float_list.value.append(label.numpy()) output.features.feature[GROUP_KEY].bytes_list.value.append(b"Young" if group.numpy() else b'Not Young') # Append to examples list examples.append(output) # Serialize examples and save as tfrecord with tf.io.TFRecordWriter(tfrecord_file) as writer: for example in examples: writer.write(example.SerializeToString()) # Use the helper function to serialize the test dataset celeb_ds_to_tfrecord(celeb_a_test_data, TFRECORD_FILE) ###Output _____no_output_____ ###Markdown Write EvalConfig fileNext, you will define the model, metrics, and slicing specs in an eval config file. As mentioned, you will slice the data across age groups to see if there is an underlying problem. For metrics, you will include the `FairnessIndicators` class. These are commonly-identified fairness metrics for binary and multiclass classifiers. Moreover, you will configure a list of thresholds. These will allow you to observe if the model predicts better when the threshold to determine between the two classes is changed (e.g. will the FPR be lower if the model predicts "Smiling" for outputs greater than 0.22?). ###Code # Import helper module from google.protobuf import text_format # Write EvalConfig string eval_config_pbtxt = """ model_specs { label_key: "%s" } metrics_specs { metrics { class_name: "FairnessIndicators" config: '{ "thresholds": [0.22, 0.5, 0.75] }' } metrics { class_name: "ExampleCount" } } slicing_specs {} slicing_specs { feature_keys: "%s" } """ % (LABEL_KEY, GROUP_KEY) # Parse as a Message eval_config = text_format.Parse(eval_config_pbtxt, tfma.EvalConfig()) ###Output _____no_output_____ ###Markdown Create EvalSharedModelThis will be identical to the command you ran in an earlier lab. This is needed so TFMA will know how to load and configure your model from disk. ###Code # Create EvalSharedModel eval_shared_model = tfma.default_eval_shared_model( eval_saved_model_path=MODEL_LOCATION, eval_config=eval_config) ###Output _____no_output_____ ###Markdown Create a SchemaThis is an additional step from your previous TFMA workflow. It is needed particularly because, unlike the TFMA ungraded lab, you didn't include a serving signature with the model. If you remember, the function called by that signature took care of parsing the tfrecords, converting them to the correct data type, and preprocessing. Since that part is not included in this lab, you will need to provide a schema so TFMA will know what data types are in the serialized examples when it parses the tfrecord into a dictionary of features. You will also need to define the dimensions of the image since that is expected by your model input. That is handled by the `tensor_representation_group` below. 
###Code from tensorflow_metadata.proto.v0 import schema_pb2 from google.protobuf import text_format # Define Schema message as string schema_pbtxt = """ tensor_representation_group { key: "" value { tensor_representation { key: "%s" value { dense_tensor { column_name: "%s" shape { dim { size: 28 } dim { size: 28 } dim { size: 3 } } } } } } } feature { name: "%s" type: FLOAT } feature { name: "%s" type: FLOAT } feature { name: "%s" type: BYTES } """ % (IMAGE_KEY, IMAGE_KEY, IMAGE_KEY, LABEL_KEY, GROUP_KEY) # Parse the schema string to a message schema = text_format.Parse(schema_pbtxt, schema_pb2.Schema()) ###Output _____no_output_____ ###Markdown Run TFMAYou will pass the objects you created in the previous sections to `tfma.run_model_analysis()`. As you've done previously, this will take care of loading the model and data, and computing the metrics on the data slices you specified. ###Code # Define output directory OUTPUT_PATH = 'tfma_output' # Run model analysis eval_results = tfma.run_model_analysis( eval_shared_model=eval_shared_model, eval_config=eval_config, data_location=TFRECORD_FILE, schema=schema, output_path=OUTPUT_PATH ) ###Output _____no_output_____ ###Markdown Now you can view the fairness metrics you specified. The FPR should already be selected and you can see that it is considerably higher for the `Not Young` age group. Try to explore the widget and see if you can make other findings. Here are some suggestions:* Toggle the threshold drop down and see how the FPR changes with different thresholds* Change the baseline to `Young: Young` so the percentage difference (in the table below the chart) will be measured against it.* Deselect the `Overall` slice so you can compare the two age groups side by side.* Select other metrics to display and observe their charts. ###Code # Visualize the fairness metrics tfma.addons.fairness.view.widget_view.render_fairness_indicator(eval_results) ###Output _____no_output_____ ###Markdown After studying the discrepancies in your predictions, you can then investigate why that happens and have a plan on remidiating it. Aside from changing your model architecture, you can also look first at your training data. `fairness-indicators` is also packaged with TFDV so you can use it to generate statistics from your data. Here is a short review on how to do that. ###Code import tensorflow_data_validation as tfdv # Define training directory TRAIN_DIR = f'{GCS_BASE_DIR}celeb_a/2.0.0/celeb_a-train.tfrecord*' # View tfrecord filenames in GCS !gsutil ls {TRAIN_DIR} # Filter features to observe stats_options = tfdv.StatsOptions(feature_allowlist=['attributes/Young']) # Compute the statistics statistics = tfdv.generate_statistics_from_tfrecord(TRAIN_DIR, stats_options=stats_options) # Visualize the statistics tfdv.visualize_statistics(statistics) ###Output _____no_output_____
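###Markdown The visualization above is convenient for browsing, but the numbers can also be read straight out of the statistics proto. Below is a minimal sketch, under the assumption that TFDV reports the binary `attributes/Young` value with numeric statistics, so its mean equals the fraction of "Young" examples in the training split: ###Code
# Sketch: print the mean of each computed feature to gauge the group balance
for feature_stats in statistics.datasets[0].features:
    feature_name = '/'.join(feature_stats.path.step)
    print(feature_name, '-> mean:', feature_stats.num_stats.mean)
###Output _____no_output_____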
.ipynb_checkpoints/SVM_FireSegmentation-checkpoint.ipynb
###Markdown **APPLYING TO A SET OF IMAGES** **Reading the images and extracting the features** ###Code
# Imports needed by the cells below (the original notebook relied on them being loaded already)
import os
import pickle
import cv2
import numpy as np
import pandas as pd
from sklearn.svm import LinearSVC

image_dataset = pd.DataFrame()
img_path = '/content/drive/MyDrive/VisãoComput/ReducedBowFire/images/'

for image in os.listdir(img_path):
    #print(image)
    df = pd.DataFrame()
    input_img = cv2.imread(img_path + image)

    if input_img.ndim == 3 and input_img.shape[-1] == 3:
        img = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY)
    elif input_img.ndim == 2:
        img = input_img
    else:
        raise Exception('Only works with RGB or grayscale images')

    # Add the image pixels to the dataframe
    pixel_values = img.reshape(-1)
    df['Pixel_Value'] = pixel_values  # raw pixel values as the first feature
    df['Image_Name'] = image  # keep the image name for traceability

    # Generate the Gabor filter features
    num = 1
    kernels = []
    for theta in range(2):
        theta = theta/4. * np.pi
        for sigma in (1, 3):
            for lamda in np.arange(0, np.pi, np.pi/4):
                for gamma in (0.05, 0.5):
                    gabor_label = 'Gabor' + str(num)
                    ksize = 9
                    # theta is passed explicitly so each orientation actually produces a different kernel
                    kernel = cv2.getGaborKernel((ksize, ksize), sigma, theta, lamda, gamma, 0, ktype=cv2.CV_32F)
                    kernels.append(kernel)
                    # Filter the image and add the values to a new column of the dataframe
                    fimg = cv2.filter2D(img, cv2.CV_8UC3, kernel)
                    filtered_img = fimg.reshape(-1)
                    df[gabor_label] = filtered_img
                    num += 1

    # Generate the other features and add them
    #Feature 3 is canny edge
    edges = cv2.Canny(img, 100, 200)  #Image, min and max values
    edges1 = edges.reshape(-1)
    df['Canny Edge'] = edges1  #Add column to original dataframe

    from skimage.filters import roberts, sobel, scharr, prewitt

    #Feature 4 is Roberts edge
    edge_roberts = roberts(img)
    edge_roberts1 = edge_roberts.reshape(-1)
    df['Roberts'] = edge_roberts1

    #Feature 5 is Sobel
    edge_sobel = sobel(img)
    edge_sobel1 = edge_sobel.reshape(-1)
    df['Sobel'] = edge_sobel1

    #Feature 6 is Scharr
    edge_scharr = scharr(img)
    edge_scharr1 = edge_scharr.reshape(-1)
    df['Scharr'] = edge_scharr1

    #Feature 7 is Prewitt
    edge_prewitt = prewitt(img)
    edge_prewitt1 = edge_prewitt.reshape(-1)
    df['Prewitt'] = edge_prewitt1

    #Feature 8 is Gaussian with sigma=3
    from scipy import ndimage as nd
    gaussian_img = nd.gaussian_filter(img, sigma=3)
    gaussian_img1 = gaussian_img.reshape(-1)
    df['Gaussian s3'] = gaussian_img1

    #Feature 9 is Gaussian with sigma=7
    gaussian_img2 = nd.gaussian_filter(img, sigma=7)
    gaussian_img3 = gaussian_img2.reshape(-1)
    df['Gaussian s7'] = gaussian_img3

    #Feature 10 is Median with sigma=3
    median_img = nd.median_filter(img, size=3)
    median_img1 = median_img.reshape(-1)
    df['Median s3'] = median_img1

    #Feature 11 is Variance with size=3
    variance_img = nd.generic_filter(img, np.var, size=3)
    variance_img1 = variance_img.reshape(-1)
    df['Variance s3'] = variance_img1  #Add column to original dataframe

    image_dataset = image_dataset.append(df)
###Output _____no_output_____ ###Markdown **Reading the masks and creating another dataframe with the label values and the file names** ###Code
mask_dataset = pd.DataFrame()
mask_path = '/content/drive/MyDrive/VisãoComput/ReducedBowFire/masks/'

for mask in os.listdir(mask_path):
    #print(mask)
    df2 = pd.DataFrame()
    input_mask = cv2.imread(mask_path + mask)

    if input_mask.ndim == 3 and input_mask.shape[-1] == 3:
        label = cv2.cvtColor(input_mask, cv2.COLOR_BGR2GRAY)
    elif input_mask.ndim == 2:
        label = input_mask
    else:
        raise Exception('Only works with RGB or grayscale images')

    label_values = label.reshape(-1)
    df2['Label_Value'] = label_values
    df2['Mask_Name'] = mask

    mask_dataset = mask_dataset.append(df2)
###Output _____no_output_____
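###Markdown Before combining the two dataframes in the next cell, it is worth checking that they line up row for row, since `pd.concat` with `axis=1` pairs rows purely by position. The snippet below is a sketch of such a sanity check; it assumes the BowFire convention that each mask file name starts with its image's name (e.g. `fire077.png` / `fire077_gt.png`). ###Code
# Sketch: both dataframes must have the same number of rows (pixels) ...
print(image_dataset.shape, mask_dataset.shape)

# ... and the directory listing order of images and masks must correspond,
# because that is the order the rows were appended in above
for img_name, mask_name in zip(os.listdir(img_path), os.listdir(mask_path)):
    assert mask_name.startswith(img_name.split('.')[0]), (img_name, mask_name)
###Output _____no_output_____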
###Markdown **Combining the image and mask dataframes and preparing the data for the SVM** ###Code
image_dataset.reset_index(drop=True, inplace=True)
mask_dataset.reset_index(drop=True, inplace=True)

dataset = pd.concat([image_dataset, mask_dataset], axis = 1)
# dataset = dataset[dataset.Label_Value != 0]
# dataset
X = dataset.drop(labels = ['Image_Name', 'Mask_Name', 'Label_Value'], axis = 1)
Y = dataset['Label_Value'].values

from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.4, random_state=20)
#X_train = X_train.fillna(X_train.mean())
X_train
X_test

model = LinearSVC(max_iter=2000)
model.fit(X_train, Y_train)

prediction_test_train = model.predict(X_train)
prediction_test = model.predict(X_test)

from sklearn import metrics
print("Training accuracy = ", metrics.accuracy_score(Y_train, prediction_test_train))
print("Test accuracy = ", metrics.accuracy_score(Y_test, prediction_test))
print("\n")
print("Training IoU = ", metrics.jaccard_score(Y_train, prediction_test_train, pos_label=0))
print("Test IoU = ", metrics.jaccard_score(Y_test, prediction_test, pos_label=0))
print("\n")
print("Training Dice coefficient = ", metrics.f1_score(Y_train, prediction_test_train, pos_label=0))
print("Test Dice coefficient = ", metrics.f1_score(Y_test, prediction_test, pos_label=0))

model_name = 'fireSEG_model'
pickle.dump(model, open(model_name, 'wb'))
###Output _____no_output_____ ###Markdown **Predicting on new images** ###Code
def feature_extractor(img):
    df = pd.DataFrame()
    img2 = img.reshape(-1)
    df['Pixel_Value'] = img2  # same column name used at training time

    num = 1
    kernels = []
    for theta in range(2):
        theta = theta/4. * np.pi
        for sigma in (1, 3):
            for lamda in np.arange(0, np.pi, np.pi/4):
                for gamma in (0.05, 0.5):
                    gabor_label = 'Gabor' + str(num)
                    ksize = 9
                    # theta is passed explicitly, matching the training-time extraction
                    kernel = cv2.getGaborKernel((ksize, ksize), sigma, theta, lamda, gamma, 0, ktype=cv2.CV_32F)
                    kernels.append(kernel)
                    # Filter the image and add the values to a new column of the dataframe
                    fimg = cv2.filter2D(img, cv2.CV_8UC3, kernel)
                    filtered_img = fimg.reshape(-1)
                    df[gabor_label] = filtered_img
                    num += 1

    # Generate the other features and add them
    #Feature 3 is canny edge
    edges = cv2.Canny(img, 100, 200)  #Image, min and max values
    edges1 = edges.reshape(-1)
    df['Canny Edge'] = edges1  #Add column to original dataframe

    from skimage.filters import roberts, sobel, scharr, prewitt

    #Feature 4 is Roberts edge
    edge_roberts = roberts(img)
    edge_roberts1 = edge_roberts.reshape(-1)
    df['Roberts'] = edge_roberts1

    #Feature 5 is Sobel
    edge_sobel = sobel(img)
    edge_sobel1 = edge_sobel.reshape(-1)
    df['Sobel'] = edge_sobel1

    #Feature 6 is Scharr
    edge_scharr = scharr(img)
    edge_scharr1 = edge_scharr.reshape(-1)
    df['Scharr'] = edge_scharr1

    #Feature 7 is Prewitt
    edge_prewitt = prewitt(img)
    edge_prewitt1 = edge_prewitt.reshape(-1)
    df['Prewitt'] = edge_prewitt1

    #Feature 8 is Gaussian with sigma=3
    from scipy import ndimage as nd
    gaussian_img = nd.gaussian_filter(img, sigma=3)
    gaussian_img1 = gaussian_img.reshape(-1)
    df['Gaussian s3'] = gaussian_img1

    #Feature 9 is Gaussian with sigma=7
    gaussian_img2 = nd.gaussian_filter(img, sigma=7)
    gaussian_img3 = gaussian_img2.reshape(-1)
    df['Gaussian s7'] = gaussian_img3

    #Feature 10 is Median with sigma=3
    median_img = nd.median_filter(img, size=3)
    median_img1 = median_img.reshape(-1)
    df['Median s3'] = median_img1

    #Feature 11 is Variance with size=3
    variance_img = nd.generic_filter(img, np.var, size=3)
    variance_img1 = variance_img.reshape(-1)
    df['Variance s3'] = variance_img1  #Add column to original dataframe

    return df
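# (Optional sketch) the extractor can be spot-checked on one training image before
# running the full test loop, e.g.:
#   sample = cv2.cvtColor(cv2.imread(img_path + os.listdir(img_path)[0]), cv2.COLOR_BGR2GRAY)
#   print(feature_extractor(sample).shape)  # -> (number of pixels, number of feature columns)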
import pickle
from matplotlib import pyplot as plt

filename = '/content/fireSEG_model'
loaded_model = pickle.load(open(filename, 'rb'))

path = '/content/drive/MyDrive/VisãoComput/ReducedBowFire/test_images/'
import os
for image in os.listdir(path):
    print(image)
    img1 = cv2.imread(path+image)
    img = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    x = feature_extractor(img)
    result = loaded_model.predict(x)
    segmented = result.reshape((img.shape))

segmented
plt.imshow(segmented)
###Output _____no_output_____ ###Markdown **APPLYING TO A SINGLE IMAGE** ###Code
img = cv2.imread('/content/drive/MyDrive/VisãoComput/BowFire/images/fire077.png')
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
plt.imshow(img)

df = pd.DataFrame()
img2 = img.reshape(-1)
df['Original Image'] = img2

num = 1
kernels = []
for theta in range(2):
    theta = theta/4. * np.pi
    for sigma in (1, 3):
        for lamda in np.arange(0, np.pi, np.pi/4):
            for gamma in (0.05, 0.5):
                gabor_label = 'Gabor' + str(num)
                ksize = 5
                kernel = cv2.getGaborKernel((ksize, ksize), sigma, theta, lamda, gamma, 0, ktype=cv2.CV_32F)
                kernels.append(kernel)
                #Now filter the image and add values to a new column
                fimg = cv2.filter2D(img, cv2.CV_8UC3, kernel)
                filtered_img = fimg.reshape(-1)
                df[gabor_label] = filtered_img  #Labels columns as Gabor1, Gabor2, etc.
                print(gabor_label, ': theta=', theta, ': sigma=', sigma, ': lamda=', lamda, ': gamma=', gamma)
                num += 1  #Increment for gabor column label

########################################
#Generate OTHER FEATURES and add them to the data frame

#CANNY EDGE
edges = cv2.Canny(img, 100, 200)  #Image, min and max values
edges1 = edges.reshape(-1)
df['Canny Edge'] = edges1  #Add column to original dataframe

from skimage.filters import roberts, sobel, scharr, prewitt

#ROBERTS EDGE
edge_roberts = roberts(img)
edge_roberts1 = edge_roberts.reshape(-1)
df['Roberts'] = edge_roberts1

#SOBEL
edge_sobel = sobel(img)
edge_sobel1 = edge_sobel.reshape(-1)
df['Sobel'] = edge_sobel1

#SCHARR
edge_scharr = scharr(img)
edge_scharr1 = edge_scharr.reshape(-1)
df['Scharr'] = edge_scharr1

#PREWITT
edge_prewitt = prewitt(img)
edge_prewitt1 = edge_prewitt.reshape(-1)
df['Prewitt'] = edge_prewitt1

#GAUSSIAN with sigma=3
from scipy import ndimage as nd
gaussian_img = nd.gaussian_filter(img, sigma=3)
gaussian_img1 = gaussian_img.reshape(-1)
df['Gaussian s3'] = gaussian_img1

#GAUSSIAN with sigma=7
gaussian_img2 = nd.gaussian_filter(img, sigma=7)
gaussian_img3 = gaussian_img2.reshape(-1)
df['Gaussian s7'] = gaussian_img3

#MEDIAN with sigma=3
median_img = nd.median_filter(img, size=3)
median_img1 = median_img.reshape(-1)
df['Median s3'] = median_img1

#VARIANCE with size=3
variance_img = nd.generic_filter(img, np.var, size=3)
variance_img1 = variance_img.reshape(-1)
df['Variance s3'] = variance_img1  #Add column to original dataframe

######################################

#Now, add a column in the data frame for the Labels
#For this, we need to import the labeled image
labeled_img = cv2.imread('/content/drive/MyDrive/VisãoComput/BowFire/masks/fire077_gt.png')
#Remember that you can load an image with partial labels
#But, drop the rows with unlabeled data
labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_BGR2GRAY)
labeled_img1 = labeled_img.reshape(-1)
df['Labels'] = labeled_img1

print(df.head())

#Define the dependent variable that needs to be predicted (labels)
Y = df["Labels"].values

#Define the independent variables
X = df.drop(labels = ["Labels"], axis=1)

#Split data into train and test to verify accuracy after fitting the model.
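#Caveat: splitting at the pixel level means neighbouring pixels of this same image
#end up in both train and test, so the scores printed below are optimistic compared
#to evaluating on images the model has never seen.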
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.4, random_state=20)

model = LinearSVC(max_iter=2000)
model.fit(X_train, y_train)

prediction_test_train = model.predict(X_train)
prediction_test = model.predict(X_test)

from sklearn import metrics
print("Training accuracy = ", metrics.accuracy_score(y_train, prediction_test_train))
print("Test accuracy = ", metrics.accuracy_score(y_test, prediction_test))
print("\n")
print("Training IoU = ", metrics.jaccard_score(y_train, prediction_test_train, pos_label=0))
print("Test IoU = ", metrics.jaccard_score(y_test, prediction_test, pos_label=0))
print("\n")
print("Training Dice coefficient = ", metrics.f1_score(y_train, prediction_test_train, pos_label=0))
print("Test Dice coefficient = ", metrics.f1_score(y_test, prediction_test, pos_label=0))

import pickle

#Save the trained model as pickle string to disk for future use
filename = "fire_model"
pickle.dump(model, open(filename, 'wb'))

#To test the model on future datasets
loaded_model = pickle.load(open(filename, 'rb'))

result = loaded_model.predict(X)
segmented = result.reshape((img.shape))

from matplotlib import pyplot as plt
plt.imshow(segmented)
#plt.imsave('/content/drive/MyDrive/VisãoComput/fogo_carro.jpg', segmented, cmap ='jet')
###Output _____no_output_____
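###Markdown As a quick visual check, the predicted segmentation can be shown next to the input and the ground-truth mask. This is a small sketch that reuses the `img`, `labeled_img` and `segmented` arrays from the cells above. ###Code
# Sketch: input image, ground-truth mask, and SVM prediction side by side
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
axes[0].imshow(img, cmap='gray')
axes[0].set_title('Input (grayscale)')
axes[1].imshow(labeled_img, cmap='jet')
axes[1].set_title('Ground-truth mask')
axes[2].imshow(segmented, cmap='jet')
axes[2].set_title('SVM prediction')
for ax in axes:
    ax.axis('off')
plt.show()
###Output _____no_output_____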