```
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA, TruncatedSVD, NMF
from sklearn.preprocessing import Normalizer
import argparse
import time
import pickle as pkl


def year_binner(year, val=10):
    """Bin a year (or array of years) into buckets of `val` years."""
    return year - year % val


def dim_reduction(df, rows):
    """Reduce the sparse count matrix to a dense, length-normalised embedding via truncated SVD."""
    df_svd = TruncatedSVD(n_components=args.dims, n_iter=10, random_state=args.seed)
    df_svd.fit(df)
    print(f'Explained variance ratio {df_svd.explained_variance_ratio_.sum():2.3f}')
    df_reduced = df_svd.transform(df)
    df_reduced = Normalizer(copy=False).fit_transform(df_reduced)
    df_reduced = pd.DataFrame(df_reduced, index=rows)
    if args.temporal != 0:
        df_reduced.index = pd.MultiIndex.from_tuples(df_reduced.index, names=['common', 'time'])
    return df_reduced


parser = argparse.ArgumentParser(description='Gather data necessary for performing Regression')
parser.add_argument('--inputdir', type=str,
                    help='Directory that has the files with the fivegram counts')
parser.add_argument('--outputdir', type=str,
                    help='Directory in which the output files should be stored')
parser.add_argument('--temporal', type=int, default=0,
                    help='Value to bin the temporal information: 0 (remove temporal information), '
                         '1 (no binning), 10 (decades), 20 (20-year bins) or 50 (50-year bins)')
parser.add_argument('--contextual', action='store_true', help='Is the model contextual')
parser.add_argument('--cutoff', type=int, default=50,
                    help='Cut-off frequency for each compound per time period: 0 (none), 20, 50 or 100')
parser.add_argument('--seed', type=int, default=1991, help='Random seed')
parser.add_argument('--storedf', action='store_true', help='Should the embeddings be saved')
parser.add_argument('--dims', type=int, default=300, help='Desired number of reduced dimensions')
parser.add_argument('--input_format', type=str, default='csv', choices=['csv', 'pkl'],
                    help='Format of the input files: csv or pkl')
parser.add_argument('--save_format', type=str, default='pkl', choices=['pkl', 'csv'],
                    help='Format in which the reduced datasets should be saved: csv or pkl')

# Hard-coded arguments for running inside the notebook.
args = parser.parse_args('--inputdir ../Compounding/coha_compounds/ '
                         '--outputdir ../Compounding/coha_compounds/ '
                         '--cutoff 10 --storedf --input_format csv --save_format csv'.split())

print(f'Cutoff: {args.cutoff}')
print(f'Time span: {args.temporal}')
print(f'Dimensionality: {args.dims}')
print("Creating dense embeddings")

if args.contextual:
    print("CompoundCentric Model")
    print("Loading the constituent and compound vector datasets")

    # Compounds
    if args.input_format == "csv":
        compounds = pd.read_csv(args.inputdir + "/compounds.csv", sep="\t")
    elif args.input_format == "pkl":
        compounds = pd.read_pickle(args.inputdir + "/compounds.pkl")
    compounds.reset_index(inplace=True)
    compounds.year = compounds.year.astype("int32")
    compounds = compounds.query('1800 <= year <= 2010').copy()
    compounds['common'] = compounds['modifier'] + " " + compounds['head']
    if args.temporal == 0:
        print('No temporal information is stored')
        compounds = compounds.groupby(['common', 'context'])['count'].sum().to_frame()
        compounds.reset_index(inplace=True)
        compounds = compounds.loc[compounds.groupby(['common'])['count'].transform('sum').gt(args.cutoff)]
        compounds = compounds.groupby(['common', 'context'])['count'].sum()
    else:
        compounds['time'] = year_binner(compounds['year'].values, args.temporal)
        compounds = compounds.groupby(['common', 'context', 'time'])['count'].sum().to_frame()
        compounds.reset_index(inplace=True)
        compounds = compounds.loc[compounds.groupby(['common', 'time'])['count'].transform('sum').gt(args.cutoff)]
        compounds = compounds.groupby(['common', 'time', 'context'])['count'].sum()

    # Modifiers
    if args.input_format == "csv":
        modifiers = pd.read_csv(args.inputdir + "/modifiers.csv", sep="\t")
    elif args.input_format == "pkl":
        modifiers = pd.read_pickle(args.inputdir + "/modifiers.pkl")
    modifiers.reset_index(inplace=True)
    modifiers.year = modifiers.year.astype("int32")
    modifiers = modifiers.query('1800 <= year <= 2010').copy()
    modifiers.columns = ['common', 'context', 'year', 'count']
    modifiers['common'] = modifiers['common'].str.replace(r'_noun$', r'_m', regex=True)
    if args.temporal == 0:
        print('No temporal information is stored')
        modifiers = modifiers.groupby(['common', 'context'])['count'].sum().to_frame()
        modifiers.reset_index(inplace=True)
        modifiers = modifiers.loc[modifiers.groupby(['common'])['count'].transform('sum').gt(args.cutoff)]
        modifiers = modifiers.groupby(['common', 'context'])['count'].sum()
    else:
        modifiers['time'] = year_binner(modifiers['year'].values, args.temporal)
        modifiers = modifiers.groupby(['common', 'context', 'time'])['count'].sum().to_frame()
        modifiers = modifiers.loc[modifiers.groupby(['common', 'time'])['count'].transform('sum').gt(args.cutoff)]
        modifiers = modifiers.groupby(['common', 'time', 'context'])['count'].sum()

    # Heads
    if args.input_format == "csv":
        heads = pd.read_csv(args.inputdir + "/heads.csv", sep="\t")
    elif args.input_format == "pkl":
        heads = pd.read_pickle(args.inputdir + "/heads.pkl")
    heads.reset_index(inplace=True)
    heads.year = heads.year.astype("int32")
    heads = heads.query('1800 <= year <= 2010').copy()
    heads.columns = ['common', 'context', 'year', 'count']
    heads['common'] = heads['common'].str.replace(r'_noun$', r'_h', regex=True)
    if args.temporal == 0:
        print('No temporal information is stored')
        heads = heads.groupby(['common', 'context'])['count'].sum().to_frame()
        heads.reset_index(inplace=True)
        heads = heads.loc[heads.groupby(['common'])['count'].transform('sum').gt(args.cutoff)]
        heads = heads.groupby(['common', 'context'])['count'].sum()
    else:
        heads['time'] = year_binner(heads['year'].values, args.temporal)
        heads = heads.groupby(['common', 'context', 'time'])['count'].sum().to_frame()
        heads = heads.loc[heads.groupby(['common', 'time'])['count'].transform('sum').gt(args.cutoff)]
        heads = heads.groupby(['common', 'time', 'context'])['count'].sum()

    print('Concatenating all the datasets together')
    df = pd.concat([heads, modifiers, compounds], sort=True)

else:
    print("CompoundAgnostic Model")
    with open("data/coha_wordlist.pkl", "rb") as f:
        wordlist = pkl.load(f)

    # Compounds (phrases)
    if args.input_format == "csv":
        compounds = pd.read_csv(args.inputdir + "/phrases.csv", sep="\t")
    elif args.input_format == "pkl":
        compounds = pd.read_pickle(args.inputdir + "/phrases.pkl")
    compounds.reset_index(inplace=True)
    compounds.year = compounds.year.astype("int32")
    compounds = compounds.query('1800 <= year <= 2010').copy()
    compounds['common'] = compounds['modifier'] + " " + compounds['head']
    if args.temporal == 0:
        print('No temporal information is stored')
        compounds = compounds.groupby(['common', 'context'])['count'].sum().to_frame()
        compounds.reset_index(inplace=True)
        compounds = compounds.loc[compounds.groupby(['common'])['count'].transform('sum').gt(args.cutoff)]
        compounds = compounds.groupby(['common', 'context'])['count'].sum()
    else:
        compounds['time'] = year_binner(compounds['year'].values, args.temporal)
        compounds = compounds.groupby(['common', 'context', 'time'])['count'].sum().to_frame()
        compounds = compounds.loc[compounds.groupby(['common', 'time'])['count'].transform('sum').gt(args.cutoff)]
        compounds = compounds.groupby(['common', 'time', 'context'])['count'].sum()

    # Constituents (single words)
    if args.input_format == "csv":
        constituents = pd.read_csv(args.outputdir + "/words.csv", sep="\t")
    elif args.input_format == "pkl":
        constituents = pd.read_pickle(args.outputdir + "/words.pkl")
    constituents.reset_index(inplace=True)
    constituents.year = constituents.year.astype("int32")
    constituents = constituents.query('1800 <= year <= 2010').copy()
    constituents.columns = ['common', 'context', 'year', 'count']
    constituents.query('common in @wordlist', inplace=True)
    if args.temporal == 0:
        print('No temporal information is stored')
        constituents = constituents.groupby(['common', 'context'])['count'].sum().to_frame()
        constituents.reset_index(inplace=True)
        constituents = constituents.loc[constituents.groupby(['common'])['count'].transform('sum').gt(args.cutoff)]
        constituents = constituents.groupby(['common', 'context'])['count'].sum()
    else:
        constituents['time'] = year_binner(constituents['year'].values, args.temporal)
        constituents = constituents.groupby(['common', 'context', 'time'])['count'].sum().to_frame()
        constituents.reset_index(inplace=True)
        constituents = constituents.loc[constituents.groupby(['common', 'time'])['count'].transform('sum').gt(args.cutoff)]
        constituents = constituents.groupby(['common', 'time', 'context'])['count'].sum()

    print('Concatenating all the datasets together')
    df = pd.concat([constituents, compounds], sort=True)

# Store the concatenated counts sparsely and unpack them into a SciPy COO matrix.
df = df.astype(pd.SparseDtype(float, fill_value=0))
if args.temporal != 0:
    df, rows, _ = df.sparse.to_coo(row_levels=['common', 'time'], column_levels=['context'], sort_labels=False)
else:
    df, rows, _ = df.sparse.to_coo(row_levels=['common'], column_levels=['context'], sort_labels=False)

print('Running SVD')
df_reduced = dim_reduction(df, rows)

print('Splitting back into individual datasets and saving them')
if args.temporal != 0:
    df_reduced.index.names = ['common', 'time']
else:
    df_reduced.index.names = ['common']

# Compounds are the rows whose label contains two space-separated words.
compounds_reduced = df_reduced.loc[df_reduced.index.get_level_values(0).str.contains(r'\w \w')]
compounds_reduced.reset_index(inplace=True)
compounds_reduced[['modifier', 'head']] = compounds_reduced['common'].str.split(' ', n=1, expand=True).copy()
compounds_reduced
```
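The script above relies on pandas' sparse accessor to turn the grouped count Series into a SciPy COO matrix before the SVD step. As a minimal, self-contained illustration of that step (the compound labels and counts below are made up for the example, not taken from the data):

```
import pandas as pd

# Toy count Series with a (common, time, context) MultiIndex, mirroring the
# structure produced by the groupby/sum calls above.
idx = pd.MultiIndex.from_tuples(
    [("black bird", 1900, "sing"), ("black bird", 1910, "fly"), ("bird_h", 1900, "sing")],
    names=["common", "time", "context"],
)
counts = pd.Series([3.0, 1.0, 2.0], index=idx)

# Store the counts sparsely, then unpack them into a COO matrix plus its row labels.
sparse_counts = counts.astype(pd.SparseDtype(float, fill_value=0.0))
mat, rows, cols = sparse_counts.sparse.to_coo(
    row_levels=["common", "time"], column_levels=["context"], sort_labels=False
)
print(mat.shape)  # (3, 2): three (common, time) rows, two context columns
print(rows)       # the (common, time) tuples labelling each row of the matrix
```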
# Neural Networks for Regression with TensorFlow > Notebook demonstrates Neural Networks for Regression Problems with TensorFlow - toc: true - badges: true - comments: true - categories: [DeepLearning, NeuralNetworks, TensorFlow, Python, LinearRegression] - image: images/nntensorflow.png ## Neural Network Regression Model with TensorFlow This notebook is continuation of the Blog post [TensorFlow Fundamentals](https://sandeshkatakam.github.io/My-Machine_learning-Blog/tensorflow/machinelearning/2022/02/09/TensorFlow-Fundamentals.html). **The notebook is an account of my working for the Tensorflow tutorial by Daniel Bourke on Youtube**. **The Notebook will cover the following concepts:** * Architecture of a neural network regression model. * Input shapes and output shapes of a regression model(features and labels). * Creating custom data to view and fit. * Steps in modelling * Creating a model, compiling a model, fitting a model, evaluating a model. * Different evaluation methods. * Saving and loading models. **Regression Problems**: A regression problem is when the output variable is a real or continuous value, such as “salary” or “weight”. Many different models can be used, the simplest is the linear regression. It tries to fit data with the best hyper-plane which goes through the points. Examples: * How much will this house sell for? * How many people will buy this app? * How much will my health insurace be? * How much should I save each week for fuel? We can also use the regression model to try and predict where the bounding boxes should be in object detection problem. Object detection thus involves both regression and then classifying the image in the box(classification problem). ### Regression Inputs and outputs Architecture of a regression model: * Hyperparameters: * Input Layer Shape : same as shape of number of features. * Hidden Layrer(s): Problem specific * Neurons per hidden layer : Problem specific. * Output layer shape: same as hape of desired prediction shape. * Hidden activation : Usually ReLU(rectified linear unit) sometimes sigmoid. * Output acitvation: None, ReLU, logistic/tanh. * Loss function : MSE(Mean squared error) or MAE(Mean absolute error) or combination of both. * Optimizer: SGD(Stochastic Gradient Descent), Adam optimizer. **Source:** Adapted from page 239 of [Hands-On Machine learning with Scikit-Learn, Keras & TensorFlow](https://www.oreilly.com/library/view/hands-on-machine-learning/9781492032632/) Example of creating a sample regression model in TensorFlow: ``` # 1. Create a model(specific to your problem) model = tf.keras.Sequential([ tf.keras.Input(shape = (3,)), tf.keras.layers.Dense(100, activation = "relu"), tf.keras.layers.Dense(100, activation = "relu"), tf.keras.layers.Dense(100, activation = "relu"), tf.keras.layers.Dense(1, activation = None) ]) # 2. Compile the model model.compile(loss = tf.keras.losses.mae, optimizer = tf.keras.optimizers.Adam(lr = 0.0001), metrics = ["mae"]) # 3. Fit the model model.fit(X_train, Y_train, epochs = 100) ``` ### Introduction to Regression with Neural Networks in TensorFlow ``` # Import TensorFlow import tensorflow as tf print(tf.__version__) ## Creating data to view and fit import numpy as np import matplotlib.pyplot as plt from matplotlib import style style.use('dark_background') # create features X = np.array([-7.0,-4.0,-1.0,2.0,5.0,8.0,11.0,14.0]) # Create labels y = np.array([3.0,6.0,9.0,12.0,15.0,18.0,21.0,24.0]) # Visualize it plt.scatter(X,y) y == X + 10 ``` Yayy.. we got the relation by just seeing the data. 
Since the data is small and the relation ship is just linear, it was easy to guess the relation. ### Input and Output shapes ``` # Create a demo tensor for the housing price prediction problem house_info = tf.constant(["bedroom","bathroom", "garage"]) house_price = tf.constant([939700]) house_info, house_price X[0], y[0] X[1], y[1] input_shape = X[0].shape output_shape = y[0].shape input_shape, output_shape X[0].ndim ``` we are specifically looking at scalars here. Scalars have 0 dimension ``` # Turn our numpy arrays into tensors X = tf.cast(tf.constant(X), dtype = tf.float32) y = tf.cast(tf.constant(y), dtype = tf.float32) X.shape, y.shape input_shape = X[0].shape output_shape = y[0].shape input_shape, output_shape plt.scatter(X,y) ``` ### Steps in modelling with Tensorflow 1. **Creating a model** - define the input and output layers, as well as the hidden layers of a deep learning model. 2. **Compiling a model** - define the loss function(how wrong the prediction of our model is) and the optimizer (tells our model how to improve the partterns its learning) and evaluation metrics(what we can use to interpret the performance of our model). 3. Fitting a model - letting the model try to find the patterns between X & y (features and labels). ``` X,y X.shape # Set random seed tf.random.set_seed(42) # Create a model using the Sequential API model = tf.keras.Sequential([ tf.keras.layers.Dense(1) ]) # Compile the model model.compile(loss=tf.keras.losses.mae, # mae is short for mean absolute error optimizer=tf.keras.optimizers.SGD(), # SGD is short for stochastic gradient descent metrics=["mae"]) # Fit the model # model.fit(X, y, epochs=5) # this will break with TensorFlow 2.7.0+ model.fit(tf.expand_dims(X, axis=-1), y, epochs=5) # Check out X and y X, y # Try and make a prediction using our model y_pred = model.predict([17.0]) y_pred ``` The output is very far off from the actual value. So, Our model is not working correctly. Let's go and improve our model in the next section. ### Improving our Model Let's take a look about the three steps when we created the above model. We can improve the model by altering the steps we took to create a model. 1. **Creating a model** - here we might add more layers, increase the number of hidden units(all called neurons) within each of the hidden layers, change the activation function of each layer. 2. **Compiling a model** - here we might change the optimization function or perhaps the learning rate of the optimization function. 3. **Fitting a model** - here we might fit a model for more **epochs** (leave it for training longer) or on more data (give the model more examples to learn from) ``` # Let's rebuild our model with change in the epoch number # 1. Create the model model = tf.keras.Sequential([ tf.keras.layers.Dense(1) ]) # 2. Compile the model model.compile(loss = tf.keras.losses.mae, optimizer = tf.keras.optimizers.SGD(), metrics = ["mae"]) # 3. Fit the model to our dataset model.fit(tf.expand_dims(X, axis=-1), y, epochs=100, verbose = 0) # Our data X , y # Let's see if our model's prediction has improved model.predict([17.0]) ``` We got so close the actual value is 27 we performed a better prediction than the last model we trained. But we need to improve much better. Let's see what more we change and how close can we get to our actual output ``` # Let's rebuild our model with changing the optimization function to Adam # 1. Create the model model = tf.keras.Sequential([ tf.keras.layers.Dense(1) ]) # 2. 
Compile the model model.compile(loss = tf.keras.losses.mae, optimizer = tf.keras.optimizers.Adam(lr = 0.0001), # lr stands for learning rate metrics = ["mae"]) # 3. Fit the model to our dataset model.fit(tf.expand_dims(X, axis=-1), y, epochs=100, verbose = 0) # Prediction of our newly trained model: model.predict([17.0]) # we are going to predict for the same input value 17 ``` Oh..god!! This result went really bad for us. ``` # Let's rebuild our model by adding one extra hidden layer with 100 units # 1. Create the model model = tf.keras.Sequential([ tf.keras.layers.Dense(100, activation = "relu"), # only difference we made tf.keras.layers.Dense(1) ]) # 2. Compile the model model.compile(loss = "mae", optimizer = tf.keras.optimizers.SGD(), metrics = ["mae"]) # 3. Fit the model to our dataset model.fit(tf.expand_dims(X, axis=-1), y, epochs=100, verbose = 0) # verbose will hide the output from epochs X , y # It's prediction time! model.predict([17.0]) ``` Oh, this should be 27 but this prediction is very far off from our previous prediction. It seems that our previous model did better than this. Even though we find the values of our loss function are very low than that of our previous model. We still are far away from our label value. **Why is that so??** The explanation is our model is overfitting the dataset. That means it is trying to map a function that just fits the already provided examples correctly but it cannot fit the new examples that we are giving. So, the `mae` and `loss value` if not the ultimate metric to check for improving the model. because we need to get less error for new examples that the model has not seen before. ``` # Let's rebuild our model by using Adam optimizer # 1. Create the model model = tf.keras.Sequential([ tf.keras.layers.Dense(100, activation = "relu"), # only difference we made tf.keras.layers.Dense(1) ]) # 2. Compile the model model.compile(loss = "mae", optimizer = tf.keras.optimizers.Adam(), metrics = ["mae"]) # 3. Fit the model to our dataset model.fit(tf.expand_dims(X, axis=-1), y, epochs=100, verbose = 0)# verbose will hide the epochs output model.predict([17.0]) ``` Still not better!! ``` # Let's rebuild our model by adding more layers # 1. Create the model model = tf.keras.Sequential([ tf.keras.layers.Dense(100, activation = "relu"), tf.keras.layers.Dense(100, activation = "relu"), tf.keras.layers.Dense(100, activation = "relu"),# only difference we made tf.keras.layers.Dense(1) ]) # default value of lr is 0.001 # 2. Compile the model model.compile(loss = "mae", optimizer = tf.keras.optimizers.Adam(lr = 0.01), # lr stands for learning rate metrics = ["mae"]) # 3. Fit the model to our dataset model.fit(tf.expand_dims(X, axis=-1), y, epochs=100, verbose = 0) # verbose will hide the epochs output ``` The learning rate is the most important hyperparameter for all the Neural Networks ### Evaluating our model In practice, a typical workflow you'll go through when building a neural network is: ``` Build a model -> fit it -> evaluate it -> tweak a model -> fit it -> evaluate it -> tweak it -> fit it ``` Common ways to improve a deep model: * Adding Layers * Increase the number of hidden units * Change the activation functions * Change the optimization function * Change the learning rate * Fitting on more data * Train for longer (more epochs) **Because we can alter each of these they are called hyperparameters** When it comes to evaluation.. 
there are 3 words you should memorize: > "Visualize, Visualize, Visualize" It's a good idea to visualize: * The data - what data are working with? What does it look like * The model itself - What does our model look like? * The training of a model - how does a model perform while it learns? * The predictions of the model - how does the prediction of the model line up against the labels(original value) ``` # Make a bigger dataset X_large = tf.range(-100,100,4) X_large y_large = X_large + 10 y_large import matplotlib.pyplot as plt plt.scatter(X_large,y_large) ``` ### The 3 sets ... * **Training set** - The model learns from this data, which is typically 70-80% of the total data you have available. * **validation set** - The model gets tuned on this data, which is typically 10-15% of the data avaialable. * **Test set** - The model gets evaluated on this data to test what it has learned. This set is typically 10-15%. ``` # Check the length of how many samples we have len(X_large) # split the data into train and test sets # since the dataset is small we can skip the valdation set X_train = X_large[:40] X_test = X_large[40:] y_train = y_large[:40] y_test = y_large[40:] len(X_train), len(X_test), len(y_train), len(y_test) ``` ### Visualizing the data Now we've got our data in training and test sets. Let's visualize it. ``` plt.figure(figsize = (10,7)) # Plot the training data in blue plt.scatter(X_train, y_train, c= 'b', label = "Training data") # Plot the test data in green plt.scatter(X_test, y_test, c = "g", label = "Training data") plt.legend(); # Let's have a look at how to build neural network for our data # 1. Create the model model = tf.keras.Sequential([ tf.keras.layers.Dense(1) ]) # default value of lr is 0.001 # 2. Compile the model model.compile(loss = "mae", optimizer = tf.keras.optimizers.SGD(), # lr stands for learning rate metrics = ["mae"]) # 3. Fit the model to our dataset #model.fit(tf.expand_dims(X_train, axis=-1), y_train, epochs=100) ``` Let's visualize it before fitting the model ``` model.summary() ``` model.summary() doesn't work without building the model or fitting the model ``` X[0], y[0] # Let's create a model which builds automatically by defining the input_shape arguments tf.random.set_seed(42) # Create a model(same as above) model = tf.keras.Sequential([ tf.keras.layers.Dense(1, input_shape = [1]) # input_shape is 1 refer above code cell ]) # Compile the model model.compile(loss= "mae", optimizer = tf.keras.optimizers.SGD(), metrics = ["mae"]) model.summary() ``` * **Total params** - total number of parameters in the model. * **Trainable parameters**- these are the parameters (patterns) the model can update as it trains. * **Non-Trainable parameters** - these parameters aren't updated during training(this is typical when you have paramters from other models during **transfer learning**) ``` # Let's have a look at how to build neural network for our data # 1. Create the model model = tf.keras.Sequential([ tf.keras.layers.Dense(10, input_shape = [1], name= "input_layer"), tf.keras.layers.Dense(1, name = "output_layer") ], name = "model_1") # 2. Compile the model model.compile(loss = "mae", optimizer = tf.keras.optimizers.SGD(), # lr stands for learning rate metrics = ["mae"]) model.summary() ``` We have changed the layer names and added our custom model name. ``` from tensorflow.keras.utils import plot_model plot_model(model = model, to_file = 'model1.png', show_shapes = True) # Let's have a look at how to build neural network for our data # 1. 
Create the model
model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, activation = "relu"),
    tf.keras.layers.Dense(100, activation = "relu"),
    tf.keras.layers.Dense(100, activation = "relu"), # only difference we made
    tf.keras.layers.Dense(1)
])

# 2. Compile the model (the default value of lr is 0.001)
model.compile(loss = "mae",
              optimizer = tf.keras.optimizers.Adam(lr = 0.01), # lr stands for learning rate
              metrics = ["mae"])

# 3. Fit the model to our dataset
model.fit(tf.expand_dims(X_train, axis=-1), y_train, epochs=100, verbose = 0)

model.predict(X_test)
```

We got much closer to the actual values this time.

```
model.summary()

from tensorflow.keras.utils import plot_model
plot_model(model = model, to_file = 'model.png', show_shapes = True)
```

### Visualizing our model's predictions

To visualize predictions, it's a good idea to plot them against the ground truth labels. Often you'll see this in the form of `y_test` or `y_true` versus `y_pred`.

```
# Set random seed
tf.random.set_seed(42)

# Create a model (same as above)
model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, input_shape = [1], name = "input_layer"), # define the input_shape to our model
    tf.keras.layers.Dense(1, name = "output_layer")
], name = "revised_model_1")

# Compile model (same as above)
model.compile(loss=tf.keras.losses.mae,
              optimizer=tf.keras.optimizers.SGD(),
              metrics=["mae"])

model.summary()

model.fit(X_train, y_train, epochs=100, verbose=0)
model.summary()

# Make some predictions
y_pred = model.predict(X_test)
tf.constant(y_pred)
```

These are our predictions!

```
y_test
```

These are the ground truth labels!

```
plot_model(model, show_shapes=True)
```

**Note:** If you feel like you're going to reuse some kind of functionality in the future, it's a good idea to define a function so that you can reuse it whenever you need it.

```
# Let's create a plotting function
def plot_predictions(train_data = X_train,
                     train_labels = y_train,
                     test_data = X_test,
                     test_labels = y_test,
                     predictions = y_pred):
    """
    Plots training data, test data and compares predictions to ground truth labels.
    """
    plt.figure(figsize = (10,7))
    # Plot training data in blue
    plt.scatter(train_data, train_labels, c= "b", label = "Training data")
    # Plot testing data in green
    plt.scatter(test_data, test_labels, c= "g", label = "Testing data")
    # Plot model's predictions in red
    plt.scatter(test_data, predictions, c= "r", label = "Predictions")
    # Show legends
    plt.legend();

plot_predictions(train_data=X_train,
                 train_labels=y_train,
                 test_data=X_test,
                 test_labels=y_test,
                 predictions=y_pred)
```

We tuned our model well this time; the predictions line up closely with the actual values.

### Evaluating our model's predictions with regression evaluation metrics

Depending on the problem you're working on, there will be different evaluation metrics to evaluate your model's performance. Since we're working on a regression problem, two of the main metrics are:

* **MAE** - mean absolute error, "on average, how wrong is each of my model's predictions"
  * TensorFlow code: `tf.keras.losses.MAE()`
  * or `tf.metrics.mean_absolute_error()`

$$ MAE = \frac{\sum_{i=1}^{n} |y_i - x_i|}{n} $$

* **MSE** - mean squared error, "the average of the squared errors"
  * `tf.keras.losses.MSE()`
  * `tf.metrics.mean_squared_error()`

$$ MSE = \frac{1}{n} \sum_{i=1}^{n}(Y_i - \hat{Y_i})^2 $$

$\hat{Y_i}$ is the prediction our model makes. $Y_i$ is the label value.

* **Huber** - a combination of MSE and MAE, less sensitive to outliers than MSE.
* `tf.keras.losses.Huber()` ``` # Evaluate the model on test set model.evaluate(X_test, y_test) # calculate the mean absolute error mae = tf.metrics.mean_absolute_error(y_true = y_test, y_pred = tf.constant(y_pred)) mae ``` We got the metric values wrong..why did this happen?? ``` tf.constant(y_pred) y_test ``` Notice that the shape of `y_pred` is (10,1) and the shape of `y_test` is (10,) They might seem the same but they are not of the same shape. Let's reshape the tensor to make the shapes equal. ``` tf.squeeze(y_pred) # Calculate the mean absolute error mae = tf.metrics.mean_absolute_error(y_true = y_test, y_pred = tf.squeeze(y_pred)) mae ``` Now,we got our metric value. The mean absolute error of our model is 3.1969407. Now, let's calculate the mean squared error and see how that goes. ``` # Calculate the mean squared error mse = tf.metrics.mean_squared_error(y_true = y_test, y_pred = tf.squeeze(y_pred)) mse ``` Our mean squared error is 13.070143. Remember, the mean squared error squares the error for every example in the test set and averages the values. So, generally, the mse is largeer than mae. When larger errors are more significant than smaller errors, then it is best to use mse. MAE can be used as a great starter metric for any regression problem. We can also try Huber and see how that goes. ``` # Calculate the Huber metric for our model huber_metric = tf.losses.huber(y_true = y_test, y_pred = tf.squeeze(y_pred)) huber_metric # Make some functions to reuse MAE and MSE and also Huber def mae(y_true, y_pred): return tf.metrics.mean_absolute_error(y_true = y_test, y_pred = tf.squeeze(y_pred)) def mse(y_true, y_pred): return tf.metrics.mean_squared_error(y_true = y_test, y_pred = tf.squeeze(y_pred)) def huber(y_true, y_pred): return tf.losses.huber(y_true = y_test, y_pred = tf.squeeze(y_pred)) ``` ### Running experiments to improve our model ``` Build a model -> fit it -> evaluate it -> tweak a model -> fit it -> evaluate it -> tweak it -> fit it ``` 1. Get more data - get more examples for your model to train on(more oppurtunities to learn patterns or relationships between features and labels). 2. Make your mode larger(using a more complex model) - this might come in the form of more layeres or more hidden unites in each layer. 3. Train for longer - give your model more of a chance to find patterns in the data. Let's do a few modelling experiments: 1. `model_1` - same as original model, 1 layer, trained for 100 epochs. 2. `model_2` - 2 layers, trained for 100 epochs 3. `model_3` - 2 layers, trained for 500 epochs. You can design more experiments too to make the model more better **Build `Model_1`** ``` X_train, y_train # Set random seed tf.random.set_seed(42) # 1. Create the model model_1 = tf.keras.Sequential([ tf.keras.layers.Dense(1, input_shape = [1]) ], name = "Model_1") # 2. Compile the model model_1.compile(loss = tf.keras.losses.mae, optimizer = tf.keras.optimizers.SGD(), metrics = ["mae"]) # 3. Fit the model model_1.fit(X_train, y_train ,epochs = 100, verbose = 0) model_1.summary() # Make and plot the predictions for model_1 y_preds_1 = model_1.predict(X_test) plot_predictions(predictions = y_preds_1) # Calculate model_1 evaluation metrics mae_1 = mae(y_test, y_preds_1) mse_1 = mse(y_test, y_preds_1) mae_1, mse_1 ``` **Build `Model_2`** * 2 dense layers, trained for 100 epochs ``` # Set random seed tf.random.set_seed(42) # 1. Create the model model_2 = tf.keras.Sequential([ tf.keras.layers.Dense(10, input_shape =[1]), tf.keras.layers.Dense(1) ], name = "model_2") # 2. 
Compile the model
model_2.compile(loss = tf.keras.losses.mae,
                optimizer = tf.keras.optimizers.SGD(),
                metrics = ["mse"]) # let's build this model with mse as the evaluation metric

# 3. Fit the model
model_2.fit(X_train, y_train, epochs = 100, verbose = 0)

model_2.summary()

# Make and plot predictions of model_2
y_preds_2 = model_2.predict(X_test)
plot_predictions(predictions = y_preds_2)
```

This model improved considerably over the previous one. If you want to compare, scroll up to the previous `plot_predictions` output and compare it with this one.

```
# Calculate the model_2 evaluation metrics
mae_2 = mae(y_test, y_preds_2)
mse_2 = mse(y_test, y_preds_2)
mae_2, mse_2
```

**Build `Model_3`**

* 2 layers, trained for 500 epochs

```
# Set random seed
tf.random.set_seed(42)

# 1. Create the model
model_3 = tf.keras.Sequential([
    tf.keras.layers.Dense(10, input_shape =[1]),
    tf.keras.layers.Dense(1)
], name = "model_3")

# 2. Compile the model
model_3.compile(loss = tf.keras.losses.mae,
                optimizer = tf.keras.optimizers.SGD(),
                metrics = ["mae"])

# 3. Fit the model (note: fit model_3, not model_2)
model_3.fit(X_train, y_train, epochs = 500, verbose = 0)

# Make and plot some predictions
y_preds_3 = model_3.predict(X_test)
plot_predictions(predictions = y_preds_3)
```

This performance is even worse than the first model's; we have actually made the model worse. Why? We overfitted the model by training it for much longer than necessary.

```
# Calculate the model_3 evaluation metrics
mae_3 = mae(y_test, y_preds_3)
mse_3 = mse(y_test, y_preds_3)
mae_3, mse_3
```

The error is extremely high. The best of our models so far is `model_2`.

The machine learning practitioner's motto: `Experiment, experiment, experiment`

**Note:** You want to start with small experiments (small models), make sure they work, and then increase their scale when necessary.

### Comparing the results of our experiments

We've run a few experiments, let's compare the results now.

```
# Let's compare our models' results using a pandas DataFrame
import pandas as pd

model_results = [["model_1", mae_1.numpy(), mse_1.numpy()],
                 ["model_2", mae_2.numpy(), mse_2.numpy()],
                 ["model_3", mae_3.numpy(), mse_3.numpy()]]
all_results = pd.DataFrame(model_results, columns =["model", "mae", "mse"])
all_results
```

It looks like model_2 performed the best. Let's take a closer look at it.

```
model_2.summary()
```

This is the model that did the best on our dataset.

**Note:** One of your main goals should be to minimize the time between your experiments. The more experiments you do, the more things you will figure out that don't work and, in turn, the closer you get to figuring out what does work. Remember the machine learning practitioner's motto: "experiment, experiment, experiment".

## Tracking your experiments

One really good habit of machine learning modelling is to track the results of your experiments, which can become tedious when you are running lots of them. Luckily, there are tools to help us!

**Resources:** As you build more models, you'll want to look into using:

* TensorBoard - a component of the TensorFlow library to help track modelling experiments. It is integrated into the TensorFlow library.
* Weights & Biases - a tool for tracking all kinds of machine learning experiments (it plugs straight into TensorBoard).
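As a quick illustration of the first of those tools, here is a minimal sketch of how a TensorBoard callback could be attached to one of the models above (the `log_dir` path and the reuse of the model_2 architecture are assumptions for the example, not part of the original notebook):

```
# Minimal sketch: log training runs so they can be inspected in TensorBoard.
import tensorflow as tf

tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="logs/model_2_experiment")

tracked_model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, input_shape=[1]),
    tf.keras.layers.Dense(1)
])
tracked_model.compile(loss="mae", optimizer=tf.keras.optimizers.SGD(), metrics=["mae"])

# Passing the callback to fit() writes per-epoch metrics under log_dir; the run can
# then be viewed in the TensorBoard UI (e.g. `%tensorboard --logdir logs` after
# loading the tensorboard notebook extension).
tracked_model.fit(X_train, y_train, epochs=100, verbose=0, callbacks=[tensorboard_callback])
```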
## Saving our models Saving our models allows us to use them outside of Google Colab(or wherever they were trained) such as in a web application or a mobile app. There are two main formats we can save our model: 1. The SavedModel format 2. The HDF5 format `model.save()` allows us to save the model and we can use it again to do add things to the model after reloading it. ``` # Save model using savedmodel format model_2.save("best_model_SavedModel_format") ``` If we are planning to use this model inside the tensorflow framework. we will be better off using the `SavedModel` format. But if we are planning to export the model else where and use it outside the tensorflow framework use the HDF5 format. ``` # Save model using HDF5 format model_2.save("best_model_HDF5_format.h5") ``` Saving a model with SavedModel format will give us a folder with some files regarding our model. Saving a model with HDF5 format will give us just one file with our model. ### Loading in a saved model ``` # Load in the SavedModel format model loaded_SavedModel_format = tf.keras.models.load_model("/content/best_model_SavedModel_format") loaded_SavedModel_format.summary() # Let's check is that the same thing as model_2 model_2.summary() # Compare the model_2 predictions with SavedModel format model predictions model_2_preds = model_2.predict(X_test) loaded_SavedModel_format_preds = loaded_SavedModel_format.predict(X_test) model_2_preds == loaded_SavedModel_format_preds mae(y_true = y_test, y_pred = model_2_preds) == mae(y_true = y_test, y_pred = loaded_SavedModel_format_preds) # Load in a model using the .hf format loaded_h5_model = tf.keras.models.load_model("/content/best_model_HDF5_format.h5") loaded_h5_model.summary() model_2.summary() ``` Yeah the loading of .hf format model matched with our original mode_2 format. So, our model loading worked correctly. ``` # Check to see if loaded .hf model predictions match model_2 model_2_preds = model_2.predict(X_test) loaded_h5_model_preds = loaded_h5_model.predict(X_test) model_2_preds == loaded_h5_model_preds ``` ### Download a model(or any other file) from google colab If you want to download your files from Google Colab: 1. you can go to the files tab and right click on the file you're after and click download. 2. Use code(see the cell below). 3. You can save it to google drive by connecting to google drive and copying it there. ``` # Download a file from Google Colab from google.colab import files files.download("/content/best_model_HDF5_format.h5") # Save a file from Google Colab to Google Drive(requires mounting google drive) !cp /content/best_model_HDF5_format.h5 /content/drive/MyDrive/tensor-flow-deep-learning !ls /content/drive/MyDrive/tensor-flow-deep-learning ``` We have saved our model to our google drive !!! ## A larger example We take a larger dataset to do create a regression model. The model we do is insurance forecast by using linear regression available from kaggle [Medical Cost Personal Datasets](https://www.kaggle.com/mirichoi0218/insurance) ``` # Import required libraries import tensorflow as tf import pandas as pd import matplotlib.pyplot as plt # Read in the insurance data set insurance = pd.read_csv("https://raw.githubusercontent.com/stedy/Machine-Learning-with-R-datasets/master/insurance.csv") insurance ``` This is a quite bigger dataset than the one we have previously worked with. 
``` # one hot encoding on a pandas dataframe insurance_one_hot = pd.get_dummies(insurance) insurance_one_hot.head() # Create X & y values (features and labels) X = insurance_one_hot.drop("charges", axis =1) y = insurance_one_hot["charges"] # View X X.head() # View y y.head() # Create training and test sets from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y,test_size = 0.2, random_state = 42) len(X), len(X_train), len(X_test) X_train insurance["smoker"] , insurance["sex"] # Build a neural network (sort of like model_2 above) tf.random.set_seed(42) # 1. Create a model insurance_model = tf.keras.Sequential([ tf.keras.layers.Dense(10), tf.keras.layers.Dense(1) ]) # 2. Compile the model insurance_model.compile(loss = tf.keras.losses.mae, optimizer = tf.keras.optimizers.SGD(), metrics = ["mae"]) #3. Fit the model insurance_model.fit(X_train, y_train,epochs = 100, verbose = 0) # Check the results of the insurance model on the test data insurance_model.evaluate(X_test,y_test) y_train.median(), y_train.mean() ``` Right now it looks like our model is not performing well, lets try and improve it. To try and improve our model, we'll run 2 experiments: 1. Add an extra layer with more hidden units and use the Adam optimizer 2. Train for longer (like 200 epochs) 3. We can also do our custom experiments to improve it. ``` # Set random seed tf.random.set_seed(42) # 1. Create the model insurance_model_2 = tf.keras.Sequential([ tf.keras.layers.Dense(100), tf.keras.layers.Dense(10), tf.keras.layers.Dense(1) ],name = "insurace_model_2") # 2. Compile the model insurance_model_2.compile(loss = tf.keras.losses.mae, optimizer = tf.keras.optimizers.Adam(), metrics = ["mae"]) # 3. Fit the model insurance_model_2.fit(X_train, y_train, epochs = 100, verbose = 0) insurance_model_2.evaluate(X_test, y_test) # Set random seed tf.random.set_seed(42) # 1. Create the model insurance_model_3 = tf.keras.Sequential([ tf.keras.layers.Dense(100), tf.keras.layers.Dense(10), tf.keras.layers.Dense(1) ],name = "insurace_model_2") # 2. Compile the model insurance_model_3.compile(loss = tf.keras.losses.mae, optimizer = tf.keras.optimizers.Adam(), metrics = ["mae"]) # 3. Fit the model history = insurance_model_3.fit(X_train, y_train, epochs = 200, verbose = 0) # Evaluate our third model insurance_model_3.evaluate(X_test, y_test) # Plot history (also known as a loss curve or a training curve) pd.DataFrame(history.history).plot() plt.ylabel("loss") plt.xlabel("epochs") plt.title("Training curve of our model") ``` **Question:** How long should you train for? It depends, It really depends on problem you are working on. However, many people have asked this question before, so TensorFlow has a solution!, It is called [EarlyStopping callback](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/EarlyStopping), which is a TensorFlow component you can add to your model to stop training once it stops improving a certain metric. ## Preprocessing data (normalization and standardization) Short review of our modelling steps in TensorFlow: 1. Get data ready(turn into tensors) 2. Build or pick a pretrained model (to suit your problem) 3. Fit the model to the data and make a prediction. 4. Evaluate the model. 5. Imporve through experimentation. 6. Save and reload your trained models. we are going to focus on the step 1 to make our data set more rich for training. some steps involved in getting data ready: 1. Turn all data into numbers(neural networks can't handle strings). 2. 
Make sure all of your tensors are the right shape. 3. Scale features(normalize or standardize, neural networks tend to prefer normalization) -- this is the one thing we haven't done while preparing our data. **If you are not sure on which to use for scaling, you could try both and see which perform better** ``` # Import required libraries import tensorflow as tf import pandas as pd import matplotlib.pyplot as plt # Read in the insurance dataframe insurance = pd.read_csv("https://raw.githubusercontent.com/stedy/Machine-Learning-with-R-datasets/master/insurance.csv") insurance ``` To prepare our data, we can borrow few classes from Scikit-Learn ``` from sklearn.compose import make_column_transformer from sklearn.preprocessing import MinMaxScaler, OneHotEncoder from sklearn.model_selection import train_test_split ``` **Feature Scaling**: | **Scaling type** | **what it does** | **Scikit-Learn Function** | **when to use** | | --- | --- | --- | --- | | scale(refers to as normalization) | converts all values to between 0 and 1 whilst preserving the original distribution | `MinMaxScaler` | Use as default scaler with neural networks | | Standarization | Removes the mean and divides each value by the standard deviation | `StandardScaler` | Transform a feature to have close to normal distribution | ``` #Create a column transformer ct = make_column_transformer( (MinMaxScaler(), ["age", "bmi", "children"]), # Turn all values in these columns between 0 and 1 (OneHotEncoder(handle_unknown = "ignore"), ["sex", "smoker", "region"]) ) # Create our X and Y values # because we reimported our dataframe X = insurance.drop("charges", axis = 1) y = insurance["charges"] # Build our train and test set X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2, random_state = 42) # Fit the column transformer to our training data (only training data) ct.fit(X_train) # Transform training and test data with normalization(MinMaxScaler) and OneHotEncoder X_train_normal = ct.transform(X_train) X_test_normal = ct.transform(X_test) # What does our data look like now?? X_train.loc[0] X_train_normal[0], X_train_normal[12], X_train_normal[78] # we have turned all our data into numerical encoding and aso normalized the data X_train.shape, X_train_normal.shape ``` Beautiful! our data has been normalized and One hot encoded. Let's build Neural Network on it and see how it goes. ``` # Build a neural network model to fit on our normalized data tf.random.set_seed(42) # 1. Create the model insurance_model_4 = tf.keras.Sequential([ tf.keras.layers.Dense(100), tf.keras.layers.Dense(10), tf.keras.layers.Dense(1) ]) # 2. Compile the model insurance_model_4.compile(loss = tf.keras.losses.mae, optimizer = tf.keras.optimizers.Adam(), metrics = ["mae"]) # 3. Fit the model history = insurance_model_4.fit(X_train_normal, y_train, epochs= 100, verbose = 0) # Evaluate our insurance model trained on normalized data insurance_model_4.evaluate(X_test_normal, y_test) insurance_model_4.summary() pd.DataFrame(history.history).plot() plt.ylabel("loss") plt.xlabel("epochs") plt.title("Training curve of insurance_model_4") ``` Let's just plot some graphs. Since we have use them the least in this notebook. 
``` X["age"].plot(kind = "hist") X["bmi"].plot(kind = "hist") X["children"].value_counts() ``` ## **External Resources:** * [MIT introduction deep learning lecture 1](https://youtu.be/njKP3FqW3Sk) * [Kaggle's datasets](https://www.kaggle.com/data) * [Lion Bridge's collection of datasets](https://lionbridge.ai/datasets/) ## Bibliography: * [Learn TensorFlow and Deep Learning fundamentals with Python (code-first introduction) Part 1/2](https://www.youtube.com/watch?v=tpCFfeUEGs8&list=RDCMUCr8O8l5cCX85Oem1d18EezQ&start_radio=1&rv=tpCFfeUEGs8&t=3) * [Medical cost personal dataset](https://www.kaggle.com/mirichoi0218/insurance) * [TensorFlow documentation](https://www.tensorflow.org/api_docs/python/tf) * [TensorFlow and Deep learning Daniel Bourke GitHub Repo](https://github.com/mrdbourke/tensorflow-deep-learning)
# Análise de Dados com Python Neste notebook, utilizaremos dados de automóveis para analisar a influência das características de um carro em seu preço, tentando posteriormente prever qual será o preço de venda de um carro. Utilizaremos como fonte de dados um arquivo .csv com dados já tratados em outro notebook. Caso você tenha dúvidas quanto a como realizar o tratamento dos dados, dê uma olhada no meu repositório Learn-Pandas ``` import pandas as pd import numpy as np df = pd.read_csv('clean_auto_df.csv') df.head() ``` <h4> Utilizando visualização de dados para verificar padrões de características individuais</h4> ``` # Importando as bibliotecas "Matplotlib" e "Seaborn # utilizando "%matplotlib inline" para plotar o gráfico dentro do notebook. import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline ``` <h4> Como escolher o método de visualização correto? </h4> <p> Ao visualizar variáveis individuais, é importante primeiro entender com que tipo de variável você está lidando. Isso nos ajudará a encontrar o método de visualização correto para essa variável. Por exemplo, podemos calcular a correlação entre variáveis do tipo "int64" ou "float64" usando o método "corr":</p> ``` df.corr() ``` Os elementos diagonais são sempre um; (estudaremos isso, mais precisamente a correlação de Pearson no final do notebook) ``` # se quisermos verificar a correlação de apenas algumas colunas df[['bore', 'stroke', 'compression-ratio', 'horsepower']].corr() ``` <h2> Variáveis numéricas contínuas: </h2> <p> Variáveis numéricas contínuas são variáveis que podem conter qualquer valor dentro de algum intervalo. Variáveis numéricas contínuas podem ter o tipo "int64" ou "float64". Uma ótima maneira de visualizar essas variáveis é usando gráficos de dispersão com linhas ajustadas. </p> <p> Para começar a compreender a relação (linear) entre uma variável individual e o preço. Podemos fazer isso usando "regplot", que plota o gráfico de dispersão mais a linha de regressão ajustada para os dados. </p> <h4> Relação linear positiva </h4> Vamos encontrar o gráfico de dispersão de "engine-size" e "price" ``` # Engine size as potential predictor variable of price sns.regplot(x="engine-size", y="price", data=df) plt.ylim(0,) ``` <p> Note que conforme o tamanho do motor aumenta, o preço sobe: isso indica uma correlação direta positiva entre essas duas variáveis. O tamanho do motor parece um bom preditor de preço, já que a linha de regressão é quase uma linha diagonal perfeita. </p> ``` # Podemos examinar a correlação entre 'engine-size' e 'price' e ver que é aproximadamente 0,87 df[["engine-size", "price"]].corr() ``` <h4> Relação linear Negativa </h4> ``` # city-mpg também pode ser um bom preditor para a variável price: sns.regplot(x="city-mpg", y="price", data=df) ``` <p> À medida que o city-mpg sobe, o preço desce: isso indica uma relação inversa / negativa entre essas duas variáveis, podendo ser um indicador de preço. </p> ``` df[['city-mpg', 'price']].corr() ``` <h4> Relação linear neutra (ou fraca) </h4> ``` sns.regplot(x="peak-rpm", y="price", data=df) ``` <p> A variável peak-rpm não parece ser um bom preditor do preço, pois a linha de regressão está próxima da horizontal. Além disso, os pontos de dados estão muito dispersos e distantes da linha ajustada, apresentando grande variabilidade. Portanto, não é uma variável confiável. 
</p> ``` df[['peak-rpm','price']].corr() ``` <h2> Variáveis categóricas: </h2> <p> Essas são variáveis que descrevem uma 'característica' de uma unidade de dados e são selecionadas a partir de um pequeno grupo de categorias. As variáveis categóricas podem ser do tipo "objeto" ou "int64". Uma boa maneira de visualizar variáveis categóricas é usar boxplots. </p> ``` sns.boxplot(x="body-style", y="price", data=df) ``` Vemos que as distribuições de preço entre as diferentes categorias de body-style têm uma sobreposição significativa e, portanto, body-style não seria um bom preditor de preço. Vamos examinar a "engine-location" e o "price" do motor: ``` sns.boxplot(x="engine-location", y="price", data=df) ``` <p> Aqui, vemos que a distribuição de preço entre essas duas categorias de localização do motor, dianteira e traseira, são distintas o suficiente para considerar a localização do motor como um bom indicador de preço em potencial. </p> ``` # drive-wheels sns.boxplot(x="drive-wheels", y="price", data=df) ``` <p> Aqui vemos que a distribuição de preço entre as diferentes categorias de drive-wheels difere e podem ser um indicador de preço. </p> <h2> Estatística Descritiva </h2> <p> Vamos primeiro dar uma olhada nas variáveis usando um método de descrição. </p> <p> A função <b> describe </b> calcula automaticamente estatísticas básicas para todas as variáveis contínuas. Quaisquer valores NaN são automaticamente ignorados nessas estatísticas. </p> Isso mostrará: <ul> <li> a contagem dessa variável </li> <li> a média </li> <li> o desvio padrão (std) </li> <li> o valor mínimo </li> <li> o IQR (intervalo interquartil: 25%, 50% e 75%) </li> <li> o valor máximo </li> <ul> ``` df.describe() # A configuração padrão de "describe" ignora variáveis do tipo de objeto. # Podemos aplicar o método "describe" nas variáveis do tipo 'objeto' da seguinte forma: df.describe(include=['object']) ``` <h3>Value Counts</h3> A contagem de valores é uma boa maneira de entender quantas unidades de cada característica / variável temos. Podemos aplicar o método "value_counts" na coluna 'drive-wheels'. Não se esqueça que o método "value_counts" só funciona na série Pandas, não nos Dataframes Pandas. Por isso, incluímos apenas um colchete "df ['drive-wheels']" e não dois colchetes "df [['drive-wheels']]". ``` df['drive-wheels'].value_counts() # nós podemos converter a série para um dataframe: df['drive-wheels'].value_counts().to_frame() drive_wheels_counts = df['drive-wheels'].value_counts().to_frame() drive_wheels_counts.rename(columns={'drive-wheels': 'value_counts'}, inplace=True) drive_wheels_counts # vamos renomear o index para 'drive-wheels': drive_wheels_counts.index.name = 'drive-wheels' drive_wheels_counts # repetindo o processo para engine-location engine_loc_counts = df['engine-location'].value_counts().to_frame() engine_loc_counts.rename(columns={'engine-location': 'value_counts'}, inplace=True) engine_loc_counts.index.name = 'engine-location' engine_loc_counts.head() ``` <h2>Agrupando</h2> <p> O método "groupby" agrupa os dados por categorias diferentes. Os dados são agrupados com base em uma ou várias variáveis e a análise é realizada nos grupos individuais. </p> <p> Por exemplo, vamos agrupar pela variável "drive-wheels". Vemos que existem 3 categorias diferentes de rodas motrizes. </p> ``` df['drive-wheels'].unique() ``` <p> Se quisermos saber, em média, qual tipo de drive-wheels é mais valiosa, podemos agrupar "drive-wheels" e depois fazer a média delas. 
</p> <p> Podemos selecionar as colunas 'drive-wheels', 'body-style' e 'price' e, em seguida, atribuí-las à variável "df_group_one". </p> ``` df_group_one = df[['drive-wheels','body-style','price']] # Podemos então calcular o preço médio para cada uma das diferentes categorias de dados df_group_one = df_group_one.groupby(['drive-wheels'],as_index=False).mean() df_group_one ``` <p> Pelos nossos dados, parece que os veículos com tração traseira são, em média, os mais caros, enquanto as 4 rodas e as rodas dianteiras têm preços aproximadamente iguais. </p> <p> Você também pode agrupar com várias variáveis. Por exemplo, vamos agrupar por 'drive-wheels' e 'body-style'. Isso agrupa o dataframe pelas combinações exclusivas 'drive-wheels' e 'body-style'. Podemos armazenar os resultados na variável 'grouped_test1'. </p> ``` df_gptest = df[['drive-wheels','body-style','price']] grouped_test1 = df_gptest.groupby(['drive-wheels','body-style'],as_index=False).mean() grouped_test1 ``` Esses dados agrupados são muito mais fáceis de visualizar quando transformados em uma tabela dinâmica. Uma tabela dinâmica é como uma planilha do Excel, com uma variável ao longo da coluna e outra ao longo da linha. Podemos converter o dataframe em uma tabela dinâmica usando o método "pivô" para criar uma tabela dinâmica a partir dos grupos. Nesse caso, deixaremos a variável da drive-wheels como as linhas da tabela e giraremos no estilo do corpo para se tornar as colunas da tabela: ``` grouped_pivot = grouped_test1.pivot(index='drive-wheels',columns='body-style') grouped_pivot ``` As vezes não teremos dados para algumas das células pivô. Podemos preencher essas células ausentes com o valor 0, mas qualquer outro valor também pode ser usado. Deve ser mencionado que a falta de dados é um assunto bastante complexo... ``` grouped_pivot = grouped_pivot.fillna(0) #fill missing values with 0 grouped_pivot df_gptest2 = df[['body-style','price']] grouped_test_bodystyle = df_gptest2.groupby(['body-style'],as_index= False).mean() grouped_test_bodystyle ``` <h2>Visualização dos dados</h2> Vamos usar um mapa de calor para visualizar a relação entre body-style e price. ``` import matplotlib.pyplot as plt %matplotlib inline plt.pcolor(grouped_pivot, cmap='RdBu') plt.colorbar() plt.show() ``` <p> O mapa de calor representa a variável alvo (price) proporcional à cor em relação às variáveis 'drive-wheels' e 'body-style' nos eixos vertical e horizontal, respectivamente. Isso nos permite visualizar como o preço está relacionado a 'drive-wheels' e 'body-style'. </p> <p> Os rótulos padrão não transmitem informações úteis para nós. Vamos mudar isso: </p> ``` fig, ax = plt.subplots() im = ax.pcolor(grouped_pivot, cmap='RdBu') #label names row_labels = grouped_pivot.columns.levels[1] col_labels = grouped_pivot.index #move ticks and labels to the center ax.set_xticks(np.arange(grouped_pivot.shape[1]) + 0.5, minor=False) ax.set_yticks(np.arange(grouped_pivot.shape[0]) + 0.5, minor=False) #insert labels ax.set_xticklabels(row_labels, minor=False) ax.set_yticklabels(col_labels, minor=False) #rotate label if too long plt.xticks(rotation=90) fig.colorbar(im) plt.show() ``` <p> A visualização é muito importante na ciência de dados e os pacotes de visualização oferecem grande liberdade</p> <p> A principal questão que queremos responder neste notebook é "Quais são as principais características que têm mais impacto no preço do carro?". 
</p> <p> Para obter uma melhor medida das características importantes, olhamos para a correlação dessas variáveis com o preço do carro, em outras palavras: como o preço do carro depende dessa variável? </p> <h2>Correlação e Causalidade</h2> <p> <b> Correlação </b>: uma medida da extensão da interdependência entre as variáveis. </p> <p> <b> Causalidade </b>: a relação entre causa e efeito entre duas variáveis. </p> <p> É importante saber a diferença entre os dois e que a correlação não implica causalidade. Determinar a correlação é muito mais simples do que determinar a causalidade, pois a causalidade pode exigir experimentação independente. </p> <p3> Correlação de Pearson </p> <p> A Correlação de Pearson mede a dependência linear entre duas variáveis X e Y. </p> <p> O coeficiente resultante é um valor entre -1 e 1 inclusive, onde: </p> <ul> <li> <b> 1 </b>: Correlação linear positiva total. </li> <li> <b> 0 </b>: Sem correlação linear, as duas variáveis provavelmente não se afetam. </li> <li> <b> -1 </b>: Correlação linear negativa total. </li> </ul> <p> Correlação de Pearson é o método padrão da função "corr". Como antes, podemos calcular a Correlação de Pearson das variáveis 'int64' ou 'float64'. </p> ``` df.corr() ``` <b> P-value </b>: <p>P-value é o valor da probabilidade de que a correlação entre essas duas variáveis seja estatisticamente significativa. Normalmente, escolhemos um nível de significância de 0.05, o que significa que temos 95% de confiança de que a correlação entre as variáveis é significativa. </p> Por convenção, quando o <ul> <li> o valor de p é $ <$ 0.001: afirmamos que há fortes evidências de que a correlação é significativa. </li> <li> o valor p é $ <$ 0.05: há evidências moderadas de que a correlação é significativa. </li> <li> o valor p é $ <$ 0.1: há evidências fracas de que a correlação é significativa. </li> <li> o valor p é $> $ 0.1: não há evidências de que a correlação seja significativa. </li> </ul> ``` # Podemos obter essas informações usando o módulo "stats" da biblioteca "scipy" from scipy import stats ``` <h3>Wheel-base vs Price</h3> Vamos calcular o coeficiente de correlação de Pearson e o P-value entre 'wheel-base' e 'price'. ``` pearson_coef, p_value = stats.pearsonr(df['wheel-base'], df['price']) print('Coeficiente de Pearson', pearson_coef) print('P-value', p_value) ``` A notacão científica do resultado indica que o valor é muito maior ou muito pequeno. No caso de 8.076488270733218e-20 significa: 8.076488270733218 vezes 10 elevado a menos 20 (o que faz andar a casa decimal 20 vezes para esquerda): 0,0000000000000000008076488270733218 <h5> Conclusão: </h5> <p> Como o P-value é $ <$ 0.001, a correlação entre wheel-base e price é estatisticamente significativa, embora a relação linear não seja extremamente forte (~ 0,585) </p> <h3>Horsepower vs Price</h3> ``` pearson_coef, p_value = stats.pearsonr(df['horsepower'], df['price']) print('Coeficiente de Pearson', pearson_coef) print('P-value', p_value) ``` <h5> Conclusão: </h5> <p> Como o P-value é $ <$ 0,001, a correlação entre a horsepower e price é estatisticamente significativa, e a relação linear é bastante forte (~ 0,809, próximo de 1) </p> <h3>Length vs Price</h3> ``` pearson_coef, p_value = stats.pearsonr(df['length'], df['price']) print('Coeficiente de Pearson', pearson_coef) print('P-value', p_value) ``` <h5> Conclusão: </h5> <p> Como o valor p é $ <$ 0,001, a correlação entre length e price é estatisticamente significativa, e a relação linear é moderadamente forte (~ 0,691). 
<h3>Width vs Price</h3>

```
pearson_coef, p_value = stats.pearsonr(df['width'], df['price'])
print('Pearson coefficient', pearson_coef)
print('P-value', p_value)
```

##### Conclusion:

Since the p-value is < 0.001, the correlation between width and price is statistically significant, and the linear relationship is quite strong (~0.751).

<h2>ANOVA</h2>

<p>
Analysis of Variance (ANOVA) is a statistical method used to test whether there are significant differences between the means of two or more groups. ANOVA returns two parameters:
</p>

<p>
<b>F-test score</b>: ANOVA assumes that the means of all groups are equal, calculates how much the actual means deviate from that assumption, and reports this as the F-test score. A larger score means there is a larger difference between the means.
</p>

<p>
<b>P-value</b>: the P-value tells us how statistically significant our calculated score is.
</p>

<p>
If our price variable is strongly correlated with the variable we are analyzing, we expect ANOVA to return a sizeable F-test score and a small P-value.
</p>

<h3>Drive Wheels</h3>

<p>
Since ANOVA analyzes the difference between different groups of the same variable, the groupby function will come in handy. Because the ANOVA algorithm averages the data automatically, we do not need to take the mean beforehand.
</p>

<p>
Let's see if different types of 'drive-wheels' affect 'price'; we group the data.
</p>

```
grouped_test2=df_gptest[['drive-wheels', 'price']].groupby(['drive-wheels'])
grouped_test2.head(2)

# We can obtain the values of a group using the "get_group" method
grouped_test2.get_group('4wd')['price']

# We can use the 'f_oneway' function from the 'stats' module to obtain the F-test score and the P-value
f_val, p_val = stats.f_oneway(grouped_test2.get_group('fwd')['price'], grouped_test2.get_group('rwd')['price'], grouped_test2.get_group('4wd')['price'])

print( "ANOVA: F=", f_val, ", P =", p_val)
```

This is a great result, with a large F-test score showing a strong correlation and a P-value of almost 0 implying almost certain statistical significance. But does that mean all three tested groups are this highly correlated?

```
#### fwd and rwd
f_val, p_val = stats.f_oneway(grouped_test2.get_group('fwd')['price'], grouped_test2.get_group('rwd')['price'])
print( "ANOVA: F=", f_val, ", P =", p_val )

#### 4wd and rwd
f_val, p_val = stats.f_oneway(grouped_test2.get_group('4wd')['price'], grouped_test2.get_group('rwd')['price'])
print( "ANOVA: F=", f_val, ", P =", p_val)

#### 4wd and fwd
f_val, p_val = stats.f_oneway(grouped_test2.get_group('4wd')['price'], grouped_test2.get_group('fwd')['price'])
print("ANOVA: F=", f_val, ", P =", p_val)
```

<h3>Conclusion</h3>

<p>
We now have a better idea of what our data looks like and which variables are important to take into account when predicting the car price.</p>

<p>
As we move on to building machine learning models to automate our analysis, feeding the model with variables that significantly affect our target variable will improve our model's prediction performance.
</p>

# That's it!

### This is just one example of data analysis with Python

This notebook is part of a series of notebooks with content drawn from courses in which I took part as a student, listener, teacher, or teaching assistant... gathered for future reference and for sharing ideas, solutions and knowledge!

### Thank you very much for reading!
<h4>Anderson Cordeiro</h4>

You can find more content on my Medium<br>
or get in touch with me :D

<a href="https://www.linkedin.com/in/andercordeiro/" target="_blank">[LinkedIn]</a>
<a href="https://medium.com/@andcordeiro" target="_blank">[Medium]</a>
<table class="ee-notebook-buttons" align="left"> <td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/NAIP/ndwi.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td> <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/NAIP/ndwi.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td> <td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=NAIP/ndwi.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/NAIP/ndwi.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td> </table> ## Install Earth Engine API Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`. The following script checks if the geehydro package has been installed. If not, it will install geehydro, which automatically install its dependencies, including earthengine-api and folium. ``` import subprocess try: import geehydro except ImportError: print('geehydro package not installed. Installing ...') subprocess.check_call(["python", '-m', 'pip', 'install', 'geehydro']) ``` Import libraries ``` import ee import folium import geehydro ``` Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. ``` try: ee.Initialize() except Exception as e: ee.Authenticate() ee.Initialize() ``` ## Create an interactive map This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function. The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`. 
``` Map = folium.Map(location=[40, -100], zoom_start=4) Map.setOptions('HYBRID') ``` ## Add Earth Engine Python script ``` collection = ee.ImageCollection('USDA/NAIP/DOQQ') fromFT = ee.FeatureCollection('ft:1CLldB-ULPyULBT2mxoRNv7enckVF0gCQoD2oH7XP') polys = fromFT.geometry() centroid = polys.centroid() lng, lat = centroid.getInfo()['coordinates'] # print("lng = {}, lat = {}".format(lng, lat)) # lng_lat = ee.Geometry.Point(lng, lat) naip = collection.filterBounds(polys) naip_2015 = naip.filterDate('2015-01-01', '2015-12-31') ppr = naip_2015.mosaic().clip(polys) # print(naip_2015.size().getInfo()) # count = 120 vis = {'bands': ['N', 'R', 'G']} Map.setCenter(lng, lat, 10) # Map.addLayer(naip_2015,vis) Map.addLayer(ppr,vis) # Map.addLayer(fromFT) ndwi = ppr.normalizedDifference(['G', 'N']) ndwiViz = {'min': 0, 'max': 1, 'palette': ['00FFFF', '0000FF']} ndwiMasked = ndwi.updateMask(ndwi.gte(0.05)) ndwi_bin = ndwiMasked.gt(0) Map.addLayer(ndwiMasked, ndwiViz) patch_size = ndwi_bin.connectedPixelCount(256, True) # Map.addLayer(patch_size) patch_id = ndwi_bin.connectedComponents(ee.Kernel.plus(1), 256) Map.addLayer(patch_id) ``` ## Display Earth Engine data layers ``` Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True) Map ```
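For reference, `normalizedDifference(['G', 'N'])` computes (G − N) / (G + N). Below is an equivalent band-math sketch, offered only as an illustration and assuming the same `ppr` mosaic and `ndwiViz` parameters defined above:

```
# Illustrative band-math sketch: NDWI = (G - N) / (G + N)
green = ppr.select('G')
nir = ppr.select('N')
ndwi_manual = green.subtract(nir).divide(green.add(nir)).rename('NDWI')
Map.addLayer(ndwi_manual.updateMask(ndwi_manual.gte(0.05)), ndwiViz)
```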
## Release the Kraken! ``` # The next library we're going to look at is called Kraken, which was developed by Université # PSL in Paris. It's actually based on a slightly older code base, OCRopus. You can see how the # flexible open-source licenses allow new ideas to grow by building upon older ideas. And, in # this case, I fully support the idea that the Kraken - a mythical massive sea creature - is the # natural progression of an octopus! # # What we are going to use Kraken for is to detect lines of text as bounding boxes in a given # image. The biggest limitation of tesseract is the lack of a layout engine inside of it. Tesseract # expects to be using fairly clean text, and gets confused if we don't crop out other artifacts. # It's not bad, but Kraken can help us out be segmenting pages. Lets take a look. # First, we'll take a look at the kraken module itself import kraken help(kraken) # There isn't much of a discussion here, but there are a number of sub-modules that look # interesting. I spend a bit of time on their website, and I think the pageseg module, which # handles all of the page segmentation, is the one we want to use. Lets look at it from kraken import pageseg help(pageseg) # So it looks like there are a few different functions we can call, and the segment # function looks particularly appropriate. I love how expressive this library is on the # documentation front -- I can see immediately that we are working with PIL.Image files, # and the author has even indicated that we need to pass in either a binarized (e.g. '1') # or grayscale (e.g. 'L') image. We can also see that the return value is a dictionary # object with two keys, "text_direction" which will return to us a string of the # direction of the text, and "boxes" which appears to be a list of tuples, where each # tuple is a box in the original image. # # Lets try this on the image of text. I have a simple bit of text in a file called # two_col.png which is from a newspaper on campus here from PIL import Image im=Image.open("readonly/two_col.png") # Lets display the image inline display(im) # Lets now convert it to black and white and segment it up into lines with kraken bounding_boxes=pageseg.segment(im.convert('1'))['boxes'] # And lets print those lines to the screen print(bounding_boxes) # Ok, pretty simple two column text and then a list of lists which are the bounding boxes of # lines of that text. Lets write a little routine to try and see the effects a bit more # clearly. I'm going to clean up my act a bit and write real documentation too, it's a good # practice def show_boxes(img): '''Modifies the passed image to show a series of bounding boxes on an image as run by kraken :param img: A PIL.Image object :return img: The modified PIL.Image object ''' # Lets bring in our ImageDraw object from PIL import ImageDraw # And grab a drawing object to annotate that image drawing_object=ImageDraw.Draw(img) # We can create a set of boxes using pageseg.segment bounding_boxes=pageseg.segment(img.convert('1'))['boxes'] # Now lets go through the list of bounding boxes for box in bounding_boxes: # An just draw a nice rectangle drawing_object.rectangle(box, fill = None, outline ='red') # And to make it easy, lets return the image object return img # To test this, lets use display display(show_boxes(Image.open("readonly/two_col.png"))) # Not bad at all! It's interesting to see that kraken isn't completely sure what to do with this # two column format. 
In some cases, kraken has identified a line in just a single column, while # in other cases kraken has spanned the line marker all the way across the page. Does this matter? # Well, it really depends on our goal. In this case, I want to see if we can improve a bit on this. # # So we're going to go a bit off script here. While this week of lectures is about libraries, the # goal of this last course is to give you confidence that you can apply your knowledge to actual # programming tasks, even if the library you are using doesn't quite do what you want. # # I'd like to pause the video for the moment and collect your thoughts. Looking at the image above, # with the two column example and red boxes, how do you think we might modify this image to improve # kraken's ability to text lines? # Thanks for sharing your thoughts, I'm looking forward to seeing the breadth of ideas that everyone # in the course comes up with. Here's my partial solution -- while looking through the kraken docs on # the pageseg() function I saw that there are a few parameters we can supply in order to improve # segmentation. One of these is the black_colseps parameter. If set to True, kraken will assume that # columns will be separated by black lines. This isn't our case here, but, I think we have all of the # tools to go through and actually change the source image to have a black separator between columns. # # The first step is that I want to update the show_boxes() function. I'm just going to do a quick # copy and paste from the above but add in the black_colseps=True parameter def show_boxes(img): '''Modifies the passed image to show a series of bounding boxes on an image as run by kraken :param img: A PIL.Image object :return img: The modified PIL.Image object ''' # Lets bring in our ImageDraw object from PIL import ImageDraw # And grab a drawing object to annotate that image drawing_object=ImageDraw.Draw(img) # We can create a set of boxes using pageseg.segment bounding_boxes=pageseg.segment(img.convert('1'), black_colseps=True)['boxes'] # Now lets go through the list of bounding boxes for box in bounding_boxes: # An just draw a nice rectangle drawing_object.rectangle(box, fill = None, outline ='red') # And to make it easy, lets return the image object return img # The next step is to think of the algorithm we want to apply to detect a white column separator. # In experimenting a bit I decided that I only wanted to add the separator if the space of was # at least 25 pixels wide, which is roughly the width of a character, and six lines high. The # width is easy, lets just make a variable char_width=25 # The height is harder, since it depends on the height of the text. I'm going to write a routine # to calculate the average height of a line def calculate_line_height(img): '''Calculates the average height of a line from a given image :param img: A PIL.Image object :return: The average line height in pixels ''' # Lets get a list of bounding boxes for this image bounding_boxes=pageseg.segment(img.convert('1'))['boxes'] # Each box is a tuple of (top, left, bottom, right) so the height is just top - bottom # So lets just calculate this over the set of all boxes height_accumulator=0 for box in bounding_boxes: height_accumulator=height_accumulator+box[3]-box[1] # this is a bit tricky, remember that we start counting at the upper left corner in PIL! 
# now lets just return the average height # lets change it to the nearest full pixel by making it an integer return int(height_accumulator/len(bounding_boxes)) # And lets test this with the image with have been using line_height=calculate_line_height(Image.open("readonly/two_col.png")) print(line_height) # Ok, so the average height of a line is 31. # Now, we want to scan through the image - looking at each pixel in turn - to determine if there # is a block of whitespace. How bit of a block should we look for? That's a bit more of an art # than a science. Looking at our sample image, I'm going to say an appropriate block should be # one char_width wide, and six line_heights tall. But, I honestly just made this up by eyeballing # the image, so I would encourage you to play with values as you explore. # Lets create a new box called gap box that represents this area gap_box=(0,0,char_width,line_height*6) gap_box # It seems we will want to have a function which, given a pixel in an image, can check to see # if that pixel has whitespace to the right and below it. Essentially, we want to test to see # if the pixel is the upper left corner of something that looks like the gap_box. If so, then # we should insert a line to "break up" this box before sending to kraken # # Lets call this new function gap_check def gap_check(img, location): '''Checks the img in a given (x,y) location to see if it fits the description of a gap_box :param img: A PIL.Image file :param location: A tuple (x,y) which is a pixel location in that image :return: True if that fits the definition of a gap_box, otherwise False ''' # Recall that we can get a pixel using the img.getpixel() function. It returns this value # as a tuple of integers, one for each color channel. Our tools all work with binarized # images (black and white), so we should just get one value. If the value is 0 it's a black # pixel, if it's white then the value should be 255 # # We're going to assume that the image is in the correct mode already, e.g. it has been # binarized. The algorithm to check our bounding box is fairly easy: we have a single location # which is our start and then we want to check all the pixels to the right of that location # up to gap_box[2] for x in range(location[0], location[0]+gap_box[2]): # the height is similar, so lets iterate a y variable to gap_box[3] for y in range(location[1], location[1]+gap_box[3]): # we want to check if the pixel is white, but only if we are still within the image if x < img.width and y < img.height: # if the pixel is white we don't do anything, if it's black, we just want to # finish and return False if img.getpixel((x,y)) != 255: return False # If we have managed to walk all through the gap_box without finding any non-white pixels # then we can return true -- this is a gap! return True # Alright, we have a function to check for a gap, called gap_check. What should we do once # we find a gap? For this, lets just draw a line in the middle of it. Lets create a new function def draw_sep(img,location): '''Draws a line in img in the middle of the gap discovered at location. Note that this doesn't draw the line in location, but draws it at the middle of a gap_box starting at location. 
:param img: A PIL.Image file :param location: A tuple(x,y) which is a pixel location in the image ''' # First lets bring in all of our drawing code from PIL import ImageDraw drawing_object=ImageDraw.Draw(img) # next, lets decide what the middle means in terms of coordinates in the image x1=location[0]+int(gap_box[2]/2) # and our x2 is just the same thing, since this is a one pixel vertical line x2=x1 # our starting y coordinate is just the y coordinate which was passed in, the top of the box y1=location[1] # but we want our final y coordinate to be the bottom of the box y2=y1+gap_box[3] drawing_object.rectangle((x1,y1,x2,y2), fill = 'black', outline ='black') # and we don't have anything we need to return from this, because we modified the image # Now, lets try it all out. This is pretty easy, we can just iterate through each pixel # in the image, check if there is a gap, then insert a line if there is. def process_image(img): '''Takes in an image of text and adds black vertical bars to break up columns :param img: A PIL.Image file :return: A modified PIL.Image file ''' # we'll start with a familiar iteration process for x in range(img.width): for y in range(img.height): # check if there is a gap at this point if (gap_check(img, (x,y))): # then update image to one which has a separator drawn on it draw_sep(img, (x,y)) # and for good measure we'll return the image we modified return img # Lets read in our test image and convert it through binarization i=Image.open("readonly/two_col.png").convert("L") i=process_image(i) display(i) #Note: This will take some time to run! Be patient! # Not bad at all! The effect at the bottom of the image is a bit unexpected to me, but it makes # sense. You can imagine that there are several ways we might try and control this. Lets see how # this new image works when run through the kraken layout engine display(show_boxes(i)) # Looks like that is pretty accurate, and fixes the problem we faced. Feel free to experiment # with different settings for the gap heights and width and share in the forums. You'll notice though # method we created is really quite slow, which is a bit of a problem if we wanted to use # this on larger text. But I wanted to show you how you can mix your own logic and work with # libraries you're using. Just because Kraken didn't work perfectly, doesn't mean we can't # build something more specific to our use case on top of it. # # I want to end this lecture with a pause and to ask you to reflect on the code we've written # here. We started this course with some pretty simple use of libraries, but now we're # digging in deeper and solving problems ourselves with the help of these libraries. Before we # go on to our last library, how well prepared do you think you are to take your python # skills out into the wild? ``` ## Comparing Image Data Structures ``` # OpenCV supports reading of images in most file formats, such as JPEG, PNG, and TIFF. Most image and # video analysis requires converting images into grayscale first. This simplifies the image and reduces # noise allowing for improved analysis. Let's write some code that reads an image of as person, Floyd # Mayweather and converts it into greyscale. # First we will import the open cv package cv2 import cv2 as cv # We'll load the floyd.jpg image img = cv.imread('readonly/floyd.jpg') # And we'll convert it to grayscale using the cvtColor image gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) # Now, before we get to the result, lets talk about docs. 
Just like tesseract, opencv is an external # package written in C++, and the docs for python are really poor. This is unfortunatly quite common # when python is being used as a wrapper. Thankfully, the web docs for opencv are actually pretty good, # so hit the website docs.opencv.org when you want to learn more about a particular function. In this # case cvtColor converts from one color space to another, and we are convering our image to grayscale. # Of course, we already know at least two different ways of doing this, using binarization and PIL # color spaces conversions # Lets instpec this object that has been returned. import inspect inspect.getmro(type(gray)) # We see that it is of type ndarray, which is a fundamental list type coming from the numerical # python project. That's a bit surprising - up until this point we have been used to working with # PIL.Image objects. OpenCV, however, wants to represent an image as a two dimensional sequence # of bytes, and the ndarray, which stands for n dimensional array, is the ideal way to do this. # Lets look at the array contents. gray # The array is shown here as a list of lists, where the inner lists are filled with integers. # The dtype=uint8 definition indicates that each of the items in an array is an 8 bit unsigned # integer, which is very common for black and white images. So this is a pixel by pixel definition # of the image. # # The display package, however, doesn't know what to do with this image. So lets convert it # into a PIL object to render it in the browser. from PIL import Image # PIL can take an array of data with a given color format and convert this into a PIL object. # This is perfect for our situation, as the PIL color mode, "L" is just an array of luminance # values in unsigned integers image = Image.fromarray(gray, "L") display(image) # Lets talk a bit more about images for a moment. Numpy arrays are multidimensional. For # instance, we can define an array in a single dimension: import numpy as np single_dim = np.array([25, 50 , 25, 10, 10]) # In an image, this is analagous to a single row of 5 pixels each in grayscale. But actually, # all imaging libraries tend to expect at least two dimensions, a width and a height, and to # show a matrix. So if we put the single_dim inside of another array, this would be a two # dimensional array with element in the height direction, and five in the width direction double_dim = np.array([single_dim]) double_dim # This should look pretty familiar, it's a lot like a list of lists! Lets see what this new # two dimensional array looks like if we display it display(Image.fromarray(double_dim, "L")) # Pretty unexciting - it's just a little line. Five pixels in a row to be exact, of different # levels of black. The numpy library has a nice attribute called shape that allows us to see how # many dimensions big an array is. The shape attribute returns a tuple that shows the height of # the image, by the width of the image double_dim.shape # Lets take a look at the shape of our initial image which we loaded into the img variable img.shape # This image has three dimensions! That's because it has a width, a height, and what's called # a color depth. In this case, the color is represented as an array of three values. Lets take a # look at the color of the first pixel first_pixel=img[0][0] first_pixel # Here we see that the color value is provided in full RGB using an unsigned integer. 
This # means that each color can have one of 256 values, and the total number of unique colors # that can be represented by this data is 256 * 256 *256 which is roughly 16 million colors. # We call this 24 bit color, which is 8+8+8. # # If you find yourself shopping for a television, you might notice that some expensive models # are advertised as having 10 bit or even 12 bit panels. These are televisions where each of # the red, green, and blue color channels are represented by 10 or 12 bits instead of 8. For # ten bit panels this means that there are 1 billion colors capable, and 12 bit panels are # capable of over 68 billion colors! # We're not going to talk much more about color in this course, but it's a fun subject. Instead, # lets go back to this array representation of images, because we can do some interesting things # with this. # # One of the most common things to do with an ndarray is to reshape it -- to change the number # of rows and columns that are represented so that we can do different kinds of operations. # Here is our original two dimensional image print("Original image") print(gray) # If we wanted to represent that as a one dimensional image, we just call reshape print("New image") # And reshape takes the image as the first parameter, and a new shape as the second image1d=np.reshape(gray,(1,gray.shape[0]*gray.shape[1])) print(image1d) # So, why are we talking about these nested arrays of bytes, we were supposed to be talking # about OpenCV as a library. Well, I wanted to show you that often libraries working on the # same kind of principles, in this case images stored as arrays of bytes, are not representing # data in the same way in their APIs. But, by exploring a bit you can learn how the internal # representation of data is stored, and build routines to convert between formats. # # For instance, remember in the last lecture when we wanted to look for gaps in an image so # that we could draw lines to feed into kraken? Well, we use PIL to do this, using getpixel() # to look at individual pixels and see what the luminosity was, then ImageDraw.rectangle to # actually fill in a black bar separator. This was a nice high level API, and let us write # routines to do the work we wanted without having to understand too much about how the images # were being stored. But it was computationally very slow. # # Instead, we could write the code to do this using matrix features within numpy. Lets take # a look. import cv2 as cv # We'll load the 2 column image img = cv.imread('readonly/two_col.png') # And we'll convert it to grayscale using the cvtColor image gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) # Now, remember how slicing on a list works, if you have a list of number such as # a=[0,1,2,3,4,5] then a[2:4] will return the sublist of numbers at position 2 through 4 # inclusive - don't forget that lists start indexing at 0! # If we have a two dimensional array, we can slice out a smaller piece of that using the # format a[2:4,1:3]. You can think of this as first slicing along the rows dimension, then # in the columns dimension. So in this example, that would be a matrix of rows 2, and 3, # and columns 1, and 2. Here's a look at our image. gray[2:4,1:3] # So we see that it is all white. We can use this as a "window" and move it around our # our big image. # # Finally, the ndarray library has lots of matrix functions which are generally very fast # to run. 
One that we want to consider in this case is count_nonzero(), which just returns # the number of entries in the matrix which are not zero. np.count_nonzero(gray[2:4,1:3]) # Ok, the last benefit of going to this low level approach to images is that we can change # pixels very fast as well. Previously we were drawing rectangles and setting a fill and line # width. This is nice if you want to do something like change the color of the fill from the # line, or draw complex shapes. But we really just want a line here. That's really easy to # do - we just want to change a number of luminosity values from 255 to 0. # # As an example, lets create a big white matrix white_matrix=np.full((12,12),255,dtype=np.uint8) display(Image.fromarray(white_matrix,"L")) white_matrix # looks pretty boring, it's just a giant white square we can't see. But if we want, we can # easily color a column to be black white_matrix[:,6]=np.full((1,12),0,dtype=np.uint8) display(Image.fromarray(white_matrix,"L")) white_matrix # And that's exactly what we wanted to do. So, why do it this way, when it seems so much # more low level? Really, the answer is speed. This paradigm of using matricies to store # and manipulate bytes of data for images is much closer to how low level API and hardware # developers think about storing files and bytes in memory. # # How much faster is it? Well, that's up to you to discover; there's an optional assignment # for this week to convert our old code over into this new format, to compare both the # readability and speed of the two different approaches. ``` ## OpenCV ``` # Ok, we're just about at the project for this course. If you reflect on the specialization # as a whole you'll realize that you started with probably little or no understanding of python, # progressed through the basic control structures and libraries included with the language # with the help of a digital textbook, moved on to more high level representations of data # and functions with objects, and now started to explore third party libraries that exist for # python which allow you to manipulate and display images. This is quite an achievement! # # You have also no doubt found that as you have progressed the demands on you to engage in self- # discovery have also increased. Where the first assignments were maybe straight forward, the # ones in this week require you to struggle a bit more with planning and debugging code as # you develop. # # But, you've persisted, and I'd like to share with you just one more set of features before # we head over to a project. The OpenCV library contains mechanisms to do face detection on # images. The technique used is based on Haar cascades, which is a machine learning approach. # Now, we're not going to go into the machine learning bits, we have another specialization on # Applied Data Science with Python which you can take after this if you're interested in that topic. # But here we'll treat OpenCV like a black box. # # OpenCV comes with trained models for detecting faces, eyes, and smiles which we'll be using. # You can train models for detecting other things - like hot dogs or flutes - and if you're # interested in that I'd recommend you check out the Open CV docs on how to train a cascade # classifier: https://docs.opencv.org/3.4/dc/d88/tutorial_traincascade.html # However, in this lecture we just want to use the current classifiers and see if we can detect # portions of an image which are interesting. 
# # First step is to load opencv and the XML-based classifiers import cv2 as cv face_cascade = cv.CascadeClassifier('readonly/haarcascade_frontalface_default.xml') eye_cascade = cv.CascadeClassifier('readonly/haarcascade_eye.xml') # Ok, with the classifiers loaded, we now want to try and detect a face. Lets pull in the # picture we played with last time img = cv.imread('readonly/floyd.jpg') # And we'll convert it to grayscale using the cvtColor image gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) # The next step is to use the face_cascade classifier. I'll let you go explore the docs if you # would like to, but the norm is to use the detectMultiScale() function. This function returns # a list of objects as rectangles. The first parameter is an ndarray of the image. faces = face_cascade.detectMultiScale(gray) # And lets just print those faces out to the screen faces faces.tolist()[0] # The resulting rectangles are in the format of (x,y,w,h) where x and y denote the upper # left hand point for the image and the width and height represent the bounding box. We know # how to handle this in PIL from PIL import Image # Lets create a PIL image object pil_img=Image.fromarray(gray,mode="L") # Now lets bring in our drawing object from PIL import ImageDraw # And lets create our drawing context drawing=ImageDraw.Draw(pil_img) # Now lets pull the rectangle out of the faces object rec=faces.tolist()[0] # Now we just draw a rectangle around the bounds drawing.rectangle(rec, outline="white") # And display display(pil_img) # So, not quite what we were looking for. What do you think went wrong? # Well, a quick double check of the docs and it is apparent that OpenCV is return the coordinates # as (x,y,w,h), while PIL.ImageDraw is looking for (x1,y1,x2,y2). Looks like an easy fix # Wipe our old image pil_img=Image.fromarray(gray,mode="L") # Setup our drawing context drawing=ImageDraw.Draw(pil_img) # And draw the new box drawing.rectangle((rec[0],rec[1],rec[0]+rec[2],rec[1]+rec[3]), outline="white") # And display display(pil_img) # We see the face detection works pretty good on this image! Note that it's apparent that this is # not head detection, but that the haarcascades file we used is looking for eyes and a mouth. # Lets try this on something a bit more complex, lets read in our MSI recruitment image img = cv.imread('readonly/msi_recruitment.gif') # And lets take a look at that image display(Image.fromarray(img)) # Whoa, what's that error about? It looks like there is an error on a line deep within the PIL # Image.py file, and it is trying to call an internal private member called __array_interface__ # on the img object, but this object is None # # It turns out that the root of this error is that OpenCV can't work with Gif images. This is # kind of a pain and unfortunate. But we know how to fix that right? One was is that we could # just open this in PIL and then save it as a png, then open that in open cv. 
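# (Aside, not part of the original lecture.) Another possible sketch: OpenCV really
# just needs a numpy array, so we could skip the disk round-trip entirely and hand
# it the converted PIL image directly
import numpy as np
direct_array = np.array(Image.open('readonly/msi_recruitment.gif').convert('L'))
direct_faces = face_cascade.detectMultiScale(direct_array)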
# # Lets use PIL to open our image pil_img=Image.open('readonly/msi_recruitment.gif') # now lets convert it to greyscale for opencv, and get the bytestream open_cv_version=pil_img.convert("L") # now lets just write that to a file open_cv_version.save("msi_recruitment.png") # Ok, now that the conversion of format is done, lets try reading this back into opencv cv_img=cv.imread('msi_recruitment.png') # We don't need to color convert this, because we saved it as grayscale # lets try and detect faces in that image faces = face_cascade.detectMultiScale(cv_img) # Now, we still have our PIL color version in a gif pil_img=Image.open('readonly/msi_recruitment.gif') # Set our drawing context drawing=ImageDraw.Draw(pil_img) # For each item in faces, lets surround it with a red box for x,y,w,h in faces: # That might be new syntax for you! Recall that faces is a list of rectangles in (x,y,w,h) # format, that is, a list of lists. Instead of having to do an iteration and then manually # pull out each item, we can use tuple unpacking to pull out individual items in the sublist # directly to variables. A really nice python feature # # Now we just need to draw our box drawing.rectangle((x,y,x+w,y+h), outline="white") display(pil_img) # What happened here!? We see that we have detected faces, and that we have drawn boxes # around those faces on the image, but that the colors have gone all weird! This, it turns # out, has to do with color limitations for gif images. In short, a gif image has a very # limited number of colors. This is called a color pallette after the pallette artists # use to mix paints. For gifs the pallette can only be 256 colors -- but they can be *any* # 256 colors. When a new color is introduced, is has to take the space of an old color. # In this case, PIL adds white to the pallette but doesn't know which color to replace and # thus messes up the image. # # Who knew there was so much to learn about image formats? We can see what mode the image # is in with the .mode attribute pil_img.mode # We can see a list of modes in the PILLOW documentation, and they correspond with the # color spaces we have been using. For the moment though, lets change back to RGB, which # represents color as a three byte tuple instead of in a pallette. # Lets read in the image pil_img=Image.open('readonly/msi_recruitment.gif') # Lets convert it to RGB mode pil_img = pil_img.convert("RGB") # And lets print out the mode pil_img.mode # Ok, now lets go back to drawing rectangles. Lets get our drawing object drawing=ImageDraw.Draw(pil_img) # And iterate through the faces sequence, tuple unpacking as we go for x,y,w,h in faces: # And remember this is width and height so we have to add those appropriately. drawing.rectangle((x,y,x+w,y+h), outline="white") display(pil_img) # Awesome! We managed to detect a bunch of faces in that image. Looks like we have missed # four faces. In the machine learning world we would call these false negatives - something # which the machine thought was not a face (so a negative), but that it was incorrect on. # Consequently, we would call the actual faces that were detected as true positives - # something that the machine thought was a face and it was correct on. This leaves us with # false positives - something the machine thought was a face but it wasn't. We see there are # two of these in the image, picking up shadow patterns or textures in shirts and matching # them with the haarcascades. 
Finally, we have true negatives, or the set of all possible # rectangles the machine learning classifier could consider where it correctly indicated that # the result was not a face. In this case there are many many true negatives. # There are a few ways we could try and improve this, and really, it requires a lot of # experimentation to find good values for a given image. First, lets create a function # which will plot rectanges for us over the image def show_rects(faces): #Lets read in our gif and convert it pil_img=Image.open('readonly/msi_recruitment.gif').convert("RGB") # Set our drawing context drawing=ImageDraw.Draw(pil_img) # And plot all of the rectangles in faces for x,y,w,h in faces: drawing.rectangle((x,y,x+w,y+h), outline="white") #Finally lets display this display(pil_img) # Ok, first up, we could try and binarize this image. It turns out that opencv has a built in # binarization function called threshold(). You simply pass in the image, the midpoint, and # the maximum value, as well as a flag which indicates whether the threshold should be # binary or something else. Lets try this. cv_img_bin=cv.threshold(img,120,255,cv.THRESH_BINARY)[1] # returns a list, we want the second value # Now do the actual face detection faces = face_cascade.detectMultiScale(cv_img_bin) # Now lets see the results show_rects(faces) # That's kind of interesting. Not better, but we do see that there is one false positive # towards the bottom, where the classifier detected the sunglasses as eyes and the dark shadow # line below as a mouth. # # If you're following in the notebook with this video, why don't you pause things and try a # few different parameters for the thresholding value? # The detectMultiScale() function from OpenCV also has a couple of parameters. The first of # these is the scale factor. The scale factor changes the size of rectangles which are # considered against the model, that is, the haarcascades XML file. You can think of it as if # it were changing the size of the rectangles which are on the screen. # # Lets experiment with the scale factor. Usually it's a small value, lets try 1.05 faces = face_cascade.detectMultiScale(cv_img,1.05) # Show those results show_rects(faces) # Now lets also try 1.15 faces = face_cascade.detectMultiScale(cv_img,1.15) # Show those results show_rects(faces) # Finally lets also try 1.25 faces = face_cascade.detectMultiScale(cv_img,1.25) # Show those results show_rects(faces) # We can see that as we change the scale factor we change the number of true and # false positives and negatives. With the scale set to 1.05, we have 7 true positives, # which are correctly identified faces, and 3 false negatives, which are faces which # are there but not detected, and 3 false positives, where are non-faces which # opencv thinks are faces. When we change this to 1.15 we lose the false positives but # also lose one of the true positives, the person to the right wearing a hat. And # when we change this to 1.25 we lost more true positives as well. # # This is actually a really interesting phenomena in machine learning and artificial # intelligence. There is a trade off between not only how accurate a model is, but how # the inaccuracy actually happens. Which of these three models do you think is best? # Well, the answer to that question is really, "it depends". It depends why you are trying # to detect faces, and what you are going to do with them. 
If you think these issues # are interesting, you might want to check out the Applied Data Science with Python # specialization Michigan offers on Coursera. # # Ok, beyond an opportunity to advertise, did you notice anything else that happened when # we changed the scale factor? It's subtle, but the speed at which the processing ran # took longer at smaller scale factors. This is because more subimages are being considered # for these scales. This could also affect which method we might use. # # Jupyter has nice support for timing commands. You might have seen this before, a line # that starts with a percentage sign in jupyter is called a "magic function". This isn't # normal python - it's actually a shorthand way of writing a function which Jupyter # has predefined. It looks a lot like the decorators we talked about in a previous # lecture, but the magic functions were around long before decorators were part of the # python language. One of the built-in magic functions in juptyer is called timeit, and this # repeats a piece of python ten times (by default) and tells you the average speed it # took to complete. # # Lets time the speed of detectmultiscale when using a scale of 1.05 %timeit face_cascade.detectMultiScale(cv_img,1.05) # Ok, now lets compare that to the speed at scale = 1.15 %timeit face_cascade.detectMultiScale(cv_img,1.15) # You can see that this is a dramatic difference, roughly two and a half times slower # when using the smaller scale! # # This wraps up our discussion of detecting faces in opencv. You'll see that, like OCR, this # is not a foolproof process. But we can build on the work others have done in machine learning # and leverage powerful libraries to bring us closer to building a turn key python-based # solution. Remember that the detection mechanism isn't specific to faces, that's just the # haarcascades training data we used. On the web you'll be able to find other training data # to detect other objects, including eyes, animals, and so forth. ``` ## More Jupyter Widgets ``` # One of the nice things about using the Jupyter notebook systems is that there is a # rich set of contributed plugins that seek to extend this system. In this lecture I # want to introduce you to one such plugin, call ipy web rtc. Webrtc is a fairly new # protocol for real time communication on the web. Yup, I'm talking about chatting. # The widget brings this to the Jupyter notebook system. Lets take a look. # # First, lets import from this library two different classes which we'll use in a # demo, one for the camera and one for images. from ipywebrtc import CameraStream, ImageRecorder # Then lets take a look at the camera stream object help(CameraStream) # We see from the docs that it's east to get a camera facing the user, and we can have # the audio on or off. We don't need audio for this demo, so lets create a new camera # instance camera = CameraStream.facing_user(audio=False) # The next object we want to look at is the ImageRecorder help(ImageRecorder) # The image recorder lets us actually grab images from the camera stream. There are features # for downloading and using the image as well. We see that the default format is a png file. # Lets hook up the ImageRecorder to our stream image_recorder = ImageRecorder(stream=camera) # Now, the docs are a little unclear how to use this within Jupyter, but if we call the # download() function it will actually store the results of the camera which is hooked up # in image_recorder.image. 
Lets try it out # First, lets tell the recorder to start capturing data image_recorder.recording=True # Now lets download the image image_recorder.download() # Then lets inspect the type of the image type(image_recorder.image) # Ok, the object that it stores is an ipywidgets.widgets.widget_media.Image. How do we do # something useful with this? Well, an inspection of the object shows that there is a handy # value field which actually holds the bytes behind the image. And we know how to display # those. # Lets import PIL Image import PIL.Image # And lets import io import io # And now lets create a PIL image from the bytes img = PIL.Image.open(io.BytesIO(image_recorder.image.value)) # And render it to the screen display(img) # Great, you see a picture! Hopefully you are following along in one of the notebooks # and have been able to try this out for yourself! # # What can you do with this? This is a great way to get started with a bit of computer vision. # You already know how to identify a face in the webcam picture, or try and capture text # from within the picture. With OpenCV there are any number of other things you can do, simply # with a webcam, the Jupyter notebooks, and python! ```
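As a closing illustration, the webcam capture above can be combined with the face detection from the OpenCV lecture. This is only a sketch, and it assumes the earlier cells have already run in this kernel so that `image_recorder` holds a frame and `face_cascade` is loaded:

```
import io
import numpy as np
import PIL.Image

# Decode the captured webcam frame and run it through the Haar cascade classifier
frame = PIL.Image.open(io.BytesIO(image_recorder.image.value)).convert("L")
detected = face_cascade.detectMultiScale(np.array(frame))
print("Faces detected in the webcam frame:", len(detected))
```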
``` # @title Copyright & License (click to expand) # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Vertex Model Monitoring <table align="left"> <td> <a href="https://console.cloud.google.com/ai-platform/notebooks/deploy-notebook?name=Model%20Monitoring&download_url=https%3A%2F%2Fraw.githubusercontent.com%2FGoogleCloudPlatform%2Fai-platform-samples%2Fmaster%2Fai-platform-unified%2Fnotebooks%2Fofficial%2Fmodel_monitoring%2Fmodel_monitoring.ipynb"> <img src="https://cloud.google.com/images/products/ai/ai-solutions-icon.svg" alt="Google Cloud Notebooks"> Open in GCP Notebooks </a> </td> <td> <a href="https://colab.research.google.com/github/GoogleCloudPlatform/ai-platform-samples/blob/master/ai-platform-unified/notebooks/official/model_monitoring/model_monitoring.ipynb"> <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Open in Colab </a> </td> <td> <a href="https://github.com/GoogleCloudPlatform/ai-platform-samples/blob/master/ai-platform-unified/notebooks/official/model_monitoring/model_monitoring.ipynb"> <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo"> View on GitHub </a> </td> </table> ## Overview ### What is Model Monitoring? Modern applications rely on a well established set of capabilities to monitor the health of their services. Examples include: * software versioning * rigorous deployment processes * event logging * alerting/notication of situations requiring intervention * on-demand and automated diagnostic tracing * automated performance and functional testing You should be able to manage your ML services with the same degree of power and flexibility with which you can manage your applications. That's what MLOps is all about - managing ML services with the best practices Google and the broader computing industry have learned from generations of experience deploying well engineered, reliable, and scalable services. Model monitoring is only one piece of the ML Ops puzzle - it helps answer the following questions: * How well do recent service requests match the training data used to build your model? This is called **training-serving skew**. * How significantly are service requests evolving over time? This is called **drift detection**. If production traffic differs from training data, or varies substantially over time, that's likely to impact the quality of the answers your model produces. When that happens, you'd like to be alerted automatically and responsively, so that **you can anticipate problems before they affect your customer experiences or your revenue streams**. ### Objective In this notebook, you will learn how to... 
* deploy a pre-trained model * configure model monitoring * generate some artificial traffic * understand how to interpret the statistics, visualizations, other data reported by the model monitoring feature ### Costs This tutorial uses billable components of Google Cloud: * Vertex AI * BigQuery Learn about [Vertex AI pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage pricing](https://cloud.google.com/storage/pricing), and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage. ### The example model The model you'll use in this notebook is based on [this blog post](https://cloud.google.com/blog/topics/developers-practitioners/churn-prediction-game-developers-using-google-analytics-4-ga4-and-bigquery-ml). The idea behind this model is that your company has extensive log data describing how your game users have interacted with the site. The raw data contains the following categories of information: - identity - unique player identitity numbers - demographic features - information about the player, such as the geographic region in which a player is located - behavioral features - counts of the number of times a player has triggered certain game events, such as reaching a new level - churn propensity - this is the label or target feature, it provides an estimated probability that this player will churn, i.e. stop being an active player. The blog article referenced above explains how to use BigQuery to store the raw data, pre-process it for use in machine learning, and train a model. Because this notebook focuses on model monitoring, rather than training models, you're going to reuse a pre-trained version of this model, which has been exported to Google Cloud Storage. In the next section, you will setup your environment and import this model into your own project. ## Before you begin ### Setup your dependencies ``` import os import sys assert sys.version_info.major == 3, "This notebook requires Python 3." # Google Cloud Notebook requires dependencies to be installed with '--user' IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version") USER_FLAG = "" if IS_GOOGLE_CLOUD_NOTEBOOK: USER_FLAG = "--user" if 'google.colab' in sys.modules: from google.colab import auth auth.authenticate_user() # Install Python package dependencies. ! pip3 install {USER_FLAG} --quiet --upgrade google-api-python-client google-auth-oauthlib \ google-auth-httplib2 oauth2client requests \ google-cloud-aiplatform google-cloud-storage==1.32.0 if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) ``` ### Set up your Google Cloud project **The following steps are required, regardless of your notebook environment.** 1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs. 1. Enter your project id in the first line of the cell below. 1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project). 1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk). 1. You'll use the *gcloud* command throughout this notebook. 
In the following cell, enter your project name and run the cell to authenticate yourself with the Google Cloud and initialize your *gcloud* configuration settings. **Model monitoring is currently supported in regions us-central1, europe-west4, asia-east1, and asia-southeast1. To keep things simple for this lab, we're going to use region us-central1 for all our resources (BigQuery training data, Cloud Storage bucket, model and endpoint locations, etc.). You can use any supported region, so long as all resources are co-located.** ``` # Import globally needed dependencies here, after kernel restart. import copy import numpy as np import os import pprint as pp import random import sys import time PROJECT_ID = "[your-project-id]" # @param {type:"string"} REGION = "us-central1" # @param {type:"string"} SUFFIX = "aiplatform.googleapis.com" API_ENDPOINT = f"{REGION}-{SUFFIX}" PREDICT_API_ENDPOINT = f"{REGION}-prediction-{SUFFIX}" if os.getenv("IS_TESTING"): !gcloud --quiet components install beta !gcloud --quiet components update !gcloud config set project $PROJECT_ID !gcloud config set ai/region $REGION ``` ### Login to your Google Cloud account and enable AI services ``` # The Google Cloud Notebook product has specific requirements IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version") # If on Google Cloud Notebooks, then don't execute this code if not IS_GOOGLE_CLOUD_NOTEBOOK: if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. elif not os.getenv("IS_TESTING"): %env GOOGLE_APPLICATION_CREDENTIALS '' !gcloud services enable aiplatform.googleapis.com ``` ### Define utilities Run the following cells to define some utility functions and distributions used later in this notebook. Although these utilities are not critical to understand the main concepts, feel free to expand the cells in this section if you're curious or want to dive deeper into how some of your API requests are made. ``` # @title Utility imports and constants from google.cloud.aiplatform_v1beta1.services.endpoint_service import \ EndpointServiceClient from google.cloud.aiplatform_v1beta1.services.job_service import \ JobServiceClient from google.cloud.aiplatform_v1beta1.services.prediction_service import \ PredictionServiceClient from google.cloud.aiplatform_v1beta1.types.io import BigQuerySource from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ( ModelDeploymentMonitoringJob, ModelDeploymentMonitoringObjectiveConfig, ModelDeploymentMonitoringScheduleConfig) from google.cloud.aiplatform_v1beta1.types.model_monitoring import ( ModelMonitoringAlertConfig, ModelMonitoringObjectiveConfig, SamplingStrategy, ThresholdConfig) from google.cloud.aiplatform_v1beta1.types.prediction_service import \ PredictRequest from google.protobuf import json_format from google.protobuf.duration_pb2 import Duration from google.protobuf.struct_pb2 import Value # This is the default value at which you would like the monitoring function to trigger an alert. # In other words, this value fine tunes the alerting sensitivity. This threshold can be customized # on a per feature basis but this is the global default setting. DEFAULT_THRESHOLD_VALUE = 0.001 # @title Utility functions def create_monitoring_job(objective_configs): # Create sampling configuration. 
random_sampling = SamplingStrategy.RandomSampleConfig(sample_rate=LOG_SAMPLE_RATE) sampling_config = SamplingStrategy(random_sample_config=random_sampling) # Create schedule configuration. duration = Duration(seconds=MONITOR_INTERVAL) schedule_config = ModelDeploymentMonitoringScheduleConfig(monitor_interval=duration) # Create alerting configuration. emails = [USER_EMAIL] email_config = ModelMonitoringAlertConfig.EmailAlertConfig(user_emails=emails) alerting_config = ModelMonitoringAlertConfig(email_alert_config=email_config) # Create the monitoring job. endpoint = f"projects/{PROJECT_ID}/locations/{REGION}/endpoints/{ENDPOINT_ID}" predict_schema = "" analysis_schema = "" job = ModelDeploymentMonitoringJob( display_name=JOB_NAME, endpoint=endpoint, model_deployment_monitoring_objective_configs=objective_configs, logging_sampling_strategy=sampling_config, model_deployment_monitoring_schedule_config=schedule_config, model_monitoring_alert_config=alerting_config, predict_instance_schema_uri=predict_schema, analysis_instance_schema_uri=analysis_schema, ) options = dict(api_endpoint=API_ENDPOINT) client = JobServiceClient(client_options=options) parent = f"projects/{PROJECT_ID}/locations/{REGION}" response = client.create_model_deployment_monitoring_job( parent=parent, model_deployment_monitoring_job=job ) print("Created monitoring job:") print(response) return response def get_thresholds(default_thresholds, custom_thresholds): thresholds = {} default_threshold = ThresholdConfig(value=DEFAULT_THRESHOLD_VALUE) for feature in default_thresholds.split(","): feature = feature.strip() thresholds[feature] = default_threshold for custom_threshold in custom_thresholds.split(","): pair = custom_threshold.split(":") if len(pair) != 2: print(f"Invalid custom skew threshold: {custom_threshold}") return feature, value = pair thresholds[feature] = ThresholdConfig(value=float(value)) return thresholds def get_deployed_model_ids(endpoint_id): client_options = dict(api_endpoint=API_ENDPOINT) client = EndpointServiceClient(client_options=client_options) parent = f"projects/{PROJECT_ID}/locations/{REGION}" response = client.get_endpoint(name=f"{parent}/endpoints/{endpoint_id}") model_ids = [] for model in response.deployed_models: model_ids.append(model.id) return model_ids def set_objectives(model_ids, objective_template): # Use the same objective config for all models. 
objective_configs = [] for model_id in model_ids: objective_config = copy.deepcopy(objective_template) objective_config.deployed_model_id = model_id objective_configs.append(objective_config) return objective_configs def send_predict_request(endpoint, input): client_options = {"api_endpoint": PREDICT_API_ENDPOINT} client = PredictionServiceClient(client_options=client_options) params = {} params = json_format.ParseDict(params, Value()) request = PredictRequest(endpoint=endpoint, parameters=params) inputs = [json_format.ParseDict(input, Value())] request.instances.extend(inputs) response = client.predict(request) return response def list_monitoring_jobs(): client_options = dict(api_endpoint=API_ENDPOINT) parent = f"projects/{PROJECT_ID}/locations/us-central1" client = JobServiceClient(client_options=client_options) response = client.list_model_deployment_monitoring_jobs(parent=parent) print(response) def pause_monitoring_job(job): client_options = dict(api_endpoint=API_ENDPOINT) client = JobServiceClient(client_options=client_options) response = client.pause_model_deployment_monitoring_job(name=job) print(response) def delete_monitoring_job(job): client_options = dict(api_endpoint=API_ENDPOINT) client = JobServiceClient(client_options=client_options) response = client.delete_model_deployment_monitoring_job(name=job) print(response) # @title Utility distributions # This cell containers parameters enabling us to generate realistic test data that closely # models the feature distributions found in the training data. DAYOFWEEK = {1: 1040, 2: 1223, 3: 1352, 4: 1217, 5: 1078, 6: 1011, 7: 1110} LANGUAGE = { "en-us": 4807, "en-gb": 678, "ja-jp": 419, "en-au": 310, "en-ca": 299, "de-de": 147, "en-in": 130, "en": 127, "fr-fr": 94, "pt-br": 81, "es-us": 65, "zh-tw": 64, "zh-hans-cn": 55, "es-mx": 53, "nl-nl": 37, "fr-ca": 34, "en-za": 29, "vi-vn": 29, "en-nz": 29, "es-es": 25, } OS = {"IOS": 3980, "ANDROID": 3798, "null": 253} MONTH = {6: 3125, 7: 1838, 8: 1276, 9: 1718, 10: 74} COUNTRY = { "United States": 4395, "India": 486, "Japan": 450, "Canada": 354, "Australia": 327, "United Kingdom": 303, "Germany": 144, "Mexico": 102, "France": 97, "Brazil": 93, "Taiwan": 72, "China": 65, "Saudi Arabia": 49, "Pakistan": 48, "Egypt": 46, "Netherlands": 45, "Vietnam": 42, "Philippines": 39, "South Africa": 38, } # Means and standard deviations for numerical features... MEAN_SD = { "julianday": (204.6, 34.7), "cnt_user_engagement": (30.8, 53.2), "cnt_level_start_quickplay": (7.8, 28.9), "cnt_level_end_quickplay": (5.0, 16.4), "cnt_level_complete_quickplay": (2.1, 9.9), "cnt_level_reset_quickplay": (2.0, 19.6), "cnt_post_score": (4.9, 13.8), "cnt_spend_virtual_currency": (0.4, 1.8), "cnt_ad_reward": (0.1, 0.6), "cnt_challenge_a_friend": (0.0, 0.3), "cnt_completed_5_levels": (0.1, 0.4), "cnt_use_extra_steps": (0.4, 1.7), } DEFAULT_INPUT = { "cnt_ad_reward": 0, "cnt_challenge_a_friend": 0, "cnt_completed_5_levels": 1, "cnt_level_complete_quickplay": 3, "cnt_level_end_quickplay": 5, "cnt_level_reset_quickplay": 2, "cnt_level_start_quickplay": 6, "cnt_post_score": 34, "cnt_spend_virtual_currency": 0, "cnt_use_extra_steps": 0, "cnt_user_engagement": 120, "country": "Denmark", "dayofweek": 3, "julianday": 254, "language": "da-dk", "month": 9, "operating_system": "IOS", "user_pseudo_id": "104B0770BAE16E8B53DF330C95881893", } ``` ## Import your model The churn propensity model you'll be using in this notebook has been trained in BigQuery ML and exported to a Google Cloud Storage bucket. 
This illustrates how you can easily export a trained model and move it from one cloud service to another. Run the next cell to import this model into your project. **If you've already imported your model, you can skip this step.** ``` MODEL_NAME = "churn" IMAGE = "us-docker.pkg.dev/cloud-aiplatform/prediction/tf2-cpu.2-4:latest" ARTIFACT = "gs://mco-mm/churn" output = !gcloud --quiet beta ai models upload --container-image-uri=$IMAGE --artifact-uri=$ARTIFACT --display-name=$MODEL_NAME --format="value(model)" MODEL_ID = output[1].split("/")[-1] if _exit_code == 0: print(f"Model {MODEL_NAME}/{MODEL_ID} created.") else: print(f"Error creating model: {output}") ``` ## Deploy your endpoint Now that you've imported your model into your project, you need to create an endpoint to serve your model. An endpoint can be thought of as a channel through which your model provides prediction services. Once established, you'll be able to make prediction requests on your model via the public internet. Your endpoint is also serverless, in the sense that Google ensures high availability by reducing single points of failure, and scalability by dynamically allocating resources to meet the demand for your service. In this way, you are able to focus on your model quality and are freed from administrative and infrastructure concerns. Run the next cell to deploy your model to an endpoint. **This will take about ten minutes to complete. If you've already deployed a model to an endpoint, you can reuse your endpoint by running the cell after the next one.** ``` ENDPOINT_NAME = "churn" output = !gcloud --quiet beta ai endpoints create --display-name=$ENDPOINT_NAME --format="value(name)" if _exit_code == 0: print("Endpoint created.") else: print(f"Error creating endpoint: {output}") ENDPOINT = output[-1] ENDPOINT_ID = ENDPOINT.split("/")[-1] output = !gcloud --quiet beta ai endpoints deploy-model $ENDPOINT_ID --display-name=$ENDPOINT_NAME --model=$MODEL_ID --traffic-split="0=100" DEPLOYED_MODEL_ID = output[-1].split()[-1][:-1] if _exit_code == 0: print( f"Model {MODEL_NAME}/{MODEL_ID} deployed to Endpoint {ENDPOINT_NAME}/{ENDPOINT_ID}." ) else: print(f"Error deploying model to endpoint: {output}") ``` ### If you already have a deployed endpoint You can reuse your existing endpoint by filling in the value of your endpoint ID in the next cell and running it. **If you've just deployed an endpoint in the previous cell, you should skip this step.** ``` # @title Run this cell only if you want to reuse an existing endpoint. if not os.getenv("IS_TESTING"): ENDPOINT_ID = "" # @param {type:"string"} if ENDPOINT_ID: ENDPOINT = f"projects/{PROJECT_ID}/locations/us-central1/endpoints/{ENDPOINT_ID}" print(f"Using endpoint {ENDPOINT}") else: print("If you want to reuse an existing endpoint, you must specify the endpoint id above.") ``` ## Run a prediction test Now that you have imported a model and deployed that model to an endpoint, you are ready to verify that it's working. Run the next cell to send a test prediction request. If everything works as expected, you should receive a response encoded in a text representation called JSON.
**Try this now by running the next cell and examine the results.** ``` print(ENDPOINT) print("request:") pp.pprint(DEFAULT_INPUT) try: resp = send_predict_request(ENDPOINT, DEFAULT_INPUT) print("response") pp.pprint(resp) except Exception: print("prediction request failed") ``` Taking a closer look at the results, we see the following elements: - **churned_values** - a set of possible values (0 and 1) for the target field - **churned_probs** - a corresponding set of probabilities for each possible target field value (5x10^-40 and 1.0, respectively) - **predicted_churn** - based on the probabilities, the predicted value of the target field (1) This response encodes the model's prediction in a format that is readily digestible by software, which makes this service ideal for automated use by an application. ## Start your monitoring job Now that you've created an endpoint to serve prediction requests on your model, you're ready to start a monitoring job to keep an eye on model quality and to alert you if and when input begins to deviate in way that may impact your model's prediction quality. In this section, you will configure and create a model monitoring job based on the churn propensity model you imported from BigQuery ML. ### Configure the following fields: 1. User email - The email address to which you would like monitoring alerts sent. 1. Log sample rate - Your prediction requests and responses are logged to BigQuery tables, which are automatically created when you create a monitoring job. This parameter specifies the desired logging frequency for those tables. 1. Monitor interval - The time window over which to analyze your data and report anomalies. The minimum window is one hour (3600 seconds). 1. Target field - The prediction target column name in training dataset. 1. Skew detection threshold - The skew threshold for each feature you want to monitor. 1. Prediction drift threshold - The drift threshold for each feature you want to monitor. ``` USER_EMAIL = "[your-email-address]" # @param {type:"string"} JOB_NAME = "churn" # Sampling rate (optional, default=.8) LOG_SAMPLE_RATE = 0.8 # @param {type:"number"} # Monitoring Interval in seconds (optional, default=3600). MONITOR_INTERVAL = 3600 # @param {type:"number"} # URI to training dataset. DATASET_BQ_URI = "bq://mco-mm.bqmlga4.train" # @param {type:"string"} # Prediction target column name in training dataset. TARGET = "churned" # Skew and drift thresholds. SKEW_DEFAULT_THRESHOLDS = "country,language" # @param {type:"string"} SKEW_CUSTOM_THRESHOLDS = "cnt_user_engagement:.5" # @param {type:"string"} DRIFT_DEFAULT_THRESHOLDS = "country,language" # @param {type:"string"} DRIFT_CUSTOM_THRESHOLDS = "cnt_user_engagement:.5" # @param {type:"string"} ``` ### Create your monitoring job The following code uses the Google Python client library to translate your configuration settings into a programmatic request to start a model monitoring job. To do this successfully, you need to specify your alerting thresholds (for both skew and drift), your training data source, and apply those settings to all deployed models on your new endpoint (of which there should only be one at this point). Instantiating a monitoring job can take some time. If everything looks good with your request, you'll get a successful API response. Then, you'll need to check your email to receive a notification that the job is running. ``` # Set thresholds specifying alerting criteria for training/serving skew and create config object. 
skew_thresholds = get_thresholds(SKEW_DEFAULT_THRESHOLDS, SKEW_CUSTOM_THRESHOLDS) skew_config = ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig( skew_thresholds=skew_thresholds ) # Set thresholds specifying alerting criteria for serving drift and create config object. drift_thresholds = get_thresholds(DRIFT_DEFAULT_THRESHOLDS, DRIFT_CUSTOM_THRESHOLDS) drift_config = ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig( drift_thresholds=drift_thresholds ) # Specify training dataset source location (used for schema generation). training_dataset = ModelMonitoringObjectiveConfig.TrainingDataset(target_field=TARGET) training_dataset.bigquery_source = BigQuerySource(input_uri=DATASET_BQ_URI) # Aggregate the above settings into a ModelMonitoringObjectiveConfig object and use # that object to adjust the ModelDeploymentMonitoringObjectiveConfig object. objective_config = ModelMonitoringObjectiveConfig( training_dataset=training_dataset, training_prediction_skew_detection_config=skew_config, prediction_drift_detection_config=drift_config, ) objective_template = ModelDeploymentMonitoringObjectiveConfig( objective_config=objective_config ) # Find all deployed model ids on the created endpoint and set objectives for each. model_ids = get_deployed_model_ids(ENDPOINT_ID) objective_configs = set_objectives(model_ids, objective_template) # Create the monitoring job for all deployed models on this endpoint. monitoring_job = create_monitoring_job(objective_configs) # Run a prediction request to generate schema, if necessary. try: _ = send_predict_request(ENDPOINT, DEFAULT_INPUT) print("prediction succeeded") except Exception: print("prediction failed") ``` After a minute or two, you should receive email at the address you configured above for USER_EMAIL. This email confirms successful deployment of your monitoring job. Here's a sample of what this email might look like: <br> <br> <img src="https://storage.googleapis.com/mco-general/img/mm6.png" /> <br> As your monitoring job collects data, measurements are stored in Google Cloud Storage and you are free to examine your data at any time. The circled path in the image above specifies the location of your measurements in Google Cloud Storage. Run the following cell to take a look at your measurements in Cloud Storage. ``` !gsutil ls gs://cloud-ai-platform-fdfb4810-148b-4c86-903c-dbdff879f6e1/*/* ``` You will notice the following components in these Cloud Storage paths: - **cloud-ai-platform-..** - This is a bucket created for you and assigned to capture your service's prediction data. Each monitoring job you create will trigger creation of a new folder in this bucket. - **[model_monitoring|instance_schemas]/job-..** - This is your unique monitoring job number, which you can see above in both the response to your job creation requesst and the email notification. - **instance_schemas/job-../analysis** - This is the monitoring jobs understanding and encoding of your training data's schema (field names, types, etc.). - **instance_schemas/job-../predict** - This is the first prediction made to your model after the current monitoring job was enabled. - **model_monitoring/job-../serving** - This folder is used to record data relevant to drift calculations. It contains measurement summaries for every hour your model serves traffic. - **model_monitoring/job-../training** - This folder is used to record data relevant to training-serving skew calculations. It contains an ongoing summary of prediction data relative to training data. 
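If you prefer to inspect these artifacts from Python rather than the *gsutil* CLI, the Cloud Storage client library can list the same objects. The sketch below is not part of the original notebook; the bucket name shown is the example bucket printed above and will be different in your own project.

```
# Hedged sketch: list monitoring artifacts with the Cloud Storage Python client.
# Replace BUCKET with the cloud-ai-platform-... bucket created for your project.
from google.cloud import storage

BUCKET = "cloud-ai-platform-fdfb4810-148b-4c86-903c-dbdff879f6e1"  # example value from above

client = storage.Client(project=PROJECT_ID)
for blob in client.list_blobs(BUCKET, prefix="model_monitoring/"):
    print(blob.name)
```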
### You can create monitoring jobs with other user interfaces In the previous cells, you created a monitoring job using the Python client library. You can also use the *gcloud* command line tool to create a model monitoring job and, in the near future, you will be able to use the Cloud Console for this as well. ## Generate test data to trigger alerting Now you are ready to test the monitoring function. Run the following cell, which will generate fabricated test predictions designed to trigger the thresholds you specified above. It takes about five minutes to run this cell and at least an hour for the service to assess and report anomalies in skew or drift, so after running this cell, feel free to proceed with the notebook; you'll see how to examine the resulting alert later. ``` def random_uid(): digits = [str(i) for i in range(10)] + ["A", "B", "C", "D", "E", "F"] return "".join(random.choices(digits, k=32)) def monitoring_test(count, sleep, perturb_num={}, perturb_cat={}): # Use random sampling and mean/sd with gaussian distribution to model # training data. Then modify sampling distros for two categorical features # and mean/sd for two numerical features. mean_sd = MEAN_SD.copy() country = COUNTRY.copy() for k, (mean_fn, sd_fn) in perturb_num.items(): orig_mean, orig_sd = MEAN_SD[k] mean_sd[k] = (mean_fn(orig_mean), sd_fn(orig_sd)) for k, v in perturb_cat.items(): country[k] = v for i in range(0, count): input = DEFAULT_INPUT.copy() input["user_pseudo_id"] = str(random_uid()) input["country"] = random.choices([*country], list(country.values()))[0] input["dayofweek"] = random.choices([*DAYOFWEEK], list(DAYOFWEEK.values()))[0] input["language"] = str(random.choices([*LANGUAGE], list(LANGUAGE.values()))[0]) input["operating_system"] = str(random.choices([*OS], list(OS.values()))[0]) input["month"] = random.choices([*MONTH], list(MONTH.values()))[0] for key, (mean, sd) in mean_sd.items(): sample_val = round(float(np.random.normal(mean, sd, 1))) val = max(sample_val, 0) input[key] = val print(f"Sending prediction {i}") try: send_predict_request(ENDPOINT, input) except Exception: print("prediction request failed") time.sleep(sleep) print("Test Completed.") test_time = 300 tests_per_sec = 1 sleep_time = 1 / tests_per_sec iterations = test_time * tests_per_sec perturb_num = {"cnt_user_engagement": (lambda x: x * 3, lambda x: x / 3)} perturb_cat = {"Japan": max(COUNTRY.values()) * 2} monitoring_test(iterations, sleep_time, perturb_num, perturb_cat) ``` ## Interpret your results While waiting for your results, which, as noted, may take up to an hour, you can read ahead to get a sense of the alerting experience. ### Here's what a sample email alert looks like... <img src="https://storage.googleapis.com/mco-general/img/mm7.png" /> This email is warning you that the *cnt_user_engagement*, *country* and *language* feature values seen in production have skewed above your threshold between training and serving your model. It's also telling you that the *cnt_user_engagement* feature value is drifting significantly over time, again, as per your threshold specification. ### Monitoring results in the Cloud Console You can examine your model monitoring data from the Cloud Console. Below is a screenshot of those capabilities.
#### Monitoring Status <img src="https://storage.googleapis.com/mco-general/img/mm1.png" /> #### Monitoring Alerts <img src="https://storage.googleapis.com/mco-general/img/mm2.png" /> ## Clean up To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial: ``` out = !gcloud ai endpoints undeploy-model $ENDPOINT_ID --deployed-model-id $DEPLOYED_MODEL_ID if _exit_code == 0: print("Model undeployed.") else: print("Error undeploying model:", out) out = !gcloud ai endpoints delete $ENDPOINT_ID --quiet if _exit_code == 0: print("Endpoint deleted.") else: print("Error deleting endpoint:", out) out = !gcloud ai models delete $MODEL_ID --quiet if _exit_code == 0: print("Model deleted.") else: print("Error deleting model:", out) ``` ## Learn more about model monitoring **Congratulations!** You've now learned what model monitoring is, how to configure and enable it, and how to find and interpret the results. Check out the following resources to learn more about model monitoring and ML Ops. - [TensorFlow Data Validation](https://www.tensorflow.org/tfx/guide/tfdv) - [Data Understanding, Validation, and Monitoring At Scale](https://blog.tensorflow.org/2018/09/introducing-tensorflow-data-validation.html) - [Vertex Product Documentation](https://cloud.google.com/vertex) - [Model Monitoring Reference Docs](https://cloud.google.com/vertex/docs/reference) - [Model Monitoring blog article]()
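One loose end worth noting: the cleanup cell above removes the endpoint and the model, but not the monitoring job itself. Here is a minimal sketch using the `pause_monitoring_job` and `delete_monitoring_job` utilities defined earlier in this notebook, assuming `monitoring_job` is still in scope from the creation step (a running job may need to be paused before it can be deleted):

```
# Hedged cleanup sketch for the monitoring job created earlier in this notebook.
# monitoring_job.name is the full resource name returned by the create call.
try:
    pause_monitoring_job(monitoring_job.name)   # pause first; a running job may not be deletable
    delete_monitoring_job(monitoring_job.name)
except Exception as e:
    print(f"Monitoring job cleanup failed: {e}")
```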
RMinimum : Full - Test ``` import math import random import queue ``` Testfall : $X = [0, \cdots, n-1]$, $k$ ``` # User input n = 2**10 k = 2**5 # Automatic X = [i for i in range(n)] # Show Testcase print(' Testcase: ') print('=============================') print('X = [0, ..., ' + str(n - 1) + ']') print('k =', k) ``` Algorithmus : Full ``` def rminimum(X, k, cnt = [], rec = 0): # Generate empty cnt list if its not a recursive call if cnt == []: cnt = [0 for _ in range(max(X) + 1)] # Convert parameters if needed k = int(k) n = len(X) # Base case |X| = 3 if len(X) == 3: if X[0] < X[1]: cnt[X[0]] += 2 cnt[X[1]] += 1 cnt[X[2]] += 1 if X[0] < X[2]: mini = X[0] else: mini = X[2] else: cnt[X[0]] += 1 cnt[X[1]] += 2 cnt[X[2]] += 1 if X[1] < X[2]: mini = X[1] else: mini = X[2] return mini, cnt, rec # Run phases W, L, cnt = phase1(X, cnt) M, cnt = phase2(L, k, cnt) Wnew, cnt = phase3(W, k, M, cnt) mini, cnt, rec = phase4(Wnew, k, n, cnt, rec) return mini, cnt, rec return mini, cnt, rec # -------------------------------------------------- def phase1(X, cnt): # Init W, L W = [0 for _ in range(len(X) // 2)] L = [0 for _ in range(len(X) // 2)] # Random pairs random.shuffle(X) for i in range(len(X) // 2): if X[2 * i] > X[2 * i + 1]: W[i] = X[2 * i + 1] L[i] = X[2 * i] else: W[i] = X[2 * i] L[i] = X[2 * i + 1] cnt[X[2 * i + 1]] += 1 cnt[X[2 * i]] += 1 return W, L, cnt # -------------------------------------------------- def phase2(L, k, cnt): # Generate subsets random.shuffle(L) subsets = [L[i * k:(i + 1) * k] for i in range((len(L) + k - 1) // k)] # Init M M = [0 for _ in range(len(subsets))] # Perfectly balanced tournament tree using a Queue for i in range(len(subsets)): q = queue.Queue() for ele in subsets[i]: q.put(ele) while q.qsize() > 1: a = q.get() b = q.get() if a < b: q.put(a) else: q.put(b) cnt[a] += 1 cnt[b] += 1 M[i] = q.get() return M, cnt # -------------------------------------------------- def phase3(W, k, M, cnt): # Generate subsets random.shuffle(W) W_i = [W[i * k:(i + 1) * k] for i in range((len(W) + k - 1) // k)] subsets_filtered = [0 for _ in range(len(subsets))] # Filter subsets for i in range(len(subsets_filtered)): subsets_filtered[i] = [elem for elem in subsets[i] if elem < M[i]] cnt[M[i]] += len(subsets[i]) for elem in subsets[i]: cnt[elem] += 1 # Merge subsets Wnew = [item for sublist in subsets_filtered for item in sublist] return Wnew, cnt # -------------------------------------------------- def phase4(Wnew, k, n0, cnt, rec): # Recursive call check if len(Wnew) <= math.log(n0, 2) ** 2: q = queue.Queue() for ele in Wnew: q.put(ele) while q.qsize() > 1: a = q.get() b = q.get() if a < b: q.put(a) else: q.put(b) cnt[a] += 1 cnt[b] += 1 mini = q.get() return mini, cnt, rec else: rec += 1 rminimum(Wnew, k, cnt, rec) # ================================================== # Testcase mini, cnt, rec = rminimum(X, k) ``` Resultat : ``` def test(X, k, mini, cnt, rec): print('') print('Testfall n / k:', len(X), '/', k) print('====================================') print('Fragile Complexity:') print('-------------------') print('f_min :', cnt[0]) print('f_rem :', max(cnt[1:])) print('f_n :', max(cnt)) print('Work :', int(sum(cnt)/2)) print('====================================') print('Process:') print('--------') print('Minimum :', mini) print('n :', len(X)) print('log(n) :', round(math.log(len(X), 2), 2)) print('log(k) :', round(math.log(k, 2), 2)) print('lg / lglg :', round(math.log(len(X), 2) / math.log(math.log(len(X), 2), 2))) print('n / log(n) :', round(len(X) / math.log(len(X), 2))) 
print('====================================') return # Testfall test(X, k, mini, cnt, rec) ```
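Note that, as written, `phase3` splits the winners into `W_i` but then filters a variable named `subsets`, which only exists inside `phase2`, and the recursive branch of `phase4` never returns the result of its nested `rminimum` call, so the top-level call would return `None` in that case (the duplicated `return` at the end of `rminimum` is merely dead code). Below is a minimal corrected sketch of the two affected phases, keeping the counting logic of the original:

```
def phase3(W, k, M, cnt):
    # Split the winners into random blocks of size k and filter each block
    # against the corresponding tournament minimum M[i].
    random.shuffle(W)
    W_i = [W[i * k:(i + 1) * k] for i in range((len(W) + k - 1) // k)]
    filtered = []
    for i, block in enumerate(W_i):
        filtered.append([elem for elem in block if elem < M[i]])
        cnt[M[i]] += len(block)
        for elem in block:
            cnt[elem] += 1
    Wnew = [item for sublist in filtered for item in sublist]
    return Wnew, cnt

def phase4(Wnew, k, n0, cnt, rec):
    # Small enough: finish with a single tournament tree.
    if len(Wnew) <= math.log(n0, 2) ** 2:
        q = queue.Queue()
        for ele in Wnew:
            q.put(ele)
        while q.qsize() > 1:
            a, b = q.get(), q.get()
            q.put(a if a < b else b)
            cnt[a] += 1
            cnt[b] += 1
        return q.get(), cnt, rec
    # Otherwise recurse -- and return the result to the caller.
    return rminimum(Wnew, k, cnt, rec + 1)
```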
# Advanced topics The following material is a deep-dive into Yangson, and is not necessarily representative of how one would perform manipulations in a production environment. Please refer to the other tutorials for a better picture of Rosetta's intended use. Keep in mind that the key feature of Yangson is to be able to manipulate YANG data models in a more human-readable format, ala JSON. What lies below digs beneath the higher-level abstractions and should paint a decent picture of the functional nature of Yangson. # Manipulating models with Rosetta and Yangson One of the goals of many network operators is to provide abstractions in a multi-vendor environment. This can be done with YANG and OpenConfig data models, but as they say, the devil is in the details. It occurred to me that you should be able to parse configuration from one vendor and translate it to another. Unfortunately, as we all know, these configurations don't always translate well on a 1-to-1 basis. I will demonstrate this process below and show several features of the related libraries along the way. The following example begins exactly the same as the Cisco parsing tutorial. Let's load up some Juniper config and parse it into a YANG data model. First, we'll read the file. ``` from ntc_rosetta import get_driver import json junos = get_driver("junos", "openconfig") junos_driver = junos() # Strip any rpc tags before and after `<configuration>...</configuration>` with open("data/junos/dev_conf.xml", "r") as fp: config = fp.read() print(config) ``` ## Junos parsing Now, we parse the config and take a look at the data model. ``` from sys import exc_info from yangson.exceptions import SemanticError try: parsed = junos_driver.parse( native={"dev_conf": config}, validate=False, include=[ "/openconfig-interfaces:interfaces", "/openconfig-network-instance:network-instances/network-instance/name", "/openconfig-network-instance:network-instances/network-instance/config", "/openconfig-network-instance:network-instances/network-instance/vlans", ] ) except SemanticError as e: print(f"error: {e}") print(json.dumps(parsed.raw_value(), sort_keys=True, indent=2)) ``` ## Naive translation Since we have a valid data model, let's see if Rosetta can translate it as-is. ``` ios = get_driver("ios", "openconfig") ios_driver = ios() native = ios_driver.translate(candidate=parsed.raw_value()) print(native) ``` Pretty cool, right?! Rosetta does a great job of parsing and translating, but it is a case of "monkey see, monkey do". Rosetta doesn't have any mechanisms to translate interface names, for example. It is up to the operator to perform this sort of manipulation. ## Down the Yangson rabbit hole Yangson allows the developer to easily translate between YANG data models and JSON. Most all of these manipulations can be performed on dictionaries in Python and loaded into data models using [`from_raw`](https://yangson.labs.nic.cz/datamodel.html#yangson.datamodel.DataModel.from_raw). The following examples may appear to be a little obtuse, but the goal is to demonstrate the internals of Yangson. ### And it's mostly functional It is critical to read the short description of the [zipper](https://yangson.labs.nic.cz/instance.html?highlight=zipper#yangson.instance.InstanceNode) interface in the InstanceNode section of the docs. Yanson never manipulates an object, but returns a copy with the manipulated attributes. ### Show me the code! Let's take a look at fixing up the interface names and how we can manipulate data model attributes. 
To do that, we need to locate the attribute in the tree using the [`parse_resource_id`](https://yangson.labs.nic.cz/datamodel.html#yangson.datamodel.DataModel.parse_resource_id) method. This method returns an [`instance route'](https://yangson.labs.nic.cz/instance.html?highlight=arrayentry#yangson.instance.InstanceRoute). The string passed to the method is an xpath. ``` # Locate the interfaces in the tree. We need to modify this one # Note that we have to URL-escape the forward slashes per https://tools.ietf.org/html/rfc8040#section-3.5.3 irt = parsed.datamodel.parse_resource_id("openconfig-interfaces:interfaces/interface=xe-0%2F0%2F1") current_data = parsed.root.goto(irt) print("Current node configuration: ", json.dumps(current_data.raw_value(), sort_keys=True, indent=2)) modify_data = current_data.raw_value() ifname = 'Ethernet0/0/1' modify_data['name'] = ifname modify_data['config']['name'] = ifname stub = current_data.update(modify_data, raw=True) print("\n\nCandidate node configuration: ", json.dumps(stub.raw_value(), sort_keys=True, indent=2)) ``` ### Instance routes You will notice a `goto` method on child nodes. You _can_ access successors with this method, but you have to build the path from the root `datamodel` attribute as seen in the following example. If you aren't sure where an object is in the tree, you can also rely on its `path` attribute. Quick tangent... what is the difference between `parse_instance_id` and `parse_resource_id`? The answer can be found in the [Yangson glossary](https://yangson.labs.nic.cz/glossary.html) and the respective RFC's. ``` # TL;DR irt = parsed.datamodel.parse_instance_id('/openconfig-network-instance:network-instances/network-instance[1]/vlans/vlan[3]') print(parsed.root.goto(irt).raw_value()) irt = parsed.datamodel.parse_resource_id('openconfig-network-instance:network-instances/network-instance=default/vlans/vlan=10') print(parsed.root.goto(irt).raw_value()) ``` What about the rest of the interfaces in the list? Yangson provides an iterator for array nodes. ``` import re irt = parsed.datamodel.parse_resource_id("openconfig-interfaces:interfaces/interface") iface_objs = parsed.root.goto(irt) # Swap the name as required p, sub = re.compile(r'xe-'), 'Ethernet' # There are a couple challenges here. First is that Yanson doesn't impliment __len__ # The second problem is that you cannot modify a list in-place, so we're basically # hacking this to hijack the index of the current element and looking it up from a "clean" # instance. This is a pet example! It would be much easier using Python dicts. new_ifaces = None for iface in iface_objs: name_irt = parsed.datamodel.parse_instance_id('/name') cname_irt = parsed.datamodel.parse_instance_id('/config/name') if new_ifaces: name = new_ifaces[iface.index].goto(name_irt) else: name = iface.goto(name_irt) name = name.update(p.sub(sub, name.raw_value()), raw=True) cname = name.up().goto(cname_irt) cname = cname.update(p.sub(sub, cname.raw_value()), raw=True) iface = cname.up().up() new_ifaces = iface.up() print(json.dumps(new_ifaces.raw_value(), sort_keys=True, indent=2)) # Translate to Cisco-speak native = ios_driver.translate(candidate=new_ifaces.top().raw_value()) print(native) ``` Hooray! That should work. One final approach, just to show you different ways of doing things. This is another pet example to demonstrate Yangson methods. ``` import re from typing import Dict irt = parsed.datamodel.parse_resource_id("openconfig-interfaces:interfaces") iface_objs = parsed.root.goto(irt) # Nuke the whole branch! 
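# Added note: like every Yangson operation shown in this notebook, delete_item returns a
# *new* InstanceNode and leaves the original tree untouched (zipper semantics), which is
# why the result is rebound to iface_objs on the next line.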
iface_objs = iface_objs.delete_item("interface") def build_iface(data: str) -> Dict: # Example template, this could be anything you like that conforms to the schema return { "name": f"{data['name']}", "config": { "name": f"{data['name']}", "description": f"{data['description']}", "type": "iana-if-type:ethernetCsmacd", "enabled": True }, } iface_data = [ build_iface({ "name": f"TenGigabitEthernet0/{idx}", "description": f"This is interface TenGigabitEthernet0/{idx}" }) for idx in range(10, 0, -1) ] initial = iface_data.pop() # Start a new interface list iface_objs = iface_objs.put_member("interface", [initial], raw=True) cur_obj = iface_objs[0] # Yangson exposes `next`, `insert_after`, and `insert_before` methods. # There is no `append`. while iface_data: new_obj = cur_obj.insert_after(iface_data.pop(), raw=True) cur_obj = new_obj # Translate to Cisco-speak native = ios_driver.translate(candidate=cur_obj.top().raw_value()) print(native) ``` ### Deleting individual items Here is an example of deleting an individual item. Navigating the tree can be a bit tricky, but it's not too bad once you get the hang of it. ``` # Locate a vlan by ID and delete it irt = parsed.datamodel.parse_resource_id("openconfig-network-instance:network-instances/network-instance=default/vlans/vlan=10") vlan10 = parsed.root.goto(irt) vlans = vlan10.up().delete_item(vlan10.index) print(json.dumps(vlans.raw_value(), sort_keys=True, indent=2)) ```
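For completeness, here is a minimal sketch (not from the original notebook) of the dictionary-based approach mentioned earlier: edit the raw JSON-style dict and rebuild an instance with `DataModel.from_raw`. It assumes the interfaces container is keyed by the usual namespace-qualified member name `openconfig-interfaces:interfaces`.

```
# Hedged sketch: dict-level edit followed by DataModel.from_raw(), as an alternative
# to walking the zipper interface node by node.
raw = parsed.raw_value()  # plain Python dict/list representation of the whole tree
for iface in raw["openconfig-interfaces:interfaces"]["interface"]:
    iface["name"] = iface["name"].replace("xe-", "Ethernet")
    iface["config"]["name"] = iface["config"]["name"].replace("xe-", "Ethernet")

new_root = parsed.datamodel.from_raw(raw)  # rebuild an instance tree from the raw dict
print(ios_driver.translate(candidate=new_root.raw_value()))
```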
# Planning Search Agent Notebook version of the project [Implement a Planning Search](https://github.com/udacity/AIND-Planning) from [Udacity's Artificial Intelligence Nanodegree](https://www.udacity.com/course/artificial-intelligence-nanodegree--nd889) <br> **Goal**: Solve deterministic logistics planning problems for an Air Cargo transport system using a planning search agent All problems are in the Air Cargo domain. They have the same action schema defined, but different initial states and goals: ``` Action(Load(c, p, a), PRECOND: At(c, a) ∧ At(p, a) ∧ Cargo(c) ∧ Plane(p) ∧ Airport(a) EFFECT: ¬ At(c, a) ∧ In(c, p)) Action(Unload(c, p, a), PRECOND: In(c, p) ∧ At(p, a) ∧ Cargo(c) ∧ Plane(p) ∧ Airport(a) EFFECT: At(c, a) ∧ ¬ In(c, p)) Action(Fly(p, from, to), PRECOND: At(p, from) ∧ Plane(p) ∧ Airport(from) ∧ Airport(to) EFFECT: ¬ At(p, from) ∧ At(p, to)) ``` ## Planning Graph nodes ``` from planning_agent.aimacode.planning import Action from planning_agent.aimacode.search import Problem from planning_agent.aimacode.utils import expr from planning_agent.lp_utils import decode_state class PgNode(): """ Base class for planning graph nodes. includes instance sets common to both types of nodes used in a planning graph parents: the set of nodes in the previous level children: the set of nodes in the subsequent level mutex: the set of sibling nodes that are mutually exclusive with this node """ def __init__(self): self.parents = set() self.children = set() self.mutex = set() def is_mutex(self, other) -> bool: """ Boolean test for mutual exclusion :param other: PgNode the other node to compare with :return: bool True if this node and the other are marked mutually exclusive (mutex) """ if other in self.mutex: return True return False def show(self): """ helper print for debugging shows counts of parents, children, siblings :return: print only """ print("{} parents".format(len(self.parents))) print("{} children".format(len(self.children))) print("{} mutex".format(len(self.mutex))) class PgNode_s(PgNode): """ A planning graph node representing a state (literal fluent) from a planning problem. Args: ---------- symbol : str A string representing a literal expression from a planning problem domain. is_pos : bool Boolean flag indicating whether the literal expression is positive or negative. 
""" def __init__(self, symbol: str, is_pos: bool): """ S-level Planning Graph node constructor :param symbol: expr :param is_pos: bool Instance variables calculated: literal: expr fluent in its literal form including negative operator if applicable Instance variables inherited from PgNode: parents: set of nodes connected to this node in previous A level; initially empty children: set of nodes connected to this node in next A level; initially empty mutex: set of sibling S-nodes that this node has mutual exclusion with; initially empty """ PgNode.__init__(self) self.symbol = symbol self.is_pos = is_pos self.literal = expr(self.symbol) if not self.is_pos: self.literal = expr('~{}'.format(self.symbol)) def show(self): """helper print for debugging shows literal plus counts of parents, children, siblings :return: print only """ print("\n*** {}".format(self.literal)) PgNode.show(self) def __eq__(self, other): """equality test for nodes - compares only the literal for equality :param other: PgNode_s :return: bool """ if isinstance(other, self.__class__): return (self.symbol == other.symbol) \ and (self.is_pos == other.is_pos) def __hash__(self): return hash(self.symbol) ^ hash(self.is_pos) class PgNode_a(PgNode): """A-type (action) Planning Graph node - inherited from PgNode """ def __init__(self, action: Action): """A-level Planning Graph node constructor :param action: Action a ground action, i.e. this action cannot contain any variables Instance variables calculated: An A-level will always have an S-level as its parent and an S-level as its child. The preconditions and effects will become the parents and children of the A-level node However, when this node is created, it is not yet connected to the graph prenodes: set of *possible* parent S-nodes effnodes: set of *possible* child S-nodes is_persistent: bool True if this is a persistence action, i.e. a no-op action Instance variables inherited from PgNode: parents: set of nodes connected to this node in previous S level; initially empty children: set of nodes connected to this node in next S level; initially empty mutex: set of sibling A-nodes that this node has mutual exclusion with; initially empty """ PgNode.__init__(self) self.action = action self.prenodes = self.precond_s_nodes() self.effnodes = self.effect_s_nodes() self.is_persistent = False if self.prenodes == self.effnodes: self.is_persistent = True def show(self): """helper print for debugging shows action plus counts of parents, children, siblings :return: print only """ print("\n*** {}{}".format(self.action.name, self.action.args)) PgNode.show(self) def precond_s_nodes(self): """precondition literals as S-nodes (represents possible parents for this node). It is computationally expensive to call this function; it is only called by the class constructor to populate the `prenodes` attribute. :return: set of PgNode_s """ nodes = set() for p in self.action.precond_pos: n = PgNode_s(p, True) nodes.add(n) for p in self.action.precond_neg: n = PgNode_s(p, False) nodes.add(n) return nodes def effect_s_nodes(self): """effect literals as S-nodes (represents possible children for this node). It is computationally expensive to call this function; it is only called by the class constructor to populate the `effnodes` attribute. 
:return: set of PgNode_s """ nodes = set() for e in self.action.effect_add: n = PgNode_s(e, True) nodes.add(n) for e in self.action.effect_rem: n = PgNode_s(e, False) nodes.add(n) return nodes def __eq__(self, other): """equality test for nodes - compares only the action name for equality :param other: PgNode_a :return: bool """ if isinstance(other, self.__class__): return (self.action.name == other.action.name) \ and (self.action.args == other.action.args) def __hash__(self): return hash(self.action.name) ^ hash(self.action.args) ``` ## Planning Graph ``` def mutexify(node1: PgNode, node2: PgNode): """ adds sibling nodes to each other's mutual exclusion (mutex) set. These should be sibling nodes! :param node1: PgNode (or inherited PgNode_a, PgNode_s types) :param node2: PgNode (or inherited PgNode_a, PgNode_s types) :return: node mutex sets modified """ if type(node1) != type(node2): raise TypeError('Attempted to mutex two nodes of different types') node1.mutex.add(node2) node2.mutex.add(node1) class PlanningGraph(): """ A planning graph as described in chapter 10 of the AIMA text. The planning graph can be used to reason about """ def __init__(self, problem: Problem, state: str, serial_planning=True): """ :param problem: PlanningProblem (or subclass such as AirCargoProblem or HaveCakeProblem) :param state: str (will be in form TFTTFF... representing fluent states) :param serial_planning: bool (whether or not to assume that only one action can occur at a time) Instance variable calculated: fs: FluentState the state represented as positive and negative fluent literal lists all_actions: list of the PlanningProblem valid ground actions combined with calculated no-op actions s_levels: list of sets of PgNode_s, where each set in the list represents an S-level in the planning graph a_levels: list of sets of PgNode_a, where each set in the list represents an A-level in the planning graph """ self.problem = problem self.fs = decode_state(state, problem.state_map) self.serial = serial_planning self.all_actions = self.problem.actions_list + self.noop_actions(self.problem.state_map) self.s_levels = [] self.a_levels = [] self.create_graph() def noop_actions(self, literal_list): """create persistent action for each possible fluent "No-Op" actions are virtual actions (i.e., actions that only exist in the planning graph, not in the planning problem domain) that operate on each fluent (literal expression) from the problem domain. No op actions "pass through" the literal expressions from one level of the planning graph to the next. The no-op action list requires both a positive and a negative action for each literal expression. Positive no-op actions require the literal as a positive precondition and add the literal expression as an effect in the output, and negative no-op actions require the literal as a negative precondition and remove the literal expression as an effect in the output. This function should only be called by the class constructor. :param literal_list: :return: list of Action """ action_list = [] for fluent in literal_list: act1 = Action(expr("Noop_pos({})".format(fluent)), ([fluent], []), ([fluent], [])) action_list.append(act1) act2 = Action(expr("Noop_neg({})".format(fluent)), ([], [fluent]), ([], [fluent])) action_list.append(act2) return action_list def create_graph(self): """ build a Planning Graph as described in Russell-Norvig 3rd Ed 10.3 or 2nd Ed 11.4 The S0 initial level has been implemented for you. 
It has no parents and includes all of the literal fluents that are part of the initial state passed to the constructor. At the start of a problem planning search, this will be the same as the initial state of the problem. However, the planning graph can be built from any state in the Planning Problem This function should only be called by the class constructor. :return: builds the graph by filling s_levels[] and a_levels[] lists with node sets for each level """ # the graph should only be built during class construction if (len(self.s_levels) != 0) or (len(self.a_levels) != 0): raise Exception( 'Planning Graph already created; construct a new planning graph for each new state in the planning sequence') # initialize S0 to literals in initial state provided. leveled = False level = 0 self.s_levels.append(set()) # S0 set of s_nodes - empty to start # for each fluent in the initial state, add the correct literal PgNode_s for literal in self.fs.pos: self.s_levels[level].add(PgNode_s(literal, True)) for literal in self.fs.neg: self.s_levels[level].add(PgNode_s(literal, False)) # no mutexes at the first level # continue to build the graph alternating A, S levels until last two S levels contain the same literals, # i.e. until it is "leveled" while not leveled: self.add_action_level(level) self.update_a_mutex(self.a_levels[level]) level += 1 self.add_literal_level(level) self.update_s_mutex(self.s_levels[level]) if self.s_levels[level] == self.s_levels[level - 1]: leveled = True def add_action_level(self, level): """ add an A (action) level to the Planning Graph :param level: int the level number alternates S0, A0, S1, A1, S2, .... etc the level number is also used as the index for the node set lists self.a_levels[] and self.s_levels[] :return: adds A nodes to the current level in self.a_levels[level] """ self.a_levels.append(set()) # set of a_nodes for a in self.all_actions: a_node = PgNode_a(a) if set(a_node.prenodes).issubset(set(self.s_levels[level])): # True: Valid A node for s_node in self.s_levels[level]: if s_node in a_node.prenodes: # search for the right parents a_node.parents.add(s_node) s_node.children.add(a_node) self.a_levels[level].add(a_node) def add_literal_level(self, level): """ add an S (literal) level to the Planning Graph :param level: int the level number alternates S0, A0, S1, A1, S2, .... etc the level number is also used as the index for the node set lists self.a_levels[] and self.s_levels[] :return: adds S nodes to the current level in self.s_levels[level] """ self.s_levels.append(set()) # set of s_nodes for a in self.a_levels[level-1]: for s_node in a.effnodes: # Valid S nodes a.children.add(s_node) s_node.parents.add(a) self.s_levels[level].add(s_node) def update_a_mutex(self, nodeset): """ Determine and update sibling mutual exclusion for A-level nodes Mutex action tests section from 3rd Ed. 10.3 or 2nd Ed. 
11.4 A mutex relation holds between two actions a given level if the planning graph is a serial planning graph and the pair are nonpersistence actions or if any of the three conditions hold between the pair: Inconsistent Effects Interference Competing needs :param nodeset: set of PgNode_a (siblings in the same level) :return: mutex set in each PgNode_a in the set is appropriately updated """ nodelist = list(nodeset) for i, n1 in enumerate(nodelist[:-1]): for n2 in nodelist[i + 1:]: if (self.serialize_actions(n1, n2) or self.inconsistent_effects_mutex(n1, n2) or self.interference_mutex(n1, n2) or self.competing_needs_mutex(n1, n2)): mutexify(n1, n2) def serialize_actions(self, node_a1: PgNode_a, node_a2: PgNode_a) -> bool: """ Test a pair of actions for mutual exclusion, returning True if the planning graph is serial, and if either action is persistent; otherwise return False. Two serial actions are mutually exclusive if they are both non-persistent. :param node_a1: PgNode_a :param node_a2: PgNode_a :return: bool """ # if not self.serial: return False if node_a1.is_persistent or node_a2.is_persistent: return False return True def inconsistent_effects_mutex(self, node_a1: PgNode_a, node_a2: PgNode_a) -> bool: """ Test a pair of actions for inconsistent effects, returning True if one action negates an effect of the other, and False otherwise. HINT: The Action instance associated with an action node is accessible through the PgNode_a.action attribute. See the Action class documentation for details on accessing the effects and preconditions of an action. :param node_a1: PgNode_a :param node_a2: PgNode_a :return: bool """ # Create 1 set with all the adding effects and 1 set with all the removing effects. # (a single action cannot result in inconsistent effects) # If the intersection (&) of the two sets is not empty, then at least one effect negates another effects_add = node_a1.action.effect_add + node_a2.action.effect_add effects_rem = node_a1.action.effect_rem + node_a2.action.effect_rem return bool(set(effects_add) & set(effects_rem)) def interference_mutex(self, node_a1: PgNode_a, node_a2: PgNode_a) -> bool: """ Test a pair of actions for mutual exclusion, returning True if the effect of one action is the negation of a precondition of the other. HINT: The Action instance associated with an action node is accessible through the PgNode_a.action attribute. See the Action class documentation for details on accessing the effects and preconditions of an action. :param node_a1: PgNode_a :param node_a2: PgNode_a :return: bool """ # Similar implementation of inconsistent_effects_mutex but crossing the adding/removing effect of each action # with the negative/positive precondition of the other. # 4 sets are used for 2 separated intersections. The intersection of 2 large sets (pos_add and neg_rem) would # also result True for inconsistent_effects cross_pos = node_a1.action.effect_add + node_a2.action.precond_pos cross_neg = node_a1.action.precond_neg + node_a2.action.effect_rem cross_pos2 = node_a2.action.effect_add + node_a1.action.precond_pos cross_neg2 = node_a2.action.precond_neg + node_a1.action.effect_rem return bool(set(cross_pos) & set(cross_neg)) or bool(set(cross_pos2) & set(cross_neg2)) def competing_needs_mutex(self, node_a1: PgNode_a, node_a2: PgNode_a) -> bool: """ Test a pair of actions for mutual exclusion, returning True if one of the precondition of one action is mutex with a precondition of the other action. 
:param node_a1: PgNode_a :param node_a2: PgNode_a :return: bool """ # Create a list with the parents of one action node that are mutually exclusive with the parents of the other # and return True if the list is not empty mutex = [i for i in node_a1.parents for j in node_a2.parents if i.is_mutex(j)] return bool(mutex) def update_s_mutex(self, nodeset: set): """ Determine and update sibling mutual exclusion for S-level nodes Mutex action tests section from 3rd Ed. 10.3 or 2nd Ed. 11.4 A mutex relation holds between literals at a given level if either of the two conditions hold between the pair: Negation Inconsistent support :param nodeset: set of PgNode_a (siblings in the same level) :return: mutex set in each PgNode_a in the set is appropriately updated """ nodelist = list(nodeset) for i, n1 in enumerate(nodelist[:-1]): for n2 in nodelist[i + 1:]: if self.negation_mutex(n1, n2) or self.inconsistent_support_mutex(n1, n2): mutexify(n1, n2) def negation_mutex(self, node_s1: PgNode_s, node_s2: PgNode_s) -> bool: """ Test a pair of state literals for mutual exclusion, returning True if one node is the negation of the other, and False otherwise. HINT: Look at the PgNode_s.__eq__ defines the notion of equivalence for literal expression nodes, and the class tracks whether the literal is positive or negative. :param node_s1: PgNode_s :param node_s2: PgNode_s :return: bool """ # Mutual exclusive nodes have the same 'symbol' and different 'is_pos' attributes return (node_s1.symbol == node_s2.symbol) and (node_s1.is_pos != node_s2.is_pos) def inconsistent_support_mutex(self, node_s1: PgNode_s, node_s2: PgNode_s): """ Test a pair of state literals for mutual exclusion, returning True if there are no actions that could achieve the two literals at the same time, and False otherwise. In other words, the two literal nodes are mutex if all of the actions that could achieve the first literal node are pairwise mutually exclusive with all of the actions that could achieve the second literal node. HINT: The PgNode.is_mutex method can be used to test whether two nodes are mutually exclusive. 
:param node_s1: PgNode_s :param node_s2: PgNode_s :return: bool """ # Get a list with the parents of one node that are not mutually exclusive with at least one parent of the other # Here the inconsistent is detected if the list is empty (none of the actions can lead to these pair of nodes at # the same time) compatible_parents_s1 = [a for a in node_s1.parents for b in node_s2.parents if not a.is_mutex(b)] return not bool(compatible_parents_s1) def h_levelsum(self) -> int: """The sum of the level costs of the individual goals (admissible if goals independent) :return: int """ level_sum = 0 # for each goal in the problem, determine the level cost, then add them together remaining_goals = set(self.problem.goal) # remaining goals to find to determine the level cost # Search for all the goals simultaneously from level 0 for level in range(len(self.s_levels)+1): literals = set([node.literal for node in self.s_levels[level]]) # literals found in the current level match = literals & remaining_goals # set of goals found in literals (empty set if none) level_sum += len(match)*level # add cost of the found goals (0 if none) remaining_goals -= match # remove found goals from the remaining goals if not remaining_goals: # return when all goals are found return level_sum raise Exception("Goal not found") ``` ## Air Cargo Problem ``` from planning_agent.aimacode.logic import PropKB from planning_agent.aimacode.planning import Action from planning_agent.aimacode.search import Node, Problem from planning_agent.aimacode.utils import expr from planning_agent.lp_utils import FluentState, encode_state, decode_state class AirCargoProblem(Problem): def __init__(self, cargos, planes, airports, initial: FluentState, goal: list): """ :param cargos: list of str cargos in the problem :param planes: list of str planes in the problem :param airports: list of str airports in the problem :param initial: FluentState object positive and negative literal fluents (as expr) describing initial state :param goal: list of expr literal fluents required for goal test """ self.state_map = initial.pos + initial.neg self.initial_state_TF = encode_state(initial, self.state_map) Problem.__init__(self, self.initial_state_TF, goal=goal) self.cargos = cargos self.planes = planes self.airports = airports self.actions_list = self.get_actions() def get_actions(self): """ This method creates concrete actions (no variables) for all actions in the problem domain action schema and turns them into complete Action objects as defined in the aimacode.planning module. It is computationally expensive to call this method directly; however, it is called in the constructor and the results cached in the `actions_list` property. 
Returns: ---------- list<Action> list of Action objects """ def load_actions(): """Create all concrete Load actions and return a list :return: list of Action objects """ loads = [] for c in self.cargos: for p in self.planes: for a in self.airports: precond_pos = [expr("At({}, {})".format(c, a)), expr("At({}, {})".format(p, a))] precond_neg = [] effect_add = [expr("In({}, {})".format(c, p))] effect_rem = [expr("At({}, {})".format(c, a))] load = Action(expr("Load({}, {}, {})".format(c, p, a)), [precond_pos, precond_neg], [effect_add, effect_rem]) loads.append(load) return loads def unload_actions(): """Create all concrete Unload actions and return a list :return: list of Action objects """ unloads = [] for c in self.cargos: for p in self.planes: for a in self.airports: precond_pos = [expr("In({}, {})".format(c, p)), expr("At({}, {})".format(p, a))] precond_neg = [] effect_add = [expr("At({}, {})".format(c, a))] effect_rem = [expr("In({}, {})".format(c, p))] unload = Action(expr("Unload({}, {}, {})".format(c, p, a)), [precond_pos, precond_neg], [effect_add, effect_rem]) unloads.append(unload) return unloads def fly_actions(): """Create all concrete Fly actions and return a list :return: list of Action objects """ flys = [] for fr in self.airports: for to in self.airports: if fr != to: for p in self.planes: precond_pos = [expr("At({}, {})".format(p, fr)), ] precond_neg = [] effect_add = [expr("At({}, {})".format(p, to))] effect_rem = [expr("At({}, {})".format(p, fr))] fly = Action(expr("Fly({}, {}, {})".format(p, fr, to)), [precond_pos, precond_neg], [effect_add, effect_rem]) flys.append(fly) return flys return load_actions() + unload_actions() + fly_actions() def actions(self, state: str) -> list: """ Return the actions that can be executed in the given state. :param state: str state represented as T/F string of mapped fluents (state variables) e.g. 'FTTTFF' :return: list of Action objects """ possible_actions = [] kb = PropKB() kb.tell(decode_state(state, self.state_map).pos_sentence()) for action in self.actions_list: is_possible = True for clause in action.precond_pos: if clause not in kb.clauses: is_possible = False for clause in action.precond_neg: if clause in kb.clauses: is_possible = False if is_possible: possible_actions.append(action) return possible_actions def result(self, state: str, action: Action): """ Return the state that results from executing the given action in the given state. The action must be one of self.actions(state). 
:param state: state entering node :param action: Action applied :return: resulting state after action """ new_state = FluentState([], []) # Used the same implementation as cake example: old_state = decode_state(state, self.state_map) for fluent in old_state.pos: if fluent not in action.effect_rem: new_state.pos.append(fluent) for fluent in action.effect_add: if fluent not in new_state.pos: new_state.pos.append(fluent) for fluent in old_state.neg: if fluent not in action.effect_add: new_state.neg.append(fluent) for fluent in action.effect_rem: if fluent not in new_state.neg: new_state.neg.append(fluent) return encode_state(new_state, self.state_map) def goal_test(self, state: str) -> bool: """ Test the state to see if goal is reached :param state: str representing state :return: bool """ kb = PropKB() kb.tell(decode_state(state, self.state_map).pos_sentence()) for clause in self.goal: if clause not in kb.clauses: return False return True def h_1(self, node: Node): # note that this is not a true heuristic h_const = 1 return h_const def h_pg_levelsum(self, node: Node): """ This heuristic uses a planning graph representation of the problem state space to estimate the sum of all actions that must be carried out from the current state in order to satisfy each individual goal condition. """ # requires implemented PlanningGraph class pg = PlanningGraph(self, node.state) pg_levelsum = pg.h_levelsum() return pg_levelsum def h_ignore_preconditions(self, node: Node): """ This heuristic estimates the minimum number of actions that must be carried out from the current state in order to satisfy all of the goal conditions by ignoring the preconditions required for an action to be executed. """ # Note: We assume that the number of steps required to solve the relaxed ignore preconditions problem # is equal to the number of unsatisfied goals. 
# Thus no action results in multiple goals and no action undoes the effects of other actions kb = PropKB() kb.tell(decode_state(node.state, self.state_map).pos_sentence()) # Unsatisfied goals are the ones not found in the clauses of PropKB() for the current state count = len(set(self.goal) - set(kb.clauses)) # print("Current_state: ", kb.clauses, " Goal state: ", self.goal) return count ``` ## Scenarios ``` def air_cargo_p1() -> AirCargoProblem: cargos = ['C1', 'C2'] planes = ['P1', 'P2'] airports = ['JFK', 'SFO'] pos = [expr('At(C1, SFO)'), expr('At(C2, JFK)'), expr('At(P1, SFO)'), expr('At(P2, JFK)'), ] neg = [expr('At(C2, SFO)'), expr('In(C2, P1)'), expr('In(C2, P2)'), expr('At(C1, JFK)'), expr('In(C1, P1)'), expr('In(C1, P2)'), expr('At(P1, JFK)'), expr('At(P2, SFO)'), ] init = FluentState(pos, neg) goal = [expr('At(C1, JFK)'), expr('At(C2, SFO)'), ] return AirCargoProblem(cargos, planes, airports, init, goal) def air_cargo_p2() -> AirCargoProblem: cargos = ['C1', 'C2', 'C3'] planes = ['P1', 'P2', 'P3'] airports = ['SFO', 'JFK', 'ATL'] pos = [expr('At(C1, SFO)'), expr('At(C2, JFK)'), expr('At(C3, ATL)'), expr('At(P1, SFO)'), expr('At(P2, JFK)'), expr('At(P3, ATL)'), ] neg = [expr('At(C1, JFK)'), expr('At(C1, ATL)'), expr('At(C2, SFO)'), expr('At(C2, ATL)'), expr('At(C3, SFO)'), expr('At(C3, JFK)'), expr('In(C1, P1)'), expr('In(C1, P2)'), expr('In(C1, P3)'), expr('In(C2, P1)'), expr('In(C2, P2)'), expr('In(C2, P3)'), expr('In(C3, P1)'), expr('In(C3, P2)'), expr('In(C3, P3)'), expr('At(P1, JFK)'), expr('At(P1, ATL)'), expr('At(P2, SFO)'), expr('At(P2, ATL)'), expr('At(P3, SFO)'), expr('At(P3, JFK)'), ] init = FluentState(pos, neg) goal = [expr('At(C1, JFK)'), expr('At(C2, SFO)'), expr('At(C3, SFO)'), ] return AirCargoProblem(cargos, planes, airports, init, goal) def air_cargo_p3() -> AirCargoProblem: cargos = ['C1', 'C2', 'C3', 'C4'] planes = ['P1', 'P2'] airports = ['SFO', 'JFK', 'ATL', 'ORD'] pos = [expr('At(C1, SFO)'), expr('At(C2, JFK)'), expr('At(C3, ATL)'), expr('At(C4, ORD)'), expr('At(P1, SFO)'), expr('At(P2, JFK)'), ] neg = [expr('At(C1, JFK)'), expr('At(C1, ATL)'), expr('At(C1, ORD)'), expr('At(C2, SFO)'), expr('At(C2, ATL)'), expr('At(C2, ORD)'), expr('At(C3, JFK)'), expr('At(C3, SFO)'), expr('At(C3, ORD)'), expr('At(C4, JFK)'), expr('At(C4, SFO)'), expr('At(C4, ATL)'), expr('In(C1, P1)'), expr('In(C1, P2)'), expr('In(C2, P1)'), expr('In(C2, P2)'), expr('In(C3, P1)'), expr('In(C3, P2)'), expr('In(C4, P1)'), expr('In(C4, P2)'), expr('At(P1, JFK)'), expr('At(P1, ATL)'), expr('At(P1, ORD)'), expr('At(P2, SFO)'), expr('At(P2, ATL)'), expr('At(P2, ORD)'), ] init = FluentState(pos, neg) goal = [expr('At(C1, JFK)'), expr('At(C2, SFO)'), expr('At(C3, JFK)'), expr('At(C4, SFO)'), ] return AirCargoProblem(cargos, planes, airports, init, goal) ``` - Problem 1 initial state and goal: ``` Init(At(C1, SFO) ∧ At(C2, JFK) ∧ At(P1, SFO) ∧ At(P2, JFK) ∧ Cargo(C1) ∧ Cargo(C2) ∧ Plane(P1) ∧ Plane(P2) ∧ Airport(JFK) ∧ Airport(SFO)) Goal(At(C1, JFK) ∧ At(C2, SFO)) ``` - Problem 2 initial state and goal: ``` Init(At(C1, SFO) ∧ At(C2, JFK) ∧ At(C3, ATL) ∧ At(P1, SFO) ∧ At(P2, JFK) ∧ At(P3, ATL) ∧ Cargo(C1) ∧ Cargo(C2) ∧ Cargo(C3) ∧ Plane(P1) ∧ Plane(P2) ∧ Plane(P3) ∧ Airport(JFK) ∧ Airport(SFO) ∧ Airport(ATL)) Goal(At(C1, JFK) ∧ At(C2, SFO) ∧ At(C3, SFO)) ``` - Problem 3 initial state and goal: ``` Init(At(C1, SFO) ∧ At(C2, JFK) ∧ At(C3, ATL) ∧ At(C4, ORD) ∧ At(P1, SFO) ∧ At(P2, JFK) ∧ Cargo(C1) ∧ Cargo(C2) ∧ Cargo(C3) ∧ Cargo(C4) ∧ Plane(P1) ∧ Plane(P2) ∧ Airport(JFK) ∧ Airport(SFO) ∧ Airport(ATL) ∧ 
Airport(ORD)) Goal(At(C1, JFK) ∧ At(C3, JFK) ∧ At(C2, SFO) ∧ At(C4, SFO)) ``` ## Solving the problem ``` import argparse from timeit import default_timer as timer from planning_agent.aimacode.search import InstrumentedProblem from planning_agent.aimacode.search import (breadth_first_search, astar_search, breadth_first_tree_search, depth_first_graph_search, uniform_cost_search, greedy_best_first_graph_search, depth_limited_search, recursive_best_first_search) PROBLEMS = [["Air Cargo Problem 1", air_cargo_p1], ["Air Cargo Problem 2", air_cargo_p2], ["Air Cargo Problem 3", air_cargo_p3]] SEARCHES = [["breadth_first_search", breadth_first_search, ""], ['breadth_first_tree_search', breadth_first_tree_search, ""], ['depth_first_graph_search', depth_first_graph_search, ""], ['depth_limited_search', depth_limited_search, ""], ['uniform_cost_search', uniform_cost_search, ""], ['recursive_best_first_search', recursive_best_first_search, 'h_1'], ['greedy_best_first_graph_search', greedy_best_first_graph_search, 'h_1'], ['astar_search', astar_search, 'h_1'], ['astar_search', astar_search, 'h_ignore_preconditions'], ['astar_search', astar_search, 'h_pg_levelsum'], ] class PrintableProblem(InstrumentedProblem): """ InstrumentedProblem keeps track of stats during search, and this class modifies the print output of those statistics for air cargo problems """ def __repr__(self): return '{:^10d} {:^10d} {:^10d}'.format(self.succs, self.goal_tests, self.states) def show_solution(node, elapsed_time): print("Plan length: {} Time elapsed in seconds: {}".format(len(node.solution()), elapsed_time)) for action in node.solution(): print("{}{}".format(action.name, action.args)) def run_search(problem, search_function, parameter=None): start = timer() ip = PrintableProblem(problem) if parameter is not None: node = search_function(ip, parameter) else: node = search_function(ip) end = timer() print("\nExpansions Goal Tests New Nodes") print("{}\n".format(ip)) show_solution(node, end - start) print() def main(p_choices, s_choices): problems = [PROBLEMS[i-1] for i in map(int, p_choices)] searches = [SEARCHES[i-1] for i in map(int, s_choices)] for pname, p in problems: for sname, s, h in searches: hstring = h if not h else " with {}".format(h) print("\nSolving {} using {}{}...".format(pname, sname, hstring)) _p = p() _h = None if not h else getattr(_p, h) run_search(_p, s, _h) if __name__=="__main__": main([1,2,3],[1,9]) ```
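Since `argparse` is imported above but never used, a thin command-line front end is a natural extension of `main`. The sketch below is only an illustration — the `--problems`/`--searches` flag names are my own, not part of the original harness — and it simply forwards comma-separated indices into the existing `PROBLEMS` and `SEARCHES` tables.
```
# Hypothetical CLI wrapper; assumes PROBLEMS, SEARCHES and main() are defined as above.
def cli():
    parser = argparse.ArgumentParser(description='Solve air cargo planning problems with selected searches')
    parser.add_argument('--problems', default='1,2,3',
                        help='comma-separated problem indices, 1-%d' % len(PROBLEMS))
    parser.add_argument('--searches', default='1,9',
                        help='comma-separated search indices, 1-%d' % len(SEARCHES))
    args = parser.parse_args()
    # main() already converts the choices with map(int, ...), so strings are fine here
    main(args.problems.split(','), args.searches.split(','))

# cli()  # uncomment when running this module as a standalone script
```
Run as a script (under a hypothetical file name), `--problems 1 --searches 9` would solve only Problem 1 with A* and the ignore-preconditions heuristic, mirroring part of the hard-coded `main([1,2,3],[1,9])` call above.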
github_jupyter
``` import pandas as pd df = pd.read_csv('queryset_CNN.csv') print(df.shape) print(df.dtypes) preds = [] pred = [] for index, row in df.iterrows(): doc_id = row.doc_id author_id = row.author_id import ast authorList = ast.literal_eval(row.authorList) candidate = len(authorList) algo = "tfidf_svc" test = algo # change before run level = "word" iterations = 30 dropout = 0.5 samples = 3200 dimensions = 200 loc = authorList.index(author_id) printstate = (("doc_id = %s, candidate = %s, ") % (str(doc_id), str(candidate))) printstate += (("samples = %s, ") % (str(samples))) printstate += (("test = %s") % (str(test))) print("Current test: %s" % (str(printstate))) from sshtunnel import SSHTunnelForwarder with SSHTunnelForwarder(('144.214.121.15', 22), ssh_username='ninadt', ssh_password='Ninad123', remote_bind_address=('localhost', 3306), local_bind_address=('localhost', 3300)): import UpdateDB as db case = db.checkOldML(doc_id = doc_id, candidate = candidate, samples = samples, test = test, port = 3300) if case == False: print("Running: %12s" % (str(printstate))) import StyloML as Stylo (labels_index, train_acc, val_acc, samples) = Stylo.getResults( algo, doc_id = doc_id, authorList = authorList[:], samples = samples) (labels_index, testY, predY, samples) = Stylo.getTestResults( algo, labels_index = labels_index, doc_id = doc_id, authorList = authorList[:], samples = samples) loc = testY test_acc = predY[loc] test_bin = 0 if(predY.tolist().index(max(predY)) == testY): test_bin = 1 from sshtunnel import SSHTunnelForwarder with SSHTunnelForwarder(('144.214.121.15', 22), ssh_username='ninadt', ssh_password='Ninad123', remote_bind_address=('localhost', 3306), local_bind_address=('localhost', 3300)): import UpdateDB as db case = db.updateresultOldML(doc_id = doc_id, candidate = candidate, samples = samples, train_acc = train_acc, val_acc = val_acc, test_acc = test_acc, test_bin = test_bin, test = test, port = 3300) del Stylo import time time.sleep(10) from IPython.display import clear_output clear_output() else: print("Skipped: %12s" % (str(printstate))) # import matplotlib.pyplot as plt # # summarize history for accuracy # plt.plot(history.history['acc']) # plt.plot(history.history['val_acc']) # plt.title('model accuracy') # plt.ylabel('accuracy') # plt.xlabel('epoch') # plt.legend(['train', 'test'], loc='upper left') # plt.show() # # summarize history for loss # plt.plot(history.history['loss']) # plt.plot(history.history['val_loss']) # plt.title('model loss') # plt.ylabel('loss') # plt.xlabel('epoch') # plt.legend(['train', 'test'], loc='upper left') # plt.show() %tb ```
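The loop above computes a per-document `test_bin` flag (1 when the highest-scoring candidate is the true author) but only writes it to the remote database; the `preds`/`pred` lists are never filled. As a sketch — assuming you also append a local record such as `(doc_id, candidate, test_bin)` per document, which the original code does not do — attribution accuracy could be summarised by candidate-set size:
```
from collections import defaultdict

def attribution_accuracy(records):
    """records: iterable of (doc_id, candidate_set_size, test_bin) tuples collected during the loop."""
    overall = [bin_ for _, _, bin_ in records]
    by_candidate = defaultdict(list)
    for _, candidate, bin_ in records:
        by_candidate[candidate].append(bin_)
    print("Overall top-1 accuracy: %.3f (%d docs)" % (sum(overall) / len(overall), len(overall)))
    for candidate in sorted(by_candidate):
        bins = by_candidate[candidate]
        print("  %2d candidates: %.3f (%d docs)" % (candidate, sum(bins) / len(bins), len(bins)))

# Example with made-up records, just to show the expected shape of the input:
attribution_accuracy([(1, 5, 1), (2, 5, 0), (3, 10, 1)])
```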
github_jupyter
``` # -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # ``` # Natural Language Processing (NLP) with machine learning (ML) **Preprocessing of textual data** ### Download NLTK data - we need to do this only one time The download process can last longer (with GUI) and all data packages are bigger size of 3.3 GB Uncomment the *nltk.download()* line to download all! It open a new download window, which requires to click ! ``` import nltk # nltk.download() # nltk.download('punkt') # nltk.download('stopwords') # nltk.download('averaged_perceptron_tagger') # Part-of-Speech Tagging (POS) # nltk.download('tagsets') # nltk.download('maxent_ne_chunker') # Name Entity Recognition (NER) # nltk.download('words') ``` ### Tokenization ``` import string import re from nltk.tokenize import word_tokenize from nltk.corpus import stopwords # load data filename = 'data/metamorphosis_clean.txt' file = open(filename, 'rt') text = file.read() file.close() # split into words tokens = word_tokenize(text) # convert to lower case tokens = [w.lower() for w in tokens] # prepare regex for char filtering re_punc = re.compile('[%s]' % re.escape(string.punctuation)) # remove punctuation from each word stripped = [re_punc.sub('', w) for w in tokens] # remove remaining tokens that are not alphabetic words = [word for word in stripped if word.isalpha()] # filter out stop words stop_words = set(stopwords.words('english')) words = [w for w in words if not w in stop_words] print(words[:100]) ``` ### TF-IDF with TfidfVectorizer ``` import pandas as pd from sklearn.feature_extraction.text import TfidfVectorizer dataset = [ "I enjoy reading about Machine Learning and Machine Learning is my PhD subject", "I would enjoy a walk in the park", "I was reading in the library" ] vectorizer = TfidfVectorizer(use_idf=True) tfIdf = vectorizer.fit_transform(dataset) df = pd.DataFrame(tfIdf[0].T.todense(), index=vectorizer.get_feature_names(), columns=["TF-IDF"]) df = df.sort_values('TF-IDF', ascending=False) print (df.head(25)) ``` ### TF-IDF with TfidfTransformer ``` from sklearn.feature_extraction.text import TfidfTransformer from sklearn.feature_extraction.text import CountVectorizer transformer = TfidfTransformer(use_idf=True) countVectorizer = CountVectorizer() wordCount = countVectorizer.fit_transform(dataset) newTfIdf = transformer.fit_transform(wordCount) df = pd.DataFrame(newTfIdf[0].T.todense(), index=countVectorizer.get_feature_names(), columns=["TF-IDF"]) df = df.sort_values('TF-IDF', ascending=False) print (df.head(25)) ``` ### Cosine similarity URL: https://stackoverflow.com/questions/12118720/python-tf-idf-cosine-to-find-document-similarity ``` from sklearn.datasets import fetch_20newsgroups from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import linear_kernel # twenty dataset twenty = fetch_20newsgroups() tfidf = TfidfVectorizer().fit_transform(twenty.data) # cosine similarity cosine_similarities = linear_kernel(tfidf[0:1], tfidf).flatten() # top-5 related documents 
related_docs_indices = cosine_similarities.argsort()[:-5:-1] print(related_docs_indices) print(cosine_similarities[related_docs_indices]) # print the first result to check print(twenty.data[0]) print(twenty.data[958]) ``` ### Text classification URL https://towardsdatascience.com/machine-learning-nlp-text-classification-using-scikit-learn-python-and-nltk-c52b92a7c73a ``` import numpy as np from sklearn.datasets import fetch_20newsgroups from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.pipeline import Pipeline # twenty dataset twenty_train = fetch_20newsgroups(subset='train', shuffle=True) twenty_test = fetch_20newsgroups(subset='test', shuffle=True) print(twenty_train.target_names) # print("\n".join(twenty_train.data[0].split("\n")[:3])) ``` ### Multinomial Naive Bayes ``` from sklearn.naive_bayes import MultinomialNB # Bag-of-words count_vect = CountVectorizer() X_train_counts = count_vect.fit_transform(twenty_train.data) X_test_counts = count_vect.transform(twenty_test.data) # TF-IDF transformer = TfidfTransformer() X_train_tfidf = transformer.fit_transform(X_train_counts) X_test_tfidf = transformer.transform(X_test_counts) # Naive Bayes (NB) for text classification clf = MultinomialNB().fit(X_train_tfidf, twenty_train.target) # Performance of the model predicted = clf.predict(X_test_tfidf) np.mean(predicted == twenty_test.target) ``` ### Pipeline The above code with Multinomial Naive Bayes can be written more ellegant with scikit-learn pipeline. The code will be shorter and more reliable. ``` text_clf = Pipeline([('vect', CountVectorizer(stop_words='english')), ('tfidf', TfidfTransformer()), ('clf', MultinomialNB()), ]) text_clf = text_clf.fit(twenty_train.data, twenty_train.target) # Performance of the model predicted = text_clf.predict(twenty_test.data) np.mean(predicted == twenty_test.target) ``` ### GridSearchCV with Naive Bayes We want and need to optimize the pipeline by hyper-parameter tunning. We may get some better classification results. ``` from sklearn.model_selection import GridSearchCV parameters = {'vect__ngram_range': [(1, 1), (1, 2)], 'tfidf__use_idf': (True, False), 'clf__alpha': (1e-2, 1e-3), } gs_clf = GridSearchCV(text_clf, parameters, n_jobs=-1) gs_clf = gs_clf.fit(twenty_train.data, twenty_train.target) print(gs_clf.best_score_) print(gs_clf.best_params_) ``` ### SGDClassifier We are trying another classifier called SGDClassifier instead of the previous Multinomial Naive Bayes. Let see if this new classifier acts better incomparison with and without optimization. ``` from sklearn.linear_model import SGDClassifier text_clf_svm = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf-svm', SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3, random_state=42)), ]) text_clf_svm = text_clf_svm.fit(twenty_train.data, twenty_train.target) # Performance of the model predicted_svm = text_clf_svm.predict(twenty_test.data) np.mean(predicted_svm == twenty_test.target) ``` ### GridSearchCV with SVM Here a more classifiers, e.g., SVM. We are going to try SVM with Grid Search optimization. 
``` from sklearn.model_selection import GridSearchCV parameters_svm = {'vect__ngram_range': [(1, 1), (1, 2)], 'tfidf__use_idf': (True, False), 'clf-svm__alpha': (1e-2, 1e-3), } gs_clf_svm = GridSearchCV(text_clf_svm, parameters_svm, n_jobs=-1) gs_clf_svm = gs_clf_svm.fit(twenty_train.data, twenty_train.target) print(gs_clf_svm.best_score_) print(gs_clf_svm.best_params_) ``` ### Stemming Stemming can improve classifier results too. Let's see if it works in our example with Multinomial Naive Bayes. ``` from nltk.stem.snowball import SnowballStemmer stemmer = SnowballStemmer("english", ignore_stopwords=True) class StemmedCountVectorizer(CountVectorizer): def build_analyzer(self): analyzer = super(StemmedCountVectorizer, self).build_analyzer() return lambda doc: ([stemmer.stem(w) for w in analyzer(doc)]) stemmed_count_vect = StemmedCountVectorizer(stop_words='english') text_mnb_stemmed = Pipeline([('vect', stemmed_count_vect), ('tfidf', TfidfTransformer()), ('mnb', MultinomialNB(fit_prior=False))]) text_mnb_stemmed = text_mnb_stemmed.fit(twenty_train.data, twenty_train.target) predicted_mnb_stemmed = text_mnb_stemmed.predict(twenty_test.data) np.mean(predicted_mnb_stemmed == twenty_test.target) ```
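Mean accuracy is a single number; per-class precision, recall and F1 show where the stemmed pipeline actually helps or hurts. A short follow-up cell (assuming `twenty_test` and `predicted_mnb_stemmed` from the cells above) might look like this:
```
from sklearn.metrics import classification_report, confusion_matrix

# Per-class precision/recall/F1 for the stemmed Naive Bayes pipeline
print(classification_report(twenty_test.target, predicted_mnb_stemmed,
                            target_names=twenty_test.target_names))

# The confusion matrix shows which newsgroups get mixed up with each other
print(confusion_matrix(twenty_test.target, predicted_mnb_stemmed))
```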
github_jupyter
# Boltzmann Machine ## Downloading the dataset ### ML-100K ``` # !wget "http://files.grouplens.org/datasets/movielens/ml-100k.zip" # !unzip ml-100k.zip # !ls ``` ### ML-1M ``` # !wget "http://files.grouplens.org/datasets/movielens/ml-1m.zip" # !unzip ml-1m.zip # !ls ``` ## Importing the libraries ``` import numpy as np import pandas as pd import torch import torch.nn as nn import torch.nn.parallel import torch.optim as optim import torch.utils.data from torch.autograd import Variable ``` ## Importing the dataset ``` # We won't be using this dataset. movies = pd.read_csv('ml-1m/movies.dat', sep = '::', header = None, engine = 'python', encoding = 'latin-1') users = pd.read_csv('ml-1m/users.dat', sep = '::', header = None, engine = 'python', encoding = 'latin-1') ratings = pd.read_csv('ml-1m/ratings.dat', sep = '::', header = None, engine = 'python', encoding = 'latin-1') ``` ## Preparing the training set and the test set ``` training_set = pd.read_csv('ml-100k/u1.base', delimiter = '\t') training_set = np.array(training_set, dtype = 'int') test_set = pd.read_csv('ml-100k/u1.test', delimiter = '\t') test_set = np.array(test_set, dtype = 'int') ``` ## Getting the number of users and movies ``` nb_users = int(max(max(training_set[:,0]), max(test_set[:,0]))) nb_movies = int(max(max(training_set[:,1]), max(test_set[:,1]))) print(nb_users,'|',nb_movies) ``` ## Converting the data into an array with users in lines and movies in columns ``` def convert(data): new_data = [] for id_users in range(1, nb_users + 1): id_movies = data[:,1][data[:,0] == id_users] id_ratings = data[:,2][data[:,0] == id_users] ratings = np.zeros(nb_movies) ratings[id_movies - 1] = id_ratings new_data.append(list(ratings)) return new_data training_set = convert(training_set) test_set = convert(test_set) ``` ## Converting the data into Torch Tensors ``` training_set = torch.FloatTensor(training_set) test_set = torch.FloatTensor(test_set) ``` ## Converting the ratings into binary ratings 1 (Liked) or 0 (Not Liked) ``` training_set[training_set == 0] = -1 training_set[training_set == 1] = 0 training_set[training_set == 2] = 0 training_set[training_set >= 3] = 1 test_set[test_set == 0] = -1 test_set[test_set == 1 ] = 0 test_set[test_set == 2] = 0 test_set[test_set >= 3] = 1 ``` ## Creating the architecture of the Neural Network ``` class RBM(): def __init__(self, nv, nh): self.W = torch.randn(nh, nv) self.a = torch.randn(1, nh) self.b = torch.randn(1, nv) def sample_h(self, x): wx = torch.mm(x, self.W.t()) activation = wx + self.a.expand_as(wx) p_h_given_v = torch.sigmoid(activation) return p_h_given_v, torch.bernoulli(p_h_given_v) def sample_v(self, y): wy = torch.mm(y, self.W) activation = wy + self.b.expand_as(wy) p_v_given_h = torch.sigmoid(activation) return p_v_given_h, torch.bernoulli(p_v_given_h) def train(self, v0, vk, ph0, phk): self.W += (torch.mm(v0.t(), ph0) - torch.mm(vk.t(), phk)).t() self.b += torch.sum((v0 - vk), 0) self.a += torch.sum((ph0 - phk), 0) nv = len(training_set[0]) nh = 100 batch_size = 100 rbm = RBM(nv, nh) ``` ## Training the RBM ``` nb_epoch = 10 for epoch in range(1, nb_epoch + 1): train_loss = 0 train_rmse = 0 s = 0. 
for id_user in range(0, nb_users - batch_size, batch_size): vk = training_set[id_user:id_user+batch_size] v0 = training_set[id_user:id_user+batch_size] ph0,_ = rbm.sample_h(v0) for k in range(10): _,hk = rbm.sample_h(vk) _,vk = rbm.sample_v(hk) vk[v0<0] = v0[v0<0] phk,_ = rbm.sample_h(vk) rbm.train(v0, vk, ph0, phk) train_loss += torch.mean(torch.abs(v0[v0>=0] - vk[v0>=0])) train_rmse += torch.sqrt(torch.mean((v0[v0 >= 0] - vk[v0 >= 0])**2)) s += 1. print('epoch : {} | Loss : {} | RMSE : {}'.format(epoch, train_loss/s, train_rmse/s)) ``` ## Testing the RBM ``` test_loss = 0 test_rmse = 0 s = 0. for id_user in range(nb_users): v = training_set[id_user:id_user+1] vt = test_set[id_user:id_user+1] if len(vt[vt>=0]) > 0: _,h = rbm.sample_h(v) _,v = rbm.sample_v(h) test_loss += torch.mean(torch.abs(vt[vt>=0] - v[vt>=0])) test_rmse += torch.sqrt(torch.mean((vt[vt>=0] - v[vt>=0])**2)) s += 1. print('loss : {} | RMSE : {}'.format(test_loss/s, test_rmse/s)) ```
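To make the trained model concrete, here is a small sketch (not part of the original notebook) that turns the RBM into a recommender for a single user: reconstruct the visible layer once and rank the movies that user has not rated. `user_id` and `top_k` are arbitrary illustration values.
```
user_id = 0      # arbitrary user chosen for illustration
top_k = 10
v = training_set[user_id:user_id + 1]   # 1 x nb_movies input vector for this user
p_h, _ = rbm.sample_h(v)                # hidden unit probabilities given the input
p_v, _ = rbm.sample_v(p_h)              # reconstructed probability of liking each movie
scores = p_v[0].clone()
scores[v[0] >= 0] = -1.0                # mask movies the user has already rated (-1 marks unrated)
_, recommended = torch.topk(scores, top_k)
print('Top-{} movie indices recommended for user {}: {}'.format(top_k, user_id, recommended.tolist()))
```
Because the `movies.dat` metadata loaded earlier comes from ML-1M while the ratings used here are ML-100K, the sketch prints movie indices rather than titles.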
github_jupyter
<div style='background: #FF7B47; padding: 10px; border: thin solid darblue; border-radius: 5px; margin-bottom: 2vh'> # Session 01 - Notebook Like most session notebooks in this course, this notebook is divided into two parts. Part one is a 'manual' that will allow you to code along with the new code that we introduce at the beginning of each session. The second part is the actual lab/assignment part, where you will work through a few practical tasks and write small but useful programs. <div style='background: #FF7B47; padding: 10px; border: thin solid darblue; border-radius: 5px'> ## Part 1 - Manual <div style='background: lightsalmon; padding: 10px; border: thin solid darblue; border-radius: 5px'> A.1 - "hello, world!" ``` print('Hello, world!') ``` <div style='background: lightsalmon; padding: 10px; border: thin solid darblue; border-radius: 5px'> A.2 - basic datatypes - strings and numeric variables ``` # your code here print(2) print(2+4) print(2**4) my_var = 2 print(my_var*2) ``` <div style='background: lightsalmon; padding: 10px; border: thin solid darblue; border-radius: 5px'> A.3 - basic operations ``` # your code here print(my_var *2) print(my_var **4) ``` <div style='background: lightsalmon; padding: 10px; border: thin solid darblue; border-radius: 5px'> A.4 - advanced data types to store collections of data ``` # your code here - lists # your code here - dictionaries my_dict = { "brand": "Ford", "model": "Mustang", "year": 1964, "example" : {'test':'here', 'test_2': 'here_2'} } print(my_dict['brand']) print(my_dict['example']) print(my_dict['example']['test']) ``` <div style='background: lightsalmon; padding: 10px; border: thin solid darblue; border-radius: 5px'> A.5 - for loops ``` # your code here fruits = ['apple', 'banana', 'strawberry', 'peach'] # we are iterating over each element in the data structure for fruit in fruits: print(fruit) fruits_in_fridge = { 'apple': 2, 'banana': 3, 'strawberry':10, 'peach': 4 } for key in fruits_in_fridge: print(key) # printing the keys print(fruits_in_fridge[key]) ``` <div style='background: lightsalmon; padding: 10px; border: thin solid darblue; border-radius: 5px'> A.6 - Python If ... Else ``` # your code here a = 33 b = 33 if b > a: print("b is greater than a") elif a == b: print("a and b are equal") else: print("a is greater than b") ``` <div style='background: lightsalmon; padding: 10px; border: thin solid darblue; border-radius: 5px'> A.6 - Functions ``` # Your code here def say_hello_to(name): print("hello, " + name) say_hello_to('Nathan') list_of_names = ['Megan', 'Robert', 'Jermain', 'Angela', 'Amr', 'Anthony', 'Rex', 'Nathan'] for classmate in list_of_names: say_hello_to(classmate) ``` <div style='background: #6A9EB4; padding: 10px; border: thin solid darblue; border-radius: 5px'> ## Part 2 - Lab During today's lab you will write code that will help the College to perform the house lottery more efficiently and assist the house administrations in a variety of tasks. <div style='background: #ADCAD6; padding: 10px; border: thin solid darblue; border-radius: 5px'> ## Task #1 - automatize the house lottery In the precirculated template folder, you will find the file students.csv with all rising sophomores that will enter the house lottery, i.e. they will get assigned to one of the twelve undergraduate houses. So far, the college has done this task manually but they hope that you can help them to automtize that process. Please load the csv and add another column 'house_id'. 
Pyhton's csv package will come in handy to load the csv file and treat each row as a list. Having loaded the file, add a random house id to each student and save that information in a new csv file. You might find the python package 'random' quite useful to automatize the lottery process. We've imported the package for you and provided an example. <div style='background: #ADCAD6; padding: 10px; border: thin solid darblue; border-radius: 5px'> Examples and precirculated code: ``` # house ids lookup tables house_to_id = { 'Adams House': 0, 'Cabot House': 1, 'Currier House' : 2, 'Dunster House': 3, 'Eliot House': 4, 'Kirkland House': 5, 'Leverett House': 6, 'Lowell House': 7, 'Mather House': 8, 'Pforzheimer House':9, 'Quincy House': 10, 'Winthrop House': 11 } id_to_house = { 0: 'Adams House', 1: 'Cabot House', 2: 'Currier House', 3: 'Dunster House', 4: 'Eliot House', 5: 'Kirkland House', 6: 'Leverett House', 7: 'Lowell House', 8: 'Mather House', 9: 'Pforzheimer House', 10: 'Quincy House', 11: 'Winthrop House' } # importing useful python packages import random import csv # some example code snippets how to load a csv file and how to write into one # read file_read = open("data/students.csv", "r") reader = csv.reader(file_read) for row in reader: print(row) break # breaking after first element feel free to check out the entire data stucture file_read.close() # write - notice that the file doesn't have to exist beforehand! csv write will create the file automatically, which is very useful! file_write = open('students_with_house.csv', 'w', newline='') writer = csv.writer(file_write) # we just write one row here. It might be useful to put this line into a loop when automatizing things writer.writerow(['first_name', 'last_name', 'HUID','email', 'house_id']) file_write.close() # example - generate a random integer between 1 and 10. example_random = random.randint(1,10) print(example_random) ``` <div style='background: #ADCAD6; padding: 10px; border: thin solid darblue; border-radius: 5px'> Your turn - load the csv file, create a random number for each student between 0-11 and store all students in a new csv file with their respective house assignments. A for loop might come in handy. ``` # your code here file_read = open("data/students.csv", "r") reader = csv.reader(file_read) # write - notice that the file doesn't have to exist beforehand! csv write will create the file automatically, which is very useful! file_write = open('students_with_house.csv', 'w', newline='') writer = csv.writer(file_write) for row in reader: student = row house_tmp = random.randint(0,11) student.append(house_tmp) writer.writerow(student) file_write.close() ``` <div style='background: #ADCAD6; padding: 10px; border: thin solid darblue; border-radius: 5px'> Write a small program that makes sure that you've successfully created and populated a csv with all students and their assigned houses. ``` # your code here file_test = open('students_with_house.csv', 'r') reader = csv.reader(file_test) for row in reader: print(row) break ``` <div style='background: #ADCAD6; padding: 10px; border: thin solid darblue; border-radius: 5px'> ## Task #2 - generate a file for a house on demand OK, you've helped the college out with the lottery but now the house administrators are struggling a bit because they have all 2000 students in one file but only care about the students that were assigned to their particular house. Write a small programm that solves that task on demand and generates a csv for them with only their students. 
You can write a program that does this task on demand for a given house, or you can generate a csv for each house in advance. ``` # your code here house = 'Adams House' house_id = house_to_id['Adams House'] file_read = open('students_with_house.csv', 'r') reader = csv.reader(file_read) file_write = open('adams_students.csv', 'w', newline='') writer = csv.writer(file_write) for row in reader: if int(row[4]) == house_id: writer.writerow(row) file_write.close() def make_house_file(house): house_id = house_to_id[house] file_read = open('students_with_house.csv', 'r') reader = csv.reader(file_read) file_write = open(house + '_students.csv', 'w', newline='') writer = csv.writer(file_write) for row in reader: if int(row[4]) == house_id: writer.writerow(row) file_write.close() for house_name in house_to_id: make_house_file(house_name) ``` <div style='background: #CBE0A4; padding: 10px; border: thin solid darblue; border-radius: 5px'> ## Bonus Tasks 1. calculate vacant rooms per house 2. write a program that computes the number of students assigned per house in a given csv 3. write a function that checks whether there are problems with the numbers of students assigned to each house 4. write code that assigns students randomly but in such a way that there are no capacity issues. <div style='background: #CBE0A4; padding: 10px; border: thin solid darblue; border-radius: 5px'> Some house administrators have complaned that the list of students is too long to accomodate all new sophomores assigned to their houses. Since some houses are bigger and others are smaller, we cannot simply generate integers and get away with the randomly generated number of students in each house. Rather, we have to check more carefolly whether there is still capacity. Below, find two useful dictionaries hat should help you to solve this task. ``` # bonus is house with exact capacities house_capacity = { 'Adams House': 411, 'Cabot House': 362, 'Currier House' : 356, 'Dunster House': 428, 'Eliot House': 450, 'Kirkland House': 400, 'Leverett House': 480, 'Lowell House': 450, 'Mather House': 426, 'Pforzheimer House':360, 'Quincy House': 420, 'Winthrop House': 500 } # number of occupied rooms after seniors have left house_occupied = { 'Adams House': 236, 'Cabot House': 213, 'Currier House' : 217, 'Dunster House': 296, 'Eliot House': 288, 'Kirkland House': 224, 'Leverett House': 233, 'Lowell House': 242, 'Mather House': 217, 'Pforzheimer House':195, 'Quincy House': 253, 'Winthrop House': 310 } house_names = [] for house in house_capacity: house_names.append(house) ``` <div style='background: #CBE0A4; padding: 10px; border: thin solid darblue; border-radius: 5px'>Let's start by writing a small program that helps us to calculate the vacant rooms for each house. Try to use a dictionary structure that contains all information for each house. Feel free to also write a few lines that check how many vacant rooms there are in total. 
``` vacant_rooms = {} # your code here for house in house_names: vacant_rooms[house] = house_capacity[house] - house_occupied[house] # your code ends here print(vacant_rooms) # your code here # take each house name # add their values total = 0 for house in house_names: total = total + vacant_rooms[house] # total += vacant_rooms[house] print(total) ``` <div style='background: #CBE0A4; padding: 10px; border: thin solid darblue; border-radius: 5px'>Let's now write a small function that calculates the number of students assigned per house with our old method and returns a dictionary with that information ``` helper_dict = { "A": 222, "B": 123 } def calculate_students_per_house(filename): # helper dict helper_dict = { 'Adams House': 0, 'Cabot House': 0, 'Currier House' : 0, 'Dunster House': 0, 'Eliot House': 0, 'Kirkland House': 0, 'Leverett House': 0, 'Lowell House': 0, 'Mather House': 0, 'Pforzheimer House':0, 'Quincy House': 0, 'Winthrop House': 0 } # your code here file_read = open(filename, 'r') reader = csv.reader(file_read) for row in reader: house_id = int(row[4]) house_name = id_to_house[house_id] helper_dict[house_name] = helper_dict[house_name] + 1 # your code ends here return helper_dict assigned_students_random = calculate_students_per_house('students_with_house.csv') print(assigned_students_random) ``` <div style='background: #CBE0A4; padding: 10px; border: thin solid darblue; border-radius: 5px'>Next, let's check by how much we were off for each house with our random approach. ``` def house_assignment_check(assignements_per_house_dict): # your code here for house in house_names: difference = vacant_rooms[house] - assignements_per_house_dict[house] if difference < 0: print(f'there is a problem with {house}. We have assigned {abs(difference)} too many students') else: print(f'there is no problemwith {house}') house_assignment_check(assigned_students_random) ``` <div style='background: #CBE0A4; padding: 10px; border: thin solid darblue; border-radius: 5px'>Finally, let's write a function that assignes houses more carefully. We can still generate random integers to assign a house, but we need to check whether that house still has capacity. For that reason, please create a function called assign_house() that generates not only a random number, but also checks whether that number is valid, i.e. if that house still has capacity. If there's no capacity, that function should call itself again until it generates a house (id) that still has capacity. ``` vacant_rooms = {'Adams House': 175, 'Cabot House': 149, 'Currier House': 139, 'Dunster House': 132, 'Eliot House': 162, 'Kirkland House': 176, 'Leverett House': 247, 'Lowell House': 208, 'Mather House': 209, 'Pforzheimer House': 165, 'Quincy House': 167, 'Winthrop House': 190} # solution def assign_house(): house_id = random.randint(0,11) house_name = id_to_house[house_id] if vacant_rooms[house_name] > 0: vacant_rooms[house_name] -= 1 return house_id else: return assign_house() # next, load the students.csv file, read it row by row and use the "assign_house()" function to generate a house for each student. # your code here # your code here file_read = open("data/students.csv", "r") reader = csv.reader(file_read) # write - notice that the file doesn't have to exist beforehand! csv write will create the file automatically, which is very useful! 
file_write = open('students_with_house_correct_capacity.csv', 'w', newline='') writer = csv.writer(file_write) for row in reader: student = row correct_house = assign_house() student.append(correct_house) writer.writerow(student) file_write.close() # finally, check whether your new solution is working flawlessly # your code here file_read = open("students_with_house_correct_capacity.csv", "r") reader = csv.reader(file_read) for row in reader: print(row) break assigned_students_per_house = calculate_students_per_house('students_with_house_correct_capacity.csv') assigned_students_per_house vacant_rooms = {'Adams House': 175, 'Cabot House': 149, 'Currier House': 139, 'Dunster House': 132, 'Eliot House': 162, 'Kirkland House': 176, 'Leverett House': 247, 'Lowell House': 208, 'Mather House': 209, 'Pforzheimer House': 165, 'Quincy House': 167, 'Winthrop House': 190} house_assignment_check(assigned_students_per_house) ```
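One remark on the design: the recursive `assign_house()` samples all twelve house ids and retries whenever the drawn house is full, so it can recurse many times once capacity gets tight. A non-recursive variant (just a sketch, not the required solution) samples directly from the houses that still have vacancies, using the same `vacant_rooms` and `house_to_id` dictionaries:
```
# Alternative to the recursive assign_house(): sample only among houses with vacancies.
def assign_house_iterative():
    available = [house for house, rooms in vacant_rooms.items() if rooms > 0]
    if not available:
        raise ValueError('No vacant rooms left in any house')
    house_name = random.choice(available)   # uniform draw over houses that still have space
    vacant_rooms[house_name] -= 1           # claim one room
    return house_to_id[house_name]

print(assign_house_iterative())
```
Both versions end up drawing uniformly among houses with remaining capacity; the iterative one simply avoids the retry recursion.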
github_jupyter
# Autobatching log-densities example [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.sandbox.google.com/github/google/jax/blob/master/docs/notebooks/vmapped_log_probs.ipynb) This notebook demonstrates a simple Bayesian inference example where autobatching makes user code easier to write, easier to read, and less likely to include bugs. Inspired by a notebook by @davmre. ``` import functools import itertools import re import sys import time from matplotlib.pyplot import * import jax from jax import lax import jax.numpy as jnp import jax.scipy as jsp from jax import random import numpy as np import scipy as sp ``` ## Generate a fake binary classification dataset ``` np.random.seed(10009) num_features = 10 num_points = 100 true_beta = np.random.randn(num_features).astype(jnp.float32) all_x = np.random.randn(num_points, num_features).astype(jnp.float32) y = (np.random.rand(num_points) < sp.special.expit(all_x.dot(true_beta))).astype(jnp.int32) y ``` ## Write the log-joint function for the model We'll write a non-batched version, a manually batched version, and an autobatched version. ### Non-batched ``` def log_joint(beta): result = 0. # Note that no `axis` parameter is provided to `jnp.sum`. result = result + jnp.sum(jsp.stats.norm.logpdf(beta, loc=0., scale=1.)) result = result + jnp.sum(-jnp.log(1 + jnp.exp(-(2*y-1) * jnp.dot(all_x, beta)))) return result log_joint(np.random.randn(num_features)) # This doesn't work, because we didn't write `log_prob()` to handle batching. try: batch_size = 10 batched_test_beta = np.random.randn(batch_size, num_features) log_joint(np.random.randn(batch_size, num_features)) except ValueError as e: print("Caught expected exception " + str(e)) ``` ### Manually batched ``` def batched_log_joint(beta): result = 0. # Here (and below) `sum` needs an `axis` parameter. At best, forgetting to set axis # or setting it incorrectly yields an error; at worst, it silently changes the # semantics of the model. result = result + jnp.sum(jsp.stats.norm.logpdf(beta, loc=0., scale=1.), axis=-1) # Note the multiple transposes. Getting this right is not rocket science, # but it's also not totally mindless. (I didn't get it right on the first # try.) result = result + jnp.sum(-jnp.log(1 + jnp.exp(-(2*y-1) * jnp.dot(all_x, beta.T).T)), axis=-1) return result batch_size = 10 batched_test_beta = np.random.randn(batch_size, num_features) batched_log_joint(batched_test_beta) ``` ### Autobatched with vmap It just works. ``` vmap_batched_log_joint = jax.vmap(log_joint) vmap_batched_log_joint(batched_test_beta) ``` ## Self-contained variational inference example A little code is copied from above. ### Set up the (batched) log-joint function ``` @jax.jit def log_joint(beta): result = 0. # Note that no `axis` parameter is provided to `jnp.sum`. 
result = result + jnp.sum(jsp.stats.norm.logpdf(beta, loc=0., scale=10.)) result = result + jnp.sum(-jnp.log(1 + jnp.exp(-(2*y-1) * jnp.dot(all_x, beta)))) return result batched_log_joint = jax.jit(jax.vmap(log_joint)) ``` ### Define the ELBO and its gradient ``` def elbo(beta_loc, beta_log_scale, epsilon): beta_sample = beta_loc + jnp.exp(beta_log_scale) * epsilon return jnp.mean(batched_log_joint(beta_sample), 0) + jnp.sum(beta_log_scale - 0.5 * np.log(2*np.pi)) elbo = jax.jit(elbo) elbo_val_and_grad = jax.jit(jax.value_and_grad(elbo, argnums=(0, 1))) ``` ### Optimize the ELBO using SGD ``` def normal_sample(key, shape): """Convenience function for quasi-stateful RNG.""" new_key, sub_key = random.split(key) return new_key, random.normal(sub_key, shape) normal_sample = jax.jit(normal_sample, static_argnums=(1,)) key = random.PRNGKey(10003) beta_loc = jnp.zeros(num_features, jnp.float32) beta_log_scale = jnp.zeros(num_features, jnp.float32) step_size = 0.01 batch_size = 128 epsilon_shape = (batch_size, num_features) for i in range(1000): key, epsilon = normal_sample(key, epsilon_shape) elbo_val, (beta_loc_grad, beta_log_scale_grad) = elbo_val_and_grad( beta_loc, beta_log_scale, epsilon) beta_loc += step_size * beta_loc_grad beta_log_scale += step_size * beta_log_scale_grad if i % 10 == 0: print('{}\t{}'.format(i, elbo_val)) ``` ### Display the results Coverage isn't quite as good as we might like, but it's not bad, and nobody said variational inference was exact. ``` figure(figsize=(7, 7)) plot(true_beta, beta_loc, '.', label='Approximated Posterior Means') plot(true_beta, beta_loc + 2*jnp.exp(beta_log_scale), 'r.', label='Approximated Posterior $2\sigma$ Error Bars') plot(true_beta, beta_loc - 2*jnp.exp(beta_log_scale), 'r.') plot_scale = 3 plot([-plot_scale, plot_scale], [-plot_scale, plot_scale], 'k') xlabel('True beta') ylabel('Estimated beta') legend(loc='best') ```
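One further speed-up worth noting: the loop above runs the jitted value-and-gradient computation and then applies the parameter updates in plain Python. A sketch of folding one whole SGD step into a single jitted function — the names `sgd_step`, `beta_loc2` and `beta_log_scale2` are mine, not from the original notebook — looks like this:
```
# Compile one full ascent step (gradient computation + updates) into a single XLA call.
@jax.jit
def sgd_step(beta_loc, beta_log_scale, epsilon):
    elbo_val, (g_loc, g_log_scale) = elbo_val_and_grad(beta_loc, beta_log_scale, epsilon)
    return beta_loc + step_size * g_loc, beta_log_scale + step_size * g_log_scale, elbo_val

key = random.PRNGKey(10003)
beta_loc2 = jnp.zeros(num_features, jnp.float32)
beta_log_scale2 = jnp.zeros(num_features, jnp.float32)
for i in range(100):
    key, epsilon = normal_sample(key, epsilon_shape)
    beta_loc2, beta_log_scale2, elbo_val = sgd_step(beta_loc2, beta_log_scale2, epsilon)
print(elbo_val)
```
Because `step_size` is a Python float, it gets baked into the compiled step as a constant; pass it as an argument instead if you want to anneal it.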
github_jupyter
<a href="https://colab.research.google.com/github/agungsantoso/deep-learning-v2-pytorch/blob/master/intro-to-pytorch/Part%201%20-%20Tensors%20in%20PyTorch%20(Exercises).ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Introduction to Deep Learning with PyTorch In this notebook, you'll get introduced to [PyTorch](http://pytorch.org/), a framework for building and training neural networks. PyTorch in a lot of ways behaves like the arrays you love from Numpy. These Numpy arrays, after all, are just tensors. PyTorch takes these tensors and makes it simple to move them to GPUs for the faster processing needed when training neural networks. It also provides a module that automatically calculates gradients (for backpropagation!) and another module specifically for building neural networks. All together, PyTorch ends up being more coherent with Python and the Numpy/Scipy stack compared to TensorFlow and other frameworks. ## Neural Networks Deep Learning is based on artificial neural networks which have been around in some form since the late 1950s. The networks are built from individual parts approximating neurons, typically called units or simply "neurons." Each unit has some number of weighted inputs. These weighted inputs are summed together (a linear combination) then passed through an activation function to get the unit's output. <img src="https://github.com/agungsantoso/deep-learning-v2-pytorch/blob/master/intro-to-pytorch/assets/simple_neuron.png?raw=1" width=400px> Mathematically this looks like: $$ \begin{align} y &= f(w_1 x_1 + w_2 x_2 + b) \\ y &= f\left(\sum_i w_i x_i +b \right) \end{align} $$ With vectors this is the dot/inner product of two vectors: $$ h = \begin{bmatrix} x_1 \, x_2 \cdots x_n \end{bmatrix} \cdot \begin{bmatrix} w_1 \\ w_2 \\ \vdots \\ w_n \end{bmatrix} $$ ## Tensors It turns out neural network computations are just a bunch of linear algebra operations on *tensors*, a generalization of matrices. A vector is a 1-dimensional tensor, a matrix is a 2-dimensional tensor, an array with three indices is a 3-dimensional tensor (RGB color images for example). The fundamental data structure for neural networks are tensors and PyTorch (as well as pretty much every other deep learning framework) is built around tensors. <img src="https://github.com/agungsantoso/deep-learning-v2-pytorch/blob/master/intro-to-pytorch/assets/tensor_examples.svg?raw=1" width=600px> With the basics covered, it's time to explore how we can use PyTorch to build a simple neural network. 
``` # http://pytorch.org/ from os.path import exists from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag()) cuda_output = !ldconfig -p|grep cudart.so|sed -e 's/.*\.\([0-9]*\)\.\([0-9]*\)$/cu\1\2/' accelerator = cuda_output[0] if exists('/dev/nvidia0') else 'cpu' !pip install -q http://download.pytorch.org/whl/{accelerator}/torch-0.4.1-{platform}-linux_x86_64.whl torchvision import torch # First, import PyTorch import torch def activation(x): """ Sigmoid activation function Arguments --------- x: torch.Tensor """ return 1/(1+torch.exp(-x)) ### Generate some data torch.manual_seed(7) # Set the random seed so things are predictable # Features are 3 random normal variables features = torch.randn((1, 5)) # True weights for our data, random normal variables again weights = torch.randn_like(features) # and a true bias term bias = torch.randn((1, 1)) ``` Above I generated data we can use to get the output of our simple network. This is all just random for now, going forward we'll start using normal data. Going through each relevant line: `features = torch.randn((1, 5))` creates a tensor with shape `(1, 5)`, one row and five columns, that contains values randomly distributed according to the normal distribution with a mean of zero and standard deviation of one. `weights = torch.randn_like(features)` creates another tensor with the same shape as `features`, again containing values from a normal distribution. Finally, `bias = torch.randn((1, 1))` creates a single value from a normal distribution. PyTorch tensors can be added, multiplied, subtracted, etc, just like Numpy arrays. In general, you'll use PyTorch tensors pretty much the same way you'd use Numpy arrays. They come with some nice benefits though such as GPU acceleration which we'll get to later. For now, use the generated data to calculate the output of this simple single layer network. > **Exercise**: Calculate the output of the network with input features `features`, weights `weights`, and bias `bias`. Similar to Numpy, PyTorch has a [`torch.sum()`](https://pytorch.org/docs/stable/torch.html#torch.sum) function, as well as a `.sum()` method on tensors, for taking sums. Use the function `activation` defined above as the activation function. ``` ## Calculate the output of this network using the weights and bias tensors y = activation(torch.sum(features * weights) + bias) ``` You can do the multiplication and sum in the same operation using a matrix multiplication. In general, you'll want to use matrix multiplications since they are more efficient and accelerated using modern libraries and high-performance computing on GPUs. Here, we want to do a matrix multiplication of the features and the weights. For this we can use [`torch.mm()`](https://pytorch.org/docs/stable/torch.html#torch.mm) or [`torch.matmul()`](https://pytorch.org/docs/stable/torch.html#torch.matmul) which is somewhat more complicated and supports broadcasting. 
If we try to do it with `features` and `weights` as they are, we'll get an error ```python >> torch.mm(features, weights) --------------------------------------------------------------------------- RuntimeError Traceback (most recent call last) <ipython-input-13-15d592eb5279> in <module>() ----> 1 torch.mm(features, weights) RuntimeError: size mismatch, m1: [1 x 5], m2: [1 x 5] at /Users/soumith/minicondabuild3/conda-bld/pytorch_1524590658547/work/aten/src/TH/generic/THTensorMath.c:2033 ``` As you're building neural networks in any framework, you'll see this often. Really often. What's happening here is our tensors aren't the correct shapes to perform a matrix multiplication. Remember that for matrix multiplications, the number of columns in the first tensor must equal to the number of rows in the second column. Both `features` and `weights` have the same shape, `(1, 5)`. This means we need to change the shape of `weights` to get the matrix multiplication to work. **Note:** To see the shape of a tensor called `tensor`, use `tensor.shape`. If you're building neural networks, you'll be using this method often. There are a few options here: [`weights.reshape()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.reshape), [`weights.resize_()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.resize_), and [`weights.view()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.view). * `weights.reshape(a, b)` will return a new tensor with the same data as `weights` with size `(a, b)` sometimes, and sometimes a clone, as in it copies the data to another part of memory. * `weights.resize_(a, b)` returns the same tensor with a different shape. However, if the new shape results in fewer elements than the original tensor, some elements will be removed from the tensor (but not from memory). If the new shape results in more elements than the original tensor, new elements will be uninitialized in memory. Here I should note that the underscore at the end of the method denotes that this method is performed **in-place**. Here is a great forum thread to [read more about in-place operations](https://discuss.pytorch.org/t/what-is-in-place-operation/16244) in PyTorch. * `weights.view(a, b)` will return a new tensor with the same data as `weights` with size `(a, b)`. I usually use `.view()`, but any of the three methods will work for this. So, now we can reshape `weights` to have five rows and one column with something like `weights.view(5, 1)`. > **Exercise**: Calculate the output of our little network using matrix multiplication. ``` ## Calculate the output of this network using matrix multiplication y = activation(torch.mm(features, weights.view(5, 1)) + bias) ``` ### Stack them up! That's how you can calculate the output for a single neuron. The real power of this algorithm happens when you start stacking these individual units into layers and stacks of layers, into a network of neurons. The output of one layer of neurons becomes the input for the next layer. With multiple input units and output units, we now need to express the weights as a matrix. <img src='https://github.com/agungsantoso/deep-learning-v2-pytorch/blob/master/intro-to-pytorch/assets/multilayer_diagram_weights.png?raw=1' width=450px> The first layer shown on the bottom here are the inputs, understandably called the **input layer**. The middle layer is called the **hidden layer**, and the final layer (on the right) is the **output layer**. 
We can express this network mathematically with matrices again and use matrix multiplication to get linear combinations for each unit in one operation. For example, the hidden layer ($h_1$ and $h_2$ here) can be calculated $$ \vec{h} = [h_1 \, h_2] = \begin{bmatrix} x_1 \, x_2 \cdots \, x_n \end{bmatrix} \cdot \begin{bmatrix} w_{11} & w_{12} \\ w_{21} &w_{22} \\ \vdots &\vdots \\ w_{n1} &w_{n2} \end{bmatrix} $$ The output for this small network is found by treating the hidden layer as inputs for the output unit. The network output is expressed simply $$ y = f_2 \! \left(\, f_1 \! \left(\vec{x} \, \mathbf{W_1}\right) \mathbf{W_2} \right) $$ ``` ### Generate some data torch.manual_seed(7) # Set the random seed so things are predictable # Features are 3 random normal variables features = torch.randn((1, 3)) # Define the size of each layer in our network n_input = features.shape[1] # Number of input units, must match number of input features n_hidden = 2 # Number of hidden units n_output = 1 # Number of output units # Weights for inputs to hidden layer W1 = torch.randn(n_input, n_hidden) # Weights for hidden layer to output layer W2 = torch.randn(n_hidden, n_output) # and bias terms for hidden and output layers B1 = torch.randn((1, n_hidden)) B2 = torch.randn((1, n_output)) ``` > **Exercise:** Calculate the output for this multi-layer network using the weights `W1` & `W2`, and the biases, `B1` & `B2`. ``` ## Your solution here h = activation(torch.mm(features, W1) + B1) output = activation(torch.mm(h, W2) + B2) output ``` If you did this correctly, you should see the output `tensor([[ 0.3171]])`. The number of hidden units a parameter of the network, often called a **hyperparameter** to differentiate it from the weights and biases parameters. As you'll see later when we discuss training a neural network, the more hidden units a network has, and the more layers, the better able it is to learn from data and make accurate predictions. ## Numpy to Torch and back Special bonus section! PyTorch has a great feature for converting between Numpy arrays and Torch tensors. To create a tensor from a Numpy array, use `torch.from_numpy()`. To convert a tensor to a Numpy array, use the `.numpy()` method. ``` import numpy as np a = np.random.rand(4,3) a b = torch.from_numpy(a) b b.numpy() ``` The memory is shared between the Numpy array and Torch tensor, so if you change the values in-place of one object, the other will change as well. ``` # Multiply PyTorch Tensor by 2, in place b.mul_(2) # Numpy array matches new values from Tensor a ```
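The sharing works in both directions, and it is easy to check. The short cell below is an addition (not from the original notebook); it also shows how to get an independent copy when you don't want the shared buffer: `torch.tensor()` and `.clone()` both copy the data.
```
# Memory sharing works both ways: changing the Numpy array also changes the tensor.
a[0, 0] = 99.0
print(b[0, 0])           # reflects the change made through the Numpy array

# If you want an independent copy instead of a shared buffer, copy explicitly:
c = torch.tensor(a)      # copies the data into a new tensor
d = b.clone()            # also copies
a[0, 0] = -1.0
print(c[0, 0], d[0, 0])  # unaffected by the later change to `a`
```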
github_jupyter
## **University of Toronto - CSC413 - Neural Networks and Deep Learning** ## **Programming Assignment 4 - StyleGAN2-Ada** This is a self-contained notebook that allows you to play around with a pre-trained StyleGAN2-Ada generator Disclaimer: Some codes were borrowed from StyleGAN official documentation on Github https://github.com/NVlabs/stylegan Make sure to set your runtime to GPU Remember to save your progress periodically! ``` # Run this for Google CoLab (use TensorFlow 1.x) %tensorflow_version 1.x # clone StyleGAN2 Ada !git clone https://github.com/NVlabs/stylegan2-ada.git #setup some environments (Do not change any of the following) import sys import pickle import os import numpy as np from IPython.display import Image import PIL.Image from PIL import Image import matplotlib.pyplot as plt sys.path.insert(0, "/content/stylegan2-ada") #do not remove this line import dnnlib import dnnlib.tflib as tflib import IPython.display from google.colab import files ``` Next, we will load a pre-trained StyleGan2-ada network. Each of the following pre-trained network is specialized to generate one type of image. ``` # The pre-trained networks are stored as standard pickle files # Uncomment one of the following URL to begin # If you wish, you can also find other pre-trained networks online #URL = "https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/ffhq.pkl" # Human faces #URL = "https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/cifar10.pkl" # CIFAR10, these images are a bit too tiny for our experiment #URL = "https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/afhqwild.pkl" # wild animal pictures #URL = "https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metfaces.pkl" # European portrait paintings #URL = "https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/afhqcat.pkl" # cats #URL = "https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/afhqdog.pkl" # dogs tflib.init_tf() #this creates a default Tensorflow session # we are now going to load the StyleGAN2-Ada model # The following code downloads the file and unpickles it to yield 3 instances of dnnlib.tflib.Network. with dnnlib.util.open_url(URL) as fp: _G, _D, Gs = pickle.load(fp) # Here is a brief description of _G, _D, Gs, for details see the official StyleGAN documentation # _G = Instantaneous snapshot of the generator. Mainly useful for resuming a previous training run. # _D = Instantaneous snapshot of the discriminator. Mainly useful for resuming a previous training run. # Gs = Long-term average of the generator. Yields higher-quality results than the instantaneous snapshot. # We will work with Gs ``` ## Part 1 Sampling and Identifying Fakes Open: https://github.com/NVlabs/stylegan and follow the instructions starting from *There are three ways to use the pre-trained generator....* Complete generate_latent_code and generate_images function in the Colab notebook to generate a small row of $3 - 5$ images. You do not need to include these images into your PDF submission. If you wish, you can try to use https://www.whichfaceisreal.com/learn.html as a guideline to spot any imperfections that you detect in these images, e.g., ``blob artifact" and make a short remark for your attached images. ``` # Sample a batch of latent codes {z_1, ...., z_B}, B is your batch size. 
def generate_latent_code(SEED, BATCH, LATENT_DIMENSION = 512): """ This function returns a sample a batch of 512 dimensional random latent code - SEED: int - BATCH: int that specifies the number of latent codes, Recommended batch_size is 3 - 6 - LATENT_DIMENSION is by default 512 (see Karras et al.) You should use np.random.RandomState to construct a random number generator, say rnd Then use rnd.randn along with your BATCH and LATENT_DIMENSION to generate your latent codes. This samples a batch of latent codes from a normal distribution https://numpy.org/doc/stable/reference/random/generated/numpy.random.RandomState.randn.html Return latent_codes, which is a 2D array with dimensions BATCH times LATENT_DIMENSION """ ################################################################################ ########################## COMPLETE THE FOLLOWING ############################## ################################################################################ latent_codes = ... ################################################################################ return latent_codes # Sample images from your latent codes https://github.com/NVlabs/stylegan # You can use their default settings ################################################################################ ########################## COMPLETE THE FOLLOWING ############################## ################################################################################ def generate_images(SEED, BATCH, TRUNCATION = 0.7): """ This function generates a batch of images from latent codes. - SEED: int - BATCH: int that specifies the number of latent codes to be generated - TRUNCATION: float between [-1, 1] that decides the amount of clipping to apply to the latent code distribution recommended setting is 0.7 You will use Gs.run() to sample images. See https://github.com/NVlabs/stylegan for details You may use their default setting. """ # Sample a batch of latent code z using generate_latent_code function latent_codes = ... # Convert latent code into images by following https://github.com/NVlabs/stylegan fmt = dict(...) images = Gs.run(...) return PIL.Image.fromarray(np.concatenate(images, axis=1) , 'RGB') ################################################################################ # Generate your images generate_images(...) ``` ## **Part 2 Interpolation** Complete the interpolate_images function using linear interpolation between two latent codes, \begin{equation} z = r z_1 + (1-r) z_2, r \in [0, 1] \end{equation} and feeding this interpolation through the StyleGAN2-Ada generator Gs as done in generate_images. Include a small row of interpolation in your PDF submission as a screen shot if necessary to keep the file size small. ``` ################################################################################ ########################## COMPLETE THE FOLLOWING ############################## ################################################################################ def interpolate_images(SEED1, SEED2, INTERPOLATION, BATCH = 1, TRUNCATION = 0.7): """ - SEED1, SEED2: int, seed to use to generate the two latent codes - INTERPOLATION: int, the number of interpolation between the two images, recommended setting 6 - 10 - BATCH: int, the number of latent code to generate. In this experiment, it is 1. 
- TRUNCATION: float between [-1, 1] that decides the amount of clipping to apply to the latent code distribution recommended setting is 0.7 You will interpolate between two latent code that you generate using the above formula You can generate an interpolation variable using np.linspace https://numpy.org/doc/stable/reference/generated/numpy.linspace.html This function should return an interpolated image. Include a screenshot in your submission. """ latent_code_1 = ... latent_code_2 = ... images = Gs.run(...) return PIL.Image.fromarray(np.concatenate(images, axis=1) , 'RGB') ################################################################################ # Create an interpolation of your generated images interpolate_images(...) ``` After you have generated interpolated images, an interesting task would be to see how you can create a GIF. Feel free to explore a little bit more. ## **Part 3 Style Mixing and Fine Control** In the final part, you will reproduce the famous style mixing example from the original StyleGAN paper. ### Step 1. We will first learn how to generate from sub-networks of the StyleGAN generator. ``` # You will generate images from sub-networks of the StyleGAN generator # Similar to Gs, the sub-networks are represented as independent instances of dnnlib.tflib.Network # Complete the function by following \url{https://github.com/NVlabs/stylegan} # And Look up Gs.components.mapping, Gs.components.synthesism, Gs.get_var # Remember to use the truncation trick as described in the handout after you obtain src_dlatents from Gs.components.mapping.run def generate_from_subnetwork(src_seeds, LATENT_DIMENSION = 512): """ - src_seeds: a list of int, where each int is used to generate a latent code, e.g., [1,2,3] - LATENT_DIMENSION: by default 512 You will complete the code snippet in the Write Your Code Here block This generates several images from a sub-network of the genrator. To prevent mistakes, we have provided the variable names which corresponds to the ones in the StyleGAN documentation You should use their convention. """ # default arguments to Gs.components.synthesis.run, this is given to you. synthesis_kwargs = { 'output_transform': dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True), 'randomize_noise': False, 'minibatch_size': 4 } ############################################################################ ########################## WRITE YOUR CODE HERE ############################ ############################################################################ truncation = ... src_latents = ... src_dlatents = ... w_avg = ... src_dlatents = ... all_images = Gs.components.synthesis.run(...) ############################################################################ return PIL.Image.fromarray(np.concatenate(all_images, axis=1) , 'RGB') # generate several iamges from the sub-network generate_from_subnetwork(...) ``` ### Step 2. Initialize the col_seeds, row_seeds and col_styles and generate a grid of image. A recommended example for your experiment is as follows: * col_seeds = [1, 2, 3, 4, 5] * row_seeds = [6] * col_styles = [1, 2, 3, 4, 5] and * col_seeds = [1, 2, 3, 4, 5] * row_seeds = [6] * col_styles = [8, 9, 10, 11, 12] You will then incorporate your code from generate from sub_network into the cell below. Experiment with the col_styles variable. Explain what col_styles does, for instance, roughly describe what these numbers corresponds to. Create a simple experiment to backup your argument. 
Include **at maximum two** sets of images that illustrates the effect of changing col_styles and your explanation. Include them as screen shots to minimize the size of the file. Make reference to the original StyleGAN or the StyleGAN2 paper by Karras et al. as needed https://arxiv.org/pdf/1812.04948.pdf https://arxiv.org/pdf/1912.04958.pdf ``` ################################################################################ ####################COMPLETE THE NEXT THREE LINES############################### ################################################################################ col_seeds = ... row_seeds = ... col_styles = ... ################################################################################ src_seeds = list(set(row_seeds + col_seeds)) # default arguments to Gs.components.synthesis.run, do not change synthesis_kwargs = { 'output_transform': dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True), 'randomize_noise': False, 'minibatch_size': 4 } ################################################################################ ########################## COMPLETE THE FOLLOWING ############################## ################################################################################ # Copy the #### WRITE YOUR CODE HERE #### portion from generate_from_subnetwork() all_images = Gs.components.synthesis.run(...) ################################################################################ # (Do not change) image_dict = {(seed, seed): image for seed, image in zip(src_seeds, list(all_images))} w_dict = {seed: w for seed, w in zip(src_seeds, list(src_dlatents))} # Generating Images (Do not Change) for row_seed in row_seeds: for col_seed in col_seeds: w = w_dict[row_seed].copy() w[col_styles] = w_dict[col_seed][col_styles] image = Gs.components.synthesis.run(w[np.newaxis], **synthesis_kwargs)[0] image_dict[(row_seed, col_seed)] = image # Create an Image Grid (Do not Change) def create_grid_images(): _N, _C, H, W = Gs.output_shape canvas = PIL.Image.new('RGB', (W * (len(col_seeds) + 1), H * (len(row_seeds) + 1)), 'black') for row_idx, row_seed in enumerate([None] + row_seeds): for col_idx, col_seed in enumerate([None] + col_seeds): if row_seed is None and col_seed is None: continue key = (row_seed, col_seed) if row_seed is None: key = (col_seed, col_seed) if col_seed is None: key = (row_seed, row_seed) canvas.paste(PIL.Image.fromarray(image_dict[key], 'RGB'), (W * col_idx, H * row_idx)) return canvas # The following code will create your image, save it as a png, and display the image # Run the following code after you have set your row_seed, col_seed and col_style image_grid = create_grid_images() image_grid.save('image_grid.png') im = Image.open("image_grid.png") im ```
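As a quick reference for Parts 1 and 2 above, here is a minimal NumPy-only sketch of the latent-code sampling and the linear interpolation $z = r z_1 + (1-r) z_2$. It deliberately leaves out the StyleGAN runtime (Gs.run, the truncation trick, and image conversion), and the helper names below are illustrative rather than part of the assignment scaffold:

```
import numpy as np

def sample_latent_codes(seed, batch, latent_dimension=512):
    # Draw a batch of latent codes from a standard normal distribution,
    # following the generate_latent_code recipe above (np.random.RandomState + randn).
    rnd = np.random.RandomState(seed)
    return rnd.randn(batch, latent_dimension)

def interpolate_latent_codes(z1, z2, steps=6):
    # Linear interpolation z = r*z1 + (1 - r)*z2 for r in [0, 1], as in Part 2.
    ratios = np.linspace(0.0, 1.0, steps)
    return np.stack([r * z1 + (1.0 - r) * z2 for r in ratios])

z1 = sample_latent_codes(seed=1, batch=1)
z2 = sample_latent_codes(seed=2, batch=1)
path = interpolate_latent_codes(z1[0], z2[0])
print(path.shape)  # (6, 512): six latent codes, ready to be fed through the generator
```

Each row of `path` is one latent code; passing the whole array through the generator, as in generate_images above, would then produce the interpolation strip.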
``` #all_slow ``` # Tutorial - Migrating from Lightning > Incrementally adding fastai goodness to your Lightning training We're going to use the MNIST training code from Lightning's 'Quick Start' (as at August 2020), converted to a module. See `migrating_lightning.py` for the Lightning code we are importing here. ``` from migrating_lightning import * from fastai2.vision.all import * ``` ## Using fastai's training loop We can use the Lightning module directly: ``` model = LitModel() ``` To use it in fastai, we first pull the DataLoaders from the module into a `DataLoaders` object: ``` data = DataLoaders(model.train_dataloader(), model.val_dataloader()).cuda() ``` We can now create a `Learner` and fit: ``` learn = Learner(data, model, loss_func=F.cross_entropy, opt_func=Adam, metrics=accuracy) learn.fit_one_cycle(1, 0.001) ``` As you can see, migrating from Lightning allowed us to reduce the amount of code, and doesn't require you to change any of your existing data pipelines, optimizers, loss functions, models, etc. Once you've made this change, you can then benefit from fastai's rich set of callbacks, transforms, visualizations, and so forth. For instance, in the Lightning example, Tensorboard support was defined as a special-case "logger". In fastai, Tensorboard is just another `Callback` that you can add, with the parameter `cbs=Tensorboard`, when you create your `Learner`. The callbacks all work together, so you can add and remove any schedulers, loggers, visualizers, and so forth. You don't have to learn about special types of functionality for each - they are all just plain callbacks. Note that fastai is very different from Lightning, in that it is much more than just a training loop (although we're only using the training loop in this example) - it is a complete framework including GPU-accelerated transformations, end-to-end inference, integrated applications for vision, text, tabular, and collaborative filtering, and so forth. You can use any part of the framework on its own, or combine them together, as described in the [fastai paper](https://arxiv.org/abs/2002.04688). ### Taking advantage of fastai Data Blocks One problem in the Lightning example is that it doesn't actually use a validation set - it's just using the training set a second time as a validation set. You might prefer to use fastai's Data Block API, which makes it really easy to create, visualize, and test your input data processing. Here's how you can create input data for MNIST, for instance: ``` mnist = DataBlock(blocks=(ImageBlock(cls=PILImageBW), CategoryBlock), get_items=get_image_files, splitter=GrandparentSplitter(), get_y=parent_label) ``` Here, we're telling `DataBlock` that we have a B&W image input, and a category output, our input items are file names of images, the images are labeled based on the name of the parent folder, and they are split by training vs validation based on the grandparent folder name. It's important to actually look at your data, so fastai also makes it easy to visualize your inputs and outputs, for instance: ``` dls = mnist.dataloaders(untar_data(URLs.MNIST_TINY)) dls.show_batch(max_n=9, figsize=(4,4)) ```
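Since the point of using the Data Block API here is that you now have a real validation split, it can also help to inspect that split directly. Here is a small sketch using the same `dls` object created above; the attribute names assume the fastai2 API of that era:

```
# Look at the validation split created by GrandparentSplitter, not just the training data
dls.valid.show_batch(max_n=9, figsize=(4,4))

# The labels discovered by parent_label, and the size of each split
print(dls.vocab)
print(len(dls.train_ds), len(dls.valid_ds))
```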
``` # Import Required Libraries try: import tensorflow as tf import os import random import numpy as np from tqdm import tqdm from skimage.io import imread, imshow from skimage.transform import resize import matplotlib.pyplot as plt from tensorflow.keras.models import load_model from keras.models import model_from_json print("----Libraries Imported----") except: print("----Libraries Not Imported----") # checking the content of the current directory os.listdir() # Setting up path seed = 42 np.random.seed = seed IMG_WIDTH = 128 IMG_HEIGHT = 128 IMG_CHANNELS = 3 TRAIN_PATH = 'E:/Projects 6th SEM/Orange-Fruit-Recognition-Using-Image-Segmentation/Image Segmentaion/train_data/' TEST_PATH = 'E:/Projects 6th SEM/Orange-Fruit-Recognition-Using-Image-Segmentation/Image Segmentaion/test_data/' train_ids = next(os.walk(TRAIN_PATH))[1] test_ids = next(os.walk(TEST_PATH))[1] print(train_ids) print(test_ids) # Loading data # independent variable X_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8) # dependent variable (what we are trying to predict) Y_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool) print('Resizing training images and masks') for n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)): path = TRAIN_PATH + id_ img = imread(path + '/images/' + id_ + '.jpg')[:,:,:IMG_CHANNELS] img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True) X_train[n] = img #Fill empty X_train with values from img mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool) for mask_file in next(os.walk(path + '/masks/'))[2]: mask_ = imread(path + '/masks/' + mask_file) mask_ = np.expand_dims(resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True), axis=-1) mask = np.maximum(mask, mask_) Y_train[n] = mask # test images X_test = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8) sizes_test = [] print('Resizing test images') for n, id_ in tqdm(enumerate(test_ids), total=len(test_ids)): path = TEST_PATH + id_ img = imread(path + '/images/' + id_ + '.jpg')[:,:,:IMG_CHANNELS] sizes_test.append([img.shape[0], img.shape[1]]) img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True) X_test[n] = img print('Done!') # Showing Random images from the dataset image_x = random.randint(0, len(train_ids)) imshow(X_train[image_x]) plt.show() imshow(np.squeeze(Y_train[image_x])) plt.show() from UNet_Model import Segmentation_model model = Segmentation_model() model.summary() ################################ #Modelcheckpoint with tf.device('/GPU:0'): results = model.fit(X_train, Y_train, validation_split=0.1, batch_size=4, epochs=100) print('Training DONE') # Plotting Training Results plt.plot(results.history['accuracy'][0:150]) plt.plot(results.history['val_accuracy'][0:150]) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['training_accuracy', 'validation_accuracy']) plt.show() plt.plot(results.history['loss'][0:150]) plt.plot(results.history['val_loss'][0:150]) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['training_loss', 'validation_loss']) plt.show() # Saving model orange_model_json = model.to_json() with open("Segmentation_model.json", "w") as json_file: json_file.write(orange_model_json) model.save_weights("Orange_Fruit_Weights_segmentation.h5") # Loading Unet segmentation_model = model_from_json(open("Segmentation_model.json", "r").read()) 
segmentation_model.load_weights('Orange_Fruit_Weights_segmentation.h5') #################################### idx = random.randint(0, len(X_train)) print(idx) preds_train = segmentation_model.predict(X_train[:int(X_train.shape[0]*0.9)], verbose=1) preds_val = segmentation_model.predict(X_train[int(X_train.shape[0]*0.9):], verbose=1) preds_test = segmentation_model.predict(X_test, verbose=1) preds_train_t = (preds_train > 0.5).astype(np.uint8) preds_val_t = (preds_val > 0.5).astype(np.uint8) preds_test_t = (preds_test > 0.5).astype(np.uint8) # Perform a sanity check on some random training samples ix = random.randint(0, len(preds_train_t)) imshow(X_train[ix]) plt.show() imshow(np.squeeze(Y_train[ix])) plt.show() imshow(np.squeeze(preds_train_t[ix])) plt.show() # Perform a sanity check on some random validation samples ix = random.randint(0, len(preds_val_t)) imshow(X_train[int(X_train.shape[0]*0.9):][ix]) plt.show() imshow(np.squeeze(Y_train[int(Y_train.shape[0]*0.9):][ix])) plt.show() imshow(np.squeeze(preds_val_t[ix])) plt.show() # Loading Classification Model import Prediction_file as pf classification_model = pf.Loading_Model() # Prediction path1 = 'Images/kiwi.jpg' path2 = 'Images/Orange.jpg' pred1 = pf.predicting(path1,classification_model) pred2 = pf.predicting(path2,classification_model) from tensorflow.keras.preprocessing.image import load_img, img_to_array def process_image(path): img = load_img(path, target_size = (IMG_WIDTH,IMG_HEIGHT)) img_tensor = img_to_array(img) img_tensor = np.expand_dims(img_tensor, axis = 0) img_tensor/=255.0 return img_tensor if pred2 > 0.5: p = segmentation_model.predict(process_image(path2), verbose=1) p_t = (p > 0.5).astype(np.uint8) imshow(np.squeeze(p_t)) plt.show() p = segmentation_model.predict(process_image(path1), verbose=1) p_t = (p > 0.5).astype(np.uint8) imshow(np.squeeze(p_t)) plt.show() ```
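The checks above are purely visual. If you also want a quantitative sanity check, one option is a Dice score over the thresholded masks. This is not part of the original notebook; it is a small sketch that assumes the `Y_train` and `preds_val_t` arrays defined above:

```
import numpy as np

def dice_score(y_true, y_pred, eps=1e-7):
    # Dice = 2 * |A intersect B| / (|A| + |B|) for a pair of binary masks
    y_true = y_true.astype(np.float32).ravel()
    y_pred = y_pred.astype(np.float32).ravel()
    intersection = np.sum(y_true * y_pred)
    return (2.0 * intersection + eps) / (np.sum(y_true) + np.sum(y_pred) + eps)

# Compare the thresholded validation predictions against their ground-truth masks
val_masks = Y_train[int(Y_train.shape[0]*0.9):]
dice_scores = [dice_score(val_masks[i], preds_val_t[i]) for i in range(len(preds_val_t))]
print('Mean validation Dice score:', np.mean(dice_scores))
```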
# Autonomous driving - Car detection Welcome to your week 3 programming assignment. You will learn about object detection using the very powerful YOLO model. Many of the ideas in this notebook are described in the two YOLO papers: [Redmon et al., 2016](https://arxiv.org/abs/1506.02640) and [Redmon and Farhadi, 2016](https://arxiv.org/abs/1612.08242). **You will learn to**: - Use object detection on a car detection dataset - Deal with bounding boxes ## <font color='darkblue'>Updates</font> #### If you were working on the notebook before this update... * The current notebook is version "3a". * You can find your original work saved in the notebook with the previous version name ("v3") * To view the file directory, go to the menu "File->Open", and this will open a new tab that shows the file directory. #### List of updates * Clarified "YOLO" instructions preceding the code. * Added details about anchor boxes. * Added explanation of how score is calculated. * `yolo_filter_boxes`: added additional hints. Clarify syntax for argmax and max. * `iou`: clarify instructions for finding the intersection. * `iou`: give variable names for all 8 box vertices, for clarity. Adds `width` and `height` variables for clarity. * `iou`: add test cases to check handling of non-intersecting boxes, intersection at vertices, or intersection at edges. * `yolo_non_max_suppression`: clarify syntax for tf.image.non_max_suppression and keras.gather. * "convert output of the model to usable bounding box tensors": Provides a link to the definition of `yolo_head`. * `predict`: hint on calling sess.run. * Spelling, grammar, wording and formatting updates to improve clarity. ## Import libraries Run the following cell to load the packages and dependencies that you will find useful as you build the object detector! ``` import argparse import os import matplotlib.pyplot as plt from matplotlib.pyplot import imshow import scipy.io import scipy.misc import numpy as np import pandas as pd import PIL import tensorflow as tf from keras import backend as K from keras.layers import Input, Lambda, Conv2D from keras.models import load_model, Model from yolo_utils import read_classes, read_anchors, generate_colors, preprocess_image, draw_boxes, scale_boxes from yad2k.models.keras_yolo import yolo_head, yolo_boxes_to_corners, preprocess_true_boxes, yolo_loss, yolo_body %matplotlib inline ``` **Important Note**: As you can see, we import Keras's backend as K. This means that to use a Keras function in this notebook, you will need to write: `K.function(...)`. ## 1 - Problem Statement You are working on a self-driving car. As a critical component of this project, you'd like to first build a car detection system. To collect data, you've mounted a camera to the hood (meaning the front) of the car, which takes pictures of the road ahead every few seconds while you drive around. <center> <video width="400" height="200" src="nb_images/road_video_compressed2.mp4" type="video/mp4" controls> </video> </center> <caption><center> Pictures taken from a car-mounted camera while driving around Silicon Valley. <br> We thank [drive.ai](htps://www.drive.ai/) for providing this dataset. </center></caption> You've gathered all these images into a folder and have labelled them by drawing bounding boxes around every car you found. Here's an example of what your bounding boxes look like. 
<img src="nb_images/box_label.png" style="width:500px;height:250;"> <caption><center> <u> **Figure 1** </u>: **Definition of a box**<br> </center></caption> If you have 80 classes that you want the object detector to recognize, you can represent the class label $c$ either as an integer from 1 to 80, or as an 80-dimensional vector (with 80 numbers) one component of which is 1 and the rest of which are 0. The video lectures had used the latter representation; in this notebook, we will use both representations, depending on which is more convenient for a particular step. In this exercise, you will learn how "You Only Look Once" (YOLO) performs object detection, and then apply it to car detection. Because the YOLO model is very computationally expensive to train, we will load pre-trained weights for you to use. ## 2 - YOLO "You Only Look Once" (YOLO) is a popular algorithm because it achieves high accuracy while also being able to run in real-time. This algorithm "only looks once" at the image in the sense that it requires only one forward propagation pass through the network to make predictions. After non-max suppression, it then outputs recognized objects together with the bounding boxes. ### 2.1 - Model details #### Inputs and outputs - The **input** is a batch of images, and each image has the shape (m, 608, 608, 3) - The **output** is a list of bounding boxes along with the recognized classes. Each bounding box is represented by 6 numbers $(p_c, b_x, b_y, b_h, b_w, c)$ as explained above. If you expand $c$ into an 80-dimensional vector, each bounding box is then represented by 85 numbers. #### Anchor Boxes * Anchor boxes are chosen by exploring the training data to choose reasonable height/width ratios that represent the different classes. For this assignment, 5 anchor boxes were chosen for you (to cover the 80 classes), and stored in the file './model_data/yolo_anchors.txt' * The dimension for anchor boxes is the second to last dimension in the encoding: $(m, n_H,n_W,anchors,classes)$. * The YOLO architecture is: IMAGE (m, 608, 608, 3) -> DEEP CNN -> ENCODING (m, 19, 19, 5, 85). #### Encoding Let's look in greater detail at what this encoding represents. <img src="nb_images/architecture.png" style="width:700px;height:400;"> <caption><center> <u> **Figure 2** </u>: **Encoding architecture for YOLO**<br> </center></caption> If the center/midpoint of an object falls into a grid cell, that grid cell is responsible for detecting that object. Since we are using 5 anchor boxes, each of the 19 x19 cells thus encodes information about 5 boxes. Anchor boxes are defined only by their width and height. For simplicity, we will flatten the last two last dimensions of the shape (19, 19, 5, 85) encoding. So the output of the Deep CNN is (19, 19, 425). <img src="nb_images/flatten.png" style="width:700px;height:400;"> <caption><center> <u> **Figure 3** </u>: **Flattening the last two last dimensions**<br> </center></caption> #### Class score Now, for each box (of each cell) we will compute the following element-wise product and extract a probability that the box contains a certain class. The class score is $score_{c,i} = p_{c} \times c_{i}$: the probability that there is an object $p_{c}$ times the probability that the object is a certain class $c_{i}$. 
<img src="nb_images/probability_extraction.png" style="width:700px;height:400;"> <caption><center> <u> **Figure 4** </u>: **Find the class detected by each box**<br> </center></caption> ##### Example of figure 4 * In figure 4, let's say for box 1 (cell 1), the probability that an object exists is $p_{1}=0.60$. So there's a 60% chance that an object exists in box 1 (cell 1). * The probability that the object is the class "category 3 (a car)" is $c_{3}=0.73$. * The score for box 1 and for category "3" is $score_{1,3}=0.60 \times 0.73 = 0.44$. * Let's say we calculate the score for all 80 classes in box 1, and find that the score for the car class (class 3) is the maximum. So we'll assign the score 0.44 and class "3" to this box "1". #### Visualizing classes Here's one way to visualize what YOLO is predicting on an image: - For each of the 19x19 grid cells, find the maximum of the probability scores (taking a max across the 80 classes, one maximum for each of the 5 anchor boxes). - Color that grid cell according to what object that grid cell considers the most likely. Doing this results in this picture: <img src="nb_images/proba_map.png" style="width:300px;height:300;"> <caption><center> <u> **Figure 5** </u>: Each one of the 19x19 grid cells is colored according to which class has the largest predicted probability in that cell.<br> </center></caption> Note that this visualization isn't a core part of the YOLO algorithm itself for making predictions; it's just a nice way of visualizing an intermediate result of the algorithm. #### Visualizing bounding boxes Another way to visualize YOLO's output is to plot the bounding boxes that it outputs. Doing that results in a visualization like this: <img src="nb_images/anchor_map.png" style="width:200px;height:200;"> <caption><center> <u> **Figure 6** </u>: Each cell gives you 5 boxes. In total, the model predicts: 19x19x5 = 1805 boxes just by looking once at the image (one forward pass through the network)! Different colors denote different classes. <br> </center></caption> #### Non-Max suppression In the figure above, we plotted only boxes for which the model had assigned a high probability, but this is still too many boxes. You'd like to reduce the algorithm's output to a much smaller number of detected objects. To do so, you'll use **non-max suppression**. Specifically, you'll carry out these steps: - Get rid of boxes with a low score (meaning, the box is not very confident about detecting a class; either due to the low probability of any object, or low probability of this particular class). - Select only one box when several boxes overlap with each other and detect the same object. ### 2.2 - Filtering with a threshold on class scores You are going to first apply a filter by thresholding. You would like to get rid of any box for which the class "score" is less than a chosen threshold. The model gives you a total of 19x19x5x85 numbers, with each box described by 85 numbers. It is convenient to rearrange the (19,19,5,85) (or (19,19,425)) dimensional tensor into the following variables: - `box_confidence`: tensor of shape $(19 \times 19, 5, 1)$ containing $p_c$ (confidence probability that there's some object) for each of the 5 boxes predicted in each of the 19x19 cells. - `boxes`: tensor of shape $(19 \times 19, 5, 4)$ containing the midpoint and dimensions $(b_x, b_y, b_h, b_w)$ for each of the 5 boxes in each cell. - `box_class_probs`: tensor of shape $(19 \times 19, 5, 80)$ containing the "class probabilities" $(c_1, c_2, ... 
c_{80})$ for each of the 80 classes for each of the 5 boxes per cell. #### **Exercise**: Implement `yolo_filter_boxes()`. 1. Compute box scores by doing the elementwise product as described in Figure 4 ($p \times c$). The following code may help you choose the right operator: ```python a = np.random.randn(19*19, 5, 1) b = np.random.randn(19*19, 5, 80) c = a * b # shape of c will be (19*19, 5, 80) ``` This is an example of **broadcasting** (multiplying vectors of different sizes). 2. For each box, find: - the index of the class with the maximum box score - the corresponding box score **Useful references** * [Keras argmax](https://keras.io/backend/#argmax) * [Keras max](https://keras.io/backend/#max) **Additional Hints** * For the `axis` parameter of `argmax` and `max`, if you want to select the **last** axis, one way to do so is to set `axis=-1`. This is similar to Python array indexing, where you can select the last position of an array using `arrayname[-1]`. * Applying `max` normally collapses the axis for which the maximum is applied. `keepdims=False` is the default option, and allows that dimension to be removed. We don't need to keep the last dimension after applying the maximum here. * Even though the documentation shows `keras.backend.argmax`, use `keras.argmax`. Similarly, use `keras.max`. 3. Create a mask by using a threshold. As a reminder: `([0.9, 0.3, 0.4, 0.5, 0.1] < 0.4)` returns: `[False, True, False, False, True]`. The mask should be True for the boxes you want to keep. 4. Use TensorFlow to apply the mask to `box_class_scores`, `boxes` and `box_classes` to filter out the boxes we don't want. You should be left with just the subset of boxes you want to keep. **Useful reference**: * [boolean mask](https://www.tensorflow.org/api_docs/python/tf/boolean_mask) **Additional Hints**: * For the `tf.boolean_mask`, we can keep the default `axis=None`. **Reminder**: to call a Keras function, you should use `K.function(...)`. ``` # GRADED FUNCTION: yolo_filter_boxes def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = .6): """Filters YOLO boxes by thresholding on object and class confidence. Arguments: box_confidence -- tensor of shape (19, 19, 5, 1) boxes -- tensor of shape (19, 19, 5, 4) box_class_probs -- tensor of shape (19, 19, 5, 80) threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box Returns: scores -- tensor of shape (None,), containing the class probability score for selected boxes boxes -- tensor of shape (None, 4), containing (b_x, b_y, b_h, b_w) coordinates of selected boxes classes -- tensor of shape (None,), containing the index of the class detected by the selected boxes Note: "None" is here because you don't know the exact number of selected boxes, as it depends on the threshold. For example, the actual output size of scores would be (10,) if there are 10 boxes. """ # Step 1: Compute box scores ### START CODE HERE ### (≈ 1 line) box_scores = np.multiply(box_confidence, box_class_probs) ### END CODE HERE ### # Step 2: Find the box_classes using the max box_scores, keep track of the corresponding score ### START CODE HERE ### (≈ 2 lines) box_classes = K.argmax(box_scores, axis = -1) box_class_scores = K.max(box_scores, axis = -1) ### END CODE HERE ### # Step 3: Create a filtering mask based on "box_class_scores" by using "threshold". 
The mask should have the # same dimension as box_class_scores, and be True for the boxes you want to keep (with probability >= threshold) ### START CODE HERE ### (≈ 1 line) filtering_mask = box_class_scores >= threshold ### END CODE HERE ### # Step 4: Apply the mask to box_class_scores, boxes and box_classes ### START CODE HERE ### (≈ 3 lines) scores = tf.boolean_mask(box_class_scores, filtering_mask) boxes = tf.boolean_mask(boxes, filtering_mask) classes = tf.boolean_mask(box_classes, filtering_mask) ### END CODE HERE ### return scores, boxes, classes with tf.Session() as test_a: box_confidence = tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1) boxes = tf.random_normal([19, 19, 5, 4], mean=1, stddev=4, seed = 1) box_class_probs = tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1) scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = 0.5) print("scores[2] = " + str(scores[2].eval())) print("boxes[2] = " + str(boxes[2].eval())) print("classes[2] = " + str(classes[2].eval())) print("scores.shape = " + str(scores.shape)) print("boxes.shape = " + str(boxes.shape)) print("classes.shape = " + str(classes.shape)) ``` **Expected Output**: <table> <tr> <td> **scores[2]** </td> <td> 10.7506 </td> </tr> <tr> <td> **boxes[2]** </td> <td> [ 8.42653275 3.27136683 -0.5313437 -4.94137383] </td> </tr> <tr> <td> **classes[2]** </td> <td> 7 </td> </tr> <tr> <td> **scores.shape** </td> <td> (?,) </td> </tr> <tr> <td> **boxes.shape** </td> <td> (?, 4) </td> </tr> <tr> <td> **classes.shape** </td> <td> (?,) </td> </tr> </table> **Note** In the test for `yolo_filter_boxes`, we're using random numbers to test the function. In real data, the `box_class_probs` would contain non-zero values between 0 and 1 for the probabilities. The box coordinates in `boxes` would also be chosen so that lengths and heights are non-negative. ### 2.3 - Non-max suppression ### Even after filtering by thresholding over the class scores, you still end up with a lot of overlapping boxes. A second filter for selecting the right boxes is called non-maximum suppression (NMS). <img src="nb_images/non-max-suppression.png" style="width:500px;height:400;"> <caption><center> <u> **Figure 7** </u>: In this example, the model has predicted 3 cars, but it's actually 3 predictions of the same car. Running non-max suppression (NMS) will select only the most accurate (highest probability) of the 3 boxes. <br> </center></caption> Non-max suppression uses the very important function called **"Intersection over Union"**, or IoU. <img src="nb_images/iou.png" style="width:500px;height:400;"> <caption><center> <u> **Figure 8** </u>: Definition of "Intersection over Union". <br> </center></caption> #### **Exercise**: Implement iou(). Some hints: - In this code, we use the convention that (0,0) is the top-left corner of an image, (1,0) is the upper-right corner, and (1,1) is the lower-right corner. In other words, the (0,0) origin starts at the top left corner of the image. As x increases, we move to the right. As y increases, we move down. - For this exercise, we define a box using its two corners: upper left $(x_1, y_1)$ and lower right $(x_2,y_2)$, instead of using the midpoint, height and width. (This makes it a bit easier to calculate the intersection). - To calculate the area of a rectangle, multiply its height $(y_2 - y_1)$ by its width $(x_2 - x_1)$. (Since $(x_1,y_1)$ is the top left and $x_2,y_2$ are the bottom right, these differences should be non-negative. 
- To find the **intersection** of the two boxes $(xi_{1}, yi_{1}, xi_{2}, yi_{2})$: - Feel free to draw some examples on paper to clarify this conceptually. - The top left corner of the intersection $(xi_{1}, yi_{1})$ is found by comparing the top left corners $(x_1, y_1)$ of the two boxes and finding a vertex that has an x-coordinate that is closer to the right, and y-coordinate that is closer to the bottom. - The bottom right corner of the intersection $(xi_{2}, yi_{2})$ is found by comparing the bottom right corners $(x_2,y_2)$ of the two boxes and finding a vertex whose x-coordinate is closer to the left, and the y-coordinate that is closer to the top. - The two boxes **may have no intersection**. You can detect this if the intersection coordinates you calculate end up being the top right and/or bottom left corners of an intersection box. Another way to think of this is if you calculate the height $(y_2 - y_1)$ or width $(x_2 - x_1)$ and find that at least one of these lengths is negative, then there is no intersection (intersection area is zero). - The two boxes may intersect at the **edges or vertices**, in which case the intersection area is still zero. This happens when either the height or width (or both) of the calculated intersection is zero. **Additional Hints** - `xi1` = **max**imum of the x1 coordinates of the two boxes - `yi1` = **max**imum of the y1 coordinates of the two boxes - `xi2` = **min**imum of the x2 coordinates of the two boxes - `yi2` = **min**imum of the y2 coordinates of the two boxes - `inter_area` = You can use `max(height, 0)` and `max(width, 0)` ``` # GRADED FUNCTION: iou def iou(box1, box2): """Implement the intersection over union (IoU) between box1 and box2      Arguments: box1 -- first box, list object with coordinates (box1_x1, box1_y1, box1_x2, box_1_y2)     box2 -- second box, list object with coordinates (box2_x1, box2_y1, box2_x2, box2_y2)     """ # Assign variable names to coordinates for clarity (box1_x1, box1_y1, box1_x2, box1_y2) = box1 (box2_x1, box2_y1, box2_x2, box2_y2) = box2 # Calculate the (yi1, xi1, yi2, xi2) coordinates of the intersection of box1 and box2. Calculate its Area. 
### START CODE HERE ### (≈ 7 lines) xi1 = box1_x1 if box1_x1 > box2_x1 else box2_x1 yi1 = box1_y1 if box1_y1 > box2_y1 else box2_y1 xi2 = box1_x2 if box1_x2 < box2_x2 else box2_x2 yi2 = box1_y2 if box1_y2 < box2_y2 else box2_y2 inter_width = xi2 - xi1 inter_height = yi2 - yi1 inter_area = 0 if (inter_width < 0 or inter_height < 0) else inter_width * inter_height ### END CODE HERE ###     # Calculate the Union area by using Formula: Union(A,B) = A + B - Inter(A,B) ### START CODE HERE ### (≈ 3 lines) box1_area = (box1_x2 - box1_x1) * (box1_y2 - box1_y1) box2_area = (box2_x2 - box2_x1) * (box2_y2 - box2_y1) union_area = (box1_area + box2_area) - inter_area ### END CODE HERE ### # compute the IoU ### START CODE HERE ### (≈ 1 line) iou = inter_area / union_area ### END CODE HERE ### return iou ## Test case 1: boxes intersect box1 = (2, 1, 4, 3) box2 = (1, 2, 3, 4) print("iou for intersecting boxes = " + str(iou(box1, box2))) ## Test case 2: boxes do not intersect box1 = (1,2,3,4) box2 = (5,6,7,8) print("iou for non-intersecting boxes = " + str(iou(box1,box2))) ## Test case 3: boxes intersect at vertices only box1 = (1,1,2,2) box2 = (2,2,3,3) print("iou for boxes that only touch at vertices = " + str(iou(box1,box2))) ## Test case 4: boxes intersect at edge only box1 = (1,1,3,3) box2 = (2,3,3,4) print("iou for boxes that only touch at edges = " + str(iou(box1,box2))) ``` **Expected Output**: ``` iou for intersecting boxes = 0.14285714285714285 iou for non-intersecting boxes = 0.0 iou for boxes that only touch at vertices = 0.0 iou for boxes that only touch at edges = 0.0 ``` #### YOLO non-max suppression You are now ready to implement non-max suppression. The key steps are: 1. Select the box that has the highest score. 2. Compute the overlap of this box with all other boxes, and remove boxes that overlap significantly (iou >= `iou_threshold`). 3. Go back to step 1 and iterate until there are no more boxes with a lower score than the currently selected box. This will remove all boxes that have a large overlap with the selected boxes. Only the "best" boxes remain. **Exercise**: Implement yolo_non_max_suppression() using TensorFlow. TensorFlow has two built-in functions that are used to implement non-max suppression (so you don't actually need to use your `iou()` implementation): ** Reference documentation ** - [tf.image.non_max_suppression()](https://www.tensorflow.org/api_docs/python/tf/image/non_max_suppression) ``` tf.image.non_max_suppression( boxes, scores, max_output_size, iou_threshold=0.5, name=None ) ``` Note that in the version of tensorflow used here, there is no parameter `score_threshold` (it's shown in the documentation for the latest version) so trying to set this value will result in an error message: *got an unexpected keyword argument 'score_threshold.* - [K.gather()](https://www.tensorflow.org/api_docs/python/tf/keras/backend/gather) Even though the documentation shows `tf.keras.backend.gather()`, you can use `keras.gather()`. 
``` keras.gather( reference, indices ) ``` ``` # GRADED FUNCTION: yolo_non_max_suppression def yolo_non_max_suppression(scores, boxes, classes, max_boxes = 10, iou_threshold = 0.5): """ Applies Non-max suppression (NMS) to set of boxes Arguments: scores -- tensor of shape (None,), output of yolo_filter_boxes() boxes -- tensor of shape (None, 4), output of yolo_filter_boxes() that have been scaled to the image size (see later) classes -- tensor of shape (None,), output of yolo_filter_boxes() max_boxes -- integer, maximum number of predicted boxes you'd like iou_threshold -- real value, "intersection over union" threshold used for NMS filtering Returns: scores -- tensor of shape (, None), predicted score for each box boxes -- tensor of shape (4, None), predicted box coordinates classes -- tensor of shape (, None), predicted class for each box Note: The "None" dimension of the output tensors has obviously to be less than max_boxes. Note also that this function will transpose the shapes of scores, boxes, classes. This is made for convenience. """ max_boxes_tensor = K.variable(max_boxes, dtype='int32') # tensor to be used in tf.image.non_max_suppression() K.get_session().run(tf.variables_initializer([max_boxes_tensor])) # initialize variable max_boxes_tensor # Use tf.image.non_max_suppression() to get the list of indices corresponding to boxes you keep ### START CODE HERE ### (≈ 1 line) nms_indices = tf.image.non_max_suppression(boxes, scores, max_boxes, iou_threshold) ### END CODE HERE ### # Use K.gather() to select only nms_indices from scores, boxes and classes ### START CODE HERE ### (≈ 3 lines) scores = K.gather(scores, nms_indices) boxes = K.gather(boxes, nms_indices) classes = K.gather(classes, nms_indices) ### END CODE HERE ### return scores, boxes, classes with tf.Session() as test_b: scores = tf.random_normal([54,], mean=1, stddev=4, seed = 1) boxes = tf.random_normal([54, 4], mean=1, stddev=4, seed = 1) classes = tf.random_normal([54,], mean=1, stddev=4, seed = 1) scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes) print("scores[2] = " + str(scores[2].eval())) print("boxes[2] = " + str(boxes[2].eval())) print("classes[2] = " + str(classes[2].eval())) print("scores.shape = " + str(scores.eval().shape)) print("boxes.shape = " + str(boxes.eval().shape)) print("classes.shape = " + str(classes.eval().shape)) ``` **Expected Output**: <table> <tr> <td> **scores[2]** </td> <td> 6.9384 </td> </tr> <tr> <td> **boxes[2]** </td> <td> [-5.299932 3.13798141 4.45036697 0.95942086] </td> </tr> <tr> <td> **classes[2]** </td> <td> -2.24527 </td> </tr> <tr> <td> **scores.shape** </td> <td> (10,) </td> </tr> <tr> <td> **boxes.shape** </td> <td> (10, 4) </td> </tr> <tr> <td> **classes.shape** </td> <td> (10,) </td> </tr> </table> ### 2.4 Wrapping up the filtering It's time to implement a function taking the output of the deep CNN (the 19x19x5x85 dimensional encoding) and filtering through all the boxes using the functions you've just implemented. **Exercise**: Implement `yolo_eval()` which takes the output of the YOLO encoding and filters the boxes using score threshold and NMS. There's just one last implementational detail you have to know. There're a few ways of representing boxes, such as via their corners or via their midpoint and height/width. 
YOLO converts between a few such formats at different times, using the following functions (which we have provided): ```python boxes = yolo_boxes_to_corners(box_xy, box_wh) ``` which converts the yolo box coordinates (x,y,w,h) to box corners' coordinates (x1, y1, x2, y2) to fit the input of `yolo_filter_boxes` ```python boxes = scale_boxes(boxes, image_shape) ``` YOLO's network was trained to run on 608x608 images. If you are testing this data on a different size image--for example, the car detection dataset had 720x1280 images--this step rescales the boxes so that they can be plotted on top of the original 720x1280 image. Don't worry about these two functions; we'll show you where they need to be called. ``` # GRADED FUNCTION: yolo_eval def yolo_eval(yolo_outputs, image_shape = (720., 1280.), max_boxes=10, score_threshold=.6, iou_threshold=.5): """ Converts the output of YOLO encoding (a lot of boxes) to your predicted boxes along with their scores, box coordinates and classes. Arguments: yolo_outputs -- output of the encoding model (for image_shape of (608, 608, 3)), contains 4 tensors: box_confidence: tensor of shape (None, 19, 19, 5, 1) box_xy: tensor of shape (None, 19, 19, 5, 2) box_wh: tensor of shape (None, 19, 19, 5, 2) box_class_probs: tensor of shape (None, 19, 19, 5, 80) image_shape -- tensor of shape (2,) containing the input shape, in this notebook we use (608., 608.) (has to be float32 dtype) max_boxes -- integer, maximum number of predicted boxes you'd like score_threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box iou_threshold -- real value, "intersection over union" threshold used for NMS filtering Returns: scores -- tensor of shape (None, ), predicted score for each box boxes -- tensor of shape (None, 4), predicted box coordinates classes -- tensor of shape (None,), predicted class for each box """ ### START CODE HERE ### # Retrieve outputs of the YOLO model (≈1 line) box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs # Convert boxes to be ready for filtering functions (convert boxes box_xy and box_wh to corner coordinates) boxes = yolo_boxes_to_corners(box_xy, box_wh) # Use one of the functions you've implemented to perform Score-filtering with a threshold of score_threshold (≈1 line) scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = score_threshold) # Scale boxes back to original image shape. 
boxes = scale_boxes(boxes, image_shape) # Use one of the functions you've implemented to perform Non-max suppression with # maximum number of boxes set to max_boxes and a threshold of iou_threshold (≈1 line) scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes) ### END CODE HERE ### return scores, boxes, classes with tf.Session() as test_b: yolo_outputs = (tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1), tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1), tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1), tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1)) scores, boxes, classes = yolo_eval(yolo_outputs) print("scores[2] = " + str(scores[2].eval())) print("boxes[2] = " + str(boxes[2].eval())) print("classes[2] = " + str(classes[2].eval())) print("scores.shape = " + str(scores.eval().shape)) print("boxes.shape = " + str(boxes.eval().shape)) print("classes.shape = " + str(classes.eval().shape)) ``` **Expected Output**: <table> <tr> <td> **scores[2]** </td> <td> 138.791 </td> </tr> <tr> <td> **boxes[2]** </td> <td> [ 1292.32971191 -278.52166748 3876.98925781 -835.56494141] </td> </tr> <tr> <td> **classes[2]** </td> <td> 54 </td> </tr> <tr> <td> **scores.shape** </td> <td> (10,) </td> </tr> <tr> <td> **boxes.shape** </td> <td> (10, 4) </td> </tr> <tr> <td> **classes.shape** </td> <td> (10,) </td> </tr> </table> ## Summary for YOLO: - Input image (608, 608, 3) - The input image goes through a CNN, resulting in a (19,19,5,85) dimensional output. - After flattening the last two dimensions, the output is a volume of shape (19, 19, 425): - Each cell in a 19x19 grid over the input image gives 425 numbers. - 425 = 5 x 85 because each cell contains predictions for 5 boxes, corresponding to 5 anchor boxes, as seen in lecture. - 85 = 5 + 80 where 5 is because $(p_c, b_x, b_y, b_h, b_w)$ has 5 numbers, and 80 is the number of classes we'd like to detect - You then select only few boxes based on: - Score-thresholding: throw away boxes that have detected a class with a score less than the threshold - Non-max suppression: Compute the Intersection over Union and avoid selecting overlapping boxes - This gives you YOLO's final output. ## 3 - Test YOLO pre-trained model on images In this part, you are going to use a pre-trained model and test it on the car detection dataset. We'll need a session to execute the computation graph and evaluate the tensors. ``` sess = K.get_session() ``` ### 3.1 - Defining classes, anchors and image shape. * Recall that we are trying to detect 80 classes, and are using 5 anchor boxes. * We have gathered the information on the 80 classes and 5 boxes in two files "coco_classes.txt" and "yolo_anchors.txt". * We'll read class names and anchors from text files. * The car detection dataset has 720x1280 images, which we've pre-processed into 608x608 images. ``` class_names = read_classes("model_data/coco_classes.txt") anchors = read_anchors("model_data/yolo_anchors.txt") image_shape = (720., 1280.) ``` ### 3.2 - Loading a pre-trained model * Training a YOLO model takes a very long time and requires a fairly large dataset of labelled bounding boxes for a large range of target classes. * You are going to load an existing pre-trained Keras YOLO model stored in "yolo.h5". * These weights come from the official YOLO website, and were converted using a function written by Allan Zelener. References are at the end of this notebook. 
Technically, these are the parameters from the "YOLOv2" model, but we will simply refer to it as "YOLO" in this notebook. Run the cell below to load the model from this file. ``` yolo_model = load_model("model_data/yolo.h5") ``` This loads the weights of a trained YOLO model. Here's a summary of the layers your model contains. ``` yolo_model.summary() ``` **Note**: On some computers, you may see a warning message from Keras. Don't worry about it if you do--it is fine. **Reminder**: this model converts a preprocessed batch of input images (shape: (m, 608, 608, 3)) into a tensor of shape (m, 19, 19, 5, 85) as explained in Figure (2). ### 3.3 - Convert output of the model to usable bounding box tensors The output of `yolo_model` is a (m, 19, 19, 5, 85) tensor that needs to pass through non-trivial processing and conversion. The following cell does that for you. If you are curious about how `yolo_head` is implemented, you can find the function definition in the file ['keras_yolo.py'](https://github.com/allanzelener/YAD2K/blob/master/yad2k/models/keras_yolo.py). The file is located in your workspace in this path 'yad2k/models/keras_yolo.py'. ``` yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names)) ``` You added `yolo_outputs` to your graph. This set of 4 tensors is ready to be used as input by your `yolo_eval` function. ### 3.4 - Filtering boxes `yolo_outputs` gave you all the predicted boxes of `yolo_model` in the correct format. You're now ready to perform filtering and select only the best boxes. Let's now call `yolo_eval`, which you had previously implemented, to do this. ``` scores, boxes, classes = yolo_eval(yolo_outputs, image_shape) ``` ### 3.5 - Run the graph on an image Let the fun begin. You have created a graph that can be summarized as follows: 1. <font color='purple'> yolo_model.input </font> is given to `yolo_model`. The model is used to compute the output <font color='purple'> yolo_model.output </font> 2. <font color='purple'> yolo_model.output </font> is processed by `yolo_head`. It gives you <font color='purple'> yolo_outputs </font> 3. <font color='purple'> yolo_outputs </font> goes through a filtering function, `yolo_eval`. It outputs your predictions: <font color='purple'> scores, boxes, classes </font> **Exercise**: Implement predict() which runs the graph to test YOLO on an image. You will need to run a TensorFlow session, to have it compute `scores, boxes, classes`. The code below also uses the following function: ```python image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608)) ``` which outputs: - image: a python (PIL) representation of your image used for drawing boxes. You won't need to use it. - image_data: a numpy-array representing the image. This will be the input to the CNN. **Important note**: when a model uses BatchNorm (as is the case in YOLO), you will need to pass an additional placeholder in the feed_dict {K.learning_phase(): 0}. #### Hint: Using the TensorFlow Session object * Recall that above, we called `K.get_Session()` and saved the Session object in `sess`. * To evaluate a list of tensors, we call `sess.run()` like this: ``` sess.run(fetches=[tensor1,tensor2,tensor3], feed_dict={yolo_model.input: the_input_variable, K.learning_phase():0 } ``` * Notice that the variables `scores, boxes, classes` are not passed into the `predict` function, but these are global variables that you will use within the `predict` function. 
``` def predict(sess, image_file): """ Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the predictions. Arguments: sess -- your tensorflow/Keras session containing the YOLO graph image_file -- name of an image stored in the "images" folder. Returns: out_scores -- tensor of shape (None, ), scores of the predicted boxes out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes out_classes -- tensor of shape (None, ), class index of the predicted boxes Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes. """ # Preprocess your image image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608)) # Run the session with the correct tensors and choose the correct placeholders in the feed_dict. # You'll need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0}) ### START CODE HERE ### (≈ 1 line) out_scores, out_boxes, out_classes = sess.run(fetches = [scores, boxes, classes], feed_dict = {yolo_model.input: image_data, K.learning_phase(): 0}) ### END CODE HERE ### # Print predictions info print('Found {} boxes for {}'.format(len(out_boxes), image_file)) # Generate colors for drawing bounding boxes. colors = generate_colors(class_names) # Draw bounding boxes on the image file draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors) # Save the predicted bounding box on the image image.save(os.path.join("out", image_file), quality=90) # Display the results in the notebook output_image = scipy.misc.imread(os.path.join("out", image_file)) imshow(output_image) return out_scores, out_boxes, out_classes ``` Run the following cell on the "test.jpg" image to verify that your function is correct. ``` out_scores, out_boxes, out_classes = predict(sess, "test.jpg") ``` **Expected Output**: <table> <tr> <td> **Found 7 boxes for test.jpg** </td> </tr> <tr> <td> **car** </td> <td> 0.60 (925, 285) (1045, 374) </td> </tr> <tr> <td> **car** </td> <td> 0.66 (706, 279) (786, 350) </td> </tr> <tr> <td> **bus** </td> <td> 0.67 (5, 266) (220, 407) </td> </tr> <tr> <td> **car** </td> <td> 0.70 (947, 324) (1280, 705) </td> </tr> <tr> <td> **car** </td> <td> 0.74 (159, 303) (346, 440) </td> </tr> <tr> <td> **car** </td> <td> 0.80 (761, 282) (942, 412) </td> </tr> <tr> <td> **car** </td> <td> 0.89 (367, 300) (745, 648) </td> </tr> </table> The model you've just run is actually able to detect 80 different classes listed in "coco_classes.txt". To test the model on your own images: 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub. 2. Add your image to this Jupyter Notebook's directory, in the "images" folder 3. Write your image's name in the cell above code 4. Run the code and see the output of the algorithm! If you were to run your session in a for loop over all your images. Here's what you would get: <center> <video width="400" height="200" src="nb_images/pred_video_compressed2.mp4" type="video/mp4" controls> </video> </center> <caption><center> Predictions of the YOLO model on pictures taken from a camera while driving around the Silicon Valley <br> Thanks [drive.ai](https://www.drive.ai/) for providing this dataset! </center></caption> ## <font color='darkblue'>What you should remember: - YOLO is a state-of-the-art object detection model that is fast and accurate - It runs an input image through a CNN which outputs a 19x19x5x85 dimensional volume. 
- The encoding can be seen as a grid where each of the 19x19 cells contains information about 5 boxes. - You filter through all the boxes using non-max suppression. Specifically: - Score thresholding on the probability of detecting a class to keep only accurate (high probability) boxes - Intersection over Union (IoU) thresholding to eliminate overlapping boxes - Because training a YOLO model from randomly initialized weights is non-trivial and requires a large dataset as well as lot of computation, we used previously trained model parameters in this exercise. If you wish, you can also try fine-tuning the YOLO model with your own dataset, though this would be a fairly non-trivial exercise. **References**: The ideas presented in this notebook came primarily from the two YOLO papers. The implementation here also took significant inspiration and used many components from Allan Zelener's GitHub repository. The pre-trained weights used in this exercise came from the official YOLO website. - Joseph Redmon, Santosh Divvala, Ross Girshick, Ali Farhadi - [You Only Look Once: Unified, Real-Time Object Detection](https://arxiv.org/abs/1506.02640) (2015) - Joseph Redmon, Ali Farhadi - [YOLO9000: Better, Faster, Stronger](https://arxiv.org/abs/1612.08242) (2016) - Allan Zelener - [YAD2K: Yet Another Darknet 2 Keras](https://github.com/allanzelener/YAD2K) - The official YOLO website (https://pjreddie.com/darknet/yolo/) **Car detection dataset**: <a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/88x31.png" /></a><br /><span xmlns:dct="http://purl.org/dc/terms/" property="dct:title">The Drive.ai Sample Dataset</span> (provided by drive.ai) is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</a>. We are grateful to Brody Huval, Chih Hu and Rahul Patel for providing this data.
# Retail Demo Store Messaging Workshop - Amazon Pinpoint In this workshop we will use [Amazon Pinpoint](https://aws.amazon.com/pinpoint/) to add the ability to dynamically send personalized messages to the customers of the Retail Demo Store. We'll build out the following use-cases. - Send new users a welcome email after they sign up for a Retail Demo Store account - When users add items to their shopping cart but do not complete an order, send an email with a coupon code encouraging them to finish their order - Send users an email with product recommendations from the Amazon Personalize campaign we created in the Personalization workshop Recommended Time: 1 hour ## Prerequisites Since this module uses Amazon Personalize to generate and associate personalized product recommendations for users, it is assumed that you have either completed the [Personalization](../1-Personalization/1.1-Personalize.ipynb) workshop or those resources have been pre-provisioned in your AWS environment. If you are unsure and attending an AWS managed event such as a workshop, check with your event lead. ## Architecture Before diving into setting up Pinpoint to send personalize messages to our users, let's review the relevant parts of the Retail Demo Store architecture and how it uses Pinpoint to integrate with the machine learning campaigns created in Personalize. ![Retail Demo Store Pinpoint Architecture](images/retaildemostore-pinpoint-architecture.png) ### AWS Amplify & Amazon Pinpoint The Retail Demo Store's Web UI leverages [AWS Amplify](https://aws.amazon.com/amplify/) to integrate with AWS services for authentication ([Amazon Cognito](https://aws.amazon.com/cognito/)), messaging and analytics ([Amazon Pinpoint](https://aws.amazon.com/pinpoint/)), and to keep our personalization ML models up to date ([Amazon Personalize](https://aws.amazon.com/personalize/)). AWS Amplify provides libraries for JavaScript, iOS, Andriod, and React Native for building web and mobile applications. For this workshop, we'll be focusing on how user information and events from the Retail Demo Store's Web UI are sent to Pinpoint. This is depicted as **(1)** and **(2)** in the architecture above. We'll also show how the user information and events synchronized to Pinpoint are used to create and send personalized messages. When a new user signs up for a Retail Demo Store account, views a product, adds a product to their cart, completes an order, and so on, the relevant function is called in [AnalyticsHandler.js](https://github.com/aws-samples/retail-demo-store/blob/master/src/web-ui/src/analytics/AnalyticsHandler.js) in the Retail Demo Store Web UI. The new user sign up event triggers a call to the `AnalyticsHandler.identify` function where user information from Cognito is used to [update an endpoint](https://docs.aws.amazon.com/pinpoint/latest/apireference/apps-application-id-endpoints.html) in Pinpoint. In Pinpoint, an endpoint represents a destination that you can send messages to, such as a mobile device, email address, or phone number. 
```javascript // Excerpt from src/web-ui/src/analytics/AnalyticsHandler.js export const AnalyticsHandler = { identify(user) { Vue.prototype.$Amplify.Auth.currentAuthenticatedUser().then((cognitoUser) => { let endpoint = { userId: user.id, optOut: 'NONE', userAttributes: { Username: [ user.username ], ProfileEmail: [ user.email ], FirstName: [ user.first_name ], LastName: [ user.last_name ], Gender: [ user.gender ], Age: [ user.age.toString() ], Persona: user.persona.split("_") } } if (user.addresses && user.addresses.length > 0) { let address = user.addresses[0] endpoint.location = { City: address.city, Country: address.country, PostalCode: address.zipcode, Region: address.state } } if (cognitoUser.attributes.email) { endpoint.address = cognitoUser.attributes.email endpoint.channelType = 'EMAIL' Amplify.Analytics.updateEndpoint(endpoint) } }) } } ``` Once an `EMAIL` endpoint is created for our user, we can update attributes on that endpoint based on actions the user takes in the web UI. For example, when the user adds an item to their shopping cart, we'll set the attribute `HasShoppingCart` to `true` to indicate that this endpoint has an active shopping cart. We can also set metrics such as the number of items in the endpoint's cart. As we'll see later, we can use these attributes when building Campaigns in Pinpoint to target endpoints based on their activity in the application. ```javascript // Excerpt from src/web-ui/src/analytics/AnalyticsHandler.js productAddedToCart(userId, cart, product, quantity, experimentCorrelationId) { Amplify.Analytics.updateEndpoint({ attributes: { HasShoppingCart: ['true'] }, metrics: { ItemsInCart: cart.items.length } }) } ``` When the user completes an order, we send revenue tracking events to Pinpoint, as shown below, and also update endpoint attributes and metrics. We'll see how these events, attributes, and metrics be can used later in this workshop. ```javascript // Excerpt from src/web-ui/src/analytics/AnalyticsHandler.js orderCompleted(user, cart, order) { // ... for (var itemIdx in order.items) { let orderItem = order.items[itemIdx] Amplify.Analytics.record({ name: '_monetization.purchase', attributes: { userId: user ? user.id : null, cartId: cart.id, orderId: order.id.toString(), _currency: 'USD', _product_id: orderItem.product_id }, metrics: { _quantity: orderItem.quantity, _item_price: +orderItem.price.toFixed(2) } }) } Amplify.Analytics.updateEndpoint({ attributes: { HasShoppingCart: ['false'], HasCompletedOrder: ['true'] }, metrics: { ItemsInCart: 0 } }) } ``` ### Integrating Amazon Pinpoint & Amazon Personalize - Pinpoint Recommenders When building a Campaign in Amazon Pinpoint, you can associate the Pinpoint Campaign with a machine learning model, or recommender, that will be used to retrieve item recommendations for each endpoint eligible for the campaign. A recommender is linked to an Amazon Personalize Campaign. As you may recall from the [Personalization workshop](../1-Personalization/1.1-Personalize.ipynb), a Personalize Campaign only returns a list of item IDs (which represent product IDs for Retail Demo Store products). In order to turn the list of item IDs into more useful information for building a personalized email, Pinpoint supports the option to associate an AWS Lambda function to a recommender. This function is called passing information about the endpoint and the item IDs from Personalize and the function can return metadata about each item ID. 
Then in your Pinpoint message template you can reference the item metadata to incorporate it into your messages. The Retail Demo Store architecture already has a [Lambda function](https://github.com/aws-samples/retail-demo-store/blob/master/src/aws-lambda/pinpoint-recommender/pinpoint-recommender.py) deployed to use for our Pinpoint recommender. This function calls to Retail Demo Store's [Products](https://github.com/aws-samples/retail-demo-store/tree/master/src/products) microservice to retrieve useful information for each product (name, description, price, image URL, product URL, and so on). We will create a Pinpoint recommender in this workshop to tie it all together. This is depicted as **(3)**, **(4)**, and **(5)** in the architecture above. ## Setup Before we can make API calls to setup Pinpoint from this notebook, we need to install and import the necessary dependencies. ### Import Dependencies Next, let's import the dependencies we'll need for this notebook. We also have to retrieve Uid from a SageMaker notebook instance tag. ``` # Import Dependencies import boto3 import time import json import requests from botocore.exceptions import ClientError # Setup Clients personalize = boto3.client('personalize') ssm = boto3.client('ssm') pinpoint = boto3.client('pinpoint') lambda_client = boto3.client('lambda') iam = boto3.client('iam') # Service discovery will allow us to dynamically discover Retail Demo Store resources servicediscovery = boto3.client('servicediscovery') with open('/opt/ml/metadata/resource-metadata.json') as f: data = json.load(f) sagemaker = boto3.client('sagemaker') sagemakerResponce = sagemaker.list_tags(ResourceArn=data["ResourceArn"]) for tag in sagemakerResponce["Tags"]: if tag['Key'] == 'Uid': Uid = tag['Value'] break ``` ### Determine Pinpoint Application/Project When the Retail Demo Store resources were deployed by the CloudFormation templates, a Pinpoint Application (aka Project) was automatically created with the name "retaildemostore". In order for us to interact with the application via API calls in this notebook, we need to determine the application ID. Let's lookup our Pinpoint application using the Pinpoint API. ``` pinpoint_app_name = 'retaildemostore' pinpoint_app_id = None get_apps_response = pinpoint.get_apps() if get_apps_response['ApplicationsResponse'].get('Item'): for app in get_apps_response['ApplicationsResponse']['Item']: if app['Name'] == pinpoint_app_name: pinpoint_app_id = app['Id'] break assert pinpoint_app_id is not None, 'Retail Demo Store Pinpoint project/application does not exist' print('Pinpoint Application ID: ' + pinpoint_app_id) ``` ### Get Personalize Campaign ARN Before we can create a recommender in Pinpoint, we need the Amazon Personalize Campaign ARN for the product recommendation campaign. Let's look it up in the SSM parameter store where it was set by the Personalize workshop. ``` response = ssm.get_parameter(Name='retaildemostore-product-recommendation-campaign-arn') personalize_campaign_arn = response['Parameter']['Value'] assert personalize_campaign_arn != 'NONE', 'Personalize Campaign ARN not initialized - run Personalization workshop' print('Personalize Campaign ARN: ' + personalize_campaign_arn) ``` ### Get Recommendation Customizer Lambda ARN We also need the ARN for our Lambda function that will return product metadata for the item IDs. This function has already been deployed for you. Let's lookup our function by its name. 
```
response = lambda_client.get_function(FunctionName = 'RetailDemoStorePinpointRecommender')

lambda_function_arn = response['Configuration']['FunctionArn']
print('Recommendation customizer Lambda ARN: ' + lambda_function_arn)
```

### Get IAM Role for Pinpoint to access Personalize

In order for Pinpoint to access our Personalize campaign to get recommendations, we need to provide it with an IAM Role. The Retail Demo Store deployment has already created a role with the necessary policies. Let's look it up by its role name.

```
response = iam.get_role(RoleName = Uid+'-PinptP9e')
pinpoint_personalize_role_arn = response['Role']['Arn']
print('Pinpoint IAM role for Personalize: ' + pinpoint_personalize_role_arn)
```

## Create Pinpoint Recommender Configuration

With our environment set up and configuration info loaded, we can now create a recommender in Amazon Pinpoint.

> We're using the Pinpoint API to create the Recommender Configuration in this workshop. You can also create a recommender in the AWS Console for Pinpoint under the "Machine learning models" section.

A few things to note in the recommender configuration below.

- In the `Attributes` section, we're creating user-friendly names for the product information fields returned by our Lambda function. These names will be used in the Pinpoint console UI when designing message templates and can make it easier for template designers to select fields.
- We're using `PINPOINT_USER_ID` for the `RecommendationProviderIdType` since the endpoint's `UserId` is where we set the ID for the user in the Retail Demo Store. Since this ID is what we use to represent each user when training the recommendation models in Personalize, we need Pinpoint to use this ID as well when retrieving recommendations.
- We're limiting the number of recommendations per message to 4.

```
response = pinpoint.create_recommender_configuration(
    CreateRecommenderConfiguration={
        'Attributes': {
            'Recommendations.Name': 'Product Name',
            'Recommendations.URL': 'Product Detail URL',
            'Recommendations.Category': 'Product Category',
            'Recommendations.Description': 'Product Description',
            'Recommendations.Price': 'Product Price',
            'Recommendations.ImageURL': 'Product Image URL'
        },
        'Description': 'Retail Demo Store Personalize recommender for Pinpoint',
        'Name': 'retaildemostore-recommender',
        'RecommendationProviderIdType': 'PINPOINT_USER_ID',
        'RecommendationProviderRoleArn': pinpoint_personalize_role_arn,
        'RecommendationProviderUri': personalize_campaign_arn,
        'RecommendationTransformerUri': lambda_function_arn,
        'RecommendationsPerMessage': 4
    }
)

recommender_id = response['RecommenderConfigurationResponse']['Id']
print('Pinpoint recommender configuration ID: ' + recommender_id)
```

### Verify Machine Learning Model / Recommender

If you open a web browser window/tab and browse to the Pinpoint service in the AWS console for the AWS account we're working with, you should see the ML Model / Recommender that we just created in Pinpoint.

![Pinpoint ML Model / Recommender](images/pinpoint-ml-model.png)

## Create Personalized Email Templates

With Amazon Pinpoint we can create email templates that can be sent to groups of our users based on criteria. We'll start by creating email templates for the following use-cases, then step through how we target and send emails to the right users at the appropriate time.
- Welcome Email - sent to users shortly after creating a Retail Demo Store account
- Abandoned Cart Email - sent to users who leave items in their cart without completing an order
- Personalized Recommendations Email - includes recommendations from the recommender we just created

### Load Welcome Email Templates

The first email template will be a welcome email template that is sent to new users of the Retail Demo Store after they create an account. Our templates will support both HTML and plain text formats. We'll load both formats and create the template. You can find all templates used in this workshop in the `pinpoint-templates` directory where this notebook is located. They can also be found in the Retail Demo Store source code repository.

Let's load the HTML version of our welcome template and then look at a snippet of it. The complete template is available for review at [pinpoint-templates/welcome-email-template.html](pinpoint-templates/welcome-email-template.html)

```
with open('pinpoint-templates/welcome-email-template.html', 'r') as html_file:
    html_template = html_file.read()
```

```html
// Excerpt from pinpoint-templates/welcome-email-template.html
<table border="0" cellpadding="0" cellspacing="0">
    <tr>
        <td>
            <h1>Thank you for joining the Retail Demo Store!</h1>
            <p><strong>Hi, {{User.UserAttributes.FirstName}}.</strong> We just wanted to send you a quick note thanking you for creating an account on the Retail Demo Store. We're excited to serve you.
            </p>
            <p>We pride ourselves in providing a wide variety of high quality products in our store and delivering exceptional customer service.
            </p>
            <p>Please drop-in and check out our store often to see what's new and for personalized recommendations we think you'll love.
            </p>
            <p>Cheers,<br/>Retail Demo Store team
            </p>
        </td>
    </tr>
    <tr>
        <td style="text-align: center; padding-top: 20px">
            <small>Retail Demo Store &copy; 2019-2020</small>
        </td>
    </tr>
</table>
```

Notice how we're using the mustache template tagging syntax, `{{User.UserAttributes.FirstName}}`, to display the user's first name. This will provide a nice touch of personalization to our welcome email.

Next we'll load and display the text version of our welcome email.

```
with open('pinpoint-templates/welcome-email-template.txt', 'r') as text_file:
    text_template = text_file.read()

print('Text Template:')
print(text_template)
```

### Create Welcome Email Pinpoint Template

Now let's take our HTML and text email template source and create a template in Amazon Pinpoint. We'll use a default substitution of "there" for the user's first name attribute if it is not set for some reason. This will result in the email greeting being "Hi there,..." rather than "Hi ,..." if we don't have a value for first name.

```
response = pinpoint.create_email_template(
    EmailTemplateRequest={
        'Subject': 'Welcome to the Retail Demo Store',
        'TemplateDescription': 'Welcome email sent to new customers',
        'HtmlPart': html_template,
        'TextPart': text_template,
        'DefaultSubstitutions': json.dumps({
            'User.UserAttributes.FirstName': 'there'
        })
    },
    TemplateName='RetailDemoStore-Welcome'
)

welcome_template_arn = response['CreateTemplateMessageBody']['Arn']
print('Welcome email template ARN: ' + welcome_template_arn)
```

### Load Abandoned Cart Email Templates

Next we'll create an email template that includes messaging for users who add items to their cart but fail to complete an order. The following is a snippet of the Abandoned Cart Email template. Notice how multiple style properties are being set for email formatting.
You can also see how the template refers to custom Attributes such as cart item properties like ```ShoppingCartItemTitle``` and ```ShoppingCartItemImageURL``` can be passed. Complete template available for review at [pinpoint-templates/abandoned-cart-email-template.html](pinpoint-templates/abandoned-cart-email-template.html) ``` with open('pinpoint-templates/abandoned-cart-email-template.html', 'r') as html_file: html_template = html_file.read() ``` ```html // Excerpt from pinpoint-templates/abandoned-cart-email-template.html <tr> <td style="width:139px;"> <img alt="{{Attributes.ShoppingCartItemTitle}}" height="auto" src="{{Attributes.ShoppingCartItemImageURL}}" style="border:none;display:block;outline:none;text-decoration:none;height:auto;width:100%;font-size:13px;" width="139" /> </td> </tr> </tbody> </table> </td> </tr> </table> </div> <!--[if mso | IE]> </td> <td style="vertical-align:top;width:285px;" > <![endif]--> <div class="mj-column-per-50 mj-outlook-group-fix" style="font-size:0px;text-align:left;direction:ltr;display:inline-block;vertical-align:top;width:50%;"> <table border="0" cellpadding="0" cellspacing="0" role="presentation" style="vertical-align:top;" width="100%"> <tr> <td align="center" style="font-size:0px;padding:10px 25px;word-break:break-word;"> <div style="font-family:Ubuntu, Helvetica, Arial, sans-serif;font-size:18px;font-weight:bold;line-height:1;text-align:center;color:#000000;"> <p>{{Attributes.ShoppingCartItemTitle}}</p> </div> </td> </tr> <tr> <td align="center" vertical-align="middle" style="font-size:0px;padding:10px 25px;word-break:break-word;"> <table border="0" cellpadding="0" cellspacing="0" role="presentation" style="border-collapse:separate;line-height:100%;"> <tr> <td align="center" bgcolor="#FF9900" role="presentation" style="border:none;border-radius:3px;cursor:auto;mso-padding-alt:10px 25px;background:#FF9900;" valign="middle"> <a href="{{Attributes.WebsiteCartURL}}" style="display:inline-block;background:#FF9900;color:#ffffff;font-family:Ubuntu, Helvetica, Arial, sans-serif;font-size:9px;font-weight:normal;line-height:120%;margin:0;text-decoration:none;text-transform:none;padding:10px 25px;mso-padding-alt:0px;border-radius:3px;" target="_blank"> BUY NOW </a> </td> </tr> ``` ``` with open('pinpoint-templates/abandoned-cart-email-template.txt', 'r') as text_file: text_template = text_file.read() print('Text Template:') print(text_template) ``` ### Create Abandoned Cart Email Template Now we can create an email template in Pinpoint for our abandoned cart use-case. ``` response = pinpoint.create_email_template( EmailTemplateRequest={ 'Subject': 'Retail Demo Store - Motivation to Complete Your Order', 'TemplateDescription': 'Abandoned cart email template', 'HtmlPart': html_template, 'TextPart': text_template, 'DefaultSubstitutions': json.dumps({ 'User.UserAttributes.FirstName': 'there' }) }, TemplateName='RetailDemoStore-AbandonedCart' ) abandoned_cart_template_arn = response['CreateTemplateMessageBody']['Arn'] print('Abandoned cart email template ARN: ' + abandoned_cart_template_arn) ``` ### Load Recommendations Email Templates Next we'll create an email template that includes recommendations from the Amazon Personalize product recommendation campaign that we created in the [Personalization workshop](../1-Personalization/1.1-Personalize.ipynb). If you haven't completed the personalization workshop, please do so now and come back to this workshop when complete. 
As with the welcome email template, let's load and then view snippets of the HTML and text formats for our template. Complete template is available at [pinpoint-templates/recommendations-email-template.html](pinpoint-templates/recommendations-email-template.html) ``` with open('pinpoint-templates/recommendations-email-template.html', 'r') as html_file: html_template = html_file.read() ``` ``` html // Excerpt from pinpoint-templates/recommendations-email-template.html <table border="0" cellpadding="0" cellspacing="0"> <tr> <td> <h1>Hi, {{User.UserAttributes.FirstName}}. Greetings from the Retail Demo Store!</h1> <p>Here are a few products inspired by your shopping trends</p> <p>&nbsp;</p> </td> </tr> <tr> <td> <table border="0" cellpadding="4" cellspacing="0"> <tr valign="top"> <td style="text-align: left; width: 40%;" width="40%"> <a href="{{Recommendations.URL.[0]}}"> <img src="{{Recommendations.ImageURL.[0]}}" alt="{{Recommendations.Name.[0]}}" style="min-width: 50px; max-width: 300px; border: 0; text-decoration:none; vertical-align: baseline;"/> </a> </td> <td style="text-align: left;"> <h3>{{Recommendations.Name.[0]}}</h3> <p>{{Recommendations.Description.[0]}}</p> <p><strong>{{Recommendations.Price.[0]}}</strong></p> <p><a href="{{Recommendations.URL.[0]}}"><strong>Buy Now!</strong></a></p> </td> </tr> ``` Notice the use of several new mustache template tags in this template. For example, `{{Recommendations.Name.[0]}}` resolves to the product name of the first product recommended by Personalize. The product name came from our Lambda function which was called by Pinpoint after it called `get_recommendations` on our Personalize campaign. Next load the text version of our template. ``` with open('pinpoint-templates/recommendations-email-template.txt', 'r') as text_file: text_template = text_file.read() print('Text Template:') print(text_template) ``` ### Create Recommendations Email Template This time when we create the template in Pinpoint, we'll specify the `RecommenderId` for the machine learning model (Amazon Personalize) that we created earlier. ``` response = pinpoint.create_email_template( EmailTemplateRequest={ 'Subject': 'Retail Demo Store - Products Just for You', 'TemplateDescription': 'Personalized recommendations email template', 'RecommenderId': recommender_id, 'HtmlPart': html_template, 'TextPart': text_template, 'DefaultSubstitutions': json.dumps({ 'User.UserAttributes.FirstName': 'there' }) }, TemplateName='RetailDemoStore-Recommendations' ) recommendations_template_arn = response['CreateTemplateMessageBody']['Arn'] print('Recommendation email template ARN: ' + recommendations_template_arn) ``` ### Verify Email Templates If you open a web browser window/tab and browse to the Pinpoint service in the AWS console for the AWS account we're working with, you should see the message templates we just created. ![Pinpoint Message Templates](images/pinpoint-msg-templates.png) ## Enable Pinpoint Email Channel Before we can setup Segments and Campaigns to send emails, we have to enable the email channel in Pinpoint and verify sending and receiving email addresses. > We'll be using the Pinpoint email channel in sandbox mode. This means that Pinpoint will only send emails from and to addresses that have been verified in the Pinpoint console. In the Pinpoint console, click on "All Projects" and then the "retaildemostore" project. 
![Pinpoint Projects](images/pinpoint-projects.png)

### Email Settings

From the "retaildemostore" project page, expand "Settings" in the left navigation and then click "Email". You will see that email has not yet been enabled as a channel. Click the "Edit" button to enable Pinpoint to send emails and to verify some email addresses.

![Pinpoint Email Settings](images/pinpoint-email-setup.png)

### Verify Some Email Addresses

On the "Edit email" page, check the box to enable the email channel and enter a valid email address that you have the ability to check throughout the rest of this workshop.

![Pinpoint Verify Email Addresses](images/pinpoint-email-verify.png)

### Verify Additional Email Addresses

So that we can send an email to more than one endpoint in this workshop, verify a couple more variations of your email address. Assuming your **valid** email address is `[email protected]`, add a few more variations using `+` notation such as...

- `[email protected]`
- `[email protected]`
- `[email protected]`

Just enter a variation, click the "Verify email address" button, and repeat until you've added a few more. Write down or commit to memory the variations you created--we'll need them later. By adding these variations, we're able to create separate Retail Demo Store accounts for each email address and therefore separate endpoints in Pinpoint that we can target. Note that emails sent to these variations should still be delivered to your same inbox.

### Check Your Inbox & Click Verification Links

Pinpoint should have sent verification emails to all of the email addresses you added above. Sign in to your email client and check your inbox for the verification emails. Once you receive the emails (it can take a few minutes), click on the verification link in **each email**. If after several minutes you don't receive the verification email or you want to use a different address, repeat the verification process above.

> Your email address(es) must be verified before we can set up Campaigns in Pinpoint.

After you click the verify link in the email sent to each variation of your email address, you should see a success page like the following.

![Email Verified](images/pinpoint-ses-success.png)

## Let's Go Shopping - Create Retail Demo Store User Accounts & Pinpoint Endpoints

Next let's create a few new user accounts in the Retail Demo Store Web UI using the email address(es) that we just verified. Based on the source code snippets we saw earlier, we know that the Retail Demo Store will create endpoints in Pinpoint for new accounts.

<div class="alert alert-info">
IMPORTANT: each Retail Demo Store account must be created in an entirely separate web browser session in order for them to be created as separate endpoints in Pinpoint. Signing out and attempting to create a new account in the same browser will NOT work. The easiest way to do this successfully is to use Google Chrome and open new Incognito windows for each new account. Alternatively, you could use multiple browser types (i.e. Chrome, Firefox, Safari, IE) and/or separate devices to create accounts such as a mobile phone or tablet.
</div>

1. Open the Retail Demo Store Web UI in a new Incognito window. If you don't already have the Web UI open or need the URL, you can find it in the "Outputs" tab for the Retail Demo Store CloudFormation stack in your AWS account. Look for the "WebURL" output field, right click on the link, and select "Open Link in Incognito Window" (Chrome only).
![CloudFormation Outputs](images/retaildemostore-cfn-outputs.png) 2. Click the "Sign In" button in the top navigation (right side) and then click on the "Create account" link in the "Sign in" form. ![Create Retail Demo Store account](images/retaildemostore-create-account.png) 3. A few seconds after creating your account you should receive an email with a six digit confirmation code. Enter this code on the confirmation page. ![Confirm Retail Demo Store account](images/retaildemostore-confirm-account.png) 4. Once confirmed you can sign in to your account with your user name and password. At this point you should have a endpoint in Pinpoint for this user. 5. Close your Incognito window(s). 6. Open a new Incognito window and **repeat the process for SOME (but not all) of your remaining email address variations** you verified in Pinpoint above. **As a reminder, it's important that you create each Retail Demo Store account in a separate/new Incognito window, browser application, or device. Otherwise, your accounts will overwrite the same endpoint in Pinpoint.** <div class="alert alert-info"> Be sure to hold back one or two of your verified email addresses until after we create a welcome email campaign below so the sign up events fall within the time window of the campaign. </div> ### Shopping Behavior With your Retail Demo Store accounts created, perform some activities with some of your accounts. - For one of your users add some items to the shopping cart but do not checkout to simulate an abandoned cart scenario. - For another user, add some items to the cart and complete an order or two so that revenue events are sent all the way through to Pinpoint. - Also be sure to view a few products by clicking through to the product detail view. Select products that would indicate an affinity for a product type (e.g. shoes or electronics) so you can see how product recommendations are tailored in the product recommendations email. ## Create Pinpoint Segments With our Recommender and message templates in place and a few test users created in the Retail Demo Store, let's turn to creating Segments in Pinpoint. After our Segments are created, we'll create some Campaigns. 1. Start by browsing to the Amazon Pinpoint service page in the AWS account where the Retail Demo Store was deployed. Click on "All Projects" and you should see the "retaildemostore" project. Click on the "retaildemostore" project and then "Segments" in the left navigation. Click on the "Create a segment" button. ![Pinpoint Segments](images/pinpoint-segments.png) 2. Then click on the "Create segment" button. We will be building a dynamic segment based on the endpoints that were automatically created when we created our Retail Demo Store user accounts. We'll include all endpoints that have an email address by adding a filter by channel type with a value of `EMAIL`. Name your segment "AllEmailUsers" and scroll down and click the "Create segment" button at the bottom of the page. ![Pinpoint Create Segment](images/pinpoint-create-segment.png) 3. Create another segment that is based on the "AllEmailUsers" segment you just created but has an additional filter on the `HasShoppingCart` endpoint attribute and has a value of `true`. This represents all users that have a shopping cart and will be used for our abandoned cart campaign. If you don't see this endpoint attribute or don't see `true` as an option, switch to another browser tab/window and add items to the shopping cart for one of your test users. 
![Pinpoint Carts Segment](images/pinpoint-carts-segment.png)

## Create Campaigns

With our segments created for all users and for users with shopping carts, let's create campaigns for our welcome email, product recommendations, and abandoned cart use-cases.

### Welcome Email Campaign

Let's start with the welcome email campaign. For the "retaildemostore" project in Pinpoint, click "Campaigns" in the left navigation and then the "Create a campaign" button.

1. For Step 1, give your campaign a name such as "WelcomeEmail", select "Standard campaign" as the campaign type, and "Email" as the channel. Click "Next" to continue.

![Pinpoint Create Campaign](images/pinpoint-create-welcome-campaign-1.png)

2. For Step 2, we will be using our "AllEmailUsers" dynamic segment. Click "Next" to continue.

![Pinpoint Create Campaign](images/pinpoint-create-welcome-campaign-2.png)

3. For Step 3, choose the "RetailDemoStore-Welcome" email template, scroll to the bottom of the page, and click "Next".

![Pinpoint Create Campaign](images/pinpoint-create-welcome-campaign-3.png)

4. For Step 4, we want the campaign to be sent when the `UserSignedUp` event occurs. Set the campaign start date to be today's date so that it begins immediately and the end date to be a few days into the future. **Be sure to adjust to your current time zone.**

![Pinpoint Create Campaign](images/pinpoint-create-welcome-campaign-4.png)

5. Scroll to the bottom of the page, click "Next".

6. Click "Launch campaign" to launch your campaign.

<div class="alert alert-info">
<strong>Given that the welcome campaign is activated based on sign up events that occur between the campaign start and end times, to test this campaign you must wait until after the campaign starts and then use one of your remaining verified email addresses to create a new Retail Demo Store account.</strong>
</div>

### Abandoned Cart Campaign

To create an abandoned cart campaign, repeat the steps you followed for the Welcome campaign above but this time select the `UsersWithCarts` segment, the `RetailDemoStore-AbandonedCart` email template, and the `Session Stop` event. This will trigger the abandoned cart email to be sent when users end their session while still having a shopping cart.

Launch the campaign, wait for the campaign to start, and then close out some browser sessions for user(s) with items still in their cart. This can take some trial and error, and some waiting, given how browsers and devices trigger end-of-session events.

### Recommendations Campaign

Finally, create a recommendations campaign that targets the `AllEmailUsers` segment and uses the `RetailDemoStore-Recommendations` message template. This time, however, rather than trigger the campaign based on an event, we'll send the campaign immediately. Click "Next", launch the campaign, and check the email inbox for your test accounts after a few moments.

![Pinpoint Create Campaign](images/pinpoint-create-rec-campaign-4.png)

## Bonus - Pinpoint Journeys

With Amazon Pinpoint journeys, you can create custom experiences for your customers using an easy to use, drag-and-drop interface. When you build a journey, you choose the activities that you want to add to the journey. These activities can perform a variety of different actions, like sending an email to journey participants, waiting a defined period of time, or splitting users based on a certain action, such as when they open or click a link in an email.
Using the segments and message templates you've already created, experiment with creating a journey that guides users through a messaging experience. For example, start a journey by sending all users the Recommendations message template. Then add a pause/wait step followed by a Multivariate Split that directs users down separate paths depending on whether they've completed an order (hint: create an `OrderCompleted` segment), opened the Recommendations email, or done nothing. Perhaps users who completed an order might receive a message asking them to refer a friend to the Retail Demo Store, and users who just opened the email might be sent a message with a coupon to motivate them to get shopping (you'll need to create new message templates for these).

## Workshop Complete

Congratulations! You have completed the Retail Demo Store Pinpoint Workshop.

### Cleanup

If you launched the Retail Demo Store in your personal AWS account **AND** you're done with all workshops & your evaluation of the Retail Demo Store, you can remove all provisioned AWS resources and data by deleting the CloudFormation stack you used to deploy the Retail Demo Store. Although deleting the CloudFormation stack will delete the entire "retaildemostore" project in Pinpoint, including all endpoint data, it will not delete resources we created directly in this workshop (i.e. outside of the "retaildemostore" Pinpoint project). The following cleanup steps will remove the resources we created outside the "retaildemostore" Pinpoint project.

> If you are participating in an AWS managed event such as a workshop and using an AWS provided temporary account, you can skip the following cleanup steps unless otherwise instructed.

#### Delete Recommender Configuration

```
response = pinpoint.delete_recommender_configuration(RecommenderId=recommender_id)
print(json.dumps(response, indent=2))
```

#### Delete Email Message Templates

```
response = pinpoint.delete_email_template(TemplateName='RetailDemoStore-Welcome')
print(json.dumps(response, indent=2))

response = pinpoint.delete_email_template(TemplateName='RetailDemoStore-AbandonedCart')
print(json.dumps(response, indent=2))

response = pinpoint.delete_email_template(TemplateName='RetailDemoStore-Recommendations')
print(json.dumps(response, indent=2))
```

Other resources allocated for the Retail Demo Store will be deleted when the CloudFormation stack is deleted.

End of workshop
github_jupyter
``` # !pip install graphviz ``` To produce the decision tree visualization you should install the graphviz package into your system: https://stackoverflow.com/questions/35064304/runtimeerror-make-sure-the-graphviz-executables-are-on-your-systems-path-aft ``` # Run one of these in case you have problems with graphviz # All users: try this first # ! conda install graphviz # If that doesn't work: # Ubuntu/Debian users only # ! sudo apt-get update && sudo apt-get install graphviz # Mac users only (assuming you have homebrew installed) # ! brew install graphviz # Windows users, check the stack overflow link. Sorry! from collections import Counter from os.path import join import matplotlib.pyplot as plt import seaborn as sns import numpy as np import pandas as pd from sklearn.cluster import DBSCAN, KMeans, AgglomerativeClustering from sklearn.base import clone from sklearn.metrics import pairwise_distances from scipy.cluster.hierarchy import dendrogram from sklearn.manifold import TSNE from sklearn.tree import DecisionTreeClassifier, export_graphviz from sklearn.model_selection import train_test_split import graphviz sns.set() ``` ## Import preprocessed data ``` df = pd.read_csv(join('..', 'data', 'tugas_preprocessed.csv')) # Splitting feature names into groups non_metric_features = df.columns[df.columns.str.startswith('x')] pc_features = df.columns[df.columns.str.startswith('PC')] metric_features = df.columns[~df.columns.str.startswith('x') & ~df.columns.str.startswith('PC')] ``` # Before we proceed - Consider applying the outlier filtering method discussed last class. - We manually filtered the dataset's outliers based on a univariate analysis - Consider dropping/transforming the variable "rcn". Why? - Very little correlation with any other variables - Remember the Component planes: the SOM's units were indistinguishable on this variable ``` # Based on the hyperparameters found in the previous class dbscan = DBSCAN(eps=1.9, min_samples=20, n_jobs=4) dbscan_labels = dbscan.fit_predict(df[metric_features]) Counter(dbscan_labels) # Save the newly detected outliers (they will be classified later based on the final clusters) df_out = # CODE HERE # New df without outliers and 'rcn' df = # CODE HERE # Update metric features list metric_features = # CODE HERE ``` # Clustering by Perspectives - Demographic/Behavioral Perspective: - Product Perspective: ``` # Split variables into perspectives (example, requires critical thinking and domain knowledge) demographic_features = [ 'income', 'frq', 'per_net_purchase', 'spent_online' ] preference_features = [ 'clothes', 'kitchen', 'small_appliances', 'toys', 'house_keeping', ] df_dem = df[demographic_features].copy() df_prf = df[preference_features].copy() ``` ## Testing on K-means and Hierarchical clustering Based on (1) our previous tests and (2) the context of this problem, the optimal number of clusters is expected to be between 3 and 7. ``` def get_ss(df): """Computes the sum of squares for all variables given a dataset """ ss = np.sum(df.var() * (df.count() - 1)) return ss # return sum of sum of squares of each df variable def r2(df, labels): sst = get_ss(df) ssw = np.sum(df.groupby(labels).apply(get_ss)) return 1 - ssw/sst def get_r2_scores(df, clusterer, min_k=2, max_k=10): """ Loop over different values of k. To be used with sklearn clusterers. 
""" r2_clust = {} for n in range(min_k, max_k): clust = clone(clusterer).set_params(n_clusters=n) labels = clust.fit_predict(df) r2_clust[n] = r2(df, labels) return r2_clust # Set up the clusterers (try out a KMeans and a AgglomerativeClustering) kmeans = # CODE HERE hierarchical = # CODE HERE ``` ### Finding the optimal clusterer on demographic variables ``` # Obtaining the R² scores for each cluster solution on demographic variables r2_scores = {} r2_scores['kmeans'] = get_r2_scores(df_dem, kmeans) for linkage in ['complete', 'average', 'single', 'ward']: r2_scores[linkage] = get_r2_scores( df_dem, hierarchical.set_params(linkage=linkage) ) pd.DataFrame(r2_scores) # Visualizing the R² scores for each cluster solution on demographic variables pd.DataFrame(r2_scores).plot.line(figsize=(10,7)) plt.title("Demographic Variables:\nR² plot for various clustering methods\n", fontsize=21) plt.legend(title="Cluster methods", title_fontsize=11) plt.xlabel("Number of clusters", fontsize=13) plt.ylabel("R² metric", fontsize=13) plt.show() ``` ### Repeat the process for product variables ``` # Obtaining the R² scores for each cluster solution on product variables r2_scores = {} r2_scores['kmeans'] = get_r2_scores(df_prf, kmeans) for linkage in ['complete', 'average', 'single', 'ward']: r2_scores[linkage] = get_r2_scores( df_prf, hierarchical.set_params(linkage=linkage) ) # Visualizing the R² scores for each cluster solution on product variables pd.DataFrame(r2_scores).plot.line(figsize=(10,7)) plt.title("Product Variables:\nR2 plot for various clustering methods\n", fontsize=21) plt.legend(title="Cluster methods", title_fontsize=11) plt.xlabel("Number of clusters", fontsize=13) plt.ylabel("R2 metric", fontsize=13) plt.show() ``` ## Merging the Perspectives - How can we merge different cluster solutions? 
``` # Applying the right clustering (algorithm and number of clusters) for each perspective kmeans_prod = # CODE HERE prod_labels = kmeans_prod.fit_predict(df_prf) kmeans_behav = # CODE HERE behavior_labels = kmeans_behav.fit_predict(df_dem) # Setting new columns df['product_labels'] = prod_labels df['behavior_labels'] = behavior_labels # Count label frequencies (contigency table) # CODE HERE ``` ### Manual merging: Merge lowest frequency clusters into closest clusters ``` # Clusters with low frequency to be merged: to_merge = # CODE HERE df_centroids = df.groupby(['behavior_labels', 'product_labels'])\ [metric_features].mean() # Computing the euclidean distance matrix between the centroids euclidean = # CODE HERE df_dists = pd.DataFrame( euclidean, columns=df_centroids.index, index=df_centroids.index ) # Merging each low frequency clustering (source) to the closest cluster (target) source_target = {} for clus in to_merge: if clus not in source_target.values(): source_target[clus] = df_dists.loc[clus].sort_values().index[1] source_target df_ = df.copy() # Changing the behavior_labels and product_labels based on source_target for source, target in source_target.items(): mask = # CODE HERE (changing the behavior and product labels of each source based on target) df_.loc[mask, 'behavior_labels'] = target[0] df_.loc[mask, 'product_labels'] = target[1] # New contigency table df_.groupby(['product_labels', 'behavior_labels'])\ .size()\ .to_frame()\ .reset_index()\ .pivot('behavior_labels', 'product_labels', 0) ``` ### Merging using Hierarchical clustering ``` # Centroids of the concatenated cluster labels df_centroids = # CODE HERE (group by both on behavior and product label) df_centroids # Using Hierarchical clustering to merge the concatenated cluster centroids hclust = AgglomerativeClustering( linkage='ward', affinity='euclidean', distance_threshold=0, n_clusters=None ) hclust_labels = hclust.fit_predict(df_centroids) # Adapted from: # https://scikit-learn.org/stable/auto_examples/cluster/plot_agglomerative_dendrogram.html#sphx-glr-auto-examples-cluster-plot-agglomerative-dendrogram-py # create the counts of samples under each node (number of points being merged) counts = np.zeros(hclust.children_.shape[0]) n_samples = len(hclust.labels_) # hclust.children_ contains the observation ids that are being merged together # At the i-th iteration, children[i][0] and children[i][1] are merged to form node n_samples + i for i, merge in enumerate(hclust.children_): # track the number of observations in the current cluster being formed current_count = 0 for child_idx in merge: if child_idx < n_samples: # If this is True, then we are merging an observation current_count += 1 # leaf node else: # Otherwise, we are merging a previously formed cluster current_count += counts[child_idx - n_samples] counts[i] = current_count # the hclust.children_ is used to indicate the two points/clusters being merged (dendrogram's u-joins) # the hclust.distances_ indicates the distance between the two points/clusters (height of the u-joins) # the counts indicate the number of points being merged (dendrogram's x-axis) linkage_matrix = np.column_stack( [hclust.children_, hclust.distances_, counts] ).astype(float) # Plot the corresponding dendrogram sns.set() fig = plt.figure(figsize=(11,5)) # The Dendrogram parameters need to be tuned y_threshold = 2.3 dendrogram(linkage_matrix, truncate_mode='level', labels=df_centroids.index, p=5, color_threshold=y_threshold, above_threshold_color='k') plt.hlines(y_threshold, 0, 1000, 
colors="r", linestyles="dashed") plt.title(f'Hierarchical Clustering - {linkage.title()}\'s Dendrogram', fontsize=21) plt.xlabel('Number of points in node (or index of point if no parenthesis)') plt.ylabel(f'Euclidean Distance', fontsize=13) plt.show() # Re-running the Hierarchical clustering based on the correct number of clusters hclust = # CODE HERE hclust_labels = hclust.fit_predict(df_centroids) df_centroids['hclust_labels'] = hclust_labels df_centroids # centroid's cluster labels # Mapper between concatenated clusters and hierarchical clusters cluster_mapper = df_centroids['hclust_labels'].to_dict() df_ = df.copy() # Mapping the hierarchical clusters on the centroids to the observations df_['merged_labels'] = df_.apply(# CODE HERE) # Merged cluster centroids df_.groupby('merged_labels').mean()[metric_features] #Merge cluster contigency table # Getting size of each final cluster df_counts = df_.groupby('merged_labels')\ .size()\ .to_frame() # Getting the product and behavior labels df_counts = df_counts\ .rename({v:k for k, v in cluster_mapper.items()})\ .reset_index() df_counts['behavior_labels'] = df_counts['merged_labels'].apply(lambda x: x[0]) df_counts['product_labels'] = df_counts['merged_labels'].apply(lambda x: x[1]) df_counts.pivot('behavior_labels', 'product_labels', 0) # Setting df to have the final product, behavior and merged clusters df = df_.copy() ``` ## Cluster Analysis ``` def cluster_profiles(df, label_columns, figsize, compar_titles=None): """ Pass df with labels columns of one or multiple clustering labels. Then specify this label columns to perform the cluster profile according to them. """ if compar_titles == None: compar_titles = [""]*len(label_columns) sns.set() fig, axes = plt.subplots(nrows=len(label_columns), ncols=2, figsize=figsize, squeeze=False) for ax, label, titl in zip(axes, label_columns, compar_titles): # Filtering df drop_cols = [i for i in label_columns if i!=label] dfax = df.drop(drop_cols, axis=1) # Getting the cluster centroids and counts centroids = dfax.groupby(by=label, as_index=False).mean() counts = dfax.groupby(by=label, as_index=False).count().iloc[:,[0,1]] counts.columns = [label, "counts"] # Setting Data pd.plotting.parallel_coordinates(centroids, label, color=sns.color_palette(), ax=ax[0]) sns.barplot(x=label, y="counts", data=counts, ax=ax[1]) #Setting Layout handles, _ = ax[0].get_legend_handles_labels() cluster_labels = ["Cluster {}".format(i) for i in range(len(handles))] ax[0].annotate(text=titl, xy=(0.95,1.1), xycoords='axes fraction', fontsize=13, fontweight = 'heavy') ax[0].legend(handles, cluster_labels) # Adaptable to number of clusters ax[0].axhline(color="black", linestyle="--") ax[0].set_title("Cluster Means - {} Clusters".format(len(handles)), fontsize=13) ax[0].set_xticklabels(ax[0].get_xticklabels(), rotation=-20) ax[1].set_xticklabels(cluster_labels) ax[1].set_xlabel("") ax[1].set_ylabel("Absolute Frequency") ax[1].set_title("Cluster Sizes - {} Clusters".format(len(handles)), fontsize=13) plt.subplots_adjust(hspace=0.4, top=0.90) plt.suptitle("Cluster Simple Profilling", fontsize=23) plt.show() # Profilling each cluster (product, behavior, merged) cluster_profiles( df = df[metric_features.to_list() + ['product_labels', 'behavior_labels', 'merged_labels']], label_columns = ['product_labels', 'behavior_labels', 'merged_labels'], figsize = (28, 13), compar_titles = ["Product clustering", "Behavior clustering", "Merged clusters"] ) ``` ## Cluster visualization using t-SNE ``` # This is step can be quite time consuming 
two_dim = # CODE HERE (explore the TSNE class and obtain the 2D coordinates) # t-SNE visualization pd.DataFrame(two_dim).plot.scatter(x=0, y=1, c=df['merged_labels'], colormap='tab10', figsize=(15,10)) plt.show() ``` ## Assess feature importance and reclassify outliers ### Using the R² What proportion of each variables total SS is explained between clusters? ``` def get_ss_variables(df): """Get the SS for each variable """ ss_vars = df.var() * (df.count() - 1) return ss_vars def r2_variables(df, labels): """Get the R² for each variable """ sst_vars = get_ss_variables(df) ssw_vars = np.sum(df.groupby(labels).apply(get_ss_variables)) return 1 - ssw_vars/sst_vars # We are essentially decomposing the R² into the R² for each variable # CODE HERE (obtain the R² for each variable using the functions above) ``` ### Using a Decision Tree We get the normalized total reduction of the criterion (gini or entropy) brought by that feature (also known as Gini importance). ``` # Preparing the data X = df.drop(columns=['product_labels','behavior_labels','merged_labels']) y = df.merged_labels # Splitting the data X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) # Fitting the decision tree dt = # CODE HERE (set a simple decision tree with max depth of 3) dt.fit(X_train, y_train) print("It is estimated that in average, we are able to predict {0:.2f}% of the customers correctly".format(dt.score(X_test, y_test)*100)) # Assessing feature importance pd.Series(dt.feature_importances_, index=X_train.columns) # Predicting the cluster labels of the outliers df_out['merged_labels'] = # CODE HERE df_out.head() # Visualizing the decision tree dot_data = export_graphviz(dt, out_file=None, feature_names=X.columns.to_list(), filled=True, rounded=True, special_characters=True) graphviz.Source(dot_data) ```
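As a closing note, one possible way to fill in the outlier reclassification step above is to reuse the fitted tree directly. This is a hedged sketch, assuming `df_out` was filled in with the DBSCAN outlier rows of the original dataframe and still carries the feature columns the tree was trained on (`X.columns`).

```python
# One possible completion of the outlier step above (illustrative only):
# classify the DBSCAN outliers with the tree fitted on the final clusters.
outlier_features = df_out[X.columns]               # assumes these columns are present in df_out
df_out['merged_labels'] = dt.predict(outlier_features)
df_out['merged_labels'].value_counts()
```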
github_jupyter
## Interpreting Ensemble Compressed Features **Gregory Way, 2019** The following notebook will assign biological knowledge to the compressed features using the network projection approach. I use the model previously identified that was used to predict TP53 inactivation. I observe the BioBombe gene set enrichment scores for the features with high coefficients in this model. ``` import os import sys import pandas as pd ``` ## Load the `All Feature` Ensemble Model ``` model_file = os.path.join("results", "top_model_ensemble_all_features_tp53_feature_for_followup.tsv") top_model_df = pd.read_table(model_file) top_model_df coef_file = os.path.join("results", "mutation_ensemble_all", "TP53", "TP53_ensemble_all_features_coefficients.tsv.gz") coef_df = pd.read_table(coef_file).drop(['signal', 'z_dim', 'seed', 'algorithm'], axis='columns') coef_df.head() full_coef_id_df = ( pd.DataFrame(coef_df.feature.str.split("_").values.tolist(), columns=['algorithm', 'individual_feature', 'seed', 'k', 'signal']) ) full_coef_id_df = pd.concat([full_coef_id_df, coef_df], axis='columns') full_coef_id_df = full_coef_id_df.query("abs > 0").query("signal == 'signal'") print(full_coef_id_df.shape) full_coef_id_df.head() ``` ## Load Network Projection Results ``` gph_dir = os.path.join("..", "6.biobombe-projection", "results", "tcga", "gph", "signal") gph_files = os.listdir(gph_dir) all_scores_list = [] for file in gph_files: file = os.path.join(gph_dir, file) scores_df = pd.read_table(file) all_scores_list.append(scores_df) all_scores_df = pd.concat(all_scores_list, axis='rows') print(all_scores_df.shape) all_scores_df.head() all_scores_df = all_scores_df.assign(big_feature_id=all_scores_df.algorithm + "_" + all_scores_df.feature.astype(str) + "_" + all_scores_df.seed.astype(str) + "_" + all_scores_df.z.astype(str) + "_signal") all_scores_df = all_scores_df.assign(abs_z_score=all_scores_df.z_score.abs()) all_coef_scores_df = ( full_coef_id_df .merge(all_scores_df, how='left', left_on="feature", right_on="big_feature_id") .sort_values(by=['abs', 'abs_z_score'], ascending=False) .reset_index(drop=True) ) all_coef_scores_df.head() # Explore the biobombe scores for specific DAE features top_n_features = 5 biobombe_df = ( all_coef_scores_df .groupby('big_feature_id') .apply(func=lambda x: x.abs_z_score.nlargest(top_n_features)) .reset_index() .merge(all_coef_scores_df .reset_index(), right_on=['index', 'abs_z_score', 'big_feature_id'], left_on=['level_1', 'abs_z_score', 'big_feature_id']) .drop(['level_1', 'index', 'feature_x', 'algorithm_x', 'seed_x', 'model_type', 'algorithm_y', 'feature_y', 'seed_y', 'z'], axis='columns') .sort_values(by=['abs', 'abs_z_score'], ascending=False) .reset_index(drop=True) ) print(biobombe_df.shape) biobombe_df.head(20) # Output biobombe scores applied to the all feature ensemble model file = os.path.join('results', 'tcga_tp53_classify_top_biobombe_scores_all_feature_ensemble_model_table.tsv') biobombe_df.to_csv(file, sep='\t', index=False) ``` ## Detect the highest contributing variables ``` neg_biobombe_df = biobombe_df.query("weight < 0") pos_biobombe_df = biobombe_df.query("weight > 0") top_neg_variables_df = neg_biobombe_df.groupby("variable")['weight'].sum().sort_values(ascending=True) top_pos_variables_df = pos_biobombe_df.groupby("variable")['weight'].sum().sort_values(ascending=False) full_result_df = pd.DataFrame(pd.concat([top_pos_variables_df, top_neg_variables_df])) full_result_df = ( full_result_df .assign(abs_weight=full_result_df.weight.abs()) .sort_values(by='abs_weight', 
ascending=False) ) full_result_df.head() # Output biobombe scores applied to the all feature ensemble model file = os.path.join('results', 'tcga_tp53_classify_aggregate_biobombe_scores_all_feature_ensemble.tsv') full_result_df.to_csv(file, sep='\t', index=False) ```
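As an optional last step, the aggregated table makes it easy to report the strongest contributors in each direction. A small sketch using the `full_result_df` built above:

```python
# Sketch: top gene sets contributing positively and negatively to the classifier,
# based on the aggregated BioBombe-weighted coefficients computed above.
top_n = 10
top_positive = full_result_df.query("weight > 0").nlargest(top_n, 'weight')
top_negative = full_result_df.query("weight < 0").nsmallest(top_n, 'weight')
print(top_positive)
print(top_negative)
```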
github_jupyter
# Optimization Algorithms

In Deep Learning, our goal is for our neural networks to learn to approximate a function of interest, such as the price of houses in a regression, or the function that classifies the objects in a photo in the case of classification.

In the last notebook, we programmed our first neural network. We also saw the formula used to update the weights. In case you don't remember, the weights and biases were updated as follows:

$$w_i = w_i - \lambda * \partial w $$
$$b_i = b_i - \lambda * \partial b$$

But have you ever stopped to think about where these formulas come from? And might there be better ways to update the weights? That is what we will look at in this notebook.

## Stochastic Gradient Descent (SGD)

In stochastic gradient descent we split our training data into several subsets, which we call mini-batches. At first they will be small, around 32-128 examples; for more advanced applications they tend to be much larger, on the order of 1024 or even 8192 examples per mini-batch. As in standard gradient descent, we compute the gradient of the cost function with respect to the examples, and subtract the gradient times a learning rate from the network's parameters. We can view SGD as taking a small step in the direction that most reduces the value of the loss.

### Equation

$w_{t+1} = w_t - \eta \cdot \nabla L$

### Code

```
import jax

def sgd(weights, gradients, eta):
    return jax.tree_util.tree_multimap(lambda w, g: w - eta*g, weights, gradients)
```

Let's use SGD to optimize a simple function

```
#hide
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline

def f(x):
    return x**2 - 25

x = np.linspace(-10, 10, num=100)
y = f(x)

plt.plot(x, y)
plt.ylim(-50)

x0 = 9.0
f_g = jax.value_and_grad(f)

x_ = []
y_ = []
for i in range(10):
    y0, grads = f_g(x0)
    x_.append(x0)
    y_.append(y0)
    x0 = sgd(x0, grads, 0.9)

plt.plot(x, y)
plt.plot(x_, y_, color='red', marker='o');
```

## Momentum

One problem with using mini-batches is that we are now **estimating** the direction that decreases the loss on the training set, and the smaller the mini-batch, the noisier our estimate. To deal with this noise we introduce the notion of momentum. Momentum makes the optimization behave like a heavy ball rolling down a mountain: even if the path is full of bumps and holes, the direction of the ball is not affected much. From a more mathematical point of view, our weight updates become a combination of the gradient of the current step and the previous gradients, which stabilizes training.

### Equation

$v_{t} = \gamma v_{t-1} + (1 - \gamma) \nabla L \quad \text{gamma acts as a coefficient weighting between the previous updates and the new gradient} \\
w_{t+1} = w_t - \eta v_t $

### Code

```
def momentum(weights, gradients, eta, mom, gamma):
    # exponential moving average of the gradients
    mom = jax.tree_util.tree_multimap(
        lambda v, g: gamma*v + (1 - gamma)*g, mom, gradients)
    weights = jax.tree_util.tree_multimap(
        lambda w, v: w - eta*v, weights, mom)
    return weights, mom

x0 = 9.0
mom = 0.0

x_ = []
y_ = []
for i in range(10):
    y0, grads = f_g(x0)
    x_.append(x0)
    y_.append(y0)
    x0, mom = momentum(x0, grads, 0.9, mom, 0.99)

plt.plot(x, y)
plt.plot(x_, y_, color='red', marker='o');
```

## RMSProp

Created by Geoffrey Hinton during a lecture, this method is the first **adaptive method** we are seeing.
What this means is that the method tries to automatically compute a different learning rate for each of the weights of our neural network, using small rates for parameters that are updated frequently and larger rates for parameters that are updated more rarely, which allows faster optimization.

More specifically, RMSProp divides the usual SGD update by the root of a running average of the squares of the previous gradients (hence its name, Root Mean Square Propagation), thus reducing the magnitude of the update according to the previous magnitudes.

### Equation

$ \nu_{t} = \gamma \nu_{t-1} + (1 - \gamma) (\nabla L)^2 \\
w_{t+1} = w_t - \frac{\eta \nabla L}{\sqrt{\nu_t + \epsilon}} $

### Code

```
def computa_momento(updates, moments, decay, order):
    return jax.tree_multimap(
        lambda g, t: (1 - decay) * (g ** order) + decay * t, updates, moments)

def rmsprop(weights, gradients, eta, nu, gamma):
    nu = computa_momento(gradients, nu, gamma, 2)
    updates = jax.tree_multimap(
        lambda g, n: g * jax.lax.rsqrt(n + 1e-8), gradients, nu)
    weights = jax.tree_util.tree_multimap(lambda w, g: w - eta*g, weights, updates)
    return weights, nu

x0 = 9.0
nu = 0.0

x_ = []
y_ = []
for i in range(10):
    y0, grads = f_g(x0)
    x_.append(x0)
    y_.append(y0)
    x0, nu = rmsprop(x0, grads, 0.9, nu, 0.99)

plt.plot(x, y)
plt.plot(x_, y_, color='red', marker='o');
```

## Adam

Finally, Adam uses ideas similar to Momentum and RMSProp, keeping exponential averages of both the past gradients and their squares.

### Equation

$ m_t = \beta_1 m_{t-1} + (1 - \beta_1) \nabla L \\
v_t = \beta_2 v_{t-1} + (1 - \beta_2) (\nabla L)^2 \\
w_{t+1} = w_t - \frac{\eta m_t}{\sqrt{v_t} + \epsilon} $

### Code

```
import jax.numpy as jnp

def adam(weights, gradients, eta, mu, nu, b1, b2):
    mu = computa_momento(gradients, mu, b1, 1)
    nu = computa_momento(gradients, nu, b2, 2)
    updates = jax.tree_multimap(
        lambda m, v: m / (jnp.sqrt(v + 1e-6) + 1e-8), mu, nu)
    weights = jax.tree_util.tree_multimap(lambda w, g: w - eta*g, weights, updates)
    return weights, mu, nu

x0 = 9.0
mu = 0.0
nu = 0.0

x_ = []
y_ = []
for i in range(10):
    y0, grads = f_g(x0)
    x_.append(x0)
    y_.append(y0)
    x0, mu, nu = adam(x0, grads, 0.8, mu, nu, 0.9, 0.999)

plt.plot(x, y)
plt.plot(x_, y_, color='red', marker='o');
```
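To wrap up, it can be instructive to look at the four update rules side by side on the same toy problem. The sketch below reuses `f`, `f_g` and the optimizer functions defined above, with the same learning rates used in each section; the only new pieces are the small helper functions.

```python
# Side-by-side comparison of the four update rules on f(x) = x**2 - 25,
# reusing f, f_g, sgd, momentum, rmsprop and adam defined above.
import matplotlib.pyplot as plt

def trajectory(step, state, n_steps=10):
    # step(x, grads, state) -> (new_x, new_state); returns the visited x values
    x_curr, xs = 9.0, []
    for _ in range(n_steps):
        _, grads = f_g(x_curr)
        xs.append(float(x_curr))
        x_curr, state = step(x_curr, grads, state)
    return xs

def adam_step(x_curr, grads, state):
    m, v = state
    x_curr, m, v = adam(x_curr, grads, 0.8, m, v, 0.9, 0.999)
    return x_curr, (m, v)

histories = {
    'SGD': trajectory(lambda w, g, s: (sgd(w, g, 0.9), s), None),
    'Momentum': trajectory(lambda w, g, s: momentum(w, g, 0.9, s, 0.99), 0.0),
    'RMSProp': trajectory(lambda w, g, s: rmsprop(w, g, 0.9, s, 0.99), 0.0),
    'Adam': trajectory(adam_step, (0.0, 0.0)),
}

plt.plot(x, y, color='gray', alpha=0.5)   # the parabola, for reference
for name, xs in histories.items():
    plt.plot(xs, [f(v) for v in xs], marker='o', label=name)
plt.ylim(-50)
plt.legend()
plt.show()
```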
github_jupyter
# 07 - Serving predictions

The purpose of the notebook is to show how to use the deployed model for online and batch prediction. The notebook covers the following tasks:

1. Test the `Endpoint` resource for online prediction.
2. Use the custom model uploaded as a `Model` resource for batch prediction.
3. Run the batch prediction pipeline using `Vertex Pipelines`.

## Setup

### Import libraries

```
import os
import time
from datetime import datetime

import tensorflow as tf
from google.cloud import aiplatform as vertex_ai
```

### Setup Google Cloud project

```
PROJECT_ID = '[your-project-id]' # Change to your project id.
REGION = 'us-central1' # Change to your region.
BUCKET = '[your-bucket-name]' # Change to your bucket name.

if PROJECT_ID == '' or PROJECT_ID is None or PROJECT_ID == '[your-project-id]':
    # Get your GCP project id from gcloud
    shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
    PROJECT_ID = shell_output[0]

if BUCKET == '' or BUCKET is None or BUCKET == '[your-bucket-name]':
    # Set your bucket name using your GCP project id
    BUCKET = PROJECT_ID
    # Try to create the bucket if it doesn't exist
    ! gsutil mb -l $REGION gs://$BUCKET

print('')
print('Project ID:', PROJECT_ID)
print('Region:', REGION)
print('Bucket name:', BUCKET)
```

### Set configurations

```
VERSION = 'v1'
DATASET_DISPLAY_NAME = 'chicago-taxi-tips'
MODEL_DISPLAY_NAME = f'{DATASET_DISPLAY_NAME}-classifier-{VERSION}'
ENDPOINT_DISPLAY_NAME = f'{DATASET_DISPLAY_NAME}-classifier'

SERVE_BQ_DATASET_NAME = 'playground_us' # Change to your serving BigQuery dataset name.
SERVE_BQ_TABLE_NAME = 'chicago_taxitrips_prep' # Change to your serving BigQuery table name.
```

## 1. Making an online prediction

```
vertex_ai.init(
    project=PROJECT_ID,
    location=REGION,
    staging_bucket=BUCKET
)

endpoint_name = vertex_ai.Endpoint.list(
    filter=f'display_name={ENDPOINT_DISPLAY_NAME}',
    order_by='update_time')[-1].gca_resource.name

endpoint = vertex_ai.Endpoint(endpoint_name)

test_instances = [
    {
        'dropoff_grid': ['POINT(-87.6 41.9)'],
        'euclidean': [2064.2696],
        'loc_cross': [''],
        'payment_type': ['Credit Card'],
        'pickup_grid': ['POINT(-87.6 41.9)'],
        'trip_miles': [1.37],
        'trip_day': [12],
        'trip_hour': [16],
        'trip_month': [2],
        'trip_day_of_week': [4],
        'trip_seconds': [555]
    }
]

predictions = endpoint.predict(test_instances).predictions

for prediction in predictions:
    print(prediction)

# TODO {for Khalid, get error saying model does not support explanations}
explanations = endpoint.explain(test_instances).explanations

for explanation in explanations:
    print(explanation)
```

## 2. Make a batch prediction
```
WORKSPACE = f'gs://{BUCKET}/{DATASET_DISPLAY_NAME}/'
SERVING_DATA_DIR = os.path.join(WORKSPACE, 'serving_data')
SERVING_INPUT_DATA_DIR = os.path.join(SERVING_DATA_DIR, 'input_data')
SERVING_OUTPUT_DATA_DIR = os.path.join(SERVING_DATA_DIR, 'output_predictions')

if tf.io.gfile.exists(SERVING_DATA_DIR):
    print('Removing previous serving data...')
    tf.io.gfile.rmtree(SERVING_DATA_DIR)

print('Creating serving data directory...')
tf.io.gfile.mkdir(SERVING_DATA_DIR)
print('Serving data directory is ready.')
```

### Extract serving data to Cloud Storage as JSONL

```
from src.model_training import features as feature_info
from src.preprocessing import etl
from src.common import datasource_utils

LIMIT = 10000

sql_query = datasource_utils.create_bq_source_query(
    dataset_display_name=DATASET_DISPLAY_NAME,
    missing=feature_info.MISSING_VALUES,
    limit=LIMIT
)

print(sql_query)

args = {
    #'runner': 'DataflowRunner',
    'sql_query': sql_query,
    'exported_data_prefix': os.path.join(SERVING_INPUT_DATA_DIR, 'data-'),
    'temporary_dir': os.path.join(WORKSPACE, 'tmp'),
    'gcs_location': os.path.join(WORKSPACE, 'bq_tmp'),
    'project': PROJECT_ID,
    'region': REGION,
    'setup_file': './setup.py'
}

tf.get_logger().setLevel('ERROR')

print('Data extraction started...')
etl.run_extract_pipeline(args)
print('Data extraction completed.')

! gsutil ls {SERVING_INPUT_DATA_DIR}
```

### Submit the batch prediction job

```
model_name = vertex_ai.Model.list(
    filter=f'display_name={MODEL_DISPLAY_NAME}',
    order_by='update_time')[-1].gca_resource.name

job_resources = {
    'machine_type': 'n1-standard-2',
    #'accelerator_count': 1,
    #'accelerator_type': 'NVIDIA_TESLA_T4'
    'starting_replica_count': 1,
    'max_replica_count': 10,
}

job_display_name = f"{MODEL_DISPLAY_NAME}-prediction-job-{datetime.now().strftime('%Y%m%d%H%M%S')}"

vertex_ai.BatchPredictionJob.create(
    job_display_name=job_display_name,
    model_name=model_name,
    gcs_source=SERVING_INPUT_DATA_DIR + '/*.jsonl',
    gcs_destination_prefix=SERVING_OUTPUT_DATA_DIR,
    instances_format='jsonl',
    predictions_format='jsonl',
    sync=True,
    **job_resources,
)
```

## 3. Run the batch prediction pipeline using `Vertex Pipelines`

```
WORKSPACE = f'{BUCKET}/{DATASET_DISPLAY_NAME}/'
MLMD_SQLLITE = 'mlmd.sqllite'
ARTIFACT_STORE = os.path.join(WORKSPACE, 'tfx_artifacts')
PIPELINE_NAME = f'{MODEL_DISPLAY_NAME}-predict-pipeline'

os.environ['PROJECT'] = PROJECT_ID
os.environ['REGION'] = REGION
os.environ['MODEL_DISPLAY_NAME'] = MODEL_DISPLAY_NAME
os.environ['PIPELINE_NAME'] = PIPELINE_NAME
os.environ['ARTIFACT_STORE_URI'] = ARTIFACT_STORE
os.environ['BATCH_PREDICTION_BQ_DATASET_NAME'] = SERVE_BQ_DATASET_NAME
os.environ['BATCH_PREDICTION_BQ_TABLE_NAME'] = SERVE_BQ_TABLE_NAME
os.environ['SERVE_LIMIT'] = '1000'
os.environ['BEAM_RUNNER'] = 'DirectRunner'
os.environ['TFX_IMAGE_URI'] = f'gcr.io/{PROJECT_ID}/{DATASET_DISPLAY_NAME}:{VERSION}'

import importlib
from src.tfx_pipelines import config
importlib.reload(config)

for key, value in config.__dict__.items():
    if key.isupper(): print(f'{key}: {value}')

from src.tfx_pipelines import runner

pipeline_definition_file = f'{config.PIPELINE_NAME}.json'

pipeline_definition = runner.compile_prediction_pipeline(pipeline_definition_file)

from kfp.v2.google.client import AIPlatformClient

pipeline_client = AIPlatformClient(
    project_id=PROJECT_ID,
    region=REGION)

pipeline_client.create_run_from_job_spec(
    job_spec_path=pipeline_definition_file
)
```
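As a quick sanity check once a batch prediction job finishes, you can inspect the JSONL files it writes under `SERVING_OUTPUT_DATA_DIR`. This is a hedged sketch rather than part of the original notebook: the job generates its own output subfolder name, and the `instance`/`prediction` keys assume the standard Vertex AI JSONL results layout.

```python
# Sketch: peek at the batch prediction results written to Cloud Storage.
# Assumes the standard Vertex AI JSONL output layout (one {"instance", "prediction"}
# record per line); the output subfolder name is generated by the job.
import json
import tensorflow as tf

result_files = tf.io.gfile.glob(SERVING_OUTPUT_DATA_DIR + '/*/prediction.results-*')
print(f'Found {len(result_files)} result file(s).')

if result_files:
    with tf.io.gfile.GFile(result_files[0], 'r') as results:
        for i, line in enumerate(results):
            record = json.loads(line)
            print(record.get('instance'), '->', record.get('prediction'))
            if i >= 4:  # show only the first few records
                break
```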
github_jupyter
``` # reload packages %load_ext autoreload %autoreload 2 ``` ### Choose GPU ``` %env CUDA_DEVICE_ORDER=PCI_BUS_ID %env CUDA_VISIBLE_DEVICES=0 import tensorflow as tf gpu_devices = tf.config.experimental.list_physical_devices('GPU') if len(gpu_devices)>0: tf.config.experimental.set_memory_growth(gpu_devices[0], True) print(gpu_devices) tf.keras.backend.clear_session() ``` ### Load packages ``` import tensorflow as tf import numpy as np import matplotlib.pyplot as plt from tqdm.autonotebook import tqdm from IPython import display import pandas as pd import umap import copy import os, tempfile import tensorflow_addons as tfa ``` ### parameters ``` dataset = "cifar10" labels_per_class = 1024 # 'full' n_latent_dims = 1024 confidence_threshold = 0.8 # minimum confidence to include in UMAP graph for learned metric learned_metric = True # whether to use a learned metric, or Euclidean distance between datapoints augmented = True # min_dist= 0.001 # min_dist parameter for UMAP negative_sample_rate = 5 # how many negative samples per positive sample batch_size = 128 # batch size optimizer = tf.keras.optimizers.Adam(1e-3) # the optimizer to train optimizer = tfa.optimizers.MovingAverage(optimizer) label_smoothing = 0.2 # how much label smoothing to apply to categorical crossentropy max_umap_iterations = 50 # how many times, maximum, to recompute UMAP max_epochs_per_graph = 50 # how many epochs maximum each graph trains for (without early stopping) umap_patience = 5 # how long before recomputing UMAP graph ``` #### Load dataset ``` from tfumap.semisupervised_keras import load_dataset ( X_train, X_test, X_labeled, Y_labeled, Y_masked, X_valid, Y_train, Y_test, Y_valid, Y_valid_one_hot, Y_labeled_one_hot, num_classes, dims ) = load_dataset(dataset, labels_per_class) ``` ### load architecture ``` from tfumap.semisupervised_keras import load_architecture encoder, classifier, embedder = load_architecture(dataset, n_latent_dims) ``` ### load pretrained weights ``` from tfumap.semisupervised_keras import load_pretrained_weights encoder, classifier = load_pretrained_weights(dataset, augmented, labels_per_class, encoder, classifier) ``` #### compute pretrained accuracy ``` # test current acc pretrained_predictions = classifier.predict(encoder.predict(X_test, verbose=True), verbose=True) pretrained_predictions = np.argmax(pretrained_predictions, axis=1) pretrained_acc = np.mean(pretrained_predictions == Y_test) print('pretrained acc: {}'.format(pretrained_acc)) ``` ### get a, b parameters for embeddings ``` from tfumap.semisupervised_keras import find_a_b a_param, b_param = find_a_b(min_dist=min_dist) ``` ### build network ``` from tfumap.semisupervised_keras import build_model model = build_model( batch_size=batch_size, a_param=a_param, b_param=b_param, dims=dims, encoder=encoder, classifier=classifier, negative_sample_rate=negative_sample_rate, optimizer=optimizer, label_smoothing=label_smoothing, embedder = None, ) ``` ### build labeled iterator ``` from tfumap.semisupervised_keras import build_labeled_iterator labeled_dataset = build_labeled_iterator(X_labeled, Y_labeled_one_hot, augmented, dims) ``` ### training ``` from livelossplot import PlotLossesKerasTF from tfumap.semisupervised_keras import get_edge_dataset from tfumap.semisupervised_keras import zip_datasets ``` #### callbacks ``` # early stopping callback early_stopping = tf.keras.callbacks.EarlyStopping( monitor='val_classifier_acc', min_delta=0, patience=15, verbose=0, mode='auto', baseline=None, restore_best_weights=False ) # plot losses 
callback groups = {'acccuracy': ['classifier_accuracy', 'val_classifier_accuracy'], 'loss': ['classifier_loss', 'val_classifier_loss']} plotlosses = PlotLossesKerasTF(groups=groups) history_list = [] current_validation_acc = 0 batches_per_epoch = np.floor(len(X_train)/batch_size).astype(int) epochs_since_last_improvement = 0 for current_umap_iterations in tqdm(np.arange(max_umap_iterations)): # make dataset edge_dataset = get_edge_dataset( model, classifier, encoder, X_train, Y_masked, batch_size, confidence_threshold, labeled_dataset, dims, learned_metric = learned_metric ) # zip dataset zipped_ds = zip_datasets(labeled_dataset, edge_dataset, batch_size) # train dataset history = model.fit( zipped_ds, epochs=max_epochs_per_graph, validation_data=( (X_valid, tf.zeros_like(X_valid), tf.zeros_like(X_valid)), {"classifier": Y_valid_one_hot}, ), callbacks = [early_stopping, plotlosses], max_queue_size = 100, steps_per_epoch = batches_per_epoch, #verbose=0 ) history_list.append(history) # get validation acc pred_valid = classifier.predict(encoder.predict(X_valid)) new_validation_acc = np.mean(np.argmax(pred_valid, axis = 1) == Y_valid) # if validation accuracy has gone up, mark the improvement if new_validation_acc > current_validation_acc: epochs_since_last_improvement = 0 current_validation_acc = copy.deepcopy(new_validation_acc) else: epochs_since_last_improvement += 1 if epochs_since_last_improvement > umap_patience: print('No improvement in {} UMAP iterators'.format(umap_patience)) break # umap loss 0.273 class_pred = classifier.predict(encoder.predict(X_test)) class_acc = np.mean(np.argmax(class_pred, axis=1) == Y_test) print(class_acc) ```
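The outer loop above re-computes the UMAP graph until validation accuracy stops improving for `umap_patience` rounds. Below is a condensed, self-contained sketch of just that patience logic, with a toy evaluation function standing in for the train-then-validate step; the function and variable names are illustrative and not part of `tfumap`.

```
import numpy as np

def run_with_patience(evaluate, max_iterations=50, patience=5):
    """Outer-loop early stopping, mirroring the UMAP loop above: stop once the
    validation accuracy has not improved for more than `patience` rounds."""
    best_acc = 0.0
    rounds_since_improvement = 0
    history = []
    for iteration in range(max_iterations):
        acc = evaluate(iteration)          # stands in for train-then-validate
        history.append(acc)
        if acc > best_acc:
            best_acc = acc
            rounds_since_improvement = 0
        else:
            rounds_since_improvement += 1
        if rounds_since_improvement > patience:
            print(f"No improvement in {patience} iterations, stopping at round {iteration}.")
            break
    return best_acc, history

# Toy validation curve: improves quickly, then plateaus with a little noise.
rng = np.random.default_rng(0)
toy_curve = lambda it: min(0.9, 0.5 + 0.05 * it) + rng.normal(0.0, 0.005)
best, history = run_with_patience(toy_curve, max_iterations=30, patience=5)
print(f"best validation accuracy {best:.3f} after {len(history)} rounds")
```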
github_jupyter
# MNIST With SET This is an example of training an SET network on the MNIST dataset using synapses, pytorch, and torchvision. ``` #Import torch libraries and get SETLayer from synapses import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torchvision import datasets, transforms from synapses import SETLayer #Some extras for visualizations import numpy as np import matplotlib.pyplot as plt import seaborn as sns from IPython.display import clear_output print("done") ``` ## SET Layer The SET layer is a pytorch module that works with a similar API to a standard fully connected layer; to initialize, specify input and output dimensions.<br><br> NOTE: one condition mentioned in the paper is that epsilon (a hyperparameter controlling layer sparsity) be much less than the input dimension and much less than the output dimension. The default value of epsilon is 11. Keep dimensions much bigger than epsilon! (epsilon can be passed in as an init argument to the layer). ``` #initialize the layer sprs = SETLayer(128, 256) #We can see the layer transforms inputs as we expect inp = torch.randn((2, 128)) print('Input batch shape: ', tuple(inp.shape)) out = sprs(inp) print('Output batch shape: ', tuple(out.shape)) ``` In terms of behavior, the SETLayer transforms an input vector into the output space as would a fcl. ## Initial Connection Distribution The intialized layer has randomly assigned connections between input nodes and output nodes; each connection is associated with a weight, drawn from a normal distribution. ``` #Inspect init weight distribution plt.hist(np.array(sprs.weight.data), bins=40) plt.title('Weights distribution on initialization') plt.xlabel('Weight Value') plt.ylabel('Number of weights') plt.show() vec = sprs.connections[:, 0] vec = np.array(vec) values, counts = np.unique(vec, return_counts=True) plt.title('Connections to inputs') plt.bar(values, counts) plt.xlabel('Input vector index') plt.ylabel('Number of connections') plt.show() print("done") ``` The weights are sampled from a normal distribution, as is done with a standard fcl. The connections to the inputs are uniformly distributed.<br><br> ## Killing Connections When connections are reassigned in SET, some proportion (defined by hyperparameter zeta) of the weights closest to zero are removed. We can set these to zero using the zero_connections method on the layer. (This method leaves the connections unchanged.) ``` sprs.zero_connections() #Inspect init weight distribution plt.hist(np.array(sprs.weight.data), bins=40) plt.title('Weights distribution after zeroing connections') plt.xlabel('Weight Value') plt.ylabel('Number of weights') plt.show() print("done") ``` ## Evolving Connections The evolve_connections() method will reassign these weights to new connections between input and output nodes. By default, these weights are initialized by sampling from the same distribution as the init function. Optionally, these weights can be set at zero (with init=False argument). ``` sprs.evolve_connections() plt.hist(np.array(sprs.weight.data), bins=40) plt.title('Weights distribution after evolving connections') plt.show() plt.title('Connections to inputs') plt.bar(values, counts) plt.xlabel('Input vector index') plt.ylabel('Number of connections') plt.show() print("done") ``` We can see these weight values have been re-distributed; the new connections conform to the same uniform distribution as before. 
(We see in the SET paper, and here later on, that the adaptive algorithm learns to allocate these connections to more important input values.) ## A Simple SET Model The following is a simple sparsely-connected model using SETLayers with default hyperparameters. ``` class SparseNet(nn.Module): def __init__(self): super(SparseNet, self).__init__() self.set_layers = [] self.set1 = SETLayer(784, 512) self.set_layers.append(self.set1) #self.set2 = SETLayer(512, 512) #self.set_layers.append(self.set2) self.set2 = SETLayer(512, 128) self.set_layers.append(self.set2) #Use a dense layer for output because of low output dimensionality self.fc1 = nn.Linear(128, 10) def zero_connections(self): """Sets connections to zero for inferences.""" for layer in self.set_layers: layer.zero_connections() def evolve_connections(self): """Evolves connections.""" for layer in self.set_layers: layer.evolve_connections() def forward(self, x): x = x.reshape(-1, 784) x = F.relu(self.set1(x)) x = F.relu(self.set2(x)) #x = F.relu(self.set3(x)) x = self.fc1(x) return F.log_softmax(x, dim=1) def count_params(model): prms = 0 for parameter in model.parameters(): n_params = 1 for prm in parameter.shape: n_params *= prm prms += n_params return prms device = "cpu" sparse_net = SparseNet().to(device) print('number of params: ', count_params(sparse_net)) ``` Consider a fully-connected model with the same architecture: It would contain more than 20 times the number of parameters!<br> ## Training on MNIST This code was adapted directly from the [pytorch mnist tutorial](https://github.com/pytorch/examples/blob/master/mnist/main.py). ``` class History(object): """Tracks and plots training history""" def __init__(self): self.train_loss = [] self.val_loss = [] self.train_acc = [] self.val_acc = [] def plot(self): clear_output() plt.plot(self.train_loss, label='train loss') plt.plot(self.train_acc, label='train acc') plt.plot(self.val_loss, label='val loss') plt.plot(self.val_acc, label='val acc') plt.legend() plt.show() def train(log_interval, model, device, train_loader, optimizer, epoch, history): model.train() correct = 0 loss_ = [] for batch_idx, (data, target) in enumerate(train_loader): data, target = data.to(device), target.to(device) optimizer.zero_grad() output = model(data) pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability correct += pred.eq(target.view_as(pred)).sum().item() loss = F.nll_loss(output, target) loss.backward() loss_.append(loss.item()) optimizer.step() if batch_idx % log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.item())) history.train_loss.append(np.array(loss_).mean()) history.train_acc.append(correct/len(train_loader.dataset)) return history def test(model, device, test_loader, history): model.eval() test_loss = 0 correct = 0 with torch.no_grad(): for data, target in test_loader: data, target = data.to(device), target.to(device) output = model(data) test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability correct += pred.eq(target.view_as(pred)).sum().item() acc = correct / len(test_loader.dataset) test_loss /= len(test_loader.dataset) print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format( test_loss, correct, len(test_loader.dataset), 100. 
* acc)) history.val_loss.append(test_loss) history.val_acc.append(acc) return history print("done") torch.manual_seed(0) #Optimizer settings lr = .01 momentum = .5 epochs = 50 batch_size=128 log_interval = 64 test_batch_size=128 train_loader = torch.utils.data.DataLoader( datasets.MNIST('../data', train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ])), batch_size=batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader( datasets.MNIST('../data', train=False, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ])), batch_size=test_batch_size, shuffle=True) print("done") ``` ## Dealing with Optimizer Buffers Synapses recycles parameters. When connections are broken and reassigned, its parameter gets set to zero.<br><br> This system is designed to be computationally efficient, but it comes with a nasty side-effect. Often, we use optimizers with some sort of buffer; the simplest example is momentum in SGD. When we reset a parameter, the information about the overwritten parameter in the optimizer buffer is not useful. We need to overwrite specific values in the buffer also. To do this in pytorch, we need to pass the optimizer to each SETLayer to let synapses do this for us. <br><br> <b>Notice: I'm still working out the best way to initialize adaptive optimizers (current version makes a naive attempt to pick good values); SGD with momentum works fine</b> ``` optimizer = optim.SGD(sparse_net.parameters(), lr=lr, momentum=momentum, weight_decay=1e-2) for layer in sparse_net.set_layers: #here we tell our set layers about layer.optimizer = optimizer #This guy will keep track of optimization metrics. set_history = History() print("done") def show_MNIST_connections(model): vec = model.set1.connections[:, 0] vec = np.array(vec) _, counts = np.unique(vec, return_counts=True) t = counts.reshape(28, 28) sns.heatmap(t, cmap='viridis', xticklabels=[], yticklabels=[], square=True); plt.title('Connections per input pixel'); plt.show(); v = [t[13-i:15+i,13-i:15+i].mean() for i in range(14)] plt.plot(v) plt.show() print("done") import time epochs = 1000 for epoch in range(1, epochs + 1): #In the paper, evolutions occur on each epoch if epoch != 1: set_history.plot() show_MNIST_connections(sparse_net) if epoch != 1: print('Train set: Average loss: {:.4f}, Accuracy: {:.2f}%'.format( set_history.train_loss[epoch-2], 100. * set_history.train_acc[epoch-2])) print('Test set: Average loss: {:.4f}, Accuracy: {:.2f}%'.format( set_history.val_loss[epoch-2], 100. * set_history.val_acc[epoch-2])) sparse_net.evolve_connections() show_MNIST_connections(sparse_net) set_history = train(log_interval, sparse_net, device, train_loader, optimizer, epoch, set_history) #And smallest connections are removed during inference. sparse_net.zero_connections() set_history = test(sparse_net, device, test_loader, set_history) time.sleep(10) ```
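As a recap of the prune-and-regrow idea that `evolve_connections()` implements, here is a minimal NumPy sketch under stated assumptions: connections are stored as a flat weight vector plus a 2×N array of (input, output) indices, the `zeta` fraction of weights closest to zero is dropped, and the freed slots are re-initialized with fresh random endpoints and weights. The `synapses` package handles this internally; the array layout, the `zeta` value, and the weight scale here are illustrative only.

```
import numpy as np

def evolve_sparse_weights(weights, connections, n_inputs, n_outputs, zeta=0.3, rng=None):
    """One prune-and-regrow step on a flat list of sparse connections.

    `weights` is a 1-D array of connection weights; `connections` is a
    (2, n_connections) array of (input_index, output_index) pairs.  The `zeta`
    fraction of weights closest to zero is dropped, and the freed slots get
    new random endpoints and freshly sampled weights.
    """
    if rng is None:
        rng = np.random.default_rng()
    weights = weights.copy()
    connections = connections.copy()
    n_replace = int(zeta * len(weights))
    dropped = np.argsort(np.abs(weights))[:n_replace]   # smallest-magnitude weights
    weights[dropped] = rng.normal(0.0, 0.1, size=n_replace)
    connections[0, dropped] = rng.integers(0, n_inputs, size=n_replace)
    connections[1, dropped] = rng.integers(0, n_outputs, size=n_replace)
    return weights, connections

rng = np.random.default_rng(0)
w = rng.normal(0.0, 0.1, size=2000)
conn = np.vstack([rng.integers(0, 128, size=2000), rng.integers(0, 256, size=2000)])
w_new, conn_new = evolve_sparse_weights(w, conn, n_inputs=128, n_outputs=256, zeta=0.3, rng=rng)
print("connections re-assigned:", int(np.sum(np.any(conn != conn_new, axis=0))))
```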
github_jupyter
# Regression Errors Let's talk about errors in regression problems. Typically, in regression, we have a variable $y$ for which we want to learn a model to predict. The prediction from the model is usually denoted as $\hat{y}$. The error $e$ is thus defined as follows - $e = y - \hat{y}$ Since we have many pairs of the truth, $y$ and $\hat{y}$, we want to average over the differences. I will denote this error as the Mean Error `ME`. - $\mathrm{ME} = \frac{1}{n} \sum{y - \hat{y}}$ The problem with ME is that averaging over the differences may result in something close to zero. The reason is because the positive and negative differences will have a cancelling effect. No one really computes the error of a regression model in this way. A better way is to consider the Mean Absolute Error `MAE`, where we take the average of the absolute differences. - $\mathrm{MAE} = \frac{1}{n} \sum |y - \hat{y}|$ In MAE, since there are only positive differences resulting from $|y - \hat{y}|$, we avoid the cancelling effect of positive and negative values when averaging. Many times, data scientists want to punish models that predict values further from the truth. In that case, the Root Mean Squared Error `RMSE` is used. - $\mathrm{RMSE} = \sqrt{\frac{1}{n} \sum (y - \hat{y})^2}$ In RMSE, we do not take the difference as in ME or the absolute difference as in MAE, rather, we square the difference. The idea is that when a model's prediction is off from the truth, we should exaggerate the consequences as it reflects the reality that being further away from the truth is orders of magnitude worse. However, the squaring of the difference results in something that is no longer in the unit of $y$, as such, we take the square root to bring the scalar value back into unit with $y$. For all these measures of performance, the closer the value is to zero, the better. Let's look at the following made-up example where a hypothetical model has made some prediction $\hat{y}$ or `y_pred` and for each of these prediction, we have the ground truth $y$ or `y_true`. ``` import pandas as pd df = pd.DataFrame({ 'y_true': [10, 8, 7, 9, 4], 'y_pred': [11, 7, 6, 15, 1] }) df = pd.DataFrame({ 'y_true': [10, 8, 7, 9, 4], 'y_pred': [11, 7, 5, 11, 1] }) df ``` We will now compute the error `E`, absolute error `AE` and squared errors `SE` for each pair. ``` import numpy as np df['E'] = df.y_true - df.y_pred df['AE'] = np.abs(df.y_true - df.y_pred) df['SE'] = np.power(df.y_true - df.y_pred, 2.0) df ``` From E, AE and SE, we can compute the average or mean errors, ME, MAE, RMSE, respectively, as follows. ``` errors = df[['E', 'AE', 'SE']].mean() errors.se = np.sqrt(errors.SE) errors.index = ['ME', 'MAE', 'RMSE'] errors ``` As you can see, these judgement of errors are saying different things and might lead you to draw contradictory and/or conflicting conclusions. We know ME is defective, and so we will ignore interpreting ME. MAE says we can expect to be `2.4` off from the truth while RMSE says we can expect to be `9.6` off from the truth. The values `2.4` and `9.6` are very different; while `2.4` may seem to be tolerably `good`, on the other hand, `9.6` seems `bad`. One thing we can try to do is to `normalize` these values. Let's just look at RMSE. Here are some ways we can normalize RMSE. 
- using the `mean` of y, denoted as $\bar{y}$ - using the `standard deviation` of y, denoted as $\sigma_y$ - using the range of y, denoted as $y_{\mathrm{max}} - y_{\mathrm{min}}$ - using the interquartile range of y, denoted as $Q_y^1 - Q_y^3$ The code to compute these is as follows. - $\bar{y}$ is `me_y` - $\sigma_y$ is `sd_y` - $y_{\mathrm{max}} - y_{\mathrm{min}}$ is `ra_y` - $Q_y^1 - Q_y^3$ is `iq_y` Since these are used to divide RMSE, let's group them under a series as `denominators`. ``` from scipy.stats import iqr me_y = df.y_true.mean() sd_y = df.y_true.std() ra_y = df.y_true.max() - df.y_true.min() iq_y = iqr(df.y_true) denominators = pd.Series([me_y, sd_y, ra_y, iq_y], index=['me_y', 'sd_y', 'ra_y', 'iq_y']) denominators ``` Here's the results of normalizing RMSE with the mean `me`, standard deviation `sd`, range `ra` and interquartile range `iq`. ``` pd.DataFrame([{ r'$\mathrm{RMSE}_{\mathrm{me}}$': errors.RMSE / denominators.me_y, r'$\mathrm{RMSE}_{\mathrm{sd}}$': errors.RMSE / denominators.sd_y, r'$\mathrm{RMSE}_{\mathrm{ra}}$': errors.RMSE / denominators.ra_y, r'$\mathrm{RMSE}_{\mathrm{iq}}$': errors.RMSE / denominators.iq_y, }]).T.rename(columns={0: 'values'}) ``` That we have normalized RMSE, we can make a little bit better interpretation. - $\mathrm{RMSE}_{\mathrm{me}}$ is saying we can expect to be 126% away from the truth. - $\mathrm{RMSE}_{\mathrm{sd}}$ is saying we can expect to be over 4.2 standard deviation from the truth. - $\mathrm{RMSE}_{\mathrm{ra}}$ is saying we can expect to be 1.6 - $\mathrm{RMSE}_{\mathrm{iq}}$
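For convenience, the error measures and normalization variants described above can be wrapped into one helper. The sketch below recomputes ME, MAE and RMSE from their definitions and divides RMSE by the four denominators, using the same `y_true`/`y_pred` pair as the example; the function name is illustrative.

```
import numpy as np
from scipy.stats import iqr

def regression_error_report(y_true, y_pred):
    """ME, MAE, RMSE and RMSE normalized by the mean, standard deviation,
    range and interquartile range of y, following the definitions above."""
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    e = y_true - y_pred
    rmse = np.sqrt(np.mean(e ** 2))
    return {
        "ME": e.mean(),
        "MAE": np.abs(e).mean(),
        "RMSE": rmse,
        "RMSE / mean": rmse / y_true.mean(),
        "RMSE / std": rmse / y_true.std(ddof=1),
        "RMSE / range": rmse / (y_true.max() - y_true.min()),
        "RMSE / IQR": rmse / iqr(y_true),
    }

for name, value in regression_error_report([10, 8, 7, 9, 4], [11, 7, 6, 15, 1]).items():
    print(f"{name:>12}: {value: .3f}")
```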
github_jupyter
# UCI Dodgers dataset ``` import pandas as pd import numpy as np import os from pathlib import Path from config import data_raw_folder, data_processed_folder from timeeval import Datasets import matplotlib.pyplot as plt %matplotlib inline plt.rcParams['figure.figsize'] = (20, 10) dataset_collection_name = "Dodgers" source_folder = Path(data_raw_folder) / "UCI ML Repository/Dodgers" target_folder = Path(data_processed_folder) print(f"Looking for source datasets in {source_folder.absolute()} and\nsaving processed datasets in {target_folder.absolute()}") dataset_name = "101-freeway-traffic" train_type = "unsupervised" train_is_normal = False input_type = "univariate" datetime_index = True dataset_type = "real" # create target directory dataset_subfolder = Path(input_type) / dataset_collection_name target_subfolder = target_folder / dataset_subfolder try: os.makedirs(target_subfolder) print(f"Created directories {target_subfolder}") except FileExistsError: print(f"Directories {target_subfolder} already exist") pass dm = Datasets(target_folder) data_file = source_folder / "Dodgers.data" events_file = source_folder / "Dodgers.events" # transform data df = pd.read_csv(data_file, header=None, encoding="latin1", parse_dates=[0], infer_datetime_format=True) df.columns = ["timestamp", "count"] #df["count"] = df["count"].replace(-1, np.nan) # read and add labels df_events = pd.read_csv(events_file, header=None, encoding="latin1") df_events.columns = ["date", "begin", "end", "game attendance" ,"away team", "game score"] df_events.insert(0, "begin_timestamp", pd.to_datetime(df_events["date"] + " " + df_events["begin"])) df_events.insert(1, "end_timestamp", pd.to_datetime(df_events["date"] + " " + df_events["end"])) df_events = df_events.drop(columns=["date", "begin", "end", "game attendance" ,"away team", "game score"]) # labelling df["is_anomaly"] = 0 for _, (t1, t2) in df_events.iterrows(): tmp = df[df["timestamp"] >= t1] tmp = tmp[tmp["timestamp"] <= t2] df.loc[tmp.index, "is_anomaly"] = 1 # mark missing values as anomaly as well df.loc[df["count"] == -1, "is_anomaly"] = 1 filename = f"{dataset_name}.test.csv" path = os.path.join(dataset_subfolder, filename) target_filepath = os.path.join(target_subfolder, filename) dataset_length = len(df) df.to_csv(target_filepath, index=False) print(f"Processed dataset {dataset_name} -> {target_filepath}") # save metadata dm.add_dataset((dataset_collection_name, dataset_name), train_path = None, test_path = path, dataset_type = dataset_type, datetime_index = datetime_index, split_at = None, train_type = train_type, train_is_normal = train_is_normal, input_type = input_type, dataset_length = dataset_length ) dm.save() dm.refresh() dm.df().loc[(slice(dataset_collection_name,dataset_collection_name), slice(None))] ``` ## Experimentation ``` data_file = source_folder / "Dodgers.data" df = pd.read_csv(data_file, header=None, encoding="latin1", parse_dates=[0], infer_datetime_format=True) df.columns = ["timestamp", "count"] #df["count"] = df["count"].replace(-1, np.nan) df events_file = source_folder / "Dodgers.events" df_events = pd.read_csv(events_file, header=None, encoding="latin1") df_events.columns = ["date", "begin", "end", "game attendance" ,"away team", "game score"] df_events.insert(0, "begin_timestamp", pd.to_datetime(df_events["date"] + " " + df_events["begin"])) df_events.insert(1, "end_timestamp", pd.to_datetime(df_events["date"] + " " + df_events["end"])) df_events = df_events.drop(columns=["date", "begin", "end", "game attendance" ,"away team", "game 
score"]) df_events # labelling df["is_anomaly"] = 0 for _, (t1, t2) in df_events.iterrows(): tmp = df[df["timestamp"] >= t1] tmp = tmp[tmp["timestamp"] <= t2] df.loc[tmp.index, "is_anomaly"] = 1 df.loc[df["count"] == -1, "is_anomaly"] = 1 df.iloc[15000:20000].plot(x="timestamp", y=["count", "is_anomaly"]) ```
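The interval-based labelling above can be factored into a small reusable function. The sketch below mirrors the same loop using `Series.between`, applied to a tiny made-up traffic series rather than the Dodgers files; the function name and the example timestamps are illustrative.

```
import pandas as pd

def label_anomaly_windows(df, intervals, time_col="timestamp", label_col="is_anomaly"):
    """Mark every row whose timestamp falls inside any [begin, end] interval,
    mirroring the event-labelling loop above."""
    df = df.copy()
    df[label_col] = 0
    for begin, end in intervals:
        in_window = df[time_col].between(begin, end)   # inclusive on both ends
        df.loc[in_window, label_col] = 1
    return df

# Tiny made-up example: 5-minute traffic counts and one event window.
readings = pd.DataFrame({
    "timestamp": pd.date_range("2005-04-10 18:00", periods=12, freq="5min"),
    "count": [20, 22, 25, 60, 70, 65, 68, 30, 25, 22, 21, 20],
})
events = [(pd.Timestamp("2005-04-10 18:15"), pd.Timestamp("2005-04-10 18:35"))]
labeled = label_anomaly_windows(readings, events)
print(labeled[labeled["is_anomaly"] == 1])
```

Applied to the Dodgers frame, `intervals` would be `df_events[["begin_timestamp", "end_timestamp"]].itertuples(index=False, name=None)`.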
github_jupyter
# Train a ready to use TensorFlow model with a simple pipeline ``` import os import sys import warnings warnings.filterwarnings("ignore") import numpy as np import matplotlib.pyplot as plt # the following line is not required if BatchFlow is installed as a python package. sys.path.append("../..") from batchflow import Pipeline, B, C, D, F, V from batchflow.opensets import MNIST, CIFAR10, CIFAR100 from batchflow.models.tf import ResNet18 ``` BATCH_SIZE might be increased for modern GPUs with lots of memory (4GB and higher). ``` BATCH_SIZE = 64 ``` # Create a dataset [MNIST](http://yann.lecun.com/exdb/mnist/) is a dataset of handwritten digits frequently used as a baseline for machine learning tasks. Downloading MNIST database might take a few minutes to complete. ``` dataset = MNIST(bar=True) ``` There are also predefined CIFAR10 and CIFAR100 datasets. # Define a pipeline config Config allows to create flexible pipelines which take parameters. For instance, if you put a model type into config, you can run a pipeline against different models. See [a list of available models](https://analysiscenter.github.io/batchflow/intro/tf_models.html#ready-to-use-models) to choose the one which fits you best. ``` config = dict(model=ResNet18) ``` # Create a template pipeline A template pipeline is not linked to any dataset. It's just an abstract sequence of actions, so it cannot be executed, but it serves as a convenient building block. ``` train_template = (Pipeline() .init_variable('loss_history', []) .init_model('conv_nn', C('model'), 'dynamic', config={'inputs/images/shape': B.image_shape, 'inputs/labels/classes': D.num_classes, 'initial_block/inputs': 'images'}) .to_array() .train_model('conv_nn', fetches='loss', images=B.images, labels=B.labels, save_to=V('loss_history', mode='a')) ) ``` # Train the model Apply a dataset and a config to a template pipeline to create a runnable pipeline: ``` train_pipeline = (train_template << dataset.train) << config ``` Run the pipeline (it might take from a few minutes to a few hours depending on your hardware) ``` train_pipeline.run(BATCH_SIZE, shuffle=True, n_epochs=1, drop_last=True, bar=True, prefetch=1) ``` Note that the progress bar often increments by 2 at a time - that's prefetch in action. It does not give much here, though, since almost all time is spent in model training which is performed under a thread-lock one batch after another without any parallelism (otherwise the model would not learn anything as different batches would rewrite one another's model weights updates). ``` plt.figure(figsize=(15, 5)) plt.plot(train_pipeline.v('loss_history')) plt.xlabel("Iterations"), plt.ylabel("Loss") plt.show() ``` # Test the model It is much faster than training, but if you don't have GPU it would take some patience. 
``` test_pipeline = (dataset.test.p .import_model('conv_nn', train_pipeline) .init_variable('predictions') .init_variable('metrics') .to_array() .predict_model('conv_nn', fetches='predictions', images=B.images, save_to=V('predictions')) .gather_metrics('class', targets=B.labels, predictions=V('predictions'), fmt='logits', axis=-1, save_to=V('metrics', mode='a')) .run(BATCH_SIZE, shuffle=True, n_epochs=1, drop_last=False, bar=True) ) ``` Let's get the accumulated [metrics information](https://analysiscenter.github.io/batchflow/intro/models.html#model-metrics) ``` metrics = test_pipeline.get_variable('metrics') ``` Or a shorter version: `metrics = test_pipeline.v('metrics')` Now we can easily calculate any metrics we need ``` metrics.evaluate('accuracy') metrics.evaluate(['false_positive_rate', 'false_negative_rate'], multiclass=None) ``` # Save the model After training the model, you may want to save it. This is easy to do. ``` train_pipeline.save_model_now('conv_nn', path='path/to/save') ``` ## What's next? See [the image augmentation tutorial](./06_image_augmentation.ipynb) or return to the [table of contents](./00_description.ipynb).
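As an optional cross-check of the accuracy and false positive/negative rates reported by `metrics.evaluate` above, the same quantities can be computed by hand from logits and integer targets with plain NumPy. This is a stand-alone sketch with random toy data, not part of the BatchFlow API.

```
import numpy as np

def classification_rates(logits, targets, num_classes=10):
    """Accuracy plus per-class false positive / false negative rates computed
    directly from logits and integer targets."""
    preds = np.argmax(logits, axis=-1)
    accuracy = float(np.mean(preds == targets))
    fpr, fnr = [], []
    for c in range(num_classes):
        tp = np.sum((preds == c) & (targets == c))
        fp = np.sum((preds == c) & (targets != c))
        fn = np.sum((preds != c) & (targets == c))
        tn = np.sum((preds != c) & (targets != c))
        fpr.append(fp / (fp + tn) if (fp + tn) else 0.0)
        fnr.append(fn / (fn + tp) if (fn + tp) else 0.0)
    return accuracy, np.array(fpr), np.array(fnr)

# Toy check with random logits for a 10-class problem.
rng = np.random.default_rng(0)
acc, fpr, fnr = classification_rates(rng.normal(size=(1000, 10)), rng.integers(0, 10, size=1000))
print(f"accuracy={acc:.3f}  mean FPR={fpr.mean():.3f}  mean FNR={fnr.mean():.3f}")
```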
github_jupyter
``` %pip install bs4 %pip install lxml %pip install nltk %pip install textblob import urllib.request as ur from bs4 import BeautifulSoup ``` ## STEP 1: Read data from HTML and parse it to clean string ``` #We would extract the abstract from this HTML page article articleURL = "https://www.washingtonpost.com/news/the-switch/wp/2016/10/18/the-pentagons-massive-new-telescope-is-designed-to-track-space-junk-and-watch-out-for-killer-asteroids/" #HTML contains extra tags in a tree like structure page = ur.urlopen(articleURL).read().decode('utf8','ignore') soup = BeautifulSoup(page,"lxml") soup #We want the article or base text only soup.find('article') #Remove the article tags and get the plain text soup.find('article').text #Take all the articles from the page using find_all and combine together into a single string with a " " text = ' '.join(map(lambda p: p.text, soup.find_all('article'))) text #The encode() method encodes the string using the specified encoding. Convert back the encoded version to string by using decode() #Replace special encoded characters with a '?', further replace question mark with a blank char to get plain text from encoded article text. text.encode('ascii', errors='replace').decode('utf8').replace("?"," ") #All above steps encapsulated- to read and parse data from HTMl text import urllib.request as ur from bs4 import BeautifulSoup def getTextWaPo(url): page = ur.urlopen(url).read().decode('utf8') soup = BeautifulSoup(page,"lxml") text = ' '.join(map(lambda p: p.text, soup.find_all('article'))) return text.encode('ascii', errors='replace').decode('utf8').replace("?"," ") #calling function articleURL= "https://www.washingtonpost.com/news/the-switch/wp/2016/10/18/the-pentagons-massive-new-telescope-is-designed-to-track-space-junk-and-watch-out-for-killer-asteroids/" text = getTextWaPo(articleURL) text ``` ## STEP 2: Extract summary ``` import nltk from nltk.tokenize import sent_tokenize,word_tokenize from nltk.corpus import stopwords from string import punctuation #Strip all se4ntences in the text # A sentence is identified by a period or full stop. A space has to be accompanied by the full-stop else both sentences would be treated as a single sentence nltk.download('punkt') sents = sent_tokenize(text) sents #Strip all words/tokens in the text word_sent = word_tokenize(text.lower()) word_sent #Get all english stop words and punctuation marks nltk.download('stopwords') _stopwords = set(stopwords.words('english') + list(punctuation)) _stopwords #Filter stop words from our list of words in text word_sent=[word for word in word_sent if word not in _stopwords] word_sent #Use build in function to determine the frequency or the number of times each word occurs in the text #The higher the frequency, more is the importance of word from nltk.probability import FreqDist freq = FreqDist(word_sent) freq #The nlargest () function of the Python module heapq returns the specified number of largest elements from a Python iterable like a list, tuple and others. 
#heapq.nlargest(n, iterable, key=sorting_key, here used the dict.get function to get the value(frequency for a word) from key:value pair) from heapq import nlargest nlargest(10, freq, key=freq.get) #To check if these most important words match with the central theme of the article 'Space asteroid attack' #Now that we have the Word importance, we can calculate the significance score for each sentence #Word_Imp=Frequency of word in corpus #Sentence_Significance_score=SUM(Word_Imp for Words in the sentence) from collections import defaultdict ranking = defaultdict(int) for i,sent in enumerate(sents): for w in word_tokenize(sent.lower()): if w in freq: ranking[i] += freq[w] ranking #{Index of sentence : Sentence significance score} #Top most important 4 sentences - having maximum sentence significance score sents_idx = nlargest(4, ranking, key=ranking.get) sents_idx #Get the sentences from the top indices summary_1=[sents[j] for j in sorted(sents_idx)] summary_1 #Concat most important sentences to form the summary summary="" for i in range(len(summary_1)): summary=summary + summary_1[i] summary def summarize(text, n): sents = sent_tokenize(text) assert n <= len(sents) #Check if the sentences list have atleast n sentences word_sent = word_tokenize(text.lower()) _stopwords = set(stopwords.words('english') + list(punctuation)) word_sent=[word for word in word_sent if word not in _stopwords] freq = FreqDist(word_sent) ranking = defaultdict(int) for i,sent in enumerate(sents): for w in word_tokenize(sent.lower()): if w in freq: ranking[i] += freq[w] sents_idx = nlargest(n, ranking, key=ranking.get) summary_1= [sents[j] for j in sorted(sents_idx)] summary="" for i in range(len(summary_1)): summary=summary + summary_1[i] return summary #calling summarize(text,4) ```
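One common refinement of the frequency-based `summarize` function above is to divide each sentence score by the sentence length, so long sentences are not favoured merely for containing more words. The sketch below shows that variant; to keep it self-contained it uses a regex tokenizer and a tiny hard-coded stop-word set instead of NLTK, so the exact sentences it picks may differ from the NLTK-based version.

```
import re
from collections import Counter
from heapq import nlargest

STOP_WORDS = {"the", "a", "an", "of", "to", "and", "in", "is", "it", "for", "on", "was", "at"}

def summarize_normalized(text, n=2):
    """Frequency-based extractive summary like `summarize` above, but each
    sentence score is divided by its word count so longer sentences are not
    favoured simply for containing more words."""
    sentences = re.split(r"(?<=[.!?])\s+", text.strip())
    words = [w for w in re.findall(r"[a-z']+", text.lower()) if w not in STOP_WORDS]
    freq = Counter(words)
    scores = {}
    for i, sentence in enumerate(sentences):
        sentence_words = [w for w in re.findall(r"[a-z']+", sentence.lower()) if w not in STOP_WORDS]
        if sentence_words:
            scores[i] = sum(freq[w] for w in sentence_words) / len(sentence_words)
    top = sorted(nlargest(min(n, len(scores)), scores, key=scores.get))
    return " ".join(sentences[i] for i in top)

sample = ("The telescope tracks space junk above the planet. Space junk threatens working satellites. "
          "Officials said the telescope will also watch for asteroids. "
          "The weather at the launch event was pleasant.")
print(summarize_normalized(sample, n=2))
```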
github_jupyter
# Just-in-time Compilation with [Numba](http://numba.pydata.org/) ## Numba is a JIT compiler which translates Python code in native machine language * Using special decorators on Python functions Numba compiles them on the fly to machine code using LLVM * Numba is compatible with Numpy arrays which are the basis of many scientific packages in Python * It enables parallelization of machine code so that all the CPU cores are used ``` import math import numpy as np import matplotlib.pyplot as plt import numba ``` ## Using `numba.jit` Numba offers `jit` which can used to decorate Python functions. ``` def is_prime(n): if n <= 1: raise ArithmeticError('"%s" <= 1' % n) if n == 2 or n == 3: return True elif n % 2 == 0: return False else: n_sqrt = math.ceil(math.sqrt(n)) for i in range(3, n_sqrt): if n % i == 0: return False return True n = np.random.randint(2, 10000000, dtype=np.int64) # Get a random integer between 2 and 10000000 print(n, is_prime(n)) #is_prime(1) @numba.jit def is_prime_jitted(n): if n <= 1: raise ArithmeticError('"%s" <= 1' % n) if n == 2 or n == 3: return True elif n % 2 == 0: return False else: n_sqrt = math.ceil(math.sqrt(n)) for i in range(3, n_sqrt): if n % i == 0: return False return True numbers = np.random.randint(2, 100000, dtype=np.int64, size=10000) %time p1 = [is_prime(n) for n in numbers] %time p2 = [is_prime_jitted(n) for n in numbers] ``` ## Using `numba.jit` with `nopython=True` ``` @numba.jit(nopython=True) def is_prime_njitted(n): if n <= 1: raise ArithmeticError('"%s" <= 1' % n) if n == 2 or n == 3: return True elif n % 2 == 0: return False else: n_sqrt = math.ceil(math.sqrt(n)) for i in range(3, n_sqrt): if n % i == 0: return False return True numbers = np.random.randint(2, 100000, dtype=np.int64, size=1000) %time p1 = [is_prime_jitted(n) for n in numbers] %time p2 = [is_prime_njitted(n) for n in numbers] ``` ## Using ` @numba.jit(nopython=True)` is equivalent to using ` @numba.njit` ``` @numba.njit def is_prime_njitted(n): if n <= 1: raise ArithmeticError('n <= 1') if n == 2 or n == 3: return True elif n % 2 == 0: return False else: n_sqrt = math.ceil(math.sqrt(n)) for i in range(3, n_sqrt): if n % i == 0: return False return True numbers = np.random.randint(2, 100000, dtype=np.int64, size=1000) %time p = [is_prime_jitted(n) for n in numbers] %time p = [is_prime_njitted(n) for n in numbers] ``` ## Use `cache=True` to cache the compiled function ``` import math from numba import njit @njit(cache=True) def is_prime_njitted_cached(n): if n <= 1: raise ArithmeticError('n <= 1') if n == 2 or n == 3: return True elif n % 2 == 0: return False else: n_sqrt = math.ceil(math.sqrt(n)) for i in range(3, n_sqrt): if n % i == 0: return False return True numbers = np.random.randint(2, 100000, dtype=np.int64, size=1000) %time p = [is_prime_njitted(n) for n in numbers] %time p = [is_prime_njitted_cached(n) for n in numbers] ``` ## Vector Triad Benchmark Python vs Numpy vs Numba ``` from timeit import default_timer as timer def vecTriad(a, b, c, d): for j in range(a.shape[0]): a[j] = b[j] + c[j] * d[j] def vecTriadNumpy(a, b, c, d): a[:] = b + c * d @numba.njit() def vecTriadNumba(a, b, c, d): for j in range(a.shape[0]): a[j] = b[j] + c[j] * d[j] # Initialize Vectors n = 10000 # Vector size r = 100 # Iterations a = np.zeros(n, dtype=np.float64) b = np.empty_like(a) b[:] = 1.0 c = np.empty_like(a) c[:] = 1.0 d = np.empty_like(a) d[:] = 1.0 # Python version start = timer() for i in range(r): vecTriad(a, b, c, d) end = timer() mflops = 2.0 * r * n / ((end - start) * 1.0e6) 
print(f'Python: Mflops/sec: {mflops}') # Numpy version start = timer() for i in range(r): vecTriadNumpy(a, b, c, d) end = timer() mflops = 2.0 * r * n / ((end - start) * 1.0e6) print(f'Numpy: Mflops/sec: {mflops}') # Numba version vecTriadNumba(a, b, c, d) # Run once to avoid measuring the compilation overhead start = timer() for i in range(r): vecTriadNumba(a, b, c, d) end = timer() mflops = 2.0 * r * n / ((end - start) * 1.0e6) print(f'Numba: Mflops/sec: {mflops}') ``` ## Eager compilation using function signatures ``` import math from numba import njit @njit(['boolean(int64)', 'boolean(int32)']) def is_prime_njitted_eager(n): if n <= 1: raise ArithmeticError('n <= 1') if n == 2 or n == 3: return True elif n % 2 == 0: return False else: n_sqrt = math.ceil(math.sqrt(n)) for i in range(3, n_sqrt): if n % i == 0: return False return True numbers = np.random.randint(2, 1000000, dtype=np.int64, size=1000) # Run twice aft %time p1 = [is_prime_njitted_eager(n) for n in numbers] %time p2 = [is_prime_njitted_eager(n) for n in numbers] p1 = [is_prime_njitted_eager(n) for n in numbers.astype(np.int32)] #p2 = [is_prime_njitted_eager(n) for n in numbers.astype(np.float64)] ``` ## Calculating and plotting the [Mandelbrot set](https://en.wikipedia.org/wiki/Mandelbrot_set) ``` X, Y = np.meshgrid(np.linspace(-2.0, 1, 1000), np.linspace(-1.0, 1.0, 1000)) def mandelbrot(X, Y, itermax): mandel = np.empty(shape=X.shape, dtype=np.int32) for i in range(X.shape[0]): for j in range(X.shape[1]): it = 0 cx = X[i, j] cy = Y[i, j] x = 0.0 y = 0.0 while x * x + y * y < 4.0 and it < itermax: x, y = x * x - y * y + cx, 2.0 * x * y + cy it += 1 mandel[i, j] = it return mandel fig = plt.figure(figsize=(15, 10)) ax = fig.add_subplot(111) %time m = mandelbrot(X, Y, 100) ax.imshow(np.log(1 + m), extent=[-2.0, 1, -1.0, 1.0]); ax.set_aspect('equal') ax.set_ylabel('Im[c]') ax.set_xlabel('Re[c]'); @numba.njit(parallel=True) def mandelbrot_jitted(X, Y, radius2, itermax): mandel = np.empty(shape=X.shape, dtype=np.int32) for i in numba.prange(X.shape[0]): for j in range(X.shape[1]): it = 0 cx = X[i, j] cy = Y[i, j] x = cx y = cy while x * x + y * y < 4.0 and it < itermax: x, y = x * x - y * y + cx, 2.0 * x * y + cy it += 1 mandel[i, j] = it return mandel fig = plt.figure(figsize=(15, 10)) ax = fig.add_subplot(111) %time m = mandelbrot_jitted(X, Y, 4.0, 100) ax.imshow(np.log(1 + m), extent=[-2.0, 1, -1.0, 1.0]); ax.set_aspect('equal') ax.set_ylabel('Im[c]') ax.set_xlabel('Re[c]'); ``` ### Getting parallelization information ``` mandelbrot_jitted.parallel_diagnostics(level=3) ``` ## Creating `ufuncs` using `numba.vectorize` ``` from math import sin from numba import float64, int64 def my_numpy_sin(a, b): return np.sin(a) + np.sin(b) @np.vectorize def my_sin(a, b): return sin(a) + sin(b) @numba.vectorize([float64(float64, float64), int64(int64, int64)], target='parallel') def my_sin_numba(a, b): return np.sin(a) + np.sin(b) x = np.random.randint(0, 100, size=9000000) y = np.random.randint(0, 100, size=9000000) %time _ = my_numpy_sin(x, y) %time _ = my_sin(x, y) %time _ = my_sin_numba(x, y) ``` ### Vectorize the testing of prime numbers ``` @numba.vectorize('boolean(int64)') def is_prime_v(n): if n <= 1: raise ArithmeticError(f'"0" <= 1') if n == 2 or n == 3: return True elif n % 2 == 0: return False else: n_sqrt = math.ceil(math.sqrt(n)) for i in range(3, n_sqrt): if n % i == 0: return False return True numbers = np.random.randint(2, 10000000000, dtype=np.int64, size=100000) %time p = is_prime_v(numbers) ``` ### Parallelize the 
vectorized function ``` @numba.vectorize(['boolean(int64)', 'boolean(int32)'], target='parallel') def is_prime_vp(n): if n <= 1: raise ArithmeticError('n <= 1') if n == 2 or n == 3: return True elif n % 2 == 0: return False else: n_sqrt = math.ceil(math.sqrt(n)) for i in range(3, n_sqrt): if n % i == 0: return False return True numbers = np.random.randint(2, 10000000000, dtype=np.int64, size=100000) %time p1 = is_prime_v(numbers) %time p2 = is_prime_vp(numbers) # Print the largest primes from to 1 and 10 millions numbers = np.arange(1000000, 10000001, dtype=np.int32) %time p1 = is_prime_vp(numbers) primes = numbers[p1] for n in primes[-10:]: print(n) ```
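The `%time` cells above measure a single call, which for a freshly decorated function includes compilation. A small timing harness that makes the warm-up call explicit (as was done for the vector triad benchmark) can make such comparisons more repeatable; the example function below is illustrative and not taken from the notebook.

```
import time
import numpy as np
import numba

def sum_of_squares_py(x):
    total = 0.0
    for v in x:
        total += v * v
    return total

@numba.njit(cache=True)
def sum_of_squares_nb(x):
    total = 0.0
    for v in x:
        total += v * v
    return total

def best_time(fn, *args, repeat=5):
    """Best-of-`repeat` wall-clock time in seconds; the warm-up call ensures
    JIT compilation is not included in the measurement."""
    fn(*args)                       # warm-up (triggers compilation for njit)
    timings = []
    for _ in range(repeat):
        start = time.perf_counter()
        fn(*args)
        timings.append(time.perf_counter() - start)
    return min(timings)

x = np.random.rand(1_000_000)
print(f"pure Python: {best_time(sum_of_squares_py, x):.4f} s")
print(f"numba njit : {best_time(sum_of_squares_nb, x):.4f} s")
```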
github_jupyter
``` import sys sys.path.append('../input/shopee-competition-utils') sys.path.insert(0,'../input/pytorch-image-models') import numpy as np import pandas as pd import torch from torch import nn from torch.nn import Parameter from torch.nn import functional as F from torch.utils.data import Dataset, DataLoader import albumentations from albumentations.pytorch.transforms import ToTensorV2 from custom_scheduler import ShopeeScheduler from custom_activation import replace_activations, Mish from custom_optimizer import Ranger import math import cv2 import timm import os import random import gc from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import GroupKFold from sklearn.neighbors import NearestNeighbors from tqdm.notebook import tqdm class CFG: DATA_DIR = '../input/shopee-product-matching/train_images' TRAIN_CSV = '../input/shopee-product-matching/train.csv' # data augmentation IMG_SIZE = 512 MEAN = [0.485, 0.456, 0.406] STD = [0.229, 0.224, 0.225] SEED = 2021 # data split N_SPLITS = 5 TEST_FOLD = 0 VALID_FOLD = 1 EPOCHS = 8 BATCH_SIZE = 8 NUM_WORKERS = 4 DEVICE = 'cuda:0' CLASSES = 6609 SCALE = 30 MARGINS = [0.5,0.6,0.7,0.8,0.9] MARGIN = 0.5 BEST_THRESHOLD = 0.19 BEST_THRESHOLD_MIN2 = 0.225 MODEL_NAME = 'resnet50' MODEL_NAMES = ['resnet50','resnext50_32x4d','densenet121','efficientnet_b3','eca_nfnet_l0'] LOSS_MODULE = 'arc' LOSS_MODULES = ['arc','curricular'] USE_ARCFACE = True MODEL_PATH = f'{MODEL_NAME}_{LOSS_MODULE}_face_epoch_8_bs_8_margin_{MARGIN}.pt' FC_DIM = 512 SCHEDULER_PARAMS = { "lr_start": 1e-5, "lr_max": 1e-5 * 32, "lr_min": 1e-6, "lr_ramp_ep": 5, "lr_sus_ep": 0, "lr_decay": 0.8, } def seed_everything(seed): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = True # set True to be faster seed_everything(CFG.SEED) def get_test_transforms(): return albumentations.Compose( [ albumentations.Resize(CFG.IMG_SIZE,CFG.IMG_SIZE,always_apply=True), albumentations.Normalize(mean=CFG.MEAN, std=CFG.STD), ToTensorV2(p=1.0) ] ) class ShopeeImageDataset(torch.utils.data.Dataset): """for validating and test """ def __init__(self,df, transform = None): self.df = df self.root_dir = CFG.DATA_DIR self.transform = transform def __len__(self): return len(self.df) def __getitem__(self,idx): row = self.df.iloc[idx] img_path = os.path.join(self.root_dir,row.image) image = cv2.imread(img_path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) label = row.label_group if self.transform: augmented = self.transform(image=image) image = augmented['image'] return image,torch.tensor(1) ``` ## ArcMarginProduct ``` class ArcMarginProduct(nn.Module): r"""Implement of large margin arc distance: : Args: in_features: size of each input sample out_features: size of each output sample s: norm of input feature m: margin cos(theta + m) """ def __init__(self, in_features, out_features, s=30.0, m=0.50, easy_margin=False, ls_eps=0.0): print('Using Arc Face') super(ArcMarginProduct, self).__init__() self.in_features = in_features self.out_features = out_features self.s = s self.m = m self.ls_eps = ls_eps # label smoothing self.weight = Parameter(torch.FloatTensor(out_features, in_features)) nn.init.xavier_uniform_(self.weight) self.easy_margin = easy_margin self.cos_m = math.cos(m) self.sin_m = math.sin(m) self.th = math.cos(math.pi - m) self.mm = math.sin(math.pi - m) * m def forward(self, input, label): # --------------------------- cos(theta) & 
phi(theta) --------------------------- cosine = F.linear(F.normalize(input), F.normalize(self.weight)) sine = torch.sqrt(1.0 - torch.pow(cosine, 2)) phi = cosine * self.cos_m - sine * self.sin_m if self.easy_margin: phi = torch.where(cosine > 0, phi, cosine) else: phi = torch.where(cosine > self.th, phi, cosine - self.mm) # --------------------------- convert label to one-hot --------------------------- # one_hot = torch.zeros(cosine.size(), requires_grad=True, device='cuda') one_hot = torch.zeros(cosine.size(), device=CFG.DEVICE) one_hot.scatter_(1, label.view(-1, 1).long(), 1) if self.ls_eps > 0: one_hot = (1 - self.ls_eps) * one_hot + self.ls_eps / self.out_features # -------------torch.where(out_i = {x_i if condition_i else y_i) ------------- output = (one_hot * phi) + ((1.0 - one_hot) * cosine) output *= self.s return output, nn.CrossEntropyLoss()(output,label) ``` ## CurricularFace ``` ''' credit : https://github.com/HuangYG123/CurricularFace/blob/8b2f47318117995aa05490c05b455b113489917e/head/metrics.py#L70 ''' def l2_norm(input, axis = 1): norm = torch.norm(input, 2, axis, True) output = torch.div(input, norm) return output class CurricularFace(nn.Module): def __init__(self, in_features, out_features, s = 30, m = 0.50): super(CurricularFace, self).__init__() print('Using Curricular Face') self.in_features = in_features self.out_features = out_features self.m = m self.s = s self.cos_m = math.cos(m) self.sin_m = math.sin(m) self.threshold = math.cos(math.pi - m) self.mm = math.sin(math.pi - m) * m self.kernel = nn.Parameter(torch.Tensor(in_features, out_features)) self.register_buffer('t', torch.zeros(1)) nn.init.normal_(self.kernel, std=0.01) def forward(self, embbedings, label): embbedings = l2_norm(embbedings, axis = 1) kernel_norm = l2_norm(self.kernel, axis = 0) cos_theta = torch.mm(embbedings, kernel_norm) cos_theta = cos_theta.clamp(-1, 1) # for numerical stability with torch.no_grad(): origin_cos = cos_theta.clone() target_logit = cos_theta[torch.arange(0, embbedings.size(0)), label].view(-1, 1) sin_theta = torch.sqrt(1.0 - torch.pow(target_logit, 2)) cos_theta_m = target_logit * self.cos_m - sin_theta * self.sin_m #cos(target+margin) mask = cos_theta > cos_theta_m final_target_logit = torch.where(target_logit > self.threshold, cos_theta_m, target_logit - self.mm) hard_example = cos_theta[mask] with torch.no_grad(): self.t = target_logit.mean() * 0.01 + (1 - 0.01) * self.t cos_theta[mask] = hard_example * (self.t + hard_example) cos_theta.scatter_(1, label.view(-1, 1).long(), final_target_logit) output = cos_theta * self.s return output, nn.CrossEntropyLoss()(output,label) class ShopeeModel(nn.Module): def __init__( self, n_classes = CFG.CLASSES, model_name = CFG.MODEL_NAME, fc_dim = CFG.FC_DIM, margin = CFG.MARGIN, scale = CFG.SCALE, use_fc = True, pretrained = True, use_arcface = CFG.USE_ARCFACE): super(ShopeeModel,self).__init__() print(f'Building Model Backbone for {model_name} model, margin = {margin}') self.backbone = timm.create_model(model_name, pretrained=pretrained) if 'efficientnet' in model_name: final_in_features = self.backbone.classifier.in_features self.backbone.classifier = nn.Identity() self.backbone.global_pool = nn.Identity() elif 'resnet' in model_name: final_in_features = self.backbone.fc.in_features self.backbone.fc = nn.Identity() self.backbone.global_pool = nn.Identity() elif 'resnext' in model_name: final_in_features = self.backbone.fc.in_features self.backbone.fc = nn.Identity() self.backbone.global_pool = nn.Identity() elif 'densenet' in model_name: 
final_in_features = self.backbone.classifier.in_features self.backbone.classifier = nn.Identity() self.backbone.global_pool = nn.Identity() elif 'nfnet' in model_name: final_in_features = self.backbone.head.fc.in_features self.backbone.head.fc = nn.Identity() self.backbone.head.global_pool = nn.Identity() self.pooling = nn.AdaptiveAvgPool2d(1) self.use_fc = use_fc if use_fc: self.dropout = nn.Dropout(p=0.0) self.fc = nn.Linear(final_in_features, fc_dim) self.bn = nn.BatchNorm1d(fc_dim) self._init_params() final_in_features = fc_dim if use_arcface: self.final = ArcMarginProduct(final_in_features, n_classes, s=scale, m=margin) else: self.final = CurricularFace(final_in_features, n_classes, s=scale, m=margin) def _init_params(self): nn.init.xavier_normal_(self.fc.weight) nn.init.constant_(self.fc.bias, 0) nn.init.constant_(self.bn.weight, 1) nn.init.constant_(self.bn.bias, 0) def forward(self, image, label): feature = self.extract_feat(image) logits = self.final(feature,label) return logits def extract_feat(self, x): batch_size = x.shape[0] x = self.backbone(x) x = self.pooling(x).view(batch_size, -1) if self.use_fc: x = self.dropout(x) x = self.fc(x) x = self.bn(x) return x def read_dataset(): df = pd.read_csv(CFG.TRAIN_CSV) df['matches'] = df.label_group.map(df.groupby('label_group').posting_id.agg('unique').to_dict()) df['matches'] = df['matches'].apply(lambda x: ' '.join(x)) gkf = GroupKFold(n_splits=CFG.N_SPLITS) df['fold'] = -1 for i, (train_idx, valid_idx) in enumerate(gkf.split(X=df, groups=df['label_group'])): df.loc[valid_idx, 'fold'] = i labelencoder= LabelEncoder() df['label_group'] = labelencoder.fit_transform(df['label_group']) train_df = df[df['fold']!=CFG.TEST_FOLD].reset_index(drop=True) train_df = train_df[train_df['fold']!=CFG.VALID_FOLD].reset_index(drop=True) valid_df = df[df['fold']==CFG.VALID_FOLD].reset_index(drop=True) test_df = df[df['fold']==CFG.TEST_FOLD].reset_index(drop=True) train_df['label_group'] = labelencoder.fit_transform(train_df['label_group']) return train_df, valid_df, test_df def precision_score(y_true, y_pred): y_true = y_true.apply(lambda x: set(x.split())) y_pred = y_pred.apply(lambda x: set(x.split())) intersection = np.array([len(x[0] & x[1]) for x in zip(y_true, y_pred)]) len_y_pred = y_pred.apply(lambda x: len(x)).values precision = intersection / len_y_pred return precision def recall_score(y_true, y_pred): y_true = y_true.apply(lambda x: set(x.split())) y_pred = y_pred.apply(lambda x: set(x.split())) intersection = np.array([len(x[0] & x[1]) for x in zip(y_true, y_pred)]) len_y_true = y_true.apply(lambda x: len(x)).values recall = intersection / len_y_true return recall def f1_score(y_true, y_pred): y_true = y_true.apply(lambda x: set(x.split())) y_pred = y_pred.apply(lambda x: set(x.split())) intersection = np.array([len(x[0] & x[1]) for x in zip(y_true, y_pred)]) len_y_pred = y_pred.apply(lambda x: len(x)).values len_y_true = y_true.apply(lambda x: len(x)).values f1 = 2 * intersection / (len_y_pred + len_y_true) return f1 def get_image_embeddings(df, model): image_dataset = ShopeeImageDataset(df,transform=get_test_transforms()) image_loader = torch.utils.data.DataLoader( image_dataset, batch_size=CFG.BATCH_SIZE, pin_memory=True, num_workers = CFG.NUM_WORKERS, drop_last=False ) embeds = [] with torch.no_grad(): for img,label in tqdm(image_loader): img = img.to(CFG.DEVICE) label = label.to(CFG.DEVICE) feat,_ = model(img,label) image_embeddings = feat.detach().cpu().numpy() embeds.append(image_embeddings) del model image_embeddings = 
np.concatenate(embeds) print(f'Our image embeddings shape is {image_embeddings.shape}') del embeds gc.collect() return image_embeddings def get_image_neighbors(df, embeddings, threshold = 0.2, min2 = False): nbrs = NearestNeighbors(n_neighbors = 50, metric = 'cosine') nbrs.fit(embeddings) distances, indices = nbrs.kneighbors(embeddings) predictions = [] for k in range(embeddings.shape[0]): if min2: idx = np.where(distances[k,] < CFG.BEST_THRESHOLD)[0] ids = indices[k,idx] if len(ids) <= 1 and distances[k,1] < threshold: ids = np.append(ids,indices[k,1]) else: idx = np.where(distances[k,] < threshold)[0] ids = indices[k,idx] posting_ids = ' '.join(df['posting_id'].iloc[ids].values) predictions.append(posting_ids) df['pred_matches'] = predictions df['f1'] = f1_score(df['matches'], df['pred_matches']) df['recall'] = recall_score(df['matches'], df['pred_matches']) df['precision'] = precision_score(df['matches'], df['pred_matches']) del nbrs, distances, indices gc.collect() return df def search_best_threshold(valid_df,model): search_space = np.arange(10, 50, 1) valid_embeddings = get_image_embeddings(valid_df, model) print("Searching best threshold...") best_f1_valid = 0. best_threshold = 0. for i in search_space: threshold = i / 100 valid_df = get_image_neighbors(valid_df, valid_embeddings, threshold=threshold) valid_f1 = valid_df.f1.mean() valid_recall = valid_df.recall.mean() valid_precision = valid_df.precision.mean() print(f"threshold = {threshold} -> f1 score = {valid_f1}, recall = {valid_recall}, precision = {valid_precision}") if (valid_f1 > best_f1_valid): best_f1_valid = valid_f1 best_threshold = threshold print("Best threshold =", best_threshold) print("Best f1 score =", best_f1_valid) CFG.BEST_THRESHOLD = best_threshold # phase 2 search print("Searching best min2 threshold...") search_space = np.arange(CFG.BEST_THRESHOLD * 100, CFG.BEST_THRESHOLD * 100 + 20, 0.5) best_f1_valid = 0. best_threshold = 0. for i in search_space: threshold = i / 100 valid_df = get_image_neighbors(valid_df, valid_embeddings, threshold=threshold,min2=True) valid_f1 = valid_df.f1.mean() valid_recall = valid_df.recall.mean() valid_precision = valid_df.precision.mean() print(f"min2 threshold = {threshold} -> f1 score = {valid_f1}, recall = {valid_recall}, precision = {valid_precision}") if (valid_f1 > best_f1_valid): best_f1_valid = valid_f1 best_threshold = threshold print("Best min2 threshold =", best_threshold) print("Best f1 score after min2 =", best_f1_valid) CFG.BEST_THRESHOLD_MIN2 = best_threshold def save_embeddings(): """Save valid and test image embeddings. 
""" train_df, valid_df, test_df = read_dataset() PATH_PREFIX = '../input/image-model-trained/' for i in range(len(CFG.LOSS_MODULES)): CFG.LOSS_MODULE = CFG.LOSS_MODULES[i] if 'arc' in CFG.LOSS_MODULE: CFG.USE_ARCFACE = True else: CFG.USE_ARCFACE = False for j in range(len(CFG.MODEL_NAMES)): CFG.MODEL_NAME = CFG.MODEL_NAMES[j] for k in range(len(CFG.MARGINS)): CFG.MARGIN = CFG.MARGINS[k] model = ShopeeModel(model_name = CFG.MODEL_NAME, margin = CFG.MARGIN, use_arcface = CFG.USE_ARCFACE) model.eval() model = replace_activations(model, torch.nn.SiLU, Mish()) CFG.MODEL_PATH = f'{CFG.MODEL_NAME}_{CFG.LOSS_MODULE}_face_epoch_8_bs_8_margin_{CFG.MARGIN}.pt' MODEL_PATH = PATH_PREFIX + CFG.MODEL_PATH model.load_state_dict(torch.load(MODEL_PATH)) model = model.to(CFG.DEVICE) valid_embeddings = get_image_embeddings(valid_df, model) VALID_EMB_PATH = '../input/image-embeddings/' + CFG.MODEL_PATH[:-3] + '_valid_embed.csv' np.savetxt(VALID_EMB_PATH, valid_embeddings, delimiter=',') TEST_EMB_PATH = '../input/image-embeddings/' + CFG.MODEL_PATH[:-3] + '_test_embed.csv' test_embeddings = get_image_embeddings(test_df, model) np.savetxt(TEST_EMB_PATH, test_embeddings, delimiter=',') save_embeddings() ``` ## Run test Parameters: + `CFG.MARGIN = [0.5,0.6,0.7,0.8,0.9]` + `CFG.MODEL_NAME = ['resnet50','resnext50_32x4d','densenet121','efficientnet_b3','eca_nfnet_l0']` + `CFG.LOSS_MODULE = ['arc','curricular']` ## save and load embeddings + `np.savetxt('tf_efficientnet_b5_ns.csv', image_embeddings1, delimiter=',')` + `image_embeddings4 = np.loadtxt(CFG.image_embeddings4_path, delimiter=',')` + `image_embeddings4_path = '../input/image-embeddings/efficientnet_b3.csv'`
github_jupyter
# 7. előadás *Tartalom:* Függvények, pár további hasznos library (import from ... import ... as szintaktika, time, random, math, regex (regular expressions), os, sys) ### Függvények Találkozhattunk már függvényekkel más programnyelvek kapcsán. De valójában mik is azok a függvények? A függvények: • újrahasználható kódok • valamilyen specifikus feladatot végeznek el • flexibilisek • egyszer definiálandók • nem csinálnak semmit, amíg nem hívjuk meg őket Hogyan definiálunk egy függvényt? ``` python def fuggveny_neve(parameter1, parameter2, stb): # ide írjuk a kódot return visszatérési_érték_lista1, v_2, v_3 # opcionális! ``` Ezt hívni a következőképp lehet: ``` python a, b, c = fuggveny_neve(parameter1, parameter2 …) # vagy, ha nem vagyunk kiváncsiak az összes visszatérési értékre a, _, _ = fuggveny_neve(parameter1, parameter2 …) ``` Kezdjük egy egyszerű függvénnyel! Írjunk egy olyan függvényt, amely a `"Helló, <név>"` sztringet adja vissza: ``` def udvozlet(nev): print("Hello,", nev, end="!") udvozlet("Tamás") ``` Egyes függvényeknek van paraméterük/argumentumuk, másoknak azonban nincs. Egyes esetekben a paraméteres függvények bizonyulhatnak megfelelőnek, pont a paraméterek nyújtotta flexibilitás miatt, más esetekben pedig pont a paraméter nélküli függvények használata szükséges. Egyes függvényeknek sok visszatérési érékük van, másoknál elhagyható a `return`. Írjunk egy olyan `adat_generalas` függvényt, amely 3 paramétert fogad: a generálandó listák darabszámát, lépésközét és opcionálisan egy eltolás értéket, amit hozzáadunk a generált adatokhoz. Amenyyiben nem adjuk meg a 3. paramétert, az eltolást, úgy az alapértelmezett érték legyen `0`. A 3 visszatérsi érték az `X` adat, valamint a 2 `Y` adat (koszinusz és színusz). ``` import math def adat_generalas(darab, lepes, eltolas = 0): x = [x * lepes for x in range(0, darab)] y_sin = [math.sin(x * lepes) + eltolas for x in range(0, darab)] y_cos = [math.cos(x * lepes) + eltolas for x in range(0, darab)] return x, y_sin, y_cos ``` Próbáljuk ki a fenti függvényt és jelezzük ki `plot`-ként. ``` import matplotlib.pyplot as plt x1, y1, y2 = adat_generalas(80, 0.1) x2, y3, _ = adat_generalas(400, 0.02, 0.2) plt.plot(x1, y1, "*") plt.plot(x1, y2, "*") plt.plot(x2, y3, "*") plt.show() ``` A paraméterekkel rendelkező függvényeknél nagyon fontos a paraméterek sorrendje. Nem mindegy, hogy függvényhíváskor milyen sorrendben adjuk meg a paramétereket. Pl.: ``` def upload_events(events_file, location): # … (feltehetően a kód jön ide) … return # Helytelen hívás!!! upload_events("New York", "events.csv") # Helyes hívás!!! upload_events("events.csv", "New York") ``` Ha azonban nem kedveljük ezt a szabályt, akár át is hághatjuk úgy, hogy megmondjuk a függvénynek, melyik érték, melyik változóhoz tartozik! ``` # Így is helyes!!! upload_events(location="New York", events_file="events.csv") ``` Fontos még megemlíteni a függvények visszatérési értékét. A függvények egy vagy több értéket adhatnak vissza a **return** parancsszó meghívásával. A nem meghatározott visszatérési érték az ún. **None**. Ezen kívül visszatérési érték lehet *szám, karakter, sztring, lista, könyvtár, TRUE, FALSE,* vagy bármilyen más típus. ``` # Egy érték vissza adása def product(x, y): return x*y product(5,6) # Több érték vissza adása def get_attendees(filename): # Ide jön a kód, amely vissza adja a „teacher”, „assistants” és „students” értékeket. 
    #teacher, assistants, students = get_attendees("file.csv")
    return
```

## More useful libraries:

#### The *import* syntax

To use built-in functions in our code we need access to them. They are usually found in modules or packages, which we can pull in with the **import** keyword. When Python imports, say, a module called *hello*, the interpreter first goes through the built-in modules, and if it does not find it among them, it starts looking for a file called *hello.py* in the directories listed in the **sys.path** variable.

The basic syntax of importing consists of the **import** keyword and the name of the module to be included: **import hello**

When we import a module, we make it available in our code as a separate namespace. This means that when we call one of its functions, we have to join it to the module name with a dot (.), like this: [module].[function]

```
import random
random.randint(0,5) # calls the randint function, which returns a random integer
```

Let's look at an example that prints 10 random integers!

```
import random

for i in range(10):
    print(random.randint(1, 25))
```

#### The *from … import …* syntax

We typically use this when we want to refer to a specific function of a module directly, avoiding the dotted reference. Let's see the previous example with this approach:

```
from random import randint

for i in range(10):
    print(randint(1, 25))
```

#### Using aliases, the *import … as …* syntax

In Python, modules and their functions can be renamed with the **as** keyword; _we have used this before_. E.g.:

```
import math as m
```

For some modules with long names, using an alias is common practice. We have been using one for a long time, e.g.:

```
import matplotlib.pyplot as plt
```

### The *time* and *datetime* libraries:

The **time** module contains basic time- and date-related functions. It uses two different representations, and several functions help convert back and forth between them:

- **float** number of seconds – this is the internal UNIX representation. In this form, the time elapsed between two instants is simply a floating-point number.
- **struct_time** object – this has nine attributes for representing a point in time, following the Gregorian calendar. In this form there is no way to express the time elapsed between two instants; for that we have to convert back and forth between **float** and **struct_time**.

The **datetime** module contains every object and method needed to handle Gregorian-calendar timekeeping correctly. **datetime** has only one kind of time representation, but it provides four classes that make working with dates and times easy:

- **datetime.time** – has four time attributes: hour, minute, second and microsecond.
- **datetime.date** – has three attributes: year, month and day.
- **datetime.datetime** – combines the **datetime.time** and **datetime.date** classes.
- **datetime.timedelta** – expresses the time elapsed between two **date**, **time** or **datetime** values. Its value can be expressed in days, seconds or microseconds.
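Before moving on to the examples, here is a small sketch of converting between the two representations described above; this snippet is an addition to the original material and assumes only the standard library `time` module:

```
import time

now_float = time.time()                  # float seconds since the UNIX epoch
now_struct = time.localtime(now_float)   # struct_time with year, month, day, ... fields
print(now_struct.tm_year, now_struct.tm_mon, now_struct.tm_mday)

back_to_float = time.mktime(now_struct)  # convert back to float seconds
print(abs(now_float - back_to_float) < 1.0)  # equal up to sub-second precision
```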
Let's look at a few examples:

```
from datetime import date
print(date.today())    # today's date

from datetime import datetime
print(datetime.now())  # the current time
```

Let's check: if we wait 2 seconds between two timestamps, exactly how much elapsed time does that actually amount to?

```
from datetime import timedelta
from time import sleep

t1 = datetime.now()
print(t1)
sleep(2) # wait 2 seconds
t2 = datetime.now()
print(t2)
print("Actual elapsed time:", timedelta(seconds=(t2-t1).total_seconds())) # the elapsed time

import time
time.clock() # floating-point seconds: on UNIX it shows processor time, on Windows the wall-clock
             # time since the first call of the function
             # (note: time.clock() was removed in Python 3.8; time.perf_counter() is the modern replacement)
```

### The *random* library:

This library provides pseudo-random generators. Let's look at a few concrete examples:

```
from random import random
random()                     # random float in [0, 1)

from random import uniform
uniform(2.5, 10.0)           # random float in [2.5, 10.0)

from random import expovariate
expovariate(1/10)            # exponential distribution; the argument is 1 divided by the desired (non-zero) mean

from random import randrange
num1 = randrange(10)         # random integer in [0, 9]: randrange(stop)
num2 = randrange(0, 101, 2)  # even integer in [0, 100]: randrange(start, stop, step)
num1, num2

from random import choice
choice(['win', 'lose', 'draw'])  # a random element of a list

from random import shuffle
deck = 'ace two three four'.split()
shuffle(deck)                # shuffle the elements
deck

from random import sample
sample([10, 20, 30, 40, 50, 60, 70], k=4)  # returns k elements from the population

from random import randint
randint(1,10)                # random integer in the interval [1, 10]
```

### The *math* library

It provides mathematical functions based on the C standard. A few concrete examples:

```
import math

math.ceil(5.6)       # round a float up to an integer
math.factorial(10)   # factorial
math.floor(5.6)      # round a float down to an integer
math.gcd(122, 6)     # returns the greatest common divisor of the two numbers
math.exp(2)          # Euler's number raised to the given power
```

In Python this is not strictly necessary, since exponentiation also works with the `**` operator (`2 ** 3`).

```
math.pow(2, 3)    # pow(x, y): x raised to the power y, more briefly 2**3
math.sqrt(36)     # square root, more briefly 36**0.5
math.cos(0.5)     # cosine, with the argument in radians; math.sin(x) works the same way
math.degrees(1)   # radian-to-degree conversion; similarly, math.radians(x) converts degrees to radians
```

Mathematical constants:

```
print(math.pi)   # Pi
print(math.e)    # Euler's number
print(math.inf)  # infinity
print(math.nan)  # not a number (NaN)
```

### The *os* library

The os module essentially contains functions that support operations related to the operating system. The module is included with the **`import os`** command. Let's look at a few of these methods:

```
import os
os.system("dir") # executes a shell command; any output written by the command is not returned here
```

As a return value it only gives `0` if the operation succeeds and `1` if it does not. All output, however, appears on the console:

```python
 Directory of C:\Users\herno\Documents\GitHub\sze-academic-python\eload

2018-11-03  16:42    <DIR>          .
2018-11-03  16:42    <DIR>          ..
2018-10-24  06:16    <DIR>          .ipynb_checkpoints
2018-10-24  11:02    <DIR>          data
2018-10-24  06:16             5,846 ea00.md
2018-11-03  15:30            18,775 ea01.ipynb
2018-07-31  17:41            21,838 ea02.ipynb
2018-07-31  17:41            26,484 ea03.ipynb
2018-09-10  15:23           293,223 ea04.ipynb
2018-10-24  06:16           128,088 ea05.ipynb
2018-07-17  11:01            34,838 ea06.ipynb
2018-11-03  16:42            49,489 ea07.ipynb
2018-11-03  15:30            10,384 ea08.ipynb
2018-11-03  15:30           401,267 ea10.ipynb
```

```
os.getcwd() # prints the current working directory
os.getpid() # prints the ID of the running process
```

The following examples are not executed here, we only list them:

1) `os.chroot(path)` - change the root directory of the process to `'path'`
2) `os.listdir(path)` - list the entries of the given directory
3) `os.mkdir(path)` - create a directory at the `'path'` location
4) `os.remove(path)` - delete the file at `'path'`
5) `os.removedirs(path)` - remove directories recursively
6) `os.rename(src, dst)` - rename `'src'` to `'dst'`; it can be a file or a directory

### The *sys* library

The **sys** module provides various information about constants, functions and methods related to the Python interpreter.

```
import sys

sys.stderr.write('This is a stderr text\n') # writes an error message to the stderr output
sys.stderr.flush()                          # flushes the buffered content to the output
sys.stdout.write('This is a stdout text\n') # writes text to stdout

print("script name is", sys.argv[0]) # 'argv' holds the command-line arguments,
                                     # argv[0] is the name of the script itself

print("There are", len(sys.path), "entries on the path.") # query the number of entries in 'path'
sys.path # list the entries in 'path'
```

``` python
print(sys.modules.keys()) # print the names of the imported modules
```

```
dict_keys(['pkgutil', '__future__', 'filecmp', 'mpl_toolkits', 'platform', 'distutils.debug', 'jedi.evaluate.context', 'ctypes._endian', 'msvcrt', 'parso.pgen2.parse', '_stat', 'jedi.evaluate', '_pickle', 'parso.python.pep8', 'jedi.evaluate.context.module', 'distutils.log', 'collections', ......
```

```
print(sys.platform) # query the platform type
```

``` python
print("hello")
sys.exit(1) # does not exit immediately, it raises the 'SystemExit' exception. See the next example for handling it.
print("there")

Output:
hello
SystemExit: 1
```

```
print("hello")
try: # handle the 'SystemExit' exception with 'try'
    sys.exit(1)
except SystemExit:
    pass
print("there")
```

### Regular expressions: *regex*

We have to process strings in many situations. These strings, however, do not always arrive in an immediately interpretable form, or they do not follow any pattern, which makes processing them quite hard. Regular expressions offer a workable solution for this.

A **regular expression** is therefore a pattern, or a sequence of patterns, that makes it easier to process data belonging to a given set, data that should follow a common pattern. Let's look at an example! If, say, my pattern rule is the word "aba", then every string that matches the "aba" format belongs to the set. Such a simple rule, however, can only handle simple strings. A more complex rule, such as `ab*a`, offers far more possible outputs. In essence, it can generate an infinite set of strings, such as "aa", "aba", "abba", and so on. Here it is already a bit harder to check whether a given string was generated by this rule.
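To make this membership question concrete, the short snippet below (an addition, not part of the original lecture) checks a few strings against the `ab*a` rule using Python's `re` module, which is introduced in the next section:

```
import re

# Does the whole string belong to the language described by "ab*a"?
# re.fullmatch requires the entire string to match the pattern.
pattern = re.compile(r"ab*a")
for s in ["aa", "aba", "abba", "abc", "aban"]:
    print(s, "->", bool(pattern.fullmatch(s)))
```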
#### A few basic rules for building regular expressions

- Any ordinary character can form a regular expression, even on its own.
- The '.' character matches any single character. E.g. "x.y" matches "xay", "xby", etc., but NOT "xaby".
- Square brackets [...] define a rule where the given position matches any element of the set inside the brackets. E.g. "x[abc]z" matches "xaz", "xbz" and "xcz". Or, in the pattern "x[a-z]z", the middle character can be any letter of the alphabet; such a range is marked with a hyphen '-'.
- Square brackets modified with the '^' sign [^...] mean that anything other than what is listed inside the brackets may stand there. E.g. in the expression "1[^2-8]2", among the digits only '0', '1' and '9' may appear in place of the brackets, because the range "2-8" has been excluded.
- Several regular expressions can be grouped with parentheses (...). E.g. the rule "(ab)c" groups the regular expressions "ab" and 'c'.
- Regular expressions can be repeated. E.g. `x*` repeats 'x' zero or more times, `x+` repeats 'x' one or more times, and `x?` repeats 'x' zero or one time. A concrete example: the expression `1(abc)*2` matches "12", "1abc2", and even "1abcabcabc2".
- If a rule has to match at the beginning of the line, we mark that with the `^` anchor; if it has to match at the end of the line, we use the `$` anchor.

#### Using regular expressions in Python

To use regular expressions we have to import the **re** library. Let's look at a few concrete tasks:

```
import re

szov1 = "2019 november 12"
print("All numbers:", re.findall("\d+", szov1))
print("Everything else:", re.findall("[^\d+]", szov1))
print("English a-z A-Z:", re.findall("[a-zA-Z]+", szov1))

szov2 = "<body>Ez egy példa<br></body>"
print(re.findall("<.*?>", szov2))

szov3 = "sör sört sár sír sátor Pártol Piros Sanyi Peti Pite Pete "
print("s.r :", re.findall("s.r", szov3))
print("s.r. :", re.findall("s.r.", szov3))
print("s[áí]r :", re.findall("s[áí]r", szov3))
print("P.t. :", re.findall("P.t.", szov3))
print("P.*t. :", re.findall("P.*t.", szov3))
print("P.{0,3}t.:", re.findall("P.{0,3}t.", szov3))
```

Let's look at a more complex example:

```
fajl_lista = [
    "valami_S015_y001.png",
    "valami_S015_y001.npy",
    "valami_S014_y001.png",
    "valami_S014_y001.npy",
    "valami_S013_y001.png",
    "valami_S013_y001.npy",
    "_S999999_y999.npy"]

r1 = re.compile(r"_S\d+_y\d+\.png")
r2 = re.compile(r"_S\d+_y\d+\..*")

f1 = list(filter(r1.search, fajl_lista))
f2 = list(filter(r2.search, fajl_lista))

print("_S\d+_y\d+\.png \n---------------")
for f in f1:
    print(f)
print("")
print("_S\d+_y\d+\..* \n---------------")
for f in f2:
    print(f)
```

More information about regular expressions can be found [HERE](https://www.regular-expressions.info/index.html).

## _Used sources_:

- [Shannon Turner: Python lessons repository](https://github.com/shannonturner/python-lessons) MIT license (c) Shannon Turner 2013-2014,
- [Siki Zoltán: Python mogyoróhéjban](http://www.agt.bme.hu/gis/python/python_oktato.pdf) GNU FDL license (c) Siki Zoltán,
- [BME AUT](https://github.com/bmeaut) MIT License Copyright (c) BME AUT 2016-2018,
- [Python Software Foundation documents](https://docs.python.org/3/) Copyright (c), Python Software Foundation, 2001-2018,
- [Regular expressions](https://www.regular-expressions.info/index.html) Copyright (c) 2003-2018 Jan Goyvaerts.
All rights reserved.
github_jupyter
<a href="https://colab.research.google.com/github/google/neural-tangents/blob/master/notebooks/phase_diagram.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> Copyright 2020 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ### Imports & Utils ``` !pip install -q git+https://www.github.com/google/neural-tangents import jax.numpy as np from jax.experimental import optimizers from jax.api import grad, jit, vmap from jax import lax from jax.config import config config.update('jax_enable_x64', True) from functools import partial import neural_tangents as nt from neural_tangents import stax _Kernel = nt.utils.kernel.Kernel def Kernel(K): """Create an input Kernel object out of an np.ndarray.""" return _Kernel(cov1=np.diag(K), nngp=K, cov2=None, ntk=None, is_gaussian=True, is_reversed=False, diagonal_batch=True, diagonal_spatial=False, shape1=(K.shape[0], 1024), shape2=(K.shape[1], 1024), x1_is_x2=True, is_input=True, batch_axis=0, channel_axis=1) def fixed_point(f, initial_value, threshold): """Find fixed-points of a function f:R->R using Newton's method.""" g = lambda x: f(x) - x dg = grad(g) def cond_fn(x): x, last_x = x return np.abs(x - last_x) > threshold def body_fn(x): x, _ = x return x - g(x) / dg(x), x return lax.while_loop(cond_fn, body_fn, (initial_value, 0.0))[0] from IPython.display import set_matplotlib_formats set_matplotlib_formats('pdf', 'svg') import matplotlib.pyplot as plt import seaborn as sns sns.set_style(style='white') def format_plot(x='', y='', grid=True): ax = plt.gca() plt.grid(grid) plt.xlabel(x, fontsize=20) plt.ylabel(y, fontsize=20) def finalize_plot(shape=(1, 1)): plt.gcf().set_size_inches( shape[0] * 1.5 * plt.gcf().get_size_inches()[1], shape[1] * 1.5 * plt.gcf().get_size_inches()[1]) plt.tight_layout() ``` # Phase Diagram We will reproduce the phase diagram described in [Poole et al.](https://papers.nips.cc/paper/6322-exponential-expressivity-in-deep-neural-networks-through-transient-chaos) and [Schoenholz et al.](https://arxiv.org/abs/1611.01232) using Neural Tangents. In these and subsequent papers, it was found that deep neural networks can exhibit a phase transition as a function of the variance of their weights ($\sigma_w^2$) and biases ($\sigma_b^2$). For networks with $\tanh$ activation functions, this phase transition is between an "ordered" phase and a "chaotic" phase. In the ordered phase, pairs of inputs collapse to a single point as they propagate through the network. By contrast, in the chaotic phase, nearby inputs become increasingly dissimilar in later layers of the network. This phase diagram is shown below. A number of properties of neural networks - such as trainability, mode-collapse, and maximum learing rate - have now been related to this phase diagram over many papers (recently e.g. [Yang et al.](https://arxiv.org/abs/1902.08129), [Jacot et al.](https://arxiv.org/abs/1907.05715), [Hayou et al.](https://arxiv.org/abs/1905.13654), and [Xiao et al.](https://arxiv.org/abs/1912.13053)). 
\ ![Phase Diagram](https://raw.githubusercontent.com/google/neural-tangents/master/notebooks/figures/pennington_phase_diagram.svg?sanitize=true) > Phase diagram for $\tanh$ neural networks (appeared in [Pennington et al.](https://arxiv.org/abs/1802.09979)). \ Consider two inputs to a neural network, $x_1$ and $x_2$, normalized such that $\|x_1\| = \|x_2\| = q^0$. We can compute the cosine-angle between the inputs, $c^0 = \cos\theta_{12} = \frac{x_1 \cdot x_2}{q^0}$. Additionally, we can keep track of the norm and cosine angle of the resulting pre-activations ($q^l$ and $c^l$ respectively) as signal passes through layers of the neural network. In the wide-network limit there are deterministic functions, called the $\mathcal Q$-map and the $\mathcal{C}$-map, such that $q^{l+1} = \mathcal Q(q^l)$ and $c^{l+1} = \mathcal C(q^l, c^l)$. \ In fully-connected networks with $\tanh$-like activation functions, both the $\mathcal Q$-map and $\mathcal C$-map have unique stable-fixed-points, $q^*$ and $c^*$, such that $q^* = \mathcal Q(q^*)$ and $c^* = \mathcal C(q^*, c^*)$. To simplify the discussion, we typically choose to normalize our inputs so that $q^0 = q^*$ and we can restrict our study to the $\mathcal C$-map. The $\mathcal C$-map always has a fixed point at $c^* = 1$ since two identical inputs will remain identical as they pass through the network. However, this fixed point is not always stable and two points that start out very close together will often separate. Indeed, the ordered and chaotic phases are characterized by the stability of the $c^* = 1$ fixed point. In the ordered phase $c^* = 1$ is stable and pairs of inputs converge to one another as they pass through the network. In the chaotic phase the $c^* = 1$ point is unstable and a new, stable, fixed point with $c^* < 1$ emerges. The phase boundary is defined as the point where $c^* = 1$ is marginally stable. \ To understand the stability of a fixed point, $c^*$, we will use the standard technique in Dynamical Systems theory and expand the $\mathcal C$-map in $\epsilon^l = c^l - c^*$ which implies that $\epsilon^{l+1} = \chi(c^*)\epsilon^l$ where $\chi = \frac{\partial\mathcal C}{\partial C}$. This implies that sufficiently close to a fixed point of the dynamics, $\epsilon^l = \chi(c^*)^l$. If $\chi(c^*) < 1$ then the fixed point is stable and points move towards the fixed point exponentially quickly. If $\chi(c^*) > 1$ then points move away from the fixed point exponentially quickly. This implies that the phase boundary, being defined by the marginal stability of $c^* = 1$, will be where $\chi_1 = \chi(1) = 1$. \ To reproduce these results in Neural Tangents, we notice first that the $\mathcal{C}$-map described above is intimately related to the NNGP kernel, $K^l$, of [Lee et al.](https://arxiv.org/abs/1711.00165), [Matthews et al.](https://arxiv.org/abs/1804.11271), and [Novak et al.](https://arxiv.org/abs/1810.05148). The core of Neural Tangents is a map $\mathcal T$ for a wide range of architectures such that $K^{l + 1} = \mathcal T(K^l)$. Since $C^l$ can be written in terms of the NNGP kernel as $C^l = K^l_{12} / q^*$ this implies that Neural Tangents provides a way of computing the $\mathcal{C}$-map for a wide range of network architectures. \ To produce the phase diagam above, we must compute $q^*$ and $c^*$ as well as $\chi_1$. We will use a fully-connected network with $\text{Erf}$ activation functions since they admit an analytic kernel function and are very similar to $\tanh$ networks. 
We will first define the $\mathcal Q$-map by noting that the $\mathcal Q$-map will be identical to $\mathcal T$ if the covariance matrix has only a single entry. We will use Newton's method to find $q^*$ given the $\mathcal Q$-map. Next we will use the relationship above to define the $\mathcal C$-map in terms of $\mathcal T$. We will again use Newton's method to find the stable $c^*$ fixed point. We can define $\chi$ by using JAX's automatic differentiation to compute the derivative of the $\mathcal C$-map. This can be written relatively concisely below. \ Note: this particular phase diagram holds for a wide range of neural networks but, emphatically, not for ReLUs. The ReLU phase diagram is somewhat different and could be investigated using Neural Tangents. However, we will save it for a followup notebook. ``` def c_map(W_var, b_var): W_std = np.sqrt(W_var) b_std = np.sqrt(b_var) # Create a single layer of a network as an affine transformation composed # with an Erf nonlinearity. kernel_fn = stax.serial(stax.Dense(1024, W_std, b_std), stax.Erf())[2] def q_map_fn(q): return kernel_fn(Kernel(np.array([[q]]))).nngp[0, 0] qstar = fixed_point(q_map_fn, 1.0, 1e-7) def c_map_fn(c): K = np.array([[qstar, qstar * c], [qstar * c, qstar]]) K_out = kernel_fn(Kernel(K)).nngp return K_out[1, 0] / qstar return c_map_fn c_star = lambda W_var, b_var: fixed_point(c_map(W_var, b_var), 0.1, 1e-7) chi = lambda c, W_var, b_var: grad(c_map(W_var, b_var))(c) chi_1 = partial(chi, 1.) ``` To generate the phase diagram above, we would like to compute the fixed-point correlation not only at a single value of $(\sigma_w^2,\sigma_b^2)$ but on a whole mesh. We can use JAX's `vmap` functionality to do this. Here we define vectorized versions of the above functions. ``` def vectorize_over_sw_sb(fn): # Vectorize over the weight variance. fn = vmap(fn, (0, None)) # Vectorize over the bias variance. fn = vmap(fn, (None, 0)) return fn c_star = jit(vectorize_over_sw_sb(c_star)) chi_1 = jit(vectorize_over_sw_sb(chi_1)) ``` We can use these functions to plot $c^*$ as a function of the weight and bias variance. As expected, we see a region where $c^* = 1$ and a region where $c^* < 1$. ``` W_var = np.arange(0, 3, 0.01) b_var = np.arange(0., 0.25, 0.001) plt.contourf(W_var, b_var, c_star(W_var, b_var)) plt.colorbar() plt.title('$C^*$ as a function of weight and bias variance', fontsize=14) format_plot('$\\sigma_w^2$', '$\\sigma_b^2$') finalize_plot((1.15, 1)) ``` We can, of course, threshold on $c^*$ to get a cleaner definition of the phase diagram. ``` plt.contourf(W_var, b_var, c_star(W_var, b_var) > 0.999, levels=3, colors=[[1.0, 0.89, 0.811], [0.85, 0.85, 1]]) plt.title('Phase diagram in terms of weight and bias variance', fontsize=14) format_plot('$\\sigma_w^2$', '$\\sigma_b^2$') finalize_plot((1, 1)) ``` As described above, the boundary between the two phases should be defined by $\chi_1(\sigma_w^2, \sigma_b^2) = 1$ where $\chi_1$ is given by the derivative of the $\mathcal C$-map. ``` plt.contourf(W_var, b_var, chi_1(W_var, b_var)) plt.colorbar() plt.title(r'$\chi^1$ as a function of weight and bias variance', fontsize=14) format_plot('$\\sigma_w^2$', '$\\sigma_b^2$') finalize_plot((1.15, 1)) ``` We can see that the boundary where $\chi_1$ crosses 1 corresponds to the phase boundary we observe above. 
``` plt.contourf(W_var, b_var, c_star(W_var, b_var) > 0.999, levels=3, colors=[[1.0, 0.89, 0.811], [0.85, 0.85, 1]]) plt.contourf(W_var, b_var, np.abs(chi_1(W_var, b_var) - 1) < 0.003, levels=[0.5, 1], colors=[[0, 0, 0]]) plt.title('Phase diagram in terms of weight and bias variance', fontsize=14) format_plot('$\\sigma_w^2$', '$\\sigma_b^2$') finalize_plot((1, 1)) ```
github_jupyter
### Generating names with a character-level RNN

In this notebook we follow up on the previous notebook, where we classified names' nationalities with a character-level RNN. This time we are going to generate names using a character-level RNN.

Example: _given a nationality and three starting characters, we want to generate some names based on those characters._

We will be following [this pytorch tutorial](https://pytorch.org/tutorials/intermediate/char_rnn_generation_tutorial.html).

The difference between this notebook and the previous one is that instead of predicting the class a name belongs to, we are going to output one letter at a time until we have generated a name. This could be done on a word level, but in our case we will do it on a character level.

### Data preparation

The dataset that we are going to use was downloaded [here](https://download.pytorch.org/tutorial/data.zip). This dataset has the nationality as the file name, and inside each file we will see the names that belong to that nationality. I've uploaded this dataset to my Google Drive so that we can load it easily.

### Mounting the drive

```
from google.colab import drive
drive.mount('/content/drive')

data_path = '/content/drive/My Drive/NLP Data/names-dataset/names'
```

### Imports

```
from __future__ import unicode_literals, print_function, division
import os, string, unicodedata, random
import torch
from torch import nn
from torch.nn import functional as F

torch.__version__

all_letters = string.ascii_letters + " .,;'-"
n_letters = len(all_letters) + 1 # Plus EOS marker
```

A function that converts all Unicode characters to ASCII.

```
def unicodeToAscii(s):
    return ''.join(
        c for c in unicodedata.normalize('NFD', s)
        if unicodedata.category(c) != 'Mn'
        and c in all_letters
    )

def read_lines(filename):
    with open(filename, encoding='utf-8') as some_file:
        return [unicodeToAscii(line.strip()) for line in some_file]

# Build the category_lines dictionary, a list of lines per category
category_lines = {}
all_categories = []
for filename in os.listdir(data_path):
    category = filename.split(".")[0]
    all_categories.append(category)
    lines = read_lines(os.path.join(data_path, filename))
    category_lines[category] = lines

n_categories = len(all_categories)
print('# categories:', n_categories, all_categories)
```

### Creating the Network

This network extends the one from the previous notebook with an extra argument for the category tensor, which is concatenated along with the other inputs. The category tensor is a one-hot vector, just like the letter input. We will output the most probable letter and use it as the input for the next letter.
![img](https://i.imgur.com/jzVrf7f.png)

```
class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size

        self.i2h = nn.Linear(n_categories + input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(n_categories + input_size + hidden_size, output_size)
        self.o2o = nn.Linear(hidden_size + output_size, output_size)
        self.dropout = nn.Dropout(0.1)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, category, input, hidden):
        input_combined = torch.cat((category, input, hidden), 1)
        hidden = self.i2h(input_combined)
        output = self.i2o(input_combined)
        output_combined = torch.cat((hidden, output), 1)
        output = self.o2o(output_combined)
        output = self.dropout(output)
        output = self.softmax(output)
        return output, hidden

    def initHidden(self):
        return torch.zeros(1, self.hidden_size)
```

### Training

First of all, helper functions to get random pairs of (category, line):

```
# Random item from a list
def randomChoice(l):
    return l[random.randint(0, len(l) - 1)]

# Get a random category and random line from that category
def randomTrainingPair():
    category = randomChoice(all_categories)
    line = randomChoice(category_lines[category])
    return category, line

# randomTrainingPair returns (category, line), so unpack in that order
cate, line = randomTrainingPair()
cate, line
```

For each timestep (that is, for each letter in a training word) the inputs of the network will be ``(category, current letter, hidden state)`` and the outputs will be ``(next letter, next hidden state)``. So for each training set, we'll need the category, a set of input letters, and a set of output/target letters.

Since we are predicting the next letter from the current letter for each timestep, the letter pairs are groups of consecutive letters from the line - e.g. for `"ABCD<EOS>"` we would create ("A", "B"), ("B", "C"), ("C", "D"), ("D", "EOS").

![img](https://i.imgur.com/JH58tXY.png)

The category tensor is a one-hot tensor of size `<1 x n_categories>`. When training we feed it to the network at every timestep - this is a design choice, it could have been included as part of the initial hidden state or some other strategy.

```
def category_tensor(category):
    li = all_categories.index(category)
    tensor = torch.zeros(1, n_categories)
    tensor[0][li] = 1
    return tensor  # a 1 x n_categories one-hot row vector

def input_tensor(line):
    tensor = torch.zeros(len(line), 1, n_letters)
    for li in range(len(line)):
        letter = line[li]
        tensor[li][0][all_letters.find(letter)] = 1
    return tensor

def target_tensor(line):
    letter_indexes = [all_letters.find(line[li]) for li in range(1, len(line))]
    letter_indexes.append(n_letters - 1) # EOS
    return torch.LongTensor(letter_indexes)
```

For convenience during training we'll make a `randomTrainingExample` function that fetches a random (category, line) pair and turns them into the required (category, input, target) tensors.

```
# Make category, input, and target tensors from a random category, line pair
def randomTrainingExample():
    category, line = randomTrainingPair()
    category_t = category_tensor(category)
    input_line_tensor = input_tensor(line)
    target_line_tensor = target_tensor(line)
    return category_t, input_line_tensor, target_line_tensor
```

### Training the Network

In contrast to classification, where only the last output is used, we are making a prediction at every step, so we are calculating loss at every step. The magic of autograd allows you to simply sum these losses at each step and call backward at the end.
``` criterion = nn.NLLLoss() learning_rate = 0.0005 def train(category_tensor, input_line_tensor, target_line_tensor): target_line_tensor.unsqueeze_(-1) hidden = rnn.initHidden() rnn.zero_grad() loss = 0 for i in range(input_line_tensor.size(0)): output, hidden = rnn(category_tensor, input_line_tensor[i], hidden) l = criterion(output, target_line_tensor[i]) loss += l loss.backward() for p in rnn.parameters(): p.data.add_(p.grad.data, alpha=-learning_rate) return output, loss.item() / input_line_tensor.size(0) ``` To keep track of how long training takes I am adding a `time_since(timestamp)` function which returns a human readable string: ``` import time, math def time_since(since): now = time.time() s = now - since m = math.floor(s / 60) s -= m * 60 return '%dm %ds' % (m, s) ``` Training is business as usual - call train a bunch of times and wait a few minutes, printing the current time and loss every `print_every` examples, and keeping store of an average loss per `plot_every` examples in `all_losses` for plotting later. ``` rnn = RNN(n_letters, 128, n_letters) n_iters = 100000 print_every = 5000 plot_every = 500 all_losses = [] total_loss = 0 # Reset every plot_every iters start = time.time() for iter in range(1, n_iters + 1): output, loss = train(*randomTrainingExample()) total_loss += loss if iter % print_every == 0: print('%s (%d %d%%) %.4f' % (time_since(start), iter, iter / n_iters * 100, loss)) if iter % plot_every == 0: all_losses.append(total_loss / plot_every) total_loss = 0 ``` ### Plotting the losses * Plotting the historical loss from all_losses shows the network learning: ``` import matplotlib.pyplot as plt plt.figure() plt.plot(all_losses) plt.show() ``` #### Sampling the network To sample we give the network a letter and ask what the next one is, feed that in as the next letter, and repeat until the `EOS` token. * Create tensors for input category, starting letter, and empty hidden state * Create a string output_name with the starting letter * Up to a maximum output length, * Feed the current letter to the network * Get the next letter from highest output, and next hidden state * If the letter is EOS, stop here * If a regular letter, add to output_name and continue * Return the final name > Rather than having to give it a starting letter, another strategy would have been to include a “start of string” token in training and have the network choose its own starting letter. 
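The note above mentions a "start of string" token as an alternative to supplying the first letter. The snippet below is a minimal, self-contained sketch of that idea; the `SOS` index, the extended alphabet size, and the tensor layout are assumptions for illustration, and the training loop and model input size would also have to change accordingly.

```
import string
import torch

# Hypothetical alternative (not part of the tutorial): reserve one extra index as a
# "start of string" (SOS) token so the network can pick its own first letter.
all_letters = string.ascii_letters + " .,;'-"
n_letters = len(all_letters) + 2   # +1 for EOS, +1 for the assumed SOS token
SOS = n_letters - 2                # assumed index of the SOS marker
EOS = n_letters - 1

def input_tensor_with_sos(line):
    # one extra time step at the front holding the SOS one-hot
    tensor = torch.zeros(len(line) + 1, 1, n_letters)
    tensor[0][0][SOS] = 1
    for li, letter in enumerate(line):
        tensor[li + 1][0][all_letters.find(letter)] = 1
    return tensor

print(input_tensor_with_sos("Anna").shape)  # torch.Size([5, 1, 60])
```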
``` max_length = 20 # Sample from a category and starting letter def sample(category, start_letter='A'): with torch.no_grad(): # no need to track history in sampling category_t = category_tensor(category) input = input_tensor(start_letter) hidden = rnn.initHidden() output_name = start_letter for i in range(max_length): output, hidden = rnn(category_t, input[0], hidden) topv, topi = output.topk(1) topi = topi[0][0] if topi == n_letters - 1: #eos break else: letter = all_letters[topi] output_name += letter input = input_tensor(letter) return output_name # Get multiple samples from one category and multiple starting letters def samples(category, start_letters='ABC'): for start_letter in start_letters: print(sample(category, start_letter)) samples('Russian', 'RUS') samples('German', 'GER') samples('Spanish', 'SPA') samples('Chinese', 'CHI') ``` ### Ref * [pytorch tutorial](https://pytorch.org/tutorials/intermediate/char_rnn_generation_tutorial.html) * [Understanding LSTM Networks](https://colah.github.io/posts/2015-08-Understanding-LSTMs/) * [The Unreasonable Effectiveness of Recurrent Neural Networks](https://karpathy.github.io/2015/05/21/rnn-effectiveness/) ``` ```
github_jupyter
# Shashank V. Sonar ## Task 5: Exploratory Data Analysis - Sports ### Step -1: Importing the required Libraries ``` import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sns %matplotlib inline from sklearn.cluster import KMeans from sklearn import datasets import warnings warnings.filterwarnings("ignore") import os import mpl_toolkits import json print('Libraries are imported Successfully') ``` ### Step-2: Importing the dataset ``` #Reading deliveries dataset df_deliveries=pd.read_csv('C:/Users/91814/Desktop/GRIP/Task 5/ipl/deliveries.csv',low_memory=False) print('Data Read Successfully') #Displaying the deliveries dataset df_deliveries #reading matches dataset df_matches=pd.read_csv('C:/Users/91814/Desktop/GRIP/Task 5/ipl/df_matches.csv',low_memory=False) print('Data Read Successfully') #displaying matches dataset df_matches ``` ### Step-3 Pre processing of Data ``` #displaying the first five rows of the matches dataset df_matches.head() df_matches.tail()#displaying the last five rows of the matches dataset df_matches['team1'].unique() #displaying team 1 df_matches['team2'].unique() #displaying team 2 df_deliveries['batting_team'].unique() #displaying the batting team df_deliveries['bowling_team'].unique() #displaying the bowling team #replacing with short team names in matches dataset df_matches.replace(['Royal Challengers Bangalore', 'Sunrisers Hyderabad', 'Rising Pune Supergiant', 'Mumbai Indians', 'Kolkata Knight Riders', 'Gujarat Lions', 'Kings XI Punjab', 'Delhi Daredevils', 'Chennai Super Kings', 'Rajasthan Royals', 'Deccan Chargers', 'Kochi Tuskers Kerala', 'Pune Warriors', 'Rising Pune Supergiants', 'Delhi Capitals'], ['RCB', 'SRH', 'PW', 'MI', 'KKR', 'GL', 'KXIP', 'DD','CSK','RR', 'DC', 'KTK','PW','RPS','DC'],inplace =True) #replacing with short team names in deliveries dataset df_deliveries.replace(['Royal Challengers Bangalore', 'Sunrisers Hyderabad', 'Rising Pune Supergiant', 'Mumbai Indians', 'Kolkata Knight Riders', 'Gujarat Lions', 'Kings XI Punjab', 'Delhi Daredevils', 'Chennai Super Kings', 'Rajasthan Royals', 'Deccan Chargers', 'Kochi Tuskers Kerala', 'Pune Warriors', 'Rising Pune Supergiants', 'Delhi Capitals'], ['RCB', 'SRH', 'PW', 'MI', 'KKR', 'GL', 'KXIP', 'DD','CSK','RR', 'DC', 'KTK','PW','RPS','DC'],inplace =True) print('Total Matches played:',df_matches.shape[0]) print('\n Venues played at:', df_matches['city'].unique()) print('\n Teams:', df_matches['team1'].unique) print('Total venues playes at:', df_matches['city'].nunique()) print('\n Total umpires:', df_matches['umpire1'].nunique()) print((df_matches['player_of_match'].value_counts()).idxmax(), ':has most man of the match awards') print((df_matches['winner'].value_counts()).idxmax(), ':has the highest number of match wins') df_matches.dtypes df_matches.nunique() ``` ### Full Data Summary ``` df_matches.info() ``` ### Statistical Summary of Data ``` df_matches.describe() ``` ### Observations ``` # The .csv file has data of ipl matches starting from the season 2008 to 2019. # The biggest margin of victory for the team batting first(win by runs) is 146. # The biggest victory of the team batting second(win by wickets) is by 10 Wickets. # 75% of the victorious teams that bat first won by the margin of 19 runs. # 75% of the victorious teams that bat second won by a margin of 6 Wickets. # There were 756 Ipl matches hosted from 2008 to 2019. 
```
### Columns in the data
```
df_matches.columns
```
### Getting Unique values of each column
```
for col in df_matches:
    print(df_matches[col].unique())
```
### Finding out Null values in Each Column
```
df_matches.isnull().sum()
```
### Dropping of Columns having a significant number of Null values
```
df_matches1 = df_matches.drop(columns=['umpire3'], axis=1)
```
### Verification of Dropped Column
```
df_matches
df_matches.isnull().sum()
df_matches.fillna(0, inplace=True)
df_matches
df_matches.isnull().sum()
# We have successfully replaced the Null values with 'Zeros'
df_deliveries.dtypes
df_deliveries.shape
df_deliveries.dtypes
df_deliveries.info()
df_deliveries.describe()
df_deliveries.columns
```
### Counting the Null Values in the data set
```
df_deliveries.isnull().sum()
```
### Total number of null values in the dataset
```
df_deliveries.isnull().sum().sum()
```
### Step - 4 Analysing the data
### Which Team had won by maximum runs?
```
df_matches.iloc[df_matches['win_by_runs'].idxmax()]
```
### Which Team had won by maximum wickets?
```
df_matches.iloc[df_matches['win_by_wickets'].idxmax()]
```
### Which Team had won by minimum Margin?
```
df_matches.iloc[df_matches[df_matches['win_by_runs'].ge(1)].win_by_runs.idxmin()]
```
### Which Team had won by Minimum Wickets?
```
df_matches.iloc[df_matches[df_matches['win_by_wickets'].ge(1)].win_by_wickets.idxmin()]
len(df_matches['season'].unique())
df_deliveries.fillna(0, inplace=True)
df_deliveries
df_deliveries.isnull().sum()
```
### The team with the most Wins per season
```
teams_per_season = df_matches.groupby('season')['winner'].value_counts()
teams_per_season
"""
for i, w in wins_per_season.iteritems():
    print(i, w)

for items in win_per_season.iteritems():
    print(items)
"""
year = 2008
win_per_season_df_matches = pd.DataFrame(columns=['year', 'team', 'wins'])
for items in teams_per_season.iteritems():
    if items[0][0] == year:
        print(items)
        win_series = pd.DataFrame({
            'year': [items[0][0]],
            'team': [items[0][1]],
            'wins': [items[1]]
        })
        win_per_season_df_matches = win_per_season_df_matches.append(win_series)
        year += 1
win_per_season_df_matches
```
### Step - 5 Data Visualising
```
venue_ser = df_matches['venue'].value_counts()
venue_df_matches = pd.DataFrame(columns=['venue', 'matches'])
for items in venue_ser.iteritems():
    temp_df_matches = pd.DataFrame({
        'venue': [items[0]],
        'matches': [items[1]]
    })
    venue_df_matches = venue_df_matches.append(temp_df_matches, ignore_index=True)
```
### IPL Venues
```
plt.title('Ipl Venues')
sns.barplot(x='matches', y='venue', data=venue_df_matches);
```
### Number of Matches played and venue
```
venue_df_matches
```
### The most Successful IPL team
```
team_wins_ser = df_matches['winner'].value_counts()
team_wins_df_matches = pd.DataFrame(columns=['team', 'wins'])
for items in team_wins_ser.iteritems():
    temp_df1 = pd.DataFrame({
        'team': [items[0]],
        'wins': [items[1]]
    })
    team_wins_df_matches = team_wins_df_matches.append(temp_df1, ignore_index=True)
```
### Finding the most successful IPL team
```
team_wins_df_matches
```
### IPL Victories by team
```
plt.title('Total victories of IPL Teams')
sns.barplot(x='wins', y='team', data=team_wins_df_matches, palette='ocean_r');
```
### Most valuable players
```
mvp_ser = df_matches['player_of_match'].value_counts()
mvp_10_df_matches = pd.DataFrame(columns=['player', 'wins'])
count = 0
for items in mvp_ser.iteritems():
    if count > 9:
        break
    else:
        temp_df2 = pd.DataFrame({
            'player': [items[0]],
            'wins': [items[1]]
        })
        mvp_10_df_matches = mvp_10_df_matches.append(temp_df2, ignore_index=True)
        count += 1
```
### 
Top 10 Most valuable players ``` mvp_10_df_matches plt.title("Top IPL Player") sns.barplot(x='wins', y='player', data =mvp_10_df_matches, palette ='cool') ``` ### Team that won the most number of toss ``` toss_ser =df_matches['toss_winner'].value_counts() toss_df_matches=pd.DataFrame(columns=['team','wins']) for items in toss_ser.iteritems(): temp_df3=pd.DataFrame({ 'team':[items[0]], 'wins':[items[1]] }) toss_df_matches = toss_df_matches.append(temp_df3,ignore_index=True) ``` ### Count of Number of toss wins and teams ``` toss_df_matches ``` ### Teams which has won More number of Toss ``` plt.title('Which team won more number of Toss') sns.barplot(x='wins', y='team', data=toss_df_matches, palette='Dark2') ``` ### Observations ``` # Mumbai Indians has won the most toss(till 2019) in ipl history. ``` ### Numbebr of Matches won by team ``` plt.figure(figsize = (18,10)) sns.countplot(x='winner',data=df_matches, palette='cool') plt.title("Numbers of matches won by team ",fontsize=20) plt.xticks(rotation=50) plt.xlabel("Teams",fontsize=15) plt.ylabel("No of wins",fontsize=15) plt.show() df_matches.result.value_counts() plt.subplots(figsize=(10,6)) sns.countplot(x='season', hue='toss_decision', data=df_matches) plt.show() ``` ### Maximum Toss Winner ``` plt.subplots(figsize=(10,8)) ax=df_matches['toss_winner'].value_counts().plot.bar(width=0.9,color=sns.color_palette('RdYlGn', 20)) for p in ax.patches: ax.annotate(format(p.get_height()),(p.get_x()+0.15, p.get_height()+1)) plt.show() ``` ### Matches played across each seasons ``` plt.subplots(figsize=(10,8)) sns.countplot(x='season', data=df_matches,palette=sns.color_palette('winter')) plt.show() ``` ### Top 10 Batsman from the dataset ``` plt.subplots(figsize=(10,6)) max_runs=df_deliveries.groupby(['batsman'])['batsman_runs'].sum() ax=max_runs.sort_values(ascending=False)[:10].plot.bar(width=0.8,color=sns.color_palette('winter_r',20)) for p in ax.patches: ax.annotate(format(p.get_height()),(p.get_x()+0.1, p.get_height()+50),fontsize=15) plt.show() ``` ### Number of matches won by Toss winning side ``` plt.figure(figsize = (18,10)) sns.countplot('season',hue='toss_winner',data=df_matches,palette='hsv') plt.title("Numbers of matches won by batting and bowling first ",fontsize=20) plt.xlabel("season",fontsize=15) plt.ylabel("Count",fontsize=15) plt.show() df_matches # we will print winner season wise final_matches=df_matches.drop_duplicates(subset=['season'], keep='last') final_matches[['season','winner']].reset_index(drop=True).sort_values('season') # we will print number of season won by teams final_matches["winner"].value_counts() # we will print toss winner, toss decision, winner in final matches. 
final_matches[['toss_winner','toss_decision','winner']].reset_index(drop=True) # we will print man of the match final_matches[['winner','player_of_match']].reset_index(drop=True) # we will print numbers of fours hit by team season_data=df_matches[['id','season','winner']] complete_data=df_deliveries.merge(season_data,how='inner',left_on='match_id',right_on='id') four_data=complete_data[complete_data['batsman_runs']==4] four_data.groupby('batting_team')['batsman_runs'].agg([('runs by fours','sum'),('fours','count')]) Toss=final_matches.toss_decision.value_counts() labels=np.array(Toss.index) sizes = Toss.values colors = ['#FFBF00', '#FA8072'] plt.figure(figsize = (10,8)) plt.pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%', shadow=True,startangle=90) plt.title('Toss Result', fontsize=20) plt.axis('equal') plt.show() # we will plot graph on four hit by players batsman_four=four_data.groupby('batsman')['batsman_runs'].agg([('four','count')]).reset_index().sort_values('four',ascending=0) ax=batsman_four.iloc[:10,:].plot('batsman','four',kind='bar',color='black') plt.title("Numbers of fours hit by playes ",fontsize=20) plt.xticks(rotation=50) plt.xlabel("Player name",fontsize=15) plt.ylabel("No of fours",fontsize=15) plt.show() # we will plot graph on no of four hit in each season ax=four_data.groupby('season')['batsman_runs'].agg([('four','count')]).reset_index().plot('season','four',kind='bar',color = 'red') plt.title("Numbers of fours hit in each season ",fontsize=20) plt.xticks(rotation=50) plt.xlabel("season",fontsize=15) plt.ylabel("No of fours",fontsize=15) plt.show() # we will print no of sixes hit by team six_data=complete_data[complete_data['batsman_runs']==6] six_data.groupby('batting_team')['batsman_runs'].agg([('runs by six','sum'),('sixes','count')]) # we will plot graph of six hit by players batsman_six=six_data.groupby('batsman')['batsman_runs'].agg([('six','count')]).reset_index().sort_values('six',ascending=0) ax=batsman_six.iloc[:10,:].plot('batsman','six',kind='bar',color='green') plt.title("Numbers of six hit by playes ",fontsize=20) plt.xticks(rotation=50) plt.xlabel("Player name",fontsize=15) plt.ylabel("No of six",fontsize=15) plt.show() # we will plot graph on no of six hit in each season ax=six_data.groupby('season')['batsman_runs'].agg([('six','count')]).reset_index().plot('season','six',kind='bar',color = 'blue') plt.title("Numbers of fours hit in each season ",fontsize=20) plt.xticks(rotation=50) plt.xlabel("season",fontsize=15) plt.ylabel("No of fours",fontsize=15) plt.show() # we will print no of matches played by batsman No_Matches_player= df_deliveries[["match_id","player_dismissed"]] No_Matches_player =No_Matches_player .groupby("player_dismissed")["match_id"].count().reset_index().sort_values(by="match_id",ascending=False).reset_index(drop=True) No_Matches_player.columns=["batsman","No_of Matches"] No_Matches_player .head(10) # Dismissals in IPL plt.figure(figsize=(18,10)) ax=sns.countplot(df_deliveries.dismissal_kind) plt.title("Dismissals in IPL",fontsize=20) plt.xlabel("Dismissals kind",fontsize=15) plt.ylabel("count",fontsize=15) plt.xticks(rotation=50) plt.show() wicket_data=df_deliveries.dropna(subset=['dismissal_kind']) wicket_data=wicket_data[~wicket_data['dismissal_kind'].isin(['run out','retired hurt','obstructing the field'])] # we will print ipl most wicket taking bowlers wicket_data.groupby('bowler')['dismissal_kind'].agg(['count']).reset_index().sort_values('count',ascending=False).reset_index(drop=True).iloc[:10,:] ``` ### Conclusion : 
- The seasons with the most matches were 2013, 2014 and 2015.
- Mumbai Indians won the most titles: 4 out of the 12 seasons.
- Teams that bowl first have a higher chance of winning than teams that bat first.
- After winning the toss, most teams decide to field first.
- In the finals, teams that choose to field first win more often than teams that bat first.
- In the finals, most toss-winning teams decide to field first.
- The top Player of the Match winners are CH Gayle and AB de Villiers.
- Interestingly, in 9 of the 12 IPL finals the team that won the toss also won the title.
- Shikhar Dhawan hit the highest number of fours.
- CH Gayle hit the highest number of sixes.
- The leading run scorers in the IPL are Virat Kohli, SK Raina and RG Sharma.
- Most dismissals in the IPL were catches.
- The leading wicket-taker in the IPL is Harbhajan Singh.
- The players with the most matches played are SK Raina and RG Sharma.
github_jupyter
# Random Signals *This jupyter notebook is part of a [collection of notebooks](../index.ipynb) on various topics of Digital Signal Processing. Please direct questions and suggestions to [[email protected]](mailto:[email protected]).* ## Independent Processes The independence of random signals is a desired property in many applications of statistical signal processing, as well as uncorrelatedness and orthogonality. The concept of independence is introduced in the following together with a discussion of the links to uncorrelatedness and orthogonality. ### Definition Two stochastic events are said to be [independent](https://en.wikipedia.org/wiki/Independence_(probability_theory%29) if the probability of occurrence of one event is not affected by the occurrence of the other event. Or more specifically, if their joint probability equals the product of their individual probabilities. In terms of the bivariate probability density function (PDF) of two continuous-amplitude real-valued random processes $x[k]$ and $y[k]$ this reads \begin{equation} p_{xy}(\theta_x, \theta_y, k_x, k_y) = p_x(\theta_x, k_x) \cdot p_y(\theta_y, k_y) \end{equation} where $p_x(\theta_x, k_x)$ and $p_y(\theta_y, k_y)$ denote the univariate ([marginal](https://en.wikipedia.org/wiki/Marginal_distribution)) PDFs of the random processes for the time-instances $k_x$ and $k_y$, respectively. The bivariate PDF of two independent random processes is given by the multiplication of their univariate PDFs. It follows that the [second-order ensemble average](ensemble_averages.ipynb#Second-Order-Ensemble-Averages) for a linear mapping is given as \begin{equation} E\{ x[k_x] \cdot y[k_y] \} = E\{ x[k_x] \} \cdot E\{ y[k_y] \} \end{equation} The linear second-order ensemble average of two independent random signals is equal to the multiplication of their linear first-order ensemble averages. For jointly wide-sense stationary (WSS) processes, the bivariate PDF does only depend on the difference $\kappa = k_x - k_y$ of the time instants. Hence, two jointly WSS random signals are independent if \begin{equation} \begin{split} p_{xy}(\theta_x, \theta_y, \kappa) &= p_x(\theta_x, k_x) \cdot p_y(\theta_y, k_x - \kappa) \\ &= p_x(\theta_x) \cdot p_y(\theta_y, \kappa) \end{split} \end{equation} Above bivariate PDF is rewritten using the definition of [conditional probabilities](https://en.wikipedia.org/wiki/Conditional_probability) in order to specialize the definition of independence to one WSS random signal $x[k]$ \begin{equation} p_{xy}(\theta_x, \theta_y, \kappa) = p_{y|x}(\theta_x, \theta_y, \kappa) \cdot p_x(\theta_x) \end{equation} where $p_{y|x}(\theta_x, \theta_y, \kappa)$ denotes the conditional probability that $y[k - \kappa]$ takes the amplitude value $\theta_y$ under the condition that $x[k]$ takes the amplitude value $\theta_x$. Under the assumption that $y[k-\kappa] = x[k-\kappa]$ and substituting $\theta_x$ and $\theta_y$ by $\theta_1$ and $\theta_2$, independence for one random signal is defined as \begin{equation} p_{xx}(\theta_1, \theta_2, \kappa) = \begin{cases} p_x(\theta_1) \cdot \delta(\theta_2 - \theta_1) & \text{for } \kappa = 0 \\ p_x(\theta_1) \cdot p_x(\theta_2, \kappa) & \text{for } \kappa \neq 0 \end{cases} \end{equation} since the conditional probability $p_{x[k]|x[k-\kappa]}(\theta_1, \theta_2, \kappa) = \delta(\theta_2 - \theta_1)$ for $\kappa = 0$ since this represents a sure event. 
The bivariate PDF of an independent random signal is equal to the product of the univariate PDFs of the signal and the time-shifted signal for $\kappa \neq 0$. A random signal for which this condition does not hold shows statistical dependencies between samples. These dependencies can be exploited for instance for coding or prediction. #### Example - Comparison of bivariate PDF and product of marginal PDFs The following example estimates the bivariate PDF $p_{xx}(\theta_1, \theta_2, \kappa)$ of a WSS random signal $x[k]$ by computing its two-dimensional histogram. The univariate PDFs $p_x(\theta_1)$ and $p_x(\theta_2, \kappa)$ are additionally estimated. Both the estimated bivariate PDF and the product of the two univariate PDFs $p_x(\theta_1) \cdot p_x(\theta_2, \kappa)$ are plotted for different $\kappa$. ``` import numpy as np import matplotlib.pyplot as plt %matplotlib inline N = 10000000 # number of random samles M = 50 # number of bins for bivariate/marginal histograms def compute_plot_histograms(kappa): # shift signal x2 = np.concatenate((x1[kappa:], np.zeros(kappa))) # compute bivariate and marginal histograms pdf_xx, x1edges, x2edges = np.histogram2d(x1, x2, bins=(M,M), range=((-1.5, 1.5),(-1.5, 1.5)), normed=True) pdf_x1, _ = np.histogram(x1, bins=M, range=(-1.5, 1.5), density=True) pdf_x2, _ = np.histogram(x2, bins=M, range=(-1.5, 1.5), density=True) # plot results fig = plt.figure(figsize=(10, 10)) plt.subplot(121, aspect='equal') plt.pcolormesh(x1edges, x2edges, pdf_xx) plt.xlabel(r'$\theta_1$') plt.ylabel(r'$\theta_2$') plt.title(r'Bivariate PDF $p_{{xy}}(\theta_1, \theta_2, \kappa)$') plt.colorbar(fraction=0.046) plt.subplot(122, aspect='equal') plt.pcolormesh(x1edges, x2edges, np.outer(pdf_x1, pdf_x2)) plt.xlabel(r'$\theta_1$') plt.ylabel(r'$\theta_2$') plt.title(r'Product of PDFs $p_x(\theta_1) \cdot p_x(\theta_2, \kappa)$') plt.colorbar(fraction=0.046) fig.suptitle('Shift $\kappa =$ {:<2.0f}'.format(kappa), y=0.72) fig.tight_layout() # generate signal x = np.random.normal(size=N) x1 = np.convolve(x, [1, .5, .3, .7, .3], mode='same') # compute and plot the PDFs for various shifts compute_plot_histograms(0) compute_plot_histograms(2) compute_plot_histograms(20) ``` **Exercise** * With the given results, how can you evaluate the independence of the random signal? * Can the random signal assumed to be independent? Solution: According to the definition of independence, the bivariate PDF and the product of the univariate PDFs has to be equal for $\kappa \neq 0$. This is obviously not the case for $\kappa=2$. Hence, the random signal is not independent in a strict sense. However for $\kappa=20$ the condition for independence is sufficiently fulfilled, considering the statistical uncertainty due to a finite number of samples. ### Independence versus Uncorrelatedness Two continuous-amplitude real-valued jointly WSS random processes $x[k]$ and $y[k]$ are termed as [uncorrelated](correlation_functions.ipynb#Properties) if their cross-correlation function (CCF) is equal to the product of their linear means, $\varphi_{xy}[\kappa] = \mu_x \cdot \mu_y$. If two random signals are independent then they are also uncorrelated. 
This can be proven by introducing above findings for the linear second-order ensemble average of independent random signals into the definition of the CCF \begin{equation} \varphi_{xy}[\kappa] = E \{ x[k] \cdot y[k - \kappa] \} = E \{ x[k] \} \cdot E \{ y[k - \kappa] \} = \mu_x \cdot \mu_y \end{equation} where the last equality is a consequence of the assumed wide-sense stationarity. The reverse, that two uncorrelated signals are also independent does not hold in general from this result. The auto-correlation function (ACF) of an [uncorrelated signal](correlation_functions.ipynb#Properties) is given as $\varphi_{xx}[\kappa] = \mu_x^2 + \sigma_x^2 \cdot \delta[\kappa]$. Introducing the definition of independence into the definition of the ACF yields \begin{equation} \begin{split} \varphi_{xx}[\kappa] &= E \{ x[k] \cdot x[k - \kappa] \} \\ &= \begin{cases} E \{ x^2[k] \} & \text{for } \kappa = 0 \\ E \{ x[k] \} \cdot E \{ x[k - \kappa] \} & \text{for } \kappa \neq 0 \end{cases} \\ &= \begin{cases} \mu_x^2 + \sigma_x^2 & \text{for } \kappa = 0 \\ \mu_x^2 & \text{for } \kappa \neq 0 \end{cases} \\ &= \mu_x^2 + \sigma_x^2 \delta[\kappa] \end{split} \end{equation} where the result for $\kappa = 0$ follows from the bivariate PDF $p_{xx}(\theta_1, \theta_2, \kappa)$ of an independent signal, as derived above. It can be concluded from this result that an independent random signal is also uncorrelated. The reverse, that an uncorrelated signal is independent does not hold in general. ### Independence versus Orthogonality In geometry, two vectors are said to be [orthogonal](https://en.wikipedia.org/wiki/Orthogonality) if their dot product equals zero. This definition is frequently applied to finite-length random signals by interpreting them as vectors. The relation between independence, correlatedness and orthogonality is derived in the following. Let's assume two continuous-amplitude real-valued jointly wide-sense ergodic random signals $x_N[k]$ and $y_M[k]$ with finite lengths $N$ and $M$, respectively. The CCF $\varphi_{xy}[\kappa]$ between both can be reformulated as follows \begin{equation} \begin{split} \varphi_{xy}[\kappa] &= \frac{1}{N} \sum_{k=0}^{N-1} x_N[k] \cdot y_M[k-\kappa] \\ &= \frac{1}{N} < \mathbf{x}_N, \mathbf{y}_M[\kappa] > \end{split} \end{equation} where $<\cdot, \cdot>$ denotes the [dot product](https://en.wikipedia.org/wiki/Dot_product). The $(N+2M-2) \times 1$ vector $\mathbf{x}_N$ is defined as $$\mathbf{x}_N = \left[ \mathbf{0}^T_{(M-1) \times 1}, x[0], x[1], \dots, x[N-1], \mathbf{0}^T_{(M-1) \times 1} \right]^T$$ where $\mathbf{0}_{(M-1) \times 1}$ denotes the zero vector of length $M-1$. The $(N+2M-2) \times 1$ vector $\mathbf{y}_M[\kappa]$ is defined as $$\mathbf{y}_M = \left[ \mathbf{0}^T_{\kappa \times 1}, y[0], y[1], \dots, y[M-1], \mathbf{0}^T_{(N+M-2-\kappa) \times 1} \right]^T$$ It follows from above definition of orthogonality that two finite-length random signals are orthogonal if their CCF is zero. This implies that at least one of the two signals has to be mean free. It can be concluded further that two independent random signals are also orthogonal and uncorrelated if at least one of them is mean free. The reverse, that orthogonal signals are independent, does not hold in general. The concept of orthogonality can also be extended to one random signal by setting $\mathbf{y}_M[\kappa] = \mathbf{x}_N[\kappa]$. Since a random signal cannot be orthogonal to itself for $\kappa = 0$, the definition of orthogonality has to be extended for this case. 
According to the ACF of a mean-free uncorrelated random signal $x[k]$, self-orthogonality may be defined as \begin{equation} \frac{1}{N} < \mathbf{x}_N, \mathbf{x}_N[\kappa] > = \begin{cases} \sigma_x^2 & \text{for } \kappa = 0 \\ 0 & \text{for } \kappa \neq 0 \end{cases} \end{equation} An independent random signal is also orthogonal if it is zero-mean. The reverse, that an orthogonal signal is independent does not hold in general. #### Example - Computation of cross-correlation by dot product This example illustrates the computation of the CCF by the dot product. First, a function is defined which computes the CCF by means of the dot product ``` def ccf_by_dotprod(x, y): N = len(x) M = len(y) xN = np.concatenate((np.zeros(M-1), x, np.zeros(M-1))) yM = np.concatenate((y, np.zeros(N+M-2))) return np.fromiter([np.dot(xN, np.roll(yM, kappa)) for kappa in range(N+M-1)], float) ``` Now the CCF is computed using different methods: computation by the dot product and by the built-in correlation function. The CCF is plotted for the computation by the dot product, as well as the difference (magnitude) between both methods. The resulting difference is in the typical expected range due to numerical inaccuracies. ``` N = 32 # length of signals # generate signals np.random.seed(1) x = np.random.normal(size=N) y = np.convolve(x, [1, .5, .3, .7, .3], mode='same') # compute CCF ccf1 = 1/N * np.correlate(x, y, mode='full') ccf2 = 1/N * ccf_by_dotprod(x, y) kappa = np.arange(-N+1, N) # plot results plt.figure(figsize=(10, 4)) plt.subplot(121) plt.stem(kappa, ccf1) plt.xlabel('$\kappa$') plt.ylabel(r'$\varphi_{xy}[\kappa]$') plt.title('CCF by dot product') plt.grid() plt.subplot(122) plt.stem(kappa, np.abs(ccf1-ccf2)) plt.xlabel('$\kappa$') plt.title('Difference (magnitude)') plt.tight_layout() ``` **Copyright** This notebook is provided as [Open Educational Resource](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *Sascha Spors, Digital Signal Processing - Lecture notes featuring computational examples, 2016-2018*.
github_jupyter
# Model Development V1 - This is really more like scratchwork - Divide this into multiple notebooks for easier reading **Reference** - http://zacstewart.com/2014/08/05/pipelines-of-featureunions-of-pipelines.html ``` import json import pickle from pymongo import MongoClient import numpy as np import pandas as pd from matplotlib import pyplot as plt %matplotlib inline import nltk import os from nltk.corpus import stopwords from sklearn.utils.extmath import randomized_svd # gensim from gensim import corpora, models, similarities, matutils # sklearn from sklearn import datasets from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer from sklearn.naive_bayes import MultinomialNB from sklearn.model_selection import train_test_split from sklearn.cluster import KMeans from sklearn.neighbors import KNeighborsClassifier import sklearn.metrics.pairwise as smp import logging logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) ``` # NYT Corpus ## Read data in - pickle from mongod output on amazon ec2 instance scp -i ~/.ssh/aws_andrew [email protected]:/home/andrew/Notebooks/initial-model-df.pkl ~/ds/metis/challenges/ ``` with open('initial-model-df.pkl', 'rb') as nyt_data: df = pickle.load(nyt_data) df.shape df.columns df.head(30) df1 = df.dropna() ``` ## LSI Preprocessing ``` # docs = data['lead_paragraph'][0:100] docs = df1['lead_paragraph'] docs.shape for doc in docs: doc = doc.decode("utf8") # create a list of stopwords stopwords_set = frozenset(stopwords.words('english')) # Update iterator to remove stopwords class SentencesIterator(object): # giving 'stop' a list of stopwords would exclude them def __init__(self, dirname, stop=None): self.dirname = dirname def __iter__(self): # os.listdr is ALSO a generator for fname in os.listdir(self.dirname): for line in open(os.path.join(self.dirname, fname),encoding="latin-1"): # at each step, gensim needs a list of words line = line.lower().split() if stop: outline = [] for word in line: if word not in stopwords_set: outline.append(word) yield outline else: yield line docs1 = docs.dropna() for doc in docs1: doc = SentencesIterator(doc.decode("utf8")) docs = pd.Series.tolist(docs1) tfidf = TfidfVectorizer(stop_words="english", token_pattern="\\b[a-zA-Z][a-zA-Z]+\\b", min_df=10) tfidf_vecs = tfidf.fit_transform(docs) tfidf_vecs.shape # it's too big to see in a dataframe: # pd.DataFrame(tfidf_vecs.todense(), # columns=tfidf.get_feature_names() # ).head(30) ``` ## BASELINE: Multinomial Naive Bayes Classification - language is fundamentally different - captures word choice ``` pd.DataFrame(tfidf_vecs.todense(), columns=tfidf.get_feature_names() ).head() df1.shape, tfidf_vecs.shape # Train/Test split X_train, X_test, y_train, y_test = train_test_split(tfidf_vecs, df1['source'], test_size=0.33) # Train nb = MultinomialNB() nb.fit(X_train, y_train) # Test nb.score(X_test, y_test) ``` # LSI Begin **Essentially, this has been my workflow so far:** 1. TFIDF in sklearn --> output a sparse corpus matrix DTM 2. LSI (SVD) in gensim --> output a 300 dim matrix TDM - Analyze topic vectors 3. Viewed LSI[tfidf] ``` # terms by docs instead of docs by terms tfidf_corpus = matutils.Sparse2Corpus(tfidf_vecs.transpose()) # Row indices id2word = dict((v, k) for k, v in tfidf.vocabulary_.items()) # This is a hack for Python 3! 
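# note: tfidf.vocabulary_ maps word -> column index, so the dict built above inverts it to
# column index -> word; Dictionary.from_corpus below scans the sparse corpus and wraps that
# mapping in a gensim Dictionary, which is then passed as id2word to LsiModel/LdaModel.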
id2word = corpora.Dictionary.from_corpus(tfidf_corpus, id2word=id2word) # Build an LSI space from the input TFIDF matrix, mapping of row id to word, and num_topics # num_topics is the number of dimensions (k) to reduce to after the SVD # Analagous to "fit" in sklearn, it primes an LSI space trained to 300-500 dimensions lsi = models.LsiModel(tfidf_corpus, id2word=id2word, num_topics=300) # Retrieve vectors for the original tfidf corpus in the LSI space ("transform" in sklearn) lsi_corpus = lsi[tfidf_corpus] # pass using square brackets # what are the values given by lsi? (topic distributions) # ALSO, IT IS LAZY! IT WON'T ACTUALLY DO THE TRANSFORMING COMPUTATION UNTIL ITS CALLED. IT STORES THE INSTRUCTIONS # Dump the resulting document vectors into a list so we can take a look doc_vecs = [doc for doc in lsi_corpus] doc_vecs[0] #print the first document vector for all the words ``` ## Doc-Term Cosine Similarity using LSI Corpus - cosine similarity of [docs to terms](http://localhost:8888/notebooks/ds/metis/classnotes/5.24.17%20Vector%20Space%20Models%2C%20NMF%2C%20W2V.ipynb#Toy-Example:-Conceptual-Similarity-Between-Arbitrary-Text-Blobs) ``` # Convert the gensim-style corpus vecs to a numpy array for sklearn manipulations nyt_lsi = matutils.corpus2dense(lsi_corpus, num_terms=300).transpose() nyt_lsi.shape lsi.show_topic(0) # Create an index transformer that calculates similarity based on our space index = similarities.MatrixSimilarity(lsi_corpus, num_features=len(id2word)) # all docs by 300 topic vectors (word vectors) pd.DataFrame(nyt_lsi).head() # need to transform by cosine similarity # look up if I need to change into an LDA corpus # take the mean of every word vector! (averaged across all document vectors) df.mean() # describes word usage ('meaning') across the body of documents in the nyt corpus # answers the question: what 'topics' has the nyt been talking about the most over 2005-2015? df.mean().sort_values() ``` # Sorted doc-doc cosine similarity! ``` # Create an index transformer that calculates similarity based on our space index = similarities.MatrixSimilarity(doc_vecs, num_features=len(id2word)) # Return the sorted list of cosine similarities to the first document sims = sorted(enumerate(index[doc_vecs[0]]), key=lambda item: -item[1]) sims # Document 1491 is very similar (.66) to document 0 # Let's take a look at how we did by analyzing syntax for sim_doc_id, sim_score in enumerate(sims[0:30]): print("DocumentID: {}, Similarity Score: {} ".format(sim_score[0], sim_score[1])) print("Headline: " + str(df1.iloc[sim_doc_id].headline.decode('utf-8'))) print("Lead Paragraph: " + str(df1.iloc[sim_doc_id].lead_paragraph.decode('utf-8'))) print("Publish Date: " + str(df1.iloc[sim_doc_id].date)) print('\n') ``` ## Pass into KMeans Clustering ``` # Convert the gensim-style corpus vecs to a numpy array for sklearn manipulations (back to docs to terms matrix) nyt_lsi = matutils.corpus2dense(lsi_corpus, num_terms=300).transpose() nyt_lsi.shape # Create KMeans. kmeans = KMeans(n_clusters=3) # Cluster nyt_lsi_clusters = kmeans.fit_predict(nyt_lsi) # Take a look. It likely didn't do cosine distances. 
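# note: scikit-learn's KMeans always minimises squared Euclidean distance and cannot be
# switched to cosine; L2-normalising the LSI vectors first (sklearn.preprocessing.normalize)
# makes Euclidean and cosine nearest-neighbour rankings equivalent, which is usually what
# is wanted for document vectors.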
print(nyt_lsi_clusters[0:50]) print("Lead Paragraph: \n" + str(df1.iloc[0:5].lead_paragraph)) ``` ## LSA Begin ``` lda = models.LdaModel(corpus=tfidf_corpus, num_topics=20, id2word=id2word, passes=3) lda.print_topics() lda_corpus = lda[tfidf_corpus] nyt_lda = matutils.corpus2dense(lda_corpus, num_terms=20).transpose() df3 = pd.DataFrame(nyt_lda) df3.mean().sort_values(ascending=False).head(10) ``` ## Logistic Regression / Random Forest - <s>Tried KNN Classifier </s> Destroyed me - probabilistic classification on a spectrum from nyt to natl enq ``` from sklearn.neighbors import KNeighborsClassifier import sklearn.metrics.pairwise as smp # Train/Test X_train, X_test, y_train, y_test = train_test_split(nyt_lsi, df1['source'], test_size=0.33) # X_train = X_train.reshape(1,-1) # X_test = X_test.reshape(1,-1) y_train = np.reshape(y_train.values, (-1,1)) y_test = np.reshape(y_test.values, (-1,1)) X_train.shape, X_test.shape y_train.shape, y_test.shape # WARNING: This ruined me # Need pairwise Cosine for KNN # Fit KNN classifier to training set with cosine distance. One of the best algorithms for clustering documents # knn = KNeighborsClassifier(n_neighbors=3, metric=smp.cosine_distances) # knn.fit(X_train, y_train) # knn.score(X_test, y_test) ``` # PHASE 2: pull in natl enq data - mix in labels, source labels - pull labels (source category in nyt) - Review Nlp notes - Feature trans & Pipelines - Gensim doc2vec ``` with open('mag-model-df.pkl', 'rb') as mag_data: df1 = pickle.load(mag_data) df1.head() df1.dropna(axis=0, how='all') df1.shape docs2 = df1['lead_paragraph'] docs2 = docs2.dropna() for doc in docs2: doc = SentencesIterator(doc) docs = pd.Series.tolist(docs2) tfidf = TfidfVectorizer(stop_words="english", token_pattern="\\b[a-zA-Z][a-zA-Z]+\\b", min_df=10) tfidf_vecs = tfidf.fit_transform(docs) tfidf_vecs.shape ``` ## BASELINE: Multinomial Naive Bayes ``` pd.DataFrame(tfidf_vecs.todense(), columns=tfidf.get_feature_names() ).head() # Train/Test split X_train, X_test, y_train, y_test = train_test_split(tfidf_vecs, df1['source'], test_size=0.33) # Train nb = MultinomialNB() nb.fit(X_train, y_train) # Test nb.score(X_test, y_test) ``` ## LSA Begin 2 ``` # terms by docs instead of docs by terms tfidf_corpus = matutils.Sparse2Corpus(tfidf_vecs.transpose()) # Row indices id2word = dict((v, k) for k, v in tfidf.vocabulary_.items()) # This is a hack for Python 3! id2word = corpora.Dictionary.from_corpus(tfidf_corpus, id2word=id2word) lda = models.LdaModel(corpus=tfidf_corpus, num_topics=20, id2word=id2word, passes=3) lda.print_topics() lda_corpus = lda[tfidf_corpus] nyt_lda = matutils.corpus2dense(lda_corpus, num_terms=20).transpose() df3 = pd.DataFrame(nyt_lda) df3.mean().sort_values(ascending=False).head(10) ``` # Future Work ===================================== # Troubleshoot doc2vec - look into the output of this ``` from gensim.models.doc2vec import Doc2Vec, TaggedDocument from pprint import pprint import multiprocessing # Create doc2Vec model d2v = doc2vec.Doc2Vec(tfidf_corpus,min_count=3,workers=5) ``` # PHASE 3: Visualize clusters - [NLP visualization PyLDAvis](https://github.com/bmabey/pyLDAvis) - [Bokeh](http://bokeh.pydata.org/en/latest/) - [Bqplot](https://github.com/bloomberg/bqplot) - I'd rather not d3... 
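Before the word-cloud export below, here is a minimal sketch of the pyLDAvis option listed above, reusing the `lda` model, `tfidf_corpus` and `id2word` dictionary already built in this notebook (the `gensim_models` module name is from pyLDAvis 3.x; older releases expose it as `pyLDAvis.gensim`):

```
import pyLDAvis
import pyLDAvis.gensim_models as gensimvis  # pyLDAvis.gensim in older releases

pyLDAvis.enable_notebook()
vis = gensimvis.prepare(lda, tfidf_corpus, id2word)
vis  # interactive inter-topic distance map and per-topic term bars
```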
``` with open('nyt-model-df.pkl', 'rb') as nyt_data: df = pickle.load(nyt_data) with open('mag-model-df.pkl', 'rb') as mag_data: df1 = pickle.load(mag_data) # select the relevant columns from each article dataset nyt_df = df[['lead_paragraph', 'source']] mag_df = df1[['lead_paragraph', 'source']] # For the word cloud: https://www.jasondavies.com/wordcloud/ nyt_df['lead_paragraph'].to_csv('nyt-text.csv', index=False) # For the word cloud: https://www.jasondavies.com/wordcloud/ mag_df['lead_paragraph'].to_csv('mag-text.csv', index=False) !ls ```
github_jupyter
``` import numpy as np import pandas as pd from keras.models import * from keras.layers import Input, merge, Conv2D, MaxPooling2D, UpSampling2D, Dropout, Cropping2D from keras.optimizers import * from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau from datetime import datetime import time import sys import configparser import json import pickle import matplotlib.pyplot as plt #%matplotlib inline from unet.generator import * from unet.loss import * from unet.maskprocessor import * from unet.visualization import * from unet.modelfactory import * ``` This notebook trains the Road Segmentation model. The exported .py script takes in the config filename via a command line parameter. To run this notebook directly in jupyter notebook, please manually set config_file to point to a configuration file (e.g. cfg/default.cfg). ``` # command line args processing "python RoadSegmentor.py cfg/your_config.cfg" if len(sys.argv) > 1 and '.cfg' in sys.argv[1]: config_file = sys.argv[1] else: config_file = 'cfg/default.cfg' #print('missing argument. please provide config file as argument. syntax: python RoadSegmentor.py <config_file>') #exit(0) print('reading configurations from config file: {}'.format(config_file)) settings = configparser.ConfigParser() settings.read(config_file) x_data_dir = settings.get('data', 'train_x_dir') y_data_dir = settings.get('data', 'train_y_dir') print('x_data_dir: {}'.format(x_data_dir)) print('y_data_dir: {}'.format(y_data_dir)) data_csv_path = settings.get('data', 'train_list_csv') print('model configuration options:', settings.options("model")) model_dir = settings.get('model', 'model_dir') print('model_dir: {}'.format(model_dir)) timestr = time.strftime("%Y%m%d-%H%M%S") model_id = settings.get('model', 'id') print('model: {}'.format(model_id)) optimizer_label = 'Adam' # default if settings.has_option('model', 'optimizer'): optimizer_label = settings.get('model', 'optimizer') if settings.has_option('model', 'source'): model_file = settings.get('model', 'source') print('model_file: {}'.format(model_file)) else: model_file = None learning_rate = settings.getfloat('model', 'learning_rate') max_number_epoch = settings.getint('model', 'max_epoch') print('learning rate: {}'.format(learning_rate)) print('max epoch: {}'.format(max_number_epoch)) min_learning_rate = 0.000001 if settings.has_option('model', 'min_learning_rate'): min_learning_rate = settings.getfloat('model', 'min_learning_rate') print('minimum learning rate: {}'.format(min_learning_rate)) lr_reduction_factor = 0.1 if settings.has_option('model', 'lr_reduction_factor'): lr_reduction_factor = settings.getfloat('model', 'lr_reduction_factor') print('lr_reduction_factor: {}'.format(lr_reduction_factor)) batch_size = settings.getint('model', 'batch_size') print('batch size: {}'.format(batch_size)) input_width = settings.getint('model', 'input_width') input_height = settings.getint('model', 'input_height') img_gen = CustomImgGenerator(x_data_dir, y_data_dir, data_csv_path) train_gen = img_gen.trainGen(batch_size=batch_size, is_Validation=False) validation_gen = img_gen.trainGen(batch_size=batch_size, is_Validation=True) timestr = time.strftime("%Y%m%d-%H%M%S") model_filename = model_dir + '{}-{}.hdf5'.format(model_id, timestr) print('model checkpoint file path: {}'.format(model_filename)) # Early stopping prevents overfitting on training data # Make sure the patience value for EarlyStopping > patience value for ReduceLROnPlateau. 
# Otherwise ReduceLROnPlateau will never be called. early_stop = EarlyStopping(monitor='val_loss', patience=3, min_delta=0, verbose=1, mode='auto') model_checkpoint = ModelCheckpoint(model_filename, monitor='val_loss', verbose=1, save_best_only=True) reduceLR = ReduceLROnPlateau(monitor='val_loss', factor=lr_reduction_factor, patience=2, verbose=1, min_lr=min_learning_rate, epsilon=1e-4) training_start_time = datetime.now() number_validations = img_gen.validation_samples_count() samples_per_epoch = img_gen.training_samples_count() modelFactory = ModelFactory(num_channels = 3, img_rows = input_height, img_cols = input_width) if model_file is not None: model = load_model(model_dir + model_file, custom_objects={'dice_coef_loss': dice_coef_loss, 'dice_coef': dice_coef, 'binary_crossentropy_dice_loss': binary_crossentropy_dice_loss}) else: model = modelFactory.get_model(model_id) print(model.summary()) if optimizer_label == 'Adam': optimizer = Adam(lr = learning_rate) elif optimizer_label == 'RMSprop': optimizer = RMSprop(lr = learning_rate) else: raise ValueError('unsupported optimizer: {}'.format(optimizer_label)) model.compile(optimizer = optimizer, loss = dice_coef_loss, metrics = ['accuracy', dice_coef]) history = model.fit_generator(generator=train_gen, steps_per_epoch=np.ceil(float(samples_per_epoch) / float(batch_size)), validation_data=validation_gen, validation_steps=np.ceil(float(number_validations) / float(batch_size)), epochs=max_number_epoch, verbose=1, callbacks=[model_checkpoint, early_stop, reduceLR]) time_spent_trianing = datetime.now() - training_start_time print('model training complete. time spent: {}'.format(time_spent_trianing)) print(history.history) historyFilePath = model_dir + '{}-{}-train-history.png'.format(model_id, timestr) trainingHistoryPlot(model_id + timestr, historyFilePath, history.history) pickleFilePath = model_dir + '{}-{}-history-dict.pickle'.format(model_id, timestr) with open(pickleFilePath, 'wb') as handle: pickle.dump(history.history, handle, protocol=pickle.HIGHEST_PROTOCOL) ```
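Since the history dict is pickled at the end of each run, it can be inspected later without re-training. A minimal sketch (the file name below is a hypothetical example of the `<model>-<timestamp>-history-dict.pickle` naming scheme used above; `loss` and `val_loss` are always present because validation data is passed to `fit_generator`):

```
import pickle
import matplotlib.pyplot as plt

# hypothetical path following the naming scheme used above
history_path = 'models/unet-20180101-120000-history-dict.pickle'

with open(history_path, 'rb') as handle:
    history_dict = pickle.load(handle)

plt.plot(history_dict['loss'], label='training loss')
plt.plot(history_dict['val_loss'], label='validation loss')
plt.xlabel('epoch')
plt.ylabel('dice coefficient loss')
plt.legend()
plt.show()
```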
github_jupyter
``` from neo4j import GraphDatabase import json with open('credentials.json') as json_file: credentials = json.load(json_file) username = credentials['username'] pwd = credentials['password'] ``` ### NOTE ❣️ * BEFORE running this, still need to run `bin\neo4j console` to enable bolt on 127.0.0.1:7687 * When the queryyou wrote is wrong, the error will show connection or credential has problem, you don't really need to restart the server, after the query has been corrected, everything will be running fine. #### Userful Links * Results can be outputed: https://neo4j.com/docs/api/python-driver/current/results.html ``` driver = GraphDatabase.driver("bolt://localhost:7687", auth=(username, pwd)) def delete_all(tx): result = tx.run("""match (n) detach delete n""").single() if result is None: print('Removed All!') def create_entity(tx, entity_id, entity_name, entity_properties): query = """CREATE ("""+entity_id+""": """+entity_name+entity_properties+""")""" result = tx.run(query) def display_all(tx, query): results = tx.run(query) for record in results: print(record) return results.graph() with driver.session() as session: session.write_transaction(delete_all) session.write_transaction(create_entity, entity_id='Alice', entity_name='Client', entity_properties = "{name:'Alice', ip: '1.1.1.1', shipping_address: 'a place', billing_address: 'a place'}") graph = session.read_transaction(display_all, query="MATCH (c:Client) RETURN c") def create_all(tx, query): result = tx.run(query) query = """ // Clients CREATE (Alice:Client {name:'Alice', ip: '1.1.1.1', shipping_address: 'a place', billing_address: 'a place'}) CREATE (Bob:Client {name:'Bob', ip: '1.1.1.2', shipping_address: 'b place', billing_address: 'b place'}) CREATE (Cindy:Client {name:'Cindy', ip: '1.1.1.3', shipping_address: 'c place', billing_address: 'c place'}) CREATE (Diana:Client {name:'Diana', ip: '1.1.1.4', shipping_address: 'd place', billing_address: 'd place'}) CREATE (Emily:Client {name:'Emily', ip: '1.1.1.5', shipping_address: 'e place', billing_address: 'e place'}) CREATE (Fiona:Client {name:'Fiona', ip: '1.1.1.6', shipping_address: 'f place', billing_address: 'f place'}) // Products CREATE (prod1:Product {name: 'strawberry ice-cream', category: 'ice-cream', price: 6.9, unit: 'box'}) CREATE (prod2:Product {name: 'mint ice-cream', category: 'ice-cream', price: 6.9, unit: 'box'}) CREATE (prod3:Product {name: 'mango ice-cream', category: 'ice-cream', price: 6.9, unit: 'box'}) CREATE (prod4:Product {name: 'cheesecake ice-cream', category: 'ice-cream', price: 7.9, unit: 'box'}) CREATE (prod5:Product {name: 'orange', category: 'furit', unit: 'lb', price: 2.6, unit: 'box'}) CREATE (prod6:Product {name: 'dragon fruit', category: 'furit', unit: 'lb', price: 4.8, unit: 'box'}) CREATE (prod7:Product {name: 'kiwi', category: 'furit', unit: 'lb', price: 5.3, unit: 'box'}) CREATE (prod8:Product {name: 'cherry', category: 'furit', unit: 'lb', price: 4.8, unit: 'box'}) CREATE (prod9:Product {name: 'strawberry', category: 'furit', unit: 'lb', price: 3.9, unit: 'box'}) // Orders CREATE (d1:Order {id:'d1', name:'d1', deliverdate:'20190410', status:'delivered'}) CREATE (d2:Order {id:'d2', name:'d2', deliverdate:'20130708', status:'delivered'}) CREATE (d3:Order {id:'d3', name:'d3', deliverdate:'20021201', status:'delivered'}) CREATE (d4:Order {id:'d4', name:'d4', deliverdate:'20040612', status:'delivered'}) CREATE (d5:Order {id:'d5', name:'d5', deliverdate:'20110801', status:'delivered'}) CREATE (d6:Order {id:'d6', name:'d6',deliverdate:'20171212', 
status:'delivered'}) // Link Clients, Orders and ProductsCREATE CREATE (Alice)-[:PLACED]->(d1)-[:CONTAINS {quantity:1}]->(prod1), (d1)-[:CONTAINS {quantity:2}]->(prod2), (Bob)-[:PLACED]->(d2)-[:CONTAINS {quantity:2}]->(prod1), (d2)-[:CONTAINS {quantity:6}]->(prod7), (Cindy)-[:PLACED]->(d3)-[:CONTAINS {quantity:1}]->(prod9), (Alice)-[:PLACED]->(d4)-[:CONTAINS {quantity:100}]->(prod4), (Alice)-[:PLACED]->(d5)-[:CONTAINS {quantity:10}]->(prod8), (Alice)-[:PLACED]->(d6)-[:CONTAINS {quantity:1}]->(prod7); """ with driver.session() as session: session.write_transaction(delete_all) session.write_transaction(create_all, query) graph = session.read_transaction(display_all, query="""MATCH (c:Client)-[:PLACED]-(o)-[:CONTAINS]->(p) return c, o, p;""") graph with driver.session() as session: graph = session.read_transaction(display_all, query="""MATCH (c:Client {name:'Alice'})-[:PLACED]->(o)-[cts:CONTAINS]->(p) WITH c, o, SUM(cts.quantity * p.price) as order_price ORDER BY o.deliverdate WITH c.name AS name, COLLECT(o) AS os, COLLECT(order_price) as ops UNWIND [i IN RANGE(0, SIZE(os)-1) | {name: name, id: os[i].id, current_order_cost: round(ops[i]), other_orders: [x IN os[0..i] + os[i+1..SIZE(os)] | x.id], other_orders_costs: [x IN ops[0..i] + ops[i+1..SIZE(os)] | round(x)] }] AS result WITH result.name as name, result.id as order_id, result.current_order_cost as current_order_cost, result.other_orders as other_orders, result.other_orders_costs as other_orders_costs UNWIND(other_orders_costs) as unwind_other_orders_costs return name, order_id, current_order_cost, other_orders, other_orders_costs, round(stDev(unwind_other_orders_costs)) as other_costs_std;""") graph ```
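Two small follow-ups that may be useful at this point: a simpler aggregate query as a sanity check on the graph created above, and closing the driver so its Bolt connections are released. A sketch reusing the `display_all` helper defined earlier:

```
# total spend per client, largest first - a quick sanity check on the graph above
with driver.session() as session:
    session.read_transaction(
        display_all,
        query="""MATCH (c:Client)-[:PLACED]->(o:Order)-[cts:CONTAINS]->(p:Product)
                 RETURN c.name AS name, round(SUM(cts.quantity * p.price)) AS total_spend
                 ORDER BY total_spend DESC""")

# the Bolt driver keeps TCP connections open; close it when the notebook is done
driver.close()
```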
github_jupyter
This notebook shows the MEP quickstart sample, which also exists as a non-notebook version at: https://bitbucket.org/vitotap/python-spark-quickstart It shows how to use Spark (http://spark.apache.org/) for distributed processing on the PROBA-V Mission Exploitation Platform. (https://proba-v-mep.esa.int/) The sample intentionally implements a very simple computation: for each PROBA-V tile in a given bounding box and time range, a histogram is computed. The results are then summed and printed. Computation of the histograms runs in parallel. ## First step: get file paths A catalog API is available to easily retrieve paths to PROBA-V files: https://readthedocs.org/projects/mep-catalogclient/ ``` from catalogclient import catalog cat=catalog.Catalog() cat.get_producttypes() date = "2016-01-01" products = cat.get_products('PROBAV_L3_S1_TOC_333M', fileformat='GEOTIFF', startdate=date, enddate=date, min_lon=0, max_lon=10, min_lat=36, max_lat=53) #extract NDVI geotiff files from product metadata files = [p.file('NDVI')[5:] for p in products] print('Found '+str(len(files)) + ' files.') print(files[0]) #check if file exists !file {files[0]} ``` ## Second step: define function to apply Define the histogram function, this can also be done inline, which allows for a faster feedback loop when writing the code, but here we want to clearly separate the processing 'algorithm' from the parallelization code. ``` # Calculates the histogram for a given (single band) image file. def histogram(image_file): import numpy as np import gdal # Open image file img = gdal.Open(image_file) if img is None: print( '-ERROR- Unable to open image file "%s"' % image_file ) # Open raster band (first band) raster = img.GetRasterBand(1) xSize = img.RasterXSize ySize = img.RasterYSize # Read raster data data = raster.ReadAsArray(0, 0, xSize, ySize) # Calculate histogram hist, _ = np.histogram(data, bins=256) return hist ``` ## Third step: setup Spark To work on the processing cluster, we need to specify the resources we want: * spark.executor.cores: Number of cores per executor. Usually our tasks are single threaded, so 1 is a good default. * spark.executor.memory: memory to assign per executor. For the Java/Spark processing, not the Python part. * spark.yarn.executor.memoryOverhead: memory available for Python in each executor. We set up the SparkConf with these parameters, and create a SparkContext sc, which will be our access point to the cluster. ``` %%time # ================================================================ # === Calculate the histogram for a given number of files. The === # === processing is performed by spreading them over a cluster === # === of Spark nodes. === # ================================================================ from datetime import datetime from operator import add import pyspark import os # Setup the Spark cluster conf = pyspark.SparkConf() conf.set('spark.yarn.executor.memoryOverhead', 512) conf.set('spark.executor.memory', '512m') sc = pyspark.SparkContext(conf=conf) ``` ## Fourth step: compute histograms We use a couple of Spark functions to run our job on the cluster. Comments are provided in the code. ``` %%time # Distribute the local file list over the cluster. filesRDD = sc.parallelize(files,len(files)) # Apply the 'histogram' function to each filename using 'map', keep the result in memory using 'cache'. 
hists = filesRDD.map(histogram).cache() count = hists.count() # Combine distributed histograms into a single result total = list(hists.reduce(lambda h, i: map(add, h, i))) hists.unpersist() print( "Sum of %i histograms: %s" % (count, total)) #stop spark session if we no longer need it sc.stop() ``` ## Fifth step: plot our result Plot the array of values as a simple line chart using matplotlib. This is the most basic Python library. More advanced options such as bokeh, mpld3 and seaborn are also available. ``` %matplotlib inline import matplotlib.pyplot as plt plt.plot(total) plt.show() ```
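One subtlety in the `histogram` function above: `np.histogram` with only `bins=256` derives its bin edges from each tile's own minimum and maximum, so the bins of different tiles are not guaranteed to line up before they are summed in the reduce step. A variant that pins the bin edges keeps the sum well defined; the 0-255 value range below is an assumption about the NDVI digital numbers and should be adjusted to the product actually used:

```
import numpy as np

def histogram_fixed_bins(image_file, value_range=(0, 255), bins=256):
    # gdal is imported inside the function so the import also happens on the Spark executors
    import gdal

    img = gdal.Open(image_file)
    if img is None:
        raise IOError('Unable to open image file "%s"' % image_file)

    data = img.GetRasterBand(1).ReadAsArray()
    # fixed range -> identical bin edges for every tile, so histograms can be summed
    hist, _ = np.histogram(data, bins=bins, range=value_range)
    return hist

# drop-in replacement in the Spark pipeline above:
# hists = filesRDD.map(histogram_fixed_bins).cache()
```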
github_jupyter
``` from statsmodels.stats.outliers_influence import variance_inflation_factor from sklearn.model_selection import KFold from sklearn.datasets import make_regression from sklearn.linear_model import LinearRegression from sklearn.metrics import r2_score from sklearn.model_selection import cross_val_score %matplotlib inline %config InlineBackend.figure_formats = {'png', 'retina'} pd.options.mode.chained_assignment = None # default='warn'? # data_key DataFrame data_key = pd.read_csv('key.csv') # data_train DataFrame data_train = pd.read_csv('train.csv') # data_weather DataFrame data_weather = pd.read_csv('weather.csv') rain_text = ['FC', 'TS', 'GR', 'RA', 'DZ', 'SN', 'SG', 'GS', 'PL', 'IC', 'FG', 'BR', 'UP', 'FG+'] other_text = ['HZ', 'FU', 'VA', 'DU', 'DS', 'PO', 'SA', 'SS', 'PY', 'SQ', 'DR', 'SH', 'FZ', 'MI', 'PR', 'BC', 'BL', 'VC' ] data_weather['codesum'].replace("+", "") a = [] for i in range(len(data_weather['codesum'])): a.append(data_weather['codesum'].values[i].split(" ")) for i_text in a[i]: if len(i_text) == 4: a[i].append(i_text[:2]) a[i].append(i_text[2:]) data_weather["nothing"] = 1 data_weather["rain"] = 0 data_weather["other"] = 0 b = -1 for ls in a: b += 1 for text in ls: if text in rain_text: data_weather.loc[b, 'rain'] = 1 data_weather.loc[b, 'nothing'] = 0 elif text in other_text: data_weather.loc[b,'other'] = 1 data_weather.loc[b, 'nothing'] = 0 # 모든 데이터 Merge df = pd.merge(data_weather, data_key) station_nbr = df['station_nbr'] df.drop('station_nbr', axis=1, inplace=True) df['station_nbr'] = station_nbr df = pd.merge(df, data_train) # T 값 처리 하기. Remained Subject = > 'M' and '-' df['snowfall'][df['snowfall'] == ' T'] = 0.05 df['preciptotal'][df['preciptotal'] == ' T'] = 0.005 # 주말과 주중 구분 작업 하기 df['date'] = pd.to_datetime(df['date']) df['week7'] = df['date'].dt.dayofweek df['weekend'] = 0 df.loc[df['week7'] == 5, 'weekend'] = 1 df.loc[df['week7'] == 6, 'weekend'] = 1 df1 = df[df['station_nbr'] == 1]; df11 = df[df['station_nbr'] == 11] df2 = df[df['station_nbr'] == 2]; df12 = df[df['station_nbr'] == 12] df3 = df[df['station_nbr'] == 3]; df13 = df[df['station_nbr'] == 13] df4 = df[df['station_nbr'] == 4]; df14 = df[df['station_nbr'] == 14] df5 = df[df['station_nbr'] == 5]; df15 = df[df['station_nbr'] == 15] df6 = df[df['station_nbr'] == 6]; df16 = df[df['station_nbr'] == 16] df7 = df[df['station_nbr'] == 7]; df17 = df[df['station_nbr'] == 17] df8 = df[df['station_nbr'] == 8]; df18 = df[df['station_nbr'] == 18] df9 = df[df['station_nbr'] == 9]; df19 = df[df['station_nbr'] == 19] df10 = df[df['station_nbr'] == 10]; df20 = df[df['station_nbr'] == 20] df4 = df4.apply(pd.to_numeric, errors = 'coerce') df4.describe().iloc[:, 14:] # 없는 Column = codesum, station_nbr, date, store_nbr df4_drop_columns = ['date', 'station_nbr', 'codesum', 'store_nbr'] df4 = df4.drop(columns = df4_drop_columns) df4['store_nbr'].unique() # np.nan를 포함하고 있는 변수(column)를 찾아서, 그 변수에 mean 값 대입해서 Frame의 모든 Value가 fill 되게 하기. df4_columns = df4.columns # Cateogry 값을 포함하는 변수는 np.nan에 mode값으로 대체하고, 나머지 실수 값을 포함한 변수는 np.nan에 mean값으로 대체 for i in df4_columns: if (i == 'resultdir'): df4[i].fillna(df4[i].mode()[0], inplace=True) print(df4[i].mode()[0]) else: df4[i].fillna(df4[i].mean(), inplace=True) # 이제 모든 변수가 숫자로 표기 되었기 때문에, 가능 함. 
# 상대 습도 추가 # df4['relative_humility'] = 100*(np.exp((17.625*((df4['dewpoint']-32)/1.8))/(243.04+((df4['dewpoint']-32)/1.8)))/np.exp((17.625*((df4['tavg']-32)/1.8))/(243.04+((df4['tavg']-32)/1.8)))) # 체감온도 계산 df4["windchill"] = 35.74 + 0.6215*df4["tavg"] - 35.75*(df4["avgspeed"]**0.16) + 0.4275*df4["tavg"]*(df4["avgspeed"]**0.16) df4 = df4[df4['units'] != 0] model_df4 = sm.OLS.from_formula('np.log1p(units) ~ tmax + tmin + tavg + dewpoint + wetbulb + heat + cool + preciptotal + stnpressure + \ sealevel + resultspeed + resultdir + avgspeed + C(nothing) + C(rain) + C(other) + C(item_nbr) + C(week7) + \ C(weekend) + relative_humility + windchill + 0', data = df4) result_df4 = model_df4.fit() print(result_df4.summary()) anova_result_df4 = sm.stats.anova_lm(result_df4, typ=2).sort_values(by=['PR(>F)'], ascending = False) anova_result_df4[anova_result_df4['PR(>F)'] <= 0.05] vif = pd.DataFrame() vif["VIF Factor"] = [variance_inflation_factor(df4.values, i) for i in range(df4.shape[1])] vif["features"] = df4.columns vif = vif.sort_values("VIF Factor").reset_index(drop=True) vif # 10순위까지 겹치는것만 쓴다 # item_nbr, weekend, week7, preciptotal # item_nbr, weekend, week7, preciptotal model_df4 = sm.OLS.from_formula('np.log1p(units) ~ C(item_nbr) + C(week7) + C(weekend) + scale(preciptotal) + 0', data = df4) result_df4 = model_df4.fit() print(result_df4.summary()) X4 = df4[['week7', 'weekend', 'item_nbr', 'preciptotal']] y4 = df4['units'] model4 = LinearRegression() cv4 = KFold(n_splits=10, shuffle=True, random_state=0) cross_val_score(model4, X4, y4, scoring="r2", cv=cv4) ```
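One caveat on the cross-validation above: `X4` feeds `item_nbr` and `week7` to `LinearRegression` as plain integer codes, whereas the OLS formula treats them as categorical via `C()`. A sketch that reuses the same patsy formula to build a one-hot design matrix, so the sklearn cross-validation sees the same features (and the same log-scale target) as the OLS fit:

```
import numpy as np
import patsy
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold, cross_val_score

# same formula as the final OLS fit above, evaluated by patsy into a design matrix
y4d, X4d = patsy.dmatrices(
    'np.log1p(units) ~ C(item_nbr) + C(week7) + C(weekend) + scale(preciptotal) + 0',
    data=df4, return_type='dataframe')

model4 = LinearRegression()
cv4 = KFold(n_splits=10, shuffle=True, random_state=0)
print(cross_val_score(model4, X4d, np.ravel(y4d), scoring='r2', cv=cv4))
```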
github_jupyter
# Title **Exercise: B.1 - MLP by Hand** # Description In this exercise, we will **construct a neural network** to classify 3 species of iris. The classification is based on 4 measurement predictor variables: sepal length & width, and petal length & width in the given dataset. <img src="../img/image5.jpeg" style="width: 500px;"> # Instructions: The Neural Network will be built from scratch using pre-trained weights and biases. Hence, we will only be doing the forward (i.e., prediction) pass. - Load the iris dataset from sklearn standard datasets. - Assign the predictor and response variables appropriately. - One hot encode the categorical labels of the predictor variable. - Load and inspect the pre-trained weights and biases. - Construct the MLP: - Augment X with a column of ones to create the augmented design matrix X - Create the first layer weight matrix by vertically stacking the bias vector on top of the weight vector - Perform the affine transformation - Activate the output of the affine transformation using ReLU - Repeat the first 3 steps for the hidden layer (augment, vertical stack, affine) - Use softmax on the final layer - Finally, predict y # Hints: This will further develop our intuition for the architecture of a deep neural network. This diagram shows the structure of our network. You may find it useful to refer to it during the exercise. <img src="../img/image6.png" style="width: 500px;"> This is our first encounter with a multi-class classification problem and also the softmax activation on the output layer. Note: $f_1()$ above is the ReLU activation and $f_2()$ is the softmax. <a href="https://www.tensorflow.org/api_docs/python/tf/keras/utils/to_categorical" target="_blank">to_categorical(y, num_classes=None, dtype='float32')</a> : Converts a class vector (integers) to the binary class matrix. <a href="https://numpy.org/doc/stable/reference/generated/numpy.vstack.html" target="_blank">np.vstack(tup)</a> : Stack arrays in sequence vertically (row-wise). <a href="https://numpy.org/doc/stable/reference/generated/numpy.dot.html" target="_blank">numpy.dot(a, b, out=None)</a> : Returns the dot product of two arrays. <a href="https://numpy.org/doc/stable/reference/generated/numpy.argmax.html" target="_blank">numpy.argmax(a, axis=None, out=None)</a> : Returns the indices of the maximum values along an axis. Note: This exercise is **auto-graded and you can try multiple attempts.** ``` #Import library import numpy as np import matplotlib.pyplot as plt import tensorflow as tf from sklearn.datasets import load_iris from tensorflow.keras.utils import to_categorical %matplotlib inline #Load the iris data iris_data = load_iris() #Get the predictor and reponse variables X = iris_data.data y = iris_data.target #See the shape of the data print(f'X shape: {X.shape}') print(f'y shape: {y.shape}') #One-hot encode target labels Y = to_categorical(y) print(f'Y shape: {Y.shape}') ``` Load and inspect the pre-trained weights and biases. Compare their shapes to the NN diagram. ``` #Load and inspect the pre-trained weights and biases weights = np.load('data/weights.npy', allow_pickle=True) # weights for hidden (1st) layer w1 = weights[0] # biases for hidden (1st) layer b1 = weights[1] # weights for output (2nd) layer w2 = weights[2] #biases for output (2nd) layer b2 = weights[3] #Compare their shapes to that in the NN diagram. 
for arr, name in zip([w1,b1,w2,b2], ['w1','b1','w2','b2']): print(f'{name} - shape: {arr.shape}') print(arr) print() ``` For the first affine transformation we need to multiple the augmented input by the first weight matrix (i.e., layer). $$ \begin{bmatrix} 1 & X_{11} & X_{12} & X_{13} & X_{14}\\ 1 & X_{21} & X_{22} & X_{23} & X_{24}\\ \vdots & \vdots & \vdots & \vdots & \vdots \\ 1 & X_{n1} & X_{n2} & X_{n3} & X_{n4}\\ \end{bmatrix} \begin{bmatrix} b_{1}^1 & b_{2}^1 & b_{3}^1\\ W_{11}^1 & W_{12}^1 & W_{13}^1\\ W_{21}^1 & W_{22}^1 & W_{23}^1\\ W_{31}^1 & W_{32}^1 & W_{33}^1\\ W_{41}^1 & W_{42}^1 & W_{43}^1\\ \end{bmatrix} = \begin{bmatrix} z_{11}^1 & z_{12}^1 & z_{13}^1\\ z_{21}^1 & z_{22}^1 & z_{23}^1\\ \vdots & \vdots & \vdots \\ z_{n1}^1 & z_{n2}^1 & z_{n3}^1\\ \end{bmatrix} = \textbf{Z}^{(1)} $$ <span style='color:gray'>About the notation: superscript refers to the layer and subscript refers to the index in the particular matrix. So $W_{23}^1$ is the weight in the 1st layer connecting the 2nd input to 3rd hidden node. Compare this matrix representation to the slide image. Also note the bias terms and ones that have been added to 'augment' certain matrices. You could consider $b_1^1$ to be $W_{01}^1$.</span><div></div> <span style='color:blue'>1. Augment X with a column of ones to create `X_aug`</span><div></div><span style='color:blue'>2. Create the first layer weight matrix `W1` by vertically stacking the bias vector `b1`on top of `w1` (consult `add_ones_col` for ideas. Don't forget your `Tab` and `Shift+Tab` tricks!)</span><div></div><span style='color:blue'>3. Do the matrix multiplication to find `Z1`</span> ``` def add_ones_col(X): '''Augment matrix with a column of ones''' X_aug = np.hstack((np.ones((X.shape[0],1)), X)) return X_aug #Use add_ones_col() X_aug = add_ones_col(___) #Use np.vstack to add biases to weight matrix W1 = np.vstack((___,___)) #Use np.dot() to multiple X_aug and W1 Z1 = np.dot(___,___) ``` Next, we use our non-linearity $$ \textit{a}_{\text{relu}}(\textbf{Z}^{(1)})= \begin{bmatrix} h_{11} & h_{12} & h_{13}\\ h_{21} & h_{22} & h_{23}\\ \vdots & \vdots & \vdots \\ h_{n1} & h_{n2} & h_{n3}\\ \end{bmatrix}= \textbf{H} $$ <span style='color:blue'>1. Define the ReLU activation</span><div></div> <span style='color:blue'>2. use `plot_activation_func` to confirm implementation</span><div></div> <span style='color:blue'>3. Use relu on `Z1` to create `H`</span> ``` def relu(z: np.array) -> np.array: # hint: # relu(z) = 0 when z < 0 # otherwise relu(z) = z # your code here h = np.maximum(___,___) # np.maximum() will help return h #Helper code to plot the activation function def plot_activation_func(f, name): lin_x = np.linspace(-10,10,200) h = f(lin_x) plt.plot(lin_x, h) plt.xlabel('x') plt.ylabel('y') plt.title(f'{name} Activation Function') plot_activation_func(relu, name='RELU') # use your relu activation function on Z1 H = relu(___) ``` The next step is very similar to the first and so we've filled it in for you. $$ \begin{bmatrix} 1 & h_{11} & h_{12} & h_{13}\\ 1 & h_{21} & h_{22} & h_{23}\\ \vdots & \vdots & \vdots & \vdots \\ 1 & h_{n1} & h_{n2} & h_{n3}\\ \end{bmatrix} \begin{bmatrix} b_{1}^{(2)} & b_{2}^2 & b_{3}^2\\ W_{11}^2 & W_{12}^2 & W_{13}^2\\ W_{21}^2 & W_{22}^2 & W_{23}^2\\ W_{31}^2 & W_{32}^2 & W_{33}^2\\ \end{bmatrix}= \begin{bmatrix} z_{11}^2 & z_{12}^2 & z_{13}^2\\ z_{21}^2 & z_{22}^2 & z_{23}^2\\ \vdots & \vdots & \vdots \\ z_{n1}^2 & z_{n2}^2 & z_{n3}^2\\ \end{bmatrix} = \textbf{Z}^{(2)} $$ <span style='color:blue'>1. 
Augment `H` with ones to create `H_aug`</span><div></div> <span style='color:blue'>2. Combine `w2` and `b2` to create the output weight matric `W2`</span><div></div> <span style='color:blue'>3. Perform the matrix multiplication to produce `Z2`</span><div></div> ``` #Use add_ones_col() H_aug = ___ #Use np.vstack to add biases to weight matrix W2 = ___ #Use np.dot() Z2 = np.dot(H_aug,W2) ``` Finally we use the softmax activation on `Z2`. Now for each observation we have an output vector of length 3 which can be interpreted as a probability (they sum to 1). $$ \textit{a}_{\text{softmax}}(\textbf{Z}^2)= \begin{bmatrix} \hat{y}_{11} & \hat{y}_{12} & \hat{y}_{13}\\ \hat{y}_{21} & \hat{y}_{22} & \hat{y}_{23}\\ \vdots & \vdots & \vdots \\ \hat{y}_{n1} & \hat{y}_{n2} & \hat{y}_{n3}\\ \end{bmatrix} = \hat{\textbf{Y}} $$ <span style='color:blue'>1. Define softmax</span><div></div> <span style='color:blue'>2. Use `softmax` on `Z2` to create `Y_hat`</span><div></div> ``` def softmax(z: np.array) -> np.array: ''' Input: z - 2D numpy array of logits rows are observations, classes are columns Returns: y_hat - 2D numpy array of probabilities rows are observations, classes are columns ''' # hint: we are summing across the columns y_hat = np.exp(___)/np.sum(np.exp(___), axis=___, keepdims=True) return y_hat #Calling the softmax function Y_hat = softmax(___) ``` <span style='color:blue'>Now let's see how accuract the model's predictions are! Use `np.argmax` to collapse the columns of `Y_hat` to create `y_hat`, a vector of class labels like the original `y` before one-hot encoding.</span><div></div> ``` ### edTest(test_acc) ### # Compute the accuracy y_hat = np.argmax(___, axis=___) acc = sum(y == y_hat)/len(y) print(f'accuracy: {acc:.2%}') ```
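If you want to sanity-check your filled-in cells, here is one self-contained way the whole forward pass can be written, using the same pre-trained `w1`, `b1`, `w2`, `b2` and `X` loaded above. It is a sketch with a numerically stabilised softmax, not necessarily the graded solution:

```
import numpy as np

def relu(z):
    # elementwise max(0, z)
    return np.maximum(0, z)

def softmax(z):
    # subtract the row-wise max before exponentiating for numerical stability
    z = z - z.max(axis=1, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)

def forward(X, w1, b1, w2, b2):
    """Forward pass of the 4-3-3 network described in the diagram above."""
    X_aug = np.hstack((np.ones((X.shape[0], 1)), X))  # (n, 5)
    W1 = np.vstack((b1, w1))                          # (5, 3)
    H = relu(X_aug @ W1)                              # (n, 3) hidden activations

    H_aug = np.hstack((np.ones((H.shape[0], 1)), H))  # (n, 4)
    W2 = np.vstack((b2, w2))                          # (4, 3)
    return softmax(H_aug @ W2)                        # (n, 3) class probabilities

# sanity check against the ground-truth labels loaded earlier
preds = np.argmax(forward(X, w1, b1, w2, b2), axis=1)
print(f'accuracy: {np.mean(preds == y):.2%}')
```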
github_jupyter
# Code for Chapter 1. In this case we will review some of the basic R functions and coding paradigms we will use throughout this book. This includes loading, viewing, and cleaning raw data; as well as some basic visualization. This specific case we will use data from reported UFO sightings to investigate what, if any, seasonal trends exists in the data. ## Load data ``` import pandas as pd df = pd.read_csv('data/ufo/ufo_awesome.tsv', sep='\t', error_bad_lines=False, header=None) df.shape #error_bad_lines=False - for some lines the last column which contains a description, contains also invalid character \t, which is separator for us. We ignore it. df.head() ``` ## Set columns names ``` df.columns = ["DateOccurred","DateReported","Location","ShortDescription", "Duration","LongDescription"] ``` ## Check data size for the DateOccurred and DateReported columns ``` import matplotlib.pyplot as plt %matplotlib inline df['DateOccurred'].astype(str).str.len().plot(kind='hist') df['DateReported'].astype(str).str.len().plot(kind='hist') ``` ## Remove rows with incorrect dates (length not eqauls 8) ``` mask = (df['DateReported'].astype('str').str.len() == 8) & (df['DateOccurred'].astype('str').str.len() == 8) df = df.loc[mask] df.shape ``` ## Convert the DateReported and DateOccurred columns to Date type ``` df['DateReported'] = pd.to_datetime(df['DateReported'], format='%Y%m%d', errors='coerce') df['DateOccurred'] = pd.to_datetime(df['DateOccurred'], format='%Y%m%d', errors='coerce') ``` ## Split the 'Locaton' column to two new columns: 'City' and 'State' ``` df_location = df['Location'].str.partition(', ')[[0, 2]] df_location.columns = ['USCity', 'USState'] df['USCity'] = df_location['USCity'] df['USState'] = df_location['USState'] df.head() ``` ## Keep rows only with correct US states ``` USStates = ["AK","AL","AR","AZ","CA","CO","CT","DE","FL","GA","HI","IA","ID","IL", "IN","KS","KY","LA","MA","MD","ME","MI","MN","MO","MS","MT","NC","ND","NE","NH", "NJ","NM","NV","NY","OH","OK","OR","PA","RI","SC","SD","TN","TX","UT","VA","VT", "WA","WI","WV","WY"] df = df[df['USState'].isin(USStates)] df.head() ``` ## Creating a histogram of frequencies for UFO sightings over time ``` df['DateOccurred'].dt.year.plot(kind='hist', bins=15, title='Exploratory histogram of UFO data over time') ``` ## We will only look at incidents that occurred from 1990 to the most recent ``` df = df[(df['DateOccurred'] >= '1990-01-01')] df['DateOccurred'].dt.year.plot(kind='hist', bins=15, title='Histogram of subset UFO data over time (1900 - 2010)') ``` ## Create finally histogram of subset UFO data over time (1900 - 2010) by US state ``` import matplotlib.pyplot as plt plt.style.use('ggplot') # set up figure & axes fig, axes = plt.subplots(nrows=10, ncols=5, sharex=True, sharey=True, figsize=(18, 12), dpi= 80) # drop sharex, sharey, layout & add ax=axes df['YearOccurred'] = df['DateOccurred'].dt.year df.hist(column='YearOccurred',by='USState', ax=axes) # set title and axis labels plt.suptitle('Number of UFO sightings by Month-Year and U.S. State (1990-2010)', x=0.5, y=1, ha='center', fontsize='xx-large') fig.text(0.5, 0.06, 'Times', ha='center', fontsize='x-large') fig.text(0.05, 0.5, 'Number of Sightings', va='center', rotation='vertical', fontsize='x-large') plt.savefig('images/ufo_sightings.pdf', format='pdf') ```
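Note that the panel plot above bins by year even though its title mentions Month-Year. For the seasonal question posed at the start of the chapter, counting sightings per calendar month (and per month-year for the long-run trend) is more direct; a sketch continuing from the `df` built above:

```
import matplotlib.pyplot as plt

# seasonality: sightings per calendar month, pooled over all years and states
df['MonthOccurred'] = df['DateOccurred'].dt.month
df.groupby('MonthOccurred').size().plot(
    kind='bar', figsize=(10, 4), title='UFO sightings by calendar month (1990-2010)')
plt.show()

# long-run trend: sightings per month-year
df.groupby(df['DateOccurred'].dt.to_period('M')).size().plot(
    figsize=(12, 4), title='UFO sightings per month-year (1990-2010)')
plt.show()
```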
github_jupyter
# Attempting to load higher order ASPECT elements An initial attempt at loading higher order element output from ASPECT. The VTU files have elements with a VTU type of `VTK_LAGRANGE_HEXAHEDRON` (VTK ID number 72, https://vtk.org/doc/nightly/html/classvtkLagrangeHexahedron.html#details), corresponding to 2nd order (quadratic) hexahedron, resulting in 27 nodes. Some useful links about this type of FEM output: * https://blog.kitware.com/modeling-arbitrary-order-lagrange-finite-elements-in-the-visualization-toolkit/ * https://github.com/Kitware/VTK/blob/0ce0d74e67927fd964a27c045d68e2f32b5f65f7/Common/DataModel/vtkCellType.h#L112 * https://github.com/ju-kreber/paraview-scripts * https://doi.org/10.1016/B978-1-85617-633-0.00006-X * https://discourse.paraview.org/t/about-high-order-non-traditional-lagrange-finite-element/1577/4 * https://gitlab.kitware.com/vtk/vtk/-/blob/7a0b92864c96680b1f42ee84920df556fc6ebaa3/Common/DataModel/vtkHigherOrderInterpolation.cxx At present, tis notebook requires the `vtu72` branch on the `meshio` fork at https://github.com/chrishavlin/meshio/pull/new/vtu72 to attempt to load the `VTK_LAGRANGE_HEXAHEDRON` output. As seen below, the data can be loaded with the general `unstructured_mesh_loader` but `yt` can not presently handle higher order output. ``` import os, yt, numpy as np import xmltodict, meshio DataDir=os.path.join(os.environ.get('ASPECTdatadir','../'),'litho_defo_sample','data') pFile=os.path.join(DataDir,'solution-00002.pvtu') if os.path.isfile(pFile) is False: print("data file not found") class pvuFile(object): def __init__(self,file,**kwargs): self.file=file self.dataDir=kwargs.get('dataDir',os.path.split(file)[0]) with open(file) as data: self.pXML = xmltodict.parse(data.read()) # store fields for convenience self.fields=self.pXML['VTKFile']['PUnstructuredGrid']['PPointData']['PDataArray'] def load(self): conlist=[] # list of 2D connectivity arrays coordlist=[] # global, concatenated coordinate array nodeDictList=[] # list of node_data dicts, same length as conlist con_offset=-1 for mesh_id,src in enumerate(self.pXML['VTKFile']['PUnstructuredGrid']['Piece']): mesh_name="connect{meshnum}".format(meshnum=mesh_id+1) # connect1, connect2, etc. 
srcFi=os.path.join(self.dataDir,src['@Source']) # full path to .vtu file [con,coord,node_d]=self.loadPiece(srcFi,mesh_name,con_offset+1) con_offset=con.max() conlist.append(con.astype("i8")) coordlist.extend(coord.astype("f8")) nodeDictList.append(node_d) self.connectivity=conlist self.coordinates=np.array(coordlist) self.node_data=nodeDictList def loadPiece(self,srcFi,mesh_name,connectivity_offset=0): # print(srcFi) meshPiece=meshio.read(srcFi) # read it in with meshio coords=meshPiece.points # coords and node_data are already global connectivity=meshPiece.cells_dict['lagrange_hexahedron'] # 2D connectivity array # parse node data node_data=self.parseNodeData(meshPiece.point_data,connectivity,mesh_name) # offset the connectivity matrix to global value connectivity=np.array(connectivity)+connectivity_offset return [connectivity,coords,node_data] def parseNodeData(self,point_data,connectivity,mesh_name): # for each field, evaluate field data by index, reshape to match connectivity con1d=connectivity.ravel() conn_shp=connectivity.shape comp_hash={0:'cx',1:'cy',2:'cz'} def rshpData(data1d): return np.reshape(data1d[con1d],conn_shp) node_data={} for fld in self.fields: nm=fld['@Name'] if nm in point_data.keys(): if '@NumberOfComponents' in fld.keys() and int(fld['@NumberOfComponents'])>1: # we have a vector, deal with components for component in range(int(fld['@NumberOfComponents'])): comp_name=nm+'_'+comp_hash[component] # e.g., velocity_cx m_F=(mesh_name,comp_name) # e.g., ('connect1','velocity_cx') node_data[m_F]=rshpData(point_data[nm][:,component]) else: # just a scalar! m_F=(mesh_name,nm) # e.g., ('connect1','T') node_data[m_F]=rshpData(point_data[nm]) return node_data pvuData=pvuFile(pFile) pvuData.load() ``` So it loads... `meshio`'s treatment of high order elements is not complicated: it assumes the same number of nodes per elements and just reshapes the 1d connectivity array appropriately. In this case, a single element has 27 nodes: ``` pvuData.connectivity[0].shape ``` And yes, it can load: ``` ds4 = yt.load_unstructured_mesh( pvuData.connectivity, pvuData.coordinates, node_data = pvuData.node_data ) ``` but the plots are don't actually take advantage of all the data, noted by the warning when slicing: "High order elements not yet supported, dropping to 1st order." ``` p=yt.SlicePlot(ds4, "x", ("all", "T")) p.set_log("T",False) p.show() ``` This run is a very high aspect ratio cartesian simulation so let's rescale the coords first and then reload (**TO DO** look up how to do this with *yt* after loading the data...) ``` def minmax(x): return [x.min(),x.max()] for idim in range(0,3): print([idim,minmax(pvuData.coordinates[:,idim])]) # some artificial rescaling for idim in range(0,3): pvuData.coordinates[:,idim]=pvuData.coordinates[:,idim] / pvuData.coordinates[:,idim].max() ds4 = yt.load_unstructured_mesh( pvuData.connectivity, pvuData.coordinates, node_data = pvuData.node_data ) p=yt.SlicePlot(ds4, "x", ("all", "T")) p.set_log("T",False) p.show() ``` To use all the data, we need to add a new element mapping for sampling these elements (see `yt/utilities/lib/element_mappings.pyx`). These element mappings can be automatically generated using a symbolic math library, e.g., `sympy`. See `ASPECT_VTK_quad_hex_mapping.ipynb`
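As a flavour of that symbolic generation, the sketch below uses sympy to build the three 1D quadratic Lagrange shape functions on [-1, 1]; the 27 shape functions of the `VTK_LAGRANGE_HEXAHEDRON` are tensor products of these in x, y and z. The remaining work (the VTK node ordering and the inverse mapping needed by yt's element mappings) is not covered here:

```
import sympy as sp

x, y, z = sp.symbols('x y z')
nodes = [-1, 0, 1]  # 1D node positions of a quadratic (2nd order) Lagrange element

def lagrange_basis(i, nodes, var):
    """i-th 1D Lagrange polynomial through the given nodes."""
    N = sp.Integer(1)
    for j, xj in enumerate(nodes):
        if j != i:
            N *= (var - xj) / (nodes[i] - xj)
    return sp.expand(N)

shape_1d = [lagrange_basis(i, nodes, x) for i in range(len(nodes))]
for i, N in enumerate(shape_1d):
    print(f'N{i}(x) =', N)   # x*(x - 1)/2, 1 - x**2, x*(x + 1)/2

# one of the 27 hex shape functions is a tensor product in x, y, z,
# e.g. for the corner node at (-1, -1, -1):
N_corner = (lagrange_basis(0, nodes, x) *
            lagrange_basis(0, nodes, y) *
            lagrange_basis(0, nodes, z))
print('corner shape function:', sp.expand(N_corner))
```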
github_jupyter
# Search index ``` import numpy as np import pandas as pd import datetime import matplotlib from matplotlib import pyplot as plt import matplotlib.patches as mpatches matplotlib.style.use('ggplot') %matplotlib inline ``` ### Description: The index is built from the crisis descriptors taken from [Stolbov's article.](https://yadi.sk/i/T24TXCw2Jzy8oQ) The author looked at what was googled most actively in the "Finance and insurance" category at the peak of the 2008 crisis. He selected these descriptors and supplemented them with a few more terms. ### Technical notes: We download the search queries for all of Stolbov's descriptors. This can be done by hand, but for downloading search data in bulk there is a [recursive parser.](https://nbviewer.jupyter.org/github/FUlyankin/Parsers/blob/master/Parsers%20/google_trends_selenium_parser.ipynb) We will build the index in two ways: - by weighting all terms with the coefficients $$ w_i = \frac{\sum_{j} r_{ij}}{\sum_{i,j} r_{ij}} $$ - by taking one of the components of a PCA decomposition. We take not the first component but the one that captures the "peaks"; in our case this is the second component. ``` !ls ../01_Google_trends_parser path = '../01_Google_trends_parser/krizis_poisk_odinar_month.tsv' df_poisk = pd.read_csv(path, sep='\t') df_poisk.set_index('Месяц', inplace=True) print(df_poisk.shape) df_poisk.head() def index_make(df_term): corr_matrix = df_term.corr() w = np.array(corr_matrix.sum()/corr_matrix.sum().sum()) print(w) index = (np.array(df_term).T*w.reshape(len(w),1)).sum(axis = 0) mx = index.max() mn = index.min() return 100*(index - mn)/(mx - mn) def min_max_scaler(df, col): mx = df[col].max() mn = df[col].min() df[col] = 100*(df[col] - mn)/(mx - mn) pass index_poisk = index_make(df_poisk) df_pi = pd.DataFrame() df_pi['fielddate'] = df_poisk.index df_pi['poiskInd_ind_corr'] = index_poisk df_pi.set_index('fielddate').plot(legend=True, figsize=(12,6)); from sklearn.decomposition import PCA model_pca = PCA(n_components= 15) model_pca.fit(df_poisk) df_pi_pca = model_pca.transform(df_poisk) plt.plot(model_pca.explained_variance_, label='Component variances') plt.xlabel('n components') plt.ylabel('variance') plt.legend(loc='upper right'); df_pi['poiskInd_ind_pca'] = list(df_pi_pca[:,1]) min_max_scaler(df_pi, 'poiskInd_ind_pca') df_pi.plot(legend=True, figsize=(12,6)); ``` -------- ``` df_pi.to_csv('../Индексы/data_simple_index_v2/poisk_krizis_index_month.tsv', sep="\t", index=None) ``` -------------
github_jupyter
``` import torch import torchvision import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import matplotlib.pyplot as plt import random import backwardcompatibilityml.loss as bcloss import backwardcompatibilityml.scores as scores # Initialize random seed random.seed(123) torch.manual_seed(456) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False %matplotlib inline n_epochs = 3 batch_size_train = 64 batch_size_test = 1000 learning_rate = 0.01 momentum = 0.5 log_interval = 10 torch.backends.cudnn.enabled = False train_loader = list(torch.utils.data.DataLoader( torchvision.datasets.MNIST('datasets/', train=True, download=True, transform=torchvision.transforms.Compose([ torchvision.transforms.ToTensor(), torchvision.transforms.Normalize( (0.1307,), (0.3081,)) ])), batch_size=batch_size_train, shuffle=True)) test_loader = list(torch.utils.data.DataLoader( torchvision.datasets.MNIST('datasets/', train=False, download=True, transform=torchvision.transforms.Compose([ torchvision.transforms.ToTensor(), torchvision.transforms.Normalize( (0.1307,), (0.3081,)) ])), batch_size=batch_size_test, shuffle=True)) train_loader_a = train_loader[:int(len(train_loader)/2)] train_loader_b = train_loader[int(len(train_loader)/2):] fig = plt.figure() for i in range(6): plt.subplot(2,3,i+1) plt.tight_layout() plt.imshow(train_loader_a[0][0][i][0], cmap='gray', interpolation='none') plt.title("Ground Truth: {}".format(train_loader_a[0][1][i])) plt.xticks([]) plt.yticks([]) fig class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 10, kernel_size=5) self.conv2 = nn.Conv2d(10, 20, kernel_size=5) self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(320, 50) self.fc2 = nn.Linear(50, 10) def forward(self, x): x = F.relu(F.max_pool2d(self.conv1(x), 2)) x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) x = x.view(-1, 320) x = F.relu(self.fc1(x)) x = F.dropout(x, training=self.training) x = self.fc2(x) return x, F.softmax(x, dim=1), F.log_softmax(x, dim=1) network = Net() optimizer = optim.SGD(network.parameters(), lr=learning_rate, momentum=momentum) train_losses = [] train_counter = [] test_losses = [] test_counter = [i*len(train_loader_a)*batch_size_train for i in range(n_epochs + 1)] def train(epoch): network.train() for batch_idx, (data, target) in enumerate(train_loader_a): optimizer.zero_grad() _, _, output = network(data) loss = F.nll_loss(output, target) loss.backward() optimizer.step() if batch_idx % log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader_a)*batch_size_train, 100. * batch_idx / len(train_loader_a), loss.item())) train_losses.append(loss.item()) train_counter.append( (batch_idx*64) + ((epoch-1)*len(train_loader_a)*batch_size_train)) def test(): network.eval() test_loss = 0 correct = 0 with torch.no_grad(): for data, target in test_loader: _, _, output = network(data) test_loss += F.nll_loss(output, target, reduction="sum").item() pred = output.data.max(1, keepdim=True)[1] correct += pred.eq(target.data.view_as(pred)).sum() test_loss /= len(train_loader_a)*batch_size_train test_losses.append(test_loss) print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( test_loss, correct, len(train_loader_a)*batch_size_train, 100. 
* correct / (len(train_loader_a)*batch_size_train))) test() for epoch in range(1, n_epochs + 1): train(epoch) test() fig = plt.figure() plt.plot(train_counter, train_losses, color='blue') plt.scatter(test_counter, test_losses, color='red') plt.legend(['Train Loss', 'Test Loss'], loc='upper right') plt.xlabel('number of training examples seen') plt.ylabel('negative log likelihood loss') fig with torch.no_grad(): _, _, output = network(test_loader[0][0]) fig = plt.figure() for i in range(6): plt.subplot(2,3,i+1) plt.tight_layout() plt.imshow(test_loader[0][0][i][0], cmap='gray', interpolation='none') plt.title("Prediction: {}".format( output.data.max(1, keepdim=True)[1][i].item())) plt.xticks([]) plt.yticks([]) fig import copy h1 = copy.deepcopy(network) h2 = copy.deepcopy(network) h1.eval() new_optimizer = optim.SGD(h2.parameters(), lr=learning_rate, momentum=momentum) lambda_c = 1.0 bc_loss = bcloss.BCNLLLoss(h1, h2, lambda_c) update_train_losses = [] update_train_counter = [] update_test_losses = [] update_test_counter = [i*len(train_loader_b)*batch_size_train for i in range(n_epochs + 1)] def train_update(epoch): for batch_idx, (data, target) in enumerate(train_loader_b): new_optimizer.zero_grad() loss = bc_loss(data, target) loss.backward() new_optimizer.step() if batch_idx % log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader_b)*batch_size_train, 100. * batch_idx / len(train_loader_b), loss.item())) update_train_losses.append(loss.item()) update_train_counter.append( (batch_idx*64) + ((epoch-1)*len(train_loader_b)*batch_size_train)) def test_update(): h2.eval() test_loss = 0 correct = 0 with torch.no_grad(): for data, target in test_loader: _, _, output = h2(data) test_loss += F.nll_loss(output, target, reduction="sum").item() pred = output.data.max(1, keepdim=True)[1] correct += pred.eq(target.data.view_as(pred)).sum() test_loss /= len(train_loader_b)*batch_size_train update_test_losses.append(test_loss) print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( test_loss, correct, len(train_loader_b)*batch_size_train, 100. * correct / (len(train_loader_b)*batch_size_train))) test_update() for epoch in range(1, n_epochs + 1): train_update(epoch) test_update() fig = plt.figure() plt.plot(update_train_counter, update_train_losses, color='blue') plt.scatter(update_test_counter, update_test_losses, color='red') plt.legend(['Train Loss', 'Test Loss'], loc='upper right') plt.xlabel('number of training examples seen') plt.ylabel('negative log likelihood loss') fig h2.eval() h1.eval() test_index = 2 with torch.no_grad(): _, _, h1_output = h1(test_loader[test_index][0]) _, _, h2_output = h2(test_loader[test_index][0]) h1_labels = h1_output.data.max(1)[1] h2_labels = h2_output.data.max(1)[1] expected_labels = test_loader[test_index][1] fig = plt.figure() for i in range(6): plt.subplot(2,3,i+1) plt.tight_layout() plt.imshow(test_loader[test_index][0][i][0], cmap='gray', interpolation='none') plt.title("Prediction: {}".format( h2_labels[i].item())) plt.xticks([]) plt.yticks([]) fig trust_compatibility = scores.trust_compatibility_score(h1_labels, h2_labels, expected_labels) error_compatibility = scores.error_compatibility_score(h1_labels, h2_labels, expected_labels) print(f"Error Compatibility Score: {error_compatibility}") print(f"Trust Compatibility Score: {trust_compatibility}") ```
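For intuition, the two scores can also be recomputed directly from the label tensors. The definitions below follow the usual backward-compatibility formulation (the fraction of h1's correct, respectively incorrect, examples on which h2 is also correct, respectively incorrect); they are meant as an illustration rather than as the library's exact implementation:

```
def trust_compatibility_by_hand(h1_labels, h2_labels, expected_labels):
    # of the examples the old model h1 classified correctly,
    # the fraction the updated model h2 also classifies correctly
    h1_correct = (h1_labels == expected_labels)
    both_correct = h1_correct & (h2_labels == expected_labels)
    return both_correct.sum().item() / max(h1_correct.sum().item(), 1)

def error_compatibility_by_hand(h1_labels, h2_labels, expected_labels):
    # of the examples h1 got wrong, the fraction h2 also gets wrong
    h1_wrong = (h1_labels != expected_labels)
    both_wrong = h1_wrong & (h2_labels != expected_labels)
    return both_wrong.sum().item() / max(h1_wrong.sum().item(), 1)

print(trust_compatibility_by_hand(h1_labels, h2_labels, expected_labels))
print(error_compatibility_by_hand(h1_labels, h2_labels, expected_labels))
```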
github_jupyter
``` import numpy as np import os from astropy.table import Table from astropy.cosmology import FlatLambdaCDM from matplotlib import pyplot as plt from astropy.io import ascii from astropy.coordinates import SkyCoord import healpy import astropy.units as u import pandas as pd import matplotlib import pyccl from scipy import stats os.environ['CLMM_MODELING_BACKEND'] = 'ccl' # here you may choose ccl, nc (NumCosmo) or ct (cluster_toolkit) import clmm from clmm.support.sampler import fitters from importlib import reload import sys sys.path.append('../../') from magnification_library import * clmm.__version__ matplotlib.rcParams.update({'font.size': 16}) #define cosmology #astropy object cosmo = FlatLambdaCDM(H0=71, Om0=0.265, Tcmb0=0 , Neff=3.04, m_nu=None, Ob0=0.0448) #ccl object cosmo_ccl = pyccl.Cosmology(Omega_c=cosmo.Om0-cosmo.Ob0, Omega_b=cosmo.Ob0, h=cosmo.h, sigma8= 0.80, n_s=0.963) #clmm object cosmo_clmm = clmm.Cosmology(be_cosmo=cosmo_ccl) path_file = '../../../' key = 'LBGp' ``` ## **Profiles measured with TreeCorr** ``` quant = np.load(path_file + "output_data/binned_correlation_fct_Mpc_"+key+".npy", allow_pickle=True) quant_NK = np.load(path_file + "output_data/binned_correlation_fct_NK_Mpc_"+key+".npy", allow_pickle=True) ``` ## **Measuring profiles with astropy and CLMM** ## Open data ``` gal_cat_raw = pd.read_hdf(path_file+'input_data/cat_'+key+'.h5', key=key) dat = np.load(path_file+"input_data/source_sample_properties.npy", allow_pickle=True) mag_cut, alpha_cut, alpha_cut_err, mag_null, gal_dens, zmean = dat[np.where(dat[:,0]==key)][0][1:] print (alpha_cut) mag_cut selection_source = (gal_cat_raw['ra']>50) & (gal_cat_raw['ra']<73.1) & (gal_cat_raw['dec']<-27.) & (gal_cat_raw['dec']>-45.) selection = selection_source & (gal_cat_raw['mag_i_lsst']<mag_cut) & (gal_cat_raw['redshift']>1.5) gal_cat = gal_cat_raw[selection] [z_cl, mass_cl, n_halo] = np.load(path_file + "output_data/halo_bin_properties.npy", allow_pickle=True) np.sum(n_halo) ``` ## **Magnification profiles prediction** ``` def Mpc_to_arcmin(x_Mpc, z, cosmo=cosmo): return x_Mpc * cosmo.arcsec_per_kpc_proper(z).to(u.arcmin/u.Mpc).value def arcmin_to_Mpc(x_arc, z, cosmo=cosmo): return x_arc * cosmo.kpc_proper_per_arcmin(z).to(u.Mpc/u.arcmin).value def magnification_biais_model(rproj, mass_lens, z_lens, alpha, z_source, cosmo_clmm, delta_so='200', massdef='mean', Mc_relation ='Diemer15'): conc = get_halo_concentration(mass_lens, z_lens, cosmo_clmm.be_cosmo, Mc_relation, mdef[0], delta_so ) magnification = np.zeros(len(rproj)) for k in range(len(rproj)): magnification[k] = np.mean(clmm.theory.compute_magnification(rproj[k], mdelta=mass_lens, cdelta=conc, z_cluster=z_lens, z_source=z_source, cosmo=cosmo_clmm, delta_mdef=delta_so, massdef = massdef, halo_profile_model='NFW', z_src_model='single_plane')) model = mu_bias(magnification, alpha) - 1. 
return model, magnification def get_halo_concentration(mass_lens, z_lens, cosmo_ccl, relation="Diemer15", mdef="matter", delta_so=200): mdef = pyccl.halos.massdef.MassDef(delta_so, mdef, c_m_relation=relation) concdef = pyccl.halos.concentration.concentration_from_name(relation)() conc = concdef.get_concentration(cosmo=cosmo_clmm.be_cosmo, M=mass_lens, a=cosmo_clmm.get_a_from_z(z=z_lens), mdef_other=mdef) return conc hist = plt.hist(gal_cat['redshift'][selection], bins=100, range=[1.8,3.1], density=True, stacked=True); pdf_zsource = zpdf_from_hist(hist, zmin=0, zmax=10) plt.plot(pdf_zsource.x, pdf_zsource.y, 'r') plt.xlim(1,3.4) plt.xlabel('z source') plt.ylabel('pdf') zint = np.linspace(0, 3.5, 1000) zrand = np.random.choice(zint, 1000, p=pdf_zsource(zint)/np.sum(pdf_zsource(zint))) Mc_relation = "Diemer15" mdef = ["matter", "mean"] #differet terminology for ccl and clmm delta_so=200 #model with the full redshift distribution rp_Mpc = np.logspace(-2, 3, 100) model_mbias = np.zeros((rp_Mpc.size, len(z_cl), len(mass_cl))) model_magnification = np.zeros((rp_Mpc.size, len(z_cl), len(mass_cl))) for i in np.arange(z_cl.shape[0]): for j in np.arange(mass_cl.shape[1]): #rp_Mpc = arcmin_to_Mpc(rp, z_cl[i,j], cosmo) models = magnification_biais_model(rp_Mpc, mass_cl[i,j], z_cl[i,j], alpha_cut, zrand, cosmo_clmm, delta_so, massdef=mdef[1], Mc_relation=Mc_relation) model_mbias[:,i,j] = models[0] model_magnification[:,i,j] = models[1] ``` ## **Plotting figures** ## Example for one mass/z bin ``` i,j = 1,2 corr = np.mean(gal_cat['magnification']) - 1 plt.fill_between(quant[i,j][0], y1= quant[i,j][1] - np.sqrt(np.diag(quant[i,j][2])),\ y2 = quant[i,j][1] + np.sqrt(np.diag(quant[i,j][2])),color = 'grey', alpha=0.4, label='measured') expected_mu_bias = mu_bias(quant_NK[i,j][1] - corr, alpha_cut) - 1. expected_mu_bias_err = expected_mu_bias * (alpha_cut -1 ) * np.sqrt(np.diag(quant_NK[i,j][2])) /(quant_NK[i,j][1]) plt.errorbar(quant_NK[i,j][0], expected_mu_bias, yerr = expected_mu_bias_err, fmt='r.', label = 'predicted from meas. $\mu$') plt.plot(rp_Mpc, model_mbias[:,i,j],'k', lw=2, label='model (1 halo term)') plt.axvline(cosmo.kpc_proper_per_arcmin(z_cl[i,j]).to(u.Mpc/u.arcmin).value*healpy.nside2resol(4096, arcmin = True), linestyle="dotted", color='grey', label ='healpix resol.') plt.xscale('log') plt.xlim(0.1,8) plt.ylim(-0.25,1) plt.grid() plt.xlabel('$\\theta$ [Mpc]') plt.ylabel('$\delta_{\mu}$') plt.legend(fontsize='small', ncol=1) ``` ## Magnification biais profiles for cluster in mass/z bins ``` fig, axes = plt.subplots(5,5, figsize=[20,15], sharex=True) corr = np.mean(gal_cat['magnification']) - 1 for i,h in zip([0,1,2,3,4],range(5)): for j,k in zip([0,1,2,3,4],range(5)): ax = axes[5-1-k,h] ax.fill_between(quant[i,j][0], y1= quant[i,j][1] - np.sqrt(np.diag(quant[i,j][2])),\ y2 = quant[i,j][1] + np.sqrt(np.diag(quant[i,j][2])),color = 'grey', alpha=0.4) expected_mu_bias = mu_bias(quant_NK[i,j][1] - corr, alpha_cut) - 1. expected_mu_bias_err = expected_mu_bias * (alpha_cut -1 ) * np.sqrt(np.diag(quant_NK[i,j][2])) /(quant_NK[i,j][1]) ax.errorbar(quant_NK[i,j][0], expected_mu_bias, yerr = expected_mu_bias_err, fmt='r.', label = 'predicted from meas. 
$\mu$') ax.axvline(cosmo.kpc_proper_per_arcmin(z_cl[i,j]).to(u.Mpc/u.arcmin).value*healpy.nside2resol(4096, arcmin = True), linestyle="dotted", color='grey', label ='healpix resol.') ax.text(0.5, 0.80, "<z>="+str(round(z_cl[i,j],2)), transform=ax.transAxes, fontsize='x-small') ax.text(0.5, 0.90, "<M/1e14>="+str(round(mass_cl[i,j]/1e14,2)), transform=ax.transAxes, fontsize='x-small'); ax.plot(rp_Mpc, model_mbias[:,i,j],'k--') ax.axvline(0, color='black') [axes[4,j].set_xlabel('$\\theta$ [Mpc]') for j in range(5)] [axes[i,0].set_ylabel('$\delta_{\mu}$') for i in range(5)] plt.tight_layout() axes[0,0].set_xscale('log') axes[0,0].set_xlim(0.1,8) for i in range(axes.shape[0]): axes[4,i].set_ylim(-0.2,0.6) axes[3,i].set_ylim(-0.2,1.3) axes[2,i].set_ylim(-0.2,1.3) axes[1,i].set_ylim(-0.2,2.0) axes[0,i].set_ylim(-0.2,2.5) ``` ## Fitting the mass from the magnification biais profiles using the NFW model ``` def predict_function(radius_Mpc, logM, z_cl): mass_guess = 10**logM return magnification_biais_model(radius_Mpc, mass_guess, z_cl, alpha_cut, zrand, cosmo_clmm, delta_so, massdef=mdef[1], Mc_relation=Mc_relation)[0] def fit_mass(predict_function, data_for_fit, z): popt, pcov = fitters['curve_fit'](lambda radius_Mpc, logM: predict_function(radius_Mpc, logM, z), data_for_fit[0], data_for_fit[1], np.sqrt(np.diag(data_for_fit[2])), bounds=[10.,17.], absolute_sigma=True, p0=(13.)) logm, logm_err = popt[0], np.sqrt(pcov[0][0]) return {'logm':logm, 'logm_err':logm_err, 'm': 10**logm, 'm_err': (10**logm)*logm_err*np.log(10)} fit_mass_magnification = np.zeros(z_cl.shape, dtype=object) mass_eval = np.zeros((z_cl.shape)) mass_min = np.zeros((z_cl.shape)) mass_max = np.zeros((z_cl.shape)) for i in range(5): for j in range(5): fit_mass_magnification[i,j] = fit_mass(predict_function, quant[i,j], z_cl[i,j]) mass_eval[i,j] = fit_mass_magnification[i,j]['m'] mass_min[i,j] = fit_mass_magnification[i,j]['m'] - fit_mass_magnification[i,j]['m_err'] mass_max[i,j] = fit_mass_magnification[i,j]['m'] + fit_mass_magnification[i,j]['m_err'] fig, ax = plt.subplots(1, 3, figsize=(18,4)) ax[0].errorbar(mass_cl[0,:]*0.90, mass_eval[0,:],\ yerr = (mass_eval[0,:] - mass_min[0,:], mass_max[0,:] - mass_eval[0,:]),fmt='-o', label ="z="+str(round(z_cl[0,0],2))) ax[0].errorbar(mass_cl[1,:]*0.95, mass_eval[1,:],\ yerr = (mass_eval[1,:] - mass_min[1,:], mass_max[1,:] - mass_eval[1,:]),fmt='-o', label ="z="+str(round(z_cl[1,0],2))) ax[0].errorbar(mass_cl[2,:]*1.00, mass_eval[2,:],\ yerr = (mass_eval[2,:] - mass_min[2,:], mass_max[2,:] - mass_eval[2,:]),fmt='-o', label ="z="+str(round(z_cl[2,0],2))) ax[0].errorbar(mass_cl[3,:]*1.05, mass_eval[3,:],\ yerr = (mass_eval[3,:] - mass_min[3,:], mass_max[3,:] - mass_eval[3,:]),fmt='-o', label ="z="+str(round(z_cl[3,0],2))) ax[0].errorbar(mass_cl[4,:]*1.10, mass_eval[4,:],\ yerr = (mass_eval[4,:] - mass_min[4,:], mass_max[4,:] - mass_eval[4,:]),fmt='-o', label ="z="+str(round(z_cl[4,0],2))) ax[0].set_xscale('log') ax[0].set_yscale('log') ax[0].plot((4e13, 5e14),(4e13,5e14), color='black', lw=2) ax[0].legend(fontsize = 'small', ncol=1) ax[0].set_xlabel("$M_{FoF}$ true [$M_{\odot}$]") ax[0].set_ylabel("$M_{200,m}$ eval [$M_{\odot}$]") ax[0].grid() ax[1].errorbar(mass_cl[0,:]*0.96, mass_eval[0,:]/mass_cl[0,:],\ yerr = (mass_eval[0,:] - mass_min[0,:], mass_max[0,:] - mass_eval[0,:])/mass_cl[0,:],fmt='-o', label ="z="+str(round(z_cl[0,0],2))) ax[1].errorbar(mass_cl[1,:]*0.98, mass_eval[1,:]/mass_cl[1,:],\ yerr = (mass_eval[1,:] - mass_min[1,:], mass_max[1,:] - mass_eval[1,:])/mass_cl[1,:],fmt='-o', 
label ="z="+str(round(z_cl[1,0],2))) ax[1].errorbar(mass_cl[2,:]*1.00, mass_eval[2,:]/mass_cl[2,:],\ yerr = (mass_eval[2,:] - mass_min[2,:], mass_max[2,:] - mass_eval[2,:])/mass_cl[2,:],fmt='-o', label ="z="+str(round(z_cl[2,0],2))) ax[1].errorbar(mass_cl[3,:]*1.02, mass_eval[3,:]/mass_cl[3,:],\ yerr = (mass_eval[3,:] - mass_min[3,:], mass_max[3,:] - mass_eval[3,:])/mass_cl[3,:],fmt='-o', label ="z="+str(round(z_cl[3,0],2))) ax[1].errorbar(mass_cl[4,:]*1.04, mass_eval[4,:]/mass_cl[4,:],\ yerr = (mass_eval[4,:] - mass_min[4,:], mass_max[4,:] - mass_eval[4,:])/mass_cl[4,:],fmt='-o', label ="z="+str(round(z_cl[4,0],2))) ax[1].set_xlim(4e13, 5e14) ax[1].set_xscale('log') ax[1].axhline(1, color='black') #ax[1].legend() ax[1].set_xlabel("$M_{FoF}$ true [$M_{\odot}$]") ax[1].set_ylabel("$M_{200,m}$ eval/$M_{FoF}$ true") ax[2].errorbar(z_cl[0,:]*0.96, mass_eval[0,:]/mass_cl[0,:],\ yerr = (mass_eval[0,:] - mass_min[0,:], mass_max[0,:] - mass_eval[0,:])/mass_cl[0,:],fmt='-o', label ="z="+str(round(z_cl[0,0],2))) ax[2].errorbar(z_cl[1,:]*0.98, mass_eval[1,:]/mass_cl[1,:],\ yerr = (mass_eval[1,:] - mass_min[1,:], mass_max[1,:] - mass_eval[1,:])/mass_cl[1,:],fmt='-o', label ="z="+str(round(z_cl[1,0],2))) ax[2].errorbar(z_cl[2,:]*1.00, mass_eval[2,:]/mass_cl[2,:],\ yerr = (mass_eval[2,:] - mass_min[2,:], mass_max[2,:] - mass_eval[2,:])/mass_cl[2,:],fmt='-o', label ="z="+str(round(z_cl[2,0],2))) ax[2].errorbar(z_cl[3,:]*1.02, mass_eval[3,:]/mass_cl[3,:],\ yerr = (mass_eval[3,:] - mass_min[3,:], mass_max[3,:] - mass_eval[3,:])/mass_cl[3,:],fmt='-o', label ="z="+str(round(z_cl[3,0],2))) ax[2].errorbar(z_cl[4,:]*1.04, mass_eval[4,:]/mass_cl[4,:],\ yerr = (mass_eval[4,:] - mass_min[4,:], mass_max[4,:] - mass_eval[4,:])/mass_cl[4,:],fmt='-o', label ="z="+str(round(z_cl[4,0],2))) ax[2].axhline(1, color='black') ax[2].set_ylabel("$M_{200,m}$ eval/$M_{FoF}$ true") ax[2].set_xlabel('z') plt.tight_layout() np.save(path_file + "output_data/fitted_mass_from_magnification_bias_"+key+"_"+mdef[0]+str(delta_so)+"_cM_"+Mc_relation,[mass_eval, mass_min, mass_max]) ``` ## Comparison to the mass fitted from the magnification profile ``` mass_eval_mag, mass_min_mag, mass_max_mag = np.load(path_file + "output_data/fitted_mass_from_magnification_"+key+"_"+mdef[0]+str(delta_so)+"_cM_"+Mc_relation+".npy") fig, ax = plt.subplots(1, 3, figsize=(18,4))# sharex=True )#,sharey=True) colors = ["blue", "green" , "orange", "red", "purple"] ax[0].errorbar(mass_eval_mag[0,:], mass_eval[0,:],xerr = (mass_eval_mag[0,:] - mass_min_mag[0,:], mass_max_mag[0,:] - mass_eval_mag[0,:]),\ yerr = (mass_eval[0,:] - mass_min[0,:], mass_max[0,:] - mass_eval[0,:]),\ fmt='.', color = colors[0], mfc='none', label ="z="+str(round(z_cl[0,0],2))) ax[0].errorbar(mass_eval_mag[1,:], mass_eval[1,:],xerr = (mass_eval_mag[1,:] - mass_min_mag[1,:], mass_max_mag[1,:] - mass_eval_mag[1,:]),\ yerr = (mass_eval[1,:] - mass_min[1,:], mass_max[1,:] - mass_eval[1,:]),\ fmt='.', color = colors[1], mfc='none', label ="z="+str(round(z_cl[1,0],2))) ax[0].errorbar(mass_eval_mag[2,:], mass_eval[2,:],xerr = (mass_eval_mag[2,:] - mass_min_mag[2,:], mass_max_mag[2,:] - mass_eval_mag[2,:]),\ yerr = (mass_eval[2,:] - mass_min[2,:], mass_max[2,:] - mass_eval[2,:]),\ fmt='.', color = colors[2], mfc='none', label ="z="+str(round(z_cl[2,0],2))) ax[0].errorbar(mass_eval_mag[3,:], mass_eval[3,:],xerr = (mass_eval_mag[3,:] - mass_min_mag[3,:], mass_max_mag[3,:] - mass_eval_mag[3,:]),\ yerr = (mass_eval[3,:] - mass_min[3,:], mass_max[3,:] - mass_eval[3,:]),\ fmt='.', color = colors[3], 
mfc='none', label ="z="+str(round(z_cl[3,0],2))) ax[0].errorbar(mass_eval_mag[4,:], mass_eval[4,:],xerr = (mass_eval_mag[4,:] - mass_min_mag[4,:], mass_max_mag[4,:] - mass_eval_mag[4,:]),\ yerr = (mass_eval[4,:] - mass_min[4,:], mass_max[4,:] - mass_eval[4,:]),\ fmt='.', color = colors[4], mfc='none', label ="z="+str(round(z_cl[4,0],2))) ax[0].set_xscale('log') ax[0].set_yscale('log') ax[0].plot((4e13, 5e14),(4e13,5e14), color='black', lw=2) ax[0].legend(fontsize='small') ax[0].set_xlabel("$M_{200,m}~eval~from~\mu$[$M_{\odot}$]") ax[0].set_ylabel("$M_{200,m}~eval~from~\delta_{\mu}$[$M_{\odot}$]") ax[0].grid() ratio = mass_eval/mass_eval_mag ratio_err = ratio *( (0.5*(mass_max - mass_min))/mass_eval + (0.5*(mass_max_mag - mass_min_mag))/mass_eval_mag ) ax[1].errorbar(mass_cl[0,:]*0.96, ratio[0], yerr = ratio_err[0],fmt = 'o', color = colors[0]) ax[1].errorbar(mass_cl[1,:]*0.98, ratio[1], yerr = ratio_err[1],fmt = 'o', color = colors[1]) ax[1].errorbar(mass_cl[2,:]*1.00, ratio[2], yerr = ratio_err[2],fmt = 'o', color = colors[2]) ax[1].errorbar(mass_cl[3,:]*1.02, ratio[3], yerr = ratio_err[3],fmt = 'o', color = colors[3]) ax[1].errorbar(mass_cl[4,:]*1.04, ratio[4], yerr = ratio_err[4],fmt = 'o', color = colors[4]) ax[1].axhline(1, color='black') ax[1].set_xlabel("$M_{FoF}$ true [$M_{\odot}$]") ax[1].set_ylabel("$\\frac{M_{200,m}~eval~from~\delta_{\mu}}{M_{200,m}~eval~from~\mu}$") ax[1].set_xlim(4e13, 5e14) ax[1].set_xscale('log') ax[2].errorbar(z_cl[0,:]*0.96, ratio[0], yerr = ratio_err[0], fmt = 'o', color = colors[0]) ax[2].errorbar(z_cl[1,:]*0.98, ratio[1], yerr = ratio_err[1], fmt = 'o', color = colors[1]) ax[2].errorbar(z_cl[2,:]*1.00, ratio[2], yerr = ratio_err[2], fmt = 'o', color = colors[2]) ax[2].errorbar(z_cl[3,:]*1.02, ratio[3], yerr = ratio_err[3], fmt = 'o', color = colors[3]) ax[2].errorbar(z_cl[4,:]*1.04, ratio[4], yerr = ratio_err[4], fmt = 'o', color = colors[4]) ax[2].axhline(1, color='black') ax[2].set_ylabel("$\\frac{M_{200,m}~eval~from~\mu}{M_{200,m}~eval~from~\delta_{\mu}}$") ax[2].set_xlabel('z') plt.tight_layout() diff = (mass_eval - mass_eval_mag)/1e14 diff_err = (1/1e14) * np.sqrt((0.5*(mass_max - mass_min))**2 + (0.5*(mass_max_mag - mass_min_mag))**2) plt.hist((diff/diff_err).flatten()); plt.xlabel('$\chi$') plt.axvline(0, color='black') plt.axvline(-1, color='black', ls='--') plt.axvline(1, color='black', ls='--') plt.axvline(np.mean((diff/diff_err).flatten()), color='red') plt.axvline(np.mean((diff/diff_err).flatten()) - np.std((diff/diff_err).flatten()), color='red', ls=':') plt.axvline(np.mean((diff/diff_err).flatten()) + np.std((diff/diff_err).flatten()), color='red', ls=':') print("$\chi$ stats \n", \ "mean",np.round(np.mean((diff/diff_err).flatten()),2),\ ", mean err", np.round(np.std((diff/diff_err).flatten())/np.sqrt(25),2),\ ", std", np.round(np.std((diff/diff_err).flatten()),2),\ ", std approx err", np.round(np.std((diff/diff_err).flatten())/np.sqrt(2*(25-1)),2)) ``` ## Profile plot with the model corresponding to the fitted mass ``` model_for_fitted_mass = np.zeros(z_cl.shape,dtype=object) model_for_fitted_mass_min = np.zeros(z_cl.shape,dtype=object) model_for_fitted_mass_max = np.zeros(z_cl.shape,dtype=object) for i in range(z_cl.shape[0]): for j in range(z_cl.shape[1]): model_for_fitted_mass[i,j] = magnification_biais_model(rp_Mpc, mass_eval[i,j], z_cl[i,j], alpha_cut, zrand, cosmo_clmm, delta_so, massdef=mdef[1], Mc_relation=Mc_relation)[0] model_for_fitted_mass_min[i,j] = magnification_biais_model(rp_Mpc, mass_min[i,j], z_cl[i,j], alpha_cut, zrand, 
cosmo_clmm, delta_so, massdef=mdef[1], Mc_relation=Mc_relation)[0] model_for_fitted_mass_max[i,j] = magnification_biais_model(rp_Mpc, mass_max[i,j], z_cl[i,j], alpha_cut, zrand, cosmo_clmm, delta_so, massdef=mdef[1], Mc_relation=Mc_relation)[0] fig, axes = plt.subplots(5,5, figsize=[20,15], sharex=True) corr = np.mean(gal_cat['magnification']) - 1 for i,h in zip([0,1,2,3,4],range(5)): for j,k in zip([0,1,2,3,4],range(5)): ax = axes[5-1-k,h] ax.fill_between(quant[i,j][0], y1= quant[i,j][1] - np.sqrt(np.diag(quant[i,j][2])),\ y2 = quant[i,j][1] + np.sqrt(np.diag(quant[i,j][2])),color = 'grey', alpha=0.4) expected_mu_bias = mu_bias(quant_NK[i,j][1] - corr, alpha_cut) - 1. expected_mu_bias_err = expected_mu_bias * (alpha_cut -1 ) * np.sqrt(np.diag(quant_NK[i,j][2])) /(quant_NK[i,j][1]) ax.errorbar(quant_NK[i,j][0], expected_mu_bias, yerr = expected_mu_bias_err, fmt='r.', label = 'predicted from meas. $\mu$') ax.axvline(cosmo.kpc_proper_per_arcmin(z_cl[i,j]).to(u.Mpc/u.arcmin).value*healpy.nside2resol(4096, arcmin = True), linestyle="dotted", color='grey', label ='healpix resol.') ax.text(0.55, 0.80, "<z>="+str(round(z_cl[i,j],2)), transform=ax.transAxes, fontsize='x-small') ax.text(0.55, 0.90, "<M/1e14>="+str(round(mass_cl[i,j]/1e14,2)), transform=ax.transAxes, fontsize='x-small'); ax.set_xlabel('$\\theta$ [Mpc]') ax.set_ylabel('$\delta_{\mu}$') ax.plot(rp_Mpc, model_mbias[:,i,j],'k--') ax.fill_between(rp_Mpc, y1 = model_for_fitted_mass_min[i,j], y2 = model_for_fitted_mass_max[i,j],color='red', alpha=0.5) plt.tight_layout() axes[0,0].set_xscale('log') axes[0,0].set_xlim(0.1,8) for i in range(axes.shape[0]): axes[4,i].set_ylim(-0.2,0.6) axes[3,i].set_ylim(-0.2,1.3) axes[2,i].set_ylim(-0.2,1.3) axes[1,i].set_ylim(-0.2,2.0) axes[0,i].set_ylim(-0.2,2.5) ```
github_jupyter
``` from __future__ import print_function import sys import numpy as np from time import time import matplotlib.pyplot as plt from tqdm import tqdm import math import struct import binascii sys.path.append('/home/xilinx') from pynq import Overlay from pynq import allocate def float2bytes(fp): packNo = struct.pack('>f', np.float32(fp)) return int(binascii.b2a_hex(packNo), 16) print("Entry:", sys.argv[0]) print("System argument(s):", len(sys.argv)) print("Start of \"" + sys.argv[0] + "\"") # Overlay and IP ol = Overlay("/home/xilinx/xrwang/SQA_Opt3.bit") ipSQA = ol.QuantumMonteCarloOpt3_0 ipDMAIn = ol.axi_dma_0 # Number of Spins and Number of Trotters and Wirte Trotters numSpins = 1024 numTrotters = 8 trotters = np.random.randint(2, size=(numTrotters*numSpins)) k = 0 for addr in tqdm(range(0x400, 0x2400, 0x04)): # 8 * 1024 * 1 Byte (it extend boolean into uint8) tmp = (trotters[k+3] << 24) + (trotters[k+2] << 16) + (trotters[k+1] << 8) + (trotters[k]) ipSQA.write(addr, int(tmp)) k += 4 # Generate Random Numbers rndNum = np.ndarray(shape=(numSpins), dtype=np.float32) for i in tqdm(range(numSpins)): # rndNum[i] = np.random.randn() rndNum[i] = i+1 rndNum /= numSpins rndNum # Generate J coupling inBuffer0 = allocate(shape=(numSpins,numSpins), dtype=np.float32) for i in tqdm(range(numSpins)): for j in range(numSpins): inBuffer0[i][j] = - rndNum[i] * rndNum[j] # Some Constant Kernel Arguments ipSQA.write(0x10, numTrotters) # nTrot ipSQA.write(0x18, numSpins) # nSpin for addr in range(0x2400, 0x3400, 0x04): ipSQA.write(addr, 0) # h[i] # Iterations Parameters iter = 500 maxBeta = 8.0 Beta = 1.0 / 4096.0 G0 = 8.0 dBeta = (maxBeta-Beta) / iter # Iterations timeList = [] trottersList = [] for i in tqdm(range(iter)): # # Write Random Numbers (8*1024*4Bytes) # rn = np.random.uniform(0.0, 1.0, size=numTrotters*numSpins) # rn = np.log(rn) * numTrotters # Generate Jperp Gamma = G0 * (1.0 - i/iter) Jperp = -0.5 * np.log(np.tanh((Gamma/numTrotters) * Beta)) / Beta # # Write Random Nubmers # k = 0 # for addr in range(0x4000, 0xC000, 0x04): # ipSQA.write(addr, float2bytes(rn[k])) # k += 1 # Write Beta & Jperp ipSQA.write(0x3400, float2bytes(Jperp)) ipSQA.write(0x3408, float2bytes(Beta)) timeKernelStart = time() # Start Kernel ipSQA.write(0x00, 0x01) # Write Jcoup Stream ipDMAIn.sendchannel.transfer(inBuffer0) # Stream of Jcoup ipDMAIn.sendchannel.wait() # Wait at Here while (ipSQA.read(0x00) & 0x4) == 0x0: continue timeKernelEnd = time() timeList.append(timeKernelEnd - timeKernelStart) # Beta Incremental Beta += dBeta k = 0 newTrotters=np.ndarray(shape=numTrotters*numSpins) for addr in range(0x400, 0x2400, 0x04): # 8 * 1024 * 1 Byte (it extend boolean into uint8) tmp = ipSQA.read(addr) newTrotters[k] = (tmp) & 0x01 newTrotters[k+1] = (tmp>>8) & 0x01 newTrotters[k+2] = (tmp>>16) & 0x01 newTrotters[k+3] = (tmp>>24) & 0x01 k += 4 trottersList.append(newTrotters) print("Kernel execution time: " + str(np.sum(timeList)) + " s") best = (0,0,0,0,10e22) sumEnergy = [] k = 0 for trotters in tqdm(trottersList): a = 0 b = 0 sumE = 0 k += 1 for t in range(numTrotters): for i in range(numSpins): if trotters[t*numSpins+i] == 0: a += rndNum[i] else: b += rndNum[i] E = (a-b)**2 sumE += E if best[4] > E : best = (k, t, a, b, E) sumEnergy.append(sumE) plt.figure(figsize=(30,10)) plt.plot(sumEnergy) best ```
github_jupyter
**NOTE: A version of this post is on the PyMC3 [examples](https://docs.pymc.io/notebooks/blackbox_external_likelihood.html) page.** <!-- PELICAN_BEGIN_SUMMARY --> [PyMC3](https://docs.pymc.io/index.html) is a great tool for doing Bayesian inference and parameter estimation. It has a load of [in-built probability distributions](https://docs.pymc.io/api/distributions.html) that you can use to set up priors and likelihood functions for your particular model. You can even create your own [custom distributions](https://docs.pymc.io/prob_dists.html#custom-distributions). However, this is not necessarily that simple if you have a model function, or probability distribution, that, for example, relies on an external code that you have little/no control over (and may even be, for example, wrapped `C` code rather than Python). This can be problematic when you need to pass parameters set as PyMC3 distributions to these external functions; your external function probably wants you to pass it floating point numbers rather than PyMC3 distributions! <!-- PELICAN_END_SUMMARY --> ```python import pymc3 as pm from external_module import my_external_func # your external function! # set up your model with pm.Model(): # your external function takes two parameters, a and b, with Uniform priors a = pm.Uniform('a', lower=0., upper=1.) b = pm.Uniform('b', lower=0., upper=1.) m = my_external_func(a, b) # <--- this is not going to work! ``` Another issue is that if you want to be able to use the gradient-based step samplers like [NUTS](https://docs.pymc.io/api/inference.html#module-pymc3.step_methods.hmc.nuts) and [Hamiltonian Monte Carlo (HMC)](https://docs.pymc.io/api/inference.html#hamiltonian-monte-carlo) then your model/likelihood needs a gradient to be defined. If you have a model that is defined as a set of Theano operators then this is no problem - internally it will be able to do automatic differentiation - but if your model is essentially a "black box" then you won't necessarily know what the gradients are. Defining a model/likelihood that PyMC3 can use that calls your "black box" function is possible, but it relies on creating a [custom Theano Op](https://docs.pymc.io/advanced_theano.html#writing-custom-theano-ops). There are many [threads](https://discourse.pymc.io/search?q=as_op) on the PyMC3 [discussion forum](https://discourse.pymc.io/) about this (e.g., [here](https://discourse.pymc.io/t/custom-theano-op-to-do-numerical-integration/734), [here](https://discourse.pymc.io/t/using-pm-densitydist-and-customized-likelihood-with-a-black-box-function/1760) and [here](https://discourse.pymc.io/t/connecting-pymc3-to-external-code-help-with-understanding-theano-custom-ops/670)), but I couldn't find any clear example that described doing what I mention above. So, thanks to a very nice example [sent](https://discourse.pymc.io/t/connecting-pymc3-to-external-code-help-with-understanding-theano-custom-ops/670/7?u=mattpitkin) to me by [Jørgen Midtbø](https://github.com/jorgenem/), I have created what I hope is a clear description. Do let [me](https://twitter.com/matt_pitkin) know if you have any questions/spot any mistakes. In the examples below, I'm going to create a very simple model and log-likelihood function in [Cython](http://cython.org/). I use Cython just as an example to show what you might need if calling external `C` codes, but you could in fact be using pure Python codes.
The log-likelihood function I use is actually just a [Normal distribution](https://en.wikipedia.org/wiki/Normal_distribution), so this is obviously overkill (and I'll compare it to doing the same thing purely with PyMC3 distributions), but should provide a simple to follow demonstration. ``` %matplotlib inline %load_ext Cython import numpy as np import pymc3 as pm import theano import theano.tensor as tt # for reproducibility here's some version info for modules used in this notebook import platform import cython import IPython import matplotlib import emcee import corner import os print("Python version: {}".format(platform.python_version())) print("IPython version: {}".format(IPython.__version__)) print("Cython version: {}".format(cython.__version__)) print("GSL version: {}".format(os.popen('gsl-config --version').read().strip())) print("Numpy version: {}".format(np.__version__)) print("Theano version: {}".format(theano.__version__)) print("PyMC3 version: {}".format(pm.__version__)) print("Matplotlib version: {}".format(matplotlib.__version__)) print("emcee version: {}".format(emcee.__version__)) print("corner version: {}".format(corner.__version__)) ``` First, I'll define my "_super-complicated_"&trade; model (a straight line!), which is parameterised by two variables (a gradient `m` and a y-intercept `c`) and calculated at a vector of points `x`. I'll define the model in [Cython](http://cython.org/) and call [GSL](https://www.gnu.org/software/gsl/) functions just to show that you could be calling some other `C` library that you need. In this case, the model parameters are all packed into a list/array/tuple called `theta`. I'll also define my "_really-complicated_"&trade; log-likelihood function (a Normal log-likelihood that ignores the normalisation), which takes in the list/array/tuple of model parameter values `theta`, the points at which to calculate the model `x`, the vector of "observed" data points `data`, and the standard deviation of the noise in the data `sigma`. ``` %%cython -I/usr/include -L/usr/lib/x86_64-linux-gnu -lgsl -lgslcblas -lm import cython cimport cython import numpy as np cimport numpy as np ### STUFF FOR USING GSL (FEEL FREE TO IGNORE!) ### # declare GSL vector structure and functions cdef extern from "gsl/gsl_block.h": cdef struct gsl_block: size_t size double * data cdef extern from "gsl/gsl_vector.h": cdef struct gsl_vector: size_t size size_t stride double * data gsl_block * block int owner ctypedef struct gsl_vector_view: gsl_vector vector int gsl_vector_scale (gsl_vector * a, const double x) nogil int gsl_vector_add_constant (gsl_vector * a, const double x) nogil gsl_vector_view gsl_vector_view_array (double * base, size_t n) nogil ################################################### # define your super-complicated model that uses load of external codes cpdef my_model(theta, np.ndarray[np.float64_t, ndim=1] x): """ A straight line! 
Note: This function could simply be: m, c = theta return m*x + c but I've made it more complicated for demonstration purposes """ m, c = theta # unpack line gradient and y-intercept cdef size_t length = len(x) # length of x cdef np.ndarray line = np.copy(x) # make copy of x vector cdef gsl_vector_view lineview # create a view of the vector lineview = gsl_vector_view_array(<double *>line.data, length) # multiply x by m gsl_vector_scale(&lineview.vector, <double>m) # add c gsl_vector_add_constant(&lineview.vector, <double>c) # return the numpy array return line # define your really-complicated likelihood function that uses loads of external codes cpdef my_loglike(theta, np.ndarray[np.float64_t, ndim=1] x, np.ndarray[np.float64_t, ndim=1] data, sigma): """ A Gaussian log-likelihood function for a model with parameters given in theta """ model = my_model(theta, x) return -(0.5/sigma**2)*np.sum((data - model)**2) ``` Now, as things are, if we wanted to sample from this log-likelihood function, using certain prior distributions for the model parameters (gradient and y-intercept) using PyMC3 we might try something like this (using a [PyMC3 `DensityDist`](https://docs.pymc.io/prob_dists.html#custom-distributions)): ```python import pymc3 as pm # create/read in our "data" (I'll show this in the real example below) x = ... sigma = ... data = ... with pm.Model(): # set priors on model gradient and y-intercept m = pm.Uniform('m', lower=-10., upper=10.) c = pm.Uniform('c', lower=-10., upper=10.) # create custom distribution pm.DensityDist('likelihood', my_loglike, observed={'theta': (m, c), 'x': x, 'data': data, 'sigma': sigma}) # sample from the distribution trace = pm.sample(1000) ``` But, this will give an error like: ``` ValueError: setting an array element with a sequence. ``` This is because `m` and `c` are Theano tensor-type objects. So, what we actually need to do is create a [Theano Op](http://deeplearning.net/software/theano/extending/extending_theano.html). This will be a new class that wraps our log-likelihood function (or just our model function, if that is all that is required) into something that can take in Theano tensor objects, but internally can cast them as floating point values that can be passed to our log-likelihood function. I will do this below, initially without defining a [`grad()` method](http://deeplearning.net/software/theano/extending/op.html#grad) for the Op. ``` # define a theano Op for our likelihood function class LogLike(tt.Op): """ Specify what type of object will be passed and returned to the Op when it is called. In our case we will be passing it a vector of values (the parameters that define our model) and returning a single "scalar" value (the log-likelihood) """ itypes = [tt.dvector] # expects a vector of parameter values when called otypes = [tt.dscalar] # outputs a single scalar value (the log likelihood) def __init__(self, loglike, data, x, sigma): """ Initialise the Op with various things that our log-likelihood function requires. Below are the things that are needed in this particular example. Parameters ---------- loglike: The log-likelihood (or whatever) function we've defined data: The "observed" data that our log-likelihood function takes in x: The dependent variable (aka 'x') that our model requires sigma: The noise standard deviation that our function requires.
""" # add inputs as class attributes self.likelihood = loglike self.data = data self.x = x self.sigma = sigma def perform(self, node, inputs, outputs): # the method that is used when calling the Op theta, = inputs # this will contain my variables # call the log-likelihood function logl = self.likelihood(theta, self.x, self.data, self.sigma) outputs[0][0] = np.array(logl) # output the log-likelihood ``` Now, let's use this Op to repeat the example shown above. To do this I'll create some data containing a straight line with additive Gaussian noise (with a mean of zero and a standard deviation of `sigma`). I'll set uniform prior distributions on the gradient and y-intercept. As I've not set the `grad()` method of the Op PyMC3 will not be able to use the gradient-based samplers, so will fall back to using the [Slice](https://docs.pymc.io/api/inference.html#module-pymc3.step_methods.slicer) sampler. ``` # set up our data N = 10 # number of data points sigma = 1. # standard deviation of noise x = np.linspace(0., 9., N) mtrue = 0.4 # true gradient ctrue = 3. # true y-intercept truemodel = my_model([mtrue, ctrue], x) # make data data = sigma*np.random.randn(N) + truemodel ndraws = 3000 # number of draws from the distribution nburn = 1000 # number of "burn-in points" (which we'll discard) # create our Op logl = LogLike(my_loglike, data, x, sigma) # use PyMC3 to sampler from log-likelihood with pm.Model(): # uniform priors on m and c m = pm.Uniform('m', lower=-10., upper=10.) c = pm.Uniform('c', lower=-10., upper=10.) # convert m and c to a tensor vector theta = tt.as_tensor_variable([m, c]) # use a DensityDist (use a lamdba function to "call" the Op) pm.DensityDist('likelihood', lambda v: logl(v), observed={'v': theta}) trace = pm.sample(ndraws, tune=nburn, discard_tuned_samples=True) # plot the traces _ = pm.traceplot(trace, lines=(('m', {}, [mtrue]), ('c', {}, [ctrue]))) # put the chains in an array (for later!) samples_pymc3 = np.vstack((trace['m'], trace['c'])).T ``` What if we wanted to use NUTS or HMC? If we knew the analytical derivatives of the model/likelihood function then we could add a [`grad()` method](http://deeplearning.net/software/theano/extending/op.html#grad) to the Op using that analytical form. But, what if we don't know the analytical form. If our model/likelihood is purely Python and made up of standard maths operators and Numpy functions, then the [autograd](https://github.com/HIPS/autograd) module could potentially be used to find gradients (also, see [here](https://github.com/ActiveState/code/blob/master/recipes/Python/580610_Auto_differentiation/recipe-580610.py) for a nice Python example of automatic differentiation). But, if our model/likelihood truely is a "black box" then we can just use the good-old-fashioned [finite difference](https://en.wikipedia.org/wiki/Finite_difference) to find the gradients - this can be slow, especially if there are a large number of variables, or the model takes a long time to evaluate. Below, I've written a function that uses finite difference (the central difference) to find gradients - it uses an iterative method with successively smaller step sizes to check that the gradient converges. But, you could do something far simpler and just use, for example, the SciPy [`approx_fprime`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.approx_fprime.html) function. 
``` import warnings def gradients(vals, func, releps=1e-3, abseps=None, mineps=1e-9, reltol=1e-3, epsscale=0.5): """ Calculate the partial derivatives of a function at a set of values. The derivatives are calculated using the central difference, using an iterative method to check that the values converge as step size decreases. Parameters ---------- vals: array_like A set of values, that are passed to a function, at which to calculate the gradient of that function func: A function that takes in an array of values. releps: float, array_like, 1e-3 The initial relative step size for calculating the derivative. abseps: float, array_like, None The initial absolute step size for calculating the derivative. This overrides `releps` if set. `releps` is set then that is used. mineps: float, 1e-9 The minimum relative step size at which to stop iterations if no convergence is achieved. epsscale: float, 0.5 The factor by which releps if scaled in each iteration. Returns ------- grads: array_like An array of gradients for each non-fixed value. """ grads = np.zeros(len(vals)) # maximum number of times the gradient can change sign flipflopmax = 10. # set steps if abseps is None: if isinstance(releps, float): eps = np.abs(vals)*releps eps[eps == 0.] = releps # if any values are zero set eps to releps teps = releps*np.ones(len(vals)) elif isinstance(releps, (list, np.ndarray)): if len(releps) != len(vals): raise ValueError("Problem with input relative step sizes") eps = np.multiply(np.abs(vals), releps) eps[eps == 0.] = np.array(releps)[eps == 0.] teps = releps else: raise RuntimeError("Relative step sizes are not a recognised type!") else: if isinstance(abseps, float): eps = abseps*np.ones(len(vals)) elif isinstance(abseps, (list, np.ndarray)): if len(abseps) != len(vals): raise ValueError("Problem with input absolute step sizes") eps = np.array(abseps) else: raise RuntimeError("Absolute step sizes are not a recognised type!") teps = eps # for each value in vals calculate the gradient count = 0 for i in range(len(vals)): # initial parameter diffs leps = eps[i] cureps = teps[i] flipflop = 0 # get central finite difference fvals = np.copy(vals) bvals = np.copy(vals) # central difference fvals[i] += 0.5*leps # change forwards distance to half eps bvals[i] -= 0.5*leps # change backwards distance to half eps cdiff = (func(fvals)-func(bvals))/leps while 1: fvals[i] -= 0.5*leps # remove old step bvals[i] += 0.5*leps # change the difference by a factor of two cureps *= epsscale if cureps < mineps or flipflop > flipflopmax: # if no convergence set flat derivative (TODO: check if there is a better thing to do instead) warnings.warn("Derivative calculation did not converge: setting flat derivative.") grads[count] = 0. break leps *= epsscale # central difference fvals[i] += 0.5*leps # change forwards distance to half eps bvals[i] -= 0.5*leps # change backwards distance to half eps cdiffnew = (func(fvals)-func(bvals))/leps if cdiffnew == cdiff: grads[count] = cdiff break # check whether previous diff and current diff are the same within reltol rat = (cdiff/cdiffnew) if np.isfinite(rat) and rat > 0.: # gradient has not changed sign if np.abs(1.-rat) < reltol: grads[count] = cdiffnew break else: cdiff = cdiffnew continue else: cdiff = cdiffnew flipflop += 1 continue count += 1 return grads ``` So, now we can just redefine our Op with a `grad()` method, right? It's not quite so simple! 
The `grad()` method itself requires that its inputs are Theano tensor variables, whereas our `gradients` function above, like our `my_loglike` function, wants a list of floating point values. So, we need to define another Op that calculates the gradients. Below, I define a new version of the `LogLike` Op, called `LogLikeWithGrad` this time, that has a `grad()` method. This is followed by anothor Op called `LogLikeGrad` that, when called with a vector of Theano tensor variables, returns another vector of values that are the gradients (i.e., the [Jacobian](https://en.wikipedia.org/wiki/Jacobian_matrix_and_determinant)) of our log-likelihood function at those values. Note that the `grad()` method itself does not return the gradients directly, but instead returns the [Jacobian](https://en.wikipedia.org/wiki/Jacobian_matrix_and_determinant)-vector product (you can hopefully just copy what I've done and not worry about what this means too much!). ``` # define a theano Op for our likelihood function class LogLikeWithGrad(tt.Op): itypes = [tt.dvector] # expects a vector of parameter values when called otypes = [tt.dscalar] # outputs a single scalar value (the log likelihood) def __init__(self, loglike, data, x, sigma): """ Initialise with various things that the function requires. Below are the things that are needed in this particular example. Parameters ---------- loglike: The log-likelihood (or whatever) function we've defined data: The "observed" data that our log-likelihood function takes in x: The dependent variable (aka 'x') that our model requires sigma: The noise standard deviation that out function requires. """ # add inputs as class attributes self.likelihood = loglike self.data = data self.x = x self.sigma = sigma # initialise the gradient Op (below) self.logpgrad = LogLikeGrad(self.likelihood, self.data, self.x, self.sigma) def perform(self, node, inputs, outputs): # the method that is used when calling the Op theta, = inputs # this will contain my variables # call the log-likelihood function logl = self.likelihood(theta, self.x, self.data, self.sigma) outputs[0][0] = np.array(logl) # output the log-likelihood def grad(self, inputs, g): # the method that calculates the gradients - it actually returns the # vector-Jacobian product - g[0] is a vector of parameter values theta, = inputs # our parameters return [g[0]*self.logpgrad(theta)] class LogLikeGrad(tt.Op): """ This Op will be called with a vector of values and also return a vector of values - the gradients in each dimension. """ itypes = [tt.dvector] otypes = [tt.dvector] def __init__(self, loglike, data, x, sigma): """ Initialise with various things that the function requires. Below are the things that are needed in this particular example. Parameters ---------- loglike: The log-likelihood (or whatever) function we've defined data: The "observed" data that our log-likelihood function takes in x: The dependent variable (aka 'x') that our model requires sigma: The noise standard deviation that out function requires. """ # add inputs as class attributes self.likelihood = loglike self.data = data self.x = x self.sigma = sigma def perform(self, node, inputs, outputs): theta, = inputs # define version of likelihood function to pass to derivative function def lnlike(values): return self.likelihood(values, self.x, self.data, self.sigma) # calculate gradients grads = gradients(theta, lnlike) outputs[0][0] = grads ``` Now, let's re-run PyMC3 with our new "grad"-ed Op. This time it will be able to automatically use NUTS. 
_Aside: As an addition, I've also defined a `my_model_random` function (note that, in this case, it requires that the `x` variable needed by the model function is global). This is used by the `random` argument of [`DensityDist`](https://docs.pymc.io/api/distributions/utilities.html?highlight=densitydist#pymc3.distributions.DensityDist) to define a function to use to draw instances of the model using the sampled parameters for [posterior predictive](https://docs.pymc.io/api/inference.html?highlight=sample_posterior_predictive#pymc3.sampling.sample_posterior_predictive) checks. This is only really needed if you want to fo posterior predictive check, but otherwise can be left out._ ``` # create our Op logl = LogLikeWithGrad(my_loglike, data, x, sigma) def my_model_random(point=None, size=None): """ Draw posterior predictive samples from model. """ return my_model((point["m"], point["c"]), x) # use PyMC3 to sampler from log-likelihood with pm.Model() as opmodel: # uniform priors on m and c m = pm.Uniform('m', lower=-10., upper=10.) c = pm.Uniform('c', lower=-10., upper=10.) # convert m and c to a tensor vector theta = tt.as_tensor_variable([m, c]) # use a DensityDist pm.DensityDist( 'likelihood', lambda v: logl(v), observed={'v': theta}, random=my_model_random, ) trace = pm.sample(ndraws, tune=nburn, discard_tuned_samples=True) # plot the traces _ = pm.traceplot(trace, lines=(('m', {}, [mtrue]), ('c', {}, [ctrue]))) # put the chains in an array (for later!) samples_pymc3_2 = np.vstack((trace['m'], trace['c'])).T # just because we can, let's draw posterior predictive samples of the model ppc = pm.sample_posterior_predictive(trace, samples=250, model=opmodel) for vals in ppc['likelihood']: matplotlib.pyplot.plot(x, vals, color='b', alpha=0.05, lw=3) matplotlib.pyplot.plot(x, my_model((mtrue, ctrue), x), 'k--', lw=2) ``` Now, finally, just to check things actually worked as we might expect, let's do the same thing purely using PyMC3 distributions (because in this simple example we can!) ``` with pm.Model() as pymodel: # uniform priors on m and c m = pm.Uniform('m', lower=-10., upper=10.) c = pm.Uniform('c', lower=-10., upper=10.) # convert m and c to a tensor vector theta = tt.as_tensor_variable([m, c]) # use a Normal distribution pm.Normal('likelihood', mu=(m*x + c), sd = sigma, observed=data) trace = pm.sample(ndraws, tune=nburn, discard_tuned_samples=True) # plot the traces _ = pm.traceplot(trace, lines=(('m', {}, [mtrue]), ('c', {}, [ctrue]))) # put the chains in an array (for later!) samples_pymc3_3 = np.vstack((trace['m'], trace['c'])).T ``` To check that they match let's plot all the examples together and also find the autocorrelation lengths. 
``` import warnings warnings.simplefilter(action='ignore', category=FutureWarning) # supress emcee autocorr FutureWarning matplotlib.rcParams['font.size'] = 22 hist2dkwargs = {'plot_datapoints': False, 'plot_density': False, 'levels': 1.0 - np.exp(-0.5 * np.arange(1.5, 2.1, 0.5) ** 2)} # roughly 1 and 2 sigma colors = ['r', 'g', 'b'] labels = ['Theanp Op (no grad)', 'Theano Op (with grad)', 'Pure PyMC3'] for i, samples in enumerate([samples_pymc3, samples_pymc3_2, samples_pymc3_3]): # get maximum chain autocorrelartion length autocorrlen = int(np.max(emcee.autocorr.integrated_time(samples, c=3))); print('Auto-correlation length ({}): {}'.format(labels[i], autocorrlen)) if i == 0: fig = corner.corner(samples, labels=[r"$m$", r"$c$"], color=colors[i], hist_kwargs={'density': True}, **hist2dkwargs, truths=[mtrue, ctrue]) else: corner.corner(samples, color=colors[i], hist_kwargs={'density': True}, fig=fig, **hist2dkwargs) fig.set_size_inches(9, 9) ``` We can now check that the gradient Op works as we expect it to. First, just create and call the `LogLikeGrad` class, which should return the gradient directly (note that we have to create a [Theano function](http://deeplearning.net/software/theano/library/compile/function.html) to convert the output of the Op to an array). Secondly, we call the gradient from `LogLikeWithGrad` by using the [Theano tensor gradient](http://deeplearning.net/software/theano/library/gradient.html#theano.gradient.grad) function. Finally, we will check the gradient returned by the PyMC3 model for a Normal distribution, which should be the same as the log-likelihood function we defined. In all cases we evaluate the gradients at the true values of the model function (the straight line) that was created. ``` # test the gradient Op by direct call theano.config.compute_test_value = "ignore" theano.config.exception_verbosity = "high" var = tt.dvector() test_grad_op = LogLikeGrad(my_loglike, data, x, sigma) test_grad_op_func = theano.function([var], test_grad_op(var)) grad_vals = test_grad_op_func([mtrue, ctrue]) print('Gradient returned by "LogLikeGrad": {}'.format(grad_vals)) # test the gradient called through LogLikeWithGrad test_gradded_op = LogLikeWithGrad(my_loglike, data, x, sigma) test_gradded_op_grad = tt.grad(test_gradded_op(var), var) test_gradded_op_grad_func = theano.function([var], test_gradded_op_grad) grad_vals_2 = test_gradded_op_grad_func([mtrue, ctrue]) print('Gradient returned by "LogLikeWithGrad": {}'.format(grad_vals_2)) # test the gradient that PyMC3 uses for the Normal log likelihood test_model = pm.Model() with test_model: m = pm.Uniform('m', lower=-10., upper=10.) c = pm.Uniform('c', lower=-10., upper=10.) pm.Normal('likelihood', mu=(m*x + c), sigma=sigma, observed=data) gradfunc = test_model.logp_dlogp_function([m, c], dtype=None) gradfunc.set_extra_values({'m_interval__': mtrue, 'c_interval__': ctrue}) grad_vals_pymc3 = gradfunc(np.array([mtrue, ctrue]))[1] # get dlogp values print('Gradient returned by PyMC3 "Normal" distribution: {}'.format(grad_vals_pymc3)) ``` We can also do some [profiling](http://docs.pymc.io/notebooks/profiling.html) of the Op, as used within a PyMC3 Model, to check performance. First, we'll profile using the `LogLikeWithGrad` Op, and then doing the same thing purely using PyMC3 distributions. 
``` # profile logpt using our Op opmodel.profile(opmodel.logpt).summary() # profile using our PyMC3 distribution pymodel.profile(pymodel.logpt).summary() ``` The Jupyter notebook used to produce this page can be downloaded from [here](http://mattpitkin.github.io/samplers-demo/downloads/notebooks/PyMC3CustomExternalLikelihood.ipynb).
github_jupyter
![Panel HighCharts Logo](https://raw.githubusercontent.com/MarcSkovMadsen/panel-highcharts/main/assets/images/panel-highcharts-logo.png) # 📈 Panel HighMap Reference Guide The [Panel](https://panel.holoviz.org) `HighMap` pane allows you to use the powerful [HighCharts](https://www.highcharts.com/) [Maps](https://www.highcharts.com/products/maps/) from within the comfort of Python 🐍 and Panel ❤️. ## License The `panel-highcharts` python package and repository is open source and free to use (MIT License), however the **Highcharts js library requires a license for commercial use**. For more info see the Highcharts license [FAQs](https://shop.highsoft.com/faq). ## Parameters: For layout and styling related parameters see the [Panel Customization Guide](https://panel.holoviz.org/user_guide/Customization.html). * **``object``** (dict): The initial user `configuration` of the `chart`. * **``object_update``** (dict) Incremental update to the existing `configuration` of the `chart`. * **``event``** (dict): Events like `click` and `mouseOver` if subscribed to using the `@` terminology. ## Methods * **``add_series``**: The method adds a new series to the chart. Takes the `options`, `redraw` and `animation` arguments. ___ # Usage ## Imports You must import something from panel_highcharts before you run `pn.extension('highmap')` ``` import panel_highcharts as ph ``` Additionally you can specify extra Highcharts `js_files` to include. `mapdata` can be supplied as a list. See the full list at [https://code.highcharts.com](https://code.highcharts.com) ``` ph.config.js_files(mapdata=["custom/europe"]) # Imports https://code.highcharts.com/mapdata/custom/europe.js import panel as pn pn.extension('highmap') ``` ## Configuration The `HighChart` pane is configured by providing a simple `dict` to the `object` parameter. For examples see the HighCharts [demos](https://www.highcharts.com/demo). ``` configuration = { "chart": {"map": "custom/europe", "borderWidth": 1}, "title": {"text": "Nordic countries"}, "subtitle": {"text": "Demo of drawing all areas in the map, only highlighting partial data"}, "legend": {"enabled": False}, "series": [ { "name": "Country", "data": [["is", 1], ["no", 1], ["se", 1], ["dk", 1], ["fi", 1]], "dataLabels": { "enabled": True, "color": "#FFFFFF", "formatter": """function () { if (this.point.value) { return this.point.name; } }""", }, "tooltip": {"headerFormat": "", "pointFormat": "{point.name}"}, } ], } chart = ph.HighMap(object=configuration, sizing_mode="stretch_both", min_height=600) chart ``` ## Layout ``` settings = pn.WidgetBox( pn.Param( chart, parameters=["height", "width", "sizing_mode", "margin", "object", "object_update", "event", ], widgets={"object": pn.widgets.LiteralInput, "object_update": pn.widgets.LiteralInput, "event": pn.widgets.StaticText}, sizing_mode="fixed", show_name=False, width=250, ) ) pn.Row(settings, chart, sizing_mode="stretch_both") ``` Try changing the `sizing_mode` to `fixed` and the `width` to `400`. ## Updates You can *update* the chart by providing a partial `configuration` to the `object_update` parameter. ``` object_update = { "title": {"text": "Panel HighMap - Nordic countries"}, } chart.object_update=object_update ``` Verify that the `title` and `series` was updated in the chart above. ## Events You can subscribe to chart events using an the `@` notation as shown below. If you add a string like `@name`, then the key-value pair `'channel': 'name'` will be added to the `event` dictionary. 
``` event_update = { "series": [ { "allowPointSelect": "true", "point": { "events": { "click": "@click;}", "mouseOver": "@mouseOverFun", "select": "@select", "unselect": "@unselect", } }, "events": { "mouseOut": "@mouseOutFun", } } ] } chart.object_update=event_update ``` Verify that you can trigger the `click`, `mouseOver`, `select`, `unselect` and `mouseOut` events in the chart above and that the relevant `channel` value is used. ## Javascript You can use Javascript in the configuration via the `function() {...}` notation. ``` js_update = { "series": [ { "dataLabels": { "formatter": """function () { if (this.point.value) { if (this.point.name=="Denmark"){ return "❤️ " + this.point.name; } else { return this.point.name; } } }""", } } ], } chart.object_update=js_update ``` Verify that the data label for Denmark now has a ❤️ prepended in the chart above. # App Finally we can wrap it up into a nice app template. ``` chart.object = configuration = { "chart": {"map": "custom/europe", "borderWidth": 1}, "title": {"text": "Nordic countries"}, "subtitle": {"text": "Demo of drawing all areas in the map, only highlighting partial data"}, "legend": {"enabled": False}, "series": [ { "name": "Country", "data": [["is", 1], ["no", 1], ["se", 1], ["dk", 1], ["fi", 1]], "dataLabels": { "enabled": True, "color": "#FFFFFF", "formatter": """function () { if (this.point.value) { if (this.point.name=="Denmark"){ return "❤️ " + this.point.name; } else { return this.point.name; } } }""", }, "tooltip": {"headerFormat": "", "pointFormat": "{point.name}"}, "allowPointSelect": "true", "point": { "events": { "click": "@click;}", "mouseOver": "@mouseOverFun", "select": "@select", "unselect": "@unselect", } }, "events": { "mouseOut": "@mouseOutFun", } } ], } app = pn.template.FastListTemplate( site="Panel Highcharts", title="HighMap Reference Example", sidebar=[settings], main=[chart] ).servable() ``` You can serve with `panel serve HighMap.ipynb` and explore the app at http://localhost:5006/HighMap. Add the `--autoreload` flag to get *hot reloading* when you save the notebook. ![HighMap Reference Guide](https://raw.githubusercontent.com/MarcSkovMadsen/panel-highcharts/main/assets/images/HighMapApp.gif)
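One method listed in the reference section above but not exercised in this guide is `add_series`. A minimal sketch of how it might be called is shown below; the argument names follow the Methods section at the top of this guide, while the series itself is invented purely for illustration:

```
# Hypothetical example (not from the original guide): append one more series
# to the existing HighMap chart. Argument names follow the Methods section
# above; the country codes and values are made up for illustration.
chart.add_series(
    options={
        "name": "Baltic countries",
        "data": [["ee", 1], ["lv", 1], ["lt", 1]],
    },
    redraw=True,
    animation=True,
)
```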
github_jupyter
# Classification metrics Author: Geraldine Klarenberg Based on the Google Machine Learning Crash Course ## Thresholds In previous lessons, we have talked about using regression models to predict values. But sometimes we are interested in **classifying** things: "spam" vs "not spam", "bark" vs "not barking", etc. Logistic regression is a great tool to use in ML classification models. We can use the outputs from these models by defining **classification thresholds**. For instance, with a threshold of 0.8: if our model tells us there's a probability of at least 0.8 that an email is spam (based on some characteristics), the model classifies it as spam. If the probability estimate is less than 0.8, the model classifies it as "not spam". The threshold allows us to map a logistic regression value to a binary category (the prediction). Thresholds are problem-dependent, so they will have to be tuned for the specific problem you are dealing with. In this lesson we will look at metrics you can use to evaluate a classification model's predictions, and what changing the threshold does to your model and predictions. ## True, false, positive, negative... Now, we could simply look at "accuracy": the ratio of all correct predictions to all predictions. This is simple, intuitive and straightforward. But there are some problems with this approach: * This approach does not work well if there is (class) imbalance; situations where certain negative or positive values or outcomes are rare; * and, most importantly: different kinds of mistakes can have different costs... ### The boy who cried wolf... We all know the story! ![Illustration of the boy who cried wolf](../nb-images/wolfpic.jpg) For this example, we define "there actually is a wolf" as a positive class, and "there is no wolf" as a negative class. The predictions that a model makes can be true or false for both classes, generating 4 outcomes: ![A table showing a confusion matrix based on the story of the boy who cried wolf](../nb-images/confusionmatrix_wolf.png) This table is also called a *confusion matrix*. There are 2 metrics we can derive from these outcomes: precision and recall. ## Precision Precision asks the question: what proportion of the positive predictions was actually correct? To calculate the precision of your model, take all true positives divided by *all* positive predictions: $$\text{Precision} = \frac{TP}{TP+FP}$$ Basically: **did the model cry 'wolf' too often or too little?** **NB** If your model produces no false positives, the precision is 1.0. The more false positives it produces, the further the precision drops below 1 (it can never exceed 1). ### Exercise Calculate the precision of a model with the following outcomes true positives (TP): 1 | false positives (FP): 1 -------|-------- **false negatives (FN): 8** | **true negatives (TN): 90** ## Recall Recall tries to answer the question: what proportion of actual positives was identified correctly? To calculate recall, divide all true positives by the true positives plus the false negatives: $$\text{Recall} = \frac{TP}{TP+FN}$$ Basically: **how many wolves that tried to get into the village did the model actually get?** **NB** If the model produces no false negatives, recall equals 1.0 ### Exercise For the same confusion matrix as above, calculate the recall. ## Balancing precision and recall To evaluate your model, you should look at **both** precision and recall. They are often in tension though: improving one reduces the other.
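As a quick illustration (a sketch added here, using made-up counts rather than the exercise values above), both metrics are one-liners once you have the confusion-matrix counts:

```
# Illustrative sketch: precision and recall from confusion-matrix counts.
# The counts below are hypothetical, not taken from the exercises above.
tp, fp, fn = 30, 10, 20

precision = tp / (tp + fp)   # proportion of positive predictions that were correct
recall = tp / (tp + fn)      # proportion of actual positives that were found
print(precision, recall)     # 0.75 0.6
```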
Lowering the classification threshold improves recall (your model will call wolf at every little sound it hears) but will negatively affect precision (it will call wolf too often). ### Exercise #### Part 1 Look at the outputs of a model that classifies incoming emails as "spam" or "not spam". ![Image of outcomes of a spam/not spam classification model](../nb-images/PrecisionVsRecallBase.svg) The confusion matrix looks as follows: true positives (TP): 8 | false positives (FP): 2 -------|-------- **false negatives (FN): 3** | **true negatives (TN): 17** Calculate the precision and recall for this model. #### Part 2 Now see what happens to the outcomes (below) if we increase the threshold: ![Image of outcomes of a spam/not spam classification model](../nb-images/PrecisionVsRecallRaiseThreshold.svg) The confusion matrix looks as follows: true positives (TP): 7 | false positives (FP): 1 -------|-------- **false negatives (FN): 4** | **true negatives (TN): 18** Calculate the precision and recall again. **Compare the precision and recall from the first and second model. What do you notice?** ## Evaluate model performance We can evaluate the performance of a classification model at all classification thresholds. For each threshold, calculate the *true positive rate* and the *false positive rate*. The true positive rate is synonymous with recall (and sometimes called *sensitivity*) and is thus calculated as $ TPR = \frac{TP}{TP + FN} $ The false positive rate (equal to 1 minus the *specificity*) is: $ FPR = \frac{FP}{FP + TN} $ When you plot the pairs of TPR and FPR for all the different thresholds, you get a Receiver Operating Characteristic (ROC) curve. Below is a typical ROC curve. ![Image of an ROC curve](../nb-images/ROCCurve.svg) To evaluate the model, we look at the area under the curve (AUC). The AUC has a probabilistic interpretation: it represents the probability that a random positive (green) example is positioned to the right of a random negative (red) example. ![Image with predictions ranked according to logistic regression score](../nb-images/AUCPredictionsRanked.svg) So if the AUC is 0.9, that is the probability that such a pair-wise ranking is correct. Below are a few visualizations of AUC results. On top are the distributions of the model outputs for the negative and positive classes; below is the corresponding ROC. ![Image with distributions of positive and negative classes - perfect](../nb-images/TowardsDataScienceAUC_perfect.png) ![Image with AUC - perfect](../nb-images/TowardsDataScienceAUC_perfect2.png) **This AUC suggests a perfect model** (which is suspicious!) ![Image with distributions of positive and negative classes - normal](../nb-images/TowardsDataScienceAUC_normal.png) ![Image with AUC - normal](../nb-images/TowardsDataScienceAUC_normal2.png) **This is what most AUCs look like**. In this case, AUC = 0.7 means that there is a 70% chance the model will be able to distinguish between positive and negative classes. ![Image with distributions of positive and negative classes - worst](../nb-images/TowardsDataScienceAUC_worst.png) ![Image with AUC - worst](../nb-images/TowardsDataScienceAUC_worst2.png) **This is actually the worst case scenario.** This model has no discrimination capacity at all... ## Prediction bias Logistic regression should be unbiased, meaning that the average of the predictions should be more or less equal to the average of the observations.
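Here is a tiny numerical illustration of that statement (a minimal sketch with made-up predicted probabilities and labels, not data from this lesson):

```python
import numpy as np

# Hypothetical predicted probabilities and the corresponding true labels (1 = positive class).
predictions = np.array([0.9, 0.8, 0.2, 0.1, 0.7, 0.3])
labels      = np.array([1,   1,   0,   0,   1,   0])

print(f"average prediction = {predictions.mean():.2f}")
print(f"average label      = {labels.mean():.2f}")
print(f"difference         = {predictions.mean() - labels.mean():.2f}")  # close to 0 for an unbiased model
```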
**Prediction bias** is the difference between the average of the predictions and the average of the labels in a data set. This approach is not perfect: for example, a model that almost always predicts the average label will show very little bias without being a useful model. However, if there **is** bias ("significant nonzero bias"), that means there is something going on that needs to be checked; specifically, the model is wrong about the frequency of positive labels. Possible root causes of prediction bias are: * Incomplete feature set * Noisy data set * Buggy pipeline * Biased training sample * Overly strong regularization ### Buckets and prediction bias For logistic regression, this process is a bit more involved, as the labels assigned to an example are either 0 or 1. So you cannot accurately determine the prediction bias based on one example. You need to group data in "buckets" and examine the prediction bias on those. Prediction bias for logistic regression only makes sense when grouping enough examples together to be able to compare a predicted value (for example, 0.392) to observed values (for example, 0.394). You can create buckets by linearly breaking up the target predictions, or by forming quantiles. The plot below is a calibration plot. Each dot represents a bucket with 1000 values. On the x-axis we have the average value of the predictions for that bucket and on the y-axis the average of the actual observations. Note that the axes are on logarithmic scales. ![Image of a calibration plot with buckets](../nb-images/BucketingBias.svg) ## Coding Recall the logistic regression model we made in the previous lesson. That was a perfect fit, so not that useful when we look at the metrics we just discussed. In the scatter plot with the sepal length and petal width plotted against each other, it is clear that the other two iris species are less separated. Let's use one of these as an example. We'll rework the example so we're classifying irises as "virginica" or "not virginica". ``` import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import load_iris import pandas as pd iris = load_iris() X = iris.data y = iris.target df = pd.DataFrame(X, columns = ['sepal_length(cm)', 'sepal_width(cm)', 'petal_length(cm)', 'petal_width(cm)']) df['species_id'] = y species_map = {0: 'setosa', 1: 'versicolor', 2: 'virginica'} df['species_name'] = df['species_id'].map(species_map) df.head() ``` Now extract the data we need and create the necessary dataframes again. ``` X = np.c_[X[:,0], X[:,3]] y = [] for i in range(len(X)): if i > 99: y.append(1) else: y.append(0) y = np.array(y) plt.scatter(X[:,0], X[:,1], c = y) ``` Create our test and train data, and run a model. The default classification threshold is 0.5. If the predicted probability is > 0.5, the predicted result is 'virginica'. If it is < 0.5, the predicted result is 'not virginica'. ``` random = np.random.permutation(len(X)) x_train = X[random][30:] x_test = X[random][:30] y_train= y[random][30:] y_test = y[random][:30] from sklearn.linear_model import LogisticRegression log_reg = LogisticRegression() log_reg.fit(x_train,y_train) ``` Instead of looking at the probabilities and the plot, like in the last lesson, let's run some classification metrics on the training dataset. If you use ".score", you get the mean accuracy. ``` log_reg.score(x_train, y_train) ``` Let's predict values and see what this output means and how we can look at other metrics.
``` predictions = log_reg.predict(x_train) predictions, y_train ``` There is also a function to look at the confusion matrix. Note that the output does not have quite the same layout as the confusion matrices we showed earlier: scikit-learn puts the true classes in the rows and the predicted classes in the columns, starting with the negative class (here 'not virginica', label 0), so the matrix reads true negatives (TN) | false positives (FP) -------|-------- **false negatives (FN)** | **true positives (TP)** ``` from sklearn.metrics import confusion_matrix confusion_matrix(y_train, predictions) ``` Indeed, for the accuracy calculation: we predicted 81 + 33 = 114 correct (the true negatives and true positives on the diagonal), and 114/120 (remember, our training data had 120 points) = 0.95. There is also a function to calculate recall and precision. Since we also have a testing data set, we will check later what the metrics look like for that as well. ``` from sklearn.metrics import recall_score recall_score(y_train, predictions) from sklearn.metrics import precision_score precision_score(y_train, predictions) ``` And, of course, there are also built-in functions to check the ROC curve and AUC! For these functions, the inputs are the labels of the original dataset and the predicted probabilities (not the predicted labels; **why?**). Remember what the two columns mean? ``` proba_virginica = log_reg.predict_proba(x_train) proba_virginica[0:10] from sklearn.metrics import roc_curve fpr_model, tpr_model, thresholds_model = roc_curve(y_train, proba_virginica[:,1]) fpr_model tpr_model thresholds_model ``` Plot the ROC curve as follows: ``` plt.plot(fpr_model, tpr_model,label='our model') plt.plot([0,1],[0,1],label='random') plt.plot([0,0,1,1],[0,1,1,1],label='perfect') plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.legend() plt.show() ``` The AUC: ``` from sklearn.metrics import roc_auc_score auc_model = roc_auc_score(y_train, proba_virginica[:,1]) auc_model ``` You can use the ROC and AUC metrics to evaluate competing models. Many people prefer to use these metrics to analyze each model's performance because they do not require selecting a threshold and they balance the true positive rate against the false positive rate. Now let's do the same thing for our test data (but again, this dataset is fairly small, and K-fold cross-validation is recommended; see the short sketch at the end of this lesson). ``` log_reg.score(x_test, y_test) predictions = log_reg.predict(x_test) predictions, y_test confusion_matrix(y_test, predictions) recall_score(y_test, predictions) precision_score(y_test, predictions) proba_virginica = log_reg.predict_proba(x_test) fpr_model, tpr_model, thresholds_model = roc_curve(y_test, proba_virginica[:,1]) plt.plot(fpr_model, tpr_model,label='our model') plt.plot([0,1],[0,1],label='random') plt.plot([0,0,1,1],[0,1,1,1],label='perfect') plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.legend() plt.show() auc_model = roc_auc_score(y_test, proba_virginica[:,1]) auc_model ``` Learn more about the logistic regression function and options at https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html
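Following up on the remark about K-fold cross-validation, here is a minimal sketch of how it could look for this example. It reuses the `X` and `y` arrays built earlier in this lesson; the choice of 5 folds is arbitrary, and scikit-learn's default stratified splitting is assumed.

```python
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

# 5-fold cross-validated AUC: each fold serves once as a held-out test set,
# so we do not depend on a single, small train/test split.
cv_auc = cross_val_score(LogisticRegression(), X, y, cv=5, scoring='roc_auc')
print(cv_auc)
print(f"mean AUC = {cv_auc.mean():.3f} +/- {cv_auc.std():.3f}")
```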
# Multi-panel detector The AGIPD detector, which is already in use at the SPB experiment, consists of 16 modules of 512×128 pixels each. Each module is further divided into 8 ASICs (application-specific integrated circuit). <img src="AGIPD.png" width="300" align="left"/> <img src="agipd_geometry_14_1.png" width="420" align="right"/> <div style="clear: both"><small>Photo © European XFEL</small></div> ## Simulation Demonstration ``` import os, shutil, sys import h5py import matplotlib.pyplot as plt import numpy as np import modules from extra_geom import AGIPD_1MGeometry modules.load('maxwell' ,'openmpi/3.1.6') SimExPath = '/gpfs/exfel/data/user/juncheng/simex-branch/Sources/python/' SimExExtLib = '/gpfs/exfel/data/user/juncheng/simex-branch/lib/python3.7/site-packages/' SimExBin = ':/gpfs/exfel/data/user/juncheng/miniconda3/envs/simex-branch/bin/' sys.path.insert(0,SimExPath) sys.path.insert(0,SimExExtLib) os.environ["PATH"] += SimExBin from SimEx.Calculators.AbstractPhotonDiffractor import AbstractPhotonDiffractor from SimEx.Calculators.CrystFELPhotonDiffractor import CrystFELPhotonDiffractor from SimEx.Parameters.CrystFELPhotonDiffractorParameters import CrystFELPhotonDiffractorParameters from SimEx.Parameters.PhotonBeamParameters import PhotonBeamParameters from SimEx.Parameters.DetectorGeometry import DetectorGeometry, DetectorPanel from SimEx.Utilities.Units import electronvolt, joule, meter, radian ``` ## Data path setup ``` data_path = './diffr' ``` Clean up any data from a previous run: ``` if os.path.isdir(data_path): shutil.rmtree(data_path) if os.path.isfile(data_path + '.h5'): os.remove(data_path + '.h5') ``` ## Set up X-ray Beam Parameters ``` beamParam = PhotonBeamParameters( photon_energy = 4972.0 * electronvolt, # photon energy in eV beam_diameter_fwhm=130e-9 * meter, # focus diameter in m pulse_energy=45e-3 * joule, # pulse energy in J photon_energy_relative_bandwidth=0.003, # relative bandwidth dE/E divergence=0.0 * radian, # Beam divergence in rad photon_energy_spectrum_type='tophat', # Spectrum type. Acceptable values are "tophat", "SASE", and "twocolor") ) ``` ## Detector Setting ``` geom = AGIPD_1MGeometry.from_quad_positions(quad_pos=[ (-525, 625), (-550, -10), (520, -160), (542.5, 475), ]) geom.inspect() geom_file = 'agipd_simple_2d.geom' geom.write_crystfel_geom( geom_file, dims=('frame', 'ss', 'fs'), adu_per_ev=1.0, clen=0.13, # Sample - detector distance in m photon_energy=4972, # eV data_path='/data/data', ) ``` ## Diffractor Settings ``` diffParam = CrystFELPhotonDiffractorParameters( sample='3WUL.pdb', # Looks up pdb file in cwd, if not found, queries from RCSB pdb mirror. uniform_rotation=True, # Apply random rotation number_of_diffraction_patterns=2, # powder=False, # Set xto True to create a virtual powder diffraction pattern (unested) intensities_file=None, # File that contains reflection intensities. If set to none, use uniform intensity distribution crystal_size_range=[1e-7, 1e-7], # Range ([min,max]) in units of metres of crystal size. poissonize=False, # Set to True to add Poisson noise. number_of_background_photons=0, # Change number to add uniformly distributed background photons. suppress_fringes=False, # Set to True to suppress side maxima between reflection peaks. beam_parameters=beamParam, # Beam parameters object from above detector_geometry=geom_file, # External file that contains the detector geometry in CrystFEL notation. 
) diffractor = CrystFELPhotonDiffractor( parameters=diffParam, output_path=data_path ) ``` ## Run the simulation ``` diffractor.backengine() diffractor.saveH5_geom() data_f = h5py.File(data_path + '.h5', 'r') frame = data_f['data/0000001/data'][...].reshape(16, 512, 128) fig, ax = plt.subplots(figsize=(12, 10)) geom.plot_data_fast(frame, axis_units='m', ax=ax, vmax=1000); ``` This project has received funding from the European Union's Horizon 2020 research and innovation programme under grant agreement No. 823852.
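As a quick sanity check on the multi-module layout described at the top of this notebook, you can loop over all patterns stored in the output file and confirm that each one reshapes into the 16 modules of 512×128 pixels. This is a small sketch that assumes the same `data/<pattern id>/data` layout used in the cell above (with `number_of_diffraction_patterns=2`, two entries are expected).

```python
import h5py
import numpy as np

# Inspect every simulated pattern in the HDF5 file written above.
with h5py.File(data_path + '.h5', 'r') as f:
    for pattern_id in sorted(f['data'].keys()):
        entry = f['data'][pattern_id]
        if not isinstance(entry, h5py.Group) or 'data' not in entry:
            continue  # skip any non-pattern entries
        modules = entry['data'][...].reshape(16, 512, 128)  # 16 AGIPD modules of 512 x 128 pixels
        print(pattern_id, 'module stack:', modules.shape,
              'total signal:', float(np.sum(modules)))
```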
tgb - 6/12/2021 - The goal is to see whether it would be possible to train a NN/MLR outputting results in quantile space while still penalizing them following the mean squared error in physical space. tgb - 4/15/2021 - Recycling this notebook but fitting in percentile space (no scale_dict, use output in percentile units) tgb - 4/15/2020 - Adapting Ankitesh's notebook that builds and train a "brute-force" network to David Walling's hyperparameter search - Adding the option to choose between aquaplanet and real-geography data ``` import sys sys.path.insert(1,"/home1/07064/tg863631/anaconda3/envs/CbrainCustomLayer/lib/python3.6/site-packages") #work around for h5py from cbrain.imports import * from cbrain.cam_constants import * from cbrain.utils import * from cbrain.layers import * from cbrain.data_generator import DataGenerator from cbrain.climate_invariant import * import tensorflow as tf physical_devices = tf.config.experimental.list_physical_devices('GPU') tf.config.experimental.set_memory_growth(physical_devices[0], True) tf.config.experimental.set_memory_growth(physical_devices[1], True) tf.config.experimental.set_memory_growth(physical_devices[2], True) import os os.environ["CUDA_VISIBLE_DEVICES"]="1" from tensorflow import math as tfm from tensorflow.keras.layers import * from tensorflow.keras.models import * import tensorflow_probability as tfp import xarray as xr import numpy as np from cbrain.model_diagnostics import ModelDiagnostics import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.image as imag import scipy.integrate as sin # import cartopy.crs as ccrs import matplotlib.ticker as mticker # from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER import pickle # from climate_invariant import * from tensorflow.keras import layers import datetime from climate_invariant_utils import * import yaml ``` ## Global Variables ``` # Load coordinates (just pick any file from the climate model run) # Comet path below # coor = xr.open_dataset("/oasis/scratch/comet/ankitesh/temp_project/data/sp8fbp_minus4k.cam2.h1.0000-01-01-00000.nc",\ # decode_times=False) # GP path below path_0K = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/fluxbypass_aqua/' coor = xr.open_dataset(path_0K+"AndKua_aqua_SPCAM3.0_sp_fbp_f4.cam2.h1.0000-09-02-00000.nc") lat = coor.lat; lon = coor.lon; lev = coor.lev; coor.close(); # Comet path below # TRAINDIR = '/oasis/scratch/comet/ankitesh/temp_project/PrepData/CRHData/' # path = '/home/ankitesh/CBrain_project/CBRAIN-CAM/cbrain/' # GP path below TRAINDIR = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/SPCAM_PHYS/' path = '/export/nfs0home/tbeucler/CBRAIN-CAM/cbrain/' path_nnconfig = '/export/nfs0home/tbeucler/CBRAIN-CAM/nn_config/' # Load hyam and hybm to calculate pressure field in SPCAM path_hyam = 'hyam_hybm.pkl' hf = open(path+path_hyam,'rb') hyam,hybm = pickle.load(hf) # Scale dictionary to convert the loss to W/m2 scale_dict = load_pickle(path_nnconfig+'scale_dicts/009_Wm2_scaling.pkl') ``` New Data generator class for the climate-invariant network. 
Calculates the physical rescalings needed to make the NN climate-invariant ## Data Generators ### Choose between aquaplanet and realistic geography here ``` # GP paths below #path_aquaplanet = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/SPCAM_PHYS/' #path_realgeography = '' # GP /fast paths below path_aquaplanet = '/fast/tbeucler/climate_invariant/aquaplanet/' # Comet paths below # path_aquaplanet = '/oasis/scratch/comet/ankitesh/temp_project/PrepData/' # path_realgeography = '/oasis/scratch/comet/ankitesh/temp_project/PrepData/geography/' path = path_aquaplanet ``` ### Data Generator using RH ``` #scale_dict_RH = load_pickle('/home/ankitesh/CBrain_project/CBRAIN-CAM/nn_config/scale_dicts/009_Wm2_scaling_2.pkl') scale_dict_RH = scale_dict.copy() scale_dict_RH['RH'] = 0.01*L_S/G, # Arbitrary 0.1 factor as specific humidity is generally below 2% in_vars_RH = ['RH','TBP','PS', 'SOLIN', 'SHFLX', 'LHFLX'] # if path==path_realgeography: out_vars_RH = ['PTEQ','PTTEND','FSNT','FSNS','FLNT','FLNS'] # elif path==path_aquaplanet: out_vars_RH = ['PHQ','TPHYSTND','FSNT', 'FSNS', 'FLNT', 'FLNS'] if path==path_aquaplanet: out_vars_RH = ['PHQ','TPHYSTND','QRL','QRS'] # New GP path below TRAINFILE_RH = '2021_01_24_O3_small_shuffle.nc' NORMFILE_RH = '2021_02_01_NORM_O3_RH_small.nc' # Comet/Ankitesh path below # TRAINFILE_RH = 'CI_RH_M4K_NORM_train_shuffle.nc' # NORMFILE_RH = 'CI_RH_M4K_NORM_norm.nc' # VALIDFILE_RH = 'CI_RH_M4K_NORM_valid.nc' train_gen_RH = DataGenerator( data_fn = path+TRAINFILE_RH, input_vars = in_vars_RH, output_vars = out_vars_RH, norm_fn = path+NORMFILE_RH, input_transform = ('mean', 'maxrs'), output_transform = scale_dict_RH, batch_size=1024, shuffle=True, ) ``` ### Data Generator using QSATdeficit We only need the norm file for this generator as we are solely using it as an input to determine the right normalization for the combined generator ``` # New GP path below TRAINFILE_QSATdeficit = '2021_02_01_O3_QSATdeficit_small_shuffle.nc' NORMFILE_QSATdeficit = '2021_02_01_NORM_O3_QSATdeficit_small.nc' in_vars_QSATdeficit = ['QSATdeficit','TBP','PS', 'SOLIN', 'SHFLX', 'LHFLX'] # if path==path_realgeography: out_vars_RH = ['PTEQ','PTTEND','FSNT','FSNS','FLNT','FLNS'] # elif path==path_aquaplanet: out_vars_RH = ['PHQ','TPHYSTND','FSNT', 'FSNS', 'FLNT', 'FLNS'] if path==path_aquaplanet: out_vars_QSATdeficit = ['PHQ','TPHYSTND','QRL','QRS'] train_gen_QSATdeficit = DataGenerator( data_fn = path+TRAINFILE_QSATdeficit, input_vars = in_vars_QSATdeficit, output_vars = out_vars_QSATdeficit, norm_fn = path+NORMFILE_QSATdeficit, input_transform = ('mean', 'maxrs'), output_transform = scale_dict, batch_size=1024, shuffle=True, ) ``` ### Data Generator using TNS ``` in_vars = ['QBP','TfromNS','PS', 'SOLIN', 'SHFLX', 'LHFLX'] if path==path_aquaplanet: out_vars = ['PHQ','TPHYSTND','FSNT', 'FSNS', 'FLNT', 'FLNS'] elif path==path_realgeography: out_vars = ['PTEQ','PTTEND','FSNT','FSNS','FLNT','FLNS'] TRAINFILE_TNS = '2021_02_01_O3_TfromNS_small_shuffle.nc' NORMFILE_TNS = '2021_02_01_NORM_O3_TfromNS_small.nc' VALIDFILE_TNS = 'CI_TNS_M4K_NORM_valid.nc' train_gen_TNS = DataGenerator( data_fn = path+TRAINFILE_TNS, input_vars = in_vars, output_vars = out_vars, norm_fn = path+NORMFILE_TNS, input_transform = ('mean', 'maxrs'), output_transform = scale_dict, batch_size=1024, shuffle=True, ) ``` ### Data Generator using BCONS ``` in_vars = ['QBP','BCONS','PS', 'SOLIN', 'SHFLX', 'LHFLX'] if path==path_aquaplanet: out_vars = ['PHQ','TPHYSTND','FSNT', 'FSNS', 'FLNT', 'FLNS'] elif path==path_realgeography: out_vars = 
['PTEQ','PTTEND','FSNT','FSNS','FLNT','FLNS'] TRAINFILE_BCONS = '2021_02_01_O3_BCONS_small_shuffle.nc' NORMFILE_BCONS = '2021_02_01_NORM_O3_BCONS_small.nc' train_gen_BCONS = DataGenerator( data_fn = path+TRAINFILE_BCONS, input_vars = in_vars, output_vars = out_vars, norm_fn = path+NORMFILE_BCONS, input_transform = ('mean', 'maxrs'), output_transform = scale_dict, batch_size=1024, shuffle=True, ) ``` ### Data Generator using NSto220 ``` in_vars = ['QBP','T_NSto220','PS', 'SOLIN', 'SHFLX', 'LHFLX'] if path==path_aquaplanet: out_vars = ['PHQ','TPHYSTND','FSNT', 'FSNS', 'FLNT', 'FLNS'] elif path==path_realgeography: out_vars = ['PTEQ','PTTEND','FSNT','FSNS','FLNT','FLNS'] TRAINFILE_T_NSto220 = '2021_03_31_O3_T_NSto220_small.nc' NORMFILE_T_NSto220 = '2021_03_31_NORM_O3_T_NSto220_small.nc' train_gen_T_NSto220 = DataGenerator( data_fn = path+TRAINFILE_T_NSto220, input_vars = in_vars, output_vars = out_vars, norm_fn = path+NORMFILE_T_NSto220, input_transform = ('mean', 'maxrs'), output_transform = scale_dict, batch_size=8192, shuffle=True, ) ``` ### Data Generator using LHF_nsDELQ ``` in_vars = ['QBP','TBP','PS', 'SOLIN', 'SHFLX', 'LHF_nsDELQ'] if path==path_aquaplanet: out_vars = ['PHQ','TPHYSTND','FSNT', 'FSNS', 'FLNT', 'FLNS'] elif path==path_realgeography: out_vars = ['PTEQ','PTTEND','FSNT','FSNS','FLNT','FLNS'] TRAINFILE_LHF_nsDELQ = '2021_02_01_O3_LHF_nsDELQ_small_shuffle.nc' NORMFILE_LHF_nsDELQ = '2021_02_01_NORM_O3_LHF_nsDELQ_small.nc' train_gen_LHF_nsDELQ = DataGenerator( data_fn = path+TRAINFILE_LHF_nsDELQ, input_vars = in_vars, output_vars = out_vars, norm_fn = path+NORMFILE_LHF_nsDELQ, input_transform = ('mean', 'maxrs'), output_transform = scale_dict, batch_size=8192, shuffle=True, ) ``` ### Data Generator using LHF_nsQ ``` in_vars = ['QBP','TBP','PS', 'SOLIN', 'SHFLX', 'LHF_nsQ'] if path==path_aquaplanet: out_vars = ['PHQ','TPHYSTND','FSNT', 'FSNS', 'FLNT', 'FLNS'] elif path==path_realgeography: out_vars = ['PTEQ','PTTEND','FSNT','FSNS','FLNT','FLNS'] TRAINFILE_LHF_nsQ = '2021_02_01_O3_LHF_nsQ_small_shuffle.nc' NORMFILE_LHF_nsQ = '2021_02_01_NORM_O3_LHF_nsQ_small.nc' train_gen_LHF_nsQ = DataGenerator( data_fn = path+TRAINFILE_LHF_nsQ, input_vars = in_vars, output_vars = out_vars, norm_fn = path+NORMFILE_LHF_nsQ, input_transform = ('mean', 'maxrs'), output_transform = scale_dict, batch_size=8192, shuffle=True, ) ``` ### Data Generator Combined (latest flexible version) ``` in_vars = ['QBP','TBP','PS', 'SOLIN', 'SHFLX', 'LHFLX'] #if path==path_aquaplanet: out_vars=['PHQPERC','TPHYSTNDPERC','QRLPERC','QRSPERC'] out_vars = ['PHQ','TPHYSTND','QRL','QRS'] # TRAINFILE = '2021_01_24_O3_TRAIN_shuffle.nc' NORMFILE = '2021_01_24_NORM_O3_small.nc' # VALIDFILE = '2021_01_24_O3_VALID.nc' # GENTESTFILE = 'CI_SP_P4K_valid.nc' # In physical space TRAINFILE = '2021_03_18_O3_TRAIN_M4K_shuffle.nc' VALIDFILE = '2021_03_18_O3_VALID_M4K.nc' TESTFILE_DIFFCLIMATE = '2021_03_18_O3_TRAIN_P4K_shuffle.nc' TESTFILE_DIFFGEOG = '2021_04_18_RG_TRAIN_M4K_shuffle.nc' # In percentile space #TRAINFILE = '2021_04_09_PERC_TRAIN_M4K_shuffle.nc' #TRAINFILE = '2021_01_24_O3_small_shuffle.nc' #VALIDFILE = '2021_04_09_PERC_VALID_M4K.nc' #TESTFILE = '2021_04_09_PERC_TEST_P4K.nc' ``` Old data generator by Ankitesh Improved flexible data generator ``` train_gen_CI = DataGeneratorCI(data_fn = path+TRAINFILE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=scale_dict, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling=None, Tscaling=None, 
LHFscaling=None, SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=None, inp_div_Qscaling=None, inp_sub_Tscaling=None, inp_div_Tscaling=None, inp_sub_LHFscaling=None, inp_div_LHFscaling=None, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') valid_gen_CI = DataGeneratorCI(data_fn = path+VALIDFILE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=scale_dict, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling=None, Tscaling=None, LHFscaling=None, SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=None, inp_div_Qscaling=None, inp_sub_Tscaling=None, inp_div_Tscaling=None, inp_sub_LHFscaling=None, inp_div_LHFscaling=None, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') test_diffclimate_gen_CI = DataGeneratorCI(data_fn = path+TESTFILE_DIFFCLIMATE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=scale_dict, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling=None, Tscaling=None, LHFscaling=None, SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=None, inp_div_Qscaling=None, inp_sub_Tscaling=None, inp_div_Tscaling=None, inp_sub_LHFscaling=None, inp_div_LHFscaling=None, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') test_diffgeog_gen_CI = DataGeneratorCI(data_fn = path+TESTFILE_DIFFGEOG, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=scale_dict, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling=None, Tscaling=None, LHFscaling=None, SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=None, inp_div_Qscaling=None, inp_sub_Tscaling=None, inp_div_Tscaling=None, inp_sub_LHFscaling=None, inp_div_LHFscaling=None, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') ``` ## Add callback class to track loss on multiple sets during training From [https://stackoverflow.com/questions/47731935/using-multiple-validation-sets-with-keras] ``` test_diffgeog_gen_CI[0][0].shape np.argwhere(np.isnan(test_gen_CI[0][1])) np.argwhere(np.isnan(test_gen_CI[0][0])) class AdditionalValidationSets(Callback): def __init__(self, validation_sets, verbose=0, batch_size=None): """ :param validation_sets: a list of 3-tuples (validation_data, validation_targets, validation_set_name) or 4-tuples (validation_data, validation_targets, sample_weights, validation_set_name) :param verbose: verbosity mode, 1 or 0 :param batch_size: batch size to be used when evaluating on the additional datasets """ super(AdditionalValidationSets, self).__init__() self.validation_sets = validation_sets self.epoch = [] self.history = {} self.verbose = verbose self.batch_size = batch_size def on_train_begin(self, logs=None): self.epoch = [] self.history = {} def on_epoch_end(self, epoch, logs=None): logs = logs or {} self.epoch.append(epoch) # record the same values as History() as well for k, v in logs.items(): self.history.setdefault(k, 
[]).append(v) # evaluate on the additional validation sets for validation_set in self.validation_sets: valid_generator,valid_name = validation_set #tf.print('Results') results = self.model.evaluate_generator(generator=valid_generator) #tf.print(results) for metric, result in zip(self.model.metrics_names,[results]): #tf.print(metric,result) valuename = valid_name + '_' + metric self.history.setdefault(valuename, []).append(result) ``` ## Quick test to develop custom loss fx (no loss tracking across multiple datasets) #### Input and Output Rescaling (T=BCONS) ``` Tscaling_name = 'BCONS' train_gen_T = train_gen_BCONS train_gen_CI = DataGeneratorCI(data_fn = path+TRAINFILE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=None, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') valid_gen_CI = DataGeneratorCI(data_fn = path+VALIDFILE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=None, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') test_diffclimate_gen_CI = DataGeneratorCI(data_fn = path+TESTFILE_DIFFCLIMATE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=None, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') test_diffgeog_gen_CI = DataGeneratorCI(data_fn = path+TESTFILE_DIFFGEOG, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=None, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, 
output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') inp = Input(shape=(64,)) ## input after rh and tns transformation dense_out = Dense(120, activation='linear')(inp) dense_out = tf.keras.activations.sigmoid(dense_out) model = tf.keras.models.Model(inp, dense_out) model.summary() pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' pdf = {} for ipath,path in enumerate([TRAINFILE,VALIDFILE,TESTFILE_DIFFCLIMATE,TESTFILE_DIFFGEOG]): hf = open(pathPKL+'/'+path+'_PERC.pkl','rb') pdf[path] = pickle.load(hf) # Interpolate each of the 120 outputs from quantile space back to physical space using the percentile lookup table pdf, then penalize the squared error in physical space def mse_physical(pdf): def loss(y_true,y_pred): y_true_cols = [] y_pred_cols = [] for ilev in range(120): y_true_cols.append(tfp.math.interp_regular_1d_grid(y_true[:,ilev], x_ref_min=0.,x_ref_max=1.,y_ref=pdf[:,ilev])) y_pred_cols.append(tfp.math.interp_regular_1d_grid(y_pred[:,ilev], x_ref_min=0.,x_ref_max=1.,y_ref=pdf[:,ilev])) y_true_physical = tf.stack(y_true_cols,axis=1) y_pred_physical = tf.stack(y_pred_cols,axis=1) return tf.reduce_mean(tf.math.squared_difference(y_pred_physical, y_true_physical), axis=-1) return loss # model = load_model('model.h5', # custom_objects={'loss': asymmetric_loss(alpha)}) model.compile(tf.keras.optimizers.Adam(), loss=mse_physical(pdf=np.float32(pdf['2021_03_18_O3_TRAIN_M4K_shuffle.nc']['PERC_array'][:,94:]))) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' #save_name = '2021_06_12_LOGI_PERC_RH_BCONS_LHF_nsDELQ' save_name = '2021_06_12_Test' # history = AdditionalValidationSets([(train_gen_CI,valid_gen_CI,test_gen_CI)]) history = AdditionalValidationSets([(test_diffclimate_gen_CI,'trainP4K'),(test_diffgeog_gen_CI,'trainM4K_RG')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 20 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos]) hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` ## Models tracking losses across climates and geography (Based on cold Aquaplanet) ### MLR or Logistic regression #### BF ``` inp = Input(shape=(64,)) ## input after rh and tns transformation dense_out = Dense(120, activation='linear')(inp) model = tf.keras.models.Model(inp, dense_out) model.summary() model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_04_26_MLR' # history = AdditionalValidationSets([(train_gen_CI,valid_gen_CI,test_gen_CI)]) history = AdditionalValidationSets([(test_diffclimate_gen_CI,'trainP4K'),(test_diffgeog_gen_CI,'trainM4K_RG')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') #model.load_weights(path_HDF5+save_name+'.hdf5') Nep = 20
model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` #### Input Rescaling (T=T-TNS) ``` Tscaling_name = 'TfromNS' train_gen_T = train_gen_TNS train_gen_CI = DataGeneratorCI(data_fn = path+TRAINFILE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=scale_dict, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') valid_gen_CI = DataGeneratorCI(data_fn = path+VALIDFILE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=scale_dict, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') test_diffclimate_gen_CI = DataGeneratorCI(data_fn = path+TESTFILE_DIFFCLIMATE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=scale_dict, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') test_diffgeog_gen_CI = DataGeneratorCI(data_fn = path+TESTFILE_DIFFGEOG, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=scale_dict, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, 
inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') inp = Input(shape=(64,)) ## input after rh and tns transformation dense_out = Dense(120, activation='linear')(inp) model = tf.keras.models.Model(inp, dense_out) model.summary() model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_04_26_MLR_RH_TfromNS_LHF_nsDELQ' # history = AdditionalValidationSets([(train_gen_CI,valid_gen_CI,test_gen_CI)]) history = AdditionalValidationSets([(test_diffclimate_gen_CI,'trainP4K'),(test_diffgeog_gen_CI,'trainM4K_RG')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 20 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` #### Input Rescaling (T=BCONS) ``` Tscaling_name = 'BCONS' train_gen_T = train_gen_BCONS train_gen_CI = DataGeneratorCI(data_fn = path+TRAINFILE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=scale_dict, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') valid_gen_CI = DataGeneratorCI(data_fn = path+VALIDFILE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=scale_dict, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') test_diffclimate_gen_CI = DataGeneratorCI(data_fn = path+TESTFILE_DIFFCLIMATE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), 
output_transform=scale_dict, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') test_diffgeog_gen_CI = DataGeneratorCI(data_fn = path+TESTFILE_DIFFGEOG, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=scale_dict, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') inp = Input(shape=(64,)) ## input after rh and tns transformation dense_out = Dense(120, activation='linear')(inp) model = tf.keras.models.Model(inp, dense_out) model.summary() model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_04_26_MLR_RH_BCONS_LHF_nsDELQ' # history = AdditionalValidationSets([(train_gen_CI,valid_gen_CI,test_gen_CI)]) history = AdditionalValidationSets([(test_diffclimate_gen_CI,'trainP4K'),(test_diffgeog_gen_CI,'trainM4K_RG')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 20 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` #### Input and Output Rescaling (T=T-TNS) ``` in_vars = ['QBP','TBP','PS', 'SOLIN', 'SHFLX', 'LHFLX'] out_vars=['PHQPERC','TPHYSTNDPERC','QRLPERC','QRSPERC'] # TRAINFILE = '2021_01_24_O3_TRAIN_shuffle.nc' NORMFILE = '2021_01_24_NORM_O3_small.nc' # VALIDFILE = '2021_01_24_O3_VALID.nc' # GENTESTFILE = 'CI_SP_P4K_valid.nc' # In percentile space TRAINFILE = '2021_04_09_PERC_TRAIN_M4K_shuffle.nc' VALIDFILE = '2021_04_09_PERC_VALID_M4K.nc' TESTFILE_DIFFCLIMATE = '2021_04_09_PERC_TRAIN_P4K_shuffle.nc' TESTFILE_DIFFGEOG = '2021_04_24_RG_PERC_TRAIN_M4K_shuffle.nc' Tscaling_name = 'TfromNS' train_gen_T = train_gen_TNS train_gen_CI = DataGeneratorCI(data_fn = path+TRAINFILE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=None, batch_size=8192, shuffle=True, xarray=False, 
var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') valid_gen_CI = DataGeneratorCI(data_fn = path+VALIDFILE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=None, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') test_diffclimate_gen_CI = DataGeneratorCI(data_fn = path+TESTFILE_DIFFCLIMATE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=None, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') test_diffgeog_gen_CI = DataGeneratorCI(data_fn = path+TESTFILE_DIFFGEOG, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=None, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') inp = Input(shape=(64,)) ## input after rh and tns transformation dense_out = Dense(120, activation='linear')(inp) dense_out = tf.keras.activations.sigmoid(dense_out) model = tf.keras.models.Model(inp, dense_out) model.summary() model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = 
'/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_04_26_LOGI_PERC_RH_TfromNS_LHF_nsDELQ' # history = AdditionalValidationSets([(train_gen_CI,valid_gen_CI,test_gen_CI)]) history = AdditionalValidationSets([(test_diffclimate_gen_CI,'trainP4K'),(test_diffgeog_gen_CI,'trainM4K_RG')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 20 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` #### Input and Output Rescaling (T=BCONS) ``` Tscaling_name = 'BCONS' train_gen_T = train_gen_BCONS train_gen_CI = DataGeneratorCI(data_fn = path+TRAINFILE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=None, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') valid_gen_CI = DataGeneratorCI(data_fn = path+VALIDFILE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=None, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') test_diffclimate_gen_CI = DataGeneratorCI(data_fn = path+TESTFILE_DIFFCLIMATE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=None, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') 
test_diffgeog_gen_CI = DataGeneratorCI(data_fn = path+TESTFILE_DIFFGEOG, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=None, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') inp = Input(shape=(64,)) ## input after rh and tns transformation dense_out = Dense(120, activation='linear')(inp) dense_out = tf.keras.activations.sigmoid(dense_out) model = tf.keras.models.Model(inp, dense_out) model.summary() model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_04_26_LOGI_PERC_RH_BCONS_LHF_nsDELQ' # history = AdditionalValidationSets([(train_gen_CI,valid_gen_CI,test_gen_CI)]) history = AdditionalValidationSets([(test_diffclimate_gen_CI,'trainP4K'),(test_diffgeog_gen_CI,'trainM4K_RG')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 20 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` ### NN ``` in_vars = ['QBP','TBP','PS', 'SOLIN', 'SHFLX', 'LHFLX'] #if path==path_aquaplanet: out_vars=['PHQPERC','TPHYSTNDPERC','QRLPERC','QRSPERC'] out_vars = ['PHQ','TPHYSTND','QRL','QRS'] # TRAINFILE = '2021_01_24_O3_TRAIN_shuffle.nc' NORMFILE = '2021_01_24_NORM_O3_small.nc' # VALIDFILE = '2021_01_24_O3_VALID.nc' # GENTESTFILE = 'CI_SP_P4K_valid.nc' # In physical space TRAINFILE = '2021_03_18_O3_TRAIN_M4K_shuffle.nc' VALIDFILE = '2021_03_18_O3_VALID_M4K.nc' TESTFILE_DIFFCLIMATE = '2021_03_18_O3_TRAIN_P4K_shuffle.nc' TESTFILE_DIFFGEOG = '2021_04_18_RG_TRAIN_M4K_shuffle.nc' train_gen_CI = DataGeneratorCI(data_fn = path+TRAINFILE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=scale_dict, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling=None, Tscaling=None, LHFscaling=None, SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=None, inp_div_Qscaling=None, inp_sub_Tscaling=None, inp_div_Tscaling=None, inp_sub_LHFscaling=None, inp_div_LHFscaling=None, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') valid_gen_CI = DataGeneratorCI(data_fn = path+VALIDFILE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=scale_dict, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, 
Qscaling=None, Tscaling=None, LHFscaling=None, SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=None, inp_div_Qscaling=None, inp_sub_Tscaling=None, inp_div_Tscaling=None, inp_sub_LHFscaling=None, inp_div_LHFscaling=None, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') test_diffclimate_gen_CI = DataGeneratorCI(data_fn = path+TESTFILE_DIFFCLIMATE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=scale_dict, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling=None, Tscaling=None, LHFscaling=None, SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=None, inp_div_Qscaling=None, inp_sub_Tscaling=None, inp_div_Tscaling=None, inp_sub_LHFscaling=None, inp_div_LHFscaling=None, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') test_diffgeog_gen_CI = DataGeneratorCI(data_fn = path+TESTFILE_DIFFGEOG, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=scale_dict, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling=None, Tscaling=None, LHFscaling=None, SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=None, inp_div_Qscaling=None, inp_sub_Tscaling=None, inp_div_Tscaling=None, inp_sub_LHFscaling=None, inp_div_LHFscaling=None, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') ``` #### BF ``` inp = Input(shape=(64,)) ## input after rh and tns transformation densout = Dense(128, activation='linear')(inp) densout = LeakyReLU(alpha=0.3)(densout) for i in range (6): densout = Dense(128, activation='linear')(densout) densout = LeakyReLU(alpha=0.3)(densout) dense_out = Dense(120, activation='linear')(densout) model = tf.keras.models.Model(inp, dense_out) model.summary() model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_04_26_NN' # history = AdditionalValidationSets([(train_gen_CI,valid_gen_CI,test_gen_CI)]) history = AdditionalValidationSets([(test_diffclimate_gen_CI,'trainP4K'),(test_diffgeog_gen_CI,'trainM4K_RG')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') #model.load_weights(path_HDF5+save_name+'.hdf5') Nep = 20 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` #### Input Rescaling (T=T-TNS) ``` Tscaling_name = 'TfromNS' train_gen_T = train_gen_TNS train_gen_CI = DataGeneratorCI(data_fn = path+TRAINFILE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=scale_dict, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', 
SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') valid_gen_CI = DataGeneratorCI(data_fn = path+VALIDFILE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=scale_dict, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') test_diffclimate_gen_CI = DataGeneratorCI(data_fn = path+TESTFILE_DIFFCLIMATE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=scale_dict, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') test_diffgeog_gen_CI = DataGeneratorCI(data_fn = path+TESTFILE_DIFFGEOG, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=scale_dict, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') inp = Input(shape=(64,)) ## input after rh and tns transformation densout = Dense(128, activation='linear')(inp) densout = LeakyReLU(alpha=0.3)(densout) for i in range (6): densout = Dense(128, activation='linear')(densout) densout = LeakyReLU(alpha=0.3)(densout) dense_out = Dense(120, activation='linear')(densout) model = tf.keras.models.Model(inp, dense_out) model.summary() 
model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_04_26_NN_RH_TfromNS_LHF_nsDELQ' # history = AdditionalValidationSets([(train_gen_CI,valid_gen_CI,test_gen_CI)]) history = AdditionalValidationSets([(test_diffclimate_gen_CI,'trainP4K'),(test_diffgeog_gen_CI,'trainM4K_RG')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 20 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` #### Input Rescaling (T=BCONS) ``` Tscaling_name = 'BCONS' train_gen_T = train_gen_BCONS train_gen_CI = DataGeneratorCI(data_fn = path+TRAINFILE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=scale_dict, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') valid_gen_CI = DataGeneratorCI(data_fn = path+VALIDFILE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=scale_dict, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') test_diffclimate_gen_CI = DataGeneratorCI(data_fn = path+TESTFILE_DIFFCLIMATE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=scale_dict, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, 
interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') test_diffgeog_gen_CI = DataGeneratorCI(data_fn = path+TESTFILE_DIFFGEOG, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=scale_dict, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') inp = Input(shape=(64,)) ## input after rh and tns transformation densout = Dense(128, activation='linear')(inp) densout = LeakyReLU(alpha=0.3)(densout) for i in range (6): densout = Dense(128, activation='linear')(densout) densout = LeakyReLU(alpha=0.3)(densout) dense_out = Dense(120, activation='linear')(densout) model = tf.keras.models.Model(inp, dense_out) model.summary() model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_04_26_NN_RH_BCONS_LHF_nsDELQ' # history = AdditionalValidationSets([(train_gen_CI,valid_gen_CI,test_gen_CI)]) history = AdditionalValidationSets([(test_diffclimate_gen_CI,'trainP4K'),(test_diffgeog_gen_CI,'trainM4K_RG')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 20 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` #### Input and Output Rescaling (T=T-TNS) ``` in_vars = ['QBP','TBP','PS', 'SOLIN', 'SHFLX', 'LHFLX'] out_vars=['PHQPERC','TPHYSTNDPERC','QRLPERC','QRSPERC'] # TRAINFILE = '2021_01_24_O3_TRAIN_shuffle.nc' NORMFILE = '2021_01_24_NORM_O3_small.nc' # VALIDFILE = '2021_01_24_O3_VALID.nc' # GENTESTFILE = 'CI_SP_P4K_valid.nc' # In percentile space TRAINFILE = '2021_04_09_PERC_TRAIN_M4K_shuffle.nc' VALIDFILE = '2021_04_09_PERC_VALID_M4K.nc' TESTFILE_DIFFCLIMATE = '2021_04_09_PERC_TRAIN_P4K_shuffle.nc' TESTFILE_DIFFGEOG = '2021_04_24_RG_PERC_TRAIN_M4K_shuffle.nc' Tscaling_name = 'TfromNS' train_gen_T = train_gen_TNS train_gen_CI = DataGeneratorCI(data_fn = path+TRAINFILE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=None, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, 
inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') valid_gen_CI = DataGeneratorCI(data_fn = path+VALIDFILE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=None, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') test_diffclimate_gen_CI = DataGeneratorCI(data_fn = path+TESTFILE_DIFFCLIMATE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=None, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') test_diffgeog_gen_CI = DataGeneratorCI(data_fn = path+TESTFILE_DIFFGEOG, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=None, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') inp = Input(shape=(64,)) ## input after rh and tns transformation densout = Dense(128, activation='linear')(inp) densout = LeakyReLU(alpha=0.3)(densout) for i in range (6): densout = Dense(128, activation='linear')(densout) densout = LeakyReLU(alpha=0.3)(densout) dense_out = Dense(120, activation='linear')(densout) dense_out = tf.keras.activations.sigmoid(dense_out) model = tf.keras.models.Model(inp, dense_out) model.summary() model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_04_26_NN_PERC_RH_TfromNS_LHF_nsDELQ' # history = AdditionalValidationSets([(train_gen_CI,valid_gen_CI,test_gen_CI)]) history = 
AdditionalValidationSets([(test_diffclimate_gen_CI,'trainP4K'),(test_diffgeog_gen_CI,'trainM4K_RG')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 20 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` #### Input and Output Rescaling (T=BCONS) ``` Tscaling_name = 'BCONS' train_gen_T = train_gen_BCONS train_gen_CI = DataGeneratorCI(data_fn = path+TRAINFILE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=None, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') valid_gen_CI = DataGeneratorCI(data_fn = path+VALIDFILE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=None, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') test_diffclimate_gen_CI = DataGeneratorCI(data_fn = path+TESTFILE_DIFFCLIMATE, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=None, batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') test_diffgeog_gen_CI = DataGeneratorCI(data_fn = path+TESTFILE_DIFFGEOG, input_vars=in_vars, output_vars=out_vars, norm_fn=path+NORMFILE, input_transform=('mean', 'maxrs'), output_transform=None, 
batch_size=8192, shuffle=True, xarray=False, var_cut_off=None, Qscaling='RH', Tscaling=Tscaling_name, LHFscaling='LHF_nsDELQ', SHFscaling=None, output_scaling=False, interpolate=False, hyam=hyam,hybm=hybm, inp_sub_Qscaling=train_gen_RH.input_transform.sub, inp_div_Qscaling=train_gen_RH.input_transform.div, inp_sub_Tscaling=train_gen_T.input_transform.sub, inp_div_Tscaling=train_gen_T.input_transform.div, inp_sub_LHFscaling=train_gen_LHF_nsDELQ.input_transform.sub, inp_div_LHFscaling=train_gen_LHF_nsDELQ.input_transform.div, inp_sub_SHFscaling=None, inp_div_SHFscaling=None, lev=None, interm_size=40, lower_lim=6,is_continous=True,Tnot=5, epsQ=1e-3,epsT=1,mode='train') inp = Input(shape=(64,)) ## input after rh and tns transformation densout = Dense(128, activation='linear')(inp) densout = LeakyReLU(alpha=0.3)(densout) for i in range (6): densout = Dense(128, activation='linear')(densout) densout = LeakyReLU(alpha=0.3)(densout) dense_out = Dense(120, activation='linear')(densout) dense_out = tf.keras.activations.sigmoid(dense_out) model = tf.keras.models.Model(inp, dense_out) model.summary() model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_04_26_NN_PERC_RH_BCONS_LHF_nsDELQ' # history = AdditionalValidationSets([(train_gen_CI,valid_gen_CI,test_gen_CI)]) history = AdditionalValidationSets([(test_diffclimate_gen_CI,'trainP4K'),(test_diffgeog_gen_CI,'trainM4K_RG')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 20 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` ## Models tracking losses across climates/geography (Warm to Cold) ## Brute-Force Model ### Climate-invariant (T,Q,PS,S0,SHF,LHF)->($\dot{T}$,$\dot{q}$,RADFLUX) ``` inp = Input(shape=(64,)) ## input after rh and tns transformation densout = Dense(128, activation='linear')(inp) densout = LeakyReLU(alpha=0.3)(densout) for i in range (6): densout = Dense(128, activation='linear')(densout) densout = LeakyReLU(alpha=0.3)(densout) dense_out = Dense(64, activation='linear')(densout) model = tf.keras.models.Model(inp, dense_out) # Where to save the model path_HDF5 = '/oasis/scratch/comet/tbeucler/temp_project/CBRAIN_models/' save_name = 'BF_temp' model.compile(tf.keras.optimizers.Adam(), loss=mse) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') # tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=0, update_freq=1000,embeddings_freq=1) Nep = 10 model.fit_generator(train_gen, epochs=Nep, validation_data=valid_gen,\ callbacks=[earlyStopping, mcp_save_pos]) ``` ### Ozone (T,Q,$O_{3}$,S0,PS,LHF,SHF)$\rightarrow$($\dot{q}$,$\dot{T}$,lw,sw) ``` inp = Input(shape=(94,)) ## input after rh and tns transformation densout = Dense(128, activation='linear')(inp) densout = LeakyReLU(alpha=0.3)(densout) for i in range (6): densout = Dense(128, activation='linear')(densout) densout = LeakyReLU(alpha=0.3)(densout) dense_out = 
Dense(120, activation='linear')(densout) model = tf.keras.models.Model(inp, dense_out) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_01_25_O3' model.compile(tf.keras.optimizers.Adam(), loss=mse) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') # tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=0, update_freq=1000,embeddings_freq=1) Nep = 10 model.fit_generator(train_gen_O3, epochs=Nep, validation_data=valid_gen_O3,\ callbacks=[earlyStopping, mcp_save_pos]) Nep = 10 model.fit_generator(train_gen_O3, epochs=Nep, validation_data=valid_gen_O3,\ callbacks=[earlyStopping, mcp_save_pos]) ``` ### No Ozone (T,Q,S0,PS,LHF,SHF)$\rightarrow$($\dot{q}$,$\dot{T}$,lw,sw) ``` inp = Input(shape=(64,)) ## input after rh and tns transformation densout = Dense(128, activation='linear')(inp) densout = LeakyReLU(alpha=0.3)(densout) for i in range (6): densout = Dense(128, activation='linear')(densout) densout = LeakyReLU(alpha=0.3)(densout) dense_out = Dense(120, activation='linear')(densout) model = tf.keras.models.Model(inp, dense_out) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_01_25_noO3' model.compile(tf.keras.optimizers.Adam(), loss=mse) model.load_weights(path_HDF5+save_name+'.hdf5') earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') # tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=0, update_freq=1000,embeddings_freq=1) # Nep = 15 # model.fit_generator(train_gen_noO3, epochs=Nep, validation_data=valid_gen_noO3,\ # callbacks=[earlyStopping, mcp_save_pos]) Nep = 10 model.fit_generator(train_gen_noO3, epochs=Nep, validation_data=valid_gen_noO3,\ callbacks=[earlyStopping, mcp_save_pos]) ``` ### BF linear version ``` inp = Input(shape=(64,)) ## input after rh and tns transformation # densout = Dense(128, activation='linear')(inp) # densout = LeakyReLU(alpha=0.3)(densout) # for i in range (6): # densout = Dense(128, activation='linear')(densout) # densout = LeakyReLU(alpha=0.3)(densout) dense_out = Dense(120, activation='linear')(inp) model = tf.keras.models.Model(inp, dense_out) model.summary() model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_04_15_MLR_PERC' # history = AdditionalValidationSets([(train_gen_CI,valid_gen_CI,test_gen_CI)]) history = AdditionalValidationSets([(test_gen_CI,'testP4K')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 15 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) history.history hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` ### BF Logistic version ``` inp = Input(shape=(64,)) ## input after rh and tns transformation # densout = Dense(128, activation='linear')(inp) # densout = 
LeakyReLU(alpha=0.3)(densout) # for i in range (6): # densout = Dense(128, activation='linear')(densout) # densout = LeakyReLU(alpha=0.3)(densout) dense_out = Dense(120, activation='linear')(inp) dense_out = tf.keras.activations.sigmoid(dense_out) model = tf.keras.models.Model(inp, dense_out) model.summary() model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_04_15_Log_PERC' # history = AdditionalValidationSets([(train_gen_CI,valid_gen_CI,test_gen_CI)]) history = AdditionalValidationSets([(test_gen_CI,'testP4K')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 15 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) history.history hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` ### BF NN version with test loss tracking ``` inp = Input(shape=(64,)) ## input after rh and tns transformation densout = Dense(128, activation='linear')(inp) densout = LeakyReLU(alpha=0.3)(densout) for i in range (6): densout = Dense(128, activation='linear')(densout) densout = LeakyReLU(alpha=0.3)(densout) dense_out = Dense(120, activation='linear')(densout) model = tf.keras.models.Model(inp, dense_out) model.summary() model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_04_08_NN6L' # history = AdditionalValidationSets([(train_gen_CI,valid_gen_CI,test_gen_CI)]) history = AdditionalValidationSets([(test_gen_CI,'testP4K')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 20 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) history.history hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` ### RH Logistic version ``` inp = Input(shape=(64,)) ## input after rh and tns transformation # densout = Dense(128, activation='linear')(inp) # densout = LeakyReLU(alpha=0.3)(densout) # for i in range (6): # densout = Dense(128, activation='linear')(densout) # densout = LeakyReLU(alpha=0.3)(densout) dense_out = Dense(120, activation='linear')(inp) dense_out = tf.keras.activations.sigmoid(dense_out) model = tf.keras.models.Model(inp, dense_out) model.summary() model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_04_15_Log_PERC_RH' # history = AdditionalValidationSets([(train_gen_CI,valid_gen_CI,test_gen_CI)]) history = AdditionalValidationSets([(test_gen_CI,'testP4K')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 15 model.fit_generator(train_gen_CI, epochs=Nep, 
validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) history.history hist_rec = history.history pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` ### RH linear version ``` inp = Input(shape=(64,)) ## input after rh and tns transformation dense_out = Dense(120, activation='linear')(inp) model = tf.keras.models.Model(inp, dense_out) model.summary() model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_03_19_MLR_RH' history = AdditionalValidationSets([(test_gen_CI,'testP4K')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 10 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` ### QSATdeficit linear version ``` inp = Input(shape=(64,)) ## input after rh and tns transformation dense_out = Dense(120, activation='linear')(inp) model = tf.keras.models.Model(inp, dense_out) model.summary() model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_03_19_MLR_QSATdeficit' history = AdditionalValidationSets([(test_gen_CI,'testP4K')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 10 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` ### TfromNS linear version ``` inp = Input(shape=(64,)) ## input after rh and tns transformation dense_out = Dense(120, activation='linear')(inp) model = tf.keras.models.Model(inp, dense_out) model.summary() model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_03_19_MLR_TfromNS' history = AdditionalValidationSets([(test_gen_CI,'testP4K')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 10 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` ### BCONS linear version ``` inp = Input(shape=(64,)) ## input after rh and tns transformation dense_out = Dense(120, activation='linear')(inp) model = tf.keras.models.Model(inp, dense_out) model.summary() 
model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_03_19_MLR_BCONS' history = AdditionalValidationSets([(test_gen_CI,'testP4K')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 10 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` ## Mixed Model ``` inp = Input(shape=(64,)) ## input after rh and tns transformation dense_out = Dense(120, activation='linear')(inp) model = tf.keras.models.Model(inp, dense_out) model.summary() model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_03_19_MLR_RH_BCONS' history = AdditionalValidationSets([(test_gen_CI,'testP4K')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 10 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` ### RH+(T-TNS) ### RH+NSto220 ``` inp = Input(shape=(64,)) ## input after rh and tns transformation dense_out = Dense(120, activation='linear')(inp) model = tf.keras.models.Model(inp, dense_out) model.summary() model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_03_31_MLR_RH_NSto220' history = AdditionalValidationSets([(test_gen_CI,'testP4K')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 10 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` ### RH+LHF_nsQ ``` inp = Input(shape=(64,)) ## input after rh and tns transformation dense_out = Dense(120, activation='linear')(inp) model = tf.keras.models.Model(inp, dense_out) model.summary() model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_03_19_MLR_RH_LHF_nsQ' history = AdditionalValidationSets([(test_gen_CI,'testP4K')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 10 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ 
callbacks=[earlyStopping, mcp_save_pos, history]) hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` ### RH+TfromNS+LHF_nsDELQ NN version with test loss tracking ``` inp = Input(shape=(64,)) ## input after rh and tns transformation densout = Dense(128, activation='linear')(inp) densout = LeakyReLU(alpha=0.3)(densout) for i in range (6): densout = Dense(128, activation='linear')(densout) densout = LeakyReLU(alpha=0.3)(densout) dense_out = Dense(120, activation='linear')(densout) model = tf.keras.models.Model(inp, dense_out) model.summary() model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_04_09_NN7L_RH_TfromNS_LHF_nsDELQ' history = AdditionalValidationSets([(test_gen_CI,'testP4K')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 20 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` ### RH+TfromNS+LHF_nsQ ``` inp = Input(shape=(64,)) ## input after rh and tns transformation dense_out = Dense(120, activation='linear')(inp) model = tf.keras.models.Model(inp, dense_out) model.summary() model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_03_23_MLR_RH_TfromNS_LHF_nsQ' history = AdditionalValidationSets([(test_gen_CI,'testP4K')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 10 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` ### RH+BCONS+LHF_nsDELQ ``` inp = Input(shape=(64,)) ## input after rh and tns transformation dense_out = Dense(120, activation='linear')(inp) model = tf.keras.models.Model(inp, dense_out) model.summary() model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_03_23_MLR_RH_BCONS_LHF_nsDELQ' history = AdditionalValidationSets([(test_gen_CI,'testP4K')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 10 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) 
hf.close() ``` ### RH+NSto220+LHF_nsDELQ ``` inp = Input(shape=(64,)) ## input after rh and tns transformation dense_out = Dense(120, activation='linear')(inp) model = tf.keras.models.Model(inp, dense_out) model.summary() model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_04_01_MLR_RH_NSto220_LHF_nsDELQ' history = AdditionalValidationSets([(test_gen_CI,'testP4K')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 10 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ``` ### RH+NSto220+LHF_nsQ ``` inp = Input(shape=(64,)) ## input after rh and tns transformation dense_out = Dense(120, activation='linear')(inp) model = tf.keras.models.Model(inp, dense_out) model.summary() model.compile(tf.keras.optimizers.Adam(), loss=mse) # Where to save the model path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/' save_name = '2021_04_03_MLR_RH_NSto220_LHF_nsQ' history = AdditionalValidationSets([(test_gen_CI,'testP4K')]) earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min') mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min') Nep = 10 model.fit_generator(train_gen_CI, epochs=Nep, validation_data=valid_gen_CI,\ callbacks=[earlyStopping, mcp_save_pos, history]) hist_rec = history.history hist_rec pathPKL = '/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA' hf = open(pathPKL+save_name+'_hist.pkl','wb') F_data = {'hist':hist_rec} pickle.dump(F_data,hf) hf.close() ```
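The cells above repeat the same compile/train/checkpoint/pickle pattern, with only the layer stack, the generators, and the save name changing. Below is a minimal helper sketch of that pattern; it is illustrative only, and assumes the `mse` loss and the `AdditionalValidationSets` callback defined earlier in this notebook are in scope (`train_and_log` and its arguments are hypothetical names, not part of the project code).

```
import pickle
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint

def train_and_log(model, train_gen, valid_gen, loss, history_callback, save_name,
                  path_hdf5='/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/',
                  path_pkl='/export/home/tbeucler/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA',
                  epochs=20):
    # Compile with the optimizer/loss choice used in the cells above
    model.compile(tf.keras.optimizers.Adam(), loss=loss)
    callbacks = [EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min'),
                 ModelCheckpoint(path_hdf5 + save_name + '.hdf5',
                                 save_best_only=True, monitor='val_loss', mode='min'),
                 history_callback]
    # model.fit accepts Keras Sequence generators in TF2 (the cells above use the older fit_generator)
    model.fit(train_gen, epochs=epochs, validation_data=valid_gen, callbacks=callbacks)
    # Pickle the recorded losses, as done after each run above
    with open(path_pkl + save_name + '_hist.pkl', 'wb') as hf:
        pickle.dump({'hist': history_callback.history}, hf)
    return history_callback.history

# Example call for one of the configurations above (commented out, for illustration):
# hist = train_and_log(model, train_gen_CI, valid_gen_CI, mse,
#                      AdditionalValidationSets([(test_diffclimate_gen_CI, 'trainP4K'),
#                                                (test_diffgeog_gen_CI, 'trainM4K_RG')]),
#                      '2021_04_26_NN_RH_BCONS_LHF_nsDELQ')
```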
``` import json import glob import re import malaya tokenizer = malaya.preprocessing._SocialTokenizer().tokenize def is_number_regex(s): if re.match("^\d+?\.\d+?$", s) is None: return s.isdigit() return True def detect_money(word): if word[:2] == 'rm' and is_number_regex(word[2:]): return True else: return False def preprocessing(string): tokenized = tokenizer(string) tokenized = [w.lower() for w in tokenized if len(w) > 2] tokenized = ['<NUM>' if is_number_regex(w) else w for w in tokenized] tokenized = ['<MONEY>' if detect_money(w) else w for w in tokenized] return tokenized left, right, label = [], [], [] for file in glob.glob('quora/*.json'): with open(file) as fopen: x = json.load(fopen) for i in x: splitted = i[0].split(' <> ') if len(splitted) != 2: continue left.append(splitted[0]) right.append(splitted[1]) label.append(i[1]) len(left), len(right), len(label) with open('synonym0.json') as fopen: s = json.load(fopen) with open('synonym1.json') as fopen: s1 = json.load(fopen) synonyms = {} for l, r in (s + s1): if l not in synonyms: synonyms[l] = r + [l] else: synonyms[l].extend(r) synonyms = {k: list(set(v)) for k, v in synonyms.items()} import random def augmentation(s, maximum = 0.8): s = s.lower().split() for i in range(int(len(s) * maximum)): index = random.randint(0, len(s) - 1) word = s[index] sy = synonyms.get(word, [word]) sy = random.choice(sy) s[index] = sy return s train_left, test_left = left[:-50000], left[-50000:] train_right, test_right = right[:-50000], right[-50000:] train_label, test_label = label[:-50000], label[-50000:] len(train_left), len(test_left) aug = [' '.join(augmentation(train_left[0])) for _ in range(10)] + [train_left[0].lower()] aug = list(set(aug)) aug aug = [' '.join(augmentation(train_right[0])) for _ in range(10)] + [train_right[0].lower()] aug = list(set(aug)) aug train_label[0] from tqdm import tqdm LEFT, RIGHT, LABEL = [], [], [] for i in tqdm(range(len(train_left))): aug_left = [' '.join(augmentation(train_left[i])) for _ in range(3)] + [train_left[i].lower()] aug_left = list(set(aug_left)) aug_right = [' '.join(augmentation(train_right[i])) for _ in range(3)] + [train_right[i].lower()] aug_right = list(set(aug_right)) for l in aug_left: for r in aug_right: LEFT.append(l) RIGHT.append(r) LABEL.append(train_label[i]) len(LEFT), len(RIGHT), len(LABEL) for i in tqdm(range(len(LEFT))): LEFT[i] = preprocessing(LEFT[i]) RIGHT[i] = preprocessing(RIGHT[i]) for i in tqdm(range(len(test_left))): test_left[i] = preprocessing(test_left[i]) test_right[i] = preprocessing(test_right[i]) with open('train-similarity.json', 'w') as fopen: json.dump({'left': LEFT, 'right': RIGHT, 'label': LABEL}, fopen) with open('test-similarity.json', 'w') as fopen: json.dump({'left': test_left, 'right': test_right, 'label': test_label}, fopen) ```
``` image_shape = (56,64,1) train_path = "D:\\Projects\\EYE_GAME\\eye_img\\datav2\\train\\" test_path = "D:\\Projects\\EYE_GAME\\eye_img\\datav2\\test\\" import os import pandas as pd from glob import glob import numpy as np import matplotlib as plt from matplotlib.image import imread import seaborn as sns from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense, Conv2D, MaxPooling2D from tensorflow.keras.models import Model from tensorflow.keras.preprocessing import image from tensorflow.keras.preprocessing.image import ImageDataGenerator %matplotlib inline os.listdir(train_path) img=imread(train_path+'left\\'+'68.jpg') folders=glob(test_path + '/*') traindata_gen=ImageDataGenerator( rotation_range=10, rescale=1/255., width_shift_range=0.1, height_shift_range=0.1, shear_range=0.1, zoom_range=0.1, fill_mode='nearest' ) testdata_gen=ImageDataGenerator( rescale=1./255) traindata_gen.flow_from_directory(train_path) model = Sequential() model.add(Conv2D(filters=32, kernel_size=(3,3), strides=1, padding='same',input_shape=image_shape, activation='relu',)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(filters=64, kernel_size=(3,3), strides=1, padding='same',input_shape=image_shape, activation='relu',)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(filters=128, kernel_size=(3,3), strides=1, padding='same',input_shape=image_shape, activation='relu',)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dense(512)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(len(folders))) model.add(Activation('sigmoid')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) model.summary() from tensorflow.keras.callbacks import EarlyStopping early_stop = EarlyStopping(monitor='val_loss',patience=2) batch_size = 32 traning_set=traindata_gen.flow_from_directory(train_path, target_size =image_shape[:2], batch_size = batch_size, color_mode="grayscale", class_mode = 'categorical') testing_set=testdata_gen.flow_from_directory(test_path, target_size = image_shape[:2], batch_size = batch_size, color_mode="grayscale", class_mode = 'categorical', shuffle=False) testing_set.class_indices result = model.fit( traning_set, epochs=8, validation_data=testing_set, callbacks=[early_stop] ) losses = pd.DataFrame(model.history.history) losses[['loss','val_loss']].plot() losses[['loss','val_loss']].plot() model.metrics_names model.save('gazev3.1.h5') ```
# ism Import and Plotting This example shows how to measure an impedance spectrum and then plot it in Bode and Nyquist using the Python library [matplotlib](https://matplotlib.org/). ``` import sys from thales_remote.connection import ThalesRemoteConnection from thales_remote.script_wrapper import PotentiostatMode,ThalesRemoteScriptWrapper from thales_file_import.ism_import import IsmImport import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import EngFormatter from jupyter_utils import executionInNotebook, notebookCodeToPython ``` # Connect Python to the already launched Thales-Software ``` if __name__ == "__main__": zenniumConnection = ThalesRemoteConnection() connectionSuccessful = zenniumConnection.connectToTerm("localhost", "ScriptRemote") if connectionSuccessful: print("connection successfull") else: print("connection not possible") sys.exit() zahnerZennium = ThalesRemoteScriptWrapper(zenniumConnection) zahnerZennium.forceThalesIntoRemoteScript() ``` # Setting the parameters for the measurement After the connection with Thales, the naming of the files of the measurement results is set. Measure EIS spectra with a sequential number in the file name that has been specified. Starting with number 1. ``` zahnerZennium.setEISNaming("counter") zahnerZennium.setEISCounter(1) zahnerZennium.setEISOutputPath(r"C:\THALES\temp\test1") zahnerZennium.setEISOutputFileName("spectra") ``` Setting the parameters for the spectra. Alternatively a rule file can be used as a template. ``` zahnerZennium.setPotentiostatMode(PotentiostatMode.POTMODE_POTENTIOSTATIC) zahnerZennium.setAmplitude(10e-3) zahnerZennium.setPotential(0) zahnerZennium.setLowerFrequencyLimit(0.01) zahnerZennium.setStartFrequency(1000) zahnerZennium.setUpperFrequencyLimit(200000) zahnerZennium.setLowerNumberOfPeriods(3) zahnerZennium.setLowerStepsPerDecade(5) zahnerZennium.setUpperNumberOfPeriods(20) zahnerZennium.setUpperStepsPerDecade(10) zahnerZennium.setScanDirection("startToMax") zahnerZennium.setScanStrategy("single") ``` After setting the parameters, the measurement is started. <div class="alert alert-block alert-info"> <b>Note:</b> If the potentiostat is set to potentiostatic before the impedance measurement and is switched off, the measurement is performed at the open circuit voltage/potential. </div> After the measurement the potentiostat is switched off. ``` zahnerZennium.enablePotentiostat() zahnerZennium.measureEIS() zahnerZennium.disablePotentiostat() zenniumConnection.disconnectFromTerm() ``` # Importing the ism file Import the spectrum from the previous measurement. This was saved under the set path and name with the number expanded. The measurement starts at 1 therefore the following path results: "C:\THALES\temp\test1\spectra_0001.ism". ``` ismFile = IsmImport(r"C:\THALES\temp\test1\spectra_0001.ism") impedanceFrequencies = ismFile.getFrequencyArray() impedanceAbsolute = ismFile.getImpedanceArray() impedancePhase = ismFile.getPhaseArray() impedanceComplex = ismFile.getComplexImpedanceArray() ``` The Python datetime object of the measurement date is output to the console next. ``` print("Measurement end time: " + str(ismFile.getMeasurementEndDateTime())) ``` # Displaying the measurement results The spectra are presented in the Bode and Nyquist representation. For this test, the Zahner test box was measured in the lin position. ## Nyquist Plot The matplotlib diagram is configured to match the Nyquist representation. 
For this, the diagram aspect is set equal and the axes are labeled in engineering units. The axis labeling is realized with [LaTeX](https://www.latex-project.org/) for subscript text. The possible settings of the graph can be found in the detailed documentation and tutorials of [matplotlib](https://matplotlib.org/). ``` figNyquist, (nyquistAxis) = plt.subplots(1, 1) figNyquist.suptitle("Nyquist") nyquistAxis.plot(np.real(impedanceComplex), -np.imag(impedanceComplex), marker="x", markersize=5) nyquistAxis.grid(which="both") nyquistAxis.set_aspect("equal") nyquistAxis.xaxis.set_major_formatter(EngFormatter(unit="$\Omega$")) nyquistAxis.yaxis.set_major_formatter(EngFormatter(unit="$\Omega$")) nyquistAxis.set_xlabel(r"$Z_{\rm re}$") nyquistAxis.set_ylabel(r"$-Z_{\rm im}$") figNyquist.set_size_inches(18, 18) plt.show() figNyquist.savefig("nyquist.svg") ``` ## Bode Plot The matplotlib representation was also adapted for the Bode plot. A figure with two plots was created for the separate display of phase and impedance which are plotted over the same x-axis. ``` figBode, (impedanceAxis, phaseAxis) = plt.subplots(2, 1, sharex=True) figBode.suptitle("Bode") impedanceAxis.loglog(impedanceFrequencies, impedanceAbsolute, marker="+", markersize=5) impedanceAxis.xaxis.set_major_formatter(EngFormatter(unit="Hz")) impedanceAxis.yaxis.set_major_formatter(EngFormatter(unit="$\Omega$")) impedanceAxis.set_xlabel(r"$f$") impedanceAxis.set_ylabel(r"$|Z|$") impedanceAxis.grid(which="both") phaseAxis.semilogx(impedanceFrequencies, np.abs(impedancePhase * (360 / (2 * np.pi))), marker="+", markersize=5) phaseAxis.xaxis.set_major_formatter(EngFormatter(unit="Hz")) phaseAxis.yaxis.set_major_formatter(EngFormatter(unit="$°$", sep="")) phaseAxis.set_xlabel(r"$f$") phaseAxis.set_ylabel(r"$|Phase|$") phaseAxis.grid(which="both") phaseAxis.set_ylim([0, 90]) figBode.set_size_inches(18, 12) plt.show() figBode.savefig("bode.svg") ``` # Deployment of the source code **The following instruction is not needed by the user.** It automatically extracts the pure python code from the jupyter notebook to provide it to the user. Thus the user does not need jupyter itself and does not have to copy the code manually. The source code is saved in a .py file with the same name as the notebook. ``` if executionInNotebook() == True: notebookCodeToPython("EISImportPlot.ipynb") ```
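As a final sanity check on the imported arrays, the complex impedance returned by `getComplexImpedanceArray` should be reproducible from the modulus and phase arrays. The short snippet below assumes the phase is returned in radians, which is what the degree conversion used in the Bode plot above suggests.

```
import numpy as np

# |Z| * exp(j*phase) should match the complex impedance array returned by IsmImport
reconstructed = impedanceAbsolute * np.exp(1j * impedancePhase)
print(np.allclose(reconstructed, impedanceComplex))
```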
#### Omega and Xi To implement Graph SLAM, a matrix and a vector (omega and xi, respectively) are introduced. The matrix is square and labelled with all the robot poses (xi) and all the landmarks (Li). Every time you make an observation, for example, as you move between two poses by some distance `dx` and can relate those two positions, you can represent this as a numerical relationship in these matrices. It's easiest to see how these work in an example. Below you can see a matrix representation of omega and a vector representation of xi. <img src='images/omega_xi.png' width="20%" height="20%" /> Next, let's look at a simple example that relates 3 poses to one another. * When you start out in the world most of these values are zeros or contain only values from the initial robot position * In this example, you have been given constraints, which relate these poses to one another * Constraints translate into matrix values <img src='images/omega_xi_constraints.png' width="70%" height="70%" /> If you have ever solved linear systems of equations before, this may look familiar, and if not, let's keep going! ### Solving for x To "solve" for all these x values, we can use linear algebra; all the values of x are in the vector `mu` which can be calculated as a product of the inverse of omega times xi. <img src='images/solution.png' width="30%" height="30%" /> --- **You can confirm this result for yourself by executing the math in the cell below.** ``` import numpy as np # define omega and xi as in the example omega = np.array([[1,0,0], [-1,1,0], [0,-1,1]]) xi = np.array([[-3], [5], [3]]) # calculate the inverse of omega omega_inv = np.linalg.inv(np.matrix(omega)) # calculate the solution, mu mu = omega_inv*xi # print out the values of mu (x0, x1, x2) print(mu) ``` ## Motion Constraints and Landmarks In the last example, the constraint equations, relating one pose to another were given to you. In this next example, let's look at how motion (and similarly, sensor measurements) can be used to create constraints and fill up the constraint matrices, omega and xi. Let's start with empty/zero matrices. <img src='images/initial_constraints.png' width="35%" height="35%" /> This example also includes relationships between poses and landmarks. Say we move from x0 to x1 with a displacement `dx` of 5. Then we have created a motion constraint that relates x0 to x1, and we can start to fill up these matrices. <img src='images/motion_constraint.png' width="50%" height="50%" /> In fact, the one constraint equation can be written in two ways. So, the motion constraint that relates x0 and x1 by the motion of 5 has affected the matrix, adding values for *all* elements that correspond to x0 and x1. --- ### 2D case In these examples, we've been showing you change in only one dimension, the x-dimension. In the project, it will be up to you to represent x and y positional values in omega and xi. One solution could be to create an omega and xi that are 2x larger that the number of robot poses (that will be generated over a series of time steps) and the number of landmarks, so that they can hold both x and y values for poses and landmark locations. I might suggest drawing out a rough solution to graph slam as you read the instructions in the next notebook; that always helps me organize my thoughts. Good luck!
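Before moving on, here is a minimal 1-D sketch of the bookkeeping described above: it starts from empty matrices, anchors the initial pose at -3, then adds the motion constraints x1 - x0 = 5 and x2 - x1 = 3 from the earlier example by updating every omega/xi entry that involves the two poses. The update code shown is one illustrative way to write it, not the only one.

```
import numpy as np

# three poses (x0, x1, x2); all constraint information starts at zero
omega = np.zeros((3, 3))
xi = np.zeros((3, 1))

# initial position constraint: x0 = -3
omega[0, 0] += 1
xi[0, 0] += -3

# motion constraint x1 - x0 = 5, added to BOTH affected rows
dx = 5
omega[0, 0] += 1; omega[0, 1] += -1; xi[0, 0] += -dx
omega[1, 1] += 1; omega[1, 0] += -1; xi[1, 0] += dx

# motion constraint x2 - x1 = 3
omega[1, 1] += 1; omega[1, 2] += -1; xi[1, 0] += -3
omega[2, 2] += 1; omega[2, 1] += -1; xi[2, 0] += 3

# solve for the best estimate of all poses
mu = np.linalg.inv(omega) @ xi
print(mu)  # x0 = -3, x1 = 2, x2 = 5, matching the worked example above
```

Notice that omega comes out symmetric because every constraint is written into the rows of both poses it relates; in the project's 2-D version the same idea applies, with the 2x-larger omega and xi holding separate x and y entries for each pose and landmark.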
<a href="https://colab.research.google.com/github/bitprj/Bitcamp-DataSci/blob/master/Week1-Introduction-to-Python-_-NumPy/Intro_to_Python_plus_NumPy.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> <img src="https://github.com/bitprj/Bitcamp-DataSci/blob/master/Week1-Introduction-to-Python-_-NumPy/assets/icons/bitproject.png?raw=1" width="200" align="left"> <img src="https://github.com/bitprj/Bitcamp-DataSci/blob/master/Week1-Introduction-to-Python-_-NumPy/assets/icons/data-science.jpg?raw=1" width="300" align="right"> # Introduction to Python ### Table of Contents - Why, Where, and How we use Python - What we will be learning today - Goals - Numbers - Types of Numbers - Basic Arithmetic - Arithmetic Continued - Variable Assignment - Strings - Creating Strings - Printing Strings - String Basics - String Properties - Basic Built-In String Methods - Print Formatting - **1.0 Now Try This** - Booleans - Lists - Creating Lists - Basic List Methods - Nesting Lists - List Comprehensions - **2.0 Now Try This** - Tuples - Constructing Tuples - Basic Tuple Methods - Immutability - When To Use Tuples - **3.0 Now Try This** - Dictionaries - Constructing a Dictionary - Nesting With Dictionaries - Dictionary Methods - **4.0 Now Try This** - Comparison Operators - Functions - Intro to Functions - `def` Statements - Examples - Using `return` - **5.0 Now Try This** - Modules and Packages - Overview - NumPy - Creating Arrays - Indexing - Slicing - **6.0 Now Try This** - Data Types - **7.0 Now Try This** - Copy vs. View - **8.0 Now Try This** - Shape - **9.0 Now Try This** - Iterating Through Arrays - Joining Arrays - Splitting Arrays - Searching Arrays - Sorting Arrays - Filtering Arrays - **10.0 Now Try This** - Resources ## Why, Where, and How we use Python Python is a very popular scripting language that you can use to create applications and programs of all sizes and complexity. It is very easy to learn and has very little syntax, making it very efficient to code with. Python is also the language of choice for many when performing comprehensive data analysis. ## What we will be learning today ### Goals - Understanding key Python data types, operators and data structures - Understanding functions - Understanding modules - Understanding errors and exceptions First data type we'll cover in detail is Numbers! ## Numbers ### Types of numbers Python has various "types" of numbers. We'll strictly cover integers and floating point numbers for now. Integers are just whole numbers, positive or negative. (2,4,-21,etc.) Floating point numbers in Python have a decimal point in them, or use an exponential (e). For example 3.14 and 2.17 are *floats*. 5E7 (5 times 10 to the power of 7) is also a float. This is scientific notation and something you've probably seen in math classes. Let's start working through numbers and arithmetic: ### Basic Arithmetic ``` # Addition 4+5 # Subtraction 5-10 # Multiplication 4*8 # Division 25/5 # Floor Division 12//4 ``` What happened here? The reason we get this result is because we are using "*floor*" division. The // operator (two forward slashes) removes any decimals and doesn't round. This always produces an integer answer. **So what if we just want the remainder of division?** ``` # Modulo 9 % 4 ``` 4 goes into 9 twice, with a remainder of 1. The % operator returns the remainder after division. 
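A common use of the remainder is checking whether a number is even or odd:

```
# An even number leaves no remainder when divided by 2
10 % 2

# An odd number leaves a remainder of 1
11 % 2
```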
### Arithmetic continued ``` # Powers 4**2 # A way to do roots 144**0.5 # Order of Operations 4 + 20 * 52 + 5 # Can use parentheses to specify orders (21+5) * (4+89) ``` ## Variable Assignments We can do a lot more with Python than just using it as a calculator. We can store any numbers we create in **variables**. We use a single equals sign to assign labels or values to variables. Let's see a few examples of how we can do this. ``` # Let's create a variable called "a" and assign to it the number 10 a = 10 a ``` Now if I call *a* in my Python script, Python will treat it as the integer 10. ``` # Adding the objects a+a ``` What happens on reassignment? Will Python let us write it over? ``` # Reassignment a = 20 # Check a ``` Yes! Python allows you to write over assigned variable names. We can also use the variables themselves when doing the reassignment. Here is an example of what I mean: ``` # Use A to redefine A a = a+a # check a ``` The names you use when creating these labels need to follow a few rules: 1. Names can not start with a number. 2. There can be no spaces in the name, use _ instead. 3. Can't use any of these symbols :'",<>/?|\()!@#$%^&*~-+ 4. Using lowercase names are best practice. 5. Can't words that have special meaning in Python like "list" and "str", we'll see why later Using variable names can be a very useful way to keep track of different variables in Python. For example: ``` # Use object names to keep better track of what's going on in your code! income = 1000 tax_rate = 0.2 taxes = income*tax_rate # Show the result! taxes ``` So what have we learned? We learned some of the basics of numbers in Python. We also learned how to do arithmetic and use Python as a basic calculator. We then wrapped it up with learning about Variable Assignment in Python. Up next we'll learn about Strings! ## Strings Strings are used in Python to record text information, such as names. Strings in Python are not treated like their own objects, but rather like a *sequence*, a consecutive series of characters. For example, Python understands the string "hello' to be a sequence of letters in a specific order. This means we will be able to use indexing to grab particular letters (like the first letter, or the last letter). ### Creating Strings To create a string in Python you need to use either single quotes or double quotes. For example: ``` # A word 'hi' # A phrase 'A string can even be a sentence like this.' # Using double quotes "The quote type doesn't really matter." # Be wary of contractions and apostrophes! 'I'm using single quotes, but this will create an error!' ``` The reason for the error above is because the single quote in <code>I'm</code> stopped the string. You can use combinations of double and single quotes to get the complete statement. ``` "This shouldn't cause an error now." ``` Now let's learn about printing strings! ### Printing Strings Jupyter Notebooks have many neat behaviors that aren't available in base python. One of those is the ability to print strings by just typing it into a cell. The universal way to display strings however, is to use a **print()** function. ``` # In Jupyter, this is all we need 'Hello World' # This is the same as: print('Hello World') # Without the print function, we can't print multiple times in one block of code: 'Hello World' 'Second string' ``` A print statement can look like the following. 
``` print('Hello World') print('Second string') print('\n prints a new line') print('\n') print('Just to prove it to you.') ``` Now let's move on to understanding how we can manipulate strings in our programs. ### String Basics Oftentimes, we would like to know how many characters are in a string. We can do this very easily with the **len()** function (short for 'length'). ``` len('Hello World') ``` Python's built-in len() function counts all of the characters in the string, including spaces and punctuation. Naturally, we can assign strings to variables. ``` # Assign 'Hello World' to mystring variable mystring = 'Hello World' # Did it work? mystring # Print it to make sure print(mystring) ``` As stated before, Python treats strings as a sequence of characters. That means we can interact with each letter in a string and manipulate it. The way we access these letters is called **indexing**. Each letter has an index, which corresponds to their position in the string. In python, indices start at 0. For instance, in the string 'Hello World', 'H' has an index of 0, 'e' has an index of 1, the 'W' has an index of 6 (because spaces count as characters), and 'd' has an index of 10. The syntax for indexing is shown below. ``` # Extract first character in a string. mystring[0] mystring[1] mystring[2] ``` We can use a <code>:</code> to perform *slicing* which grabs everything up to a designated index. For example: ``` # Grab all letters past the first letter all the way to the end of the string mystring[:] # This does not change the original string in any way mystring # Grab everything UP TO the 5th index mystring[:5] ``` Note what happened above. We told Python to grab everything from 0 up to 5. It doesn't include the character in the 5th index. You'll notice this a lot in Python, where statements are usually in the context of "up to, but not including". ``` # The whole string mystring[:] # The 'default' values, if you leave the sides of the colon blank, are 0 and the length of the string end = len(mystring) # See that is matches above mystring[0:end] ``` But we don't have to go forwards. Negative indexing allows us to start from the *end* of the string and work backwards. ``` # The LAST letter (one index 'behind' 0, so it loops back around) mystring[-1] # Grab everything but the last letter mystring[:-1] ``` We can also use indexing and slicing to grab characters by a specified step size (1 is the default). See the following examples. ``` # Grab everything (default), go in steps size of 1 mystring[::1] # Grab everything, but go in step sizes of 2 (every other letter) mystring[0::2] # A handy way to reverse a string! mystring[::-1] ``` Strings have certain properties to them that affect the way we can, and cannot, interact with them. ### String Properties It's important to note that strings are *immutable*. This means that once a string is created, the elements within it can not be changed or replaced. For example: ``` mystring # Let's try to change the first letter mystring[0] = 'a' ``` The error tells it us to straight. Strings do not support assignment the same way other data types do. However, we *can* **concatenate** strings. ``` mystring # Combine strings through concatenation mystring + ". It's me." # We can reassign mystring to a new value, however mystring = mystring + ". It's me." mystring ``` One neat trick we can do with strings is use multiplication whenever we want to repeat characters a certain number of times. ``` letter = 'a' letter*20 ``` We already saw how to use len(). 
This is an example of a built-in string method, but there are quite a few more which we will cover next. ### Basic Built-in String methods Objects in Python usually have built-in methods. These methods are functions inside the object that can perform actions or commands on the object itself. We call methods with a period and then the method name. Methods are in the form: object.method(parameters) Parameters are extra arguments we can pass into the method. Don't worry if the details don't make 100% sense right now. We will be going into more depth with these later. Here are some examples of built-in methods in strings: ``` mystring # Make all letters in a string uppercase mystring.upper() # Make all letters in a string lowercase mystring.lower() # Split strings with a specified character as the separator. Spaces are the default. mystring.split() # Split by a specific character (doesn't include the character in the resulting string) mystring.split('W') ``` ### 1.0 Now Try This Given the string 'Amsterdam' give an index command that returns 'd'. Enter your code in the cell below: ``` s = 'Amsterdam' # Print out 'd' using indexing answer1 = # INSERT CODE HERE print(answer1) ``` Reverse the string 'Amsterdam' using slicing: ``` s ='Amsterdam' # Reverse the string using slicing answer2 = # INSERT CODE HERE print(answer2) ``` Given the string Amsterdam, extract the letter 'm' using negative indexing. ``` s ='Amsterdam' # Print out the 'm' answer3 = # INSERT CODE HERE print(answer3) ``` ## Booleans Python comes with *booleans* (values that are essentially binary: True or False, 1 or 0). It also has a placeholder object called None. Let's walk through a few quick examples of Booleans. ``` # Set object to be a boolean a = True #Show a ``` We can also use comparison operators to create booleans. We'll cover comparison operators a little later. ``` # Output is boolean 1 > 2 ``` We can use None as a placeholder for an object that we don't want to reassign yet: ``` # None placeholder b = None # Show print(b) ``` That's all to booleans! Next we start covering data structures. First up, lists. ## Lists Earlier when discussing strings we introduced the concept of a *sequence*. Lists is the most generalized version of sequences in Python. Unlike strings, they are mutable, meaning the elements inside a list can be changed! Lists are constructed with brackets [] and commas separating every element in the list. Let's start with seeing how we can build a list. ### Creating Lists ``` # Assign a list to an variable named my_list my_list = [1,2,3] ``` We just created a list of integers, but lists can actually hold elements of multiple data types. For example: ``` my_list = ['A string',23,100.232,'o'] ``` Just like strings, the len() function will tell you how many items are in the sequence of the list. ``` len(my_list) my_list = ['one','two','three',4,5] # Grab element at index 0 my_list[0] # Grab index 1 and everything past it my_list[1:] # Grab everything UP TO index 3 my_list[:3] ``` We can also use + to concatenate lists, just like we did for strings. ``` my_list + ['new item'] ``` Note: This doesn't actually change the original list! ``` my_list ``` You would have to reassign the list to make the change permanent. 
``` # Reassign my_list = my_list + ['add new item permanently'] my_list ``` We can also use the * for a duplication method similar to strings: ``` # Make the list double my_list * 2 # Again doubling not permanent my_list ``` Use the **append** method to permanently add an item to the end of a list: ``` # Append list1.append('append me!') # Show list1 ``` ### List Comprehensions Python has an advanced feature called list comprehensions. They allow for quick construction of lists. To fully understand list comprehensions we need to understand for loops. So don't worry if you don't completely understand this section, and feel free to just skip it since we will return to this topic later. But in case you want to know now, here are a few examples! ``` # Build a list comprehension by deconstructing a for loop within a [] first_col = [row[0] for row in matrix] first_col ``` We used a list comprehension here to grab the first element of every row in the matrix object. We will cover this in much more detail later on! ### 2.0 Now Try This Build this list [0,0,0] using any of the shown ways. ``` # Build the list answer1 = #INSERT CODE HERE print(answer1) ``` ## Tuples In Python tuples are very similar to lists, however, unlike lists they are *immutable* meaning they can not be changed. You would use tuples to present things that shouldn't be changed, such as days of the week, or dates on a calendar. You'll have an intuition of how to use tuples based on what you've learned about lists. We can treat them very similarly with the major distinction being that tuples are immutable. ### Constructing Tuples The construction of a tuples use () with elements separated by commas. For example: ``` # Create a tuple t = (1,2,3) # Check len just like a list len(t) # Can also mix object types t = ('one',2) # Show t # Use indexing just like we did in lists t[0] # Slicing just like a list t[-1] ``` ### Basic Tuple Methods Tuples have built-in methods, but not as many as lists do. Let's look at two of them: ``` # Use .index to enter a value and return the index t.index('one') # Use .count to count the number of times a value appears t.count('one') ``` ### Immutability It can't be stressed enough that tuples are immutable. To drive that point home: ``` t[0]= 'change' ``` Because of this immutability, tuples can't grow. Once a tuple is made we can not add to it. ``` t.append('nope') ``` ### When to use Tuples You may be wondering, "Why bother using tuples when they have fewer available methods?" To be honest, tuples are not used as often as lists in programming, but are used when immutability is necessary. If in your program you are passing around an object and need to make sure it does not get changed, then a tuple becomes your solution. It provides a convenient source of data integrity. You should now be able to create and use tuples in your programming as well as have an understanding of their immutability. ### 3.0 Now Try This Create a tuple. ``` answer1 = #INSERT CODE HERE print(type(answer1)) ``` ## Dictionaries We've been learning about *sequences* in Python but now we're going to switch gears and learn about *mappings* in Python. If you're familiar with other languages you can think of dictionaries as hash tables. So what are mappings? Mappings are a collection of objects that are stored by a *key*, unlike a sequence that stored objects by their relative position. This is an important distinction, since mappings won't retain order as is no *order* to keys.. 
A Python dictionary consists of a key and then an associated value. That value can be almost any Python object. ### Constructing a Dictionary Let's see how we can build dictionaries and better understand how they work. ``` # Make a dictionary with {} and : to signify a key and a value my_dict = {'key1':'value1','key2':'value2'} # Call values by their key my_dict['key2'] ``` Its important to note that dictionaries are very flexible in the data types they can hold. For example: ``` my_dict = {'key1':123,'key2':[12,23,33],'key3':['item0','item1','item2']} # Let's call items from the dictionary my_dict['key3'] # Can call an index on that value my_dict['key3'][0] # Can then even call methods on that value my_dict['key3'][0].upper() ``` We can affect the values of a key as well. For instance: ``` my_dict['key1'] # Subtract 123 from the value my_dict['key1'] = my_dict['key1'] - 123 #Check my_dict['key1'] ``` A quick note, Python has a built-in method of doing a self subtraction or addition (or multiplication or division). We could have also used += or -= for the above statement. For example: ``` # Set the object equal to itself minus 123 my_dict['key1'] -= 123 my_dict['key1'] ``` We can also create keys by assignment. For instance if we started off with an empty dictionary, we could continually add to it: ``` # Create a new dictionary d = {} # Create a new key through assignment d['animal'] = 'Dog' # Can do this with any object d['answer'] = 42 #Show d ``` ### Nesting with Dictionaries Hopefully you're starting to see how powerful Python is with its flexibility of nesting objects and calling methods on them. Let's see a dictionary nested inside a dictionary: ``` # Dictionary nested inside a dictionary nested inside a dictionary d = {'key1':{'nestkey':{'subnestkey':'value'}}} ``` Seems complicated, but let's see how we can grab that value: ``` # Keep calling the keys d['key1']['nestkey']['subnestkey'] ``` ### Dictionary Methods There are a few methods we can call on a dictionary. Let's get a quick introduction to a few of them: ``` # Create a typical dictionary d = {'key1':1,'key2':2,'key3':3} # Method to return a list of all keys d.keys() # Method to grab all values d.values() # Method to return tuples of all items (we'll learn about tuples soon) d.items() ``` ### 4.0 Now Try This Using keys and indexing, grab the 'hello' from the following dictionaries: ``` d = {'simple_key':'hello'} # Grab 'hello' answer1 = #INSERT CODE HERE print(answer1) d = {'k1':{'k2':'hello'}} # Grab 'hello' answer2 = #INSERT CODE HERE print(answer2) # Getting a little tricker d = {'k1':[{'nest_key':['this is deep',['hello']]}]} #Grab hello answer3 = #INSERT CODE HERE print(answer3) # This will be hard and annoying! d = {'k1':[1,2,{'k2':['this is tricky',{'tough':[1,2,['hello']]}]}]} # Grab hello answer4 = #INSERT CODE HERE print(answer4) ``` ## Comparison Operators As stated previously, comparison operators allow us to compare variables and output a Boolean value (True or False). These operators are the exact same as what you've seen in Math, so there's nothing new here. 
First we'll present a table of the comparison operators and then work through some examples: <h2> Table of Comparison Operators </h2><p> In the table below, a=9 and b=11.</p> <table class="table table-bordered"> <tr> <th style="width:10%">Operator</th><th style="width:45%">Description</th><th>Example</th> </tr> <tr> <td>==</td> <td>If the values of two operands are equal, then the condition becomes true.</td> <td> (a == b) is not true.</td> </tr> <tr> <td>!=</td> <td>If the values of two operands are not equal, then the condition becomes true.</td> <td>(a != b) is true</td> </tr> <tr> <td>&gt;</td> <td>If the value of the left operand is greater than the value of the right operand, then the condition becomes true.</td> <td> (a &gt; b) is not true.</td> </tr> <tr> <td>&lt;</td> <td>If the value of the left operand is less than the value of the right operand, then the condition becomes true.</td> <td> (a &lt; b) is true.</td> </tr> <tr> <td>&gt;=</td> <td>If the value of the left operand is greater than or equal to the value of the right operand, then the condition becomes true.</td> <td> (a &gt;= b) is not true. </td> </tr> <tr> <td>&lt;=</td> <td>If the value of the left operand is less than or equal to the value of the right operand, then the condition becomes true.</td> <td> (a &lt;= b) is true. </td> </tr> </table> Let's now work through quick examples of each of these. #### Equal ``` 4 == 4 1 == 0 ``` Note that <code>==</code> is a <em>comparison</em> operator, while <code>=</code> is an <em>assignment</em> operator. #### Not Equal ``` 4 != 5 1 != 1 ``` #### Greater Than ``` 8 > 3 1 > 9 ``` #### Less Than ``` 3 < 8 7 < 0 ``` #### Greater Than or Equal to ``` 7 >= 7 9 >= 4 ``` #### Less than or Equal to ``` 4 <= 4 1 <= 3 ``` Hopefully this was more of a review than anything new! Next, we move on to one of the most important aspects of building programs: functions and how to use them. ## Functions ### Introduction to Functions Here, we will explain what a function is in Python and how to create one. Functions will be one of our main building blocks when we construct larger and larger amounts of code to solve problems. **So what is a function?** Formally, a function is a useful device that groups together a set of statements so they can be run more than once. They can also let us specify parameters that can serve as inputs to the functions. On a more fundamental level, functions allow us to not have to repeatedly write the same code again and again. If you remember back to the lessons on strings and lists, remember that we used a function len() to get the length of a string. Since checking the length of a sequence is a common task you would want to write a function that can do this repeatedly at command. Functions will be one of most basic levels of reusing code in Python, and it will also allow us to start thinking of program design. ### def Statements Let's see how to build out a function's syntax in Python. It has the following form: ``` def name_of_function(arg1,arg2): ''' This is where the function's Document String (docstring) goes ''' # Do stuff here # Return desired result ``` We begin with <code>def</code> then a space followed by the name of the function. Try to keep names relevant, for example len() is a good name for a length() function. Also be careful with names, you wouldn't want to call a function the same name as a [built-in function in Python](https://docs.python.org/2/library/functions.html) (such as len). 
Next come a pair of parentheses with a number of arguments separated by a comma. These arguments are the inputs for your function. You'll be able to use these inputs in your function and reference them. After this you put a colon. Now here is the important step, you must indent to begin the code inside your function correctly. Python makes use of *whitespace* to organize code. Lots of other programing languages do not do this, so keep that in mind. Next you'll see the docstring, this is where you write a basic description of the function. Docstrings are not necessary for simple functions, but it's good practice to put them in so you or other people can easily understand the code you write. After all this you begin writing the code you wish to execute. The best way to learn functions is by going through examples. So let's try to go through examples that relate back to the various objects and data structures we learned about before. ### A simple print 'hello' function ``` def say_hello(): print('hello') ``` Call the function: ``` say_hello() ``` ### A simple greeting function Let's write a function that greets people with their name. ``` def greeting(name): print('Hello %s' %(name)) greeting('Bob') ``` ### Using return Let's see some example that use a <code>return</code> statement. <code>return</code> allows a function to *return* a result that can then be stored as a variable, or used in whatever manner a user wants. ### Example 3: Addition function ``` def add_num(num1,num2): return num1+num2 add_num(4,5) # Can also save as variable due to return result = add_num(4,5) print(result) ``` What happens if we input two strings? ``` add_num('one','two') ``` Note that because we don't declare variable types in Python, this function could be used to add numbers or sequences together! We'll later learn about adding in checks to make sure a user puts in the correct arguments into a function. Let's also start using <code>break</code>, <code>continue</code>, and <code>pass</code> statements in our code. We introduced these during the <code>while</code> lecture. Finally let's go over a full example of creating a function to check if a number is prime (a common interview exercise). We know a number is prime if that number is only evenly divisible by 1 and itself. Let's write our first version of the function to check all the numbers from 1 to N and perform modulo checks. ``` def is_prime(num): ''' Naive method of checking for primes. ''' for n in range(2,num): #'range()' is a function that returns an array based on the range you provide. Here, it is from 2 to 'num' inclusive. if num % n == 0: print(num,'is not prime') break # 'break' statements signify that we exit the loop if the above condition holds true else: # If never mod zero, then prime print(num,'is prime!') is_prime(16) is_prime(17) ``` Note how the <code>else</code> lines up under <code>for</code> and not <code>if</code>. This is because we want the <code>for</code> loop to exhaust all possibilities in the range before printing our number is prime. Also note how we break the code after the first print statement. As soon as we determine that a number is not prime we break out of the <code>for</code> loop. We can actually improve this function by only checking to the square root of the target number, and by disregarding all even numbers after checking for 2. We'll also switch to returning a boolean value to get an example of using return statements: ``` import math def is_prime2(num): ''' Better method of checking for primes. 
''' if num % 2 == 0 and num > 2: return False for i in range(3, int(math.sqrt(num)) + 1, 2): if num % i == 0: return False return True is_prime2(27) ``` Why don't we have any <code>break</code> statements? It should be noted that as soon as a function *returns* something, it shuts down. A function can deliver multiple print statements, but it will only obey one <code>return</code>. ### 5.0 Now Try This Write a function that capitalizes the first and fourth letters of a name. For this, you might want to make use of a string's `.upper()` method. cap_four('macdonald') --> MacDonald Note: `'macdonald'.capitalize()` returns `'Macdonald'` ``` def cap_four(name): return new_name # Check answer1 = cap_four('macdonald') print(answer1) ``` ## Modules and Packages ### Understanding modules Modules in Python are simply Python files with the .py extension, which implement a set of functions. Modules are imported from other modules using the import command. To import a module, we use the import command. Check out the full list of built-in modules in the Python standard library here. The first time a module is loaded into a running Python script, it is initialized by executing the code in the module once. If another module in your code imports the same module again, it will not be loaded twice. If we want to import the math module, we simply import the name of the module: ``` # import the library import math # use it (ceiling rounding) math.ceil(3.2) ``` ## Why, Where, and How we use NumPy NumPy is a library for Python that allows you to create matrices and multidimensional arrays, as well as perform many sophisticated mathematical operations on them. Previously, dealing with anything more than a single-dimensional array was very difficult in base Python. Additionally, there weren't a lot of built-in functionality to perform many standard mathematical operations that data scientists typically do with data, such as transposing, dot products, cumulative sums, etc. All of this makes NumPy very useful in statistical analyses and analyzing datasets to produce insights. ### Creating Arrays NumPy allows you to work with arrays very efficiently. The array object in NumPy is called *ndarray*. This is short for 'n-dimensional array'. We can create a NumPy ndarray object by using the array() function. ``` import numpy as np arr = np.array([1,2,3,4,5,6,7,8,9,10]) print(arr) print(type(arr)) ``` ### Indexing Indexing is the same thing as accessing an element of a list or string. In this case, we will be accessing an array element. You can access an array element by referring to its **index number**. The indexes in NumPy arrays start with 0, also like in base Python. The following example shows how you can access multiple elements of an array and perform operations on them. ``` import numpy as np arr = np.array([1,2,3,4,5,6,7,8,9,10]) print(arr[4] + arr[8]) ``` ### Slicing Slicing in NumPy behaves much like in base Python, a quick recap from above: We slice using this syntax: [start:end]. We can also define the step, like this: [start:end:step]. ``` # Reverse an array through backwards/negative stepping import numpy as np arr = np.array([3,7,9,0]) print(arr[::-1]) # Slice elements from the beginning to index 8 import numpy as np arr = np.array([1, 2, 3, 4, 5, 6, 7,8,9,10]) print(arr[:8]) ``` You'll notice we only got to index 7. That's because the end is always *non-inclusive*. We slice up to but not including the end value. The start index on the other hand, **is** inclusive. 
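To see both rules at once (purely as an extra illustration before the exercises), here is a slice that combines a start, an end, and a step:

```
import numpy as np

arr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])

# start at index 2 (included), stop before index 8 (excluded), take every 2nd element
print(arr[2:8:2])   # prints [3 5 7]
```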
### 6.0 Now Try This: Create an array of at least size 10 and populate it with random numbers. Then, use slicing to split it into two and create two new arrays. Then, find the sum of the third digits in each array. ``` # Answer here ``` ### Data Types Just like base Python, NumPy has many data types available. They are all differentiated by a single character, and here are the most common few: * i - int / integer (whole numbers) * b - boolean (true/false) * f - float (decimal numbers) * S - string * There are many more too! ``` # Checking the data type of an array import numpy as np arr = np.array([5, 7, 3, 1]) print(arr.dtype) # How to convert between types import numpy as np arr = np.array([4.4, 24.1, 3.7]) print(arr) print(arr.dtype) # Converts decimal numbers by rounding them all down to whole numbers newarr = arr.astype('i') print(newarr) print(newarr.dtype) ``` ### 7.0 Now Try This: Modify the code below to fix the error and make the addition work: ``` import numpy as np arr = np.array([1,3,5,7],dtype='S') arr2 = np.array([2,4,6,8],dtype='i') print(arr + arr2) ``` ### Copy vs. View In NumPy, you can work with either a copy of the data or the data itself, and it's very important that you know the difference. Namely, modifying a copy of the data will not change the original dataset but modifying the view **will**. Here are some examples: ``` # A Copy import numpy as np arr = np.array([6, 2, 1, 5, 3]) x = arr.copy() arr[0] = 8 print(arr) print(x) # A View import numpy as np arr = np.array([6, 2, 1, 5, 3]) x = arr.view() arr[0] = 8 print(arr) print(x) ``` ### 8.0 Now Try This: A student wants to create a copy of an array and modify the first element. The following is the code they wrote for it: arr = np.array([1,2,3,4,5]) x = arr x[0] = 0 Is this correct? ### Shape All NumPy arrays have an attribute called *shape*. This is helpful for 2d or n-dimensional arrays, but for simple lists, it is just the number of elements that it has. ``` # Print the shape of an array import numpy as np arr = np.array([2,7,3,7]) print(arr.shape) ``` ### 9.0 Now Try This: Without using Python, what is the shape of this array? Answer in the same format as the `shape` method. arr = np.array([[0,1,2].[3,4,5]) ### Iterating Through Arrays Iterating simply means to traverse or travel through an object. In the case of arrays, we can iterate through them by using simple for loops. ``` import numpy as np arr = np.array([1, 5, 7]) for x in arr: print(x) ``` ### Joining Arrays Joining combining the elements of multiple arrays into one. The basic way to do it is like this: ``` import numpy as np arr1 = np.array([7, 1, 0]) arr2 = np.array([2, 8, 1]) arr = np.concatenate((arr1, arr2)) print(arr) ``` ### Splitting Arrays Splitting is the opposite of joining arrays. It takes one array and creates multiple from it. ``` # Split array into 4 import numpy as np arr = np.array([1, 2, 3, 4, 5, 6,7,8]) newarr = np.array_split(arr, 4) print(newarr) ``` ### Searching Arrays Searching an array to find a certain element is a very important and basic operation. We can do this using the *where()* method. ``` import numpy as np arr = np.array([1, 2, 5, 9, 5, 3, 4]) x = np.where(arr == 4) # Returns the index of the array element(s) that matches this condition print(x) # Find all the odd numbers in an array import numpy as np arr = np.array([10, 20, 30, 40, 50, 60, 70, 80,99]) x = np.where(arr%2 == 1) print(x) ``` ### Sorting Arrays Sorting an array is another very important and commonly used operation. 
NumPy has a function called sort() for this task. ``` import numpy as np arr = np.array([4, 1, 0, 3]) print(np.sort(arr)) # Sorting a string array alphabetically import numpy as np arr = np.array(['zephyr', 'gate', 'match']) print(np.sort(arr)) ``` ### Filtering Arrays Sometimes you would want to create a new array from an existing array where you select elements out based on a certain condition. Let's say you have an array with all integers from 1 to 10. You would like to create a new array with only the odd numbers from that list. You can do this very efficiently with **filtering**. When you filter something, you only take out what you want, and the same principle applies to objects in NumPy. NumPy uses what's called a **boolean index list** to filter. This is an array of True and False values that correspond directly to the target array and what values you would like to filter. For example, using the example above, the target array would look like this: [1,2,3,4,5,6,7,8,9,10] And if you wanted to filter out the odd values, you would use this particular boolean index list: [True,False,True,False,True,False,True,False,True,False] Applying this list onto the target array will get you what you want: [1,3,5,7,9] A working code example is shown below: ``` import numpy as np arr = np.array([51, 52, 53, 54]) x = [False, False, True, True] newarr = arr[x] print(newarr) ``` We don't need to hard-code the True and False values. Like stated previously, we can filter based on conditions. ``` arr = np.array([51, 52, 53, 54]) # Create an empty list filter_arr = [] # go through each element in arr for element in arr: # if the element is higher than 52, set the value to True, otherwise False: if element > 52: filter_arr.append(True) else: filter_arr.append(False) newarr = arr[filter_arr] print(filter_arr) print(newarr) ``` Filtering is a very common task when working with data and as such, NumPy has an even more efficient way to perform it. It is possible to create a boolean index list directly from the target array and then apply it to obtain the filtered array. See the example below: ``` import numpy as np arr = np.array([10,20,30,40,50,60,70,80,90,100]) filter = arr > 50 filter_arr = arr[filter] print(filter) print(filter_arr) ``` ### 10.0 Now Try This: Create an array with the first 10 numbers of the Fibonacci sequence. Split this array into two. On each half, search for any multiples of 4. Next, filter both arrays for multiples of 5. Finally, take the two filtered arrays, join them, and sort them. ``` # Answer here ``` ## Resources - [Python Documentation](https://docs.python.org/3/) - [Official Python Tutorial](https://docs.python.org/3/tutorial/) - [W3Schools Python Tutorial](https://www.w3schools.com/python/)
github_jupyter
Move current working directory, in case for developing the machine learning program by remote machine or it is fine not to use below single line. ``` %cd /tmp/pycharm_project_881 import numpy as np import pandas as pd def sigmoid(x): return 1/(1+np.exp(-x)) def softmax(x): x = x - x.max(axis=1, keepdims=True) return np.exp(x)/np.sum(np.exp(x),axis=1, keepdims=True) df = pd.read_csv("adult.data.txt", names=["age","workclass","fnlwgt","education","education-num","marital-status" \ ,"occupation","relationship","race","sex","capital-gain","capital-loss","hours-per-week","native-country","class"]) dx = pd.read_csv("adult.test.txt", names=["age","workclass","fnlwgt","education","education-num","marital-status" \ ,"occupation","relationship","race","sex","capital-gain","capital-loss","hours-per-week","native-country","class"]) df.head() for lf in df: if df[lf].dtype == "object": df[lf] = df[lf].astype("category").cat.codes dx[lf] = dx[lf].astype("category").cat.codes else : df[lf] = (df[lf] - df[lf].mean())/(df[lf].max() - df[lf].min()) dx[lf] = (dx[lf] - dx[lf].mean()) / (dx[lf].max() - dx[lf].min()) df.head() ``` Set initial hyperparameters.. ``` x = df.drop(columns=["class"]) y = df["class"].values x_test = dx.drop(columns=["class"]) y_test = dx["class"].values multi_y = np.zeros((y.size, y.max()+1)) multi_y[np.arange(y.size), y] = 1 multi_y_test = np.zeros((y_test.size, y_test.max()+1)) multi_y_test[np.arange(y_test.size), y_test] = 1 inputSize = len(x.columns) numberOfNodes = 150 numberOfClass = y.max() + 1 numberOfExamples = x.shape[0] w1 = np.random.random_sample(size=(inputSize, numberOfNodes)) b1 = np.random.random_sample(numberOfNodes) w2 = np.random.random_sample(size=(numberOfNodes, numberOfClass)) b2 = np.random.random_sample(numberOfClass) batchSize = 32 trainNum = 150 learningRate = 0.01 # Start Training for k in range(trainNum + 1): cost = 0 accuracy = 0 for i in range(int(numberOfExamples/batchSize)): # Forward-Propagation z = x[i * batchSize : (i+1) * batchSize] z_y = multi_y[i * batchSize : (i+1) * batchSize] layer1 = np.matmul(z, w1) + b1 sig_layer1 = sigmoid(layer1) layer2 = np.matmul(sig_layer1, w2) + b2 soft_layer2 = softmax(layer2) pred = np.argmax(soft_layer2, axis=1) # Cost Function: Cross-Entropy loss cost += -(z_y * np.log(soft_layer2 + 1e-9) + (1-z_y) * np.log(1 - soft_layer2 + 1e-9)).sum() accuracy += (pred == y[i * batchSize : (i + 1) * batchSize]).sum() # Back-Propagation dlayer2 = soft_layer2 - multi_y[i * batchSize : (i+1) * batchSize] dw2 = np.matmul(sig_layer1.T, dlayer2) / batchSize db2 = dlayer2.mean(axis=0) dsig_layer1 = (dlayer2.dot(w2.T)) dlayer1 = sigmoid(layer1) * (1 - sigmoid(layer1)) * dsig_layer1 dw1 = np.matmul(z.T, dlayer1) / batchSize db1 = dlayer1.mean(axis=0) w2 -= learningRate * dw2 w1 -= learningRate * dw1 b2 -= learningRate * db2 b1 -= learningRate * db1 if k % 10 == 0 : print("-------- # : {} ---------".format(k)) print("cost: {}".format(cost/numberOfExamples)) print("accuracy: {} %".format(accuracy/numberOfExamples * 100)) # Test the trained model test_cost = 0 test_accuracy = 0 # Forward-Propagation layer1 = np.matmul(x_test, w1) + b1 sig_layer1 = sigmoid(layer1) layer2 = np.matmul(sig_layer1, w2) + b2 soft_layer2 = softmax(layer2) pred = np.argmax(soft_layer2, axis=1) # Cost Function: Cross-Entropy loss test_cost += -(multi_y_test * np.log(soft_layer2 + 1e-9) + (1-multi_y_test) * np.log(1 - soft_layer2 + 1e-9)).sum() test_accuracy += (pred == y_test).sum() print("---- Result of applying test data to the trained model") print("cost: 
{}".format(test_cost/numberOfExamples)) print("accuracy: {} %".format(test_accuracy/numberOfExamples * 100)) ```
github_jupyter
## Fashion Item Recognition with CNN > Antonopoulos Ilias (p3352004) <br /> > Ndoja Silva (p3352017) <br /> > MSc Data Science AUEB ## Table of Contents - [Data Loading](#Data-Loading) - [Hyperparameter Tuning](#Hyperparameter-Tuning) - [Model Selection](#Model-Selection) - [Evaluation](#Evaluation) ``` import gc import itertools import numpy as np import keras_tuner as kt import matplotlib.pyplot as plt import tensorflow as tf from sklearn.metrics import confusion_matrix print(tf.__version__) print("Num GPUs Available: ", len(tf.config.list_physical_devices("GPU"))) ``` ### Data Loading ``` fashion_mnist = tf.keras.datasets.fashion_mnist (train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data() train_images.shape train_labels set(train_labels) test_images.shape ``` This is a dataset of 60,000 28x28 grayscale images of 10 fashion categories, along with a test set of 10,000 images. The classes are: | Label | Description | |:-----:|-------------| | 0 | T-shirt/top | | 1 | Trouser | | 2 | Pullover | | 3 | Dress | | 4 | Coat | | 5 | Sandal | | 6 | Shirt | | 7 | Sneaker | | 8 | Bag | | 9 | Ankle boot | ``` class_names = [ "T-shirt/top", "Trouser", "Pullover", "Dress", "Coat", "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot", ] ``` ### Hyperparameter Tuning ``` SEED = 123456 np.random.seed(SEED) tf.random.set_seed(SEED) def clean_up(model_): tf.keras.backend.clear_session() del model_ gc.collect() def cnn_model_builder(hp): """Creates a HyperModel instance (or callable that takes hyperparameters and returns a Model instance).""" model = tf.keras.Sequential( [ tf.keras.layers.Conv2D( filters=hp.Int("1st-filter", min_value=32, max_value=128, step=16), kernel_size=(3, 3), strides=(1, 1), padding="same", kernel_regularizer="l2", dilation_rate=(1, 1), activation="relu", input_shape=(28, 28, 1), name="1st-convolution", ), tf.keras.layers.MaxPool2D( pool_size=(2, 2), strides=(2, 2), padding="same", name="1st-max-pooling" ), tf.keras.layers.Dropout( rate=hp.Float("1st-dropout", min_value=0.0, max_value=0.4, step=0.1), name="1st-dropout", ), tf.keras.layers.Conv2D( filters=hp.Int("2nd-filter", min_value=32, max_value=64, step=16), kernel_size=(3, 3), strides=(1, 1), padding="same", kernel_regularizer="l2", dilation_rate=(1, 1), activation="relu", name="2nd-convolution", ), tf.keras.layers.MaxPool2D( pool_size=(2, 2), strides=(2, 2), padding="same", name="2nd-max-pooling" ), tf.keras.layers.Dropout( rate=hp.Float("2nd-dropout", min_value=0.0, max_value=0.4, step=0.1), name="2nd-dropout", ), tf.keras.layers.Flatten(name="flatten-layer"), tf.keras.layers.Dense( units=hp.Int("dense-layer-units", min_value=32, max_value=128, step=16), kernel_regularizer="l2", activation="relu", name="dense-layer", ), tf.keras.layers.BatchNormalization(), tf.keras.layers.Dense(units=10, activation="softmax", name="output-layer"), ] ) model.compile( optimizer=tf.keras.optimizers.Adam( learning_rate=hp.Choice( "learning-rate", values=[1e-3, 1e-4, 2 * 1e-4, 4 * 1e-4] ) ), loss=tf.keras.losses.SparseCategoricalCrossentropy(), metrics=["accuracy"], ) return model # BayesianOptimization tuning with Gaussian process # THERE IS A BUG HERE: https://github.com/keras-team/keras-tuner/pull/655 # tuner = kt.BayesianOptimization( # cnn_model_builder, # objective="val_accuracy", # max_trials=5, # the total number of trials (model configurations) to test at most # allow_new_entries=True, # tune_new_entries=True, # seed=SEED, # directory="hparam-tuning", # project_name="cnn", # ) # Li, Lisha, and Kevin Jamieson. 
# "Hyperband: A Novel Bandit-Based Approach to Hyperparameter Optimization." # Journal of Machine Learning Research 18 (2018): 1-52. # https://jmlr.org/papers/v18/16-558.html tuner = kt.Hyperband( cnn_model_builder, objective="val_accuracy", max_epochs=50, # the maximum number of epochs to train one model seed=SEED, directory="hparam-tuning", project_name="cnn", ) tuner.search_space_summary() stop_early = tf.keras.callbacks.EarlyStopping(monitor="val_loss", patience=5) tuner.search( train_images, train_labels, epochs=40, validation_split=0.2, callbacks=[stop_early] ) # get the optimal hyperparameters best_hps = tuner.get_best_hyperparameters(num_trials=1)[0] print( f""" The hyperparameter search is complete. \n Results ======= | ---- optimal number of output filters in the 1st convolution : {best_hps.get('1st-filter')} | ---- optimal first dropout rate : {best_hps.get('1st-dropout')} | ---- optimal number of output filters in the 2nd convolution : {best_hps.get('2nd-filter')} | ---- optimal second dropout rate : {best_hps.get('2nd-dropout')} | ---- optimal number of units in the densely-connected layer : {best_hps.get('dense-layer-units')} | ---- optimal learning rate for the optimizer : {best_hps.get('learning-rate')} """ ) ``` ### Model Selection ``` model = tuner.get_best_models(num_models=1)[0] model.summary() tf.keras.utils.plot_model( model, to_file="static/cnn_model.png", show_shapes=True, show_layer_names=True ) clean_up(model) # build the model with the optimal hyperparameters and train it on the data for 50 epochs model = tuner.hypermodel.build(best_hps) history = model.fit(train_images, train_labels, epochs=50, validation_split=0.2) # keep best epoch val_acc_per_epoch = history.history["val_accuracy"] best_epoch = val_acc_per_epoch.index(max(val_acc_per_epoch)) + 1 print("Best epoch: %d" % (best_epoch,)) clean_up(model) hypermodel = tuner.hypermodel.build(best_hps) # retrain the model history = hypermodel.fit( train_images, train_labels, epochs=best_epoch, validation_split=0.2 ) ``` ### Evaluation ``` eval_result = hypermodel.evaluate(test_images, test_labels, verbose=3) print("[test loss, test accuracy]:", eval_result) def plot_history(hs, epochs, metric): print() plt.style.use("dark_background") plt.rcParams["figure.figsize"] = [15, 8] plt.rcParams["font.size"] = 16 plt.clf() for label in hs: plt.plot( hs[label].history[metric], label="{0:s} train {1:s}".format(label, metric), linewidth=2, ) plt.plot( hs[label].history["val_{0:s}".format(metric)], label="{0:s} validation {1:s}".format(label, metric), linewidth=2, ) x_ticks = np.arange(0, epochs + 1, epochs / 10) x_ticks[0] += 1 plt.xticks(x_ticks) plt.ylim((0, 1)) plt.xlabel("Epochs") plt.ylabel("Loss" if metric == "loss" else "Accuracy") plt.legend() plt.show() print("Train Loss : {0:.5f}".format(history.history["loss"][-1])) print("Validation Loss : {0:.5f}".format(history.history["val_loss"][-1])) print("Test Loss : {0:.5f}".format(eval_result[0])) print("-------------------") print("Train Accuracy : {0:.5f}".format(history.history["accuracy"][-1])) print("Validation Accuracy : {0:.5f}".format(history.history["val_accuracy"][-1])) print("Test Accuracy : {0:.5f}".format(eval_result[1])) # Plot train and validation error per epoch. 
plot_history(hs={"CNN": history}, epochs=best_epoch, metric="loss") plot_history(hs={"CNN": history}, epochs=best_epoch, metric="accuracy") def plot_confusion_matrix( cm, classes, normalize=False, title="Confusion matrix", cmap=plt.cm.PuBuGn ): plt.style.use("default") plt.rcParams["figure.figsize"] = [11, 9] plt.imshow(cm, interpolation="nearest", cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=90) plt.yticks(tick_marks, classes) if normalize: cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis] thresh = cm.max() / 2.0 for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text( j, i, cm[i, j], horizontalalignment="center", color="white" if cm[i, j] > thresh else "black", ) plt.tight_layout() plt.ylabel("True label") plt.xlabel("Predicted label") # Predict the values from the validation dataset Y_pred = hypermodel.predict(test_images) # Convert predictions classes to one hot vectors Y_pred_classes = np.argmax(Y_pred, axis=1) # compute the confusion matrix confusion_mtx = confusion_matrix(test_labels, Y_pred_classes) # plot the confusion matrix plot_confusion_matrix( confusion_mtx, classes=class_names, ) incorrect = [] for i in range(len(test_labels)): if not Y_pred_classes[i] == test_labels[i]: incorrect.append(i) if len(incorrect) == 4: break fig, ax = plt.subplots(2, 2, figsize=(12, 6)) fig.set_size_inches(10, 10) ax[0, 0].imshow(test_images[incorrect[0]].reshape(28, 28), cmap="gray") ax[0, 0].set_title( "Predicted Label : " + class_names[Y_pred_classes[incorrect[0]]] + "\n" + "Actual Label : " + class_names[test_labels[incorrect[0]]] ) ax[0, 1].imshow(test_images[incorrect[1]].reshape(28, 28), cmap="gray") ax[0, 1].set_title( "Predicted Label : " + class_names[Y_pred_classes[incorrect[1]]] + "\n" + "Actual Label : " + class_names[test_labels[incorrect[1]]] ) ax[1, 0].imshow(test_images[incorrect[2]].reshape(28, 28), cmap="gray") ax[1, 0].set_title( "Predicted Label : " + class_names[Y_pred_classes[incorrect[2]]] + "\n" + "Actual Label : " + class_names[test_labels[incorrect[2]]] ) ax[1, 1].imshow(test_images[incorrect[3]].reshape(28, 28), cmap="gray") ax[1, 1].set_title( "Predicted Label : " + class_names[Y_pred_classes[incorrect[3]]] + "\n" + "Actual Label : " + class_names[test_labels[incorrect[3]]] ) ```
github_jupyter
``` %matplotlib inline # Packages import os, glob, scipy, sys import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # Project directory base_dir = os.path.realpath('..') print(base_dir) # Project-specific functions funDir = os.path.join(base_dir,'Code/Functions') print(funDir) sys.path.append(funDir) import choiceModels, costFunctions, penalizedModelFit, simulateModel # General-use python functions dbPath = '/'.join(base_dir.split('/')[0:4]) sys.path.append('%s/Python'%dbPath) import FigureTools ``` ## Choose set #### Select subs who are constant in their study 1 cluster ``` model = 'MP_ppSOE' study = 1 clusters_4 = pd.read_csv(os.path.join(base_dir,'Data/Study1/ComputationalModel', 'ParamsClusters_study-1_baseMult-4_model-MP_ppSOE_precision-100.csv'),index_col=0)[ ['sub','ClustName']] clusters_6 = pd.read_csv(os.path.join(base_dir,'Data/Study1/ComputationalModel', 'ParamsClusters_study-1_baseMult-6_model-MP_ppSOE_precision-100.csv'),index_col=0)[ ['sub','ClustName']] exclude = np.array(pd.read_csv(os.path.join(base_dir,'Data/Study1/HMTG/exclude.csv'),index_col=None,header=None).T)[0] clusters = clusters_4.merge(clusters_6,on='sub') clusters = clusters.loc[~clusters['sub'].isin(exclude)] clusters.columns = ['sub','x4','x6'] clusters['stable'] = 1*(clusters['x4']==clusters['x6']) clusters.head() clusters = clusters[['sub','x4','stable']] clusters.columns = ['sub','cluster','stable'] clusters_study2 = pd.read_csv(os.path.join(base_dir,'Data/Study2/ComputationalModel', 'ParamsClusters_study-2_model-MP_ppSOE_precision-100.csv'),index_col=0)[ ['sub','ClustName']] exclude = np.array(pd.read_csv(os.path.join(base_dir,'Data/Study2/HMTG/exclude.csv'),index_col=0,header=0).T)[0] clusters_study2 = clusters_study2.loc[~clusters_study2['sub'].isin(exclude)] clusters_study2.columns = ['sub','cluster'] clusters_study2['stable'] = 1 clusters = clusters.append(clusters_study2) clusters.head() print(clusters.query('sub < 150')['stable'].sum()) print(clusters.query('sub > 150')['stable'].sum()) print(clusters['stable'].sum()) ``` #### Load self-reported strategy ``` strat_1 = pd.read_csv(os.path.join(base_dir, 'Data/Study%i/SelfReportStrategy/parsed.csv'%1),index_col=0) strat_1['sub'] = strat_1['record']-110000 strat_1.replace(to_replace=np.nan,value=0,inplace=True) strat_1.head() strat_2 = pd.read_csv(os.path.join(base_dir, 'Data/Study%i/SelfReportStrategy/parsed.csv'%2),index_col=0) strat_2.head() strat_2.replace(to_replace=np.nan,value=0,inplace=True) strat_2_map = pd.read_csv(os.path.join(base_dir,'Data/Study2/SubCastorMap.csv'),index_col=None,header=None) strat_2_map.columns = ['sub','record'] strat_2['record'] = strat_2['record'].astype(int) strat_2 = strat_2.merge(strat_2_map,on='record') strat_2.head() strat_both = strat_1.append(strat_2) strat_both = strat_both[['sub','GR','IA','GA','Altruism','AdvantEquity','DoubleInv','MoralOpport','Reciprocity','Return10','ReturnInv','RiskAssess','SplitEndow']] strat_both.replace(to_replace=np.nan,value=0,inplace=True) strat_both.head() ### Merge with clustering and additional measures strat_use = strat_both.merge(clusters,on='sub') strat_use = strat_use.loc[(strat_use['stable']==1)] strat_use.head() print (strat_use.shape) ``` ## Plot ``` strategyList = ['GR','IA','GA','Altruism','AdvantEquity','DoubleInv','MoralOpport', 'Reciprocity','Return10','ReturnInv','RiskAssess','SplitEndow'] allStrategies_melted = strat_use.melt(id_vars=['sub','cluster'],value_vars=strategyList, var_name='Strategy',value_name='Weight') 
allStrategies_melted.head() FigureTools.mydesign(context='poster') sns.set_palette('tab10',len(strategyList)) strategyListOrder = [list(strategyList).index(list(strat_use.iloc[:,1:-2].mean().sort_values( ascending=False).index)[i]) for i in range(len(strategyList))] strategyListOrdered = [strategyList[i] for i in strategyListOrder] fig,ax = plt.subplots(1,1,figsize=[16,5]) sns.barplot(data=allStrategies_melted,x='Strategy',y='Weight',ax=ax, errwidth = 1, capsize = 0.1,errcolor='k',alpha=.9, hue='cluster',hue_order=['GR','GA','IA','MO'], order = strategyListOrdered, ) strategyListOrdered_renamed = list(['50-50','Keep','Expectation'])+strategyListOrdered[3:] plt.xticks(range(len(strategyList)),strategyListOrdered_renamed,rotation=45); for i,strat in enumerate(strategyListOrdered): allImp = allStrategies_melted.loc[(allStrategies_melted['Strategy']==strat),'Weight'] stats = scipy.stats.f_oneway( allStrategies_melted.loc[(allStrategies_melted['Strategy']==strat) & (allStrategies_melted['cluster']=='GR'),'Weight'], allStrategies_melted.loc[(allStrategies_melted['Strategy']==strat) & (allStrategies_melted['cluster']=='GA'),'Weight'], allStrategies_melted.loc[(allStrategies_melted['Strategy']==strat) & (allStrategies_melted['cluster']=='IA'),'Weight'], allStrategies_melted.loc[(allStrategies_melted['Strategy']==strat) & (allStrategies_melted['cluster']=='MO'),'Weight']) if stats[1] < 0.05: FigureTools.add_sig_markers(ax,relationships=[[i-.2,i+.2,stats[1]]],linewidth=0,ystart=70) print ('%s: F = %.2f, p = %.4f'%(strat,stats[0],stats[1])) plt.xlabel('Self-reported strategy') plt.ylabel('Importance (%)') plt.legend(title='Model-derived strategy') groups = ['GR','GA','IA','MO'] pairs = [[0,1],[0,2],[0,3],[1,2],[2,3],[1,3]] for strat in ['IA','GA','GR']: print (strat) stratResults = pd.DataFrame(columns=['group1','group2','t','df','p']) for pair in pairs: group1 = groups[pair[0]] group2 = groups[pair[1]] samp1 = allStrategies_melted.loc[(allStrategies_melted['Strategy']==strat) & (allStrategies_melted['cluster']==group1),'Weight'] samp2 = allStrategies_melted.loc[(allStrategies_melted['Strategy']==strat) & (allStrategies_melted['cluster']==group2),'Weight'] df = len(samp1) + len(samp2) -1 stats = scipy.stats.ttest_ind(samp1,samp2) # print '%s vs %s: t(%i) = %.2f, p = %.4f, p-corr = %.4f'%( # group1,group2,df,stats[0],stats[1],stats[1]*len(pairs)) stratResults = stratResults.append(pd.DataFrame([[group1,group2,df,stats[0],stats[1]]], columns=stratResults.columns)) stratResults = stratResults.sort_values(by='p',ascending=False) stratResults['p_holm'] = np.multiply(np.array(stratResults['p']),np.arange(1,7)) print (stratResults) savedat = allStrategies_melted.loc[allStrategies_melted['Strategy'].isin(['IA','GA','GR','Altruism'])].reset_index(drop=True) savedat.to_csv(base_dir+'/Data/Pooled/SelfReportStrategies/SelfReportStrategies2.csv') ``` ## Plot by group in 3-strat space ``` stratsInclude = ['GR', 'IA', 'GA'] dat = allStrategies_melted.loc[allStrategies_melted['Strategy'].isin(stratsInclude)] dat.head() sns.barplot(data=dat,x='cluster',y='Weight', errwidth = 1, capsize = 0.1,errcolor='k',alpha=.9, hue='Strategy',hue_order=stratsInclude, order = ['GR','GA','IA','MO'], ) plt.legend(loc=[1.1,.5]) # plt.legend(['Keep','50-50','Expectation','Altruism']) dat_piv = dat.pivot_table(index=['sub','cluster'],columns='Strategy',values='Weight').reset_index() dat_piv.head() sns.lmplot(data=dat_piv,x='GA',y='IA',hue='cluster',fit_reg=False) FigureTools.mydesign() sns.set_context('talk') colors = 
sns.color_palette('tab10',4) markers = ['o','*','s','d'] sizes = [70,170,60,80] clusters = ['GR','GA','IA','MO'] fig,ax = plt.subplots(1,3,figsize=[12,4]) axisContents = [['IA','GA'],['GA','GR'],['GR','IA']] faceWhiteFactor = 3 faceColors = colors for i in range(faceWhiteFactor): faceColors = np.add(faceColors,np.tile([1,1,1],[4,1])) faceColors = faceColors/(faceWhiteFactor+1) stratTranslate = dict(zip(['IA','GA','GR'],['50-50','Expectation','Keep'])) for i in range(3): points = [] axCur = ax[i] for clustInd,clust in enumerate(clusters): print (clust) x_point = dat_piv.loc[dat_piv['cluster']==clust,axisContents[i][0]].mean() y_point = dat_piv.loc[dat_piv['cluster']==clust,axisContents[i][1]].mean() handle = axCur.scatter(x_point,y_point, alpha=1,zorder=10, linewidth=2, edgecolor=colors[clustInd], c=[faceColors[clustInd]], s=sizes[clustInd], marker=markers[clustInd]) points.append(handle) x_sterr = scipy.stats.sem(dat_piv.loc[dat_piv['cluster']==clust,axisContents[i][0]]) y_sterr = scipy.stats.sem(dat_piv.loc[dat_piv['cluster']==clust,axisContents[i][1]]) x_range = [x_point - x_sterr, x_point + x_sterr] y_range = [y_point - y_sterr, y_point + y_sterr] axCur.plot(x_range,[y_point,y_point],c=colors[clustInd],linewidth=2,zorder=1)#,alpha=.5) axCur.plot([x_point,x_point],y_range,c=colors[clustInd],linewidth=2,zorder=1)#,alpha=.5) axCur.set(xlabel = 'Percentage %s'%stratTranslate[axisContents[i][0]], ylabel = 'Percentage %s'%stratTranslate[axisContents[i][1]]) ax[2].legend(points,clusters)#,loc=[1.1,.5]) for i in range(3): ax[i].set(xlim = [0,85], ylim = [0,85], aspect=1) plt.tight_layout() plt.suptitle('Relative importance of main 3 motives',y=1.05) plt.show() # FigureTools.mysavefig(fig,'Motives') ``` ##### Set up 3d plot ``` %matplotlib inline from mpl_toolkits.mplot3d import Axes3D FigureTools.mydesign() sns.set_style('darkgrid', {"axes.facecolor": "1"}) sns.set_context('paper') colors = sns.color_palette('tab10',4) markers = ['o','*','s','d'] sizes = [70,170,60,80] clusters = ['GR','GA','IA','MO'] faceWhiteFactor = 3 faceColors = colors for i in range(faceWhiteFactor): faceColors = np.add(faceColors,np.tile([1,1,1],[4,1])) faceColors = faceColors/(faceWhiteFactor+1) stratTranslate = dict(zip(['IA','GA','GR'],['50-50','Expectation','Keep'])) fig = plt.figure(figsize = [11,8]) ax = fig.add_subplot(111, projection='3d') sns.set_context('talk') points = [] for clustInd,clust in enumerate(clusters): dat = dat_piv.query('cluster == @clust') means = dat[['IA','GA','GR']].mean().values sterrs = scipy.stats.sem(dat[['IA','GA','GR']]) handle = ax.scatter(*means, linewidth=1, edgecolor=colors[clustInd], c=[faceColors[clustInd]], s=sizes[clustInd]/2, marker=markers[clustInd]) points.append(handle) ax.plot([0,means[0]],[means[1],means[1]],[means[2],means[2]],':',color=colors[clustInd]) ax.plot([means[0],means[0]],[0,means[1]],[means[2],means[2]],':',color=colors[clustInd]) ax.plot([means[0],means[0]],[means[1],means[1]],[0,means[2]],':',color=colors[clustInd]) ax.plot([means[0] - sterrs[0],means[0] + sterrs[0]], [means[1],means[1]], [means[2],means[2]], c=colors[clustInd],linewidth=2,zorder=1) ax.plot([means[0],means[0]], [means[1] - sterrs[1],means[1] + sterrs[1]], [means[2],means[2]], c=colors[clustInd],linewidth=2,zorder=1) ax.plot([means[0],means[0]], [means[1],means[1]], [means[2] - sterrs[2],means[2] + sterrs[2]], c=colors[clustInd],linewidth=2,zorder=1) ax.set(xlabel = '%% %s'%stratTranslate['IA'], ylabel = '%% %s'%stratTranslate['GA'], zlabel = '%% %s'%stratTranslate['GR']) 
ax.legend(points,clusters, title = 'Participant\ngroup', loc = [1.1,.5], frameon=False) ax.set(xlim = [0,85], ylim = [0,50], zlim = [0,85]) plt.title('Self-reported importance of motives',y=1.05) plt.tight_layout() ax.view_init(elev=35,azim=-15) # Or azim -110 plt.savefig(base_dir + '/Results/Figure6.pdf',bbox_inches='tight') ```
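A note on the pairwise comparisons earlier in this notebook: the Holm-style correction is applied by hand, sorting the p-values and multiplying by a rank factor from `np.arange(1,7)`. A library routine applies the same multipliers but also enforces the step-down monotonicity (corrected p-values never decrease out of order), which makes it a useful cross-check. A minimal sketch, assuming the six uncorrected pairwise p-values are available — the numbers below are illustrative; in the loop above you could pass `stratResults['p'].values` instead. `multipletests` is part of statsmodels.

```python
import numpy as np
from statsmodels.stats.multitest import multipletests

# Six uncorrected p-values from the pairwise group comparisons (illustrative values).
raw_p = np.array([0.0008, 0.012, 0.034, 0.21, 0.47, 0.81])

# Holm step-down correction: the smallest p is scaled by 6, the next by 5, ..., the
# largest by 1, with a running maximum so the corrected p-values stay monotone.
reject, p_holm, _, _ = multipletests(raw_p, alpha=0.05, method='holm')

for p_raw, p_corr, rej in zip(raw_p, p_holm, reject):
    print(f"raw p = {p_raw:.4f} -> Holm-corrected p = {p_corr:.4f}, reject H0: {rej}")
```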
github_jupyter
``` import sys sys.path.append('../src') from mcmc_norm_learning.algorithm_1_v4 import to_tuple from mcmc_norm_learning.rules_4 import get_log_prob from pickle_wrapper import unpickle import pandas as pd import yaml import tqdm from numpy import log with open("../params_nc.yaml", 'r') as fd: params = yaml.safe_load(fd) num_obs=params["num_observations"] true_norm=params['true_norm']['exp'] num_obs base_path="../data_nc/exp_nc3/" exp_paths=!ls $base_path def get_num_viols(nc_obs): n_viols=0 for obs in nc_obs: for action_pairs in zip(obs, obs[1:]): if action_pairs[0] in [(('pickup', 8), ('putdown', 8, '1')),(('pickup', 40), ('putdown', 40, '1'))]: if action_pairs[1][1][2] =='1': #not in obl zone n_viols+=1 elif action_pairs[1][1][2] =='3': if action_pairs[1][1][1] not in [35,13]: #permission not applicable n_viols+=1 return (n_viols) z1=pd.DataFrame() for exp_path in exp_paths: temp=pd.DataFrame() #Add params obs_path=base_path+exp_path+"/obs.pickle" obs = unpickle(obs_path) temp["w_nc"] = [float(exp_path.split("w_nc=")[1].split(",")[0])] trial=1 if "trial" not in exp_path else exp_path.split(",trial=")[-1] temp["trial"]=[int(trial)] #Add violations n_viols=get_num_viols(obs) temp["violation_rate"]=[n_viols/num_obs] #Add lik,post prior_true=!grep "For True Norm" {base_path+exp_path+"/run.log"} lik_true=!grep "lik_no_norm" {base_path+exp_path+"/run.log"} post_true=float(prior_true[0].split("log_prior=")[-1]) + float(lik_true[0].split("lik_true_norm=")[1]) temp["true_norm_posterior"]=[post_true] #Add if True Norm found in some chain if_true_norm=!grep "True norm in some chain(s)" {base_path+exp_path+"/chain_info.txt"} temp["if_true_norm_found"]= ["False" not in if_true_norm[0]] #Rank of True Norm if found as per posterior rank_df=pd.read_csv(base_path+exp_path+"/ranked_posteriors.csv",index_col=False) rank_true=rank_df.loc[rank_df.expression==str(to_tuple(true_norm))][["post_rank","log_posterior"]].values rank=rank_true[0][0] if rank_true.shape[0]==1 else None temp["true_norm_rank_wrt_posterior"]= [rank] #max posterior found in chains rank_1=rank_df.loc[rank_df.post_rank==1] temp["max_posterior_in_chain"]= [rank_1.log_posterior.values[0]] temp["norm_wi_max_post"]= [rank_1.expression.values[0]] #chain summary chain_details = pd.read_csv(f"{base_path+exp_path}/chain_posteriors_nc.csv") n_chains1=chain_details.loc[chain_details.expression==str(true_norm)].chain_number.nunique() temp["#chains_wi_true_norm"]= [n_chains1] chain_max_min=chain_details.groupby(["chain_number"])[["log_posterior"]].agg(['min', 'max', 'mean', 'std']) n_chains2=(chain_max_min["log_posterior","max"]>post_true).sum() temp["#chains_wi_post_gt_true_norm"]= [n_chains2] #Posterior Estimation n=params["n"] top_norms=chain_details.loc[chain_details.chain_pos>2*n\ ].groupby(["expression"]).agg({"log_posterior":["mean","count"]}) top_norms["chain_rank"]=top_norms[[('log_posterior', 'count')]].rank(method='dense',ascending=False) top_norms.sort_values(by=["chain_rank"],inplace=True) rank_true_wi_freq=top_norms.iloc[top_norms.index==str(true_norm)]["chain_rank"].values rank_true_wi_freq = float(rank_true_wi_freq[0]) if rank_true_wi_freq.size>0 else None temp["#rank_true_wi_freq"]= [rank_true_wi_freq] post_norm_top=top_norms.loc[top_norms.chain_rank==1]["log_posterior","mean"].values post_norm_top = post_norm_top[0] if post_norm_top.size>0 else None temp["posterior_norm_top"]= [post_norm_top] #Num equivalent norms in posterior log_lik=float(lik_true[0].split("lik_true_norm=")[1]) 
top_norms["log_prior"]=top_norms.index.to_series().apply(lambda x: get_log_prob("NORMS",eval(x)))[0] top_norms["log_lik"]=top_norms[('log_posterior', 'mean')]-top_norms["log_prior"] mask_equiv=abs((top_norms["log_lik"]-log_lik)/log_lik)<=0.0005 n_equiv=mask_equiv.sum() temp["total_equiv_norms_in_top_norms"]= [n_equiv] n_equiv_20=mask_equiv[:20].sum() temp["total_equiv_norms_in_top_20_norms"]= [n_equiv_20] best_equiv_norm_rank=top_norms.loc[mask_equiv]["chain_rank"].min() temp["best_equiv_norm_rank"]= [best_equiv_norm_rank] best_equiv_norm=eval(top_norms.loc[mask_equiv].index[0]) if n_equiv>0 else None temp["best_equiv_norm"]= [best_equiv_norm] z1=z1.append(temp) z1.columns z1["if_equiv_norm_found"]=z1["total_equiv_norms_in_top_norms"]>0 z1["if_true_or_equiv_norm_found"]=z1["if_equiv_norm_found"] | z1["if_true_norm_found"] z1["true_post/max_post"]=z1["true_norm_posterior"]/z1["max_posterior_in_chain"] z1["%chains_wi_true_norm"]=z1["#chains_wi_true_norm"]/10 z1["%chains_wi_post_gt_true_norm"]=z1["#chains_wi_post_gt_true_norm"]/10 z1["expected_violation_rate"]=z1["w_nc"]*108/243 z1["chk"]=z1["violation_rate"]/z1["expected_violation_rate"] ``` ### Summary ``` print ("%trials where true norms found: {:.2%}".format(z1["if_true_norm_found"].mean())) print ("%trials where equiv norms found: {:.2%}".format(z1["if_equiv_norm_found"].mean())) print ("%trials where true/equiv norms found: {:.2%}".format(z1["if_true_or_equiv_norm_found"].mean())) ``` ### Where are neither True nor equivalent Norms found ? ``` z1.groupby(["chk"]).agg({"if_true_or_equiv_norm_found":"mean","trial":"count"}) import matplotlib.pyplot as plt z1.plot(x="chk",y=["max_posterior_in_chain","true_norm_posterior",\ "#rank_true_wi_freq","best_equiv_norm_rank"],subplots=True,\ marker="o",kind = 'line',ls="none",figsize = (10,10)) #z1.plot(x="chk",y="true_norm_posterior",kind="scatter") 108/243*0.3 z1.groupby(["w_nc","violation_rate"]).agg({"trial":"count","if_true_or_equiv_norm_found":"mean"}) z1.groupby(["w_nc"]).agg({"trial":"count","if_true_norm_found":[("mean")],"if_equiv_norm_found":"mean",\ "if_true_or_equiv_norm_found":"mean","true_norm_posterior":"mean",\ "true_post/max_post":"mean","%chains_wi_true_norm":"mean"}) z1.dtypes z1.groupby(["w_nc"]).mean() true_norm z1.loc[~(z1.if_true_norm_found)][["w_nc","best_equiv_norm_rank","best_equiv_norm"]].values ```
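A portability note on the accumulation pattern used above: `DataFrame.append` (as in `z1 = z1.append(temp)`) was deprecated in pandas 1.4 and removed in pandas 2.0, so the summary table will not build on current pandas. A minimal sketch of the same pattern with `pd.concat`, collecting the per-experiment one-row frames in a list and concatenating once at the end; the directory names and metric values below are illustrative stand-ins for the real `temp` frames built in the loop.

```python
import pandas as pd

rows = []
for exp_path in ["w_nc=0.1,trial=1", "w_nc=0.3,trial=2"]:  # stand-ins for the experiment directories
    temp = pd.DataFrame({
        "w_nc": [float(exp_path.split("w_nc=")[1].split(",")[0])],
        "trial": [int(exp_path.split(",trial=")[-1])],
        "violation_rate": [0.0],  # placeholder for the metrics computed in the real loop
    })
    rows.append(temp)

# Equivalent of the repeated z1 = z1.append(temp), built in a single call.
z1 = pd.concat(rows, ignore_index=True)
print(z1)
```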
github_jupyter
# Data Management with OpenACC This version of the lab is intended for C/C++ programmers. The Fortran version of this lab is available [here](../../Fortran/jupyter_notebook/openacc_fortran_lab2.ipynb). You will receive a warning five minutes before the lab instance shuts down. Remember to save your work! If you are about to run out of time, please see the [Post-Lab](#Post-Lab-Summary) section for saving this lab to view offline later. --- Let's execute the cell below to display information about the GPUs running on the server. To do this, execute the cell block below by giving it focus (clicking on it with your mouse), and hitting Ctrl-Enter, or pressing the play button in the toolbar above. If all goes well, you should see some output returned below the grey cell. ``` !pgaccelinfo ``` --- ## Introduction Our goal for this lab is to use the OpenACC Data Directives to properly manage our data. <img src="images/development_cycle.png" alt="OpenACC development cycle" width="50%"> This is the OpenACC 3-Step development cycle. **Analyze** your code, and predict where potential parallelism can be uncovered. Use profiler to help understand what is happening in the code, and where parallelism may exist. **Parallelize** your code, starting with the most time consuming parts. Focus on maintaining correct results from your program. **Optimize** your code, focusing on maximizing performance. Performance may not increase all-at-once during early parallelization. We are currently tackling the **parallelize** and **optimize** steps by adding the *data clauses* necessary to parallelize the code without CUDA Managed Memory and then *structured data directives* to optimize the data movement of our code. --- ## Run the Code (With Managed Memory) In the [previous lab](openacc_c_lab1.ipynb), we added OpenACC loop directives and relied on a feature called CUDA Managed Memory to deal with the separate CPU & GPU memories for us. Just adding OpenACC to our two loop nests we achieved a considerable performance boost. However, managed memory is not compatible with all GPUs or all compilers and it sometimes performs worse than programmer-defined memory management. Let's start with our solution from the previous lab and use this as our performance baseline. Note the runtime from the follow cell. ``` !cd ../source_code/lab2 && make clean && make laplace_managed && ./laplace_managed ``` ### Optional: Analyze the Code If you would like a refresher on the code files that we are working on, you may view both of them using the two links below by openning the downloaded file. [jacobi.c](../source_code/lab2/jacobi.c) [laplace2d.c](../source_code/lab2/laplace2d.c) ## Building Without Managed Memory For this exercise we ultimately don't want to use CUDA Managed Memory, so let's remove the managed option from our compiler options. Try building and running the code now. What happens? ``` !cd ../source_code/lab2 && make clean && make laplace_no_managed && ./laplace ``` Uh-oh, this time our code failed to build. 
Let's take a look at the compiler output to understand why: ``` jacobi.c: laplace2d.c: PGC-S-0155-Compiler failed to translate accelerator region (see -Minfo messages): Could not find allocated-variable index for symbol (laplace2d.c: 47) calcNext: 47, Accelerator kernel generated Generating Tesla code 48, #pragma acc loop gang /* blockIdx.x */ 50, #pragma acc loop vector(128) /* threadIdx.x */ 54, Generating implicit reduction(max:error) 48, Accelerator restriction: size of the GPU copy of Anew,A is unknown 50, Loop is parallelizable PGC-F-0704-Compilation aborted due to previous errors. (laplace2d.c) PGC/x86-64 Linux 18.7-0: compilation aborted ``` This error message is not very intuitive, so let me explain it to you.: * `PGC-S-0155-Compiler failed to translate accelerator region (see -Minfo messages): Could not find allocated-variable index for symbol (laplace2d.c: 47)` - The compiler doesn't like something about a variable from line 47 of our code. * `48, Accelerator restriction: size of the GPU copy of Anew,A is unknown` - I don't see any further information about line 47, but at line 48 the compiler is struggling to understand the size and shape of the arrays Anew and A. It turns out, this is our problem. So, what these cryptic compiler errors are telling us is that the compiler needs to create copies of A and Anew on the GPU in order to run our code there, but it doesn't know how big they are, so it's giving up. We'll need to give the compiler more information about these arrays before it can move forward, so let's find out how to do that. ## OpenACC Data Clauses Data clauses allow the programmer to specify data transfers between the host and device (or in our case, the CPU and the GPU). Because they are clauses, they can be added to other directives, such as the `parallel loop` directive that we used in the previous lab. Let's look at an example where we do not use a data clause. ```cpp int *A = (int*) malloc(N * sizeof(int)); #pragma acc parallel loop for( int i = 0; i < N; i++ ) { A[i] = 0; } ``` We have allocated an array `A` outside of our parallel region. This means that `A` is allocated in the CPU memory. However, we access `A` inside of our loop, and that loop is contained within a *parallel region*. Within that parallel region, `A[i]` is attempting to access a memory location within the GPU memory. We didn't explicitly allocate `A` on the GPU, so one of two things will happen. 1. The compiler will understand what we are trying to do, and automatically copy `A` from the CPU to the GPU. 2. The program will check for an array `A` in GPU memory, it won't find it, and it will throw an error. Instead of hoping that we have a compiler that can figure this out, we could instead use a *data clause*. ```cpp int *A = (int*) malloc(N * sizeof(int)); #pragma acc parallel loop copy(A[0:N]) for( int i = 0; i < N; i++ ) { A[i] = 0; } ``` We will learn the `copy` data clause first, because it is the easiest to use. With the inclusion of the `copy` data clause, our program will now copy the content of `A` from the CPU memory, into GPU memory. Then, during the execution of the loop, it will properly access `A` from the GPU memory. After the parallel region is finished, our program will copy `A` from the GPU memory back to the CPU memory. Let's look at one more direct example. 
```cpp int *A = (int*) malloc(N * sizeof(int)); for( int i = 0; i < N; i++ ) { A[i] = 0; } #pragma acc parallel loop copy(A[0:N]) for( int i = 0; i < N; i++ ) { A[i] = 1; } ``` Now we have two loops; the first loop will execute on the CPU (since it does not have an OpenACC parallel directive), and the second loop will execute on the GPU. Array `A` will be allocated on the CPU, and then the first loop will execute. This loop will set the contents of `A` to be all 0. Then the second loop is encountered; the program will copy the array `A` (which is full of 0's) into GPU memory. Then, we will execute the second loop on the GPU. This will edit the GPU's copy of `A` to be full of 1's. At this point, we have two separate copies of `A`. The CPU copy is full of 0's, and the GPU copy is full of 1's. Now, after the parallel region finishes, the program will copy `A` back from the GPU to the CPU. After this copy, both the CPU and the GPU will contain a copy of `A` that contains all 1's. The GPU copy of `A` will then be deallocated. This image offers another step-by-step example of using the copy clause. ![copy_step_by_step](images/copy_step_by_step.png) We are also able to copy multiple arrays at once by using the following syntax. ```cpp #pragma acc parallel loop copy(A[0:N], B[0:N]) for( int i = 0; i < N; i++ ) { A[i] = B[i]; } ``` Of course, we might not want to copy our data both to and from the GPU memory. Maybe we only need the array's values as inputs to the GPU region, or maybe it's only the final results we care about, or perhaps the array is only used temporarily on the GPU and we don't want to copy it either directive. The following OpenACC data clauses provide a bit more control than just the `copy` clause. * `copyin` - Create space for the array and copy the input values of the array to the device. At the end of the region, the array is deleted without copying anything back to the host. * `copyout` - Create space for the array on the device, but don't initialize it to anything. At the end of the region, copy the results back and then delete the device array. * `create` - Create space of the array on the device, but do not copy anything to the device at the beginning of the region, nor back to the host at the end. The array will be deleted from the device at the end of the region. * `present` - Don't do anything with these variables. I've put them on the device somewhere else, so just assume they're available. You may also use them to operate on multiple arrays at once, by including those arrays as a comma separated list. ```cpp #pragma acc parallel loop copy( A[0:N], B[0:M], C[0:Q] ) ``` You may also use more than one data clause at a time. ```cpp #pragma acc parallel loop create( A[0:N] ) copyin( B[0:M] ) copyout( C[0:Q] ) ``` ### Array Shaping The shape of the array specifies how much data needs to be transferred. Let's look at an example: ```cpp #pragma acc parallel loop copy(A[0:N]) for( int i = 0; i < N; i++ ) { A[i] = 0; } ``` Focusing specifically on the `copy(A[0:N])`, the shape of the array is defined within the brackets. The syntax for array shape is `[starting_index:size]`. This means that (in the code example) we are copying data from array `A`, starting at index 0 (the start of the array), and copying N elements (which is most likely the length of the entire array). We are also able to only copy a portion of the array: ```cpp #pragma acc parallel loop copy(A[1:N-2]) ``` This would copy all of the elements of `A` except for the first, and last element. 
Lastly, if you do not specify a starting index, 0 is assumed. This means that ```cpp #pragma acc parallel loop copy(A[0:N]) ``` is equivalent to ```cpp #pragma acc parallel loop copy(A[:N]) ``` ## Making the Sample Code Work without Managed Memory In order to build our example code without CUDA managed memory we need to give the compiler more information about the arrays. How do our two loop nests use the arrays `A` and `Anew`? The `calcNext` function take `A` as input and generates `Anew` as output, but also needs Anew copied in because we need to maintain that *hot* boundary at the top. So you will want to add a `copyin` clause for `A` and a `copy` clause for `Anew` on your region. The `swap` function takes `Anew` as input and `A` as output, so it needs the exact opposite data clauses. It's also necessary to tell the compiler the size of the two arrays by using array shaping. Our arrays are `m` times `n` in size, so we'll tell the compiler their shape starts at `0` and has `n*m` elements, using the syntax above. Go ahead and add data clauses to the two `parallel loop` directives in `laplace2d.c`. From the top menu, click on *File*, and *Open* `laplace2d.c` from the current directory at `C/source_code/lab2` directory. Remember to **SAVE** your code after changes, before running below cells. Then try to build again. ``` !cd ../source_code/lab2 && make clean && make laplace_no_managed && ./laplace ``` Well, the good news is that it should have built correctly and run. If it didn't, check your data clauses carefully. The bad news is that now it runs a whole lot slower than it did before. Let's try to figure out why. The PGI compiler provides your executable with built-in timers, so let's start by enabling them and seeing what it shows. You can enable these timers by setting the environment variable `PGI_ACC_TIME=1`. Run the cell below to get the program output with the built-in profiler enabled. **Note:** Profiling will not be covered in this lab. Please have a look at the supplementary [slides](https://drive.google.com/file/d/1Asxh0bpntlmYxAPjBxOSThFIz7Ssd48b/view?usp=sharing). ``` !cd ../source_code/lab2 && make clean && make laplace_no_managed && PGI_ACC_TIME=1 ./laplace ``` Your output should look something like what you see below. 
``` total: 189.014216 s Accelerator Kernel Timing data /labs/lab2/English/C/laplace2d.c calcNext NVIDIA devicenum=0 time(us): 53,290,779 47: compute region reached 1000 times 47: kernel launched 1000 times grid: [4094] block: [128] device time(us): total=2,427,090 max=2,447 min=2,406 avg=2,427 elapsed time(us): total=2,486,633 max=2,516 min=2,464 avg=2,486 47: reduction kernel launched 1000 times grid: [1] block: [256] device time(us): total=19,025 max=20 min=19 avg=19 elapsed time(us): total=48,308 max=65 min=44 avg=48 47: data region reached 4000 times 47: data copyin transfers: 17000 device time(us): total=33,878,622 max=2,146 min=6 avg=1,992 57: data copyout transfers: 10000 device time(us): total=16,966,042 max=2,137 min=9 avg=1,696 /labs/lab2/English/C/laplace2d.c swap NVIDIA devicenum=0 time(us): 36,214,666 62: compute region reached 1000 times 62: kernel launched 1000 times grid: [4094] block: [128] device time(us): total=2,316,826 max=2,331 min=2,305 avg=2,316 elapsed time(us): total=2,378,419 max=2,426 min=2,366 avg=2,378 62: data region reached 2000 times 62: data copyin transfers: 8000 device time(us): total=16,940,591 max=2,352 min=2,114 avg=2,117 70: data copyout transfers: 9000 device time(us): total=16,957,249 max=2,133 min=13 avg=1,884 ``` The total runtime was roughly 190 with the profiler turned on, but only about 130 seconds without. We can see that `calcNext` required roughly 53 seconds to run by looking at the `time(us)` line under the `calcNext` line. We can also look at the `data region` section and determine that 34 seconds were spent copying data to the device and 17 seconds copying data out for the device. The `swap` function has very similar numbers. That means that the program is actually spending very little of its runtime doing calculations. Why is the program copying so much data around? The screenshot below comes from the Nsight Systems profiler and shows part of one step of our outer while loop. The greenish and pink colors are data movement and the blue colors are our kernels (calcNext and swap). Notice that for each kernel we have copies to the device (greenish) before and copies from the device (pink) after. The means we have 4 segments of data copies for every iteration of the outer while loop. ![Profile before adding data region](images/pre-data-c.png) Let's contrast this with the managed memory version. The image below shows the same program built with managed memory. Notice that there's a lot of "data migration" at the first kernel launch, where the data is first used, but there's no data movement between kernels after that. This tells me that the data movement isn't really needed between these kernels, but we need to tell the compiler that. ![Profile using managed memory](images/managed-c.png) Because the loops are in two separate function, the compiler can't really see that the data is reused on the GPU between those function. We need to move our data movement up to a higher level where we can reuse it for each step through the program. To do that, we'll add OpenACC data directives. --- ## OpenACC Structured Data Directive The OpenACC data directives allow the programmer to explicitly manage the data on the device (in our case, the GPU). Specifically, the structured data directive will mark a static region of our code as a **data region**. 
```cpp < Initialize data on host (CPU) > #pragma acc data < data clauses > { < Code > } ``` Device memory allocation happens at the beginning of the region, and device memory deallocation happens at the end of the region. Additionally, any data movement from the host to the device (CPU to GPU) happens at the beginning of the region, and any data movement from the device to the host (GPU to CPU) happens at the end of the region. Memory allocation/deallocation and data movement is defined by which clauses the programmer includes (the `copy`, `copyin`, `copyout`, and `create` clauses we saw above). ### Encompassing Multiple Compute Regions A single data region can contain any number of parallel/kernels regions. Take the following example: ```cpp #pragma acc data copyin(A[0:N], B[0:N]) create(C[0:N]) { #pragma acc parallel loop for( int i = 0; i < N; i++ ) { C[i] = A[i] + B[i]; } #pragma acc parallel loop for( int i = 0; i < N; i++ ) { A[i] = C[i] + B[i]; } } ``` You may also encompass function calls within the data region: ```cpp void copy(int *A, int *B, int N) { #pragma acc parallel loop for( int i = 0; i < N; i++ ) { A[i] = B[i]; } } ... #pragma acc data copyout(A[0:N],B[0:N]) copyin(C[0:N]) { copy(A, C, N); copy(A, B, N); } ``` ### Adding the Structured Data Directive to our Code Add a structured data directive to the code to properly handle the arrays `A` and `Anew`. We've already added data clauses to our two functions, so this time we'll move up the calltree and add a structured data region around our while loop in the main program. Think about the input and output to this while loop and choose your data clauses for `A` and `Anew` accordingly. From the top menu, click on *File*, and *Open* `jacobi.c` from the current directory at `C/source_code/lab2` directory. Remember to **SAVE** your code after changes, before running below cells. Then, run the following script to check you solution. You code should run just as good as (or slightly better) than our managed memory code. ``` !cd ../source_code/lab2 && make clean && make laplace_no_managed && ./laplace ``` Did your runtime go down? It should have but the answer should still match the previous runs. Let's take a look at the profiler now. ![Profile after adding data region](images/post-data-c.png) Notice that we no longer see the greenish and pink bars on either side of each iteration, like we did before. Instead, we see a red OpenACC `Enter Data` region which contains some greenish bars corresponding to host-to-device data transfer preceding any GPU kernel launches. This is because our data movement is now handled by the outer data region, not the data clauses on each loop. Data clauses count how many times an array has been placed into device memory and only copies data the outermost time it encounters an array. This means that the data clauses we added to our two functions are now used only for shaping and no data movement will actually occur here anymore, thanks to our outer `data` region. This reference counting behavior is really handy for code development and testing. Just like we just did, you can add clauses to each of your OpenACC `parallel loop` or `kernels` regions to get everything running on the accelerator and then just wrap those functions with a data region when you're done and the data movement magically disappears. Furthermore, if you want to isolate one of those functions into a standalone test case you can do so easily, because the data clause is already in the code. 
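For reference, one possible shape of the change in `jacobi.c` is sketched below: a structured data region wrapping the convergence loop so `A` and `Anew` stay resident on the device across every call to `calcNext` and `swap`. This is a sketch under the assumptions of this lab (arrays of size `n*m`, final results needed back in `A`, the hot boundary already initialized in `Anew` on the host), not necessarily the lab's reference solution; the `copyin`-only variant shown in the update-directive section below is another valid choice when the host copy of `A` is refreshed explicitly.

```cpp
#pragma acc data copy(A[:n*m]) copyin(Anew[:n*m])
{
    while ( error > tol && iter < iter_max )
    {
        error = calcNext(A, Anew, m, n);
        swap(A, Anew, m, n);

        if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error);

        iter++;
    }
}
```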
--- ## OpenACC Update Directive When we use the data clauses you are only able to copy data between host and device memory at the beginning and end of your regions, but what if you need to copy data in the middle? For example, what if we wanted to debug our code by printing out the array every 100 steps to make sure it looks right? In order to transfer data at those times, we can use the `update` directive. The update directive will explicitly transfer data between the host and the device. The `update` directive has two clauses: * `self` - The self clause will transfer data from the device to the host (GPU to CPU). You will sometimes see this clause called the `host` clause. * `device` - The device clause will transfer data from the host to the device (CPU to GPU). The syntax would look like: `#pragma acc update self(A[0:N])` `#pragma acc update device(A[0:N])` All of the array shaping rules apply. As an example, let's create a version of our laplace code where we want to print the array `A` after every 100 iterations of our loop. The code will look like this: ```cpp #pragma acc data copyin( A[:m*n],Anew[:m*n] ) { while ( error > tol && iter < iter_max ) { error = calcNext(A, Anew, m, n); swap(A, Anew, m, n); if(iter % 100 == 0) { printf("%5d, %0.6f\n", iter, error); for( int i = 0; i < n; i++ ) { for( int j = 0; j < m; j++ ) { printf("%0.2f ", A[i+j*m]); } printf("\n"); } } iter++; } } ``` Let's run this code (on a very small data set, so that we don't overload the console by printing thousands of numbers). ``` !cd ../source_code/lab2/update && make clean && make laplace_no_update && ./laplace_no_update 10 10 ``` We can see that the array is not changing. This is because the host copy of `A` is not being *updated* between loop iterations. Let's add the update directive, and see how the output changes. ```cpp #pragma acc data copyin( A[:m*n],Anew[:m*n] ) { while ( error > tol && iter < iter_max ) { error = calcNext(A, Anew, m, n); swap(A, Anew, m, n); if(iter % 100 == 0) { printf("%5d, %0.6f\n", iter, error); #pragma acc update self(A[0:m*n]) for( int i = 0; i < n; i++ ) { for( int j = 0; j < m; j++ ) { printf("%0.2f ", A[i+j*m]); } printf("\n"); } } iter++; } } ``` ``` !cd ../source_code/lab2/update/solution && make clean && make laplace_update && ./laplace_update 10 10 ``` Although you weren't required to add an `update` directive to this example code, except in the contrived example above, it's an extremely important directive for real applications because it allows you to do I/O or communication necessary for your code to execute without having to pay the cost of allocating and decallocating arrays on the device each time you do so. --- ## Conclusion Relying on managed memory to handle data management can reduce the effort the programmer needs to parallelize their code, however, not all GPUs work with managed memory, and it is also lower performance than using explicit data management. In this lab you learned about how to use *data clauses* and *structured data directives* to explicitly manage device memory and remove your reliance on CUDA Managed Memory. --- ## Bonus Task If you would like some additional lessons on using OpenACC, there is an Introduction to OpenACC video series available from the OpenACC YouTube page. The fifth video in the series covers a lot of the content that was covered in this lab. 
[Introduction to Parallel Programming with OpenACC - Part 5](https://youtu.be/0zTX7-CPvV8) ## Post-Lab Summary If you would like to download this lab for later viewing, it is recommend you go to your browsers File menu (not the Jupyter notebook file menu) and save the complete web page. This will ensure the images are copied down as well. You can also execute the following cell block to create a zip-file of the files you've been working on, and download it with the link below. ``` %%bash cd .. rm -f openacc_files.zip zip -r openacc_files.zip * ``` **After** executing the above zip command, you should be able to download the zip file [here](../openacc_files.zip) --- ## Licensing This material is released by NVIDIA Corporation under the Creative Commons Attribution 4.0 International (CC BY 4.0).
github_jupyter
``` import sys sys.path.insert(1, '../functions') import importlib import numpy as np import nbformat import plotly.express import plotly.express as px import pandas as pd import scipy.optimize as optimization import food_bank_functions import food_bank_bayesian import matplotlib.pyplot as plt import seaborn as sns from food_bank_functions import * from food_bank_bayesian import * import time importlib.reload(food_bank_functions) np.random.seed(1) problem = 'poisson' loc = '../simulations/' + problem + '/' plt.style.use('PaperDoubleFig.mplstyle.txt') # Make some style choices for plotting colorWheel =['#2bd1e5', '#281bf5', '#db1bf5', '#F5CD1B', '#FF5733','#9cf51b',] dash_styles = ["", (4, 1.5), (1, 1), (3, 1, 1.5, 1), (5, 1, 1, 1), (5, 1, 2, 1, 2, 1), (2, 2, 3, 1.5), (1, 2.5, 3, 1.2)] ``` # Scaling with n dataset ``` algos_to_exclude = ['Threshold','Expected-Filling', 'Expect-Threshold', 'Fixed-Threshold', 'Expected_Filling', 'Expect_Threshold', 'Fixed_Threshold'] df_one = pd.read_csv(loc+'scale_with_n.csv') # algos_to_exclude = ['Threshold','Expected-Filling'] df_one = (df_one[~df_one.variable.isin(algos_to_exclude)] .rename({'variable': 'Algorithm'}, axis = 1) ) df_one = df_one.sort_values(by='Algorithm') df_one.Algorithm.unique() print(df_one.Algorithm.str.title) df_one.Algorithm.unique() ``` # Expected Waterfilling Levels ``` df_two = pd.read_csv(loc+'comparison_of_waterfilling_levels.csv') df_two = (df_two[~df_two.variable.isin(algos_to_exclude)].rename({'variable': 'Algorithm'}, axis=1)) df_two['Algorithm'] = df_two['Algorithm'].replace({'hope_Online':'Hope-Online', 'hope_Full':'Hope-Full', 'et_Online':'ET-Online', 'et_Full':'ET-Full', 'Max_Min_Heuristic':'Max-Min'}) df_two = df_two.sort_values(by='Algorithm') print(df_two.Algorithm.unique()) df_two.head df_two = df_two.sort_values(by='Algorithm') df_two.Algorithm.unique() ``` # Group allocation difference ``` df_three = pd.read_csv(loc+'fairness_group_by_group.csv') df_three = (df_three[~df_three.variable.isin(algos_to_exclude)] .rename({'variable': 'Algorithm'}, axis = 1) ) df_three = df_three.sort_values(by='Algorithm') df_three.Algorithm.unique() legends = False fig = plt.figure(figsize = (20,15)) # Create an array with the colors you want to use colors = ["#FFC20A", "#1AFF1A", "#994F00", "#006CD1", "#D35FB7", "#40B0A6", "#E66100"]# Set your custom color palette plt.subplot(2,2,1) sns.set_palette(sns.color_palette(colors)) if legends: g = sns.lineplot(x='NumGroups', y='value', hue='Algorithm', style = 'Algorithm', dashes = dash_styles, data=df_one[df_one.Norm == 'Linf']) else: g = sns.lineplot(x='NumGroups', y='value', hue='Algorithm', style = 'Algorithm', dashes = dash_styles, data=df_one[df_one.Norm == 'Linf'], legend=False) plt.xlabel('Number of Agents') plt.ylabel('Distance') plt.title('Maximum Difference Between OPT and ALG Allocations') plt.subplot(2,2,2) sns.set_palette(sns.color_palette(colors)) if legends: g = sns.lineplot(x='NumGroups', y='value', hue='Algorithm', style = 'Algorithm', dashes = dash_styles, data=df_one[df_one.Norm == 'L1']) else: g = sns.lineplot(x='NumGroups', y='value', hue='Algorithm', style = 'Algorithm', dashes = dash_styles, data=df_one[df_one.Norm == 'L1'], legend=False) plt.xlabel('Number of Agents') plt.ylabel('Distance') plt.title('Total Difference Between OPT and ALG Allocations') plt.subplot(2,2,3) new_colors = colors[1:3] + colors[4:]+['#000000'] new_dashes = dash_styles[1:3]+dash_styles[4:] sns.set_palette(sns.color_palette(new_colors)) if legends: g = sns.lineplot(x='Group', y='value', 
style='Algorithm', hue = 'Algorithm', data=df_two, dashes=new_dashes) else: g = sns.lineplot(x='Group', y='value', style='Algorithm', hue = 'Algorithm', data=df_two, dashes=new_dashes, legend=False) plt.title('Estimated Threshold Level by Agent') plt.xlabel('Agent') plt.ylabel('Level') # plt.xlabel('Estimated Level') plt.subplot(2,2,4) sns.set_palette(sns.color_palette(colors)) try: sns.lineplot(x='Agent', y='value', hue='Algorithm', data=df_three, style = 'Algorithm', dashes = dash_styles) except ValueError: sns.lineplot(x='Group', y='value', hue='Algorithm', data=df_three, style = 'Algorithm', dashes = dash_styles) plt.title('Allocation Difference per Agent between OPT and ALG') plt.ylabel('Difference') plt.xlabel('Agent') plt.show() fig.savefig(problem+'.pdf', bbox_inches = 'tight',pad_inches = 0.01, dpi=900) print(colors) print(new_colors) ```
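For reference, the two panels in the top row reduce (up to any normalization done inside the simulation code) to simple norms of the per-agent allocation gap between the offline optimum and the online algorithm. A minimal sketch, assuming `opt_alloc` and `alg_alloc` are arrays of per-agent allocations; the numbers are illustrative.

```python
import numpy as np

opt_alloc = np.array([1.0, 0.8, 1.2, 0.9])   # per-agent allocation under OPT (illustrative)
alg_alloc = np.array([1.1, 0.7, 1.0, 0.9])   # per-agent allocation under the online algorithm

gap = opt_alloc - alg_alloc
linf_distance = np.max(np.abs(gap))   # "Maximum Difference" panel (L-infinity norm)
l1_distance = np.sum(np.abs(gap))     # "Total Difference" panel (L1 norm)

print(linf_distance, l1_distance)     # approximately 0.2 and 0.4 for these numbers
```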
github_jupyter
``` import sys import keras import tensorflow as tf print('python version:', sys.version) print('keras version:', keras.__version__) print('tensorflow version:', tf.__version__) ``` # 6.3 Advanced use of recurrent neural networks --- ## A temperature-forecasting problem ### Inspecting the data of the Jena weather dataset ``` import matplotlib.pyplot as plt import numpy as np import os %matplotlib inline data_dir = 'jena_climate' fname = os.path.join(data_dir, 'jena_climate_2009_2016.csv') f = open(fname) data = f.read() f.close() lines = data.split('\n') header = lines[0].split(',') lines = lines[1:] print(header) print(len(lines)) ``` ### Parsing the data ``` float_data = np.zeros((len(lines), len(header) - 1)) for i, line in enumerate(lines): values = [float(x) for x in line.split(',')[1:]] float_data[i, :] = values ``` ### Plotting the temperature timeseries ``` temp = float_data[:, 1] plt.plot(range(len(temp)), temp) plt.show() ``` ### Plotting the first 10 days of the temperature timeseries ``` plt.plot(range(1440), temp[:1440]) plt.show() ``` ### Normalizing the data ``` mean = float_data[:200000].mean(axis = 0) float_data -= mean std = float_data[:200000].std(axis = 0) float_data /= std ``` ### Generator yielding timeseries samples and their targets ``` def generator(data, lookback, delay, min_index, max_index, shuffle = False, batch_size = 128, step = 6, revert = False): if max_index is None: max_index = len(data) - delay - 1 i = min_index + lookback while 1: if shuffle: rows = np.random.randint(min_index + lookback, max_index, size = batch_size) else: if i + batch_size >= max_index: i = min_index + lookback rows = np.arange(i, min(i + batch_size, max_index)) i += len(rows) samples = np.zeros((len(rows), lookback//step, data.shape[-1])) targets = np.zeros((len(rows),)) for j, row in enumerate(rows): indices = range(rows[j] - lookback, rows[j], step) samples[j] = data[indices] targets[j] = data[rows[j] + delay][1] if revert: yield samples[:, ::-1, :], targets else: yield samples, targets ``` ### Preparing the training, validation and test generators ``` lookback = 1440 step = 6 delay = 144 batch_size = 128 train_gen = generator(float_data, lookback = lookback, delay = delay, min_index = 0, max_index = 200000, shuffle = True, step = step, batch_size = batch_size) val_gen = generator(float_data, lookback = lookback, delay = delay, min_index = 200001, max_index = 300000, step = step, batch_size = batch_size) test_gen = generator(float_data, lookback = lookback, delay = delay, min_index = 300001, max_index = None, step = step, batch_size = batch_size) train_gen_r = generator(float_data, lookback = lookback, delay = delay, min_index = 0, max_index = 200000, shuffle = True, step = step, batch_size = batch_size, revert = True) val_gen_r = generator(float_data, lookback = lookback, delay = delay, min_index = 200001, max_index = 300000, step = step, batch_size = batch_size, revert = True) test_gen_r = generator(float_data, lookback = lookback, delay = delay, min_index = 300001, max_index = None, step = step, batch_size = batch_size, revert = True) # How many steps to draw from val_gen in order to see the entire validation set val_steps = (300000 - 200001 - lookback) // batch_size # How many steps to draw from test_gen in order to see the entire test set test_steps = (len(float_data) - 300001 - lookback) // batch_size ``` ### Computing the common-sense baseline MAE ``` def evaluate_naive_method(): batch_maes = [] for step in range(val_steps): samples, targets = next(val_gen) preds = 
samples[:, -1, 1] mae = np.mean(np.abs(preds - targets)) batch_maes.append(mae) print(np.mean(batch_maes)) evaluate_naive_method() ``` ### Training and evaluating a densely connected model ``` from keras import Sequential from keras import layers from keras.optimizers import RMSprop model = Sequential() model.add(layers.Flatten(input_shape = (lookback // step, float_data.shape[-1]))) model.add(layers.Dense(32, activation = 'relu')) model.add(layers.Dense(1)) model.compile(optimizer = RMSprop(), loss = 'mae') history = model.fit_generator(train_gen, steps_per_epoch = 500, epochs = 20, validation_data = val_gen, validation_steps = val_steps) ``` ### Plotting results ``` loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(1, len(loss) + 1) plt.figure() plt.plot(epochs, loss, 'bo', label = 'Training loss') plt.plot(epochs, val_loss, 'b', label = 'Validation loss') plt.title('Training and validation loss') plt.legend() plt.show() ``` ### Training and evaluating a GRU-based model ``` from keras.models import Sequential from keras import layers from keras.optimizers import RMSprop model = Sequential() model.add(layers.GRU(32, implementation = 1, input_shape = (None, float_data.shape[-1]))) model.add(layers.Dense(1)) model.compile(optimizer = RMSprop(), loss = 'mae') history = model.fit_generator(train_gen, steps_per_epoch = 500, epochs = 20, validation_data = val_gen, validation_steps = val_steps) ``` ### Plotting results ``` loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(1, len(loss) + 1) plt.figure() plt.plot(epochs, loss, 'bo', label = 'Training loss') plt.plot(epochs, val_loss, 'b', label = 'Validation loss') plt.title('Training and validation loss') plt.legend() plt.show() ``` ### Training and evaluating a dropout-regularized GRU-based model ``` from keras.models import Sequential from keras import layers from keras.optimizers import RMSprop model = Sequential() model.add(layers.GRU(32, implementation = 1, dropout = 0.2, recurrent_dropout = 0.2, input_shape = (None, float_data.shape[-1]))) model.add(layers.Dense(1)) model.compile(optimizer = RMSprop(), loss = 'mae') history = model.fit_generator(train_gen, steps_per_epoch = 500, epochs = 40, validation_data = val_gen, validation_steps = val_steps) ``` ### Plotting results ``` loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(1, len(loss) + 1) plt.figure() plt.plot(epochs, loss, 'bo', label = 'Training loss') plt.plot(epochs, val_loss, 'b', label = 'Validation loss') plt.title('Training and validation loss') plt.legend() plt.show() ``` ### Training and evaluating a dropout-regularized, stacked GRU model ``` from keras.models import Sequential from keras import layers from keras.optimizers import RMSprop model = Sequential() model.add(layers.GRU(32, implementation = 1, dropout = 0.1, recurrent_dropout = 0.5, return_sequences = True, input_shape = (None, float_data.shape[-1]))) model.add(layers.GRU(64, implementation = 1, activation = 'relu', dropout = 0.1, recurrent_dropout = 0.5)) model.add(layers.Dense(1)) model.compile(optimizer = RMSprop(), loss = 'mae') history = model.fit_generator(train_gen, steps_per_epoch = 500, epochs = 40, validation_data = val_gen, validation_steps = val_steps) ``` ### Plotting results ``` loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(1, len(loss) + 1) plt.figure() plt.plot(epochs, loss, 'bo', label = 'Training loss') plt.plot(epochs, val_loss, 'b', label = 'Validation 
loss') plt.title('Training and validation loss') plt.legend() plt.show() ``` ### Training and evaluating an GRU-based model using reversed sequences ``` from keras.models import Sequential from keras import layers from keras.optimizers import RMSprop model = Sequential() model.add(layers.GRU(32, implementation = 1, input_shape = (None, float_data.shape[-1]))) model.add(layers.Dense(1)) model.compile(optimizer = RMSprop(), loss = 'mae') history = model.fit_generator(train_gen_r, steps_per_epoch = 500, epochs = 20, validation_data = val_gen_r, validation_steps = val_steps) ``` ### Plotting results ``` loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(1, len(loss) + 1) plt.figure() plt.plot(epochs, loss, 'bo', label = 'Training loss') plt.plot(epochs, val_loss, 'b', label = 'Validation loss') plt.title('Training and validation loss') plt.legend() plt.show() ``` ### Training and evaluating an LSTM using reversed sequences ``` from keras.datasets import imdb from keras.preprocessing import sequence from keras import layers from keras.models import Sequential max_features = 10000 # Number of words to consider as features maxlen = 500 # Cuts off texts after this number of words (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words = max_features) # Reverses sequences x_train = [x[::-1] for x in x_train] x_test = [x[::-1] for x in x_test] # Pads sequences x_train = sequence.pad_sequences(x_train, maxlen = maxlen) x_test = sequence.pad_sequences(x_test, maxlen = maxlen) model = Sequential() model.add(layers.Embedding(max_features, 128)) model.add(layers.LSTM(32)) model.add(layers.Dense(1, activation = 'sigmoid')) model.compile(optimizer = 'rmsprop', loss = 'binary_crossentropy', metrics = ['acc']) history = model.fit(x_train, y_train, epochs = 10, batch_size = 128, validation_split = 0.2) ``` ### Training and evaluating a bidirectional LSTM ``` model = Sequential() model.add(layers.Embedding(max_features, 32)) model.add(layers.Bidirectional(layers.LSTM(32))) model.add(layers.Dense(1, activation = 'sigmoid')) model.compile(optimizer = 'rmsprop', loss = 'binary_crossentropy', metrics = ['acc']) history = model.fit(x_train, y_train, epochs = 10, batch_size = 128, validation_split = 0.2) ``` ### Training a bidirectional GRU ``` from keras.models import Sequential from keras import layers from keras.optimizers import RMSprop model = Sequential() model.add(layers.Bidirectional(layers.GRU(32, implementation = 1), input_shape = (None, float_data.shape[-1]))) model.add(layers.Dense(1)) model.compile(optimizer = RMSprop(), loss = 'mae') history = model.fit_generator(train_gen, steps_per_epoch = 500, epochs = 40, validation_data = val_gen, validation_steps = val_steps) ``` ### Plotting results ``` loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(1, len(loss) + 1) plt.figure() plt.plot(epochs, loss, 'bo', label = 'Training loss') plt.plot(epochs, val_loss, 'b', label = 'Validation loss') plt.title('Training and validation loss') plt.legend() plt.show() ```
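One note on interpreting the results above: because the inputs were standardized, the MAE values reported during training are in normalized units. To express a validation MAE in degrees Celsius, scale it back by the standard deviation of the temperature column (index 1) that was used for normalization. A minimal sketch; both numbers below are illustrative stand-ins — in the notebook you would use `min(history.history['val_loss'])` and `std[1]` from the normalization cell.

```python
val_mae_normalized = 0.29    # best validation MAE in normalized units (example value)
temperature_std = 8.85       # stand-in for std[1], the temperature column's std in degrees C

celsius_mae = val_mae_normalized * temperature_std
print(f"Validation MAE is roughly {celsius_mae:.2f} degrees Celsius")
```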
github_jupyter
# Improving Data Quality **Learning Objectives** 1. Resolve missing values 2. Convert the Date feature column to a datetime format 3. Rename a feature column, remove a value from a feature column 4. Create one-hot encoding features 5. Understand temporal feature conversions ## Introduction Recall that machine learning models can only consume numeric data, and that numeric data should be "1"s or "0"s. Data is said to be "messy" or "untidy" if it is missing attribute values, contains noise or outliers, has duplicates, wrong data, upper/lower case column names, and is essentially not ready for ingestion by a machine learning algorithm. This notebook presents and solves some of the most common issues of "untidy" data. Note that different problems will require different methods, and they are beyond the scope of this notebook. Each learning objective will correspond to a __#TODO__ in the [student lab notebook](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/courses/machine_learning/deepdive2/launching_into_ml/labs/improve_data_quality.ipynb) -- try to complete that notebook first before reviewing this solution notebook. ``` # Use the chown command to change the ownership of the repository to user !sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst ``` ### Import Libraries ``` import os # Here we'll import Pandas and Numpy data processing libraries import pandas as pd import numpy as np from datetime import datetime # Use matplotlib for visualizing the model import matplotlib.pyplot as plt # Use seaborn for data visualization import seaborn as sns %matplotlib inline ``` ### Load the Dataset The dataset is based on California's [Vehicle Fuel Type Count by Zip Code](https://data.ca.gov/dataset/vehicle-fuel-type-count-by-zip-codeSynthetic) report. The dataset has been modified to make the data "untidy" and is thus a synthetic representation that can be used for learning purposes. ``` # Creating directory to store dataset if not os.path.isdir("../data/transport"): os.makedirs("../data/transport") # Download the raw .csv data by copying the data from a cloud storage bucket. !gsutil cp gs://cloud-training-demos/feat_eng/transport/untidy_vehicle_data.csv ../data/transport # ls shows the working directory's contents. # Using the -l parameter will lists files with assigned permissions !ls -l ../data/transport ``` ### Read Dataset into a Pandas DataFrame Next, let's read in the dataset just copied from the cloud storage bucket and create a Pandas DataFrame. We also add a Pandas .head() function to show you the top 5 rows of data in the DataFrame. Head() and Tail() are "best-practice" functions used to investigate datasets. ``` # Reading "untidy_vehicle_data.csv" file using the read_csv() function included in the pandas library. df_transport = pd.read_csv('../data/transport/untidy_vehicle_data.csv') # Output the first five rows. df_transport.head() ``` ### DataFrame Column Data Types DataFrames may have heterogenous or "mixed" data types, that is, some columns are numbers, some are strings, and some are dates etc. Because CSV files do not contain information on what data types are contained in each column, Pandas infers the data types when loading the data, e.g. if a column contains only numbers, Pandas will set that column’s data type to numeric: integer or float. Run the next cell to see information on the DataFrame. ``` # The .info() function will display the concise summary of an dataframe. 
df_transport.info() ``` From what the .info() function shows us, we have six string objects and one float object. We can definitely see more of the "string" object values now! ``` # Let's print out the first and last five rows of each column. print(df_transport,5) ``` ### Summary Statistics At this point, we have only one column which contains a numerical value (e.g. Vehicles). For features which contain numerical values, we are often interested in various statistical measures relating to those values. Note, that because we only have one numeric feature, we see only one summary stastic - for now. ``` # We can use .describe() to see some summary statistics for the numeric fields in our dataframe. df_transport.describe() ``` Let's investigate a bit more of our data by using the .groupby() function. ``` # The .groupby() function is used for spliting the data into groups based on some criteria. grouped_data = df_transport.groupby(['Zip Code','Model Year','Fuel','Make','Light_Duty','Vehicles']) # Get the first entry for each month. df_transport.groupby('Fuel').first() ``` ### Checking for Missing Values Missing values adversely impact data quality, as they can lead the machine learning model to make inaccurate inferences about the data. Missing values can be the result of numerous factors, e.g. "bits" lost during streaming transmission, data entry, or perhaps a user forgot to fill in a field. Note that Pandas recognizes both empty cells and “NaN” types as missing values. #### Let's show the null values for all features in the DataFrame. ``` df_transport.isnull().sum() ``` To see a sampling of which values are missing, enter the feature column name. You'll notice that "False" and "True" correpond to the presence or abscence of a value by index number. ``` print (df_transport['Date']) print (df_transport['Date'].isnull()) print (df_transport['Make']) print (df_transport['Make'].isnull()) print (df_transport['Model Year']) print (df_transport['Model Year'].isnull()) ``` ### What can we deduce about the data at this point? # Let's summarize our data by row, column, features, unique, and missing values. ``` # In Python shape() is used in pandas to give the number of rows/columns. # The number of rows is given by .shape[0]. The number of columns is given by .shape[1]. # Thus, shape() consists of an array having two arguments -- rows and columns print ("Rows : " ,df_transport.shape[0]) print ("Columns : " ,df_transport.shape[1]) print ("\nFeatures : \n" ,df_transport.columns.tolist()) print ("\nUnique values : \n",df_transport.nunique()) print ("\nMissing values : ", df_transport.isnull().sum().values.sum()) ``` Let's see the data again -- this time the last five rows in the dataset. ``` # Output the last five rows in the dataset. df_transport.tail() ``` ### What Are Our Data Quality Issues? 1. **Data Quality Issue #1**: > **Missing Values**: Each feature column has multiple missing values. In fact, we have a total of 18 missing values. 2. **Data Quality Issue #2**: > **Date DataType**: Date is shown as an "object" datatype and should be a datetime. In addition, Date is in one column. Our business requirement is to see the Date parsed out to year, month, and day. 3. **Data Quality Issue #3**: > **Model Year**: We are only interested in years greater than 2006, not "<2006". 4. **Data Quality Issue #4**: > **Categorical Columns**: The feature column "Light_Duty" is categorical and has a "Yes/No" choice. We cannot feed values like this into a machine learning model. 
In addition, we need to "one-hot encode the remaining "string"/"object" columns. 5. **Data Quality Issue #5**: > **Temporal Features**: How do we handle year, month, and day? #### Data Quality Issue #1: ##### Resolving Missing Values Most algorithms do not accept missing values. Yet, when we see missing values in our dataset, there is always a tendency to just "drop all the rows" with missing values. Although Pandas will fill in the blank space with “NaN", we should "handle" them in some way. While all the methods to handle missing values is beyond the scope of this lab, there are a few methods you should consider. For numeric columns, use the "mean" values to fill in the missing numeric values. For categorical columns, use the "mode" (or most frequent values) to fill in missing categorical values. In this lab, we use the .apply and Lambda functions to fill every column with its own most frequent value. You'll learn more about Lambda functions later in the lab. Let's check again for missing values by showing how many rows contain NaN values for each feature column. ``` # The isnull() method is used to check and manage NULL values in a data frame. # TODO 1a df_transport.isnull().sum() ``` Run the cell to apply the lambda function. ``` # Here we are using the apply function with lambda. # We can use the apply() function to apply the lambda function to both rows and columns of a dataframe. # TODO 1b df_transport = df_transport.apply(lambda x:x.fillna(x.value_counts().index[0])) ``` Let's check again for missing values. ``` # The isnull() method is used to check and manage NULL values in a data frame. # TODO 1c df_transport.isnull().sum() ``` #### Data Quality Issue #2: ##### Convert the Date Feature Column to a Datetime Format ``` # The date column is indeed shown as a string object. We can convert it to the datetime datatype with the to_datetime() function in Pandas. # TODO 2a df_transport['Date'] = pd.to_datetime(df_transport['Date'], format='%m/%d/%Y') # Date is now converted and will display the concise summary of an dataframe. # TODO 2b df_transport.info() # Now we will parse Date into three columns that is year, month, and day. df_transport['year'] = df_transport['Date'].dt.year df_transport['month'] = df_transport['Date'].dt.month df_transport['day'] = df_transport['Date'].dt.day #df['hour'] = df['date'].dt.hour - you could use this if your date format included hour. #df['minute'] = df['date'].dt.minute - you could use this if your date format included minute. # The .info() function will display the concise summary of an dataframe. df_transport.info() ``` # Let's confirm the Date parsing. This will also give us a another visualization of the data. ``` # Here, we are creating a new dataframe called "grouped_data" and grouping by on the column "Make" grouped_data = df_transport.groupby(['Make']) # Get the first entry for each month. df_transport.groupby('Fuel').first() ``` Now that we have Dates as a integers, let's do some additional plotting. ``` # Here we will visualize our data using the figure() function in the pyplot module of matplotlib's library -- which is used to create a new figure. plt.figure(figsize=(10,6)) # Seaborn's .jointplot() displays a relationship between 2 variables (bivariate) as well as 1D profiles (univariate) in the margins. This plot is a convenience class that wraps JointGrid. 
sns.jointplot(x='month',y='Vehicles',data=df_transport) # The title() method in matplotlib module is used to specify title of the visualization depicted and displays the title using various attributes. plt.title('Vehicles by Month') ``` #### Data Quality Issue #3: ##### Rename a Feature Column and Remove a Value. Our feature columns have different "capitalizations" in their names, e.g. both upper and lower "case". In addition, there are "spaces" in some of the column names. In addition, we are only interested in years greater than 2006, not "<2006". We can also resolve the "case" problem too by making all the feature column names lower case. ``` # Let's remove all the spaces for feature columns by renaming them. # TODO 3a df_transport.rename(columns = { 'Date': 'date', 'Zip Code':'zipcode', 'Model Year': 'modelyear', 'Fuel': 'fuel', 'Make': 'make', 'Light_Duty': 'lightduty', 'Vehicles': 'vehicles'}, inplace = True) # Output the first two rows. df_transport.head(2) ``` **Note:** Next we create a copy of the dataframe to avoid the "SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame" warning. Run the cell to remove the value '<2006' from the modelyear feature column. ``` # Here, we create a copy of the dataframe to avoid copy warning issues. # TODO 3b df = df_transport.loc[df_transport.modelyear != '<2006'].copy() # Here we will confirm that the modelyear value '<2006' has been removed by doing a value count. df['modelyear'].value_counts(0) ``` #### Data Quality Issue #4: ##### Handling Categorical Columns The feature column "lightduty" is categorical and has a "Yes/No" choice. We cannot feed values like this into a machine learning model. We need to convert the binary answers from strings of yes/no to integers of 1/0. There are various methods to achieve this. We will use the "apply" method with a lambda expression. Pandas. apply() takes a function and applies it to all values of a Pandas series. ##### What is a Lambda Function? Typically, Python requires that you define a function using the def keyword. However, lambda functions are anonymous -- which means there is no need to name them. The most common use case for lambda functions is in code that requires a simple one-line function (e.g. lambdas only have a single expression). As you progress through the Course Specialization, you will see many examples where lambda functions are being used. Now is a good time to become familiar with them. ``` # Lets count the number of "Yes" and"No's" in the 'lightduty' feature column. df['lightduty'].value_counts(0) # Let's convert the Yes to 1 and No to 0. # The .apply takes a function and applies it to all values of a Pandas series (e.g. lightduty). df.loc[:,'lightduty'] = df['lightduty'].apply(lambda x: 0 if x=='No' else 1) df['lightduty'].value_counts(0) # Confirm that "lightduty" has been converted. df.head() ``` #### One-Hot Encoding Categorical Feature Columns Machine learning algorithms expect input vectors and not categorical features. Specifically, they cannot handle text or string values. Thus, it is often useful to transform categorical features into vectors. One transformation method is to create dummy variables for our categorical features. Dummy variables are a set of binary (0 or 1) variables that each represent a single class from a categorical feature. We simply encode the categorical variable as a one-hot vector, i.e. a vector where only one element is non-zero, or hot. 
With one-hot encoding, a categorical feature becomes an array whose size is the number of possible choices for that feature. Panda provides a function called "get_dummies" to convert a categorical variable into dummy/indicator variables. ``` # Making dummy variables for categorical data with more inputs. data_dummy = pd.get_dummies(df[['zipcode','modelyear', 'fuel', 'make']], drop_first=True) # Output the first five rows. data_dummy.head() # Merging (concatenate) original data frame with 'dummy' dataframe. # TODO 4a df = pd.concat([df,data_dummy], axis=1) df.head() # Dropping attributes for which we made dummy variables. Let's also drop the Date column. # TODO 4b df = df.drop(['date','zipcode','modelyear', 'fuel', 'make'], axis=1) # Confirm that 'zipcode','modelyear', 'fuel', and 'make' have been dropped. df.head() ``` #### Data Quality Issue #5: ##### Temporal Feature Columns Our dataset now contains year, month, and day feature columns. Let's convert the month and day feature columns to meaningful representations as a way to get us thinking about changing temporal features -- as they are sometimes overlooked. Note that the Feature Engineering course in this Specialization will provide more depth on methods to handle year, month, day, and hour feature columns. ``` # Let's print the unique values for "month", "day" and "year" in our dataset. print ('Unique values of month:',df.month.unique()) print ('Unique values of day:',df.day.unique()) print ('Unique values of year:',df.year.unique()) ``` Don't worry, this is the last time we will use this code, as you can develop an input pipeline to address these temporal feature columns in TensorFlow and Keras - and it is much easier! But, sometimes you need to appreciate what you're not going to encounter as you move through the course! Run the cell to view the output. ``` # Here we map each temporal variable onto a circle such that the lowest value for that variable appears right next to the largest value. We compute the x- and y- component of that point using the sin and cos trigonometric functions. df['day_sin'] = np.sin(df.day*(2.*np.pi/31)) df['day_cos'] = np.cos(df.day*(2.*np.pi/31)) df['month_sin'] = np.sin((df.month-1)*(2.*np.pi/12)) df['month_cos'] = np.cos((df.month-1)*(2.*np.pi/12)) # Let's drop month, and day # TODO 5 df = df.drop(['month','day','year'], axis=1) # scroll left to see the converted month and day coluumns. df.tail(4) ``` ### Conclusion This notebook introduced a few concepts to improve data quality. We resolved missing values, converted the Date feature column to a datetime format, renamed feature columns, removed a value from a feature column, created one-hot encoding features, and converted temporal features to meaningful representations. By the end of our lab, we gained an understanding as to why data should be "cleaned" and "pre-processed" before input into a machine learning model. Copyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
github_jupyter
# Books Recommender System ![](http://labs.criteo.com/wp-content/uploads/2017/08/CustomersWhoBought3.jpg) This is the second part of my project on Book Data Analysis and Recommendation Systems. In my first notebook ([The Story of Book](https://www.kaggle.com/omarzaghlol/goodreads-1-the-story-of-book/)), I attempted at narrating the story of book by performing an extensive exploratory data analysis on Books Metadata collected from Goodreads. In this notebook, I will attempt at implementing a few recommendation algorithms (Basic Recommender, Content-based and Collaborative Filtering) and try to build an ensemble of these models to come up with our final recommendation system. # What's in this kernel? - [Importing Libraries and Loading Our Data](#1) - [Clean the dataset](#2) - [Simple Recommender](#3) - [Top Books](#4) - [Top "Genres" Books](#5) - [Content Based Recommender](#6) - [Cosine Similarity](#7) - [Popularity and Ratings](#8) - [Collaborative Filtering](#9) - [User Based](#10) - [Item Based](#11) - [Hybrid Recommender](#12) - [Conclusion](#13) - [Save Model](#14) # Importing Libraries and Loading Our Data <a id="1"></a> <br> ``` import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import datetime import warnings warnings.filterwarnings('ignore') books = pd.read_csv('../input/goodbooks-10k//books.csv') ratings = pd.read_csv('../input/goodbooks-10k//ratings.csv') book_tags = pd.read_csv('../input/goodbooks-10k//book_tags.csv') tags = pd.read_csv('../input/goodbooks-10k//tags.csv') ``` # Clean the dataset <a id="2"></a> <br> As with nearly any real-life dataset, we need to do some cleaning first. When exploring the data I noticed that for some combinations of user and book there are multiple ratings, while in theory there should only be one (unless users can rate a book several times). Furthermore, for the collaborative filtering it is better to have more ratings per user. So I decided to remove users who have rated fewer than 3 books. ``` books['original_publication_year'] = books['original_publication_year'].fillna(-1).apply(lambda x: int(x) if x != -1 else -1) ratings_rmv_duplicates = ratings.drop_duplicates() unwanted_users = ratings_rmv_duplicates.groupby('user_id')['user_id'].count() unwanted_users = unwanted_users[unwanted_users < 3] unwanted_ratings = ratings_rmv_duplicates[ratings_rmv_duplicates.user_id.isin(unwanted_users.index)] new_ratings = ratings_rmv_duplicates.drop(unwanted_ratings.index) new_ratings['title'] = books.set_index('id').title.loc[new_ratings.book_id].values new_ratings.head(10) ``` # Simple Recommender <a id="3"></a> <br> The Simple Recommender offers generalized recommnendations to every user based on book popularity and (sometimes) genre. The basic idea behind this recommender is that books that are more popular and more critically acclaimed will have a higher probability of being liked by the average audience. This model does not give personalized recommendations based on the user. The implementation of this model is extremely trivial. All we have to do is sort our books based on ratings and popularity and display the top books of our list. As an added step, we can pass in a genre argument to get the top books of a particular genre. I will use IMDB's *weighted rating* formula to construct my chart. Mathematically, it is represented as follows: Weighted Rating (WR) = $(\frac{v}{v + m} . R) + (\frac{m}{v + m} . 
C)$ where, * *v* is the number of ratings for the book * *m* is the minimum ratings required to be listed in the chart * *R* is the average rating of the book * *C* is the mean rating across the whole report The next step is to determine an appropriate value for *m*, the minimum ratings required to be listed in the chart. We will use **95th percentile** as our cutoff. In other words, for a book to feature in the charts, it must have more ratings than at least 95% of the books in the list. I will build our overall Top 250 Chart and will define a function to build charts for a particular genre. Let's begin! ``` v = books['ratings_count'] m = books['ratings_count'].quantile(0.95) R = books['average_rating'] C = books['average_rating'].mean() W = (R*v + C*m) / (v + m) books['weighted_rating'] = W qualified = books.sort_values('weighted_rating', ascending=False).head(250) ``` ## Top Books <a id="4"></a> <br> ``` qualified[['title', 'authors', 'average_rating', 'weighted_rating']].head(15) ``` We see that J.K. Rowling's **Harry Potter** Books occur at the very top of our chart. The chart also indicates a strong bias of Goodreads Users towards particular genres and authors. Let us now construct our function that builds charts for particular genres. For this, we will use relax our default conditions to the **85th** percentile instead of 95. ## Top "Genres" Books <a id="5"></a> <br> ``` book_tags.head() tags.head() genres = ["Art", "Biography", "Business", "Chick Lit", "Children's", "Christian", "Classics", "Comics", "Contemporary", "Cookbooks", "Crime", "Ebooks", "Fantasy", "Fiction", "Gay and Lesbian", "Graphic Novels", "Historical Fiction", "History", "Horror", "Humor and Comedy", "Manga", "Memoir", "Music", "Mystery", "Nonfiction", "Paranormal", "Philosophy", "Poetry", "Psychology", "Religion", "Romance", "Science", "Science Fiction", "Self Help", "Suspense", "Spirituality", "Sports", "Thriller", "Travel", "Young Adult"] genres = list(map(str.lower, genres)) genres[:4] available_genres = tags.loc[tags.tag_name.str.lower().isin(genres)] available_genres.head() available_genres_books = book_tags[book_tags.tag_id.isin(available_genres.tag_id)] print('There are {} books that are tagged with above genres'.format(available_genres_books.shape[0])) available_genres_books.head() available_genres_books['genre'] = available_genres.tag_name.loc[available_genres_books.tag_id].values available_genres_books.head() def build_chart(genre, percentile=0.85): df = available_genres_books[available_genres_books['genre'] == genre.lower()] qualified = books.set_index('book_id').loc[df.goodreads_book_id] v = qualified['ratings_count'] m = qualified['ratings_count'].quantile(percentile) R = qualified['average_rating'] C = qualified['average_rating'].mean() qualified['weighted_rating'] = (R*v + C*m) / (v + m) qualified.sort_values('weighted_rating', ascending=False, inplace=True) return qualified ``` Let us see our method in action by displaying the Top 15 Fiction Books (Fiction almost didn't feature at all in our Generic Top Chart despite being one of the most popular movie genres). ``` cols = ['title','authors','original_publication_year','average_rating','ratings_count','work_text_reviews_count','weighted_rating'] genre = 'Fiction' build_chart(genre)[cols].head(15) ``` For simplicity, you can just pass the index of the wanted genre from below. 
``` list(enumerate(available_genres.tag_name)) idx = 24 # romance build_chart(list(available_genres.tag_name)[idx])[cols].head(15) ``` # Content Based Recommender <a id="6"></a> <br> ![](https://miro.medium.com/max/828/1*1b-yMSGZ1HfxvHiJCiPV7Q.png) The recommender we built in the previous section suffers some severe limitations. For one, it gives the same recommendation to everyone, regardless of the user's personal taste. If a person who loves business books (and hates fiction) were to look at our Top 15 Chart, s/he wouldn't probably like most of the books. If s/he were to go one step further and look at our charts by genre, s/he wouldn't still be getting the best recommendations. For instance, consider a person who loves *The Fault in Our Stars*, *Twilight*. One inference we can obtain is that the person loves the romaintic books. Even if s/he were to access the romance chart, s/he wouldn't find these as the top recommendations. To personalise our recommendations more, I am going to build an engine that computes similarity between movies based on certain metrics and suggests books that are most similar to a particular book that a user liked. Since we will be using book metadata (or content) to build this engine, this also known as **Content Based Filtering.** I will build this recommender based on book's *Title*, *Authors* and *Genres*. ``` from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer from sklearn.metrics.pairwise import linear_kernel, cosine_similarity ``` My approach to building the recommender is going to be extremely *hacky*. These are steps I plan to do: 1. **Strip Spaces and Convert to Lowercase** from authors. This way, our engine will not confuse between **Stephen Covey** and **Stephen King**. 2. Combining books with their corresponding **genres** . 2. I then use a **Count Vectorizer** to create our count matrix. Finally, we calculate the cosine similarities and return books that are most similar. ``` books['authors'] = books['authors'].apply(lambda x: [str.lower(i.replace(" ", "")) for i in x.split(', ')]) def get_genres(x): t = book_tags[book_tags.goodreads_book_id==x] return [i.lower().replace(" ", "") for i in tags.tag_name.loc[t.tag_id].values] books['genres'] = books.book_id.apply(get_genres) books['soup'] = books.apply(lambda x: ' '.join([x['title']] + x['authors'] + x['genres']), axis=1) books.soup.head() count = CountVectorizer(analyzer='word',ngram_range=(1, 2),min_df=0, stop_words='english') count_matrix = count.fit_transform(books['soup']) ``` ## Cosine Similarity <a id="7"></a> <br> I will be using the Cosine Similarity to calculate a numeric quantity that denotes the similarity between two books. Mathematically, it is defined as follows: $cosine(x,y) = \frac{x. y^\intercal}{||x||.||y||} $ ``` cosine_sim = cosine_similarity(count_matrix, count_matrix) indices = pd.Series(books.index, index=books['title']) titles = books['title'] def get_recommendations(title, n=10): idx = indices[title] sim_scores = list(enumerate(cosine_sim[idx])) sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True) sim_scores = sim_scores[1:31] book_indices = [i[0] for i in sim_scores] return list(titles.iloc[book_indices].values)[:n] get_recommendations("The One Minute Manager") ``` What if I want a specific book but I can't remember it's full name!! So I created the following *method* to get book titles from a **partial** title. 
``` def get_name_from_partial(title): return list(books.title[books.title.str.lower().str.contains(title) == True].values) title = "business" l = get_name_from_partial(title) list(enumerate(l)) get_recommendations(l[1]) ``` ## Popularity and Ratings <a id="8"></a> <br> One thing that we notice about our recommendation system is that it recommends books regardless of ratings and popularity. It is true that ***Across the River and Into the Trees*** and ***The Old Man and the Sea*** were both written by **Ernest Hemingway**, but the former was considered a bad (though not the worst) book that shouldn't be recommended to anyone, since most people disliked it for its static plot and overwrought emotion. Therefore, we will add a mechanism to remove bad books and return books which are popular and have had a good critical response. I will take the top 30 books based on similarity scores and use the ratings count of the 60th percentile book. Then, using this as the value of $m$, we will calculate the weighted rating of each book using IMDB's formula like we did in the Simple Recommender section. ``` def improved_recommendations(title, n=10): idx = indices[title] sim_scores = list(enumerate(cosine_sim[idx])) sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True) sim_scores = sim_scores[1:31] book_indices = [i[0] for i in sim_scores] df = books.iloc[book_indices][['title', 'ratings_count', 'average_rating', 'weighted_rating']] v = df['ratings_count'] m = df['ratings_count'].quantile(0.60) R = df['average_rating'] C = df['average_rating'].mean() df['weighted_rating'] = (R*v + C*m) / (v + m) qualified = df[df['ratings_count'] >= m] qualified = qualified.sort_values('weighted_rating', ascending=False) return qualified.head(n) improved_recommendations("The One Minute Manager") improved_recommendations(l[1]) ``` The ranking of similar books is better now than before. Therefore, we will conclude our Content Based Recommender section here and come back to it when we build a hybrid engine. # Collaborative Filtering <a id="9"></a> <br> ![](https://miro.medium.com/max/706/1*DYJ-HQnOVvmm5suNtqV3Jw.png) Our content based engine suffers from some severe limitations. It is only capable of suggesting books which are *close* to a certain book. That is, it is not capable of capturing tastes and providing recommendations across genres. Also, the engine that we built is not really personal in that it doesn't capture the personal tastes and biases of a user. Anyone querying our engine for recommendations based on a book will receive the same recommendations for that book, regardless of who s/he is. Therefore, in this section, we will use a technique called **Collaborative Filtering** to make recommendations to book readers. Collaborative Filtering is based on the idea that users similar to me can be used to predict how much I will like a particular product or service those users have used/experienced but I have not. I will not be implementing Collaborative Filtering from scratch. Instead, I will use the **Surprise** library, which provides extremely powerful algorithms like **Singular Value Decomposition (SVD)** to minimise RMSE (Root Mean Square Error) and give great recommendations. There are two classes of Collaborative Filtering: ![](https://miro.medium.com/max/1280/1*QvhetbRjCr1vryTch_2HZQ.jpeg) - **User-based**, which measures the similarity between target users and other users. - **Item-based**, which measures the similarity between the items that target users rate or interact with and other items. The small sketch below contrasts these two views on a toy ratings matrix.
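Before turning to a library, the following hand-rolled sketch contrasts the two views on a tiny made-up ratings matrix. The names and numbers are illustrative only; the actual models below rely on Surprise (user-based) and a pandas pivot table (item-based).

```
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity

# Toy ratings matrix: rows = users, columns = books. A 0 stands in for
# "not rated", which is a simplification a real system would treat more carefully.
ratings = pd.DataFrame([[5, 4, 0],
                        [4, 5, 1],
                        [1, 0, 5]],
                       index=['user_a', 'user_b', 'user_c'],
                       columns=['book_x', 'book_y', 'book_z'])

# User-based view: similarity between rows (users).
user_sim = pd.DataFrame(cosine_similarity(ratings),
                        index=ratings.index, columns=ratings.index)

# Item-based view: similarity between columns (books).
item_sim = pd.DataFrame(cosine_similarity(ratings.T),
                        index=ratings.columns, columns=ratings.columns)

print(user_sim.round(2))
print(item_sim.round(2))
```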
## - User Based <a id="10"></a> <br> ``` # ! pip install surprise from surprise import Reader, Dataset, SVD from surprise.model_selection import cross_validate reader = Reader() data = Dataset.load_from_df(new_ratings[['user_id', 'book_id', 'rating']], reader) svd = SVD() cross_validate(svd, data, measures=['RMSE', 'MAE']) ``` We get a mean **Root Mean Sqaure Error** of about 0.8419 which is more than good enough for our case. Let us now train on our dataset and arrive at predictions. ``` trainset = data.build_full_trainset() svd.fit(trainset); ``` Let us pick users 10 and check the ratings s/he has given. ``` new_ratings[new_ratings['user_id'] == 10] svd.predict(10, 1506) ``` For book with ID 1506, we get an estimated prediction of **3.393**. One startling feature of this recommender system is that it doesn't care what the book is (or what it contains). It works purely on the basis of an assigned book ID and tries to predict ratings based on how the other users have predicted the book. ## - Item Based <a id="11"></a> <br> Here we will build a table for users with their corresponding ratings for each book. ``` # bookmat = new_ratings.groupby(['user_id', 'title'])['rating'].mean().unstack() bookmat = new_ratings.pivot_table(index='user_id', columns='title', values='rating') bookmat.head() def get_similar(title, mat): title_user_ratings = mat[title] similar_to_title = mat.corrwith(title_user_ratings) corr_title = pd.DataFrame(similar_to_title, columns=['correlation']) corr_title.dropna(inplace=True) corr_title.sort_values('correlation', ascending=False, inplace=True) return corr_title title = "Twilight (Twilight, #1)" smlr = get_similar(title, bookmat) smlr.head(10) ``` Ok, we got similar books, but we need to filter them by their *ratings_count*. ``` smlr = smlr.join(books.set_index('title')['ratings_count']) smlr.head() ``` Get similar books with at least 500k ratings. ``` smlr[smlr.ratings_count > 5e5].sort_values('correlation', ascending=False).head(10) ``` That's more interesting and reasonable result, since we could get *Twilight* book series in our top results. # Hybrid Recommender <a id="12"></a> <br> ![](https://www.toonpool.com/user/250/files/hybrid_20095.jpg) In this section, I will try to build a simple hybrid recommender that brings together techniques we have implemented in the content based and collaborative filter based engines. This is how it will work: * **Input:** User ID and the Title of a Book * **Output:** Similar books sorted on the basis of expected ratings by that particular user. ``` def hybrid(user_id, title, n=10): idx = indices[title] sim_scores = list(enumerate(cosine_sim[idx])) sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True) sim_scores = sim_scores[1:51] book_indices = [i[0] for i in sim_scores] df = books.iloc[book_indices][['book_id', 'title', 'original_publication_year', 'ratings_count', 'average_rating']] df['est'] = df['book_id'].apply(lambda x: svd.predict(user_id, x).est) df = df.sort_values('est', ascending=False) return df.head(n) hybrid(4, 'Eat, Pray, Love') hybrid(10, 'Eat, Pray, Love') ``` We see that for our hybrid recommender, we get (almost) different recommendations for different users although the book is the same. But maybe we can make it better through following steps: 1. Use our *improved_recommendations* technique , that we used in the **Content Based** seciton above 2. Combine it with the user *estimations*, by dividing their summation by 2 3. 
Finally, put the result into a new feature ***score*** ``` def improved_hybrid(user_id, title, n=10): idx = indices[title] sim_scores = list(enumerate(cosine_sim[idx])) sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True) sim_scores = sim_scores[1:51] book_indices = [i[0] for i in sim_scores] df = books.iloc[book_indices][['book_id', 'title', 'ratings_count', 'average_rating', 'original_publication_year']] v = df['ratings_count'] m = df['ratings_count'].quantile(0.60) R = df['average_rating'] C = df['average_rating'].mean() df['weighted_rating'] = (R*v + C*m) / (v + m) df['est'] = df['book_id'].apply(lambda x: svd.predict(user_id, x).est) df['score'] = (df['est'] + df['weighted_rating']) / 2 df = df.sort_values('score', ascending=False) return df[['book_id', 'title', 'original_publication_year', 'ratings_count', 'average_rating', 'score']].head(n) improved_hybrid(4, 'Eat, Pray, Love') improved_hybrid(10, 'Eat, Pray, Love') ``` OK, we see that the new results make more sense; in addition, the recommendations are more personalized and tailored towards particular users. # Conclusion <a id="13"></a> <br> In this notebook, I have built 4 different recommendation engines based on different ideas and algorithms. They are as follows: 1. **Simple Recommender:** This system used overall Goodreads Ratings Count and Rating Averages to build Top Books Charts, in general and for a specific genre. The IMDB Weighted Rating System was used to calculate ratings on which the sorting was finally performed. 2. **Content Based Recommender:** We built content based engines that took book title, authors and genres as input to come up with predictions. We also devised a simple filter to give greater preference to books with more votes and higher ratings. 3. **Collaborative Filtering:** We built two Collaborative Filters; - one that uses the powerful Surprise library to build a **user-based** filter based on singular value decomposition; the RMSE obtained was less than 1, and the engine gave estimated ratings for a given user and book. - and the other (**item-based**), which built a pivot table of user ratings for each book, and the engine gave similar books for a given book. 4. **Hybrid Engine:** We brought together ideas from content and collaborative filtering to build an engine that gave book suggestions to a particular user based on the estimated ratings that it had internally calculated for that user. Previous -> [The Story of Book](https://www.kaggle.com/omarzaghlol/goodreads-1-the-story-of-book/)
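The contents list at the top of the notebook also promises a final "Save Model" step that is not shown in this excerpt. One minimal way to persist the trained pieces would be the sketch below; the file name is hypothetical, while `svd`, `cosine_sim` and `indices` are the objects built above.

```
import pickle

# Hypothetical persistence step: bundle the trained SVD model and the
# content-based artefacts so they can be reloaded without retraining.
with open('recommender_artifacts.pkl', 'wb') as f:
    pickle.dump({'svd': svd, 'cosine_sim': cosine_sim, 'indices': indices}, f)

# Later, e.g. in a serving script:
with open('recommender_artifacts.pkl', 'rb') as f:
    artifacts = pickle.load(f)
print(artifacts['svd'].predict(10, 1506).est)
```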
github_jupyter
``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt import pandas as pd from scipy.stats import gaussian_kde, chi2, pearsonr SMALL_SIZE = 16 MEDIUM_SIZE = 18 BIGGER_SIZE = 20 plt.rc('font', size=SMALL_SIZE) # controls default text sizes plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title SEED = 35010732 # from random.org np.random.seed(SEED) print(plt.style.available) plt.style.use('seaborn-white') cor1000 = pd.read_csv("correlations1kbig.csv") cor10k = pd.read_csv("correlations10kbig.csv") cor1000 corr1000_avg = cor1000.groupby('rho').mean().reset_index() corr1000_std = cor1000.groupby('rho').std().reset_index() corr1000_avg plt.figure(figsize=(5,5)) rho_theory = np.linspace(-0.95,0.95,100) c_theory = 2*np.abs(rho_theory)/(1-np.abs(rho_theory))*np.sign(rho_theory) plt.scatter(cor1000['rho'],cor1000['C']) plt.plot(rho_theory,c_theory) plt.axhline(y=0.0, color='r') plt.figure(figsize=(5,5)) rho_theory = np.linspace(-0.95,0.95,100) c_theory = 2*np.abs(rho_theory)/(1-np.abs(rho_theory))*np.sign(rho_theory) plt.errorbar(corr1000_avg['rho'],corr1000_avg['C'],yerr=corr1000_avg['dC'],fmt="o",color='k') plt.plot(rho_theory,c_theory,"k") plt.axhline(y=0.0, color='k') plt.xlabel(r'$\rho$') plt.ylabel("C") plt.savefig("corr.png",format='png',dpi=300,bbox_inches='tight',facecolor="white",backgroundcolor="white") for rho in corr1000_avg['rho']: data1000_rho = cor1000[cor1000['rho']==rho] print(rho,data1000_rho['A1'].mean(),data1000_rho['A1'].std(),data1000_rho['dA1'].mean()) print(rho,data1000_rho['A2'].mean(),data1000_rho['A2'].std(),data1000_rho['dA2'].mean()) data1000_05 = cor1000[cor1000['rho']==0.4999999999999997] data1000_05 plt.hist(data1000_05['A1'],bins=10,density=True) data1k05 = pd.read_csv('correlations1k05.csv') data1k05 plt.hist(data1k05['a2'],bins=30,density=True) print(data1k05['A1'].mean(),data1k05['A1'].std(),data1k05['dA1'].mean(),data1k05['dA1'].std()) print(data1k05['a1'].mean(),data1k05['a1'].std(),data1k05['da1'].mean(),data1k05['da1'].std()) print(data1k05['A2'].mean(),data1k05['A2'].std(),data1k05['dA2'].mean(),data1k05['dA2'].std()) print(data1k05['a2'].mean(),data1k05['a2'].std(),data1k05['da2'].mean(),data1k05['da2'].std()) plt.figure(facecolor="white") xs = np.linspace(0.25,2,200) densityA1 = gaussian_kde(data1k05['A1']) densityA2 = gaussian_kde(data1k05['A2']) densitya1 = gaussian_kde(data1k05['a1']) densitya2 = gaussian_kde(data1k05['a2']) plt.plot(xs,densityA1(xs),"k-",label=r"$A_{1}$ MCMC") plt.plot(xs,densitya1(xs),"k:",label=r"$A_{1}$ ML") plt.axvline(x=1.0,color="k") plt.legend() plt.xlabel(r"$A_1$") plt.ylabel(r"$p(A_{1})$") plt.savefig("A1kde05.png",format="png",dpi=300,bbox_inches='tight',facecolor="white",backgroundcolor="white") plt.figure(facecolor="white") xs = np.linspace(0.25,0.5,200) densityA2 = gaussian_kde(data1k05['A2']) densitya2 = gaussian_kde(data1k05['a2']) plt.plot(xs,densityA2(xs),"k-",label=r"$A_{2}$ MCMC") plt.plot(xs,densitya2(xs),"k:",label=r"$A_{2}$ ML") plt.axvline(x=0.3333,color="k") plt.legend() plt.xlabel(r"$A_2$") plt.ylabel(r"$p(A_{2})$") plt.savefig("A2kde05.png",format="png",dpi=300,bbox_inches='tight',facecolor="white",backgroundcolor="white") data1k025 = 
pd.read_csv('correlations1k025.csv') data1k025 plt.hist(data1k025['a2'],bins=30,density=True) print(data1k025['A1'].mean(),data1k025['A1'].std(),data1k025['dA1'].mean(),data1k025['dA1'].std()) print(data1k025['a1'].mean(),data1k025['a1'].std(),data1k025['da1'].mean(),data1k025['da1'].std()) print(data1k025['A2'].mean(),data1k025['A2'].std(),data1k025['dA2'].mean(),data1k025['dA2'].std()) print(data1k025['a2'].mean(),data1k025['a2'].std(),data1k025['da2'].mean(),data1k025['da2'].std()) plt.figure(facecolor="white") xs = np.linspace(0.25,2,200) densityA1 = gaussian_kde(data1k025['A1']) densityA2 = gaussian_kde(data1k025['A2']) densitya1 = gaussian_kde(data1k025['a1']) densitya2 = gaussian_kde(data1k025['a2']) plt.plot(xs,densityA1(xs),"k-",label=r"$A_{1}$ MCMC") plt.plot(xs,densitya1(xs),"k:",label=r"$A_{1}$ ML") plt.axvline(x=1.0,color="k") plt.legend() plt.xlabel(r"$A_1$") plt.ylabel(r"$p(A_{1})$") plt.savefig("A1kde025.png",format="png",dpi=300,bbox_inches='tight',facecolor="white",backgroundcolor="white") plt.figure(facecolor="white") xs = np.linspace(0.35,1,200) densityA2 = gaussian_kde(data1k025['A2']) densitya2 = gaussian_kde(data1k025['a2']) plt.plot(xs,densityA2(xs),"k-",label=r"$A_{2}$ MCMC") plt.plot(xs,densitya2(xs),"k:",label=r"$A_{2}$ ML") plt.axvline(x=0.6,color="k") plt.legend() plt.xlabel(r"$A_2$") plt.ylabel(r"$p(A_{2})$") plt.savefig("A2kde025.png",format="png",dpi=300,bbox_inches='tight',facecolor="white",backgroundcolor="white") plt.figure(facecolor="white") plt.scatter(data1k05['D'],data1k05['d']) plt.xlabel(r"$A_1$ MCMC") plt.ylabel(r"$A_{1}$ ML") plt.savefig("A1corrkde025.png",format="png",dpi=300,bbox_inches='tight',facecolor="white",backgroundcolor="white") print(pearsonr(data1k025['A1'],data1k025['a1'])) print(pearsonr(data1k025['A2'],data1k025['a2'])) print(pearsonr(data1k025['D'],data1k025['d'])) p1 = np.polyfit(data1k05['dA1'],data1k05['da1'],1) print(p1) print("factor of underestimation: ",1/p1[0]) dA1 = np.linspace(0.09,0.4,200) da1 = p1[0]*dA1 + p1[1] plt.figure(facecolor="white") plt.scatter(data1k05['dA1'],data1k05['da1'],color="k") plt.plot(dA1,da1,"k:") plt.xlabel(r"$dA_1$ MCMC") plt.ylabel(r"$dA_{1}$ ML") plt.savefig("dA1corrkde05.png",format="png",dpi=300,bbox_inches='tight',facecolor="white",backgroundcolor="white") p2 = np.polyfit(data1k05['dA2'],data1k05['da2'],1) print(p2) print("factor of underestimation: ",1/p2[0]) dA2 = np.linspace(0.03,0.15,200) da2 = p2[0]*dA2 + p2[1] plt.figure(facecolor="white") plt.scatter(data1k05['dA2'],data1k05['da2'],color="k") plt.plot(dA2,da2,"k:") plt.xlabel(r"$dA_2$ MCMC") plt.ylabel(r"$dA_{2}$ ML") plt.savefig("dA2corrkde05.png",format="png",dpi=300,bbox_inches='tight',facecolor="white",backgroundcolor="white") p1 = np.polyfit(data1k025['dA1'],data1k025['da1'],1) print(p1) p1 = np.polyfit(data1k05['dA1'],data1k05['da1'],1) print(p1) print("factor of underestimation: ",1/p1[0]) dA1 = np.linspace(0.05,0.4,200) da1 = p1[0]*dA1 + p1[1] plt.figure(facecolor="white") plt.scatter(data1k05['dA1'],data1k05['da1'],color="k") plt.plot(dA1,da1,"k:") plt.xlabel(r"$dA_1$ MCMC") plt.ylabel(r"$dA_{1}$ ML") plt.savefig("dA1corrkde05.png",format="png",dpi=300,bbox_inches='tight',facecolor="white",backgroundcolor="white") p2 = np.polyfit(data1k05['dA2'],data1k05['da2'],1) print(p2) print("factor of underestimation: ",1/p2[0]) dA2 = np.linspace(0.015,0.05,200) da2 = p2[0]*dA2 + p2[1] plt.figure(facecolor="white") plt.scatter(data1k05['dA2'],data1k05['da2'],color="k") plt.plot(dA2,da2,"k:") plt.xlabel(r"$dA_2$ MCMC") 
plt.ylabel(r"$dA_{2}$ ML") plt.savefig("dA2corrkde05.png",format="png",dpi=300,bbox_inches='tight',facecolor="white",backgroundcolor="white") ```
github_jupyter
# WGAN Original paper: Wasserstein GAN https://arxiv.org/abs/1701.07875 (2017) WGAN changes the GAN loss so that the training of an image generator is better behaved mathematically. A standard GAN uses the KL divergence to pull the probability distribution produced by the Generator towards the distribution of the images we want to generate. However, the KL divergence does not guarantee continuity, so WGAN instead uses the Wasserstein distance for this approximation. To realise a loss based on the Wasserstein distance, the WGAN Discriminator does not apply a Sigmoid at its output. In other words, the loss is not a sigmoid cross-entropy either; the raw Discriminator output value is used directly. At every iteration, the WGAN algorithm alternates between the following Discriminator and Generator updates. - Optimizer : RMSProp (learning rate : 0.0005) #### Discriminator training (repeat the following steps for the configured number of critic iterations) 1. Sample real images, and sample z from a uniform distribution 2. Compute the loss $L_D = \frac{1}{|Minibatch|} \{ \sum_{i} D(x^{(i)}) - \sum_i D (G(z^{(i)})) \}$ and take an SGD step 3. Clip all Discriminator parameters to [- clip, clip] #### Generator training 1. Sample z from a uniform distribution 2. Compute the loss $L_G = \frac{1}{|Minibatch|} \sum_i D (G(z^{(i)})) $ and take an SGD step (Note: WGAN converges very slowly and needs a very large number of training iterations, so be patient!) ## Import and Config ``` import torch import torch.nn.functional as F import torchvision import numpy as np from collections import OrderedDict from easydict import EasyDict import argparse import os import matplotlib.pyplot as plt import pandas as pd from _main_base import * #--- # config #--- cfg = EasyDict() # class cfg.CLASS_LABEL = ['akahara', 'madara'] # list, dict('label' : '[B, G, R]') cfg.CLASS_NUM = len(cfg.CLASS_LABEL) # model cfg.INPUT_Z_DIM = 128 cfg.INPUT_MODE = None cfg.OUTPUT_HEIGHT = 32 cfg.OUTPUT_WIDTH = 32 cfg.OUTPUT_CHANNEL = 3 cfg.OUTPUT_MODE = 'RGB' # RGB, GRAY, EDGE, CLASS_LABEL cfg.G_DIM = 64 cfg.D_DIM = 64 cfg.CHANNEL_AXIS = 1 # 1 ... [mb, c, h, w], 3 ... [mb, h, w, c] cfg.GPU = False cfg.DEVICE = torch.device('cuda' if cfg.GPU and torch.cuda.is_available() else 'cpu') # train cfg.TRAIN = EasyDict() cfg.TRAIN.DISPAY_ITERATION_INTERVAL = 50 cfg.PREFIX = 'WGAN' cfg.TRAIN.MODEL_G_SAVE_PATH = 'models/' + cfg.PREFIX + '_G_{}.pt' cfg.TRAIN.MODEL_D_SAVE_PATH = 'models/' + cfg.PREFIX + '_D_{}.pt' cfg.TRAIN.MODEL_SAVE_INTERVAL = 200 cfg.TRAIN.ITERATION = 5000 cfg.TRAIN.MINIBATCH = 32 cfg.TRAIN.OPTIMIZER_G = torch.optim.Adam cfg.TRAIN.LEARNING_PARAMS_G = {'lr' : 0.0002, 'betas' : (0.5, 0.9)} cfg.TRAIN.OPTIMIZER_D = torch.optim.Adam cfg.TRAIN.LEARNING_PARAMS_D = {'lr' : 0.0002, 'betas' : (0.5, 0.9)} cfg.TRAIN.LOSS_FUNCTION = None cfg.TRAIN.DATA_PATH = './data/' cfg.TRAIN.DATA_HORIZONTAL_FLIP = False # data augmentation : horizontal flip cfg.TRAIN.DATA_VERTICAL_FLIP = False # data augmentation : vertical flip cfg.TRAIN.DATA_ROTATION = False # data augmentation : rotation False, or integer cfg.TRAIN.LEARNING_PROCESS_RESULT_SAVE = True cfg.TRAIN.LEARNING_PROCESS_RESULT_INTERVAL = 500 cfg.TRAIN.LEARNING_PROCESS_RESULT_IMAGE_PATH = 'result/' + cfg.PREFIX + '_result_{}.jpg' cfg.TRAIN.LEARNING_PROCESS_RESULT_LOSS_PATH = 'result/' + cfg.PREFIX + '_loss.txt' #--- # WGAN config #--- cfg.TRAIN.WGAN_CLIPS_VALUE = 0.01 cfg.TRAIN.WGAN_CRITIC_N = 5 # test cfg.TEST = EasyDict() cfg.TEST.MODEL_G_PATH = cfg.TRAIN.MODEL_G_SAVE_PATH.format('final') cfg.TEST.DATA_PATH = './data' cfg.TEST.MINIBATCH = 10 cfg.TEST.ITERATION = 2 cfg.TEST.RESULT_SAVE = False cfg.TEST.RESULT_IMAGE_PATH = 'result/' + cfg.PREFIX + '_result_{}.jpg' # random seed torch.manual_seed(0) # make model save directory def make_dir(path): if '/' in path: model_save_dir = '/'.join(path.split('/')[:-1]) os.makedirs(model_save_dir, exist_ok=True) make_dir(cfg.TRAIN.MODEL_G_SAVE_PATH) make_dir(cfg.TRAIN.MODEL_D_SAVE_PATH) make_dir(cfg.TRAIN.LEARNING_PROCESS_RESULT_IMAGE_PATH) make_dir(cfg.TRAIN.LEARNING_PROCESS_RESULT_LOSS_PATH) ``` ## Define Model ``` class Generator(torch.nn.Module): def __init__(self):
super(Generator, self).__init__() self.module = torch.nn.Sequential(OrderedDict({ 'G_layer_1' : torch.nn.ConvTranspose2d(cfg.INPUT_Z_DIM, cfg.G_DIM * 4, kernel_size=[cfg.OUTPUT_HEIGHT // 8, cfg.OUTPUT_WIDTH // 8], stride=1, bias=False), 'G_layer_1_bn' : torch.nn.BatchNorm2d(cfg.G_DIM * 4), 'G_layer_1_ReLU' : torch.nn.ReLU(), 'G_layer_2' : torch.nn.ConvTranspose2d(cfg.G_DIM * 4, cfg.G_DIM * 2, kernel_size=4, stride=2, padding=1, bias=False), 'G_layer_2_bn' : torch.nn.BatchNorm2d(cfg.G_DIM * 2), 'G_layer_2_ReLU' : torch.nn.ReLU(), 'G_layer_3' : torch.nn.ConvTranspose2d(cfg.G_DIM * 2, cfg.G_DIM, kernel_size=4, stride=2, padding=1, bias=False), 'G_layer_3_bn' : torch.nn.BatchNorm2d(cfg.G_DIM), 'G_layer_3_ReLU' : torch.nn.ReLU(), 'G_layer_out' : torch.nn.ConvTranspose2d(cfg.G_DIM, cfg.OUTPUT_CHANNEL, kernel_size=4, stride=2, padding=1, bias=False), 'G_layer_out_tanh' : torch.nn.Tanh() })) def forward(self, x): x = self.module(x) return x class Discriminator(torch.nn.Module): def __init__(self): super(Discriminator, self).__init__() self.module = torch.nn.Sequential(OrderedDict({ 'D_layer_1' : torch.nn.Conv2d(cfg.OUTPUT_CHANNEL, cfg.D_DIM, kernel_size=4, padding=1, stride=2, bias=False), 'D_layer_1_leakyReLU' : torch.nn.LeakyReLU(0.2, inplace=True), 'D_layer_2' : torch.nn.Conv2d(cfg.D_DIM, cfg.D_DIM * 2, kernel_size=4, padding=1, stride=2, bias=False), 'D_layer_2_bn' : torch.nn.BatchNorm2d(cfg.D_DIM * 2), 'D_layer_2_leakyReLU' : torch.nn.LeakyReLU(0.2, inplace=True), 'D_layer_3' : torch.nn.Conv2d(cfg.D_DIM * 2, cfg.D_DIM * 4, kernel_size=4, padding=1, stride=2, bias=False), 'G_layer_3_bn' : torch.nn.BatchNorm2d(cfg.D_DIM * 4), 'D_layer_3_leakyReLU' : torch.nn.LeakyReLU(0.2, inplace=True), 'D_layer_out' : torch.nn.Conv2d(cfg.D_DIM * 4, 1, kernel_size=[cfg.OUTPUT_HEIGHT // 8, cfg.OUTPUT_WIDTH // 8], padding=0, stride=1, bias=False), })) def forward(self, x): x = self.module(x) return x ``` ## Train ``` def result_show(G, z, path=None, save=False, show=False): if (save or show) is False: print('argument save >> {} and show >> {}, so skip') return Gz = G(z) Gz = Gz.detach().cpu().numpy() Gz = (Gz * 127.5 + 127.5).astype(np.uint8) Gz = Gz.reshape([-1, cfg.OUTPUT_CHANNEL, cfg.OUTPUT_HEIGHT, cfg.OUTPUT_WIDTH]) Gz = Gz.transpose(0, 2, 3, 1) for i in range(cfg.TEST.MINIBATCH): _G = Gz[i] plt.subplot(1, cfg.TEST.MINIBATCH, i + 1) plt.imshow(_G) plt.axis('off') if path is not None: plt.savefig(path) print('result was saved to >> {}'.format(path)) if show: plt.show() # train def train(): # model G = Generator().to(cfg.DEVICE) D = Discriminator().to(cfg.DEVICE) opt_G = cfg.TRAIN.OPTIMIZER_G(G.parameters(), **cfg.TRAIN.LEARNING_PARAMS_G) opt_D = cfg.TRAIN.OPTIMIZER_D(D.parameters(), **cfg.TRAIN.LEARNING_PARAMS_D) #path_dict = data_load(cfg) #paths = path_dict['paths'] #paths_gt = path_dict['paths_gt'] trainset = torchvision.datasets.CIFAR10(root=cfg.TRAIN.DATA_PATH , train=True, download=True, transform=None) train_Xs = trainset.data train_ys = trainset.targets # training mbi = 0 train_N = len(train_Xs) train_ind = np.arange(train_N) np.random.seed(0) np.random.shuffle(train_ind) list_iter = [] list_loss_G = [] list_loss_D = [] list_loss_D_real = [] list_loss_D_fake = [] list_loss_WDistance = [] one = torch.FloatTensor([1]) minus_one = one * -1 print('training start') progres_bar = '' for i in range(cfg.TRAIN.ITERATION): if mbi + cfg.TRAIN.MINIBATCH > train_N: mb_ind = train_ind[mbi:] np.random.shuffle(train_ind) mb_ind = np.hstack((mb_ind, train_ind[ : (cfg.TRAIN.MINIBATCH - (train_N - mbi))])) mbi = 
cfg.TRAIN.MINIBATCH - (train_N - mbi) else: mb_ind = train_ind[mbi : mbi + cfg.TRAIN.MINIBATCH] mbi += cfg.TRAIN.MINIBATCH # update D for _ in range(cfg.TRAIN.WGAN_CRITIC_N): opt_D.zero_grad() # parameter clipping > [-clip_value, clip_value] for param in D.parameters(): param.data.clamp_(- cfg.TRAIN.WGAN_CLIPS_VALUE, cfg.TRAIN.WGAN_CLIPS_VALUE) # sample X Xs = torch.tensor(preprocess(train_Xs[mb_ind], cfg, cfg.OUTPUT_MODE), dtype=torch.float).to(cfg.DEVICE) # sample x z = np.random.uniform(-1, 1, size=(cfg.TRAIN.MINIBATCH, cfg.INPUT_Z_DIM, 1, 1)) z = torch.tensor(z, dtype=torch.float).to(cfg.DEVICE) # forward Gz = G(z) loss_D_fake = D(Gz).mean(0).view(1) loss_D_real = D(Xs).mean(0).view(1) loss_D = loss_D_fake - loss_D_real loss_D_real.backward(one) loss_D_fake.backward(minus_one) opt_D.step() Wasserstein_distance = loss_D_real - loss_D_fake # update G opt_G.zero_grad() z = np.random.uniform(-1, 1, size=(cfg.TRAIN.MINIBATCH, cfg.INPUT_Z_DIM, 1, 1)) z = torch.tensor(z, dtype=torch.float).to(cfg.DEVICE) loss_G = D(G(z)).mean(0).view(1) loss_G.backward(one) opt_G.step() progres_bar += '|' print('\r' + progres_bar, end='') _loss_G = loss_G.item() _loss_D = loss_D.item() _loss_D_real = loss_D_real.item() _loss_D_fake = loss_D_fake.item() _Wasserstein_distance = Wasserstein_distance.item() if (i + 1) % 10 == 0: progres_bar += str(i + 1) print('\r' + progres_bar, end='') # save process result if cfg.TRAIN.LEARNING_PROCESS_RESULT_SAVE: list_iter.append(i + 1) list_loss_G.append(_loss_G) list_loss_D.append(_loss_D) list_loss_D_real.append(_loss_D_real) list_loss_D_fake.append(_loss_D_fake) list_loss_WDistance.append(_Wasserstein_distance) # display training state if (i + 1) % cfg.TRAIN.DISPAY_ITERATION_INTERVAL == 0: print('\r' + ' ' * len(progres_bar), end='') print('\rIter:{}, LossG (fake:{:.4f}), LossD:{:.4f} (real:{:.4f}, fake:{:.4f}), WDistance:{:.4f}'.format( i + 1, _loss_G, _loss_D, _loss_D_real, _loss_D_fake, _Wasserstein_distance)) progres_bar = '' # save parameters if (cfg.TRAIN.MODEL_SAVE_INTERVAL != False) and ((i + 1) % cfg.TRAIN.MODEL_SAVE_INTERVAL == 0): G_save_path = cfg.TRAIN.MODEL_G_SAVE_PATH.format('iter{}'.format(i + 1)) D_save_path = cfg.TRAIN.MODEL_D_SAVE_PATH.format('iter{}'.format(i + 1)) torch.save(G.state_dict(), G_save_path) torch.save(D.state_dict(), D_save_path) print('save G >> {}, D >> {}'.format(G_save_path, D_save_path)) # save process result if cfg.TRAIN.LEARNING_PROCESS_RESULT_SAVE and ((i + 1) % cfg.TRAIN.LEARNING_PROCESS_RESULT_INTERVAL == 0): result_show( G, z, cfg.TRAIN.LEARNING_PROCESS_RESULT_IMAGE_PATH.format('iter' + str(i + 1)), save=cfg.TRAIN.LEARNING_PROCESS_RESULT_SAVE, show=True) G_save_path = cfg.TRAIN.MODEL_G_SAVE_PATH.format('final') D_save_path = cfg.TRAIN.MODEL_D_SAVE_PATH.format('final') torch.save(G.state_dict(), G_save_path) torch.save(D.state_dict(), D_save_path) print('final paramters were saved to G >> {}, D >> {}'.format(G_save_path, D_save_path)) if cfg.TRAIN.LEARNING_PROCESS_RESULT_SAVE: f = open(cfg.TRAIN.LEARNING_PROCESS_RESULT_LOSS_PATH, 'w') df = pd.DataFrame({'iteration' : list_iter, 'loss_G' : list_loss_G, 'loss_D' : list_loss_D, 'loss_D_real' : list_loss_D_real, 'loss_D_fake' : list_loss_D_fake, 'Wasserstein_Distance' : list_loss_WDistance}) df.to_csv(cfg.TRAIN.LEARNING_PROCESS_RESULT_LOSS_PATH, index=False) print('loss was saved to >> {}'.format(cfg.TRAIN.LEARNING_PROCESS_RESULT_LOSS_PATH)) train() ``` ## Test ``` # test def test(): print('-' * 20) print('test function') print('-' * 20) G = Generator().to(cfg.DEVICE) 
G.load_state_dict(torch.load(cfg.TEST.MODEL_G_PATH, map_location=torch.device(cfg.DEVICE))) G.eval() np.random.seed(0) for i in range(cfg.TEST.ITERATION): z = np.random.uniform(-1, 1, size=(cfg.TEST.MINIBATCH, cfg.INPUT_Z_DIM, 1, 1)) z = torch.tensor(z, dtype=torch.float).to(cfg.DEVICE) result_show(G, z, cfg.TEST.RESULT_IMAGE_PATH.format(i + 1), save=cfg.TEST.RESULT_SAVE, show=True) test() def arg_parse(): parser = argparse.ArgumentParser(description='CNN implemented with Keras') parser.add_argument('--train', dest='train', action='store_true') parser.add_argument('--test', dest='test', action='store_true') args = parser.parse_args() return args # main if __name__ == '__main__': args = arg_parse() if args.train: train() if args.test: test() if not (args.train or args.test): print("please select train or test flag") print("train: python main.py --train") print("test: python main.py --test") print("both: python main.py --train --test") ```
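As a footnote to the formulas in the introduction above, here is a minimal stand-alone sketch of the two loss quantities. Random tensors stand in for the critic's outputs on real and generated batches; the training loop above realises the same quantities through its `backward(one)` / `backward(minus_one)` calls with its own sign convention.

```
import torch

# Stand-ins for D(x^(i)) and D(G(z^(i))) over a minibatch of 32 samples.
d_real = torch.randn(32, 1)
d_fake = torch.randn(32, 1)

# L_D from the introduction: mean critic score on real minus mean score on fake.
loss_D = d_real.mean() - d_fake.mean()

# L_G from the introduction: mean critic score on generated samples.
loss_G = d_fake.mean()

# The Wasserstein distance estimate logged during training is the same real-minus-fake gap.
w_distance = d_real.mean() - d_fake.mean()
print(loss_D.item(), loss_G.item(), w_distance.item())
```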
github_jupyter
``` #hide # default_exp script ``` # Script - command line interfaces > A fast way to turn your python function into a script. Part of [fast.ai](https://www.fast.ai)'s toolkit for delightful developer experiences. ## Overview Sometimes, you want to create a quick script, either for yourself, or for others. But in Python, that involves a whole lot of boilerplate and ceremony, especially if you want to support command line arguments, provide help, and other niceties. You can use [argparse](https://docs.python.org/3/library/argparse.html) for this purpose, which comes with Python, but it's complex and verbose. `fastcore.script` makes life easier. There are much fancier modules to help you write scripts (we recommend [Python Fire](https://github.com/google/python-fire), and [Click](https://click.palletsprojects.com/en/7.x/) is also popular), but fastcore.script is very fast and very simple. In fact, it's <50 lines of code! Basically, it's just a little wrapper around `argparse` that uses modern Python features and some thoughtful defaults to get rid of the boilerplate. For full details, see the [docs](https://fastcore.script.fast.ai) for `core`. ## Example Here's a complete example (available in `examples/test_fastcore.py`): ```python from fastcore.script import * @call_parse def main(msg:Param("The message", str), upper:Param("Convert to uppercase?", store_true)): "Print `msg`, optionally converting to uppercase" print(msg.upper() if upper else msg) ```` If you copy that info a file and run it, you'll see: ``` $ examples/test_fastcore.py --help usage: test_fastcore.py [-h] [--upper] [--pdb PDB] [--xtra XTRA] msg Print `msg`, optionally converting to uppercase positional arguments: msg The message optional arguments: -h, --help show this help message and exit --upper Convert to uppercase? (default: False) --pdb PDB Run in pdb debugger (default: False) --xtra XTRA Parse for additional args (default: '') ``` As you see, we didn't need any `if __name__ == "__main__"`, we didn't have to parse arguments, we just wrote a function, added a decorator to it, and added some annotations to our function's parameters. As a bonus, we can also use this function directly from a REPL such as Jupyter Notebook - it's not just for command line scripts! ## Param annotations Each parameter in your function should have an annotation `Param(...)` (as in the example above). You can pass the following when calling `Param`: `help`,`type`,`opt`,`action`,`nargs`,`const`,`choices`,`required` . Except for `opt`, all of these are just passed directly to `argparse`, so you have all the power of that module at your disposal. Generally you'll want to pass at least `help` (since this is provided as the help string for that parameter) and `type` (to ensure that you get the type of data you expect). `opt` is a bool that defines whether a param is optional or required (positional) - but you'll generally not need to set this manually, because fastcore.script will set it for you automatically based on *default* values. You should provide a default (after the `=`) for any *optional* parameters. If you don't provide a default for a parameter, then it will be a *positional* parameter. ## setuptools scripts There's a really nice feature of pip/setuptools that lets you create commandline scripts directly from functions, makes them available in the `PATH`, and even makes your scripts cross-platform (e.g. in Windows it creates an exe). fastcore.script supports this feature too. 
The trick to making a function available as a script is to add a `console_scripts` section to your setup file, of the form: `script_name=module:function_name`. E.g. in this case we use: `test_fastcore.script=fastcore.script.test_cli:main`. With this, you can then just type `test_fastcore.script` at any time, from any directory, and your script will be called (once it's installed using one of the methods below). You don't actually have to write a `setup.py` yourself. Instead, just use [nbdev](https://nbdev.fast.ai). Then modify `settings.ini` as appropriate for your module/script. To install your script directly, you can type `pip install -e .`. Your script, when installed this way (it's called an [editable install](http://codumentary.blogspot.com/2014/11/python-tip-of-year-pip-install-editable.html), will automatically be up to date even if you edit it - there's no need to reinstall it after editing. With nbdev you can even make your module and script available for installation directly from pip and conda by running `make release`. ## API details ``` from fastcore.test import * #export import inspect,functools,argparse,shutil from fastcore.imports import * from fastcore.utils import * #export def store_true(): "Placeholder to pass to `Param` for `store_true` action" pass #export def store_false(): "Placeholder to pass to `Param` for `store_false` action" pass #export def bool_arg(v): "Use as `type` for `Param` to get `bool` behavior" return str2bool(v) #export def clean_type_str(x:str): x = str(x) x = re.sub("(enum |class|function|__main__\.|\ at.*)", '', x) x = re.sub("(<|>|'|\ )", '', x) # spl characters return x class Test: pass test_eq(clean_type_str(argparse.ArgumentParser), 'argparse.ArgumentParser') test_eq(clean_type_str(Test), 'Test') test_eq(clean_type_str(int), 'int') test_eq(clean_type_str(float), 'float') test_eq(clean_type_str(store_false), 'store_false') #export class Param: "A parameter in a function used in `anno_parser` or `call_parse`" def __init__(self, help=None, type=None, opt=True, action=None, nargs=None, const=None, choices=None, required=None, default=None): if type==store_true: type,action,default=None,'store_true' ,False if type==store_false: type,action,default=None,'store_false',True if type and isinstance(type,typing.Type) and issubclass(type,enum.Enum) and not choices: choices=list(type) store_attr() def set_default(self, d): if self.default is None: if d==inspect.Parameter.empty: self.opt = False else: self.default = d if self.default is not None: self.help += f" (default: {self.default})" @property def pre(self): return '--' if self.opt else '' @property def kwargs(self): return {k:v for k,v in self.__dict__.items() if v is not None and k!='opt' and k[0]!='_'} def __repr__(self): if not self.help and self.type is None: return "" if not self.help and self.type is not None: return f"{clean_type_str(self.type)}" if self.help and self.type is None: return f"<{self.help}>" if self.help and self.type is not None: return f"{clean_type_str(self.type)} <{self.help}>" test_eq(repr(Param("Help goes here")), '<Help goes here>') test_eq(repr(Param("Help", int)), 'int <Help>') test_eq(repr(Param(help=None, type=int)), 'int') test_eq(repr(Param(help=None, type=None)), '') ``` Each parameter in your function should have an annotation `Param(...)`. You can pass the following when calling `Param`: `help`,`type`,`opt`,`action`,`nargs`,`const`,`choices`,`required` (i.e. it takes the same parameters as `argparse.ArgumentParser.add_argument`, plus `opt`). 
Except for `opt`, all of these are just passed directly to `argparse`, so you have all the power of that module at your disposal. Generally you'll want to pass at least `help` (since this is provided as the help string for that parameter) and `type` (to ensure that you get the type of data you expect). `opt` is a bool that defines whether a param is optional or required (positional) - but you'll generally not need to set this manually, because fastcore.script will set it for you automatically based on *default* values. You should provide a default (after the `=`) for any *optional* parameters. If you don't provide a default for a parameter, then it will be a *positional* parameter. Param's `__repr__` also allows for more informative function annotation when looking up the function's doc using shift+tab. You see the type annotation (if there is one) and the accompanying help documentation with it. ``` def f(required:Param("Required param", int), a:Param("param 1", bool_arg), b:Param("param 2", str)="test"): "my docs" ... help(f) p = Param(help="help", type=int) p.set_default(1) test_eq(p.kwargs, {'help': 'help (default: 1)', 'type': int, 'default': 1}) #export def anno_parser(func, prog=None, from_name=False): "Look at params (annotated with `Param`) in func and return an `ArgumentParser`" cols = shutil.get_terminal_size((120,30))[0] fmtr = partial(argparse.HelpFormatter, max_help_position=cols//2, width=cols) p = argparse.ArgumentParser(description=func.__doc__, prog=prog, formatter_class=fmtr) for k,v in inspect.signature(func).parameters.items(): param = func.__annotations__.get(k, Param()) param.set_default(v.default) p.add_argument(f"{param.pre}{k}", **param.kwargs) p.add_argument(f"--pdb", help=argparse.SUPPRESS, action='store_true') p.add_argument(f"--xtra", help=argparse.SUPPRESS, type=str) return p ``` This converts a function with parameter annotations of type `Param` into an `argparse.ArgumentParser` object. Function arguments with a default provided are optional, and other arguments are positional. ``` _en = str_enum('_en', 'aa','bb','cc') def f(required:Param("Required param", int), a:Param("param 1", bool_arg), b:Param("param 2", str)="test", c:Param("param 3", _en)=_en.aa): "my docs" ... p = anno_parser(f, 'progname') p.print_help() #export def args_from_prog(func, prog): "Extract args from `prog`" if prog is None or '#' not in prog: return {} if '##' in prog: _,prog = prog.split('##', 1) progsp = prog.split("#") args = {progsp[i]:progsp[i+1] for i in range(0, len(progsp), 2)} for k,v in args.items(): t = func.__annotations__.get(k, Param()).type if t: args[k] = t(v) return args ``` Sometimes it's convenient to extract arguments from the actual name of the called program. `args_from_prog` will do this, assuming that names and values of the params are separated by a `#`. Optionally there can also be a prefix separated by `##` (double underscore). 
``` exp = {'a': False, 'b': 'baa'} test_eq(args_from_prog(f, 'foo##a#0#b#baa'), exp) test_eq(args_from_prog(f, 'a#0#b#baa'), exp) #export SCRIPT_INFO = SimpleNamespace(func=None) #export def call_parse(func): "Decorator to create a simple CLI from `func` using `anno_parser`" mod = inspect.getmodule(inspect.currentframe().f_back) if not mod: return func @functools.wraps(func) def _f(*args, **kwargs): mod = inspect.getmodule(inspect.currentframe().f_back) if not mod: return func(*args, **kwargs) if not SCRIPT_INFO.func and mod.__name__=="__main__": SCRIPT_INFO.func = func.__name__ if len(sys.argv)>1 and sys.argv[1]=='': sys.argv.pop(1) p = anno_parser(func) args = p.parse_args().__dict__ xtra = otherwise(args.pop('xtra', ''), eq(1), p.prog) tfunc = trace(func) if args.pop('pdb', False) else func tfunc(**merge(args, args_from_prog(func, xtra))) if mod.__name__=="__main__": setattr(mod, func.__name__, _f) SCRIPT_INFO.func = func.__name__ return _f() else: return _f @call_parse def test_add(a:Param("param a", int), b:Param("param 1",int)): return a + b ``` `call_parse` decorated functions work as regular functions and also as command-line interface functions. ``` test_eq(test_add(1,2), 3) ``` This is the main way to use `fastcore.script`; decorate your function with `call_parse`, add `Param` annotations as shown above, and it can then be used as a script. ## Export - ``` #hide from nbdev.export import notebook2script notebook2script() ```
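As a closing note on the setuptools section earlier: if you write `setup.py` by hand instead of letting nbdev generate it, the `console_scripts` string discussed above goes into `entry_points`. The sketch below is illustrative only; the package and script names are made up.

```
# setup.py (hypothetical package layout)
from setuptools import setup, find_packages

setup(
    name='mypkg',
    packages=find_packages(),
    entry_points={
        'console_scripts': [
            # "script name on PATH" = "module path" : "call_parse-decorated function"
            'my-script=mypkg.cli:main',
        ],
    },
)
```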
github_jupyter
# Image Processing Dense Array, JPEG, PNG > In this post, we will cover the basics of working with images in Matplotlib, OpenCV and Keras. - toc: true - badges: true - comments: true - categories: [Image Processing, Computer Vision] - image: images/freedom.png Images are dense matrices with a certain number of rows and columns. They can have 1 (grey), 3 (RGB) or 4 (RGB + alpha transparency) channels. The dimension of the image matrix is (height, width, channels). ``` import numpy as np import matplotlib.pyplot as plt import matplotlib.image as mpimg from PIL import Image import cv2 from sys import getsizeof import tensorflow as tf ``` # 1. Load image files (*.jpg, *.png, *.bmp, *.tif) - ~~using PIL~~ - using matplotlib: reads the image as RGB - using cv2: reads the image as BGR - imread: reads a file from disk and decodes it - imsave: encodes an image and writes it to a file on disk ``` #using PIL #image = Image.open("images/freedom.png") #plt.show(image) ``` #### Load image using Matplotlib The Matplotlib image tutorial recommends using matplotlib.image.imread to read image formats from disk. This function will automatically change image array values to floats between zero and one, and it doesn't give any other options about how to read the image. - imshow works on 0-1 floats & 0-255 uint8 values - It doesn't work on int! ``` #using matplotlib.image image = mpimg.imread("images/freedom.png") plt.imshow(image) plt.colorbar() print(image.dtype) freedom_array_uint8 = (image*255).astype(np.uint8) #convert to 0-255 values ``` #### Load image using OpenCV ``` #using opencv image = cv2.imread("images/freedom.png") #OpenCV uses BGR as its default colour order for images, matplotlib uses RGB RGB_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)# cv2.cvtColor() method is used to convert an image from one color space to another plt.imshow(RGB_image) plt.colorbar() ``` For this image, the matrix will have 600 x 400 x 3 = 720,000 values. Each value is an unsigned 8-bit integer, giving 720,000 bytes in total. Using unsigned 8-bit integers (256 possible values) for each value in the image array is enough for displaying images to humans. But when working with image data, it isn't uncommon to switch to 32-bit floats, for example. This tremendously increases the size of the data. By loading the image files we get arrays, and typical array operations can be performed on them. ``` print (RGB_image.shape, RGB_image.dtype) ``` ### Load image using keras.preprocessing.image - load_img(image): loads and decodes image - img_to_array(image) ``` image_keras = tf.keras.preprocessing.image.load_img("images/freedom.png") # loads and decodes image print(type(image_keras)) print(image_keras.format) print(image_keras.mode) print(image_keras.size) #image_keras.show() ``` # 2. Image Processing #### Dense Array One way to store complete raster image data is by serializing a NumPy array to disk. freedom.npy = 720,128 bytes. The file freedom.npy has 128 more bytes than are required to store the raw array values. Those extra bytes specify things like the array shape/dimensions. ``` np.save("images/freedom.npy", RGB_image) freedomnpy = np.load('images/freedom.npy') print("Size of array:", freedomnpy.nbytes) print("Size of disk:", getsizeof(freedomnpy)) ``` Storing one pixel takes several bytes. There are two main options when saving images: whether to lose some information while saving, or not.
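One small gap in the Keras section above: the bullet list mentions `img_to_array`, but the cell only calls `load_img`. A minimal sketch of completing that round trip (reusing the same file path as above) could look like this:

```
from tensorflow.keras.preprocessing.image import load_img, img_to_array

img = load_img("images/freedom.png")
arr = img_to_array(img)                 # float32 NumPy array, shape (height, width, channels)
print(arr.shape, arr.dtype, arr.max())  # values are 0-255 floats by default
```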
#### JPG format - JPEG is lossy by default - When an image is saved as `*.jpg` and read back again, you do not necessarily get the same values back - The file "freedom_jpg.jpg" is 6.3 kB, less than 1% of the size of the `*.npy` file that generated it - cv2.IMWRITE_JPEG_QUALITY ranges from 0 to 100: higher values mean better quality and larger files (even at 100, standard JPEG is not lossless) ``` cv2.imwrite("images/freedom_jpg.jpg", freedomnpy, [cv2.IMWRITE_JPEG_QUALITY, 0]) freedom_jpg = cv2.imread("images/freedom_jpg.jpg") plt.imshow(freedom_jpg) ``` #### PNG format - PNG is lossless - When an image is saved as `*.png` and read back again, you get exactly the same values back - cv2.IMWRITE_PNG_COMPRESSION ranges from 0 to 9: a higher value gives a smaller file but slower compression - freedom_png.png = 721.8 kB, close to the size of freedom.npy ``` cv2.imwrite("images/freedom_png.png", freedomnpy, [cv2.IMWRITE_PNG_COMPRESSION, 0]) freedom_png = cv2.imread("images/freedom_png.png") plt.imshow(freedom_png) ``` References: <https://planspace.org/20170403-images_and_tfrecords/> <https://subscription.packtpub.com/book/application_development/9781788474443/1/ch01lvl1sec14/saving-images-using-lossy-and-lossless-compression> <https://www.tensorflow.org/tutorials/load_data/tfrecord> <https://machinelearningmastery.com/how-to-load-convert-and-save-images-with-the-keras-api/>
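To put numbers on the lossy-versus-lossless trade-off described above, the following self-contained sketch writes the same synthetic image at several JPEG quality levels and as PNG, then compares file sizes and the maximum pixel error after reading each file back. The `/tmp` file names are arbitrary, and random noise compresses far worse than a real photograph, so only the relative behaviour matters here.

```
import os
import cv2
import numpy as np

# Synthetic 600 x 400 3-channel image so the snippet is self-contained
img = np.random.randint(0, 256, size=(600, 400, 3), dtype=np.uint8)

for quality in (0, 50, 95):
    path = f"/tmp/demo_q{quality}.jpg"
    cv2.imwrite(path, img, [cv2.IMWRITE_JPEG_QUALITY, quality])
    back = cv2.imread(path)
    print(f"JPEG q={quality:3d}: {os.path.getsize(path):8d} bytes, "
          f"max abs error = {np.abs(back.astype(int) - img.astype(int)).max()}")

png_path = "/tmp/demo.png"
cv2.imwrite(png_path, img, [cv2.IMWRITE_PNG_COMPRESSION, 9])
back = cv2.imread(png_path)
print(f"PNG        : {os.path.getsize(png_path):8d} bytes, "
      f"max abs error = {np.abs(back.astype(int) - img.astype(int)).max()}")  # 0 -> lossless
```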
github_jupyter
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/contrib/fairness/fairlearn-azureml-mitigation.png) # Unfairness Mitigation with Fairlearn and Azure Machine Learning **This notebook shows how to upload results from Fairlearn's GridSearch mitigation algorithm into a dashboard in Azure Machine Learning Studio** ## Table of Contents 1. [Introduction](#Introduction) 1. [Loading the Data](#LoadingData) 1. [Training an Unmitigated Model](#UnmitigatedModel) 1. [Mitigation with GridSearch](#Mitigation) 1. [Uploading a Fairness Dashboard to Azure](#AzureUpload) 1. Registering models 1. Computing Fairness Metrics 1. Uploading to Azure 1. [Conclusion](#Conclusion) <a id="Introduction"></a> ## Introduction This notebook shows how to use [Fairlearn (an open source fairness assessment and unfairness mitigation package)](http://fairlearn.github.io) and Azure Machine Learning Studio for a binary classification problem. This example uses the well-known adult census dataset. For the purposes of this notebook, we shall treat this as a loan decision problem. We will pretend that the label indicates whether or not each individual repaid a loan in the past. We will use the data to train a predictor to predict whether previously unseen individuals will repay a loan or not. The assumption is that the model predictions are used to decide whether an individual should be offered a loan. Its purpose is purely illustrative of a workflow including a fairness dashboard - in particular, we do **not** include a full discussion of the detailed issues which arise when considering fairness in machine learning. For such discussions, please [refer to the Fairlearn website](http://fairlearn.github.io/). We will apply the [grid search algorithm](https://fairlearn.github.io/master/api_reference/fairlearn.reductions.html#fairlearn.reductions.GridSearch) from the Fairlearn package using a specific notion of fairness called Demographic Parity. This produces a set of models, and we will view these in a dashboard both locally and in the Azure Machine Learning Studio. ### Setup To use this notebook, an Azure Machine Learning workspace is required. Please see the [configuration notebook](../../configuration.ipynb) for information about creating one, if required. This notebook also requires the following packages: * `azureml-contrib-fairness` * `fairlearn==0.4.6` (v0.5.0 will work with minor modifications) * `joblib` * `shap` Fairlearn relies on features introduced in v0.22.1 of `scikit-learn`. If you have an older version already installed, please uncomment and run the following cell: ``` # !pip install --upgrade scikit-learn>=0.22.1 ``` Finally, please ensure that when you downloaded this notebook, you also downloaded the `fairness_nb_utils.py` file from the same location, and placed it in the same directory as this notebook. <a id="LoadingData"></a> ## Loading the Data We use the well-known `adult` census dataset, which we will fetch from the OpenML website. 
We start with a fairly unremarkable set of imports: ``` from fairlearn.reductions import GridSearch, DemographicParity, ErrorRate from fairlearn.widget import FairlearnDashboard from sklearn.compose import ColumnTransformer from sklearn.datasets import fetch_openml from sklearn.impute import SimpleImputer from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler, OneHotEncoder from sklearn.compose import make_column_selector as selector from sklearn.pipeline import Pipeline import pandas as pd ``` We can now load and inspect the data: ``` from fairness_nb_utils import fetch_openml_with_retries data = fetch_openml_with_retries(data_id=1590) # Extract the items we want X_raw = data.data y = (data.target == '>50K') * 1 X_raw["race"].value_counts().to_dict() ``` We are going to treat the sex and race of each individual as protected attributes, and in this particular case we are going to remove these attributes from the main data (this is not always the best option - see the [Fairlearn website](http://fairlearn.github.io/) for further discussion). Protected attributes are often denoted by 'A' in the literature, and we follow that convention here: ``` A = X_raw[['sex','race']] X_raw = X_raw.drop(labels=['sex', 'race'],axis = 1) ``` We now preprocess our data. To avoid the problem of data leakage, we split our data into training and test sets before performing any other transformations. Subsequent transformations (such as scalings) will be fit to the training data set, and then applied to the test dataset. ``` (X_train, X_test, y_train, y_test, A_train, A_test) = train_test_split( X_raw, y, A, test_size=0.3, random_state=12345, stratify=y ) # Ensure indices are aligned between X, y and A, # after all the slicing and splitting of DataFrames # and Series X_train = X_train.reset_index(drop=True) X_test = X_test.reset_index(drop=True) y_train = y_train.reset_index(drop=True) y_test = y_test.reset_index(drop=True) A_train = A_train.reset_index(drop=True) A_test = A_test.reset_index(drop=True) ``` We have two types of column in the dataset - categorical columns which will need to be one-hot encoded, and numeric ones which will need to be rescaled. We also need to take care of missing values. We use a simple approach here, but please bear in mind that this is another way that bias could be introduced (especially if one subgroup tends to have more missing values). 
For this preprocessing, we make use of `Pipeline` objects from `sklearn`: ``` numeric_transformer = Pipeline( steps=[ ("impute", SimpleImputer()), ("scaler", StandardScaler()), ] ) categorical_transformer = Pipeline( [ ("impute", SimpleImputer(strategy="most_frequent")), ("ohe", OneHotEncoder(handle_unknown="ignore", sparse=False)), ] ) preprocessor = ColumnTransformer( transformers=[ ("num", numeric_transformer, selector(dtype_exclude="category")), ("cat", categorical_transformer, selector(dtype_include="category")), ] ) ``` Now, the preprocessing pipeline is defined, we can run it on our training data, and apply the generated transform to our test data: ``` X_train = preprocessor.fit_transform(X_train) X_test = preprocessor.transform(X_test) ``` <a id="UnmitigatedModel"></a> ## Training an Unmitigated Model So we have a point of comparison, we first train a model (specifically, logistic regression from scikit-learn) on the raw data, without applying any mitigation algorithm: ``` unmitigated_predictor = LogisticRegression(solver='liblinear', fit_intercept=True) unmitigated_predictor.fit(X_train, y_train) ``` We can view this model in the fairness dashboard, and see the disparities which appear: ``` FairlearnDashboard(sensitive_features=A_test, sensitive_feature_names=['Sex', 'Race'], y_true=y_test, y_pred={"unmitigated": unmitigated_predictor.predict(X_test)}) ``` Looking at the disparity in accuracy when we select 'Sex' as the sensitive feature, we see that males have an error rate about three times greater than the females. More interesting is the disparity in opportunitiy - males are offered loans at three times the rate of females. Despite the fact that we removed the feature from the training data, our predictor still discriminates based on sex. This demonstrates that simply ignoring a protected attribute when fitting a predictor rarely eliminates unfairness. There will generally be enough other features correlated with the removed attribute to lead to disparate impact. <a id="Mitigation"></a> ## Mitigation with GridSearch The `GridSearch` class in `Fairlearn` implements a simplified version of the exponentiated gradient reduction of [Agarwal et al. 2018](https://arxiv.org/abs/1803.02453). The user supplies a standard ML estimator, which is treated as a blackbox - for this simple example, we shall use the logistic regression estimator from scikit-learn. `GridSearch` works by generating a sequence of relabellings and reweightings, and trains a predictor for each. For this example, we specify demographic parity (on the protected attribute of sex) as the fairness metric. Demographic parity requires that individuals are offered the opportunity (a loan in this example) independent of membership in the protected class (i.e., females and males should be offered loans at the same rate). *We are using this metric for the sake of simplicity* in this example; the appropriate fairness metric can only be selected after *careful examination of the broader context* in which the model is to be used. ``` sweep = GridSearch(LogisticRegression(solver='liblinear', fit_intercept=True), constraints=DemographicParity(), grid_size=71) ``` With our estimator created, we can fit it to the data. After `fit()` completes, we extract the full set of predictors from the `GridSearch` object. 
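Before running the sweep, here is a tiny self-contained illustration of the demographic-parity notion just described: the selection rate is the fraction of positive predictions within each group, and parity asks those rates to be (approximately) equal. The data below is made up purely for illustration.

```
import pandas as pd

# Toy predictions for two groups (not the census data)
toy = pd.DataFrame({
    "sex":    ["F", "F", "F", "F", "M", "M", "M", "M"],
    "y_pred": [ 1,   0,   0,   0,   1,   1,   1,   0 ],
})

selection_rates = toy.groupby("sex")["y_pred"].mean()
print(selection_rates)                                   # F: 0.25, M: 0.75
print("demographic parity difference:",
      selection_rates.max() - selection_rates.min())     # 0.5 -> far from parity
```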
The following cell trains a many copies of the underlying estimator, and may take a minute or two to run: ``` sweep.fit(X_train, y_train, sensitive_features=A_train.sex) # For Fairlearn v0.5.0, need sweep.predictors_ predictors = sweep._predictors ``` We could load these predictors into the Fairness dashboard now. However, the plot would be somewhat confusing due to their number. In this case, we are going to remove the predictors which are dominated in the error-disparity space by others from the sweep (note that the disparity will only be calculated for the protected attribute; other potentially protected attributes will *not* be mitigated). In general, one might not want to do this, since there may be other considerations beyond the strict optimisation of error and disparity (of the given protected attribute). ``` errors, disparities = [], [] for m in predictors: classifier = lambda X: m.predict(X) error = ErrorRate() error.load_data(X_train, pd.Series(y_train), sensitive_features=A_train.sex) disparity = DemographicParity() disparity.load_data(X_train, pd.Series(y_train), sensitive_features=A_train.sex) errors.append(error.gamma(classifier)[0]) disparities.append(disparity.gamma(classifier).max()) all_results = pd.DataFrame( {"predictor": predictors, "error": errors, "disparity": disparities}) dominant_models_dict = dict() base_name_format = "census_gs_model_{0}" row_id = 0 for row in all_results.itertuples(): model_name = base_name_format.format(row_id) errors_for_lower_or_eq_disparity = all_results["error"][all_results["disparity"]<=row.disparity] if row.error <= errors_for_lower_or_eq_disparity.min(): dominant_models_dict[model_name] = row.predictor row_id = row_id + 1 ``` We can construct predictions for the dominant models (we include the unmitigated predictor as well, for comparison): ``` predictions_dominant = {"census_unmitigated": unmitigated_predictor.predict(X_test)} models_dominant = {"census_unmitigated": unmitigated_predictor} for name, predictor in dominant_models_dict.items(): value = predictor.predict(X_test) predictions_dominant[name] = value models_dominant[name] = predictor ``` These predictions may then be viewed in the fairness dashboard. We include the race column from the dataset, as an alternative basis for assessing the models. However, since we have not based our mitigation on it, the variation in the models with respect to race can be large. ``` FairlearnDashboard(sensitive_features=A_test, sensitive_feature_names=['Sex', 'Race'], y_true=y_test.tolist(), y_pred=predictions_dominant) ``` When using sex as the sensitive feature and accuracy as the metric, we see a Pareto front forming - the set of predictors which represent optimal tradeoffs between accuracy and disparity in predictions. In the ideal case, we would have a predictor at (1,0) - perfectly accurate and without any unfairness under demographic parity (with respect to the protected attribute "sex"). The Pareto front represents the closest we can come to this ideal based on our data and choice of estimator. Note the range of the axes - the disparity axis covers more values than the accuracy, so we can reduce disparity substantially for a small loss in accuracy. Finally, we also see that the unmitigated model is towards the top right of the plot, with high accuracy, but worst disparity. By clicking on individual models on the plot, we can inspect their metrics for disparity and accuracy in greater detail. 
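The dominance filtering performed above can be illustrated in isolation: given synthetic (error, disparity) pairs, keep only the points that no other point beats on both axes. This mirrors the idea of the loop above rather than Fairlearn's own code.

```
import numpy as np
import matplotlib.pyplot as plt

# Synthetic (error, disparity) pairs standing in for the sweep results above
rng = np.random.default_rng(0)
points = rng.random((30, 2))          # column 0: error, column 1: disparity

def is_dominated(i, pts):
    """True if some other point is <= on both axes and strictly < on at least one."""
    better_or_equal = np.all(pts <= pts[i], axis=1)
    strictly_better = np.any(pts < pts[i], axis=1)
    others = np.arange(len(pts)) != i
    return bool(np.any(better_or_equal & strictly_better & others))

front = np.array([p for i, p in enumerate(points) if not is_dominated(i, points)])

plt.scatter(points[:, 0], points[:, 1], label="all models")
plt.scatter(front[:, 0], front[:, 1], color="r", label="non-dominated (Pareto front)")
plt.xlabel("error")
plt.ylabel("disparity")
plt.legend()
plt.show()
```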
In a real example, we would then pick the model which represented the best trade-off between accuracy and disparity given the relevant business constraints. <a id="AzureUpload"></a> ## Uploading a Fairness Dashboard to Azure Uploading a fairness dashboard to Azure is a two stage process. The `FairlearnDashboard` invoked in the previous section relies on the underlying Python kernel to compute metrics on demand. This is obviously not available when the fairness dashboard is rendered in AzureML Studio. By default, the dashboard in Azure Machine Learning Studio also requires the models to be registered. The required stages are therefore: 1. Register the dominant models 1. Precompute all the required metrics 1. Upload to Azure Before that, we need to connect to Azure Machine Learning Studio: ``` from azureml.core import Workspace, Experiment, Model ws = Workspace.from_config() ws.get_details() ``` <a id="RegisterModels"></a> ### Registering Models The fairness dashboard is designed to integrate with registered models, so we need to do this for the models we want in the Studio portal. The assumption is that the names of the models specified in the dashboard dictionary correspond to the `id`s (i.e. `<name>:<version>` pairs) of registered models in the workspace. We register each of the models in the `models_dominant` dictionary into the workspace. For this, we have to save each model to a file, and then register that file: ``` import joblib import os os.makedirs('models', exist_ok=True) def register_model(name, model): print("Registering ", name) model_path = "models/{0}.pkl".format(name) joblib.dump(value=model, filename=model_path) registered_model = Model.register(model_path=model_path, model_name=name, workspace=ws) print("Registered ", registered_model.id) return registered_model.id model_name_id_mapping = dict() for name, model in models_dominant.items(): m_id = register_model(name, model) model_name_id_mapping[name] = m_id ``` Now, produce new predictions dictionaries, with the updated names: ``` predictions_dominant_ids = dict() for name, y_pred in predictions_dominant.items(): predictions_dominant_ids[model_name_id_mapping[name]] = y_pred ``` <a id="PrecomputeMetrics"></a> ### Precomputing Metrics We create a _dashboard dictionary_ using Fairlearn's `metrics` package. The `_create_group_metric_set` method has arguments similar to the Dashboard constructor, except that the sensitive features are passed as a dictionary (to ensure that names are available), and we must specify the type of prediction. 
Note that we use the `predictions_dominant_ids` dictionary we just created: ``` sf = { 'sex': A_test.sex, 'race': A_test.race } from fairlearn.metrics._group_metric_set import _create_group_metric_set dash_dict = _create_group_metric_set(y_true=y_test, predictions=predictions_dominant_ids, sensitive_features=sf, prediction_type='binary_classification') ``` <a id="DashboardUpload"></a> ### Uploading the Dashboard Now, we import our `contrib` package which contains the routine to perform the upload: ``` from azureml.contrib.fairness import upload_dashboard_dictionary, download_dashboard_by_upload_id ``` Now we can create an Experiment, then a Run, and upload our dashboard to it: ``` exp = Experiment(ws, "Test_Fairlearn_GridSearch_Census_Demo") print(exp) run = exp.start_logging() try: dashboard_title = "Dominant Models from GridSearch" upload_id = upload_dashboard_dictionary(run, dash_dict, dashboard_name=dashboard_title) print("\nUploaded to id: {0}\n".format(upload_id)) downloaded_dict = download_dashboard_by_upload_id(run, upload_id) finally: run.complete() ``` The dashboard can be viewed in the Run Details page. Finally, we can verify that the dashboard dictionary which we downloaded matches our upload: ``` print(dash_dict == downloaded_dict) ``` <a id="Conclusion"></a> ## Conclusion In this notebook we have demonstrated how to use the `GridSearch` algorithm from Fairlearn to generate a collection of models, and then present them in the fairness dashboard in Azure Machine Learning Studio. Please remember that this notebook has not attempted to discuss the many considerations which should be part of any approach to unfairness mitigation. The [Fairlearn website](http://fairlearn.github.io/) provides that discussion
github_jupyter
``` import cv2 import numpy as np import matplotlib.pyplot as plt ``` # Data Base Generation ### Basic Frame Capture ``` ## This is just an example to ilustrate how to display video from webcam## vid = cv2.VideoCapture(0) # define a video capture object status = True # Initalize status while(status): # Iterate while status is true, that is while there is a frame being captured status, frame = vid.read() # Capture the video frame by frame, returns status (Boolean) and frame (numpy.ndarray) cv2.imshow('frame', frame) # Display the resulting frame ## Exit if user presses q ## if cv2.waitKey(1) & 0xFF == ord('q'): break vid.release() # After the loop release the cap object cv2.destroyAllWindows() # Destroy all the windows ``` ### Create Screenshots off of Video ``` ## This is just an example to ilustrate how to capture frames from webcam ## path = "Bounding_box" # Name of folder where information will be stored frame_id = 0 # Id of image vid = cv2.VideoCapture(0) # define a video capture object status = True # Initalize status while(status): # Iterate while status is True status, frame = vid.read() # Capture the video frame by frame cv2.imshow('frame', frame) # Display the resulting frame wait_key=cv2.waitKey(1) & 0xFF # Save Waitkey object in variable since we will use it multiple times if wait_key == ord('a'): # If a is pressed name ="eye"+str(frame_id)+'.jpg' name = path + "\\" + name # Set name and path cv2.imwrite(name, frame) # Save image frame_id += 1 # Incremente frame_id elif wait_key == ord('q'): # If user press "q" break # Exit from while Loop vid.release() # After the loop release the cap object cv2.destroyAllWindows() # Destroy all the windows ``` ## Use Haar Cascade to detect objects ``` ## This is just an example to ilustrate how to use Haar Cascades in order to detect objects (LIVE) ## face = cv2.CascadeClassifier('Haarcascade/haarcascade_frontalface_default.xml') # Face Haar Cascade loading eye = cv2.CascadeClassifier('Haarcascade/haarcascade_eye.xml') # Eye Haar Cascade Loading path = "Bounding_box" # Path to Store Photos frame_id = 0 # Frame Id vid = cv2.VideoCapture(0) # Define a video capture object status = True # Initalize status while(status): status, frame = vid.read() # Capture the video frame by frame gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Convert to gray scale face_info = face.detectMultiScale(gray, 1.3, 5) # Get face infromation for (x,y,w,h) in face_info: # Iterate over this information cv2.rectangle(frame,(x,y),(x+w,y+h),(255,255,0),1) # Draw rectangle cropped_face = gray[y:y+h, x:x+w] # Crop face eye_info = eye.detectMultiScale(gray) # Get info of eyes for (ex,ey,ew,eh) in eye_info: # Iterate over eye information cv2.rectangle(frame,(ex,ey),(ex+ew,ey+eh),(0,255,0),2) # Draw over eye information cv2.imshow('frame', frame) # Display the resulting frame wait_key = cv2.waitKey(1) & 0xFF # Store Waitkey object if wait_key == ord('a'): # If a is pressed name = "eye"+str(frame_id)+'.jpg' # Set name name = path + "\\" + name # Add path cv2.imwrite(name, frame) # Set photo frame_id += 1 # Increment frame id elif wait_key == ord('q'): # If q is pressed break # Break while loop vid.release() # After the loop release the cap object cv2.destroyAllWindows() # Destroy all the windows ``` ## Capture face gestures ``` ## This is just an example to ilustrate how to use Haar Cascades in order to detect objects (LIVE) ## face = cv2.CascadeClassifier('Haarcascade/haarcascade_frontalface_default.xml') # Face Haar Cascade loading eye = 
cv2.CascadeClassifier('Haarcascade/haarcascade_eye.xml') # Eye Haar Cascade Loading path = "Bounding_box" # Path to Store Photos frame_id = 0 # Frame Id vid = cv2.VideoCapture(0) # Define a video capture object status = True # Initialize status while(status): status, frame = vid.read() # Capture the video frame by frame gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Convert to gray scale face_info = face.detectMultiScale(gray, 1.3, 5) # Get face information for (x,y,w,h) in face_info: # Iterate over this information cv2.rectangle(frame,(x,y),(x+w,y+h),(255,255,0),1) # Draw rectangle cropped_face_color = frame[y:y+h, x:x+w] # Crop face (color) cv2.imshow('frame', frame) # Display the resulting frame wait_key = cv2.waitKey(1) & 0xFF # Store Waitkey object if wait_key == ord('a'): # If a is pressed name = "eye"+str(frame_id)+'.jpg' # Set name name = path + "\\" + name # Add path cv2.imwrite(name, cropped_face_color) # Save photo frame_id += 1 # Increment frame id elif wait_key == ord('q'): # If q is pressed break # Break while loop vid.release() # After the loop release the cap object cv2.destroyAllWindows() # Destroy all the windows ```
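A common refinement of the loops above is to search for eyes only inside each detected face rather than across the whole frame, which reduces false positives. The single-frame sketch below shows the coordinate offsetting this requires; it assumes the same cascade files and `Bounding_box` folder as the cells above, and the output file name is made up.

```
import cv2

face = cv2.CascadeClassifier('Haarcascade/haarcascade_frontalface_default.xml')
eye = cv2.CascadeClassifier('Haarcascade/haarcascade_eye.xml')

vid = cv2.VideoCapture(0)           # grab a single frame instead of looping
status, frame = vid.read()
vid.release()

if status:
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    for (x, y, w, h) in face.detectMultiScale(gray, 1.3, 5):
        face_roi = gray[y:y+h, x:x+w]                       # restrict the eye search to the face
        for (ex, ey, ew, eh) in eye.detectMultiScale(face_roi):
            # offset eye coordinates from the ROI back into the full frame
            cv2.rectangle(frame, (x+ex, y+ey), (x+ex+ew, y+ey+eh), (0, 255, 0), 2)
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 0), 1)
    cv2.imwrite("Bounding_box/face_with_eyes.jpg", frame)   # hypothetical output name
```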
github_jupyter
# CTW dataset tutorial (Part 1: basics) Hello, welcome to the tutorial of _Chinese Text in the Wild_ (CTW) dataset. In this tutorial, we will show you: 1. [Basics](#CTW-dataset-tutorial-(Part-1:-Basics) - [The structure of this repository](#The-structure-of-this-repository) - [Dataset split](#Dataset-Split) - [Download images and annotations](#Download-images-and-annotations) - [Annotation format](#Annotation-format) - [Draw annotations on images](#Draw-annotations-on-images) - [Appendix: Adjusted bounding box conversion](#Appendix:-Adjusted-bounding-box-conversion) 2. Classification baseline - Train classification model - Results format and evaluation API - Evaluate your classification model 3. Detection baseline - Train detection model - Results format and evaluation API - Evaluate your classification model Our homepage is https://ctwdataset.github.io/, you may find some more useful information from that. If you don't want to run the baseline code, please jump to [Dataset split](#Dataset-Split) and [Annotation format](#Annotation-format) sections. Notes: > This notebook MUST be run under `$CTW_ROOT/tutorial`. > > All the code SHOULD be run with `Python>=3.4`. We make it compatible with `Python>=2.7` with best effort. > > The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in [RFC 2119](https://tools.ietf.org/html/rfc2119). ## The structure of this repository Our git repository is `[email protected]:yuantailing/ctw-baseline.git`, which you can browse from [GitHub](https://github.com/yuantailing/ctw-baseline). There are several directories under `$CTW_ROOT`. - **tutorial/**: this tutorial - **data/**: download and place images and annotations - **prepare/**: prepare dataset splits - **classification/**: classification baselines using [TensorFlow](https://www.tensorflow.org/) - **detection/**: a detection baseline using [YOLOv2](https://pjreddie.com/darknet/yolo/) - **judge/**: evaluate testing results and draw results and statistics - **pythonapi/**: APIs to traverse annotations, to evaluate results, and for common use - **cppapi/**: a faster implementation to detection AP evaluation - **codalab/**: which we run on [CodaLab](https://competitions.codalab.org/competitions/?q=CTW) (our evaluation server) - **ssd/**: a detection method using [SSD](https://github.com/weiliu89/caffe/tree/ssd) Most of the above directories have some similar structures. - **\*/settings.py**: configure directory of images, file path to annotations, and dedicated configurations for each step - **\*/products/**: store temporary files, logs, middle products, and final products - **\*/pythonapi**: a symbolic link to `pythonapi/`, in order to use Python API more conveniently Most of the code is written in Python, while some code is written in C++, Shell, etc. All the code is purposed to run in subdirectories, e.g., it's correct to execute `cd $CTW_ROOT/detection && python3 train.py`, and it's incorrect to execute `cd $CTW_ROOT && python3 detection/train.py`. All our code won't create or modify any files out of `$CTW_ROOT` (except `/tmp/`), and don't need a privilege elevation (except for running docker workers on the evaluation server). You SHOULD install requirements before you run our code. 
- git>=1 - Python>=3.4 - Jupyter notebook>=5.0 - gcc>=5 - g++>=5 - CUDA driver - CUDA toolkit>=8.0 - CUDNN>=6.0 - OpenCV>=3.0 - requirements listed in `$CTW_ROOT/requirements.txt` Recommonded hardware requirements: - RAM >= 32GB - GPU memory >= 12 GB - Hard Disk free space >= 200 GB - CPU logical cores >= 8 - Network connection ## Dataset Split We split the dataset into 4 parts: 1. Training set (~75%) For each image in training set, the annotation contains a lot of lines, while each lines contains some character instances. Each character instance contains: - its underlying character, - its bounding box (polygon), - and 6 attributes. Only Chinese character instances are completely annotated, non-Chinese characters (e.g., ASCII characters) are partially annotated. Some ignore regions are annotated, which contain character instances that cannot be recognized by human (e.g., too small, too fuzzy). We will show the annotation format in [next sections](#Annotation-format). 2. Validation set (~5%) Annotations in validation set is the same as that in training set. The split between training set and validation set is only a recommendation. We make no restriction on how you split them. To enlarge training data, you MAY use TRAIN+VAL to train your models. 3. Testing set for classification (~10%) For this testing set, we make images and annotated bounding boxes publicly available. Underlying character, attributes and ignored regions are not avaliable. To evaluate your results on testing set, please visit our evaluation server. 4. Testing set for detection (~10%) For this testing set, we make images public. To evaluate your results on testing set, please visit our evaluation server. Notes: > You MUST NOT use annotations of testing set to fine tune your models or hyper-parameters. (e.g. use annotations of classification testing set to fine tune your detection models) > > You MUST NOT use evaluation server to fine tune your models or hyper-parameters. ## Download images and annotations Visit our homepage (https://ctwdataset.github.io/) and gain access to the dataset. 1. Clone our git repository. ```sh $ git clone [email protected]:yuantailing/ctw-baseline.git ``` 1. Download images, and unzip all the images to `$CTW_ROOT/data/all_images/`. For image file path, both `$CTW_ROOT/data/all_images/0000001.jpg` and `$CTW_ROOT/data/all_images/any/path/0000001.jpg` are OK, do not modify file name. 1. Download annotations, and unzip it to `$CTW_ROOT/data/annotations/downloads/`. ```sh $ mkdir -p ../data/annotations/downloads && tar -xzf /path/to/ctw-annotations.tar.gz -C../data/annotations/downloads ``` 1. In order to run evaluation and analysis code locally, we will use validation set as testing sets in this tutorial. ```sh $ cd ../prepare && python3 fake_testing_set.py ``` If you propose to train your model on TRAIN+VAL, you can execute `cp ../data/annotations/downloads/* ../data/annotations/` instead of running the above code. But you will not be able to run evaluation and analysis code locally, just submit the results to our evaluation server. 1. Create symbolic links for TRAIN+VAL (`$CTW_ROOT/data/images/trainval/`) and TEST(`$CTW_ROOT/data/images/test/`) set, respectively. ```sh $ cd ../prepare && python3 symlink_images.py ``` ## Annotation format In this section, we will show you: - Overall information format - Training set annotation format - Classification testing set format We will display some examples in the next section. 
#### Overall information format Overall information file (`../data/annotations/info.json`) is UTF-8 (no BOM) encoded [JSON](https://www.json.org/). The data struct for this information file is described below. ``` information: { train: [image_meta_0, image_meta_1, image_meta_2, ...], val: [image_meta_0, image_meta_1, image_meta_2, ...], test_cls: [image_meta_0, image_meta_1, image_meta_2, ...], test_det: [image_meta_0, image_meta_1, image_meta_2, ...], } image_meta: { image_id: str, file_name: str, width: int, height: int, } ``` `train`, `val`, `test_cls`, `test_det` keys denote to training set, validation set, testing set for classification, testing set for detection, respectively. The resolution of each image is always $2048 \times 2048$. Image ID is a 7-digits string, the first digit of image ID indicates the camera orientation in the following rule. - '0': back - '1': left - '2': front - '3': right The `file_name` filed doesn't contain directory name, and is always `image_id + '.jpg'`. #### Training set annotation format All `.jsonl` annotation files (e.g. `../data/annotations/train.jsonl`) are UTF-8 encoded [JSON Lines](http://jsonlines.org/), each line is corresponding to the annotation of one image. The data struct for each of the annotations in training set (and validation set) is described below. ``` annotation (corresponding to one line in .jsonl): { image_id: str, file_name: str, width: int, height: int, annotations: [sentence_0, sentence_1, sentence_2, ...], # MUST NOT be empty ignore: [ignore_0, ignore_1, ignore_2, ...], # MAY be an empty list } sentence: [instance_0, instance_1, instance_2, ...] # MUST NOT be empty instance: { polygon: [[x0, y0], [x1, y1], [x2, y2], [x3, y3]], # x, y are floating-point numbers text: str, # the length of the text MUST be exactly 1 is_chinese: bool, attributes: [attr_0, attr_1, attr_2, ...], # MAY be an empty list adjusted_bbox: [xmin, ymin, w, h], # x, y, w, h are floating-point numbers } attr: "occluded" | "bgcomplex" | "distorted" | "raised" | "wordart" | "handwritten" ignore: { polygon: [[x0, y0], [x1, y1], [x2, y2], [x3, y3]], bbox: [xmin, ymin, w, h], ] ``` Original bounding box annotations are polygons, we will describe how `polygon` is converted to `adjusted_bbox` in [appendix](#Appendix:-Adjusted-bounding-box-conversion). Notes: > The order of lines are not guaranteed to be consistent with `info.json`. > > A polygon MUST be a quadrangle. > > All characters in `CJK Unified Ideographs` are considered to be Chinese, while characters in `ASCII` and `CJK Unified Ideographs Extension`(s) are not. > > Adjusted bboxes of character `instance`s MUST be intersected with the image, while bboxes of `ignore` regions may not. > > Some logos on the camera car (e.g., "`腾讯街景地图`" in `2040368.jpg`) and licence plates are ignored to avoid bias. #### Classification testing set format The data struct for each of the annotations in classification testing set is described below. ``` annotation: { image_id: str, file_name: str, width: int, height: int, proposals: [proposal_0, proposal_1, proposal_2, ...], } proposal: { polygon: [[x0, y0], [x1, y1], [x2, y2], [x3, y3]], adjusted_bbox: [xmin, ymin, w, h], } ``` Notes: > The order of `image_id` in each line are not guaranteed to be consistent with `info.json`. > > Non-Chinese characters (e.g., ASCII characters) MUST NOT appear in proposals. 
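As a quick illustration of the `image_id` convention above, a tiny helper (ours, not part of the dataset's `pythonapi`) decodes the camera orientation from the first digit:

```
# Decode the camera orientation encoded in the first digit of an image_id,
# following the rule listed in the overall information format above.
ORIENTATIONS = {'0': 'back', '1': 'left', '2': 'front', '3': 'right'}

def camera_orientation(image_id):
    return ORIENTATIONS[image_id[0]]

print(camera_orientation('2040368'))   # 'front' (image id taken from the notes above)
```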
``` from __future__ import print_function from __future__ import unicode_literals import json import pprint import settings from pythonapi import anno_tools print('Image meta info format:') with open(settings.DATA_LIST) as f: data_list = json.load(f) pprint.pprint(data_list['train'][0]) print('Training set annotation format:') with open(settings.TRAIN) as f: anno = json.loads(f.readline()) pprint.pprint(anno, depth=3) print('Character instance format:') pprint.pprint(anno['annotations'][0][0]) print('Traverse character instances in an image') for instance in anno_tools.each_char(anno): print(instance['text'], end=' ') print() print('Classification testing set format') with open(settings.TEST_CLASSIFICATION) as f: anno = json.loads(f.readline()) pprint.pprint(anno, depth=2) print('Classification testing set proposal format') pprint.pprint(anno['proposals'][0]) ``` ## Draw annotations on images In this section, we will draw annotations on images. This would help you to understand the format of annotations. We show polygon bounding boxes of Chinese character instances in **<span style="color: #0f0;">green</span>**, non-Chinese character instances in **<span style="color: #f00;">red</span>**, and ignore regions in **<span style="color: #ff0;">yellow</span>**. ``` import cv2 import json import matplotlib.patches as patches import matplotlib.pyplot as plt import os import settings from pythonapi import anno_tools %matplotlib inline with open(settings.TRAIN) as f: anno = json.loads(f.readline()) path = os.path.join(settings.TRAINVAL_IMAGE_DIR, anno['file_name']) assert os.path.exists(path), 'file not exists: {}'.format(path) img = cv2.imread(path) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) plt.figure(figsize=(16, 16)) ax = plt.gca() plt.imshow(img) for instance in anno_tools.each_char(anno): color = (0, 1, 0) if instance['is_chinese'] else (1, 0, 0) ax.add_patch(patches.Polygon(instance['polygon'], fill=False, color=color)) for ignore in anno['ignore']: color = (1, 1, 0) ax.add_patch(patches.Polygon(ignore['polygon'], fill=False, color=color)) plt.show() ``` ## Appendix: Adjusted bounding box conversion In order to create a tighter bounding box to character instances, we compute `adjusted_bbox` in following steps, instead of use the real bounding box. 1. Take trisections for each edge of the polygon. (<span style="color: #f00;">red points</span>) 2. Compute the bouding box of above points. (<span style="color: #00f;">blue rectangles</span>) Adjusted bounding box is better than the real bounding box, especially for sharp polygons. 
``` from __future__ import division import collections import matplotlib.patches as patches import matplotlib.pyplot as plt %matplotlib inline def poly2bbox(poly): key_points = list() rotated = collections.deque(poly) rotated.rotate(1) for (x0, y0), (x1, y1) in zip(poly, rotated): for ratio in (1/3, 2/3): key_points.append((x0 * ratio + x1 * (1 - ratio), y0 * ratio + y1 * (1 - ratio))) x, y = zip(*key_points) adjusted_bbox = (min(x), min(y), max(x) - min(x), max(y) - min(y)) return key_points, adjusted_bbox polygons = [ [[2, 1], [11, 2], [12, 18], [3, 16]], [[21, 1], [30, 5], [31, 19], [22, 14]], ] plt.figure(figsize=(10, 6)) plt.xlim(0, 35) plt.ylim(0, 20) ax = plt.gca() for polygon in polygons: color = (0, 1, 0) ax.add_patch(patches.Polygon(polygon, fill=False, color=(0, 1, 0))) key_points, adjusted_bbox = poly2bbox(polygon) ax.add_patch(patches.Rectangle(adjusted_bbox[:2], *adjusted_bbox[2:], fill=False, color=(0, 0, 1))) for kp in key_points: ax.add_patch(patches.Circle(kp, radius=0.1, fill=True, color=(1, 0, 0))) plt.show() ```
github_jupyter
``` %load_ext autoreload %autoreload 2 import os import datetime import numpy as np import scipy import pandas as pd import torch from torch import nn import criscas from criscas.utilities import create_directory, get_device, report_available_cuda_devices from criscas.predict_model import * base_dir = os.path.abspath('..') base_dir ``` ### Read sample data ``` seq_df = pd.read_csv(os.path.join(base_dir, 'sample_data', 'abemax_sampledata.csv'), header=0) seq_df ``` The models expect sequences (i.e. target sites) to be wrapped in a `pandas.DataFrame` with a header that includes `ID` of the sequence and `seq` columns. The sequences should be of length 20 (i.e. 20 bases) and represent the protospacer target site. ``` # create a directory where we dump the predictions of the models csv_dir = create_directory(os.path.join(base_dir, 'sample_data', 'predictions')) ``` ### Specify device (i.e. CPU or GPU) to run the models on Specify device to run the model on. The models can run on `GPU` or `CPU`. We can instantiate a device by running `get_device(to_gpu,gpu_index)` function. - To run on GPU we pass `to_gpu = True` and specify which card to use if we have multiple cards `gpu_index=int` (i.e. in case we have multiple GPU cards we specify the index counting from 0). - If there is no GPU installed, the function will return a `CPU` device. We can get a detailed information on the GPU cards installed on the compute node by calling `report_available_cuda_devices` function. ``` report_available_cuda_devices() # instantiate a device using the only one available :P device = get_device(True, 0) device ``` ### Create a BE-DICT model by sepcifying the target base editor We start `BE-DICT` model by calling `BEDICT_CriscasModel(base_editor, device)` where we specify which base editor to use (i.e. `ABEmax`, `BE4max`, `ABE8e`, `Target-AID`) and the `device` we create earlier to run on. ``` base_editor = 'ABEmax' bedict = BEDICT_CriscasModel(base_editor, device) ``` We generate predictions by calling `predict_from_dataframe(seq_df)` where we pass the data frame wrapping the target sequences. The function returns two objects: - `pred_w_attn_runs_df` which is a data frame that contains predictions per target base and the attentions scores across all positions. - `proc_df` which is a data frame that represents the processed sequence data frame we passed (i.e. `seq_df`) ``` pred_w_attn_runs_df, proc_df = bedict.predict_from_dataframe(seq_df) ``` `pred_w_attn_runs_df` contains predictions from 5 trained models for `ABEmax` base editor (we have 5 runs trained per base editor). For more info, see our [paper](https://www.biorxiv.org/content/10.1101/2020.07.05.186544v1) on biorxiv. Target positions in the sequence reported in `base_pos` column in `pred_w_attn_runs_df` uses 0-based indexing (i.e. 0-19) ``` pred_w_attn_runs_df proc_df ``` Given that we have 5 predictions per sequence, we can further reduce to one prediction by either `averaging` across all models, or taking the `median` or `max` prediction based on the probability of editing scores. For this we use `select_prediction(pred_w_attn_runs_df, pred_option)` where `pred_w_attn_runs_df` is the data frame containing predictions from 5 models for each sequence. `pred_option` can be assume one of {`mean`, `median`, `max`}. ``` pred_option = 'mean' pred_w_attn_df = bedict.select_prediction(pred_w_attn_runs_df, pred_option) pred_w_attn_df ``` We can dump the prediction results on a specified directory on disk. 
We will dump the predictions with all 5 runs `pred_w_attn_runs_df` and the one average across runs `pred_w_attn_df`. Under `sample_data` directory we will have the following tree: <pre> sample_data └── predictions ├── predictions_allruns.csv └── predictions_predoption_mean.csv </pre> ``` pred_w_attn_runs_df.to_csv(os.path.join(csv_dir, f'predictions_allruns.csv')) pred_w_attn_df.to_csv(os.path.join(csv_dir, f'predictions_predoption_{pred_option}.csv')) ``` ### Generate attention plots We can generate attention plots for the prediction of each target base in the sequence using `highlight_attn_per_seq` method that takes the following arguments: - `pred_w_attn_runs_df`: data frame that contains model's predictions (5 runs) for each target base of each sequence (see above). - `proc_df`: data frame that represents the processed sequence data frame we passed (i.e. seq_df) - `seqid_pos_map`: dictionary `{seq_id:list of positions}` where `seq_id` is the ID of the target sequence, and list of positions that we want to generate attention plots for. Users can specify a `position from 1 to 20` (i.e. length of protospacer sequence) - `pred_option`: selection option for aggregating across 5 models' predictions. That is we can average the predictions across 5 runs, or take `max`, `median`, `min` or `None` (i.e. keep all 5 runs) - `apply_attnscore_filter`: boolean (`True` or `False`) to further apply filtering on the generated attention scores. This filtering allow to plot only predictions where the associated attention scores have a maximum that is >= 3 times the base attention score value <=> (3 * 1/20) - `fig_dir`: directory where to dump the generated plots or `None` (to return the plots inline) ``` # create a dictionary to specify target sequence and the position we want attention plot for # we are targeting position 5 in the sequence seqid_pos_map = {'CTRL_HEKsiteNO1':[5], 'CTRL_HEKsiteNO2':[5]} pred_option = 'mean' apply_attn_filter = False bedict.highlight_attn_per_seq(pred_w_attn_runs_df, proc_df, seqid_pos_map=seqid_pos_map, pred_option=pred_option, apply_attnscore_filter=apply_attn_filter, fig_dir=None) ``` We can save the plots on disk without returning them by specifing `fig_dir` ``` # create a dictionary to specify target sequence and the position I want attention plot for # we are targeting position 5 in the sequence seqid_pos_map = {'CTRL_HEKsiteNO1':[5], 'CTRL_HEKsiteNO2':[5]} pred_option = 'mean' apply_attn_filter = False fig_dir = create_directory(os.path.join(base_dir, 'sample_data', 'fig_dir')) bedict.highlight_attn_per_seq(pred_w_attn_runs_df, proc_df, seqid_pos_map=seqid_pos_map, pred_option=pred_option, apply_attnscore_filter=apply_attn_filter, fig_dir=create_directory(os.path.join(fig_dir, pred_option))) ``` We will generate the following files: <pre> sample_data ├── abemax_sampledata.csv ├── fig_dir │   └── mean │   ├── ABEmax_seqattn_CTRL_HEKsiteNO1_basepos_5_predoption_mean.pdf │   └── ABEmax_seqattn_CTRL_HEKsiteNO2_basepos_5_predoption_mean.pdf └── predictions ├── predictions_allruns.csv └── predictions_predoption_mean.csv </pre> Similarly we can change the other arguments such as `pred_option` `apply_attnscore_filter` and so on to get different filtering options - We leave this as an exercise for the user/reader :D
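For reference, the attention-score filter described above can be sketched in isolation: with 20 protospacer positions the uniform baseline attention is 1/20, and the filter keeps only predictions whose maximum attention score is at least three times that baseline. This is an illustration of the rule as described, not the package's internal implementation.

```
import numpy as np

seq_len = 20
baseline = 1.0 / seq_len          # uniform attention over the protospacer
threshold = 3 * baseline          # filter level described above (0.15)

# Hypothetical attention distributions for two predictions (each sums to 1)
attn_flat = np.full(seq_len, baseline)                      # no position stands out
attn_peaked = np.full(seq_len, (1 - 0.4) / (seq_len - 1))   # most mass on one position
attn_peaked[4] = 0.4

for name, attn in [("flat", attn_flat), ("peaked", attn_peaked)]:
    keep = attn.max() >= threshold
    print(f"{name}: max attention = {attn.max():.3f}, keep = {keep}")
```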
github_jupyter
# Procedure for Word Correction Strategy as mentioned in Page 43 in the dissertation report ``` import numpy as np import pandas as pd import os import nltk import re import string from bs4 import BeautifulSoup from spellchecker import SpellChecker def read_file(df_new): print("Started extracting data from file",df_new.shape) dfnew=pd.DataFrame() dfnew.insert(0,'Post',None) dfnew.insert(1,'class',None) for val in df_new.values: appList=[] sp=np.array_str(val).split(",") if len(sp)==2: appList.append(sp[0]) appList.append(sp[1]) dfnew.loc[len(dfnew)]=appList for i in range(0,dfnew.shape[0]): dfnew.values[i][1]=int(dfnew.values[i][1].strip("\'|]|\"")) print(dfnew['class'].value_counts()) print("Finished extracting data from file",dfnew.shape) return dfnew def post_tokenizing_dataset1(df): print("Started cleaning data in dataframe", df.shape) #print(df.head(5)) wpt = nltk.WordPunctTokenizer() stop_words = nltk.corpus.stopwords.words('english') token_list=[] phrase_list=[] token_df=pd.DataFrame() token_df.insert(0,'Post',None) token_df.insert(1,'class',None) for val in df.values: append_list=[] filter_val=re.sub(r'Q:','',val[0]) filter_val=re.sub(r'&#039;[a-z]{1}','',filter_val) filter_val=re.sub('<[a-z]+>',' ',filter_val).lower() filter_val=re.sub(r'[^a-zA-Z\s]', '', filter_val, re.I|re.A) filter_val=[token for token in wpt.tokenize(filter_val)] filter_val=[word for word in filter_val if word.isalpha()] if(filter_val): append_list.append(' '.join(filter_val)) append_list.append(val[1]) token_df.loc[len(token_df)]=append_list print("Finished cleaning data in dataframe",token_df.shape) #print(token_df.head(5)) return token_df def post_tokenizing_dataset3(df): print("Started cleaning data in dataframe", df.shape) #print(df.head(5)) wpt = nltk.WordPunctTokenizer() stop_words = nltk.corpus.stopwords.words('english') token_df=pd.DataFrame() token_df.insert(0,'Post',None) token_df.insert(1,'class',None) for val in df.values: filter_val=[] value=re.sub(r'@\w*','',val[0]) value=re.sub(r'&.*;','',value) value=re.sub(r'http[s?]?:\/\/.*[\r\n]*','',value) tokens=[token for token in wpt.tokenize(value)] tokens=[word for word in tokens if word.isalpha()] if len(tokens)!=0: filter_val.append(' '.join(tokens).lower()) filter_val.append(val[1]) token_df.loc[len(token_df)]=filter_val print("Finished cleaning data in dataframe",token_df.shape) #print(token_df.head(5)) return token_df def correct_words(token_df_copy,badWordsDict): spell = SpellChecker() token_df_ones=token_df_copy[token_df_copy['class']==1] post_list=[] for val in token_df_ones.values: post_list.append(val[0]) count=0 val_counts=token_df_copy['class'].value_counts() print(val_counts[0],val_counts[0]+val_counts[1]) for val in range(val_counts[0],val_counts[0]+val_counts[1]): sentiment=token_df_copy.loc[val][1] if sentiment==1: post=post_list[count] for word in post.split(' '): misspelled = spell.unknown([word]) for value in misspelled: get_list=badWordsDict.get(word[0]) if(get_list): candi_list=spell.candidates(word) list3 = list(set(get_list)&set(candi_list)) if list3: post=[w.replace(word, list3[0]) for w in post.split()] post=' '.join(post) break token_df_copy.loc[val][0]=post count+=1 print(count) token_df_copy.to_csv("cor.csv",index=False, header=True) return token_df_copy wordList=[] for val in string.ascii_lowercase: with open("../swear-words/"+val+".html") as fp: soup = BeautifulSoup(fp) wordSet=soup.find_all('table')[2]('b') for i in range(0,len(wordSet)-1): wordList.append(wordSet[i].string) badWordsDict={} for val in wordList: if not 
badWordsDict.get(val[0]): badWordsDict[val[0]]=[] badWordsDict.get(val[0]).append(val) df_data_1=read_file(pd.read_csv("../post.csv",sep="\t")) df_data_2=read_file(pd.read_csv("../new_data.csv",sep=",")) df_data_3=pd.read_csv("../dataset_4.csv",sep=",") df_data_1=post_tokenizing_dataset1(df_data_1) token_data_2=df_data_2[df_data_2['class']==1].iloc[:,] token_data_2=post_tokenizing_dataset1(token_data_2) token_data_3=df_data_3[df_data_3['class']==1].iloc[0:3147,] token_data_3=post_tokenizing_dataset3(token_data_3) token_data_3=post_tokenizing_dataset3(token_data_3) print(df_data_2['class'].value_counts()) df_data_1_new=pd.DataFrame() df_data_1_new=df_data_1_new.append(df_data_1[df_data_1['class']==0].iloc[0:7500,],ignore_index=True) df_data_1_new=df_data_1_new.append(df_data_1[df_data_1['class']==1],ignore_index=True) df_data_1_new=df_data_1_new.append(token_data_2 ,ignore_index=True) df_data_1_new=df_data_1_new.append(token_data_3,ignore_index=True) token_df_2=df_data_1_new.copy() token_df_new=correct_words(token_df_2,badWordsDict) token_df_new.to_csv("corrected_post_2.csv",index=False, header=True) df_data_1_new.to_csv("without_correction_2.csv",index=False, header=True) print(token_df_2.loc[11859]) dfnew=pd.DataFrame() dfnew.insert(0,'Post',None) dfnew.insert(1,'class',None) for val in token_df_new.values: value=re.sub(r'http[a-z0-9]*[\r\n\s]?','',val[0]) dfnew.loc[len(dfnew)]=value dfnew['class']=token_df_new['class'] dfnew.to_csv("without_correction_3.csv",index=False, header=True) ```
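The core idea of `correct_words` above (replace a misspelled token only when one of pyspellchecker's candidate corrections also appears in the curated word list, bucketed by first letter) can be shown in isolation. The sketch below uses ordinary words as the target list purely to demonstrate the mechanism; it is not the dissertation's actual list.

```
from spellchecker import SpellChecker

spell = SpellChecker()

# Stand-in for badWordsDict above: first letter -> list of target words
target_dict = {"r": ["receive", "return"], "d": ["definite", "delete"]}

def correct_post(post):
    words = post.split()
    for i, word in enumerate(words):
        if word in spell.unknown([word]):              # only touch misspelled tokens
            bucket = target_dict.get(word[0], [])
            candidates = spell.candidates(word) or set()
            match = set(bucket) & set(candidates)      # candidate corrections that hit the list
            if match:
                words[i] = sorted(match)[0]
    return " ".join(words)

print(correct_post("please recieve my definate answer"))
# expected: "please receive my definite answer" (assuming pyspellchecker proposes these candidates)
```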
github_jupyter
``` from extra import * import keras from keras.datasets import mnist from keras.models import Sequential, Model from keras import regularizers from keras.layers import Dense, Dropout, Conv2D, Input, GlobalAveragePooling2D, GlobalMaxPooling2D from keras.layers import Add, Concatenate, BatchNormalization import keras.backend as K from keras.optimizers import Adam import pandas as pd import numpy as np %matplotlib inline from matplotlib import pyplot as plt batch_size = 128 num_classes = 10 # input image dimensions HEIGHT, WIDTH = 28, 28 K.set_image_data_format('channels_first') keras.__version__ (x_train, y_train), (x_test, y_test) = mnist.load_data() print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') print('pixel range',x_train.min(), x_train.max()) ``` images are as pixel values, ranging from 0-255 ``` pd.DataFrame(y_train)[0].value_counts().plot(kind='bar') ## changes pixel range to 0 to 1 def normalize(images): images /= 255. return images x_train = normalize(x_train.astype(np.float32)) x_test = normalize(x_test.astype(np.float32)) x_train = x_train.reshape(x_train.shape[0], 1, WIDTH, HEIGHT) x_test = x_test.reshape(x_test.shape[0], 1, WIDTH, HEIGHT) y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) ``` now we have images that are normalized, and labels are one hot encoded ``` def show_images(rows, columns): fig, axes = plt.subplots(rows,columns) for rows in axes: for ax in rows: idx = np.random.randint(0, len(y_train)) ax.title.set_text(np.argmax(y_train[idx])) ax.imshow(x_train[idx][0], cmap='gray') ax.axis('off') plt.show() show_images(2,4) def build_model(): inp = Input((1, HEIGHT, WIDTH)) x = Conv2D(16, kernel_size=(7,7), strides=(2,2), padding='same', activation='relu', kernel_regularizer=regularizers.l2(0.002))(inp) x = BatchNormalization()(x) y = Conv2D(16, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu', kernel_regularizer=regularizers.l2(0.002))(x) y = BatchNormalization()(y) y = Conv2D(16, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu', kernel_regularizer=regularizers.l2(0.002))(y) y = BatchNormalization()(y) x = Add()([x,y]) x = Conv2D(32, kernel_size=(3,3), strides=(2,2), padding='same', activation='relu', kernel_regularizer=regularizers.l2(0.002))(x) x = BatchNormalization()(x) y = Conv2D(32, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu', kernel_regularizer=regularizers.l2(0.002))(x) y = BatchNormalization()(y) y = Conv2D(32, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu', kernel_regularizer=regularizers.l2(0.002))(y) y = BatchNormalization()(y) x = Add()([x,y]) x = Conv2D(64, kernel_size=(3,3), strides=(2,2), padding='same', activation='relu', kernel_regularizer=regularizers.l2(0.002))(x) x = BatchNormalization()(x) x = Concatenate()([GlobalMaxPooling2D(data_format='channels_first')(x) , GlobalAveragePooling2D(data_format='channels_first')(x)]) x = Dropout(0.3)(x) out = Dense(10, activation='softmax')(x) return Model(inputs=inp, outputs=out) model = build_model() model.summary() model.compile(Adam(), loss='categorical_crossentropy', metrics=['acc']) K.get_value(model.optimizer.lr), K.get_value(model.optimizer.beta_1) lr_find(model, data=(x_train, y_train)) ## use generator if using generator insted of (x_train, y_train) and pass parameter, generator=True ``` selecting lr as 2e-3 ### high lr for demonstration of decay, from above graph anything b/w 0.002 to 
0.004 seems nice ``` recorder = RecorderCallback() clr = CyclicLRCallback(max_lr=0.4, cycles=4, decay=0.6, DEBUG_MODE=True, patience=1, auto_decay=True, pct_start=0.3, monitor='val_loss') K.get_value(model.optimizer.lr), K.get_value(model.optimizer.beta_1) model.fit(x_train, y_train, batch_size=128, epochs=4, callbacks=[recorder, clr], validation_data=(x_test, y_test)) K.get_value(model.optimizer.lr), K.get_value(model.optimizer.beta_1) recorder.plot_losses() recorder.plot_losses(log=True) #take log scale for loss recorder.plot_losses(clip=True) #clips loss between 2.5 and 97.5 precentile recorder.plot_losses(clip=True, log=True) recorder.plot_lr() recorder.plot_mom() ##plots momentum, beta_1 in adam family of optimizers ```
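The `CyclicLRCallback` used above comes from the local `extra` module, so its exact schedule is not shown here. The self-contained sketch below illustrates one common variant, a triangular cycle whose peak decays each cycle, using the same `max_lr=0.4`, `cycles=4`, `decay=0.6` settings; it is an illustration of the idea, not the callback's actual implementation.

```
import numpy as np
import matplotlib.pyplot as plt

def triangular_clr(iteration, total_iters, max_lr=0.4, cycles=4, decay=0.6, base_lr=1e-4):
    """Triangular cyclical LR: rise then fall within each cycle,
    with the peak shrinking by `decay` every cycle."""
    iters_per_cycle = total_iters / cycles
    cycle = int(iteration // iters_per_cycle)
    pos = (iteration % iters_per_cycle) / iters_per_cycle   # 0..1 within the cycle
    peak = max_lr * (decay ** cycle)
    tri = 1.0 - abs(2.0 * pos - 1.0)                        # 0 -> 1 -> 0
    return base_lr + (peak - base_lr) * tri

total = 4 * (60000 // 128)   # roughly 4 epochs of MNIST at batch size 128
lrs = [triangular_clr(i, total) for i in range(total)]
plt.plot(lrs)
plt.xlabel("iteration")
plt.ylabel("learning rate")
plt.show()
```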
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt %matplotlib inline # a) import sse Lx, Ly = 8, 8 n_updates_measure = 10000 # b) spins, op_string, bonds = sse.init_SSE_square(Lx, Ly) for beta in [0.1, 1., 64.]: op_string = sse.thermalize(spins, op_string, bonds, beta, n_updates_measure//10) ns = sse.measure(spins, op_string, bonds, beta, n_updates_measure) plt.figure() plt.hist(ns, bins=np.arange(len(op_string)+1)) plt.axvline(len(op_string), color='r', ) # mark the length of the operator string plt.xlim(0, len(op_string)*1.1) plt.title("T=1./{beta:.1f}, len of op_string={l:d}".format(beta=beta, l=len(op_string))) plt.xlabel("number of operators $n$") ``` The red bar indicates the size of the operator string after thermalization. These histograms justify that we can fix the length of the operator string `M` (called $n*$ in the lecture notes). Since `M` is automatically chosen as large as needed, we effectively take into account *all* relevant terms of the full series $\sum_{n=0}^\infty$ in the expansion, even if our numerical simulations only use a finite `M`. ``` # c) Ts = np.linspace(2., 0., 20, endpoint=False) betas = 1./Ts Ls = [4, 8, 16] Es_Eerrs = [] for L in Ls: print("="*80) print("L =", L) E = sse.run_simulation(L, L, betas) Es_Eerrs.append(E) plt.figure() for E, L in zip(Es_Eerrs, Ls): plt.errorbar(Ts, E[:, 0], yerr=E[:, 1], label="L={L:d}".format(L=L)) plt.legend() plt.xlim(0, np.max(1./betas)) plt.xlabel("temperature $T$") plt.ylabel("energy $E$ per site") ``` # specific heat ``` # d) def run_simulation(Lx, Ly, betas=[1.], n_updates_measure=10000, n_bins=10): """A full simulation: initialize, thermalize and measure for various betas.""" spins, op_string, bonds = sse.init_SSE_square(Lx, Ly) n_sites = len(spins) n_bonds = len(bonds) Es_Eerrs = [] Cs_Cerrs = [] for beta in betas: print("beta = {beta:.3f}".format(beta=beta), flush=True) op_string = sse.thermalize(spins, op_string, bonds, beta, n_updates_measure//10) Es = [] Cs = [] for _ in range(n_bins): ns = sse.measure(spins, op_string, bonds, beta, n_updates_measure) # energy per site n_mean = np.mean(ns) E = (-n_mean/beta + 0.25*n_bonds) / n_sites Es.append(E) Cv = (np.mean(ns**2) - n_mean - n_mean**2)/ n_sites Cs.append(Cv) E, Eerr = np.mean(Es), np.std(Es)/np.sqrt(n_bins) Es_Eerrs.append((E, Eerr)) C, Cerr = np.mean(Cs), np.std(Cs)/np.sqrt(n_bins) Cs_Cerrs.append((C, Cerr)) return np.array(Es_Eerrs), np.array(Cs_Cerrs) Es_Errs, Cs_Cerrs = run_simulation(8, 8, betas) plt.figure() plt.errorbar(Ts, Cs_Cerrs[:, 0], yerr=Cs_Cerrs[:, 1], label="L={L:d}".format(L=L)) plt.xlim(0, np.max(1./betas)) plt.xlabel("temperature $T$") plt.ylabel("Specific heat $C_v$ per site") ``` ## Interpretation We see the behaviour expected from the previous plot considering $C_v= \partial_T <E> $. However, as $T \rightarrow 0$ or $\beta \rightarrow \infty$ the error of $C_v$ blows up! Looking at the formula $C_v = <n^2> - <n>^2 - <n>$, we see that it consist of larger terms which should cancel to zero. Statistical noise is of the order of the large terms $<n^2>$, hence the relative error in $C_v$ explodes. This is the essential problem of the infamous "sign problem" of quantum monte carlo (QMC): in many models (e.g. in our case of the SSE if we don't have a bipartite lattice) one encounters negative weights for some configurations in the partition function, and a cancelation of different terms. Similar as for the $C_v$ at low temperatures, this often leads to error bars which are often exponentially large in the system size. 
Obviously, phases from a "time evolution" lead to a similar problem. There is no generic solution to circumvent the sign problem (it's NP hard!), but for many specific models, there were actually sign-problem free solutions found. On the other hand, whenever QMC has no sign problem, it is for sure one of the most powerful numerical methods we have. For example, it allows beautiful finite size scaling collapses to extract critical exponents etc. for quantum phase transitions even in 2D or 3D. # Staggered Magnetization ``` # e) def get_staggering(Lx, Ly): stag = np.zeros(Lx*Ly, np.intp) for x in range(Lx): for y in range(Ly): s = sse.site(x, y, Lx, Ly) stag[s] = (-1)**(x+y) return stag def staggered_magnetization(spins, stag): return 0.5*np.sum(spins * stag) def measure(spins, op_string, bonds, stag, beta, n_updates_measure): """Perform a lot of updates with measurements.""" ns = [] ms = [] for _ in range(n_updates_measure): n = sse.diagonal_update(spins, op_string, bonds, beta) m = staggered_magnetization(spins, stag) sse.loop_update(spins, op_string, bonds) ns.append(n) ms.append(m) return np.array(ns), np.array(ms) def run_simulation(Lx, Ly, betas=[1.], n_updates_measure=10000, n_bins=10): """A full simulation: initialize, thermalize and measure for various betas.""" spins, op_string, bonds = sse.init_SSE_square(Lx, Ly) stag = get_staggering(Lx, Ly) n_sites = len(spins) n_bonds = len(bonds) Es_Eerrs = [] Cs_Cerrs = [] Ms_Merrs = [] for beta in betas: print("beta = {beta:.3f}".format(beta=beta), flush=True) op_string = sse.thermalize(spins, op_string, bonds, beta, n_updates_measure//10) Es = [] Cs = [] Ms = [] for _ in range(n_bins): ns, ms = measure(spins, op_string, bonds, stag, beta, n_updates_measure) # energy per site n_mean = np.mean(ns) E = (-n_mean/beta + 0.25*n_bonds) / n_sites Es.append(E) Cv = (np.mean(ns**2) - n_mean - n_mean**2)/ n_sites Cs.append(Cv) Ms.append(np.mean(np.abs(ms))/n_sites) # note that we need the absolute value here! 
# there is a symmetry of flipping all spins which ensures that <Ms> = 0 E, Eerr = np.mean(Es), np.std(Es)/np.sqrt(n_bins) Es_Eerrs.append((E, Eerr)) C, Cerr = np.mean(Cs), np.std(Cs)/np.sqrt(n_bins) Cs_Cerrs.append((C, Cerr)) M, Merr = np.mean(Ms), np.std(Ms)/np.sqrt(n_bins) Ms_Merrs.append((M, Merr)) return np.array(Es_Eerrs), np.array(Cs_Cerrs), np.array(Ms_Merrs) # f) Ls = [4, 8, 16] Ms_Merrs = [] for L in Ls: print("="*80) print("L =", L) E, C, M = run_simulation(L, L, betas) Ms_Merrs.append(M) plt.figure() for M, L in zip(Ms_Merrs, Ls): plt.errorbar(Ts, M[:, 0], yerr=M[:, 1], label="L={L:d}".format(L=L)) plt.legend() plt.xlim(0, np.max(1./betas)) plt.xlabel("temperature $T$") plt.ylabel("staggered magnetization $<|M_s|>$ per site") ``` # Honeycomb lattice ``` def site_honeycomb(x, y, u, Lx, Ly): """Defines a numbering of the sites, given positions x and y and u=0,1 within the unit cell""" return y * Lx * 2 + x*2 + u def init_SSE_honeycomb(Lx, Ly): """Initialize a starting configuration on a 2D square lattice.""" n_sites = Lx*Ly*2 # initialize spins randomly with numbers +1 or -1, but the average magnetization is 0 spins = 2*np.mod(np.random.permutation(n_sites), 2) - 1 op_string = -1 * np.ones(10, np.intp) # initialize with identities bonds = [] for x0 in range(Lx): for y0 in range(Ly): sA = site_honeycomb(x0, y0, 0, Lx, Ly) sB0 = site_honeycomb(x0, y0, 1, Lx, Ly) bonds.append([sA, sB0]) sB1 = site_honeycomb(np.mod(x0+1, Lx), np.mod(y0-1, Ly), 1, Lx, Ly) bonds.append([sA, sB1]) sB2 = site_honeycomb(x0, np.mod(y0-1, Ly), 1, Lx, Ly) bonds.append([sA, sB2]) bonds = np.array(bonds, dtype=np.intp) return spins, op_string, bonds def get_staggering_honeycomb(Lx, Ly): stag = np.zeros(Lx*Ly*2, np.intp) for x in range(Lx): for y in range(Ly): stag[site_honeycomb(x, y, 0, Lx, Ly)] = +1 stag[site_honeycomb(x, y, 1, Lx, Ly)] = -1 return stag def run_simulation_honeycomb(Lx, Ly, betas=[1.], n_updates_measure=10000, n_bins=10): """A full simulation: initialize, thermalize and measure for various betas.""" spins, op_string, bonds = init_SSE_honeycomb(Lx, Ly) stag = get_staggering_honeycomb(Lx, Ly) n_sites = len(spins) n_bonds = len(bonds) Es_Eerrs = [] Cs_Cerrs = [] Ms_Merrs = [] for beta in betas: print("beta = {beta:.3f}".format(beta=beta), flush=True) op_string = sse.thermalize(spins, op_string, bonds, beta, n_updates_measure//10) Es = [] Cs = [] Ms = [] for _ in range(n_bins): ns, ms = measure(spins, op_string, bonds, stag, beta, n_updates_measure) # energy per site n_mean = np.mean(ns) E = (-n_mean/beta + 0.25*n_bonds) / n_sites Es.append(E) Cv = (np.mean(ns**2) - n_mean - n_mean**2)/ n_sites Cs.append(Cv) Ms.append(np.mean(np.abs(ms))/n_sites) E, Eerr = np.mean(Es), np.std(Es)/np.sqrt(n_bins) Es_Eerrs.append((E, Eerr)) C, Cerr = np.mean(Cs), np.std(Cs)/np.sqrt(n_bins) Cs_Cerrs.append((C, Cerr)) M, Merr = np.mean(Ms), np.std(Ms)/np.sqrt(n_bins) Ms_Merrs.append((M, Merr)) return np.array(Es_Eerrs), np.array(Cs_Cerrs), np.array(Ms_Merrs) # just to check: plot the generated lattice L =4 spins, op_string, bonds = init_SSE_honeycomb(L, L) stag = get_staggering_honeycomb(L, L) n_sites = len(spins) n_bonds = len(bonds) # use non-trivial unit-vectors unit_vectors = np.array([[1, 0], [0.5, 0.5*np.sqrt(3)]]) dx = np.array([0., 0.5]) site_positions = np.zeros((n_sites, 2), np.float) for x in range(L): for y in range(L): pos = x* unit_vectors[0, :] + y*unit_vectors[1, :] s0 = site_honeycomb(x, y, 0, L, L) site_positions[s0, :] = pos s1 = site_honeycomb(x, y, 1, L, L) site_positions[s1, :] = pos + dx # plot 
the sites and bonds plt.figure() for bond in bonds: linestyle = '-' s0, s1 = bond if np.max(np.abs(site_positions[s0, :] - site_positions[s1, :])) > L/2: linestyle = ':' # plot bonds from the periodic boundary conditions dotted plt.plot(site_positions[bond, 0], site_positions[bond, 1], linestyle=linestyle, color='k') plt.plot(site_positions[:, 0], site_positions[:, 1], marker='o', linestyle='') plt.show() Ls = [4, 8, 16] result_honeycomb = [] for L in Ls: print("="*80) print("L =", L) res = run_simulation_honeycomb(L, L, betas) result_honeycomb.append(res) fig, axes = plt.subplots(nrows=3, figsize=(10, 15), sharex=True) for res, L in zip(result_honeycomb, Ls): for data, ax in zip(res, axes): ax.errorbar(Ts, data[:, 0], yerr=data[:, 1], label="L={L:d}".format(L=L)) for ax, ylabel in zip(axes, ["energy $E$", "specific heat $C_v$", "stag. magnetization $<|M_s|>$"]): ax.legend() ax.set_ylabel(ylabel) axes[0].set_xlim(0, np.max(1./betas)) axes[-1].set_xlabel("temperature $T$") ```
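As a quick added sanity check on the construction above (a sketch, assuming the helper functions `site_honeycomb`, `init_SSE_honeycomb` and `get_staggering_honeycomb` from the previous cells are in scope), one can verify that the generated honeycomb lattice has three bonds per unit cell, coordination number 3 for every site, and that every bond connects the two sublattices, i.e. the lattice is bipartite, which is what keeps this SSE simulation free of the sign problem discussed earlier.

```
# Added sanity check -- assumes the cells above have been run.
import numpy as np

L = 4
spins, op_string, bonds = init_SSE_honeycomb(L, L)
stag = get_staggering_honeycomb(L, L)

assert len(spins) == 2 * L * L          # two sites per unit cell
assert len(bonds) == 3 * L * L          # three bonds per unit cell

# every site participates in exactly three bonds (coordination number 3)
coordination = np.bincount(bonds.flatten(), minlength=len(spins))
assert np.all(coordination == 3)

# every bond connects the A and B sublattices -> the lattice is bipartite
assert np.all(stag[bonds[:, 0]] * stag[bonds[:, 1]] == -1)

print("honeycomb lattice checks passed")
```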
# Register Client and Create Access Token Notebook - Find detailed information about client registration and access tokens in this blog post: [Authentication to SAS Viya: a couple of approaches](https://blogs.sas.com/content/sgf/2021/09/24/authentication-to-sas-viya/) - Use the client_id to create an access token you can use in the Jupyter environment or externally for API calls to SAS Viya. - You must add the following info to the script: client_id, client_secret, baseURL, and consul_token - Additional access token information is found at the end of this notebook. ### Run the cells below and follow the resulting instructions. # Get register access token ``` import requests import json import os import base64 # set/create variables client_id="" client_secret="" baseURL = "" # sasserver.sas.com consul_token = "" # generate API call for register access token url = f"https://{baseURL}/SASLogon/oauth/clients/consul?callback=false&serviceId={client_id}" headers = { 'X-Consul-Token': consul_token } # process the results response = requests.request("POST", url, headers=headers, verify=False) register_access_token = json.loads(response.text)['access_token'] print(json.dumps(response.json(), indent=4, sort_keys=True)) ``` # Register the client ``` # create API call payload data payload='{"client_id": "' + client_id +'","client_secret": "'+ client_secret +'","scope": ["openid", "*"],"authorized_grant_types": ["authorization_code","refresh_token"],"redirect_uri": "urn:ietf:wg:oauth:2.0:oob","access_token_validity": "43199"}' # generate API call for register access token url = f"https://{baseURL}/SASLogon/oauth/clients" headers = { 'Content-Type': 'application/json', 'Authorization': "Bearer " + register_access_token } # process the results response = requests.request("POST", url, headers=headers, data=payload, verify=False) print(json.dumps(response.json(), indent=4, sort_keys=True)) ``` # Create access token ``` # create authorization url codeURL = "https://" + baseURL + "/SASLogon/oauth/authorize?client_id=" + client_id + "&response_type=code" # enccode client string client_string = client_id + ":" + client_secret message_bytes = client_string.encode('ascii') base64_bytes = base64.b64encode(message_bytes) base64_message = base64_bytes.decode('ascii') # promt with instructions and entry for auth code print(f"* Please visit the following site {codeURL}") print("* If provided a login prompt, add your SAS login credentials") print("* Once authenticated, you'll be redirected to an authoriztion screen, check all of the boxes that appear") print("* This will result in a short string of numbers and letters such as `VAxVFVEnKr`; this is your authorization code; copy the code") code = input("Please enter the authoriztion code you generated through the previous instructions, and then press Enter: ") # generate API call for access token url = f"https://{baseURL}/SASLogon/oauth/token?grant_type=authorization_code&code={code}" headers = { 'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': "Basic " + base64_message } # process the results response = requests.request("GET", url, headers=headers, verify=False) access_token = json.loads(response.text)['access_token'] print(json.dumps(response.json(), indent=4, sort_keys=True)) # Create access_token.txt file directory = os.getcwd() with open(directory + '/access_token.txt', 'w') as f: f.write(access_token) print('The access token was stored for you as ' + directory + '/access_token.txt') ``` ## Notes on the access 
token - The access token has a 12-hour time-to-live (TTL) by default. - The authorization code is valid for 30 minutes and can only be used once. - You can generate a new authorization code by revisiting the authorization URL. - The access_token is valid in this notebook, can be transferred to other notebooks, and can be used for external API calls.
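As a hedged illustration of the last point, the snippet below reads the stored token and sends it in an `Authorization: Bearer` header. The `/folders/folders` endpoint is only an example of a SAS Viya REST API call; substitute whichever API you actually need, and fill in `baseURL` as before.

```
import requests

baseURL = ""  # sasserver.sas.com, same value used above

# read the token written by this notebook
with open('access_token.txt') as f:
    access_token = f.read().strip()

# example call -- replace the endpoint with the API you need
url = f"https://{baseURL}/folders/folders"
headers = {'Authorization': "Bearer " + access_token}

response = requests.request("GET", url, headers=headers, verify=False)
print(response.status_code)
print(response.json())
```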
# Sklearn # Визуализация данных ``` import pandas as pd import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import scipy.stats as sts import seaborn as sns from contextlib import contextmanager sns.set() sns.set_style("whitegrid") color_palette = sns.color_palette('deep') + sns.color_palette('husl', 6) + sns.color_palette('bright') + sns.color_palette('pastel') %matplotlib inline sns.palplot(color_palette) def ndprint(a, precision=3): with np.printoptions(precision=precision, suppress=True): print(a) from sklearn import datasets, metrics, model_selection as mdsel ``` ### Загрузка выборки ``` digits = datasets.load_digits() print(digits.DESCR) print('target:', digits.target[0]) print('features: \n', digits.data[0]) print('number of features:', len(digits.data[0])) ``` ## Визуализация объектов выборки ``` #не будет работать: Invalid dimensions for image data plt.imshow(digits.data[0]) digits.data[0].shape digits.data[0].reshape(8,8) digits.data[0].reshape(8,8).shape plt.imshow(digits.data[0].reshape(8,8)) digits.keys() digits.images[0] plt.imshow(digits.images[0]) plt.figure(figsize=(8, 8)) plt.subplot(2, 2, 1) plt.imshow(digits.images[0]) plt.subplot(2, 2, 2) plt.imshow(digits.images[0], cmap='hot') plt.subplot(2, 2, 3) plt.imshow(digits.images[0], cmap='gray') plt.subplot(2, 2, 4) plt.imshow(digits.images[0], cmap='gray', interpolation='sinc') plt.figure(figsize=(20, 8)) for plot_number, plot in enumerate(digits.images[:10]): plt.subplot(2, 5, plot_number + 1) plt.imshow(plot, cmap = 'gray') plt.title('digit: ' + str(digits.target[plot_number])) ``` ## Уменьшение размерности ``` from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import classification_report from collections import Counter data = digits.data[:1000] labels = digits.target[:1000] print(Counter(labels)) plt.figure(figsize = (10, 6)) plt.bar(Counter(labels).keys(), Counter(labels).values()) classifier = KNeighborsClassifier() classifier.fit(data, labels) print(classification_report(classifier.predict(data), labels)) ``` ### Random projection ``` from sklearn import random_projection projection = random_projection.SparseRandomProjection(n_components = 2, random_state = 0) data_2d_rp = projection.fit_transform(data) plt.figure(figsize=(10, 6)) plt.scatter(data_2d_rp[:, 0], data_2d_rp[:, 1], c = labels) classifier.fit(data_2d_rp, labels) print(classification_report(classifier.predict(data_2d_rp), labels)) ``` ### PCA ``` from sklearn.decomposition import PCA pca = PCA(n_components = 2, random_state = 0, svd_solver='randomized') data_2d_pca = pca.fit_transform(data) plt.figure(figsize = (10, 6)) plt.scatter(data_2d_pca[:, 0], data_2d_pca[:, 1], c = labels) classifier.fit(data_2d_pca, labels) print(classification_report(classifier.predict(data_2d_pca), labels)) ``` ### MDS ``` from sklearn import manifold mds = manifold.MDS(n_components = 2, n_init = 1, max_iter = 100) data_2d_mds = mds.fit_transform(data) plt.figure(figsize=(10, 6)) plt.scatter(data_2d_mds[:, 0], data_2d_mds[:, 1], c = labels) classifier.fit(data_2d_mds, labels) print(classification_report(classifier.predict(data_2d_mds), labels)) ``` ### t- SNE ``` tsne = manifold.TSNE(n_components = 2, init = 'pca', random_state = 0) data_2d_tsne = tsne.fit_transform(data) plt.figure(figsize = (10, 6)) plt.scatter(data_2d_tsne[:, 0], data_2d_tsne[:, 1], c = labels) classifier.fit(data_2d_tsne, labels) print(classification_report(classifier.predict(data_2d_tsne), labels)) ```
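To make the comparison between the four projections easier to read, here is a small added summary cell (not part of the original notebook) that reports a single training accuracy per embedding; it assumes the arrays `data_2d_rp`, `data_2d_pca`, `data_2d_mds`, `data_2d_tsne` and `labels` from the cells above are still in memory.

```
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier

embeddings = {
    'Random projection': data_2d_rp,
    'PCA': data_2d_pca,
    'MDS': data_2d_mds,
    't-SNE': data_2d_tsne,
}

for name, data_2d in embeddings.items():
    clf = KNeighborsClassifier()
    clf.fit(data_2d, labels)
    acc = accuracy_score(labels, clf.predict(data_2d))
    print(f'{name:>20}: train accuracy = {acc:.3f}')
```

As in the original cells, the classifier is evaluated on the same data it was fit on, so the numbers only indicate how well each 2D embedding separates the digit classes, not generalization performance.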
#Create the environment ``` from google.colab import drive drive.mount('/content/drive') %cd /content/drive/My Drive/ESoWC import pandas as pd import xarray as xr import numpy as np import pandas as pd from sklearn import preprocessing import seaborn as sns #Our class from create_dataset.make_dataset import CustomDataset fn_land = 'Data/land_cover_data.nc' fn_weather = 'Data/05_2019_weather_and_CO_for_model.nc' fn_conc = 'Data/totalcolConcentretations_featured.nc' fn_traffic = 'Data/emissions_traffic_hourly_merged.nc' ``` #Load datasets ##Land ``` land_instance = CustomDataset(fn_land) land_instance.get_dataset() land_instance.resample("1H") land_fixed = land_instance.get_dataset() land_fixed = land_fixed.drop_vars('NO emissions') #They are already in the weather dataset land_fixed = land_fixed.transpose('latitude','longitude','time') land_fixed ``` ##Weather ``` weather = xr.open_dataset(fn_weather) weather #This variable is too much correlated with the tcw weather_fixed = weather.drop_vars('tcwv') weather_fixed = weather_fixed.transpose('latitude','longitude','time') weather_fixed ``` ##Conc ``` conc_fidex = xr.open_dataset(fn_conc) conc_fidex ``` ##Traffic ``` traffic_instance = CustomDataset(fn_traffic) traffic_ds= traffic_instance.get_dataset() traffic_ds traffic_ds=traffic_ds.drop_vars('emissions') lat_bins = np.arange(43,51.25,0.25) lon_bins = np.arange(4,12.25,0.25) traffic_ds = traffic_ds.sortby(['latitude','longitude','hour']) traffic_ds = traffic_ds.interp(latitude=lat_bins, longitude=lon_bins, method="linear") days = np.arange(1,32,1) traffic_ds=traffic_ds.expand_dims({'Days':days}) traffic_ds trafic_df = traffic_ds.to_dataframe() trafic_df = trafic_df.reset_index() trafic_df['time'] = (pd.to_datetime(trafic_df['Days']-1,errors='ignore', unit='d',origin='2019-05') + pd.to_timedelta(trafic_df['hour'], unit='h')) trafic_df=trafic_df.drop(columns=['Days', 'hour']) trafic_df = trafic_df.set_index(['latitude','longitude','time']) trafic_df.head() traffic_fixed = trafic_df.to_xarray() traffic_fixed = traffic_fixed.transpose('latitude','longitude','time') traffic_fixed traffic_fixed.isel(time=[15]).traffic.plot() ``` #Merge ``` tot_dataset = weather_fixed.merge(land_fixed) tot_dataset = tot_dataset.merge(conc_fidex) tot_dataset = tot_dataset.merge(traffic_fixed) tot_dataset ``` #Check ``` weather_fixed.to_dataframe().isnull().sum() land_fixed.to_dataframe().isnull().sum() conc_fidex.to_dataframe().isnull().sum() traffic_fixed.to_dataframe().isnull().sum() tot_dataset.to_dataframe().isnull().sum() tot_dataset.isel(time=[12]).EMISSIONS_2019.plot() tot_dataset.isel(time=[12]).u10.plot() tot_dataset.isel(time=[15]).height.plot() tot_dataset.isel(time=[12]).NO_tc.plot() tot_dataset.isel(time=[15]).traffic.plot() ``` #Save the dataset ``` tot_dataset.to_netcdf('Data/05_2019_dataset_complete_for_model_CO.nc', 'w', 'NETCDF4') ```
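A short added check (assuming `tot_dataset` is still in memory) that the file written above can be opened again and round-trips the merged variables:

```
import xarray as xr

reloaded = xr.open_dataset('Data/05_2019_dataset_complete_for_model_CO.nc')

print(reloaded.dims)
print(list(reloaded.data_vars))

# the variables should match the in-memory dataset that was just saved
assert set(reloaded.data_vars) == set(tot_dataset.data_vars)
xr.testing.assert_allclose(reloaded['traffic'], tot_dataset['traffic'])
```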
## Amazon SageMaker Feature Store: Client-side Encryption using AWS Encryption SDK This notebook demonstrates how client-side encryption with SageMaker Feature Store is done using the [AWS Encryption SDK library](https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/introduction.html) to encrypt your data prior to ingesting it into your Online or Offline Feature Store. We first demonstrate how to encrypt your data using the AWS Encryption SDK library, and then show how to use [Amazon Athena](https://aws.amazon.com/athena/) to query for a subset of encrypted columns of features for model training. Currently, Feature Store supports encryption at rest and encryption in transit. With this notebook, we showcase an additional layer of security where your data is encrypted and then stored in your Feature Store. This notebook also covers the scenario where you want to query a subset of encrypted data using Amazon Athena for model training. This becomes particularly useful when you want to store encrypted data sets in a single Feature Store, and want to perform model training using only a subset of encrypted columns, forcing privacy over the remaining columns. If you are interested in server side encryption with Feature Store, see [Feature Store: Encrypt Data in your Online or Offline Feature Store using KMS key](https://sagemaker-examples.readthedocs.io/en/latest/sagemaker-featurestore/feature_store_kms_key_encryption.html). For more information on the AWS Encryption library, see [AWS Encryption SDK library](https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/introduction.html). For detailed information about Feature Store, see the [Developer Guide](https://docs.aws.amazon.com/sagemaker/latest/dg/feature-store.html). ### Overview 1. Set up 2. Load in and encrypt your data using AWS Encryption library (`aws-encryption-sdk`) 3. Create Feature Group and ingest your encrypted data into it 4. Query your encrypted data in your feature store using Amazon Athena 5. Decrypt the data you queried ### Prerequisites This notebook uses the Python SDK library for Feature Store, the AWS Encryption SDK library, `aws-encryption-sdk` and the `Python 3 (DataScience)` kernel. To use the`aws-encryption-sdk` library you will need to have an active KMS key that you created. If you do not have a KMS key, then you can create one by following the [KMS Policy Template](https://sagemaker-examples.readthedocs.io/en/latest/sagemaker-featurestore/feature_store_kms_key_encryption.html#KMS-Policy-Template) steps, or you can visit the [KMS section in the console](https://console.aws.amazon.com/kms/home) and follow the button prompts for creating a KMS key. This notebook works with SageMaker Studio, Jupyter, and JupyterLab. ### Library Dependencies: * `sagemaker>=2.0.0` * `numpy` * `pandas` * `aws-encryption-sdk` ### Data This notebook uses a synthetic data set that has the following features: `customer_id`, `ssn` (social security number), `credit_score`, `age`, and aims to simulate a relaxed data set that has some important features that would be needed during the credit card approval process. 
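The synthetic CSV used below ships with the example; purely as an illustration of that schema (column names as used later in the notebook, value ranges are assumptions, not the actual generation script), a comparable table could be produced like this:

```
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
n = 1000  # assumed row count, for illustration only

synthetic = pd.DataFrame({
    'customer_id': np.arange(1, n + 1),
    'SSN': rng.integers(100_000_000, 1_000_000_000, size=n),
    'credit_score': rng.integers(300, 851, size=n),
    'age': rng.integers(18, 91, size=n),
})
print(synthetic.head())
```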
``` import sagemaker import pandas as pd import numpy as np pip install -q 'aws-encryption-sdk' ``` ### Set up ``` sagemaker_session = sagemaker.Session() s3_bucket_name = sagemaker_session.default_bucket() prefix = "sagemaker-featurestore-demo" role = sagemaker.get_execution_role() region = sagemaker_session.boto_region_name ``` Instantiate an encryption SDK client and provide your KMS ARN key to the `StrictAwsKmsMasterKeyProvider` object. This will be needed for data encryption and decryption by the AWS Encryption SDK library. You will need to substitute your KMS Key ARN for `kms_key`. ``` import aws_encryption_sdk from aws_encryption_sdk.identifiers import CommitmentPolicy client = aws_encryption_sdk.EncryptionSDKClient( commitment_policy=CommitmentPolicy.REQUIRE_ENCRYPT_REQUIRE_DECRYPT ) kms_key_provider = aws_encryption_sdk.StrictAwsKmsMasterKeyProvider( key_ids=[kms_key] ## Add your KMS key here ) ``` Load in your data. ``` credit_card_data = pd.read_csv("data/credit_card_approval_synthetic.csv") credit_card_data.head() credit_card_data.dtypes ``` ### Client-Side Encryption Methods Below are some methods that use the Amazon Encryption SDK library for data encryption, and decryption. Note that the data type of the encryption is byte which we convert to an integer prior to storing it into Feature Store and do the reverse prior to decrypting. This is because Feature Store doesn't support byte format directly, thus why we convert the byte encryption to an integer. ``` def encrypt_data_frame(df, columns): """ Input: df: A pandas Dataframe columns: A list of column names. Encrypt the provided columns in df. This method assumes that column names provided in columns exist in df, and uses the AWS Encryption SDK library. """ for col in columns: buffer = [] for entry in np.array(df[col]): entry = str(entry) encrypted_entry, encryptor_header = client.encrypt( source=entry, key_provider=kms_key_provider ) buffer.append(encrypted_entry) df[col] = buffer def decrypt_data_frame(df, columns): """ Input: df: A pandas Dataframe columns: A list of column names. Decrypt the provided columns in df. This method assumes that column names provided in columns exist in df, and uses the AWS Encryption SDK library. """ for col in columns: buffer = [] for entry in np.array(df[col]): decrypted_entry, decryptor_header = client.decrypt( source=entry, key_provider=kms_key_provider ) buffer.append(float(decrypted_entry)) df[col] = np.array(buffer) def bytes_to_int(df, columns): """ Input: df: A pandas Dataframe columns: A list of column names. Convert the provided columns in df of type bytes to integers. This method assumes that column names provided in columns exist in df and that the columns passed in are of type bytes. """ for col in columns: for index, entry in enumerate(np.array(df[col])): df[col][index] = int.from_bytes(entry, "little") def int_to_bytes(df, columns): """ Input: df: A pandas Dataframe columns: A list of column names. Convert the provided columns in df of type integers to bytes. This method assumes that column names provided in columns exist in df and that the columns passed in are of type integers. """ for col in columns: buffer = [] for index, entry in enumerate(np.array(df[col])): current = int(df[col][index]) current_bit_length = current.bit_length() + 1 # include the sign bit, 1 current_byte_length = (current_bit_length + 7) // 8 buffer.append(current.to_bytes(current_byte_length, "little")) df[col] = pd.Series(buffer) ## Encrypt credit card data. 
Note that we treat `customer_id` as a primary key, and since it's encryption is unique we can encrypt it. encrypt_data_frame(credit_card_data, ["customer_id", "age", "SSN", "credit_score"]) credit_card_data print(credit_card_data.dtypes) ## Cast encryption of type bytes to an integer so it can be stored in Feature Store. bytes_to_int(credit_card_data, ["customer_id", "age", "SSN", "credit_score"]) print(credit_card_data.dtypes) credit_card_data def cast_object_to_string(data_frame): """ Input: data_frame: A pandas Dataframe Cast all columns of data_frame of type object to type string. """ for label in data_frame.columns: if data_frame.dtypes[label] == object: data_frame[label] = data_frame[label].astype("str").astype("string") return data_frame credit_card_data = cast_object_to_string(credit_card_data) print(credit_card_data.dtypes) credit_card_data ``` ### Create your Feature Group and Ingest your encrypted data into it Below we start by appending the `EventTime` feature to your data to timestamp entries, then we load the feature definition, and instantiate the Feature Group object. Then lastly we ingest the data into your feature store. ``` from time import gmtime, strftime, sleep credit_card_feature_group_name = "credit-card-feature-group-" + strftime("%d-%H-%M-%S", gmtime()) ``` Instantiate a FeatureGroup object for `credit_card_data`. ``` from sagemaker.feature_store.feature_group import FeatureGroup credit_card_feature_group = FeatureGroup( name=credit_card_feature_group_name, sagemaker_session=sagemaker_session ) import time current_time_sec = int(round(time.time())) ## Recall customer_id is encrypted therefore unique, and so it can be used as a record identifier. record_identifier_feature_name = "customer_id" ``` Append the `EventTime` feature to your data frame. This parameter is required, and time stamps each data point. ``` credit_card_data["EventTime"] = pd.Series( [current_time_sec] * len(credit_card_data), dtype="float64" ) credit_card_data.head() print(credit_card_data.dtypes) credit_card_feature_group.load_feature_definitions(data_frame=credit_card_data) credit_card_feature_group.create( s3_uri=f"s3://{s3_bucket_name}/{prefix}", record_identifier_name=record_identifier_feature_name, event_time_feature_name="EventTime", role_arn=role, enable_online_store=False, ) time.sleep(60) ``` Ingest your data into your feature group. ``` credit_card_feature_group.ingest(data_frame=credit_card_data, max_workers=3, wait=True) time.sleep(30) ``` Continually check your offline store until your data is available in it. ``` s3_client = sagemaker_session.boto_session.client("s3", region_name=region) credit_card_feature_group_s3_uri = ( credit_card_feature_group.describe() .get("OfflineStoreConfig") .get("S3StorageConfig") .get("ResolvedOutputS3Uri") ) credit_card_feature_group_s3_prefix = credit_card_feature_group_s3_uri.replace( f"s3://{s3_bucket_name}/", "" ) offline_store_contents = None while offline_store_contents is None: objects_in_bucket = s3_client.list_objects( Bucket=s3_bucket_name, Prefix=credit_card_feature_group_s3_prefix ) if "Contents" in objects_in_bucket and len(objects_in_bucket["Contents"]) > 1: offline_store_contents = objects_in_bucket["Contents"] else: print("Waiting for data in offline store...\n") time.sleep(60) print("Data available.") ``` ### Use Amazon Athena to Query your Encrypted Data in your Feature Store Using Amazon Athena, we query columns `customer_id`, `age`, and `credit_score` from your offline feature store where your encrypted data is. 
``` credit_card_query = credit_card_feature_group.athena_query() credit_card_table = credit_card_query.table_name query_credit_card_table = 'SELECT customer_id, age, credit_score FROM "' + credit_card_table + '"' print("Running " + query_credit_card_table) # Run the Athena query credit_card_query.run( query_string=query_credit_card_table, output_location="s3://" + s3_bucket_name + "/" + prefix + "/query_results/", ) time.sleep(60) credit_card_dataset = credit_card_query.as_dataframe() print(credit_card_dataset.dtypes) credit_card_dataset int_to_bytes(credit_card_dataset, ["customer_id", "age", "credit_score"]) credit_card_dataset decrypt_data_frame(credit_card_dataset, ["customer_id", "age", "credit_score"]) ``` In this notebook, we queried a subset of encrypted features. From here you can now train a model on this new dataset while remaining privacy over other columns e.g., `ssn`. ``` credit_card_dataset ``` ### Clean Up Resources Remove the Feature Group that was created. ``` credit_card_feature_group.delete() ``` ### Next Steps In this notebook we covered client-side encryption with Feature Store. If you are interested in understanding how server-side encryption is done with Feature Store, see [Feature Store: Encrypt Data in your Online or Offline Feature Store using KMS key](https://sagemaker-examples.readthedocs.io/en/latest/sagemaker-featurestore/feature_store_kms_key_encryption.html). For more information on the AWS Encryption library, see [AWS Encryption SDK library](https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/introduction.html). For detailed information about Feature Store, see the [Developer Guide](https://docs.aws.amazon.com/sagemaker/latest/dg/feature-store.html).
# Strings ### **Splitting strings** ``` 'a,b,c'.split(',') latitude = '37.24N' longitude = '-115.81W' 'Coordinates {0},{1}'.format(latitude,longitude) f'Coordinates {latitude},{longitude}' '{0},{1},{2}'.format(*('abc')) coord = {"latitude":latitude,"longitude":longitude} 'Coordinates {latitude},{longitude}'.format(**coord) ``` ### **Access argument' s attribute ** ``` class Point: def __init__(self,x,y): self.x,self.y = x,y def __str__(self): return 'Point({self.x},{self.y})'.format(self = self) def __repr__(self): return f'Point({self.x},{self.y})' test_point = Point(4,2) test_point str(Point(4,2)) ``` ### **Replace with %s , %r ** : ``` " repr() shows the quote {!r}, while str() doesn't:{!s} ".format('a1','a2') ``` ### **Aligning the text with width** : ``` '{:<30}'.format('left aligned') '{:>30}'.format('right aligned') '{:^30}'.format('centerd') '{:*^30}'.format('centerd') ``` ### **Replace with %x , %o and convert the value to different base ** : ``` "int:{0:d}, hex:{0:x}, oct:{0:o}, bin:{0:b}".format(42) '{:,}'.format(12345677) ``` ### **Percentage ** : ``` points = 19 total = 22 'Correct answers: {:.2%}'.format(points/total) import datetime as dt f"{dt.datetime.now():%Y-%m-%d}" f"{dt.datetime.now():%d_%m_%Y}" today = dt.datetime.today().strftime("%d_%m_%Y") today ``` ### **Splitting without parameters ** : ``` "this is a test".split() ``` ### **Concatenating and joining Strings ** : ``` 'do'*2 orig_string ='Hello' orig_string+',World' full_sentence = orig_string+',World' full_sentence ``` ### **Concatenating with join() , other basic funstions** : ``` strings = ['do','re','mi'] ', '.join(strings) 'z' not in 'abc' ord('a'), ord('#') chr(97) s = "foodbar" s[2:5] s[:4] + s[4:] s[:4] + s[4:] == s t=s[:] id(s) id(t) s is t s[0:6:2] s[5:0:-2] s = 'tomorrow is monday' reverse_s = s[::-1] reverse_s s.capitalize() s.upper() s.title() s.count('o') "foobar".startswith('foo') "foobar".endswith('ar') "foobar".endswith('oob',0,4) "foobar".endswith('oob',2,4) "My name is yaozeliang, I work at Societe Generale".find('yao') # If can't find the string, return -1 "My name is yaozeliang, I work at Societe Generale".find('gent') # Check a string if consists of alphanumeric characters "abc123".isalnum() "abc%123".isalnum() "abcABC".isalpha() "abcABC1".isalpha() '123'.isdigit() '123abc'.isdigit() 'abc'.islower() "This Is A Title".istitle() "This is a title".istitle() 'ABC'.isupper() 'ABC1%'.isupper() 'foo'.center(10) ' foo bar baz '.strip() ' foo bar baz '.lstrip() ' foo bar baz '.rstrip() "foo abc foo def fo ljk ".replace('foo','yao') 'www.realpython.com'.strip('w.moc') 'www.realpython.com'.strip('w.com') 'www.realpython.com'.strip('w.ncom') ``` ### **Convert between strings and lists** : ``` ', '.join(['foo','bar','baz','qux']) list('corge') ':'.join('corge') 'www.foo'.partition('.') 'foo@@bar@@baz'.partition('@@') 'foo@@bar@@baz'.rpartition('@@') 'foo.bar'.partition('@@') # By default , rsplit split a string with white space 'foo bar adf yao'.rsplit() 'foo.bar.adf.ert'.split('.') 'foo\nbar\nadfa\nlko'.splitlines() ```
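Combining a few of the methods shown above into one small helper (an added example, not from the original notebook):

```
def normalize_name(raw):
    """Collapse repeated whitespace and normalize capitalization."""
    parts = raw.strip().split()       # split() without arguments drops extra spaces
    return ' '.join(parts).title()    # rejoin with single spaces, Title Case

print(normalize_name('  yao   zeliang '))    # -> 'Yao Zeliang'
print(normalize_name('tomorrow is monday'))  # -> 'Tomorrow Is Monday'
```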
# 9. Incorporating OD Veto Data ``` import sys import os import h5py from collections import Counter from progressbar import * import re import numpy as np import h5py from scipy import signal import matplotlib from repeating_classifier_training_utils import * from functools import reduce # Add the path to the parent directory to augment search for module par_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir)) if par_dir not in sys.path: sys.path.append(par_dir) %load_ext autoreload %matplotlib inline %autoreload 2 veto_path = '/fast_scratch/WatChMaL/data/IWCDmPMT_4pm_full_tank_ODveto.h5' odv_file = h5py.File(veto_path,'r') odv_info = {} for key in odv_file.keys(): odv_info[key] = np.array(odv_file[key]) odv_dict = {} pbar = ProgressBar(widgets=['Creating Event-Index Dictionary: ', Percentage(), ' ', Bar(marker='0',left='[',right=']'), ' ', ETA()], maxval=len(odv_info['event_ids'])) pbar.start() for i in range(len(odv_info['event_ids'])): odv_dict[(odv_info['root_files'][i], odv_info['event_ids'][i])] = i pbar.update(i) pbar.finish() ``` ## Load test set ``` # Get original h5 file info # Import test events from h5 file filtered_index = "/fast_scratch/WatChMaL/data/IWCD_fulltank_300_pe_idxs.npz" filtered_indices = np.load(filtered_index, allow_pickle=True) test_filtered_indices = filtered_indices['test_idxs'] original_data_path = "/data/WatChMaL/data/IWCDmPMT_4pi_fulltank_9M.h5" f = h5py.File(original_data_path, "r") original_eventids = np.array(f['event_ids']) original_rootfiles = np.array(f['root_files']) filtered_eventids = original_eventids[test_filtered_indices] filtered_rootfiles = original_rootfiles[test_filtered_indices] odv_mapping_indices = np.zeros(len(filtered_rootfiles)) pbar = ProgressBar(widgets=['Mapping Progress: ', Percentage(), ' ', Bar(marker='0',left='[',right=']'), ' ', ETA()], maxval=len(filtered_rootfiles)) pbar.start() for i in range(len(filtered_rootfiles)): odv_mapping_indices[i] = odv_dict[(filtered_rootfiles[i], filtered_eventids[i])] pbar.update(i) pbar.finish() odv_mapping_indices = np.int32(odv_mapping_indices) pbar = ProgressBar(widgets=['Verification Progress: ', Percentage(), ' ', Bar(marker='0',left='[',right=']'), ' ', ETA()], maxval=len(filtered_rootfiles)) pbar.start() for i in range(len(filtered_rootfiles)): assert odv_info['root_files'][odv_mapping_indices[i]] == filtered_rootfiles[i] assert odv_info['event_ids'][odv_mapping_indices[i]] == filtered_eventids[i] pbar.update(i) pbar.finish() np.savez(os.path.join(os.getcwd(), 'Index_Storage/od_veto_mapping_idxs.npz'), mapping_idxs_full_set=odv_mapping_indices) ```
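A brief added sketch of how the saved mapping can be used afterwards: any per-event array in the OD-veto file can be re-ordered to match the filtered test set (shown here for the event ids, which are known to exist in the file; other keys depend on its contents). It assumes `odv_info` is still in memory or the veto file has been re-read.

```
import os
import numpy as np

# reload the mapping written above
mapping = np.load(os.path.join(os.getcwd(), 'Index_Storage/od_veto_mapping_idxs.npz'))
odv_mapping_indices = mapping['mapping_idxs_full_set']

# align any per-event OD-veto array with the filtered test set ordering
test_od_event_ids = odv_info['event_ids'][odv_mapping_indices]
print(test_od_event_ids[:10])
```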
###### Name: Deepak Vadithala ###### Course: MSc Data Science ###### Project Name: MOOC Recommender System ##### Notes: This notebook contains the analysis of the **Google's Word2Vec** model. This model is trained on the news articles. two variable **(Role and Skill Scores)** is used to predict the course category. Skill Score is calculated using the similarity between the skills from LinkedIn compared with the course description with keywords from Coursera. *Model Source Code Path: /mooc-recommender/Model/Cosine_Distance.py* *Github Repo: https://github.com/iamdv/mooc-recommender* ``` # **************************** IMPORTANT **************************** ''' This cell configuration settings for the Notebook. You can run one role at a time to evaluate the performance of the model Change the variable names to run for multiple roles In this model: 1. Google word2vec model has two variables Roles and Skills with 50% weightage for each ''' # ******************************************************************* # For each role a list of category names are grouped. # Please don't change these variables label_DataScientist = ['Data Science','Data Analysis','Data Mining','Data Visualization'] label_SoftwareDevelopment = ['Software Development','Computer Science', 'Programming Languages', 'Algorithms and Data Structures', 'Information Technology'] label_DatabaseAdministrator = ['Databases'] label_Cybersecurity = ['Cybersecurity'] label_FinancialAccountant = ['Finance', 'Accounting'] label_MachineLearning = ['Machine Learning', 'Deep Learning'] label_Musician = ['Music'] label_Dietitian = ['Nutrition & Wellness', 'Health & Medicine'] # ******************************************************************* # ******************************************************************* # Environment and Config Variables. Change these variables as per the requirement. my_fpath_model = "../Data/Final_Model_Output.csv" my_fpath_courses = "../Data/main_coursera.csv" my_fpath_skills_DataScientist = "../Data/Word2Vec-Google/Word2VecGoogle_DataScientist.csv" my_fpath_skills_SoftwareDevelopment = "../Data/Word2Vec-Google/Word2VecGoogle_SoftwareDevelopment.csv" my_fpath_skills_DatabaseAdministrator = "../Data/Word2Vec-Google/Word2VecGoogle_DatabaseAdministrator.csv" my_fpath_skills_Cybersecurity = "../Data/Word2Vec-Google/Word2VecGoogle_Cybersecurity.csv" my_fpath_skills_FinancialAccountant = "../Data/Word2Vec-Google/Word2VecGoogle_FinancialAccountant.csv" my_fpath_skills_MachineLearning = "../Data/Word2Vec-Google/Word2VecGoogle_MachineLearning.csv" my_fpath_skills_Musician = "../Data/Word2Vec-Google/Word2VecGoogle_Musician.csv" my_fpath_skills_Dietitian = "../Data/Word2Vec-Google/Word2VecGoogle_Dietitian.csv" # ******************************************************************* # ******************************************************************* # Weighting Variables. Change them as per the requirement. # Role score is not applicable for Google's Word2Vec model. my_role_weight = 0.5 my_skill_weight = 0.5 my_threshold = 0.37 # ******************************************************************* # Importing required modules/packages import pandas as pd import numpy as np from matplotlib import pyplot as plt from sklearn.feature_extraction.text import TfidfVectorizer import nltk, string import string import csv import json # Downloading the stopwords like i, me, and, is, the etc. 
nltk.download('stopwords') # Loading courses and skills data from the CSV files df_courses = pd.read_csv(my_fpath_courses) df_DataScientist = pd.read_csv(my_fpath_skills_DataScientist) df_DataScientist = df_DataScientist.drop('Role', 1) df_DataScientist.columns = ['Course Id', 'DataScientist_Skill_Score', 'DataScientist_Role_Score', 'DataScientist_Keyword_Score'] df_SoftwareDevelopment = pd.read_csv(my_fpath_skills_SoftwareDevelopment) df_SoftwareDevelopment = df_SoftwareDevelopment.drop('Role', 1) df_SoftwareDevelopment.columns = ['Course Id','SoftwareDevelopment_Skill_Score', 'SoftwareDevelopment_Role_Score', 'SoftwareDevelopment_Keyword_Score'] df_DatabaseAdministrator = pd.read_csv(my_fpath_skills_DatabaseAdministrator) df_DatabaseAdministrator = df_DatabaseAdministrator.drop('Role', 1) df_DatabaseAdministrator.columns = ['Course Id','DatabaseAdministrator_Skill_Score', 'DatabaseAdministrator_Role_Score', 'DatabaseAdministrator_Keyword_Score'] df_Cybersecurity = pd.read_csv(my_fpath_skills_Cybersecurity) df_Cybersecurity = df_Cybersecurity.drop('Role', 1) df_Cybersecurity.columns = ['Course Id','Cybersecurity_Skill_Score', 'Cybersecurity_Role_Score', 'Cybersecurity_Keyword_Score'] df_FinancialAccountant = pd.read_csv(my_fpath_skills_FinancialAccountant) df_FinancialAccountant = df_FinancialAccountant.drop('Role', 1) df_FinancialAccountant.columns = ['Course Id','FinancialAccountant_Skill_Score', 'FinancialAccountant_Role_Score', 'FinancialAccountant_Keyword_Score'] df_MachineLearning = pd.read_csv(my_fpath_skills_MachineLearning) df_MachineLearning = df_MachineLearning.drop('Role', 1) df_MachineLearning.columns = ['Course Id','MachineLearning_Skill_Score', 'MachineLearning_Role_Score', 'MachineLearning_Keyword_Score'] df_Musician = pd.read_csv(my_fpath_skills_Musician) df_Musician = df_Musician.drop('Role', 1) df_Musician.columns = ['Course Id','Musician_Skill_Score', 'Musician_Role_Score', 'Musician_Keyword_Score'] df_Dietitian = pd.read_csv(my_fpath_skills_Dietitian) df_Dietitian = df_Dietitian.drop('Role', 1) df_Dietitian.columns = ['Course Id','Dietitian_Skill_Score', 'Dietitian_Role_Score','Dietitian_Keyword_Score'] # Merging the csv files df_cosdist = df_DataScientist.merge(df_SoftwareDevelopment, on = 'Course Id', how = 'outer') df_cosdist = df_cosdist.merge(df_DatabaseAdministrator, on = 'Course Id', how = 'outer') df_cosdist = df_cosdist.merge(df_Cybersecurity, on = 'Course Id', how = 'outer') df_cosdist = df_cosdist.merge(df_FinancialAccountant, on = 'Course Id', how = 'outer') df_cosdist = df_cosdist.merge(df_MachineLearning, on = 'Course Id', how = 'outer') df_cosdist = df_cosdist.merge(df_Musician, on = 'Course Id', how = 'outer') df_cosdist = df_cosdist.merge(df_Dietitian, on = 'Course Id', how = 'outer') # Exploring data dimensionality, feature names, and feature types. print(df_courses.shape,"\n") print(df_cosdist.shape,"\n") print(df_courses.columns, "\n") print(df_cosdist.shape,"\n") print(df_courses.describe(), "\n") print(df_cosdist.describe(), "\n") # Quick check to see if the dataframe showing the right results df_cosdist.head(20) # Joining two dataframes - Courses and the Cosein Similarity Results based on the 'Course Id' variable. # Inner joins: Joins two tables with the common rows. This is a set operateion. 
df_courses_score = df_courses.merge(df_cosdist, on ='Course Id', how='inner') print(df_courses_score.shape,"\n") # Tranforming and shaping the data to create the confusion matrix for the ROLE: DATA SCIENTIST y_actu_DataScientist = '' y_pred_DataScientist = '' df_courses_score['DataScientist_Final_Score'] = (df_courses_score['DataScientist_Role_Score'] * my_role_weight) + (df_courses_score['DataScientist_Skill_Score'] * my_skill_weight) df_courses_score['DataScientist_Predict'] = (df_courses_score['DataScientist_Final_Score'] >= my_threshold) df_courses_score['DataScientist_Label'] = df_courses_score.Category.isin(label_DataScientist) y_pred_DataScientist = pd.Series(df_courses_score['DataScientist_Predict'], name='Predicted') y_actu_DataScientist = pd.Series(df_courses_score['DataScientist_Label'], name='Actual') df_confusion_DataScientist = pd.crosstab(y_actu_DataScientist, y_pred_DataScientist , rownames=['Actual'], colnames=['Predicted'], margins=False) # Tranforming and shaping the data to create the confusion matrix for the ROLE: SOFTWARE ENGINEER/DEVELOPER y_actu_SoftwareDevelopment = '' y_pred_SoftwareDevelopment = '' df_courses_score['SoftwareDevelopment_Final_Score'] = (df_courses_score['SoftwareDevelopment_Role_Score'] * my_role_weight) + (df_courses_score['SoftwareDevelopment_Skill_Score'] * my_skill_weight) df_courses_score['SoftwareDevelopment_Predict'] = (df_courses_score['SoftwareDevelopment_Final_Score'] >= my_threshold) df_courses_score['SoftwareDevelopment_Label'] = df_courses_score.Category.isin(label_SoftwareDevelopment) y_pred_SoftwareDevelopment = pd.Series(df_courses_score['SoftwareDevelopment_Predict'], name='Predicted') y_actu_SoftwareDevelopment = pd.Series(df_courses_score['SoftwareDevelopment_Label'], name='Actual') df_confusion_SoftwareDevelopment = pd.crosstab(y_actu_SoftwareDevelopment, y_pred_SoftwareDevelopment , rownames=['Actual'], colnames=['Predicted'], margins=False) # Tranforming and shaping the data to create the confusion matrix for the ROLE: DATABASE DEVELOPER/ADMINISTRATOR y_actu_DatabaseAdministrator = '' y_pred_DatabaseAdministrator = '' df_courses_score['DatabaseAdministrator_Final_Score'] = (df_courses_score['DatabaseAdministrator_Role_Score'] * my_role_weight) + (df_courses_score['DatabaseAdministrator_Skill_Score'] * my_skill_weight) df_courses_score['DatabaseAdministrator_Predict'] = (df_courses_score['DatabaseAdministrator_Final_Score'] >= my_threshold) df_courses_score['DatabaseAdministrator_Label'] = df_courses_score.Category.isin(label_DatabaseAdministrator) y_pred_DatabaseAdministrator = pd.Series(df_courses_score['DatabaseAdministrator_Predict'], name='Predicted') y_actu_DatabaseAdministrator = pd.Series(df_courses_score['DatabaseAdministrator_Label'], name='Actual') df_confusion_DatabaseAdministrator = pd.crosstab(y_actu_DatabaseAdministrator, y_pred_DatabaseAdministrator , rownames=['Actual'], colnames=['Predicted'], margins=False) # Tranforming and shaping the data to create the confusion matrix for the ROLE: CYBERSECURITY CONSULTANT y_actu_Cybersecurity = '' y_pred_Cybersecurity = '' df_courses_score['Cybersecurity_Final_Score'] = (df_courses_score['Cybersecurity_Role_Score'] * my_role_weight) + (df_courses_score['Cybersecurity_Skill_Score'] * my_skill_weight) df_courses_score['Cybersecurity_Predict'] = (df_courses_score['Cybersecurity_Final_Score'] >= my_threshold) df_courses_score['Cybersecurity_Label'] = df_courses_score.Category.isin(label_Cybersecurity) y_pred_Cybersecurity = 
pd.Series(df_courses_score['Cybersecurity_Predict'], name='Predicted') y_actu_Cybersecurity = pd.Series(df_courses_score['Cybersecurity_Label'], name='Actual') df_confusion_Cybersecurity = pd.crosstab(y_actu_Cybersecurity, y_pred_Cybersecurity , rownames=['Actual'], colnames=['Predicted'], margins=False) # Tranforming and shaping the data to create the confusion matrix for the ROLE: FINANCIAL ACCOUNTANT y_actu_FinancialAccountant = '' y_pred_FinancialAccountant = '' df_courses_score['FinancialAccountant_Final_Score'] = (df_courses_score['FinancialAccountant_Role_Score'] * my_role_weight) + (df_courses_score['FinancialAccountant_Skill_Score'] * my_skill_weight) df_courses_score['FinancialAccountant_Predict'] = (df_courses_score['FinancialAccountant_Final_Score'] >= my_threshold) df_courses_score['FinancialAccountant_Label'] = df_courses_score.Category.isin(label_FinancialAccountant) y_pred_FinancialAccountant = pd.Series(df_courses_score['FinancialAccountant_Predict'], name='Predicted') y_actu_FinancialAccountant = pd.Series(df_courses_score['FinancialAccountant_Label'], name='Actual') df_confusion_FinancialAccountant = pd.crosstab(y_actu_FinancialAccountant, y_pred_FinancialAccountant , rownames=['Actual'], colnames=['Predicted'], margins=False) # Tranforming and shaping the data to create the confusion matrix for the ROLE: MACHINE LEARNING ENGINEER y_actu_MachineLearning = '' y_pred_MachineLearning = '' df_courses_score['MachineLearning_Final_Score'] = (df_courses_score['MachineLearning_Role_Score'] * my_role_weight) + (df_courses_score['MachineLearning_Skill_Score'] * my_skill_weight) df_courses_score['MachineLearning_Predict'] = (df_courses_score['MachineLearning_Final_Score'] >= my_threshold) df_courses_score['MachineLearning_Label'] = df_courses_score.Category.isin(label_MachineLearning) y_pred_MachineLearning = pd.Series(df_courses_score['MachineLearning_Predict'], name='Predicted') y_actu_MachineLearning = pd.Series(df_courses_score['MachineLearning_Label'], name='Actual') df_confusion_MachineLearning = pd.crosstab(y_actu_MachineLearning, y_pred_MachineLearning , rownames=['Actual'], colnames=['Predicted'], margins=False) # Tranforming and shaping the data to create the confusion matrix for the ROLE: MUSICIAN y_actu_Musician = '' y_pred_Musician = '' df_courses_score['Musician_Final_Score'] = (df_courses_score['Musician_Role_Score'] * my_role_weight) + (df_courses_score['Musician_Skill_Score'] * my_skill_weight) df_courses_score['Musician_Predict'] = (df_courses_score['Musician_Final_Score'] >= my_threshold) df_courses_score['Musician_Label'] = df_courses_score.Category.isin(label_Musician) y_pred_Musician = pd.Series(df_courses_score['Musician_Predict'], name='Predicted') y_actu_Musician = pd.Series(df_courses_score['Musician_Label'], name='Actual') df_confusion_Musician = pd.crosstab(y_actu_Musician, y_pred_Musician , rownames=['Actual'], colnames=['Predicted'], margins=False) # Tranforming and shaping the data to create the confusion matrix for the ROLE: NUTRITIONIST/DIETITIAN y_actu_Dietitian = '' y_pred_Dietitian = '' df_courses_score['Dietitian_Final_Score'] = (df_courses_score['Dietitian_Role_Score'] * my_role_weight) + (df_courses_score['Dietitian_Skill_Score'] * my_skill_weight) df_courses_score['Dietitian_Predict'] = (df_courses_score['Dietitian_Final_Score'] >= my_threshold) df_courses_score['Dietitian_Label'] = df_courses_score.Category.isin(label_Dietitian) y_pred_Dietitian = pd.Series(df_courses_score['Dietitian_Predict'], name='Predicted') y_actu_Dietitian = 
pd.Series(df_courses_score['Dietitian_Label'], name='Actual') df_confusion_Dietitian = pd.crosstab(y_actu_Dietitian, y_pred_Dietitian , rownames=['Actual'], colnames=['Predicted'], margins=False) df_confusion_DataScientist df_confusion_SoftwareDevelopment df_confusion_DatabaseAdministrator df_confusion_Cybersecurity df_confusion_FinancialAccountant df_confusion_MachineLearning df_confusion_Musician df_confusion_Dietitian # Performance summary for the ROLE: DATA SCIENTIST try: tn_DataScientist = df_confusion_DataScientist.iloc[0][False] except: tn_DataScientist = 0 try: tp_DataScientist = df_confusion_DataScientist.iloc[1][True] except: tp_DataScientist = 0 try: fn_DataScientist = df_confusion_DataScientist.iloc[1][False] except: fn_DataScientist = 0 try: fp_DataScientist = df_confusion_DataScientist.iloc[0][True] except: fp_DataScientist = 0 total_count_DataScientist = tn_DataScientist + tp_DataScientist + fn_DataScientist + fp_DataScientist print('Data Scientist Accuracy Rate : ', '{0:.2f}'.format((tn_DataScientist + tp_DataScientist) / total_count_DataScientist * 100)) print('Data Scientist Misclassifcation Rate : ', '{0:.2f}'.format((fn_DataScientist + fp_DataScientist) / total_count_DataScientist * 100)) print('Data Scientist True Positive Rate : ', '{0:.2f}'.format(tp_DataScientist / (tp_DataScientist + fn_DataScientist) * 100)) print('Data Scientist False Positive Rate : ', '{0:.2f}'.format(fp_DataScientist / (tn_DataScientist + fp_DataScientist) * 100)) # Performance summary for the ROLE: SOFTWARE ENGINEER try: tn_SoftwareDevelopment = df_confusion_SoftwareDevelopment.iloc[0][False] except: tn_SoftwareDevelopment = 0 try: tp_SoftwareDevelopment = df_confusion_SoftwareDevelopment.iloc[1][True] except: tp_SoftwareDevelopment = 0 try: fn_SoftwareDevelopment = df_confusion_SoftwareDevelopment.iloc[1][False] except: fn_SoftwareDevelopment = 0 try: fp_SoftwareDevelopment = df_confusion_SoftwareDevelopment.iloc[0][True] except: fp_SoftwareDevelopment = 0 total_count_SoftwareDevelopment = tn_SoftwareDevelopment + tp_SoftwareDevelopment + fn_SoftwareDevelopment + fp_SoftwareDevelopment print('Software Engineer Accuracy Rate : ', '{0:.2f}'.format((tn_SoftwareDevelopment + tp_SoftwareDevelopment) / total_count_SoftwareDevelopment * 100)) print('Software Engineer Misclassifcation Rate : ', '{0:.2f}'.format((fn_SoftwareDevelopment + fp_SoftwareDevelopment) / total_count_SoftwareDevelopment * 100)) print('Software Engineer True Positive Rate : ', '{0:.2f}'.format(tp_SoftwareDevelopment / (tp_SoftwareDevelopment + fn_SoftwareDevelopment) * 100)) print('Software Engineer False Positive Rate : ', '{0:.2f}'.format(fp_SoftwareDevelopment / (tn_SoftwareDevelopment + fp_SoftwareDevelopment) * 100)) # Performance summary for the ROLE: DATABASE DEVELOPER/ ADMINISTRATOR try: tn_DatabaseAdministrator = df_confusion_DatabaseAdministrator.iloc[0][False] except: tn_DatabaseAdministrator = 0 try: tp_DatabaseAdministrator = df_confusion_DatabaseAdministrator.iloc[1][True] except: tp_DatabaseAdministrator = 0 try: fn_DatabaseAdministrator = df_confusion_DatabaseAdministrator.iloc[1][False] except: fn_DatabaseAdministrator = 0 try: fp_DatabaseAdministrator = df_confusion_DatabaseAdministrator.iloc[0][True] except: fp_DatabaseAdministrator = 0 total_count_DatabaseAdministrator = tn_DatabaseAdministrator + tp_DatabaseAdministrator + fn_DatabaseAdministrator + fp_DatabaseAdministrator print('Database Administrator Accuracy Rate : ', '{0:.2f}'.format((tn_DatabaseAdministrator + tp_DatabaseAdministrator) / 
total_count_DatabaseAdministrator * 100)) print('Database Administrator Misclassifcation Rate : ', '{0:.2f}'.format((fn_DatabaseAdministrator + fp_DatabaseAdministrator) / total_count_DatabaseAdministrator * 100)) print('Database Administrator True Positive Rate : ', '{0:.2f}'.format(tp_DatabaseAdministrator / (tp_DatabaseAdministrator + fn_DatabaseAdministrator) * 100)) print('Database Administrator False Positive Rate : ', '{0:.2f}'.format(fp_DatabaseAdministrator / (tn_DatabaseAdministrator + fp_DatabaseAdministrator) * 100)) # Performance summary for the ROLE: CYBERSECURITY CONSULTANT try: tn_Cybersecurity = df_confusion_Cybersecurity.iloc[0][False] except: tn_Cybersecurity = 0 try: tp_Cybersecurity = df_confusion_Cybersecurity.iloc[1][True] except: tp_Cybersecurity = 0 try: fn_Cybersecurity = df_confusion_Cybersecurity.iloc[1][False] except: fn_Cybersecurity = 0 try: fp_Cybersecurity = df_confusion_Cybersecurity.iloc[0][True] except: fp_Cybersecurity = 0 total_count_Cybersecurity = tn_Cybersecurity + tp_Cybersecurity + fn_Cybersecurity + fp_Cybersecurity print('Cybersecurity Consultant Accuracy Rate : ', '{0:.2f}'.format((tn_Cybersecurity + tp_Cybersecurity) / total_count_Cybersecurity * 100)) print('Cybersecurity Consultant Misclassifcation Rate : ', '{0:.2f}'.format((fn_Cybersecurity + fp_Cybersecurity) / total_count_Cybersecurity * 100)) print('Cybersecurity Consultant True Positive Rate : ', '{0:.2f}'.format(tp_Cybersecurity / (tp_Cybersecurity + fn_Cybersecurity) * 100)) print('Cybersecurity Consultant False Positive Rate : ', '{0:.2f}'.format(fp_Cybersecurity / (tn_Cybersecurity + fp_Cybersecurity) * 100)) # Performance summary for the ROLE: FINANCIAL ACCOUNTANT try: tn_FinancialAccountant = df_confusion_FinancialAccountant.iloc[0][False] except: tn_FinancialAccountant = 0 try: tp_FinancialAccountant = df_confusion_FinancialAccountant.iloc[1][True] except: tp_FinancialAccountant = 0 try: fn_FinancialAccountant = df_confusion_FinancialAccountant.iloc[1][False] except: fn_FinancialAccountant = 0 try: fp_FinancialAccountant = df_confusion_FinancialAccountant.iloc[0][True] except: fp_FinancialAccountant = 0 total_count_FinancialAccountant = tn_FinancialAccountant + tp_FinancialAccountant + fn_FinancialAccountant + fp_FinancialAccountant print('Financial Accountant Consultant Accuracy Rate : ', '{0:.2f}'.format((tn_FinancialAccountant + tp_FinancialAccountant) / total_count_FinancialAccountant * 100)) print('Financial Accountant Consultant Misclassifcation Rate : ', '{0:.2f}'.format((fn_FinancialAccountant + fp_FinancialAccountant) / total_count_FinancialAccountant * 100)) print('Financial Accountant Consultant True Positive Rate : ', '{0:.2f}'.format(tp_FinancialAccountant / (tp_FinancialAccountant + fn_FinancialAccountant) * 100)) print('Financial Accountant Consultant False Positive Rate : ', '{0:.2f}'.format(fp_FinancialAccountant / (tn_FinancialAccountant + fp_FinancialAccountant) * 100)) # Performance summary for the ROLE: MACHINE LEARNING ENGINEER try: tn_MachineLearning = df_confusion_MachineLearning.iloc[0][False] except: tn_MachineLearning = 0 try: tp_MachineLearning = df_confusion_MachineLearning.iloc[1][True] except: tp_MachineLearning = 0 try: fn_MachineLearning = df_confusion_MachineLearning.iloc[1][False] except: fn_MachineLearning = 0 try: fp_MachineLearning = df_confusion_MachineLearning.iloc[0][True] except: fp_MachineLearning = 0 total_count_MachineLearning = tn_MachineLearning + tp_MachineLearning + fn_MachineLearning + fp_MachineLearning print('Machine Learning 
Engineer Accuracy Rate : ', '{0:.2f}'.format((tn_MachineLearning + tp_MachineLearning) / total_count_MachineLearning * 100)) print('Machine Learning Engineer Misclassifcation Rate : ', '{0:.2f}'.format((fn_MachineLearning + fp_MachineLearning) / total_count_MachineLearning * 100)) print('Machine Learning Engineer True Positive Rate : ', '{0:.2f}'.format(tp_MachineLearning / (tp_MachineLearning + fn_MachineLearning) * 100)) print('Machine Learning Engineer False Positive Rate : ', '{0:.2f}'.format(fp_MachineLearning / (tn_MachineLearning + fp_MachineLearning) * 100)) # Performance summary for the ROLE: MUSICIAN try: tn_Musician = df_confusion_Musician.iloc[0][False] except: tn_Musician = 0 try: tp_Musician = df_confusion_Musician.iloc[1][True] except: tp_Musician = 0 try: fn_Musician = df_confusion_Musician.iloc[1][False] except: fn_Musician = 0 try: fp_Musician = df_confusion_Musician.iloc[0][True] except: fp_Musician = 0 total_count_Musician = tn_Musician + tp_Musician + fn_Musician + fp_Musician print('Musician Accuracy Rate : ', '{0:.2f}'.format((tn_Musician + tp_Musician) / total_count_Musician * 100)) print('Musician Misclassifcation Rate : ', '{0:.2f}'.format((fn_Musician + fp_Musician) / total_count_Musician * 100)) print('Musician True Positive Rate : ', '{0:.2f}'.format(tp_Musician / (tp_Musician + fn_Musician) * 100)) print('Musician False Positive Rate : ', '{0:.2f}'.format(fp_Musician / (tn_Musician + fp_Musician) * 100)) # Performance summary for the ROLE: DIETITIAN try: tn_Dietitian = df_confusion_Dietitian.iloc[0][False] except: tn_Dietitian = 0 try: tp_Dietitian = df_confusion_Dietitian.iloc[1][True] except: tp_Dietitian = 0 try: fn_Dietitian = df_confusion_Dietitian.iloc[1][False] except: fn_Dietitian = 0 try: fp_Dietitian = df_confusion_Dietitian.iloc[0][True] except: fp_Dietitian = 0 total_count_Dietitian = tn_Dietitian + tp_Dietitian + fn_Dietitian + fp_Dietitian print('Dietitian Accuracy Rate : ', '{0:.2f}'.format((tn_Dietitian + tp_Dietitian) / total_count_Dietitian * 100)) print('Dietitian Misclassifcation Rate : ', '{0:.2f}'.format((fn_Dietitian + fp_Dietitian) / total_count_Dietitian * 100)) print('Dietitian True Positive Rate : ', '{0:.2f}'.format(tp_Dietitian / (tp_Dietitian + fn_Dietitian) * 100)) print('Dietitian False Positive Rate : ', '{0:.2f}'.format(fp_Dietitian / (tn_Dietitian + fp_Dietitian) * 100)) df_final_model = df_courses_score[['Course Id', 'Course Name', 'Course Description', 'Slug', 'Provider', 'Universities/Institutions', 'Parent Subject', 'Child Subject', 'Category', 'Url', 'Length', 'Language', 'Credential Name', 'Rating', 'Number of Ratings', 'Certificate', 'Workload', 'DataScientist_Final_Score', 'DataScientist_Predict', 'SoftwareDevelopment_Final_Score', 'SoftwareDevelopment_Predict', 'DatabaseAdministrator_Final_Score', 'DatabaseAdministrator_Predict', 'Cybersecurity_Final_Score', 'Cybersecurity_Predict', 'FinancialAccountant_Final_Score', 'FinancialAccountant_Predict', 'MachineLearning_Final_Score', 'MachineLearning_Predict', 'Musician_Final_Score', 'Musician_Predict', 'Dietitian_Final_Score', 'Dietitian_Predict']] df_final_model test = df_final_model.sort_values('FinancialAccountant_Final_Score', ascending=False) test # Save the model results to the CSV File df_final_model.columns df_final_model = df_final_model.drop(df_final_model.columns[df_final_model.columns.str.contains('unnamed',case = False)],axis = 1) df_final_model = df_final_model.replace(np.nan, '', regex=True) df_final_model.columns = ['courseId', 'courseName', 
'courseDescription', 'slug', 'provider', 'universitiesInstitutions', 'parentSubject', 'childSubject', 'category', 'url', 'length', 'language', 'credentialName', 'rating', 'numberOfRatings', 'certificate', 'workload', 'dataScientistFinalScore', 'dataScientistPredict', 'softwareDevelopmentFinalScore', 'softwareDevelopmentPredict', 'databaseAdministratorFinalScore', 'databaseAdministratorPredict', 'cybersecurityFinalScore', 'cybersecurityPredict', 'financialAccountantFinalScore', 'financialAccountantPredict', 'machineLearningFinalScore', 'machineLearningPredict', 'musicianFinalScore', 'musicianPredict', 'dietitianFinalScore', 'dietitianPredict'] df_final_model df_final_model.to_csv(my_fpath_model, sep=',', encoding='utf-8') ``` ### End of the Notebook. Thank you!
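The per-role blocks above repeat the same confusion-matrix arithmetic for every role. As a hedged aside that is not part of the original notebook, the sketch below shows how a single helper could compute the same four rates from any of the `df_confusion_*` crosstabs; the function name and the zero-division guard are assumptions.

```
def summarize_role(df_confusion, role_name):
    # Sketch of a reusable helper for the per-role summaries above. It assumes the same
    # crosstab layout as df_confusion_* (row 0 = actual False, row 1 = actual True,
    # columns labelled False/True), treating missing cells as 0 like the try/except blocks.
    def cell(row, col):
        try:
            return df_confusion.iloc[row][col]
        except (IndexError, KeyError):
            return 0

    def rate(numerator, denominator):
        return numerator / denominator * 100 if denominator else 0.0

    tn, tp = cell(0, False), cell(1, True)
    fn, fp = cell(1, False), cell(0, True)
    total = tn + tp + fn + fp
    print(f'{role_name} Accuracy Rate : {rate(tn + tp, total):.2f}')
    print(f'{role_name} Misclassification Rate : {rate(fn + fp, total):.2f}')
    print(f'{role_name} True Positive Rate : {rate(tp, tp + fn):.2f}')
    print(f'{role_name} False Positive Rate : {rate(fp, tn + fp):.2f}')

# Example usage with one of the crosstabs defined earlier:
# summarize_role(df_confusion_Dietitian, 'Dietitian')
```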
<a href="https://colab.research.google.com/github/sreyaschaithanya/football_analysis/blob/main/Football_1_Plotting_pass_and_shot.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` #! git clone https://github.com/statsbomb/open-data.git from google.colab import drive drive.mount('/content/drive') #!rm -rf /content/open-data #!cp -r "/content/drive/My Drive/Football/open-data" "open-data" #!cp "/content/drive/My Drive/Football/open-data.zip" "open-data.zip" #!unzip /content/open-data.zip -d /content/ #from google.colab import files #files.download('open-data.zip') DATA_PATH = "/content/drive/My Drive/Football/open-data/data/" MATCHES_PATH = DATA_PATH+"matches/" #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Mar 25 17:32:00 2020 @author: davsu428 """ import matplotlib.pyplot as plt from matplotlib.patches import Arc def createPitch(length,width, unity,linecolor): # in meters # Code by @JPJ_dejong """ creates a plot in which the 'length' is the length of the pitch (goal to goal). And 'width' is the width of the pitch (sideline to sideline). Fill in the unity in meters or in yards. """ #Set unity if unity == "meters": # Set boundaries if length >= 120.5 or width >= 75.5: return(str("Field dimensions are too big for meters as unity, didn't you mean yards as unity?\ Otherwise the maximum length is 120 meters and the maximum width is 75 meters. Please try again")) #Run program if unity and boundaries are accepted else: #Create figure fig=plt.figure() #fig.set_size_inches(7, 5) ax=fig.add_subplot(1,1,1) #Pitch Outline & Centre Line plt.plot([0,0],[0,width], color=linecolor) plt.plot([0,length],[width,width], color=linecolor) plt.plot([length,length],[width,0], color=linecolor) plt.plot([length,0],[0,0], color=linecolor) plt.plot([length/2,length/2],[0,width], color=linecolor) #Left Penalty Area plt.plot([16.5 ,16.5],[(width/2 +16.5),(width/2-16.5)],color=linecolor) plt.plot([0,16.5],[(width/2 +16.5),(width/2 +16.5)],color=linecolor) plt.plot([16.5,0],[(width/2 -16.5),(width/2 -16.5)],color=linecolor) #Right Penalty Area plt.plot([(length-16.5),length],[(width/2 +16.5),(width/2 +16.5)],color=linecolor) plt.plot([(length-16.5), (length-16.5)],[(width/2 +16.5),(width/2-16.5)],color=linecolor) plt.plot([(length-16.5),length],[(width/2 -16.5),(width/2 -16.5)],color=linecolor) #Left 5-meters Box plt.plot([0,5.5],[(width/2+7.32/2+5.5),(width/2+7.32/2+5.5)],color=linecolor) plt.plot([5.5,5.5],[(width/2+7.32/2+5.5),(width/2-7.32/2-5.5)],color=linecolor) plt.plot([5.5,0.5],[(width/2-7.32/2-5.5),(width/2-7.32/2-5.5)],color=linecolor) #Right 5 -eters Box plt.plot([length,length-5.5],[(width/2+7.32/2+5.5),(width/2+7.32/2+5.5)],color=linecolor) plt.plot([length-5.5,length-5.5],[(width/2+7.32/2+5.5),width/2-7.32/2-5.5],color=linecolor) plt.plot([length-5.5,length],[width/2-7.32/2-5.5,width/2-7.32/2-5.5],color=linecolor) #Prepare Circles centreCircle = plt.Circle((length/2,width/2),9.15,color=linecolor,fill=False) centreSpot = plt.Circle((length/2,width/2),0.8,color=linecolor) leftPenSpot = plt.Circle((11,width/2),0.8,color=linecolor) rightPenSpot = plt.Circle((length-11,width/2),0.8,color=linecolor) #Draw Circles ax.add_patch(centreCircle) ax.add_patch(centreSpot) ax.add_patch(leftPenSpot) ax.add_patch(rightPenSpot) #Prepare Arcs leftArc = Arc((11,width/2),height=18.3,width=18.3,angle=0,theta1=308,theta2=52,color=linecolor) rightArc = 
Arc((length-11,width/2),height=18.3,width=18.3,angle=0,theta1=128,theta2=232,color=linecolor) #Draw Arcs ax.add_patch(leftArc) ax.add_patch(rightArc) #Axis titles #check unity again elif unity == "yards": #check boundaries again if length <= 95: return(str("Didn't you mean meters as unity?")) elif length >= 131 or width >= 101: return(str("Field dimensions are too big. Maximum length is 130, maximum width is 100")) #Run program if unity and boundaries are accepted else: #Create figure fig=plt.figure() #fig.set_size_inches(7, 5) ax=fig.add_subplot(1,1,1) #Pitch Outline & Centre Line plt.plot([0,0],[0,width], color=linecolor) plt.plot([0,length],[width,width], color=linecolor) plt.plot([length,length],[width,0], color=linecolor) plt.plot([length,0],[0,0], color=linecolor) plt.plot([length/2,length/2],[0,width], color=linecolor) #Left Penalty Area plt.plot([18 ,18],[(width/2 +18),(width/2-18)],color=linecolor) plt.plot([0,18],[(width/2 +18),(width/2 +18)],color=linecolor) plt.plot([18,0],[(width/2 -18),(width/2 -18)],color=linecolor) #Right Penalty Area plt.plot([(length-18),length],[(width/2 +18),(width/2 +18)],color=linecolor) plt.plot([(length-18), (length-18)],[(width/2 +18),(width/2-18)],color=linecolor) plt.plot([(length-18),length],[(width/2 -18),(width/2 -18)],color=linecolor) #Left 6-yard Box plt.plot([0,6],[(width/2+7.32/2+6),(width/2+7.32/2+6)],color=linecolor) plt.plot([6,6],[(width/2+7.32/2+6),(width/2-7.32/2-6)],color=linecolor) plt.plot([6,0],[(width/2-7.32/2-6),(width/2-7.32/2-6)],color=linecolor) #Right 6-yard Box plt.plot([length,length-6],[(width/2+7.32/2+6),(width/2+7.32/2+6)],color=linecolor) plt.plot([length-6,length-6],[(width/2+7.32/2+6),width/2-7.32/2-6],color=linecolor) plt.plot([length-6,length],[(width/2-7.32/2-6),width/2-7.32/2-6],color=linecolor) #Prepare Circles; 10 yards distance. 
penalty on 12 yards centreCircle = plt.Circle((length/2,width/2),10,color=linecolor,fill=False) centreSpot = plt.Circle((length/2,width/2),0.8,color=linecolor) leftPenSpot = plt.Circle((12,width/2),0.8,color=linecolor) rightPenSpot = plt.Circle((length-12,width/2),0.8,color=linecolor) #Draw Circles ax.add_patch(centreCircle) ax.add_patch(centreSpot) ax.add_patch(leftPenSpot) ax.add_patch(rightPenSpot) #Prepare Arcs leftArc = Arc((11,width/2),height=20,width=20,angle=0,theta1=312,theta2=48,color=linecolor) rightArc = Arc((length-11,width/2),height=20,width=20,angle=0,theta1=130,theta2=230,color=linecolor) #Draw Arcs ax.add_patch(leftArc) ax.add_patch(rightArc) #Tidy Axes plt.axis('off') return fig,ax def createPitchOld(): #Taken from FC Python #Create figure fig=plt.figure() ax=fig.add_subplot(1,1,1) #Pitch Outline & Centre Line plt.plot([0,0],[0,90], color=linecolor) plt.plot([0,130],[90,90], color=linecolor) plt.plot([130,130],[90,0], color=linecolor) plt.plot([130,0],[0,0], color=linecolor) plt.plot([65,65],[0,90], color=linecolor) #Left Penalty Area plt.plot([16.5,16.5],[65,25],color=linecolor) plt.plot([0,16.5],[65,65],color=linecolor) plt.plot([16.5,0],[25,25],color=linecolor) #Right Penalty Area plt.plot([130,113.5],[65,65],color=linecolor) plt.plot([113.5,113.5],[65,25],color=linecolor) plt.plot([113.5,130],[25,25],color=linecolor) #Left 6-yard Box plt.plot([0,5.5],[54,54],color=linecolor) plt.plot([5.5,5.5],[54,36],color=linecolor) plt.plot([5.5,0.5],[36,36],color=linecolor) #Right 6-yard Box plt.plot([130,124.5],[54,54],color=linecolor) plt.plot([124.5,124.5],[54,36],color=linecolor) plt.plot([124.5,130],[36,36],color=linecolor) #Prepare Circles centreCircle = plt.Circle((65,45),9.15,color=linecolor,fill=False) centreSpot = plt.Circle((65,45),0.8,color=linecolor) leftPenSpot = plt.Circle((11,45),0.8,color=linecolor) rightPenSpot = plt.Circle((119,45),0.8,color=linecolor) #Draw Circles ax.add_patch(centreCircle) ax.add_patch(centreSpot) ax.add_patch(leftPenSpot) ax.add_patch(rightPenSpot) #Prepare Arcs leftArc = Arc((11,45),height=18.3,width=18.3,angle=0,theta1=310,theta2=50,color=linecolor) rightArc = Arc((119,45),height=18.3,width=18.3,angle=0,theta1=130,theta2=230,color=linecolor) #Draw Arcs ax.add_patch(leftArc) ax.add_patch(rightArc) #Tidy Axes plt.axis('off') return fig,ax def createGoalMouth(): #Adopted from FC Python #Create figure fig=plt.figure() ax=fig.add_subplot(1,1,1) linecolor='black' #Pitch Outline & Centre Line plt.plot([0,65],[0,0], color=linecolor) plt.plot([65,65],[50,0], color=linecolor) plt.plot([0,0],[50,0], color=linecolor) #Left Penalty Area plt.plot([12.5,52.5],[16.5,16.5],color=linecolor) plt.plot([52.5,52.5],[16.5,0],color=linecolor) plt.plot([12.5,12.5],[0,16.5],color=linecolor) #Left 6-yard Box plt.plot([41.5,41.5],[5.5,0],color=linecolor) plt.plot([23.5,41.5],[5.5,5.5],color=linecolor) plt.plot([23.5,23.5],[0,5.5],color=linecolor) #Goal plt.plot([41.5-5.34,41.5-5.34],[-2,0],color=linecolor) plt.plot([23.5+5.34,41.5-5.34],[-2,-2],color=linecolor) plt.plot([23.5+5.34,23.5+5.34],[0,-2],color=linecolor) #Prepare Circles leftPenSpot = plt.Circle((65/2,11),0.8,color=linecolor) #Draw Circles ax.add_patch(leftPenSpot) #Prepare Arcs leftArc = Arc((32.5,11),height=18.3,width=18.3,angle=0,theta1=38,theta2=142,color=linecolor) #Draw Arcs ax.add_patch(leftArc) #Tidy Axes plt.axis('off') return fig,ax import pandas as pd import json competitions = pd.read_json(DATA_PATH+"competitions.json") competitions.head() competitions.describe() competitions.info() 
competitions["competition_id"].unique(), competitions["competition_id"].nunique() #show all the competitions and the related files for matches pd.set_option("display.max_rows", None, "display.max_columns", None) for i in competitions["competition_id"].unique(): print(competitions[competitions["competition_id"]==i]) # show files import glob competitions_path_list = glob.glob(MATCHES_PATH+"/*") competition_file_dict = {} for i in competitions_path_list: competition_file_dict[i] = glob.glob(i+"/*.json") competition_file_dict DATA_PATH with open(DATA_PATH + 'matches/72/30.json') as f: matches = json.load(f) matches[0] for match in matches: if match["home_team"]['home_team_name']=="Sweden Women's" or match["away_team"]['away_team_name']=="Sweden Women's": print("match between: "+ match["home_team"]['home_team_name']+" vs " +match["away_team"]['away_team_name'] +" with score {}:{}".format(match["home_score"],match["away_score"])) ``` # Pitch map ``` import matplotlib.pyplot as plt import numpy as np pitchLenX = 120 pitchWidY = 80 match_id = 69301 def get_match(match_id): for i in matches: if i["match_id"]==match_id: return i def get_event(match_id): with open(DATA_PATH+"events/"+str(match_id)+".json") as f: event = json.load(f) return event match = get_match(match_id) events = get_event(match_id) match events_df = pd.json_normalize(events) events_df.columns.values shots = events_df[events_df["type.name"]=="Shot"] shots[["period","minute","location","team.name","shot.outcome.name"]] (fig,ax) = createPitch(pitchLenX,pitchWidY,"yards","grey") for i,shot in shots.iterrows(): x = shot.location[0] y = shot.location[1] goal = shot["shot.outcome.name"] == "Goal" shot_team = shot["team.name"] circle_size = np.sqrt(shot["shot.statsbomb_xg"]*15) print(circle_size) if shot_team == "Sweden Women's": if goal: shotCircle = plt.Circle((x,pitchWidY-y),circle_size,color="red") #plt.text(x,pitchWidY-y,"hi") else: shotCircle = plt.Circle((x,pitchWidY-y),circle_size,color="red") shotCircle.set_alpha(0.2) else: if goal: shotCircle = plt.Circle((pitchLenX-x,y),circle_size,color="blue") #plt.text((pitchLenX-x+1),y+1,shot['player.name']) else: shotCircle = plt.Circle((pitchLenX-x,y),circle_size,color="blue") shotCircle.set_alpha(0.2) ax.add_patch(shotCircle) #"England Women's" #plt.show() fig ``` # passes plotting ``` #passes = events_df[(events_df["type.name"]=="Pass") & (events_df["player.name"]=="Sara Caroline Seger") & (events_df["play_pattern.name"]=="Regular Play")] passes = events_df[(events_df["type.name"]=="Pass") & (events_df["player.name"]=="Sara Caroline Seger")] # shots[["period","minute","location","team.name","shot.outcome.name"]] # #events_df["type.name"].unique() #passes[["location"]+[i for i in passes.columns.values if "pass" in i]] passes (fig,ax) = createPitch(pitchLenX,pitchWidY,"yards","green") fig.set_size_inches(15, 10.5) for i,shot in passes.iterrows(): x_start = shot.location[0] y_start = shot.location[1] x_end = shot["pass.end_location"][0] y_end = shot["pass.end_location"][1] #goal = shot["shot.outcome.name"] == "Goal" #circle_size = np.sqrt(shot["shot.statsbomb_xg"]*15) #print(circle_size) shotarrow = plt.Arrow(x_start, pitchWidY-y_start, x_end-x_start, pitchWidY-y_end-pitchWidY+y_start,width=2,color="blue") ax.add_patch(shotarrow) #"England Women's" #plt.show() (fig, ax) = createPitch(120, 80, 'yards', 'gray') for i, p in passes.iterrows(): x, y = p.location x, y = (x,pitchWidY-y) end_x, end_y = p["pass.end_location"] end_x, end_y = (end_x, pitchWidY-end_y) start_circle = plt.Circle((x, y), 
1, alpha=.2, color="blue") pass_arrow = plt.Arrow(x, y, end_x - x, end_y - y, width=2, color="blue") ax.add_patch(pass_arrow) ax.add_patch(start_circle) fig.set_size_inches(15, 10.5) plt.show() ```
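As a small follow-up to the pass plot above, the hedged sketch below summarises the same `passes` DataFrame: it computes each pass length from `location` and `pass.end_location`, and, assuming that a missing `pass.outcome.name` marks a completed pass in this event data, a rough completion rate. None of this is in the original notebook.

```
import numpy as np

# Hedged follow-up sketch: summarise the passes plotted above. Assumes the flattened
# StatsBomb columns 'location' and 'pass.end_location' hold [x, y] lists, and that a
# missing 'pass.outcome.name' indicates a completed pass.
pass_summary = passes.copy()
pass_summary["length"] = pass_summary.apply(
    lambda r: np.hypot(r["pass.end_location"][0] - r["location"][0],
                       r["pass.end_location"][1] - r["location"][1]),
    axis=1)
print("passes plotted:", len(pass_summary))
print("average pass length (pitch units):", round(pass_summary["length"].mean(), 1))
if "pass.outcome.name" in pass_summary.columns:
    completed = pass_summary["pass.outcome.name"].isna()
    print("approx. completion rate:", round(completed.mean() * 100, 1), "%")
```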
``` %matplotlib inline ``` DCGAN Tutorial ============== **Author**: `Nathan Inkawhich <https://github.com/inkawhich>`__ Introduction ------------ This tutorial will give an introduction to DCGANs through an example. We will train a generative adversarial network (GAN) to generate new celebrities after showing it pictures of many real celebrities. Most of the code here is from the dcgan implementation in `pytorch/examples <https://github.com/pytorch/examples>`__, and this document will give a thorough explanation of the implementation and shed light on how and why this model works. But don’t worry, no prior knowledge of GANs is required, but it may require a first-timer to spend some time reasoning about what is actually happening under the hood. Also, for the sake of time it will help to have a GPU, or two. Lets start from the beginning. Generative Adversarial Networks ------------------------------- What is a GAN? ~~~~~~~~~~~~~~ GANs are a framework for teaching a DL model to capture the training data’s distribution so we can generate new data from that same distribution. GANs were invented by Ian Goodfellow in 2014 and first described in the paper `Generative Adversarial Nets <https://papers.nips.cc/paper/5423-generative-adversarial-nets.pdf>`__. They are made of two distinct models, a *generator* and a *discriminator*. The job of the generator is to spawn ‘fake’ images that look like the training images. The job of the discriminator is to look at an image and output whether or not it is a real training image or a fake image from the generator. During training, the generator is constantly trying to outsmart the discriminator by generating better and better fakes, while the discriminator is working to become a better detective and correctly classify the real and fake images. The equilibrium of this game is when the generator is generating perfect fakes that look as if they came directly from the training data, and the discriminator is left to always guess at 50% confidence that the generator output is real or fake. Now, lets define some notation to be used throughout tutorial starting with the discriminator. Let $x$ be data representing an image. $D(x)$ is the discriminator network which outputs the (scalar) probability that $x$ came from training data rather than the generator. Here, since we are dealing with images the input to $D(x)$ is an image of CHW size 3x64x64. Intuitively, $D(x)$ should be HIGH when $x$ comes from training data and LOW when $x$ comes from the generator. $D(x)$ can also be thought of as a traditional binary classifier. For the generator’s notation, let $z$ be a latent space vector sampled from a standard normal distribution. $G(z)$ represents the generator function which maps the latent vector $z$ to data-space. The goal of $G$ is to estimate the distribution that the training data comes from ($p_{data}$) so it can generate fake samples from that estimated distribution ($p_g$). So, $D(G(z))$ is the probability (scalar) that the output of the generator $G$ is a real image. As described in `Goodfellow’s paper <https://papers.nips.cc/paper/5423-generative-adversarial-nets.pdf>`__, $D$ and $G$ play a minimax game in which $D$ tries to maximize the probability it correctly classifies reals and fakes ($logD(x)$), and $G$ tries to minimize the probability that $D$ will predict its outputs are fake ($log(1-D(G(x)))$). 
From the paper, the GAN loss function is \begin{align}\underset{G}{\text{min}} \underset{D}{\text{max}}V(D,G) = \mathbb{E}_{x\sim p_{data}(x)}\big[logD(x)\big] + \mathbb{E}_{z\sim p_{z}(z)}\big[log(1-D(G(z)))\big]\end{align} In theory, the solution to this minimax game is where $p_g = p_{data}$, and the discriminator guesses randomly if the inputs are real or fake. However, the convergence theory of GANs is still being actively researched and in reality models do not always train to this point. What is a DCGAN? ~~~~~~~~~~~~~~~~ A DCGAN is a direct extension of the GAN described above, except that it explicitly uses convolutional and convolutional-transpose layers in the discriminator and generator, respectively. It was first described by Radford et. al. in the paper `Unsupervised Representation Learning With Deep Convolutional Generative Adversarial Networks <https://arxiv.org/pdf/1511.06434.pdf>`__. The discriminator is made up of strided `convolution <https://pytorch.org/docs/stable/nn.html#torch.nn.Conv2d>`__ layers, `batch norm <https://pytorch.org/docs/stable/nn.html#torch.nn.BatchNorm2d>`__ layers, and `LeakyReLU <https://pytorch.org/docs/stable/nn.html#torch.nn.LeakyReLU>`__ activations. The input is a 3x64x64 input image and the output is a scalar probability that the input is from the real data distribution. The generator is comprised of `convolutional-transpose <https://pytorch.org/docs/stable/nn.html#torch.nn.ConvTranspose2d>`__ layers, batch norm layers, and `ReLU <https://pytorch.org/docs/stable/nn.html#relu>`__ activations. The input is a latent vector, $z$, that is drawn from a standard normal distribution and the output is a 3x64x64 RGB image. The strided conv-transpose layers allow the latent vector to be transformed into a volume with the same shape as an image. In the paper, the authors also give some tips about how to setup the optimizers, how to calculate the loss functions, and how to initialize the model weights, all of which will be explained in the coming sections. ``` from __future__ import print_function #%matplotlib inline import argparse import os import random import torch import torch.nn as nn import torch.nn.parallel import torch.backends.cudnn as cudnn import torch.optim as optim import torch.utils.data import torchvision.datasets as dset import torchvision.transforms as transforms import torchvision.utils as vutils import numpy as np import matplotlib.pyplot as plt import matplotlib.animation as animation from IPython.display import HTML # Set random seed for reproducibility manualSeed = 999 #manualSeed = random.randint(1, 10000) # use if you want new results print("Random Seed: ", manualSeed) random.seed(manualSeed) torch.manual_seed(manualSeed) ``` Inputs ------ Let’s define some inputs for the run: - **dataroot** - the path to the root of the dataset folder. We will talk more about the dataset in the next section - **workers** - the number of worker threads for loading the data with the DataLoader - **batch_size** - the batch size used in training. The DCGAN paper uses a batch size of 128 - **image_size** - the spatial size of the images used for training. This implementation defaults to 64x64. If another size is desired, the structures of D and G must be changed. See `here <https://github.com/pytorch/examples/issues/70>`__ for more details - **nc** - number of color channels in the input images. 
For color images this is 3 - **nz** - length of latent vector - **ngf** - relates to the depth of feature maps carried through the generator - **ndf** - sets the depth of feature maps propagated through the discriminator - **num_epochs** - number of training epochs to run. Training for longer will probably lead to better results but will also take much longer - **lr** - learning rate for training. As described in the DCGAN paper, this number should be 0.0002 - **beta1** - beta1 hyperparameter for Adam optimizers. As described in paper, this number should be 0.5 - **ngpu** - number of GPUs available. If this is 0, code will run in CPU mode. If this number is greater than 0 it will run on that number of GPUs ``` # Root directory for dataset dataroot = "data/celeba" # Number of workers for dataloader workers = 2 # Batch size during training batch_size = 128 # Spatial size of training images. All images will be resized to this # size using a transformer. image_size = 64 # Number of channels in the training images. For color images this is 3 nc = 3 # Size of z latent vector (i.e. size of generator input) nz = 100 # Size of feature maps in generator ngf = 64 # Size of feature maps in discriminator ndf = 64 # Number of training epochs num_epochs = 5 # Learning rate for optimizers lr = 0.0002 # Beta1 hyperparam for Adam optimizers beta1 = 0.5 # Number of GPUs available. Use 0 for CPU mode. ngpu = 1 ``` Data ---- In this tutorial we will use the `Celeb-A Faces dataset <http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html>`__ which can be downloaded at the linked site, or in `Google Drive <https://drive.google.com/drive/folders/0B7EVK8r0v71pTUZsaXdaSnZBZzg>`__. The dataset will download as a file named *img_align_celeba.zip*. Once downloaded, create a directory named *celeba* and extract the zip file into that directory. Then, set the *dataroot* input for this notebook to the *celeba* directory you just created. The resulting directory structure should be: :: /path/to/celeba -> img_align_celeba -> 188242.jpg -> 173822.jpg -> 284702.jpg -> 537394.jpg ... This is an important step because we will be using the ImageFolder dataset class, which requires there to be subdirectories in the dataset’s root folder. Now, we can create the dataset, create the dataloader, set the device to run on, and finally visualize some of the training data. ``` # We can use an image folder dataset the way we have it setup. # Create the dataset dataset = dset.ImageFolder(root=dataroot, transform=transforms.Compose([ transforms.Resize(image_size), transforms.CenterCrop(image_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ])) # Create the dataloader dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=workers) # Decide which device we want to run on device = torch.device("cuda:0" if (torch.cuda.is_available() and ngpu > 0) else "cpu") # Plot some training images real_batch = next(iter(dataloader)) plt.figure(figsize=(8,8)) plt.axis("off") plt.title("Training Images") plt.imshow(np.transpose(vutils.make_grid(real_batch[0].to(device)[:64], padding=2, normalize=True).cpu(),(1,2,0))) ``` Implementation -------------- With our input parameters set and the dataset prepared, we can now get into the implementation. We will start with the weigth initialization strategy, then talk about the generator, discriminator, loss functions, and training loop in detail. 
Weight Initialization ~~~~~~~~~~~~~~~~~~~~~ From the DCGAN paper, the authors specify that all model weights shall be randomly initialized from a Normal distribution with mean=0, stdev=0.02. The ``weights_init`` function takes an initialized model as input and reinitializes all convolutional, convolutional-transpose, and batch normalization layers to meet this criteria. This function is applied to the models immediately after initialization. ``` # custom weights initialization called on netG and netD def weights_init(m): classname = m.__class__.__name__ if classname.find('Conv') != -1: nn.init.normal_(m.weight.data, 0.0, 0.02) elif classname.find('BatchNorm') != -1: nn.init.normal_(m.weight.data, 1.0, 0.02) nn.init.constant_(m.bias.data, 0) ``` Generator ~~~~~~~~~ The generator, $G$, is designed to map the latent space vector ($z$) to data-space. Since our data are images, converting $z$ to data-space means ultimately creating a RGB image with the same size as the training images (i.e. 3x64x64). In practice, this is accomplished through a series of strided two dimensional convolutional transpose layers, each paired with a 2d batch norm layer and a relu activation. The output of the generator is fed through a tanh function to return it to the input data range of $[-1,1]$. It is worth noting the existence of the batch norm functions after the conv-transpose layers, as this is a critical contribution of the DCGAN paper. These layers help with the flow of gradients during training. An image of the generator from the DCGAN paper is shown below. .. figure:: /_static/img/dcgan_generator.png :alt: dcgan_generator Notice, the how the inputs we set in the input section (*nz*, *ngf*, and *nc*) influence the generator architecture in code. *nz* is the length of the z input vector, *ngf* relates to the size of the feature maps that are propagated through the generator, and *nc* is the number of channels in the output image (set to 3 for RGB images). Below is the code for the generator. ``` # Generator Code class Generator(nn.Module): def __init__(self, ngpu): super(Generator, self).__init__() self.ngpu = ngpu self.main = nn.Sequential( # input is Z, going into a convolution nn.ConvTranspose2d( nz, ngf * 8, 4, 1, 0, bias=False), nn.BatchNorm2d(ngf * 8), nn.ReLU(True), # state size. (ngf*8) x 4 x 4 nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 4), nn.ReLU(True), # state size. (ngf*4) x 8 x 8 nn.ConvTranspose2d( ngf * 4, ngf * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 2), nn.ReLU(True), # state size. (ngf*2) x 16 x 16 nn.ConvTranspose2d( ngf * 2, ngf, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf), nn.ReLU(True), # state size. (ngf) x 32 x 32 nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False), nn.Tanh() # state size. (nc) x 64 x 64 ) def forward(self, input): return self.main(input) ``` Now, we can instantiate the generator and apply the ``weights_init`` function. Check out the printed model to see how the generator object is structured. ``` # Create the generator netG = Generator(ngpu).to(device) # Handle multi-gpu if desired if (device.type == 'cuda') and (ngpu > 1): netG = nn.DataParallel(netG, list(range(ngpu))) # Apply the weights_init function to randomly initialize all weights # to mean=0, stdev=0.2. 
netG.apply(weights_init) # Print the model print(netG) ``` Discriminator ~~~~~~~~~~~~~ As mentioned, the discriminator, $D$, is a binary classification network that takes an image as input and outputs a scalar probability that the input image is real (as opposed to fake). Here, $D$ takes a 3x64x64 input image, processes it through a series of Conv2d, BatchNorm2d, and LeakyReLU layers, and outputs the final probability through a Sigmoid activation function. This architecture can be extended with more layers if necessary for the problem, but there is significance to the use of the strided convolution, BatchNorm, and LeakyReLUs. The DCGAN paper mentions it is a good practice to use strided convolution rather than pooling to downsample because it lets the network learn its own pooling function. Also batch norm and leaky relu functions promote healthy gradient flow which is critical for the learning process of both $G$ and $D$. Discriminator Code ``` class Discriminator(nn.Module): def __init__(self, ngpu): super(Discriminator, self).__init__() self.ngpu = ngpu self.main = nn.Sequential( # input is (nc) x 64 x 64 nn.Conv2d(nc, ndf, 4, 2, 1, bias=False), nn.LeakyReLU(0.2, inplace=True), # state size. (ndf) x 32 x 32 nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 2), nn.LeakyReLU(0.2, inplace=True), # state size. (ndf*2) x 16 x 16 nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 4), nn.LeakyReLU(0.2, inplace=True), # state size. (ndf*4) x 8 x 8 nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 8), nn.LeakyReLU(0.2, inplace=True), # state size. (ndf*8) x 4 x 4 nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False), nn.Sigmoid() ) def forward(self, input): return self.main(input) ``` Now, as with the generator, we can create the discriminator, apply the ``weights_init`` function, and print the model’s structure. ``` # Create the Discriminator netD = Discriminator(ngpu).to(device) # Handle multi-gpu if desired if (device.type == 'cuda') and (ngpu > 1): netD = nn.DataParallel(netD, list(range(ngpu))) # Apply the weights_init function to randomly initialize all weights # to mean=0, stdev=0.2. netD.apply(weights_init) # Print the model print(netD) ``` Loss Functions and Optimizers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ With $D$ and $G$ setup, we can specify how they learn through the loss functions and optimizers. We will use the Binary Cross Entropy loss (`BCELoss <https://pytorch.org/docs/stable/nn.html#torch.nn.BCELoss>`__) function which is defined in PyTorch as: \begin{align}\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad l_n = - \left[ y_n \cdot \log x_n + (1 - y_n) \cdot \log (1 - x_n) \right]\end{align} Notice how this function provides the calculation of both log components in the objective function (i.e. $log(D(x))$ and $log(1-D(G(z)))$). We can specify what part of the BCE equation to use with the $y$ input. This is accomplished in the training loop which is coming up soon, but it is important to understand how we can choose which component we wish to calculate just by changing $y$ (i.e. GT labels). Next, we define our real label as 1 and the fake label as 0. These labels will be used when calculating the losses of $D$ and $G$, and this is also the convention used in the original GAN paper. Finally, we set up two separate optimizers, one for $D$ and one for $G$. As specified in the DCGAN paper, both are Adam optimizers with learning rate 0.0002 and Beta1 = 0.5. 
For keeping track of the generator’s learning progression, we will generate a fixed batch of latent vectors that are drawn from a Gaussian distribution (i.e. fixed_noise) . In the training loop, we will periodically input this fixed_noise into $G$, and over the iterations we will see images form out of the noise. ``` # Initialize BCELoss function criterion = nn.BCELoss() # Create batch of latent vectors that we will use to visualize # the progression of the generator fixed_noise = torch.randn(64, nz, 1, 1, device=device) # Establish convention for real and fake labels during training real_label = 1 fake_label = 0 # Setup Adam optimizers for both G and D optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999)) optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999)) ``` Training ~~~~~~~~ Finally, now that we have all of the parts of the GAN framework defined, we can train it. Be mindful that training GANs is somewhat of an art form, as incorrect hyperparameter settings lead to mode collapse with little explanation of what went wrong. Here, we will closely follow Algorithm 1 from Goodfellow’s paper, while abiding by some of the best practices shown in `ganhacks <https://github.com/soumith/ganhacks>`__. Namely, we will “construct different mini-batches for real and fake” images, and also adjust G’s objective function to maximize $logD(G(z))$. Training is split up into two main parts. Part 1 updates the Discriminator and Part 2 updates the Generator. **Part 1 - Train the Discriminator** Recall, the goal of training the discriminator is to maximize the probability of correctly classifying a given input as real or fake. In terms of Goodfellow, we wish to “update the discriminator by ascending its stochastic gradient”. Practically, we want to maximize $log(D(x)) + log(1-D(G(z)))$. Due to the separate mini-batch suggestion from ganhacks, we will calculate this in two steps. First, we will construct a batch of real samples from the training set, forward pass through $D$, calculate the loss ($log(D(x))$), then calculate the gradients in a backward pass. Secondly, we will construct a batch of fake samples with the current generator, forward pass this batch through $D$, calculate the loss ($log(1-D(G(z)))$), and *accumulate* the gradients with a backward pass. Now, with the gradients accumulated from both the all-real and all-fake batches, we call a step of the Discriminator’s optimizer. **Part 2 - Train the Generator** As stated in the original paper, we want to train the Generator by minimizing $log(1-D(G(z)))$ in an effort to generate better fakes. As mentioned, this was shown by Goodfellow to not provide sufficient gradients, especially early in the learning process. As a fix, we instead wish to maximize $log(D(G(z)))$. In the code we accomplish this by: classifying the Generator output from Part 1 with the Discriminator, computing G’s loss *using real labels as GT*, computing G’s gradients in a backward pass, and finally updating G’s parameters with an optimizer step. It may seem counter-intuitive to use the real labels as GT labels for the loss function, but this allows us to use the $log(x)$ part of the BCELoss (rather than the $log(1-x)$ part) which is exactly what we want. Finally, we will do some statistic reporting and at the end of each epoch we will push our fixed_noise batch through the generator to visually track the progress of G’s training. 
The training statistics reported are: - **Loss_D** - discriminator loss calculated as the sum of losses for the all real and all fake batches ($log(D(x)) + log(D(G(z)))$). - **Loss_G** - generator loss calculated as $log(D(G(z)))$ - **D(x)** - the average output (across the batch) of the discriminator for the all real batch. This should start close to 1 then theoretically converge to 0.5 when G gets better. Think about why this is. - **D(G(z))** - average discriminator outputs for the all fake batch. The first number is before D is updated and the second number is after D is updated. These numbers should start near 0 and converge to 0.5 as G gets better. Think about why this is. **Note:** This step might take a while, depending on how many epochs you run and if you removed some data from the dataset. ``` # Training Loop # Lists to keep track of progress img_list = [] G_losses = [] D_losses = [] iters = 0 print("Starting Training Loop...") # For each epoch for epoch in range(num_epochs): # For each batch in the dataloader for i, data in enumerate(dataloader, 0): ############################ # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z))) ########################### ## Train with all-real batch netD.zero_grad() # Format batch real_cpu = data[0].to(device) b_size = real_cpu.size(0) label = torch.full((b_size,), real_label, dtype=torch.float, device=device) # Forward pass real batch through D output = netD(real_cpu).view(-1) # Calculate loss on all-real batch errD_real = criterion(output, label) # Calculate gradients for D in backward pass errD_real.backward() D_x = output.mean().item() ## Train with all-fake batch # Generate batch of latent vectors noise = torch.randn(b_size, nz, 1, 1, device=device) # Generate fake image batch with G fake = netG(noise) label.fill_(fake_label) # Classify all fake batch with D output = netD(fake.detach()).view(-1) # Calculate D's loss on the all-fake batch errD_fake = criterion(output, label) # Calculate the gradients for this batch errD_fake.backward() D_G_z1 = output.mean().item() # Add the gradients from the all-real and all-fake batches errD = errD_real + errD_fake # Update D optimizerD.step() ############################ # (2) Update G network: maximize log(D(G(z))) ########################### netG.zero_grad() label.fill_(real_label) # fake labels are real for generator cost # Since we just updated D, perform another forward pass of all-fake batch through D output = netD(fake).view(-1) # Calculate G's loss based on this output errG = criterion(output, label) # Calculate gradients for G errG.backward() D_G_z2 = output.mean().item() # Update G optimizerG.step() # Output training stats if i % 50 == 0: print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f' % (epoch, num_epochs, i, len(dataloader), errD.item(), errG.item(), D_x, D_G_z1, D_G_z2)) # Save Losses for plotting later G_losses.append(errG.item()) D_losses.append(errD.item()) # Check how the generator is doing by saving G's output on fixed_noise if (iters % 500 == 0) or ((epoch == num_epochs-1) and (i == len(dataloader)-1)): with torch.no_grad(): fake = netG(fixed_noise).detach().cpu() img_list.append(vutils.make_grid(fake, padding=2, normalize=True)) iters += 1 ``` Results ------- Finally, lets check out how we did. Here, we will look at three different results. First, we will see how D and G’s losses changed during training. Second, we will visualize G’s output on the fixed_noise batch for every epoch. 
And third, we will look at a batch of real data next to a batch of fake data from G. **Loss versus training iteration** Below is a plot of D & G’s losses versus training iterations. ``` plt.figure(figsize=(10,5)) plt.title("Generator and Discriminator Loss During Training") plt.plot(G_losses,label="G") plt.plot(D_losses,label="D") plt.xlabel("iterations") plt.ylabel("Loss") plt.legend() plt.show() ``` **Visualization of G’s progression** Remember how we saved the generator’s output on the fixed_noise batch after every epoch of training. Now, we can visualize the training progression of G with an animation. Press the play button to start the animation. ``` #%%capture fig = plt.figure(figsize=(8,8)) plt.axis("off") ims = [[plt.imshow(np.transpose(i,(1,2,0)), animated=True)] for i in img_list] ani = animation.ArtistAnimation(fig, ims, interval=1000, repeat_delay=1000, blit=True) HTML(ani.to_jshtml()) ``` **Real Images vs. Fake Images** Finally, lets take a look at some real images and fake images side by side. ``` # Grab a batch of real images from the dataloader real_batch = next(iter(dataloader)) # Plot the real images plt.figure(figsize=(15,15)) plt.subplot(1,2,1) plt.axis("off") plt.title("Real Images") plt.imshow(np.transpose(vutils.make_grid(real_batch[0].to(device)[:64], padding=5, normalize=True).cpu(),(1,2,0))) # Plot the fake images from the last epoch plt.subplot(1,2,2) plt.axis("off") plt.title("Fake Images") plt.imshow(np.transpose(img_list[-1],(1,2,0))) plt.show() ``` Where to Go Next ---------------- We have reached the end of our journey, but there are several places you could go from here. You could: - Train for longer to see how good the results get - Modify this model to take a different dataset and possibly change the size of the images and the model architecture - Check out some other cool GAN projects `here <https://github.com/nashory/gans-awesome-applications>`__ - Create GANs that generate `music <https://deepmind.com/blog/wavenet-generative-model-raw-audio/>`__
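One natural next step that the tutorial itself does not cover: persist the trained generator and sample fresh images from it. The hedged sketch below assumes the `netG`, `nz` and `device` objects defined above; the checkpoint file name is arbitrary.

```
import torch
import numpy as np
import matplotlib.pyplot as plt
import torchvision.utils as vutils

# Hedged sketch, not part of the original tutorial: save the trained generator and draw
# new samples from it. Assumes netG, nz and device from the cells above.
torch.save(netG.state_dict(), "dcgan_generator.pth")  # arbitrary file name

netG.eval()
with torch.no_grad():
    # Fresh latent vectors, mapped to images exactly as with fixed_noise during training.
    z = torch.randn(64, nz, 1, 1, device=device)
    samples = netG(z).detach().cpu()

plt.figure(figsize=(8, 8))
plt.axis("off")
plt.title("Samples from the trained generator")
plt.imshow(np.transpose(vutils.make_grid(samples, padding=2, normalize=True), (1, 2, 0)))
plt.show()
```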
# Extension Input Data Validation When using extensions in Fugue, you may add input data validation logic inside your code. However, there is standard way to add your validation logic. Here is a simple example: ``` from typing import List, Dict, Any # partitionby_has: a # schema: a:int,ct:int def get_count(df:List[Dict[str,Any]]) -> List[List[Any]]: return [[df[0]["a"],len(df)]] ``` The following commented-out code will fail, because of the hint `partitionby_has: a` requires the input dataframe to be prepartitioned by at least column `a`. ``` from fugue import FugueWorkflow with FugueWorkflow() as dag: df = dag.df([[0,1],[1,1],[0,2]], "a:int,b:int") # df.transform(get_count).show() # will fail because of no partition by df.partition(by=["a"]).transform(get_count).show() df.partition(by=["b","a"]).transform(get_count).show() # b,a is a super set of a ``` You can also have multiple rules, the following requires partition keys to contain `a`, and presort to be exactly `b asc` (`b == b asc`) ``` from typing import List, Dict, Any # partitionby_has: a # presort_is: b # schema: a:int,ct:int def get_count2(df:List[Dict[str,Any]]) -> List[List[Any]]: return [[df[0]["a"],len(df)]] from fugue import FugueWorkflow with FugueWorkflow() as dag: df = dag.df([[0,1],[1,1],[0,2]], "a:int,b:int") # df.partition(by=["a"]).transform(get_count).show() # will fail because of no presort df.partition(by=["a"], presort="b asc").transform(get_count).show() ``` ## Supported Validations The following are all supported validations. **Compile time validations** will happen when you construct the [FugueWorkflow](/dag.ipynb) while **runtime validations** happen during execution. Compile time validations are very useful to quickly identify logical issues. Runtime validations may take longer time to happen but they are still useful.On Fugue level, we are trying to move runtime validations to compile time as much as we can. Rule | Description | Compile Time | Order Matters | Examples :---|:---|:---|:---|:--- **partitionby_has** | assert the input dataframe is prepartitioned, and the partition keys contain these values | Yes | No | `partitionby_has: a,b` means the partition keys must contain `a` and `b` columns **partitionby_is** | assert the input dataframe is prepartitioned, and the partition keys are exactly these values | Yes | Yes | `partitionby_is: a,b` means the partition keys must contain and only contain `a` and `b` columns **presort_has** | assert the input dataframe is prepartitioned and [presorted](./partition.ipynb#Presort), and the presort keys contain these values | Yes | No | `presort_has: a,b desc` means the presort contains `a asc` and `b desc` (`a == a asc`) **presort_is** | assert the input dataframe is prepartitioned and [presorted](./partition.ipynb#Presort), and the presort keys are exactly these values | Yes | Yes | `presort_is: a,b desc` means the presort is exactly `a asc, b desc` **schema_has** | assert input dataframe schema has certain keys or key type pairs | No | No | `schema_has: a,b:str` means input dataframe schema contains column `a` regardless of type, and `b` of type string, order doesn't matter. 
So `b:str,a:int` is valid, `b:int,a:int` is invalid because of `b` type, and `b:str` is invalid because `a` is not in the schema **schema_is** | assert input dataframe schema is exactly this value (the value must be a [schema expression](./schema_dataframes.ipynb#Schema)) | No | Yes | `schema_is: a:int,b:str`, then `b:str,a:int` is invalid because of order, `a:str,b:str` is invalid because of `a` type ## Extensions Compatibility Extension Type | Supported | Not Supported :---|:---|:--- Transformer | `partitionby_has`, `partitionby_is`, `presort_has`, `presort_is`, `schema_has`, `schema_is` | None CoTransformer | None | `partitionby_has`, `partitionby_is`, `presort_has`, `presort_is`, `schema_has`, `schema_is` OutputTransformer | `partitionby_has`, `partitionby_is`, `presort_has`, `presort_is`, `schema_has`, `schema_is` | None OutputCoTransformer | None | `partitionby_has`, `partitionby_is`, `presort_has`, `presort_is`, `schema_has`, `schema_is` Creator | N/A | N/A Processor | `partitionby_has`, `partitionby_is`, `presort_has`, `presort_is`, `schema_has`, `schema_is` | None Outputter | `partitionby_has`, `partitionby_is`, `presort_has`, `presort_is`, `schema_has`, `schema_is` | None ## How To Add Validations It depends on how you write your extension, by comment, by decorator or by interface, feature wise, they are equivalent. ## By Comment ``` from typing import List, Dict, Any # schema: a:int,ct:int def get_count2(df:List[Dict[str,Any]]) -> List[List[Any]]: return [[df[0]["a"],len(df)]] ``` ## By Decorator ``` import pandas as pd from typing import List, Dict, Any from fugue import processor, transformer @transformer(schema="*", partitionby_has=["a","d"], presort_is="b, c desc") def example1(df:pd.DataFrame) -> pd.DataFrame: return df @transformer(schema="*", partitionby_has="a,d", presort_is=["b",("c",False)]) def example2(df:pd.DataFrame) -> pd.DataFrame: return df # partitionby_has: a # presort_is: b @transformer(schema="*") def example3(df:pd.DataFrame) -> pd.DataFrame: return df @processor(partitionby_has=["a","d"], presort_is="b, c desc") def example4(df:pd.DataFrame) -> pd.DataFrame: return df ``` ## By Interface In every extension, you can override `validation_rules` ``` from fugue import Transformer class T(Transformer): @property def validation_rules(self): return { "partitionby_has": ["a"] } def get_output_schema(self, df): return df.schema def transform(self, df): return df ```
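As a usage sketch mirroring the earlier comment-based examples, and assuming the class `T` defined above, the interface-based transformer is used the same way inside a workflow. Partitioning by `a` satisfies its `partitionby_has` rule, while the commented-out call without partitioning would fail validation.

```
from fugue import FugueWorkflow

with FugueWorkflow() as dag:
    df = dag.df([[0, 1], [1, 1], [0, 2]], "a:int,b:int")
    # df.transform(T).show()  # would fail: the input is not partitioned by "a"
    df.partition(by=["a"]).transform(T).show()
```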
##### Copyright 2020 The TF-Agents Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Tutorial on Multi Armed Bandits in TF-Agents ### Get Started <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/agents/tutorials/bandits_tutorial"> <img src="https://www.tensorflow.org/images/tf_logo_32px.png" /> View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/agents/blob/master/docs/tutorials/bandits_tutorial.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/agents/blob/master/docs/tutorials/bandits_tutorial.ipynb"> <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/agents/docs/tutorials/bandits_tutorial.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> ### Setup If you haven't installed the following dependencies, run: ``` !pip install tf-agents ``` ### Imports ``` import abc import numpy as np import tensorflow as tf from tf_agents.agents import tf_agent from tf_agents.drivers import driver from tf_agents.environments import py_environment from tf_agents.environments import tf_environment from tf_agents.environments import tf_py_environment from tf_agents.policies import tf_policy from tf_agents.specs import array_spec from tf_agents.specs import tensor_spec from tf_agents.trajectories import time_step as ts from tf_agents.trajectories import trajectory from tf_agents.trajectories import policy_step # Clear any leftover state from previous colabs run. # (This is not necessary for normal programs.) tf.compat.v1.reset_default_graph() tf.compat.v1.enable_resource_variables() tf.compat.v1.enable_v2_behavior() nest = tf.compat.v2.nest ``` # Introduction The Multi-Armed Bandit problem (MAB) is a special case of Reinforcement Learning: an agent collects rewards in an environment by taking some actions after observing some state of the environment. The main difference between general RL and MAB is that in MAB, we assume that the action taken by the agent does not influence the next state of the environment. Therefore, agents do not model state transitions, credit rewards to past actions, or "plan ahead" to get to reward-rich states. As in other RL domains, the goal of a MAB *agent* is to find a *policy* that collects as much reward as possible. It would be a mistake, however, to always try to exploit the action that promises the highest reward, because then there is a chance that we miss out on better actions if we do not explore enough. This is the main problem to be solved in (MAB), often called the *exploration-exploitation dilemma*. 
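To make the exploration-exploitation dilemma concrete, here is a toy NumPy illustration that is not part of the TF-Agents tutorial: an epsilon-greedy rule on a three-armed Bernoulli bandit with made-up win rates. Exploring with small probability keeps the agent from locking onto an arm whose estimate only looks best because it was tried too rarely.

```
import numpy as np

# Toy illustration (not TF-Agents): epsilon-greedy on a 3-armed Bernoulli bandit.
# The win rates below are made up; the agent only sees the sampled rewards.
rng = np.random.default_rng(0)
true_means = np.array([0.3, 0.5, 0.7])
counts = np.zeros(3)
values = np.zeros(3)          # running estimates of each arm's mean reward
epsilon = 0.1                 # probability of exploring a random arm

total_reward = 0.0
for t in range(1000):
    if rng.random() < epsilon:
        arm = int(rng.integers(3))        # explore
    else:
        arm = int(np.argmax(values))      # exploit the current best estimate
    reward = float(rng.random() < true_means[arm])
    counts[arm] += 1
    values[arm] += (reward - values[arm]) / counts[arm]   # incremental mean update
    total_reward += reward

print("estimated arm values:", np.round(values, 2))
print("total reward over 1000 steps:", total_reward)
```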
Bandit environments, policies, and agents for MAB can be found in subdirectories of [tf_agents/bandits](https://github.com/tensorflow/agents/blob/master/tf_agents/bandits). # Environments In TF-Agents, the environment class serves the role of giving information on the current state (this is called **observation** or **context**), receiving an action as input, performing a state transition, and outputting a reward. This class also takes care of resetting when an episode ends, so that a new episode can start. This is realized by calling a `reset` function when a state is labelled as "last" of the episode. For more details, see the [TF-Agents environments tutorial](https://github.com/tensorflow/agents/blob/master/docs/tutorials/2_environments_tutorial.ipynb). As mentioned above, MAB differs from general RL in that actions do not influence the next observation. Another difference is that in Bandits, there are no "episodes": every time step starts with a new observation, independently of previous time steps. To make sure observations are independent and to abstract away the concept of RL episodes, we introduce subclasses of `PyEnvironment` and `TFEnvironment`: [BanditPyEnvironment](https://github.com/tensorflow/agents/blob/master/tf_agents/bandits/environments/bandit_tf_environment.py) and [BanditTFEnvironment](https://github.com/tensorflow/agents/blob/master/tf_agents/bandits/environments/bandit_py_environment.py). These classes expose two private member functions that remain to be implemented by the user: ```python @abc.abstractmethod def _observe(self): ``` and ```python @abc.abstractmethod def _apply_action(self, action): ``` The `_observe` function returns an observation. Then, the policy chooses an action based on this observation. The `_apply_action` receives that action as an input, and returns the corresponding reward. These private member functions are called by the functions `reset` and `step`, respectively. ``` class BanditPyEnvironment(py_environment.PyEnvironment): def __init__(self, observation_spec, action_spec): self._observation_spec = observation_spec self._action_spec = action_spec super(BanditPyEnvironment, self).__init__() # Helper functions. def action_spec(self): return self._action_spec def observation_spec(self): return self._observation_spec def _empty_observation(self): return tf.nest.map_structure(lambda x: np.zeros(x.shape, x.dtype), self.observation_spec()) # These two functions below should not be overridden by subclasses. def _reset(self): """Returns a time step containing an observation.""" return ts.restart(self._observe(), batch_size=self.batch_size) def _step(self, action): """Returns a time step containing the reward for the action taken.""" reward = self._apply_action(action) return ts.termination(self._observe(), reward) # These two functions below are to be implemented in subclasses. @abc.abstractmethod def _observe(self): """Returns an observation.""" @abc.abstractmethod def _apply_action(self, action): """Applies `action` to the Environment and returns the corresponding reward. """ ``` The above interim abstract class implements `PyEnvironment`'s `_reset` and `_step` functions and exposes the abstract functions `_observe` and `_apply_action` to be implemented by subclasses. ## A Simple Example Environment Class The following class gives a very simple environment for which the observation is a random integer between -2 and 2, there are 3 possible actions (0, 1, 2), and the reward is the product of the action and the observation. 
``` class SimplePyEnvironment(BanditPyEnvironment): def __init__(self): action_spec = array_spec.BoundedArraySpec( shape=(), dtype=np.int32, minimum=0, maximum=2, name='action') observation_spec = array_spec.BoundedArraySpec( shape=(1,), dtype=np.int32, minimum=-2, maximum=2, name='observation') super(SimplePyEnvironment, self).__init__(observation_spec, action_spec) def _observe(self): self._observation = np.random.randint(-2, 3, (1,), dtype='int32') return self._observation def _apply_action(self, action): return action * self._observation ``` Now we can use this environment to get observations, and receive rewards for our actions. ``` environment = SimplePyEnvironment() observation = environment.reset().observation print("observation: %d" % observation) action = 2 #@param print("action: %d" % action) reward = environment.step(action).reward print("reward: %f" % reward) ``` ## TF Environments One can define a bandit environment by subclassing `BanditTFEnvironment`, or, similarly to RL environments, one can define a `BanditPyEnvironment` and wrap it with `TFPyEnvironment`. For the sake of simplicity, we go with the latter option in this tutorial. ``` tf_environment = tf_py_environment.TFPyEnvironment(environment) ``` # Policies A *policy* in a bandit problem works the same way as in an RL problem: it provides an action (or a distribution of actions), given an observation as input. For more details, see the [TF-Agents Policy tutorial](https://github.com/tensorflow/agents/blob/master/docs/tutorials/3_policies_tutorial.ipynb). As with environments, there are two ways to construct a policy: One can create a `PyPolicy` and wrap it with `TFPyPolicy`, or directly create a `TFPolicy`. Here we elect to go with the direct method. Since this example is quite simple, we can define the optimal policy manually. The action only depends on the sign of the observation, 0 when is negative and 2 when is positive. ``` class SignPolicy(tf_policy.TFPolicy): def __init__(self): observation_spec = tensor_spec.BoundedTensorSpec( shape=(1,), dtype=tf.int32, minimum=-2, maximum=2) time_step_spec = ts.time_step_spec(observation_spec) action_spec = tensor_spec.BoundedTensorSpec( shape=(), dtype=tf.int32, minimum=0, maximum=2) super(SignPolicy, self).__init__(time_step_spec=time_step_spec, action_spec=action_spec) def _distribution(self, time_step): pass def _variables(self): return () def _action(self, time_step, policy_state, seed): observation_sign = tf.cast(tf.sign(time_step.observation[0]), dtype=tf.int32) action = observation_sign + 1 return policy_step.PolicyStep(action, policy_state) ``` Now we can request an observation from the environment, call the policy to choose an action, then the environment will output the reward: ``` sign_policy = SignPolicy() current_time_step = tf_environment.reset() print('Observation:') print (current_time_step.observation) action = sign_policy.action(current_time_step).action print('Action:') print (action) reward = tf_environment.step(action).reward print('Reward:') print(reward) ``` The way bandit environments are implemented ensures that every time we take a step, we not only receive the reward for the action we took, but also the next observation. 
``` step = tf_environment.reset() action = 1 next_step = tf_environment.step(action) reward = next_step.reward next_observation = next_step.observation print("Reward: ") print(reward) print("Next observation:") print(next_observation) ``` # Agents Now that we have bandit environments and bandit policies, it is time to also define bandit agents, that take care of changing the policy based on training samples. The API for bandit agents does not differ from that of RL agents: the agent just needs to implement the `_initialize` and `_train` methods, and define a `policy` and a `collect_policy`. ## A More Complicated Environment Before we write our bandit agent, we need to have an environment that is a bit harder to figure out. To spice up things just a little bit, the next environment will either always give `reward = observation * action` or `reward = -observation * action`. This will be decided when the environment is initialized. ``` class TwoWayPyEnvironment(BanditPyEnvironment): def __init__(self): action_spec = array_spec.BoundedArraySpec( shape=(), dtype=np.int32, minimum=0, maximum=2, name='action') observation_spec = array_spec.BoundedArraySpec( shape=(1,), dtype=np.int32, minimum=-2, maximum=2, name='observation') # Flipping the sign with probability 1/2. self._reward_sign = 2 * np.random.randint(2) - 1 print("reward sign:") print(self._reward_sign) super(TwoWayPyEnvironment, self).__init__(observation_spec, action_spec) def _observe(self): self._observation = np.random.randint(-2, 3, (1,), dtype='int32') return self._observation def _apply_action(self, action): return self._reward_sign * action * self._observation[0] two_way_tf_environment = tf_py_environment.TFPyEnvironment(TwoWayPyEnvironment()) ``` ## A More Complicated Policy A more complicated environment calls for a more complicated policy. We need a policy that detects the behavior of the underlying environment. There are three situations that the policy needs to handle: 0. The agent has not detected know yet which version of the environment is running. 1. The agent detected that the original version of the environment is running. 2. The agent detected that the flipped version of the environment is running. We define a `tf_variable` named `_situation` to store this information encoded as values in `[0, 2]`, then make the policy behave accordingly. ``` class TwoWaySignPolicy(tf_policy.TFPolicy): def __init__(self, situation): observation_spec = tensor_spec.BoundedTensorSpec( shape=(1,), dtype=tf.int32, minimum=-2, maximum=2) action_spec = tensor_spec.BoundedTensorSpec( shape=(), dtype=tf.int32, minimum=0, maximum=2) time_step_spec = ts.time_step_spec(observation_spec) self._situation = situation super(TwoWaySignPolicy, self).__init__(time_step_spec=time_step_spec, action_spec=action_spec) def _distribution(self, time_step): pass def _variables(self): return [self._situation] def _action(self, time_step, policy_state, seed): sign = tf.cast(tf.sign(time_step.observation[0, 0]), dtype=tf.int32) def case_unknown_fn(): # Choose 1 so that we get information on the sign. return tf.constant(1, shape=(1,)) # Choose 0 or 2, depending on the situation and the sign of the observation. 
def case_normal_fn(): return tf.constant(sign + 1, shape=(1,)) def case_flipped_fn(): return tf.constant(1 - sign, shape=(1,)) cases = [(tf.equal(self._situation, 0), case_unknown_fn), (tf.equal(self._situation, 1), case_normal_fn), (tf.equal(self._situation, 2), case_flipped_fn)] action = tf.case(cases, exclusive=True) return policy_step.PolicyStep(action, policy_state) ``` ## The Agent Now it's time to define the agent that detects the sign of the environment and sets the policy appropriately. ``` class SignAgent(tf_agent.TFAgent): def __init__(self): self._situation = tf.compat.v2.Variable(0, dtype=tf.int32) policy = TwoWaySignPolicy(self._situation) time_step_spec = policy.time_step_spec action_spec = policy.action_spec super(SignAgent, self).__init__(time_step_spec=time_step_spec, action_spec=action_spec, policy=policy, collect_policy=policy, train_sequence_length=None) def _initialize(self): return tf.compat.v1.variables_initializer(self.variables) def _train(self, experience, weights=None): observation = experience.observation action = experience.action reward = experience.reward # We only need to change the value of the situation variable if it is # unknown (0) right now, and we can infer the situation only if the # observation is not 0. needs_action = tf.logical_and(tf.equal(self._situation, 0), tf.not_equal(reward, 0)) def new_situation_fn(): """This returns either 1 or 2, depending on the signs.""" return (3 - tf.sign(tf.cast(observation[0, 0, 0], dtype=tf.int32) * tf.cast(action[0, 0], dtype=tf.int32) * tf.cast(reward[0, 0], dtype=tf.int32))) / 2 new_situation = tf.cond(needs_action, new_situation_fn, lambda: self._situation) new_situation = tf.cast(new_situation, tf.int32) tf.compat.v1.assign(self._situation, new_situation) return tf_agent.LossInfo((), ()) sign_agent = SignAgent() ``` In the above code, the agent defines the policy, and the variable `situation` is shared by the agent and the policy. Also, the parameter `experience` of the `_train` function is a trajectory: # Trajectories In TF-Agents, `trajectories` are named tuples that contain samples from previous steps taken. These samples are then used by the agent to train and update the policy. In RL, trajectories must contain information about the current state, the next state, and whether the current episode has ended. Since in the Bandit world we do not need these things, we set up a helper function to create a trajectory: ``` # We need to add another dimension here because the agent expects the # trajectory of shape [batch_size, time, ...], but in this tutorial we assume # that both batch size and time are 1. Hence all the expand_dims. def trajectory_for_bandit(initial_step, action_step, final_step): return trajectory.Trajectory(observation=tf.expand_dims(initial_step.observation, 0), action=tf.expand_dims(action_step.action, 0), policy_info=action_step.info, reward=tf.expand_dims(final_step.reward, 0), discount=tf.expand_dims(final_step.discount, 0), step_type=tf.expand_dims(initial_step.step_type, 0), next_step_type=tf.expand_dims(final_step.step_type, 0)) ``` # Training an Agent Now all the pieces are ready for training our bandit agent. 
``` step = two_way_tf_environment.reset() for _ in range(10): action_step = sign_agent.collect_policy.action(step) next_step = two_way_tf_environment.step(action_step.action) experience = trajectory_for_bandit(step, action_step, next_step) print(experience) sign_agent.train(experience) step = next_step ``` From the output one can see that after the second step (unless the observation was 0 in the first step), the policy chooses the action in the right way and thus the reward collected is always non-negative. # A Real Contextual Bandit Example In the rest of this tutorial, we use the pre-implemented [environments](https://github.com/tensorflow/agents/blob/master/tf_agents/bandits/environments/) and [agents](https://github.com/tensorflow/agents/blob/master/tf_agents/bandits/agents/) of the TF-Agents Bandits library. ``` # Imports for example. from tf_agents.bandits.agents import lin_ucb_agent from tf_agents.bandits.environments import stationary_stochastic_py_environment as sspe from tf_agents.bandits.metrics import tf_metrics from tf_agents.drivers import dynamic_step_driver from tf_agents.replay_buffers import tf_uniform_replay_buffer import matplotlib.pyplot as plt ``` ## Stationary Stochastic Environment with Linear Payoff Functions The environment used in this example is the [StationaryStochasticPyEnvironment](https://github.com/tensorflow/agents/blob/master/tf_agents/bandits/environments/stationary_stochastic_py_environment.py). This environment takes as parameter a (usually noisy) function for giving observations (context), and for every arm takes an (also noisy) function that computes the reward based on the given observation. In our example, we sample the context uniformly from a d-dimensional cube, and the reward functions are linear functions of the context, plus some Gaussian noise. ``` batch_size = 2 # @param arm0_param = [-3, 0, 1, -2] # @param arm1_param = [1, -2, 3, 0] # @param arm2_param = [0, 0, 1, 1] # @param def context_sampling_fn(batch_size): """Contexts from [-10, 10]^4.""" def _context_sampling_fn(): return np.random.randint(-10, 10, [batch_size, 4]).astype(np.float32) return _context_sampling_fn class LinearNormalReward(object): """A class that acts as linear reward function when called.""" def __init__(self, theta, sigma): self.theta = theta self.sigma = sigma def __call__(self, x): mu = np.dot(x, self.theta) return np.random.normal(mu, self.sigma) arm0_reward_fn = LinearNormalReward(arm0_param, 1) arm1_reward_fn = LinearNormalReward(arm1_param, 1) arm2_reward_fn = LinearNormalReward(arm2_param, 1) environment = tf_py_environment.TFPyEnvironment( sspe.StationaryStochasticPyEnvironment( context_sampling_fn(batch_size), [arm0_reward_fn, arm1_reward_fn, arm2_reward_fn], batch_size=batch_size)) ``` ## The LinUCB Agent The agent below implements the [LinUCB](http://rob.schapire.net/papers/www10.pdf) algorithm. ``` observation_spec = tensor_spec.TensorSpec([4], tf.float32) time_step_spec = ts.time_step_spec(observation_spec) action_spec = tensor_spec.BoundedTensorSpec( dtype=tf.int32, shape=(), minimum=0, maximum=2) agent = lin_ucb_agent.LinearUCBAgent(time_step_spec=time_step_spec, action_spec=action_spec) ``` ## Regret Metric Bandits' most important metric is *regret*, calculated as the difference between the reward collected by the agent and the expected reward of an oracle policy that has access to the reward functions of the environment. 
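Written out per step (a sketch of the definition with notation of our own choosing, not the library's):

$$\text{regret}_t \;=\; \max_{a}\,\mathbb{E}\big[r(x_t, a)\big] \;-\; r_t, \qquad \text{Regret}_T \;=\; \sum_{t=1}^{T}\text{regret}_t,$$

where $x_t$ is the observation, $r(x, a)$ is the (noisy) reward the environment would pay for action $a$ in context $x$, and $r_t$ is the reward actually collected at step $t$. The first term is what the oracle would collect in expectation, which is why computing this metric requires access to the environment's reward functions.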
The [RegretMetric](https://github.com/tensorflow/agents/blob/master/tf_agents/bandits/metrics/tf_metrics.py) thus needs a *baseline_reward_fn* function that calculates the best achievable expected reward given an observation. For our example, we need to take the maximum of the no-noise equivalents of the reward functions that we already defined for the environment. ``` def compute_optimal_reward(observation): expected_reward_for_arms = [ tf.linalg.matvec(observation, tf.cast(arm0_param, dtype=tf.float32)), tf.linalg.matvec(observation, tf.cast(arm1_param, dtype=tf.float32)), tf.linalg.matvec(observation, tf.cast(arm2_param, dtype=tf.float32))] optimal_action_reward = tf.reduce_max(expected_reward_for_arms, axis=0) return optimal_action_reward regret_metric = tf_metrics.RegretMetric(compute_optimal_reward) ``` ## Training Now we put together all the components that we introduced above: the environment, the policy, and the agent. We run the policy on the environment and output training data with the help of a *driver*, and train the agent on the data. Note that there are two parameters that together specify the number of steps taken. `num_iterations` specifies how many times we run the trainer loop, while the driver will take `steps_per_loop` steps per iteration. The main reason behind keeping both of these parameters is that some operations are done per iteration, while some are done by the driver in every step. For example, the agent's `train` function is only called once per iteration. The trade-off here is that if we train more often then our policy is "fresher", on the other hand, training in bigger batches might be more time efficient. ``` num_iterations = 90 # @param steps_per_loop = 1 # @param replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer( data_spec=agent.policy.trajectory_spec, batch_size=batch_size, max_length=steps_per_loop) observers = [replay_buffer.add_batch, regret_metric] driver = dynamic_step_driver.DynamicStepDriver( env=environment, policy=agent.collect_policy, num_steps=steps_per_loop * batch_size, observers=observers) regret_values = [] for _ in range(num_iterations): driver.run() loss_info = agent.train(replay_buffer.gather_all()) replay_buffer.clear() regret_values.append(regret_metric.result()) plt.plot(regret_values) plt.ylabel('Average Regret') plt.xlabel('Number of Iterations') ``` After running the last code snippet, the resulting plot (hopefully) shows that the average regret is going down as the agent is trained and the policy gets better in figuring out what the right action is, given the observation. # What's Next? To see more working examples, please see the [bandits/agents/examples](https://github.com/tensorflow/agents/blob/master/tf_agents/bandits/agents/examples) directory that has ready-to-run examples for different agents and environments. The TF-Agents library is also capable of handling Multi-Armed Bandits with per-arm features. To that end, we refer the reader to the per-arm bandit [tutorial](https://github.com/tensorflow/agents/blob/master/tf_agents/g3doc/tutorials/per_arm_bandits_tutorial.ipynb).
# Week 4 Yay! It's week 4. Today we'll keep things light. I've noticed that many of you are struggling a bit to keep up and are still working on exercises from the previous weeks. Thus, this week we only have two components, with no lectures and very little reading. ## Overview * An exercise on visualizing geodata using a different set of tools from the ones we played with during Lecture 2. * Thinking about visualization, data quality, and binning. Why ***looking at the details of the data before applying fancy methods*** is often important. ## Part 1: Visualizing geo-data It turns out that `plotly` (which we used during Week 2) is not the only way of working with geo-data. There are many different ways to go about it. (The hard-core PhD and PostDoc researchers in my group simply use matplotlib, since that provides more control. For an example of that kind of thing, check out [this one](https://towardsdatascience.com/visualizing-geospatial-data-in-python-e070374fe621).) Today, we'll try another library for geodata called "[Folium](https://github.com/python-visualization/folium)". It's good for you all to try out a few different libraries - remember that data visualization and analysis in Python is all about the ability to use many different tools. The exercise below is based on the code illustrated in this nice [tutorial](https://www.kaggle.com/daveianhickey/how-to-folium-for-maps-heatmaps-time-data), so let us start by taking a look at that one. *Reading*. Read through the following tutorial: * "How to: Folium for maps, heatmaps & time data". Get it here: https://www.kaggle.com/daveianhickey/how-to-folium-for-maps-heatmaps-time-data * (Optional) There are also some nice tricks in "Spatial Visualizations and Analysis in Python with Folium". Read it here: https://towardsdatascience.com/data-101s-spatial-visualizations-and-analysis-in-python-with-folium-39730da2adf > *Exercise 1.1*: A new take on geospatial data. > >A couple of weeks ago (Part 3 of Week 2), we worked with spatial data by using color-intensity of shapefiles to show the counts of certain crimes within those individual areas. Today, we look at studying geospatial data by plotting raw data points as well as heatmaps on top of actual maps. > > * First start by plotting a map of San Francisco with a nice tight zoom. Simply use the command `folium.Map([lat, lon], zoom_start=13)`, where you'll have to look up San Francisco's latitude and longitude. > * Next, use the coordinates for SF City Hall `37.77919, -122.41914` to indicate its location on the map with a nice, pop-up-enabled marker. (In the screenshot below, I used the black & white Stamen tiles, because they look cool). > <img src="https://raw.githubusercontent.com/suneman/socialdata2022/main/files/city_hall_2022.png" alt="drawing" width="600"/> > * Now, let's plot some more data (no need for pop-ups this time). Select a couple of months of data for `'DRUG/NARCOTIC'` and draw a little dot for each arrest for those two months. You could, for example, choose June-July 2016, but you can choose anything you like - the main concern is to not have too many points, as this uses a lot of memory and makes Folium behave non-optimally. > We can call this kind of visualization a *point scatter plot*. Ok. Time for a little break. Note that a nice thing about Folium is that you can zoom in and out of the maps. > *Exercise 1.2*: Heatmaps. > * Now, let's play with **heatmaps**.
You can figure out the appropriate commands by grabbing code from the main [tutorial](https://www.kaggle.com/daveianhickey/how-to-folium-for-maps-heatmaps-time-data)) and modifying to suit your needs. > * To create your first heatmap, grab all arrests for the category `'SEX OFFENSES, NON FORCIBLE'` across all time. Play with parameters to get plots you like. > * Now, comment on the differences between scatter plots and heatmaps. >. - What can you see using the scatter-plots that you can't see using the heatmaps? >. - And *vice versa*: what does the heatmaps help you see that's difficult to distinguish in the scatter-plots? > * Play around with the various parameters for heatmaps. You can find a list here: https://python-visualization.github.io/folium/plugins.html > * Comment on the effect on the various parameters for the heatmaps. How do they change the picture? (at least talk about the `radius` and `blur`). > For one combination of settings, my heatmap plot looks like this. > <img src="https://raw.githubusercontent.com/suneman/socialdata2022/main/files/crime_hot_spot.png" alt="drawing" width="600"/> > * In that screenshot, I've (manually) highlighted a specific hotspot for this type of crime. Use your detective skills to find out what's going on in that building on the 800 block of Bryant street ... and explain in your own words. (*Fun fact*: I remembered the concentration of crime-counts discussed at the end of this exercise from when I did the course back in 2016. It popped up when I used a completely different framework for visualizing geodata called [`geoplotlib`](https://github.com/andrea-cuttone/geoplotlib). You can spot it if you go to that year's [lecture 2](https://nbviewer.jupyter.org/github/suneman/socialdataanalysis2016/blob/master/lectures/Week3.ipynb), exercise 4.) For the final element of working with heatmaps, let's now use the cool Folium functionality `HeatMapWithTime` to create a visualization of how the patterns of your favorite crime-type changes over time. > *Exercise 1.3*: Heatmap movies. This exercise is a bit more independent than above - you get to make all the choices. > * Start by choosing your favorite crimetype. Prefereably one with spatial patterns that change over time (use your data-exploration from the previous lectures to choose a good one). > * Now, choose a time-resolution. You could plot daily, weekly, monthly datasets to plot in your movie. Again the goal is to find interesting temporal patterns to display. We want at least 20 frames though. > * Create the movie using `HeatMapWithTime`. > * Comment on your results: > - What patterns does your movie reveal? > - Motivate/explain the reasoning behind your choice of crimetype and time-resolution. ## Part 2: Errors in the data. The importance of looking at raw (or close to raw) data. We started the course by plotting simple histogram and bar plots that showed a lot of cool patterns. But sometimes the binning can hide imprecision, irregularity, and simple errors in the data that could be misleading. In the work we've done so far, we've already come across at least three examples of this in the SF data. 1. In the temporal activity for `PROSTITUTION` something surprising is going on on Thursday. Remind yourself [**here**](https://raw.githubusercontent.com/suneman/socialdata2022/main/files/prostitution.png), where I've highlighted the phenomenon I'm talking about. 2. When we investigated the details of how the timestamps are recorded using jitter-plots, we saw that many more crimes were recorded e.g. 
on the hour, 15 minutes past the hour, and, to a lesser extent, in whole increments of 10 minutes. Crimes didn't appear to be recorded as frequently in between those round numbers. Remind yourself [**here**](https://raw.githubusercontent.com/suneman/socialdata2022/main/files/jitter.png), where I've highlighted the phenomenon I'm talking about. 3. And, today we saw that the Hall of Justice seemed to be an unlikely hotspot for sex offences. Remind yourself [**here**](https://raw.githubusercontent.com/suneman/socialdata2022/main/files/crime_hot_spot.png). > *Exercise 2*: Data errors. The data errors we discovered above become difficult to notice when we aggregate data (and when we calculate mean values, as well as statistics more generally). Thus, when we visualize, these errors also become difficult to notice once we bin the data. We explore this process in the exercise below (see also the small synthetic sketch after this exercise). > >This last exercise for today has two parts: > * In each of the examples above, describe in your own words how the data-errors I call attention to above can bias the binned versions of the data. Also, briefly mention how not noticing these errors can result in misconceptions about the underlying patterns of what's going on in San Francisco (and our modeling). > * Find your own example of human noise in the data and visualize it.
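To make the binning point concrete, here is a small self-contained sketch using synthetic timestamps (not the SF data): the usual counts-per-hour bar chart looks completely unremarkable, while a minute-of-the-hour view immediately exposes the kind of rounding/heaping described in point 2 above.

```python
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
n = 10_000

# Synthetic incident times: the hour of day is clean, but a quarter of the
# reported minutes get rounded to :00 or :15 by whoever files the report.
hours = rng.integers(0, 24, size=n)
minutes = rng.integers(0, 60, size=n)
rounded = rng.random(n) < 0.25
minutes[rounded] = rng.choice([0, 15], size=rounded.sum())

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 3))

# The usual aggregate view (counts per hour of day): nothing looks wrong.
ax1.bar(*np.unique(hours, return_counts=True))
ax1.set_title("Binned by hour: artifact invisible")
ax1.set_xlabel("hour of day")

# One level closer (counts per minute of the hour): the heaping jumps out.
ax2.bar(*np.unique(minutes, return_counts=True))
ax2.set_title("Binned by minute: heaping at :00 and :15")
ax2.set_xlabel("minute of the hour")

plt.tight_layout()
plt.show()
```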
# Inference with your model This is the third and final tutorial of our [beginner tutorial series](https://github.com/awslabs/djl/tree/master/jupyter/tutorial) that will take you through creating, training, and running inference on a neural network. In this tutorial, you will learn how to execute your image classification model for a production system. In the [previous tutorial](02_train_your_first_model.ipynb), you successfully trained your model. Now, we will learn how to implement a `Translator` to convert between POJO and `NDArray` as well as a `Predictor` to run inference. ## Preparation This tutorial requires the installation of the Java Jupyter Kernel. To install the kernel, see the [Jupyter README](https://github.com/awslabs/djl/blob/master/jupyter/README.md). ``` // Add the snapshot repository to get the DJL snapshot artifacts // %mavenRepo snapshots https://oss.sonatype.org/content/repositories/snapshots/ // Add the maven dependencies %maven ai.djl:api:0.6.0 %maven ai.djl:model-zoo:0.6.0 %maven ai.djl.mxnet:mxnet-engine:0.6.0 %maven ai.djl.mxnet:mxnet-model-zoo:0.6.0 %maven org.slf4j:slf4j-api:1.7.26 %maven org.slf4j:slf4j-simple:1.7.26 %maven net.java.dev.jna:jna:5.3.0 // See https://github.com/awslabs/djl/blob/master/mxnet/mxnet-engine/README.md // for more MXNet library selection options %maven ai.djl.mxnet:mxnet-native-auto:1.7.0-b import java.awt.image.*; import java.nio.file.*; import java.util.*; import java.util.stream.*; import ai.djl.*; import ai.djl.basicmodelzoo.basic.*; import ai.djl.ndarray.*; import ai.djl.modality.*; import ai.djl.modality.cv.*; import ai.djl.modality.cv.util.NDImageUtils; import ai.djl.translate.*; ``` ## Step 1: Load your handwritten digit image We will start by loading the image that we want to run our model to classify. ``` var img = ImageFactory.getInstance().fromUrl("https://djl-ai.s3.amazonaws.com/resources/images/0.png"); img.getWrappedImage(); ``` ## Step 2: Load your model Next, we need to load the model to run inference with. This model should have been saved to the `build/mlp` directory when running the [previous tutorial](02_train_your_first_model.ipynb). TODO: Mention model zoo? List models in model zoo? TODO: Key Concept ZooModel TODO: Link to Model javadoc ``` Path modelDir = Paths.get("build/mlp"); Model model = Model.newInstance("mlp"); model.setBlock(new Mlp(28 * 28, 10, new int[] {128, 64})); model.load(modelDir); ``` ## Step 3: Create a `Translator` The [`Translator`](https://javadoc.io/static/ai.djl/api/0.6.0/index.html?ai/djl/translate/Translator.html) is used to encapsulate the pre-processing and post-processing functionality of your application. The input to the processInput and processOutput should be single data items, not batches. 
``` Translator<Image, Classifications> translator = new Translator<Image, Classifications>() { @Override public NDList processInput(TranslatorContext ctx, Image input) { // Convert Image to NDArray NDArray array = input.toNDArray(ctx.getNDManager(), Image.Flag.GRAYSCALE); return new NDList(NDImageUtils.toTensor(array)); } @Override public Classifications processOutput(TranslatorContext ctx, NDList list) { NDArray probabilities = list.singletonOrThrow().softmax(0); List<String> indices = IntStream.range(0, 10).mapToObj(String::valueOf).collect(Collectors.toList()); return new Classifications(indices, probabilities); } @Override public Batchifier getBatchifier() { return Batchifier.STACK; } }; ``` ## Step 4: Create Predictor Using the translator, we will create a new [`Predictor`](https://javadoc.io/static/ai.djl/api/0.6.0/index.html?ai/djl/inference/Predictor.html). The predictor is the main class to orchestrate the inference process. During inference, a trained model is used to predict values, often for production use cases. The predictor is NOT thread-safe, so if you want to do prediction in parallel, you should create a predictor object(with the same model) for each thread. ``` var predictor = model.newPredictor(translator); ``` ## Step 5: Run inference With our predictor, we can simply call the predict method to run inference. Afterwards, the same predictor should be used for further inference calls. ``` var classifications = predictor.predict(img); classifications ``` ## Summary Now, you've successfully built a model, trained it, and run inference. Congratulations on finishing the [beginner tutorial series](https://github.com/awslabs/djl/tree/master/jupyter/tutorial). After this, you should read our other [examples](https://github.com/awslabs/djl/tree/master/examples) and [jupyter notebooks](https://github.com/awslabs/djl/tree/master/jupyter) to learn more about DJL. You can find the complete source code for this tutorial in the [examples project](https://github.com/awslabs/djl/blob/master/examples/src/main/java/ai/djl/examples/inference/ImageClassification.java).
# Autonomous driving - Car detection Welcome to your week 3 programming assignment. You will learn about object detection using the very powerful YOLO model. Many of the ideas in this notebook are described in the two YOLO papers: [Redmon et al., 2016](https://arxiv.org/abs/1506.02640) and [Redmon and Farhadi, 2016](https://arxiv.org/abs/1612.08242). **You will learn to**: - Use object detection on a car detection dataset - Deal with bounding boxes ## <font color='darkblue'>Updates</font> #### If you were working on the notebook before this update... * The current notebook is version "3a". * You can find your original work saved in the notebook with the previous version name ("v3") * To view the file directory, go to the menu "File->Open", and this will open a new tab that shows the file directory. #### List of updates * Clarified "YOLO" instructions preceding the code. * Added details about anchor boxes. * Added explanation of how score is calculated. * `yolo_filter_boxes`: added additional hints. Clarify syntax for argmax and max. * `iou`: clarify instructions for finding the intersection. * `iou`: give variable names for all 8 box vertices, for clarity. Adds `width` and `height` variables for clarity. * `iou`: add test cases to check handling of non-intersecting boxes, intersection at vertices, or intersection at edges. * `yolo_non_max_suppression`: clarify syntax for tf.image.non_max_suppression and keras.gather. * "convert output of the model to usable bounding box tensors": Provides a link to the definition of `yolo_head`. * `predict`: hint on calling sess.run. * Spelling, grammar, wording and formatting updates to improve clarity. ## Import libraries Run the following cell to load the packages and dependencies that you will find useful as you build the object detector! ``` import argparse import os import matplotlib.pyplot as plt from matplotlib.pyplot import imshow import scipy.io import scipy.misc import numpy as np import pandas as pd import PIL import tensorflow as tf from keras import backend as K from keras.layers import Input, Lambda, Conv2D from keras.models import load_model, Model from yolo_utils import read_classes, read_anchors, generate_colors, preprocess_image, draw_boxes, scale_boxes from yad2k.models.keras_yolo import yolo_head, yolo_boxes_to_corners, preprocess_true_boxes, yolo_loss, yolo_body %matplotlib inline ``` **Important Note**: As you can see, we import Keras's backend as K. This means that to use a Keras function in this notebook, you will need to write: `K.function(...)`. ## 1 - Problem Statement You are working on a self-driving car. As a critical component of this project, you'd like to first build a car detection system. To collect data, you've mounted a camera to the hood (meaning the front) of the car, which takes pictures of the road ahead every few seconds while you drive around. <center> <video width="400" height="200" src="nb_images/road_video_compressed2.mp4" type="video/mp4" controls> </video> </center> <caption><center> Pictures taken from a car-mounted camera while driving around Silicon Valley. <br> We thank [drive.ai](htps://www.drive.ai/) for providing this dataset. </center></caption> You've gathered all these images into a folder and have labelled them by drawing bounding boxes around every car you found. Here's an example of what your bounding boxes look like. 
<img src="nb_images/box_label.png" style="width:500px;height:250;"> <caption><center> <u> **Figure 1** </u>: **Definition of a box**<br> </center></caption> If you have 80 classes that you want the object detector to recognize, you can represent the class label $c$ either as an integer from 1 to 80, or as an 80-dimensional vector (with 80 numbers) one component of which is 1 and the rest of which are 0. The video lectures had used the latter representation; in this notebook, we will use both representations, depending on which is more convenient for a particular step. In this exercise, you will learn how "You Only Look Once" (YOLO) performs object detection, and then apply it to car detection. Because the YOLO model is very computationally expensive to train, we will load pre-trained weights for you to use. ## 2 - YOLO "You Only Look Once" (YOLO) is a popular algorithm because it achieves high accuracy while also being able to run in real-time. This algorithm "only looks once" at the image in the sense that it requires only one forward propagation pass through the network to make predictions. After non-max suppression, it then outputs recognized objects together with the bounding boxes. ### 2.1 - Model details #### Inputs and outputs - The **input** is a batch of images, and each image has the shape (m, 608, 608, 3) - The **output** is a list of bounding boxes along with the recognized classes. Each bounding box is represented by 6 numbers $(p_c, b_x, b_y, b_h, b_w, c)$ as explained above. If you expand $c$ into an 80-dimensional vector, each bounding box is then represented by 85 numbers. #### Anchor Boxes * Anchor boxes are chosen by exploring the training data to choose reasonable height/width ratios that represent the different classes. For this assignment, 5 anchor boxes were chosen for you (to cover the 80 classes), and stored in the file './model_data/yolo_anchors.txt' * The dimension for anchor boxes is the second to last dimension in the encoding: $(m, n_H,n_W,anchors,classes)$. * The YOLO architecture is: IMAGE (m, 608, 608, 3) -> DEEP CNN -> ENCODING (m, 19, 19, 5, 85). #### Encoding Let's look in greater detail at what this encoding represents. <img src="nb_images/architecture.png" style="width:700px;height:400;"> <caption><center> <u> **Figure 2** </u>: **Encoding architecture for YOLO**<br> </center></caption> If the center/midpoint of an object falls into a grid cell, that grid cell is responsible for detecting that object. Since we are using 5 anchor boxes, each of the 19 x19 cells thus encodes information about 5 boxes. Anchor boxes are defined only by their width and height. For simplicity, we will flatten the last two last dimensions of the shape (19, 19, 5, 85) encoding. So the output of the Deep CNN is (19, 19, 425). <img src="nb_images/flatten.png" style="width:700px;height:400;"> <caption><center> <u> **Figure 3** </u>: **Flattening the last two last dimensions**<br> </center></caption> #### Class score Now, for each box (of each cell) we will compute the following element-wise product and extract a probability that the box contains a certain class. The class score is $score_{c,i} = p_{c} \times c_{i}$: the probability that there is an object $p_{c}$ times the probability that the object is a certain class $c_{i}$. 
<img src="nb_images/probability_extraction.png" style="width:700px;height:400;"> <caption><center> <u> **Figure 4** </u>: **Find the class detected by each box**<br> </center></caption> ##### Example of figure 4 * In figure 4, let's say for box 1 (cell 1), the probability that an object exists is $p_{1}=0.60$. So there's a 60% chance that an object exists in box 1 (cell 1). * The probability that the object is the class "category 3 (a car)" is $c_{3}=0.73$. * The score for box 1 and for category "3" is $score_{1,3}=0.60 \times 0.73 = 0.44$. * Let's say we calculate the score for all 80 classes in box 1, and find that the score for the car class (class 3) is the maximum. So we'll assign the score 0.44 and class "3" to this box "1". #### Visualizing classes Here's one way to visualize what YOLO is predicting on an image: - For each of the 19x19 grid cells, find the maximum of the probability scores (taking a max across the 80 classes, one maximum for each of the 5 anchor boxes). - Color that grid cell according to what object that grid cell considers the most likely. Doing this results in this picture: <img src="nb_images/proba_map.png" style="width:300px;height:300;"> <caption><center> <u> **Figure 5** </u>: Each one of the 19x19 grid cells is colored according to which class has the largest predicted probability in that cell.<br> </center></caption> Note that this visualization isn't a core part of the YOLO algorithm itself for making predictions; it's just a nice way of visualizing an intermediate result of the algorithm. #### Visualizing bounding boxes Another way to visualize YOLO's output is to plot the bounding boxes that it outputs. Doing that results in a visualization like this: <img src="nb_images/anchor_map.png" style="width:200px;height:200;"> <caption><center> <u> **Figure 6** </u>: Each cell gives you 5 boxes. In total, the model predicts: 19x19x5 = 1805 boxes just by looking once at the image (one forward pass through the network)! Different colors denote different classes. <br> </center></caption> #### Non-Max suppression In the figure above, we plotted only boxes for which the model had assigned a high probability, but this is still too many boxes. You'd like to reduce the algorithm's output to a much smaller number of detected objects. To do so, you'll use **non-max suppression**. Specifically, you'll carry out these steps: - Get rid of boxes with a low score (meaning, the box is not very confident about detecting a class; either due to the low probability of any object, or low probability of this particular class). - Select only one box when several boxes overlap with each other and detect the same object. ### 2.2 - Filtering with a threshold on class scores You are going to first apply a filter by thresholding. You would like to get rid of any box for which the class "score" is less than a chosen threshold. The model gives you a total of 19x19x5x85 numbers, with each box described by 85 numbers. It is convenient to rearrange the (19,19,5,85) (or (19,19,425)) dimensional tensor into the following variables: - `box_confidence`: tensor of shape $(19 \times 19, 5, 1)$ containing $p_c$ (confidence probability that there's some object) for each of the 5 boxes predicted in each of the 19x19 cells. - `boxes`: tensor of shape $(19 \times 19, 5, 4)$ containing the midpoint and dimensions $(b_x, b_y, b_h, b_w)$ for each of the 5 boxes in each cell. - `box_class_probs`: tensor of shape $(19 \times 19, 5, 80)$ containing the "class probabilities" $(c_1, c_2, ... 
c_{80})$ for each of the 80 classes for each of the 5 boxes per cell. #### **Exercise**: Implement `yolo_filter_boxes()`. 1. Compute box scores by doing the elementwise product as described in Figure 4 ($p \times c$). The following code may help you choose the right operator: ```python a = np.random.randn(19*19, 5, 1) b = np.random.randn(19*19, 5, 80) c = a * b # shape of c will be (19*19, 5, 80) ``` This is an example of **broadcasting** (multiplying vectors of different sizes). 2. For each box, find: - the index of the class with the maximum box score - the corresponding box score **Useful references** * [Keras argmax](https://keras.io/backend/#argmax) * [Keras max](https://keras.io/backend/#max) **Additional Hints** * For the `axis` parameter of `argmax` and `max`, if you want to select the **last** axis, one way to do so is to set `axis=-1`. This is similar to Python array indexing, where you can select the last position of an array using `arrayname[-1]`. * Applying `max` normally collapses the axis for which the maximum is applied. `keepdims=False` is the default option, and allows that dimension to be removed. We don't need to keep the last dimension after applying the maximum here. * Even though the documentation shows `keras.backend.argmax`, use `keras.argmax`. Similarly, use `keras.max`. 3. Create a mask by using a threshold. As a reminder: `([0.9, 0.3, 0.4, 0.5, 0.1] < 0.4)` returns: `[False, True, False, False, True]`. The mask should be True for the boxes you want to keep. 4. Use TensorFlow to apply the mask to `box_class_scores`, `boxes` and `box_classes` to filter out the boxes we don't want. You should be left with just the subset of boxes you want to keep. **Useful reference**: * [boolean mask](https://www.tensorflow.org/api_docs/python/tf/boolean_mask) **Additional Hints**: * For the `tf.boolean_mask`, we can keep the default `axis=None`. **Reminder**: to call a Keras function, you should use `K.function(...)`. ``` # GRADED FUNCTION: yolo_filter_boxes def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = .6): """Filters YOLO boxes by thresholding on object and class confidence. Arguments: box_confidence -- tensor of shape (19, 19, 5, 1) boxes -- tensor of shape (19, 19, 5, 4) box_class_probs -- tensor of shape (19, 19, 5, 80) threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box Returns: scores -- tensor of shape (None,), containing the class probability score for selected boxes boxes -- tensor of shape (None, 4), containing (b_x, b_y, b_h, b_w) coordinates of selected boxes classes -- tensor of shape (None,), containing the index of the class detected by the selected boxes Note: "None" is here because you don't know the exact number of selected boxes, as it depends on the threshold. For example, the actual output size of scores would be (10,) if there are 10 boxes. """ # Step 1: Compute box scores ### START CODE HERE ### (≈ 1 line) box_scores = box_confidence * box_class_probs ### END CODE HERE ### # Step 2: Find the box_classes thanks to the max box_scores, keep track of the corresponding score ### START CODE HERE ### (≈ 2 lines) box_classes = K.argmax(box_scores, axis=-1) box_class_scores = K.max(box_scores, axis=-1) ### END CODE HERE ### # Step 3: Create a filtering mask based on "box_class_scores" by using "threshold". 
The mask should have the # same dimension as box_class_scores, and be True for the boxes you want to keep (with probability >= threshold) ### START CODE HERE ### (≈ 1 line) filtering_mask = ((box_class_scores) >= threshold) ### END CODE HERE ### # Step 4: Apply the mask to scores, boxes and classes ### START CODE HERE ### (≈ 3 lines) scores = tf.boolean_mask(box_class_scores, filtering_mask, name='boolean_mask') boxes = tf.boolean_mask(boxes, filtering_mask, name='boolean_mask') classes = tf.boolean_mask(box_classes, filtering_mask, name='boolean_mask') ### END CODE HERE ### return scores, boxes, classes with tf.Session() as test_a: box_confidence = tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1) boxes = tf.random_normal([19, 19, 5, 4], mean=1, stddev=4, seed = 1) box_class_probs = tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1) scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = 0.5) print("scores[2] = " + str(scores[2].eval())) print("boxes[2] = " + str(boxes[2].eval())) print("classes[2] = " + str(classes[2].eval())) print("scores.shape = " + str(scores.shape)) print("boxes.shape = " + str(boxes.shape)) print("classes.shape = " + str(classes.shape)) ``` **Expected Output**: <table> <tr> <td> **scores[2]** </td> <td> 10.7506 </td> </tr> <tr> <td> **boxes[2]** </td> <td> [ 8.42653275 3.27136683 -0.5313437 -4.94137383] </td> </tr> <tr> <td> **classes[2]** </td> <td> 7 </td> </tr> <tr> <td> **scores.shape** </td> <td> (?,) </td> </tr> <tr> <td> **boxes.shape** </td> <td> (?, 4) </td> </tr> <tr> <td> **classes.shape** </td> <td> (?,) </td> </tr> </table> **Note** In the test for `yolo_filter_boxes`, we're using random numbers to test the function. In real data, the `box_class_probs` would contain non-zero values between 0 and 1 for the probabilities. The box coordinates in `boxes` would also be chosen so that lengths and heights are non-negative. ### 2.3 - Non-max suppression ### Even after filtering by thresholding over the class scores, you still end up with a lot of overlapping boxes. A second filter for selecting the right boxes is called non-maximum suppression (NMS). <img src="nb_images/non-max-suppression.png" style="width:500px;height:400;"> <caption><center> <u> **Figure 7** </u>: In this example, the model has predicted 3 cars, but it's actually 3 predictions of the same car. Running non-max suppression (NMS) will select only the most accurate (highest probability) of the 3 boxes. <br> </center></caption> Non-max suppression uses the very important function called **"Intersection over Union"**, or IoU. <img src="nb_images/iou.png" style="width:500px;height:400;"> <caption><center> <u> **Figure 8** </u>: Definition of "Intersection over Union". <br> </center></caption> #### **Exercise**: Implement iou(). Some hints: - In this code, we use the convention that (0,0) is the top-left corner of an image, (1,0) is the upper-right corner, and (1,1) is the lower-right corner. In other words, the (0,0) origin starts at the top left corner of the image. As x increases, we move to the right. As y increases, we move down. - For this exercise, we define a box using its two corners: upper left $(x_1, y_1)$ and lower right $(x_2,y_2)$, instead of using the midpoint, height and width. (This makes it a bit easier to calculate the intersection). - To calculate the area of a rectangle, multiply its height $(y_2 - y_1)$ by its width $(x_2 - x_1)$. 
(Since $(x_1,y_1)$ is the top left and $x_2,y_2$ are the bottom right, these differences should be non-negative. - To find the **intersection** of the two boxes $(xi_{1}, yi_{1}, xi_{2}, yi_{2})$: - Feel free to draw some examples on paper to clarify this conceptually. - The top left corner of the intersection $(xi_{1}, yi_{1})$ is found by comparing the top left corners $(x_1, y_1)$ of the two boxes and finding a vertex that has an x-coordinate that is closer to the right, and y-coordinate that is closer to the bottom. - The bottom right corner of the intersection $(xi_{2}, yi_{2})$ is found by comparing the bottom right corners $(x_2,y_2)$ of the two boxes and finding a vertex whose x-coordinate is closer to the left, and the y-coordinate that is closer to the top. - The two boxes **may have no intersection**. You can detect this if the intersection coordinates you calculate end up being the top right and/or bottom left corners of an intersection box. Another way to think of this is if you calculate the height $(y_2 - y_1)$ or width $(x_2 - x_1)$ and find that at least one of these lengths is negative, then there is no intersection (intersection area is zero). - The two boxes may intersect at the **edges or vertices**, in which case the intersection area is still zero. This happens when either the height or width (or both) of the calculated intersection is zero. **Additional Hints** - `xi1` = **max**imum of the x1 coordinates of the two boxes - `yi1` = **max**imum of the y1 coordinates of the two boxes - `xi2` = **min**imum of the x2 coordinates of the two boxes - `yi2` = **min**imum of the y2 coordinates of the two boxes - `inter_area` = You can use `max(height, 0)` and `max(width, 0)` ``` # GRADED FUNCTION: iou def iou(box1, box2): """Implement the intersection over union (IoU) between box1 and box2      Arguments: box1 -- first box, list object with coordinates (box1_x1, box1_y1, box1_x2, box_1_y2)     box2 -- second box, list object with coordinates (box2_x1, box2_y1, box2_x2, box2_y2)     """ # Assign variable names to coordinates for clarity (box1_x1, box1_y1, box1_x2, box1_y2) = box1 (box2_x1, box2_y1, box2_x2, box2_y2) = box2 # Calculate the (yi1, xi1, yi2, xi2) coordinates of the intersection of box1 and box2. Calculate its Area. 
### START CODE HERE ### (≈ 7 lines) xi1 = max(box1[0], box2[0]) yi1 = max(box1[1], box2[1]) xi2 = min(box1[2], box2[2]) yi2 = min(box1[3], box2[3]) inter_width = xi2 - xi1 inter_height = yi2 - yi1 inter_area = max(inter_height, 0) * max(inter_width, 0) ### END CODE HERE ###     # Calculate the Union area by using Formula: Union(A,B) = A + B - Inter(A,B) ### START CODE HERE ### (≈ 3 lines) box1_area = (box1[3] - box1[1]) * (box1[2] - box1[0]) box2_area = (box2[3] - box2[1]) * (box2[2] - box2[0]) union_area = box1_area + box2_area - inter_area ### END CODE HERE ### # compute the IoU ### START CODE HERE ### (≈ 1 line) iou = inter_area / union_area ### END CODE HERE ### return iou ## Test case 1: boxes intersect box1 = (2, 1, 4, 3) box2 = (1, 2, 3, 4) print("iou for intersecting boxes = " + str(iou(box1, box2))) ## Test case 2: boxes do not intersect box1 = (1,2,3,4) box2 = (5,6,7,8) print("iou for non-intersecting boxes = " + str(iou(box1,box2))) ## Test case 3: boxes intersect at vertices only box1 = (1,1,2,2) box2 = (2,2,3,3) print("iou for boxes that only touch at vertices = " + str(iou(box1,box2))) ## Test case 4: boxes intersect at edge only box1 = (1,1,3,3) box2 = (2,3,3,4) print("iou for boxes that only touch at edges = " + str(iou(box1,box2))) ``` **Expected Output**: ``` iou for intersecting boxes = 0.14285714285714285 iou for non-intersecting boxes = 0.0 iou for boxes that only touch at vertices = 0.0 iou for boxes that only touch at edges = 0.0 ``` #### YOLO non-max suppression You are now ready to implement non-max suppression. The key steps are: 1. Select the box that has the highest score. 2. Compute the overlap of this box with all other boxes, and remove boxes that overlap significantly (iou >= `iou_threshold`). 3. Go back to step 1 and iterate until there are no more boxes with a lower score than the currently selected box. This will remove all boxes that have a large overlap with the selected boxes. Only the "best" boxes remain. **Exercise**: Implement yolo_non_max_suppression() using TensorFlow. TensorFlow has two built-in functions that are used to implement non-max suppression (so you don't actually need to use your `iou()` implementation): ** Reference documentation ** - [tf.image.non_max_suppression()](https://www.tensorflow.org/api_docs/python/tf/image/non_max_suppression) ``` tf.image.non_max_suppression( boxes, scores, max_output_size, iou_threshold=0.5, name=None ) ``` Note that in the version of tensorflow used here, there is no parameter `score_threshold` (it's shown in the documentation for the latest version) so trying to set this value will result in an error message: *got an unexpected keyword argument 'score_threshold.* - [K.gather()](https://www.tensorflow.org/api_docs/python/tf/keras/backend/gather) Even though the documentation shows `tf.keras.backend.gather()`, you can use `keras.gather()`. 
``` keras.gather( reference, indices ) ``` ``` # GRADED FUNCTION: yolo_non_max_suppression def yolo_non_max_suppression(scores, boxes, classes, max_boxes = 10, iou_threshold = 0.5): """ Applies Non-max suppression (NMS) to set of boxes Arguments: scores -- tensor of shape (None,), output of yolo_filter_boxes() boxes -- tensor of shape (None, 4), output of yolo_filter_boxes() that have been scaled to the image size (see later) classes -- tensor of shape (None,), output of yolo_filter_boxes() max_boxes -- integer, maximum number of predicted boxes you'd like iou_threshold -- real value, "intersection over union" threshold used for NMS filtering Returns: scores -- tensor of shape (, None), predicted score for each box boxes -- tensor of shape (4, None), predicted box coordinates classes -- tensor of shape (, None), predicted class for each box Note: The "None" dimension of the output tensors has obviously to be less than max_boxes. Note also that this function will transpose the shapes of scores, boxes, classes. This is made for convenience. """ max_boxes_tensor = K.variable(max_boxes, dtype='int32') # tensor to be used in tf.image.non_max_suppression() K.get_session().run(tf.variables_initializer([max_boxes_tensor])) # initialize variable max_boxes_tensor # Use tf.image.non_max_suppression() to get the list of indices corresponding to boxes you keep ### START CODE HERE ### (≈ 1 line) nms_indices = tf.image.non_max_suppression(boxes = boxes, scores = scores, max_output_size = max_boxes, iou_threshold = iou_threshold) ### END CODE HERE ### # Use K.gather() to select only nms_indices from scores, boxes and classes ### START CODE HERE ### (≈ 3 lines) scores = K.gather(scores, nms_indices) boxes = K.gather(boxes, nms_indices) classes = K.gather(classes, nms_indices) ### END CODE HERE ### return scores, boxes, classes with tf.Session() as test_b: scores = tf.random_normal([54,], mean=1, stddev=4, seed = 1) boxes = tf.random_normal([54, 4], mean=1, stddev=4, seed = 1) classes = tf.random_normal([54,], mean=1, stddev=4, seed = 1) scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes) print("scores[2] = " + str(scores[2].eval())) print("boxes[2] = " + str(boxes[2].eval())) print("classes[2] = " + str(classes[2].eval())) print("scores.shape = " + str(scores.eval().shape)) print("boxes.shape = " + str(boxes.eval().shape)) print("classes.shape = " + str(classes.eval().shape)) ``` **Expected Output**: <table> <tr> <td> **scores[2]** </td> <td> 6.9384 </td> </tr> <tr> <td> **boxes[2]** </td> <td> [-5.299932 3.13798141 4.45036697 0.95942086] </td> </tr> <tr> <td> **classes[2]** </td> <td> -2.24527 </td> </tr> <tr> <td> **scores.shape** </td> <td> (10,) </td> </tr> <tr> <td> **boxes.shape** </td> <td> (10, 4) </td> </tr> <tr> <td> **classes.shape** </td> <td> (10,) </td> </tr> </table> ### 2.4 Wrapping up the filtering It's time to implement a function taking the output of the deep CNN (the 19x19x5x85 dimensional encoding) and filtering through all the boxes using the functions you've just implemented. **Exercise**: Implement `yolo_eval()` which takes the output of the YOLO encoding and filters the boxes using score threshold and NMS. There's just one last implementational detail you have to know. There're a few ways of representing boxes, such as via their corners or via their midpoint and height/width. 
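Conceptually, going from a midpoint/size representation to a corner representation is just a shift by half the width and height. Here is a minimal NumPy sketch of that idea (illustrative only; it does not reproduce the exact coordinate ordering used by the provided helper functions):

```python
import numpy as np

def midpoint_to_corners(box_xy, box_wh):
    """(x_center, y_center), (width, height) -> (x1, y1, x2, y2)."""
    box_xy = np.asarray(box_xy, dtype=np.float32)
    box_wh = np.asarray(box_wh, dtype=np.float32)
    mins = box_xy - box_wh / 2.0   # upper-left corner
    maxes = box_xy + box_wh / 2.0  # lower-right corner
    return np.concatenate([mins, maxes], axis=-1)

# A box centered at (0.5, 0.5) with width 0.2 and height 0.4:
print(midpoint_to_corners([0.5, 0.5], [0.2, 0.4]))  # [0.4 0.3 0.6 0.7]
```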
YOLO converts between a few such formats at different times, using the following functions (which we have provided): ```python boxes = yolo_boxes_to_corners(box_xy, box_wh) ``` which converts the yolo box coordinates (x,y,w,h) to box corners' coordinates (x1, y1, x2, y2) to fit the input of `yolo_filter_boxes` ```python boxes = scale_boxes(boxes, image_shape) ``` YOLO's network was trained to run on 608x608 images. If you are testing this data on a different size image--for example, the car detection dataset had 720x1280 images--this step rescales the boxes so that they can be plotted on top of the original 720x1280 image. Don't worry about these two functions; we'll show you where they need to be called. ``` # GRADED FUNCTION: yolo_eval def yolo_eval(yolo_outputs, image_shape = (720., 1280.), max_boxes=10, score_threshold=.6, iou_threshold=.5): """ Converts the output of YOLO encoding (a lot of boxes) to your predicted boxes along with their scores, box coordinates and classes. Arguments: yolo_outputs -- output of the encoding model (for image_shape of (608, 608, 3)), contains 4 tensors: box_confidence: tensor of shape (None, 19, 19, 5, 1) box_xy: tensor of shape (None, 19, 19, 5, 2) box_wh: tensor of shape (None, 19, 19, 5, 2) box_class_probs: tensor of shape (None, 19, 19, 5, 80) image_shape -- tensor of shape (2,) containing the input shape, in this notebook we use (608., 608.) (has to be float32 dtype) max_boxes -- integer, maximum number of predicted boxes you'd like score_threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box iou_threshold -- real value, "intersection over union" threshold used for NMS filtering Returns: scores -- tensor of shape (None, ), predicted score for each box boxes -- tensor of shape (None, 4), predicted box coordinates classes -- tensor of shape (None,), predicted class for each box """ ### START CODE HERE ### # Retrieve outputs of the YOLO model (≈1 line) box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs[0], yolo_outputs[1], yolo_outputs[2], yolo_outputs[3] # Convert boxes to be ready for filtering functions boxes = yolo_boxes_to_corners(box_xy, box_wh) # Use one of the functions you've implemented to perform Score-filtering with a threshold of score_threshold (≈1 line) scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = 0.5) # Scale boxes back to original image shape. 
boxes = scale_boxes(boxes, image_shape) # Use one of the functions you've implemented to perform Non-max suppression with a threshold of iou_threshold (≈1 line) scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes) ### END CODE HERE ### return scores, boxes, classes with tf.Session() as test_b: yolo_outputs = (tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1), tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1), tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1), tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1)) scores, boxes, classes = yolo_eval(yolo_outputs) print("scores[2] = " + str(scores[2].eval())) print("boxes[2] = " + str(boxes[2].eval())) print("classes[2] = " + str(classes[2].eval())) print("scores.shape = " + str(scores.eval().shape)) print("boxes.shape = " + str(boxes.eval().shape)) print("classes.shape = " + str(classes.eval().shape)) ``` **Expected Output**: <table> <tr> <td> **scores[2]** </td> <td> 138.791 </td> </tr> <tr> <td> **boxes[2]** </td> <td> [ 1292.32971191 -278.52166748 3876.98925781 -835.56494141] </td> </tr> <tr> <td> **classes[2]** </td> <td> 54 </td> </tr> <tr> <td> **scores.shape** </td> <td> (10,) </td> </tr> <tr> <td> **boxes.shape** </td> <td> (10, 4) </td> </tr> <tr> <td> **classes.shape** </td> <td> (10,) </td> </tr> </table> ## Summary for YOLO: - Input image (608, 608, 3) - The input image goes through a CNN, resulting in a (19,19,5,85) dimensional output. - After flattening the last two dimensions, the output is a volume of shape (19, 19, 425): - Each cell in a 19x19 grid over the input image gives 425 numbers. - 425 = 5 x 85 because each cell contains predictions for 5 boxes, corresponding to 5 anchor boxes, as seen in lecture. - 85 = 5 + 80 where 5 is because $(p_c, b_x, b_y, b_h, b_w)$ has 5 numbers, and 80 is the number of classes we'd like to detect - You then select only few boxes based on: - Score-thresholding: throw away boxes that have detected a class with a score less than the threshold - Non-max suppression: Compute the Intersection over Union and avoid selecting overlapping boxes - This gives you YOLO's final output. ## 3 - Test YOLO pre-trained model on images In this part, you are going to use a pre-trained model and test it on the car detection dataset. We'll need a session to execute the computation graph and evaluate the tensors. ``` sess = K.get_session() ``` ### 3.1 - Defining classes, anchors and image shape. * Recall that we are trying to detect 80 classes, and are using 5 anchor boxes. * We have gathered the information on the 80 classes and 5 boxes in two files "coco_classes.txt" and "yolo_anchors.txt". * We'll read class names and anchors from text files. * The car detection dataset has 720x1280 images, which we've pre-processed into 608x608 images. ``` class_names = read_classes("model_data/coco_classes.txt") anchors = read_anchors("model_data/yolo_anchors.txt") image_shape = (720., 1280.) ``` ### 3.2 - Loading a pre-trained model * Training a YOLO model takes a very long time and requires a fairly large dataset of labelled bounding boxes for a large range of target classes. * You are going to load an existing pre-trained Keras YOLO model stored in "yolo.h5". * These weights come from the official YOLO website, and were converted using a function written by Allan Zelener. References are at the end of this notebook. Technically, these are the parameters from the "YOLOv2" model, but we will simply refer to it as "YOLO" in this notebook. 
Run the cell below to load the model from this file. ``` yolo_model = load_model("model_data/yolo.h5") ``` This loads the weights of a trained YOLO model. Here's a summary of the layers your model contains. ``` yolo_model.summary() ``` **Note**: On some computers, you may see a warning message from Keras. Don't worry about it if you do--it is fine. **Reminder**: this model converts a preprocessed batch of input images (shape: (m, 608, 608, 3)) into a tensor of shape (m, 19, 19, 5, 85) as explained in Figure (2). ### 3.3 - Convert output of the model to usable bounding box tensors The output of `yolo_model` is a (m, 19, 19, 5, 85) tensor that needs to pass through non-trivial processing and conversion. The following cell does that for you. If you are curious about how `yolo_head` is implemented, you can find the function definition in the file ['keras_yolo.py'](https://github.com/allanzelener/YAD2K/blob/master/yad2k/models/keras_yolo.py). The file is located in your workspace in this path 'yad2k/models/keras_yolo.py'. ``` yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names)) ``` You added `yolo_outputs` to your graph. This set of 4 tensors is ready to be used as input by your `yolo_eval` function. ### 3.4 - Filtering boxes `yolo_outputs` gave you all the predicted boxes of `yolo_model` in the correct format. You're now ready to perform filtering and select only the best boxes. Let's now call `yolo_eval`, which you had previously implemented, to do this. ``` scores, boxes, classes = yolo_eval(yolo_outputs, image_shape) ``` ### 3.5 - Run the graph on an image Let the fun begin. You have created a graph that can be summarized as follows: 1. <font color='purple'> yolo_model.input </font> is given to `yolo_model`. The model is used to compute the output <font color='purple'> yolo_model.output </font> 2. <font color='purple'> yolo_model.output </font> is processed by `yolo_head`. It gives you <font color='purple'> yolo_outputs </font> 3. <font color='purple'> yolo_outputs </font> goes through a filtering function, `yolo_eval`. It outputs your predictions: <font color='purple'> scores, boxes, classes </font> **Exercise**: Implement predict() which runs the graph to test YOLO on an image. You will need to run a TensorFlow session, to have it compute `scores, boxes, classes`. The code below also uses the following function: ```python image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608)) ``` which outputs: - image: a python (PIL) representation of your image used for drawing boxes. You won't need to use it. - image_data: a numpy-array representing the image. This will be the input to the CNN. **Important note**: when a model uses BatchNorm (as is the case in YOLO), you will need to pass an additional placeholder in the feed_dict {K.learning_phase(): 0}. #### Hint: Using the TensorFlow Session object * Recall that above, we called `K.get_Session()` and saved the Session object in `sess`. * To evaluate a list of tensors, we call `sess.run()` like this: ``` sess.run(fetches=[tensor1,tensor2,tensor3], feed_dict={yolo_model.input: the_input_variable, K.learning_phase():0 } ``` * Notice that the variables `scores, boxes, classes` are not passed into the `predict` function, but these are global variables that you will use within the `predict` function. ``` def predict(sess, image_file): """ Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the predictions. 
Arguments: sess -- your tensorflow/Keras session containing the YOLO graph image_file -- name of an image stored in the "images" folder. Returns: out_scores -- tensor of shape (None, ), scores of the predicted boxes out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes out_classes -- tensor of shape (None, ), class index of the predicted boxes Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes. """ # Preprocess your image image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608)) # Run the session with the correct tensors and choose the correct placeholders in the feed_dict. # You'll need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0}) ### START CODE HERE ### (≈ 1 line) out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict={yolo_model.input: image_data, K.learning_phase(): 0}) ### END CODE HERE ### # Print predictions info print('Found {} boxes for {}'.format(len(out_boxes), image_file)) # Generate colors for drawing bounding boxes. colors = generate_colors(class_names) # Draw bounding boxes on the image file draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors) # Save the predicted bounding box on the image image.save(os.path.join("out", image_file), quality=90) # Display the results in the notebook output_image = scipy.misc.imread(os.path.join("out", image_file)) imshow(output_image) return out_scores, out_boxes, out_classes ``` Run the following cell on the "test.jpg" image to verify that your function is correct. ``` out_scores, out_boxes, out_classes = predict(sess, "test.jpg") ``` **Expected Output**: <table> <tr> <td> **Found 7 boxes for test.jpg** </td> </tr> <tr> <td> **car** </td> <td> 0.60 (925, 285) (1045, 374) </td> </tr> <tr> <td> **car** </td> <td> 0.66 (706, 279) (786, 350) </td> </tr> <tr> <td> **bus** </td> <td> 0.67 (5, 266) (220, 407) </td> </tr> <tr> <td> **car** </td> <td> 0.70 (947, 324) (1280, 705) </td> </tr> <tr> <td> **car** </td> <td> 0.74 (159, 303) (346, 440) </td> </tr> <tr> <td> **car** </td> <td> 0.80 (761, 282) (942, 412) </td> </tr> <tr> <td> **car** </td> <td> 0.89 (367, 300) (745, 648) </td> </tr> </table> The model you've just run is actually able to detect 80 different classes listed in "coco_classes.txt". To test the model on your own images: 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub. 2. Add your image to this Jupyter Notebook's directory, in the "images" folder 3. Write your image's name in the cell above code 4. Run the code and see the output of the algorithm! If you were to run your session in a for loop over all your images. Here's what you would get: <center> <video width="400" height="200" src="nb_images/pred_video_compressed2.mp4" type="video/mp4" controls> </video> </center> <caption><center> Predictions of the YOLO model on pictures taken from a camera while driving around the Silicon Valley <br> Thanks [drive.ai](https://www.drive.ai/) for providing this dataset! </center></caption> ## <font color='darkblue'>What you should remember: - YOLO is a state-of-the-art object detection model that is fast and accurate - It runs an input image through a CNN which outputs a 19x19x5x85 dimensional volume. - The encoding can be seen as a grid where each of the 19x19 cells contains information about 5 boxes. - You filter through all the boxes using non-max suppression. 
Specifically: - Score thresholding on the probability of detecting a class to keep only accurate (high probability) boxes - Intersection over Union (IoU) thresholding to eliminate overlapping boxes - Because training a YOLO model from randomly initialized weights is non-trivial and requires a large dataset as well as lot of computation, we used previously trained model parameters in this exercise. If you wish, you can also try fine-tuning the YOLO model with your own dataset, though this would be a fairly non-trivial exercise. **References**: The ideas presented in this notebook came primarily from the two YOLO papers. The implementation here also took significant inspiration and used many components from Allan Zelener's GitHub repository. The pre-trained weights used in this exercise came from the official YOLO website. - Joseph Redmon, Santosh Divvala, Ross Girshick, Ali Farhadi - [You Only Look Once: Unified, Real-Time Object Detection](https://arxiv.org/abs/1506.02640) (2015) - Joseph Redmon, Ali Farhadi - [YOLO9000: Better, Faster, Stronger](https://arxiv.org/abs/1612.08242) (2016) - Allan Zelener - [YAD2K: Yet Another Darknet 2 Keras](https://github.com/allanzelener/YAD2K) - The official YOLO website (https://pjreddie.com/darknet/yolo/) **Car detection dataset**: <a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/88x31.png" /></a><br /><span xmlns:dct="http://purl.org/dc/terms/" property="dct:title">The Drive.ai Sample Dataset</span> (provided by drive.ai) is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</a>. We are grateful to Brody Huval, Chih Hu and Rahul Patel for providing this data.
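As a small companion to the non-max suppression summary above, here is a minimal, self-contained sketch of the Intersection over Union computation that NMS relies on. It uses hypothetical corner-format boxes `(x1, y1, x2, y2)` and is not the notebook's own `iou` implementation:

```python
def iou(box1, box2):
    """Intersection over Union for two boxes given as (x1, y1, x2, y2)."""
    # Coordinates of the intersection rectangle
    xi1, yi1 = max(box1[0], box2[0]), max(box1[1], box2[1])
    xi2, yi2 = min(box1[2], box2[2]), min(box1[3], box2[3])
    inter_area = max(xi2 - xi1, 0) * max(yi2 - yi1, 0)

    # Union = sum of the two areas minus the overlap
    box1_area = (box1[2] - box1[0]) * (box1[3] - box1[1])
    box2_area = (box2[2] - box2[0]) * (box2[3] - box2[1])
    union_area = box1_area + box2_area - inter_area

    return inter_area / union_area

# Example with made-up boxes: low overlap, so NMS would keep both
print(iou((2, 1, 4, 3), (1, 0, 3, 4)))  # -> 0.2
```

Boxes whose IoU with a higher-scoring box exceeds the threshold are the ones NMS discards.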
github_jupyter
``` # Copyright (c) 2020-2021 Adrian Georg Herrmann import os import matplotlib.pyplot as plt import pandas as pd import numpy as np from scipy import interpolate from sklearn.linear_model import LinearRegression from datetime import datetime data_root = "../../data" locations = { "berlin": ["52.4652025", "13.3412466"], "wijchen": ["51.8235504", "5.7329005"] } dfs = { "berlin": None, "wijchen": None } ``` ## Sunlight angles ``` def get_julian_day(time): if time.month > 2: y = time.year m = time.month else: y = time.year - 1 m = time.month + 12 d = time.day + time.hour / 24 + time.minute / 1440 + time.second / 86400 b = 2 - np.floor(y / 100) + np.floor(y / 400) jd = np.floor(365.25 * (y + 4716)) + np.floor(30.6001 * (m + 1)) + d + b - 1524.5 return jd def get_angle(time, latitude, longitude): # Source: # https://de.wikipedia.org/wiki/Sonnenstand#Genauere_Ermittlung_des_Sonnenstandes_f%C3%BCr_einen_Zeitpunkt # 1. Eclipctical coordinates of the sun # Julian day jd = get_julian_day(time) n = jd - 2451545 # Median ecliptic longitude of the sun< l = np.mod(280.46 + 0.9856474 * n, 360) # Median anomaly g = np.mod(357.528 + 0.9856003 * n, 360) # Ecliptic longitude of the sun lbd = l + 1.915 * np.sin(np.radians(g)) + 0.01997 * np.sin(np.radians(2*g)) # 2. Equatorial coordinates of the sun # Ecliptic eps = 23.439 - 0.0000004 * n # Right ascension alpha = np.degrees(np.arctan(np.cos(np.radians(eps)) * np.tan(np.radians(lbd)))) if np.cos(np.radians(lbd)) < 0: alpha += 180 # Declination delta = np.degrees(np.arcsin(np.sin(np.radians(eps)) * np.sin(np.radians(lbd)))) # 3. Horizontal coordinates of the sun t0 = (get_julian_day(time.replace(hour=0, minute=0, second=0)) - 2451545) / 36525 # Median sidereal time theta_hg = np.mod(6.697376 + 2400.05134 * t0 + 1.002738 * (time.hour + time.minute / 60), 24) theta_g = theta_hg * 15 theta = theta_g + longitude # Hour angle of the sun tau = theta - alpha # Elevation angle h = np.cos(np.radians(delta)) * np.cos(np.radians(tau)) * np.cos(np.radians(latitude)) h += np.sin(np.radians(delta)) * np.sin(np.radians(latitude)) h = np.degrees(np.arcsin(h)) return (h if h > 0 else 0) ``` ## Energy data ``` for location, _ in locations.items(): # This list contains all time points for which energy measurements exist, therefore delimiting # the time frame that is to our interest. 
energy = {} data_path = os.path.join(data_root, location) for filename in os.listdir(data_path): with open(os.path.join(data_path, filename), "r") as file: for line in file: key = datetime.strptime(line.split(";")[0], '%Y-%m-%d %H:%M:%S').timestamp() energy[key] = int(line.split(";")[1].strip()) df = pd.DataFrame( data={"time": energy.keys(), "energy": energy.values()}, columns=["time", "energy"] ) dfs[location] = df.sort_values(by="time", ascending=True) # Summarize energy data per hour instead of keeping it per 15 minutes for location, _ in locations.items(): times = [] energy = [] df = dfs[location] for i, row in dfs[location].iterrows(): if row["time"] % 3600 == 0: try: t4 = row["time"] e4 = row["energy"] e3 = df["energy"][df["time"] == t4 - 900].values[0] e2 = df["energy"][df["time"] == t4 - 1800].values[0] e1 = df["energy"][df["time"] == t4 - 2700].values[0] times += [t4] energy += [e1 + e2 + e3 + e4] except: pass df = pd.DataFrame(data={"time": times, "energy_h": energy}, columns=["time", "energy_h"]) df = df.sort_values(by="time", ascending=True) dfs[location] = dfs[location].join(df.set_index("time"), on="time", how="right").drop("energy", axis=1) dfs[location].rename(columns={"energy_h": "energy"}, inplace=True) # These lists contain the time tuples that delimit connected ranges without interruptions. time_delimiters = {} for location, _ in locations.items(): delimiters = [] df = dfs[location] next_couple = [df["time"].iloc[0], None] interval = df["time"].iloc[1] - df["time"].iloc[0] for i in range(len(df["time"].index) - 1): if df["time"].iloc[i+1] - df["time"].iloc[i] > interval: next_couple[1] = df["time"].iloc[i] delimiters += [next_couple] next_couple = [df["time"].iloc[i+1], None] next_couple[1] = df["time"].iloc[-1] delimiters += [next_couple] time_delimiters[location] = delimiters # This are lists of dataframes containing connected ranges without interruptions. 
dataframes_wijchen = [] for x in time_delimiters["wijchen"]: dataframes_wijchen += [dfs["wijchen"].loc[(dfs["wijchen"].time >= x[0]) & (dfs["wijchen"].time <= x[1])]] dataframes_berlin = [] for x in time_delimiters["berlin"]: dataframes_berlin += [dfs["berlin"].loc[(dfs["berlin"].time >= x[0]) & (dfs["berlin"].time <= x[1])]] for location, _ in locations.items(): print(location, ":") for delimiters in time_delimiters[location]: t0 = datetime.fromtimestamp(delimiters[0]) t1 = datetime.fromtimestamp(delimiters[1]) print(t0, "-", t1) print() ``` ### Wijchen dataset ``` for d in dataframes_wijchen: print(len(d)) plt.figure(figsize=(200, 25)) plt.plot(dfs["wijchen"]["time"], dfs["wijchen"]["energy"], drawstyle="steps-pre") energy_max_wijchen = dfs["wijchen"]["energy"].max() energy_max_wijchen_idx = dfs["wijchen"]["energy"].argmax() energy_max_wijchen_time = datetime.fromtimestamp(dfs["wijchen"]["time"].iloc[energy_max_wijchen_idx]) print(energy_max_wijchen_time, ":", energy_max_wijchen) energy_avg_wijchen = dfs["wijchen"]["energy"].mean() print(energy_avg_wijchen) ``` ### Berlin dataset ``` for d in dataframes_berlin: print(len(d)) plt.figure(figsize=(200, 25)) plt.plot(dfs["berlin"]["time"], dfs["berlin"]["energy"], drawstyle="steps-pre") energy_max_berlin = dfs["berlin"]["energy"].max() energy_max_berlin_idx = dfs["berlin"]["energy"].argmax() energy_max_berlin_time = datetime.fromtimestamp(dfs["berlin"]["time"].iloc[energy_max_berlin_idx]) print(energy_max_berlin_time, ":", energy_max_berlin) energy_avg_berlin = dfs["berlin"]["energy"].mean() print(energy_avg_berlin) ``` ## Sunlight angles ``` for location, lonlat in locations.items(): angles = [ get_angle( datetime.fromtimestamp(x - 3600), float(lonlat[0]), float(lonlat[1]) ) for x in dfs[location]["time"] ] dfs[location]["angles"] = angles ``` ## Weather data ``` # Contact the author for a sample of data, see doc/thesis.pdf, page 72. weather_data = np.load(os.path.join(data_root, "weather.npy"), allow_pickle=True).item() # There is no cloud cover data for berlin2, so use the data of berlin1. weather_data["berlin2"]["cloud"] = weather_data["berlin1"]["cloud"] # There is no radiation data for berlin1, so use the data of berlin2. 
weather_data["berlin1"]["rad"] = weather_data["berlin2"]["rad"] # Preprocess weather data weather_params = [ "temp", "humid", "press", "cloud", "rad" ] stations = [ "wijchen1", "wijchen2", "berlin1", "berlin2" ] for station in stations: for param in weather_params: to_del = [] for key, val in weather_data[station][param].items(): if val is None: to_del.append(key) for x in to_del: del weather_data[station][param][x] def interpolate_map(map, time_range): ret = { "time": [], "value": [] } keys = list(map.keys()) values = list(map.values()) f = interpolate.interp1d(keys, values) ret["time"] = time_range ret["value"] = f(ret["time"]) return ret def update_df(df, time_range, map1, map2, param1, param2): map1_ = interpolate_map(map1, time_range) df1 = pd.DataFrame( data={"time": map1_["time"], param1: map1_["value"]}, columns=["time", param1] ) map2_ = interpolate_map(map2, time_range) df2 = pd.DataFrame( data={"time": map2_["time"], param2: map2_["value"]}, columns=["time", param2] ) df_ = df.join(df1.set_index("time"), on="time").join(df2.set_index("time"), on="time") return df_ # Insert weather data into dataframes for location, _ in locations.items(): df = dfs[location] station1 = location + "1" station2 = location + "2" for param in weather_params: param1 = param + "1" param2 = param + "2" df = update_df( df, df["time"], weather_data[station1][param], weather_data[station2][param], param1, param2 ) dfs[location] = df.set_index(keys=["time"], drop=False) # These are lists of dataframes containing connected ranges without interruptions. dataframes_wijchen = [] for x in time_delimiters["wijchen"]: dataframes_wijchen += [dfs["wijchen"].loc[(dfs["wijchen"].time >= x[0]) & (dfs["wijchen"].time <= x[1])]] dataframes_berlin = [] for x in time_delimiters["berlin"]: dataframes_berlin += [dfs["berlin"].loc[(dfs["berlin"].time >= x[0]) & (dfs["berlin"].time <= x[1])]] ``` ### Linear regression model #### Wijchen ``` df_train = dataframes_wijchen[9].iloc[17:258] # df_train = dataframes_wijchen[9].iloc[17:234] # df_train = pd.concat([dataframes_wijchen[9].iloc[17:], dataframes_wijchen[10], dataframes_wijchen[11]]) df_val = dataframes_wijchen[-3].iloc[:241] # df_val = dataframes_wijchen[-2].iloc[:241] lr_x1 = df_train[["angles", "temp1", "humid1", "press1", "cloud1", "rad1"]].to_numpy() lr_y1 = df_train[["energy"]].to_numpy() lr_model1 = LinearRegression() lr_model1.fit(lr_x1, lr_y1) lr_model1.score(lr_x1, lr_y1) lr_x2 = df_train[["angles", "temp2", "humid2", "press2", "cloud2", "rad2"]].to_numpy() lr_y2 = df_train[["energy"]].to_numpy() lr_model2 = LinearRegression() lr_model2.fit(lr_x2, lr_y2) lr_model2.score(lr_x2, lr_y2) lr_x3 = df_train[["angles", "temp1", "temp2", "humid1", "humid2", "press1", "press2", "cloud1", "cloud2", "rad1", "rad2"]].to_numpy() lr_y3 = df_train[["energy"]].to_numpy() lr_model3 = LinearRegression() lr_model3.fit(lr_x3, lr_y3) lr_model3.score(lr_x3, lr_y3) # filename = "lr_model.pkl" # with open(filename, 'wb') as file: # pickle.dump(lr_model3, file) xticks = df_train["time"].iloc[::24] lr_x3 = df_train[["angles", "temp1", "temp2", "humid1", "humid2", "press1", "press2", "cloud1", "cloud2", "rad1", "rad2"]].to_numpy() fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(20, 5)) ax.set_xticks(ticks=xticks) ax.set_xticklabels(labels=[datetime.fromtimestamp(x).strftime("%d-%m-%y") for x in xticks]) ax.tick_params(labelsize=18) ax.plot(df_train["time"], df_train["energy"], label="Actual energy production in Wh", drawstyle="steps-pre") ax.plot(df_train["time"], lr_model3.predict(lr_x3), 
label="Predicted energy production in Wh (Volkel + Deelen)", drawstyle="steps-pre") ax.legend(prop={'size': 18}) xticks = df_val["time"].iloc[::24] lr_x1 = df_val[["angles", "temp1", "humid1", "press1", "cloud1", "rad1"]].to_numpy() lr_x2 = df_val[["angles", "temp2", "humid2", "press2", "cloud2", "rad2"]].to_numpy() lr_x3 = df_val[["angles", "temp1", "temp2", "humid1", "humid2", "press1", "press2", "cloud1", "cloud2", "rad1", "rad2"]].to_numpy() print(lr_model1.score(lr_x1, df_val[["energy"]].to_numpy())) print(lr_model2.score(lr_x2, df_val[["energy"]].to_numpy())) print(lr_model3.score(lr_x3, df_val[["energy"]].to_numpy())) fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(20, 5)) ax.set_xticks(ticks=xticks) ax.set_xticklabels(labels=[datetime.fromtimestamp(x).strftime("%d-%m-%y") for x in xticks]) ax.tick_params(labelsize=18) ax.plot(df_val["time"], df_val["energy"], label="Actual energy production in Wh", drawstyle="steps-pre") ax.plot(df_val["time"], lr_model3.predict(lr_x3), label="Predicted energy production in Wh (Volkel + Deelen)", drawstyle="steps-pre") ax.legend(prop={'size': 18}) print(df["angles"].min(), df_val["angles"].max()) print(df["angles"].min(), df_train["angles"].max()) ``` #### Berlin ``` df_train = dataframes_berlin[1].iloc[:241] # df_train = dataframes_berlin[1].iloc[:720] df_val = dataframes_berlin[1].iloc[312:553] # df_val = dataframes_berlin[1].iloc[720:961] lr_x1 = df_train[["angles", "temp1", "humid1", "press1", "cloud1", "rad1"]].to_numpy() lr_y1 = df_train[["energy"]].to_numpy() lr_model1 = LinearRegression() lr_model1.fit(lr_x1, lr_y1) lr_model1.score(lr_x1, lr_y1) lr_x2 = df_train[["angles", "temp2", "humid2", "press2", "cloud2", "rad2"]].to_numpy() lr_y2 = df_train[["energy"]].to_numpy() lr_model2 = LinearRegression() lr_model2.fit(lr_x2, lr_y2) lr_model2.score(lr_x2, lr_y2) lr_x3 = df_train[["angles", "temp1", "temp2", "humid1", "humid2", "press1", "press2", "cloud1", "cloud2", "rad1", "rad2"]].to_numpy() lr_y3 = df_train[["energy"]].to_numpy() lr_model3 = LinearRegression() lr_model3.fit(lr_x3, lr_y3) lr_model3.score(lr_x3, lr_y3) # filename = "lr_model.pkl" # with open(filename, 'wb') as file: # pickle.dump(lr_model3, file) xticks = df_train["time"].iloc[::24] lr_x3 = df_train[["angles", "temp1", "temp2", "humid1", "humid2", "press1", "press2", "cloud1", "cloud2", "rad1", "rad2"]].to_numpy() fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(20, 5)) ax.set_xticks(ticks=xticks) ax.set_xticklabels(labels=[datetime.fromtimestamp(x).strftime("%d-%m-%y") for x in xticks]) ax.tick_params(labelsize=18) ax.plot(df_train["time"], df_train["energy"], label="Actual energy production in Wh", drawstyle="steps-pre") ax.plot(df_train["time"], lr_model3.predict(lr_x3), label="Predicted energy production in Wh", drawstyle="steps-pre") ax.legend(prop={'size': 18}) xticks = df_val["time"].iloc[::24] lr_x1 = df_val[["angles", "temp1", "humid1", "press1", "cloud1", "rad1"]].to_numpy() lr_x2 = df_val[["angles", "temp2", "humid2", "press2", "cloud2", "rad2"]].to_numpy() lr_x3 = df_val[["angles", "temp1", "temp2", "humid1", "humid2", "press1", "press2", "cloud1", "cloud2", "rad1", "rad2"]].to_numpy() print(lr_model1.score(lr_x1, df_val[["energy"]].to_numpy())) print(lr_model2.score(lr_x2, df_val[["energy"]].to_numpy())) print(lr_model3.score(lr_x3, df_val[["energy"]].to_numpy())) fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(20, 5)) ax.set_xticks(ticks=xticks) ax.set_xticklabels(labels=[datetime.fromtimestamp(x).strftime("%d-%m-%y") for x in xticks]) 
ax.tick_params(labelsize=18) ax.plot(df_val["time"], df_val["energy"], label="Actual energy production in Wh", drawstyle="steps-pre") ax.plot(df_val["time"], lr_model3.predict(lr_x3), label="Predicted energy production in Wh", drawstyle="steps-pre") ax.legend(prop={'size': 18}) ```
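The commented-out cells above hint at saving the fitted regression model to disk. A minimal sketch of persisting and reloading it with `pickle` follows; note that `pickle` is not imported elsewhere in this notebook, the file name comes from the commented code, and `lr_model3`, `lr_x3` and `df_val` are assumed to still be in scope from the cells above:

```python
import pickle

# Save the fitted model (lr_model3 from the cells above)
with open("lr_model.pkl", "wb") as file:
    pickle.dump(lr_model3, file)

# Reload it later and reuse it, e.g. to re-score the validation set
with open("lr_model.pkl", "rb") as file:
    lr_model_loaded = pickle.load(file)

print(lr_model_loaded.score(lr_x3, df_val[["energy"]].to_numpy()))
```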
github_jupyter
``` import pandas as pd import numpy as np import matplotlib.pyplot as plt Pre_data = pd.read_csv("C:\\Users\\2019A00303\\Desktop\\Code\\Airbnb Project\\Data\\PreProcessingAustralia.csv") Pre_data Pre_data['Price'].plot(kind='hist', bins=100) Pre_data['group'] = pd.cut(x=Pre_data['Price'], bins=[0, 50, 100, 150, 200, 1000], labels=['group_1','group_2','group_3','group_4','group_5']) Pre_data.head() from sklearn.model_selection import StratifiedShuffleSplit split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42) for train_index, test_index in split.split(Pre_data, Pre_data["group"]): train = Pre_data.loc[train_index] test = Pre_data.loc[test_index] train['group'].value_counts() / len(train) test['group'].value_counts() / len(test) train.drop('group', axis=1, inplace=True) train.head() test.drop(['Unnamed: 0','group', 'Host Since', 'Country', 'Airbed', 'Couch', 'Futon', 'Pull-out Sofa', 'Real Bed', 'Cleaning Fee'], axis=1, inplace=True) test.head() train_y = train[['Price']] train_y.head() train.drop(['Unnamed: 0', 'Price', 'Host Since', 'Country','Airbed', 'Couch', 'Futon', 'Pull-out Sofa', 'Real Bed', 'Cleaning Fee'], axis=1, inplace=True) train_X = train train_X.head() test_y= test[['Price']] test_y.head() test.drop('Price', axis=1, inplace=True) test_X = test test_X.head() # from sklearn.linear_model import LinearRegression # l_reg = LinearRegression() # l_reg.fit(train_X, train_y) from sklearn.metrics import mean_absolute_error from sklearn.metrics import mean_squared_error import numpy as np # predictions = l_reg.predict(train_X) # mse = mean_squared_error(train_y, predictions) # mae = mean_absolute_error(train_y, predictions) # rmse = np.sqrt(mse) # print(mse, rmse, mae) # predictions = l_reg.predict(test_X) # mse = mean_squared_error(test_y, predictions) # mae = mean_absolute_error(test_y, predictions) # rmse = np.sqrt(mse) # print(mse, rmse, mae) # from sklearn.tree import DecisionTreeRegressor # d_reg = DecisionTreeRegressor() # d_reg.fit(train_X, train_y) # predictions = d_reg.predict(train_X) # mse = mean_squared_error(train_y, predictions) # mae = mean_absolute_error(train_y, predictions) # rmse = np.sqrt(mse) # print(mse, rmse, mae) # predictions = d_reg.predict(test_X) # mse = mean_squared_error(test_y, predictions) # mae = mean_absolute_error(test_y, predictions) # rmse = np.sqrt(mse) # print(mse, rmse, mae) # from sklearn.svm import SVR # svr = SVR() # svr.fit(train_X, train_y) # predictions = svr.predict(train_X) # mse = mean_squared_error(train_y, predictions) # mae = mean_absolute_error(train_y, predictions) # rmse = np.sqrt(mse) # print(mse, rmse, mae) # predictions = svr.predict(test_X) # mse = mean_squared_error(test_y, predictions) # mae = mean_absolute_error(test_y, predictions) # rmse = np.sqrt(mse) # print(mse, rmse, mae) # from sklearn.neighbors import KNeighborsRegressor # knn = KNeighborsRegressor() # knn.fit(train_X, train_y) # predictions = knn.predict(train_X) # mse = mean_squared_error(train_y, predictions) # mae = mean_absolute_error(train_y, predictions) # rmse = np.sqrt(mse) # print(mse, rmse, mae) # predictions = knn.predict(test_X) # mse = mean_squared_error(test_y, predictions) # mae = mean_absolute_error(test_y, predictions) # rmse = np.sqrt(mse) # print(mse, rmse, mae) # from sklearn.neural_network import MLPRegressor # ann = MLPRegressor() # ann.fit(train_X, train_y) # predictions = ann.predict(train_X) # mse = mean_squared_error(train_y, predictions) # mae = mean_absolute_error(train_y, predictions) # rmse = np.sqrt(mse) # 
print(mse, rmse, mae) # predictions = ann.predict(test_X) # mse = mean_squared_error(test_y, predictions) # mae = mean_absolute_error(test_y, predictions) # rmse = np.sqrt(mse) # print(mse, rmse, mae) from sklearn.ensemble import RandomForestRegressor r_reg = RandomForestRegressor() r_reg.fit(train_X, train_y) features = train_X.columns importances = r_reg.feature_importances_ indices = np.argsort(importances) plt.title('Australia Feature Importances') plt.barh(range(len(indices)), importances[indices], color='g', align='center') plt.yticks(range(len(indices)), [features[i] for i in indices]) plt.xlabel('Relative Importance') predictions = r_reg.predict(train_X) mse = mean_squared_error(train_y, predictions) mae = mean_absolute_error(train_y, predictions) rmse = np.sqrt(mse) print(mse, rmse, mae) # from sklearn.model_selection import GridSearchCV # param = {'n_estimators' : [800,900,1000], 'max_features' : ['sqrt','auto','log2'], 'max_depth' : [8,9,10], # 'min_samples_split': [2,3,4]} # r_reg = RandomForestRegressor(random_state=42) # search = GridSearchCV(r_reg, param, cv=5, # scoring='neg_mean_absolute_error') # search.fit(train_X, train_y['Price'].ravel()) # from sklearn.ensemble import RandomForestRegressor # r_reg = RandomForestRegressor(bootstrap=True, # min_samples_split=2, # criterion='mse', # max_depth=None, # max_features='auto', # n_estimators=1000, # random_state=42, # ) # r_reg.fit(train_X, train_y['Price'].ravel()) # predictions = r_reg.predict(train_X) # mse = mean_squared_error(train_y, predictions) # mae = mean_absolute_error(train_y, predictions) # rmse = np.sqrt(mse) # print(mse, rmse, mae) ```
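The random forest above is only scored on the training split. For completeness, a short sketch of the matching test-set evaluation, reusing the same metric pattern as the cells above (it assumes `r_reg`, `test_X` and `test_y` are still in scope):

```python
# Evaluate the fitted random forest on the held-out test split
predictions = r_reg.predict(test_X)
mse = mean_squared_error(test_y, predictions)
mae = mean_absolute_error(test_y, predictions)
rmse = np.sqrt(mse)
print(mse, rmse, mae)
```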
github_jupyter
# KNN(K Nearest Neighbours) for classification of glass types We will make use of KNN algorithms to classify the type of glass. ### What is covered? - About KNN algorithm - Exploring dataset using visualization - scatterplot,pairplot, heatmap (correlation matrix). - Feature scaling - using KNN to predict - Optimization - Distance metrics - Finding the best K value ### About KNN- - It is an instance-based algorithm. - As opposed to model-based algorithms which pre trains on the data, and discards the data. Instance-based algorithms retain the data to classify when a new data point is given. - The distance metric is used to calculate its nearest neighbors (Euclidean, manhattan) - Can solve classification(by determining the majority class of nearest neighbors) and regression problems (by determining the means of nearest neighbors). - If the majority of the nearest neighbors of the new data point belong to a certain class, the model classifies the new data point to that class. ![Knn](./kNN_board.PNG) For example, in the above plot, Assuming k=5, the black point (new data) can be classified as class 1(Blue), because 3 out 5 of its nearest neighbors belong to class 1. ### Dataset [Glass classification dataset](https://www.kaggle.com/uciml/glass) . Download to follow along. **Description** - This is a Glass Identification Data Set from UCI. It contains 10 attributes including id. The response is glass type(discrete 7 values) - Id number: 1 to 214 (removed from CSV file) - RI: refractive index - Na: Sodium (unit measurement: weight percent in corresponding oxide, as are attributes 4-10) - Mg: Magnesium - Al: Aluminum - Si: Silicon - K: Potassium - Ca: Calcium - Ba: Barium - Fe: Iron - Type of glass: (class attribute) - 1 buildingwindowsfloatprocessed - 2 buildingwindowsnonfloatprocessed - 3 vehiclewindowsfloatprocessed - 4 vehiclewindowsnonfloatprocessed (none in this database) - 5 containers - 6 tableware - 7 headlamps About Type 2,4 -> **Float processed glass** means they are made on a floating molten glass on a bed of molten metal, this gives the sheet uniform thickness and flat surfaces. ## Load dependencies and data ``` #import dependencies import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler import seaborn as sns from sklearn.metrics import classification_report, accuracy_score from sklearn.model_selection import cross_val_score #load data df = pd.read_csv('./data/glass.csv') df.head() # value count for glass types df.Type.value_counts() ``` ## Data exploration and visualizaion #### correlation matrix - ``` cor = df.corr() sns.heatmap(cor) ``` We can notice that Ca and K values don't affect Type that much. Also Ca and RI are highly correlated, this means using only RI is enough. So we can go ahead and drop Ca, and also K.(performed later) ## Scatter plot of two features ``` sns.scatterplot(df_feat['RI'],df_feat['Na'],hue=df['Type']) ``` Suppose we consider only RI, and Na values for classification for glass type. - From the above plot, We first calculate the nearest neighbors from the new data point to be calculated. - If the majority of nearest neighbors belong to a particular class, say type 4, then we classify the data point as type 4. But there are a lot more than two features based on which we can classify. So let us take a look at pairwise plot to capture all the features. 
``` #pairwise plot of all the features sns.pairplot(df,hue='Type') plt.show() ``` The pairplot shows that the data is not linear and KNN can be applied to get nearest neighbors and classify the glass types ## Feature Scaling Scaling is necessary for distance-based algorithms such as KNN. This is to avoid higher weightage being assigned to data with a higher magnitude. Using standard scaler we can scale down to unit variance. **Formula:** z = (x - u) / s where x -> value, u -> mean, s -> standard deviation ``` scaler = StandardScaler() scaler.fit(df.drop('Type',axis=1)) #perform transformation scaled_features = scaler.transform(df.drop('Type',axis=1)) scaled_features df_feat = pd.DataFrame(scaled_features,columns=df.columns[:-1]) df_feat.head() ``` ## Applying KNN - Drop features that are not required - Use random state while splitting the data to ensure reproducibility and consistency - Experiment with distance metrics - Euclidean, manhattan ``` dff = df_feat.drop(['Ca','K'],axis=1) #Removing features - Ca and K X_train,X_test,y_train,y_test = train_test_split(dff,df['Type'],test_size=0.3,random_state=45) #setting random state ensures split is same eveytime, so that the results are comparable knn = KNeighborsClassifier(n_neighbors=4,metric='manhattan') knn.fit(X_train,y_train) y_pred = knn.predict(X_test) print(classification_report(y_test,y_pred)) accuracy_score(y_test,y_pred) ``` ### Finding the best K value We can do this either - - by plotting Accuracy - or by plotting the error rate Note that plotting both is not required, both are plottted to show as an example. ``` k_range = range(1,25) k_scores = [] error_rate =[] for k in k_range: knn = KNeighborsClassifier(n_neighbors=k) #kscores - accuracy scores = cross_val_score(knn,dff,df['Type'],cv=5,scoring='accuracy') k_scores.append(scores.mean()) #error rate knn.fit(X_train,y_train) y_pred = knn.predict(X_test) error_rate.append(np.mean(y_pred!=y_test)) #plot k vs accuracy plt.plot(k_range,k_scores) plt.xlabel('value of k - knn algorithm') plt.ylabel('Cross validated accuracy score') plt.show() #plot k vs error rate plt.plot(k_range,error_rate) plt.xlabel('value of k - knn algorithm') plt.ylabel('Error rate') plt.show() ``` we can see that k=4 produces the most accurate results ## Findings - - Manhattan distance produced better results (improved accuracy - more than 5%) - Applying feature scaling improved accuracy by almost 5%. - The best k value was found to be 4. - Dropping Ca produced better results by a bit, K value did not affect results in any way. - Also, we noticed that RI and Ca are highly correlated, this makes sense as it was found that the Refractive index of glass was found to increase with the increase in Cao. (https://link.springer.com/article/10.1134/S1087659614030249) ## Further improvements - We can see that the model can be improved further so we get better accuracy. 
Some suggestions - - Using KFold Cross-validation - Try different algorithms to find the best one for this problem - (SVM, Random forest, etc) ## Other Useful resources - - [K Nearest Neighbour Easily Explained with Implementation by Krish Naik - video](https://www.youtube.com/watch?v=wTF6vzS9fy4) - [KNN by sentdex -video](https://www.youtube.com/watch?v=1i0zu9jHN6U) - [KNN sklearn - docs ](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html) - [Complete guide to K nearest neighbours - python and R - blog](https://kevinzakka.github.io/2016/07/13/k-nearest-neighbor/) - [Why scaling is required in KNN and K-Means - blog](https://medium.com/analytics-vidhya/why-is-scaling-required-in-knn-and-k-means-8129e4d88ed7)
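As one way to act on the improvement ideas above, here is a small sketch of a grid search over `k` and the distance metric. It assumes `dff` and `df` from the cells above, and the parameter ranges are just examples:

```python
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier

param_grid = {
    'n_neighbors': range(1, 25),
    'metric': ['euclidean', 'manhattan'],
}

grid = GridSearchCV(KNeighborsClassifier(), param_grid, cv=5, scoring='accuracy')
grid.fit(dff, df['Type'])

print(grid.best_params_)
print(grid.best_score_)
```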
github_jupyter
<a href="https://colab.research.google.com/github/krakowiakpawel9/machine-learning-bootcamp/blob/master/unsupervised/04_anomaly_detection/01_local_outlier_factor.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

### scikit-learn

Library website: [https://scikit-learn.org](https://scikit-learn.org)

Documentation/User Guide: [https://scikit-learn.org/stable/user_guide.html](https://scikit-learn.org/stable/user_guide.html)

The basic machine learning library for Python. To install scikit-learn, use the command below:

```
!pip install scikit-learn
```

To upgrade scikit-learn to the latest version, use the command below:

```
!pip install --upgrade scikit-learn
```

This course was built against version `0.22.1`

### Table of contents:
1. [Importing libraries](#0)
2. [Generating the data](#1)
3. [Visualizing the data](#2)
4. [The K-means algorithm](#3)
5. [Visualizing the clusters](#4)

### <a name='0'></a> Importing libraries

```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_blobs
import plotly.express as px
import plotly.graph_objects as go

sns.set(font_scale=1.2)
np.random.seed(10)
```

### <a name='1'></a> Generating the data

```
data = make_blobs(n_samples=300, cluster_std=2.0, random_state=10)[0]
data[:5]
```

### <a name='2'></a> Visualizing the data

```
tmp = pd.DataFrame(data=data, columns={'x1', 'x2'})
px.scatter(tmp, x='x1', y='x2', width=950, title='Local Outlier Factor', template='plotly_dark')

fig = go.Figure()
fig1 = px.density_heatmap(tmp, x='x1', y='x2', width=700, title='Outliers', nbinsx=20, nbinsy=20)
fig2 = px.scatter(tmp, x='x1', y='x2', width=700, title='Outliers', opacity=0.5)
fig.add_trace(fig1['data'][0])
fig.add_trace(fig2['data'][0])
fig.update_traces(marker=dict(size=4, line=dict(width=2, color='white')), selector=dict(mode='markers'))
fig.update_layout(template='plotly_dark', width=950)
fig.show()

plt.figure(figsize=(12, 7))
plt.scatter(data[:, 0], data[:, 1], label='data', cmap='tab10')
plt.title('Local Outlier Factor')
plt.legend()
plt.show()

from sklearn.neighbors import LocalOutlierFactor

lof = LocalOutlierFactor(n_neighbors=20)
y_pred = lof.fit_predict(data)
y_pred[:10]

all_data = np.c_[data, y_pred]
all_data[:5]

tmp['y_pred'] = y_pred
px.scatter(tmp, x='x1', y='x2', color='y_pred', width=950, title='Local Outlier Factor', template='plotly_dark')

plt.figure(figsize=(12, 7))
plt.scatter(all_data[:, 0], all_data[:, 1], c=all_data[:, 2], cmap='tab10', label='data')
plt.title('Local Outlier Factor')
plt.legend()
plt.show()

LOF_scores = lof.negative_outlier_factor_
radius = (LOF_scores.max() - LOF_scores) / (LOF_scores.max() - LOF_scores.min())
radius[:5]

plt.figure(figsize=(12, 7))
plt.scatter(all_data[:, 0], all_data[:, 1], label='data', cmap='tab10')
plt.scatter(all_data[:, 0], all_data[:, 1], s=2000 * radius, edgecolors='r', facecolors='none', label='outlier scores')
plt.title('Local Outlier Factor')
legend = plt.legend()
legend.legendHandles[1]._sizes = [40]
plt.show()

plt.figure(figsize=(12, 7))
plt.scatter(all_data[:, 0], all_data[:, 1], c=all_data[:, 2], cmap='tab10', label='data')
plt.scatter(all_data[:, 0], all_data[:, 1], s=2000 * radius, edgecolors='r', facecolors='none', label='outlier scores')
plt.title('Local Outlier Factor')
legend = plt.legend()
legend.legendHandles[1]._sizes = [40]
plt.show()
```
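As a quick follow-up to the plots above, here is a minimal sketch of pulling out the points that `LocalOutlierFactor` flags as outliers (it labels them `-1` in `y_pred`). It assumes `data`, `y_pred` and `LOF_scores` from the cells above:

```python
import numpy as np

# fit_predict returns -1 for outliers and 1 for inliers
outlier_mask = (y_pred == -1)
outliers = data[outlier_mask]

print(f"Flagged outliers: {outlier_mask.sum()} / {len(data)}")

# The most anomalous points have the most negative LOF scores
worst_idx = np.argsort(LOF_scores)[:5]
print("Five most anomalous points:")
print(data[worst_idx])
```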
github_jupyter
``` import numpy as np import pandas as pd import glob import emcee import corner import scipy.stats from scipy.ndimage import gaussian_filter1d import matplotlib.pyplot as plt from matplotlib.ticker import MultipleLocator from sklearn.model_selection import GridSearchCV from sklearn.neighbors import KernelDensity from fit_just_early_lc import prep_light_curve, multifcqfid_lnlike_big_unc, multifcqfid_lnprior_big_unc, multifcqfid_lnposterior_big_unc, lnlike_big_unc from multiprocessing import Pool import time from corner_hack import corner_hack from light_curve_plot import f_t, plot_both_filt %matplotlib notebook info_path = "../../forced_lightcurves/sample_lc_v2/" salt_df = pd.read_csv(info_path + "../../Nobs_cut_salt2_spec_subtype_pec.csv") ``` ## Measure the Deviance Information Criterion $$DIC = 2 \bar{D(\theta)} - D(\bar{\theta})$$ where, $D(\theta) = -2 \log P(x|\theta)$. Thus, we need to calculate the mean posterior parameters, AND, the mean likelihood for the posterior parameters. This requires the `multifcqfid_lnlike_big_unc` function. ``` thin_by = 100 rel_flux_cutoff = 0.4 sn = 'ZTF18abauprj' h5_file = info_path + 'big_unc/{}_emcee_40_varchange.h5'.format(sn) reader = emcee.backends.HDFBackend(h5_file) nsteps = thin_by*np.shape(reader.get_chain())[0] tau = reader.get_autocorr_time(tol=0) burnin = int(5*np.max(tau)) samples = reader.get_chain(discard=burnin, thin=np.max([int(np.max(tau)), 1]), flat=True) lnpost = reader.get_log_prob(discard=burnin, thin=np.max([int(np.max(tau)), 1]), flat=True) t_max = float(salt_df['t0_g_adopted'][salt_df['name'] == sn].values) z = float(salt_df['z_adopt'][salt_df['name'] == sn].values) g_max = float(salt_df['fratio_gmax_2adam'][salt_df['name'] == sn].values) r_max = float(salt_df['fratio_rmax_2adam'][salt_df['name'] == sn].values) t_data, f_data, f_unc_data, fcqfid_data = prep_light_curve(info_path+"{}_force_phot.h5".format(sn), t_max=t_max, z=z, g_max=g_max, r_max=r_max, rel_flux_cutoff=rel_flux_cutoff) loglike_samples = np.zeros(len(samples)) for samp_num, sample in enumerate(samples): loglike_samples[samp_num] = multifcqfid_lnlike_big_unc(sample, f_data, t_data, f_unc_data, fcqfid_data) dhat = -2*multifcqfid_lnlike_big_unc(np.mean(samples, axis=0), f_data, t_data, f_unc_data, fcqfid_data) dbar = -2*np.mean(loglike_samples) dic = 2*dbar - dhat print(dic) ``` #### What about for the $t^2$ model? 
``` h5_file = info_path + 'big_unc/{}_emcee_40_tsquared.h5'.format(sn) reader = emcee.backends.HDFBackend(h5_file) nsteps = thin_by*np.shape(reader.get_chain())[0] tau = reader.get_autocorr_time(tol=0) burnin = int(5*np.max(tau)) samples_tsquared = reader.get_chain(discard=burnin, thin=np.max([int(np.max(tau)), 1]), flat=True) loglike_samples_tsquared = np.zeros(len(samples)) for samp_num, sample in enumerate(samples_tsquared): loglike_samples_tsquared[samp_num] = multifcqfid_lnlike_big_unc(sample, f_data, t_data, f_unc_data, fcqfid_data, prior='delta2') dhat = -2*multifcqfid_lnlike_big_unc(np.mean(samples_tsquared, axis=0), f_data, t_data, f_unc_data, fcqfid_data, prior='delta2') dbar = np.mean(-2*loglike_samples_tsquared) dic_tsquared = 2*dbar_tsquared - dhat_tsquared print(dic_tsquared) ``` ### Loop over all SNe ``` salt_df.name.values dic_uniformative_arr = np.zeros(len(salt_df)) dic_tsquared_arr = np.zeros(len(salt_df)) dic_alpha_r_plus_colors_arr = np.zeros(len(salt_df)) def get_dic(sn): # sn, bw = tup sn_num = np.where(salt_df.name == sn)[0] h5_file = info_path + 'big_unc/{}_emcee_40_varchange.h5'.format(sn) reader = emcee.backends.HDFBackend(h5_file) thin_by = 100 nsteps = thin_by*np.shape(reader.get_chain())[0] tau = reader.get_autocorr_time(tol=0) burnin = int(5*np.max(tau)) samples = reader.get_chain(discard=burnin, thin=np.max(int(np.max(tau)), 0), flat=True) rel_flux_cutoff = 0.4 t_max = float(salt_df['t0_g_adopted'][salt_df['name'] == sn].values) z = float(salt_df['z_adopt'][salt_df['name'] == sn].values) g_max = float(salt_df['fratio_gmax_2adam'][salt_df['name'] == sn].values) r_max = float(salt_df['fratio_rmax_2adam'][salt_df['name'] == sn].values) t_data, f_data, f_unc_data, fcqfid_data = prep_light_curve(info_path+"{}_force_phot.h5".format(sn), t_max=t_max, z=z, g_max=g_max, r_max=r_max, rel_flux_cutoff=rel_flux_cutoff) loglike_samples = np.zeros(len(samples)) for samp_num, sample in enumerate(samples): loglike_samples[samp_num] = multifcqfid_lnlike_big_unc(sample, f_data, t_data, f_unc_data, fcqfid_data) dhat = -2*multifcqfid_lnlike_big_unc(np.mean(samples, axis=0), f_data, t_data, f_unc_data, fcqfid_data) dbar = -2*np.mean(loglike_samples) dic = 2*dbar - dhat h5_file = info_path + 'big_unc/{}_emcee_40_tsquared.h5'.format(sn) reader = emcee.backends.HDFBackend(h5_file) nsteps = thin_by*np.shape(reader.get_chain())[0] tau = reader.get_autocorr_time(tol=0) burnin = int(5*np.max(tau)) samples_tsquared = reader.get_chain(discard=burnin, thin=np.max([int(np.max(tau)), 1]), flat=True) loglike_samples_tsquared = np.zeros(len(samples_tsquared)) for samp_num, sample in enumerate(samples_tsquared): loglike_samples_tsquared[samp_num] = multifcqfid_lnlike_big_unc(sample, f_data, t_data, f_unc_data, fcqfid_data, prior='delta2') dhat_tsquared = -2*multifcqfid_lnlike_big_unc(np.mean(samples_tsquared, axis=0), f_data, t_data, f_unc_data, fcqfid_data, prior='delta2') dbar_tsquared = np.mean(-2*loglike_samples_tsquared) dic_tsquared = 2*dbar_tsquared - dhat_tsquared dic_uniformative_arr[sn_num] = dic dic_tsquared_arr[sn_num] = dic_tsquared h5_file = info_path + 'big_unc/{}_emcee_40_alpha_r_plus_colors.h5'.format(sn) reader = emcee.backends.HDFBackend(h5_file) nsteps = thin_by*np.shape(reader.get_chain())[0] tau = reader.get_autocorr_time(tol=0) burnin = int(5*np.max(tau)) samples_alpha_r_plus_colors = reader.get_chain(discard=burnin, thin=np.max([int(np.max(tau)), 1]), flat=True) loglike_samples_alpha_r_plus_colors = np.zeros(len(samples_alpha_r_plus_colors)) for samp_num, sample in 
enumerate(samples_alpha_r_plus_colors): loglike_samples_alpha_r_plus_colors[samp_num] = multifcqfid_lnlike_big_unc(sample, f_data, t_data, f_unc_data, fcqfid_data, prior='alpha_r_plus_colors') dhat_alpha_r_plus_colors = -2*multifcqfid_lnlike_big_unc(np.mean(samples_alpha_r_plus_colors, axis=0), f_data, t_data, f_unc_data, fcqfid_data, prior='alpha_r_plus_colors') dbar_alpha_r_plus_colors = np.mean(-2*loglike_samples_alpha_r_plus_colors) dic_alpha_r_plus_colors = 2*dbar_alpha_r_plus_colors - dhat_alpha_r_plus_colors dic_uniformative_arr[sn_num] = dic dic_alpha_r_plus_colors_arr[sn_num] = dic_alpha_r_plus_colors return (dic, dic_tsquared, dic_alpha_r_plus_colors) pool = Pool() dic_res = pool.map(get_dic, salt_df.name.values) dic_res dic_uninformative_arr = np.array(dic_res)[:,0] dic_tsquared_arr = np.array(dic_res)[:,1] dic_alpha_r_plus_colors_arr = np.array(dic_res)[:,2] dic_df = pd.DataFrame(salt_df.name.values, columns=['ztf_name']) dic_df['dic_uninformative'] = dic_uninformative_arr dic_df['dic_delta2'] = dic_tsquared_arr dic_df['dic_alpha_r_plus'] = dic_alpha_r_plus_colors_arr len(np.where(np.exp((dic_tsquared_arr - dic_alpha_r_plus_colors_arr)/2) > 30)[0]) dic_evidence = np.array(['very strong']*len(salt_df)) dic_evidence[np.where((np.exp((dic_tsquared_arr - dic_uninformative_arr)/2) <= 1))] = 'negative' dic_evidence[np.where((np.exp((dic_tsquared_arr - dic_uninformative_arr)/2) > 1) & (np.exp((dic_tsquared_arr - dic_uninformative_arr)/2) <= 3))] = 'weak' dic_evidence[np.where((np.exp((dic_tsquared_arr - dic_uninformative_arr)/2) > 3) & (np.exp((dic_tsquared_arr - dic_uninformative_arr)/2) <= 10))] = 'substantial' dic_evidence[np.where((np.exp((dic_tsquared_arr - dic_uninformative_arr)/2) > 10) & (np.exp((dic_tsquared_arr - dic_uninformative_arr)/2) <= 30))] = 'strong' dic_evidence[np.where((np.exp((dic_tsquared_arr - dic_uninformative_arr)/2) > 30) & (np.exp((dic_tsquared_arr - dic_uninformative_arr)/2) <= 100))] = 'very strong' dic_evidence[np.where((np.exp((dic_tsquared_arr - dic_uninformative_arr)/2) > 100))] = 'decisive' dic_evidence np.unique(dic_evidence, return_counts=True) dic_df['dic_evidence'] = dic_evidence dic_df.to_csv('dic_results.csv', index=False) ``` ## Analyze which SN prefer $t^2$ model ``` dic_df = pd.read_csv('dic_results.csv') dic_df.head() res = pd.read_csv('results_40percent.csv') decisive = np.where(dic_df.dic_evidence == 'decisive') vstrong = np.where(dic_df.dic_evidence == 'very strong') strong = np.where(dic_df.dic_evidence == 'strong') substantial = np.where(dic_df.dic_evidence == 'substantial') weak = np.where(dic_df.dic_evidence == 'weak') res[['ztf_name','final_selection', 't_rise_95', 't_rise_05', 'n_nights_gr_post']].iloc[decisive] res_tsquared = pd.read_csv('results_40_tsquared.csv') colors_sample = np.where( (((dic_df.dic_evidence == 'decisive') | (dic_df.dic_evidence == 'very strong')) & (res.final_selection == 1))) tsquared_sample = np.where( (((dic_df.dic_evidence == 'decisive') | (dic_df.dic_evidence == 'very strong')) & (res.final_selection == 0) & (res_tsquared.final_selection == 1)) | (((dic_df.dic_evidence != 'decisive') & (dic_df.dic_evidence != 'very strong')) & (res_tsquared.final_selection == 1))) ``` The upshot here is that the very best models (i.e. low $z$, high $N_\mathrm{det}$, and low $CR_{90}$) and the very worst, opposite of this, are the ones that show significant evidence for a departure from $\alpha = 2$ according to the DIC. These models, therefore, should not be "lumped in" with a uniform $\alpha = 2$ analysis.
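The thresholds used above to label the evidence follow a Jeffreys-style scale on `exp(ΔDIC/2)`. A small helper that captures the same binning as the array logic above (a sketch for readability, not part of the original analysis code):

```python
import numpy as np

def dic_evidence_label(dic_simple, dic_complex):
    """Label the evidence for the more complex model from the DIC difference,
    using the same exp(delta_DIC / 2) bins as the array logic above."""
    ratio = np.exp((dic_simple - dic_complex) / 2)
    if ratio <= 1:
        return 'negative'
    elif ratio <= 3:
        return 'weak'
    elif ratio <= 10:
        return 'substantial'
    elif ratio <= 30:
        return 'strong'
    elif ratio <= 100:
        return 'very strong'
    return 'decisive'

# Example with hypothetical DIC values
print(dic_evidence_label(120.0, 110.0))  # exp(5) ~ 148 -> 'decisive'
```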
github_jupyter
# Initial data and problem exploration ``` import xarray as xr import pandas as pd import urllib.request import numpy as np from glob import glob import cartopy.crs as ccrs import matplotlib.pyplot as plt import os import cartopy.feature as cfeature states_provinces = cfeature.NaturalEarthFeature( category='cultural', name='admin_1_states_provinces_lines', scale='50m', facecolor='none') ``` # Data preprocessing ## TIGGE ECMWF ### Control run ``` tigge_ctrl = xr.open_mfdataset("/datadrive/tigge/16km/2m_temperature/2019-10.nc") tigge_ctrl tigge_ctrl.lat.min() tigge_2dslice = tigge_ctrl.t2m.isel(lead_time=4, init_time=0) p = tigge_2dslice.plot( subplot_kws=dict(projection=ccrs.Orthographic(-80, 35), facecolor="gray"), transform=ccrs.PlateCarree(),) #p.axes.set_global() p.axes.coastlines() ``` ### TIGGE CTRL precip ``` prec = xr.open_mfdataset("/datadrive/tigge/raw/total_precipitation/*.nc") prec # aggregated precipitation prec.tp.mean('init_time').diff('lead_time').plot(col='lead_time', col_wrap=3) # that takes a while! ``` ### Checking regridding ``` t2m_raw = xr.open_mfdataset("/datadrive/tigge/raw/2m_temperature/2019-10.nc") t2m_32 = xr.open_mfdataset("/datadrive/tigge/32km/2m_temperature/2019-10.nc") t2m_16 = xr.open_mfdataset("/datadrive/tigge/16km/2m_temperature/2019-10.nc") for ds in [t2m_raw, t2m_16, t2m_32]: tigge_2dslice = ds.t2m.isel(lead_time=4, init_time=-10) plt.figure() p = tigge_2dslice.plot(levels=np.arange(270,305), subplot_kws=dict(projection=ccrs.Orthographic(-80, 35), facecolor="gray"), transform=ccrs.PlateCarree(),) p.axes.coastlines() ``` ### Ensemble ``` !ls -lh ../data/tigge/2020-10-23_ens2.grib tigge = xr.open_mfdataset('../data/tigge/2020-10-23_ens2.grib', engine='pynio').isel() tigge = tigge.rename({ 'tp_P11_L1_GGA0_acc': 'tp', 'initial_time0_hours': 'init_time', 'forecast_time0': 'lead_time', 'lat_0': 'latitude', 'lon_0': 'longitude', 'ensemble0' : 'member' }).diff('lead_time').tp tigge = tigge.where(tigge >= 0, 0) # tigge = tigge * 1000 # m to mm tigge.coords['valid_time'] = xr.concat([i + tigge.lead_time for i in tigge.init_time], 'init_time') tigge tigge.to_netcdf('../data/tigge/2020-10-23_ens_preprocessed.nc') ``` ### Deterministic ``` tigge = xr.open_mfdataset('../data/tigge/2020-10-23.grib', engine='pynio') tigge = tigge.rename({ 'tp_P11_L1_GGA0_acc': 'tp', 'initial_time0_hours': 'init_time', 'forecast_time0': 'lead_time', 'lat_0': 'latitude', 'lon_0': 'longitude', }).diff('lead_time').tp tigge = tigge.where(tigge >= 0, 0) tigge.coords['valid_time'] = xr.concat([i + tigge.lead_time for i in tigge.init_time], 'init_time') tigge tigge.to_netcdf('../data/tigge/2020-10-23_preprocessed.nc') ``` ## YOPP ``` yopp = xr.open_dataset('../data/yopp/2020-10-23.grib', engine='pynio').TP_GDS4_SFC yopp2 = xr.open_dataset('../data/yopp/2020-10-23_12.grib', engine='pynio').TP_GDS4_SFC yopp = xr.merge([yopp, yopp2]).rename({ 'TP_GDS4_SFC': 'tp', 'initial_time0_hours': 'init_time', 'forecast_time1': 'lead_time', 'g4_lat_2': 'latitude', 'g4_lon_3': 'longitude' }) yopp = yopp.diff('lead_time').tp yopp = yopp.where(yopp >= 0, 0) yopp = yopp * 1000 # m to mm yopp.coords['valid_time'] = xr.concat([i + yopp.lead_time for i in yopp.init_time], 'init_time') yopp.to_netcdf('../data/yopp/2020-10-23_preprocessed.nc') ``` ## NRMS data ``` def time_from_fn(fn): s = fn.split('/')[-1].split('_')[-1] year = s[:4] month = s[4:6] day = s[6:8] hour = s[9:11] return np.datetime64(f'{year}-{month}-{day}T{hour}') def open_nrms(path): fns = sorted(glob(f'{path}/*')) dss = [xr.open_dataset(fn, 
engine='pynio') for fn in fns] times = [time_from_fn(fn) for fn in fns] times = xr.DataArray(times, name='time', dims=['time'], coords={'time': times}) ds = xr.concat(dss, times).rename({'lat_0': 'latitude', 'lon_0': 'longitude'}) da = ds[list(ds)[0]].rename('tp') return da def get_mrms_fn(path, source, year, month, day, hour): month, day, hour = [str(x).zfill(2) for x in [month, day, hour]] fn = f'{path}/{source}/MRMS_{source}_00.00_{year}{month}{day}-{hour}0000.grib2' # print(fn) return fn def load_mrms_data(path, start_time, stop_time, accum=3): times = pd.to_datetime(np.arange(start_time, stop_time, np.timedelta64(accum, 'h'), dtype='datetime64[h]')) das = [] for t in times: if os.path.exists(get_mrms_fn(path, f'MultiSensor_QPE_0{accum}H_Pass1', t.year, t.month, t.day, t.hour)): ds = xr.open_dataset(get_mrms_fn(path, f'MultiSensor_QPE_0{accum}H_Pass1', t.year, t.month, t.day, t.hour), engine='pynio') elif os.path.exists(get_mrms_fn(path, f'MultiSensor_QPE_0{accum}H_Pass2', t.year, t.month, t.day, t.hour)): ds = xr.open_dataset(get_mrms_fn(path, f'MultiSensor_QPE_0{accum}H_Pass2', t.year, t.month, t.day, t.hour), engine='pynio') elif os.path.exists(get_mrms_fn(path, f'RadarOnly_QPE_0{accum}H', t.year, t.month, t.day, t.hour)): ds = xr.open_dataset(get_mrms_fn(path, f'RadarOnly_QPE_0{accum}H', t.year, t.month, t.day, t.hour), engine='pynio') else: raise Exception(f'No data found for {t}') ds = ds.rename({'lat_0': 'latitude', 'lon_0': 'longitude'}) da = ds[list(ds)[0]].rename('tp') das.append(da) times = xr.DataArray(times, name='time', dims=['time'], coords={'time': times}) da = xr.concat(das, times) return da mrms = load_mrms_data('../data/', '2020-10-23', '2020-10-25') mrms6h = mrms.rolling(time=2).sum().isel(time=slice(0, None, 2)) mrms.to_netcdf('../data/mrms/mrms_preprocessed.nc') mrms6h.to_netcdf('../data/mrms/mrms6_preprocessed.nc') ``` # Analysis ``` !ls ../data tigge_det = xr.open_dataarray('../data/tigge/2020-10-23_preprocessed.nc').rename({'latitude': 'lat', 'longitude': 'lon'}) tigge_ens = xr.open_dataarray('../data/tigge/2020-10-23_ens_preprocessed.nc').rename({'latitude': 'lat', 'longitude': 'lon'}) yopp = xr.open_dataarray('../data/yopp/2020-10-23_preprocessed.nc').rename({'latitude': 'lat', 'longitude': 'lon'}) mrms = xr.open_dataarray('../data/mrms/mrms_preprocessed.nc').rename({'latitude': 'lat', 'longitude': 'lon'}) mrms6h = xr.open_dataarray('../data/mrms/mrms6_preprocessed.nc').rename({'latitude': 'lat', 'longitude': 'lon'}) ``` ## Regrid ``` import xesmf as xe lons = slice(260, 280) lats = slice(45, 25) def regrid(ds, km, lats, lons): deg = km/100. 
grid = xr.Dataset( { 'lat': (['lat'], np.arange(lats.start, lats.stop, -deg)), 'lon': (['lon'], np.arange(lons.start, lons.stop, deg)) } ) regridder = xe.Regridder(ds.sel(lat=lats, lon=lons), grid, 'bilinear') return regridder(ds.sel(lat=lats, lon=lons), keep_attrs=True) mrms4km = regrid(mrms, 4, lats, lons) mrms2km = regrid(mrms, 2, lats, lons) mrms4km6h = regrid(mrms6h, 4, lats, lons) mrms2km6h = regrid(mrms6h, 2, lats, lons) mrms4km6h = mrms4km6h.rename('tp') mrms2km6h =mrms2km6h.rename('tp') yopp16km = regrid(yopp, 16, lats, lons) yopp32km = regrid(yopp, 32, lats, lons) tigge_det16km = regrid(tigge_det, 16, lats, lons) tigge_det32km = regrid(tigge_det, 32, lats, lons) tigge_ens16km = regrid(tigge_ens, 16, lats, lons) tigge_ens32km = regrid(tigge_ens, 32, lats, lons) !mkdir ../data/regridded mrms2km.to_netcdf('../data/regridded/mrms2km.nc') mrms4km.to_netcdf('../data/regridded/mrms4km.nc') mrms2km6h.to_netcdf('../data/regridded/mrms2km6h.nc') mrms4km6h.to_netcdf('../data/regridded/mrms4km6h.nc') yopp16km.to_netcdf('../data/regridded/yopp16km.nc') yopp32km.to_netcdf('../data/regridded/yopp32km.nc') tigge_det16km.to_netcdf('../data/regridded/tigge_det16km.nc') tigge_det32km.to_netcdf('../data/regridded/tigge_det32km.nc') tigge_ens16km.to_netcdf('../data/regridded/tigge_ens16km.nc') tigge_ens32km.to_netcdf('../data/regridded/tigge_ens32km.nc') mrms2km = xr.open_dataarray('../data/regridded/mrms2km.nc') mrms4km = xr.open_dataarray('../data/regridded/mrms4km.nc') mrms2km6h = xr.open_dataarray('../data/regridded/mrms2km6h.nc') mrms4km6h = xr.open_dataarray('../data/regridded/mrms4km6h.nc') yopp16km = xr.open_dataarray('../data/regridded/yopp16km.nc') yopp32km = xr.open_dataarray('../data/regridded/yopp32km.nc') tigge_det16km = xr.open_dataarray('../data/regridded/tigge_det16km.nc') tigge_det32km = xr.open_dataarray('../data/regridded/tigge_det32km.nc') tigge_ens16km = xr.open_dataarray('../data/regridded/tigge_ens16km.nc') tigge_ens32km = xr.open_dataarray('../data/regridded/tigge_ens32km.nc') ``` ### Matplotlib #### Compare different resolutions ``` mrms4km np.arange(lons.start, lons.stop, 512/100) def add_grid(axs): for ax in axs: ax.set_xticks(np.arange(lons.start, lons.stop, 512/100)) ax.set_yticks(np.arange(lats.start, lats.stop, -512/100)) ax.grid(True) ax.set_aspect('equal') yopp16km yopp16km.isel(init_time=i, lead_time=slice(0, 3)).valid_time i = 3 valid_time = yopp16km.isel(init_time=i, lead_time=slice(0, 3)).valid_time figsize = (16, 5) axs = mrms4km.sel(time=valid_time.values).plot(vmin=0, vmax=50, col='time', cmap='gist_ncar_r', figsize=figsize).axes[0] add_grid(axs) axs = yopp16km.isel(init_time=i, lead_time=slice(0, 3)).plot(vmin=0, vmax=50, col='lead_time', cmap='gist_ncar_r', figsize=figsize).axes[0] add_grid(axs) axs = yopp32km.isel(init_time=i, lead_time=slice(0, 3)).plot(vmin=0, vmax=50, col='lead_time', cmap='gist_ncar_r', figsize=figsize).axes[0] add_grid(axs) i = 2 valid_time = tigge_det16km.isel(init_time=i, lead_time=slice(0, 3)).valid_time figsize = (16, 5) axs = mrms4km6h.sel(time=valid_time.values, method='nearest').assign_coords({'time': valid_time.values}).plot(vmin=0, vmax=50, col='time', cmap='gist_ncar_r', figsize=figsize).axes[0] add_grid(axs) axs = tigge_det16km.isel(init_time=i, lead_time=slice(0, 3)).plot(vmin=0, vmax=50, col='lead_time', cmap='gist_ncar_r', figsize=figsize).axes[0] add_grid(axs) axs = tigge_det32km.isel(init_time=i, lead_time=slice(0, 3)).plot(vmin=0, vmax=50, col='lead_time', cmap='gist_ncar_r', figsize=figsize).axes[0] add_grid(axs) 
tigge_ens16km.isel(init_time=i, lead_time=l) i = 3 l = 0 t = tigge_ens16km.isel(init_time=i, lead_time=slice(l, l+2)).valid_time.values axs = mrms4km6h.sel(time=t, method='nearest').assign_coords({'time': t}).plot(vmin=0, vmax=50, cmap='gist_ncar_r', figsize=(10, 4), col='time').axes[0] add_grid(axs) axs = tigge_ens16km.isel(init_time=i, lead_time=l, member=slice(0, 6)).plot(vmin=0, vmax=50, cmap='gist_ncar_r', figsize=(24, 4), col='member').axes[0] add_grid(axs) axs = tigge_ens16km.isel(init_time=i, lead_time=l+1, member=slice(0, 6)).plot(vmin=0, vmax=50, cmap='gist_ncar_r', figsize=(24, 4), col='member').axes[0] add_grid(axs) ``` ### Holoviews ``` import holoviews as hv hv.extension('bokeh') hv.config.image_rtol = 1 # from holoviews import opts # opts.defaults(opts.Scatter3D(color='Value', cmap='viridis', edgecolor='black', s=50)) lons2 = slice(268, 273) lats2 = slice(40, 35) lons2 = lons lats2 = lats def to_hv(da, dynamic=False, opts={'clim': (1, 50)}): hv_ds = hv.Dataset(da) img = hv_ds.to(hv.Image, kdims=["lon", "lat"], dynamic=dynamic) return img.opts(**opts) valid_time = yopp16km.isel(lead_time=slice(0, 4), init_time=slice(0, 3)).valid_time valid_time2 = tigge_det16km.isel(lead_time=slice(0, 3), init_time=slice(0, 3)).valid_time mrms2km_hv = to_hv(mrms2km.sel(time=valid_time, method='nearest').sel(lat=lats2, lon=lons2)) mrms4km_hv = to_hv(mrms4km.sel(time=valid_time, method='nearest').sel(lat=lats2, lon=lons2)) mrms2km6h_hv = to_hv(mrms2km6h.sel(time=valid_time2, method='nearest').sel(lat=lats2, lon=lons2)) mrms4km6h_hv = to_hv(mrms4km6h.sel(time=valid_time2, method='nearest').sel(lat=lats2, lon=lons2)) yopp16km_hv = to_hv(yopp16km.isel(lead_time=slice(0, 4), init_time=slice(0, 3)).sel(lat=lats2, lon=lons2)) yopp32km_hv = to_hv(yopp32km.isel(lead_time=slice(0, 4), init_time=slice(0, 3)).sel(lat=lats2, lon=lons2)) tigge_det16km_hv = to_hv(tigge_det16km.isel(lead_time=slice(0, 3), init_time=slice(0, 3)).sel(lat=lats2, lon=lons2)) tigge_det32km_hv = to_hv(tigge_det32km.isel(lead_time=slice(0, 3), init_time=slice(0, 3)).sel(lat=lats2, lon=lons2)) ``` ### Which resolution for MRMS? 
```
%%output holomap='widgets'
%%opts Image style(cmap='gist_ncar_r') [width=600, height=600]
# mrms4km6h_hv + tigge_det16km_hv + tigge_det32km_hv
# mrms4km_hv + yopp16km_hv + yopp32km_hv

%%output holomap='widgets'
%%opts Image style(cmap='gist_ncar_r') [width=600, height=600]
mrms4km_hv + mrms4km6h_hv

hv_yopp = yopp.isel(init_time=0).sel(latitude=lats, longitude=lons)
hv_yopp.coords['time'] = hv_yopp.init_time + hv_yopp.lead_time
hv_yopp = hv_yopp.swap_dims({'lead_time': 'time'})
# hv_yopp

hv_mrms = hv.Dataset(mrms.sel(latitude=lats, longitude=lons)[1:])
hv_yopp = hv.Dataset(hv_yopp.sel(time=mrms.time[1:]))
img1 = hv_mrms.to(hv.Image, kdims=["longitude", "latitude"], dynamic=False)
img2 = hv_yopp.to(hv.Image, kdims=["longitude", "latitude"], dynamic=False)

%%output holomap='widgets'
%%opts Image style(cmap='gist_ncar_r') plot[colorbar=True]
%%opts Image [width=500, height=400]
img1 + img2

hv_yopp = yopp.sel(latitude=lats, longitude=lons)
hv_yopp = hv.Dataset(hv_yopp)
img1 = hv_yopp.to(hv.Image, kdims=["longitude", "latitude"], dynamic=False)

%%output holomap='widgets'
%%opts Image style(cmap='gist_ncar_r') plot[colorbar=True]
%%opts Image [width=500, height=400]
img1

hv_ds = hv.Dataset(da.sel(latitude=lats, longitude=lons))
hv_ds

a = hv_ds.to(hv.Image, kdims=["longitude", "latitude"], dynamic=False)
a.opts(colorbar=True, fig_size=200, cmap='viridis')
```

# Old

```
path = '../data/MultiSensor_QPE_01H_Pass1/'

da1 = open_nrms('../data/MultiSensor_QPE_01H_Pass1/')
da3 = open_nrms('../data/MultiSensor_QPE_03H_Pass1/')
dar = open_nrms('../data/RadarOnly_QPE_03H/')
da3p = open_nrms('../data/MultiSensor_QPE_03H_Pass2/')

da1
da3

da13 = da1.rolling(time=3).sum()
(da13 - da3).isel(time=3).sel(latitude=lats, longitude=lons).plot()
# A numeric version of this consistency check is sketched after this code block.

da13.isel(time=slice(0, 7)).sel(latitude=slice(44, 40), longitude=slice(268, 272)).plot(col='time', vmin=0, vmax=50)
plt.suptitle('1h accumulation with rolling(time=3).sum()', y=1.05)

da3.isel(time=slice(0, 7)).sel(latitude=slice(44, 40), longitude=slice(268, 272)).plot(col='time', vmin=0, vmax=50)
plt.suptitle('3h accumulation', y=1.05)

dar.isel(time=slice(0, 7)).sel(latitude=slice(44, 40), longitude=slice(268, 272)).plot(col='time', vmin=0, vmax=50)
plt.suptitle('3h accumulation radar', y=1.05)

da3.isel(time=slice(0, 7)).sel(latitude=slice(44, 43), longitude=slice(269, 270)).plot(col='time', vmin=0, vmax=50)
plt.suptitle('3h accumulation', y=1.05)

dar.isel(time=slice(0, 7)).sel(latitude=slice(44, 43), longitude=slice(269, 270)).plot(col='time', vmin=0, vmax=50)
plt.suptitle('3h accumulation radar', y=1.05)

for t in np.arange('2020-10-23', '2020-10-25', np.timedelta64(3, 'h'), dtype='datetime64[h]'):
    print(t)
    print('Radar', (dar.time.values == t).sum() > 0)
    print('Pass1', (da3.time.values == t).sum() > 0)
    print('Pass2', (da3p.time.values == t).sum() > 0)

t
(dar.time.values == t).sum() > 0
da3.time.values

def plot_facet(da, title='', **kwargs):
    p = da.plot(
        col='time', col_wrap=3,
        subplot_kws={'projection': ccrs.PlateCarree()},
        transform=ccrs.PlateCarree(),
        figsize=(15, 15),
        **kwargs
    )
    for ax in p.axes.flat:
        ax.coastlines()
        ax.add_feature(states_provinces, edgecolor='gray')
        # ax.set_extent([113, 154, -11, -44], crs=ccrs.PlateCarree())
    plt.suptitle(title);

plot_facet(da.isel(time=slice(0, 9)).sel(latitude=lats, longitude=lons), vmin=0, vmax=10, add_colorbar=False)

import holoviews as hv
hv.extension('matplotlib')
from holoviews import opts
opts.defaults(opts.Scatter3D(color='Value', cmap='fire', edgecolor='black', s=50))

hv_ds = hv.Dataset(da.sel(latitude=lats, longitude=lons))
hv_ds

a = hv_ds.to(hv.Image, kdims=["longitude", "latitude"], dynamic=False)
a.opts(colorbar=True, fig_size=200, cmap='viridis')

da.longitude.diff('longitude').min()

!cp ../data/yopp/2020-10-23.nc ../data/yopp/2020-10-23.grib

a = xr.open_dataset('../data/yopp/2020-10-23.grib', engine='pynio')
a
a.g4_lat_2.diff('g4_lat_2')
a.g4_lon_3.diff('g4_lon_3')

!cp ../data/tigge/2020-10-23.nc ../data/tigge/2020-10-23.grib

b = xr.open_dataset('../data/tigge/2020-10-23.grib', engine='pynio')
b
```
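The rolling-sum comparison above (`da13` vs. `da3`) only inspects a single time step visually. A short numeric summary makes the consistency check explicit. This is a minimal sketch, not part of the original notebook; it assumes `da13`, `da3`, `lats`, and `lons` are defined as in the cells above and share aligned `time`/`latitude`/`longitude` coordinates.

```
# Hedged sketch: quantify how closely the rolling 1 h sums match the 3 h
# accumulation product over the study region. xarray aligns the two arrays
# on their shared coordinates before subtracting; NaNs are skipped by default.
diff = (da13 - da3).sel(latitude=lats, longitude=lons)
print('mean abs difference:', float(abs(diff).mean()))
print('max abs difference: ', float(abs(diff).max()))
```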
# Descriptive analysis for the manuscript

Summarize the geotagged tweets of the multiple regions used for the experiment and the application.

```
%load_ext autoreload
%autoreload 2

import os
import numpy as np
import pandas as pd
import yaml
import scipy.stats as stats
from tqdm import tqdm


def load_region_tweets(region=None):
    df = pd.read_csv(f'../../dbs/{region}/geotweets.csv')
    df['day'] = df['createdat'].apply(lambda x: x.split(' ')[0])
    df['createdat'] = pd.to_datetime(df['createdat'], infer_datetime_format=True)
    t_max, t_min = df.createdat.max(), df.createdat.min()
    time_span = f'{t_min} - {t_max}'
    num_users = len(df.userid.unique())
    num_geo = len(df)
    num_days = np.median(df.groupby(['userid'])['day'].nunique())
    num_geo_freq = np.median(df.groupby(['userid']).size() / df.groupby(['userid'])['day'].nunique())
    return region, time_span, num_users, num_geo, num_days, num_geo_freq


def user_stats_cal(data):
    time_span = data.createdat.max() - data.createdat.min()
    time_span = time_span.days
    if time_span == 0:
        time_span += 1
    num_days = data['day'].nunique()
    num_geo = len(data)
    geo_freq = num_geo / num_days
    share_active = num_days / time_span
    return pd.DataFrame.from_dict({'time_span': [time_span],
                                   'num_days': [num_days],
                                   'num_geo': [num_geo],
                                   'geo_freq': [geo_freq],
                                   'share_active': [share_active]})


def region_tweets_stats_per_user(region=None):
    df = pd.read_csv(f'../../dbs/{region}/geotweets.csv')
    df['day'] = df['createdat'].apply(lambda x: x.split(' ')[0])
    df['createdat'] = pd.to_datetime(df['createdat'], infer_datetime_format=True)
    tqdm.pandas(desc=region)
    df_users = df.groupby('userid').progress_apply(user_stats_cal).reset_index()
    df_users.loc[:, 'region'] = region
    df_users.drop(columns=['level_1'], inplace=True)
    return df_users


region_list = ['sweden', 'netherlands', 'saopaulo', 'australia', 'austria', 'barcelona',
               'capetown', 'cebu', 'egypt', 'guadalajara', 'jakarta', 'johannesburg',
               'kualalumpur', 'lagos', 'madrid', 'manila', 'mexicocity', 'moscow',
               'nairobi', 'rio', 'saudiarabia', 'stpertersburg', 'surabaya']

with open('../../lib/regions.yaml', encoding='utf8') as f:
    region_manager = yaml.load(f, Loader=yaml.FullLoader)
```

## 1 Summarize the geotagged tweets used as input to the model

Geotagged tweets: time span, no. of Twitter users, no. of geotagged tweets, days covered per user, and no. of geotagged tweets per day per user.

```
df = pd.DataFrame([load_region_tweets(region=x) for x in region_list],
                  columns=('region', 'time_span', 'num_users', 'num_geo', 'num_days', 'num_geo_freq'))
df.loc[:, 'gdp_capita'] = df.loc[:, 'region'].apply(lambda x: region_manager[x]['gdp_capita'])
df.loc[:, 'country'] = df.loc[:, 'region'].apply(lambda x: region_manager[x]['country'])
df.loc[:, 'pop'] = df.loc[:, 'region'].apply(lambda x: region_manager[x]['pop'])
df.loc[:, 'time_span'] = df.loc[:, 'time_span'].apply(lambda x: ' - '.join([x_t.split(' ')[0] for x_t in x.split(' - ')]))
df.loc[:, 'region'] = df.loc[:, 'region'].apply(lambda x: region_manager[x]['name'])

df
df.to_clipboard(index=False)
```

## 1-extra Summarize the geotagged tweets used as input to the model - by user

This is for the dissertation presentation, illustrating the sparsity issue. Geotagged tweets: time span, no. of Twitter users, no. of geotagged tweets, days covered per user, and no. of geotagged tweets per day per user.

```
df = pd.concat([region_tweets_stats_per_user(region=x) for x in region_list])
df.loc[:, 'gdp_capita'] = df.loc[:, 'region'].apply(lambda x: region_manager[x]['gdp_capita'])
df.loc[:, 'country'] = df.loc[:, 'region'].apply(lambda x: region_manager[x]['country'])
df.loc[:, 'pop'] = df.loc[:, 'region'].apply(lambda x: region_manager[x]['pop'])
df.loc[:, 'region'] = df.loc[:, 'region'].apply(lambda x: region_manager[x]['name'])
df.to_csv(f'../../dbs/regional_stats.csv', index=False)
```

## 2 Merge ODMs for visualisation

This part applies to Sweden, the Netherlands, and Sao Paulo, Brazil. The separate ODM files are deleted after merging.

```
for region in ['sweden', 'netherlands', 'saopaulo']:
    df = pd.read_csv(f'../../dbs/{region}/odm_gt.csv')
    df_c = pd.read_csv(f'../../dbs/{region}/odm_calibration.csv')
    df_v = pd.read_csv(f'../../dbs/{region}/odm_validation.csv')
    df_cb = pd.read_csv(f'../../dbs/{region}/odm_benchmark_c.csv')
    df_vb = pd.read_csv(f'../../dbs/{region}/odm_benchmark_v.csv')

    df = pd.merge(df, df_c, on=['ozone', 'dzone'])
    df = df.rename(columns={'model': 'model_c'})
    df = pd.merge(df, df_v, on=['ozone', 'dzone'])
    df = df.rename(columns={'model': 'model_v'})
    df = pd.merge(df, df_cb, on=['ozone', 'dzone'])
    df = df.rename(columns={'benchmark': 'benchmark_c'})
    df = pd.merge(df, df_vb, on=['ozone', 'dzone'])
    df = df.rename(columns={'benchmark': 'benchmark_v'})

    df.loc[:, ['ozone', 'dzone', 'gt', 'model_c', 'model_v', 'benchmark_c', 'benchmark_v']].to_csv(f'../../dbs/{region}/odms.csv', index=False)

    os.remove(f'../../dbs/{region}/odm_gt.csv')
    os.remove(f'../../dbs/{region}/odm_calibration.csv')
    os.remove(f'../../dbs/{region}/odm_validation.csv')
    os.remove(f'../../dbs/{region}/odm_benchmark_c.csv')
    os.remove(f'../../dbs/{region}/odm_benchmark_v.csv')
```

## 3 Quantify the OD-pair similarity

This part applies to Sweden, the Netherlands, and Sao Paulo, Brazil. It quantifies the overall similarity between the ground-truth (gt) and modelled OD pairs with Kendall's tau.

```
quant_list = []
for region in ['sweden', 'netherlands', 'saopaulo']:
    df = pd.read_csv(f'../../dbs/{region}/odms.csv')

    # Use df['gt'] rather than df.gt: attribute access resolves to the
    # DataFrame.gt() method, which would silently skip this filter.
    df_c = df.loc[(df['gt'] != 0) & (df.model_c != 0) & (df.benchmark_c != 0), :]
    mc = stats.kendalltau(df_c.loc[:, 'gt'], df_c.loc[:, 'model_c'])
    quant_list.append((region, 'model', 'c', mc.correlation, mc.pvalue))
    bc = stats.kendalltau(df_c.loc[:, 'gt'], df_c.loc[:, 'benchmark_c'])
    quant_list.append((region, 'benchmark', 'c', bc.correlation, bc.pvalue))

    df_v = df.loc[(df['gt'] != 0) & (df.model_v != 0) & (df.benchmark_v != 0), :]
    mv = stats.kendalltau(df_v.loc[:, 'gt'], df_v.loc[:, 'model_v'])
    quant_list.append((region, 'model', 'v', mv.correlation, mv.pvalue))
    bv = stats.kendalltau(df_v.loc[:, 'gt'], df_v.loc[:, 'benchmark_v'])
    quant_list.append((region, 'benchmark', 'v', bv.correlation, bv.pvalue))

df_stats = pd.DataFrame(quant_list, columns=['region', 'type', 'data', 'cor', 'p'])

df_stats
df_stats.groupby(['region', 'type'])['cor'].mean()
```
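For the manuscript table, the long-format correlations can be reshaped into one row per region with model/benchmark columns for the calibration and validation sets. This is a minimal sketch, not part of the original analysis; it assumes `df_stats` as constructed in the cell above.

```
# Hedged sketch: pivot the long-format results into a region x (type, data)
# table of Kendall's tau values, assuming `df_stats` from the cell above.
table = df_stats.pivot_table(index='region', columns=['type', 'data'], values='cor')
print(table.round(3))
```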