``` import pandas as pd import numpy as np from scipy.stats import ks_2samp, chi2 import scipy from astropy.table import Table import astropy import matplotlib.pyplot as plt from matplotlib.ticker import MultipleLocator from matplotlib.colors import colorConverter import matplotlib %matplotlib notebook print('numpy version: {}'.format(np.__version__)) print('pandas version: {}'.format(pd.__version__)) print('matplotlib version: {}'.format(matplotlib.__version__)) print('scipy version: {}'.format(scipy.__version__)) ``` # Figure 7 Create Figure 7 (the host-galaxy offset of ASAS-SN SNe relative to SNe in the ZTF BTS) in [Fremling et al. 2020](https://ui.adsabs.harvard.edu/abs/2019arXiv191012973F/abstract). Data for ASAS-SN are from [Holoien et al. 2019](https://ui.adsabs.harvard.edu/abs/2019MNRAS.484.1899H/abstract). ``` # BTS data bts_df = pd.read_hdf('../data/final_rcf_table.h5') z_sn = bts_df.z_sn.values z_host = bts_df.z_host.values norm_Ia = np.where( ( (bts_df.sn_type == 'Ia-norm') | (bts_df.sn_type == 'Ia') | (bts_df.sn_type == 'Ia-91bg') | (bts_df.sn_type == 'Ia-91T') | (bts_df.sn_type == 'Ia-99aa') | (bts_df.sn_type == 'ia') | (bts_df.sn_type == 'Ia-norm*') | (bts_df.sn_type == 'Ia-91T*') | (bts_df.sn_type == 'Ia-91T**') | (bts_df.sn_type == 'SN Ia') ) ) norm_cc = np.where( (bts_df.sn_type == 'IIb') | (bts_df.sn_type == 'Ib') | (bts_df.sn_type == 'IIP') | (bts_df.sn_type == 'Ib/c') | (bts_df.sn_type == 'Ic-norm') | (bts_df.sn_type == 'IIn') | (bts_df.sn_type == 'IIL') | (bts_df.sn_type == 'Ic-broad') | (bts_df.sn_type == 'II') | (bts_df.sn_type == 'II-pec') | (bts_df.sn_type == 'Ib-pec') | (bts_df.sn_type == 'Ic') | (bts_df.sn_type == 'Ic-BL') | (bts_df.sn_type == 'IIP*') | (bts_df.sn_type == 'II*') | (bts_df.sn_type == 'Ibn') | (bts_df.sn_type == 'II**') | (bts_df.sn_type == 'Ib-norm') | (bts_df.sn_type == 'IIn*') ) has_host_z = np.where((z_host > 0) & np.isfinite(z_host)) no_host = np.where((z_host < 0) | np.isnan(z_host)) has_host_cc = np.intersect1d(has_host_z, norm_cc) has_host_ia = np.intersect1d(has_host_z, norm_Ia) no_host_cc = np.intersect1d(no_host, norm_cc) no_host_ia = np.intersect1d(no_host, norm_Ia) z_mix = z_sn.copy() z_mix[has_host_z] = z_host[has_host_z] ``` #### Read in SN data from ASAS-SN ``` n_asas_ia = 0 n_asas_91T = 0 n_asas_91bg = 0 n_asas_ii = 0 n_asas_ibc = 0 n_asas_slsn = 0 asas_offset = np.array([]) for release in ['1','2','3','4']: tab1 = '../data/ASAS_SN/bright_sn_catalog_{}/table1.txt'.format(release) tab2 = '../data/ASAS_SN/bright_sn_catalog_{}/table2.txt'.format(release) asassn_tab1 = Table.read(tab1, format='cds') asassn_tab2 = Table.read(tab2, format='cds') n_asas_ia += len(np.where( (asassn_tab1['Type'] == 'Ia') | (asassn_tab1['Type'] == 'Ia-91T') | (asassn_tab1['Type'] == 'Ia-91bg') | (asassn_tab1['Type'] == 'Ia+CSM') | (asassn_tab1['Type'] == 'Ia-pec') | (asassn_tab1['Type'] == 'Ia-00cx') | (asassn_tab1['Type'] == 'Ia-06bt') | (asassn_tab1['Type'] == 'Ia-07if') | (asassn_tab1['Type'] == 'Ia-09dc') | (asassn_tab1['Type'] == 'Ia-02cx') )[0]) n_asas_91T += len(np.where( (asassn_tab1['Type'] == 'Ia-91T') )[0]) n_asas_91bg += len(np.where( (asassn_tab1['Type'] == 'Ia-91bg') )[0]) n_asas_ii += len(np.where( (asassn_tab1['Type'] == 'II') | (asassn_tab1['Type'] == 'IIP') | (asassn_tab1['Type'] == 'IIb') | (asassn_tab1['Type'] == 'II-pec') | (asassn_tab1['Type'] == 'IIn') | (asassn_tab1['Type'] == 'IIn-pec') | (asassn_tab1['Type'] == 'IIn/LBV') | (asassn_tab1['Type'] == 'IIn-09ip') )[0]) n_asas_ibc += len(np.where( (asassn_tab1['Type'] == 'Ib') | 
(asassn_tab1['Type'] == 'Ib/c') | (asassn_tab1['Type'] == 'Ibn') | (asassn_tab1['Type'] == 'Ic') | (asassn_tab1['Type'] == 'Ic-pec') | (asassn_tab1['Type'] == 'Ib/c-BL') | (asassn_tab1['Type'] == 'Ic-BL') )[0]) n_asas_slsn += len(np.where( (asassn_tab1['Type'] == 'SLSN-II') | (asassn_tab1['Type'] == 'SLSN-I') )[0]) n_asas_ia += len(np.where( ( (asassn_tab2['Type'] == 'Ia') | (asassn_tab2['Type'] == 'Ia-91T') | (asassn_tab2['Type'] == 'Ia-91bg') | (asassn_tab2['Type'] == 'Ia+CSM') | (asassn_tab2['Type'] == 'Ia-pec') | (asassn_tab2['Type'] == 'Ia-00cx') | (asassn_tab2['Type'] == 'Ia-06bt') | (asassn_tab2['Type'] == 'Ia-07if') | (asassn_tab2['Type'] == 'Ia-09dc') | (asassn_tab2['Type'] == 'Ia-02cx') ) & (asassn_tab2['Recovered'] == 'Yes') )[0]) n_asas_91T += len(np.where( (asassn_tab2['Type'] == 'Ia-91T') & (asassn_tab2['Recovered'] == 'Yes') )[0]) n_asas_91bg += len(np.where( (asassn_tab2['Type'] == 'Ia-91bg') & (asassn_tab2['Recovered'] == 'Yes') )[0]) n_asas_ii += len(np.where( ( (asassn_tab2['Type'] == 'II') | (asassn_tab2['Type'] == 'IIP') | (asassn_tab2['Type'] == 'IIb') | (asassn_tab2['Type'] == 'II-pec') | (asassn_tab2['Type'] == 'IIn') | (asassn_tab2['Type'] == 'IIn-pec') | (asassn_tab2['Type'] == 'IIn/LBV') | (asassn_tab2['Type'] == 'IIn-09ip') ) & (asassn_tab2['Recovered'] == 'Yes') )[0]) n_asas_ibc += len(np.where( ( (asassn_tab2['Type'] == 'Ib') | (asassn_tab2['Type'] == 'Ib/c') | (asassn_tab2['Type'] == 'Ibn') | (asassn_tab2['Type'] == 'Ic') | (asassn_tab2['Type'] == 'Ic-pec') | (asassn_tab2['Type'] == 'Ib/c-BL') | (asassn_tab2['Type'] == 'Ic-BL') ) & (asassn_tab2['Recovered'] == 'Yes') )[0]) n_asas_slsn += len(np.where( ( (asassn_tab2['Type'] == 'SLSN-II') | (asassn_tab2['Type'] == 'SLSN-I') ) & (asassn_tab2['Recovered'] == 'Yes') )[0]) asas_offset = np.append(asas_offset, np.array(asassn_tab1['Offset'][asassn_tab1['HostName'] != 'None'], dtype=float)) asas_offset = np.append(asas_offset, np.array(asassn_tab2['Offset'][np.where((asassn_tab2['Recovered'] == 'Yes') & (asassn_tab2['SNName'] != 'PS16dtm'))], dtype=float)) tot_asas = n_asas_ia + n_asas_ii + n_asas_ibc + n_asas_slsn bts_df.columns not_ambiguous = np.where(np.isfinite(bts_df.sep)) brighter_than_17 = np.where((bts_df.g_max < 17) | (bts_df.r_max < 17)) bright_bts = np.intersect1d(not_ambiguous, brighter_than_17) print(len(bright_bts)) color_dict = {'blue': '#2C5361', 'orange': '#DB6515', 'yellow': '#CA974C', 'maroon': '#3B2525', 'purple': '#A588AC', 'beige': '#D2A176'} fig, ax1 = plt.subplots(1, 1, figsize=(6,8/3)) ax1.plot(np.sort(bts_df.sep.iloc[bright_bts]), np.arange(len(bts_df.sep.iloc[bright_bts]))/float(len(bts_df.sep.iloc[bright_bts])), label = 'ZTF BTS', lw=3, color=color_dict['orange']) ax1.plot(np.sort(asas_offset), np.arange(len(asas_offset))/float(len(asas_offset)), label = 'ASAS-SN', lw=2, dashes=[6, 1], color=color_dict['blue']) ax1.set_xlabel('SN offset (arcsec)',fontsize=14) ax1.legend(loc=4, fontsize=13) ax1.set_xlim(-1, 24) ax1.set_ylim(0,1) ax1.xaxis.set_minor_locator(MultipleLocator(1)) ax1.yaxis.set_minor_locator(MultipleLocator(.1)) ax1.set_ylabel('cumulative $f_\mathrm{SN}$',fontsize=14) ax1.tick_params(top=True,right=True,labelsize=11,which='both') fig.subplots_adjust(left=0.105,bottom=0.2,top=0.97,right=0.98, hspace=0.3) fig.savefig('ZTF_ASASSN_offset.pdf') ``` #### KS test ``` ks_2samp(bts_df.sep.iloc[bright_bts], asas_offset) ``` #### $\chi^2$ test ``` logbins = np.logspace(-2,1.57,11) ztf_cnts, _ = np.histogram(bts_df.sep.iloc[bright_bts], range=(0,25), bins=50) # bins=logbins) asas_cnts, _ 
= np.histogram(asas_offset, range=(0,25), bins=50) # bins=logbins) not_empty = np.where((ztf_cnts > 0) & (asas_cnts > 0)) k1 = np.sqrt(np.sum(asas_cnts[not_empty])/np.sum(ztf_cnts[not_empty])) k2 = np.sqrt(np.sum(ztf_cnts[not_empty])/np.sum(asas_cnts[not_empty])) chisq_test = np.sum((k1*ztf_cnts[not_empty] - k2*asas_cnts[not_empty])**2 / (ztf_cnts[not_empty] + asas_cnts[not_empty])) dof = len(not_empty[0]) chisq = scipy.stats.chi2(dof) print(chisq_test, dof, chisq.sf(chisq_test)) ```
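The $\chi^2$ cell above inlines the scale factors `k1` and `k2`, which can obscure what is actually being computed. As a minimal sketch (not part of the original notebook; the helper name and synthetic data are illustrative), the same two-sample comparison can be wrapped in a reusable function and sanity-checked on two samples drawn from a common distribution, where a large p-value is expected:

```python
import numpy as np
from scipy.stats import chi2

def two_sample_chisq(a, b, bins=50, hist_range=(0, 25)):
    """Two-sample chi-square test for binned samples of unequal size."""
    a_cnts, _ = np.histogram(a, range=hist_range, bins=bins)
    b_cnts, _ = np.histogram(b, range=hist_range, bins=bins)
    # Keep only bins populated in both samples, as in the cell above
    use = (a_cnts > 0) & (b_cnts > 0)
    a_cnts, b_cnts = a_cnts[use], b_cnts[use]
    # Scale factors account for the different total counts of the two samples
    k1 = np.sqrt(b_cnts.sum() / a_cnts.sum())
    k2 = np.sqrt(a_cnts.sum() / b_cnts.sum())
    stat = np.sum((k1 * a_cnts - k2 * b_cnts)**2 / (a_cnts + b_cnts))
    dof = use.sum()
    return stat, dof, chi2(dof).sf(stat)

rng = np.random.default_rng(42)
print(two_sample_chisq(rng.exponential(4, 500), rng.exponential(4, 300)))
```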
<center> <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%203/images/IDSNlogo.png" width="300" alt="cognitiveclass.ai logo" /> </center> # K-Nearest Neighbors Estimated time needed: **25** minutes ## Objectives After completing this lab you will be able to: - Use K Nearest neighbors to classify data In this Lab you will load a customer dataset, fit the data, and use K-Nearest Neighbors to predict a data point. But what is **K-Nearest Neighbors**? **K-Nearest Neighbors** is an algorithm for supervised learning. Where the data is 'trained' with data points corresponding to their classification. Once a point is to be predicted, it takes into account the 'K' nearest points to it to determine it's classification. ### Here's an visualization of the K-Nearest Neighbors algorithm. <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%203/images/KNN_Diagram.png"> In this case, we have data points of Class A and B. We want to predict what the star (test data point) is. If we consider a k value of 3 (3 nearest data points) we will obtain a prediction of Class B. Yet if we consider a k value of 6, we will obtain a prediction of Class A. In this sense, it is important to consider the value of k. But hopefully from this diagram, you should get a sense of what the K-Nearest Neighbors algorithm is. It considers the 'K' Nearest Neighbors (points) when it predicts the classification of the test point. <h1>Table of contents</h1> <div class="alert alert-block alert-info" style="margin-top: 20px"> <ol> <li><a href="#about_dataset">About the dataset</a></li> <li><a href="#visualization_analysis">Data Visualization and Analysis</a></li> <li><a href="#classification">Classification</a></li> </ol> </div> <br> <hr> ``` !pip install scikit-learn==0.23.1 ``` Lets load required libraries ``` import numpy as np import matplotlib.pyplot as plt import pandas as pd import numpy as np from sklearn import preprocessing %matplotlib inline ``` <div id="about_dataset"> <h2>About the dataset</h2> </div> Imagine a telecommunications provider has segmented its customer base by service usage patterns, categorizing the customers into four groups. If demographic data can be used to predict group membership, the company can customize offers for individual prospective customers. It is a classification problem. That is, given the dataset, with predefined labels, we need to build a model to be used to predict class of a new or unknown case. The example focuses on using demographic data, such as region, age, and marital, to predict usage patterns. The target field, called **custcat**, has four possible values that correspond to the four customer groups, as follows: 1- Basic Service 2- E-Service 3- Plus Service 4- Total Service Our objective is to build a classifier, to predict the class of unknown cases. We will use a specific type of classification called K nearest neighbour. Lets download the dataset. To download the data, we will use !wget to download it from IBM Object Storage. ``` !wget -O teleCust1000t.csv https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%203/data/teleCust1000t.csv ``` **Did you know?** When it comes to Machine Learning, you will likely be working with large datasets. As a business, where can you host your data? 
IBM is offering a unique opportunity for businesses, with 10 Tb of IBM Cloud Object Storage: [Sign up now for free](http://cocl.us/ML0101EN-IBM-Offer-CC) ### Load Data From CSV File ``` df = pd.read_csv('teleCust1000t.csv') df.head() ``` <div id="visualization_analysis"> <h2>Data Visualization and Analysis</h2> </div> #### Let’s see how many of each class is in our data set ``` df['custcat'].value_counts() ``` #### 281 Plus Service, 266 Basic-service, 236 Total Service, and 217 E-Service customers You can easily explore your data using visualization techniques: ``` df.hist(column='income', bins=50) ``` ### Feature set Lets define feature sets, X: ``` df.columns ``` To use scikit-learn library, we have to convert the Pandas data frame to a Numpy array: ``` X = df[['region', 'tenure','age', 'marital', 'address', 'income', 'ed', 'employ','retire', 'gender', 'reside']] .values #.astype(float) X[0:5] ``` What are our labels? ``` y = df['custcat'].values y[0:5] ``` ## Normalize Data Data Standardization give data zero mean and unit variance, it is good practice, especially for algorithms such as KNN which is based on distance of cases: ``` X = preprocessing.StandardScaler().fit(X).transform(X.astype(float)) X[0:5] ``` ### Train Test Split Out of Sample Accuracy is the percentage of correct predictions that the model makes on data that that the model has NOT been trained on. Doing a train and test on the same dataset will most likely have low out-of-sample accuracy, due to the likelihood of being over-fit. It is important that our models have a high, out-of-sample accuracy, because the purpose of any model, of course, is to make correct predictions on unknown data. So how can we improve out-of-sample accuracy? One way is to use an evaluation approach called Train/Test Split. Train/Test Split involves splitting the dataset into training and testing sets respectively, which are mutually exclusive. After which, you train with the training set and test with the testing set. This will provide a more accurate evaluation on out-of-sample accuracy because the testing dataset is not part of the dataset that have been used to train the data. It is more realistic for real world problems. ``` from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=4) print ('Train set:', X_train.shape, y_train.shape) print ('Test set:', X_test.shape, y_test.shape) ``` <div id="classification"> <h2>Classification</h2> </div> <h3>K nearest neighbor (KNN)</h3> #### Import library Classifier implementing the k-nearest neighbors vote. ``` from sklearn.neighbors import KNeighborsClassifier ``` ### Training Lets start the algorithm with k=4 for now: ``` k = 4 #Train Model and Predict neigh = KNeighborsClassifier(n_neighbors = k).fit(X_train,y_train) neigh ``` ### Predicting we can use the model to predict the test set: ``` yhat = neigh.predict(X_test) yhat[0:5] ``` ### Accuracy evaluation In multilabel classification, **accuracy classification score** is a function that computes subset accuracy. This function is equal to the jaccard_score function. Essentially, it calculates how closely the actual labels and predicted labels are matched in the test set. ``` from sklearn import metrics print("Train set Accuracy: ", metrics.accuracy_score(y_train, neigh.predict(X_train))) print("Test set Accuracy: ", metrics.accuracy_score(y_test, yhat)) ``` ## Practice Can you build the model again, but this time with k=6? 
``` # write your code here ``` <details><summary>Click here for the solution</summary> ```python k = 6 neigh6 = KNeighborsClassifier(n_neighbors = k).fit(X_train,y_train) yhat6 = neigh6.predict(X_test) print("Train set Accuracy: ", metrics.accuracy_score(y_train, neigh6.predict(X_train))) print("Test set Accuracy: ", metrics.accuracy_score(y_test, yhat6)) ``` </details> #### What about other K? K in KNN, is the number of nearest neighbors to examine. It is supposed to be specified by the User. So, how can we choose right value for K? The general solution is to reserve a part of your data for testing the accuracy of the model. Then chose k =1, use the training part for modeling, and calculate the accuracy of prediction using all samples in your test set. Repeat this process, increasing the k, and see which k is the best for your model. We can calculate the accuracy of KNN for different Ks. ``` Ks = 10 mean_acc = np.zeros((Ks-1)) std_acc = np.zeros((Ks-1)) for n in range(1,Ks): #Train Model and Predict neigh = KNeighborsClassifier(n_neighbors = n).fit(X_train,y_train) yhat=neigh.predict(X_test) mean_acc[n-1] = metrics.accuracy_score(y_test, yhat) std_acc[n-1]=np.std(yhat==y_test)/np.sqrt(yhat.shape[0]) mean_acc ``` #### Plot model accuracy for Different number of Neighbors ``` plt.plot(range(1,Ks),mean_acc,'g') plt.fill_between(range(1,Ks),mean_acc - 1 * std_acc,mean_acc + 1 * std_acc, alpha=0.10) plt.fill_between(range(1,Ks),mean_acc - 3 * std_acc,mean_acc + 3 * std_acc, alpha=0.10,color="green") plt.legend(('Accuracy ', '+/- 1xstd','+/- 3xstd')) plt.ylabel('Accuracy ') plt.xlabel('Number of Neighbors (K)') plt.tight_layout() plt.show() print( "The best accuracy was with", mean_acc.max(), "with k=", mean_acc.argmax()+1) ``` <h2>Want to learn more?</h2> IBM SPSS Modeler is a comprehensive analytics platform that has many machine learning algorithms. It has been designed to bring predictive intelligence to decisions made by individuals, by groups, by systems – by your enterprise as a whole. A free trial is available through this course, available here: <a href="https://www.ibm.com/analytics/spss-statistics-software">SPSS Modeler</a> Also, you can use Watson Studio to run these notebooks faster with bigger datasets. Watson Studio is IBM's leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, Watson Studio enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of Watson Studio users today with a free account at <a href="https://www.ibm.com/cloud/watson-studio">Watson Studio</a> ### Thank you for completing this lab! ## Author Saeed Aghabozorgi ### Other Contributors <a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank">Joseph Santarcangelo</a> ## Change Log | Date (YYYY-MM-DD) | Version | Changed By | Change Description | | ----------------- | ------- | ---------- | ---------------------------------- | | 2021-01-21 | 2.4 | Lakshmi | Updated sklearn library | | 2020-11-20 | 2.3 | Lakshmi | Removed unused imports | | 2020-11-17 | 2.2 | Lakshmi | Changed plot function of KNN | | 2020-11-03 | 2.1 | Lakshmi | Changed URL of csv | | 2020-08-27 | 2.0 | Lavanya | Moved lab to course repo in GitLab | | | | | | | | | | | ## <h3 align="center"> © IBM Corporation 2020. All rights reserved. <h3/>
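Coming back to the K-selection loop in the practice section above: that search scores each K on a single held-out split, so the resulting accuracy curve can be noisy. As a sketch (not part of the original lab), the same search can use k-fold cross-validation instead; `X` and `y` are the scaled feature matrix and labels defined earlier in the lab, and the fold count here is an illustrative choice:

```python
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier

Ks = 10
cv_mean_acc = np.zeros(Ks - 1)
for n in range(1, Ks):
    # 5-fold CV averages five held-out scores instead of relying on one split
    scores = cross_val_score(KNeighborsClassifier(n_neighbors=n), X, y, cv=5)
    cv_mean_acc[n - 1] = scores.mean()

print("The best cross-validated accuracy was", cv_mean_acc.max(),
      "with k=", cv_mean_acc.argmax() + 1)
```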
``` import torch from torch.distributions import Normal import math ``` Let us revisit the problem of predicting if a resident of Statsville is female based on the height. For this purpose, we have collected a set of height samples from adult female residents in Statsville. Unfortunately, due to unforseen circumstances we have collected a very small sample from the residents. Armed with our knowledge of Bayesian inference, we do not want to let this deter us from trying to build a model. From physical considerations, we can assume that the distribution of heights is Gaussian. Our goal is to estimate the parameters ($\mu$, $\sigma$) of this Gaussian. Let us first create the dataset by sampling 5 points from a Gaussian distribution with $\mu$=152 and $\sigma$=8. In real life scenarios, we do not know the mean and standard deviation of the true distribution. But for the sake of this example, let's assume that the mean height is 152cm and standard deviation is 8cm. ``` torch.random.manual_seed(0) num_samples = 5 true_dist = Normal(152, 8) X = true_dist.sample((num_samples, 1)) print('Dataset shape: {}'.format(X.shape)) ``` ### Maximum Likelihood Estimate If we relied on Maximum Likelihood estimation, our approach would be simply to compute the mean and standard deviation of the dataset, and use this normal distribution as our model. $$\mu_{MLE} = \frac{1}{N}\sum_{i=1}^nx_i$$ $$\sigma_{MLE} = \frac{1}{N}\sum_{i=1}^n(x_i - \mu)^2$$ Once we estimate the parameters, we can find out the probability that a sample lies in the range using the following formula $$ p(a < X <= b) = \int_{a}^b p(X) dX $$ However, when the amount of data is low, the MLE estimates are not as reliable. ``` mle_mu, mle_std = X.mean(), X.std() mle_dist = Normal(mle_mu, mle_std) print(f"MLE: mu {mle_mu:0.2f} std {mle_std:0.2f}") ``` ## Bayesian Inference Can we do better than MLE? One potential method to do this is to use Bayesian inference with a good prior. How does one go about selecting a good prior? Well, lets say from another survey, we know that the average and the standard deviation of height of adult female residents in Neighborville, the neighboring town. Additionally, we have no reason to believe that the distribution of heights at Statsville is significantly different. So we can use this information to "initialize" our prior. Lets say the the mean height of adult female resident in Neighborville is 150 cm with a standard deviation of 9 cm. We can use this information as our prior. The prior distribution encodes our beliefs on the parameter values. Given that we are dealing with an unknown mean, and unknown variance, we will model the prior as a Normal Gamma distribution. $$p\left( \theta \middle\vert X \right) = p \left( X \middle\vert \theta \right) p \left( \theta \right)\\ p\left( \theta \middle\vert X \right) = Normal-Gamma\left( \mu_{n}, \lambda_{n}, \alpha_{n}, \beta_{n} \right) \\ p \left( X \middle\vert \theta \right) = \mathbb{N}\left( \mu, \lambda^{ -\frac{1}{2} } \right) \\ p \left( \theta \right) = Normal-Gamma\left( \mu_{0}, \lambda_{0}, \alpha_{0}, \beta_{0} \right)$$ We will choose a prior, $p \left(\theta \right)$, such that $$ \mu_{0} = 150 \\ \lambda_{0} = 100 \\ \alpha_{0} = 100.5 \\ \beta_{0} = 8100 $$ $$p \left( \theta \right) = Normal-Gamma\left( 150, 100, 100.5 , 8100 \right)$$ We will compute the posterior, $p\left( \theta \middle\vert X \right)$, using Bayesian inference. 
$$\mu_{n} = \frac{ \left( n \bar{x} + \mu_{0} \lambda_{0} \right) }{ n + \lambda_{0} } \\ \lambda_{n} = n + \lambda_{0} \\ \alpha_{n} = \frac{n}{2} + \alpha_{0} \\ \beta_{n} = \frac{ ns }{ 2 } + \beta_{ 0 } + \frac{ n \lambda_{0} } { 2 \left( n + \lambda_{0} \right) } \left( \bar{x} - \mu_{0} \right)^{ 2 }$$ $$p\left( \theta \middle\vert X \right) = Normal-Gamma\left( \mu_{n}, \lambda_{n}, \alpha_{n}, \beta_{n} \right)$$ ``` class NormalGamma(): def __init__(self, mu_, lambda_, alpha_, beta_): self.mu_ = mu_ self.lambda_ = lambda_ self.alpha_ = alpha_ self.beta_ = beta_ @property def mean(self): return self.mu_, self.alpha_/ self.beta_ @property def mode(self): return self.mu_, (self.alpha_-0.5)/ self.beta_ def inference_unknown_mean_variance(X, prior_dist): mu_mle = X.mean() sigma_mle = X.std() n = X.shape[0] # Parameters of the prior mu_0 = prior_dist.mu_ lambda_0 = prior_dist.lambda_ alpha_0 = prior_dist.alpha_ beta_0 = prior_dist.beta_ # Parameters of posterior mu_n = (n * mu_mle + mu_0 * lambda_0) / (lambda_0 + n) lambda_n = n + lambda_0 alpha_n = n / 2 + alpha_0 beta_n = (n / 2 * sigma_mle ** 2) + beta_0 + (0.5* n * lambda_0 * (mu_mle - mu_0) **2 /(n + lambda_0)) posterior_dist = NormalGamma(mu_n, lambda_n, alpha_n, beta_n) return posterior_dist # Let us initialize the prior based on our beliefs prior_dist = NormalGamma(150, 100, 10.5, 810) # We compute the posterior distribution posterior_dist = inference_unknown_mean_variance(X, prior_dist) ``` How do we use the posterior distribution? Note that the posterior distribution is a distribution on the parameters $\mu$ and $\lambda$. It is important to note that the posterior and prior are distributions in the parameter space. The likelihood is a distribution on the data space. Once we learn the posterior distribution, one way to use the distribution is to look at the mode of the distribution i.e the parameter values which have the highest probability density. Using these point estimates leads us to Maximum A Posteriori / MAP estimation. As usual, we will obtain the maxima of the posterior probability density function $p\left( \mu, \sigma \middle\vert X \right) = Normal-Gamma\left( \mu, \sigma ; \;\; \mu_{n}, \lambda_{n}, \alpha_{n}, \beta_{n} \right) $. This function attains its maxima when $$\mu = \mu_{n} \\ \lambda = \frac{ \alpha_{n} - \frac{1}{2} } { \beta_{n} }$$ We notice that the MAP estimates for $\mu$ and $\sigma$ are better than the MLE estimates. ``` # With the Normal Gamma formulation, the unknown parameters are mu and precision map_mu, map_precision = posterior_dist.mode # We can compute the standard deviation using precision. map_std = math.sqrt(1 / map_precision) map_dist = Normal(map_mu, map_std) print(f"MAP: mu {map_mu:0.2f} std {map_std:0.2f}") ``` How did we arrive at the values of the parameters for the prior distribution? Let us consider the case when we have 0 data points. In this case, posterior will become equal to the prior. If we use the mode of this posterior for our MAP estimate, we see that the mu and std parameters are the same as the $\mu$ and $\sigma$ of adult female residents in Neighborville. ``` prior_mu, prior_precision = prior_dist.mode prior_std = math.sqrt(1 / prior_precision) print(f"Prior: mu {prior_mu:0.2f} std {prior_std:0.2f}") ``` ## Inference Let us say we want to find out the probability that a height between 150 and 155 belongs to an adult female resident. We can now use the the MAP estimates for $\mu$ and $\sigma$ to compute this value. 
Since our prior was good, we notice that the MAP serves as a better estimator than MLE at low values of n ``` a, b = torch.Tensor([150]), torch.Tensor([155]) true_prob = true_dist.cdf(b) - true_dist.cdf(a) print(f'True probability: {true_prob}') map_prob = map_dist.cdf(b) - map_dist.cdf(a) print(f'MAP probability: {map_prob}') mle_prob = mle_dist.cdf(b) - mle_dist.cdf(a) print('MLE probability: {}'.format(mle_prob)) ``` Let us say we receive more samples, how do we incorporate this information into our model? We can now set the prior to our current posterior and run inference again to obtain the new posterior. This process can be done interatively. $$ p \left( \theta \right)_{n} = p\left( \theta \middle\vert X \right)_{n-1}$$ $$ p\left( \theta \middle\vert X \right)_{n}=inference\_unknown\_mean\_variance(X_{n}, p \left( \theta \right)_{n})$$ We also notice that as the number of data points increases, the MAP starts to converge towards the true values of $\mu$ and $\sigma$ respectively ``` num_batches, batch_size = 20, 10 for i in range(num_batches): X_i = true_dist.sample((batch_size, 1)) prior_i = posterior_dist posterior_dist = inference_unknown_mean_variance(X_i, prior_i) map_mu, map_precision = posterior_dist.mode # We can compute the standard deviation using precision. map_std = math.sqrt(1 / map_precision) map_dist = Normal(map_mu, map_std) if i % 5 == 0: print(f"MAP at batch {i}: mu {map_mu:0.2f} std {map_std:0.2f}") print(f"MAP at batch {i}: mu {map_mu:0.2f} std {map_std:0.2f}") ```
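As a closing sketch (not part of the original notebook): instead of plugging the MAP point estimates into a single Normal, the Normal-Gamma posterior also gives a closed-form posterior predictive distribution, a Student-t, which averages over the remaining uncertainty in $\mu$ and $\lambda$. The expressions below follow the standard conjugate analysis, and scipy is assumed to be available even though it is not imported above:

```python
import math
from scipy import stats

# Posterior predictive of a Normal-Gamma posterior:
# t with df = 2*alpha_n, loc = mu_n, scale^2 = beta_n*(lambda_n+1)/(alpha_n*lambda_n)
df_t = 2 * posterior_dist.alpha_
loc_t = posterior_dist.mu_
scale_t = math.sqrt(posterior_dist.beta_ * (posterior_dist.lambda_ + 1)
                    / (posterior_dist.alpha_ * posterior_dist.lambda_))

# Probability that a height lies between 150 and 155 cm, integrating over
# parameter uncertainty rather than using the MAP plug-in Normal
pred_prob = (stats.t.cdf(155, df_t, loc=loc_t, scale=scale_t)
             - stats.t.cdf(150, df_t, loc=loc_t, scale=scale_t))
print(f'Posterior predictive probability: {pred_prob:0.4f}')
```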
##### Copyright 2018 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Distributed Training in TensorFlow <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/alpha/guide/distribute_strategy"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/guide/distribute_strategy.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/guide/distribute_strategy.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table> ## Overview `tf.distribute.Strategy` is a TensorFlow API to distribute training across multiple GPUs, multiple machines or TPUs. Using this API, users can distribute their existing models and training code with minimal code changes. `tf.distribute.Strategy` has been designed with these key goals in mind: * Easy to use and support multiple user segments, including researchers, ML engineers, etc. * Provide good performance out of the box. * Easy switching between strategies. `tf.distribute.Strategy` can be used with TensorFlow's high level APIs, [tf.keras](https://www.tensorflow.org/guide/keras) and [tf.estimator](https://www.tensorflow.org/guide/estimators), with just a couple of lines of code change. It also provides an API that can be used to distribute custom training loops (and in general any computation using TensorFlow). In TensorFlow 2.0, users can execute their programs eagerly, or in a graph using [`tf.function`](../tutorials/eager/tf_function.ipynb). `tf.distribute.Strategy` intends to support both these modes of execution. Note that we may talk about training most of the time in this guide, but this API can also be used for distributing evaluation and prediction on different platforms. As you will see in a bit, very few changes are needed to use `tf.distribute.Strategy` with your code. This is because we have changed the underlying components of TensorFlow to become strategy-aware. This includes variables, layers, models, optimizers, metrics, summaries, and checkpoints. In this guide, we will talk about various types of strategies and how one can use them in a different situations. ``` # Import TensorFlow from __future__ import absolute_import, division, print_function import tensorflow as tf #gpu ``` ## Types of strategies `tf.distribute.Strategy` intends to cover a number of use cases along different axes. Some of these combinations are currently supported and others will be added in the future. Some of these axes are: * Syncronous vs asynchronous training: These are two common ways of distributing training with data parallelism. In sync training, all workers train over different slices of input data in sync, and aggregating gradients at each step. 
In async training, all workers are independently training over the input data and updating variables asynchronously. Typically sync training is supported via all-reduce and async through parameter server architecture. * Hardware platform: Users may want to scale their training onto multiple GPUs on one machine, or multiple machines in a network (with 0 or more GPUs each), or on Cloud TPUs. In order to support these use cases, we have 4 strategies available. In the next section we will talk about which of these are supported in which scenarios in TF nightly at this time. ### MirroredStrategy `tf.distribute.MirroredStrategy` support synchronous distributed training on multiple GPUs on one machine. It creates one replica per GPU device. Each variable in the model is mirrored across all the replicas. Together, these variables form a single conceptual variable called `MirroredVariable`. These variables are kept in sync with each other by applying identical updates. Efficient all-reduce algorithms are used to communicate the variable updates across the devices. All-reduce aggregates tensors across all the devices by adding them up, and makes them available on each device. It’s a fused algorithm that is very efficient and can reduce the overhead of synchronization significantly. There are many all-reduce algorithms and implementations available, depending on the type of communication available between devices. By default, it uses NVIDIA NCCL as the all-reduce implementation. The user can also choose between a few other options we provide, or write their own. Here is the simplest way of creating `MirroredStrategy`: ``` mirrored_strategy = tf.distribute.MirroredStrategy() ``` This will create a `MirroredStrategy` instance which will use all the GPUs that are visible to TensorFlow, and use NCCL as the cross device communication. If you wish to use only some of the GPUs on your machine, you can do so like this: ``` mirrored_strategy = tf.distribute.MirroredStrategy(devices=["/gpu:0", "/gpu:1"]) ``` If you wish to override the cross device communication, you can do so using the `cross_device_ops` argument by supplying an instance of `tf.distribute.CrossDeviceOps`. Currently we provide `tf.distribute.HierarchicalCopyAllReduce` and `tf.distribute.ReductionToOneDevice` as 2 other options other than `tf.distribute.NcclAllReduce` which is the default. ``` mirrored_strategy = tf.distribute.MirroredStrategy( cross_device_ops=tf.distribute.HierarchicalCopyAllReduce()) ``` ### MultiWorkerMirroredStrategy `tf.distribute.experimental.MultiWorkerMirroredStrategy` is very similar to `MirroredStrategy`. It implements synchronous distributed training across multiple workers, each with potentially multiple GPUs. Similar to `MirroredStrategy`, it creates copies of all variables in the model on each device across all workers. It uses [CollectiveOps](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/collective_ops.py) as the multi-worker all-reduce communication method used to keep variables in sync. A collective op is a single op in the TensorFlow graph which can automatically choose an all-reduce algorithm in the TensorFlow runtime according to hardware, network topology and tensor sizes. It also implements additional performance optimizations. For example, it includes a static optimization that converts multiple all-reductions on small tensors into fewer all-reductions on larger tensors. 
In addition, we are designing it to have a plugin architecture - so that in the future, users will be able to plugin algorithms that are better tuned for their hardware. Note that collective ops also implement other collective operations such as broadcast and all-gather. Here is the simplest way of creating `MultiWorkerMirroredStrategy`: ``` multiworker_strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() ``` `MultiWorkerMirroredStrategy` currently allows you to choose between two different implementations of collective ops. `CollectiveCommunication.RING` implements ring-based collectives using gRPC as the communication layer. `CollectiveCommunication.NCCL` uses [Nvidia's NCCL](https://developer.nvidia.com/nccl) to implement collectives. `CollectiveCommunication.AUTO` defers the choice to the runtime. The best choice of collective implementation depends upon the number and kind of GPUs, and the network interconnect in the cluster. You can specify them like so: ``` multiworker_strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy( tf.distribute.experimental.CollectiveCommunication.NCCL) ``` One of the key differences to get multi worker training going, as compared to multi-GPU training, is the multi-worker setup. "TF_CONFIG" environment variable is the standard way in TensorFlow to specify the cluster configuration to each worker that is part of the cluster. See section on ["TF_CONFIG" below](#TF_CONFIG) for more details on how this can be done. Note: This strategy is [`experimental`](https://www.tensorflow.org/guide/version_compat#what_is_not_covered) as we are currently improving it and making it work for more scenarios. As part of this, please expect the APIs to change in the future. ### TPUStrategy `tf.distribute.experimental.TPUStrategy` lets users run their TensorFlow training on Tensor Processing Units (TPUs). TPUs are Google's specialized ASICs designed to dramatically accelerate machine learning workloads. They are available on Google Colab, the [TensorFlow Research Cloud](https://www.tensorflow.org/tfrc) and [Google Compute Engine](https://cloud.google.com/tpu). In terms of distributed training architecture, TPUStrategy is the same `MirroredStrategy` - it implements synchronous distributed training. TPUs provide their own implementation of efficient all-reduce and other collective operations across multiple TPU cores, which are used in `TPUStrategy`. Here is how you would instantiate `TPUStrategy`. Note: To run this code in Colab, you should select TPU as the Colab runtime. See [Using TPUs]( tpu.ipynb) guide for a runnable version. ``` resolver = tf.distribute.cluster_resolver.TPUClusterResolver() tf.tpu.experimental.initialize_tpu_system(resolver) tpu_strategy = tf.distribute.experimental.TPUStrategy(resolver) ``` `TPUClusterResolver` instance helps locate the TPUs. In Colab, you don't need to specify any arguments to it. If you want to use this for Cloud TPUs, you will need to specify the name of your TPU resource in `tpu` argument. We also need to initialize the tpu system explicitly at the start of the program. This is required before TPUs can be used for computation and should ideally be done at the beginning because it also wipes out the TPU memory so all state will be lost. Note: This strategy is [`experimental`](https://www.tensorflow.org/guide/version_compat#what_is_not_covered) as we are currently improving it and making it work for more scenarios. As part of this, please expect the APIs to change in the future. 
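Because the strategies above are instantiated differently depending on the available hardware, a common pattern in notebooks is to try to attach to a TPU and fall back to `MirroredStrategy` when none is reachable. This is a sketch, not part of the original guide; the helper name and the exceptions caught are illustrative, and only the APIs introduced above are used:

```python
import tensorflow as tf

def pick_strategy():
  try:
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
    tf.tpu.experimental.initialize_tpu_system(resolver)
    return tf.distribute.experimental.TPUStrategy(resolver)
  except (ValueError, tf.errors.NotFoundError):
    # No TPU reachable in this environment; use all local GPUs (or the CPU)
    return tf.distribute.MirroredStrategy()

strategy = pick_strategy()
print('Number of replicas in sync:', strategy.num_replicas_in_sync)
```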
### ParameterServerStrategy `tf.distribute.experimental.ParameterServerStrategy` supports parameter servers training. It can be used either for multi-GPU synchronous local training or asynchronous multi-machine training. When used to train locally on one machine, variables are not mirrored, instead they are placed on the CPU and operations are replicated across all local GPUs. In a multi-machine setting, some machines are designated as workers and some as parameter servers. Each variable of the model is placed on one parameter server. Computation is replicated across all GPUs of the all the workers. In terms of code, it looks similar to other strategies: ``` ps_strategy = tf.distribute.experimental.ParameterServerStrategy() ``` For multi worker training, "TF_CONFIG" needs to specify the configuration of parameter servers and workers in your cluster, which you can read more about in ["TF_CONFIG" below](#TF_CONFIG) below. So far we've talked about what are the different stategies available and how you can instantiate them. In the next few sections, we will talk about the different ways in which you can use them to distribute your training. We will show short code snippets in this guide and link off to full tutorials which you can run end to end. ## Using `tf.distribute.Strategy` with Keras We've integrated `tf.distribute.Strategy` into `tf.keras` which is TensorFlow's implementation of the [Keras API specification](https://keras.io). `tf.keras` is a high-level API to build and train models. By integrating into `tf.keras` backend, we've made it seamless for Keras users to distribute their training written in the Keras training framework. The only things that need to change in a user's program are: (1) Create an instance of the appropriate `tf.distribute.Strategy` and (2) Move the creation and compiling of Keras model inside `strategy.scope`. Here is a snippet of code to do this for a very simple Keras model with one dense layer: ``` mirrored_strategy = tf.distribute.MirroredStrategy() with mirrored_strategy.scope(): model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))]) model.compile(loss='mse', optimizer='sgd') ``` In this example we used `MirroredStrategy` so we can run this on a machine with multiple GPUs. `strategy.scope()` indicated which parts of the code to run distributed. Creating a model inside this scope allows us to create mirrored variables instead of regular variables. Compiling under the scope allows us to know that the user intends to train this model using this strategy. Once this is setup, you can fit your model like you would normally. `MirroredStrategy` takes care of replicating the model's training on the available GPUs, aggregating gradients etc. ``` dataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat(100).batch(10) model.fit(dataset, epochs=2) model.evaluate(dataset) ``` Here we used a `tf.data.Dataset` to provide the training and eval input. You can also use numpy arrays: ``` import numpy as np inputs, targets = np.ones((100, 1)), np.ones((100, 1)) model.fit(inputs, targets, epochs=2, batch_size=10) ``` In both cases (dataset or numpy), each batch of the given input is divided equally among the multiple replicas. For instance, if using `MirroredStrategy` with 2 GPUs, each batch of size 10 will get divided among the 2 GPUs, with each receiving 5 input examples in each step. Each epoch will then train faster as you add more GPUs. 
Typically, you would want to increase your batch size as you add more accelerators so as to make effective use of the extra computing power. You will also need to re-tune your learning rate, depending on the model. You can use `strategy.num_replicas_in_sync` to get the number of replicas. ``` # Compute global batch size using number of replicas. BATCH_SIZE_PER_REPLICA = 5 global_batch_size = (BATCH_SIZE_PER_REPLICA * mirrored_strategy.num_replicas_in_sync) dataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat(100) dataset = dataset.batch(global_batch_size) LEARNING_RATES_BY_BATCH_SIZE = {5: 0.1, 10: 0.15} learning_rate = LEARNING_RATES_BY_BATCH_SIZE[global_batch_size] ``` ### What's supported now? In [TF nightly release](https://pypi.org/project/tf-nightly-gpu/), we now support training with Keras using all strategies. Note: When using `MultiWorkerMirorredStrategy` for multiple workers or `TPUStrategy` with more than one host with Keras, currently the user will have to explicitly shard or shuffle the data for different workers, but we will change this in the future to automatically shard the input data intelligently. ### Examples and Tutorials Here is a list of tutorials and examples that illustrate the above integration end to end with Keras: 1. [Tutorial](../tutorials/distribute/keras.ipynb) to train MNIST with `MirroredStrategy`. 2. Official [ResNet50](https://github.com/tensorflow/models/blob/master/official/resnet/keras/keras_imagenet_main.py) training with ImageNet data using `MirroredStrategy`. 3. [ResNet50](https://github.com/tensorflow/tpu/blob/master/models/experimental/resnet50_keras/resnet50.py) trained with Imagenet data on Cloud TPus with `TPUStrategy`. ## Using `tf.distribute.Strategy` with Estimator `tf.estimator` is a distributed training TensorFlow API that originally supported the async parameter server approach. Like with Keras, we've integrated `tf.distribute.Strategy` into `tf.Estimator` so that a user who is using Estimator for their training can easily change their training is distributed with very few changes to your their code. With this, estimator users can now do synchronous distributed training on multiple GPUs and multiple workers, as well as use TPUs. The usage of `tf.distribute.Strategy` with Estimator is slightly different than the Keras case. Instead of using `strategy.scope`, now we pass the strategy object into the [`RunConfig`](https://www.tensorflow.org/api_docs/python/tf/estimator/RunConfig) for the Estimator. Here is a snippet of code that shows this with a premade estimator `LinearRegressor` and `MirroredStrategy`: ``` mirrored_strategy = tf.distribute.MirroredStrategy() config = tf.estimator.RunConfig( train_distribute=mirrored_strategy, eval_distribute=mirrored_strategy) regressor = tf.estimator.LinearRegressor( feature_columns=[tf.feature_column.numeric_column('feats')], optimizer='SGD', config=config) ``` We use a premade Estimator here, but the same code works with a custom Estimator as well. `train_evaluate` determines how training will be distributed, and `eval_distribute` determines how evaluation will be distributed. This is another difference from Keras where we use the same strategy for both training and eval. 
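Because `train_distribute` and `eval_distribute` are set independently, it is also possible to distribute only training and leave evaluation on a single device. A short sketch (not from the original guide; the variable name is illustrative):

```python
# eval_distribute defaults to None, so evaluation runs without a strategy
train_only_config = tf.estimator.RunConfig(train_distribute=mirrored_strategy)
```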
Now we can train and evaluate this Estimator with an input function: ``` def input_fn(): dataset = tf.data.Dataset.from_tensors(({"feats":[1.]}, [1.])) return dataset.repeat(1000).batch(10) regressor.train(input_fn=input_fn, steps=10) regressor.evaluate(input_fn=input_fn, steps=10) ``` Another difference to highlight here between Estimator and Keras is the input handling. In Keras, we mentioned that each batch of the dataset is split across the multiple replicas. In Estimator, however, the user provides an `input_fn` and have full control over how they want their data to be distributed across workers and devices. We do not do automatic splitting of batch, nor automatically shard the data across different workers. The provided `input_fn` is called once per worker, thus giving one dataset per worker. Then one batch from that dataset is fed to one replica on that worker, thereby consuming N batches for N replicas on 1 worker. In other words, the dataset returned by the `input_fn` should provide batches of size `PER_REPLICA_BATCH_SIZE`. And the global batch size for a step can be obtained as `PER_REPLICA_BATCH_SIZE * strategy.num_replicas_in_sync`. When doing multi worker training, users will also want to either split their data across the workers, or shuffle with a random seed on each. You can see an example of how to do this in the [multi-worker tutorial](../tutorials/distribute/multi_worker.ipynb). We showed an example of using `MirroredStrategy` with Estimator. You can also use `TPUStrategy` with Estimator as well, in the exact same way: ``` config = tf.estimator.RunConfig( train_distribute=tpu_strategy, eval_distribute=tpu_strategy) ``` And similarly, you can use multi worker and parameter server strategies as well. The code remains the same, but you need to use `tf.estimator.train_and_evaluate`, and set "TF_CONFIG" environment variables for each binary running in your cluster. ### What's supported now? In TF nightly release, we support training with Estimator using all strategies. ### Examples and Tutorials Here are some examples that show end to end usage of various strategies with Estimator: 1. [End to end example](https://github.com/tensorflow/ecosystem/tree/master/distribution_strategy) for multi worker training in tensorflow/ecosystem using Kuberentes templates. This example starts with a Keras model and converts it to an Estimator using the `tf.keras.estimator.model_to_estimator` API. 2. Official [ResNet50](https://github.com/tensorflow/models/blob/master/official/resnet/imagenet_main.py) model, which can be trained using either `MirroredStrategy` or `MultiWorkerMirroredStrategy`. 3. [ResNet50](https://github.com/tensorflow/tpu/blob/master/models/experimental/distribution_strategy/resnet_estimator.py) example with TPUStrategy. ## Using `tf.distribute.Strategy` with custom training loops As you've seen, using `tf.distrbute.Strategy` with high level APIs is only a couple lines of code change. With a little more effort, `tf.distrbute.Strategy` can also be used by other users who are not using these frameworks. TensorFlow is used for a wide variety of use cases and some users (such as researchers) require more flexibility and control over their training loops. This makes it hard for them to use the high level frameworks such as Estimator or Keras. For instance, someone using a GAN may want to take a different number of generator or discriminator steps each round. Similarly, the high level frameworks are not very suitable for Reinforcement Learning training. 
So these users will usually write their own training loops. For these users, we provide a core set of methods through the `tf.distrbute.Strategy` classes. Using these may require minor restructuring of the code initially, but once that is done, the user should be able to switch between GPUs / TPUs / multiple machines by just changing the strategy instance. Here we will show a brief snippet illustrating this use case for a simple training example using the same Keras model as before. Note: These APIs are still experimental and we are improving them to make them more user friendly. First, we create the model and optimizer inside the strategy's scope. This ensures that any variables created with the model and optimizer are mirrored variables. ``` with mirrored_strategy.scope(): model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))]) optimizer = tf.train.GradientDescentOptimizer(0.1) ``` Next, we create the input dataset and call `make_dataset_iterator` to distribute the dataset based on the strategy. This API is expected to change in the near future. ``` with mirrored_strategy.scope(): dataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat(1000).batch( global_batch_size) input_iterator = mirrored_strategy.make_dataset_iterator(dataset) ``` Then, we define one step of the training. We will use `tf.GradientTape` to compute gradients and optimizer to apply those gradients to update our model's variables. To distribute this training step, we put in in a function `step_fn` and pass it to `strategy.experimental_run` along with the iterator created before: ``` def train_step(): def step_fn(inputs): features, labels = inputs logits = model(features) cross_entropy = tf.nn.softmax_cross_entropy_with_logits( logits=logits, labels=labels) loss = tf.reduce_sum(cross_entropy) * (1.0 / global_batch_size) train_op = optimizer.minimize(loss) with tf.control_dependencies([train_op]): return tf.identity(loss) per_replica_losses = mirrored_strategy.experimental_run( step_fn, input_iterator) mean_loss = mirrored_strategy.reduce( tf.distribute.ReduceOp.MEAN, per_replica_losses) return mean_loss ``` A few other things to note in the code above: 1. We used `tf.nn.softmax_cross_entropy_with_logits` to compute the loss. And then we scaled the total loss by the global batch size. This is important because all the replicas are training in sync and number of examples in each step of training is the global batch. If you're using TensorFlow's standard losses from `tf.losses` or `tf.keras.losses`, they are distribution aware and will take care of the scaling by number of replicas whenever a strategy is in scope. 2. We used the `strategy.reduce` API to aggregate the results returned by `experimental_run`. `experimental_run` returns results from each local replica in the strategy, and there are multiple ways to consume this result. You can `reduce` them to get an aggregated value. You can also do `strategy.unwrap(results)`* to get the list of values contained in the result, one per local replica. *expected to change Finally, once we have defined the training step, we can initialize the iterator and variables and run the training in a loop: ``` with mirrored_strategy.scope(): iterator_init = input_iterator.initialize() var_init = tf.global_variables_initializer() loss = train_step() with tf.Session() as sess: sess.run([iterator_init, var_init]) for _ in range(10): print(sess.run(loss)) ``` In the example above, we used `make_dataset_iterator` to provide input to your training. 
We also provide two additional APIs: `make_input_fn_iterator` and `make_experimental_numpy_iterator` to support other kinds of inputs. See their documentation in `tf.distribute.Strategy` and how they differ from `make_dataset_iterator`. This covers the simplest case of using `tf.distribute.Strategy` API to do distribute custom training loops. We are in the process of improving these APIs. Since this use case requres more work on the part of the user, we will be publishing a separate detailed guide for this use case in the future. ### What's supported now? In TF nightly release, we support training with custom training loops using `MirroredStrategy` and `TPUStrategy` as shown above. Support for other strategies will be coming in soon. `MultiWorkerMirorredStrategy` support will be coming in the future. ### Examples and Tutorials Here are some examples for using distribution strategy with custom training loops: 1. [Example](https://github.com/tensorflow/tensorflow/blob/5456cc28f3f8d9c17c645d9a409e495969e584ae/tensorflow/contrib/distribute/python/examples/mnist_tf1_tpu.py) to train MNIST using `TPUStrategy`. ## Other topics In this section, we will cover some topics that are relevant to multiple use cases. <a id="TF_CONFIG"> ### Setting up TF\_CONFIG environment variable </a> For multi-worker training, as mentioned before, you need to us set "TF\_CONFIG" environment variable for each binary running in your cluster. The "TF\_CONFIG" environment variable is a JSON string which specifies what tasks constitute a cluster, their addresses and each task's role in the cluster. We provide a Kubernetes template in the [tensorflow/ecosystem](https://github.com/tensorflow/ecosystem) repo which sets "TF\_CONFIG" for your training tasks. One example of "TF\_CONFIG" is: ``` os.environ["TF_CONFIG"] = json.dumps({ "cluster": { "worker": ["host1:port", "host2:port", "host3:port"], "ps": ["host4:port", "host5:port"] }, "task": {"type": "worker", "index": 1} }) ``` This "TF\_CONFIG" specifies that there are three workers and two ps tasks in the cluster along with their hosts and ports. The "task" part specifies that the role of the current task in the cluster, worker 1 (the second worker). Valid roles in a cluster is "chief", "worker", "ps" and "evaluator". There should be no "ps" job except when using `tf.distribute.experimental.ParameterServerStrategy`. ## What's next? `tf.distribute.Strategy` is actively under development. We welcome you to try it out and provide and your feedback via [issues on GitHub](https://github.com/tensorflow/tensorflow/issues/new).
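As a companion to the "TF_CONFIG" example above, here is a sketch (not from the original guide) of how a worker process can read its own "TF_CONFIG" back to discover its role and index in the cluster:

```python
import json
import os

tf_config = json.loads(os.environ.get("TF_CONFIG", "{}"))
cluster = tf_config.get("cluster", {})
task = tf_config.get("task", {})
print("Workers in cluster:", cluster.get("worker", []))
print("This task: type=%s index=%s" % (task.get("type"), task.get("index")))
```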
``` !pip install plotly -U import numpy as np import matplotlib.pyplot as plt from plotly import graph_objs as go import plotly as py from scipy import optimize print("hello") ``` Generate the data ``` m = np.random.rand() n = np.random.rand() num_of_points = 100 x = np.random.random(num_of_points) y = x*m + n + 0.15*np.random.random(num_of_points) fig = go.Figure(data=[go.Scatter(x=x, y=y, mode='markers', name='all points')], layout=go.Layout( xaxis=dict(range=[np.min(x), np.max(x)], autorange=False), yaxis=dict(range=[np.min(y), np.max(y)], autorange=False) ) ) fig.show() print("m=" + str(m) + " n=" + str(n) ) # fmin def stright_line_fmin(x,y): dist_func = lambda p: (((y-x*p[0]-p[1])**2).mean()) p_opt = optimize.fmin(dist_func, np.array([0,0])) return p_opt stright_line_fmin(x,y) # PCA def straight_line_pca(x,y): X = np.append(x-x.mean(),y-y.mean(), axis=1) # Data matrix X, assumes 0-centered n, m = X.shape # Compute covariance matrix C = np.dot(X.T, X) / (n-1) # Eigen decomposition eigen_vals, eigen_vecs = np.linalg.eig(C) # Project X onto PC space X_pca_inv = np.dot(np.array([[1,0],[-1,0]]), np.linalg.inv(eigen_vecs)) X_pca = np.dot(X, eigen_vecs) x_min = (x-x.mean()).min() x_max = (x-x.mean()).max() fig = go.Figure(data=[ go.Scatter(x=x.ravel(), y=y.ravel(), mode='markers', name='all points'), go.Scatter(x=X_pca_inv[:, 0]+x.mean(), y=X_pca_inv[:,1]+y.mean(), mode='lines', name='pca estimation')]) fig.show() return X_pca_inv[1, 1]/X_pca_inv[1, 0], y.mean() - x.mean()*X_pca_inv[1, 1]/X_pca_inv[1, 0] c = straight_line_pca(x[:, np.newaxis],y[:, np.newaxis]) c #leaset squares def least_square_fit(x, y): # model: y_i = h*x_i # cost: (Y-h*X)^T * (Y-h*X) # solution: h = (X^t *X)^-1 * X^t * Y return np.dot(np.linalg.inv(np.dot(x.transpose(), x)), np.dot(x.transpose() , y)) least_square_fit(np.append(x[:, np.newaxis], np.ones_like(x[:, np.newaxis]), axis=1), y) # SVd def svd_fit(x, y): # model: y_i = h*x_i # minimize: [x_0, 1, -y_0; x1, 1, -y_1; ...]*[h, 1] = Xh = 0 # do so by: eigenvector coresponds to smallest eigenvalue of X X = np.append(x, -y, axis=1) u, s, vh = np.linalg.svd(X) return vh[-1, :2]/vh[-1,-1] m_, n_ = svd_fit(np.append(x[:, np.newaxis], np.ones_like(x[:, np.newaxis]), axis=1), y[:, np.newaxis]) print(m_, n_) #Ransac def ransac(src_pnts, distance_func, model_func, num_of_points_to_determine_model, dist_th, inliers_ratio=0.7, p=0.95): """Summary or Description of the Function Parameters: src_pnt : data points used by Ransac to find the model distance_func : a function pointer to a distance function. 
The distance function takes a model and a point and calculate the cost p : success probabilaty Returns: int:Returning value """ min_x = src_pnts[:, 0].min() max_x = src_pnts[:, 0].max() print(min_x, max_x) num_of_points = src_pnts.shape[0] num_of_iter = int(np.ceil(np.log(1-p)/np.log(1-inliers_ratio**num_of_points_to_determine_model))) proposed_line = [] max_num_of_inliers = 0 for i in range(num_of_iter): indx = np.random.permutation(num_of_points)[:num_of_points_to_determine_model] curr_model = model_func(src_pnts[indx, :]) x=np.array([min_x, max_x]) y=curr_model(x) print(y) d = distance_func(curr_model, src_pnts) num_of_inliers = np.sum(d<dist_th) proposed_line.append((curr_model, x, y, indx, d, num_of_inliers)) if num_of_inliers > max_num_of_inliers: max_num_of_inliers = num_of_inliers best_model = curr_model return best_model, proposed_line def stright_line_from_two_points(pnts): m = (pnts[1, 1]-pnts[0,1])/(pnts[1,0]-pnts[0,0]) n = (pnts[1,0]*pnts[0,1]-pnts[0,0]*pnts[1,1])/(pnts[1,0]-pnts[0,0]) mod_func = lambda x : x*m + n return mod_func src_pnts = np.array([x, y]).transpose() distance_func = lambda model, pnts : (model(pnts[:, 0]) - pnts[:, 1])**2 model_func = stright_line_from_two_points num_of_points_to_determine_model = 2 dist_th = 0.2 best_model, ransac_run = ransac(src_pnts, distance_func, model_func, num_of_points_to_determine_model, dist_th) print(x.min()) print(x.max()) x_ransac = np.array([x.min(), x.max()]) y_ransac = best_model(x_ransac) print(y_ransac) scatter_xy = go.Scatter(x=x, y=y, mode='markers', name="all points") frames=[go.Frame( data=[scatter_xy, go.Scatter(x=x[item[3]], y=y[item[3]], mode='markers', line=dict(width=2, color="red"), name="selected points"), go.Scatter(x=item[1], y=item[2], mode='lines', name='current line')]) for item in ransac_run] fig = go.Figure( data=[go.Scatter(x=x, y=y, mode='markers', name='all points'), go.Scatter(x=x, y=y, mode='markers', name="selected points"), go.Scatter(x=x, y=y, mode='markers', name="current line"), go.Scatter(x=x_ransac, y=y_ransac, mode='lines', name="best selection")], layout=go.Layout( xaxis=dict(range=[np.min(x), np.max(x)], autorange=False), yaxis=dict(range=[np.min(y), np.max(y)], autorange=False), title="Ransac guesses", updatemenus=[dict( type="buttons", buttons=[dict(label="Play", method="animate", args=[None])])] ), frames=frames ) fig.show() ```
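One step the RANSAC loop above leaves out is the usual final refit: once the best hypothesis is known, the model is re-estimated from all of its inliers rather than from the two points that generated it. A sketch (not part of the original notebook), reusing the helpers defined above:

```python
# Collect the inliers of the best model and refit the line with least squares
d_best = distance_func(best_model, src_pnts)
inliers = src_pnts[d_best < dist_th]
m_refit, n_refit = least_square_fit(
    np.append(inliers[:, :1], np.ones((inliers.shape[0], 1)), axis=1),
    inliers[:, 1])
print("Refit on {} inliers: m={:.3f} n={:.3f}".format(len(inliers), m_refit, n_refit))
```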
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Gradient Boosted Trees: Model understanding <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/tutorials/estimators/boosted_trees_model_understanding"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/estimators/boosted_trees_model_understanding.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/tree/master/site/en/tutorials/estimators/boosted_trees_model_understanding.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table> For an end-to-end walkthrough of training a Gradient Boosting model check out the [boosted trees tutorial](https://www.tensorflow.org/tutorials/estimators/boosted_trees). In this tutorial you will: * Learn how to interpret a Boosted Trees model both *locally* and *globally* * Gain intution for how a Boosted Trees model fits a dataset ## How to interpret Boosted Trees models both locally and globally Local interpretability refers to an understanding of a model’s predictions at the individual example level, while global interpretability refers to an understanding of the model as a whole. Such techniques can help machine learning (ML) practitioners detect bias and bugs during the model development stage For local interpretability, you will learn how to create and visualize per-instance contributions. To distinguish this from feature importances, we refer to these values as directional feature contributions (DFCs). For global interpretability you will retrieve and visualize gain-based feature importances, [permutation feature importances](https://www.stat.berkeley.edu/~breiman/randomforest2001.pdf) and also show aggregated DFCs. ## Load the titanic dataset You will be using the titanic dataset, where the (rather morbid) goal is to predict passenger survival, given characteristics such as gender, age, class, etc. ``` from __future__ import absolute_import, division, print_function, unicode_literals import numpy as np import pandas as pd import tensorflow as tf tf.logging.set_verbosity(tf.logging.ERROR) tf.set_random_seed(123) # Load dataset. dftrain = pd.read_csv('https://storage.googleapis.com/tfbt/titanic_train.csv') dfeval = pd.read_csv('https://storage.googleapis.com/tfbt/titanic_eval.csv') y_train = dftrain.pop('survived') y_eval = dfeval.pop('survived') ``` For a description of the features, please review the prior tutorial. ## Create feature columns, input_fn, and the train the estimator ### Preprocess the data Create the feature columns, using the original numeric columns as is and one-hot-encoding categorical variables. 
``` fc = tf.feature_column CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck', 'embark_town', 'alone'] NUMERIC_COLUMNS = ['age', 'fare'] def one_hot_cat_column(feature_name, vocab): return fc.indicator_column( fc.categorical_column_with_vocabulary_list(feature_name, vocab)) feature_columns = [] for feature_name in CATEGORICAL_COLUMNS: # Need to one-hot encode categorical features. vocabulary = dftrain[feature_name].unique() feature_columns.append(one_hot_cat_column(feature_name, vocabulary)) for feature_name in NUMERIC_COLUMNS: feature_columns.append(fc.numeric_column(feature_name, dtype=tf.float32)) ``` ### Build the input pipeline Create the input functions using the `from_tensor_slices` method in the [`tf.data`](https://www.tensorflow.org/api_docs/python/tf/data) API to read in data directly from Pandas. ``` # Use entire batch since this is such a small dataset. NUM_EXAMPLES = len(y_train) def make_input_fn(X, y, n_epochs=None, shuffle=True): y = np.expand_dims(y, axis=1) def input_fn(): dataset = tf.data.Dataset.from_tensor_slices((X.to_dict(orient='list'), y)) if shuffle: dataset = dataset.shuffle(NUM_EXAMPLES) # For training, cycle thru dataset as many times as need (n_epochs=None). dataset = (dataset .repeat(n_epochs) .batch(NUM_EXAMPLES)) return dataset return input_fn # Training and evaluation input functions. train_input_fn = make_input_fn(dftrain, y_train) eval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1) ``` ### Train the model ``` params = { 'n_trees': 50, 'max_depth': 3, 'n_batches_per_layer': 1, # You must enable center_bias = True to get DFCs. This will force the model to # make an initial prediction before using any features (e.g. use the mean of # the training labels for regression or log odds for classification when # using cross entropy loss). 'center_bias': True } est = tf.estimator.BoostedTreesClassifier(feature_columns, **params) est.train(train_input_fn, max_steps=100) results = est.evaluate(eval_input_fn) pd.Series(results).to_frame() ``` For performance reasons, when your data fits in memory, we recommend use the `boosted_trees_classifier_train_in_memory` function. However if training time is not of a concern or if you have a very large dataset and want to do distributed training, use the `tf.estimator.BoostedTrees` API shown above. When using this method, you should not batch your input data, as the method operates on the entire dataset. ``` in_memory_params = dict(params) del in_memory_params['n_batches_per_layer'] # In-memory input_fn does not use batching. def make_inmemory_train_input_fn(X, y): y = np.expand_dims(y, axis=1) def input_fn(): return dict(X), y return input_fn train_input_fn = make_inmemory_train_input_fn(dftrain, y_train) # Train the model. est = tf.contrib.estimator.boosted_trees_classifier_train_in_memory( train_input_fn, feature_columns, **in_memory_params) print(est.evaluate(eval_input_fn)) ``` ## Model interpretation and plotting ``` import matplotlib.pyplot as plt import seaborn as sns sns_colors = sns.color_palette('colorblind') ``` ## Local interpretability Next you will output the directional feature contributions (DFCs) to explain individual predictions using the approach outlined in [Palczewska et al](https://arxiv.org/pdf/1312.1121.pdf) and by Saabas in [Interpreting Random Forests](http://blog.datadive.net/interpreting-random-forests/) (this method is also available in scikit-learn for Random Forests in the [`treeinterpreter`](https://github.com/andosa/treeinterpreter) package). 
The DFCs are generated with: `pred_dicts = list(est.experimental_predict_with_explanations(pred_input_fn))` (Note: The method is named experimental as we may modify the API before dropping the experimental prefix.) ``` pred_dicts = list(est.experimental_predict_with_explanations(eval_input_fn)) # Create DFC Pandas dataframe. labels = y_eval.values probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts]) df_dfc = pd.DataFrame([pred['dfc'] for pred in pred_dicts]) df_dfc.describe().T ``` A nice property of DFCs is that the sum of the contributions + the bias is equal to the prediction for a given example. ``` # Sum of DFCs + bias == probabality. bias = pred_dicts[0]['bias'] dfc_prob = df_dfc.sum(axis=1) + bias np.testing.assert_almost_equal(dfc_prob.values, probs.values) ``` Plot DFCs for an individual passenger. ``` # Plot results. ID = 182 example = df_dfc.iloc[ID] # Choose ith example from evaluation set. TOP_N = 8 # View top 8 features. sorted_ix = example.abs().sort_values()[-TOP_N:].index ax = example[sorted_ix].plot(kind='barh', color=sns_colors[3]) ax.grid(False, axis='y') ax.set_title('Feature contributions for example {}\n pred: {:1.2f}; label: {}'.format(ID, probs[ID], labels[ID])) ax.set_xlabel('Contribution to predicted probability') plt.show() ``` The larger magnitude contributions have a larger impact on the model's prediction. Negative contributions indicate the feature value for this given example reduced the model's prediction, while positive values contribute an increase in the prediction. ### Improved plotting Let's make the plot nice by color coding based on the contributions' directionality and add the feature values on figure. ``` # Boilerplate code for plotting :) def _get_color(value): """To make positive DFCs plot green, negative DFCs plot red.""" green, red = sns.color_palette()[2:4] if value >= 0: return green return red def _add_feature_values(feature_values, ax): """Display feature's values on left of plot.""" x_coord = ax.get_xlim()[0] OFFSET = 0.15 for y_coord, (feat_name, feat_val) in enumerate(feature_values.items()): t = plt.text(x_coord, y_coord - OFFSET, '{}'.format(feat_val), size=12) t.set_bbox(dict(facecolor='white', alpha=0.5)) from matplotlib.font_manager import FontProperties font = FontProperties() font.set_weight('bold') t = plt.text(x_coord, y_coord + 1 - OFFSET, 'feature\nvalue', fontproperties=font, size=12) def plot_example(example): TOP_N = 8 # View top 8 features. sorted_ix = example.abs().sort_values()[-TOP_N:].index # Sort by magnitude. example = example[sorted_ix] colors = example.map(_get_color).tolist() ax = example.to_frame().plot(kind='barh', color=[colors], legend=None, alpha=0.75, figsize=(10,6)) ax.grid(False, axis='y') ax.set_yticklabels(ax.get_yticklabels(), size=14) # Add feature values. _add_feature_values(dfeval.iloc[ID][sorted_ix], ax) return ax ``` Plot example. ``` example = df_dfc.iloc[ID] # Choose IDth example from evaluation set. ax = plot_example(example) ax.set_title('Feature contributions for example {}\n pred: {:1.2f}; label: {}'.format(ID, probs[ID], labels[ID])) ax.set_xlabel('Contribution to predicted probability', size=14) plt.show() ``` You can also plot the example's DFCs compare with the entire distribution using a voilin plot. ``` # Boilerplate plotting code. def dist_violin_plot(df_dfc, ID): # Initialize plot. fig, ax = plt.subplots(1, 1, figsize=(10, 6)) # Create example dataframe. TOP_N = 8 # View top 8 features. 
example = df_dfc.iloc[ID] ix = example.abs().sort_values()[-TOP_N:].index example = example[ix] example_df = example.to_frame(name='dfc') # Add contributions of entire distribution. parts=ax.violinplot([df_dfc[w] for w in ix], vert=False, showextrema=False, widths=0.7, positions=np.arange(len(ix))) face_color = sns_colors[0] alpha = 0.15 for pc in parts['bodies']: pc.set_facecolor(face_color) pc.set_alpha(alpha) # Add feature values. _add_feature_values(dfeval.iloc[ID][sorted_ix], ax) # Add local contributions. ax.scatter(example, np.arange(example.shape[0]), color=sns.color_palette()[2], s=100, marker="s", label='contributions for example') # Legend # Proxy plot, to show violinplot dist on legend. ax.plot([0,0], [1,1], label='eval set contributions\ndistributions', color=face_color, alpha=alpha, linewidth=10) legend = ax.legend(loc='lower right', shadow=True, fontsize='x-large', frameon=True) legend.get_frame().set_facecolor('white') # Format plot. ax.set_yticks(np.arange(example.shape[0])) ax.set_yticklabels(example.index) ax.grid(False, axis='y') ax.set_xlabel('Contribution to predicted probability', size=14) ``` Plot this example. ``` dist_violin_plot(df_dfc, ID) plt.title('Feature contributions for example {}\n pred: {:1.2f}; label: {}'.format(ID, probs[ID], labels[ID])) plt.show() ``` Finally, third-party tools, such as [LIME](https://github.com/marcotcr/lime) and [shap](https://github.com/slundberg/shap), can also help understand individual predictions for a model. ## Global feature importances Additionally, you might want to understand the model as a whole, rather than studying individual predictions. Below, you will compute and use: * Gain-based feature importances using `est.experimental_feature_importances` * Permutation importances * Aggregate DFCs using `est.experimental_predict_with_explanations` Gain-based feature importances measure the loss change when splitting on a particular feature, while permutation feature importances are computed by evaluating model performance on the evaluation set by shuffling each feature one-by-one and attributing the change in model performance to the shuffled feature. In general, permutation feature importance are preferred to gain-based feature importance, though both methods can be unreliable in situations where potential predictor variables vary in their scale of measurement or their number of categories and when features are correlated ([source](https://bmcbioinformatics.biomedcentral.com/articles/10.1186/1471-2105-9-307)). Check out [this article](http://explained.ai/rf-importance/index.html) for an in-depth overview and great discussion on different feature importance types. ### Gain-based feature importances Gain-based feature importances are built into the TensorFlow Boosted Trees estimators using `est.experimental_feature_importances`. ``` importances = est.experimental_feature_importances(normalize=True) df_imp = pd.Series(importances) # Visualize importances. N = 8 ax = (df_imp.iloc[0:N][::-1] .plot(kind='barh', color=sns_colors[0], title='Gain feature importances', figsize=(10, 6))) ax.grid(False, axis='y') ``` ### Average absolute DFCs You can also average the absolute values of DFCs to understand impact at a global level. ``` # Plot. dfc_mean = df_dfc.abs().mean() N = 8 sorted_ix = dfc_mean.abs().sort_values()[-N:].index # Average and sort by absolute. 
ax = dfc_mean[sorted_ix].plot(kind='barh', color=sns_colors[1], title='Mean |directional feature contributions|', figsize=(10, 6)) ax.grid(False, axis='y') ``` You can also see how DFCs vary as a feature value varies. ``` FEATURE = 'fare' feature = pd.Series(df_dfc[FEATURE].values, index=dfeval[FEATURE].values).sort_index() ax = sns.regplot(feature.index.values, feature.values, lowess=True) ax.set_ylabel('contribution') ax.set_xlabel(FEATURE) ax.set_xlim(0, 100) plt.show() ``` ### Permutation feature importance ``` def permutation_importances(est, X_eval, y_eval, metric, features): """Column by column, shuffle values and observe effect on eval set. source: http://explained.ai/rf-importance/index.html A similar approach can be done during training. See "Drop-column importance" in the above article.""" baseline = metric(est, X_eval, y_eval) imp = [] for col in features: save = X_eval[col].copy() X_eval[col] = np.random.permutation(X_eval[col]) m = metric(est, X_eval, y_eval) X_eval[col] = save imp.append(baseline - m) return np.array(imp) def accuracy_metric(est, X, y): """TensorFlow estimator accuracy.""" eval_input_fn = make_input_fn(X, y=y, shuffle=False, n_epochs=1) return est.evaluate(input_fn=eval_input_fn)['accuracy'] features = CATEGORICAL_COLUMNS + NUMERIC_COLUMNS importances = permutation_importances(est, dfeval, y_eval, accuracy_metric, features) df_imp = pd.Series(importances, index=features) sorted_ix = df_imp.abs().sort_values().index ax = df_imp[sorted_ix][-5:].plot(kind='barh', color=sns_colors[2], figsize=(10, 6)) ax.grid(False, axis='y') ax.set_title('Permutation feature importance') plt.show() ``` ## Visualizing model fitting Lets first simulate/create training data using the following formula: $$z=x* e^{-x^2 - y^2}$$ Where \\(z\\) is the dependent variable you are trying to predict and \\(x\\) and \\(y\\) are the features. ``` from numpy.random import uniform, seed from matplotlib.mlab import griddata # Create fake data seed(0) npts = 5000 x = uniform(-2, 2, npts) y = uniform(-2, 2, npts) z = x*np.exp(-x**2 - y**2) # Prep data for training. df = pd.DataFrame({'x': x, 'y': y, 'z': z}) xi = np.linspace(-2.0, 2.0, 200), yi = np.linspace(-2.1, 2.1, 210), xi,yi = np.meshgrid(xi, yi) df_predict = pd.DataFrame({ 'x' : xi.flatten(), 'y' : yi.flatten(), }) predict_shape = xi.shape def plot_contour(x, y, z, **kwargs): # Grid the data. plt.figure(figsize=(10, 8)) # Contour the gridded data, plotting dots at the nonuniform data points. CS = plt.contour(x, y, z, 15, linewidths=0.5, colors='k') CS = plt.contourf(x, y, z, 15, vmax=abs(zi).max(), vmin=-abs(zi).max(), cmap='RdBu_r') plt.colorbar() # Draw colorbar. # Plot data points. plt.xlim(-2, 2) plt.ylim(-2, 2) ``` You can visualize the function. Redder colors correspond to larger function values. ``` zi = griddata(x, y, z, xi, yi, interp='linear') plot_contour(xi, yi, zi) plt.scatter(df.x, df.y, marker='.') plt.title('Contour on training data') plt.show() fc = [tf.feature_column.numeric_column('x'), tf.feature_column.numeric_column('y')] def predict(est): """Predictions from a given estimator.""" predict_input_fn = lambda: tf.data.Dataset.from_tensors(dict(df_predict)) preds = np.array([p['predictions'][0] for p in est.predict(predict_input_fn)]) return preds.reshape(predict_shape) ``` First let's try to fit a linear model to the data. ``` train_input_fn = make_input_fn(df, df.z) est = tf.estimator.LinearRegressor(fc) est.train(train_input_fn, max_steps=500); plot_contour(xi, yi, predict(est)) ``` It's not a very good fit. 
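To put a number on how poor the linear fit is, one option is to score its predictions on the training points with a plain R². This is a sketch only: the `r_squared` helper is introduced here and is not part of the original tutorial, and it assumes `est`, `df`, and `make_input_fn` from the cells above.

```
# Hypothetical goodness-of-fit check for the linear model above.
def r_squared(y_true, y_pred):
    ss_res = np.sum((y_true - y_pred) ** 2)          # residual sum of squares
    ss_tot = np.sum((y_true - y_true.mean()) ** 2)   # total sum of squares
    return 1.0 - ss_res / ss_tot

lin_pred_fn = make_input_fn(df, df.z, shuffle=False, n_epochs=1)
z_pred = np.array([p['predictions'][0] for p in est.predict(lin_pred_fn)])
print('Linear model R^2 on the training data: {:.3f}'.format(
    r_squared(df.z.values, z_pred)))
```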
Next let's try to fit a GBDT model to it and try to understand how the model fits the function. ``` def create_bt_est(n_trees): return tf.estimator.BoostedTreesRegressor(fc, n_batches_per_layer=1, n_trees=n_trees) N_TREES = [1,2,3,4,10,20,50,100] for n in N_TREES: est = create_bt_est(n) est.train(train_input_fn, max_steps=500) plot_contour(xi, yi, predict(est)) plt.text(-1.8, 2.1, '# trees: {}'.format(n), color='w', backgroundcolor='black', size=20) plt.show() ``` As you increase the number of trees, the model's predictions better approximates the underlying function. ## Conclusion In this tutorial you learned how to interpret Boosted Trees models using directional feature contributions and feature importance techniques. These techniques provide insight into how the features impact a model's predictions. Finally, you also gained intution for how a Boosted Tree model fits a complex function by viewing the decision surface for several models.
# Cycle-GAN ## Model Schema Definition The purpose of this notebook is to create in a simple format the schema of the solution proposed to colorize pictures with a Cycle-GAN accelerated with FFT convolutions.<p>To create a simple model schema this notebook will present the code for a Cycle-GAN built as a MVP (Minimum Viable Product) that works with the problem proposed. ``` import re import os import urllib.request import numpy as np import random import pickle from PIL import Image from skimage import color import matplotlib.pyplot as plt from glob import glob from keras.preprocessing import image from keras.preprocessing.image import ImageDataGenerator from keras.models import Model from keras.layers import Conv2D, MaxPooling2D, Activation, BatchNormalization, UpSampling2D, Dropout, Flatten, Dense, Input, LeakyReLU, Conv2DTranspose,AveragePooling2D, Concatenate from keras.models import load_model from keras.optimizers import Adam from keras.models import Sequential from tensorflow.compat.v1 import set_random_seed import numpy as np import matplotlib.pyplot as plt import pickle import keras.backend as K import boto3 import time from copy import deepcopy %%time %matplotlib inline #import tqdm seperately and use jupyter notebooks %%capture %%capture from tqdm import tqdm_notebook as tqdm #enter your bucket name and use boto3 to identify your region if you don't know it bucket = None region = boto3.Session().region_name #add your bucket then creat the containers to download files and send to bucket role = get_execution_role() bucket = None # customize to your bucket containers = {'us-west-2': '433757028032.dkr.ecr.us-west-2.amazonaws.com/image-classification:latest', 'us-east-1': '811284229777.dkr.ecr.us-east-1.amazonaws.com/image-classification:latest', 'us-east-2': '825641698319.dkr.ecr.us-east-2.amazonaws.com/image-classification:latest', 'eu-west-1': '685385470294.dkr.ecr.eu-west-1.amazonaws.com/image-classification:latest'} training_image = containers[boto3.Session().region_name] def download(url): ''' Downloads the file of a given url ''' filename = url.split("/")[-1] if not os.path.exists(filename): urllib.request.urlretrieve(url, filename) def upload_to_s3(channel, file): ''' Save file in a given folder in the S3 bucket ''' s3 = boto3.resource('s3') data = open(file, "rb") key = channel + '/' + file s3.Bucket(bucket).put_object(Key=key, Body=data) # MPII Human Pose download('https://datasets.d2.mpi-inf.mpg.de/andriluka14cvpr/mpii_human_pose_v1.tar.gz') upload_to_s3('people', 'mpii_human_pose_v1.tar.gz') #untar the file !tar xvzf mpii_human_pose_v1.tar.gz #MIT coastal download('http://cvcl.mit.edu/scenedatabase/coast.zip') upload_to_s3('coast', 'coast.zip') #unzip the file !unzip coast.zip -d ./data def image_read(file, size=(256,256)): ''' This function loads and resizes the image to the passed size. Default image size is set to be 256x256 ''' image = image.load_img(file, target_size=size) image = image.img_to_array(img) return image def image_convert(file_paths,size=256,channels=3): ''' Redimensions images to Numpy arrays of a certain size and channels. Default values are set to 256x256x3 for coloured images. 
Parameters: file_paths: a path to the image files size: an int or a 2x2 tuple to define the size of an image channels: number of channels to define in the numpy array ''' # If size is an int if isinstance(size, int): # build a zeros matrix of the size of the image all_images_to_array = np.zeros((len(file_paths), size, size, channels), dtype='int64') for ind, i in enumerate(file_paths): # reads image img = image_read(i) all_images_to_array[ind] = img.astype('int64') print('All Images shape: {} size: {:,}'.format(all_images_to_array.shape, all_images_to_array.size)) else: all_images_to_array = np.zeros((len(file_paths), size[0], size[1], channels), dtype='int64') for ind, i in enumerate(file_paths): img = read_img(i) all_images_to_array[ind] = img.astype('int64') print('All Images shape: {} size: {:,}'.format(all_images_to_array.shape, all_images_to_array.size)) return all_images_to_array file_paths = glob(r'./images/*.jpg') X_train = image_convert(file_paths) def rgb_to_lab(img, l=False, ab=False): """ Takes in RGB channels in range 0-255 and outputs L or AB channels in range -1 to 1 """ img = img / 255 l = color.rgb2lab(img)[:,:,0] l = l / 50 - 1 l = l[...,np.newaxis] ab = color.rgb2lab(img)[:,:,1:] ab = (ab + 128) / 255 * 2 - 1 if l: return l else: return ab def lab_to_rgb(img): """ Takes in LAB channels in range -1 to 1 and out puts RGB chanels in range 0-255 """ new_img = np.zeros((256,256,3)) for i in range(len(img)): for j in range(len(img[i])): pix = img[i,j] new_img[i,j] = [(pix[0] + 1) * 50,(pix[1] +1) / 2 * 255 - 128,(pix[2] +1) / 2 * 255 - 128] new_img = color.lab2rgb(new_img) * 255 new_img = new_img.astype('uint8') return new_img L = np.array([rgb_to_lab(image,l=True)for image in X_train]) AB = np.array([rgb_to_lab(image,ab=True)for image in X_train]) L_AB_channels = (L,AB) with open('l_ab_channels.p','wb') as f: pickle.dump(L_AB_channels,f) def resnet_block(x ,num_conv=2, num_filters=512,kernel_size=(3,3),padding='same',strides=2): ''' This function defines a ResNet Block composed of two convolution layers and that returns the sum of the inputs and the convolution outputs. Parameters x: is the tensor which will be used as input to the convolution layer num_conv: is the number of convolutions inside the block num_filters: is an int that describes the number of output filters in the convolution kernel size: is an int or tuple that describes the size of the convolution window padding: padding with zeros the image so that the kernel fits the input image or not. Options: 'valid' or 'same' strides: is the number of pixels shifts over the input matrix. ''' input=x for i in num_conv: input=Conv2D(num_filters,kernel_size=kernel_size,padding=padding,strides=strides)(input) input=InstanceNormalization()(input) input=LeakyReLU(0.2)(input) return (input + x) ``` ### Generator ``` def generator(input,filters=64,num_enc_layers=4,num_resblock=4,name="Generator"): ''' The generator per se is an autoencoder built by a series of convolution layers that initially extract features of the input image. ''' # defining input input=Input(shape=(256,256,1)) x=input ''' Adding first layer of the encoder model: 64 filters, 5x5 kernel size, 2 so the input size is reduced to half, input size is the image size: (256,256,1), number of channels 1 for the luminosity channel. We will use InstanceNormalization through the model and Leaky Relu with and alfa of 0.2 as activation function for the encoder, while relu as activation for the decoder. between both of them, in the latent space we insert 4 resnet blocks. 
''' for lay in num_enc_layers: x=Conv2D(filters*lay,(5,5),padding='same',strides=2,input_shape=(256,256,1))(x) x=InstanceNormalization()(x) x=LeakyReLU(0.2)(x) x=Conv2D(128,(3,3),padding='same',strides=2)(x) x=InstanceNormalization()(x) x=LeakyReLU(0.2)(x) x=Conv2D(256,(3,3),padding='same',strides=2)(x) x=InstanceNormalization()(x) x=LeakyReLU(0.2)(x) x=Conv2D(512,(3,3),padding='same',strides=2)(x) x=InstanceNormalization()(x) x=LeakyReLU(0.2)(x) ''' ----------------------------------LATENT SPACE--------------------------------------------- ''' for r in num_resblock: x=resnet_block(x) ''' ----------------------------------LATENT SPACE--------------------------------------------- ''' x=Conv2DTranspose(256,(3,3),padding='same',strides=2)(x) x=InstanceNormalization()(x) x=Activation('relu')(x) x=Conv2DTranspose(128,(3,3),padding='same',strides=2)(x) x=InstanceNormalization()(x) x=Activation('relu')(x) x=Conv2DTranspose(64,(3,3),padding='same',strides=2)(x) x=InstanceNormalization()(x) x=Activation('relu')(x) x=Conv2DTranspose(32,(5,5),padding='same',strides=2)(x) x=InstanceNormalization()(x) x=Activation('relu')(x) x=Conv2D(2,(3,3),padding='same')(x) output=Activation('tanh')(x) model=Model(input,output,name=name) return model ``` ## Discriminator ``` def discriminator(input,name="Discriminator"): # importing libraries from keras.layers import Conv2D, MaxPooling2D, Activation, BatchNormalization, UpSampling2D, Dropout, Flatten, Dense, Input, LeakyReLU, Conv2DTranspose,AveragePooling2D, Concatenate from tensorflow_addons import InstanceNormalization # defining input input=Input(shape=(256,256,2)) x=input x=Conv2D(32,(3,3), padding='same',strides=2,input_shape=(256,256,2))(x) x=LeakyReLU(0.2)(x) x=Dropout(0.25)(x) x=Conv2D(64,(3,3),padding='same',strides=2)(x) x=BatchNormalization() x=LeakyReLU(0.2)(x) x=Dropout(0.25)(x) x=Conv2D(128,(3,3), padding='same', strides=2)(x) x=BatchNormalization()(x) x=LeakyReLU(0.2)(x) x=Dropout(0.25)(x) x=Conv2D(256,(3,3), padding='same',strides=2)(x) x=BatchNormalization()(x) x=LeakyReLU(0.2)(x) x=Dropout(0.25)(x) x=Flatten()(x) x=Dense(1)(x) output=Activation('sigmoid')(x) model=Model(input,output,name=name) return model ``` ## Building GAN Model ``` # Building discriminators discriminator_A=discriminator(input_a,"discriminator_A") discriminator_B=discriminator(input_b,"discriminator_A") discriminator_A.trainable = False discriminator_B.trainable = False # Building generator generator_B = generator(input_a,"Generator_A_B") generator_A = generator(input_b,"Generator_B_A") decision_A=discriminator(generator_a,"Discriminator_A") decision_B=discriminator(generator_B,"Discriminator_B") cycle_A=generator(generator_b,"Generator_B_A") cycle_B=generator(generator_A,"Generator_A_B") #creates lists to log the losses and accuracy gen_losses = [] disc_real_losses = [] disc_fake_losses=[] disc_acc = [] #train the generator on a full set of 320 and the discriminator on a half set of 160 for each epoch #discriminator is given real and fake y's while generator is always given real y's n = 320 y_train_fake = np.zeros([160,1]) y_train_real = np.ones([160,1]) y_gen = np.ones([n,1]) #Optional label smoothing #y_train_real -= .1 #Pick batch size and number of epochs, number of epochs depends on the number of photos per epoch set above num_epochs=1500 batch_size=32 #run and train until photos meet expectations (stop & restart model with tweaks if loss goes to 0 in discriminator) for epoch in tqdm(range(1,num_epochs+1)): #shuffle L and AB channels then take a subset corresponding to 
each networks training size np.random.shuffle(X_train_L) l = X_train_L[:n] np.random.shuffle(X_train_AB) ab = X_train_AB[:160] fake_images = generator.predict(l[:160], verbose=1) #Train on Real AB channels d_loss_real = discriminator.fit(x=ab, y= y_train_real,batch_size=32,epochs=1,verbose=1) disc_real_losses.append(d_loss_real.history['loss'][-1]) #Train on fake AB channels d_loss_fake = discriminator.fit(x=fake_images,y=y_train_fake,batch_size=32,epochs=1,verbose=1) disc_fake_losses.append(d_loss_fake.history['loss'][-1]) #append the loss and accuracy and print loss disc_acc.append(d_loss_fake.history['acc'][-1]) #Train the gan by producing AB channels from L g_loss = combined_network.fit(x=l, y=y_gen,batch_size=32,epochs=1,verbose=1) #append and print generator loss gen_losses.append(g_loss.history['loss'][-1]) #every 50 epochs it prints a generated photo and every 100 it saves the model under that epoch if epoch % 50 == 0: print('Reached epoch:',epoch) pred = generator.predict(X_test_L[2].reshape(1,256,256,1)) img = lab_to_rgb(np.dstack((X_test_L[2],pred.reshape(256,256,2)))) plt.imshow(img) plt.show() if epoch % 100 == 0: generator.save('generator_' + str(epoch)+ '_v3.h5') img_height = 256 img_width = 256 img_layer = 3 img_size = img_height * img_width to_train = True to_test = False to_restore = False output_path = "./output" check_dir = "./output/checkpoints/" temp_check = 0 max_epoch = 1 max_images = 100 h1_size = 150 h2_size = 300 z_size = 100 batch_size = 1 pool_size = 50 sample_size = 10 save_training_images = True ngf = 32 ndf = 64 class CycleGAN(): def input_setup(self): ''' This function basically setup variables for taking image input. filenames_A/filenames_B -> takes the list of all training images self.image_A/self.image_B -> Input image with each values ranging from [-1,1] ''' filenames_A = tf.train.match_filenames_once("./input/horse2zebra/trainA/*.jpg") self.queue_length_A = tf.size(filenames_A) filenames_B = tf.train.match_filenames_once("./input/horse2zebra/trainB/*.jpg") self.queue_length_B = tf.size(filenames_B) filename_queue_A = tf.train.string_input_producer(filenames_A) filename_queue_B = tf.train.string_input_producer(filenames_B) image_reader = tf.WholeFileReader() _, image_file_A = image_reader.read(filename_queue_A) _, image_file_B = image_reader.read(filename_queue_B) self.image_A = tf.subtract(tf.div(tf.image.resize_images(tf.image.decode_jpeg(image_file_A),[256,256]),127.5),1) self.image_B = tf.subtract(tf.div(tf.image.resize_images(tf.image.decode_jpeg(image_file_B),[256,256]),127.5),1) def input_read(self, sess): ''' It reads the input into from the image folder. 
self.fake_images_A/self.fake_images_B -> List of generated images used for calculation of loss function of Discriminator self.A_input/self.B_input -> Stores all the training images in python list ''' # Loading images into the tensors coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(coord=coord) num_files_A = sess.run(self.queue_length_A) num_files_B = sess.run(self.queue_length_B) self.fake_images_A = np.zeros((pool_size,1,img_height, img_width, img_layer)) self.fake_images_B = np.zeros((pool_size,1,img_height, img_width, img_layer)) self.A_input = np.zeros((max_images, batch_size, img_height, img_width, img_layer)) self.B_input = np.zeros((max_images, batch_size, img_height, img_width, img_layer)) for i in range(max_images): image_tensor = sess.run(self.image_A) if(image_tensor.size() == img_size*batch_size*img_layer): self.A_input[i] = image_tensor.reshape((batch_size,img_height, img_width, img_layer)) for i in range(max_images): image_tensor = sess.run(self.image_B) if(image_tensor.size() == img_size*batch_size*img_layer): self.B_input[i] = image_tensor.reshape((batch_size,img_height, img_width, img_layer)) coord.request_stop() coord.join(threads) def model_setup(self): ''' This function sets up the model to train self.input_A/self.input_B -> Set of training images. self.fake_A/self.fake_B -> Generated images by corresponding generator of input_A and input_B self.lr -> Learning rate variable self.cyc_A/ self.cyc_B -> Images generated after feeding self.fake_A/self.fake_B to corresponding generator. This is use to calcualte cyclic loss ''' self.input_A = tf.placeholder(tf.float32, [batch_size, img_width, img_height, img_layer], name="input_A") self.input_B = tf.placeholder(tf.float32, [batch_size, img_width, img_height, img_layer], name="input_B") self.fake_pool_A = tf.placeholder(tf.float32, [None, img_width, img_height, img_layer], name="fake_pool_A") self.fake_pool_B = tf.placeholder(tf.float32, [None, img_width, img_height, img_layer], name="fake_pool_B") self.global_step = tf.Variable(0, name="global_step", trainable=False) self.num_fake_inputs = 0 self.lr = tf.placeholder(tf.float32, shape=[], name="lr") with tf.variable_scope("Model") as scope: self.fake_B = build_generator_resnet_9blocks(self.input_A, name="g_A") self.fake_A = build_generator_resnet_9blocks(self.input_B, name="g_B") self.rec_A = build_gen_discriminator(self.input_A, "d_A") self.rec_B = build_gen_discriminator(self.input_B, "d_B") scope.reuse_variables() self.fake_rec_A = build_gen_discriminator(self.fake_A, "d_A") self.fake_rec_B = build_gen_discriminator(self.fake_B, "d_B") self.cyc_A = build_generator_resnet_9blocks(self.fake_B, "g_B") self.cyc_B = build_generator_resnet_9blocks(self.fake_A, "g_A") scope.reuse_variables() self.fake_pool_rec_A = build_gen_discriminator(self.fake_pool_A, "d_A") self.fake_pool_rec_B = build_gen_discriminator(self.fake_pool_B, "d_B") def loss_calc(self): ''' In this function we are defining the variables for loss calcultions and traning model d_loss_A/d_loss_B -> loss for discriminator A/B g_loss_A/g_loss_B -> loss for generator A/B *_trainer -> Variaous trainer for above loss functions *_summ -> Summary variables for above loss functions''' cyc_loss = tf.reduce_mean(tf.abs(self.input_A-self.cyc_A)) + tf.reduce_mean(tf.abs(self.input_B-self.cyc_B)) disc_loss_A = tf.reduce_mean(tf.squared_difference(self.fake_rec_A,1)) disc_loss_B = tf.reduce_mean(tf.squared_difference(self.fake_rec_B,1)) g_loss_A = cyc_loss*10 + disc_loss_B g_loss_B = cyc_loss*10 + disc_loss_A 
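# Descriptive comments added for clarity (the numbers below come from the code
# itself): g_loss_A / g_loss_B combine the cycle-consistency term, weighted by
# a factor of 10, with the least-squares adversarial loss from the opposite
# domain's discriminator. The discriminator losses that follow average the
# penalty on pooled fake images against the penalty on real images and divide
# by 2, which slows the discriminator relative to the generators.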
d_loss_A = (tf.reduce_mean(tf.square(self.fake_pool_rec_A)) + tf.reduce_mean(tf.squared_difference(self.rec_A,1)))/2.0 d_loss_B = (tf.reduce_mean(tf.square(self.fake_pool_rec_B)) + tf.reduce_mean(tf.squared_difference(self.rec_B,1)))/2.0 optimizer = tf.train.AdamOptimizer(self.lr, beta1=0.5) self.model_vars = tf.trainable_variables() d_A_vars = [var for var in self.model_vars if 'd_A' in var.name] g_A_vars = [var for var in self.model_vars if 'g_A' in var.name] d_B_vars = [var for var in self.model_vars if 'd_B' in var.name] g_B_vars = [var for var in self.model_vars if 'g_B' in var.name] self.d_A_trainer = optimizer.minimize(d_loss_A, var_list=d_A_vars) self.d_B_trainer = optimizer.minimize(d_loss_B, var_list=d_B_vars) self.g_A_trainer = optimizer.minimize(g_loss_A, var_list=g_A_vars) self.g_B_trainer = optimizer.minimize(g_loss_B, var_list=g_B_vars) for var in self.model_vars: print(var.name) #Summary variables for tensorboard self.g_A_loss_summ = tf.summary.scalar("g_A_loss", g_loss_A) self.g_B_loss_summ = tf.summary.scalar("g_B_loss", g_loss_B) self.d_A_loss_summ = tf.summary.scalar("d_A_loss", d_loss_A) self.d_B_loss_summ = tf.summary.scalar("d_B_loss", d_loss_B) def save_training_images(self, sess, epoch): if not os.path.exists("./output/imgs"): os.makedirs("./output/imgs") for i in range(0,10): fake_A_temp, fake_B_temp, cyc_A_temp, cyc_B_temp = sess.run([self.fake_A, self.fake_B, self.cyc_A, self.cyc_B],feed_dict={self.input_A:self.A_input[i], self.input_B:self.B_input[i]}) imsave("./output/imgs/fakeB_"+ str(epoch) + "_" + str(i)+".jpg",((fake_A_temp[0]+1)*127.5).astype(np.uint8)) imsave("./output/imgs/fakeA_"+ str(epoch) + "_" + str(i)+".jpg",((fake_B_temp[0]+1)*127.5).astype(np.uint8)) imsave("./output/imgs/cycA_"+ str(epoch) + "_" + str(i)+".jpg",((cyc_A_temp[0]+1)*127.5).astype(np.uint8)) imsave("./output/imgs/cycB_"+ str(epoch) + "_" + str(i)+".jpg",((cyc_B_temp[0]+1)*127.5).astype(np.uint8)) imsave("./output/imgs/inputA_"+ str(epoch) + "_" + str(i)+".jpg",((self.A_input[i][0]+1)*127.5).astype(np.uint8)) imsave("./output/imgs/inputB_"+ str(epoch) + "_" + str(i)+".jpg",((self.B_input[i][0]+1)*127.5).astype(np.uint8)) def fake_image_pool(self, num_fakes, fake, fake_pool): ''' This function saves the generated image to corresponding pool of images. In starting. 
It keeps on feeling the pool till it is full and then randomly selects an already stored image and replace it with new one.''' if(num_fakes < pool_size): fake_pool[num_fakes] = fake return fake else : p = random.random() if p > 0.5: random_id = random.randint(0,pool_size-1) temp = fake_pool[random_id] fake_pool[random_id] = fake return temp else : return fake def train(self): ''' Training Function ''' # Load Dataset from the dataset folder self.input_setup() #Build the network self.model_setup() #Loss function calculations self.loss_calc() # Initializing the global variables init = tf.global_variables_initializer() saver = tf.train.Saver() with tf.Session() as sess: sess.run(init) #Read input to nd array self.input_read(sess) #Restore the model to run the model from last checkpoint if to_restore: chkpt_fname = tf.train.latest_checkpoint(check_dir) saver.restore(sess, chkpt_fname) writer = tf.summary.FileWriter("./output/2") if not os.path.exists(check_dir): os.makedirs(check_dir) # Training Loop for epoch in range(sess.run(self.global_step),100): print ("In the epoch ", epoch) saver.save(sess,os.path.join(check_dir,"cyclegan"),global_step=epoch) # Dealing with the learning rate as per the epoch number if(epoch < 100) : curr_lr = 0.0002 else: curr_lr = 0.0002 - 0.0002*(epoch-100)/100 if(save_training_images): self.save_training_images(sess, epoch) # sys.exit() for ptr in range(0,max_images): print("In the iteration ",ptr) print("Starting",time.time()*1000.0) # Optimizing the G_A network _, fake_B_temp, summary_str = sess.run([self.g_A_trainer, self.fake_B, self.g_A_loss_summ],feed_dict={self.input_A:self.A_input[ptr], self.input_B:self.B_input[ptr], self.lr:curr_lr}) writer.add_summary(summary_str, epoch*max_images + ptr) fake_B_temp1 = self.fake_image_pool(self.num_fake_inputs, fake_B_temp, self.fake_images_B) # Optimizing the D_B network _, summary_str = sess.run([self.d_B_trainer, self.d_B_loss_summ],feed_dict={self.input_A:self.A_input[ptr], self.input_B:self.B_input[ptr], self.lr:curr_lr, self.fake_pool_B:fake_B_temp1}) writer.add_summary(summary_str, epoch*max_images + ptr) # Optimizing the G_B network _, fake_A_temp, summary_str = sess.run([self.g_B_trainer, self.fake_A, self.g_B_loss_summ],feed_dict={self.input_A:self.A_input[ptr], self.input_B:self.B_input[ptr], self.lr:curr_lr}) writer.add_summary(summary_str, epoch*max_images + ptr) fake_A_temp1 = self.fake_image_pool(self.num_fake_inputs, fake_A_temp, self.fake_images_A) # Optimizing the D_A network _, summary_str = sess.run([self.d_A_trainer, self.d_A_loss_summ],feed_dict={self.input_A:self.A_input[ptr], self.input_B:self.B_input[ptr], self.lr:curr_lr, self.fake_pool_A:fake_A_temp1}) writer.add_summary(summary_str, epoch*max_images + ptr) self.num_fake_inputs+=1 sess.run(tf.assign(self.global_step, epoch + 1)) writer.add_graph(sess.graph) def test(self): ''' Testing Function''' print("Testing the results") self.input_setup() self.model_setup() saver = tf.train.Saver() init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) self.input_read(sess) chkpt_fname = tf.train.latest_checkpoint(check_dir) saver.restore(sess, chkpt_fname) if not os.path.exists("./output/imgs/test/"): os.makedirs("./output/imgs/test/") for i in range(0,100): fake_A_temp, fake_B_temp = sess.run([self.fake_A, self.fake_B],feed_dict={self.input_A:self.A_input[i], self.input_B:self.B_input[i]}) imsave("./output/imgs/test/fakeB_"+str(i)+".jpg",((fake_A_temp[0]+1)*127.5).astype(np.uint8)) 
imsave("./output/imgs/test/fakeA_"+str(i)+".jpg",((fake_B_temp[0]+1)*127.5).astype(np.uint8)) imsave("./output/imgs/test/inputA_"+str(i)+".jpg",((self.A_input[i][0]+1)*127.5).astype(np.uint8)) imsave("./output/imgs/test/inputB_"+str(i)+".jpg",((self.B_input[i][0]+1)*127.5).astype(np.uint8)) def main(): model = CycleGAN() if to_train: model.train() elif to_test: model.test() if __name__ == '__main__': main() ```
<a href="https://qworld.net" target="_blank" align="left"><img src="../qworld/images/header.jpg" align="left"></a> $$ \newcommand{\set}[1]{\left\{#1\right\}} \newcommand{\abs}[1]{\left\lvert#1\right\rvert} \newcommand{\norm}[1]{\left\lVert#1\right\rVert} \newcommand{\inner}[2]{\left\langle#1,#2\right\rangle} \newcommand{\bra}[1]{\left\langle#1\right|} \newcommand{\ket}[1]{\left|#1\right\rangle} \newcommand{\braket}[2]{\left\langle#1|#2\right\rangle} \newcommand{\ketbra}[2]{\left|#1\right\rangle\left\langle#2\right|} \newcommand{\angleset}[1]{\left\langle#1\right\rangle} \newcommand{\expected}[1]{\left\langle#1\right\rangle} \newcommand{\dv}[2]{\frac{d#1}{d#2}} \newcommand{\real}[0]{\mathfrak{Re}} $$ # Projective Measurement _prepared by Israel Gelover_ ### <a name="definition_3_6">Definition 3.6</a> Projector Given a subset of vectors $\set{\ket{f_i}}_{i=1}^n \subset \mathcal{H}$, we define the _Projector_ over the subspace $\mathcal{F}$ generated by them as: \begin{equation*} \begin{split} \hat{P}:\mathcal{H} &\to \mathcal{F} \\ \ket{\psi} &\to \sum_{i=1}^n \ket{f_i}\braket{f_i}{\psi} \end{split} \end{equation*} It is clear that what we obtain from this operator is a linear combination of $\set{\ket{f_i}}_{i=1}^n$ and therefore, the resulting vector is an element of the subspace generated by these vectors. And it is precisely this definition what we used to calculate the <a href="./WorkedExample.ipynb#5">Wave function collapse</a>. ### <a name="definition_3_7">Definition 3.7</a> Projective Measurement A _Projective Measurement_ is described with a self-adjoint operator \begin{equation*} \hat{M} = \sum_m m\hat{P}_m \end{equation*} Where $\hat{P}_m$ is a projector on the subspace corresponding to the eigenvalue $m$ of $\hat{M}$. This is known as the spectral decomposition of the $\hat{M}$ operator, and any self-adjoint operator can be expressed in terms of its spectral decomposition. We emphasize that this way of decomposing a projective measurement is very useful to us since it involves the eigenvalues and the projectors associated with these eigenvalues. ### Example Let \begin{equation}\label{op_h} \hat{H} = \ketbra{0} + i\ketbra{1}{2} - i\ketbra{2}{1} \end{equation} Let us recall that in the example of <a href="./WorkedExample.ipynb#3">Time evlution</a> we saw that this is a self-adjoint operator, therefore we can use it as a projective measurement, and the way to do it is by obtaining its spectral decomposition through the eigenvalues and eigenvectors that we already calculated. That is \begin{equation*} \begin{split} \varepsilon_1 = 1 \qquad&\qquad \ket{\varepsilon_1} = \ket{0} \\ \varepsilon_2 = 1 \qquad&\qquad \ket{\varepsilon_2} = \frac{1}{\sqrt{2}}(\ket{1} + i\ket{2}) \\ \varepsilon_3 = -1 \qquad&\qquad \ket{\varepsilon_3} = \frac{1}{\sqrt{2}}(\ket{1} - i\ket{2}) \end{split} \end{equation*} Note that we only have two different eigenvalues: $1$ and $-1$. The eigenvalue $1$ has multiplicity $2$ and therefore has associated a subspace of dimension $2$, while the eigenvalue $-1$ has multiplicity $1$ and therefore has associated a subspace of dimension $1$. 
Thus \begin{equation*} \hat{H} = 1\cdot\hat{P_1} + (-1)\cdot\hat{P_{-1}} \end{equation*} Where, from <a href="#definition_3_6">Definition 3.6</a> \begin{equation*} \begin{split} \hat{P_1} &= \ketbra{\varepsilon_1}{\varepsilon_1} + \ketbra{\varepsilon_2}{\varepsilon_2} \\ \hat{P_{-1}} &= \ketbra{\varepsilon_3}{\varepsilon_3} \end{split} \end{equation*} Therefore \begin{equation*} \hat{H} = \ketbra{\varepsilon_1}{\varepsilon_1} + \ketbra{\varepsilon_2}{\varepsilon_2} - \ketbra{\varepsilon_3}{\varepsilon_3} \end{equation*} Something that may not be so clear from this result is that in $\hat{H} = \ketbra{0} + i\ketbra{1}{2} - i\ketbra{2}{1}$ we have $\hat{H}$ expressed in terms of the base $\set{\ket{0}, \ket{1}, \ket{2}}$ and what the spectral decomposition is doing is diagonalize the operator $\hat{H}$, since we are expressing it in terms of its eigenvectors and that what turns out to be is a diagonal matrix, in this case \begin{equation*} \hat{H} = \begin{pmatrix} 1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & -1 \end{pmatrix} \end{equation*} ## Measurement Related Postulates This formalism of projective measurements allows us on the one hand to group the postulates of quantum mechanics related to measurement in a single formalism, but on the other hand, it also allows us to focus on the state that we want to measure and on the state of the system after the measurement. Let us recall that the postulates of quantum mechanics related to measurement focus on the value that we can measure, that is, on the eigenvalue of an observable that is related to a measurable physical quantity. This formalism allows us to focus on the state in which the vector (that we originally had) ends up after measurement, and so to speak, to put aside for a bit what we are measuring. In the following proposition we are going to describe the postulates related to the measurement that we already mentioned, but in a more condensed way in two quantities. ### <a name="proposition_3_8">Proposition 3.8</a> Let $\hat{M} = \sum_m m\hat{P_m}$ be a projective measurement expressed in terms of its spectral decomposition. ($\hat{M}$ can be an observable) 1. If the system is in the state $\ket{\psi}$, the probability of measuring the eigenvalue $m$ is given by \begin{equation*} P_\psi(m) = \bra{\psi}\hat{P_m}\ket{\psi} \end{equation*} 2. The state of the system immediately after measuring the eigenvalue $m$ is given by \begin{equation*} \ket{\psi} \to \frac{\hat{P_m}\ket{\psi}}{\sqrt{P(m)}} \end{equation*} **Proof:** 1. Let's verify the first statement by calculating the expected value. Recall that by <a href="#definition_3_6">Definition 3.6</a>, the $m$ projector applied to $\ket{\psi}$ is given by \begin{equation*} \hat{P_m}\ket{\psi} = \sum_{i=1}^{g_m} \ket{m_i}\braket{m_i}{\psi} \end{equation*} where $g_m$ is the multiplicity of the eigenvalue $m$. Thus \begin{equation*} \begin{split} \bra{\psi}\hat{P_m}\ket{\psi} &= \bra{\psi} \sum_{i=1}^{g_m} \ket{m_i}\braket{m_i}{\psi} = \sum_{i=1}^{g_m} \braket{\psi}{m_i}\braket{m_i}{\psi} \\ &= \sum_{i=1}^{g_m} \braket{m_i}{\psi}^*\braket{m_i}{\psi} = \sum_{i=1}^{g_m} \abs{\braket{m_i}{\psi}}^2 \\ &= P_\psi(m) \end{split} \end{equation*} This last equality is given by <a href="./Postulates.ipynb#definition_3_1">Postulate V</a>. 2. Let's remember that projecting the vector can change its norm and therefore we need to renormalize it. Let us then calculate the magnitude of the projection, calculating the internal product of the projection with itself. 
In the previous section we gave the expression of the $m$ projector applied to $\ket{\psi}$, let's see now that \begin{equation*} \bra{\psi}\hat{P_m}^* = \sum_{i=1}^{g_m} \braket{\psi}{m_i}\bra{\psi} \end{equation*} Thus \begin{equation*} \begin{split} \abs{\hat{P_m}\ket{\psi}}^2 &= \bra{\psi}\hat{P_m}^* \hat{P_m}\ket{\psi} \\ &= \sum_{i=1}^{g_m} \braket{\psi}{m_i}\bra{\psi} \sum_{i=1}^{g_m} \ket{m_i}\braket{m_i}{\psi} \\ &= \sum_{i=1}^{g_m} \braket{\psi}{m_i} \braket{m_i} \braket{m_i}{\psi} \\ &= \sum_{i=1}^{g_m} \braket{\psi}{m_i}\braket{m_i}{\psi} \\ &= \sum_{i=1}^{g_m} \braket{m_i}{\psi}^*\braket{m_i}{\psi} \\ &= \sum_{i=1}^{g_m} \abs{\braket{m_i}{\psi}}^2 \\ &= P_\psi(m) \\ \implies \\ \abs{\hat{P_m}\ket{\psi}} &= \sqrt{P_\psi(m)} \end{split} \end{equation*} ### <a name="remark">Remark</a> In summary, with this projector formalism, we can express the measurement-related postulates in two simpler expressions: **1. The probability of measuring an eigenvalue is the expected value of the projector associated with the eigenvalue.** **2. The state of the system after measurement is obtained by applying the projector to the state and renormalizing. The normalization constant is precisely the square root of the probability, calculated in the previous section.** As we have already mentioned, this formalism is useful when we are not so interested in what we are going to measure, but rather we are interested in the state of the system after measurement. That is, instead of verifying what is the observable, calculate the eigenvalues of the observable, calculate the probability based on the eigenvectors associated with these eigenvalues, etc. All this, that is required by the quantum mechanics postulates related to measurement in order to find a probability, is already implicit in the projector formalism. On the other hand, when we talk about measurements in quantum computing, we usually refer to measurements in the computational basis, and the computational basis is the basis of the Pauli operator $\hat{\sigma_z}$. So the observable that is almost always used in quantum computing is $\hat{\sigma_z}$, that is, when talking about measuring a qubit, we are talking about measuring the observable $\hat{\sigma_z}$ and calculating the probability to find the eigenvalue $+1$ or the eigenvalue $-1$ of $\hat{\sigma_z}$. The eigenvalue $+1$ is associated with the qubit $\ket{0}$ and the eigenvalue $-1$ is associated with the qubit $\ket{1}$. Observables are very useful when we are interested in measuring magnitudes that have a physical interpretation such as momentum, position or energy, on the other hand, in quantum computing we are interested in knowing if when the measurement is made the system will be in a state $\ket{0}$ or in a state $\ket{1}$, beyond the measured eigenvalue or the observable with which you are working. It is for this reason that this formalism of projective measurements is particularly useful in this area. Let's see how we can apply it in a concrete example. ### <a name="example">Example</a> Let $\ket{\psi}$ be the state \begin{equation*} \ket{\psi} = \sqrt{\frac{3}{8}}\ket{00} + \frac{1}{2}\ket{01} + \frac{1}{2}\ket{10} + \frac{1}{\sqrt{8}}\ket{11} \in \mathbb{B}^{\otimes2} \end{equation*} We note that the state is normalized, that is, it is a valid state in $\mathbb{B}^{\otimes2}$ and thus we can answer the following questions. **1. 
What is the probability of finding the first qubit in $\ket{0}$?** To emphasize the previous comments, let's start by considering the following projective measurement, which corresponds to the expression in terms of outer products of the Pauli operator $\hat{\sigma_x}$ \begin{equation*} \hat{M} = (1)\hat{P_0} + (-1)\hat{P_1} \enspace \text{ where } \enspace \hat{P_0} = \ketbra{0}{0}, \enspace \hat{P_1} = \ketbra{1}{1} \end{equation*} We know that to answers this question we need to find the probability of measuring the eigenvalue of $\hat{M}$ associated with the qubit $\ket{0}$, but note that according to section **1.** of the previous remark, what is relevant to make this calculation is only $\hat{P_0}$. That is, we are not interested in the eigenvalue that is measured, nor the observable. To accentuate this fact, we could even have considered any other projective measurement such as \begin{equation*} \hat{M} = \alpha\hat{P_0} + \beta\hat{P_1} \end{equation*} and this would still be a self-adjoint operator and therefore a valid projective measure, for all $\alpha,\beta \in \mathbb{R}$. For all this to agree with the formalism of the postulates of quantum mechanics, we usually take $\hat{M} = \hat{\sigma_z}$ as we did initially, that is, formally from a physical point of view, what we will do is measure the observable $\hat{\sigma_z}$. However, from a mathematical point of view we can measure any projective measurement (self-adjoint operator) that distinguishes with a certain eigenvalue the qubit $\ket{0}$ and with a different eigenvalue the qubit $\ket{1}$. In summary, what is really relevant for this calculation is the projector of the eigenvalue associated with the state we want to measure, in this case what we want to calculate is $\bra{\psi}\hat{P_0}\ket{\psi}$, except that we are working on $\mathbb{B}^{\otimes2}$, but that detail will be clarified below. According to section **1.** of the previous remark, to calculate this probability, we must calculate the expected value of a projector, but we cannot simply consider $\hat{P_0}$ because of the fact that we just mentioned, that we are working on $\mathbb{B}^{\otimes2}$. Since in this case the state of the second qubit is not relevant, what we need is the following \begin{equation*} \begin{split} &\bra{\psi}\hat{P_0}\otimes\hat{I}\ket{\psi} = \\ &= \bra{\psi} \left[\ketbra{0}\otimes\hat{I}\left(\sqrt{\frac{3}{8}}\ket{00} + \frac{1}{2}\ket{01} + \frac{1}{2}\ket{10} + \frac{1}{\sqrt{8}}\ket{11} \right) \right] \\ &= \bra{\psi} \left[ \sqrt{\frac{3}{8}} \ketbra{0}\otimes\hat{I}\ket{00} + \frac{1}{2} \ketbra{0}\otimes\hat{I}\ket{01} + \frac{1}{2} \ketbra{0}\otimes\hat{I}\ket{10} + \frac{1}{\sqrt{8}} \ketbra{0}\otimes\hat{I}\ket{11} \right] \\ \end{split} \end{equation*} Let us recall from <a href="../2-math/TensorProduct.ipynb#definition_2_11">Definition 2.11</a>, that $\hat{A} \otimes \hat{B}(\ket{a} \otimes \ket{b}) = (\hat{A}\ket{a})\otimes(\hat{B}\ket{b})$. This means that we must apply the projector $\ketbra{0}{0}$ to the first qubit and the operator $\hat{I}$ to the second qubit, since its state is not relevant to us. 
Thus \begin{equation*} \begin{split} \bra{\psi}\hat{P_0}\otimes\hat{I}\ket{\psi} &= \bra{\psi} \left( \sqrt{\frac{3}{8}} \ket{00} + \frac{1}{2} \ket{01} \right) \\ &= \left(\sqrt{\frac{3}{8}}\bra{00} + \frac{1}{2}\bra{01} + \frac{1}{2}\bra{10} + \frac{1}{\sqrt{8}}\bra{11}\right) \left(\sqrt{\frac{3}{8}} \ket{00} + \frac{1}{2} \ket{01}\right) \\ &= \left(\sqrt{\frac{3}{8}}\right)\left(\sqrt{\frac{3}{8}}\right)\braket{00}{00} + \left(\frac{1}{2}\right)\left(\frac{1}{2}\right)\braket{01}{01} \\ &= \frac{3}{8} + \frac{1}{4} = \frac{5}{8} \end{split} \end{equation*} This is congruent with the intuition given by the fact that the amplitudes associated with the states where the first qubit is $\ket{0}$ are $\sqrt{\frac{3}{8}}$ and $\frac{1}{2}$ and to calculate the probability of measuring these states, we take the modulus squared of the amplitudes, which is known as _Born's Rule_. In summary, formally what we did was calculate the probability of measuring the eigenvalue $+1$ of the observable $\hat{\sigma_z}\otimes\hat{I}$, which is completely congruent with what the postulates tell us. But as we previously highlighted, for practical issues of carrying out this calculation, the only thing that was relevant, was the projector associated with the state we wanted to measure, we do not need to know the observable or the eigenvalue to measure. Which allows us to put aside a bit the formalism that the postulates entail. **2. What is the status immediately after measurement?** Section **2.** of the previous remark tells us that \begin{equation*} \begin{split} \ket{\psi} \longrightarrow \frac{\hat{P_0}\otimes\hat{I}\ket{\psi}}{\sqrt{P(\ket{0})}} &= \frac{\hat{P_0}\otimes\hat{I}\ket{\psi}}{\sqrt{\frac{5}{8}}} \\ &= \sqrt{\frac{8}{5}}\hat{P_0}\otimes\hat{I}\ket{\psi} \\ &= \sqrt{\frac{8}{5}}\left(\sqrt{\frac{3}{8}}\ket{00} + \frac{1}{2}\ket{01}\right) \\ &= \sqrt{\frac{3}{5}}\ket{00} + \sqrt{\frac{2}{5}}\ket{01} \end{split} \end{equation*} Where $P(\ket{0})$ is the probability that we just calculated in the first question. Technically it would have to be the probability of measuring the eigenvalue $+1$, but from what we explained previously, we allow ourselves to use this notation. Note that this new state is the projection of $\ket{\psi}$ over the subspace generated by all the states that have the first qubit in $\ket{0}$, namely $\set{\ket{00}, \ket{01}}$, therefore this condition is also true. On the other hand, we note that the normalization is correct since $\abs{\sqrt{\frac{3}{5}}}^2 + \abs{\sqrt{\frac{2}{5}}}^2 = \frac{3}{5} + \frac{2}{5} = 1$. **3. 
**3. What is the probability of measuring some qubit in $\ket{1}$?** Let us consider the following events \begin{equation*} \begin{split} A &= \text{Measure the first qubit in } \ket{1} \\ B &= \text{Measure the second qubit in } \ket{1} \end{split} \end{equation*} Recall from probability theory that \begin{equation*} P(A \cup B) = P(A) + P(B) - P(A \cap B) \end{equation*} So what we are looking for is \begin{equation*} \begin{split} P(A \cup B) &= \bra{\psi}\hat{P_1}\otimes\hat{I}\ket{\psi} + \bra{\psi}\hat{I}\otimes\hat{P_1}\ket{\psi} - \bra{\psi}\hat{P_1}\otimes\hat{P_1}\ket{\psi} \\ &= \bra{\psi}\left(\frac{1}{2}\ket{10} + \frac{1}{\sqrt{8}}\ket{11}\right) + \bra{\psi}\left(\frac{1}{2}\ket{01} + \frac{1}{\sqrt{8}}\ket{11}\right) - \bra{\psi}\left(\frac{1}{\sqrt{8}}\ket{11}\right) \\ &= \frac{1}{2}\braket{\psi}{10} + \frac{1}{\sqrt{8}}\braket{\psi}{11} + \frac{1}{2}\braket{\psi}{01} + \frac{1}{\sqrt{8}}\braket{\psi}{11} - \frac{1}{\sqrt{8}}\braket{\psi}{11} \\ &= \left(\frac{1}{2}\right)\left(\frac{1}{2}\right) + \left(\frac{1}{\sqrt{8}}\right)\left(\frac{1}{\sqrt{8}}\right) + \left(\frac{1}{2}\right)\left(\frac{1}{2}\right) \\ &= \frac{1}{4} + \frac{1}{8} + \frac{1}{4} = \frac{5}{8} \end{split} \end{equation*} Note that the amplitudes of the terms of $\ket{\psi}$ that have some qubit in $\ket{1}$ are precisely $\frac{1}{2}$, $\frac{1}{2}$ and $\frac{1}{\sqrt{8}}$; if we calculate the sum of their squared moduli we get exactly \begin{equation*} \abs{\frac{1}{2}}^2 + \abs{\frac{1}{2}}^2 + \abs{\frac{1}{\sqrt{8}}}^2 = \frac{1}{4} + \frac{1}{4} + \frac{1}{8} = \frac{5}{8} = P(A \cup B) \end{equation*} The goal of this section on projective measurement is to highlight that in quantum computing, when we talk about measuring, it is much more practical to ask about the state of the system than about the value to be measured, which might not be so relevant in this context. For example, if we have a state $\ket{\psi}$ of three qubits, it is easier to think of calculating the probability of measuring $\ket{010}$ than to think of calculating the probability of measuring a certain eigenvalue of $\hat{\sigma_z}\otimes\hat{\sigma_z}\otimes\hat{\sigma_z}$, which is what we are actually doing under the hood, but without the formalism of the postulates of quantum mechanics. We can say that in quantum computing the state of the system (the qubits themselves) is more relevant than the eigenvalues obtained from measuring $\hat{\sigma_z}$. It is important to note that in quantum computing, measurement can also be part of an algorithm. When this topic is addressed, it will be clear that a measurement is often made to project onto a certain state that is being sought and then continue with the algorithm from that new state. Therefore, being able to know the state of a system after a certain measurement turns out to be very relevant. ### <a name="remark_3_9">Remark 3.9</a> 1. Non-orthogonal states cannot be reliably distinguished by a projective measurement. Let us remember that if we have a certain state $\ket{\psi}$, measure an observable (self-adjoint operator), and obtain an eigenvalue, the measurement postulate of quantum mechanics tells us that the state $\ket{\psi}$ will be projected onto the subspace associated with the measured eigenvalue. In terms of the previous section, this means applying a projector to the state $\ket{\psi}$. What do we mean by reliably distinguishing them?
By using a projective measurement we can measure one of them with probability $1$ and measure the other with probability $0$. For example, if we wanted to determine that we have the state $\ket{\varphi}$ and not the state $\ket{\psi}$, we would simply want the expected value of the projector $\hat{P_\varphi}$ to be $1$ when measured in $\ket{\varphi}$ and, in turn, $0$ when measured in $\ket{\psi}$. Let's use an example to see why we cannot make this reliable distinction between two states that are not orthogonal. Let us consider the following non-orthogonal states \begin{equation*} \ket{\psi} = \ket{0} \enspace \text{ and } \enspace \ket{\varphi} = \frac{1}{\sqrt{2}}(\ket{0} + \ket{1}) \end{equation*} And the projector \begin{equation*} \hat{P_\psi} = \ketbra{\psi}{\psi} \end{equation*} Thus we have \begin{equation*} \begin{split} P(\ket{\psi}) &= \bra{\psi}\hat{P_\psi}\ket{\psi} = \braket{\psi}{\psi}\braket{\psi}{\psi} = 1 \\ P(\ket{\varphi}) &= \bra{\varphi}\hat{P_\psi}\ket{\varphi} = \braket{\varphi}{\psi}\braket{\psi}{\varphi} = \frac{1}{\sqrt{2}}\frac{1}{\sqrt{2}} = \frac{1}{2} \end{split} \end{equation*} It should be clear from this particular example that we cannot have a projector that allows us to reliably distinguish two non-orthogonal states. 2. Orthogonal states can be reliably distinguished by a projective measurement. Let's consider the states \begin{equation*} \ket{\psi} = \ket{0} \enspace \text{ and } \enspace \ket{\varphi} = \ket{1} \end{equation*} \begin{equation*} \begin{split} \hat{P_\psi} &= \ketbra{\psi}{\psi} \\ \implies \\ P(\ket{\psi}) &= \bra{\psi}\hat{P_\psi}\ket{\psi} = \braket{\psi}{\psi}\braket{\psi}{\psi} = 1 \\ P(\ket{\varphi}) &= \bra{\varphi}\hat{P_\psi}\ket{\varphi} = \braket{\varphi}{\psi}\braket{\psi}{\varphi} = 0 \end{split} \end{equation*} It should be clear from this particular example that we can reliably distinguish orthogonal states.
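The two cases of this remark can also be checked numerically (again a minimal NumPy sketch with explicit vectors in the computational basis):

```
import numpy as np

ket0 = np.array([1.0, 0.0])
ket1 = np.array([0.0, 1.0])
ket_plus = (ket0 + ket1) / np.sqrt(2)   # |phi> = (|0> + |1>)/sqrt(2)

P_psi = np.outer(ket0, ket0)            # |psi><psi| with |psi> = |0>

# non-orthogonal pair |0>, |+>: the projector cannot separate them reliably
print(ket0 @ P_psi @ ket0)          # 1.0
print(ket_plus @ P_psi @ ket_plus)  # 0.5

# orthogonal pair |0>, |1>: reliable distinction
print(ket1 @ P_psi @ ket1)          # 0.0
```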
# Neural Networks and Deep Learning for Life Sciences and Health Applications - An introductory course about theoretical fundamentals, case studies and implementations in python and tensorflow (C) Umberto Michelucci 2018 - [email protected] github repository: https://github.com/michelucci/dlcourse2018_students Fall Semester 2018 ``` import tensorflow as tf import numpy as np import pandas as pd import matplotlib.pyplot as plt ``` # Solutions to exercises ## Exercise 1 (Difficulty: easy) Draw and develop in tensorflow with ```tf.constant``` the computational graphs for the following operations A) ```w1*x1+w2*x2+x1*x1``` B) ```A*x1+3+x2/2``` Use as input values ```x1 = 5``` and ```x2 = 6``` ## A) There are several ways of solving this exercise. This is one possible solution. ``` # Building Phase x1 = tf.constant(5.) x2 = tf.constant(6.) w1 = 10. w2 = 20. z1 = tf.multiply(w1, x1) z2 = tf.multiply(w2, x2) z3 = tf.multiply(x1, x1) result = z1 + z2 + z3 # Evaluation Phase with tf.Session() as sess: print(result.eval()) ``` A second way of doing that is the following ``` # Building Phase x1 = tf.constant(5.) x2 = tf.constant(6.) w1 = 10. w2 = 20. z1 = tf.multiply(w1, x1) z2 = tf.multiply(w2, x2) z3 = tf.multiply(x1, x1) result = z1 + z2 + z3 # Evaluation Phase sess = tf.Session() print(sess.run(result)) sess.close() ``` But you can also define ```w1``` and ```w2``` as constants too ``` # Building Phase x1 = tf.constant(5.) x2 = tf.constant(6.) w1 = tf.constant(10.) w2 = tf.constant(20.) z1 = tf.multiply(w1, x1) z2 = tf.multiply(w2, x2) z3 = tf.multiply(x1, x1) result = z1 + z2 + z3 # Evaluation Phase sess = tf.Session() print(sess.run(result)) sess.close() ``` ### B) ``` # Building Phase x1 = tf.constant(5.) x2 = tf.constant(6.) A = tf.constant(10.) result = tf.multiply(A, x1) + tf.constant(3.) + tf.divide(x2, 2.) # Evaluation Phase sess = tf.Session() print(sess.run(result)) sess.close() ``` or you can define the ```result``` in multiple steps ``` # Building Phase z1 = tf.multiply(A, x1) z2 = tf.add(z1, 3.) z3 = tf.add(z2, tf.divide(x2,2.)) # Evaluation Phase sess = tf.Session() print(sess.run(z3)) # evaluate z3, the final node of this step-wise graph sess.close() ``` ## Exercise 2 (Difficulty: medium) Draw and develop in tensorflow with ```tf.Variable``` the computational graph for the following operation ```A*(w1*x1+w2*x2)``` Build the computational graph and then evaluate it two times (without re-building it) in the same session with the initial values A) ```x1 = 3, x2 = 4``` B) ```x1 = 5, x2 = 7``` ``` # Building Phase x1 = tf.Variable(3.) x2 = tf.Variable(4.) w1 = tf.constant(10.) w2 = tf.constant(20.) A = tf.constant(30.) init = tf.global_variables_initializer() z1 = tf.multiply(w1,x1) z2 = tf.multiply(w2,x2) z3 = tf.add(z1, z2) result = tf.multiply(A, z3) ``` To run the same graph twice in the same session you can do the following ``` sess = tf.Session() print(sess.run(result, feed_dict = {x1: 3, x2: 4})) print(sess.run(result, feed_dict = {x1: 5, x2: 7})) sess.close() ``` Or you can write a function that creates a session, evaluates a node, and then closes it. ``` def run_evaluation(x1_, x2_): sess = tf.Session() print(sess.run(result, feed_dict = {x1: x1_, x2: x2_})) sess.close() ``` And then you can evaluate the node with a call to your function. ``` run_evaluation(3,4) run_evaluation(5,7) ``` ## Exercise 3 (Difficulty: FUN) Consider two vectors ``` x1 = [1,2,3,4,5], x2 = [6,7,8,9,10]``` draw and build in tensorflow the computational graph for the dot-product operation between the two vectors.
If you don't know what a dot-product is you can check it [here](https://en.wikipedia.org/wiki/Dot_product) (we covered that in our introductory week). Build it in two different ways: A) Do it with loops. Build a computational graph that takes as input scalars, and in the session/evaluation phase build a loop to go over all the inputs and then sum the results B) Do it in one shot with tensorflow. Build a computational graph that takes as input vectors and does the entire operation directly in tensorflow. Hint: you can use in tensorflow two methods: ```tf.reduce_sum(tf.multiply(x1, x2))``` or ```tf.matmul(tf.reshape(x1,[1,5]), tf.reshape(x2, [-1, 1]))```. Try to understand why they work by checking the official documentation. ## a) ``` first = tf.Variable(0.) second = tf.Variable(0.) mult = tf.multiply(first, second) x1 = [1,2,3,4,5] x2 = [6,7,8,9,10] sess = tf.Session() total = 0 for i in range(0,len(x1)): total = total + sess.run(mult, feed_dict = {first: x1[i], second: x2[i]}) print(total) ``` Note that you can do that easily in numpy ``` np.dot(x1, x2) ``` ## b) Another way, and much more efficient, is the following ``` x1 = tf.placeholder(tf.int32, None) # Let's assume we work with integers x2 = tf.placeholder(tf.int32, None) # Let's assume we work with integers result = tf.reduce_sum(tf.multiply(x1, x2)) sess = tf.Session() print(sess.run(result, feed_dict = {x1: [1,2,3,4,5], x2:[6,7,8,9,10]})) sess.close() ``` Or with matrices ``` x1 = tf.placeholder(tf.int32, None) # Let's assume we work with integers x2 = tf.placeholder(tf.int32, None) # Let's assume we work with integers result = tf.matmul(tf.reshape(x1,[1,5]), tf.reshape(x2, [-1, 1])) sess = tf.Session() print(sess.run(result, feed_dict = {x1: [1,2,3,4,5], x2:[6,7,8,9,10]})) sess.close() ``` Note that the result is different in the two cases! In the first we get a scalar, in the second a matrix that has dimensions ```1x1```, because the second method is a matrix multiplication function that will return a matrix (or better, a tensor). ## c) (even another way) (BONUS Solution) There is actually another way. Tensorflow can perform the dot product directly ``` x1 = tf.placeholder(tf.int32, None) # Let's assume we work with integers x2 = tf.placeholder(tf.int32, None) # Let's assume we work with integers result = tf.tensordot(x1, x2, axes = 1) sess = tf.Session() print(sess.run(result, feed_dict = {x1: [1,2,3,4,5], x2:[6,7,8,9,10]})) sess.close() ``` ## Exercise 4 (Difficulty: medium) Write a function that builds a computational graph for the operation ```x1+x2``` where the inputs ```x1``` and ```x2``` have given dimensions. Your ```x1``` and ```x2``` should be declared as ```tf.placeholder```. Your function should accept as input: - dimensions of ```x1``` as list, for example ```[3]``` - dimensions of ```x2``` as list, for example ```[3]``` The function should return a tensor ```z = x1 + x2```. Then open a session and evaluate ```z``` with the following inputs: - ```x1 = [4,6,7], x2 = [1,2,9]``` - ```x1 = [1,2,....., 1000], x2 = [10001, 10002, ...., 11000]``` and print the result.
``` def build_graph(dim1, dim2): tf.reset_default_graph() x1 = tf.placeholder(tf.float32, dim1) x2 = tf.placeholder(tf.float32, dim2) z = tf.add(x1, x2) return z, x1, x2 x1list = [4,6,7] x2list = [1,2,9] # Building Phase z, x1, x2 = build_graph(len(x1list), len(x2list)) sess = tf.Session() print(sess.run(z, feed_dict = {x1: x1list, x2: x2list})) sess.close() ``` **Note that since you refer to the tensors ```x1``` and ```x2``` in the ```feed_dict``` dictionary you need to have the tensors visible, otherwise you will get an error; therefore you need your function to return not only ```z``` but also ```x1``` and ```x2```.** ``` x1list = np.arange(1, 1001, 1) x2list = np.arange(10001, 11001, 1) # Building Phase z, x1, x2 = build_graph(len(x1list), len(x2list)) sess = tf.Session() print(sess.run(z, feed_dict = {x1: x1list, x2: x2list})) sess.close() ``` ## Exercise 5 (Difficulty: FUN) ### Linear Regression with tensorflow https://onlinecourses.science.psu.edu/stat501/node/382/ Consider the following dataset ``` x = [4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0] y = [33, 42, 45, 51, 53, 61, 62] ``` We want to find the best parameters $p_0$ and $p_1$ that minimise the MSE (mean squared error) for the data given; in other words, we want to do a linear regression on the data $(x,y)$. The matrix solution for the best parameters is $$ {\bf p} =(X^TX)^{-1} X^T Y $$ where $X^T$ is the transpose of the matrix $X$. The matrix $X$ is defined as $$ X = \begin{bmatrix} 1 & x_1 \\ ... & ... \\ 1 & x_n \end{bmatrix} $$ The matrix $Y$ is simply an $n\times 1$ matrix containing the values $y_i$. The dimensions are: - $X$ has dimensions $n\times 2$ - $Y$ has dimensions $n\times 1$ - ${\bf p}$ has dimensions $2\times 1$ Build a computational graph that evaluates $\bf p$ as given above, given the matrices $X$ and $Y$. Note you will have to build the matrices from the data given at the beginning. If you need more information, a beautifully long explanation can be found here: https://onlinecourses.science.psu.edu/stat501/node/382/ Let's convert ```y``` to a list of floats... **Remember tensorflow is really strict with datatypes**. ``` y = [float(i) for i in y] y x = pd.DataFrame(x) y = pd.DataFrame(y) x['b'] = 1 x.head() cols = x.columns.tolist() cols = cols[-1:] + cols[:-1] print(cols) x = x[cols] x.head() ``` Let's build the computational graph: **NOTE: if you use tf.float32 you will get results that are slightly different from numpy. So be aware. To be safe you can use ```float64```.** Always try to be as specific as you can with dimensions. The first dimension is defined as "None" so that we can use the graph, if necessary, with a different number of observations without rebuilding it. ``` tf.reset_default_graph() xinput = tf.placeholder(tf.float64, [None,2]) yinput = tf.placeholder(tf.float64, [None,1]) ``` Multiplication between tensors is somewhat complicated, especially when dealing with tensors with more dimensions. So we use the method https://www.tensorflow.org/api_docs/python/tf/einsum; check it out to get more information.
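Before using it, here is a quick sanity check of the `'ij,jk->ik'` subscript notation with plain numpy (the repeated index `j` is summed over, so the pattern is just ordinary matrix multiplication; `tf.einsum` follows the same subscript convention):

```
import numpy as np

A = np.arange(6).reshape(2, 3)    # 2x3
B = np.arange(12).reshape(3, 4)   # 3x4
# 'ij,jk->ik': sum over the shared index j -> standard matrix product
print(np.allclose(np.einsum('ij,jk->ik', A, B), np.matmul(A, B)))  # True
```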
``` tmp = tf.einsum('ij,jk->ik',tf.transpose(xinput) , xinput) part1 = tf.linalg.inv(tmp) part2 = tf.einsum('ij,jk->ik',tf.transpose(xinput), yinput) pout = tf.einsum('ij,jk->ik', part1, part2) # Reference: https://www.tensorflow.org/api_docs/python/tf/einsum sess = tf.Session() print("The best parameters p are:") print(sess.run(pout, feed_dict = {xinput: x, yinput: y})) sess.close() ``` If you remember the first week (check https://github.com/michelucci/dlcourse2018_students/blob/master/Week%201%20-%20Mathematic%20introduction/Week%201%20-%20Solution%20to%20exercises.ipynb) you can do the same with ```numpy``` ``` part1np = np.linalg.inv(np.matmul(x.transpose() , x)) part2np = np.matmul(x.transpose(), y) pnp = np.matmul(part1np, part2np) print(pnp) ``` ## Computational Graph for predictions The same result we got with tensorflow. Now we can build a graph that will use the ```p``` we have found for predictions ``` p = tf.placeholder(tf.float32, [2,1]) xnode = tf.placeholder(tf.float32, [None, 2]) # This time let's be specific with dimensions pred = tf.tensordot(xnode, p, axes = 1) sess = tf.Session() pred_y = sess.run(pred, feed_dict = {p: pnp, xnode: x}) pred_y ``` And those are the **true** values ``` y ``` ## Plot of the results ``` plt.rc('font', family='arial') plt.rc('xtick', labelsize='x-small') plt.rc('ytick', labelsize='x-small') plt.tight_layout() fig = plt.figure(figsize=(8, 5)) ax = fig.add_subplot(1, 1, 1) ax.scatter(y, pred_y, lw = 0.3, s = 80) ax.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw = 3) ax.set_xlabel('Measured Target Value', fontsize = 16); ax.set_ylabel('Predicted Target Value', fontsize = 16); plt.tick_params(labelsize=16) ```
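As a small numeric complement to the plot above (a sketch that assumes the `y` DataFrame and the `pred_y` array from the previous cells are still in memory):

```
import numpy as np

# mean squared error between measured and predicted target values
mse = np.mean((y.values - pred_y)**2)
print('MSE of the linear fit: {:.3f}'.format(mse))
```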
# **Imbalanced Data** Encountered in a classification problem in which the number of observations per class is disproportionately distributed. ## **How to treat Imbalanced Data?**<br> Introducing the `imbalanced-learn` (imblearn) package. ### Data ``` import pandas as pd import seaborn as sns from sklearn.datasets import make_classification # make dummy data X, y = make_classification(n_samples=5000, n_features=2, n_informative=2, n_redundant=0, n_repeated=0, n_classes=3, n_clusters_per_class=1, weights=[0.01, 0.05, 0.94], class_sep=0.8, random_state=0) df = pd.DataFrame(X) df.columns = ['feature1', 'feature2'] df['target'] = y df.head() # visualize the data sns.countplot(data=df, x=df['target']); ``` We can see that the data are very heavily imbalanced. -------- # 1) Over-Sampling Approach ## 1.1) naive approach known as Random Over-Sampling + We will upsample our minority classes, that is, sample with replacement until the number of observations is uniform across all classes. + As we can imagine, this approach should give us pause depending on the scale of upsampling we'll be doing. + `from imblearn.over_sampling import RandomOverSampler` ## 1.2) another approach is SMOTE (Synthetic Minority Oversampling Technique) + in this case, we generate new observations within the existing feature space of our minority classes. ### Now, let's apply an over-sampling approach. For this we'll use **a naive approach known as random over-sampling.** ``` from imblearn.over_sampling import RandomOverSampler ros = RandomOverSampler(random_state=0) X_resampled, y_resampled = ros.fit_resample(X, y) ``` ### Let's visualize again after random over-sampling ``` df = pd.DataFrame(y_resampled, columns=['target']) sns.countplot(data=df, x=df['target']); ``` We have increased the size of each of our minority classes to be uniform with that of our majority class through random sampling. # 2) Under-Sampling Technique ## 2.1) Naive approach to randomly under-sample our majority class + this time we are actually throwing out data from our majority class until the number of observations is uniform. + `from imblearn.under_sampling import RandomUnderSampler` ### Let's now try an under-sampling technique. Again, we'll start with a naive approach to randomly under-sample our majority class. ``` from imblearn.under_sampling import RandomUnderSampler rus = RandomUnderSampler(random_state=0) X_resampled, y_resampled = rus.fit_resample(X, y) ``` ### Visualize the resampled data ``` df = pd.DataFrame(y_resampled, columns=['target']) sns.countplot(data=df, x='target'); ``` The data are now balanced. However, note that there are only about 60 observations per class. **Because of the infrequency of our smallest minority class, we threw out a huge percentage of the data**. So you might want to consider other methods for this data (like `k-means` and `near-miss`)
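For completeness, here is a short sketch of the two alternatives mentioned above, SMOTE for over-sampling and NearMiss for under-sampling, both available in `imblearn` (the parameter choices below are defaults/illustrative assumptions, not tuned values; `X` and `y` are the arrays from `make_classification` above):

```
import pandas as pd
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import NearMiss

# SMOTE: synthesize new minority-class observations instead of duplicating existing ones
X_sm, y_sm = SMOTE(random_state=0).fit_resample(X, y)

# NearMiss: under-sample the majority class, keeping the points closest to the minority classes
X_nm, y_nm = NearMiss(version=1).fit_resample(X, y)

print('SMOTE class counts:   ', pd.Series(y_sm).value_counts().to_dict())
print('NearMiss class counts:', pd.Series(y_nm).value_counts().to_dict())
```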
``` # ~145MB !wget -x --load-cookies cookies.txt -O business.zip 'https://www.kaggle.com/yelp-dataset/yelp-dataset/download/py6LEr6zxQNWjebkCW8B%2Fversions%2FlVP0fduiJJo8YKt2vKKr%2Ffiles%2Fyelp_academic_dataset_business.json?datasetVersionNumber=2' !unzip business.zip !wget -x --load-cookies cookies.txt -O review.zip 'https://www.kaggle.com/yelp-dataset/yelp-dataset/download/py6LEr6zxQNWjebkCW8B%2Fversions%2FlVP0fduiJJo8YKt2vKKr%2Ffiles%2Fyelp_academic_dataset_review.json?datasetVersionNumber=2' !unzip review.zip import pandas as pd from six.moves import cPickle import numpy as np import json from scipy.sparse import csr_matrix from sklearn.decomposition import TruncatedSVD from scipy.sparse.linalg import svds import matplotlib.pyplot as plt from sklearn.metrics import mean_squared_error business = [] with open('/content/yelp_academic_dataset_business.json') as fl: for line in fl: business.append(json.loads(line)) business = pd.DataFrame(business) business.head() review = [] with open('/content/yelp_academic_dataset_review.json') as fl: for line in fl: review.append(json.loads(line)) review = pd.DataFrame(review) review.head() bcols = ['business_id', 'city', 'categories'] ucols = ['business_id', 'user_id', 'review_id', 'stars'] df = review[ucols].merge(business[bcols], how = 'outer', on= 'business_id') df = df.dropna() df.head() #selecting subset: Phoenix city restaurants dfx = df[(df.city == 'Phoenix') & (df.categories.str.contains('.Restaurant.', case= False))] dfx.shape def get_clean_df(df, min_user_review = 30, min_res_review = 0, cols = ['user_id', 'business_id', 'stars']): '''Cleans the df and gets rid of the unwanted cols and also allows to filter the user and business based on the min number of reviews received''' df_new = df[cols] df_new.dropna(axis = 0, how = 'any', inplace = True) df_new[cols[1]+'_freq'] = df_new.groupby(cols[1])[cols[1]].transform('count') df_clean = df_new[df_new[cols[1]+'_freq']>=min_res_review] df_clean[cols[0]+'_freq'] = df_clean.groupby(cols[0])[cols[0]].transform('count') df_clean_2 = df_clean[df_clean[cols[0]+'_freq']>=min_user_review] return df_clean_2 from pandas.api.types import CategoricalDtype def get_sparse_matrix(df): '''Converts the df into a sparse ratings matrix''' unique_users = list(df['user_id'].unique()) unique_bus = list(df['business_id'].unique()) data = df['stars'].tolist() row = df['user_id'].astype(CategoricalDtype(categories=unique_users)).cat.codes col = df['business_id'].astype(CategoricalDtype(categories=unique_bus)).cat.codes sparse_matrix = csr_matrix((data, (row, col)), shape=(len(unique_users), len(unique_bus))) return sparse_matrix def get_sparsity(sparse_matrix): return 1 - sparse_matrix.nnz/(sparse_matrix.shape[0]*sparse_matrix.shape[1]) data = get_sparse_matrix(get_clean_df(dfx, min_user_review=10)) print(get_sparsity(data)) print(data.shape) def train_val_test_split(sparse_matrix, num_review_val = 2, num_review_test = 2): '''Split the rating matrix into train ,val, and test marix that are disjoint matrices''' nzrows, nzcols = sparse_matrix.nonzero() sparse_matrix_test = csr_matrix(sparse_matrix.shape) sparse_matrix_val = csr_matrix(sparse_matrix.shape) sparse_matrix_train = sparse_matrix.copy() n_users = sparse_matrix.shape[0] for u in range(n_users): idx = nzcols[np.where(nzrows == u)] np.random.shuffle(idx) test_idx = idx[-num_review_test:] val_idx = idx[-(num_review_val+num_review_test):-num_review_test] train_idx = idx[:-(num_review_val+num_review_test)] sparse_matrix_test[u,test_idx] = sparse_matrix[u,test_idx] 
sparse_matrix_val[u,val_idx] = sparse_matrix[u,val_idx] sparse_matrix_train[u,test_idx] = 0 sparse_matrix_train[u,val_idx] = 0 data = np.array(sparse_matrix_train[sparse_matrix_train.nonzero()])[0] row = sparse_matrix_train.nonzero()[0] col = sparse_matrix_train.nonzero()[1] size = sparse_matrix_train.shape sparse_matrix_train = csr_matrix((data,(row,col)),shape = size) mult = sparse_matrix_train.multiply(sparse_matrix_val) mmult = mult.multiply(sparse_matrix_test) assert(mmult.nnz == 0) return sparse_matrix_train, sparse_matrix_val, sparse_matrix_test train, val, test = train_val_test_split(data) print(train.nnz, val.nnz, test.nnz) ``` ## Model Building ``` def approx_err(k, A, U, S, Vt): rec_A = np.dot(U[:, :k], np.dot(S[:k,:k], Vt[:k, :])) idx = np.where(A>0); diff = A[idx] - rec_A[idx] return np.linalg.norm(diff)**2/diff.shape[1] # # svd # U, S, Vt = np.linalg.svd(train.todense()) # k = np.linspace(2,40,20, dtype = int) # errors_svd_val = {} # errors_svd_train = {} # for i in k: # errors_svd_val[i] = approx_err(i, val.todense(), U, S, Vt) # errors_svd_train[i] = approx_err(i, train.todense(), U, S, Vt) # plt.plot(errors_svd_val.keys(),errors_svd_val.values(), label = 'Validation') # plt.plot(errors_svd_train.keys(),errors_svd_train.values(), label = 'Train') # plt.xlabel('k') # plt.ylabel('MSE') # plt.legend() ``` ALS ``` def get_mse(pred, actual): # Ignore zero terms. pred = pred[actual.nonzero()].flatten() actual = actual[actual.nonzero()].flatten() return mean_squared_error(pred, actual) def als(ratings_matrix, k=40, user_reg=0, res_reg=0, iters=10): '''Performs ALS for a given ratings_matrix and returns predictions using the latent vector representation User (U x K) and Restaurant (R x K)''' ratings_matrix = ratings_matrix.T user_vec = np.random.rand(ratings_matrix.shape[1],k).T res_vec = np.random.rand(ratings_matrix.shape[0],k).T for i in range(iters): for u in range(ratings_matrix.shape[1]): user_vec[:,u] = np.linalg.solve(np.dot(res_vec,res_vec.T) + user_reg * np.eye(res_vec.shape[0]), np.dot(res_vec,ratings_matrix[:,u])) for r in range(ratings_matrix.shape[0]): res_vec[:,r] = np.linalg.solve(np.dot(user_vec,user_vec.T) + res_reg * np.eye(user_vec.shape[0]), np.dot(user_vec,ratings_matrix[r,:].T)) prediction = np.dot(res_vec.T, user_vec) # error = np.mean((ratings_matrix - prediction)**2) return np.dot(res_vec.T, user_vec).T num_features = np.linspace(1,20,5,dtype=int) test_error_als = [] train_error_als = [] for i in num_features: preds_als = als(np.array(train.todense()), k=i, iters = 5) test_err = get_mse(preds_als, np.array(val.todense())) train_err = get_mse(preds_als, np.array(train.todense())) test_error_als.append(test_err) train_error_als.append(train_err) fig = plt.figure(figsize=(8,5)) plt.plot(num_features,test_error_als,'b-',label = 'validation') plt.plot(num_features,train_error_als,'r-', label = 'training') plt.title('MSE vs num_features (for ALS)') plt.xlabel('Number of features in a feature vector') plt.ylabel('MSE') plt.legend() ``` ### Refer to [this](https://colab.research.google.com/github/HegdeChaitra/Yelp-Recommendation-System/blob/master/Yelp_Reco_System.ipynb#scrollTo=kAoMx5IHUpsi) for further info
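As a follow-up sketch (an illustration under assumptions, not part of the original analysis): once a predicted-ratings matrix such as `preds_als` is available, top-N recommendations for a user can be produced by ranking predictions while masking the restaurants already rated in `train`:

```
import numpy as np

def top_n_recommendations(preds, train_matrix, user_idx, n=5):
    """Indices of the n restaurants with the highest predicted rating
    that the user has not already rated in the training matrix."""
    scores = np.array(preds[user_idx], dtype=float).copy()
    already_rated = train_matrix[user_idx].nonzero()[1]
    scores[already_rated] = -np.inf  # exclude items seen during training
    return np.argsort(scores)[::-1][:n]

print(top_n_recommendations(preds_als, train, user_idx=0))
```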
``` import matplotlib.pyplot as plt import networkx as nx import pandas as pd import numpy as np from scipy import stats import scipy as sp import datetime as dt from ei_net import * # import cmocean as cmo %matplotlib inline ########################################## ############ PLOTTING SETUP ############## EI_cmap = "Greys" where_to_save_pngs = "../figs/pngs/" where_to_save_pdfs = "../figs/pdfs/" save = True plt.rc('axes', axisbelow=True) plt.rc('axes', linewidth=2) ########################################## ########################################## ``` # The emergence of informative higher scales in complex networks # Chapter 04: Effective Information in Real Networks $EI$ often grows with network size. To compare networks of different sizes, we examine their *effectiveness*, which is the $EI$ normalized by the size of the network to a value between $0.0$ and $1.0$: $$ \text{effectiveness} = \frac{EI}{\log_2(N)} $$ As the noise and/or the degeneracy of a network increases toward their upper possible bounds, the effectiveness of that network will trend to $0.0$. Regardless of its size, a network wherein each node has a deterministic output to a unique target has an effectiveness of $1.0$. Here, we examine the effectiveness of 84 different networks corresponding to data from real systems. These networks were selected primarily from the [Konect Network Database](http://konect.cc/), which was used because its networks are publicly available, range in size from dozens to tens of thousands of nodes, often have a reasonable interpretation as a causal structure, and they are diverse, ranging from social networks, to power networks, to metabolic networks. We defined four categories of interest: biological, social, informational, and technological. We selected our networks by using all the available networks (under 40,000 nodes) in the domains corresponding to each category within the Konect database, and where it was appropriate, the [Network Repository](http://networkrepository.com/) as well. Lower effectiveness values correspond to structures that either have high degeneracy, low determinism, or a combination of both. In the networks we measured, biological networks on average have lower effectiveness values, whereas technological networks on average have the highest effectiveness. This finding aligns intuitively with what we know about the relationship between $EI$ and network structure, and it also supports long-standing hypotheses about the role of redundancy, degeneracy, and noise in biological systems. On the other hand, technological networks such as power grids, autonomous systems, or airline networks are associated with higher effectiveness values on average. One explanation for this difference is that efficiency in human-made technological networks tends to create sparser, non-degenerate networks with higher effectiveness on average. Perhaps it might be surprising to find that evolved networks have such low effectiveness. But, as we will show, a low effectiveness can actually indicate that there are informative higher-scale (macroscale) dependencies in the system. That is, a low effectiveness can be reflective of the fact that biological systems often contain higher-scale causal structure, which we demonstrate in the following section. 
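To make the quantity concrete, here is a minimal standalone sketch of how $EI$ and effectiveness can be computed for a toy directed network. It assumes the definition of $EI$ as the entropy of the network's average out-weight distribution minus the average entropy of each node's out-weight distribution; the `ei_net` module imported above provides the full implementation used for the figures, so this version is only illustrative:

```
import numpy as np
import networkx as nx

def effective_information_sketch(G):
    """EI = H(<W_out>) - <H(W_out)>, using each node's out-weight distribution.
    Illustrative only; assumes an unweighted directed graph."""
    node_index = {n: i for i, n in enumerate(G.nodes())}
    senders = [n for n in G.nodes() if G.out_degree(n) > 0]
    W = np.zeros((len(senders), G.number_of_nodes()))
    for i, u in enumerate(senders):
        succ = list(G.successors(u))
        W[i, [node_index[v] for v in succ]] = 1.0 / len(succ)
    H = lambda p: -np.sum(p[p > 0] * np.log2(p[p > 0]))  # Shannon entropy (bits)
    return H(W.mean(axis=0)) - np.mean([H(row) for row in W])

G_toy = nx.erdos_renyi_graph(100, 0.05, seed=1, directed=True)
EI = effective_information_sketch(G_toy)
print("EI = %.3f bits, effectiveness = %.3f" % (EI, EI / np.log2(G_toy.number_of_nodes())))
```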
________________________ ## 4.1 Effectiveness of Real World Networks ``` import json json_data = open('../data/real_network_ei.json',"r").read() out_dict = json.loads(json_data) list1 = out_dict['Eff'] list1 = list(enumerate(list1)) list2 = sorted(list1, key=lambda x:x[1]) ordering = list(list(zip(*list2))[0]) eff_vals = list(list(zip(*list2))[1]) newcos = ["#ed4f44","#fdcb12","#7f61c3","#00c6c5","#333333"] cols = ['#88002c',"#ba4b57","#cc5134","#daaa32","#b8ab51","#698b4a","#69d07d","#50c9b5", "#64b6ff","#786bdb","#573689","#b55083","#c65abb","#bfbfbf","#666666","#333333"] plt.figure(figsize=(13,20)) for idx,i in enumerate(ordering): co = out_dict['color'][i] ef = out_dict['Eff'][i] plt.hlines(idx,0,ef,color=co,linewidth=4.5) plt.scatter(eff_vals, list(range(len(eff_vals))), edgecolors='w',linewidths=1.5, marker='o', s=130, alpha=0.98, facecolor=np.array(out_dict['color'])[ordering], zorder=20) plt.scatter([0]*len(eff_vals), list(range(len(eff_vals))), marker='s', s=65, alpha=0.98, edgecolors=np.array(out_dict['newco'])[ordering], linewidths=3.5, facecolor='w', zorder=20) domainz = ['Biological','Information','Social','Technological'] for ii, lab in enumerate(domainz): plt.scatter([-1], [-1], marker='s', s=125, alpha=0.98,edgecolors=newcos[ii], linewidths=4.5, facecolor='w', label=lab) for ii, lab in enumerate(sorted(np.unique(out_dict['Category']))): plt.plot([-10,-9], [-10,-9], marker='', alpha=0.98, linewidth=4.0, color=cols[ii], label=lab) plt.legend(loc=4, fontsize=19, framealpha=0.85) plt.yticks(list(range(len(eff_vals))), np.array(out_dict['Name'])[ordering], fontsize=14) plt.xticks(np.linspace(0,1,11), ["%.1f"%i for i in np.linspace(0,1,11)], size=18) plt.grid(alpha=0.3, color='#999999', linestyle='-', linewidth=2.5) plt.xlabel('Effectiveness', size=20) plt.xlim(-0.01,1.01) plt.ylim(-1,len(eff_vals)) if save: plt.savefig(where_to_save_pngs+"Konect_SortedEffectiveness_withLabels.png", dpi=425, bbox_inches='tight') plt.savefig(where_to_save_pdfs+"Konect_SortedEffectiveness_withLabels.pdf", bbox_inches='tight') plt.show() ``` ## 4.2 Statistical Comparison of Effectiveness, by Domain ``` rn_bio = np.array([out_dict['Eff'][i] for i in range(len(out_dict['Eff'])) \ if out_dict['Category_EI'][i]=='Biological']) rn_inf = np.array([out_dict['Eff'][i] for i in range(len(out_dict['Eff'])) \ if out_dict['Category_EI'][i]=='Information']) rn_soc = np.array([out_dict['Eff'][i] for i in range(len(out_dict['Eff'])) \ if out_dict['Category_EI'][i]=='Social']) rn_tec = np.array([out_dict['Eff'][i] for i in range(len(out_dict['Eff'])) \ if out_dict['Category_EI'][i]=='Technological']) labs = {'biological':0,'social':2,"information":1,'technological':3} a = labs['biological'] b = labs['social'] all_data = [rn_bio,rn_inf,rn_soc,rn_tec] for lab1 in labs.keys(): a = labs[lab1] for lab2 in labs.keys(): b = labs[lab2] if a!=b: t,p = sp.stats.ttest_ind(all_data[a], all_data[b], equal_var=False) print("comparing",lab1," \t", "to \t ",lab2," \t t-statistic = %.7f, \t p < %.8f"%(t,p)) plt.rc('axes', linewidth=1.5) mult = 0.8 fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15*mult, 15*mult)) noise0 = np.random.uniform(-0.035,0.035,len(all_data[0])) noise1 = np.random.uniform(-0.035,0.035,len(all_data[1])) noise2 = np.random.uniform(-0.035,0.035,len(all_data[2])) noise3 = np.random.uniform(-0.035,0.035,len(all_data[3])) plt.plot([1]*len(all_data[0]) + noise0, all_data[0], marker='o',linestyle='', markeredgecolor='k', markersize=6, color=newcos[0]) plt.plot([3]*len(all_data[1]) + noise1, all_data[1], 
marker='o',linestyle='',markeredgecolor='k', markersize=6, color=newcos[1]) plt.plot([2]*len(all_data[2]) + noise2, all_data[2], marker='o',linestyle='',markeredgecolor='k', markersize=6, color=newcos[2]) plt.plot([4]*len(all_data[3]) + noise3, all_data[3], marker='o',linestyle='',markeredgecolor='k', markersize=6, color=newcos[3]) parts = ax.violinplot(all_data, positions=[1,3,2,4], showmeans=False, showmedians=False, showextrema=False, widths=0.75) for i in range(len(parts['bodies'])): pc = parts['bodies'][i] pc.set_edgecolor(newcos[i]) pc.set_facecolor(newcos[i]) pc.set_alpha(0.85) pc.set_linewidth(4.0) parts = ax.violinplot(all_data, positions=[1,3,2,4], showmeans=False, showmedians=False, showextrema=False, widths=0.55) for i in range(len(parts['bodies'])): pc = parts['bodies'][i] pc.set_edgecolor(newcos[i]) pc.set_facecolor('w') pc.set_alpha(0.5) pc.set_linewidth(0.0) plt.hlines([np.mean(data) for data in all_data], [0.67, 2.6925, 1.695, 3.74], [1.33, 3.3075, 2.305, 4.26], linestyles='-', colors=newcos, zorder=1, linewidth=4.5) plt.plot(np.linspace(-10,-20,5), np.linspace(-10,-20,5), linestyle='-', marker='>', markersize=18, markerfacecolor='w', color='#333333', linewidth=3.5, markeredgecolor='k', markeredgewidth=2.5, label='Mean', alpha=0.98) plt.scatter([1,3,2,4], [np.mean(data) for data in all_data], zorder=20, marker='>', s=450, facecolor='w', edgecolors=newcos, linewidths=3.5, alpha=0.98) ax.set_ylabel('Effectiveness', fontsize=22) ax.set_xticks([y+1 for y in range(len(all_data))]) ax.set_xticklabels(['biological', 'social', 'information', 'technological'], fontsize=19, rotation=353) ax.set_yticks(np.linspace(0,1,6)) ax.set_yticklabels(["%.1f"%i for i in np.linspace(0,1,6)], fontsize=18) ax.grid(True, linestyle='-', linewidth=3.0, color='#999999', alpha=0.4) ax.text(1.28,0.07,"n=%i"%len(all_data[0]), fontsize=22, color=newcos[0]) ax.text(3.20,0.33,"n=%i"%len(all_data[1]), fontsize=22, color='k') ax.text(3.20,0.33,"n=%i"%len(all_data[1]), fontsize=22, color=newcos[1],alpha=0.95) ax.text(2.26,0.25,"n=%i"%len(all_data[2]), fontsize=22, color=newcos[2]) ax.text(4.21,0.55,"n=%i"%len(all_data[3]), fontsize=22, color=newcos[3]) ax.text(2.35,1.065,"**", fontsize=22) ax.hlines(1.07, labs['biological']+1+0.025, labs['technological']+1-0.025, linewidth=2.0) ax.vlines(labs['biological']+1+0.025, 1.045, 1.07, linewidth=2.0) ax.vlines(labs['technological']+1-0.025, 1.045, 1.07, linewidth=2.0) ax.text(3.01,1.012,"***", fontsize=22) ax.hlines(1.015, labs['social']+0.025, labs['technological']+1-0.025, linewidth=2.0) ax.vlines(labs['social']+0.025, 0.995, 1.015, linewidth=2.0) ax.vlines(labs['technological']+1-0.025, 0.995, 1.015, linewidth=2.0) ax.text(3.47,0.962,"*", fontsize=22) ax.hlines(0.965, labs['information']+2+0.025, labs['technological']+1-0.025, linewidth=2.0) ax.vlines(labs['information']+2+0.025, 0.945, 0.965, linewidth=2.0) ax.vlines(labs['technological']+1-0.025, 0.945, 0.965, linewidth=2.0) x1 = ax.plot([], [], marker='.', linestyle='', c='w') x2 = ax.plot([], [], marker='.', linestyle='', c='w') x3 = ax.plot([], [], marker='.', linestyle='', c='w') legs=[x1,x2,x3] leg1 = ax.legend(bbox_to_anchor=(1.009,0.22), fontsize=23, ncol=1, columnspacing=2, framealpha=0.95) ax.legend([l[0] for l in legs], ["p < 1e-06 ***","p < 1e-05 **","p < 1e-03 *"], handletextpad=-1.50, bbox_to_anchor=(1.0055,0.16), fontsize=18, ncol=1, columnspacing=-3.75, framealpha=0.95) ax.add_artist(leg1) ax.set_ylim(-0.015, 1.1) ax.set_xlim(0.25, 4.75) if save: plt.savefig( where_to_save_pngs+\ 
"Konect_Effectiveness_Violinplots.png", dpi=425, bbox_inches='tight') plt.savefig( where_to_save_pdfs+\ "Konect_Effectiveness_Violinplots.pdf", bbox_inches='tight') plt.show() ``` ## End of Chapter 04. In [Chapter 05](https://nbviewer.jupyter.org/github/jkbren/einet/blob/master/code/Chapter%2005%20-%20Causal%20Emergence%20in%20Preferential%20Attachment%20and%20SBMs.ipynb) we'll start to look at *causal emergence* networks. _______________
--- **Export of unprocessed features** --- ``` import pandas as pd import numpy as np import os import re import matplotlib as mpl import matplotlib.pyplot as plt import seaborn as sns from sklearn.feature_extraction.text import CountVectorizer import random import pickle from scipy import sparse import math import pprint import sklearn as sk import torch from IPython.display import display from toolbox import * # from myMLtoolbox import * %matplotlib inline sns.set() sns.set_context("notebook") sns.set(rc={'figure.figsize':(14,6)}) cfg = load_cfg() logVersions = load_LogVersions() ``` --- **For figures** ``` from figures_toolbox import * mpl.rcParams.update(mpl.rcParamsDefault) sns.set( context='paper', style='ticks', ) %matplotlib inline mpl.rcParams.update(performancePlot_style) ``` # Get uniprot list of proteins ``` uniprotIDs = pd.read_csv( os.path.join(cfg['rawDataUniProt'], "uniprot_allProteins_Human_v{}.pkl".format(logVersions['UniProt']['rawData'])), header=None, names=['uniprotID'] ) glance(uniprotIDs) ``` ## Hubs ``` path0 = os.path.join( cfg['outputPreprocessingIntAct'], "listHubs_20p_v{}.pkl".format(logVersions['IntAct']['preprocessed']['all']) ) with open(path0, 'rb') as f: list_hubs20 = pickle.load(f) glance(list_hubs20) ``` # Load feature datasets ``` featuresDict = { 'bioProcessUniprot': { 'path': os.path.join( cfg['outputPreprocessingUniprot'], "bioProcessUniprot_v{}--{}.pkl".format(logVersions['UniProt']['rawData'], logVersions['UniProt']['preprocessed']) ), 'imputeNA': '0', # '0', 'mean', 'none' 'normalise':False, 'isBinary': True, }, 'cellCompUniprot': { 'path': os.path.join( cfg['outputPreprocessingUniprot'], "cellCompUniprot_v{}--{}.pkl".format(logVersions['UniProt']['rawData'], logVersions['UniProt']['preprocessed']) ), 'imputeNA': '0', 'normalise':False, 'isBinary': True, }, 'molFuncUniprot': { 'path': os.path.join( cfg['outputPreprocessingUniprot'], "molFuncUniprot_v{}--{}.pkl".format(logVersions['UniProt']['rawData'], logVersions['UniProt']['preprocessed']) ), 'imputeNA': '0', 'normalise':False, 'isBinary': True, }, 'domainUniprot': { 'path': os.path.join( cfg['outputPreprocessingUniprot'], "domainFT_v{}--{}.pkl".format(logVersions['UniProt']['rawData'], logVersions['UniProt']['preprocessed']) ), 'imputeNA': '0', 'normalise':False, 'isBinary': True, }, 'motifUniprot': { 'path': os.path.join( cfg['outputPreprocessingUniprot'], "motif_v{}--{}.pkl".format(logVersions['UniProt']['rawData'], logVersions['UniProt']['preprocessed']) ), 'imputeNA': '0', 'normalise':False, 'isBinary': True, }, 'Bgee': { 'path': os.path.join( cfg['outputPreprocessingBgee'], "Bgee_processed_v{}.pkl".format(logVersions['Bgee']['preprocessed']) ), 'imputeNA': '0', 'normalise':True, 'isBinary': False, }, 'tissueCellHPA': { 'path': os.path.join( cfg['outputPreprocessingHPA'], "tissueIHC_tissueCell_v{}.pkl".format(logVersions['HPA']['preprocessed']['tissueIHC_tissueCell']) ), 'imputeNA': '0', 'normalise':True, 'isBinary': False, }, 'tissueHPA': { 'path': os.path.join( cfg['outputPreprocessingHPA'], "tissueIHC_tissueOnly_v{}.pkl".format(logVersions['HPA']['preprocessed']['tissueIHC_tissueOnly']) ), 'imputeNA': '0', 'normalise':True, 'isBinary': False, }, 'RNAseqHPA': { 'path': os.path.join( cfg['outputPreprocessingHPA'], "consensusRNAseq_v{}.pkl".format(logVersions['HPA']['preprocessed']['consensusRNAseq']) ), 'imputeNA': 'mean', 'normalise':True, 'isBinary': False, }, 'subcellularLocationHPA': { 'path': os.path.join( cfg['outputPreprocessingHPA'], 
"subcellularLocation_v{}.pkl".format(logVersions['HPA']['preprocessed']['subcellularLocation']) ), 'imputeNA': '0', 'normalise':False, 'isBinary': True, }, 'sequence': { 'path': os.path.join( cfg['outputPreprocessingUniprot'], "sequenceData_v{}--{}.pkl".format(logVersions['UniProt']['rawData'], logVersions['UniProt']['preprocessed']) ), 'imputeNA':'none', 'normalise':False, 'isBinary': False, } } def sneakPeak(featuresDict): for feature, details in featuresDict.items(): df = pd.read_pickle(details['path']) print('## ',feature) glance(df) print() sneakPeak(featuresDict) ``` # EDA **Number of GO terms for hubs and lone proteins** ``` def count_GOterms(): countGO = uniprotIDs.copy() for feature, details in featuresDict.items(): print(feature) if feature != 'sequence': df = pd.read_pickle(details['path']) foo = df.set_index('uniprotID').ne(0).sum(axis=1) foo2 = pd.DataFrame(foo) foo2.columns = [feature] foo2.reset_index() countGO = countGO.join(foo2, on='uniprotID', how='left') return countGO countGO = count_GOterms() glance(countGO) countGO.info() countGO['isHub'] = countGO.uniprotID.isin(list_hubs20) glance(countGO) sns.displot(countGO, x="bioProcessUniprot", hue="isHub", kind='kde', common_norm=False); doPlot=False for feature in featuresDict.keys(): if feature != 'sequence': foo = countGO.loc[countGO.isHub][feature] bar = countGO.loc[~countGO.isHub][feature] print(f"{feature}: on average, hubs have {foo.mean():.2f} GO terms, non-hubs have {bar.mean():.2f} (medians {foo.median():.2f} vs {bar.median():.2f})") if doPlot: sns.displot(countGO, x=feature, hue="isHub", kind='kde', common_norm=False) plt.show(); ``` # Export vectors lengths ``` def getVectorsLengths(featuresDict): vectorsLengths = dict() for feature, details in featuresDict.items(): df = pd.read_pickle(details['path']) assert 'uniprotID' in df.columns vectorsLengths[feature] = df.shape[1]-1 # -1 to remove uniprotID return vectorsLengths vectorsLengths = getVectorsLengths(featuresDict) print(vectorsLengths) versionRawImpute_overall = '6-0' logVersions['featuresEngineering']['longVectors']['overall'] = versionRawImpute_overall dump_LogVersions(logVersions) with open(os.path.join( cfg['outputFeaturesEngineering'], "longVectors_lengths_v{}.pkl".format(versionRawImpute_overall) ), 'wb') as f: pickle.dump(vectorsLengths, f) ``` # Format long vectors ``` def formatRawData(featuresDict, uniprotIDs, vectorsLengths): out = dict() out['uniprotID'] = uniprotIDs.uniprotID.to_list() for feature, details in featuresDict.items(): print(feature) df = pd.read_pickle(details['path']) print(' - initial dim:', df.shape) print(' - merge with reference index list') df = uniprotIDs.merge( df, on = 'uniprotID', how='left', validate='1:1' ) df.set_index('uniprotID', inplace=True) print(' - new dim:', df.shape) assert details['imputeNA'] in ['0','mean','none'] if details['imputeNA'] == 'mean': print(' - mean imputation') meanValues = df.mean(axis = 0, skipna = True) meanValues[np.isnan(meanValues)] = 0 df.fillna(meanValues, inplace=True) # sanity check assert df.isna().sum().sum() == 0 elif details['imputeNA'] == '0': print(' - imputate with 0') df.fillna(0, inplace=True) # sanity check assert df.isna().sum().sum() == 0 else: print(' - no imputation: {:,} NAs'.format(df.isna().sum().sum())) if details['normalise']: print(' - normalise') scal = sk.preprocessing.StandardScaler(copy = False) df = scal.fit_transform(df) elif feature == 'sequence': df = df.sequence.to_list() else: df = df.values # compare shape to vectorsLengths if feature == 'sequence': assert 
isinstance(df, list) else: assert df.shape[1] == vectorsLengths[feature] out[feature] = df.copy() return out def sneakPeak2(featuresDict, n=5): for feature, df in featuresDict.items(): print('## ',feature) glance(df, n=n) print() ``` ## Without normalising binary features ``` for feature in featuresDict: if featuresDict[feature]['isBinary']: featuresDict[feature]['normalise'] = False featuresDict outDict = formatRawData(featuresDict=featuresDict, uniprotIDs=uniprotIDs, vectorsLengths=vectorsLengths) sneakPeak2(outDict) sneakPeak2(outDict, n=0) ``` --- **Export** - v6.1 09/11/2021 ``` versionRawLimitedImpute = '6-1' # logVersions['featuresEngineering'] = dict() # logVersions['featuresEngineering']['longVectors']=dict() logVersions['featuresEngineering']['longVectors']['keepBinary'] = versionRawLimitedImpute dump_LogVersions(logVersions) with open(os.path.join( cfg['outputFeaturesEngineering'], "longVectors_keepBinary_v{}.pkl".format(versionRawLimitedImpute) ), 'wb') as f: pickle.dump(outDict, f) ``` ## WITH normalising binary features ``` for feature in featuresDict: if featuresDict[feature]['isBinary']: featuresDict[feature]['normalise'] = True featuresDict outDict2 = formatRawData(featuresDict=featuresDict, uniprotIDs=uniprotIDs, vectorsLengths=vectorsLengths) sneakPeak2(outDict2) ``` --- **Export** - v6.1 09/11/2021 ``` versionRawImputeAll = '6-1' logVersions['featuresEngineering']['longVectors']['imputeAll'] = versionRawImputeAll dump_LogVersions(logVersions) with open(os.path.join( cfg['outputFeaturesEngineering'], "longVectors_imputeAll_v{}.pkl".format(versionRawImputeAll) ), 'wb') as f: pickle.dump(outDict2, f) ```
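As a quick sanity check on the export (a sketch reusing the `cfg`, version strings, and `vectorsLengths` objects defined above):

```
import os
import pickle

# reload one of the exported dictionaries and confirm the stored matrices
# match the vector lengths exported earlier
with open(os.path.join(
    cfg['outputFeaturesEngineering'],
    "longVectors_keepBinary_v{}.pkl".format(versionRawLimitedImpute)
), 'rb') as f:
    reloaded = pickle.load(f)

n_proteins = len(reloaded['uniprotID'])
for feature, length in vectorsLengths.items():
    if feature != 'sequence':
        assert reloaded[feature].shape == (n_proteins, length)
print('all exported feature matrices have the expected shapes')
```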
# Muscle modeling > Marcos Duarte > Laboratory of Biomechanics and Motor Control ([http://demotu.org/](http://demotu.org/)) > Federal University of ABC, Brazil There are two major classes of muscle models that have been used in biomechanics and motor control: the Hill-type and Huxley-type models. They differ mainly on how the contractile element is modeled. In Hill-type models, the modeling of the contractile element is phenomenological; arbitrary mathematical functions are used to reproduce experimental observations relating muscle characteristics (such as excitation/activation, muscle length and velocity) with the muscle force. In Huxley-type models, the modeling of the contractile element is mechanistic; the mathematical functions used represent the hypothesized mechanisms for the cross-bridge dynamics (Tsianos and Loeb, 2013). Huxley-type models tend to produce more realistic results than Hill-type models for certain conditions but they have a higher computational demand. For this reason, Hill-type models are more often employed in musculoskeletal modeling and simulation. Hill-type muscle models are presented in several texts (e.g., Erdermir et al. 2007; He et al., 1991; McMahon, 1984; Nigg and Herzog, 2007; Robertson et al., 2013, Thelen, 2003; Tsianos and Loeb, 2013, Winters, 1990; Zajac, 1989; Zatsiorsky and Prilutsky, 2012) and implemented in many software for modeling and simulation of the musculoskeletal dynamics of human movement (e.g., the free and open source software [OpenSim](https://simtk.org/home/opensim)). Next, let's see a brief overview of a Hill-type muscle model and a basic implementation in Python. ## Hill-type muscle model Hill-type models are developed to reproduce the dependence of force with the length and velocity of the muscle-tendon unit and parameters are lumped and made dimensionless in order to represent different muscles with few changes in these parameters. A Hill-type model is complemented with the modeling of the activation dynamics (i.e., the temporal pattern of muscle activation and deactivation as a function of the neural excitation) to produce more realistic results. As a result, the force generated will be a function of three factors: the length and velocity of the muscle-tendon unit and its activation level $a$. A Hill-type muscle model has three components (see figure below): two for the muscle, an active contractile element (CE) and a passive elastic element (PE) in parallel with the CE, and one component for the tendon, an elastic element (SE) in series with the muscle. In some variations, a damping component is added parallel to the CE as a fourth element. A [pennation angle](http://en.wikipedia.org/wiki/Muscle_architecture) (angle of the pennate fibers with respect to the force-generating axis) is also included in the model. In a simpler approach, the muscle and tendon are assumed massless. <figure><img src="./../images/muscle_hill.png" width=400 alt="Hill-type muscle model."/><figcaption><center><i>Figure. A Hill-type muscle model with three components: two for the muscle, an active contractile element, $\mathsf{CE}$, and a passive elastic element in parallel, $\mathsf{PE}$, with the $\mathsf{CE}$, and one component for the tendon, an elastic element in series, $\mathsf{SE}$, with the muscle. 
$\mathsf{L_{MT}}$: muscle–tendon length, $\mathsf{L_T}$: tendon length, $\mathsf{L_M}$: muscle fiber length, $\mathsf{F_T}$: tendon force, $\mathsf{F_M}$: muscle force, and $α$: pennation angle.</i></center></figcaption> Let's now revise the models of a Hill-type muscle with three components and activation dynamics by two references: 1. [Thelen (2003)](http://simtk-confluence.stanford.edu:8080/display/OpenSim/Thelen+2003+Muscle+Model) with some of the adjustments described in Millard et al. (2013). Hereafter, Thelen2003Muscle or T03. 2. [McLean, Su, van den Bogert (2003)](http://www.ncbi.nlm.nih.gov/pubmed/14986412). Hereafter, McLean2003Muscle or M03. First, let's import the necessary Python libraries and customize the environment: ``` import numpy as np from scipy.integrate import ode, odeint %matplotlib inline import matplotlib.pyplot as plt import matplotlib matplotlib.rcParams['lines.linewidth'] = 3 matplotlib.rcParams['font.size'] = 13 matplotlib.rcParams['lines.markersize'] = 5 matplotlib.rc('axes', grid=True, labelsize=14, titlesize=16, ymargin=0.05) matplotlib.rc('legend', numpoints=1, fontsize=11) ``` ### Force-length relationship In a Hill-type model, the force a muscle can generate depends on its length due to two factors: 1. The active force of the contractile element (CE), which in turn depends on the spatial superposition of the actin and myosin molecules to form cross-bridges at the sarcomere. A maximum number of cross-bridges will be formed at an optimal fiber length, generating a maximum force. When a fiber is too stretched or too shortened, fewer cross-bridges will be formed, decreasing the force generated. 2. The passive and parallel elastic element (PE), which behaves as a nonlinear spring where no force is generated below a certain length (the slack length) and force increases with the muscle elongation. #### Force-length relationship of the contractile element Thelen2003Muscle represented the normalized force-length relationship of the contractile element by a Gaussian function: \begin{equation} \bar{f}_{l,CE} = exp\left[-(\bar{L}_M-1)^2/\gamma\right] \label{} \end{equation} where $\gamma$ is a shape factor and $\bar{L}_M$ is the muscle fiber length normalized by the optimal muscle fiber length at which maximal force can be produced, $L_{Mopt}$: \begin{equation} \bar{L}_M=\dfrac{L_M}{L_{Mopt}} \label{} \end{equation} Thelen2003Muscle adopted $\gamma=0.45$. The actual force produced is obtained multiplying $\bar{f}_{l,CE}$ by the maximum isometric muscle force, $F_{M0}$. Thelen2003Muscle assumed that the maximum isometric muscle forces for old adults were 30% lower than those used for young adults. McLean2003Muscle represented the force-length relationship of the contractile element (not normalized) as a function of muscle length (not normalized) by a quadratic function: \begin{equation} f_{l,CE} = max \left\{ \begin{array}{l l} F_{Mmin} \\ F_{M0}\left[1 - \left(\dfrac{L_M-L_{Mopt}}{WL_{Mopt}}\right)^2\right] \end{array} \right. \label{} \end{equation} where $W$ is a dimensionless parameter describing the width of the force-length relationship. A minimum force level $F_{Mmin}$ is employed for numerical stability. McLean2003Muscle adopted $W=1$ and $F_{Mmin}=10 N$. The corresponding Python functions are: ``` def flce_T03(lm=1, gammal=0.45): """Thelen (2003) force of the contractile element as function of muscle length. 
Parameters ---------- lm : float, optional (default=1) normalized muscle fiber length gammal : float, optional (default=0.45) shape factor Returns ------- fl : float normalized force of the muscle contractile element """ fl = np.exp(-(lm-1)**2/gammal) return fl def flce_M03(lm=1, lmopt=1, fm0=1, fmmin=0.001, wl=1): """McLean (2003) force of the contractile element as function of muscle length. Parameters ---------- lm : float, optional (default=1) muscle (fiber) length lmopt : float, optional (default=1) optimal muscle fiber length fm0 : float, optional (default=1) maximum isometric muscle force fmmin : float, optional (default=0.001) minimum muscle force wl : float, optional (default=1) shape factor of the contractile element force-length curve Returns ------- fl : float force of the muscle contractile element """ fl = np.max([fmmin, fm0*(1 - ((lm - lmopt)/(wl*lmopt))**2)]) return fl ``` And plots of these functions: ``` lm = np.arange(0, 2.02, .02) fce_T03 = np.zeros(lm.size) fce_M03 = np.zeros(lm.size) for i in range(len(lm)): fce_T03[i] = flce_T03(lm[i]) fce_M03[i] = flce_M03(lm[i]) plt.figure(figsize=(7, 4)) plt.plot(lm, fce_T03, 'b', label='T03') plt.plot(lm, fce_M03, 'g', label='M03') plt.xlabel('Normalized length') plt.ylabel('Normalized force') plt.legend(loc='best') plt.suptitle('Force-length relationship of the contractile element', y=1, fontsize=16) plt.show() ``` Similar results when the same parameters are used. #### Force-length relationship of the parallel element Thelen2003Muscle represents the normalized force of the parallel (passive) element of the muscle as a function of muscle length (normalized by the optimal muscle fiber length) by an exponential function: \begin{equation} \bar{F}_{PE}(\bar{L}_M) = \dfrac{exp\left[k_{PE}(\bar{L}_M-1)/\epsilon_{M0}\right]-1}{exp(k_{PE})-1} \label{} \end{equation} where $k_{PE}$ is an exponential shape factor and $\epsilon_{M0}$ is the passive muscle strain due to maximum isometric force: \begin{equation} \epsilon_{M0}=\dfrac{L_M(F_{M0})-L_{Mslack}}{L_{Mslack}} \label{} \end{equation} where $L_{Mslack}$ is the muscle slack length. Thelen2003Muscle adopted $L_{Mslack} = L_{Mopt}$. Thelen2003Muscle adopted $k_{PE}=5$ and $\epsilon_{M0}=0.6$ for young adults ($\epsilon_{M0}=0.5$ for old adults). The actual force produced is obtained multiplying $\bar{F}_{PE}$ by the maximum isometric muscle force, $F_{M0}$. McLean2003Muscle represents the force of the parallel (passive) element of the muscle (not normalized) as a function of muscle length (not normalized) by a quadratic function: \begin{equation} F_{PE}(L_M) = \left\{ \begin{array}{l l} 0 \quad & \text{if} \quad L_M \leq L_{Mslack} \\ k_{PE}(L_M - L_{Mslack})^2 \quad & \text{if} \quad L_M > L_{Mslack} \end{array} \right. \label{} \end{equation} where $k_{PE}$ is a stiffness parameter of the parallel element such that the passive muscle force is equal to the normalized maximum isometric force of the muscle when the CE is stretched to its maximal length for active force production: \begin{equation} k_{PE} = \dfrac{F_{M0}}{(WL_{Mopt})^2} \label{} \end{equation} McLean2003Muscle adopted $L_{Mslack} = L_{Mopt}$. The corresponding Python functions are: ``` def fpelm_T03(lm=1, kpe=5, epsm0=0.6): """Thelen (2003) force of the muscle parallel element as function of muscle length. 
Parameters ---------- lm : float, optional (default=1) normalized muscle fiber length kpe : float, optional (default=5) exponential shape factor epsm0 : float, optional (default=0.6) passive muscle strain due to maximum isometric force Returns ------- fpe : float normalized force of the muscle parallel (passive) element """ if lm < 1: fpe = 0 else: fpe = (np.exp(kpe*(lm-1)/epsm0)-1)/(np.exp(kpe)-1) return fpe def fpelm_M03(lm=1, lmopt=1, fm0=1, lmslack=1, wp=1): """McLean (2003) force of the muscle parallel element as function of muscle length. Parameters ---------- lm : float, optional (default=1) muscle fiber length lmopt : float, optional (default=1) optimal muscle (fiber) length fm0 : float, optional (default=1) maximum isometric muscle force lmslack : float, optional (default=1) muscle slack length wp : float, optional (default=1) shape factor of the parallel element force-length curve Returns ------- fpe : float force of the muscle parallel (passive) element """ kpe = fm0/(wp*lmopt)**2 if lm <= lmslack: fpe = 0 else: fpe = kpe*(lm-lmslack)**2 return fpe ``` And plots of these functions: ``` lm = np.arange(0, 2.02, .02) fpe_T03 = np.zeros(lm.size) fpe_M03 = np.zeros(lm.size) for i in range(len(lm)): fpe_T03[i] = fpelm_T03(lm[i]) fpe_M03[i] = fpelm_M03(lm[i]) fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, sharex=True, sharey=True, figsize=(10, 4)) ax1.plot(lm[:86], fce_T03[:86], 'b', label='Active') ax1.plot(lm[:86], fpe_T03[:86], 'r', label='Passive') ax1.plot(lm[:86], fce_T03[:86] + fpe_T03[:86], 'g', label='Total') ax1.text(0.1, 2.6, 'T03') ax1.set_xlim([0, 1.7]) ax1.set_xlabel('Normalized length') ax1.set_ylabel('Normalized force') #ax1.legend(loc='best') ax2.plot(lm[:86], fce_M03[:86], 'b', label='Active') ax2.plot(lm[:86], fpe_M03[:86], 'r', label='Passive') ax2.plot(lm[:86], fce_M03[:86] + fpe_M03[:86], 'g', label='Total') ax2.text(0.1, 2.6, 'M03') ax2.set_xlim([0, 1.7]) ax2.set_xlabel('Normalized length') ax2.legend(loc='best') plt.suptitle('Muscle force-length relationship', y=1, fontsize=16) plt.tight_layout() plt.show() ``` The results are different at the maximum stretching because Thelen2003Muscle and McLean2003Muscle model differently the passive component. These results were simulated for a maximum muscle activation (an activation level, $a$, of 1, where 0 is no activation). 
The effect of different activation levels on the total muscle force (but only the active force is affected) is shown in the next figure: ``` lm = np.arange(0, 2.02, .02) fce_T03_als = np.zeros((lm.size, 5)) als = [0, 0.25, 0.50, 0.75, 1.0] for j, al in enumerate(als): for i in range(len(lm)): fce_T03_als[i, j] = flce_T03(lm[i])*al fig, ax = plt.subplots(nrows=1, ncols=1, sharex=True, sharey=True, figsize=(6, 5)) for j, al in enumerate(als): ax.plot(lm[:86], fce_T03_als[:86, j] + fpe_T03[:86], label='%.2f'%al) ax.text(0.1, 2.6, 'T03') ax.set_xlim([0, 1.7]) ax.set_xlabel('Normalized length') ax.set_ylabel('Normalized force') ax.legend(loc='best', title='Activation level') ax.set_title('Muscle force-length relationship', y=1, fontsize=16) plt.tight_layout() plt.show() ``` #### Force-length relationship of the series element (tendon) Thelen2003Muscle represented the tendon force of the series element as a function of the normalized tendon length (in fact, tendon strain) by an exponential function during an initial nonlinear toe region and by a linear function thereafter: \begin{equation} \bar{F}_{SE}(\bar{L}_T) = \left\{ \begin{array}{l l} \dfrac{\bar{F}_{Ttoe}}{exp(k_{Ttoe})-1}\left[exp(k_{Ttoe}\epsilon_T/\epsilon_{Ttoe})-1\right] \quad & \text{if} \quad \epsilon_T \leq \epsilon_{Ttoe} \\ k_{Tlin}(\epsilon_T - \epsilon_{Ttoe}) + \bar{F}_{Ttoe} \quad & \text{if} \quad \epsilon_T > \epsilon_{Ttoe} \end{array} \right. \label{} \end{equation} where $\epsilon_{T}$ is the tendon strain: \begin{equation} \epsilon_{T} = \dfrac{L_T-L_{Tslack}}{L_{Tslack}} \label{} \end{equation} $L_{Tslack}$ is the tendon slack length, $\epsilon_{Ttoe}$ is the tendon strain above which the tendon exhibits linear behavior, $k_{Ttoe}$ is an exponential shape factor, and $k_{Tlin}$ is a linear scale factor. The parameters are chosen such that the tendon elongation at the normalized maximal isometric force of the muscle is 4% of the tendon length ($\epsilon_{T0}=0.04$). Thelen2003Muscle adopted $k_{Ttoe}=3$ and the transition from nonlinear to linear behavior occurs for normalized tendon forces greater than $\bar{F}_{Ttoe}=0.33$. For continuity of slopes at the transition, $\epsilon_{Ttoe}=0.609\epsilon_{T0}$ and $k_{Tlin}=1.712/\epsilon_{T0}$. The actual force produced is obtained multiplying $\bar{F}_{SE}$ by the maximum isometric muscle force, $F_{M0}$. McLean2003Muscle represented the tendon force (not normalized) of the series element as a function of the tendon length (not normalized) by the same quadratic function used for the force of the muscle passive element: \begin{equation} F_{SE}(L_T) = \left\{ \begin{array}{l l} 0 \quad & \text{if} \quad L_T \leq L_{Tslack} \\ k_T(L_T - L_{Tslack})^2 \quad & \text{if} \quad L_T > L_{Tslack} \end{array} \right. \label{} \end{equation} where $k_T$ is the tendon stiffness. The stiffness parameter $k_T$ is chosen such that the tendon elongation is 4% at the maximum isometric force, $k_T=(1/\epsilon_{T0})^2=625$ for $F_{M0}=1$. The corresponding Python functions are: ``` def fselt_T03(lt=1, ltslack=1, epst0=0.04, kttoe=3): """Thelen (2003) force-length relationship of tendon as function of tendon length. 
Parameters ---------- lt : float, optional (default=1) normalized tendon length ltslack : float, optional (default=1) normalized tendon slack length epst0 : float, optional (default=0.04) tendon strain at the maximal isometric muscle force kttoe : float, optional (default=3) linear scale factor Returns ------- fse : float normalized force of the tendon series element """ epst = (lt-ltslack)/ltslack fttoe = 0.33 # values from OpenSim Thelen2003Muscle epsttoe = .99*epst0*np.e**3/(1.66*np.e**3 - .67) ktlin = .67/(epst0 - epsttoe) # if epst <= 0: fse = 0 elif epst <= epsttoe: fse = fttoe/(np.exp(kttoe)-1)*(np.exp(kttoe*epst/epsttoe)-1) else: fse = ktlin*(epst-epsttoe) + fttoe return fse def fselt_M03(lt, ltslack=1, fm0=1, epst0=0.04): """McLean (2003) force-length relationship of tendon as function of tendon length. Parameters ---------- lt : float, optional (default=1) tendon length ltslack : float, optional (default=1) tendon slack length fm0 : float, optional (default=1) maximum isometric muscle force epst0 : float, optional (default=0.04) tendon strain at the maximal isometric muscle force Returns ------- fse : float force of the tendon series element """ kt = fm0/epst0**2 if lt <= ltslack: fse = 0 else: fse = kt*(lt-ltslack)**2 return fse ``` And plots of these functions: ``` lt = np.arange(1, 1.051, .001) fse_T03 = np.zeros(lt.size) fse_M03 = np.zeros(lt.size) for i in range(len(lt)): fse_T03[i] = fselt_T03(lt[i]) fse_M03[i] = fselt_M03(lt[i]) plt.figure(figsize=(7, 4)) plt.plot(lt-1, fse_T03, 'b', label='T03') plt.plot(lt-1, fse_M03, 'g', label='M03') plt.plot(0.04, 1, 'ro', markersize=8) plt.text(0.04, 0.7, '$\epsilon_{T0}$', fontsize=22) plt.xlabel('Tendon strain') plt.ylabel('Normalized force') plt.legend(loc='upper left') plt.suptitle('Tendon force-length relationship (series element)', y=1, fontsize=16) plt.show() ``` Similar results when the same parameters are used. ### Force-velocity relationship of the contractile element The force-velocity relation of the contractile element for shortening (concentric activation) is based on the well known Hill's equation of a hyperbola describing that the product between force $F$ and velocity $V$ of the contractile element is constant (Winters, 1990; Winters, 1995): \begin{equation} (F+a')(V+b') = (F_{0}+a')b' \label{} \end{equation} where $a'$, $b'$, and $F_{0}$ are constants. We can rewrite the equation above with constants more meaningful to our modeling: \begin{equation} (F_{M}+A_f F_{Mlen})(V_M+A_f V_{Mmax}) = A_f F_{Mlen}V_{Mmax}(1+A_f) \label{} \end{equation} where $F_{M}$ and $V_M$ are the contractile element force and velocity, respectively, and the three constants are: $V_{Mmax}$, the maximum unloaded velocity (when $F_{M}=0$), $F_{Mlen}$, the maximum isometric force (when $V_M=0$), and $A_f$, a shape factor which specifies the concavity of the hyperbola. Based on the equation above for the shortening phase and in Winters (1990, 1995) for the lengthening phase, Thelen2003Muscle employed the following force-velocity equation: \begin{equation} V_M = (0.25+0.75a)\,V_{Mmax}\dfrac{\bar{F}_M-a\bar{f}_{l,CE}}{b} \label{} \end{equation} where \begin{equation} b = \left\{ \begin{array}{l l l} a\bar{f}_{l,CE} + \bar{F}_M/A_f \quad & \text{if} \quad \bar{F}_M \leq a\bar{f}_{l,CE} & \text{(shortening)} \\ \\ \dfrac{(2+2/A_f)(a\bar{f}_{l,CE}\bar{f}_{Mlen} - \bar{F}_M)}{\bar{f}_{Mlen}-1} \quad & \text{if} \quad \bar{F}_M > a\bar{f}_{l,CE} & \text{(lengthening)} \end{array} \right. 
\label{} \end{equation} where $a$ is the activation level and $\bar{f}_{Mlen}$ is a constant for the maximum force generated at the lengthening phase (normalized by the maximum isometric force). Thelen2003Muscle adopted $A_f=0.25$, $V_{Mmax}=10L_{Mopt}/s$, $\bar{f}_{Mlen}=1.4$ for young adults ($V_{Mmax}=8L_{Mopt}/s$ and $\bar{f}_{Mlen}=1.8$ for old adults). Note that the dependences of the force with the activation level and with the muscle length are already incorporated in the expression above. McLean2013Muscle employed: \begin{equation} \bar{f}_{v,CE} = \left\{ \begin{array}{l l l} \dfrac{\lambda(a)V_{Mmax} + V_M}{\lambda(a)V_{Mmax} - V_M/A_f} \quad & \text{if} \quad V_M \leq 0 & \text{(shortening)} \\ \\ \dfrac{\bar{f}_{Mlen}V_M + d_1}{V_M + d_1} \quad & \text{if} \quad 0 < V_M \leq \gamma d_1 & \text{(slow lengthening)} \\ \\ d_3 + d_2V_M \quad & \text{if} \quad V_M > \gamma d_1 & \text{(fast lengthening)} \end{array} \right. \label{} \end{equation} where \begin{equation} \begin{array}{l l} \lambda(a) = 1-e^{-3.82a} + a\:e^{-3.82} \\ \\ d_1 = \dfrac{V_{Mmax}A_f(\bar{f}_{Mlen}-1)}{S(A_f+1)} \\ \\ d_2 = \dfrac{S(A_f+1)}{V_{Mmax}A_f(\gamma+1)^2} \\ \\ d_3 = \dfrac{(\bar{f}_{Mlen}-1)\gamma^2}{(\gamma+1)^2} + 1 \end{array} \label{} \end{equation} where $\lambda(a)$ is a scaling factor to account for the influence of the activation level $a$ on the force-velocity relationship, $\bar{f}_{Mlen}$ is the asymptotic (maximum) value of $\bar{F}_M$, $S$ is a parameter to double the slope of the force-velocity curve at zero velocity, and $\gamma$ is a dimensionless parameter to ensure the transition between the hyperbolic and linear parts of the lengthening phase. McLean2013Muscle adopted $A_f=0.25$, $V_{Mmax}=10L_{Mopt}/s$, $\bar{f}_{Mlen}=1.5$, $S=2.0$, and $\gamma=5.67$. Let's write these expressions as Python code and visualize them: ``` def vmfce_T03(fm, flce=1, lmopt=1, a=1, vmmax=1, fmlen=1.4, af=0.25): """Thelen (2003) velocity of the force-velocity relationship as function of CE force. Parameters ---------- fm : float normalized muscle force flce : float, optional (default=1) normalized muscle force due to the force-length relationship lmopt : float, optional (default=1) optimal muscle fiber length a : float, optional (default=1) muscle activation level vmmax : float, optional (default=1) maximum muscle velocity for concentric activation fmlen : float, optional (default=1.4) normalized maximum force generated at the lengthening phase af : float, optional (default=0.25) shape factor Returns ------- vm : float velocity of the muscle """ vmmax = vmmax*lmopt if fm <= a*flce: # isometric and concentric activation b = a*flce + fm/af else: # eccentric activation b = (2 + 2/af)*(a*flce*fmlen - fm)/(fmlen - 1) vm = (0.25 + 0.75*a)*vmmax*(fm - a*flce)/b return vm ``` Let's find an expression for contractile element force as function of muscle velocity given the equation above, i.e. we want to invert the equation. 
For that, let's use [Sympy](http://www.sympy.org/): ``` def fvce_T03_symb(): # Thelen (2003) velocity of the force-velocity relationship as function of CE force from sympy import symbols, solve, collect, Eq a, flce, fm, af, fmlen, vmmax = symbols('a, flce, fm, af, fmlen, vmmax', positive=True) vm = symbols('vm', real=True) b = a*flce + fm/af vm_eq = Eq(vm - (0.25 + 0.75*a)*vmmax*(fm - a*flce)/b) sol = solve(vm_eq, fm) print('fm <= a*flce:\n', collect(sol[0], vmmax),'\n') b = (2 + 2/af)*(a*flce*fmlen - fm)/(fmlen - 1) vm_eq = Eq(vm - (0.25 + 0.75*a)*vmmax*(fm - a*flce)/b) sol = solve(vm_eq, fm) print('fm > a*flce:\n', collect(sol[0], (vmmax*af, fmlen, vm))) fvce_T03_symb() ``` And here is the function we need to compute contractile element force as function of muscle velocity: ``` def fvce_T03(vm=0, flce=1, lmopt=1, a=1, vmmax=1, fmlen=1.4, af=0.25): """Thelen (2003) force of the contractile element as function of muscle velocity. Parameters ---------- vm : float, optional (default=0) muscle velocity flce : float, optional (default=1) normalized muscle force due to the force-length relationship lmopt : float, optional (default=1) optimal muscle fiber length a : float, optional (default=1) muscle activation level vmmax : float, optional (default=1) maximum muscle velocity for concentric activation fmlen : float, optional (default=1.4) normalized maximum force generated at the lengthening phase af : float, optional (default=0.25) shape factor Returns ------- fvce : float normalized force of the muscle contractile element """ vmmax = vmmax*lmopt if vm <= 0: # isometric and concentric activation fvce = af*a*flce*(4*vm + vmmax*(3*a + 1))/(-4*vm + vmmax*af*(3*a + 1)) else: # eccentric activation fvce = a*flce*(af*vmmax*(3*a*fmlen - 3*a + fmlen - 1) + 8*vm*fmlen*(af + 1))/\ (af*vmmax*(3*a*fmlen - 3*a + fmlen - 1) + 8*vm*(af + 1)) return fvce ``` Here is the Python function for the McLean (2003) model: ``` def fvce_M03(vm=0, lmopt=1, a=1, vmmax=1, fmlen=1.5, af=0.25, s=2, gammav=5.67): """McLean (2003) contractile element force as function of muscle velocity. Parameters ---------- vm : float, optional (default=0) muscle velocity lmopt : float, optional (default=1) optimal muscle fiber length a : float, optional (default=1) muscle activation level vmmax : float, optional (default=1) maximum muscle velocity for concentric activation fmlen : float, optional (default=1.5) normalized maximum force generated at the lengthening phase af : float, optional (default=0.25) shape factor s : float, optional (default=2) to double the slope of the force-velocity curve at zero velocity gammav : float, optional (default=5.67) to ensure the smooth transition of the lengthening phase Returns ------- fvce : float normalized force of the muscle contractile element """ vmmax = vmmax*lmopt d1 = vmmax*af*(fmlen - 1)/(s*(af + 1)) d2 = s*(af + 1)/(vmmax*af*(gammav + 1)**2) d3 = (fmlen - 1)*gammav**2/(gammav + 1)**2 + 1 lbd = 1 - np.exp(-3.82*a) + a*np.exp(-3.82) if vm <= 0: # isometric and concentric activation fvce = (lbd*vmmax + vm)/(lbd*vmmax - vm/af) elif 0 < vm <= gammav*d1: # slow lengthening fvce = (fmlen*vm + d1)/(vm + d1) elif vm > gammav*d1: # fast lengthening fvce = d3 + d2*vm return fvce ``` We can invert this equation to get an expression for muscle velocity as function of the contractile element force: ``` def vmfce_M03(fvce=1, lmopt=1, a=1, vmmax=1, fmlen=1.5, af=0.25, s=2, gammav=5.67): """McLean (2003) contractile element velocity as function of CE force. 
Parameters ---------- fvce : float, optional (default=1) normalized muscle force lmopt : float, optional (default=1) optimal muscle fiber length a : float, optional (default=1) muscle activation level vmmax : float, optional (default=1) maximum muscle velocity for concentric activation fmlen : float, optional (default=1.5) normalized maximum force generated at the lengthening phase af : float, optional (default=0.25) shape factor s : float, optional (default=2) to double the slope of the force-velocity curve at zero velocity gammav : float, optional (default=5.67) to ensure the smooth transition of the lengthening phase Returns ------- fvce : float muscle velocity """ vmmax = vmmax*lmopt d1 = vmmax*af*(fmlen - 1)/(s*(af + 1)) d2 = s*(af + 1)/(vmmax*af*(gammav + 1)**2) d3 = (fmlen - 1)*gammav**2/(gammav + 1)**2 + 1 lbd = 1 - np.exp(-3.82*a) + a*np.exp(-3.82) if 0 <= fvce <= 1: # isometric and concentric activation vm = (lbd*vmmax*(1 - fvce))/(1 + fvce/af) elif 1 < fvce <= gammav*d1*d2 + d3: # slow lengthening vm = d1*(fvce - 1)/(fmlen - fvce) elif fvce > gammav*d1*d2 + d3: # fast lengthening vm = (fvce - d3)/d2 return vm ``` Let's use these functions to compute muscle force as a function of the muscle velocity considering two levels of activation: ``` vm1_T03 = np.linspace(-1, 1, 201) fce1_T03 = np.zeros(vm1_T03.size) vm2_T03 = np.linspace(-.63, .63, 201) fce2_T03 = np.zeros(vm2_T03.size) for i in range(len(vm1_T03)): fce1_T03[i] = fvce_T03(vm=vm1_T03[i]) fce2_T03[i] = fvce_T03(vm=vm2_T03[i], a=0.5) vm1_M03 = np.linspace(-1, 1, 201) fce1_M03 = np.zeros(vm1_M03.size) vm2_M03 = np.linspace(-.63, .63, 201) fce2_M03 = np.zeros(vm2_M03.size) for i in range(len(vm1_M03)): fce1_M03[i] = fvce_M03(vm=vm1_M03[i]) fce2_M03[i] = fvce_M03(vm=vm2_M03[i], a=0.5) fce2_M03 = fce2_M03*0.5 fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, sharex=True, sharey=True, figsize=(10, 4)) ax1.plot(vm1_T03, fce1_T03, 'b', label='T03)') ax1.plot(vm1_M03, fce1_M03, 'g', label='M03)') ax1.set_ylabel('Normalized force') ax1.set_xlabel('Normalized velocity') ax1.text(-.9, 1.5, 'Activation = 1.0') ax2.plot(vm2_T03, fce2_T03, 'b', label='T03') ax2.plot(vm2_M03, fce2_M03, 'g', label='M03') ax2.text(-.9, 1.5, 'Activation = 0.5') ax2.set_xlabel('Normalized velocity') ax2.legend(loc='best') plt.suptitle('Force-velocity relationship of the contractile element', y=1.05, fontsize=16) plt.tight_layout() plt.show() ``` Identical results for the shortening phase when $a=1$ and similar results for the lengthening phase when the same parameters are used. 
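A quick numerical check of the two implementations above is to evaluate them at the reference points of the force-velocity curve: at zero velocity the normalized force should equal the isometric force (1 for full activation), and at the maximal shortening velocity it should drop to zero (both follow directly from the formulas above). This is a minimal sketch using the `fvce_T03` and `fvce_M03` functions defined above with their default parameters:

```
# Sanity check: both models should give the isometric force (1.0) at vm = 0
# and zero force at the maximal shortening velocity (vm = -vmmax = -1 here).
for vm in [0.0, -1.0]:
    print('vm = {:5.2f}: T03 = {:.3f}, M03 = {:.3f}'.format(
        vm, fvce_T03(vm=vm), fvce_M03(vm=vm)))
```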
#### Muscle power The muscle power is the product between force and velocity: ``` P_T03 = np.abs(fce1_T03*vm1_T03) ``` Let's visualize the muscle power only for the concentric phase (muscle shortening): ``` plt.figure(figsize=(7, 4)) plt.plot(vm1_T03[:101], fce1_T03[:101], 'b', label='Force') plt.xlabel('Normalized velocity') plt.ylabel('Normalized force', color='b') #plt.legend(loc='upper left') plt.gca().invert_xaxis() plt.gca().twinx() plt.plot(vm1_T03[:101], P_T03[:101], 'g', label='Power') plt.ylabel('Normalized power', color='g') #plt.legend(loc='upper right') plt.suptitle('Muscle power', y=1, fontsize=16) plt.show() ``` #### Force-length-velocity relationship Let's visualize the effects of the length and velocity on the total (active plus passive) muscle force: ``` lms = np.linspace(0, 1.65, 101) vms = np.linspace(-1, .76, 101) fce_T03 = np.zeros(lms.size) fpe_T03 = np.zeros(lms.size) fm_T03 = np.zeros((lms.size, vms.size)) for i in range(len(lms)): fce_T03[i] = flce_T03(lm=lms[i]) fpe_T03[i] = fpelm_T03(lm=lms[i]) for j in range(len(vms)): fm_T03[j, i] = fvce_T03(vm=vms[j], flce=fce_T03[i]) + fpe_T03[i] lms = np.linspace(0, 1.65, 101) vms = np.linspace(-1, .76, 101) fce_M03 = np.zeros(lms.size) fpe_M03 = np.zeros(lms.size) fm_M03 = np.zeros((lms.size, vms.size)) for i in range(len(lms)): fce_M03[i] = flce_M03(lm=lms[i]) fpe_M03[i] = fpelm_M03(lm=lms[i]) for j in range(len(vms)): fm_M03[j, i] = fvce_M03(vm=vms[j])*fce_M03[i] + fpe_M03[i] from mpl_toolkits.mplot3d import Axes3D def flv3dplot(ax, lm, vm, fm, model): # 3d plot lm2, vm2 = np.meshgrid(lm, vm) ax.plot_surface(lm2, vm2, fm, rstride=2, cstride=2, cmap=plt.cm.coolwarm, linewidth=.5, antialiased=True) ax.plot(np.ones(vms.size), vms, fm[:, np.argmax(lm>=1)], 'w', linewidth=4) ax.plot(lm, np.zeros(lm.size), fm[np.argmax(vm>=0),:], 'w', linewidth=4) ax.set_xlim3d(lm[0], lm[-1]) ax.set_ylim3d(vm[0], vm[-1]) #ax.set_zlim3d(np.min(fm), np.max(fm)) ax.set_zlim3d(0, 2) ax.set_xlabel('Normalized length') ax.set_ylabel('Normalized velocity') ax.set_zlabel('Normalized force') ax.view_init(20, 225) ax.locator_params(nbins=6) ax.text(-0.4, 0.7, 2.5, model, fontsize=14) fig = plt.figure(figsize=(12, 6)) ax1 = fig.add_subplot(1, 2, 1, projection='3d') flv3dplot(ax1, lms, vms, fm_T03, 'T03') ax2 = fig.add_subplot(1, 2, 2, projection='3d') flv3dplot(ax2, lms, vms, fm_M03, 'M03') plt.suptitle('Force-length-velocity relationship', y=1, fontsize=16) plt.tight_layout() plt.show() ``` ### Activation dynamics Activation dynamics represents the fact that a muscle cannot instantly activate or deactivate because of the electrical and chemical processes involved and it is usually integrated with a Hill-type model. In its simplest form, the activation dynamics is generally represented as a first-order ODE. Thelen2003Muscle employed the following first-order [ordinary differential equation (ODE)](http://en.wikipedia.org/wiki/Ordinary_differential_equation): \begin{equation} \frac{\mathrm{d}a}{\mathrm{d}t} = \dfrac{u-a}{\tau(a, u)} \label{} \end{equation} with a lower activation bound to both activation and excitation. where $u$ and $a$ are the muscle excitation and activation, respectively (both are function of time), and $\tau$ is a variable time constant to represent the activation and deactivation times, given by: \begin{equation} \tau(a, u) = \left\{ \begin{array}{l l} t_{act}(0.5+1.5a) \quad & \text{if} \quad u > a\\ \dfrac{t_{deact}}{(0.5+1.5a)} \quad & \text{if} \quad u \leq a \end{array} \right. 
\label{} \end{equation} Thelen2003Muscle adopted activation, $t_{act}$, and deactivation, $t_{deact}$, time constants for young adults equal to 15 and 50 ms, respectively (for old adults, Thelen2003Muscle adopted 15 and 60 ms, respectively). McLean2003Muscle expressed the activation dynamics as the following first-order ODE: \begin{equation} \dfrac{\mathrm{d}a}{\mathrm{d}t} = (u - a)(c_1u + c_2) \label{} \end{equation} with a lower activation bound to both activation and excitation. where $c_1 + c_2$ is the activation rate constant (when $u = 1$), the inverse of $t_{act}$, and $c_2$ is the deactivation rate constant (when $u = 0$), the inverse of $t_{deact}$. McLean2003Muscle adopted $c_1=3.3 s^{-1}$ and $c_2=16.7 s^{-1}$, resulting in time constants of 50 ms and 60 ms for activation and deactivation, respectively. In Python, the numeric first-order ODE for the activation dynamics presented in Thelen2003Muscle can be expressed as: ``` def actdyn_T03(t, a, t_act, t_deact, u_max, u_min, t0=0, t1=1): """Thelen (2003) activation dynamics, the derivative of `a` at `t`. Parameters ---------- t : float time instant [s] a : float (0 <= a <= 1) muscle activation t_act : float activation time constant [s] t_deact : float deactivation time constant [s] u_max : float (0 < u_max <= 1), optional (default=1) maximum value for muscle excitation u_min : float (0 < u_min < 1), optional (default=0.01) minimum value for muscle excitation t0 : float [s], optional (default=0) initial time instant for muscle excitation equals to u_max t1 : float [s], optional (default=1) final time instant for muscle excitation equals to u_max Returns ------- adot : float derivative of `a` at `t` """ u = excitation(t, u_max, u_min) if u > a: adot = (u - a)/(t_act*(0.5 + 1.5*a)) else: adot = (u - a)/(t_deact/(0.5 + 1.5*a)) return adot ``` In Python, the numeric first-order ODE for the activation dynamics presented in McLean2003Muscle can be expressed as: ``` def actdyn_M03(t, a, t_act, t_deact, u_max=1, u_min=0.01, t0=0, t1=1): """McLean (2003) activation dynamics, the derivative of `a` at `t`. Parameters ---------- t : float time instant [s] a : float (0 <= a <= 1) muscle activation t_act : float activation time constant [s] t_deact : float deactivation time constant [s] u_max : float (0 < u_max <= 1), optional (default=1) maximum value for muscle excitation u_min : float (0 < u_min < 1), optional (default=0.01) minimum value for muscle excitation t0 : float [s], optional (default=0) initial time instant for muscle excitation equals to u_max t1 : float [s], optional (default=1) final time instant for muscle excitation equals to u_max Returns ------- adot : float derivative of `a` at `t` """ c2 = 1/t_deact c1 = 1/t_act - c2 u = excitation(t, u_max, u_min) adot = (u - a)*(c1*u + c2) return adot ``` Let's simulate the activation signal for a rectangular function as excitation signal: ``` def excitation(t, u_max=1, u_min=0.01, t0=0.1, t1=0.4): """Excitation signal, a square wave. 
Parameters ---------- t : float time instant [s] u_max : float (0 < u_max <= 1), optional (default=1) maximum value for muscle excitation u_min : float (0 < u_min < 1), optional (default=0.01) minimum value for muscle excitation t0 : float [s], optional (default=0.1) initial time instant for muscle excitation equals to u_max t1 : float [s], optional (default=0.4) final time instant for muscle excitation equals to u_max Returns ------- u : float (0 < u <= 1) excitation signal """ u = u_min if t >= t0 and t <= t1: u = u_max return u ``` We will solve the equation for $a$ by numerical integration using the [`scipy.integrate.ode`](http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.integrate.ode.html) class of numeric integrators, particularly the `dopri5`, an explicit runge-kutta method of order (4)5 due to Dormand and Prince (a.k.a. ode45 in Matlab): ``` import warnings def actdyn_ode45(fun, t0=0, t1=1, a0=0, t_act=0.015, t_deact=0.050, u_max=1, u_min=0.01): # Runge-Kutta (4)5 due to Dormand & Prince with variable stepsize ODE solver f = ode(fun).set_integrator('dopri5', nsteps=1, max_step=0.01, atol=1e-8) f.set_initial_value(a0, t0).set_f_params(t_act, t_deact, u_max, u_min) # suppress Fortran warning warnings.filterwarnings("ignore", category=UserWarning) data = [] while f.t < t1: f.integrate(t1, step=True) data.append([f.t, excitation(f.t, u_max, u_min), np.max([f.y, u_min])]) warnings.resetwarnings() data = np.array(data) return data ``` Solving the problem for two different maximum excitation levels: ``` # using the values for t_act and t_deact from Thelen2003Muscle for both models act1_T03 = actdyn_ode45(fun=actdyn_T03, u_max=1.0) act2_T03 = actdyn_ode45(fun=actdyn_T03, u_max=0.5) act1_M03 = actdyn_ode45(fun=actdyn_M03, u_max=1.0) act2_M03 = actdyn_ode45(fun=actdyn_M03, u_max=0.5) # using the values for t_act and t_deact from McLean2003Muscle act3_M03 = actdyn_ode45(fun=actdyn_M03, u_max=1.0, t_act=0.050, t_deact=0.060) act4_M03 = actdyn_ode45(fun=actdyn_M03, u_max=0.5, t_act=0.050, t_deact=0.060) ``` And the results: ``` fig, axs = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(10, 6)) axs[0, 0].plot(act1_T03[:, 0], act1_T03[:, 1], 'r:', label='Excitation') axs[0, 0].plot(act1_T03[:, 0], act1_T03[:, 2], 'b', label='T03 [15, 50] ms') axs[0, 0].plot(act1_M03[:, 0], act1_M03[:, 2], 'g', label='M03 [15, 50] ms') axs[0, 0].set_ylabel('Level') axs[0, 1].plot(act2_T03[:, 0], act2_T03[:, 1], 'r:', label='Excitation') axs[0, 1].plot(act2_T03[:, 0], act2_T03[:, 2], 'b', label='T03 [15, 50] ms') axs[0, 1].plot(act2_M03[:, 0], act2_M03[:, 2], 'g', label='M03 [15, 50] ms') axs[1, 1].set_xlabel('Time (s)') axs[0, 1].legend() axs[1, 0].plot(act1_T03[:, 0], act1_T03[:, 1], 'r:', label='Excitation') axs[1, 0].plot(act1_T03[:, 0], act1_T03[:, 2], 'b', label='T03 [15, 50] ms') axs[1, 0].plot(act3_M03[:, 0], act3_M03[:, 2], 'g', label='M03 [50, 60] ms') axs[1, 0].set_xlabel('Time (s)') axs[1, 0].set_ylabel('Level') axs[1, 1].plot(act2_T03[:, 0], act2_T03[:, 1], 'r:', label='Excitation') axs[1, 1].plot(act2_T03[:, 0], act2_T03[:, 2], 'b', label='T03 [15, 50] ms') axs[1, 1].plot(act4_M03[:, 0], act4_M03[:, 2], 'g', label='M03 [50, 60] ms') axs[1, 1].set_xlabel('Time (s)') axs[1, 1].legend() plt.suptitle('Activation dynamics', y=1, fontsize=16) plt.tight_layout() plt.show() ``` Similar results when the same parameters are used (first row), but different bahavior when the typical values of each study are compared (second row). 
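Because the McLean (2003) activation ODE is linear in $a$ when the excitation is held constant, it has a closed-form solution that can be used to cross-check the numerical integration above. A minimal sketch, assuming a constant excitation $u$ and the activation/deactivation time constants adopted by McLean2003Muscle:

```
# For constant u, da/dt = (u - a)*(c1*u + c2) integrates to
# a(t) = u + (a0 - u)*exp(-(c1*u + c2)*t).
t_act, t_deact = 0.050, 0.060      # McLean (2003) time constants [s]
c2 = 1/t_deact
c1 = 1/t_act - c2
u, a0 = 1.0, 0.0                   # constant excitation and initial activation
t = np.arange(0, 0.31, 0.05)
a_analytic = u + (a0 - u)*np.exp(-(c1*u + c2)*t)
print(np.round(a_analytic, 3))     # rises toward u with time constant 1/(c1*u + c2) = t_act
```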
### Muscle modeling parameters

We have seen two types of parameters in muscle modeling: parameters related to the mathematical functions used to model the muscle and tendon behavior, and parameters related to the properties of specific muscles and tendons (e.g., maximum isometric force, optimal fiber length, pennation angle, and tendon slack length). In general, the first type of parameter is independent of the muscle-tendon unit being modeled (but dependent on the model!), while the second type changes for each muscle-tendon unit (for instance, see http://isbweb.org/data/delp/ for some of these parameters).

### Limitations of Hill-type muscle models

As with any modeling, Hill-type muscle models are a simplification of reality. For instance, a typical Hill-type muscle model (as implemented here) does not capture time-dependent muscle behavior, such as force depression after quick muscle shortening, force enhancement after quick muscle lengthening, viscoelastic properties (creep and relaxation), and muscle fatigue (Zatsiorsky and Prilutsky, 2012). There are enhanced models that capture these properties, but their added complexity does not seem worthwhile for the most common applications of human movement simulation.

## Exercises

1. The results presented in this text depend on the parameters used in the model. These parameters may vary because of different properties of the muscle and tendon, but also because different mathematical functions may be used.
 a. Change some of the parameters, reproduce the plots shown here, and discuss the results (e.g., use the parameters for different muscles from OpenSim or the data from [http://isbweb.org/data/delp/](http://isbweb.org/data/delp/)).
 b. Select another reference (e.g., Anderson, 2007) about muscle modeling that uses different mathematical functions and repeat the previous item.

## References

- Anderson C (2007) [Equations for Modeling the Forces Generated by Muscles and Tendons](https://docs.google.com/viewer?url=https%3A%2F%2Fsimtk.org%2Fdocman%2Fview.php%2F124%2F604%2FMuscleAndTendonForcesClayAnderson20070521.doc) ([PDF](https://drive.google.com/open?id=0BxbW72zV7WmUVUh0MldGOGZ6aHc&authuser=0)). BioE215 Physics-based Simulation of Biological Structures.
- Erdemir A, McLean S, Herzog W, van den Bogert AJ (2007) [Model-based estimation of muscle forces exerted during movements](http://www.ncbi.nlm.nih.gov/pubmed/17070969). Clinical Biomechanics, 22, 131–154.
- He J, Levine WS, Loeb GE (1991) [Feedback gains for correcting small perturbations to standing posture](https://drive.google.com/open?id=0BxbW72zV7WmUekRXY09GSEhUVlE&authuser=0). IEEE Transactions on Automatic Control, 36, 322–332.
- McLean SG, Su A, van den Bogert AJ (2003) [Development and validation of a 3-D model to predict knee joint loading during dynamic movement](http://www.ncbi.nlm.nih.gov/pubmed/14986412). Journal of Biomechanical Engineering, 125, 864-74.
- McMahon TA (1984) [Muscles, Reflexes, and Locomotion](https://archive.org/details/McMahonTAMusclesReflexesAndLocomotionPrincetonUniversityPress1984). Princeton University Press, Princeton, New Jersey.
- Millard M, Uchida T, Seth A, Delp SL (2013) [Flexing computational muscle: modeling and simulation of musculotendon dynamics](http://www.ncbi.nlm.nih.gov/pubmed/23445050). Journal of Biomechanical Engineering, 135, 021005.
- Nigg BM and Herzog W (2006) [Biomechanics of the Musculo-skeletal System](https://books.google.com.br/books?id=hOIeAQAAIAAJ&dq=editions:ISBN0470017678). 3rd Edition. Wiley.
- Robertson G, Caldwell G, Hamill J, Kamen G (2013) [Research Methods in Biomechanics](http://books.google.com.br/books?id=gRn8AAAAQBAJ). 2nd Edition. Human Kinetics.
- Thelen DG (2003) [Adjustment of muscle mechanics model parameters to simulate dynamic contractions in older adults](http://homepages.cae.wisc.edu/~thelen/pubs/jbme03.pdf). Journal of Biomechanical Engineering, 125(1):70–77.
- Tsianos GA and Loeb GE (2013) [Muscle Physiology and Modeling](http://www.scholarpedia.org/article/Muscle_Physiology_and_Modeling). Scholarpedia, 8(10):12388.
- Winters JM (1990) [Hill-based muscle models: a systems engineering perspective](http://link.springer.com/chapter/10.1007%2F978-1-4613-9030-5_5). In [Multiple Muscle Systems: Biomechanics and Movement Organization](http://link.springer.com/book/10.1007/978-1-4613-9030-5), edited by JM Winters and SL Woo, Springer-Verlag, New York.
- Winters JM (1995) [An Improved Muscle-Reflex Actuator for Use in Large-Scale Neuromusculoskeletal Models](http://www.ncbi.nlm.nih.gov/pubmed/7486344). Annals of Biomedical Engineering, 23, 359–374.
- Zajac FE (1989) [Muscle and tendon: properties, models, scaling and application to biomechanics and motor control](http://www.ncbi.nlm.nih.gov/pubmed/2676342). Critical Reviews in Biomedical Engineering, 17:359-411.
- Zatsiorsky V and Prilutsky B (2012) [Biomechanics of Skeletal Muscles](http://books.google.com.br/books?id=THXfHT8L5MEC). Human Kinetics.
# Getting Started With Xarray <h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc"><ul class="toc-item"><li><span><a href="#Getting-Started-With-Xarray" data-toc-modified-id="Getting-Started-With-Xarray-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Getting Started With Xarray</a></span><ul class="toc-item"><li><span><a href="#Learning-Objectives" data-toc-modified-id="Learning-Objectives-1.1"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Learning Objectives</a></span></li><li><span><a href="#What-Is-Xarray?" data-toc-modified-id="What-Is-Xarray?-1.2"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>What Is Xarray?</a></span></li><li><span><a href="#Core-Data-Structures" data-toc-modified-id="Core-Data-Structures-1.3"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>Core Data Structures</a></span><ul class="toc-item"><li><span><a href="#DataArray" data-toc-modified-id="DataArray-1.3.1"><span class="toc-item-num">1.3.1&nbsp;&nbsp;</span><code>DataArray</code></a></span></li><li><span><a href="#Dataset" data-toc-modified-id="Dataset-1.3.2"><span class="toc-item-num">1.3.2&nbsp;&nbsp;</span><code>Dataset</code></a></span></li></ul></li><li><span><a href="#Going-Further" data-toc-modified-id="Going-Further-1.4"><span class="toc-item-num">1.4&nbsp;&nbsp;</span>Going Further</a></span></li></ul></li></ul></div> ## Learning Objectives - Provide an overview of xarray - Describe the core xarray data structures, the DataArray and the Dataset, and the components that make them up - Create xarray DataArrays/Datasets out of raw numpy arrays - Create xarray objects with and without indexes - View and set attributes ## What Is Xarray? Unlabeled, N-dimensional arrays of numbers (e.g., NumPy’s ndarray) are the most widely used data structure in scientific computing. However, they lack a meaningful representation of the metadata associated with their data. Implementing such functionality is left to individual users and domain-specific packages. xarray is a useful tool for parallelizing and working with large datasets in the geosciences. xarry expands on the capabilities of NumPy arrays, providing a lot of streamline data manipulation. Xarray's interface is based largely on the netCDF data model (variables, attributes, and dimensions), but it goes beyond the traditional netCDF interfaces to provide functionality similar to netCDF-java's Common Data Model (CDM). ## Core Data Structures - xarray has 2 fundamental data structures: - `DataArray`, which holds single multi-dimensional variables and its coordinates - `Dataset`, which holds multiple variables that potentially share the same coordinates ![](./images/xarray-data-structures.png) ### `DataArray` The DataArray is xarray's implementation of a labeled, multi-dimensional array. It has several key properties: | Attribute | Description | |----------- |------------------------------------------------------------------------------------------------------------------------------------------ | | `data` | `numpy.ndarray` or `dask.array` holding the array's values. | | `dims` | dimension names for each axis. For example:(`x`, `y`, `z`) (`lat`, `lon`, `time`). 
| | `coords` | a dict-like container of arrays (coordinates) that label each point (e.g., 1-dimensional arrays of numbers, datetime objects or strings) | | `attrs` | an `OrderedDict` to hold arbitrary attributes/metadata (such as units) | | `name` | an arbitrary name of the array | ``` # Import packages import numpy as np import xarray as xr # Create some sample data data = 2 + 6 * np.random.exponential(size=(5, 3, 4)) data ``` To create a basic `DataArray`, you can pass this numpy array of random data to `xr.DataArray` ``` prec = xr.DataArray(data) prec ``` <div class="alert alert-block alert-warning"> Xarray automatically generates some basic dimension names for us. </div> You can also pass in your own dimension names and coordinate values: ``` # Use pandas to create an array of datetimes import pandas as pd times = pd.date_range('2019-04-01', periods=5) times # Use numpy to create array of longitude and latitude values lons = np.linspace(-150, -60, 4) lats = np.linspace(10, 80, 3) lons, lats coords = {'time': times, 'lat': lats, 'lon': lons} dims = ['time', 'lat', 'lon'] # Add name, coords, dims to our data prec = xr.DataArray(data, dims=dims, coords=coords, name='prec') prec ``` This is already improved upon from the original numpy array, because we have names for each of the dimensions (or axis in NumPy parlance). We can also add attributes to an existing `DataArray`: ``` prec.attrs['units'] = 'mm' prec.attrs['standard_name'] = 'precipitation' prec ``` ### `Dataset` - Xarray's `Dataset` is a dict-like container of labeled arrays (`DataArrays`) with aligned dimensions. - It is designed as an in-memory representation of a netCDF dataset. - In addition to the dict-like interface of the dataset itself, which can be used to access any `DataArray` in a `Dataset`. Datasets have the following key properties: | Attribute | Description | |------------- |------------------------------------------------------------------------------------------------------------------------------------------ | | `data_vars` | OrderedDict of `DataArray` objects corresponding to data variables. | | `dims` | dictionary mapping from dimension names to the fixed length of each dimension (e.g., {`lat`: 6, `lon`: 6, `time`: 8}). | | `coords` | a dict-like container of arrays (coordinates) that label each point (e.g., 1-dimensional arrays of numbers, datetime objects or strings) | | `attrs` | OrderedDict to hold arbitrary metadata pertaining to the dataset. | | `name` | an arbitrary name of the dataset | - DataArray objects inside a Dataset may have any number of dimensions but are presumed to share a common coordinate system. - Coordinates can also have any number of dimensions but denote constant/independent quantities, unlike the varying/dependent quantities that belong in data. 
To create a `Dataset` from scratch, we need to supply dictionaries for any variables (`data_vars`), coordinates (`coords`) and attributes (`attrs`): ``` dset = xr.Dataset({'precipitation' : prec}) dset ``` Let's add some toy `temperature` data array to this existing dataset: ``` temp_data = 283 + 5 * np.random.randn(5, 3, 4) temp = xr.DataArray(data=temp_data, dims=['time', 'lat', 'lon'], coords={'time': times, 'lat': lats, 'lon': lons}, name='temp', attrs={'standard_name': 'air_temperature', 'units': 'kelvin'}) temp # Now add this data array to our existing dataset dset['temperature'] = temp dset.attrs['history'] = 'Created for the xarray tutorial' dset.attrs['author'] = 'foo and bar' dset ``` ## Going Further Xarray Documentation on Data Structures: http://xarray.pydata.org/en/latest/data-structures.html <div class="alert alert-block alert-success"> <p>Next: <a href="02_io.ipynb">I/O</a></p> </div>
**KNN model of 10k dataset**

_using data found on Kaggle from Goodreads_

_books.csv contains information for 10,000 books, such as ISBN, authors, title, year_

_ratings.csv is a collection of user ratings on these books, from 1 to 5 stars_

```
# imports
import numpy as np
import pandas as pd
import pickle
from sklearn.neighbors import NearestNeighbors
from scipy.sparse import csr_matrix
import re
```

**Books dataset**

```
books = pd.read_csv('https://raw.githubusercontent.com/zygmuntz/goodbooks-10k/master/books.csv')
print(books.shape)
books.head()
```

**Ratings dataset**

```
ratings = pd.read_csv('https://raw.githubusercontent.com/zygmuntz/goodbooks-10k/master/ratings.csv')
print(ratings.shape)
ratings.head()
```

**Trim down the data**

_In order to make a user rating matrix we will only need book_id and title._

```
cols = ['book_id', 'title']
books = books[cols]
books.head()
```

**Clean up book titles**

_Book titles are messy: special characters, extra spaces, and brackets clutter up the titles._

```
def clean_book_titles(title):
    title = re.sub(r'\([^)]*\)', '', title)  # handles brackets
    title = re.sub(' +', ' ', title)  # compresses multiple spaces into a single space
    title = title.strip()  # removes leading/trailing whitespace
    return title

books['title'] = books['title'].apply(clean_book_titles)
books.head()
```

**neat-o**

**Create feature matrix**

_Combine datasets to get a new dataset of user ratings for each book_

```
books_ratings = pd.merge(ratings, books, on='book_id')
print(books_ratings.shape)
books_ratings.head()
```

**Remove rows with same user_id and book title**

```
user_ratings = books_ratings.drop_duplicates(['user_id', 'title'])
print(user_ratings.shape)
user_ratings.head()
```

**Pivot table to create user_ratings matrix**

_Each column is a user and each row is a book. The entries in the matrix are the user's rating for that book._

```
user_matrix = user_ratings.pivot(index='title', columns='user_id', values='rating').fillna(0)
user_matrix.head()
user_matrix.shape
```

**Compress the matrix since it is extremely sparse**

_Whole lotta zeros_

```
compressed = csr_matrix(user_matrix.values)

# build and train knn
# unsupervised learning
# using cosine to measure space/distance
knn = NearestNeighbors(algorithm='brute', metric='cosine')
knn.fit(compressed)

def get_recommendations(book_title, matrix=user_matrix, model=knn, topn=2):
    book_index = list(matrix.index).index(book_title)
    distances, indices = model.kneighbors(matrix.iloc[book_index,:].values.reshape(1,-1), n_neighbors=topn+1)
    print('Recommendations for {}:'.format(matrix.index[book_index]))
    for i in range(1, len(distances.flatten())):
        print('{}. {}, distance = {}'.format(i, matrix.index[indices.flatten()[i]], "%.3f"%distances.flatten()[i]))
    print()

get_recommendations("Harry Potter and the Sorcerer's Stone")
get_recommendations("Pride and Prejudice")
get_recommendations("Matilda")

pickle.dump(knn, open('knn_model.pkl','wb'))
```
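The model is serialized above with `pickle`, but the notebook does not show how to use it again later. Below is a minimal sketch of reloading it in a new session (an illustration, not part of the original workflow; note that only the fitted estimator is stored, so `user_matrix` still has to be rebuilt or saved separately for `get_recommendations` to work):

```
# reload the serialized nearest-neighbors model;
# user_matrix is still needed to map row indices back to book titles
with open('knn_model.pkl', 'rb') as f:
    knn_loaded = pickle.load(f)

get_recommendations("Matilda", matrix=user_matrix, model=knn_loaded)
```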
# "Wine Quality."

### _"Quality ratings of Portuguese white wines" (Classification task)._

## Table of Contents

## Part 0: Introduction

### Overview

The dataset we see here contains 12 columns and 4898 entries of data about Portuguese white wines.

**Metadata:**

* **fixed acidity**
* **volatile acidity**
* **citric acid**
* **residual sugar**
* **chlorides**
* **free sulfur dioxide**
* **total sulfur dioxide**
* **density**
* **pH**
* **sulphates**
* **alcohol**
* **quality** - score between 3 and 9

### Questions:

Predict which wines are 'Good/1' and 'Not Good/0' (use binary classification; check balance of classes; calculate predictions; choose the best model)

## [Part 1: Import, Load Data](#Part-1:-Import,-Load-Data.)

* ### Import libraries, Read data from ‘.csv’ file

## [Part 2: Exploratory Data Analysis](#Part-2:-Exploratory-Data-Analysis.)

* ### Info, Head, Describe
* ### Encoding 'quality' attribute
* ### 'quality' attribute value counts and visualisation
* ### Resampling of an imbalanced dataset
* ### Random under-sampling of an imbalanced dataset
* ### Random over-sampling of an imbalanced dataset

## [Part 3: Data Wrangling and Transformation](#Part-3:-Data-Wrangling-and-Transformation.)

* ### Creating datasets for ML part
* ### StandardScaler
* ### 'Train\Test' splitting method

## [Part 4: Machine Learning](#Part-4:-Machine-Learning.)

* ### Build, train and evaluate models without hyperparameters
* #### Logistic Regression, K-Nearest Neighbors, Decision Trees
* #### Classification report
* #### Confusion Matrix
* #### ROC-AUC score
* ### Build, train and evaluate models with hyperparameters
* #### Logistic Regression, K-Nearest Neighbors, Decision Trees
* #### Classification report
* #### Confusion Matrix
* #### ROC-AUC score

## [Conclusion](#Conclusion.)

## Part 1: Import, Load Data.

* ### Import libraries

```
# import standard libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set()

from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import classification_report, confusion_matrix, roc_auc_score
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier

import warnings
warnings.filterwarnings('ignore')
```

* ### Read data from ‘.csv’ file

```
# read data from '.csv' file
data = pd.read_csv("winequality.csv")
```

## Part 2: Exploratory Data Analysis.

* ### Info

```
# print the full summary of the dataset
data.info()
```

* ### Head

```
# preview of the first 5 lines of the loaded data
data.head()
```

* ### Describe

```
data.describe()
```

* ### Encoding 'quality' attribute

```
# lambda function; wine quality from 3-6 == 0, from 7-9 == 1.
data["quality"] = data["quality"].apply(lambda x: 0 if x < 7 else 1) # preview of the first 5 lines of the loaded data data.head() ``` * ### 'quality' attribute value counts and visualisation ``` data["quality"].value_counts() # visualisation plot sns.countplot(x="quality", data=data); ``` * ### Resampling of an imbalanced dataset ``` # class count count_class_0, count_class_1 = data['quality'].value_counts() # divide by class class_0 = data[data["quality"] == 0] class_1 = data[data["quality"] == 1] ``` * ### Random under-sampling of an imbalanced dataset ``` #class_0_under = class_0.sample(count_class_1) #data_under = pd.concat([class_0_under, class_1], axis=0) #sns.countplot(x="quality", data=data_under); ``` * ### Random over-sampling of an imbalanced dataset ``` class_1_over = class_1.sample(count_class_0, replace=True) data_over = pd.concat([class_0, class_1_over], axis=0) sns.countplot(x="quality", data=data_over); ``` ## Part 3: Data Wrangling and Transformation. * ### Creating datasets for ML part ``` # set 'X' for features' and y' for the target ('quality'). #X = data.drop('quality', axis=1) #y = data['quality'] # for under-sampling dataset #X = data_under.drop('quality', axis=1) #y = data_under['quality'] # for over-sampling dataset X = data_over.drop('quality', axis=1) y = data_over['quality'] # preview of the first 5 lines of the loaded data X.head() ``` * ### 'Train\Test' split ``` # apply 'Train\Test' splitting method X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) # print shape of X_train and y_train X_train.shape, y_train.shape # print shape of X_test and y_test X_test.shape, y_test.shape ``` * ### StandardScaler ``` # StandardScaler sc = StandardScaler() data_sc_train = pd.DataFrame(sc.fit_transform(X_train), columns=X.columns) data_sc_test = pd.DataFrame(sc.transform(X_test), columns=X.columns) data_sc_train.head() data_sc_test.head() ``` ## Part 4: Machine Learning. 
* ### Build, train and evaluate models without hyperparameters * Logistic Regression * K-Nearest Neighbors * Decision Trees ``` # Logistic Regression LR = LogisticRegression() LR.fit(data_sc_train, y_train) LR_pred = LR.predict(data_sc_test) # K-Nearest Neighbors KNN = KNeighborsClassifier() KNN.fit(data_sc_train, y_train) KNN_pred = KNN.predict(data_sc_test) # Decision Tree DT = DecisionTreeClassifier(random_state=0) DT.fit(data_sc_train, y_train) DT_pred = DT.predict(data_sc_test) ``` * ### Classification report ``` print(f"LogisticRegression: \n {classification_report(y_test, LR_pred, digits=6)} ") print(f"KNeighborsClassifier: \n {classification_report(y_test, KNN_pred, digits=6)} ") print(f"DecisionTreeClassifier: \n {classification_report(y_test, DT_pred, digits=6)} ") ``` * ### Confusion matrix ``` sns.heatmap(confusion_matrix(y_test, LR_pred), annot=True); sns.heatmap(confusion_matrix(y_test, KNN_pred), annot=True); sns.heatmap(confusion_matrix(y_test, DT_pred), annot=True); ``` * ### ROC-AUC score ``` roc_auc_score(y_test, DT_pred) ``` * ### Build, train and evaluate models with hyperparameters ``` # Logistic Regression LR = LogisticRegression() LR_params = {'C':[1,2,3,4,5,6,7,8,9,10], 'penalty':['l1', 'l2', 'elasticnet', 'none'], 'solver':['lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'], 'random_state':[0]} LR1 = GridSearchCV(LR, param_grid = LR_params) LR1.fit(X_train, y_train) LR1_pred = LR1.predict(X_test) # K-Nearest Neighbors KNN = KNeighborsClassifier() KNN_params = {'n_neighbors':[5,7,9,11]} KNN1 = GridSearchCV(KNN, param_grid = KNN_params) KNN1.fit(X_train, y_train) KNN1_pred = KNN1.predict(X_test) # Decision Tree DT = DecisionTreeClassifier() DT_params = {'max_depth':[2,10,15,20], 'criterion':['gini', 'entropy'], 'random_state':[0]} DT1 = GridSearchCV(DT, param_grid = DT_params) DT1.fit(X_train, y_train) DT1_pred = DT1.predict(X_test) # print the best hyper parameters set print(f"LogisticRegression: {LR1.best_params_}") print(f"KNeighborsClassifier: {KNN1.best_params_}") print(f"DecisionTreeClassifier: {DT1.best_params_}") ``` * ### Classification report ``` print(f"LogisticRegression: \n {classification_report(y_test, LR1_pred, digits=6)} ") print(f"KNeighborsClassifier: \n {classification_report(y_test, KNN1_pred, digits=6)} ") print(f"DecisionTreeClassifier: \n {classification_report(y_test, DT1_pred, digits=6)} ") ``` * ### Confusion matrix ``` # confusion matrix of DT model conf_mat_DT1 = confusion_matrix(y_test, DT1_pred) # visualisation sns.heatmap(conf_mat_DT1, annot=True); ``` * ### ROC-AUC score ``` roc_auc_score(y_test, DT1_pred) ``` ## Conclusion. ``` # submission of .csv file with predictions sub = pd.DataFrame() sub['ID'] = X_test.index sub['quality'] = DT1_pred sub.to_csv('WinePredictionsTest.csv', index=False) ``` **Question**: Predict which wines are 'Good/1' and 'Not Good/0' (use binary classification; check balance of classes; calculate perdictions; choose the best model). **Answers**: 1. Binary classification was applied. 2. Classes were highly imbalanced. 3. Three options were applied in order to calculate the best predictions: * Calculate predictions with imbalanced dataset * Calculate predictions with random under-sampling technique of an imbalanced dataset * Calculate predictions with random over-sampling technique of an imbalanced dataset (the best solution) 4. Three ML models were used: Logistic Regression, KNN, Decision Tree (without and with hyper parameters). 5. 
The best result was chosen:
* Random over-sampling dataset with 3838 entries in class '0' and 3838 entries in class '1', 7676 entries in total.
* Train/Test split: test_size=0.2, random_state=0
* Decision Tree model with hyperparameter tuning, with an accuracy score of 0.921875 and a ROC-AUC score of 0.921773.
# Perturb-seq K562 co-expression ``` import scanpy as sc import seaborn as sns import pandas as pd import matplotlib.pyplot as plt import numpy as np import scipy.stats as stats import itertools from pybedtools import BedTool import pickle as pkl %matplotlib inline pd.set_option('max_columns', None) import sys sys.path.append('/home/ssm-user/Github/scrna-parameter-estimation/dist/memento-0.0.6-py3.8.egg') sys.path.append('/home/ssm-user/Github/misc-seq/miscseq/') import encode import memento data_path = '/data_volume/memento/k562/' ``` ### Read the guide labled K562 data From perturbseq paper ``` adata = sc.read(data_path + 'h5ad/filtered-cellcycle.h5ad') guides = adata.obs.guides.drop_duplicates().tolist() guides = [g for g in guides if ('INTER' not in g and 'nan' not in g)] ko_genes = adata.obs.query('KO == 1')['KO_GENE'].drop_duplicates().tolist() adata.X = adata.X.tocsr() ``` ### Setup memento ``` adata.obs['q'] = 0.07 memento.setup_memento(adata, q_column='q', filter_mean_thresh=0.07) ``` ### Get moments from all groups ``` adata_moments = adata.copy().copy() memento.create_groups(adata_moments, label_columns=['phase']) memento.compute_1d_moments(adata_moments, min_perc_group=.9) moment_df = memento.get_1d_moments(adata_moments) moment_df = moment_df[0].merge(moment_df[1], on='gene', suffixes=('_m', '_v')) moment_df = moment_df[['gene','sg^G1_m', 'sg^S_m', 'sg^G2M_m', 'sg^G1_v', 'sg^S_v', 'sg^G2M_v']] ``` ### Cell cycle 1D moments ``` adata.obs['s_phase'] = (adata.obs.phase == 'S').astype(int) adata.obs['g1_phase'] = (adata.obs.phase == 'G1').astype(int) adata.obs['g2m_phase'] = (adata.obs.phase == 'G2M').astype(int) g1_s = adata[adata.obs.phase.isin(['S', 'G1'])].copy().copy() s_g2 = adata[adata.obs.phase.isin(['S', 'G2M'])].copy().copy() g2_g1 = adata[adata.obs.phase.isin(['G1', 'G2M'])].copy().copy() memento.create_groups(g1_s, label_columns=['s_phase', 'leiden']) memento.compute_1d_moments(g1_s, min_perc_group=.9) memento.create_groups(s_g2, label_columns=['g2m_phase', 'leiden']) memento.compute_1d_moments(s_g2, min_perc_group=.9) memento.create_groups(g2_g1, label_columns=['g1_phase', 'leiden']) memento.compute_1d_moments(g2_g1, min_perc_group=.9) memento.ht_1d_moments( g1_s, formula_like='1 + s_phase', cov_column='s_phase', num_boot=20000, verbose=1, num_cpus=70) memento.ht_1d_moments( s_g2, formula_like='1 + g2m_phase', cov_column='g2m_phase', num_boot=20000, verbose=1, num_cpus=70) memento.ht_1d_moments( g2_g1, formula_like='1 + g1_phase', cov_column='g1_phase', num_boot=20000, verbose=1, num_cpus=70) g1_s.write(data_path + 'cell_cycle/g1_s.h5ad') s_g2.write(data_path + 'cell_cycle/s_g2.h5ad') g2_g1.write(data_path + 'cell_cycle/g2_g1.h5ad') def get_1d_dfs(subset): df = memento.get_1d_ht_result(subset) df['dv_fdr'] = memento.util._fdrcorrect(df['dv_pval']) df['de_fdr'] = memento.util._fdrcorrect(df['de_pval']) return df g1_s_1d = get_1d_dfs(g1_s) s_g2_1d = get_1d_dfs(s_g2) g2_g1_1d = get_1d_dfs(g2_g1) plt.figure(figsize=(10,3)) plt.subplot(1,3,1) plt.scatter(g1_s_1d['de_coef'], g1_s_1d['dv_coef'], s=1) plt.subplot(1,3,2) plt.scatter(s_g2_1d['de_coef'], s_g2_1d['dv_coef'], s=1) plt.subplot(1,3,3) plt.scatter(g2_g1_1d['de_coef'], g2_g1_1d['dv_coef'], s=1) sig_genes = set( g1_s_1d.query('dv_fdr < 0.01 & (dv_coef < -1 | dv_coef > 1)').gene.tolist() +\ s_g2_1d.query('dv_fdr < 0.01 & (dv_coef < -1 | dv_coef > 1)').gene.tolist() + \ g2_g1_1d.query('dv_fdr < 0.01 & (dv_coef < -1 | dv_coef > 1)').gene.tolist()) ``` ### GSEA + scatterplots ``` def plot_scatters(gene_set, name, c='k'): 
plt.figure(figsize=(10,3)) plt.subplot(1,3,1) plt.scatter(g1_s_1d['de_coef'], g1_s_1d['dv_coef'], s=1, color='gray') plt.scatter(g1_s_1d.query('gene in @gene_set')['de_coef'], g1_s_1d.query('gene in @gene_set')['dv_coef'], s=15, color=c) plt.xlabel('G1->S') # plt.xlim(-1.2,1.2); plt.ylim(-1.2,1.2); plt.subplot(1,3,2) plt.scatter(s_g2_1d['de_coef'], s_g2_1d['dv_coef'], s=1, color='gray') plt.scatter(s_g2_1d.query('gene in @gene_set')['de_coef'], s_g2_1d.query('gene in @gene_set')['dv_coef'], s=15, color=c) plt.title(name) plt.xlabel('S->G2M') # plt.xlim(-1.2,1.2); plt.ylim(-1.2,1.2); plt.subplot(1,3,3) plt.scatter(g2_g1_1d['de_coef'], g2_g1_1d['dv_coef'], s=1, color='gray') plt.scatter(g2_g1_1d.query('gene in @gene_set')['de_coef'], g2_g1_1d.query('gene in @gene_set')['dv_coef'], s=15, color=c) plt.xlabel('G2M->G1') # plt.xlim(-1.2,1.2); plt.ylim(-1.2,1.2); import gseapy as gp from gseapy.plot import gseaplot pre_res = gp.prerank( rnk=s_g2_1d.query('de_coef > 0 & de_fdr < 0.01')[['gene','dv_coef']].sort_values('dv_coef'), gene_sets='GO_Biological_Process_2018', processes=4, permutation_num=100, # reduce number to speed up testing outdir=None, seed=6) terms = pre_res.res2d.index gsea_table = pre_res.res2d.sort_index().sort_values('fdr') gsea_table.head(5) terms = gsea_table.index idx=0 gseaplot(rank_metric=pre_res.ranking, term=terms[idx], **pre_res.results[terms[idx]]) gsea_table = pre_res.res2d.sort_index().sort_values('fdr') stress_genes stress_genes = gsea_table['ledge_genes'].iloc[0].split(';') plot_scatters(stress_genes, 'chaperones') cell_cycle_genes = [x.strip() for x in open('./regev_lab_cell_cycle_genes.txt')] plot_scatters(cell_cycle_genes, 'cell cycle') manual_gene_set = g1_s_1d.query('dv_coef < -1 & de_coef < -0.5').gene.tolist() plot_scatters(manual_gene_set, 'G1 genes') manual_gene_set ``` ### Get any hits for KOs ``` guides = adata.obs.guides.drop_duplicates().tolist() guides = [g for g in guides if ('INTER' not in g and 'nan' not in g)] ko_genes = adata.obs.query('KO == 1')['KO_GENE'].drop_duplicates().tolist() ``` ### Get moments for the gene classes ``` for g in ko_genes: print(g) subset = adata[adata.obs.WT | (adata.obs.KO_GENE == g)].copy().copy() memento.create_groups(subset, label_columns=['KO', 'leiden']) memento.compute_1d_moments(subset, min_perc_group=.9) target_genes = list(set(subset.var.index)-set(ko_genes)) # memento.compute_2d_moments(subset, gene_pairs=list(itertools.product([g], target_genes))) memento.ht_1d_moments( subset, formula_like='1 + KO', cov_column='KO', num_boot=10000, verbose=1, num_cpus=70) # subset.write(data_path + '2d_self_h5ad/{}.h5ad'.format(g)) break df = memento.get_1d_ht_result(subset) df['de_fdr'] = memento.util._fdrcorrect(df['de_pval']) df.query('de_fdr < 0.1') plt.hist(df['dv_pval']) plt.figure(figsize=(10, 3)) plt.subplot(1, 2, 1) plt.plot(moment_df.query('gene in @stress_genes').iloc[:, 1:4].values.T) plt.xticks([0,1,2],['G1', 'S', 'G2M']) plt.title('Mean') plt.subplot(1, 2, 2) plt.plot(moment_df.query('gene in @stress_genes').iloc[:, 4:].values.T) plt.xticks([0,1,2],['G1', 'S', 'G2M']) plt.title('Variability') plt.plot(moment_df.query('gene in @stress_genes').iloc[:, 4:].values.T) df['dv_pval'].hist(bins=50) ``` ### Find self-DC genes ``` for g in ko_genes: subset = adata[adata.obs.WT | (adata.obs.KO_GENE == g)].copy().copy() memento.create_groups(subset, label_columns=['KO']) memento.compute_1d_moments(subset, min_perc_group=.9) if g not in subset.var.index: continue target_genes = list(set(subset.var.index)-set(ko_genes)) # 
memento.compute_2d_moments(subset, gene_pairs=list(itertools.product([g], target_genes))) memento.ht_1d_moments( subset, formula_like='1 + KO', cov_column='KO', num_boot=10000, verbose=1, num_cpus=70) # subset.write(data_path + '2d_self_h5ad/{}.h5ad'.format(g)) break df = memento.get_1d_ht_result(subset) df = memento.get_1d_ht_result(subset) df['de_pval'].hist(bins=50) for g, result in result_1d_dict.items(): result.to_csv(data_path + '/result_1d/{}.csv'.format(g), index=False) ``` ### Get 1D results ``` result_1d_dict = {g:pd.read_csv(data_path + '/result_1d/{}.csv'.format(g)) for g in guides if ('INTER' not in g and 'nan' not in g)} g = 'p_sgGABPA_9' df = result_1d_dict[g] df.query('de_fdr < 0.1 | dv_fdr < 0.1') for g in guides: df = result_1d_dict[g] df['de_fdr'] = memento.util._fdrcorrect(df['de_pval']) df['dv_fdr'] = memento.util._fdrcorrect(df['dv_pval']) print(g, df.query('de_fdr < 0.15').shape[0], df.query('dv_fdr < 0.15').shape[0]) ``` ### DV shift plots ``` for g in guides: df = result_1d_dict[g] plt.figure() sns.kdeplot(df['dv_coef']); plt.plot([0, 0], [0, 2]) plt.title(g) plt.xlim(-2, 2) ``` ### within WT ``` adata[adata.obs.WT].obs.guides.value_counts() subset = adata[(adata.obs.guides=='p_INTERGENIC393453') | (adata.obs.guides=='p_INTERGENIC216151') ].copy().copy() memento.create_groups(subset, label_columns=['guides']) memento.compute_1d_moments(subset, min_perc_group=.9) memento.ht_1d_moments( subset, formula_like='1 + guides', cov_column='guides', num_boot=10000, verbose=1, num_cpus=14) wt_result = memento.get_1d_ht_result(subset) sns.kdeplot(wt_result.dv_coef) plt.title('WT') plt.plot([0, 0], [0, 2]) ``` ### Get the change in magnitude for each guide ``` coef_mag = [] for g, df in result_1d_dict.items(): coef_mag.append((g, df['de_coef'].abs().median())) coef_mag = pd.DataFrame(coef_mag, columns=['guide', 'de_mag']) coef_mag['gene'] = coef_mag['guide'].str.split('_').str[1].str[2:] ``` ### Get WT variability of each TF ``` wt_adata = adata[adata.obs['WT']].copy().copy() tfs = adata.obs.query('KO==1').KO_GENE.drop_duplicates().tolist() memento.create_groups(wt_adata, label_columns=['KO']) memento.compute_1d_moments(wt_adata, min_perc_group=.9,) tf_moments = memento.get_1d_moments(wt_adata, groupby='KO') ``` ### Compare WT variability to De mag ``` merged = coef_mag.merge(tf_moments[1], on='gene') stats.spearmanr(merged['de_mag'], merged['KO_0']) plt.scatter(merged['de_mag'], merged['KO_0']) ``` ### Number of TF binding sites within 5k(?) 
KB ``` enc = encode.Encode('/home/ssm-user/Github/misc-seq/miscseq/GRCh38Genes.bed') encode_links = { 'ELK1':'https://www.encodeproject.org/files/ENCFF119SCQ/@@download/ENCFF119SCQ.bed.gz', 'ELF1':'https://www.encodeproject.org/files/ENCFF133TSU/@@download/ENCFF133TSU.bed.gz', 'IRF1':'https://www.encodeproject.org/files/ENCFF203LRV/@@download/ENCFF203LRV.bed.gz', 'ETS1':'https://www.encodeproject.org/files/ENCFF461PRP/@@download/ENCFF461PRP.bed.gz', 'EGR1':'https://www.encodeproject.org/files/ENCFF375RDB/@@download/ENCFF375RDB.bed.gz', 'YY1':'https://www.encodeproject.org/files/ENCFF635XCI/@@download/ENCFF635XCI.bed.gz', 'GABPA':'https://www.encodeproject.org/files/ENCFF173GUD/@@download/ENCFF173GUD.bed.gz', 'E2F4':'https://www.encodeproject.org/files/ENCFF225TLP/@@download/ENCFF225TLP.bed.gz', 'NR2C2':'https://www.encodeproject.org/files/ENCFF263VIC/@@download/ENCFF263VIC.bed.gz', 'CREB1':'https://www.encodeproject.org/files/ENCFF193LLN/@@download/ENCFF193LLN.bed.gz' } bed_objs = {tf:enc.get_encode_peaks(link) for tf,link in encode_links.items()} target_genes = {tf:enc.get_peak_genes_bed(bed_obj, 0).query('distance==0').gene.tolist() for tf, bed_obj in bed_objs.items()} x = wt_adata[:, 'EGR1'].X.todense().A1 np.bincount(x.astype(int)) x.mean() plt.hist(x, bins=20) target_numbers = [] for tf in encode_links.keys(): target_numbers.append((tf, len(target_genes[tf]))) target_numbers = pd.DataFrame(target_numbers, columns=['gene', 'num_targets']) merged = target_numbers.merge(tf_moments[1], on='gene') stats.pearsonr(merged.query('gene != "EGR1"')['num_targets'], merged.query('gene != "EGR1"')['KO_0']) plt.scatter(merged['num_targets'], merged['KO_0']) ``` ### Try with all ENCODE ``` merged all_encode = pd.read_csv('gene_attribute_matrix.txt', sep='\t', index_col=0, low_memory=False).iloc[2:, 2:].astype(float) target_counts = pd.DataFrame(all_encode.sum(axis=0), columns=['num_targets']).reset_index().rename(columns={'index':'gene'}) x = target_counts.query('gene in @tfs').sort_values('gene')['num_targets'] y = merged.sort_values('gene')['num_targets'] merged2 = target_counts.merge(tf_moments[1], on='gene') plt.scatter(merged2['num_targets'], merged2['KO_0']) merged2 ``` ### Get gene list ``` wt_adata = adata[adata.obs['WT']].copy().copy() memento.create_groups(wt_adata, label_columns=['KO']) memento.compute_1d_moments(wt_adata, min_perc_group=.9) plt.hist(np.log(wt_adata.uns['memento']['1d_moments']['sg^0'][0])) wt_high_genes = wt_adata.var.index[np.log(wt_adata.uns['memento']['1d_moments']['sg^0'][0]) > -1].tolist() ``` ### Create labels for X genes ``` chr_locations = pd.read_csv('chr_locations.bed', sep='\t').rename(columns={'#chrom':'chr'}).drop_duplicates('geneName') chr_locations.index=chr_locations.geneName adata.var = adata.var.join(chr_locations, how='left') ``` ### Filter X-chromosomal genes ``` adata_X = adata[:, (adata.var.chr=='chrX') | adata.var.chr.isin(['chr1', 'chr2', 'chr3'])].copy() adata_X ``` ### Escape genes ``` par_genes = """PLCXD1 GTPBP6 PPP2R3B SHOX CRLF2 CSF2RA IL3RA SLC25A6 ASMTL P2RY8 ASMT DHRSXY ZBED1 CD99 XG IL9R SPRY3 VAMP7""".split() escape_genes = """EIF1AX USP9X EIF2S3 CTPS2 TRAPPC2 HDHD1 ZFX DDX3X RAB9A AP1S2 GEMIN8 RPS4X SMC1A ZRSR2 STS FUNDC1 PNPLA4 UBA1 ARSD NLGN4X GPM6B MED14 CD99 RBBP7 SYAP1 PRKX OFD1 CXorf38 TXLNG KDM5C GYG2 TBL1X CA5B XIST RENBP HCFC1 USP11 PLCXD1 SLC25A6 ASMTL DHRSX XG TMEM27 ARHGAP4 GAB3 PIR TMEM187 DOCK11 EFHC2 RIBC1 NAP1L3 CA5BP1 MXRA5 KAL1 PCDH11X KDM6A PLS3 CITED1 L1CAM ALG13 BCOR""".split() ``` ### Run 1d memento ``` 
adata_X.obs['is_female'] = (adata_X.obs['Sex'] == 'Female').astype(int) adata_X.obs.is_female.value_counts() memento.create_groups(adata_X, label_columns=['is_female', 'ind_cov']) memento.compute_1d_moments(adata_X, min_perc_group=.9) memento.ht_1d_moments( adata_X, formula_like='1 + is_female', cov_column='is_female', num_boot=20000, verbose=1, num_cpus=13) result_1d = memento.get_1d_ht_result(adata_X) result_1d['dv_fdr'] = memento.util._fdrcorrect(result_1d['dv_pval']) sns.distplot(result_1d.dv_coef) x_chr_genes = adata.var.index[adata.var.chr=='chrX'].tolist() result_1d['escape'] = result_1d['gene'].isin(escape_genes) result_1d['par'] = result_1d['gene'].isin(par_genes) result_1d['x_chr'] = result_1d['gene'].isin(x_chr_genes) sns.distplot(result_1d.query('~x_chr').dv_coef) sns.distplot(result_1d.query('x_chr').dv_coef) sns.boxplot(x='x_chr', y='dv_coef', data=result_1d) dv_genes = result_1d.query('dv_fdr < 0.1').gene.tolist() result_1d['dv'] = result_1d.gene.isin(dv_genes) result_1d.query('~dv & ~x_chr & dv_coef > 0').shape a = [[193, 14], [23,5]] stats.chi2_contingency(a) result_1d.query('dv_fdr < 0.1').x_chr.mean() result_1d.x_chr.mean() ``` ### Run memento for each subset, comparing to control ``` cts = [['ciliated'], ['bc','basal']] # tps = ['3', '6', '9', '24', '48'] tps = ['3', '6', '9', '24', '48'] stims = ['alpha', 'beta', 'gamma', 'lambda'] import os done_files = os.listdir('/data_volume/ifn_hbec/binary_test_deep/') for ct in cts: for tp in tps: for stim in stims: fname = '{}_{}_{}_20200320.h5ad'.format('-'.join(ct), stim, tp) if fname in done_files: print('Skipping', fname) continue print('starting', ct, tp, stim) adata_stim = adata.copy()[ adata.obs.cell_type.isin(ct) & \ adata.obs.stim.isin(['control', stim]) & \ adata.obs.time.isin(['0',tp]), :].copy() time_converter={0:0, int(tp):1} adata_stim.obs['time_step'] = adata_stim.obs['time'].astype(int).apply(lambda x: time_converter[x]) memento.create_groups(adata_stim, label_columns=['time_step', 'donor']) memento.compute_1d_moments(adata_stim, min_perc_group=.9) memento.ht_1d_moments( adata_stim, formula_like='1 + time_step', cov_column='time_step', num_boot=10000, verbose=1, num_cpus=13) del adata_stim.uns['memento']['mv_regressor'] adata_stim.write('/data_volume/ifn_hbec/binary_test_deep/{}_{}_{}_20200320.h5ad'.format( '-'.join(ct), stim, tp)) ```
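Once the loop above has written its per-condition files, a natural follow-up is to reload each AnnData and tabulate the differential-expression and differential-variability hits, reusing the same helpers used earlier in this notebook (`memento.get_1d_ht_result`, `memento.util._fdrcorrect`). This is only a sketch: the output directory, the 0.1 FDR cutoff, and the assumption that the stored test results survive the round-trip through `.write` are all mine.

```
import os
import pandas as pd
import scanpy as sc
import memento

# Assumed output directory -- matches the path used in the loop above.
result_dir = '/data_volume/ifn_hbec/binary_test_deep/'

hit_counts = []
for fname in os.listdir(result_dir):
    if not fname.endswith('.h5ad'):
        continue
    adata_done = sc.read_h5ad(os.path.join(result_dir, fname))
    df = memento.get_1d_ht_result(adata_done)
    # FDR-correct the raw p-values, as done earlier in this notebook (0.1 cutoff is an assumption).
    df['de_fdr'] = memento.util._fdrcorrect(df['de_pval'])
    df['dv_fdr'] = memento.util._fdrcorrect(df['dv_pval'])
    hit_counts.append((fname, (df['de_fdr'] < 0.1).sum(), (df['dv_fdr'] < 0.1).sum()))

pd.DataFrame(hit_counts, columns=['file', 'n_de_hits', 'n_dv_hits'])
```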
# Wind Statistics ### Introduction: The data have been modified to contain some missing values, identified by NaN. Using pandas should make this exercise easier, in particular for the bonus question. You should be able to perform all of these operations without using a for loop or other looping construct. 1. The data in 'wind.data' has the following format: ``` """ Yr Mo Dy RPT VAL ROS KIL SHA BIR DUB CLA MUL CLO BEL MAL 61 1 1 15.04 14.96 13.17 9.29 NaN 9.87 13.67 10.25 10.83 12.58 18.50 15.04 61 1 2 14.71 NaN 10.83 6.50 12.62 7.67 11.50 10.04 9.79 9.67 17.54 13.83 61 1 3 18.50 16.88 12.33 10.13 11.17 6.17 11.25 NaN 8.50 7.67 12.75 12.71 """ ``` The first three columns are year, month and day. The remaining 12 columns are average windspeeds in knots at 12 locations in Ireland on that day. More information about the dataset go [here](wind.desc). ### Step 1. Import the necessary libraries ``` import pandas as pd import datetime ``` ### Step 2. Import the dataset from this [address](https://github.com/guipsamora/pandas_exercises/blob/master/06_Stats/Wind_Stats/wind.data) ### Step 3. Assign it to a variable called data and replace the first 3 columns by a proper datetime index. ``` # parse_dates gets 0, 1, 2 columns and parses them as the index data_url = 'https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/06_Stats/Wind_Stats/wind.data' data = pd.read_csv(data_url, sep = "\s+", parse_dates = [[0,1,2]]) data.head() ``` ### Step 4. Year 2061? Do we really have data from this year? Create a function to fix it and apply it. ``` # The problem is that the dates are 2061 and so on... # function that uses datetime def fix_century(x): year = x.year - 100 if x.year > 1989 else x.year return datetime.date(year, x.month, x.day) # apply the function fix_century on the column and replace the values to the right ones data['Yr_Mo_Dy'] = data['Yr_Mo_Dy'].apply(fix_century) # data.info() data.head() ``` ### Step 5. Set the right dates as the index. Pay attention at the data type, it should be datetime64[ns]. ``` # transform Yr_Mo_Dy it to date type datetime64 data["Yr_Mo_Dy"] = pd.to_datetime(data["Yr_Mo_Dy"]) # set 'Yr_Mo_Dy' as the index data = data.set_index('Yr_Mo_Dy') data.head() # data.info() ``` ### Step 6. Compute how many values are missing for each location over the entire record. #### They should be ignored in all calculations below. ``` # "Number of non-missing values for each location: " data.isnull().sum() ``` ### Step 7. Compute how many non-missing values there are in total. ``` #number of columns minus the number of missing values for each location data.shape[0] - data.isnull().sum() #or data.notnull().sum() ``` ### Step 8. Calculate the mean windspeeds of the windspeeds over all the locations and all the times. #### A single number for the entire dataset. ``` data.sum().sum() / data.notna().sum().sum() ``` ### Step 9. Create a DataFrame called loc_stats and calculate the min, max and mean windspeeds and standard deviations of the windspeeds at each location over all the days #### A different set of numbers for each location. ``` data.describe(percentiles=[]) ``` ### Step 10. Create a DataFrame called day_stats and calculate the min, max and mean windspeed and standard deviations of the windspeeds across all the locations at each day. #### A different set of numbers for each day. ``` # create the dataframe day_stats = pd.DataFrame() # this time we determine axis equals to one so it gets each row. 
day_stats['min'] = data.min(axis = 1) # min day_stats['max'] = data.max(axis = 1) # max day_stats['mean'] = data.mean(axis = 1) # mean day_stats['std'] = data.std(axis = 1) # standard deviations day_stats.head() ``` ### Step 11. Find the average windspeed in January for each location. #### Treat January 1961 and January 1962 both as January. ``` data.loc[data.index.month == 1].mean() ``` ### Step 12. Downsample the record to a yearly frequency for each location. ``` data.groupby(data.index.to_period('A')).mean() ``` ### Step 13. Downsample the record to a monthly frequency for each location. ``` data.groupby(data.index.to_period('M')).mean() ``` ### Step 14. Downsample the record to a weekly frequency for each location. ``` data.groupby(data.index.to_period('W')).mean() ``` ### Step 15. Calculate the min, max and mean windspeeds and standard deviations of the windspeeds across all locations for each week (assume that the first week starts on January 2 1961) for the first 52 weeks. ``` # resample data to 'W' week and use the functions weekly = data.resample('W').agg(['min','max','mean','std']) # slice it for the first 52 weeks and locations weekly.loc[weekly.index[1:53], "RPT":"MAL"] .head(10) ```
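If you prefer an explicit date window over positional slicing, roughly the same result can be obtained by restricting the data to the year starting on 1961-01-02 before resampling. This is a hedged alternative sketch, not part of the original exercise; the exact rows may differ at the boundaries depending on the week anchor used by `'W'`.

```
# Restrict to the year starting 1961-01-02, then resample weekly (default week anchor assumed acceptable).
first_year = data.loc['1961-01-02':'1962-01-01']
weekly_alt = first_year.resample('W').agg(['min', 'max', 'mean', 'std'])
weekly_alt.head(10)
```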
# Mark and Recapture Think Bayes, Second Edition Copyright 2020 Allen B. Downey License: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/) ``` # If we're running on Colab, install empiricaldist # https://pypi.org/project/empiricaldist/ import sys IN_COLAB = 'google.colab' in sys.modules if IN_COLAB: !pip install empiricaldist # Get utils.py import os if not os.path.exists('utils.py'): !wget https://github.com/AllenDowney/ThinkBayes2/raw/master/soln/utils.py from utils import set_pyplot_params set_pyplot_params() ``` This chapter introduces "mark and recapture" experiments, in which we sample individuals from a population, mark them somehow, and then take a second sample from the same population. Seeing how many individuals in the second sample are marked, we can estimate the size of the population. Experiments like this were originally used in ecology, but turn out to be useful in many other fields. Examples in this chapter include software engineering and epidemiology. Also, in this chapter we'll work with models that have three parameters, so we'll extend the joint distributions we've been using to three dimensions. But first, grizzly bears. ## The Grizzly Bear Problem In 1996 and 1997 researchers deployed bear traps in locations in British Columbia and Alberta, Canada, in an effort to estimate the population of grizzly bears. They describe the experiment in [this article](https://www.researchgate.net/publication/229195465_Estimating_Population_Size_of_Grizzly_Bears_Using_Hair_Capture_DNA_Profiling_and_Mark-Recapture_Analysis). The "trap" consists of a lure and several strands of barbed wire intended to capture samples of hair from bears that visit the lure. Using the hair samples, the researchers use DNA analysis to identify individual bears. During the first session, the researchers deployed traps at 76 sites. Returning 10 days later, they obtained 1043 hair samples and identified 23 different bears. During a second 10-day session they obtained 1191 samples from 19 different bears, where 4 of the 19 were from bears they had identified in the first batch. To estimate the population of bears from this data, we need a model for the probability that each bear will be observed during each session. As a starting place, we'll make the simplest assumption, that every bear in the population has the same (unknown) probability of being sampled during each session. With these assumptions we can compute the probability of the data for a range of possible populations. As an example, let's suppose that the actual population of bears is 100. After the first session, 23 of the 100 bears have been identified. During the second session, if we choose 19 bears at random, what is the probability that 4 of them were previously identified? I'll define * $N$: actual population size, 100. * $K$: number of bears identified in the first session, 23. * $n$: number of bears observed in the second session, 19 in the example. * $k$: number of bears in the second session that were previously identified, 4. For given values of $N$, $K$, and $n$, the probability of finding $k$ previously-identified bears is given by the [hypergeometric distribution](https://en.wikipedia.org/wiki/Hypergeometric_distribution): $$\binom{K}{k} \binom{N-K}{n-k}/ \binom{N}{n}$$ where the [binomial coefficient](https://en.wikipedia.org/wiki/Binomial_coefficient), $\binom{K}{k}$, is the number of subsets of size $k$ we can choose from a population of size $K$. 
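As a quick numerical check of this formula, here is a direct evaluation from binomial coefficients using only the standard library (a sketch for the example values above; the text below does the same computation with SciPy's `hypergeom`).

```
from math import comb

N = 100
K = 23
n = 19
k = 4

# P(k) = C(K, k) * C(N-K, n-k) / C(N, n) -- the hypergeometric PMF written out.
comb(K, k) * comb(N - K, n - k) / comb(N, n)
```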
To understand why, consider: * The denominator, $\binom{N}{n}$, is the number of subsets of $n$ we could choose from a population of $N$ bears. * The numerator is the number of subsets that contain $k$ bears from the previously identified $K$ and $n-k$ from the previously unseen $N-K$. SciPy provides `hypergeom`, which we can use to compute this probability for a range of values of $k$. ``` import numpy as np from scipy.stats import hypergeom N = 100 K = 23 n = 19 ks = np.arange(12) ps = hypergeom(N, K, n).pmf(ks) ``` The result is the distribution of $k$ with given parameters $N$, $K$, and $n$. Here's what it looks like. ``` import matplotlib.pyplot as plt from utils import decorate plt.bar(ks, ps) decorate(xlabel='Number of bears observed twice', ylabel='PMF', title='Hypergeometric distribution of k (known population 100)') ``` The most likely value of $k$ is 4, which is the value actually observed in the experiment. That suggests that $N=100$ is a reasonable estimate of the population, given this data. We've computed the distribution of $k$ given $N$, $K$, and $n$. Now let's go the other way: given $K$, $n$, and $k$, how can we estimate the total population, $N$? ## The Update As a starting place, let's suppose that, prior to this study, an expert estimates that the local bear population is between 50 and 500, and equally likely to be any value in that range. I'll use `make_uniform` to make a uniform distribution of integers in this range. ``` import numpy as np from utils import make_uniform qs = np.arange(50, 501) prior_N = make_uniform(qs, name='N') prior_N.shape ``` So that's our prior. To compute the likelihood of the data, we can use `hypergeom` with constants `K` and `n`, and a range of values of `N`. ``` Ns = prior_N.qs K = 23 n = 19 k = 4 likelihood = hypergeom(Ns, K, n).pmf(k) ``` We can compute the posterior in the usual way. ``` posterior_N = prior_N * likelihood posterior_N.normalize() ``` And here's what it looks like. ``` posterior_N.plot(color='C4') decorate(xlabel='Population of bears (N)', ylabel='PDF', title='Posterior distribution of N') ``` The most likely value is 109. ``` posterior_N.max_prob() ``` But the distribution is skewed to the right, so the posterior mean is substantially higher. ``` posterior_N.mean() ``` And the credible interval is quite wide. ``` posterior_N.credible_interval(0.9) ``` This solution is relatively simple, but it turns out we can do a little better if we model the unknown probability of observing a bear explicitly. ## Two Parameter Model Next we'll try a model with two parameters: the number of bears, `N`, and the probability of observing a bear, `p`. We'll assume that the probability is the same in both rounds, which is probably reasonable in this case because it is the same kind of trap in the same place. We'll also assume that the probabilities are independent; that is, the probability a bear is observed in the second round does not depend on whether it was observed in the first round. This assumption might be less reasonable, but for now it is a necessary simplification. Here are the counts again: ``` K = 23 n = 19 k = 4 ``` For this model, I'll express the data in a notation that will make it easier to generalize to more than two rounds: * `k10` is the number of bears observed in the first round but not the second, * `k01` is the number of bears observed in the second round but not the first, and * `k11` is the number of bears observed in both rounds. Here are their values. 
``` k10 = 23 - 4 k01 = 19 - 4 k11 = 4 ``` Suppose we know the actual values of `N` and `p`. We can use them to compute the likelihood of this data. For example, suppose we know that `N=100` and `p=0.2`. We can use `N` to compute `k00`, which is the number of unobserved bears. ``` N = 100 observed = k01 + k10 + k11 k00 = N - observed k00 ``` For the update, it will be convenient to store the data as a list that represents the number of bears in each category. ``` x = [k00, k01, k10, k11] x ``` Now, if we know `p=0.2`, we can compute the probability a bear falls in each category. For example, the probability of being observed in both rounds is `p*p`, and the probability of being unobserved in both rounds is `q*q` (where `q=1-p`). ``` p = 0.2 q = 1-p y = [q*q, q*p, p*q, p*p] y ``` Now the probability of the data is given by the [multinomial distribution](https://en.wikipedia.org/wiki/Multinomial_distribution): $$\frac{N!}{\prod x_i!} \prod y_i^{x_i}$$ where $N$ is actual population, $x$ is a sequence with the counts in each category, and $y$ is a sequence of probabilities for each category. SciPy provides `multinomial`, which provides `pmf`, which computes this probability. Here is the probability of the data for these values of `N` and `p`. ``` from scipy.stats import multinomial likelihood = multinomial.pmf(x, N, y) likelihood ``` That's the likelihood if we know `N` and `p`, but of course we don't. So we'll choose prior distributions for `N` and `p`, and use the likelihoods to update it. ## The Prior We'll use `prior_N` again for the prior distribution of `N`, and a uniform prior for the probability of observing a bear, `p`: ``` qs = np.linspace(0, 0.99, num=100) prior_p = make_uniform(qs, name='p') ``` We can make a joint distribution in the usual way. ``` from utils import make_joint joint_prior = make_joint(prior_p, prior_N) joint_prior.shape ``` The result is a Pandas `DataFrame` with values of `N` down the rows and values of `p` across the columns. However, for this problem it will be convenient to represent the prior distribution as a 1-D `Series` rather than a 2-D `DataFrame`. We can convert from one format to the other using `stack`. ``` from empiricaldist import Pmf joint_pmf = Pmf(joint_prior.stack()) joint_pmf.head(3) type(joint_pmf) type(joint_pmf.index) joint_pmf.shape ``` The result is a `Pmf` whose index is a `MultiIndex`. A `MultiIndex` can have more than one column; in this example, the first column contains values of `N` and the second column contains values of `p`. The `Pmf` has one row (and one prior probability) for each possible pair of parameters `N` and `p`. So the total number of rows is the product of the lengths of `prior_N` and `prior_p`. Now we have to compute the likelihood of the data for each pair of parameters. ## The Update To allocate space for the likelihoods, it is convenient to make a copy of `joint_pmf`: ``` likelihood = joint_pmf.copy() ``` As we loop through the pairs of parameters, we compute the likelihood of the data as in the previous section, and then store the result as an element of `likelihood`. ``` observed = k01 + k10 + k11 for N, p in joint_pmf.index: k00 = N - observed x = [k00, k01, k10, k11] q = 1-p y = [q*q, q*p, p*q, p*p] likelihood[N, p] = multinomial.pmf(x, N, y) ``` Now we can compute the posterior in the usual way. ``` posterior_pmf = joint_pmf * likelihood posterior_pmf.normalize() ``` We'll use `plot_contour` again to visualize the joint posterior distribution. 
But remember that the posterior distribution we just computed is represented as a `Pmf`, which is a `Series`, and `plot_contour` expects a `DataFrame`. Since we used `stack` to convert from a `DataFrame` to a `Series`, we can use `unstack` to go the other way. ``` joint_posterior = posterior_pmf.unstack() ``` And here's what the result looks like. ``` from utils import plot_contour plot_contour(joint_posterior) decorate(title='Joint posterior distribution of N and p') ``` The most likely values of `N` are near 100, as in the previous model. The most likely values of `p` are near 0.2. The shape of this contour indicates that these parameters are correlated. If `p` is near the low end of the range, the most likely values of `N` are higher; if `p` is near the high end of the range, `N` is lower. Now that we have a posterior `DataFrame`, we can extract the marginal distributions in the usual way. ``` from utils import marginal posterior2_p = marginal(joint_posterior, 0) posterior2_N = marginal(joint_posterior, 1) ``` Here's the posterior distribution for `p`: ``` posterior2_p.plot(color='C1') decorate(xlabel='Probability of observing a bear', ylabel='PDF', title='Posterior marginal distribution of p') ``` The most likely values are near 0.2. Here's the posterior distribution for `N` based on the two-parameter model, along with the posterior we got using the one-parameter (hypergeometric) model. ``` posterior_N.plot(label='one-parameter model', color='C4') posterior2_N.plot(label='two-parameter model', color='C1') decorate(xlabel='Population of bears (N)', ylabel='PDF', title='Posterior marginal distribution of N') ``` With the two-parameter model, the mean is a little lower and the 90% credible interval is a little narrower. ``` print(posterior_N.mean(), posterior_N.credible_interval(0.9)) print(posterior2_N.mean(), posterior2_N.credible_interval(0.9)) ``` The two-parameter model yields a narrower posterior distribution for `N`, compared to the one-parameter model, because it takes advantage of an additional source of information: the consistency of the two observations. To see how this helps, consider a scenario where `N` is relatively low, like 138 (the posterior mean of the two-parameter model). ``` N1 = 138 ``` Given that we saw 23 bears during the first trial and 19 during the second, we can estimate the corresponding value of `p`. ``` mean = (23 + 19) / 2 p = mean/N1 p ``` With these parameters, how much variability do you expect in the number of bears from one trial to the next? We can quantify that by computing the standard deviation of the binomial distribution with these parameters. ``` from scipy.stats import binom binom(N1, p).std() ``` Now let's consider a second scenario where `N` is 173, the posterior mean of the one-parameter model. The corresponding value of `p` is lower. ``` N2 = 173 p = mean/N2 p ``` In this scenario, the variation we expect to see from one trial to the next is higher. ``` binom(N2, p).std() ``` So if the number of bears we observe is the same in both trials, that would be evidence for lower values of `N`, where we expect more consistency. If the number of bears is substantially different between the two trials, that would be evidence for higher values of `N`. In the actual data, the difference between the two trials is low, which is why the posterior mean of the two-parameter model is lower. The two-parameter model takes advantage of additional information, which is why the credible interval is narrower. 
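To put a number on "narrower", we can compare the spread of the two posteriors directly. This is a small sketch reusing the objects computed above; it assumes the `Pmf` objects expose `std` in addition to the `credible_interval` method used elsewhere in this chapter.

```
# Compare the spread of the one- and two-parameter posteriors for N.
for name, post in [('one-parameter', posterior_N), ('two-parameter', posterior2_N)]:
    low, high = post.credible_interval(0.9)
    print(name, 'std:', post.std(), '90% CI width:', high - low)
```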
## Joint and Marginal Distributions Marginal distributions are called "marginal" because in a common visualization they appear in the margins of the plot. Seaborn provides a class called `JointGrid` that creates this visualization. The following function uses it to show the joint and marginal distributions in a single plot. ``` import pandas as pd from seaborn import JointGrid def joint_plot(joint, **options): """Show joint and marginal distributions. joint: DataFrame that represents a joint distribution options: passed to JointGrid """ # get the names of the parameters x = joint.columns.name x = 'x' if x is None else x y = joint.index.name y = 'y' if y is None else y # make a JointGrid with minimal data data = pd.DataFrame({x:[0], y:[0]}) g = JointGrid(x=x, y=y, data=data, **options) # replace the contour plot g.ax_joint.contour(joint.columns, joint.index, joint, cmap='viridis') # replace the marginals marginal_x = marginal(joint, 0) g.ax_marg_x.plot(marginal_x.qs, marginal_x.ps) marginal_y = marginal(joint, 1) g.ax_marg_y.plot(marginal_y.ps, marginal_y.qs) joint_plot(joint_posterior) ``` A `JointGrid` is a concise way to represent the joint and marginal distributions visually. ## The Lincoln Index Problem In [an excellent blog post](http://www.johndcook.com/blog/2010/07/13/lincoln-index/), John D. Cook wrote about the Lincoln index, which is a way to estimate the number of errors in a document (or program) by comparing results from two independent testers. Here's his presentation of the problem: > "Suppose you have a tester who finds 20 bugs in your program. You want to estimate how many bugs are really in the program. You know there are at least 20 bugs, and if you have supreme confidence in your tester, you may suppose there are around 20 bugs. But maybe your tester isn't very good. Maybe there are hundreds of bugs. How can you have any idea how many bugs there are? There's no way to know with one tester. But if you have two testers, you can get a good idea, even if you don't know how skilled the testers are." Suppose the first tester finds 20 bugs, the second finds 15, and they find 3 in common; how can we estimate the number of bugs? This problem is similar to the Grizzly Bear problem, so I'll represent the data in the same way. ``` k10 = 20 - 3 k01 = 15 - 3 k11 = 3 ``` But in this case it is probably not reasonable to assume that the testers have the same probability of finding a bug. So I'll define two parameters, `p0` for the probability that the first tester finds a bug, and `p1` for the probability that the second tester finds a bug. I will continue to assume that the probabilities are independent, which is like assuming that all bugs are equally easy to find. That might not be a good assumption, but let's stick with it for now. As an example, suppose we know that the probabilities are 0.2 and 0.15. ``` p0, p1 = 0.2, 0.15 ``` We can compute the array of probabilities, `y`, like this: ``` def compute_probs(p0, p1): """Computes the probability for each of 4 categories.""" q0 = 1-p0 q1 = 1-p1 return [q0*q1, q0*p1, p0*q1, p0*p1] y = compute_probs(p0, p1) y ``` With these probabilities, there is a 68% chance that neither tester finds the bug and a 3% chance that both do. Pretending that these probabilities are known, we can compute the posterior distribution for `N`. Here's a prior distribution that's uniform from 32 to 350 bugs. 
``` qs = np.arange(32, 350, step=5) prior_N = make_uniform(qs, name='N') prior_N.head(3) ``` I'll put the data in an array, with 0 as a place-keeper for the unknown value `k00`. ``` data = np.array([0, k01, k10, k11]) ``` And here are the likelihoods for each value of `N`, with `ps` as a constant. ``` likelihood = prior_N.copy() observed = data.sum() x = data.copy() for N in prior_N.qs: x[0] = N - observed likelihood[N] = multinomial.pmf(x, N, y) ``` We can compute the posterior in the usual way. ``` posterior_N = prior_N * likelihood posterior_N.normalize() ``` And here's what it looks like. ``` posterior_N.plot(color='C4') decorate(xlabel='Number of bugs (N)', ylabel='PMF', title='Posterior marginal distribution of n with known p1, p2') print(posterior_N.mean(), posterior_N.credible_interval(0.9)) ``` With the assumption that `p0` and `p1` are known to be `0.2` and `0.15`, the posterior mean is 102 with 90% credible interval (77, 127). But this result is based on the assumption that we know the probabilities, and we don't. ## Three-parameter Model What we need is a model with three parameters: `N`, `p0`, and `p1`. We'll use `prior_N` again for the prior distribution of `N`, and here are the priors for `p0` and `p1`: ``` qs = np.linspace(0, 1, num=51) prior_p0 = make_uniform(qs, name='p0') prior_p1 = make_uniform(qs, name='p1') ``` Now we have to assemble them into a joint prior with three dimensions. I'll start by putting the first two into a `DataFrame`. ``` joint2 = make_joint(prior_p0, prior_N) joint2.shape ``` Now I'll stack them, as in the previous example, and put the result in a `Pmf`. ``` joint2_pmf = Pmf(joint2.stack()) joint2_pmf.head(3) ``` We can use `make_joint` again to add in the third parameter. ``` joint3 = make_joint(prior_p1, joint2_pmf) joint3.shape ``` The result is a `DataFrame` with values of `N` and `p0` in a `MultiIndex` that goes down the rows and values of `p1` in an index that goes across the columns. ``` joint3.head(3) ``` Now I'll apply `stack` again: ``` joint3_pmf = Pmf(joint3.stack()) joint3_pmf.head(3) ``` The result is a `Pmf` with a three-column `MultiIndex` containing all possible triplets of parameters. The number of rows is the product of the number of values in all three priors, which is almost 170,000. ``` joint3_pmf.shape ``` That's still small enough to be practical, but it will take longer to compute the likelihoods than in the previous examples. Here's the loop that computes the likelihoods; it's similar to the one in the previous section: ``` likelihood = joint3_pmf.copy() observed = data.sum() x = data.copy() for N, p0, p1 in joint3_pmf.index: x[0] = N - observed y = compute_probs(p0, p1) likelihood[N, p0, p1] = multinomial.pmf(x, N, y) ``` We can compute the posterior in the usual way. ``` posterior_pmf = joint3_pmf * likelihood posterior_pmf.normalize() ``` Now, to extract the marginal distributions, we could unstack the joint posterior as we did in the previous section. But `Pmf` provides a version of `marginal` that works with a `Pmf` rather than a `DataFrame`. Here's how we use it to get the posterior distribution for `N`. ``` posterior_N = posterior_pmf.marginal(0) ``` And here's what it looks look. ``` posterior_N.plot(color='C4') decorate(xlabel='Number of bugs (N)', ylabel='PDF', title='Posterior marginal distributions of N') posterior_N.mean() ``` The posterior mean is 105 bugs, which suggests that there are still many bugs the testers have not found. Here are the posteriors for `p0` and `p1`. 
``` posterior_p1 = posterior_pmf.marginal(1) posterior_p2 = posterior_pmf.marginal(2) posterior_p1.plot(label='p1') posterior_p2.plot(label='p2') decorate(xlabel='Probability of finding a bug', ylabel='PDF', title='Posterior marginal distributions of p1 and p2') posterior_p1.mean(), posterior_p1.credible_interval(0.9) posterior_p2.mean(), posterior_p2.credible_interval(0.9) ``` Comparing the posterior distributions, the tester who found more bugs probably has a higher probability of finding bugs. The posterior means are about 23% and 18%. But the distributions overlap, so we should not be too sure. This is the first example we've seen with three parameters. As the number of parameters increases, the number of combinations increases quickly. The method we've been using so far, enumerating all possible combinations, becomes impractical if the number of parameters is more than 3 or 4. However there are other methods that can handle models with many more parameters, as we'll see in <<_MCMC>>. ## Summary The problems in this chapter are examples of [mark and recapture](https://en.wikipedia.org/wiki/Mark_and_recapture) experiments, which are used in ecology to estimate animal populations. They also have applications in engineering, as in the Lincoln index problem. And in the exercises you'll see that they are used in epidemiology, too. This chapter introduces two new probability distributions: * The hypergeometric distribution is a variation of the binomial distribution in which samples are drawn from the population without replacement. * The multinomial distribution is a generalization of the binomial distribution where there are more than two possible outcomes. Also in this chapter, we saw the first example of a model with three parameters. We'll see more in subsequent chapters. ## Exercises **Exercise:** [In an excellent paper](http://chao.stat.nthu.edu.tw/wordpress/paper/110.pdf), Anne Chao explains how mark and recapture experiments are used in epidemiology to estimate the prevalence of a disease in a human population based on multiple incomplete lists of cases. One of the examples in that paper is a study "to estimate the number of people who were infected by hepatitis in an outbreak that occurred in and around a college in northern Taiwan from April to July 1995." Three lists of cases were available: 1. 135 cases identified using a serum test. 2. 122 cases reported by local hospitals. 3. 126 cases reported on questionnaires collected by epidemiologists. In this exercise, we'll use only the first two lists; in the next exercise we'll bring in the third list. Make a joint prior and update it using this data, then compute the posterior mean of `N` and a 90% credible interval. The following array contains 0 as a place-holder for the unknown value of `k00`, followed by known values of `k01`, `k10`, and `k11`. ``` data2 = np.array([0, 73, 86, 49]) ``` These data indicate that there are 73 cases on the second list that are not on the first, 86 cases on the first list that are not on the second, and 49 cases on both lists. To keep things simple, we'll assume that each case has the same probability of appearing on each list. So we'll use a two-parameter model where `N` is the total number of cases and `p` is the probability that any case appears on any list. Here are priors you can start with (but feel free to modify them). 
``` qs = np.arange(200, 500, step=5) prior_N = make_uniform(qs, name='N') prior_N.head(3) qs = np.linspace(0, 0.98, num=50) prior_p = make_uniform(qs, name='p') prior_p.head(3) # Solution goes here # Solution goes here # Solution goes here # Solution goes here # Solution goes here # Solution goes here # Solution goes here # Solution goes here ``` **Exercise:** Now let's do the version of the problem with all three lists. Here's the data from Chou's paper: ``` Hepatitis A virus list P Q E Data 1 1 1 k111 =28 1 1 0 k110 =21 1 0 1 k101 =17 1 0 0 k100 =69 0 1 1 k011 =18 0 1 0 k010 =55 0 0 1 k001 =63 0 0 0 k000 =?? ``` Write a loop that computes the likelihood of the data for each pair of parameters, then update the prior and compute the posterior mean of `N`. How does it compare to the results using only the first two lists? Here's the data in a NumPy array (in reverse order). ``` data3 = np.array([0, 63, 55, 18, 69, 17, 21, 28]) ``` Again, the first value is a place-keeper for the unknown `k000`. The second value is `k001`, which means there are 63 cases that appear on the third list but not the first two. And the last value is `k111`, which means there are 28 cases that appear on all three lists. In the two-list version of the problem we computed `ps` by enumerating the combinations of `p` and `q`. ``` q = 1-p ps = [q*q, q*p, p*q, p*p] ``` We could do the same thing for the three-list version, computing the probability for each of the eight categories. But we can generalize it by recognizing that we are computing the cartesian product of `p` and `q`, repeated once for each list. And we can use the following function (based on [this StackOverflow answer](https://stackoverflow.com/questions/58242078/cartesian-product-of-arbitrary-lists-in-pandas/58242079#58242079)) to compute Cartesian products: ``` def cartesian_product(*args, **options): """Cartesian product of sequences. args: any number of sequences options: passes to `MultiIndex.from_product` returns: DataFrame with one column per sequence """ index = pd.MultiIndex.from_product(args, **options) return pd.DataFrame(index=index).reset_index() ``` Here's an example with `p=0.2`: ``` p = 0.2 t = (1-p, p) df = cartesian_product(t, t, t) df ``` To compute the probability for each category, we take the product across the columns: ``` y = df.prod(axis=1) y ``` Now you finish it off from there. ``` # Solution goes here # Solution goes here # Solution goes here # Solution goes here # Solution goes here # Solution goes here # Solution goes here # Solution goes here ```
# Writing Low-Level TensorFlow Code **Learning Objectives** 1. Practice defining and performing basic operations on constant Tensors 2. Use Tensorflow's automatic differentiation capability 3. Learn how to train a linear regression from scratch with TensorFLow ## Introduction In this notebook, we will start by reviewing the main operations on Tensors in TensorFlow and understand how to manipulate TensorFlow Variables. We explain how these are compatible with python built-in list and numpy arrays. Then we will jump to the problem of training a linear regression from scratch with gradient descent. The first order of business will be to understand how to compute the gradients of a function (the loss here) with respect to some of its arguments (the model weights here). The TensorFlow construct allowing us to do that is `tf.GradientTape`, which we will describe. At last we will create a simple training loop to learn the weights of a 1-dim linear regression using synthetic data generated from a linear model. As a bonus exercise, we will do the same for data generated from a non linear model, forcing us to manual engineer non-linear features to improve our linear model performance. Each learning objective will correspond to a __#TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/write_low_level_code.ipynb) ``` import numpy as np from matplotlib import pyplot as plt import tensorflow as tf print(tf.__version__) ``` ## Operations on Tensors ### Variables and Constants Tensors in TensorFlow are either contant (`tf.constant`) or variables (`tf.Variable`). Constant values can not be changed, while variables values can be. The main difference is that instances of `tf.Variable` have methods allowing us to change their values while tensors constructed with `tf.constant` don't have these methods, and therefore their values can not be changed. When you want to change the value of a `tf.Variable` `x` use one of the following method: * `x.assign(new_value)` * `x.assign_add(value_to_be_added)` * `x.assign_sub(value_to_be_subtracted` ``` x = tf.constant([2, 3, 4]) x x = tf.Variable(2.0, dtype=tf.float32, name='my_variable') x.assign(45.8) x x.assign_add(4) x x.assign_sub(3) x ``` ### Point-wise operations Tensorflow offers similar point-wise tensor operations as numpy does: * `tf.add` allows to add the components of a tensor * `tf.multiply` allows us to multiply the components of a tensor * `tf.subtract` allow us to substract the components of a tensor * `tf.math.*` contains the usual math operations to be applied on the components of a tensor * and many more... Most of the standard arithmetic operations (`tf.add`, `tf.substrac`, etc.) are overloaded by the usual corresponding arithmetic symbols (`+`, `-`, etc.) **Lab Task #1:** Performing basic operations on Tensors 1. In the first cell, define two constants `a` and `b` and compute their sum in c and d respectively, below using `tf.add` and `+` and verify both operations produce the same values. 2. In the second cell, compute the product of the constants `a` and `b` below using `tf.multiply` and `*` and verify both operations produce the same values. 3. In the third cell, compute the exponential of the constant `a` using `tf.math.exp`. Note, you'll need to specify the type for this operation. ``` # TODO 1a a = # TODO -- Your code here. 
b = # TODO -- Your code here. c = # TODO -- Your code here. d = # TODO -- Your code here. print("c:", c) print("d:", d) # TODO 1b a = # TODO -- Your code here. b = # TODO -- Your code here. c = # TODO -- Your code here. d = # TODO -- Your code here. print("c:", c) print("d:", d) # TODO 1c # tf.math.exp expects floats so we need to explicitly give the type a = # TODO -- Your code here. b = # TODO -- Your code here. print("b:", b) ``` ### NumPy Interoperability In addition to native TF tensors, tensorflow operations can take native python types and NumPy arrays as operands. ``` # native python list a_py = [1, 2] b_py = [3, 4] tf.add(a_py, b_py) # numpy arrays a_np = np.array([1, 2]) b_np = np.array([3, 4]) tf.add(a_np, b_np) # native TF tensor a_tf = tf.constant([1, 2]) b_tf = tf.constant([3, 4]) tf.add(a_tf, b_tf) ``` You can convert a native TF tensor to a NumPy array using .numpy() ``` a_tf.numpy() ``` ## Linear Regression Now let's use low level tensorflow operations to implement linear regression. Later in the course you'll see abstracted ways to do this using high level TensorFlow. ### Toy Dataset We'll model the following function: \begin{equation} y= 2x + 10 \end{equation} ``` X = tf.constant(range(10), dtype=tf.float32) Y = 2 * X + 10 print("X:{}".format(X)) print("Y:{}".format(Y)) ``` Let's also create a test dataset to evaluate our models: ``` X_test = tf.constant(range(10, 20), dtype=tf.float32) Y_test = 2 * X_test + 10 print("X_test:{}".format(X_test)) print("Y_test:{}".format(Y_test)) ``` #### Loss Function The simplest model we can build is a model that for each value of x returns the sample mean of the training set: ``` y_mean = Y.numpy().mean() def predict_mean(X): y_hat = [y_mean] * len(X) return y_hat Y_hat = predict_mean(X_test) ``` Using mean squared error, our loss is: \begin{equation} MSE = \frac{1}{m}\sum_{i=1}^{m}(\hat{Y}_i-Y_i)^2 \end{equation} For this simple model the loss is then: ``` errors = (Y_hat - Y)**2 loss = tf.reduce_mean(errors) loss.numpy() ``` This values for the MSE loss above will give us a baseline to compare how a more complex model is doing. Now, if $\hat{Y}$ represents the vector containing our model's predictions when we use a linear regression model \begin{equation} \hat{Y} = w_0X + w_1 \end{equation} we can write a loss function taking as arguments the coefficients of the model: ``` def loss_mse(X, Y, w0, w1): Y_hat = w0 * X + w1 errors = (Y_hat - Y)**2 return tf.reduce_mean(errors) ``` ### Gradient Function To use gradient descent we need to take the partial derivatives of the loss function with respect to each of the weights. We could manually compute the derivatives, but with Tensorflow's automatic differentiation capabilities we don't have to! During gradient descent we think of the loss as a function of the parameters $w_0$ and $w_1$. Thus, we want to compute the partial derivative with respect to these variables. For that we need to wrap our loss computation within the context of `tf.GradientTape` instance which will record gradient information: ```python with tf.GradientTape() as tape: loss = # computation ``` This will allow us to later compute the gradients of any tensor computed within the `tf.GradientTape` context with respect to instances of `tf.Variable`: ```python gradients = tape.gradient(loss, [w0, w1]) ``` We illustrate this procedure by computing the loss gradients with respect to the model weights: **Lab Task #2:** Complete the function below to compute the loss gradients with respect to the model weights `w0` and `w1`. 
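Before filling in the lab task below, it may help to see `tf.GradientTape` on a toy function first. This is only an illustration, not the lab solution; the variable `x` and the function $y = x^2$ are made up for the example.

```
# Toy example: the gradient of y = x^2 at x = 3 should be 2*x = 6.
x = tf.Variable(3.0)
with tf.GradientTape() as tape:
    y = x**2
dy_dx = tape.gradient(y, x)
print(dy_dx.numpy())  # 6.0
```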
``` # TODO 2 def compute_gradients(X, Y, w0, w1): # TODO -- Your code here. w0 = tf.Variable(0.0) w1 = tf.Variable(0.0) dw0, dw1 = compute_gradients(X, Y, w0, w1) print("dw0:", dw0.numpy()) print("dw1", dw1.numpy()) ``` ### Training Loop Here we have a very simple training loop that converges. Note we are ignoring best practices like batching, creating a separate test set, and random weight initialization for the sake of simplicity. **Lab Task #3:** Complete the `for` loop below to train a linear regression. 1. Use `compute_gradients` to compute `dw0` and `dw1`. 2. Then, re-assign the value of `w0` and `w1` using the `.assign_sub(...)` method with the computed gradient values and the `LEARNING_RATE`. 3. Finally, for every 100th step , we'll compute and print the `loss`. Use the `loss_mse` function we created above to compute the `loss`. ``` # TODO 3 STEPS = 1000 LEARNING_RATE = .02 MSG = "STEP {step} - loss: {loss}, w0: {w0}, w1: {w1}\n" w0 = tf.Variable(0.0) w1 = tf.Variable(0.0) for step in range(0, STEPS + 1): dw0, dw1 = # TODO -- Your code here. if step % 100 == 0: loss = # TODO -- Your code here. print(MSG.format(step=step, loss=loss, w0=w0.numpy(), w1=w1.numpy())) ``` Now let's compare the test loss for this linear regression to the test loss from the baseline model that outputs always the mean of the training set: ``` loss = loss_mse(X_test, Y_test, w0, w1) loss.numpy() ``` This is indeed much better! ## Bonus Try modeling a non-linear function such as: $y=xe^{-x^2}$ ``` X = tf.constant(np.linspace(0, 2, 1000), dtype=tf.float32) Y = X * tf.exp(-X**2) %matplotlib inline plt.plot(X, Y) def make_features(X): f1 = tf.ones_like(X) # Bias. f2 = X f3 = tf.square(X) f4 = tf.sqrt(X) f5 = tf.exp(X) return tf.stack([f1, f2, f3, f4, f5], axis=1) def predict(X, W): return tf.squeeze(X @ W, -1) def loss_mse(X, Y, W): Y_hat = predict(X, W) errors = (Y_hat - Y)**2 return tf.reduce_mean(errors) def compute_gradients(X, Y, W): with tf.GradientTape() as tape: loss = loss_mse(Xf, Y, W) return tape.gradient(loss, W) STEPS = 2000 LEARNING_RATE = .02 Xf = make_features(X) n_weights = Xf.shape[1] W = tf.Variable(np.zeros((n_weights, 1)), dtype=tf.float32) # For plotting steps, losses = [], [] plt.figure() for step in range(1, STEPS + 1): dW = compute_gradients(X, Y, W) W.assign_sub(dW * LEARNING_RATE) if step % 100 == 0: loss = loss_mse(Xf, Y, W) steps.append(step) losses.append(loss) plt.clf() plt.plot(steps, losses) print("STEP: {} MSE: {}".format(STEPS, loss_mse(Xf, Y, W))) # The .figure() method will create a new figure, or activate an existing figure. plt.figure() # The .plot() is a versatile function, and will take an arbitrary number of arguments. For example, to plot x versus y. plt.plot(X, Y, label='actual') plt.plot(X, predict(Xf, W), label='predicted') # The .legend() method will place a legend on the axes. plt.legend() ``` Copyright 2021 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
## Modeling the musical difficulty ``` import ipywidgets as widgets from IPython.display import Audio, display, clear_output from ipywidgets import interactive import matplotlib.pyplot as plt import seaborn as sns import numpy as np distributions = { "krumhansl_kessler": [ 0.15195022732711172, 0.0533620483369227, 0.08327351040918879, 0.05575496530270399, 0.10480976310122037, 0.09787030390045463, 0.06030150753768843, 0.1241923905240488, 0.05719071548217276, 0.08758076094759511, 0.05479779851639147, 0.06891600861450106, 0.14221523253201526, 0.06021118849696697, 0.07908335205571781, 0.12087171422152324, 0.05841383958660975, 0.07930802066951245, 0.05706582790384183, 0.1067175915524601, 0.08941810829027184, 0.06043585711076162, 0.07503931700741405, 0.07121995057290496 ], "sapp": [ 0.2222222222222222, 0.0, 0.1111111111111111, 0.0, 0.1111111111111111, 0.1111111111111111, 0.0, 0.2222222222222222, 0.0, 0.1111111111111111, 0.0, 0.1111111111111111, 0.2222222222222222, 0.0, 0.1111111111111111, 0.1111111111111111, 0.0, 0.1111111111111111, 0.0, 0.2222222222222222, 0.1111111111111111, 0.0, 0.05555555555555555, 0.05555555555555555 ], "aarden_essen": [ 0.17766092893562843, 0.001456239417504233, 0.1492649402940239, 0.0016018593592562562, 0.19804892078043168, 0.11358695456521818, 0.002912478835008466, 0.2206199117520353, 0.001456239417504233, 0.08154936738025305, 0.002329979068008373, 0.049512180195127924, 0.18264800547944018, 0.007376190221285707, 0.14049900421497014, 0.16859900505797015, 0.0070249402107482066, 0.14436200433086013, 0.0070249402107482066, 0.18616100558483017, 0.04566210136986304, 0.019318600579558018, 0.07376190221285707, 0.017562300526869017 ], "bellman_budge": [ 0.168, 0.0086, 0.1295, 0.0141, 0.1349, 0.1193, 0.0125, 0.2028, 0.018000000000000002, 0.0804, 0.0062, 0.1057, 0.1816, 0.0069, 0.12990000000000002, 0.1334, 0.010700000000000001, 0.1115, 0.0138, 0.2107, 0.07490000000000001, 0.015300000000000001, 0.0092, 0.10210000000000001 ], "temperley": [ 0.17616580310880825, 0.014130946773433817, 0.11493170042392838, 0.019312293923692884, 0.15779557230334432, 0.10833725859632594, 0.02260951483749411, 0.16839378238341965, 0.02449364107395195, 0.08619877531794629, 0.013424399434762127, 0.09420631182289213, 0.1702127659574468, 0.020081281377002155, 0.1133158020559407, 0.14774085584508725, 0.011714080803251255, 0.10996892182644036, 0.02510160172125269, 0.1785799665311977, 0.09658140090843893, 0.016017212526894576, 0.03179536218025341, 0.07889074826679417 ], 'albrecht_shanahan1': [ 0.238, 0.006, 0.111, 0.006, 0.137, 0.094, 0.016, 0.214, 0.009, 0.080, 0.008, 0.081, 0.220, 0.006, 0.104, 0.123, 0.019, 0.103, 0.012, 0.214, 0.062, 0.022, 0.061, 0.052 ], 'albrecht_shanahan2': [ 0.21169, 0.00892766, 0.120448, 0.0100265, 0.131444, 0.0911768, 0.0215947, 0.204703, 0.012894, 0.0900445, 0.012617, 0.0844338, 0.201933, 0.009335, 0.107284, 0.124169, 0.0199224, 0.108324, 0.014314, 0.202699, 0.0653907, 0.0252515, 0.071959, 0.049419 ] } def compute_threshold(dist_max, dist_min, d, cutoff): if d < cutoff: thresh = dist_max - d * ((dist_max - dist_min) / cutoff) else: thresh = 0.0 return thresh def clipped_distribution(orig_dist, d, cutoff): # make a copy of the original distribution copy = np.array(orig_dist) # compute the threshold to get rid of difficult notes at initial difficulties threshold = compute_threshold(max(copy), min(copy), d, cutoff) # remove the most difficult notes for low difficulties copy[copy < threshold] = 0.0 # norm-1 of the distribution copy = copy / sum(copy) return copy, threshold def 
scaled_distribution(clipped_dist, h, d): # make a copy of the original distribution copy = np.array(clipped_dist) # compute the scaling factor based on handicap parameter and difficulty (user input) scaling = h - (h * d) # scale the distribution copy = copy ** scaling # norm-1 of the distribution copy = copy / sum(copy) return copy def f(dist_name, clipping, handicap, difficulty): # create the figures f, (axmaj, axmin) = plt.subplots(2, 3, sharex=True, sharey=True) # get the original distributions for major and minor keys dist = np.array(distributions[dist_name]) major = dist[:12] minor = dist[12:] # clip the distributions for lower difficulties clipped_major, major_threshold = clipped_distribution(major, difficulty, clipping) clipped_minor, minor_threshold = clipped_distribution(minor, difficulty, clipping) # get the scaled distribution according to difficulty, handicap, and initial clipping scaled_major = scaled_distribution(clipped_major, handicap, difficulty) scaled_minor = scaled_distribution(clipped_minor, handicap, difficulty) ylim_major = max(max(np.amax(major), np.amax(clipped_major)), np.amax(scaled_major)) ylim_minor = max(max(np.amax(minor), np.amax(clipped_minor)), np.amax(scaled_minor)) # prepare to plot x = np.array(['C', 'C#', 'D', 'Eb', 'E', 'F', 'F#', 'G', 'Ab', 'A', 'Bb', 'B']) sns.barplot(x=x, y=major, ax=axmaj[0]) axmaj[0].set_title("Original Major") axmaj[0].axhline(major_threshold, color="k", clip_on=True) axmaj[0].set_ylim(0, ylim_major) sns.barplot(x=x, y=clipped_major, ax=axmaj[1]) axmaj[1].set_title("Clipped Major") axmaj[1].set_ylim(0, ylim_major) sns.barplot(x=x, y=scaled_major, ax=axmaj[2]) axmaj[2].set_title("Scaled Major") axmaj[2].set_ylim(0, ylim_major) sns.barplot(x=x, y=minor, ax=axmin[0]) axmin[0].set_title("Original Minor") axmin[0].axhline(minor_threshold, color="k", clip_on=True) axmin[0].set_ylim(0, ylim_minor) sns.barplot(x=x, y=clipped_minor, ax=axmin[1]) axmin[1].set_title("Clipped Minor") axmin[1].set_ylim(0, ylim_minor) sns.barplot(x=x, y=scaled_minor, ax=axmin[2]) axmin[2].set_title("Scaled Minor") axmin[2].set_ylim(0, ylim_minor) plt.tight_layout(h_pad=2) return scaled_major, scaled_minor distribution_name = list(distributions.keys()) handicap = widgets.IntSlider(min=1, max=10, value=2, continuous_update=False) difficulty = widgets.FloatSlider(min=0.0, max=1.0, value=0.5, step=0.01, continuous_update=False) clipping = widgets.FloatSlider(min=0.2, max=0.8, step=0.1, value=0.2, continuous_update=False) w = interactive(f, dist_name=distribution_name, handicap=handicap, difficulty=difficulty, clipping=clipping) rate = 16000. duration = .1 t = np.linspace(0., duration, int(rate * duration)) notes = range(12) freqs = 220. * 2**(np.arange(3, 3 + len(notes)) / 12.) def synth(f): x = np.sin(f * 2. 
* np.pi * t) * np.sin(t * np.pi / duration) display(Audio(x, rate=rate, autoplay=True)) def sample_major_distribution(b): with output_major: major = w.result[0] note = np.random.choice(np.arange(12), p=major) synth(freqs[note]) clear_output(wait=duration) def sample_minor_distribution(b): with output_minor: minor = w.result[1] note = np.random.choice(np.arange(12), p=minor) synth(freqs[note]) clear_output(wait=duration) display(w) sample_major = widgets.Button(description="C Major") output_major = widgets.Output() display(sample_major, output_major) sample_minor = widgets.Button(description="C Minor") output_minor = widgets.Output() display(sample_minor, output_minor) sample_major.on_click(sample_major_distribution) sample_minor.on_click(sample_minor_distribution) ```
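In equation form, the two transformations implemented by `clipped_distribution` and `scaled_distribution` above can be written as follows (the notation $p_{\max}$, $p_{\min}$, $\tau$ is mine, read directly off the code). For difficulty $d$, clipping cutoff $c$ and handicap $h$, the clipping threshold is

$$
\tau(d) =
\begin{cases}
p_{\max} - d \, \dfrac{p_{\max} - p_{\min}}{c}, & d < c \\
0, & d \ge c
\end{cases}
$$

Entries of the key profile below $\tau(d)$ are zeroed and the vector renormalized; the clipped profile is then raised elementwise to the power $h(1-d)$ and renormalized again. So low difficulties both discard the rarest scale degrees and sharpen the remaining distribution, while $d \to 1$ leaves the profile unclipped and flattens it toward a uniform choice over all twelve notes.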
``` import numpy as np import pandas as pd from datetime import datetime as dt import itertools season_1=pd.read_csv("2015-16.csv")[['Date','HomeTeam','AwayTeam','FTHG','FTAG','FTR']] season_2=pd.read_csv("2014-15.csv")[['Date','HomeTeam','AwayTeam','FTHG','FTAG','FTR']] season_3=pd.read_csv("2013-14.csv")[['Date','HomeTeam','AwayTeam','FTHG','FTAG','FTR']] season_4=pd.read_csv("2012-13.csv")[['Date','HomeTeam','AwayTeam','FTHG','FTAG','FTR']] season_5=pd.read_csv("2011-12.csv")[['Date','HomeTeam','AwayTeam','FTHG','FTAG','FTR']] season_6=pd.read_csv("2010-11.csv")[['Date','HomeTeam','AwayTeam','FTHG','FTAG','FTR']] season_7=pd.read_csv("2009-10.csv")[['Date','HomeTeam','AwayTeam','FTHG','FTAG','FTR']] season_8=pd.read_csv("2008-09.csv")[['Date','HomeTeam','AwayTeam','FTHG','FTAG','FTR']] season_9=pd.read_csv("2007-08.csv")[['Date','HomeTeam','AwayTeam','FTHG','FTAG','FTR']] season_1.shape def parse_date(date): # print(type(date)) data=str(date) print(type(date)) print(date) if date=="": return None else: return dt.strptime(date,"%d/%m/%y").date() seasons=[season_1,season_2,season_3,season_4,season_5,season_6,season_7,season_8,season_9] #apply the above functions for season in seasons: season.Date=season.Date.apply(parse_date) season_1.head(5) #functions adopted from Tewari and Krishna https://github.com/krishnakartik1/LSTM-footballMatchWinner def get_goals_scored(season): print("get_goals_scored") # Create a dictionary with team names as keys teams = {} for i in season.groupby('HomeTeam').mean().T.columns: print("check {} \n".format(i)) teams[i] = [] #print (len(teams["Augsburg"])) # the value corresponding to keys is a list containing the match location. for i in range(len(season)): HTGS = season.iloc[i]['FTHG'] ATGS = season.iloc[i]['FTAG'] teams[season.iloc[i].HomeTeam].append(HTGS) teams[season.iloc[i].AwayTeam].append(ATGS) # Create a dataframe for goals scored where rows are teams and cols are matchweek. GoalsScored = pd.DataFrame(data=teams, index = [i for i in range(1,39)]).T GoalsScored[0] = 0 # Aggregate to get uptil that point for i in range(2,39): GoalsScored[i] = GoalsScored[i] + GoalsScored[i-1] return GoalsScored # Gets the goals conceded agg arranged by teams and matchweek def get_goals_conceded(season): # Create a dictionary with team names as keys teams = {} for i in season.groupby('HomeTeam').mean().T.columns: print("check {} \n".format(i)) teams[i] = [] # the value corresponding to keys is a list containing the match location. for i in range(len(season)): ATGC = season.iloc[i]['FTHG'] HTGC = season.iloc[i]['FTAG'] teams[season.iloc[i].HomeTeam].append(HTGC) teams[season.iloc[i].AwayTeam].append(ATGC) # Create a dataframe for goals scored where rows are teams and cols are matchweek. 
GoalsConceded = pd.DataFrame(data=teams, index = [i for i in range(1,39)]).T GoalsConceded[0] = 0 # Aggregate to get uptil that point for i in range(2,39): GoalsConceded[i] = GoalsConceded[i] + GoalsConceded[i-1] return GoalsConceded def get_gss(season): GC = get_goals_conceded(season) GS = get_goals_scored(season) j = 0 HTGS = [] ATGS = [] HTGC = [] ATGC = [] for i in range(season.shape[0]): ht = season.iloc[i].HomeTeam at = season.iloc[i].AwayTeam HTGS.append(GS.loc[ht][j]) ATGS.append(GS.loc[at][j]) HTGC.append(GC.loc[ht][j]) ATGC.append(GC.loc[at][j]) if ((i + 1)% 10) == 0: j = j + 1 # print("check line 87") # print(season.shape,len(HTGS)) season['HTGS'] = HTGS season['ATGS'] = ATGS season['HTGC'] = HTGC season['ATGC'] = ATGC return season #apply the above functions for season in seasons: season.head() season = get_gss(season) season_1.head(5) season_1 #functions adopted from Tewari and Krishna https://github.com/krishnakartik1/LSTM-footballMatchWinner def get_points(result): if result == 'W': return 3 elif result == 'D': return 1 else: return 0 def get_cuml_points(matchres): matchres_points = matchres.applymap(get_points) for i in range(2,38): matchres_points[i] = matchres_points[i] + matchres_points[i-1] matchres_points.insert(column =0, loc = 0, value = [0*i for i in range(20)]) return matchres_points def get_matchres(season): print("here") # Create a dictionary with team names as keys teams = {} for i in season.groupby('HomeTeam').mean().T.columns: teams[i] = [] # the value corresponding to keys is a list containing the match result for i in range(len(season)): if season.iloc[i].FTR == 'H': teams[season.iloc[i].HomeTeam].append('W') teams[season.iloc[i].AwayTeam].append('L') elif season.iloc[i].FTR == 'A': teams[season.iloc[i].AwayTeam].append('W') teams[season.iloc[i].HomeTeam].append('L') else: teams[season.iloc[i].AwayTeam].append('D') teams[season.iloc[i].HomeTeam].append('D') return pd.DataFrame(data=teams, index = [i for i in range(1,39)]).T def get_agg_points(season): matchres = get_matchres(season) cum_pts = get_cuml_points(matchres) HTP = [] ATP = [] j = 0 for i in range(season.shape[0]): ht = season.iloc[i].HomeTeam at = season.iloc[i].AwayTeam HTP.append(cum_pts.loc[ht][j]) ATP.append(cum_pts.loc[at][j]) if ((i + 1)% 10) == 0: j = j + 1 season['HTP'] = HTP season['ATP'] = ATP return season #apply the above functions for season in seasons: season.head() season = get_agg_points(season) season_1.head(40) la_liga = pd.concat(seasons) la_liga la_liga.to_csv('la_liga_stats.csv') ```
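As a quick sanity check of the running-total bookkeeping in `get_goals_scored` and `get_cuml_points`, here is a small self-contained sketch on an invented three-match fixture list (team names and scores are placeholders): a `cumsum` along the columns reproduces the manual `GoalsScored[i] = GoalsScored[i] + GoalsScored[i-1]` loop.

```
import pandas as pd

# Hypothetical mini-season: each row is one match.
toy = pd.DataFrame({
    'HomeTeam': ['A', 'B', 'C'],
    'AwayTeam': ['B', 'C', 'A'],
    'FTHG':     [2, 0, 1],   # full-time home goals
    'FTAG':     [1, 3, 1],   # full-time away goals
})

# Goals scored per team per appearance (the role matchweeks play above).
scored = {team: [] for team in pd.unique(toy[['HomeTeam', 'AwayTeam']].values.ravel())}
for _, row in toy.iterrows():
    scored[row.HomeTeam].append(row.FTHG)
    scored[row.AwayTeam].append(row.FTAG)

goals = pd.DataFrame(scored).T      # rows = teams, columns = appearance index
cumulative = goals.cumsum(axis=1)   # running total per team
print(cumulative)
```

Using `cumsum(axis=1)` avoids the explicit week-by-week loop and removes the risk of off-by-one mistakes in the loop bounds.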
``` #hide #skip ! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab # default_exp losses # default_cls_lvl 3 #export from fastai.imports import * from fastai.torch_imports import * from fastai.torch_core import * from fastai.layers import * #hide from nbdev.showdoc import * ``` # Loss Functions > Custom fastai loss functions ``` # export class BaseLoss(): "Same as `loss_cls`, but flattens input and target." activation=decodes=noops def __init__(self, loss_cls, *args, axis=-1, flatten=True, floatify=False, is_2d=True, **kwargs): store_attr("axis,flatten,floatify,is_2d") self.func = loss_cls(*args,**kwargs) functools.update_wrapper(self, self.func) def __repr__(self): return f"FlattenedLoss of {self.func}" @property def reduction(self): return self.func.reduction @reduction.setter def reduction(self, v): self.func.reduction = v def _contiguous(self,x): return TensorBase(x.transpose(self.axis,-1).contiguous()) if isinstance(x,torch.Tensor) else x def __call__(self, inp, targ, **kwargs): inp,targ = map(self._contiguous, (inp,targ)) if self.floatify and targ.dtype!=torch.float16: targ = targ.float() if targ.dtype in [torch.int8, torch.int16, torch.int32]: targ = targ.long() if self.flatten: inp = inp.view(-1,inp.shape[-1]) if self.is_2d else inp.view(-1) return self.func.__call__(inp, targ.view(-1) if self.flatten else targ, **kwargs) ``` Wrapping a general loss function inside of `BaseLoss` provides extra functionalities to your loss functions: - flattens the tensors before trying to take the losses since it's more convenient (with a potential tranpose to put `axis` at the end) - a potential `activation` method that tells the library if there is an activation fused in the loss (useful for inference and methods such as `Learner.get_preds` or `Learner.predict`) - a potential <code>decodes</code> method that is used on predictions in inference (for instance, an argmax in classification) The `args` and `kwargs` will be passed to `loss_cls` during the initialization to instantiate a loss function. `axis` is put at the end for losses like softmax that are often performed on the last axis. If `floatify=True`, the `targs` will be converted to floats (useful for losses that only accept float targets like `BCEWithLogitsLoss`), and `is_2d` determines if we flatten while keeping the first dimension (batch size) or completely flatten the input. We want the first for losses like Cross Entropy, and the second for pretty much anything else. ``` # export @delegates() class CrossEntropyLossFlat(BaseLoss): "Same as `nn.CrossEntropyLoss`, but flattens input and target." y_int = True @use_kwargs_dict(keep=True, weight=None, ignore_index=-100, reduction='mean') def __init__(self, *args, axis=-1, **kwargs): super().__init__(nn.CrossEntropyLoss, *args, axis=axis, **kwargs) def decodes(self, x): return x.argmax(dim=self.axis) def activation(self, x): return F.softmax(x, dim=self.axis) tst = CrossEntropyLossFlat() output = torch.randn(32, 5, 10) target = torch.randint(0, 10, (32,5)) #nn.CrossEntropy would fail with those two tensors, but not our flattened version. 
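# (Added sketch, not part of the original notebook.) Manually flattening the batch and
# sequence dimensions reproduces what CrossEntropyLossFlat does internally, so the line
# below succeeds and agrees with tst(output, target), while the unflattened
# nn.CrossEntropyLoss call tested next fails on these shapes.
manual_loss = nn.CrossEntropyLoss()(output.view(-1, output.shape[-1]), target.view(-1))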
_ = tst(output, target) test_fail(lambda x: nn.CrossEntropyLoss()(output,target)) #Associated activation is softmax test_eq(tst.activation(output), F.softmax(output, dim=-1)) #This loss function has a decodes which is argmax test_eq(tst.decodes(output), output.argmax(dim=-1)) #In a segmentation task, we want to take the softmax over the channel dimension tst = CrossEntropyLossFlat(axis=1) output = torch.randn(32, 5, 128, 128) target = torch.randint(0, 5, (32, 128, 128)) _ = tst(output, target) test_eq(tst.activation(output), F.softmax(output, dim=1)) test_eq(tst.decodes(output), output.argmax(dim=1)) ``` [Focal Loss](https://arxiv.org/pdf/1708.02002.pdf) is the same as cross entropy except easy-to-classify observations are down-weighted in the loss calculation. The strength of down-weighting is proportional to the size of the `gamma` parameter. Put another way, the larger `gamma` the less the easy-to-classify observations contribute to the loss. ``` # export class FocalLossFlat(CrossEntropyLossFlat): """ Same as CrossEntropyLossFlat but with focal paramter, `gamma`. Focal loss is introduced by Lin et al. https://arxiv.org/pdf/1708.02002.pdf. Note the class weighting factor in the paper, alpha, can be implemented through pytorch `weight` argument in nn.CrossEntropyLoss. """ y_int = True @use_kwargs_dict(keep=True, weight=None, ignore_index=-100, reduction='mean') def __init__(self, *args, gamma=2, axis=-1, **kwargs): self.gamma = gamma self.reduce = kwargs.pop('reduction') if 'reduction' in kwargs else 'mean' super().__init__(*args, reduction='none', axis=axis, **kwargs) def __call__(self, inp, targ, **kwargs): ce_loss = super().__call__(inp, targ, **kwargs) pt = torch.exp(-ce_loss) fl_loss = (1-pt)**self.gamma * ce_loss return fl_loss.mean() if self.reduce == 'mean' else fl_loss.sum() if self.reduce == 'sum' else fl_loss #Compare focal loss with gamma = 0 to cross entropy fl = FocalLossFlat(gamma=0) ce = CrossEntropyLossFlat() output = torch.randn(32, 5, 10) target = torch.randint(0, 10, (32,5)) test_close(fl(output, target), ce(output, target)) #Test focal loss with gamma > 0 is different than cross entropy fl = FocalLossFlat(gamma=2) test_ne(fl(output, target), ce(output, target)) #In a segmentation task, we want to take the softmax over the channel dimension fl = FocalLossFlat(gamma=0, axis=1) ce = CrossEntropyLossFlat(axis=1) output = torch.randn(32, 5, 128, 128) target = torch.randint(0, 5, (32, 128, 128)) test_close(fl(output, target), ce(output, target), eps=1e-4) test_eq(fl.activation(output), F.softmax(output, dim=1)) test_eq(fl.decodes(output), output.argmax(dim=1)) # export @delegates() class BCEWithLogitsLossFlat(BaseLoss): "Same as `nn.BCEWithLogitsLoss`, but flattens input and target." 
@use_kwargs_dict(keep=True, weight=None, reduction='mean', pos_weight=None) def __init__(self, *args, axis=-1, floatify=True, thresh=0.5, **kwargs): if kwargs.get('pos_weight', None) is not None and kwargs.get('flatten', None) is True: raise ValueError("`flatten` must be False when using `pos_weight` to avoid a RuntimeError due to shape mismatch") if kwargs.get('pos_weight', None) is not None: kwargs['flatten'] = False super().__init__(nn.BCEWithLogitsLoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs) self.thresh = thresh def decodes(self, x): return x>self.thresh def activation(self, x): return torch.sigmoid(x) tst = BCEWithLogitsLossFlat() output = torch.randn(32, 5, 10) target = torch.randn(32, 5, 10) #nn.BCEWithLogitsLoss would fail with those two tensors, but not our flattened version. _ = tst(output, target) test_fail(lambda x: nn.BCEWithLogitsLoss()(output,target)) output = torch.randn(32, 5) target = torch.randint(0,2,(32, 5)) #nn.BCEWithLogitsLoss would fail with int targets but not our flattened version. _ = tst(output, target) test_fail(lambda x: nn.BCEWithLogitsLoss()(output,target)) tst = BCEWithLogitsLossFlat(pos_weight=torch.ones(10)) output = torch.randn(32, 5, 10) target = torch.randn(32, 5, 10) _ = tst(output, target) test_fail(lambda x: nn.BCEWithLogitsLoss()(output,target)) #Associated activation is sigmoid test_eq(tst.activation(output), torch.sigmoid(output)) # export @use_kwargs_dict(weight=None, reduction='mean') def BCELossFlat(*args, axis=-1, floatify=True, **kwargs): "Same as `nn.BCELoss`, but flattens input and target." return BaseLoss(nn.BCELoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs) tst = BCELossFlat() output = torch.sigmoid(torch.randn(32, 5, 10)) target = torch.randint(0,2,(32, 5, 10)) _ = tst(output, target) test_fail(lambda x: nn.BCELoss()(output,target)) # export @use_kwargs_dict(reduction='mean') def MSELossFlat(*args, axis=-1, floatify=True, **kwargs): "Same as `nn.MSELoss`, but flattens input and target." return BaseLoss(nn.MSELoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs) tst = MSELossFlat() output = torch.sigmoid(torch.randn(32, 5, 10)) target = torch.randint(0,2,(32, 5, 10)) _ = tst(output, target) test_fail(lambda x: nn.MSELoss()(output,target)) #hide #cuda #Test losses work in half precision if torch.cuda.is_available(): output = torch.sigmoid(torch.randn(32, 5, 10)).half().cuda() target = torch.randint(0,2,(32, 5, 10)).half().cuda() for tst in [BCELossFlat(), MSELossFlat()]: _ = tst(output, target) # export @use_kwargs_dict(reduction='mean') def L1LossFlat(*args, axis=-1, floatify=True, **kwargs): "Same as `nn.L1Loss`, but flattens input and target." 
return BaseLoss(nn.L1Loss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs) #export class LabelSmoothingCrossEntropy(Module): y_int = True def __init__(self, eps:float=0.1, weight=None, reduction='mean'): store_attr() def forward(self, output, target): c = output.size()[1] log_preds = F.log_softmax(output, dim=1) if self.reduction=='sum': loss = -log_preds.sum() else: loss = -log_preds.sum(dim=1) #We divide by that size at the return line so sum and not mean if self.reduction=='mean': loss = loss.mean() return loss*self.eps/c + (1-self.eps) * F.nll_loss(log_preds, target.long(), weight=self.weight, reduction=self.reduction) def activation(self, out): return F.softmax(out, dim=-1) def decodes(self, out): return out.argmax(dim=-1) lmce = LabelSmoothingCrossEntropy() output = torch.randn(32, 5, 10) target = torch.randint(0, 10, (32,5)) test_eq(lmce(output.flatten(0,1), target.flatten()), lmce(output.transpose(-1,-2), target)) ``` On top of the formula we define: - a `reduction` attribute, that will be used when we call `Learner.get_preds` - `weight` attribute to pass to BCE. - an `activation` function that represents the activation fused in the loss (since we use cross entropy behind the scenes). It will be applied to the output of the model when calling `Learner.get_preds` or `Learner.predict` - a <code>decodes</code> function that converts the output of the model to a format similar to the target (here indices). This is used in `Learner.predict` and `Learner.show_results` to decode the predictions ``` #export @delegates() class LabelSmoothingCrossEntropyFlat(BaseLoss): "Same as `LabelSmoothingCrossEntropy`, but flattens input and target." y_int = True @use_kwargs_dict(keep=True, eps=0.1, reduction='mean') def __init__(self, *args, axis=-1, **kwargs): super().__init__(LabelSmoothingCrossEntropy, *args, axis=axis, **kwargs) def activation(self, out): return F.softmax(out, dim=-1) def decodes(self, out): return out.argmax(dim=-1) ``` ## Export - ``` #hide from nbdev.export import * notebook2script() ```
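The label-smoothing formula above can be checked independently of the fastai wrappers: the `eps`-weighted mix of the uniform term and the NLL term is exactly the cross entropy against "smoothed" target distributions. A small sketch with toy tensor sizes and plain PyTorch:

```
import torch
import torch.nn.functional as F

eps, c = 0.1, 5                        # smoothing factor and number of classes
output = torch.randn(8, c)             # toy logits
target = torch.randint(0, c, (8,))     # toy integer targets

log_preds = F.log_softmax(output, dim=1)

# Formula used by LabelSmoothingCrossEntropy with reduction='mean':
loss_a = (-log_preds.sum(dim=1)).mean() * eps / c + (1 - eps) * F.nll_loss(log_preds, target)

# Equivalent view: cross entropy against smoothed targets
# q = eps/c on every class plus (1 - eps) on the true class.
smoothed = torch.full((8, c), eps / c)
smoothed[torch.arange(8), target] += 1 - eps
loss_b = -(smoothed * log_preds).sum(dim=1).mean()

print(torch.allclose(loss_a, loss_b))  # expected: True
```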
# Tau_p effects ``` import pprint import subprocess import sys sys.path.append('../') import numpy as np import matplotlib.pyplot as plt import matplotlib import matplotlib.gridspec as gridspec from mpl_toolkits.axes_grid1 import make_axes_locatable import seaborn as sns %matplotlib inline np.set_printoptions(suppress=True, precision=2) sns.set(font_scale=2.0) ``` #### Git machinery ``` run_old_version = False if run_old_version: hash_when_file_was_written = 'beb606918461c91b007f25a007b71466d94cf516' hash_at_the_moment = subprocess.check_output(["git", 'rev-parse', 'HEAD']).strip() print('Actual hash', hash_at_the_moment) print('Hash of the commit used to run the simulation', hash_when_file_was_written) subprocess.call(['git', 'checkout', hash_when_file_was_written]) from network import Protocol, BCPNNFast, NetworkManager from analysis_functions import calculate_recall_success_sequences, calculate_recall_success from analysis_functions import calculate_recall_time_quantities, calculate_excitation_inhibition_ratio from analysis_functions import calculate_total_connections from plotting_functions import plot_weight_matrix, plot_winning_pattern ``` ## How do the probabilities evolve in time depending on tau_p #### An example ``` # Patterns parameters hypercolumns = 4 minicolumns = 20 n_patterns = 10 # Manager properties dt = 0.001 T_recalling = 5.0 values_to_save = ['o', 'p_pre', 'p_post', 'p_co', 'w'] # Protocol training_time = 0.1 inter_sequence_interval = 1.0 inter_pulse_interval = 0.0 epochs = 3 # Network parameters tau_z_pre = 0.150 tau_p = 500.0 # Build the network nn = BCPNNFast(hypercolumns, minicolumns, tau_p=tau_p, tau_z_pre=tau_z_pre) # Build the manager manager = NetworkManager(nn=nn, dt=dt, values_to_save=values_to_save) # Build protocol protocol = Protocol() sequences = [[i for i in range(n_patterns)]] protocol.cross_protocol(sequences, training_time=training_time, inter_sequence_interval=inter_sequence_interval, epochs=epochs) manager.run_network_protocol(protocol=protocol, verbose=True) manager plot_weight_matrix(manager.nn) fig = plt.figure(figsize=(16, 12)) ax1 = fig.add_subplot(221) ax2 = fig.add_subplot(222) ax3 = fig.add_subplot(223) ax4 = fig.add_subplot(224) o = manager.history['o'] p_pre = manager.history['p_pre'] p_co = manager.history['p_co'] w = manager.history['w'] pattern_1 = 3 pattern_2 = 4 time = np.arange(0, manager.T_total, dt) ax1.plot(time, o[:, pattern_1]) ax1.plot(time, o[:, pattern_2]) ax1.set_ylabel('activity') ax1.set_xlabel('Time') ax2.plot(time, p_pre[:, pattern_1]) ax2.plot(time, p_pre[:, pattern_2]) ax2.set_ylabel('p') ax2.set_xlabel('Time') ax3.plot(time, p_co[:, pattern_2, pattern_1]) ax3.set_ylabel('p_co') ax3.set_xlabel('Time') ax4.plot(time, w[:, pattern_2, pattern_1]) ax4.set_ylabel('w') ax4.set_xlabel('Time'); nn.g_w = 15.0 nn.g_w_ampa = 15.0 total, mean, std, success = calculate_recall_time_quantities(manager, T_recall, T_cue, n, sequences) print('success', success) plot_winning_pattern(manager) fig = plt.figure(figsize=(16, 12)) ax1 = fig.add_subplot(221) ax2 = fig.add_subplot(222) ax3 = fig.add_subplot(223) ax4 = fig.add_subplot(224) o = manager.history['o'] p_pre = manager.history['p_pre'] p_co = manager.history['p_co'] w = manager.history['w'] pattern_1 = 3 pattern_2 = 4 time = np.arange(0, manager.T_total, dt) ax1.plot(time, o[:, pattern_1]) ax1.plot(time, o[:, pattern_2]) ax1.set_ylabel('activity') ax1.set_xlabel('Time') ax2.plot(time, p_pre[:, pattern_1]) ax2.plot(time, p_pre[:, pattern_2]) ax2.set_ylabel('p') ax2.set_xlabel('Time') 
ax3.plot(time, p_co[:, pattern_2, pattern_1]) ax3.set_ylabel('p_co') ax3.set_xlabel('Time') ax4.plot(time, w[:, pattern_2, pattern_1]) ax4.set_ylabel('w') ax4.set_xlabel('Time'); ``` #### Multiple values of tau_p ``` tau_p_list = [5.0, 20.0, 100.0, 1000.0] tau_p_list = [10.0, 20.0, 30.0, 40.0] tau_p_list = [1, 10, 100, 1000] fig = plt.figure(figsize=(16, 12)) ax1 = fig.add_subplot(221) ax2 = fig.add_subplot(222) ax3 = fig.add_subplot(223) ax4 = fig.add_subplot(224) pattern_1 = 3 pattern_2 = 4 # Patterns parameters hypercolumns = 4 minicolumns = 20 n_patterns = 10 # Manager properties dt = 0.001 T_recalling = 5.0 values_to_save = ['o', 'p_pre', 'p_post', 'p_co', 'w'] # Protocol training_time = 0.1 inter_sequence_interval = 1.0 inter_pulse_interval = 0.0 epochs = 3 # Network parameters tau_z_pre = 0.150 tau_p = 10.0 for tau_p in tau_p_list: # Build the network nn = BCPNNFast(hypercolumns, minicolumns, tau_p=tau_p, tau_z_pre=tau_z_pre) # Build the manager manager = NetworkManager(nn=nn, dt=dt, values_to_save=values_to_save) # Build protocol protocol = Protocol() sequences = [[i for i in range(n_patterns)]] protocol.cross_protocol(sequences, training_time=training_time, inter_sequence_interval=inter_sequence_interval, epochs=epochs) manager.run_network_protocol(protocol=protocol, verbose=False) # Plotting o = manager.history['o'] p_pre = manager.history['p_pre'] p_post = manager.history['p_post'] p_co = manager.history['p_co'] w = manager.history['w'] pattern_1 = 3 pattern_2 = 4 time = np.arange(0, manager.T_total, dt) if False: ax1.plot(time, o[:, pattern_1]) ax1.plot(time, o[:, pattern_2]) ax1.set_ylabel('activity') ax1.set_xlabel('Time') ax1.plot(time, p_post[:, pattern_1], label=str(tau_p)) ax1.plot(time, p_post[:, pattern_2], label=str(tau_p)) ax1.set_ylabel('p') ax1.set_xlabel('Time') ax2.plot(time, p_pre[:, pattern_1], label=str(tau_p)) ax2.plot(time, p_pre[:, pattern_2], label=str(tau_p)) ax2.set_ylabel('p') ax2.set_xlabel('Time') ax3.plot(time, p_co[:, pattern_2, pattern_1]) ax3.set_ylabel('p_co') ax3.set_xlabel('Time') ax4.plot(time, w[:, pattern_2, pattern_1]) ax4.set_ylabel('w') ax4.set_xlabel('Time') ax1.legend() ax2.legend(); ``` ## Convergence and final weights based on tau_p ``` tau_p_vector = np.logspace(1.0, 2.0, num=15) weights = [] weights_inhibition = [] weights_ampa = [] weights_free_attactor = [] exc_inh_ratio = [] exc_inh_ratio_ampa = [] mean_recall_time = [] recall_successes = [] from_pattern_inh = 0 from_pattern = 3 to_pattern = 4 T_recall = 5.0 T_cue = 0.100 I_cue = 0 n = 1 for tau_p in tau_p_vector: print('tau_p', tau_p) # Patterns parameters hypercolumns = 4 minicolumns = 20 n_patterns = 10 # Manager properties dt = 0.001 T_recalling = 5.0 values_to_save = ['o'] # Protocol training_time = 0.1 inter_sequence_interval = 1.0 inter_pulse_interval = 0.0 epochs = 3 # Build the network nn = BCPNNFast(hypercolumns, minicolumns, tau_p=tau_p, tau_z_pre=tau_z_pre) # Build the manager manager = NetworkManager(nn=nn, dt=dt, values_to_save=values_to_save) # Build protocol protocol = Protocol() sequences = [[i for i in range(n_patterns)]] protocol.cross_protocol(sequences, training_time=training_time, inter_sequence_interval=inter_sequence_interval, epochs=epochs) manager.run_network_protocol(protocol=protocol, verbose=False) total, mean, std, success = calculate_recall_time_quantities(manager, T_recall, T_cue, n, sequences) mean_ratio, std_ratio, aux = calculate_excitation_inhibition_ratio(nn, sequences, ampa=False) mean_ratio_ampa, std_ratio, aux = 
calculate_excitation_inhibition_ratio(nn, sequences, ampa=True) # Store weights.append(nn.w[to_pattern, from_pattern]) weights_inhibition.append(nn.w[to_pattern, from_pattern_inh]) weights_ampa.append(nn.w_ampa[0, minicolumns]) weights_free_attactor.append(nn.w[to_pattern, n_patterns + 2]) exc_inh_ratio.append(mean_ratio) exc_inh_ratio_ampa.append(mean_ratio_ampa) mean_recall_time.append(mean) recall_successes.append(success) fig = plt.figure(figsize=(16, 12)) ax1 = fig.add_subplot(211) ax2 = fig.add_subplot(212) ax1.plot(tau_p_vector, weights, '*-', markersize=15, label='weights') ax1.plot(tau_p_vector, weights_inhibition, '*-', markersize=15, label='weights inh') ax1.plot(tau_p_vector, weights_free_attactor, '*-', markersize=15, label='free_attractor') ax1.plot(tau_p_vector, weights_ampa, '*-', markersize=15, label='weights ampa') ax2.plot(tau_p_vector, recall_successes, '*-', markersize=15, label='recall') ax1.set_xscale('log') ax1.set_xlabel('tau_p') ax1.legend() ax2.set_xscale('log') ax2.set_xlabel('tau_p') ax2.legend(); fig = plt.figure(figsize=(16, 12)) ax1 = fig.add_subplot(211) ax2 = fig.add_subplot(212) ax1.plot(tau_p_vector, exc_inh_ratio, '*-', markersize=15, label='exc inh ratio') ax1.plot(tau_p_vector, exc_inh_ratio_ampa, '*-', markersize=15, label='exc inh ratio ampa') ax2.plot(tau_p_vector, recall_successes, '*-', markersize=15, label='recall') ax1.set_xscale('log') ax1.set_xlabel('tau_p') ax1.legend() ax2.set_xscale('log') ax2.set_xlabel('tau_p') ax2.legend(); ``` ## Two sequences assymetry in values ``` tau_p_vector = np.logspace(1.0, 4.0, num=20) connectivities_1_list = [] connectivities_2_list = [] connectivities_3_list = [] connectivities_4_list = [] connectivities_5_list = [] connectivities_6_list = [] # Patterns parameters hypercolumns = 4 minicolumns = 35 # Manager properties dt = 0.001 T_recalling = 5.0 values_to_save = ['o'] # Protocol training_time = 0.1 inter_sequence_interval = 2.0 inter_pulse_interval = 0.0 epochs = 3 tau_z_pre = 0.150 sigma = 0 tau_p = 1000.0 for tau_p in tau_p_vector: print('tau p', tau_p) # Build the network nn = BCPNNFast(hypercolumns, minicolumns, tau_z_pre=tau_z_pre, sigma=sigma, tau_p=tau_p) # Build the manager manager = NetworkManager(nn=nn, dt=dt, values_to_save=values_to_save) # Build a protocol protocol = Protocol() sequences = [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23 ,24], [25, 26, 27, 28, 29]] protocol.cross_protocol(sequences, training_time=training_time, inter_sequence_interval=inter_sequence_interval, epochs=epochs) # Train manager.run_network_protocol(protocol=protocol, verbose=False) from_pattern = 3 to_pattern = 4 connectivity_seq_1 = calculate_total_connections(manager, from_pattern, to_pattern, ampa=False, normalize=True) from_pattern = 8 to_pattern = 9 connectivity_seq_2 = calculate_total_connections(manager, from_pattern, to_pattern, ampa=False, normalize=True) from_pattern = 13 to_pattern = 14 connectivity_seq_3 = calculate_total_connections(manager, from_pattern, to_pattern, ampa=False, normalize=True) from_pattern = 13 to_pattern = 14 connectivity_seq_3 = calculate_total_connections(manager, from_pattern, to_pattern, ampa=False, normalize=True) from_pattern = 18 to_pattern = 19 connectivity_seq_4 = calculate_total_connections(manager, from_pattern, to_pattern, ampa=False, normalize=True) from_pattern = 23 to_pattern = 24 connectivity_seq_5 = calculate_total_connections(manager, from_pattern, to_pattern, ampa=False, normalize=True) from_pattern = 28 to_pattern = 29 
connectivity_seq_6 = calculate_total_connections(manager, from_pattern, to_pattern, ampa=False, normalize=True) connectivities_1_list.append(connectivity_seq_1) connectivities_2_list.append(connectivity_seq_2) connectivities_3_list.append(connectivity_seq_3) connectivities_4_list.append(connectivity_seq_4) connectivities_5_list.append(connectivity_seq_5) connectivities_6_list.append(connectivity_seq_6) fig = plt.figure(figsize=(16, 12)) ax = fig.add_subplot(111) ax.plot(tau_p_vector, connectivities_1_list, '*-', markersize=15, label='1') ax.plot(tau_p_vector, connectivities_2_list, '*-', markersize=15, label='2') ax.plot(tau_p_vector, connectivities_3_list, '*-', markersize=15, label='3') ax.plot(tau_p_vector, connectivities_4_list, '*-', markersize=15, label='4') ax.plot(tau_p_vector, connectivities_5_list, '*-', markersize=15, label='5') ax.plot(tau_p_vector, connectivities_6_list, '*-', markersize=15, label='6') ax.set_xscale('log') ax.set_xlabel('tau_p') ax.set_ylabel('Connectivities') ax.legend(); ``` ## Do previous sequences stick? ``` # Patterns parameters hypercolumns = 4 minicolumns = 40 n_patterns = 10 # Manager properties dt = 0.001 T_recall = 5.0 T_cue = 0.100 n = 1 values_to_save = ['o'] # Protocol training_time = 0.1 inter_sequence_interval = 1.0 inter_pulse_interval = 0.0 epochs = 3 sigma = 0 tau_z_pre = 0.200 tau_p = 100.0 # Sequence structure overlap = 2 number_of_sequences = 5 half_width = 2 # Build the network nn = BCPNNFast(hypercolumns, minicolumns, tau_z_pre=tau_z_pre, sigma=sigma, tau_p=tau_p) # Buidl the manager manager = NetworkManager(nn=nn, dt=dt, values_to_save=values_to_save) # Build chain protocol chain_protocol = Protocol() units_to_overload = [i for i in range(overlap)] sequences = chain_protocol.create_overload_chain(number_of_sequences, half_width, units_to_overload) chain_protocol.cross_protocol(sequences, training_time=training_time, inter_sequence_interval=inter_sequence_interval, epochs=epochs) # Run the manager manager.run_network_protocol(protocol=chain_protocol, verbose=True) print(sequences) nn.g_w = 15.0 nn.g_w_ampa = 1.0 nn.tau_z_pre = 0.050 nn.tau_a = 2.7 successes = calculate_recall_success_sequences(manager, T_recall=T_recall, T_cue=T_cue, n=n, sequences=sequences) successes plot_weight_matrix(manager.nn) ampa = False from_pattern = 1 to_pattern = 4 connectivity_seq_1 = calculate_total_connections(manager, from_pattern, to_pattern, ampa=ampa, normalize=True) from_pattern = 1 to_pattern = 8 connectivity_seq_2 = calculate_total_connections(manager, from_pattern, to_pattern, ampa=ampa, normalize=True) from_pattern = 1 to_pattern = 12 connectivity_seq_3 = calculate_total_connections(manager, from_pattern, to_pattern, ampa=ampa, normalize=True) from_pattern = 1 to_pattern = 16 connectivity_seq_4 = calculate_total_connections(manager, from_pattern, to_pattern, ampa=ampa, normalize=True) from_pattern = 1 to_pattern = 20 connectivity_seq_5 = calculate_total_connections(manager, from_pattern, to_pattern, ampa=ampa, normalize=True) print('connectivit 1', connectivity_seq_1) print('connectivit 2', connectivity_seq_2) print('connectivit 3', connectivity_seq_3) print('connectivit 4', connectivity_seq_4) print('connectivit 5', connectivity_seq_5) from analysis_functions import calculate_timings nn.g_w = 15.0 nn.g_w_ampa = 1.0 nn.tau_a = 2.7 nn.tau_z_pre = 0.500 print(nn.get_parameters()) T_recall = 5.0 T_cue = 0.100 n = 1 sequence = 0 patterns_indexes = sequences[sequence] success_1 = calculate_recall_success(manager, T_recall=T_recall, 
I_cue=patterns_indexes[0], T_cue=T_cue, n=n, patterns_indexes=patterns_indexes) timings = calculate_timings(manager, remove=0.010) print('success', success_1) plot_winning_pattern(manager) print(patterns_indexes) print(timings) ``` #### Git machinery ``` if run_old_version: subprocess.call(['git', 'checkout', 'master']) ```
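In this notebook `tau_p` acts as the time constant of the probability traces, so larger values give slower, smoother estimates of unit activity. As a generic illustration of that filtering behaviour, here is a plain leaky integrator, dp/dt = (o - p) / tau_p, stepped with forward Euler; this is only a sketch of the qualitative effect, not the actual `BCPNNFast` update.

```
import numpy as np
import matplotlib.pyplot as plt

dt = 0.001
time = np.arange(0.0, 3.0, dt)
o = ((time % 1.0) < 0.1).astype(float)   # toy activity: a 100 ms pulse once per second

fig, ax = plt.subplots()
for tau_p in (0.1, 1.0, 10.0):
    p = np.zeros_like(time)
    for k in range(1, len(time)):
        # forward-Euler step of dp/dt = (o - p) / tau_p
        p[k] = p[k - 1] + dt * (o[k - 1] - p[k - 1]) / tau_p
    ax.plot(time, p, label=f'tau_p = {tau_p}')
ax.plot(time, o, 'k--', alpha=0.3, label='activity o')
ax.set_xlabel('Time (s)')
ax.set_ylabel('p estimate')
ax.legend();
```

With tau_p = 10 the trace barely moves over this short toy run, which is the qualitative regime probed by the large-tau_p end of the sweeps above.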
<a href="https://colab.research.google.com/github/victorog17/Soulcode_Projeto_Python/blob/main/Projeto_Python_Oficina_Mecanica_V2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` print('Hello World') print('Essa Fera Bicho') ``` 1) Ao executar o algoritmo, deverá aparecer duas opções: A - Para acessar o programa ou F* - Para finalizar o programa (CORRIGIR) OK 2) Caso o usuário digite A, deverá ser direcionado para outra parte do programa que tenha no mínimo 4 funcionalidades que podem ser: Adicionar produto , adicionar serviço , finalizar a compra , etc. OK 3) A cada produto ou serviço selecionado, deverá aumentar o valor a ser pago na conta , igualmente num caixa de supermercado convencional . considerando que o cliente pode levar mais de uma quantidade do mesmo produto/serviço (ex : 2 caixas de leite , 2 trocas de pneus ) . OK 4) Ao fechar/concluir o processo de seleção de produtos/serviços deve exibir ao cliente o total de valor a ser pago e pedir para que o cliente selecione a forma de pagamento , obrigatoriamente deve existir a forma de pagamento em dinheiro que gere troco , caso o troco seja gerado deve-se informar o valor do troco e quantas cedulas vão ser dadas para o cliente, sempre considere a menor quantidade de cédulas possíveis . 5) As cédulas disponíveis são : 50 , 20 , 10 , 5 ,2 e 1 real . Pode descartar valores de centavos OK 6) No processo de finalização da compra deve existir uma opção para o cliente desistir da compra , em caso positivo deve ser perguntado a confirmação da desistência (informando os produtos/serviços que o cliente está desistindo) OK 7) Ao finalizar a compra deve-se voltar a tela inicial Acessar programa / finalizar programa . Quando finalizar deve-se exibir uma mensagem agradecendo a visita, informando o que foi comprado e o valor gasto no estabelecimento OK ``` # Lista de Produtos lista_prod = [['Pneu(s)', 'Calota(s)', 'Palheta(s)', 'Protetor(es) de Volante', 'Cheirinho(s) de Carro', 'Óleo de Motor', 'Bateria(s)'],[339.00, 15.00, 55.00, 30.00, 15.00, 27.00, 270.00]] # Lista de Serviços lista_serv = [['Troca de Óleo', 'Alinhamento', 'Revisão Geral', 'Troca de Lampada', 'Troca de Correia', 'Troca de Pastilha de Freio'],[200.00, 60.00, 300.00, 40.00, 220.00, 150.00]] #FUNCIONALIDADES import time def limparcar(): #FUNÇÃO LIMPEZA DO CARRINHO somaFatura = 0 for y in range(len(carrinho[0])): #AMOSTRA DO CARRINHO print(f'[{y+1}] - {carrinho[0][y]} --> R${carrinho[1][y]} Quantidade: {carrinho[2][y]}') somaFatura += ((carrinho[1][y])*(carrinho[2][y])) print(f"\nValor total R${somaFatura:.2f}") #VALOR TOTAL print("[S] para sim\n[N] para não\n") #CONFIRMAÇÃO DA AÇÃO certeza = input(f'Tem certeza que deseja remover TUDO de seu carrinho? ').upper()[0] print('='*50) while (certeza != 'S') and (certeza != 'N'): certeza = input("Opção inválida! 
Digite [S] para sim [N] para não:\n").upper()[0] print('='*50) if certeza == 'S': #CONFIRMAÇÃO = SIM - LIMPEZA DO CARRINHO carrinho[0].clear() carrinho[1].clear() carrinho[2].clear() print("Limpando seu carrinho ...") print('='*50) time.sleep(3) else: #CONFIRMAÇÃO = NÃO print("Seus produtos foram mantidos no carrinho!") print('='*50) time.sleep(3) def adcProduto(): #FUNÇÃO ADICIONAR PRODUTO while True: print("Opções de produto:\n") for i in range(len(lista_prod[0])): #LISTA DE PRODUTOS DISPONÍVEIS print(f'[{i+1}] - {lista_prod[0][i]} --> R${lista_prod[1][i]}') print("\nPara voltar ao menu principal basta digitar [99] ") print('='*50) #CARRINHO digite = int(input('Adicione um produto ao seu carrinho: ')) print('='*50) if digite >= 1 and digite <= (len(lista_prod[0])): #ESCOLHA DE PRODUTO carrinho[0].append(lista_prod[0][digite-1]) carrinho[1].append(lista_prod[1][digite-1]) quant = int(input(f'Qual seria a quantidade de "{lista_prod[0][digite-1]}" (MÁX. 10): ')) #QUANTIDADE DE PRODUTO print('='*50) while quant <= 0 or quant > 10: quant = int(input('Valor inválido! Digite novamente a quantidade: ')) print('='*50) print(f'Adicionando "{lista_prod[0][digite-1]}" ao seu carrinho ...') print('='*50) time.sleep(3) carrinho[2].append(quant) elif digite == 99: #SAÍDA DA FUNÇÃO print('Saindo ...') print('='*50) time.sleep(3) break else: #OPÇÃO INVÁLIDA print('Este número não está entre as opções!!') print('='*50) time.sleep(3) def adcServico(): #FUNÇÃO ADICIONAR SERVIÇO while True: print("Opções de serviços:\n") for x in range(len(lista_serv[0])): #LISTA DE SERVIÇOS DISPONÍVEIS print(f'[{x+1}] - {lista_serv[0][x]} --> R${lista_serv[1][x]}') print("\nPara voltar ao menu principal basta digitar [99] ") print('='*50) #CARRINHO digite = int(input('Adicione um serviço ao seu carrinho: ')) print('='*50) if digite >= 1 and digite <= (len(lista_serv[0])): #ESCOLHA DE SERVIÇO carrinho[0].append(lista_serv[0][digite-1]) carrinho[1].append(lista_serv[1][digite-1]) print(f'Adicionando "{lista_serv[0][digite-1]}" ao seu carrinho ...') print('='*50) time.sleep(3) carrinho[2].append(1) elif digite == 99: #SAÍDA DA FUNÇÃO print('Saindo ...') print('='*50) time.sleep(3) break else: #OPÇÃO INVÁLIDA print('Este número não está entre as opções!!') print('='*50) time.sleep(3) def rmvProduto(): #FUNÇÃO REMOVER PRODUTO/SERVIÇO while True: print("Dentro do carrinho:\n") for y in range(len(carrinho[0])): #AMOSTRA DO CARRINHO print(f'[{y+1}] - {carrinho[0][y]} --> R${carrinho[1][y]} Quantidade: {carrinho[2][y]}') print('='*50) #ESCOLHA DE OPÇÕES DE REMOÇÃO - PRODUTO OU QUANTIDADE print("Digite [P] para remover um produto/serviço\nDigite [Q] para diminuir a quantidade de seu produto\nDigite [M] para voltar ao MENU PRINCIPAL") produto_ou_quantidade = input("\nEscolha uma das opções acima: ").upper()[0] print('='*50) while (produto_ou_quantidade != 'P') and (produto_ou_quantidade != 'Q') and ((produto_ou_quantidade != 'M')): produto_ou_quantidade = input("As únicas opções válidas são [P], [Q] ou [M]: ").upper()[0] print('='*50) if produto_ou_quantidade == 'M': #SAÍDA DA FUNÇÃO print('Saindo ...') print('='*50) time.sleep(3) break elif produto_ou_quantidade == 'P': #REMOÇÃO DE PRODUTO remove = int(input("Informe qual produto irá remover: ")) print('='*50) while remove < 1 or remove > len(carrinho[0]): remove = int(input("Este produto não está na lista! 
Informe novamente qual produto irá remover: ")) print('='*50) elif produto_ou_quantidade == 'Q': #REMOÇÃO POR QUANTIDADE escolheProdRem = int(input("Informe de qual item irá reduzir a quantidade: ")) #APONTAR PRODUTO print('='*50) while escolheProdRem < 1 or escolheProdRem > len(carrinho[2]): escolheProdRem = int(input("Este produto não está na lista! Informe novamente qual produto irá reduzir a quantidade: ")) print('='*50) removeQuantidade = int(input(f'Gostaria de remover quantos de "{carrinho[0][escolheProdRem-1]}": ')) #REMOÇÃO DA QUANTIDADE DESSE PRODUTO print('='*50) while removeQuantidade <= 0 or removeQuantidade > carrinho[2][escolheProdRem-1]: removeQuantidade = int(input(f'Tirar este valor é impossível! Gostaria de remover quantos de "{carrinho[0][escolheProdRem-1]}": ')) print('='*50) print("[S] para sim\n[N] para não\n") certeza = input(f'Confirme a sua ação: ').upper()[0] #CONFIRMAÇÃO DA AÇÃO print('='*50) while (certeza != 'S') and (certeza != 'N'): certeza = input("Opção inválida! Digite [S] para sim [N] para não: ").upper()[0] print('='*50) if certeza == 'S': #CONFIRMAÇÃO = SIM if produto_ou_quantidade == 'P': #REMOÇÃO DO PRODUTO del carrinho[0][remove-1] del carrinho[1][remove-1] del carrinho[2][remove-1] elif produto_ou_quantidade == 'Q': if removeQuantidade == carrinho[2][escolheProdRem-1]: #SE REMOÇÃO DA QUANTIDADE FOR IGUAL A QUANTIDADE DO CARRINHO del carrinho[0][escolheProdRem-1] del carrinho[1][escolheProdRem-1] del carrinho[2][escolheProdRem-1] else: carrinho[2][escolheProdRem-1] -= removeQuantidade #REMOVE QUANTIDADE PEDIDA QUANDO MENOR QUE A QUANTIDADE DO PRODUTO else: #CONFIRMAÇÃO = NÃO - MANTÉM PRODUTO OU MESMA QUANTIDADE NO CARRINHO print("O produto não foi removido de seu carrinho!") print('='*50) time.sleep(3) def extrato(): #FUNÇÃO EXTRATO CARRINHO while True: somaFatura = 0 for y in range(len(carrinho[0])): #AMOSTRA DO CARRINHO print(f'[{y+1}] - {carrinho[0][y]} --> R${carrinho[1][y]} Quantidade: {carrinho[2][y]}') somaFatura += ((carrinho[1][y])*(carrinho[2][y])) print(f"\nValor total R${somaFatura:.2f}") #VALOR TOTAL sair_extrato = int(input("\nDigite [99] para sair: ")) print('='*50) while sair_extrato != 99: sair_extrato = int(input("Dado inválido! Digite 99 para sair: ")) print('='*50) if sair_extrato == 99: #OPÇÃO DE SAÍDA DA FUNÇÃO print("Saindo ...") print('='*50) time.sleep(3) break #PROGRAMA import time carrinho = [[],[],[]] historico = [[],[],[]] #ACESSAR/FINALIZAR while True: print("> Para acessar o programa basta digitar [A]\n> Caso queira finalizar o programa, digite [F]\n") acessar = str(input("Escolha uma opção: ")).upper()[0] print('='*50) while acessar != 'A' and acessar != 'F': #VALIDAÇÃO ACESSAR/FINALIZAR acessar = input("Valor inválido! 
Digite A para acessar o programa ou F para finalizar o programa:\n").upper()[0] print('='*50) if acessar == 'A': print('Bem vindo a Oficina Borracha Forte!') #ACESSAR - BOAS VINDAS print('='*50) time.sleep(3) else: print('Iremos finalizar o programa ...') #FINALIZAR print('='*50) time.sleep(3) print(f"Muito obrigado pela visita!") #AGRADECIMENTO + HISTÓRICO DE COMPRAS print('='*50) print('NOTA FISCAL\n') somaFatura = 0 for y in range(len(historico[0])): #AMOSTRA DO HISTÓRICO FINAL DA COMPRA print(f'[{y+1}] - {historico[0][y]} --> R${historico[1][y]:.2f} Quantidade: {historico[2][y]}') somaFatura += ((historico[1][y])*(historico[2][y])) print(f"\nValor total R${somaFatura:.2f}") break while True: print(f"MENU PRINCIPAL\n") #MENU PRINCIPAL #OPÇÕES PARA DAR PROCEDIMENTO print("Escolha a opção que deseja:\n\n[1] - Adicionar Produto\n[2] - Adicionar Serviço\n[3] - Remover Produto ou Serviço\n[4] - Limpar carrinho\n[5] - Extrato\n[6] - Finalizar Compra\n[7] - Sair") opcao = int(input("\n")) print('='*50) if opcao == 1: #ADICIONAR PRODUTOS AO SEU CARRINHO print("Carregando ...") print('='*50) time.sleep(3) while True: adcProduto() #FUNÇÃO ADICIONAR PRODUTO break elif opcao == 2: #ADICIONAR SERVIÇOS AO SEU CARRINHO print("Carregando ...") print('='*50) time.sleep(3) while True: adcServico() #FUNÇÃO ADICIONAR SERVIÇO break elif opcao == 3: #REMOVER PRODUTOS/SERVIÇOS DE SEU CARRINHO print("Carregando ...") print('='*50) time.sleep(3) while True: rmvProduto() #FUNÇÃO REMOVER PRODUTO break elif opcao == 4: #LIMPAR SEU CARRINHO print("Carregando ...") print('='*50) time.sleep(3) while True: limparcar() #FUNÇÃO LIMPAR CARRINHO break elif opcao == 5: #EXTRATO DE SEU CARRINHO print("Carregando ...") print('='*50) time.sleep(3) while True: extrato() #FUNÇÃO EXTRATO CARRINHO break elif opcao == 6: #FINALIZAR/DESISTIR DA COMPRA print("Carregando ...") print('='*50) time.sleep(3) print("Gostaria de dar procedimento a finalização da compra ou gostaria de desistir?\n") #CHANCE DE DESISTÊNCIA DA COMPRA print("[P] para prosseguir\n[D] para desistir\n") certeza = input(f'Confirme a sua ação: ').upper()[0] print('='*50) while (certeza != 'P') and (certeza != 'D'): certeza = input("Opção inválida! Digite [P] para prosseguir [D] para desistir: ").upper()[0] print('='*50) if certeza == 'D': #DESISTÊNCIA (1ªCONFIRMAÇÃO) - MOSTRA OS PRODUTOS QUE ESTÁ DESISTINDO print("Você tem certeza? Essa é o seu carrinho:\n") for y in range(len(carrinho[0])): print(f'[{y+1}] - {carrinho[0][y]} --> R${carrinho[1][y]} Quantidade: {carrinho[2][y]}') print('='*50) print("[S] para sim\n[N] para não\n") #DESISTÊNCIA (2ªCONFIRMAÇÃO) - LIMPEZA DO CARRINHO E SAÍDA DIRETA DO PROGRAMA certeza = input("Confirme sua ação: ").upper()[0] print('='*50) while (certeza != 'S') and (certeza != 'N'): certeza = input("Opção inválida! Confirme sua ação: ").upper()[0] print('='*50) if certeza == 'S': carrinho[0].clear() carrinho[1].clear() carrinho[2].clear() print('VOLTE SEMPRE!') print('='*50) time.sleep(3) break else: print("Voltando ...") print('='*50) time.sleep(3) else: #FINALIZAR COMPRA - FORMA DE PAGAMENTO print("Qual será a forma de pagamento?\n") print("[C] - Cartão\n[D] - Dinheiro\n[P] - PIX") FormaPagamento = str(input("\nEscolha a forma de pagamento: ")).upper()[0] print('='*50) while (FormaPagamento != 'D') and (FormaPagamento != 'C') and (FormaPagamento != 'P'): FormaPagamento = str(input("Esta opcção não é válida! 
Escolha a forma de pagamento: ")).upper()[0] print('='*50) if FormaPagamento == 'D': #FORMA DE PAGAMENTO - DINHEIRO somaFatura = 0 for y in range(len(carrinho[0])): #AMOSTRA DO CARRINHO print(f'[{y+1}] - {carrinho[0][y]} --> R${carrinho[1][y]} Quantidade: {carrinho[2][y]}') somaFatura += ((carrinho[1][y])*(carrinho[2][y])) print(f"\nValor total R${somaFatura:.2f}") dinheiro = int(input("\nDigite o valor do pagamento: ")) print('='*50) while dinheiro < somaFatura: dinheiro = int(input("Inválido! Digite o valor: ")) print('='*50) troco = dinheiro - somaFatura print(f"Troco do cliente: R${troco}") cont50n = 0 cont20n = 0 cont10n = 0 cont5n = 0 cont2n = 0 cont1n = 0 while troco > 0: if troco >= 50: troco -= 50 cont50n +=1 elif troco >= 20: troco -= 20 cont20n += 1 elif troco >= 10: troco -= 10 cont10n += 1 elif troco >= 5: troco -= 5 cont5n += 1 elif troco >= 2: troco -= 2 cont2n += 1 elif troco >= 1: troco -= 1 cont1n += 1 lista_cont = [cont50n, cont20n, cont10n, cont5n, cont2n, cont1n] lista_cedulas = [50, 20, 10, 5, 2, 1] for i, v in zip(lista_cont, lista_cedulas): if i > 0: print(f'{i} cédula(s) de {v} reais') print('='*50) somaFatura = 0 for i in range(len(carrinho[0])): historico[0].append(carrinho[0][i]) historico[1].append(carrinho[1][i]) historico[2].append(carrinho[2][i]) carrinho[0].clear() carrinho[1].clear() carrinho[2].clear() elif FormaPagamento == 'C': #FORMA DE PAGAMENTO - CARTÃO somaFatura = 0 for y in range(len(carrinho[0])): print(f'[{y+1}] - {carrinho[0][y]} --> R${carrinho[1][y]} Quantidade: {carrinho[2][y]}') somaFatura += ((carrinho[1][y])*(carrinho[2][y])) print(f"\nValor total R${somaFatura:.2f}") print("\n[C] - Crédito\n[D] - Débito") #CRÉDITO OU DÉBITO credito_debito = str(input("\nEscolha entre Crédito ou Débito: ")).upper()[0] print('='*50) while (FormaPagamento != 'D') and (FormaPagamento != 'C'): credito_debito = str(input("Dado inválido! Escolha entre Crédito ou Débito: ")).upper()[0] print('='*50) if credito_debito == 'C': #CRÉDITO print('Obs: Parcelas acima de 3x acarretará juros de 3%. Máximo de parcelas: 10') # parcelas = int(input('\nDeseja parcelar em quantas vezes: ')) print('='*50) while parcelas <= 0 or parcelas > 10: parcelas = int(input('Inválido! 
Deseja parcelar em quantas vezes: ')) print('='*50) if parcelas >= 1 and parcelas <= 3: somaFatura /= parcelas print(f"O valor parcelado em {parcelas}x fica: R${somaFatura:.2f}") # print('='*50) print("Pago com sucesso!") print('='*50) somaFatura = 0 for i in range(len(carrinho[0])): historico[0].append(carrinho[0][i]) historico[1].append(carrinho[1][i]) historico[2].append(carrinho[2][i]) carrinho[0].clear() carrinho[1].clear() carrinho[2].clear() time.sleep(3) elif parcelas == 0: print(f"O valor parcelado em {parcelas}x fica: R${somaFatura:.2f}") print('='*50) print("Pago com sucesso!") print('='*50) somaFatura = 0 for i in range(len(carrinho[0])): historico[0].append(carrinho[0][i]) historico[1].append(carrinho[1][i]) historico[2].append(carrinho[2][i]) carrinho[0].clear() carrinho[1].clear() carrinho[2].clear() else: somaFatura /= parcelas somaFatura * 1.03 print(f"O valor parcelado em {parcelas}x fica: R${somaFatura:.2f}") print('='*50) print("Pago com sucesso!") print('='*50) somaFatura = 0 for i in range(len(carrinho[0])): historico[0].append(carrinho[0][i]) historico[1].append(carrinho[1][i]) historico[2].append(carrinho[2][i]) carrinho[0].clear() carrinho[1].clear() carrinho[2].clear() time.sleep(3) elif credito_debito == 'D': #DÉBITO print('Pagamento realizado com sucesso!') print('='*50) somaFatura = 0 for i in range(len(carrinho[0])): historico[0].append(carrinho[0][i]) historico[1].append(carrinho[1][i]) historico[2].append(carrinho[2][i]) carrinho[0].clear() carrinho[1].clear() carrinho[2].clear() time.sleep(3) else: #FORMA DE PAGAMENTO - PIX print('='*50) print('Pagamento com PIX realizado com sucesso!') print('='*50) somaFatura = 0 for i in range(len(carrinho[0])): historico[0].append(carrinho[0][i]) historico[1].append(carrinho[1][i]) historico[2].append(carrinho[2][i]) carrinho[0].clear() carrinho[1].clear() carrinho[2].clear() time.sleep(3) elif opcao == 7: #SAIR DO PROGRAMA print("Carregando ...") print('='*50) time.sleep(3) if len(carrinho[0]) == 0: #CARRINHO SEM ITEM - SAÍDA DIRETA print("VOLTE SEMPRE!") print('='*50) time.sleep(3) break else: print("Tem certeza que deseja sair? Todo o conteúdo do seu carrinho será removido.\n\n[S] para sim\n[N] para não") #CONFIRMAÇÃO DA AÇÃO certeza = input("\nConfirme sua ação: ").upper()[0] print('='*50) while (certeza != 'S') and (certeza != 'N'): certeza = input("Dado inválido! Digite [S] para sim [N] para não:\n").upper()[0] print('='*50) if certeza == 'S': #LIMPEZA DO CARRINHO carrinho[0].clear() carrinho[1].clear() carrinho[2].clear() print("Limpando seu carrinho ...") print('='*50) print("VOLTE SEMPRE!") print('='*50) time.sleep(3) break else: #CASO DESISTA DA AÇÃO - CARRINHO MANTIDO print("Seus produtos foram mantidos no carrinho!") print('='*50) time.sleep(3) else: #AVISO DE ALTERNATIVA INVÁLIDA print('Insira uma opção valida!') print('='*50) time.sleep(3) #LEGADO PARA CONSULTA #def finalizarCompra(): # print("Gostaria de dar procedimento a finalização da compra ou gostaria de desistir?\n") # print("[S] para sim\n[N] para não\n") # certeza = input(f'Confirme a sua ação: ').upper()[0] #MOSTRAR O NOME DO PRODUTO QUE SERÁ APAGADO # print('='*50) # while (certeza != 'S') and (certeza != 'N'): # certeza = input("Opção inválida! 
Digite [S] para sim [N] para não: ").upper()[0] #MOSTRAR O NOME DO PRODUTO QUE SERÁ APAGADO # print('='*50) # print("Qual será a forma de pagamento?\n") # print("[C] - Cartão\n[D] - Dinheiro\n[P] - PIX") # FormaPagamento = str(input("\nEscolha a forma de pagamento: ")).upper()[0] # print('='*50) # while (FormaPagamento != 'D') and (FormaPagamento != 'C') and (FormaPagamento != 'P'): # FormaPagamento = str(input("Esta opcção não é válida! Escolha a forma de pagamento: ")).upper()[0] # print('='*50) # # if FormaPagamento == 'D': # somaFatura = 0 # for y in range(len(carrinho[0])): # print(f'[{y+1}] - {carrinho[0][y]} --> R${carrinho[1][y]} Quantidade ; {carrinho[2][y]}') # somaFatura += ((carrinho[1][y])*(carrinho[2][y])) # print(f"\nValor total R${somaFatura:.2f}") # dinheiro = int(input("\nDigite o valor do pagamento: ")) # print('='*50) # while dinheiro < somaFatura: # dinheiro = int(input("Inválido! Digite o valor: ")) # print('='*50) # troco = dinheiro - somaFatura # print(f"Troco do cliente: R${troco}") # cont50n = 0 # cont20n = 0 # cont10n = 0 # cont5n = 0 # cont2n = 0 # cont1n = 0 # while troco > 0: # if troco >= 50: # troco -= 50 # cont50n +=1 # elif troco >= 20: # troco -= 20 # cont20n += 1 # elif troco >= 10: # troco -= 10 # cont10n += 1 # elif troco >= 5: # troco -= 5 # cont5n += 1 # elif troco >= 2: # troco -= 2 # cont2n += 1 # elif troco >= 1: # troco -= 1 # cont1n += 1 # # lista_cont = [cont50n, cont20n, cont10n, cont5n, cont2n, cont1n] # lista_cedulas = [50, 20, 10, 5, 2, 1] # # for i, v in zip(lista_cont, lista_cedulas): # if i > 0: # print(f'{i} cédula(s) de {v} reais') # print('='*50) # somaFatura = 0 # historico = [[],[],[]] # for i in range(len(carrinho[0])): # historico[0].append(carrinho[0][i]) # historico[1].append(carrinho[1][i]) # historico[2].append(carrinho[2][i]) # print(f"antes Lista histórico: {historico}") # print(f"antesLista carrinho: {carrinho}") # carrinho[0].clear() # carrinho[1].clear() # carrinho[2].clear() # print(f"depois Lista histórico: {historico}") # print(f"depois Lista carrinho: {carrinho}") # # elif FormaPagamento == 'C': # somaFatura = 0 # for y in range(len(carrinho[0])): # print(f'[{y+1}] - {carrinho[0][y]} --> R${carrinho[1][y]} Quantidade ; {carrinho[2][y]}') # somaFatura += ((carrinho[1][y])*(carrinho[2][y])) # print(f"\nValor total R${somaFatura:.2f}") # print("\n[C] - Crédito\n[D] - Débito") # credito_debito = str(input("\nEscolha entre Crédito ou Débito: ")).upper()[0] # print('='*50) # while (FormaPagamento != 'D') and (FormaPagamento != 'C'): # credito_debito = str(input("Dado inválido! Escolha entre Crédito ou Débito: ")).upper()[0] # print('='*50) # if credito_debito == 'C': # print('Obs: Parcelas acima de 3x acarretará juros de 3%. Máximo de parcelas: 10') # parcelas = int(input('\nDeseja parcelar em quantas vezes: ')) # print('='*50) # while parcelas <= 0 or parcelas > 10: # parcelas = int(input('Inválido! 
Deseja parcelar em quantas vezes: ')) # print('='*50) # if parcelas >= 1 and parcelas <= 3: # somaFatura /= parcelas # print(f"O valor parcelado em {parcelas}x fica: R${somaFatura:.2f}") # print('='*50) # print("Pago com sucesso!") # print('='*50) # somaFatura = 0 # historico = carrinho.copy() # carrinho[0].clear() # carrinho[1].clear() # carrinho[2].clear() # time.sleep(3) # elif parcelas == 0: # print(f"O valor parcelado em {parcelas}x fica: R${somaFatura:.2f}") # print('='*50) # print("Pago com sucesso!") # print('='*50) # somaFatura = 0 # historico = carrinho.copy() # carrinho[0].clear() # carrinho[1].clear() # carrinho[2].clear() # time.sleep(3) # else: # somaFatura /= parcelas # somaFatura * 1.03 # print(f"O valor parcelado em {parcelas}x fica: R${somaFatura:.2f}") # print('='*50) # print("Pago com sucesso!") # print('='*50) # somaFatura = 0 # historico = carrinho.copy() # carrinho[0].clear() # carrinho[1].clear() # carrinho[2].clear() # time.sleep(3) # elif credito_debito == 'D': # print('Pagamento realizado com sucesso!') # print('='*50) # somaFatura = 0 # historico = carrinho # carrinho[0].clear() # carrinho[1].clear() # carrinho[2].clear() # time.sleep(3) # else: # print('='*50) # print('Pagamento com PIX realizado com sucesso!') # print('='*50) # somaFatura = 0 # historico = carrinho # carrinho[0].clear() # carrinho[1].clear() # carrinho[2].clear() # time.sleep(3) ```
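The cash-payment branch above counts change bills with a chain of `if`/`elif` blocks; an equivalent, more compact greedy sketch (same denominations, whole-real amounts only) is shown below. The helper name `make_change` is illustrative and not part of the program.

```
def make_change(amount, denominations=(50, 20, 10, 5, 2, 1)):
    """Greedy change-making: return a {bill: count} dict using the fewest bills.

    Taking the largest bill first is optimal here because
    (50, 20, 10, 5, 2, 1) is a canonical denomination system.
    """
    bills = {}
    for d in denominations:
        count, amount = divmod(amount, d)
        if count:
            bills[d] = count
    return bills

# Example: change of R$ 87 -> one each of 50, 20, 10, 5 and 2.
print(make_change(87))  # {50: 1, 20: 1, 10: 1, 5: 1, 2: 1}
```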
![qiskit_header.png](attachment:qiskit_header.png) # _*Qiskit Finance: Pricing Fixed-Income Assets*_ The latest version of this notebook is available on https://github.com/Qiskit/qiskit-iqx-tutorials. *** ### Contributors Stefan Woerner<sup>[1]</sup>, Daniel Egger<sup>[1]</sup>, Shaohan Hu<sup>[1]</sup>, Stephen Wood<sup>[1]</sup>, Marco Pistoia<sup>[1]</sup> ### Affiliation - <sup>[1]</sup>IBMQ ### Introduction We seek to price a fixed-income asset knowing the distributions describing the relevant interest rates. The cash flows $c_t$ of the asset and the dates at which they occur are known. The total value $V$ of the asset is thus the expectation value of: $$V = \sum_{t=1}^T \frac{c_t}{(1+r_t)^t}$$ Each cash flow is treated as a zero coupon bond with a corresponding interest rate $r_t$ that depends on its maturity. The user must specify the distribution modeling the uncertainty in each $r_t$ (possibly correlated) as well as the number of qubits he wishes to use to sample each distribution. In this example we expand the value of the asset to first order in the interest rates $r_t$. This corresponds to studying the asset in terms of its duration. <br> <br> The approximation of the objective function follows the following paper:<br> <a href="https://arxiv.org/abs/1806.06893">Quantum Risk Analysis. Woerner, Egger. 2018.</a> ``` import matplotlib.pyplot as plt %matplotlib inline import numpy as np from qiskit import BasicAer from qiskit.aqua.algorithms.single_sample.amplitude_estimation.ae import AmplitudeEstimation from qiskit.aqua.components.uncertainty_models import MultivariateNormalDistribution from qiskit.finance.components.uncertainty_problems import FixedIncomeExpectedValue backend = BasicAer.get_backend('statevector_simulator') ``` ### Uncertainty Model We construct a circuit factory to load a multivariate normal random distribution in $d$ dimensions into a quantum state. The distribution is truncated to a given box $\otimes_{i=1}^d [low_i, high_i]$ and discretized using $2^{n_i}$ grid points, where $n_i$ denotes the number of qubits used for dimension $i = 1,\ldots, d$. The unitary operator corresponding to the circuit factory implements the following: $$\big|0\rangle_{n_1}\ldots\big|0\rangle_{n_d} \mapsto \big|\psi\rangle = \sum_{i_1=0}^{2^n_-1}\ldots\sum_{i_d=0}^{2^n_-1} \sqrt{p_{i_1,...,i_d}}\big|i_1\rangle_{n_1}\ldots\big|i_d\rangle_{n_d},$$ where $p_{i_1, ..., i_d}$ denote the probabilities corresponding to the truncated and discretized distribution and where $i_j$ is mapped to the right interval $[low_j, high_j]$ using the affine map: $$ \{0, \ldots, 2^{n_{j}}-1\} \ni i_j \mapsto \frac{high_j - low_j}{2^{n_j} - 1} * i_j + low_j \in [low_j, high_j].$$ In addition to the uncertainty model, we can also apply an affine map, e.g. resulting from a principal component analysis. The interest rates used are then given by: $$ \vec{r} = A * \vec{x} + b,$$ where $\vec{x} \in \otimes_{i=1}^d [low_i, high_i]$ follows the given random distribution. ``` # can be used in case a principal component analysis has been done to derive the uncertainty model, ignored in this example. 
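# (Added note.) With A set to the identity and b to zero just below, the affine map
# r = A x + b reduces to r = x, so the sampled grid values are used directly as the
# two interest rates r_1 and r_2.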
A = np.eye(2) b = np.zeros(2) # specify the number of qubits that are used to represent the different dimenions of the uncertainty model num_qubits = [2, 2] # specify the lower and upper bounds for the different dimension low = [0, 0] high = [0.12, 0.24] mu = [0.12, 0.24] sigma = 0.01*np.eye(2) # construct corresponding distribution u = MultivariateNormalDistribution(num_qubits, low, high, mu, sigma) # plot contour of probability density function x = np.linspace(low[0], high[0], 2**num_qubits[0]) y = np.linspace(low[1], high[1], 2**num_qubits[1]) z = u.probabilities.reshape(2**num_qubits[0], 2**num_qubits[1]) plt.contourf(x, y, z) plt.xticks(x, size=15) plt.yticks(y, size=15) plt.grid() plt.xlabel('$r_1$ (%)', size=15) plt.ylabel('$r_2$ (%)', size=15) plt.colorbar() plt.show() ``` ### Cash flow, payoff function, and exact expected value In the following we define the cash flow per period, the resulting payoff function and evaluate the exact expected value. For the payoff function we first use a first order approximation and then apply the same approximation technique as for the linear part of the payoff function of the [European Call Option](european_call_option_pricing.ipynb). ``` # specify cash flow cf = [1.0, 2.0] periods = range(1, len(cf)+1) # plot cash flow plt.bar(periods, cf) plt.xticks(periods, size=15) plt.yticks(size=15) plt.grid() plt.xlabel('periods', size=15) plt.ylabel('cashflow ($)', size=15) plt.show() # estimate real value cnt = 0 exact_value = 0.0 for x1 in np.linspace(low[0], high[0], pow(2, num_qubits[0])): for x2 in np.linspace(low[1], high[1], pow(2, num_qubits[1])): prob = u.probabilities[cnt] for t in range(len(cf)): # evaluate linear approximation of real value w.r.t. interest rates exact_value += prob * (cf[t]/pow(1 + b[t], t+1) - (t+1)*cf[t]*np.dot(A[:, t], np.asarray([x1, x2]))/pow(1 + b[t], t+2)) cnt += 1 print('Exact value: \t%.4f' % exact_value) # specify approximation factor c_approx = 0.125 # get fixed income circuit appfactory fixed_income = FixedIncomeExpectedValue(u, A, b, cf, c_approx) # set number of evaluation qubits (samples) m = 5 # construct amplitude estimation ae = AmplitudeEstimation(m, fixed_income) # result = ae.run(quantum_instance=LegacySimulators.get_backend('qasm_simulator'), shots=100) result = ae.run(quantum_instance=backend) print('Exact value: \t%.4f' % exact_value) print('Estimated value:\t%.4f' % result['estimation']) print('Probability: \t%.4f' % result['max_probability']) # plot estimated values for "a" (direct result of amplitude estimation, not rescaled yet) plt.bar(result['values'], result['probabilities'], width=0.5/len(result['probabilities'])) plt.xticks([0, 0.25, 0.5, 0.75, 1], size=15) plt.yticks([0, 0.25, 0.5, 0.75, 1], size=15) plt.title('"a" Value', size=15) plt.ylabel('Probability', size=15) plt.xlim((0,1)) plt.ylim((0,1)) plt.grid() plt.show() # plot estimated values for fixed-income asset (after re-scaling and reversing the c_approx-transformation) plt.bar(result['mapped_values'], result['probabilities'], width=3/len(result['probabilities'])) plt.plot([exact_value, exact_value], [0,1], 'r--', linewidth=2) plt.xticks(size=15) plt.yticks([0, 0.25, 0.5, 0.75, 1], size=15) plt.title('Estimated Option Price', size=15) plt.ylabel('Probability', size=15) plt.ylim((0,1)) plt.grid() plt.show() import qiskit.tools.jupyter %qiskit_version_table %qiskit_copyright ```
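As a purely classical cross-check of the first-order (duration) approximation evaluated in the `exact_value` loop above: for small rates, discounting each cash flow exactly and linearizing it around r = 0 give similar values. The rate draw below is illustrative only, and A = I, b = 0 as in this example.

```
import numpy as np

cf = [1.0, 2.0]                  # cash flows per period, as above
rates = np.array([0.06, 0.12])   # one illustrative draw of (r_1, r_2)

# Exact present value: V = sum_t c_t / (1 + r_t)^t, with t = 1, 2
exact = sum(c / (1 + r) ** (t + 1) for t, (c, r) in enumerate(zip(cf, rates)))

# First-order expansion around r = 0: c / (1 + r)^n ~ c - n * c * r, with n = t + 1
linear = sum(c - (t + 1) * c * r for t, (c, r) in enumerate(zip(cf, rates)))

print(f'exact  = {exact:.4f}')
print(f'linear = {linear:.4f}')  # close for small rates; the gap is the convexity term
```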
# Sentiment Analysis ## Using XGBoost in SageMaker _Deep Learning Nanodegree Program | Deployment_ --- As our first example of using Amazon's SageMaker service we will construct a random tree model to predict the sentiment of a movie review. You may have seen a version of this example in a pervious lesson although it would have been done using the sklearn package. Instead, we will be using the XGBoost package as it is provided to us by Amazon. ## Instructions Some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this notebook. You will not need to modify the included code beyond what is requested. Sections that begin with '**TODO**' in the header indicate that you need to complete or implement some portion within them. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a `# TODO: ...` comment. Please be sure to read the instructions carefully! In addition to implementing code, there may be questions for you to answer which relate to the task and your implementation. Each section where you will answer a question is preceded by a '**Question:**' header. Carefully read each question and provide your answer below the '**Answer:**' header by editing the Markdown cell. > **Note**: Code and Markdown cells can be executed using the **Shift+Enter** keyboard shortcut. In addition, a cell can be edited by typically clicking it (double-click for Markdown cells) or by pressing **Enter** while it is highlighted. ## Step 1: Downloading the data The dataset we are going to use is very popular among researchers in Natural Language Processing, usually referred to as the [IMDb dataset](http://ai.stanford.edu/~amaas/data/sentiment/). It consists of movie reviews from the website [imdb.com](http://www.imdb.com/), each labeled as either '**pos**itive', if the reviewer enjoyed the film, or '**neg**ative' otherwise. > Maas, Andrew L., et al. [Learning Word Vectors for Sentiment Analysis](http://ai.stanford.edu/~amaas/data/sentiment/). In _Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies_. Association for Computational Linguistics, 2011. We begin by using some Jupyter Notebook magic to download and extract the dataset. ``` %mkdir ../data !wget -O ../data/aclImdb_v1.tar.gz http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz !tar -zxf ../data/aclImdb_v1.tar.gz -C ../data ``` ## Step 2: Preparing the data The data we have downloaded is split into various files, each of which contains a single review. It will be much easier going forward if we combine these individual files into two large files, one for training and one for testing. 
``` import os import glob def read_imdb_data(data_dir='../data/aclImdb'): data = {} labels = {} for data_type in ['train', 'test']: data[data_type] = {} labels[data_type] = {} for sentiment in ['pos', 'neg']: data[data_type][sentiment] = [] labels[data_type][sentiment] = [] path = os.path.join(data_dir, data_type, sentiment, '*.txt') files = glob.glob(path) for f in files: with open(f) as review: data[data_type][sentiment].append(review.read()) # Here we represent a positive review by '1' and a negative review by '0' labels[data_type][sentiment].append(1 if sentiment == 'pos' else 0) assert len(data[data_type][sentiment]) == len(labels[data_type][sentiment]), \ "{}/{} data size does not match labels size".format(data_type, sentiment) return data, labels data, labels = read_imdb_data() print("IMDB reviews: train = {} pos / {} neg, test = {} pos / {} neg".format( len(data['train']['pos']), len(data['train']['neg']), len(data['test']['pos']), len(data['test']['neg']))) from sklearn.utils import shuffle def prepare_imdb_data(data, labels): """Prepare training and test sets from IMDb movie reviews.""" #Combine positive and negative reviews and labels data_train = data['train']['pos'] + data['train']['neg'] data_test = data['test']['pos'] + data['test']['neg'] labels_train = labels['train']['pos'] + labels['train']['neg'] labels_test = labels['test']['pos'] + labels['test']['neg'] #Shuffle reviews and corresponding labels within training and test sets data_train, labels_train = shuffle(data_train, labels_train) data_test, labels_test = shuffle(data_test, labels_test) # Return a unified training data, test data, training labels, test labets return data_train, data_test, labels_train, labels_test train_X, test_X, train_y, test_y = prepare_imdb_data(data, labels) print("IMDb reviews (combined): train = {}, test = {}".format(len(train_X), len(test_X))) train_X[100] ``` ## Step 3: Processing the data Now that we have our training and testing datasets merged and ready to use, we need to start processing the raw data into something that will be useable by our machine learning algorithm. To begin with, we remove any html formatting that may appear in the reviews and perform some standard natural language processing in order to homogenize the data. 
``` import nltk nltk.download("stopwords") from nltk.corpus import stopwords from nltk.stem.porter import * stemmer = PorterStemmer() import re from bs4 import BeautifulSoup def review_to_words(review): text = BeautifulSoup(review, "html.parser").get_text() # Remove HTML tags text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower()) # Convert to lower case words = text.split() # Split string into words words = [w for w in words if w not in stopwords.words("english")] # Remove stopwords words = [PorterStemmer().stem(w) for w in words] # stem return words import pickle cache_dir = os.path.join("../cache", "sentiment_analysis") # where to store cache files os.makedirs(cache_dir, exist_ok=True) # ensure cache directory exists def preprocess_data(data_train, data_test, labels_train, labels_test, cache_dir=cache_dir, cache_file="preprocessed_data.pkl"): """Convert each review to words; read from cache if available.""" # If cache_file is not None, try to read from it first cache_data = None if cache_file is not None: try: with open(os.path.join(cache_dir, cache_file), "rb") as f: cache_data = pickle.load(f) print("Read preprocessed data from cache file:", cache_file) except: pass # unable to read from cache, but that's okay # If cache is missing, then do the heavy lifting if cache_data is None: # Preprocess training and test data to obtain words for each review #words_train = list(map(review_to_words, data_train)) #words_test = list(map(review_to_words, data_test)) words_train = [review_to_words(review) for review in data_train] words_test = [review_to_words(review) for review in data_test] # Write to cache file for future runs if cache_file is not None: cache_data = dict(words_train=words_train, words_test=words_test, labels_train=labels_train, labels_test=labels_test) with open(os.path.join(cache_dir, cache_file), "wb") as f: pickle.dump(cache_data, f) print("Wrote preprocessed data to cache file:", cache_file) else: # Unpack data loaded from cache file words_train, words_test, labels_train, labels_test = (cache_data['words_train'], cache_data['words_test'], cache_data['labels_train'], cache_data['labels_test']) return words_train, words_test, labels_train, labels_test # Preprocess data train_X, test_X, train_y, test_y = preprocess_data(train_X, test_X, train_y, test_y) ``` ### Extract Bag-of-Words features For the model we will be implementing, rather than using the reviews directly, we are going to transform each review into a Bag-of-Words feature representation. Keep in mind that 'in the wild' we will only have access to the training set so our transformer can only use the training set to construct a representation. 
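To make the Bag-of-Words idea concrete before running the full extraction below, here is a small illustrative sketch (toy sentences, not the IMDb data): the vocabulary is fit on the 'training' documents only and then reused, unchanged, on the 'test' document.

```
from sklearn.feature_extraction.text import CountVectorizer

# Toy corpora: the vocabulary is learned from the training sentences only
toy_train = ["the movie was great great fun", "the plot was dull"]
toy_test = ["great movie but dull ending"]   # "but" and "ending" are unseen and simply ignored

vectorizer = CountVectorizer()
train_counts = vectorizer.fit_transform(toy_train).toarray()   # fit + transform on train
test_counts = vectorizer.transform(toy_test).toarray()         # transform only on test

print(sorted(vectorizer.vocabulary_, key=vectorizer.vocabulary_.get))
print(train_counts)
print(test_counts)
```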
``` import numpy as np from sklearn.feature_extraction.text import CountVectorizer from sklearn.externals import joblib # joblib is an enhanced version of pickle that is more efficient for storing NumPy arrays def extract_BoW_features(words_train, words_test, vocabulary_size=5000, cache_dir=cache_dir, cache_file="bow_features.pkl"): """Extract Bag-of-Words for a given set of documents, already preprocessed into words.""" # If cache_file is not None, try to read from it first cache_data = None if cache_file is not None: try: with open(os.path.join(cache_dir, cache_file), "rb") as f: cache_data = joblib.load(f) print("Read features from cache file:", cache_file) except: pass # unable to read from cache, but that's okay # If cache is missing, then do the heavy lifting if cache_data is None: # Fit a vectorizer to training documents and use it to transform them # NOTE: Training documents have already been preprocessed and tokenized into words; # pass in dummy functions to skip those steps, e.g. preprocessor=lambda x: x vectorizer = CountVectorizer(max_features=vocabulary_size, preprocessor=lambda x: x, tokenizer=lambda x: x) # already preprocessed features_train = vectorizer.fit_transform(words_train).toarray() # Apply the same vectorizer to transform the test documents (ignore unknown words) features_test = vectorizer.transform(words_test).toarray() # NOTE: Remember to convert the features using .toarray() for a compact representation # Write to cache file for future runs (store vocabulary as well) if cache_file is not None: vocabulary = vectorizer.vocabulary_ cache_data = dict(features_train=features_train, features_test=features_test, vocabulary=vocabulary) with open(os.path.join(cache_dir, cache_file), "wb") as f: joblib.dump(cache_data, f) print("Wrote features to cache file:", cache_file) else: # Unpack data loaded from cache file features_train, features_test, vocabulary = (cache_data['features_train'], cache_data['features_test'], cache_data['vocabulary']) # Return both the extracted features as well as the vocabulary return features_train, features_test, vocabulary # Extract Bag of Words features for both training and test datasets train_X, test_X, vocabulary = extract_BoW_features(train_X, test_X) ``` ## Step 4: Classification using XGBoost Now that we have created the feature representation of our training (and testing) data, it is time to start setting up and using the XGBoost classifier provided by SageMaker. ### (TODO) Writing the dataset The XGBoost classifier that we will be using requires the dataset to be written to a file and stored using Amazon S3. To do this, we will start by splitting the training dataset into two parts, the data we will train the model with and a validation set. Then, we will write those datasets to a file and upload the files to S3. In addition, we will write the test set input to a file and upload the file to S3. This is so that we can use SageMakers Batch Transform functionality to test our model once we've fit it. ``` import pandas as pd # TODO: Split the train_X and train_y arrays into the DataFrames val_X, train_X and val_y, train_y. Make sure that # val_X and val_y contain 10 000 entires while train_X and train_y contain the remaining 15 000 entries. 
val_X = pd.DataFrame(train_X[:10000]) train_X = pd.DataFrame(train_X[10000:]) val_y = pd.DataFrame(train_y[:10000]) train_y = pd.DataFrame(train_y[10000:]) ``` The documentation for the XGBoost algorithm in SageMaker requires that the saved datasets should contain no headers or index and that for the training and validation data, the label should occur first for each sample. For more information about this and other algorithms, the SageMaker developer documentation can be found on __[Amazon's website.](https://docs.aws.amazon.com/sagemaker/latest/dg/)__ ``` # First we make sure that the local directory in which we'd like to store the training and validation csv files exists. data_dir = '../data/xgboost' if not os.path.exists(data_dir): os.makedirs(data_dir) # First, save the test data to test.csv in the data_dir directory. Note that we do not save the associated ground truth # labels, instead we will use them later to compare with our model output. pd.DataFrame(test_X).to_csv(os.path.join(data_dir, 'test.csv'), header=False, index=False) # TODO: Save the training and validation data to train.csv and validation.csv in the data_dir directory. # Make sure that the files you create are in the correct format. # Solution: pd.concat([val_y, val_X], axis=1).to_csv(os.path.join(data_dir, 'validation.csv'), header=False, index=False) pd.concat([train_y, train_X], axis=1).to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False) # To save a bit of memory we can set text_X, train_X, val_X, train_y and val_y to None. test_X = train_X = val_X = train_y = val_y = None ``` ### (TODO) Uploading Training / Validation files to S3 Amazon's S3 service allows us to store files that can be access by both the built-in training models such as the XGBoost model we will be using as well as custom models such as the one we will see a little later. For this, and most other tasks we will be doing using SageMaker, there are two methods we could use. The first is to use the low level functionality of SageMaker which requires knowing each of the objects involved in the SageMaker environment. The second is to use the high level functionality in which certain choices have been made on the user's behalf. The low level approach benefits from allowing the user a great deal of flexibility while the high level approach makes development much quicker. For our purposes we will opt to use the high level approach although using the low-level approach is certainly an option. Recall the method `upload_data()` which is a member of object representing our current SageMaker session. What this method does is upload the data to the default bucket (which is created if it does not exist) into the path described by the key_prefix variable. To see this for yourself, once you have uploaded the data files, go to the S3 console and look to see where the files have been uploaded. For additional resources, see the __[SageMaker API documentation](http://sagemaker.readthedocs.io/en/latest/)__ and in addition the __[SageMaker Developer Guide.](https://docs.aws.amazon.com/sagemaker/latest/dg/)__ ``` import sagemaker session = sagemaker.Session() # Store the current SageMaker session # S3 prefix (which folder will we use) prefix = 'sentiment-xgboost' # TODO: Upload the test.csv, train.csv and validation.csv files which are contained in data_dir to S3 using sess.upload_data(). 
test_location = session.upload_data(os.path.join(data_dir, 'test.csv'), key_prefix=prefix) val_location = session.upload_data(os.path.join(data_dir, 'validation.csv'), key_prefix=prefix) train_location = session.upload_data(os.path.join(data_dir, 'train.csv'), key_prefix=prefix) ``` ### (TODO) Creating the XGBoost model Now that the data has been uploaded it is time to create the XGBoost model. To begin with, we need to do some setup. At this point it is worth discussing what a model is in SageMaker. It is easiest to think of a model of comprising three different objects in the SageMaker ecosystem, which interact with one another. - Model Artifacts - Training Code (Container) - Inference Code (Container) The Model Artifacts are what you might think of as the actual model itself. For example, if you were building a neural network, the model artifacts would be the weights of the various layers. In our case, for an XGBoost model, the artifacts are the actual trees that are created during training. The other two objects, the training code and the inference code are then used the manipulate the training artifacts. More precisely, the training code uses the training data that is provided and creates the model artifacts, while the inference code uses the model artifacts to make predictions on new data. The way that SageMaker runs the training and inference code is by making use of Docker containers. For now, think of a container as being a way of packaging code up so that dependencies aren't an issue. ``` from sagemaker import get_execution_role # Our current execution role is require when creating the model as the training # and inference code will need to access the model artifacts. role = get_execution_role() # We need to retrieve the location of the container which is provided by Amazon for using XGBoost. # As a matter of convenience, the training and inference code both use the same container. from sagemaker.amazon.amazon_estimator import get_image_uri container = get_image_uri(session.boto_region_name, 'xgboost') # TODO: Create a SageMaker estimator using the container location determined in the previous cell. # It is recommended that you use a single training instance of type ml.m4.xlarge. It is also # recommended that you use 's3://{}/{}/output'.format(session.default_bucket(), prefix) as the # output path. xgb = None # Solution: xgb = sagemaker.estimator.Estimator(container, # The location of the container we wish to use role, # What is our current IAM Role train_instance_count=1, # How many compute instances train_instance_type='ml.m4.xlarge', # What kind of compute instances output_path='s3://{}/{}/output'.format(session.default_bucket(), prefix), sagemaker_session=session) # TODO: Set the XGBoost hyperparameters in the xgb object. Don't forget that in this case we have a binary # label so we should be using the 'binary:logistic' objective. # Solution: xgb.set_hyperparameters(max_depth=5, eta=0.2, gamma=4, min_child_weight=6, subsample=0.8, silent=0, objective='binary:logistic', early_stopping_rounds=10, num_round=500) ``` ### Fit the XGBoost model Now that our model has been set up we simply need to attach the training and validation datasets and then ask SageMaker to set up the computation. 
``` s3_input_train = sagemaker.s3_input(s3_data=train_location, content_type='csv') s3_input_validation = sagemaker.s3_input(s3_data=val_location, content_type='csv') xgb.fit({'train': s3_input_train, 'validation': s3_input_validation}) ``` ### (TODO) Testing the model Now that we've fit our XGBoost model, it's time to see how well it performs. To do this we will use SageMakers Batch Transform functionality. Batch Transform is a convenient way to perform inference on a large dataset in a way that is not realtime. That is, we don't necessarily need to use our model's results immediately and instead we can peform inference on a large number of samples. An example of this in industry might be peforming an end of month report. This method of inference can also be useful to us as it means to can perform inference on our entire test set. To perform a Batch Transformation we need to first create a transformer objects from our trained estimator object. ``` # TODO: Create a transformer object from the trained model. Using an instance count of 1 and an instance type of ml.m4.xlarge # should be more than enough. xgb_transformer = None # Solution: xgb_transformer = xgb.transformer(instance_count = 1, instance_type = 'ml.m4.xlarge') ``` Next we actually perform the transform job. When doing so we need to make sure to specify the type of data we are sending so that it is serialized correctly in the background. In our case we are providing our model with csv data so we specify `text/csv`. Also, if the test data that we have provided is too large to process all at once then we need to specify how the data file should be split up. Since each line is a single entry in our data set we tell SageMaker that it can split the input on each line. ``` # TODO: Start the transform job. Make sure to specify the content type and the split type of the test data. # Solution: xgb_transformer.transform(test_location, content_type='text/csv', split_type='Line') ``` Currently the transform job is running but it is doing so in the background. Since we wish to wait until the transform job is done and we would like a bit of feedback we can run the `wait()` method. ``` xgb_transformer.wait() ``` Now the transform job has executed and the result, the estimated sentiment of each review, has been saved on S3. Since we would rather work on this file locally we can perform a bit of notebook magic to copy the file to the `data_dir`. ``` !aws s3 cp --recursive $xgb_transformer.output_path $data_dir ``` The last step is now to read in the output from our model, convert the output to something a little more usable, in this case we want the sentiment to be either `1` (positive) or `0` (negative), and then compare to the ground truth labels. ``` predictions = pd.read_csv(os.path.join(data_dir, 'test.csv.out'), header=None) predictions = [round(num) for num in predictions.squeeze().values] from sklearn.metrics import accuracy_score accuracy_score(test_y, predictions) ``` ## Optional: Clean up The default notebook instance on SageMaker doesn't have a lot of excess disk space available. As you continue to complete and execute notebooks you will eventually fill up this disk space, leading to errors which can be difficult to diagnose. Once you are completely finished using a notebook it is a good idea to remove the files that you created along the way. Of course, you can do this from the terminal or from the notebook hub if you would like. The cell below contains some commands to clean up the created files from within the notebook. 
``` # First we will remove all of the files contained in the data_dir directory !rm $data_dir/* # And then we delete the directory itself !rmdir $data_dir # Similarly we will remove the files in the cache_dir directory and the directory itself !rm $cache_dir/* !rmdir $cache_dir ```
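As a possible follow-up to the accuracy score computed above (and assuming `predictions` and `test_y` are still in memory), one could also look at the confusion matrix and per-class metrics before shutting everything down; a minimal sketch:

```
from sklearn.metrics import confusion_matrix, classification_report

# Rows are the true labels (0 = negative, 1 = positive), columns the predicted labels
print(confusion_matrix(test_y, predictions))

# Per-class precision, recall and F1 for the sentiment classifier
print(classification_report(test_y, predictions, target_names=['negative', 'positive']))
```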
github_jupyter
``` #hide #skip ! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab # default_exp losses # default_cls_lvl 3 #export from fastai.imports import * from fastai.torch_imports import * from fastai.torch_core import * from fastai.layers import * #hide from nbdev.showdoc import * ``` # Loss Functions > Custom fastai loss functions ``` F.binary_cross_entropy_with_logits(torch.randn(4,5), torch.randint(0, 2, (4,5)).float(), reduction='none') funcs_kwargs # export class BaseLoss(): "Same as `loss_cls`, but flattens input and target." activation=decodes=noops def __init__(self, loss_cls, *args, axis=-1, flatten=True, floatify=False, is_2d=True, **kwargs): store_attr("axis,flatten,floatify,is_2d") self.func = loss_cls(*args,**kwargs) functools.update_wrapper(self, self.func) def __repr__(self): return f"FlattenedLoss of {self.func}" @property def reduction(self): return self.func.reduction @reduction.setter def reduction(self, v): self.func.reduction = v def __call__(self, inp, targ, **kwargs): inp = inp .transpose(self.axis,-1).contiguous() targ = targ.transpose(self.axis,-1).contiguous() if self.floatify and targ.dtype!=torch.float16: targ = targ.float() if targ.dtype in [torch.int8, torch.int16, torch.int32]: targ = targ.long() if self.flatten: inp = inp.view(-1,inp.shape[-1]) if self.is_2d else inp.view(-1) return self.func.__call__(inp, targ.view(-1) if self.flatten else targ, **kwargs) ``` Wrapping a general loss function inside of `BaseLoss` provides extra functionalities to your loss functions: - flattens the tensors before trying to take the losses since it's more convenient (with a potential tranpose to put `axis` at the end) - a potential `activation` method that tells the library if there is an activation fused in the loss (useful for inference and methods such as `Learner.get_preds` or `Learner.predict`) - a potential <code>decodes</code> method that is used on predictions in inference (for instance, an argmax in classification) The `args` and `kwargs` will be passed to `loss_cls` during the initialization to instantiate a loss function. `axis` is put at the end for losses like softmax that are often performed on the last axis. If `floatify=True`, the `targs` will be converted to floats (useful for losses that only accept float targets like `BCEWithLogitsLoss`), and `is_2d` determines if we flatten while keeping the first dimension (batch size) or completely flatten the input. We want the first for losses like Cross Entropy, and the second for pretty much anything else. ``` # export @delegates() class CrossEntropyLossFlat(BaseLoss): "Same as `nn.CrossEntropyLoss`, but flattens input and target." y_int = True @use_kwargs_dict(keep=True, weight=None, ignore_index=-100, reduction='mean') def __init__(self, *args, axis=-1, **kwargs): super().__init__(nn.CrossEntropyLoss, *args, axis=axis, **kwargs) def decodes(self, x): return x.argmax(dim=self.axis) def activation(self, x): return F.softmax(x, dim=self.axis) tst = CrossEntropyLossFlat() output = torch.randn(32, 5, 10) target = torch.randint(0, 10, (32,5)) #nn.CrossEntropy would fail with those two tensors, but not our flattened version. 
_ = tst(output, target) test_fail(lambda x: nn.CrossEntropyLoss()(output,target)) #Associated activation is softmax test_eq(tst.activation(output), F.softmax(output, dim=-1)) #This loss function has a decodes which is argmax test_eq(tst.decodes(output), output.argmax(dim=-1)) #In a segmentation task, we want to take the softmax over the channel dimension tst = CrossEntropyLossFlat(axis=1) output = torch.randn(32, 5, 128, 128) target = torch.randint(0, 5, (32, 128, 128)) _ = tst(output, target) test_eq(tst.activation(output), F.softmax(output, dim=1)) test_eq(tst.decodes(output), output.argmax(dim=1)) # export @delegates() class BCEWithLogitsLossFlat(BaseLoss): "Same as `nn.BCEWithLogitsLoss`, but flattens input and target." @use_kwargs_dict(keep=True, weight=None, reduction='mean', pos_weight=None) def __init__(self, *args, axis=-1, floatify=True, thresh=0.5, **kwargs): if kwargs.get('pos_weight', None) is not None and kwargs.get('flatten', None) is True: raise ValueError("`flatten` must be False when using `pos_weight` to avoid a RuntimeError due to shape mismatch") if kwargs.get('pos_weight', None) is not None: kwargs['flatten'] = False super().__init__(nn.BCEWithLogitsLoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs) self.thresh = thresh def decodes(self, x): return x>self.thresh def activation(self, x): return torch.sigmoid(x) tst = BCEWithLogitsLossFlat() output = torch.randn(32, 5, 10) target = torch.randn(32, 5, 10) #nn.BCEWithLogitsLoss would fail with those two tensors, but not our flattened version. _ = tst(output, target) test_fail(lambda x: nn.BCEWithLogitsLoss()(output,target)) output = torch.randn(32, 5) target = torch.randint(0,2,(32, 5)) #nn.BCEWithLogitsLoss would fail with int targets but not our flattened version. _ = tst(output, target) test_fail(lambda x: nn.BCEWithLogitsLoss()(output,target)) tst = BCEWithLogitsLossFlat(pos_weight=torch.ones(10)) output = torch.randn(32, 5, 10) target = torch.randn(32, 5, 10) _ = tst(output, target) test_fail(lambda x: nn.BCEWithLogitsLoss()(output,target)) #Associated activation is sigmoid test_eq(tst.activation(output), torch.sigmoid(output)) # export @use_kwargs_dict(weight=None, reduction='mean') def BCELossFlat(*args, axis=-1, floatify=True, **kwargs): "Same as `nn.BCELoss`, but flattens input and target." return BaseLoss(nn.BCELoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs) tst = BCELossFlat() output = torch.sigmoid(torch.randn(32, 5, 10)) target = torch.randint(0,2,(32, 5, 10)) _ = tst(output, target) test_fail(lambda x: nn.BCELoss()(output,target)) # export @use_kwargs_dict(reduction='mean') def MSELossFlat(*args, axis=-1, floatify=True, **kwargs): "Same as `nn.MSELoss`, but flattens input and target." return BaseLoss(nn.MSELoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs) tst = MSELossFlat() output = torch.sigmoid(torch.randn(32, 5, 10)) target = torch.randint(0,2,(32, 5, 10)) _ = tst(output, target) test_fail(lambda x: nn.MSELoss()(output,target)) #hide #cuda #Test losses work in half precision output = torch.sigmoid(torch.randn(32, 5, 10)).half().cuda() target = torch.randint(0,2,(32, 5, 10)).half().cuda() for tst in [BCELossFlat(), MSELossFlat()]: _ = tst(output, target) # export @use_kwargs_dict(reduction='mean') def L1LossFlat(*args, axis=-1, floatify=True, **kwargs): "Same as `nn.L1Loss`, but flattens input and target." 
return BaseLoss(nn.L1Loss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs) #export class LabelSmoothingCrossEntropy(Module): y_int = True def __init__(self, eps:float=0.1, reduction='mean'): self.eps,self.reduction = eps,reduction def forward(self, output, target): c = output.size()[-1] log_preds = F.log_softmax(output, dim=-1) if self.reduction=='sum': loss = -log_preds.sum() else: loss = -log_preds.sum(dim=-1) #We divide by that size at the return line so sum and not mean if self.reduction=='mean': loss = loss.mean() return loss*self.eps/c + (1-self.eps) * F.nll_loss(log_preds, target.long(), reduction=self.reduction) def activation(self, out): return F.softmax(out, dim=-1) def decodes(self, out): return out.argmax(dim=-1) ``` On top of the formula we define: - a `reduction` attribute, that will be used when we call `Learner.get_preds` - an `activation` function that represents the activation fused in the loss (since we use cross entropy behind the scenes). It will be applied to the output of the model when calling `Learner.get_preds` or `Learner.predict` - a <code>decodes</code> function that converts the output of the model to a format similar to the target (here indices). This is used in `Learner.predict` and `Learner.show_results` to decode the predictions ``` #export @delegates() class LabelSmoothingCrossEntropyFlat(BaseLoss): "Same as `LabelSmoothingCrossEntropy`, but flattens input and target." y_int = True @use_kwargs_dict(keep=True, eps=0.1, reduction='mean') def __init__(self, *args, axis=-1, **kwargs): super().__init__(LabelSmoothingCrossEntropy, *args, axis=axis, **kwargs) def activation(self, out): return F.softmax(out, dim=-1) def decodes(self, out): return out.argmax(dim=-1) ``` ## Export - ``` #hide from nbdev.export import * notebook2script() ```
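As a usage sketch (not part of the exported module), the same `BaseLoss` pattern shown above can wrap other PyTorch losses; for example, a flattened `nn.SmoothL1Loss`, assuming the definitions from this notebook are in scope:

```
# Follows the same pattern as MSELossFlat / L1LossFlat above
def SmoothL1LossFlat(*args, axis=-1, floatify=True, **kwargs):
    "Same as `nn.SmoothL1Loss`, but flattens input and target."
    return BaseLoss(nn.SmoothL1Loss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs)

tst = SmoothL1LossFlat()
output = torch.randn(32, 5, 10)
target = torch.randn(32, 5, 10)
loss = tst(output, target)   # inputs are flattened to 1D before nn.SmoothL1Loss is applied
print(loss)
```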
github_jupyter
# Finding Outliers with k-Means ## Setup ``` import numpy as np import pandas as pd import sqlite3 with sqlite3.connect('../../ch_11/logs/logs.db') as conn: logs_2018 = pd.read_sql( """ SELECT * FROM logs WHERE datetime BETWEEN "2018-01-01" AND "2019-01-01"; """, conn, parse_dates=['datetime'], index_col='datetime' ) logs_2018.head() def get_X(log, day): """ Get data we can use for the X Parameters: - log: The logs dataframe - day: A day or single value we can use as a datetime index slice Returns: A pandas DataFrame """ return pd.get_dummies(log[day].assign( failures=lambda x: 1 - x.success ).query('failures > 0').resample('1min').agg( {'username':'nunique', 'failures': 'sum'} ).dropna().rename( columns={'username':'usernames_with_failures'} ).assign( day_of_week=lambda x: x.index.dayofweek, hour=lambda x: x.index.hour ).drop(columns=['failures']), columns=['day_of_week', 'hour']) X = get_X(logs_2018, '2018') X.columns ``` ## k-Means Since we want a "normal" activity cluster and an "anomaly" cluster, we need to make 2 clusters. ``` from sklearn.cluster import KMeans from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler kmeans_pipeline = Pipeline([ ('scale', StandardScaler()), ('kmeans', KMeans(random_state=0, n_clusters=2)) ]).fit(X) ``` The cluster label doesn't mean anything to us, but we can examine the size of each cluster. We don't expect the clusters to be of equal size because anomalous activity doesn't happen as often as normal activity (we presume). ``` preds = kmeans_pipeline.predict(X) pd.Series(preds).value_counts() ``` ### Evaluating the clustering #### Step 1: Get the true labels ``` with sqlite3.connect('../../ch_11/logs/logs.db') as conn: hackers_2018 = pd.read_sql( 'SELECT * FROM attacks WHERE start BETWEEN "2018-01-01" AND "2019-01-01";', conn, parse_dates=['start', 'end'] ).assign( duration=lambda x: x.end - x.start, start_floor=lambda x: x.start.dt.floor('min'), end_ceil=lambda x: x.end.dt.ceil('min') ) def get_y(datetimes, hackers, resolution='1min'): """ Get data we can use for the y (whether or not a hacker attempted a log in during that time). Parameters: - datetimes: The datetimes to check for hackers - hackers: The dataframe indicating when the attacks started and stopped - resolution: The granularity of the datetime. Default is 1 minute. Returns: A pandas Series of booleans. """ date_ranges = hackers.apply( lambda x: pd.date_range(x.start_floor, x.end_ceil, freq=resolution), axis=1 ) dates = pd.Series() for date_range in date_ranges: dates = pd.concat([dates, date_range.to_series()]) return datetimes.isin(dates) is_hacker = get_y(X.reset_index().datetime, hackers_2018) ``` ### Step 2: Calculate Fowlkes Mallows Score This indicates percentage of the observations belong to the same cluster in the true labels and in the predicted labels. ``` from sklearn.metrics import fowlkes_mallows_score fowlkes_mallows_score(is_hacker, preds) ```
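The cluster labels themselves are arbitrary, so a simple heuristic is to treat the smaller of the two clusters as the "anomaly" cluster. The sketch below is an illustrative addition that assumes `preds` and `is_hacker` from the cells above are still in memory:

```
import pandas as pd

# Treat the smaller cluster as the anomaly cluster
cluster_sizes = pd.Series(preds).value_counts()
anomaly_cluster = cluster_sizes.idxmin()
is_flagged = preds == anomaly_cluster

# Cross-tabulate the flagged minutes against the true labels
print(pd.crosstab(is_hacker.values, is_flagged,
                  rownames=['hacker activity'], colnames=['flagged by k-means']))
```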
github_jupyter
# Detecting malaria in blood smear images

### The Problem

Malaria is a mosquito-borne disease caused by the parasite _Plasmodium_. There are an estimated 219 million cases of malaria annually, with 435,000 deaths, many of them children. Malaria is prevalent in sub-tropical regions of Africa.

Microscopy is the most common and reliable method for diagnosing malaria and computing parasitic load. With this technique, malaria parasites are identified by examining a drop of the patient’s blood, spread out as a “blood smear” on a slide. Prior to examination, the specimen is stained (most often with the Giemsa stain) to give the parasites a distinctive appearance. This technique remains the gold standard for laboratory confirmation of malaria.

![Malaria-positive blood smear](https://www.cdc.gov/malaria/images/microscopy/parasites_arrows.jpg)

Blood smear from a patient with malaria; microscopic examination shows _Plasmodium falciparum_ parasites (arrows) infecting some of the patient’s red blood cells. (CDC photo)

However, the diagnostic accuracy of this technique depends on human expertise and can be affected by inter-observer variability.

### Deep learning as a diagnostic aid

Recent advances in computing and deep learning techniques have enabled large-scale medical image analysis. Here, we aim to use a convolutional neural network (CNN) to quickly and accurately distinguish parasitized from healthy cells in blood smears. This notebook is based on the work presented by [Dipanjan Sarkar](https://towardsdatascience.com/detecting-malaria-with-deep-learning-9e45c1e34b60).

### About the dataset

A [dataset](https://ceb.nlm.nih.gov/repositories/malaria-datasets/) of parasitized and uninfected cells from blood smear slides was collected and annotated by [Rajaraman et al](https://doi.org/10.7717/peerj.4568). The dataset contains a total of 27,558 cell images with equal instances of parasitized and uninfected cells from Giemsa-stained thin blood smear slides from 150 P. falciparum-infected and 50 healthy patients collected and photographed at Chittagong Medical College Hospital, Bangladesh.

There are also CSV files containing the Patient-ID to cell mappings for the parasitized and uninfected classes. The CSV file for the parasitized class contains 151 patient-ID entries. The slide images for the parasitized patient-ID “C47P8thinOriginal” are read from two different microscope models (Olympus and Motif). The CSV file for the uninfected class contains 201 entries since the normal cells from the infected patients’ slides also make it to the normal cell category (151+50 = 201).

The data appears along with the publication:

Rajaraman S, Antani SK, Poostchi M, Silamut K, Hossain MA, Maude RJ, Jaeger S, Thoma GR. (2018) Pre-trained convolutional neural networks as feature extractors toward improved Malaria parasite detection in thin blood smear images. PeerJ 6:e4568 https://doi.org/10.7717/peerj.4568

## Malaria Dataset

Medium post: https://towardsdatascience.com/detecting-malaria-using-deep-learning-fd4fdcee1f5a

Data: https://ceb.nlm.nih.gov/repositories/malaria-datasets/

## Data preprocessing

The [cell images](https://ceb.nlm.nih.gov/proj/malaria/cell_images.zip) dataset can be downloaded from the [NIH repository](https://ceb.nlm.nih.gov/repositories/malaria-datasets/). Parasitized and healthy cells are sorted into their own folders.
``` # mkdir ../data/ # wget https://ceb.nlm.nih.gov/proj/malaria/cell_images.zip # unzip cell_images.zip import os os.listdir('../data/cell_images/') import random import glob # Get file paths for files base_dir = os.path.join('../data/cell_images') infected_dir = os.path.join(base_dir, 'Parasitized') healthy_dir = os.path.join(base_dir, 'Uninfected') # Glob is used to identify filepath patterns infected_files = glob.glob(infected_dir+'/*.png') healthy_files = glob.glob(healthy_dir+'/*.png') # View size of dataset len(infected_files), len(healthy_files) ``` Our data is evenly split between parasitized and healthy cells/images so we won't need to further balance our data. ## Split data into train, test, split sets We can aggregate all of our images by adding the filepaths and labels into a single dataframe. We'll then shuffle and split the data into a 60/30/10 train/test/validation set. ``` import numpy as np import pandas as pd np.random.seed(1) # Build a dataframe of filenames with labels files = pd.DataFrame(data={'filename': infected_files, 'label': ['malaria' for i in range(len(infected_files))]}) files = pd.concat([files, pd.DataFrame(data={'filename': healthy_files, 'label': ['healthy' for i in range(len(healthy_files))]})]) files = files.sample(frac=1).reset_index(drop=True) # Shuffle rows files.head() from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(files.filename.values, files.label.values, test_size=0.3, random_state=42) X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42) X_train.shape, X_val.shape, y_test.shape ``` As the dimensions of each image will vary, we will resize the images to be 125 x 125 pixels. The cv2 module can be used to load and resize images. ``` import cv2 # Read and resize images nrows = 125 ncols = 125 channels = 3 cv2.imread(X_train[0], cv2.IMREAD_COLOR) cv2.resize(cv2.imread(X_train[0], cv2.IMREAD_COLOR), (nrows, ncols), interpolation=cv2.INTER_CUBIC).shape import threading from concurrent import futures # Resize images IMG_DIMS = (125, 125) def get_img_data_parallel(idx, img, total_imgs): if idx % 5000 == 0 or idx == (total_imgs - 1): print('{}: working on img num: {}'.format(threading.current_thread().name, idx)) img = cv2.imread(img) img = cv2.resize(img, dsize=IMG_DIMS, interpolation=cv2.INTER_CUBIC) img = np.array(img, dtype=np.float32) return img ex = futures.ThreadPoolExecutor(max_workers=None) X_train_inp = [(idx, img, len(X_train)) for idx, img in enumerate(X_train)] X_val_inp = [(idx, img, len(X_val)) for idx, img in enumerate(X_val)] X_test_inp = [(idx, img, len(X_test)) for idx, img in enumerate(X_test)] print('Loading Train Images:') X_train_map = ex.map(get_img_data_parallel, [record[0] for record in X_train_inp], [record[1] for record in X_train_inp], [record[2] for record in X_train_inp]) X_train = np.array(list(X_train_map)) print('\nLoading Validation Images:') X_val_map = ex.map(get_img_data_parallel, [record[0] for record in X_val_inp], [record[1] for record in X_val_inp], [record[2] for record in X_val_inp]) X_val = np.array(list(X_val_map)) print('\nLoading Test Images:') X_test_map = ex.map(get_img_data_parallel, [record[0] for record in X_test_inp], [record[1] for record in X_test_inp], [record[2] for record in X_test_inp]) X_test = np.array(list(X_test_map)) X_train.shape, X_val.shape, X_test.shape ``` Using the matplotlib module, we can view a sample of the resized cell images. 
A brief inspection shows the presence of purple-stained parasites only in malaria-labeled samples.

```
import matplotlib.pyplot as plt
%matplotlib inline

plt.figure(1, figsize=(8, 8))
n = 0
for i in range(16):
    n += 1
    r = np.random.randint(0, X_train.shape[0], 1)
    plt.subplot(4, 4, n)
    plt.subplots_adjust(hspace=0.5, wspace=0.5)
    plt.imshow(X_train[r[0]]/255.)
    plt.title('{}'.format(y_train[r[0]]))
    plt.xticks([]), plt.yticks([])
```

## Model training

We can set some initial parameters for our model, including batch size, the number of classes, number of epochs, and image dimensions. We'll encode the text category labels as 0 or 1.

```
from sklearn.preprocessing import LabelEncoder

BATCH_SIZE = 64
NUM_CLASSES = 2
EPOCHS = 25
INPUT_SHAPE = (125, 125, 3)

X_train_imgs_scaled = X_train / 255.
X_val_imgs_scaled = X_val / 255.

le = LabelEncoder()
le.fit(y_train)
y_train_enc = le.transform(y_train)
y_val_enc = le.transform(y_val)

print(y_train[:6], y_train_enc[:6])
```

### Simple CNN model

To start with, we'll build a simple CNN with two convolution and pooling blocks followed by a fully connected layer. Because this is a binary classification problem, the output layer is a single unit with a sigmoid activation, paired with the binary cross-entropy loss.

```
from keras.models import Sequential
from keras.layers import Conv2D, Dense, MaxPooling2D, Flatten

# Build a simple CNN
model = Sequential()
model.add(Conv2D(32, kernel_size=(5, 5), strides=(1, 1), activation='relu', input_shape=INPUT_SHAPE))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(64, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
# Single sigmoid unit for binary classification (a 1-unit softmax would always output 1.0)
model.add(Dense(1, activation='sigmoid'))

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()
```

We can now train the model, tracking accuracy on the validation set as training progresses.

```
import datetime
from keras import callbacks

# Log training metrics for TensorBoard and reduce the learning rate when the validation loss plateaus
logdir = os.path.join('../tensorboard_logs', datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
tensorboard_callback = callbacks.TensorBoard(logdir, histogram_freq=1)
reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2, min_lr=0.000001)
callback_list = [reduce_lr, tensorboard_callback]

history = model.fit(x=X_train_imgs_scaled, y=y_train_enc,
                    batch_size=BATCH_SIZE,
                    epochs=EPOCHS,
                    validation_data=(X_val_imgs_scaled, y_val_enc),
                    callbacks=callback_list,
                    verbose=1)
```
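After training, the held-out test set gives a final sanity check. The cell below is an illustrative addition; it assumes `model`, `history`, `X_test`, `y_test`, and the fitted `LabelEncoder` `le` from the cells above are still in memory.

```
# Scale the test images the same way as the training data and encode the labels
X_test_imgs_scaled = X_test / 255.
y_test_enc = le.transform(y_test)

# Overall loss and accuracy on the held-out test set
test_loss, test_acc = model.evaluate(X_test_imgs_scaled, y_test_enc, batch_size=BATCH_SIZE, verbose=0)
print('Test accuracy: {:.4f}'.format(test_acc))

# Training curves from the History object returned by model.fit
# (on older Keras versions the keys may be 'acc' / 'val_acc' instead)
plt.figure(figsize=(8, 4))
plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='validation accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()
```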
github_jupyter
<img align="right" src="images/tf.png" width="128"/> <img align="right" src="images/ninologo.png" width="128"/> <img align="right" src="images/dans.png" width="128"/> # Tutorial This notebook gets you started with using [Text-Fabric](https://annotation.github.io/text-fabric/) for coding in the Old-Babylonian Letter corpus (cuneiform). Familiarity with the underlying [data model](https://annotation.github.io/text-fabric/tf/about/datamodel.html) is recommended. ## Installing Text-Fabric ### Python You need to have Python on your system. Most systems have it out of the box, but alas, that is python2 and we need at least python **3.6**. Install it from [python.org](https://www.python.org) or from [Anaconda](https://www.anaconda.com/download). ### TF itself ``` pip3 install text-fabric ``` ### Jupyter notebook You need [Jupyter](http://jupyter.org). If it is not already installed: ``` pip3 install jupyter ``` ## Tip If you cloned the repository containing this tutorial, first copy its parent directory to somewhere outside your clone of the repo, before computing with this it. If you pull changes from the repository later, it will not conflict with your computations. Where you put your tutorial directory is up to you. It will work from any directory. ## Old Babylonian data Text-Fabric will fetch the data set for you from the newest github release binaries. The data will be stored in the `text-fabric-data` in your home directory. # Features The data of the corpus is organized in features. They are *columns* of data. Think of the corpus as a gigantic spreadsheet, where row 1 corresponds to the first sign, row 2 to the second sign, and so on, for all 200,000 signs. The information which reading each sign has, constitutes a column in that spreadsheet. The Old Babylonian corpus contains nearly 60 columns, not only for the signs, but also for thousands of other textual objects, such as clusters, lines, columns, faces, documents. Instead of putting that information in one big table, the data is organized in separate columns. We call those columns **features**. ``` %load_ext autoreload %autoreload 2 import os import collections ``` # Incantation The simplest way to get going is by this *incantation*: ``` from tf.app import use ``` For the very last version, use `hot`. For the latest release, use `latest`. If you have cloned the repos (TF app and data), use `clone`. If you do not want/need to upgrade, leave out the checkout specifiers. ``` A = use("oldbabylonian:clone", checkout="clone", hoist=globals()) # A = use('oldbabylonian:hot', checkout="hot", hoist=globals()) # A = use('oldbabylonian:latest', checkout="latest", hoist=globals()) # A = use('oldbabylonian', hoist=globals()) ``` You can see which features have been loaded, and if you click on a feature name, you find its documentation. If you hover over a name, you see where the feature is located on your system. ## API The result of the incantation is that we have a bunch of special variables at our disposal that give us access to the text and data of the corpus. At this point it is helpful to throw a quick glance at the text-fabric API documentation (see the links under **API Members** above). The most essential thing for now is that we can use `F` to access the data in the features we've loaded. But there is more, such as `N`, which helps us to walk over the text, as we see in a minute. The **API members** above show you exactly which new names have been inserted in your namespace. 
If you click on these names, you go to the API documentation for them. ## Search Text-Fabric contains a flexible search engine, that does not only work for the data, of this corpus, but also for other corpora and data that you add to corpora. **Search is the quickest way to come up-to-speed with your data, without too much programming.** Jump to the dedicated [search](search.ipynb) search tutorial first, to whet your appetite. The real power of search lies in the fact that it is integrated in a programming environment. You can use programming to: * compose dynamic queries * process query results Therefore, the rest of this tutorial is still important when you want to tap that power. If you continue here, you learn all the basics of data-navigation with Text-Fabric. # Counting In order to get acquainted with the data, we start with the simple task of counting. ## Count all nodes We use the [`N.walk()` generator](https://annotation.github.io/text-fabric/tf/core/nodes.html#tf.core.nodes.Nodes.walk) to walk through the nodes. We compared the TF data to a gigantic spreadsheet, where the rows correspond to the signs. In Text-Fabric, we call the rows `slots`, because they are the textual positions that can be filled with signs. We also mentioned that there are also other textual objects. They are the clusters, lines, faces and documents. They also correspond to rows in the big spreadsheet. In Text-Fabric we call all these rows *nodes*, and the `N()` generator carries us through those nodes in the textual order. Just one extra thing: the `info` statements generate timed messages. If you use them instead of `print` you'll get a sense of the amount of time that the various processing steps typically need. ``` A.indent(reset=True) A.info("Counting nodes ...") i = 0 for n in N.walk(): i += 1 A.info("{} nodes".format(i)) ``` Here you see it: over 300,000 nodes. ## What are those nodes? Every node has a type, like sign, or line, face. But what exactly are they? Text-Fabric has two special features, `otype` and `oslots`, that must occur in every Text-Fabric data set. `otype` tells you for each node its type, and you can ask for the number of `slot`s in the text. Here we go! ``` F.otype.slotType F.otype.maxSlot F.otype.maxNode F.otype.all C.levels.data ``` This is interesting: above you see all the textual objects, with the average size of their objects, the node where they start, and the node where they end. ## Count individual object types This is an intuitive way to count the number of nodes in each type. Note in passing, how we use the `indent` in conjunction with `info` to produce neat timed and indented progress messages. ``` A.indent(reset=True) A.info("counting objects ...") for otype in F.otype.all: i = 0 A.indent(level=1, reset=True) for n in F.otype.s(otype): i += 1 A.info("{:>7} {}s".format(i, otype)) A.indent(level=0) A.info("Done") ``` # Viewing textual objects You can use the A API (the extra power) to display cuneiform text. See the [display](display.ipynb) tutorial. # Feature statistics `F` gives access to all features. Every feature has a method `freqList()` to generate a frequency list of its values, higher frequencies first. Here are the repeats of numerals (the `-1` comes from a `n(rrr)`: ``` F.repeat.freqList() ``` Signs have types and clusters have types. 
We can count them separately: ``` F.type.freqList("cluster") F.type.freqList("sign") ``` Finally, the flags: ``` F.flags.freqList() ``` # Word matters ## Top 20 frequent words We represent words by their essential symbols, collected in the feature *sym* (which also exists for signs). ``` for (w, amount) in F.sym.freqList("word")[0:20]: print(f"{amount:>5} {w}") ``` ## Word distribution Let's do a bit more fancy word stuff. ### Hapaxes A hapax can be found by picking the words with frequency 1 We print 20 hapaxes. ``` for w in [w for (w, amount) in F.sym.freqList("word") if amount == 1][0:20]: print(f'"{w}"') ``` ### Small occurrence base The occurrence base of a word are the documents in which occurs. We compute the occurrence base of each word. ``` occurrenceBase = collections.defaultdict(set) for w in F.otype.s("word"): pNum = T.sectionFromNode(w)[0] occurrenceBase[F.sym.v(w)].add(pNum) ``` An overview of how many words have how big occurrence bases: ``` occurrenceSize = collections.Counter() for (w, pNums) in occurrenceBase.items(): occurrenceSize[len(pNums)] += 1 occurrenceSize = sorted( occurrenceSize.items(), key=lambda x: (-x[1], x[0]), ) for (size, amount) in occurrenceSize[0:10]: print(f"base size {size:>4} : {amount:>5} words") print("...") for (size, amount) in occurrenceSize[-10:]: print(f"base size {size:>4} : {amount:>5} words") ``` Let's give the predicate *private* to those words whose occurrence base is a single document. ``` privates = {w for (w, base) in occurrenceBase.items() if len(base) == 1} len(privates) ``` ### Peculiarity of documents As a final exercise with words, lets make a list of all documents, and show their * total number of words * number of private words * the percentage of private words: a measure of the peculiarity of the document ``` docList = [] empty = set() ordinary = set() for d in F.otype.s("document"): pNum = T.documentName(d) words = {F.sym.v(w) for w in L.d(d, otype="word")} a = len(words) if not a: empty.add(pNum) continue o = len({w for w in words if w in privates}) if not o: ordinary.add(pNum) continue p = 100 * o / a docList.append((pNum, a, o, p)) docList = sorted(docList, key=lambda e: (-e[3], -e[1], e[0])) print(f"Found {len(empty):>4} empty documents") print(f"Found {len(ordinary):>4} ordinary documents (i.e. without private words)") print( "{:<20}{:>5}{:>5}{:>5}\n{}".format( "document", "#all", "#own", "%own", "-" * 35, ) ) for x in docList[0:20]: print("{:<20} {:>4} {:>4} {:>4.1f}%".format(*x)) print("...") for x in docList[-20:]: print("{:<20} {:>4} {:>4} {:>4.1f}%".format(*x)) ``` # Locality API We travel upwards and downwards, forwards and backwards through the nodes. The Locality-API (`L`) provides functions: `u()` for going up, and `d()` for going down, `n()` for going to next nodes and `p()` for going to previous nodes. These directions are indirect notions: nodes are just numbers, but by means of the `oslots` feature they are linked to slots. One node *contains* an other node, if the one is linked to a set of slots that contains the set of slots that the other is linked to. And one if next or previous to an other, if its slots follow or precede the slots of the other one. `L.u(node)` **Up** is going to nodes that embed `node`. `L.d(node)` **Down** is the opposite direction, to those that are contained in `node`. `L.n(node)` **Next** are the next *adjacent* nodes, i.e. nodes whose first slot comes immediately after the last slot of `node`. `L.p(node)` **Previous** are the previous *adjacent* nodes, i.e. 
nodes whose last slot comes immediately before the first slot of `node`. All these functions yield nodes of all possible otypes. By passing an optional parameter, you can restrict the results to nodes of that type. The result are ordered according to the order of things in the text. The functions return always a tuple, even if there is just one node in the result. ## Going up We go from the first word to the document it contains. Note the `[0]` at the end. You expect one document, yet `L` returns a tuple. To get the only element of that tuple, you need to do that `[0]`. If you are like me, you keep forgetting it, and that will lead to weird error messages later on. ``` firstDoc = L.u(1, otype="document")[0] print(firstDoc) ``` And let's see all the containing objects of sign 3: ``` s = 3 for otype in F.otype.all: if otype == F.otype.slotType: continue up = L.u(s, otype=otype) upNode = "x" if len(up) == 0 else up[0] print("sign {} is contained in {} {}".format(s, otype, upNode)) ``` ## Going next Let's go to the next nodes of the first document. ``` afterFirstDoc = L.n(firstDoc) for n in afterFirstDoc: print( "{:>7}: {:<13} first slot={:<6}, last slot={:<6}".format( n, F.otype.v(n), E.oslots.s(n)[0], E.oslots.s(n)[-1], ) ) secondDoc = L.n(firstDoc, otype="document")[0] ``` ## Going previous And let's see what is right before the second document. ``` for n in L.p(secondDoc): print( "{:>7}: {:<13} first slot={:<6}, last slot={:<6}".format( n, F.otype.v(n), E.oslots.s(n)[0], E.oslots.s(n)[-1], ) ) ``` ## Going down We go to the faces of the first document, and just count them. ``` faces = L.d(firstDoc, otype="face") print(len(faces)) ``` ## The first line We pick two nodes and explore what is above and below them: the first line and the first word. ``` for n in [ F.otype.s("word")[0], F.otype.s("line")[0], ]: A.indent(level=0) A.info("Node {}".format(n), tm=False) A.indent(level=1) A.info("UP", tm=False) A.indent(level=2) A.info("\n".join(["{:<15} {}".format(u, F.otype.v(u)) for u in L.u(n)]), tm=False) A.indent(level=1) A.info("DOWN", tm=False) A.indent(level=2) A.info("\n".join(["{:<15} {}".format(u, F.otype.v(u)) for u in L.d(n)]), tm=False) A.indent(level=0) A.info("Done", tm=False) ``` # Text API So far, we have mainly seen nodes and their numbers, and the names of node types. You would almost forget that we are dealing with text. So let's try to see some text. In the same way as `F` gives access to feature data, `T` gives access to the text. That is also feature data, but you can tell Text-Fabric which features are specifically carrying the text, and in return Text-Fabric offers you a Text API: `T`. ## Formats Cuneiform text can be represented in a number of ways: * original ATF, with bracketings and flags * essential symbols: readings and graphemes, repeats and fractions (of numerals), no flags, no clusterings * unicode symbols If you wonder where the information about text formats is stored: not in the program text-fabric, but in the data set. It has a feature `otext`, which specifies the formats and which features must be used to produce them. `otext` is the third special feature in a TF data set, next to `otype` and `oslots`. It is an optional feature. If it is absent, there will be no `T` API. Here is a list of all available formats in this data set. ``` sorted(T.formats) ``` ## Using the formats The ` T.text()` function is central to get text representations of nodes. 
Its most basic usage is ```python T.text(nodes, fmt=fmt) ``` where `nodes` is a list or iterable of nodes, usually word nodes, and `fmt` is the name of a format. If you leave out `fmt`, the default `text-orig-full` is chosen. The result is the text in that format for all nodes specified: ``` T.text([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], fmt="text-orig-plain") ``` There is also another usage of this function: ```python T.text(node, fmt=fmt) ``` where `node` is a single node. In this case, the default format is *ntype*`-orig-full` where *ntype* is the type of `node`. If the format is defined in the corpus, it will be used. Otherwise, the word nodes contained in `node` will be looked up and represented with the default format `text-orig-full`. In this way we can sensibly represent a lot of different nodes, such as documents, faces, lines, clusters, words and signs. We compose a set of example nodes and run `T.text` on them: ``` exampleNodes = [ F.otype.s("sign")[0], F.otype.s("word")[0], F.otype.s("cluster")[0], F.otype.s("line")[0], F.otype.s("face")[0], F.otype.s("document")[0], ] exampleNodes for n in exampleNodes: print(f"This is {F.otype.v(n)} {n}:") print(T.text(n)) print("") ``` ## Using the formats Now let's use those formats to print out the first line in this corpus. Note that only the formats starting with `text-` are usable for this. For the `layout-` formats, see [display](display.ipynb). ``` for fmt in sorted(T.formats): if fmt.startswith("text-"): print("{}:\n\t{}".format(fmt, T.text(range(1, 12), fmt=fmt))) ``` If we do not specify a format, the **default** format is used (`text-orig-full`). ``` T.text(range(1, 12)) firstLine = F.otype.s("line")[0] T.text(firstLine) T.text(firstLine, fmt="text-orig-unicode") ``` The important things to remember are: * you can supply a list of slot nodes and get them represented in all formats * you can get non-slot nodes `n` in default format by `T.text(n)` * you can get non-slot nodes `n` in other formats by `T.text(n, fmt=fmt, descend=True)` ## Whole text in all formats in just 2 seconds Part of the pleasure of working with computers is that they can crunch massive amounts of data. The text of the Old Babylonian Letters is a piece of cake. It takes just ten seconds to have that cake and eat it. In nearly a dozen formats. ``` A.indent(reset=True) A.info("writing plain text of all letters in all text formats") text = collections.defaultdict(list) for ln in F.otype.s("line"): for fmt in sorted(T.formats): if fmt.startswith("text-"): text[fmt].append(T.text(ln, fmt=fmt, descend=True)) A.info("done {} formats".format(len(text))) for fmt in sorted(text): print("{}\n{}\n".format(fmt, "\n".join(text[fmt][0:5]))) ``` ### The full plain text We write all formats to file, in your `Downloads` folder. ``` for fmt in T.formats: if fmt.startswith("text-"): with open(os.path.expanduser(f"~/Downloads/{fmt}.txt"), "w") as f: f.write("\n".join(text[fmt])) ``` ## Sections A section in the letter corpus is a document, a face or a line. Knowledge of sections is not baked into Text-Fabric. The config feature `otext.tf` may specify three section levels, and tell what the corresponding node types and features are. From that knowledge it can construct mappings from nodes to sections, e.g. from line nodes to tuples of the form: (p-number, face specifier, line number) You can get the section of a node as a tuple of relevant document, face, and line nodes. Or you can get it as a passage label, a string. 
You can ask for the passage corresponding to the first slot of a node, or the one corresponding to the last slot. If you are dealing with document and face nodes, you can ask to fill out the line and face parts as well. Here are examples of getting the section that corresponds to a node and vice versa. **NB:** `sectionFromNode` always delivers a verse specification, either from the first slot belonging to that node, or, if `lastSlot`, from the last slot belonging to that node. ``` someNodes = ( F.otype.s("sign")[100000], F.otype.s("word")[10000], F.otype.s("cluster")[5000], F.otype.s("line")[15000], F.otype.s("face")[1000], F.otype.s("document")[500], ) for n in someNodes: nType = F.otype.v(n) d = f"{n:>7} {nType}" first = A.sectionStrFromNode(n) last = A.sectionStrFromNode(n, lastSlot=True, fillup=True) tup = ( T.sectionTuple(n), T.sectionTuple(n, lastSlot=True, fillup=True), ) print(f"{d:<16} - {first:<18} {last:<18} {tup}") ``` # Clean caches Text-Fabric pre-computes data for you, so that it can be loaded faster. If the original data is updated, Text-Fabric detects it, and will recompute that data. But there are cases, when the algorithms of Text-Fabric have changed, without any changes in the data, that you might want to clear the cache of precomputed results. There are two ways to do that: * Locate the `.tf` directory of your dataset, and remove all `.tfx` files in it. This might be a bit awkward to do, because the `.tf` directory is hidden on Unix-like systems. * Call `TF.clearCache()`, which does exactly the same. It is not handy to execute the following cell all the time, that's why I have commented it out. So if you really want to clear the cache, remove the comment sign below. ``` # TF.clearCache() ``` # Next steps By now you have an impression how to compute around in the corpus. While this is still the beginning, I hope you already sense the power of unlimited programmatic access to all the bits and bytes in the data set. Here are a few directions for unleashing that power. * **[display](display.ipynb)** become an expert in creating pretty displays of your text structures * **[search](search.ipynb)** turbo charge your hand-coding with search templates * **[exportExcel](exportExcel.ipynb)** make tailor-made spreadsheets out of your results * **[share](share.ipynb)** draw in other people's data and let them use yours * **[similarLines](similarLines.ipynb)** spot the similarities between lines --- See the [cookbook](cookbook) for recipes for small, concrete tasks. CC-BY Dirk Roorda
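As a closing worked example (an editorial addition that uses only the `F`, `L`, `T`, and `A` calls introduced above), here is a small sketch that prints, for the first document, each face with its number of lines and the text of its first line:

```
firstDoc = F.otype.s("document")[0]

for face in L.d(firstDoc, otype="face"):
    lines = L.d(face, otype="line")
    # T.text(node) on a non-slot node falls back to the default format for its words
    firstLineText = T.text(lines[0]) if lines else ""
    print(f"{A.sectionStrFromNode(face):<20} {len(lines):>3} lines | {firstLineText}")
```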
# Distributed DeepRacer RL training with SageMaker and RoboMaker --- ## Introduction In this notebook, we will train a fully autonomous 1/18th scale race car using reinforcement learning using Amazon SageMaker RL and AWS RoboMaker's 3D driving simulator. [AWS RoboMaker](https://console.aws.amazon.com/robomaker/home#welcome) is a service that makes it easy for developers to develop, test, and deploy robotics applications. This notebook provides a jailbreak experience of [AWS DeepRacer](https://console.aws.amazon.com/deepracer/home#welcome), giving us more control over the training/simulation process and RL algorithm tuning. ![Training in Action](./deepracer-reinvent-track.jpg) --- ## How it works? ![How training works](./training.png) The reinforcement learning agent (i.e. our autonomous car) learns to drive by interacting with its environment, e.g., the track, by taking an action in a given state to maximize the expected reward. The agent learns the optimal plan of actions in training by trial-and-error through repeated episodes. The figure above shows an example of distributed RL training across SageMaker and two RoboMaker simulation envrionments that perform the **rollouts** - execute a fixed number of episodes using the current model or policy. The rollouts collect agent experiences (state-transition tuples) and share this data with SageMaker for training. SageMaker updates the model policy which is then used to execute the next sequence of rollouts. This training loop continues until the model converges, i.e. the car learns to drive and stops going off-track. More formally, we can define the problem in terms of the following: 1. **Objective**: Learn to drive autonomously by staying close to the center of the track. 2. **Environment**: A 3D driving simulator hosted on AWS RoboMaker. 3. **State**: The driving POV image captured by the car's head camera, as shown in the illustration above. 4. **Action**: Six discrete steering wheel positions at different angles (configurable) 5. **Reward**: Positive reward for staying close to the center line; High penalty for going off-track. This is configurable and can be made more complex (for e.g. steering penalty can be added). ## Prequisites ### Run these command if you wish to modify the SageMaker and Robomaker code <span style="color:red">Note: Make sure you have atleast 25 GB of space when you are planning to modify the Sagemaker and Robomaker code</span> ``` # # # # Run these commands only for the first time # # # # Clean the build directory if present # !python3 sim_app_bundler.py --clean # # Download Robomaker simApp from the deepracer public s3 bucket # simulation_application_bundle_location = "s3://deepracer-managed-resources-us-east-1/deepracer-simapp.tar.gz" # !aws s3 cp {simulation_application_bundle_location} ./ # # Untar the simapp bundle # !python3 sim_app_bundler.py --untar ./deepracer-simapp.tar.gz # # Now modify the simapp(Robomaker) from build directory and run this command. # # Most of the simapp files can be found here (Robomaker changes). 
You can modify them in these locations # # bundle/opt/install/sagemaker_rl_agent/lib/python3.5/site-packages/ # # bundle/opt/install/deepracer_simulation_environment/share/deepracer_simulation_environment/ # # bundle/opt/install/deepracer_simulation_environment/lib/deepracer_simulation_environment/ # # # Copying the notebook src/markov changes to the simapp (For sagemaker container) # !rsync -av ./src/markov/ ./build/simapp/bundle/opt/install/sagemaker_rl_agent/lib/python3.5/site-packages/markov # print("############################################") # print("This command execution takes around >2 min...") # !python3 sim_app_bundler.py --tar ``` ### Imports To get started, we'll import the Python libraries we need, set up the environment with a few prerequisites for permissions and configurations. You can run this notebook from your local machine or from a SageMaker notebook instance. In both of these scenarios, you can run the following to launch a training job on SageMaker and a simulation job on RoboMaker. ``` import boto3 import sagemaker import sys import os import re import numpy as np import subprocess import yaml sys.path.append("common") sys.path.append("./src") from misc import get_execution_role, wait_for_s3_object from docker_utils import build_and_push_docker_image from sagemaker.rl import RLEstimator, RLToolkit, RLFramework from time import gmtime, strftime import time from IPython.display import Markdown from markdown_helper import * ``` ### Initializing basic parameters ``` # Select the instance type instance_type = "ml.c4.2xlarge" #instance_type = "ml.p2.xlarge" #instance_type = "ml.c5.4xlarge" # Starting SageMaker session sage_session = sagemaker.session.Session() # Create unique job name. job_name_prefix = 'deepracer-notebook' # Duration of job in seconds (1 hours) job_duration_in_seconds = 3600 # AWS Region aws_region = sage_session.boto_region_name if aws_region not in ["us-west-2", "us-east-1", "eu-west-1"]: raise Exception("This notebook uses RoboMaker which is available only in US East (N. Virginia)," "US West (Oregon) and EU (Ireland). Please switch to one of these regions.") ``` ### Setup S3 bucket Set up the linkage and authentication to the S3 bucket that we want to use for checkpoint and metadata. ``` # S3 bucket s3_bucket = sage_session.default_bucket() # SDK appends the job name and output folder s3_output_path = 's3://{}/'.format(s3_bucket) #Ensure that the S3 prefix contains the keyword 'sagemaker' s3_prefix = job_name_prefix + "-sagemaker-" + strftime("%y%m%d-%H%M%S", gmtime()) # Get the AWS account id of this account sts = boto3.client("sts") account_id = sts.get_caller_identity()['Account'] print("Using s3 bucket {}".format(s3_bucket)) print("Model checkpoints and other metadata will be stored at: \ns3://{}/{}".format(s3_bucket, s3_prefix)) ``` ### Create an IAM role Either get the execution role when running from a SageMaker notebook `role = sagemaker.get_execution_role()` or, when running from local machine, use utils method `role = get_execution_role('role_name')` to create an execution role. ``` try: sagemaker_role = sagemaker.get_execution_role() except: sagemaker_role = get_execution_role('sagemaker') print("Using Sagemaker IAM role arn: \n{}".format(sagemaker_role)) ``` > Please note that this notebook cannot be run in `SageMaker local mode` as the simulator is based on AWS RoboMaker service. 
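Before moving on, it can help to confirm which services the execution role above already trusts. The cell below is an optional sketch using boto3's IAM client (it is not part of the original walkthrough); it assumes the notebook's credentials are allowed to call `iam:GetRole`.

```
# Optional: inspect the role's current trust policy to see which services
# (e.g. sagemaker.amazonaws.com, robomaker.amazonaws.com) may assume this role.
iam = boto3.client("iam")
role_name = sagemaker_role.split("/")[-1]
trust_policy = iam.get_role(RoleName=role_name)["Role"]["AssumeRolePolicyDocument"]
print(trust_policy)
```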
### Permission setup for invoking AWS RoboMaker from this notebook In order to enable this notebook to be able to execute AWS RoboMaker jobs, we need to add one trust relationship to the default execution role of this notebook. ``` display(Markdown(generate_help_for_robomaker_trust_relationship(sagemaker_role))) ``` ### Permission setup for Sagemaker to S3 bucket The sagemaker writes the Redis IP address, models to the S3 bucket. This requires PutObject permission on the bucket. Make sure the sagemaker role you are using as this permissions. ``` display(Markdown(generate_s3_write_permission_for_sagemaker_role(sagemaker_role))) ``` ### Permission setup for Sagemaker to create KinesisVideoStreams The sagemaker notebook has to create a kinesis video streamer. You can observer the car making epsiodes in the kinesis video streamer. ``` display(Markdown(generate_kinesis_create_permission_for_sagemaker_role(sagemaker_role))) ``` ### Build and push docker image The file ./Dockerfile contains all the packages that are installed into the docker. Instead of using the default sagemaker container. We will be using this docker container. ``` %%time from copy_to_sagemaker_container import get_sagemaker_docker, copy_to_sagemaker_container, get_custom_image_name cpu_or_gpu = 'gpu' if instance_type.startswith('ml.p') else 'cpu' repository_short_name = "sagemaker-docker-%s" % cpu_or_gpu custom_image_name = get_custom_image_name(repository_short_name) try: print("Copying files from your notebook to existing sagemaker container") sagemaker_docker_id = get_sagemaker_docker(repository_short_name) copy_to_sagemaker_container(sagemaker_docker_id, repository_short_name) except Exception as e: print("Creating sagemaker container") docker_build_args = { 'CPU_OR_GPU': cpu_or_gpu, 'AWS_REGION': boto3.Session().region_name, } custom_image_name = build_and_push_docker_image(repository_short_name, build_args=docker_build_args) print("Using ECR image %s" % custom_image_name) ``` ### Clean the docker images Remove this only when you want to completely remove the docker or clean up the space of the sagemaker instance ``` # !docker rm -f $(docker ps -a -q); # !docker rmi -f $(docker images -q); ``` ### Configure VPC Since SageMaker and RoboMaker have to communicate with each other over the network, both of these services need to run in VPC mode. This can be done by supplying subnets and security groups to the job launching scripts. We will check if the deepracer-vpc stack is created and use it if present (This is present if the AWS Deepracer console is used atleast once to create a model). Else we will use the default VPC stack. ``` ec2 = boto3.client('ec2') # # Check if the user has Deepracer-VPC and use that if its present. This will have all permission. # This VPC will be created when you have used the Deepracer console and created one model atleast # If this is not present. Use the default VPC connnection # deepracer_security_groups = [group["GroupId"] for group in ec2.describe_security_groups()['SecurityGroups']\ if group['GroupName'].startswith("aws-deepracer-")] # deepracer_security_groups = False if(deepracer_security_groups): print("Using the DeepRacer VPC stacks. 
This will be created if you run one training job from console.") deepracer_vpc = [vpc['VpcId'] for vpc in ec2.describe_vpcs()['Vpcs'] \ if "Tags" in vpc for val in vpc['Tags'] \ if val['Value'] == 'deepracer-vpc'][0] deepracer_subnets = [subnet["SubnetId"] for subnet in ec2.describe_subnets()["Subnets"] \ if subnet["VpcId"] == deepracer_vpc] else: print("Using the default VPC stacks") deepracer_vpc = [vpc['VpcId'] for vpc in ec2.describe_vpcs()['Vpcs'] if vpc["IsDefault"] == True][0] deepracer_security_groups = [group["GroupId"] for group in ec2.describe_security_groups()['SecurityGroups'] \ if 'VpcId' in group and group["GroupName"] == "default" and group["VpcId"] == deepracer_vpc] deepracer_subnets = [subnet["SubnetId"] for subnet in ec2.describe_subnets()["Subnets"] \ if subnet["VpcId"] == deepracer_vpc and subnet['DefaultForAz']==True] print("Using VPC:", deepracer_vpc) print("Using security group:", deepracer_security_groups) print("Using subnets:", deepracer_subnets) ``` ### Create Route Table A SageMaker job running in VPC mode cannot access S3 resourcs. So, we need to create a VPC S3 endpoint to allow S3 access from SageMaker container. To learn more about the VPC mode, please visit [this link.](https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html) ``` #TODO: Explain to customer what CREATE_ROUTE_TABLE is doing CREATE_ROUTE_TABLE = True def create_vpc_endpoint_table(): print("Creating ") try: route_tables = [route_table["RouteTableId"] for route_table in ec2.describe_route_tables()['RouteTables']\ if route_table['VpcId'] == deepracer_vpc] except Exception as e: if "UnauthorizedOperation" in str(e): display(Markdown(generate_help_for_s3_endpoint_permissions(sagemaker_role))) else: display(Markdown(create_s3_endpoint_manually(aws_region, deepracer_vpc))) raise e print("Trying to attach S3 endpoints to the following route tables:", route_tables) if not route_tables: raise Exception(("No route tables were found. Please follow the VPC S3 endpoint creation " "guide by clicking the above link.")) try: ec2.create_vpc_endpoint(DryRun=False, VpcEndpointType="Gateway", VpcId=deepracer_vpc, ServiceName="com.amazonaws.{}.s3".format(aws_region), RouteTableIds=route_tables) print("S3 endpoint created successfully!") except Exception as e: if "RouteAlreadyExists" in str(e): print("S3 endpoint already exists.") elif "UnauthorizedOperation" in str(e): display(Markdown(generate_help_for_s3_endpoint_permissions(role))) raise e else: display(Markdown(create_s3_endpoint_manually(aws_region, deepracer_vpc))) raise e if CREATE_ROUTE_TABLE: create_vpc_endpoint_table() ``` ## Setup the environment The environment is defined in a Python file called “deepracer_racetrack_env.py” and the file can be found at `src/markov/environments/`. This file implements the gym interface for our Gazebo based RoboMakersimulator. This is a common environment file used by both SageMaker and RoboMaker. The environment variable - `NODE_TYPE` defines which node the code is running on. So, the expressions that have `rospy` dependencies are executed on RoboMaker only. We can experiment with different reward functions by modifying `reward_function` in `src/markov/rewards/`. Action space and steering angles can be changed by modifying `src/markov/actions/`.json file ### Configure the preset for RL algorithm The parameters that configure the RL training job are defined in `src/markov/presets/`. Using the preset file, you can define agent parameters to select the specific agent algorithm. 
We suggest using Clipped PPO for this example. You can edit this file to modify algorithm parameters like learning_rate, neural network structure, batch_size, discount factor etc. ``` # Uncomment the pygmentize code lines to see the code # Reward function #!pygmentize src/markov/rewards/default.py # Action space #!pygmentize src/markov/actions/single_speed_stereo_shallow.json # Preset File #!pygmentize src/markov/presets/default.py #!pygmentize src/markov/presets/preset_attention_layer.py ``` ### Copy custom files to S3 bucket so that sagemaker & robomaker can pick it up ``` s3_location = "s3://%s/%s" % (s3_bucket, s3_prefix) print(s3_location) # Clean up the previously uploaded files !aws s3 rm --recursive {s3_location} !aws s3 cp ./src/artifacts/rewards/default.py {s3_location}/customer_reward_function.py !aws s3 cp ./src/artifacts/actions/default.json {s3_location}/model/model_metadata.json #!aws s3 cp src/markov/presets/default.py {s3_location}/presets/preset.py #!aws s3 cp src/markov/presets/preset_attention_layer.py {s3_location}/presets/preset.py ``` ### Train the RL model using the Python SDK Script mode Next, we define the following algorithm metrics that we want to capture from cloudwatch logs to monitor the training progress. These are algorithm specific parameters and might change for different algorithm. We use [Clipped PPO](https://coach.nervanasys.com/algorithms/policy_optimization/cppo/index.html) for this example. ``` metric_definitions = [ # Training> Name=main_level/agent, Worker=0, Episode=19, Total reward=-102.88, Steps=19019, Training iteration=1 {'Name': 'reward-training', 'Regex': '^Training>.*Total reward=(.*?),'}, # Policy training> Surrogate loss=-0.32664725184440613, KL divergence=7.255815035023261e-06, Entropy=2.83156156539917, training epoch=0, learning_rate=0.00025 {'Name': 'ppo-surrogate-loss', 'Regex': '^Policy training>.*Surrogate loss=(.*?),'}, {'Name': 'ppo-entropy', 'Regex': '^Policy training>.*Entropy=(.*?),'}, # Testing> Name=main_level/agent, Worker=0, Episode=19, Total reward=1359.12, Steps=20015, Training iteration=2 {'Name': 'reward-testing', 'Regex': '^Testing>.*Total reward=(.*?),'}, ] ``` We use the RLEstimator for training RL jobs. 1. Specify the source directory which has the environment file, preset and training code. 2. Specify the entry point as the training code 3. Specify the choice of RL toolkit and framework. This automatically resolves to the ECR path for the RL Container. 4. Define the training parameters such as the instance count, instance type, job name, s3_bucket and s3_prefix for storing model checkpoints and metadata. **Only 1 training instance is supported for now.** 4. Set the RLCOACH_PRESET as "deepracer" for this example. 5. Define the metrics definitions that you are interested in capturing in your logs. These can also be visualized in CloudWatch and SageMaker Notebooks. 
``` estimator = RLEstimator(entry_point="training_worker.py", source_dir='src', image_name=custom_image_name, dependencies=["common/"], role=sagemaker_role, train_instance_type=instance_type, train_instance_count=1, output_path=s3_output_path, base_job_name=job_name_prefix, metric_definitions=metric_definitions, train_max_run=job_duration_in_seconds, hyperparameters={ "s3_bucket": s3_bucket, "s3_prefix": s3_prefix, "aws_region": aws_region, "model_metadata_s3_key": "%s/model/model_metadata.json" % s3_prefix, "reward_function_s3_source": "%s/customer_reward_function.py" % s3_prefix, "batch_size": "64", "num_epochs": "10", "stack_size": "1", "lr": "0.0003", "exploration_type": "Categorical", "e_greedy_value": "1", "epsilon_steps": "10000", "beta_entropy": "0.01", "discount_factor": "0.999", "loss_type": "Huber", "num_episodes_between_training": "20", "max_sample_count": "0", "sampling_frequency": "1" # ,"pretrained_s3_bucket": "sagemaker-us-east-1-259455987231" # ,"pretrained_s3_prefix": "deepracer-notebook-sagemaker-200729-202318" }, subnets=deepracer_subnets, security_group_ids=deepracer_security_groups, ) estimator.fit(wait=False) job_name = estimator.latest_training_job.job_name print("Training job: %s" % job_name) training_job_arn = estimator.latest_training_job.describe()['TrainingJobArn'] ``` ### Create the Kinesis video stream ``` kvs_stream_name = "dr-kvs-{}".format(job_name) !aws --region {aws_region} kinesisvideo create-stream --stream-name {kvs_stream_name} --media-type video/h264 --data-retention-in-hours 24 print ("Created kinesis video stream {}".format(kvs_stream_name)) ``` ### Start the Robomaker job ``` robomaker = boto3.client("robomaker") ``` ### Create Simulation Application ``` robomaker_s3_key = 'robomaker/simulation_ws.tar.gz' robomaker_source = {'s3Bucket': s3_bucket, 's3Key': robomaker_s3_key, 'architecture': "X86_64"} simulation_software_suite={'name': 'Gazebo', 'version': '7'} robot_software_suite={'name': 'ROS', 'version': 'Kinetic'} rendering_engine={'name': 'OGRE', 'version': '1.x'} ``` Download the DeepRacer bundle provided by RoboMaker service and upload it in our S3 bucket to create a RoboMaker Simulation Application ``` if not os.path.exists('./build/output.tar.gz'): print("Using the latest simapp from public s3 bucket") # Download Robomaker simApp for the deepracer public s3 bucket simulation_application_bundle_location = "s3://deepracer-managed-resources-us-east-1/deepracer-simapp.tar.gz" !aws s3 cp {simulation_application_bundle_location} ./ # Remove if the Robomaker sim-app is present in s3 bucket !aws s3 rm s3://{s3_bucket}/{robomaker_s3_key} # Uploading the Robomaker SimApp to your S3 bucket !aws s3 cp ./deepracer-simapp.tar.gz s3://{s3_bucket}/{robomaker_s3_key} # Cleanup the locally downloaded version of SimApp !rm deepracer-simapp.tar.gz else: print("Using the simapp from build directory") !aws s3 cp ./build/output.tar.gz s3://{s3_bucket}/{robomaker_s3_key} app_name = "deepracer-notebook-application" + strftime("%y%m%d-%H%M%S", gmtime()) print(app_name) try: response = robomaker.create_simulation_application(name=app_name, sources=[robomaker_source], simulationSoftwareSuite=simulation_software_suite, robotSoftwareSuite=robot_software_suite, renderingEngine=rendering_engine) simulation_app_arn = response["arn"] print("Created a new simulation app with ARN:", simulation_app_arn) except Exception as e: if "AccessDeniedException" in str(e): display(Markdown(generate_help_for_robomaker_all_permissions(role))) raise e else: raise e ``` ### Launch the 
Simulation job on RoboMaker We create [AWS RoboMaker](https://console.aws.amazon.com/robomaker/home#welcome) Simulation Jobs that simulates the environment and shares this data with SageMaker for training. ``` s3_yaml_name="training_params.yaml" world_name = "reInvent2019_track" # Change this for multiple rollouts. This will invoke the specified number of robomaker jobs to collect experience num_simulation_workers = 1 with open("./src/artifacts/yaml/training_yaml_template.yaml", "r") as filepointer: yaml_config = yaml.load(filepointer) yaml_config['WORLD_NAME'] = world_name yaml_config['SAGEMAKER_SHARED_S3_BUCKET'] = s3_bucket yaml_config['SAGEMAKER_SHARED_S3_PREFIX'] = s3_prefix yaml_config['TRAINING_JOB_ARN'] = training_job_arn yaml_config['METRICS_S3_BUCKET'] = s3_bucket yaml_config['METRICS_S3_OBJECT_KEY'] = "{}/training_metrics.json".format(s3_prefix) yaml_config['SIMTRACE_S3_BUCKET'] = s3_bucket yaml_config['SIMTRACE_S3_PREFIX'] = "{}/iteration-data/training".format(s3_prefix) yaml_config['AWS_REGION'] = aws_region yaml_config['ROBOMAKER_SIMULATION_JOB_ACCOUNT_ID'] = account_id yaml_config['KINESIS_VIDEO_STREAM_NAME'] = kvs_stream_name yaml_config['REWARD_FILE_S3_KEY'] = "{}/customer_reward_function.py".format(s3_prefix) yaml_config['MODEL_METADATA_FILE_S3_KEY'] = "{}/model/model_metadata.json".format(s3_prefix) yaml_config['NUM_WORKERS'] = num_simulation_workers yaml_config['MP4_S3_BUCKET'] = s3_bucket yaml_config['MP4_S3_OBJECT_PREFIX'] = "{}/iteration-data/training".format(s3_prefix) # Race-type supported for training are TIME_TRIAL, OBJECT_AVOIDANCE, HEAD_TO_BOT # If you need to modify more attributes look at the template yaml file race_type = "TIME_TRIAL" if race_type == "OBJECT_AVOIDANCE": yaml_config['NUMBER_OF_OBSTACLES'] = "6" yaml_config['RACE_TYPE'] = "OBJECT_AVOIDANCE" elif race_type == "HEAD_TO_BOT": yaml_config['NUMBER_OF_BOT_CARS'] = "6" yaml_config['RACE_TYPE'] = "HEAD_TO_BOT" # Printing the modified yaml parameter for key, value in yaml_config.items(): print("{}: {}".format(key.ljust(40, ' '), value)) # Uploading the modified yaml parameter with open("./training_params.yaml", "w") as filepointer: yaml.dump(yaml_config, filepointer) !aws s3 cp ./training_params.yaml {s3_location}/training_params.yaml !rm training_params.yaml vpcConfig = {"subnets": deepracer_subnets, "securityGroups": deepracer_security_groups, "assignPublicIp": True} responses = [] for job_no in range(num_simulation_workers): client_request_token = strftime("%Y-%m-%d-%H-%M-%S", gmtime()) envriron_vars = { "S3_YAML_NAME": s3_yaml_name, "SAGEMAKER_SHARED_S3_PREFIX": s3_prefix, "SAGEMAKER_SHARED_S3_BUCKET": s3_bucket, "WORLD_NAME": world_name, "KINESIS_VIDEO_STREAM_NAME": kvs_stream_name, "APP_REGION": aws_region, "MODEL_METADATA_FILE_S3_KEY": "%s/model/model_metadata.json" % s3_prefix, "ROLLOUT_IDX": str(job_no) } simulation_application = {"application":simulation_app_arn, "launchConfig": {"packageName": "deepracer_simulation_environment", "launchFile": "distributed_training.launch", "environmentVariables": envriron_vars} } response = robomaker.create_simulation_job(iamRole=sagemaker_role, clientRequestToken=client_request_token, maxJobDurationInSeconds=job_duration_in_seconds, failureBehavior="Fail", simulationApplications=[simulation_application], vpcConfig=vpcConfig ) responses.append(response) time.sleep(5) print("Created the following jobs:") job_arns = [response["arn"] for response in responses] for job_arn in job_arns: print("Job ARN", job_arn) ``` ### Visualizing the simulations in RoboMaker 
You can visit the RoboMaker console to visualize the simulations or run the following cell to generate the hyperlinks. ``` display(Markdown(generate_robomaker_links(job_arns, aws_region))) ``` ### Creating temporary folder top plot metrics ``` tmp_dir = "/tmp/{}".format(job_name) os.system("mkdir {}".format(tmp_dir)) print("Create local folder {}".format(tmp_dir)) ``` ### Plot metrics for training job ``` %matplotlib inline import pandas as pd import json training_metrics_file = "training_metrics.json" training_metrics_path = "{}/{}".format(s3_prefix, training_metrics_file) wait_for_s3_object(s3_bucket, training_metrics_path, tmp_dir) json_file = "{}/{}".format(tmp_dir, training_metrics_file) with open(json_file) as fp: data = json.load(fp) df = pd.DataFrame(data['metrics']) x_axis = 'episode' y_axis = 'reward_score' plt = df.plot(x=x_axis,y=y_axis, figsize=(12,5), legend=True, style='b-') plt.set_ylabel(y_axis); plt.set_xlabel(x_axis); ``` ### Clean up RoboMaker and SageMaker training job Execute the cells below if you want to kill RoboMaker and SageMaker job. ``` # # Cancelling robomaker job # for job_arn in job_arns: # robomaker.cancel_simulation_job(job=job_arn) # # Stopping sagemaker training job # sage_session.sagemaker_client.stop_training_job(TrainingJobName=estimator._current_job_name) ``` # Evaluation (Time trail, Object avoidance, Head to bot) ``` s3_yaml_name="evaluation_params.yaml" world_name = "reInvent2019_track" with open("./src/artifacts/yaml/evaluation_yaml_template.yaml", "r") as filepointer: yaml_config = yaml.load(filepointer) yaml_config['WORLD_NAME'] = world_name yaml_config['MODEL_S3_BUCKET'] = s3_bucket yaml_config['MODEL_S3_PREFIX'] = s3_prefix yaml_config['AWS_REGION'] = aws_region yaml_config['METRICS_S3_BUCKET'] = s3_bucket yaml_config['METRICS_S3_OBJECT_KEY'] = "{}/evaluation_metrics.json".format(s3_prefix) yaml_config['SIMTRACE_S3_BUCKET'] = s3_bucket yaml_config['SIMTRACE_S3_PREFIX'] = "{}/iteration-data/evaluation".format(s3_prefix) yaml_config['ROBOMAKER_SIMULATION_JOB_ACCOUNT_ID'] = account_id yaml_config['NUMBER_OF_TRIALS'] = "5" yaml_config['MP4_S3_BUCKET'] = s3_bucket yaml_config['MP4_S3_OBJECT_PREFIX'] = "{}/iteration-data/evaluation".format(s3_prefix) # Race-type supported for training are TIME_TRIAL, OBJECT_AVOIDANCE, HEAD_TO_BOT # If you need to modify more attributes look at the template yaml file race_type = "TIME_TRIAL" if race_type == "OBJECT_AVOIDANCE": yaml_config['NUMBER_OF_OBSTACLES'] = "6" yaml_config['RACE_TYPE'] = "OBJECT_AVOIDANCE" elif race_type == "HEAD_TO_BOT": yaml_config['NUMBER_OF_BOT_CARS'] = "6" yaml_config['RACE_TYPE'] = "HEAD_TO_BOT" # Printing the modified yaml parameter for key, value in yaml_config.items(): print("{}: {}".format(key.ljust(40, ' '), value)) # Uploading the modified yaml parameter with open("./evaluation_params.yaml", "w") as filepointer: yaml.dump(yaml_config, filepointer) !aws s3 cp ./evaluation_params.yaml {s3_location}/evaluation_params.yaml !rm evaluation_params.yaml num_simulation_workers = 1 envriron_vars = { "S3_YAML_NAME": s3_yaml_name, "MODEL_S3_PREFIX": s3_prefix, "MODEL_S3_BUCKET": s3_bucket, "WORLD_NAME": world_name, "KINESIS_VIDEO_STREAM_NAME": kvs_stream_name, "APP_REGION": aws_region, "MODEL_METADATA_FILE_S3_KEY": "%s/model/model_metadata.json" % s3_prefix } simulation_application = { "application":simulation_app_arn, "launchConfig": { "packageName": "deepracer_simulation_environment", "launchFile": "evaluation.launch", "environmentVariables": envriron_vars } } vpcConfig = {"subnets": 
deepracer_subnets, "securityGroups": deepracer_security_groups, "assignPublicIp": True} responses = [] for job_no in range(num_simulation_workers): response = robomaker.create_simulation_job(clientRequestToken=strftime("%Y-%m-%d-%H-%M-%S", gmtime()), outputLocation={ "s3Bucket": s3_bucket, "s3Prefix": s3_prefix }, maxJobDurationInSeconds=job_duration_in_seconds, iamRole=sagemaker_role, failureBehavior="Fail", simulationApplications=[simulation_application], vpcConfig=vpcConfig) responses.append(response) print("Created the following jobs:") job_arns = [response["arn"] for response in responses] for job_arn in job_arns: print("Job ARN", job_arn) ``` ### Visualizing the simulations in RoboMaker You can visit the RoboMaker console to visualize the simulations or run the following cell to generate the hyperlinks. ``` display(Markdown(generate_robomaker_links(job_arns, aws_region))) ``` ### Creating temporary folder top plot metrics ``` evaluation_metrics_file = "evaluation_metrics.json" evaluation_metrics_path = "{}/{}".format(s3_prefix, evaluation_metrics_file) wait_for_s3_object(s3_bucket, evaluation_metrics_path, tmp_dir) json_file = "{}/{}".format(tmp_dir, evaluation_metrics_file) with open(json_file) as fp: data = json.load(fp) df = pd.DataFrame(data['metrics']) # Converting milliseconds to seconds df['elapsed_time'] = df['elapsed_time_in_milliseconds']/1000 df = df[['trial', 'completion_percentage', 'elapsed_time']] display(df) ``` ### Clean Up Simulation Application Resource ``` # robomaker.delete_simulation_application(application=simulation_app_arn) ``` ### Clean your S3 bucket (Uncomment the awscli commands if you want to do it) ``` ## Uncomment if you only want to clean the s3 bucket # sagemaker_s3_folder = "s3://{}/{}".format(s3_bucket, s3_prefix) # !aws s3 rm --recursive {sagemaker_s3_folder} # robomaker_s3_folder = "s3://{}/{}".format(s3_bucket, job_name) # !aws s3 rm --recursive {robomaker_s3_folder} # robomaker_sim_app = "s3://{}/{}".format(s3_bucket, 'robomaker') # !aws s3 rm --recursive {robomaker_sim_app} # model_output = "s3://{}/{}".format(s3_bucket, s3_bucket) # !aws s3 rm --recursive {model_output} ``` # Head-to-head Evaluation ``` # S3 bucket s3_bucket_2 = sage_session.default_bucket() # Ensure that the S3 prefix contains the keyword 'sagemaker' # s3_prefix_2 = "deepracer-notebook-sagemaker-200422-231836" s3_prefix_2 = "deepracer-notebook-sagemaker-200422-231836" if not s3_prefix_2: raise Exception("Please provide the second agents s3_prefix and s3_bucket. 
The prefix would have sagemaker in between") print("Using s3 bucket {}".format(s3_bucket_2)) print("Model checkpoints and other metadata will be stored at: \ns3://{}/{}".format(s3_bucket_2, s3_prefix_2)) s3_yaml_name="evaluation_params.yaml" world_name = "reInvent2019_track" with open("./src/artifacts/yaml/head2head_yaml_template.yaml", "r") as filepointer: yaml_config = yaml.load(filepointer) yaml_config['WORLD_NAME'] = world_name yaml_config['MODEL_S3_BUCKET'] = [s3_bucket, s3_bucket_2] yaml_config['MODEL_S3_PREFIX'] = [s3_prefix, s3_prefix_2] yaml_config['MODEL_METADATA_FILE_S3_KEY'] =["{}/model/model_metadata.json".format(s3_prefix), "{}/model/model_metadata.json".format(s3_prefix_2)] yaml_config['AWS_REGION'] = aws_region yaml_config['METRICS_S3_BUCKET'] = [s3_bucket, s3_bucket_2] yaml_config['METRICS_S3_OBJECT_KEY'] = ["{}/evaluation_metrics.json".format(s3_prefix), "{}/evaluation_metrics.json".format(s3_prefix_2)] yaml_config['SIMTRACE_S3_BUCKET'] = [s3_bucket, s3_bucket_2] yaml_config['SIMTRACE_S3_PREFIX'] = ["{}/iteration-data/evaluation".format(s3_prefix), "{}/iteration-data/evaluation".format(s3_prefix_2)] yaml_config['ROBOMAKER_SIMULATION_JOB_ACCOUNT_ID'] = account_id yaml_config['NUMBER_OF_TRIALS'] = "5" yaml_config['MP4_S3_BUCKET'] = [s3_bucket, s3_bucket_2] yaml_config['MP4_S3_OBJECT_PREFIX'] = ["{}/iteration-data/evaluation".format(s3_prefix), "{}/iteration-data/evaluation".format(s3_prefix_2)] # Race-type supported for training are TIME_TRIAL, OBJECT_AVOIDANCE, HEAD_TO_BOT # If you need to modify more attributes look at the template yaml file race_type = "TIME_TRIAL" if race_type == "OBJECT_AVOIDANCE": yaml_config['NUMBER_OF_OBSTACLES'] = "6" yaml_config['RACE_TYPE'] = "OBJECT_AVOIDANCE" elif race_type == "HEAD_TO_BOT": yaml_config['NUMBER_OF_BOT_CARS'] = "6" yaml_config['RACE_TYPE'] = "HEAD_TO_BOT" # Printing the modified yaml parameter for key, value in yaml_config.items(): print("{}: {}".format(key.ljust(40, ' '), value)) # Uploading the modified yaml parameter with open("./evaluation_params.yaml", "w") as filepointer: yaml.dump(yaml_config, filepointer) !aws s3 cp ./evaluation_params.yaml {s3_location}/evaluation_params.yaml !rm evaluation_params.yaml num_simulation_workers = 1 envriron_vars = { "S3_YAML_NAME": s3_yaml_name, "MODEL_S3_PREFIX": s3_prefix, "MODEL_S3_BUCKET": s3_bucket, "WORLD_NAME": world_name, "KINESIS_VIDEO_STREAM_NAME": kvs_stream_name, "APP_REGION": aws_region, "MODEL_METADATA_FILE_S3_KEY": "%s/model/model_metadata.json" % s3_prefix } simulation_application = { "application":simulation_app_arn, "launchConfig": { "packageName": "deepracer_simulation_environment", "launchFile": "evaluation.launch", "environmentVariables": envriron_vars } } vpcConfig = {"subnets": deepracer_subnets, "securityGroups": deepracer_security_groups, "assignPublicIp": True} responses = [] for job_no in range(num_simulation_workers): response = robomaker.create_simulation_job(clientRequestToken=strftime("%Y-%m-%d-%H-%M-%S", gmtime()), outputLocation={ "s3Bucket": s3_bucket, "s3Prefix": s3_prefix }, maxJobDurationInSeconds=job_duration_in_seconds, iamRole=sagemaker_role, failureBehavior="Fail", simulationApplications=[simulation_application], vpcConfig=vpcConfig) responses.append(response) print("Created the following jobs:") job_arns = [response["arn"] for response in responses] for job_arn in job_arns: print("Job ARN", job_arn) ``` ### Visualizing the simulations in RoboMaker You can visit the RoboMaker console to visualize the simulations or run the following cell to generate 
the hyperlinks. ``` display(Markdown(generate_robomaker_links(job_arns, aws_region))) ``` ### Creating temporary folder top plot metrics ``` evaluation_metrics_file = "evaluation_metrics.json" evaluation_metrics_path = "{}/{}".format(s3_prefix, evaluation_metrics_file) wait_for_s3_object(s3_bucket, evaluation_metrics_path, tmp_dir) json_file = "{}/{}".format(tmp_dir, evaluation_metrics_file) with open(json_file) as fp: data = json.load(fp) df_1 = pd.DataFrame(data['metrics']) # Converting milliseconds to seconds df_1['elapsed_time'] = df_1['elapsed_time_in_milliseconds']/1000 df_1 = df_1[['trial', 'completion_percentage', 'elapsed_time']] display(df_1) evaluation_metrics_file = "evaluation_metrics.json" evaluation_metrics_path = "{}/{}".format(s3_prefix_2, evaluation_metrics_file) wait_for_s3_object(s3_bucket_2, evaluation_metrics_path, tmp_dir) json_file = "{}/{}".format(tmp_dir, evaluation_metrics_file) with open(json_file) as fp: data = json.load(fp) df_2 = pd.DataFrame(data['metrics']) # Converting milliseconds to seconds df_2['elapsed_time'] = df_2['elapsed_time_in_milliseconds']/1000 df_2 = df_2[['trial', 'completion_percentage', 'elapsed_time']] display(df_2) ```
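To compare the two agents side by side once both evaluation tables are loaded, a small follow-up cell can join them on the trial number. This is only a sketch, and it assumes the two runs report the same set of trial numbers.

```
# Join the two agents' evaluation metrics on trial number for a side-by-side view.
comparison = df_1.merge(df_2, on='trial', suffixes=('_agent_1', '_agent_2'))
display(comparison)

# Simple per-agent summary of lap time and completion percentage.
print("Agent 1: mean elapsed_time = {:.2f} s, mean completion = {:.1f}%".format(
    df_1['elapsed_time'].mean(), df_1['completion_percentage'].mean()))
print("Agent 2: mean elapsed_time = {:.2f} s, mean completion = {:.1f}%".format(
    df_2['elapsed_time'].mean(), df_2['completion_percentage'].mean()))
```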
``` #!pip install pytorch_lightning #!pip install torchsummaryX !pip install webdataset # !pip install datasets # !pip install wandb #!pip install -r MedicalZooPytorch/installation/requirements.txt #!pip install torch==1.7.1+cu101 torchvision==0.8.2+cu101 torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html !git clone https://github.com/McMasterAI/Radiology-and-AI.git #--branch augmentation !git clone https://github.com/jcreinhold/intensity-normalization.git ! python intensity-normalization/setup.py install !pip install scikit-fuzzy from google.colab import drive drive.mount('/content/drive', force_remount=True) cd drive/MyDrive/MacAI import sys sys.path.append('./Radiology-and-AI/Radiology_and_AI') sys.path.append('./intensity-normalization') import os import torch import numpy as np import webdataset as wds import intensity_normalization from io import BytesIO from nibabel import FileHolder, Nifti1Image import torch import numpy as np from scipy.interpolate import RegularGridInterpolator from scipy.ndimage.filters import gaussian_filter from time import time import matplotlib.pyplot as plt import seaborn as sns from scipy.interpolate import interp1d train_dataset = wds.Dataset("macai_datasets/brats/train/brats_train.tar.gz") eval_dataset = wds.Dataset("macai_datasets/brats/validation/brats_validation.tar.gz") def np_img_collator(batch): bytes_data_list = [list(batch[i].items())[1][1] for i in range(5)] bytes_data_keys = [list(batch[i].items())[0][1].split('_')[-1] for i in range(5)] bytes_data_dict = dict(zip(bytes_data_keys,bytes_data_list)) bb = BytesIO(bytes_data_dict['flair']) fh = FileHolder(fileobj=bb) f_flair = Nifti1Image.from_file_map({'header': fh, 'image':fh}).get_fdata() bb = BytesIO(bytes_data_dict['seg']) fh = FileHolder(fileobj=bb) f_seg = Nifti1Image.from_file_map({'header': fh, 'image':fh}).get_fdata() bb = BytesIO(bytes_data_dict['t1']) fh = FileHolder(fileobj=bb) f_t1 = Nifti1Image.from_file_map({'header': fh, 'image':fh}).get_fdata() bb = BytesIO(bytes_data_dict['t1ce']) fh = FileHolder(fileobj=bb) f_t1ce=Nifti1Image.from_file_map({'header':fh, 'image':fh}).get_fdata() bb = BytesIO(bytes_data_dict['t2']) fh = FileHolder(fileobj=bb) f_t2 =Nifti1Image.from_file_map({'header':fh, 'image':fh}).get_fdata() padding = [(0, 0), (0, 0), (0, 0)]# last (2,3) f_flair = np.expand_dims(np.pad(f_flair, padding), axis=0) f_t1 = np.expand_dims(np.pad(f_t1, padding), axis=0) f_t2 = np.expand_dims(np.pad(f_t2, padding), axis=0) f_t1ce = np.expand_dims(np.pad(f_t1ce, padding), axis=0) f_seg = np.pad(f_seg, padding) concat = np.concatenate([f_t1, f_t1ce, f_t2, f_flair], axis=0) f_seg = np.expand_dims(f_seg, axis=0) return ([concat, f_seg]) train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=5,collate_fn=np_img_collator) def nyul_train_dataloader(dataloader, n_imgs = 4, i_min=1, i_max=99, i_s_min=1, i_s_max=100, l_percentile=10, u_percentile=90, step=20): """ determine the standard scale for the set of images Args: img_fns (list): set of NifTI MR image paths which are to be normalized mask_fns (list): set of corresponding masks (if not provided, estimated) i_min (float): minimum percentile to consider in the images i_max (float): maximum percentile to consider in the images i_s_min (float): minimum percentile on the standard scale i_s_max (float): maximum percentile on the standard scale l_percentile (int): middle percentile lower bound (e.g., for deciles 10) u_percentile (int): middle percentile upper bound (e.g., for deciles 90) step (int): step 
for middle percentiles (e.g., for deciles 10) Returns: standard_scale (np.ndarray): average landmark intensity for images percs (np.ndarray): array of all percentiles used """ percss = [np.concatenate(([i_min], np.arange(l_percentile, u_percentile+1, step), [i_max])) for _ in range(n_imgs)] standard_scales = [np.zeros(len(percss[0])) for _ in range(n_imgs)] iteration = 1 for all_img, seg_data in dataloader: print(iteration) # print(seg_data.shape) mask_data = seg_data mask_data[seg_data ==0] = 1 mask_data = np.squeeze(mask_data, axis=0) #mask_data[mask_data==2] = 0 # ignore edema for i in range(n_imgs): img_data = all_img[i] masked = img_data[mask_data > 0] landmarks = intensity_normalization.normalize.nyul.get_landmarks(masked, percss[i]) min_p = np.percentile(masked, i_min) max_p = np.percentile(masked, i_max) f = interp1d([min_p, max_p], [i_s_min, i_s_max]) landmarks = np.array(f(landmarks)) standard_scales[i] += landmarks iteration += 1 standard_scales = [scale / iteration for scale in standard_scales] return standard_scales, percss standard_scales, percss = nyul_train_dataloader(train_dataloader) def dataloader_hist_norm(img_data, landmark_percs, standard_scale, seg_data): """ do the Nyul and Udupa histogram normalization routine with a given set of learned landmarks Args: img (nibabel.nifti1.Nifti1Image): image on which to find landmarks landmark_percs (np.ndarray): corresponding landmark points of standard scale standard_scale (np.ndarray): landmarks on the standard scale mask (nibabel.nifti1.Nifti1Image): foreground mask for img Returns: normalized (nibabel.nifti1.Nifti1Image): normalized image """ mask_data = seg_data mask_data[seg_data ==0] = 1 mask_data = np.squeeze(mask_data, axis=0) masked = img_data[mask_data > 0] landmarks = intensity_normalization.normalize.nyul.get_landmarks(masked, landmark_percs) f = interp1d(landmarks, standard_scale, fill_value='extrapolate') normed = f(img_data) z = img_data z[img_data > 0] = normed[img_data > 0] return z #normed for all_img, seg_data in train_dataloader: for i, this_img in enumerate(all_img): if i == 0: transformed_img = dataloader_hist_norm(this_img, percss[i], standard_scales[i], seg_data) transformed_img = transformed_img[transformed_img>0] plt.hist(np.ravel(transformed_img), bins=30) plt.xlim(0, 150) plt.show() # plt.hist(np.ravel(this_img)) # plt.show() ```
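For completeness, here is a rough sketch of how the learned landmarks could be applied to every modality of a sample and re-stacked into one normalized volume. It reuses `dataloader_hist_norm`, `percss` and `standard_scales` from above and follows the channel ordering (t1, t1ce, t2, flair) produced by `np_img_collator`; treat it as an illustration rather than a tested preprocessing step.

```
def normalize_sample(all_img, seg_data):
    # Apply Nyul histogram normalization to each of the four modalities and
    # stack them back into a single (4, H, W, D) array.
    normed_channels = []
    for i in range(4):
        normed = dataloader_hist_norm(all_img[i], percss[i], standard_scales[i], seg_data)
        normed_channels.append(normed)
    return np.stack(normed_channels, axis=0)

# Example: normalize the first sample yielded by the training dataloader.
for all_img, seg_data in train_dataloader:
    normalized = normalize_sample(all_img, seg_data)
    print(normalized.shape)
    break
```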
``` import os import argparse import xml.etree.ElementTree as ET import pandas as pd import numpy as np import csv # Useful if you want to perform stemming. import nltk stemmer = nltk.stem.PorterStemmer() categories_file_name = r'/workspace/datasets/product_data/categories/categories_0001_abcat0010000_to_pcmcat99300050000.xml' queries_file_name = r'/workspace/datasets/train.csv' output_file_name = r'/workspace/datasets/labeled_query_data.txt' # parser = argparse.ArgumentParser(description='Process arguments.') # general = parser.add_argument_group("general") # general.add_argument("--min_queries", default=1, help="The minimum number of queries per category label (default is 1)") # general.add_argument("--output", default=output_file_name, help="the file to output to") # args = parser.parse_args() # output_file_name = args.output # if args.min_queries: # min_queries = int(args.min_queries) # The root category, named Best Buy with id cat00000, doesn't have a parent. min_queries = 10 root_category_id = 'cat00000' tree = ET.parse(categories_file_name) root = tree.getroot() # Parse the category XML file to map each category id to its parent category id in a dataframe. categories = [] parents = [] for child in root: id = child.find('id').text cat_path = child.find('path') cat_path_ids = [cat.find('id').text for cat in cat_path] leaf_id = cat_path_ids[-1] if leaf_id != root_category_id: categories.append(leaf_id) parents.append(cat_path_ids[-2]) parents_df = pd.DataFrame(list(zip(categories, parents)), columns =['category', 'parent']) # Read the training data into pandas, only keeping queries with non-root categories in our category tree. df = pd.read_csv(queries_file_name)[['category', 'query']] df = df[df['category'].isin(categories)] category_value_counts= pd.DataFrame(df['category'].value_counts().reset_index().\ rename(columns = {"index": "category", "category": "category_count"})) faulty_categories = list(category_value_counts[category_value_counts['category_count'] < min_queries]['category']) while len(faulty_categories) > 0: df.loc[df['category'].isin(faulty_categories), 'category'] = df['category'].\ map(parents_df.set_index('category')['parent']) category_value_counts= pd.DataFrame(df['category'].value_counts().reset_index().\ rename(columns = {"index": "category", "category": "category_count"})) faulty_categories = list(category_value_counts[category_value_counts['category_count'] < min_queries]['category']) # find faulty categories category_value_counts= pd.DataFrame(df['category'].value_counts().reset_index().\ rename(columns = {"index": "category", "category": "category_count"})) faulty_categories = list(category_value_counts[category_value_counts['category_count'] < min_queries]['category']) df.loc[df['category'].isin(faulty_categories), 'category'] = df['category'].map(parents_df.set_index('category')['parent']) faulty_categories df.isnull().sum() ```
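The cells above roll infrequent categories up to their parents, but nothing is ever written to `output_file_name`, even though a Porter stemmer is imported for query normalization. Below is a minimal sketch of one way to finish the job; the `__label__` prefix is an assumption (it follows the common fastText convention for labeled query data), not something taken from the notebook itself.

```
# Lowercase and stem each query, then write one "__label__<category> <query>"
# line per row to output_file_name.
def normalize_query(query):
    tokens = str(query).lower().split()
    return " ".join(stemmer.stem(token) for token in tokens)

with open(output_file_name, "w") as out:
    for category, query in zip(df["category"], df["query"]):
        out.write("__label__{} {}\n".format(category, normalize_query(query)))
```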
``` import re import pandas as pd import spacy from typing import List from math import sqrt, ceil # gensim from gensim import corpora from gensim.models.ldamulticore import LdaMulticore # plotting from matplotlib import pyplot as plt from wordcloud import WordCloud import matplotlib.colors as mcolors # progress bars from tqdm.notebook import tqdm tqdm.pandas() ``` ### Params ``` params = dict( num_topics = 15, iterations = 200, epochs = 20, minDF = 0.02, maxDF = 0.8, ) ``` #### Files Input CSV file and stopword files. ``` inputfile = "../../data/nytimes.tsv" stopwordfile = "../stopwords/custom_stopwords.txt" def get_stopwords(): # Read in stopwords with open(stopwordfile) as f: stopwords = [] for line in f: stopwords.append(line.strip("\n")) return stopwords stopwords = get_stopwords() ``` ### Read in New York Times Dataset A pre-processed version of the NYT news dataset is read in as a DataFrame. ``` def read_data(inputfile): "Read in a tab-separated file with date, headline and news content" df = pd.read_csv(inputfile, sep='\t', header=None, names=['date', 'headline', 'content']) df['date'] = pd.to_datetime(df['date'], format="%Y-%m-%d") return df df = read_data(inputfile) df.head() ``` ### Clean the input text We clean the text from each article's content to only contain relevant alphanumeric strings (symbols do not add any value to topic modelling). ``` def clean_data(df): "Extract relevant text from DataFrame using a regex" # Regex pattern for only alphanumeric, hyphenated text with 3 or more chars pattern = re.compile(r"[A-Za-z0-9\-]{3,50}") df['clean'] = df['content'].str.findall(pattern).str.join(' ') return df df_clean = clean_data(df) ``` #### (Optional) Subset the dataframe for testing Test on a subset of the full data for quicker results. ``` df1 = df_clean.iloc[:2000, :].copy() # df1 = df_clean.copy() ``` ### Preprocess text for topic modelling ``` def lemmatize(text, nlp): "Perform lemmatization and stopword removal in the clean text" doc = nlp(text) lemma_list = [str(tok.lemma_).lower() for tok in doc if tok.is_alpha and tok.text.lower() not in stopwords] return lemma_list def preprocess(df): "Preprocess text in each row of the DataFrame" nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner']) nlp.add_pipe(nlp.create_pipe('sentencizer')) df['lemmas'] = df['clean'].progress_apply(lambda row: lemmatize(row, nlp)) return df.drop('clean', axis=1) df_preproc = preprocess(df1) df_preproc.head(3) ``` ### Build LDA Topic Model #### Multicore LDA algorithm ``` # Choose number of workers for multicore LDA as (num_physical_cores - 1) def run_lda_multicore(text_df, params, workers=7): id2word = corpora.Dictionary(text_df['lemmas']) # Filter out words that occur in less than 2% documents or more than 50% of the documents. 
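    # NOTE: gensim's filter_extremes() treats no_below as an absolute document
    # count and no_above as a fraction of documents, so passing minDF=0.02 as
    # no_below effectively disables the lower cutoff, while maxDF=0.8 keeps
    # tokens appearing in up to 80% of documents (the comment above says 50%,
    # but the configured value is 0.8).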
id2word.filter_extremes(no_below=params['minDF'], no_above=params['maxDF']) corpus = [id2word.doc2bow(text) for text in text_df['lemmas']] # LDA Model lda_model = LdaMulticore( corpus=corpus, id2word=id2word, workers=workers, num_topics=params['num_topics'], random_state=1, chunksize=2048, passes=params['epochs'], iterations=params['iterations'], ) return lda_model, corpus ``` ### Wordclouds of most likely words in each topic ``` def plot_wordclouds(topics, colormap="cividis"): cloud = WordCloud( background_color='white', width=600, height=400, colormap=colormap, prefer_horizontal=1.0, ) num_topics = len(topics) fig_width = min(ceil(0.6 * num_topics + 6), 20) fig_height = min(ceil(0.65 * num_topics), 20) fig = plt.figure(figsize=(fig_width, fig_height)) for idx, word_weights in tqdm(enumerate(topics), total=num_topics): ax = fig.add_subplot((num_topics / 5) + 1, 5, idx + 1) wordcloud = cloud.generate_from_frequencies(word_weights) ax.imshow(wordcloud, interpolation="bilinear") ax.set_title('Topic {}'.format(idx + 1)) ax.set_xticklabels([]) ax.set_yticklabels([]) ax.tick_params(length=0) plt.tick_params(labelsize=14) plt.subplots_adjust(wspace=0.1, hspace=0.1) plt.margins(x=0.1, y=0.1) st = fig.suptitle("LDA Topics", y=0.92) fig.savefig("pyspark-topics.png", bbox_extra_artists=[st], bbox_inches='tight') ``` ### Run topic model and plot wordclouds ``` model, corpus = run_lda_multicore(df_preproc, params) ``` #### Convert topic words to a list of dicts ``` topic_list = model.show_topics(formatted=False, num_topics=params['num_topics'], num_words=15) topics = [dict(item[1]) for item in topic_list] plot_wordclouds(topics) ```
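As an optional follow-up that is not part of the original notebook, a coherence score gives a rough quantitative check on the topics shown in the wordclouds. The sketch below assumes gensim's `CoherenceModel` with the `c_v` measure and reuses the fitted `model` plus the lemmatized texts.

```
from gensim.models import CoherenceModel

# c_v coherence over the lemmatized documents; higher generally indicates
# more interpretable topics, which is useful when tuning num_topics in params.
coherence_model = CoherenceModel(
    model=model,
    texts=df_preproc["lemmas"].tolist(),
    dictionary=model.id2word,
    coherence="c_v",
)
print("c_v coherence: {:.3f}".format(coherence_model.get_coherence()))
```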
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. # Distributed CNTK using custom docker images In this tutorial, you will train a CNTK model on the [MNIST](http://yann.lecun.com/exdb/mnist/) dataset using a custom docker image and distributed training. ## Prerequisites * Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning * Go through the [configuration notebook](../../../configuration.ipynb) to: * install the AML SDK * create a workspace and its configuration file (`config.json`) ``` # Check core SDK version number import azureml.core print("SDK version:", azureml.core.VERSION) ``` ## Diagnostics Opt-in diagnostics for better experience, quality, and security of future releases. ``` from azureml.telemetry import set_diagnostics_collection set_diagnostics_collection(send_diagnostics=True) ``` ## Initialize workspace Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#workspace) object from the existing workspace you created in the Prerequisites step. `Workspace.from_config()` creates a workspace object from the details stored in `config.json`. ``` from azureml.core.workspace import Workspace ws = Workspace.from_config() print('Workspace name: ' + ws.name, 'Azure region: ' + ws.location, 'Subscription id: ' + ws.subscription_id, 'Resource group: ' + ws.resource_group, sep='\n') ``` ## Create or Attach existing AmlCompute You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource. **Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota. ``` from azureml.core.compute import ComputeTarget, AmlCompute from azureml.core.compute_target import ComputeTargetException # choose a name for your cluster cluster_name = "gpucluster" try: compute_target = ComputeTarget(workspace=ws, name=cluster_name) print('Found existing compute target.') except ComputeTargetException: print('Creating a new compute target...') compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6', max_nodes=4) # create the cluster compute_target = ComputeTarget.create(ws, cluster_name, compute_config) compute_target.wait_for_completion(show_output=True) # use get_status() to get a detailed status for the current AmlCompute print(compute_target.get_status().serialize()) ``` ## Upload training data For this tutorial, we will be using the MNIST dataset. First, let's download the dataset. We've included the `install_mnist.py` script to download the data and convert it to a CNTK-supported format. Our data files will get written to a directory named `'mnist'`. ``` import install_mnist install_mnist.main('mnist') ``` To make the data accessible for remote training, you will need to upload the data from your local machine to the cloud. 
AML provides a convenient way to do so via a [Datastore](https://docs.microsoft.com/azure/machine-learning/service/how-to-access-data). The datastore provides a mechanism for you to upload/download data, and interact with it from your remote compute targets. Each workspace is associated with a default datastore. In this tutorial, we will upload the training data to this default datastore, which we will then mount on the remote compute for training in the next section. ``` ds = ws.get_default_datastore() print(ds.datastore_type, ds.account_name, ds.container_name) ``` The following code will upload the training data to the path `./mnist` on the default datastore. ``` ds.upload(src_dir='./mnist', target_path='./mnist') ``` Now let's get a reference to the path on the datastore with the training data. We can do so using the `path` method. In the next section, we can then pass this reference to our training script's `--data_dir` argument. ``` path_on_datastore = 'mnist' ds_data = ds.path(path_on_datastore) print(ds_data) ``` ## Train model on the remote compute Now that we have the cluster ready to go, let's run our distributed training job. ### Create a project directory Create a directory that will contain all the necessary code from your local machine that you will need access to on the remote resource. This includes the training script, and any additional files your training script depends on. ``` import os project_folder = './cntk-distr' os.makedirs(project_folder, exist_ok=True) ``` Copy the training script `cntk_distr_mnist.py` into this project directory. ``` import shutil shutil.copy('cntk_distr_mnist.py', project_folder) ``` ### Create an experiment Create an [experiment](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#experiment) to track all the runs in your workspace for this distributed CNTK tutorial. ``` from azureml.core import Experiment experiment_name = 'cntk-distr' experiment = Experiment(ws, name=experiment_name) ``` ### Create an Estimator The AML SDK's base Estimator enables you to easily submit custom scripts for both single-node and distributed runs. You should this generic estimator for training code using frameworks such as sklearn or CNTK that don't have corresponding custom estimators. For more information on using the generic estimator, refer [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-train-ml-models). ``` from azureml.train.estimator import Estimator script_params = { '--num_epochs': 20, '--data_dir': ds_data.as_mount(), '--output_dir': './outputs' } estimator = Estimator(source_directory=project_folder, compute_target=compute_target, entry_script='cntk_distr_mnist.py', script_params=script_params, node_count=2, process_count_per_node=1, distributed_backend='mpi', pip_packages=['cntk-gpu==2.6'], custom_docker_base_image='microsoft/mmlspark:gpu-0.12', use_gpu=True) ``` We would like to train our model using a [pre-built Docker container](https://hub.docker.com/r/microsoft/mmlspark/). To do so, specify the name of the docker image to the argument `custom_docker_base_image`. You can only provide images available in public docker repositories such as Docker Hub using this argument. To use an image from a private docker repository, use the constructor's `environment_definition` parameter instead. Finally, we provide the `cntk` package to `pip_packages` to install CNTK 2.6 on our custom image. 
The above code specifies that we will run our training script on `2` nodes, with one worker per node. In order to run distributed CNTK, which uses MPI, you must provide the argument `distributed_backend='mpi'`. ### Submit job Run your experiment by submitting your estimator object. Note that this call is asynchronous. ``` run = experiment.submit(estimator) print(run) ``` ### Monitor your run You can monitor the progress of the run with a Jupyter widget. Like the run submission, the widget is asynchronous and provides live updates every 10-15 seconds until the job completes. ``` from azureml.widgets import RunDetails RunDetails(run).show() ``` Alternatively, you can block until the script has completed training before running more code. ``` run.wait_for_completion(show_output=True) ```
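Once the run has finished, you will usually want to pull the trained model back from the run's `./outputs` directory. The following cell is a small sketch using standard `Run` methods; the local folder name is arbitrary.

```
# List what the training script wrote to ./outputs, check the logged metrics,
# and download the outputs folder locally.
print(run.get_file_names())
print(run.get_metrics())

run.download_files(prefix='outputs', output_directory='./cntk-distr-model')
```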
# Initial_t_rad Bug The purpose of this notebook is to demonstrate the bug associated with setting the initial_t_rad tardis.plasma property. ``` pwd import tardis import numpy as np ``` ## Density and Abundance test files Below are the density and abundance data from the test files used for demonstrating this bug. ``` density_dat = np.loadtxt('data/density.txt',skiprows=1) abund_dat = np.loadtxt('data/abund.dat', skiprows=1) print(density_dat) print(abund_dat) ``` ## No initial_t_rad Below we run a simple tardis simulation where `initial_t_rad` is not set. The simulation has v_inner_boundary = 3350 km/s and v_outer_boundary = 3750 km/s, both within the velocity range in the density file. The simulation runs fine. ``` no_init_trad = tardis.run_tardis('data/config_no_init_trad.yml') no_init_trad.model.velocity no_init_trad.model.no_of_shells, no_init_trad.model.no_of_raw_shells print('raw velocity: \n',no_init_trad.model.raw_velocity) print('raw velocity shape: ',no_init_trad.model.raw_velocity.shape) print('(v_boundary_inner, v_boundary_outer) = (%i, %i)'% (no_init_trad.model.v_boundary_inner.to('km/s').value, no_init_trad.model.v_boundary_outer.to('km/s').value)) print('v_boundary_inner_index: ', no_init_trad.model.v_boundary_inner_index) print('v_boundary_outer_index: ', no_init_trad.model.v_boundary_outer_index) print('t_rad', no_init_trad.model.t_rad) ``` ## Debugging ``` %%debug init_trad = tardis.run_tardis('data/config_init_trad.yml') init_trad = tardis.run_tardis('data/config_init_trad.yml') ``` ## Debugging ## Debugging No initial_t_radiative run to compare with Yes initial_t_radiative run We place two breakpoints: break 1. tardis/base:37 --> Stops in the run_tardis() function when the simulation is initialized. break 2. tardis/simulation/base:436 --> Stops after the Radial1DModel has been built from the config file, but before the plasma has been initialized. ## IMPORTANT: We check the model.t_radiative property INSIDE the assemble_plasma function. Notice that it has len(model.t_radiative) = model.no_of_shells = 5 ``` %%debug no_init_trad = tardis.run_tardis('config_no_init_trad.yml') ``` ## Debugging Yes initial_t_radiative run We place the same two breakpoints as above: break 1. tardis/base:37 --> Stops in the run_tardis() function when the simulation is initialized. break 2. tardis/simulation/base:436 --> Stops after the Radial1DModel has been built from the config file, but before the plasma has been initialized. ## IMPORTANT: We check the model.t_radiative property INSIDE the assemble_plasma function. Notice that it has len(model.t_radiative) = 6 which is NOT EQUAL to model.no_of_shells = 5 ``` %%debug init_trad = tardis.run_tardis('config_init_trad.yml') ``` ## Checking model.t_radiative initialization when YES initial_t_rad In the above debugging blocks, we have identified the following discrepancy INSIDE assemble_plasma(): ### len(model.t_radiative) = 6 when YES initial_t_rad ### len(model.t_radiative) = 5 when NO initial_t_rad Therefore, we investigate in the following debugging block how model.t_radiative is initialized. We place a breakpoint at tardis/simulation/base:432 and step INSIDE the Radial1DModel initialization. Breakpoints: break 1. tardis/simulation/base:432 --> Stops so that we can step INSIDE Radial1DModel initialization from_config(). break 2. tardis/model/base:330 --> Where temperature is handled INSIDE Radial1DModel initialization from_config(). break 3. tardis/model/base:337 --> `t_radiative` is initialized. 
It has the same length as `velocity` which is the raw velocities from the density file. break 4. tardis/model/base:374 --> init() for Radial1DModel is called. We check values of relevant variables. break 5. tardis/model/base:76 --> Stops at first line of Radial1DModel init() function. break 6. tardis/model/base:101 --> self.\_t\_radiative is set. break 7. tardis/model/base:140 --> Stops at first line of self.t_radiative setter. break 8. tardis/model/base:132 --> Stops at first line of self.t_radiative getter. break 9. tardis/model/base:108 --> Stop right after self.\_t\_radiative is set. NOTICE that neither the setter nor the getter was called. __IMPORTANT:__ at line 108, we have len(self.\_t\_radiative) = 10. __TO DO:__ Check len(self.\_t\_radiative) at line 108 in the NO initial\_t\_rad case. ``` %%debug init_trad = tardis.run_tardis('config_init_trad.yml') ``` ## Checking self.\_t\_radiative initialization when NO initial_t_rad at line 108 __IMPORTANT:__ We find that len(self.\_t\_radiative) = 5. This is a DISCREPANCY with the YES initial_t_rad case. ``` %%debug no_init_trad = tardis.run_tardis('config_no_init_trad.yml') ``` ## CODE CHANGE: We propose the following change to tardis/model/base:106 __Line 106 Before Change:__ `self._t_radiative = t_radiative` __Line 106 After Change:__ `self._t_radiative = t_radiative[1:1 + self.no_of_shells]` t_radiative\[0\] corresponds to the temperature within the inner boundary, and so should be ignored. ``` init_trad = tardis.run_tardis('config_init_trad.yml') import numpy as np a = np.array([1,2,3,4,5,6,7,8]) a[3:8] a 2 in a np.argwhere(a==6)[0][0] np.searchsorted(a, 6.5) if (2 in a) and (3.5 in a): print('hi') assert 1==1.2, "test" a[3:6] ```
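To make the effect of the proposed slice concrete, here is a small standalone sketch (plain numpy with made-up numbers, not the actual TARDIS arrays) showing why dropping the first entry and keeping `no_of_shells` values restores the expected length:

```
import numpy as np

no_of_shells = 5
# Hypothetical t_radiative read from the config: one value per raw shell,
# where index 0 corresponds to the region inside the inner boundary.
t_radiative = np.linspace(12000, 8000, 10)

# Before the change: length 10, which does not match no_of_shells
print(len(t_radiative))                                   # 10

# After the proposed change: skip the inner-boundary value and keep
# exactly no_of_shells entries
t_radiative_trimmed = t_radiative[1:1 + no_of_shells]
print(len(t_radiative_trimmed), len(t_radiative_trimmed) == no_of_shells)  # 5 True
```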
github_jupyter
<a href="https://colab.research.google.com/github/MIT-LCP/sccm-datathon/blob/master/04_timeseries.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # eICU Collaborative Research Database # Notebook 4: Timeseries for a single patient This notebook explores timeseries data for a single patient. ## Load libraries and connect to the database ``` # Import libraries import numpy as np import os import pandas as pd import matplotlib.pyplot as plt # Make pandas dataframes prettier from IPython.display import display, HTML # Access data using Google BigQuery. from google.colab import auth from google.cloud import bigquery # authenticate auth.authenticate_user() # Set up environment variables project_id='sccm-datathon' os.environ["GOOGLE_CLOUD_PROJECT"]=project_id ``` ## Selecting a single patient stay ### The patient table The patient table includes general information about the patient admissions (for example, demographics, admission and discharge details). See: http://eicu-crd.mit.edu/eicutables/patient/ ``` # Display the patient table %%bigquery patient SELECT * FROM `physionet-data.eicu_crd_demo.patient` patient.head() ``` ### The `vitalperiodic` table The `vitalperiodic` table comprises data that is consistently interfaced from bedside vital signs monitors into eCareManager. Data are generally interfaced as 1 minute averages, and archived into the `vitalperiodic` table as 5 minute median values. For more detail, see: http://eicu-crd.mit.edu/eicutables/vitalPeriodic/ ``` # Get periodic vital signs for a single patient stay %%bigquery vitalperiodic SELECT * FROM `physionet-data.eicu_crd_demo.vitalperiodic` WHERE patientunitstayid = 210014 vitalperiodic.head() # sort the values by the observationoffset (time in minutes from ICU admission) vitalperiodic = vitalperiodic.sort_values(by='observationoffset') vitalperiodic.head() # subselect the variable columns columns = ['observationoffset','temperature','sao2','heartrate','respiration', 'cvp','etco2','systemicsystolic','systemicdiastolic','systemicmean', 'pasystolic','padiastolic','pamean','icp'] vitalperiodic = vitalperiodic[columns].set_index('observationoffset') vitalperiodic.head() # define the stay id used in the SQL queries above so it can be reused in plot titles patientunitstayid = 210014 # plot the data plt.rcParams['figure.figsize'] = [12,8] title = 'Vital signs (periodic) for patientunitstayid = {} \n'.format(patientunitstayid) ax = vitalperiodic.plot(title=title, marker='o') ax.legend(loc='center left', bbox_to_anchor=(1.0, 0.5)) ax.set_xlabel("Minutes after admission to the ICU") ax.set_ylabel("Absolute value") ``` ## Questions - Which variables are available for this patient? - What is the peak heart rate during the period? ### The vitalaperiodic table The vitalAperiodic table provides invasive vital sign data that is recorded at irregular intervals.
See: http://eicu-crd.mit.edu/eicutables/vitalAperiodic/ ``` # Get aperiodic vital signs %%bigquery vitalaperiodic SELECT * FROM `physionet-data.eicu_crd_demo.vitalaperiodic` WHERE patientunitstayid = 210014 # display the first few rows of the dataframe vitalaperiodic.head() # sort the values by the observationoffset (time in minutes from ICU admission) vitalaperiodic = vitalaperiodic.sort_values(by='observationoffset') vitalaperiodic.head() # subselect the variable columns columns = ['observationoffset','noninvasivesystolic','noninvasivediastolic', 'noninvasivemean','paop','cardiacoutput','cardiacinput','svr', 'svri','pvr','pvri'] vitalaperiodic = vitalaperiodic[columns].set_index('observationoffset') vitalaperiodic.head() # plot the data plt.rcParams['figure.figsize'] = [12,8] title = 'Vital signs (aperiodic) for patientunitstayid = {} \n'.format(patientunitstayid) ax = vitalaperiodic.plot(title=title, marker='o') ax.legend(loc='center left', bbox_to_anchor=(1.0, 0.5)) ax.set_xlabel("Minutes after admission to the ICU") ax.set_ylabel("Absolute value") ``` ## Questions - What do the non-invasive variables measure? - How do you think the mean is calculated? ## 3.4. The lab table ``` # Get labs %%bigquery lab SELECT * FROM `physionet-data.eicu_crd_demo.lab` WHERE patientunitstayid = 210014 lab.head() # sort the values by the offset time (time in minutes from ICU admission) lab = lab.sort_values(by='labresultoffset') lab.head() lab = lab.set_index('labresultoffset') columns = ['labname','labresult','labmeasurenamesystem'] lab = lab[columns] lab.head() # list the distinct labnames lab['labname'].unique() # pivot the lab table to put variables into columns lab = lab.pivot(columns='labname', values='labresult') lab.head() # plot laboratory tests of interest labs_to_plot = ['creatinine','pH','BUN', 'glucose', 'potassium'] lab[labs_to_plot].head() # plot the data plt.rcParams['figure.figsize'] = [12,8] title = 'Laboratory test results for patientunitstayid = {} \n'.format(patientunitstayid) ax = lab[labs_to_plot].plot(title=title, marker='o',ms=10, lw=0) ax.legend(loc='center left', bbox_to_anchor=(1.0, 0.5)) ax.set_xlabel("Minutes after admission to the ICU") ax.set_ylabel("Absolute value") ```
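As a small follow-up (not part of the original notebook), the dataframes built above can be used to answer the earlier questions directly, for example the peak heart rate and a quick numerical summary of the plotted labs:

```
# Peak heart rate recorded for this stay (vitalperiodic is indexed by observationoffset)
print('Peak heart rate:', vitalperiodic['heartrate'].max())

# Time (minutes after ICU admission) at which the peak occurred
print('Offset of peak heart rate:', vitalperiodic['heartrate'].idxmax())

# Summary statistics for the laboratory tests plotted above
lab[labs_to_plot].describe()
```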
github_jupyter
# Random Signals *This jupyter notebook is part of a [collection of notebooks](../index.ipynb) on various topics of Digital Signal Processing. Please direct questions and suggestions to [[email protected]](mailto:[email protected]).* ## Auto Power Spectral Density The (auto-) [power spectral density](https://en.wikipedia.org/wiki/Spectral_density#Power_spectral_density) (PSD) is defined as the Fourier transformation of the [auto-correlation function](correlation_functions.ipynb) (ACF). ### Definition For a continuous-amplitude, real-valued, wide-sense stationary (WSS) random signal $x[k]$ the PSD is given as \begin{equation} \Phi_{xx}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) = \mathcal{F}_* \{ \varphi_{xx}[\kappa] \}, \end{equation} where $\mathcal{F}_* \{ \cdot \}$ denotes the [discrete-time Fourier transformation](https://en.wikipedia.org/wiki/Discrete-time_Fourier_transform) (DTFT) and $\varphi_{xx}[\kappa]$ the ACF of $x[k]$. Note that the DTFT is performed with respect to $\kappa$. The ACF of a random signal of finite length $N$ can be expressed by way of a linear convolution \begin{equation} \varphi_{xx}[\kappa] = \frac{1}{N} \cdot x_N[k] * x_N[-k]. \end{equation} Taking the DTFT of the left- and right-hand side results in \begin{equation} \Phi_{xx}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) = \frac{1}{N} \, X_N(\mathrm{e}^{\,\mathrm{j}\,\Omega})\, X_N(\mathrm{e}^{-\,\mathrm{j}\,\Omega}) = \frac{1}{N} \, | X_N(\mathrm{e}^{\,\mathrm{j}\,\Omega}) |^2. \end{equation} The last equality results from the definition of the magnitude and the symmetry of the DTFT for real-valued signals. The spectrum $X_N(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ quantifies the amplitude density of the signal $x_N[k]$. It can be concluded from above result that the PSD quantifies the squared amplitude or power density of a random signal. This explains the term power spectral density. ### Properties The properties of the PSD can be deduced from the properties of the ACF and the DTFT as: 1. From the link between the PSD $\Phi_{xx}(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ and the spectrum $X_N(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ derived above it can be concluded that the PSD is real valued $$\Phi_{xx}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) \in \mathbb{R}$$ 2. From the even symmetry $\varphi_{xx}[\kappa] = \varphi_{xx}[-\kappa]$ of the ACF it follows that $$ \Phi_{xx}(\mathrm{e}^{\,\mathrm{j} \, \Omega}) = \Phi_{xx}(\mathrm{e}^{\,-\mathrm{j}\, \Omega}) $$ 3. The PSD of an uncorrelated random signal is given as $$ \Phi_{xx}(\mathrm{e}^{\,\mathrm{j} \, \Omega}) = (\sigma_x^2 + \mu_x^2) \cdot {\bot \!\! \bot \!\! \bot}\left( \frac{\Omega}{2 \pi} \right) ,$$ which can be deduced from the [ACF of an uncorrelated signal](correlation_functions.ipynb#Properties). 4. The quadratic mean of a random signal is given as $$ E\{ x[k]^2 \} = \varphi_{xx}[\kappa=0] = \frac{1}{2\pi} \int\limits_{-\pi}^{\pi} \Phi_{xx}(\mathrm{e}^{\,\mathrm{j}\, \Omega}) \,\mathrm{d} \Omega $$ The last relation can be found by expressing the ACF via the inverse DTFT of $\Phi_{xx}$ and considering that $\mathrm{e}^{\mathrm{j} \Omega \kappa} = 1$ when evaluating the integral for $\kappa=0$. ### Example - Power Spectral Density of a Speech Signal In this example the PSD $\Phi_{xx}(\mathrm{e}^{\,\mathrm{j} \,\Omega})$ of a speech signal of length $N$ is estimated by applying a discrete Fourier transformation (DFT) to its ACF. 
For a better interpretation of the PSD, the frequency axis $f = \frac{\Omega}{2 \pi} \cdot f_s$ has been chosen for illustration, where $f_s$ denotes the sampling frequency of the signal. The speech signal constitutes a recording of the vowel 'o' spoken from a German male, loaded into variable `x`. In Python the ACF is stored in a vector with indices $0, 1, \dots, 2N - 2$ corresponding to the lags $\kappa = (0, 1, \dots, 2N - 2)^\mathrm{T} - (N-1)$. When computing the discrete Fourier transform (DFT) of the ACF numerically by the fast Fourier transform (FFT) one has to take this shift into account. For instance, by multiplying the DFT $\Phi_{xx}[\mu]$ by $\mathrm{e}^{\mathrm{j} \mu \frac{2 \pi}{2N - 1} (N-1)}$. ``` import numpy as np import matplotlib.pyplot as plt from scipy.io import wavfile %matplotlib inline # read audio file fs, x = wavfile.read('../data/vocal_o_8k.wav') x = np.asarray(x, dtype=float) N = len(x) # compute ACF acf = 1/N * np.correlate(x, x, mode='full') # compute PSD psd = np.fft.fft(acf) psd = psd * np.exp(1j*np.arange(2*N-1)*2*np.pi*(N-1)/(2*N-1)) f = np.fft.fftfreq(2*N-1, d=1/fs) # plot PSD plt.figure(figsize = (10, 4)) plt.plot(f, np.real(psd)) plt.title('Estimated power spectral density') plt.ylabel(r'$\hat{\Phi}_{xx}(e^{j \Omega})$') plt.xlabel(r'$f / Hz$') plt.axis([0, 500, 0, 1.1*max(np.abs(psd))]) plt.grid() ``` **Exercise** * What does the PSD tell you about the average spectral contents of a speech signal? Solution: The speech signal exhibits a harmonic structure with the dominant fundamental frequency $f_0 \approx 100$ Hz and a number of harmonics $f_n \approx n \cdot f_0$ for $n > 0$. This due to the fact that vowels generate random signals which are in good approximation periodic. To generate vowels, the sound produced by the periodically vibrating vowel folds is filtered by the resonance volumes and articulators above the voice box. The spectrum of periodic signals is a line spectrum. ## Cross-Power Spectral Density The cross-power spectral density is defined as the Fourier transformation of the [cross-correlation function](correlation_functions.ipynb#Cross-Correlation-Function) (CCF). ### Definition For two continuous-amplitude, real-valued, wide-sense stationary (WSS) random signals $x[k]$ and $y[k]$, the cross-power spectral density is given as \begin{equation} \Phi_{xy}(\mathrm{e}^{\,\mathrm{j} \, \Omega}) = \mathcal{F}_* \{ \varphi_{xy}[\kappa] \}, \end{equation} where $\varphi_{xy}[\kappa]$ denotes the CCF of $x[k]$ and $y[k]$. Note again, that the DTFT is performed with respect to $\kappa$. The CCF of two random signals of finite length $N$ and $M$ can be expressed by way of a linear convolution \begin{equation} \varphi_{xy}[\kappa] = \frac{1}{N} \cdot x_N[k] * y_M[-k]. \end{equation} Note the chosen $\frac{1}{N}$-averaging convention corresponds to the length of signal $x$. If $N \neq M$, care should be taken on the interpretation of this normalization. In case of $N=M$ the $\frac{1}{N}$-averaging yields a [biased estimator](https://en.wikipedia.org/wiki/Bias_of_an_estimator) of the CCF, which consistently should be denoted with $\hat{\varphi}_{xy,\mathrm{biased}}[\kappa]$. Taking the DTFT of the left- and right-hand side from above cross-correlation results in \begin{equation} \Phi_{xy}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) = \frac{1}{N} \, X_N(\mathrm{e}^{\,\mathrm{j}\,\Omega})\, Y_M(\mathrm{e}^{-\,\mathrm{j}\,\Omega}). \end{equation} ### Properties 1. 
The symmetries of $\Phi_{xy}(\mathrm{e}^{\,\mathrm{j}\, \Omega})$ can be derived from the symmetries of the CCF and the DTFT as $$ \underbrace {\Phi_{xy}(\mathrm{e}^{\,\mathrm{j}\, \Omega}) = \Phi_{xy}^*(\mathrm{e}^{-\,\mathrm{j}\, \Omega})}_{\varphi_{xy}[\kappa] \in \mathbb{R}} = \underbrace {\Phi_{yx}(\mathrm{e}^{\,- \mathrm{j}\, \Omega}) = \Phi_{yx}^*(\mathrm{e}^{\,\mathrm{j}\, \Omega})}_{\varphi_{yx}[-\kappa] \in \mathbb{R}},$$ from which $|\Phi_{xy}(\mathrm{e}^{\,\mathrm{j}\, \Omega})| = |\Phi_{yx}(\mathrm{e}^{\,\mathrm{j}\, \Omega})|$ can be concluded. 2. The cross PSD of two uncorrelated random signals is given as $$ \Phi_{xy}(\mathrm{e}^{\,\mathrm{j} \, \Omega}) = \mu_x \mu_y \cdot {\bot \!\! \bot \!\! \bot}\left( \frac{\Omega}{2 \pi} \right) $$ which can be deduced from the CCF of an uncorrelated signal. ### Example - Cross-Power Spectral Density The following example estimates and plots the cross PSD $\Phi_{xy}(\mathrm{e}^{\,\mathrm{j}\, \Omega})$ of two random signals $x_N[k]$ and $y_M[k]$ of finite lengths $N = 64$ and $M = 512$. ``` N = 64 # length of x M = 512 # length of y # generate two uncorrelated random signals np.random.seed(1) x = 2 + np.random.normal(size=N) y = 3 + np.random.normal(size=M) N = len(x) M = len(y) # compute cross PSD via CCF acf = 1/N * np.correlate(x, y, mode='full') psd = np.fft.fft(acf) psd = psd * np.exp(1j*np.arange(N+M-1)*2*np.pi*(M-1)/(2*M-1)) psd = np.fft.fftshift(psd) Om = 2*np.pi * np.arange(0, N+M-1) / (N+M-1) Om = Om - np.pi # plot results plt.figure(figsize=(10, 4)) plt.stem(Om, np.abs(psd), basefmt='C0:', use_line_collection=True) plt.title('Biased estimator of cross power spectral density') plt.ylabel(r'$|\hat{\Phi}_{xy}(e^{j \Omega})|$') plt.xlabel(r'$\Omega$') plt.grid() ``` **Exercise** * What does the cross PSD $\Phi_{xy}(\mathrm{e}^{\,\mathrm{j} \, \Omega})$ tell you about the statistical properties of the two random signals? Solution: The cross PSD $\Phi_{xy}(\mathrm{e}^{\,\mathrm{j} \, \Omega})$ is essentially non-zero only for $\Omega=0$. It can hence be concluded that the two random signals are not mean-free and are uncorrelated with each other. **Copyright** This notebook is provided as [Open Educational Resource](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *Sascha Spors, Digital Signal Processing - Lecture notes featuring computational examples*.
github_jupyter
# Politeness strategies in MT-mediated communication In this notebook, we demo how to extract politeness strategies using ConvoKit's `PolitenessStrategies` module both in English and in Chinese. We will make use of this functionality to assess the degree to which politeness strategies are preserved in machine-translated texts. The politeness strategies considered are adapted from operationalizations in the following papers: - English: [A computational approach to politeness with application to social factors](https://www.cs.cornell.edu/~cristian/Politeness.html), [The politeness Package: Detecting Politeness in Natural Language](https://journal.r-project.org/archive/2018/RJ-2018-079/RJ-2018-079.pdf) - Chinese: [Studying Politeness across Cultures using English Twitter and Mandarin Weibo](https://dl.acm.org/doi/abs/10.1145/3415190) ``` import os from collections import defaultdict, Counter from tqdm import tqdm import pandas as pd import numpy as np from scipy.stats import pearsonr import spacy from convokit import Corpus, Speaker, Utterance, download from convokit import TextParser, PolitenessStrategies import seaborn as sns from matplotlib import pyplot as plt %matplotlib inline ``` ## 1. Preparing diagnostic test sets We sample utterances from Wikipedia Talkpages discussions in both English and Chinese. In particular, we use the medium-sized `wiki-corpus` shipped by ConvoKit as the source for sampling English utterances (as shown below), and we sampled a subset of utterances from [WikiConv](https://www.cs.cornell.edu/~cristian/index_files/wikiconv-conversation-corpus.pdf) (Chinese) as shared in [figshare](https://figshare.com/articles/dataset/WikiConv_-_Chinese/7376012). For those who would like to skip the preparatory steps and go straight to our analysis exploring how to assess the permeability of politeness signals in machine-translated communication ([Part 2 of this notebook](#2.-Computing-permeability-for-politeness-strategies)), we have made the sampled corpora directly downloadable via ConvoKit as `wiki-sampled-en-corpus` and `wiki-sampled-zh-corpus`. ### 1.1. English data: `wiki-corpus` The medium-sized Wikipedia dataset is provided by ConvoKit as `wiki-corpus` ([documentation](https://convokit.cornell.edu/documentation/wiki.html)). Note that ConvoKit also offers a more complete collection of Wikipedia Talkpage discussions: [the Cornell Wikiconv Dataset](https://convokit.cornell.edu/documentation/wikiconv.html). We choose to use `wiki-corpus` as it is already sufficiently large for our purpose. To load the corpus, see options in the cell below. ``` # OPTION 1: DOWNLOAD CORPUS # UNCOMMENT THESE LINES TO DOWNLOAD CORPUS # DATA_DIR = '<YOUR DIRECTORY>' # WIKI_ROOT_DIR = download('wiki-corpus', data_dir=DATA_DIR) # OPTION 2: READ PREVIOUSLY-DOWNLOADED CORPUS FROM DISK # UNCOMMENT THIS LINE AND REPLACE WITH THE DIRECTORY WHERE THE WIKI-CORPUS IS LOCATED # WIKI_ROOT_DIR = '<YOUR DIRECTORY>' corpus = Corpus(filename=WIKI_ROOT_DIR) # load parses corpus.load_info('utterance',['parsed']) # Overall stats of the dataset corpus.print_summary_stats() ``` #### Extracting strategies for sampling In the case when the corpus is not dependency parsed, it will need to go through an additional step of parsing, which can be achieved via `TextParser`. See [this demo](https://github.com/CornellNLP/Cornell-Conversational-Analysis-Toolkit/blob/master/examples/politeness-strategies/politeness_demo.ipynb) for an example. 
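For reference, a minimal parsing pass with `TextParser` would look roughly like the sketch below (not needed here, since the parses are already stored in the corpus):

```
# Only required if the corpus does not already ship with dependency parses.
parser = TextParser(verbosity=10000)   # relies on spaCy for tokenization and parsing
corpus = parser.transform(corpus)      # stores parses as utterance-level metadata
```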
As the `wiki-corpus` ships with pre-computed dependency parses (which we already loaded as you may notice), we can go straight to politeness strategy extraction. Here, we will focus on a set of local strategies, and hence specify that we will want to extract with the strategy collection _politeness_local_. For other available options, refer to the [documentation](https://convokit.cornell.edu/documentation/politenessStrategies.html) for details. ``` ps_local = PolitenessStrategies(strategy_collection="politeness_local", verbose=10000) # By default, strategy extraction results are saved under "politeness_strategies". corpus = ps_local.transform(corpus, markers=True) ``` #### Computing strategy prevalence We can first take a glimpse at utterance-level strategy prevalence, i.e., the proportion of utterances in the dataset that use the politeness strategy. This can be easily done using `summarize()`. ``` df_prevalence = ps_local.summarize(corpus) df_prevalence ``` #### Sampling To assess permeability of these strategies, we sample 1000 instances for each strategy. The results will be saved to a smaller `wiki-sampled-en` corpus, which may be directly downloaded via ConvoKit if one wants to skip the intermediate steps (which will take a while to run); see [Part 2 of this notebook](#2.-Computing-permeability-for-politeness-strategies). ``` # utterance-level strategy uses df_feat = pd.DataFrame.from_dict({utt.id: utt.meta['politeness_strategies'] \ for utt in corpus.iter_utterances()}, orient='index') # sampling from least common to most sorted_strategies = df_prevalence.sort_values().index sampled_ids, samples = set(), [] for k in sorted_strategies: df_sample = df_feat[(~df_feat.index.isin(sampled_ids)) & (df_feat[k]==1)].sample(1000, random_state=42) df_sample['strategy'] = k samples.append(df_sample[['strategy']]) sampled_ids.update(df_sample.index) df_en_sample = pd.concat(samples) # saving as a convokit corpus for i, info in df_en_sample.itertuples(): utt = corpus.get_utterance(i) utt.add_meta('selected', True) utt.add_meta('strategy', info) # filter only selected utterances # (note that this does not maintain conversation structure) wiki_sampled_en = corpus.filter_utterances_by(lambda utt:'selected' in utt.meta and utt.meta['selected']) ``` #### Translating To determine the degree to which politeness markers are preserved in translation, we will make comparisons between original and translated texts. To set this up, we perform two rounds of translation, forming an English -> Chinese -> English loop, i.e., we first translate the English texts into Chinese, and then translate the Chinese translations back into English. We use [EasyNMT](https://github.com/UKPLab/EasyNMT) to perform translations between English and Chinese, using models from [Opus-MT](https://github.com/Helsinki-NLP/Opus-MT) from [Helsinki-NLP](https://blogs.helsinki.fi/language-technology/).
``` from easynmt import EasyNMT # texts to be translated df_utts = wiki_sampled_en.get_utterances_dataframe(exclude_meta=True) # translation model model = EasyNMT('opus-mt', cache_folder="/belafonte_sauna/liye_translations/easynmt/") df_utts['en-zh'] = model.translate(list(df_utts['text']), \ target_lang='zh', \ source_lang='en', \ show_progress_bar=True, batch_size=8, \ perform_sentence_splitting=False) df_utts['en-back'] = model.translate(list(df_utts['en-zh']), \ target_lang='en', \ source_lang='zh', \ show_progress_bar=True, batch_size=8, \ perform_sentence_splitting=False) ``` We add these translated texts as metadata to our sampled corpus, and parse them to prepare for later strategy extraction. ``` from convokit.text_processing.textParser import TextParser for row in df_utts[['text', 'en-zh', 'en-back']].itertuples(): idx, trans, backtrans = row[0], row[2], row[3] utt = wiki_sampled_en.get_utterance(idx) utt.add_meta('en-zh', trans) utt.add_meta('en-back', backtrans) # parser to parse back-translated English texts en_parser = TextParser(output_field='en_parsed', input_field='en-back', \ verbosity=5000) # parser to parse translated texts in Chinese spacy_zh = spacy.load('zh_core_web_sm', disable=['ner']) zh_parser = TextParser(output_field='zh_parsed', input_field='en-zh', \ spacy_nlp=spacy_zh, verbosity=5000) wiki_sampled_en = en_parser.transform(wiki_sampled_en) wiki_sampled_en = zh_parser.transform(wiki_sampled_en) # We can then save the corpus using wiki_sampled_en.dump(YOUR_OUT_DIR) ``` ### 1.2 Chinese data: [WikiConv](https://www.cs.cornell.edu/~cristian/index_files/wikiconv-conversation-corpus.pdf) For the Chinese data, we start from utterances in [WikiConv](https://figshare.com/articles/dataset/WikiConv_-_Chinese/7376012) and similarly sample 1000 instances for a subset of strategies from the collection "_politeness-cscw-zh_". The corpus is saved as `wiki-sampled-zh-corpus`, with all textual data (i.e., both the original utterance texts and the corresponding translations) tokenized and parsed. ``` wiki_sampled_zh = Corpus(download('wiki-sampled-zh-corpus')) # Inspect the metadata available; it should have the following: # 'parsed' contains the dependency parses for the utterance text # 'zh-en' and 'zh-back' contain the translations and back-translations for utterance texts respectively # 'en_parsed' and 'zh_parsed' contain the respective parses, which we will use for strategy extractions wiki_sampled_zh.meta_index ``` ## 2. Computing permeability for politeness strategies With the two sampled datasets tokenized and parsed, we are now ready to assess the degree to which strategies are preserved vs. lost in different translation directions. We make two types of comparisons: * First, we consider a direct comparison between the original vs. translated texts. In particular, we check strategies used in utterances in English texts and Chinese texts with respective politeness strategy operationalizations to make comparisons. * Second, we consider comparing the original vs. the backtranslated texts using the same strategy operationalization and compare strategies detected.
``` # Download the data if Part 1 of the notebook is skipped # replace with where you'd like the corpora to be saved DATA_DIR = '/belafonte_sauna/liye_translations/convokit_mt/test/' wiki_sampled_en = Corpus(download('wiki-sampled-en-corpus', data_dir=DATA_DIR)) wiki_sampled_zh = Corpus(download('wiki-sampled-zh-corpus', data_dir=DATA_DIR)) wiki_sampled_en.print_summary_stats() wiki_sampled_zh.print_summary_stats() ``` ### Extracting strategies As a first step, we extract strategies for all translations and back-translations. We will need two politeness strategy transformers: * for texts in English, we will again use the strategy collection _politeness_local_ * for texts in Chinese, we will be using the strategy collection _politeness-cscw-zh_. More details of different politeness strategy collections can be found at the [documentation page](https://convokit.cornell.edu/documentation/politenessStrategies.html). ``` ps_zh = PolitenessStrategies(parse_attribute_name='zh_parsed', \ strategy_attribute_name="zh_strategies", \ strategy_collection="politeness_cscw_zh", verbose=5000) ps_en = PolitenessStrategies(parse_attribute_name='en_parsed', \ strategy_attribute_name="en_strategies", \ strategy_collection="politeness_local", verbose=5000) # extracting for English samples wiki_sampled_en = ps_zh.transform(wiki_sampled_en) wiki_sampled_en = ps_en.transform(wiki_sampled_en) # extracting for Chinese samples wiki_sampled_zh = ps_zh.transform(wiki_sampled_zh) wiki_sampled_zh = ps_en.transform(wiki_sampled_zh) ``` ### Making comparisons We consider the permeability of a politeness strategy _s_ to be the percentage of utterances in a given collection containing such markers for which the translated version also contains (potentially different) markers from the same set. As mentioned earlier, we estimate permeability both with translations and backtranslations. Note that each approach has its own limitations, and thus both of them are at best _proxies_ for strategy permeability and should not be read as ground-truth values.
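As a toy illustration of this definition (the arrays below are made up for the example, not taken from the corpora above), permeability is simply the fraction of strategy-bearing source utterances whose translation still triggers a marker from the same strategy set:

```
import numpy as np

# hypothetical indicator arrays for one strategy s over 6 utterances:
# 1 = the utterance contains a marker of s, 0 = it does not
orig_has_s  = np.array([1, 1, 0, 1, 0, 1])   # in the original texts
trans_has_s = np.array([1, 0, 0, 1, 1, 1])   # in the translated texts

# permeability of s: among utterances that used s originally,
# the share whose translation still contains a marker of s
permeability_s = trans_has_s[orig_has_s == 1].mean()
print(permeability_s)  # 0.75
```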
``` # Mapping between strategy names in different collections # Note that the collections are not exactly equivalent, # i.e., there are strategies we can't find a close match between the two collections en2zh = {'Actually': 'factuality', 'Adverb.Just': None, 'Affirmation': 'praise', 'Apology': 'apologetic', 'By.The.Way': 'indirect_btw', 'Conj.Start': 'start_so', 'Filler': None, 'For.Me': None, 'For.You': None, 'Gratitude': 'gratitude', 'Greeting':'greeting', 'Hedges':'hedge', 'Indicative':'can_you', 'Please': 'please', 'Please.Start': 'start_please', 'Reassurance': None, 'Subjunctive': 'could_you', 'Swearing': 'taboo' } zh2en = {v:k for k,v in en2zh.items() if v} # add utterance-level assessing result to utterance metadata for the English corpus for utt in wiki_sampled_en.iter_utterances(): # strategy names in English and Chinese en_name = utt.retrieve_meta('strategy') zh_name = en2zh[en_name] # translations if zh_name: trans_status = utt.retrieve_meta('zh_strategies')[zh_name] utt.add_meta('translation_result', trans_status) else: # when a comparison isn't applicable, we use the value -1 utt.add_meta('translation_result', -1) # back translations backtrans_status = utt.retrieve_meta('en_strategies')[en_name] utt.add_meta('backtranslation_result', backtrans_status) # add utterance-level assessing result to utterance metadata for the Chinese corpus for utt in wiki_sampled_zh.iter_utterances(): # strategy names in English and Chinese zh_name = utt.retrieve_meta('strategy') en_name = zh2en[zh_name] # translations if en_name: trans_status = utt.retrieve_meta('en_strategies')[en_name] utt.add_meta('translation_result', trans_status) # back translations backtrans_status = utt.retrieve_meta('zh_strategies')[zh_name] utt.add_meta('backtranslation_result', backtrans_status) ``` We can then export these utterance-level assessing results to pandas DataFrames (via `get_attribute_table`) for easy aggregation and plotting. The utterance metadata we need are: * strategy: the strategy to be checked for the utterance * translation_result: whether the checked strategy remains in the translated text * backtranslation_result: whether the checked strategy remains in the back-translated text #### A. 
English -> Chinese ``` # results for the English corpus res_df_en = wiki_sampled_en.get_attribute_table(obj_type='utterance', \ attrs=['strategy', \ 'translation_result', \ 'backtranslation_result']) res_df_en.columns = ['strategy', 'en->zh', 'en->zh->en'] # strategy-level permeability, -1 means the strategy is not applicable permeability_df_en = res_df_en.groupby('strategy').sum() / 1000 # As a reference, we include permeability computed through an informal small-scale human annotation # (50 instances, one annotator) reference = {'Actually': 0.7, 'Adverb.Just': 0.62, 'Affirmation': 0.8, 'Apology': 0.94, 'By.The.Way': 0.42, 'Conj.Start': 0.66, 'Filler': 0.58, 'For.Me': 0.62, 'For.You': 0.52, 'Gratitude': 0.86, 'Greeting': 0.52, 'Hedges': 0.68, 'Indicative': 0.64, 'Please': 0.72, 'Please.Start': 0.82, 'Reassurance': 0.88, 'Subjunctive': 0.0, 'Swearing': 0.3} permeability_df_en['reference'] = [reference[name] for name in permeability_df_en.index] # As further context, we can include information about strategy prevalence on our plot prevalence_en = dict(df_prevalence*100) permeability_df_en.index = [f"{name} ({prevalence_en[name]:.1f}%)" for name in permeability_df_en.index] plt.figure(figsize=(9, 12)) sns.set(font_scale=1.2) # cells that are not applicable are masked in white with sns.axes_style("white"): sns.heatmap(permeability_df_en, annot=True, cmap="Greens", fmt=".1%", mask=permeability_df_en==-1) ``` #### B. Chinese -> English ``` # results for the Chinese corpus res_df_zh = wiki_sampled_zh.get_attribute_table(obj_type='utterance', \ attrs=['strategy', \ 'translation_result', \ 'backtranslation_result']) # convert names to make it easier to compare between directions res_df_zh['strategy'] = res_df_zh['strategy'].apply(lambda name:zh2en[name]) res_df_zh.columns = ['strategy', 'zh->en', 'zh->en->zh'] permeability_df_zh = res_df_zh.groupby('strategy').sum() / 1000 # as the original dataset for the Chinese corpus is quite large # we present strategy prevalence results directly prevalence_zh = {'apologetic': 0.6, 'can_you': 0.3, 'could_you': 0.0, 'factuality': 0.4,'gratitude': 3.1, 'greeting': 0.0, 'hedge': 42.8, 'indirect_btw': 0.1, 'praise': 0.4, 'please': 25.4, 'start_please': 17.7, 'start_so': 0.7, 'taboo': 0.4} permeability_df_zh.index = [f"{name} ({prevalence_zh[en2zh[name]]:.1f}%)" for name in permeability_df_zh.index] plt.figure(figsize=(6, 9)) sns.set(font_scale=1.2) with sns.axes_style("white"): sns.heatmap(permeability_df_zh, annot=True, cmap="Blues", fmt=".1%") ```
github_jupyter
Notebook to plot the histogram of the power criterion values of Rel-UME test. ``` %matplotlib inline %load_ext autoreload %autoreload 2 #%config InlineBackend.figure_format = 'svg' #%config InlineBackend.figure_format = 'pdf' import freqopttest.tst as tst import kmod import kgof import kgof.goftest as gof # submodules from kmod import data, density, kernel, util, plot, glo, log from kmod.ex import cifar10 as cf10 import kmod.ex.exutil as exu from kmod import mctest as mct import matplotlib import matplotlib.pyplot as plt import pickle import os import autograd.numpy as np import scipy.stats as stats import numpy.testing as testing # plot.set_default_matplotlib_options() # font options font = { #'family' : 'normal', #'weight' : 'bold', 'size' : 20, } plt.rc('font', **font) plt.rc('lines', linewidth=2) matplotlib.rcParams['pdf.fonttype'] = 42 matplotlib.rcParams['ps.fonttype'] = 42 # def store_path(fname): # """ # Construct a full path for saving/loading files. # """ # return os.path.join('cifar10', fname) display(list(zip(range(10), cf10.cifar10_classes))) ``` # Histogram of power criterion values First construct four samples: $X \sim P, Y \sim Q, Z \sim R$, and a pool W to be used as test location candidates. ``` # class_spec = [ # # (class, #points for p, #points for q, #points for r, #points for the pool) # ('airplane', 2000, 0, 0, 1500), # ('cat', 0, 2000, 2000, 1500), # ('truck', 1500, 1500, 1500, 1500), # ] # class_spec = [ # # (class, #points for p, #points for q, #points for r, #points for the pool) # ('airplane', 1000, 0, 0, 300), # ('cat', 0, 1000, 1000, 300), # ('truck', 1500, 1500, 1500, 300), # ] class_spec = [ # (class, #points for p, #points for q, #points for r, #points for the pool) ('ship', 2000, 0, 0, 1000), ('airplane', 0, 2000, 1500, 1000), ('dog', 1500, 1500, 1500, 1000), ('bird', 0, 0, 500, 1000), ] # class_spec = [ # # (class, #points for p, #points for q, #points for r, #points for the pool) # ('horse', 2000, 0, 0, 1000), # ('deer', 0, 2000, 1500, 1000), # ('dog', 1500, 1500, 1500, 1000), # ('automobile', 0, 0, 500, 1000), # ] # class_spec = [ # # (class, #points for p, #points for q, #points for r, #points for the pool) # ('airplane', 2000, 0, 0, 1000), # ('automobile', 0, 2000, 1500, 1000), # ('cat', 1500, 1500, 1500, 1000), # ('frog', 0, 0, 500, 1000), # ] #class_spec = [ # (class, #points for p, #points for q, #points for r, #points for the pool) # ('airplane', 2000, 0, 0, 1000), # ('automobile', 0, 2000, 2000, 1000), # ('cat', 1500, 1500, 1500, 1000), #] # class_spec = [ # # (class, #points for p, #points for q, #points for r, #points for the pool) # ('airplane', 200, 0, 0, 150), # ('cat', 0, 200, 200, 150), # ('truck', 150, 150, 150, 150), # ] # check sizes hist_classes = [z[0] for z in class_spec] p_sizes = [z[1] for z in class_spec] q_sizes = [z[2] for z in class_spec] r_sizes = [z[3] for z in class_spec] pool_sizes = [z[4] for z in class_spec] # make sure p,q,r have the same sample size assert sum(p_sizes) == sum(q_sizes) assert sum(q_sizes) == sum(r_sizes) # cannot use more than 6000 from each class for i, cs in enumerate(class_spec): class_used = sum(cs[1:]) if class_used > 6000: raise ValueError('class "{}" requires more than 6000 points. 
Was {}.'.format(cs[0], class_used)) # images as numpy arrays list_Ximgs = [] list_Yimgs = [] list_Zimgs = [] list_poolimgs = [] # features list_X = [] list_Y = [] list_Z = [] list_pool = [] # class labels list_Xlabels = [] list_Ylabels = [] list_Zlabels = [] list_poollabels = [] # seed used for subsampling seed = 368 with util.NumpySeedContext(seed=seed): for i, cs in enumerate(class_spec): # load class data class_i = cs[0] imgs_i = cf10.load_data_array(class_i) feas_i = cf10.load_feature_array(class_i) # split each class according to the spec class_sizes_i = cs[1:] # imgs_i, feas_i may contain more than what we need in total for a class. Subsample sub_ind = util.subsample_ind(imgs_i.shape[0], sum(class_sizes_i), seed=seed+1) sub_ind = list(sub_ind) assert len(sub_ind) == sum(class_sizes_i) xyzp_imgs_i = util.multi_way_split(imgs_i[sub_ind,:], class_sizes_i) xyzp_feas_i = util.multi_way_split(feas_i[sub_ind,:], class_sizes_i) # assignment list_Ximgs.append(xyzp_imgs_i[0]) list_Yimgs.append(xyzp_imgs_i[1]) list_Zimgs.append(xyzp_imgs_i[2]) list_poolimgs.append(xyzp_imgs_i[3]) list_X.append(xyzp_feas_i[0]) list_Y.append(xyzp_feas_i[1]) list_Z.append(xyzp_feas_i[2]) list_pool.append(xyzp_feas_i[3]) # class labels class_ind_i = cf10.cifar10_class_ind_dict[class_i] list_Xlabels.append(np.ones(class_sizes_i[0])*class_ind_i) list_Ylabels.append(np.ones(class_sizes_i[1])*class_ind_i) list_Zlabels.append(np.ones(class_sizes_i[2])*class_ind_i) list_poollabels.append(np.ones(class_sizes_i[3])*class_ind_i) ``` Finally we have the samples (features and images) ``` # stack the lists. For the "histogram" purpose, we don't actually need # images for X, Y, Z. Only images for the pool. Ximgs = np.vstack(list_Ximgs) Yimgs = np.vstack(list_Yimgs) Zimgs = np.vstack(list_Zimgs) poolimgs = np.vstack(list_poolimgs) # features X = np.vstack(list_X) Y = np.vstack(list_Y) Z = np.vstack(list_Z) pool = np.vstack(list_pool) # labels Xlabels = np.hstack(list_Xlabels) Ylabels = np.hstack(list_Ylabels) Zlabels = np.hstack(list_Zlabels) poollabels = np.hstack(list_poollabels) # sanity check XYZP = [(X, Ximgs, Xlabels), (Y, Yimgs, Ylabels), (Z, Zimgs, Zlabels), (pool, poolimgs, poollabels)] for f, fimgs, flabels in XYZP: assert f.shape[0] == fimgs.shape[0] assert fimgs.shape[0] == flabels.shape[0] assert X.shape[0] == sum(p_sizes) assert Y.shape[0] == sum(q_sizes) assert Z.shape[0] == sum(r_sizes) assert pool.shape[0] == sum(pool_sizes) ``` ## The actual histogram ``` def eval_test_locations(X, Y, Z, loc_pool, k, func_inds, reg=1e-6): """ Use X, Y, Z to estimate the Rel-UME power criterion function and evaluate the function at each point (individually) in loc_pool (2d numpy array). * k: a kernel * func_inds: list of indices of the functions to evaluate. See below. * reg: regularization parameter in the power criterion Return an m x (up to) 5 numpy array where m = number of candidates in the pool. The columns can be (as specified in func_inds): 0. power criterion 1. evaluation of the relative witness (or the test statistic of UME_SC) 2. evaluation of MMD witness(p, r) (not squared) 3. evaluation of witness(q, r) 4. 
evaluate of witness(p, q) """ datap = data.Data(X) dataq = data.Data(Y) datar = data.Data(Z) powcri_func = mct.SC_UME.get_power_criterion_func(datap, dataq, datar, k, k, reg=1e-7) relwit_func = mct.SC_UME.get_relative_sqwitness(datap, dataq, datar, k, k) witpr = tst.MMDWitness(k, X, Z) witqr = tst.MMDWitness(k, Y, Z) witpq = tst.MMDWitness(k, X, Y) funcs = [powcri_func, relwit_func, witpr, witqr, witpq] # select the functions according to func_inds list_evals = [funcs[i](loc_pool) for i in func_inds] stack_evals = np.vstack(list_evals) return stack_evals.T # Gaussian kernel with median heuristic medxz = util.meddistance(np.vstack((X, Z)), subsample=1000) medyz = util.meddistance(np.vstack((Y, Z)), subsample=1000) k = kernel.KGauss(np.mean([medxz, medyz])**2) print('Gaussian width: {}'.format(k.sigma2**0.5)) # histogram. This will take some time. func_inds = np.array([0, 1, 2, 3, 4]) pool_evals = eval_test_locations(X, Y, Z, loc_pool=pool, k=k, func_inds=func_inds, reg=1e-6) pow_cri_values = pool_evals[:, func_inds==0].reshape(-1) test_stat_values = pool_evals[:, func_inds==1].reshape(-1) witpr_values = pool_evals[:, func_inds==2] witqr_values = pool_evals[:, func_inds==3] witpq_values = pool_evals[:, func_inds==4].reshape(-1) plt.figure(figsize=(6, 4)) a = 0.6 plt.figure(figsize=(4,4)) plt.hist(pow_cri_values, bins=15, label='Power Criterion', alpha=a); plt.hist(witpr_values, bins=15, label='Power Criterion', alpha=a); plt.hist(witqr_values, bins=15, label='Power Criterion', alpha=a); plt.hist(witpq_values, bins=15, label='Power Criterion', alpha=a); # Save the results # package things to save datapack = { 'class_spec': class_spec, 'seed': seed, 'poolimgs': poolimgs, 'X': X, 'Y': Y, 'Z': Z, 'pool': pool, 'medxz': medxz, 'medyz': medyz, 'func_inds': func_inds, 'pool_evals': pool_evals, } lines = [ '_'.join(str(x) for x in cs) for cs in class_spec] fname = '-'.join(lines) + '-seed{}.pkl'.format(seed) with open(fname, 'wb') as f: # expect result to be a dictionary pickle.dump(datapack, f) ``` Code for running the experiment ends here. ## Plot the results This section can be run by loading the previously saved results. 
``` # load the results # fname = 'airplane_2000_0_0_1000-automobile_0_2000_1500_1000-cat_1500_1500_1500_1000-frog_0_0_500_1000-seed368.pkl' # fname = 'ship_2000_0_0_1000-airplane_0_2000_1500_1000-automobile_1500_1500_1500_1000-bird_0_0_500_1000-seed368.pkl' # fname = 'ship_2000_0_0_1000-dog_0_2000_1500_1000-automobile_1500_1500_1500_1000-bird_0_0_500_1000-seed368.pkl' fname = 'ship_2000_0_0_1000-airplane_0_2000_1500_1000-dog_1500_1500_1500_1000-bird_0_0_500_1000-seed368.pkl' # fname = 'horse_2000_0_0_1000-deer_0_2000_1500_1000-dog_1500_1500_1500_1000-airplane_0_0_500_1000-seed368.pkl' # fname = 'horse_2000_0_0_1000-deer_0_2000_1500_1000-dog_1500_1500_1500_1000-automobile_0_0_500_1000-seed368.pkl' # fname = 'horse_2000_0_0_1000-deer_0_2000_2000_1000-dog_1500_1500_1500_1000-seed368.pkl' #fname = 'airplane_2000_0_0_1000-automobile_0_2000_2000_1000-cat_1500_1500_1500_1000-seed368.pkl' with open(fname, 'rb') as f: # expect a dictionary L = pickle.load(f) # load the variables class_spec = L['class_spec'] seed = L['seed'] poolimgs = L['poolimgs'] X = L['X'] Y = L['Y'] Z = L['Z'] pool = L['pool'] medxz = L['medxz'] medyz = L['medyz'] func_inds = L['func_inds'] pool_evals = L['pool_evals'] pow_cri_values = pool_evals[:, func_inds==0].reshape(-1) test_stat_values = pool_evals[:, func_inds==1].reshape(-1) witpq_values = pool_evals[:, func_inds==4].reshape(-1) # plot the histogram plt.figure(figsize=(6, 4)) a = 0.6 plt.figure(figsize=(4,4)) plt.hist(pow_cri_values, bins=15, label='Power Criterion', alpha=a); # plt.hist(test_stat_values, label='Stat.', alpha=a); # plt.legend() plt.savefig('powcri_hist_locs_pool.pdf', bbox_inches='tight') plt.figure(figsize=(12, 4)) plt.hist(test_stat_values, label='Stat.', alpha=a); plt.legend() def reshape_3c_rescale(img_in_stack): img = img_in_stack.reshape([3, 32, 32]) # h x w x c img = img.transpose([1, 2, 0])/255.0 return img def plot_lowzerohigh(images, values, text_in_title='', grid_rows=2, grid_cols=10, figsize=(13, 3)): """ Sort the values in three different ways (ascending, descending, absolute ascending). Plot the images corresponding to the top-k sorted values. k is determined by the grid size. """ low_inds, zeros_inds, high_inds = util.top_lowzerohigh(values) plt.figure(figsize=figsize) exu.plot_images_grid(images[low_inds], reshape_3c_rescale, grid_rows, grid_cols) # plt.suptitle('{} Low'.format(text_in_title)) plt.savefig('powcri_low_region.pdf', bbox_inches='tight') plt.figure(figsize=figsize) exu.plot_images_grid(images[zeros_inds], reshape_3c_rescale, grid_rows, grid_cols) # plt.suptitle('{} Near Zero'.format(text_in_title)) plt.savefig('powcri_zero_region.pdf', bbox_inches='tight') plt.figure(figsize=figsize) exu.plot_images_grid(images[high_inds], reshape_3c_rescale, grid_rows, grid_cols) # plt.suptitle('{} High'.format(text_in_title)) plt.savefig('powcri_high_region.pdf', bbox_inches='tight') grid_rows = 2 grid_cols = 5 figsize = (5, 3) plot_lowzerohigh(poolimgs, pow_cri_values, 'Power Criterion.', grid_rows, grid_cols, figsize) # plot_lowzerohigh(poolimgs, rel_wit_values, 'Test statistic.', grid_rows, grid_cols, figsize) import matplotlib.gridspec as gridspec def plot_images_grid_witness(images, func_img=None, grid_rows=4, grid_cols=4, witness_pq=None, scale=100.): """ Plot images in a grid, starting from index 0 to the maximum size of the grid. 
images: stack of images images[i] is one image func_img: function to run on each image before plotting """ gs1 = gridspec.GridSpec(grid_rows, grid_cols) gs1.update(wspace=0.2, hspace=0.8) # set the spacing between axes. wit_sign = np.sign(witness_pq) for i in range(grid_rows*grid_cols): if func_img is not None: img = func_img(images[i]) else: img = images[i] if witness_pq is not None: sign = wit_sign[i] if sign > 0: color = 'red' else: color = 'blue' # plt.subplot(grid_rows, grid_cols, i+1) ax = plt.subplot(gs1[i]) if witness_pq is not None: ax.text(0.5, -0.6, "{:1.2f}".format(scale*witness_pq[i]), ha="center", color=color, transform=ax.transAxes) plt.imshow(img) plt.axis('off') def plot_lowzerohigh(images, values, text_in_title='', grid_rows=2, grid_cols=10, figsize=(13, 3), wit_pq=None, skip_length=1): """ Sort the values in three different ways (ascending, descending, absolute ascending). Plot the images corresponding to the top-k sorted values. k is determined by the grid size. """ low_inds, zeros_inds, high_inds = util.top_lowzerohigh(values) low_inds = low_inds[::skip_length] zeros_inds = zeros_inds[::skip_length] high_inds = high_inds[::skip_length] plt.figure(figsize=figsize) plot_images_grid_witness(images[low_inds], reshape_3c_rescale, grid_rows, grid_cols, wit_pq[low_inds]) # plt.suptitle('{} Low'.format(text_in_title)) # plt.savefig('powcri_low_region.pdf', bbox_inches='tight') plt.figure(figsize=figsize) plot_images_grid_witness(images[zeros_inds], reshape_3c_rescale, grid_rows, grid_cols, wit_pq[zeros_inds]) # plt.suptitle('{} Near Zero'.format(text_in_title)) # plt.savefig('powcri_zero_region.pdf', bbox_inches='tight') plt.figure(figsize=figsize) plot_images_grid_witness(images[high_inds[:]], reshape_3c_rescale, grid_rows, grid_cols, wit_pq[high_inds]) # plt.suptitle('{} High'.format(text_in_title)) # plt.savefig('powcri_high_region.pdf', bbox_inches='tight') grid_rows = 3 grid_cols = 5 figsize = (8, 3) plot_lowzerohigh(poolimgs, pow_cri_values, 'Power Criterion.', grid_rows, grid_cols, figsize, witpq_values, skip_length=40) ```
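The helper `util.top_lowzerohigh` from the `kmod` package is used above but its implementation is not shown; judging from how its outputs are used, it appears to return index orderings for the most negative, near-zero, and most positive power criterion values. A rough numpy sketch of that kind of ranking (an assumption for illustration only, not the package's actual code):

```
import numpy as np

def top_lowzerohigh_sketch(values):
    """Return index orderings: most negative first, closest to zero first,
    and most positive first (illustrative stand-in, not kmod's implementation)."""
    order = np.argsort(values)
    low_inds = order                         # ascending: most negative first
    zero_inds = np.argsort(np.abs(values))   # smallest magnitude first
    high_inds = order[::-1]                  # descending: most positive first
    return low_inds, zero_inds, high_inds
```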
github_jupyter
# Integrated gradients for text classification on the IMDB dataset In this example, we apply the integrated gradients method to a sentiment analysis model trained on the IMDB dataset. In text classification models, integrated gradients define an attribution value for each word in the input sentence. The attributions are calculated considering the integral of the model gradients with respect to the word embedding layer along a straight path from a baseline instance $x^\prime$ to the input instance $x$. A description of the method can be found [here](https://docs.seldon.io/projects/alibi/en/latest/methods/IntegratedGradients.html). Integrated gradients was originally proposed in Sundararajan et al., ["Axiomatic Attribution for Deep Networks"](https://arxiv.org/abs/1703.01365). The IMDB data set contains 50K movie reviews labelled as positive or negative. We train a convolutional neural network classifier with a single 1-d convolutional layer followed by a fully connected layer. The reviews in the dataset are truncated at 100 words and each word is represented by a 50-dimensional word embedding vector. We calculate attributions for the elements of the embedding layer. ``` import tensorflow as tf import numpy as np import os import pandas as pd from tensorflow.keras.datasets import imdb from tensorflow.keras.preprocessing import sequence from tensorflow.keras.models import Model from tensorflow.keras.layers import Input, Dense, Embedding, Conv1D, GlobalMaxPooling1D, Dropout from tensorflow.keras.utils import to_categorical from alibi.explainers import IntegratedGradients import matplotlib.pyplot as plt print('TF version: ', tf.__version__) print('Eager execution enabled: ', tf.executing_eagerly()) # True ``` ## Load data Loading the imdb dataset. ``` max_features = 10000 maxlen = 100 print('Loading data...') (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features) test_labels = y_test.copy() train_labels = y_train.copy() print(len(x_train), 'train sequences') print(len(x_test), 'test sequences') y_train, y_test = to_categorical(y_train), to_categorical(y_test) print('Pad sequences (samples x time)') x_train = sequence.pad_sequences(x_train, maxlen=maxlen) x_test = sequence.pad_sequences(x_test, maxlen=maxlen) print('x_train shape:', x_train.shape) print('x_test shape:', x_test.shape) index = imdb.get_word_index() reverse_index = {value: key for (key, value) in index.items()} ``` A sample review from the test set. Note that unknown words are replaced with 'UNK'. ``` def decode_sentence(x, reverse_index): # the `-3` offset is due to the special tokens used by keras # see https://stackoverflow.com/questions/42821330/restore-original-text-from-keras-s-imdb-dataset return " ".join([reverse_index.get(i - 3, 'UNK') for i in x]) print(decode_sentence(x_test[1], reverse_index)) ``` ## Train Model The model includes one convolutional layer and reaches a test accuracy of 0.85. If `save_model = True`, a local folder `./model_imdb` will be created and the trained model will be saved in that folder. If the model was previously saved, it can be loaded by setting `load_model = True`.
``` batch_size = 32 embedding_dims = 50 filters = 250 kernel_size = 3 hidden_dims = 250 load_model = False save_model = True filepath = './model_imdb/' # change to directory where model is downloaded if load_model: model = tf.keras.models.load_model(os.path.join(filepath, 'model.h5')) else: print('Build model...') inputs = Input(shape=(maxlen,), dtype='int32') embedded_sequences = Embedding(max_features, embedding_dims)(inputs) out = Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1)(embedded_sequences) out = Dropout(0.4)(out) out = GlobalMaxPooling1D()(out) out = Dense(hidden_dims, activation='relu')(out) out = Dropout(0.4)(out) outputs = Dense(2, activation='softmax')(out) model = Model(inputs=inputs, outputs=outputs) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) print('Train...') model.fit(x_train, y_train, batch_size=256, epochs=3, validation_data=(x_test, y_test)) if save_model: if not os.path.exists(filepath): os.makedirs(filepath) model.save(os.path.join(filepath, 'model.h5')) ``` ## Calculate integrated gradients The integrated gradients attributions are calculated with respect to the embedding layer for 10 samples from the test set. Since the model uses a word to vector embedding with vector dimensionality of 50 and sequence length of 100 words, the dimensionality of the attributions is (10, 100, 50). In order to obtain a single attribution value for each word, we sum all the attribution values for the 50 elements of each word's vector representation. The default baseline is used in this example which is internally defined as a sequence of zeros. In this case, this corresponds to a sequence of padding characters (**NB:** in general the numerical value corresponding to a "non-informative" baseline such as the PAD token will depend on the tokenizer used, make sure that the numerical value of the baseline used corresponds to your desired token value to avoid surprises). The path integral is defined as a straight line from the baseline to the input image. The path is approximated by choosing 50 discrete steps according to the Gauss-Legendre method. ``` n_steps = 50 method = "gausslegendre" internal_batch_size = 100 nb_samples = 10 ig = IntegratedGradients(model, layer=model.layers[1], n_steps=n_steps, method=method, internal_batch_size=internal_batch_size) x_test_sample = x_test[:nb_samples] predictions = model(x_test_sample).numpy().argmax(axis=1) explanation = ig.explain(x_test_sample, baselines=None, target=predictions) # Metadata from the explanation object explanation.meta # Data fields from the explanation object explanation.data.keys() # Get attributions values from the explanation object attrs = explanation.attributions[0] print('Attributions shape:', attrs.shape) ``` ## Sum attributions ``` attrs = attrs.sum(axis=2) print('Attributions shape:', attrs.shape) ``` ## Visualize attributions ``` i = 1 x_i = x_test_sample[i] attrs_i = attrs[i] pred = predictions[i] pred_dict = {1: 'Positive review', 0: 'Negative review'} print('Predicted label = {}: {}'.format(pred, pred_dict[pred])) ``` We can visualize the attributions for the text instance by mapping the values of the attributions onto a matplotlib colormap. Below we define some utility functions for doing this. ``` from IPython.display import HTML def hlstr(string, color='white'): """ Return HTML markup highlighting text with the desired color. 
""" return f"<mark style=background-color:{color}>{string} </mark>" def colorize(attrs, cmap='PiYG'): """ Compute hex colors based on the attributions for a single instance. Uses a diverging colorscale by default and normalizes and scales the colormap so that colors are consistent with the attributions. """ import matplotlib as mpl cmap_bound = np.abs(attrs).max() norm = mpl.colors.Normalize(vmin=-cmap_bound, vmax=cmap_bound) cmap = mpl.cm.get_cmap(cmap) # now compute hex values of colors colors = list(map(lambda x: mpl.colors.rgb2hex(cmap(norm(x))), attrs)) return colors ``` Below we visualize the attribution values (highlighted in the text) having the highest positive attributions. Words with high positive attribution are highlighted in shades of green and words with negative attribution in shades of pink. Stronger shading corresponds to higher attribution values. Positive attributions can be interpreted as increase in probability of the predicted class ("Positive sentiment") while negative attributions correspond to decrease in probability of the predicted class. ``` words = decode_sentence(x_i, reverse_index).split() colors = colorize(attrs_i) HTML("".join(list(map(hlstr, words, colors)))) ```
github_jupyter
# A practical introduction to Reinforcement Learning Most of you have probably heard of AI learning to play computer games on its own, a very popular example being DeepMind. DeepMind hit the news when its AlphaGo program defeated the South Korean Go world champion in 2016. There had been many successful attempts in the past to develop agents with the intent of playing Atari games like Breakout, Pong, and Space Invaders. You know what's common in most of these programs? A paradigm of Machine Learning known as **Reinforcement Learning**. For those of you who are new to RL, let's build some understanding with a few analogies. ## Reinforcement Learning Analogy Consider the scenario of teaching a dog new tricks. The dog doesn't understand our language, so we can't tell it what to do. Instead, we follow a different strategy. We emulate a situation (or a cue), and the dog tries to respond in many different ways. If the dog's response is the desired one, we reward it with snacks. Now guess what, the next time the dog is exposed to the same situation, the dog executes a similar action with even more enthusiasm in expectation of more food. That's like learning "what to do" from positive experiences. Similarly, dogs will tend to learn what not to do when faced with negative experiences. That's exactly how Reinforcement Learning works in a broader sense: - Your dog is an "agent" that is exposed to the **environment**. The environment could be your house, with you in it. - The situations the agent encounters are analogous to a **state**. An example of a state could be your dog standing in your living room while you use a specific word in a certain tone. - Our agent reacts by performing an **action** to transition from one "state" to another "state," your dog goes from standing to sitting, for example. - After the transition, it may receive a **reward** or **penalty** in return. You give it a treat! Or a "No" as a penalty. - The **policy** is the strategy of choosing an action given a state in expectation of better outcomes. Reinforcement Learning lies on the spectrum between Supervised Learning and Unsupervised Learning, and there are a few important things to note: 1. Being greedy doesn't always work. There are things that are easy to do for instant gratification, and there are things that provide long-term rewards. The goal is not to be greedy by looking only for quick immediate rewards, but instead to optimize for maximum rewards over the whole training. 2. Sequence matters in Reinforcement Learning. The reward the agent receives does not just depend on the current state, but on the entire history of states. Unlike supervised and unsupervised learning, time is important here. ### The Reinforcement Process In a way, Reinforcement Learning is the science of making optimal decisions using experiences. Breaking it down, the process of Reinforcement Learning involves these simple steps: 1. Observation of the environment 2. Deciding how to act using some strategy 3. Acting accordingly 4. Receiving a reward or penalty 5. Learning from the experiences and refining our strategy 6. Iterating until an optimal strategy is found Let's now understand Reinforcement Learning by actually developing an agent to learn to play a game automatically on its own. ## Example Design: Self-Driving Cab Let's design a simulation of a self-driving cab. The major goal is to demonstrate, in a simplified environment, how you can use RL techniques to develop an efficient and safe approach for tackling this problem.
The Smartcab's job is to pick up the passenger at one location and drop them off at another. Here are a few things that we'd love our Smartcab to take care of:

- Drop off the passenger at the right location.
- Save the passenger's time by taking the minimum time possible for the drop-off.
- Take care of the passenger's safety and traffic rules.

There are different aspects that need to be considered while modeling an RL solution to this problem: rewards, states, and actions.

### 1. Rewards

Since the agent (the imaginary driver) is reward-motivated and is going to learn how to control the cab through trial experiences in the environment, we need to decide the **rewards** and/or **penalties** and their magnitudes accordingly. Here are a few points to consider:

- The agent should receive a high positive reward for a successful dropoff because this behavior is highly desired.
- The agent should be penalized if it tries to drop off a passenger at a wrong location.
- The agent should get a slight negative reward for not making it to the destination after every time-step. "Slight" negative because we would prefer our agent to arrive late rather than make wrong moves trying to reach the destination as fast as possible.

### 2. State Space

In Reinforcement Learning, the agent encounters a state and then takes an action according to the state it's in. The **State Space** is the set of all possible situations our taxi could inhabit. The state should contain the useful information the agent needs to choose the right action.

Let's say we have a training area for our Smartcab where we are teaching it to transport people in a parking lot to four different locations (R, G, Y, B):

![](https://storage.googleapis.com/lds-media/images/Reinforcement_Learning_Taxi_Env.width-1200.png)

Let's assume Smartcab is the only vehicle in this parking lot. We can break up the parking lot into a 5x5 grid, which gives us 25 possible taxi locations. These 25 locations are one part of our state space. Notice the current location state of our taxi is coordinate (3, 1).

You'll also notice there are four (4) locations where we can pick up and drop off a passenger: R, G, Y, B, or `[(0,0), (0,4), (4,0), (4,3)]` in (row, col) coordinates. Our illustrated passenger is at location **Y** and wishes to go to location **R**.

When we also account for one (1) additional passenger state of being inside the taxi, we can take all combinations of passenger locations and destination locations to arrive at the total number of states for our taxi environment; there are four (4) destinations and five (4 + 1) passenger locations. So, our taxi environment has $5 \times 5 \times 5 \times 4 = 500$ total possible states.

### 3. Action Space

The agent encounters one of the 500 states and takes an action. The action in our case can be to move in a direction or to pick up/drop off a passenger. In other words, we have six possible actions:

1. `south`
2. `north`
3. `east`
4. `west`
5. `pickup`
6. `dropoff`

This is the **action space**: the set of all the actions that our agent can take in a given state.

You'll notice in the illustration above that the taxi cannot perform certain actions in certain states due to walls. In the environment's code, we will simply provide a -1 penalty for every wall hit and the taxi won't move anywhere. This will just rack up penalties, causing the taxi to consider going around the wall.

## Implementation with Python

Fortunately, [OpenAI Gym](https://gym.openai.com/) has this exact environment already built for us.
Gym provides different game environments which we can plug into our code to test an agent. The library takes care of the API for providing all the information that our agent would require, like possible actions, score, and current state. We just need to focus on the algorithm part for our agent.

We'll be using the Gym environment called `Taxi-v3`, from which all of the details explained above were pulled. The objectives, rewards, and actions are all the same.

### Gym's interface

We need to install `gym` first. Executing the following in a Jupyter notebook should work:

```
!pip install cmake 'gym[atari]' scipy
```

Once installed, we can load the game environment and render what it looks like:

```
import gym

env = gym.make("Taxi-v3").env

env.render()
```

The core gym interface is `env`, which is the unified environment interface. The following are the `env` methods that will be quite helpful to us:

- `env.reset`: Resets the environment and returns a random initial state.
- `env.step(action)`: Steps the environment by one timestep. Returns
    + **observation**: Observations of the environment
    + **reward**: Whether your action was beneficial or not
    + **done**: Indicates if we have successfully picked up and dropped off a passenger, also called one *episode*
    + **info**: Additional info such as performance and latency for debugging purposes
- `env.render`: Renders one frame of the environment (helpful in visualizing the environment)

Note: We are using the `.env` on the end of `make` to avoid training stopping at 200 iterations, which is the default for the new version of Gym ([reference](https://stackoverflow.com/a/42802225)).

### Reminder of our problem

Here's our restructured problem statement (from the Gym docs):

> There are 4 locations (labeled by different letters), and our job is to pick up the passenger at one location and drop him off at another. We receive +20 points for a successful drop-off and lose 1 point for every time-step it takes. There is also a 10 point penalty for illegal pick-up and drop-off actions.

Let's dive more into the environment.

```
env.reset() # reset environment to a new, random state
env.render()

print("Action Space {}".format(env.action_space))
print("State Space {}".format(env.observation_space))
```

- The **filled square** represents the taxi, which is yellow without a passenger and green with a passenger.
- The **pipe ("|")** represents a wall which the taxi cannot cross.
- **R, G, Y, B** are the possible pickup and destination locations. The **blue letter** represents the current passenger pick-up location, and the **purple letter** is the current destination.

As verified by the prints, we have an **Action Space** of size 6 and a **State Space** of size 500. As you'll see, our RL algorithm won't need any more information than these two things. All we need is a way to identify a state uniquely by assigning a unique number to every possible state, and RL learns to choose an action number from 0-5 where:

- 0 = south
- 1 = north
- 2 = east
- 3 = west
- 4 = pickup
- 5 = dropoff

Recall that the 500 states correspond to an encoding of the taxi's location, the passenger's location, and the destination location.

Reinforcement Learning will learn a mapping of **states** to the optimal **action** to perform in that state by *exploration*, i.e. the agent explores the environment and takes actions based off rewards defined in the environment. The optimal action for each state is the action that has the **highest cumulative long-term reward**.
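To make those return values concrete, here is a small illustrative sketch (not part of the original tutorial) that takes one random action and unpacks the classic 4-tuple that `env.step` returns in this version of Gym:

```
# Take a single random action in the Taxi-v3 environment created above
# and inspect what comes back from env.step.
state = env.reset()
action = env.action_space.sample()                  # one of the six actions, 0-5
observation, reward, done, info = env.step(action)

print("starting state:", state)
print("action taken:", action)
print("observation (new state):", observation)
print("reward:", reward)                            # -1 per step, -10 for illegal pickup/dropoff
print("episode finished:", done)
```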
#### Back to our illustration

We can actually take our illustration above, encode its state, and give it to the environment to render in Gym. Recall that we have the taxi at row 3, column 1, our passenger is at location 2, and our destination is location 0. Using the Taxi-v3 state encoding method, we can do the following:

```
state = env.encode(3, 1, 2, 0) # (taxi row, taxi column, passenger index, destination index)
print("State:", state)

env.s = state
env.render()
```

We are using our illustration's coordinates to generate a number corresponding to a state between 0 and 499, which turns out to be **328** for our illustration's state. Then we can set the environment's state manually with `env.s` using that encoded number. You can play around with the numbers and you'll see the taxi, passenger, and destination move around.

#### The Reward Table

When the Taxi environment is created, there is an initial Reward table that's also created, called `P`. We can think of it like a matrix that has the number of states as rows and the number of actions as columns, i.e. a $states \ \times \ actions$ matrix.

Since every state is in this matrix, we can see the default reward values assigned to our illustration's state:

```
print(env.P[328])
```

This dictionary has the structure `{action: [(probability, nextstate, reward, done)]}`. A few things to note:

- The 0-5 corresponds to the actions (south, north, east, west, pickup, dropoff) the taxi can perform at our current state in the illustration.
- In this env, `probability` is always 1.0.
- The `nextstate` is the state we would be in if we take the action at this index of the dict.
- All the movement actions have a -1 reward and the pickup/dropoff actions have a -10 reward in this particular state. If we are in a state where the taxi has a passenger and is on top of the right destination, we would see a reward of 20 at the dropoff action (5).
- `done` is used to tell us when we have successfully dropped off a passenger at the right location. Each successful dropoff is the end of an **episode**.

Note that if our agent chose to explore action two (2) in this state it would be going East into a wall. The source code has made it impossible to actually move the taxi across a wall, so if the taxi chooses that action, it will just keep accruing -1 penalties, which affects the **long-term reward**.

### Solving the environment without Reinforcement Learning

Let's see what would happen if we try to brute-force our way to solving the problem without RL. Since we have our `P` table for default rewards in each state, we can try to have our taxi navigate just using that.

We'll create an infinite loop which runs until one passenger reaches one destination (one **episode**), or in other words, until the received reward is 20. The `env.action_space.sample()` method automatically selects one random action from the set of all possible actions.
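As an optional aside before we run the random agent below, the following sketch (the `action_names` list is just an assumption matching the 0-5 ordering described above) prints the same reward-table row with the action names spelled out:

```
# Pretty-print the reward-table row for the illustration's state (328),
# labeling each action index with its name.
action_names = ["south", "north", "east", "west", "pickup", "dropoff"]

for action, transitions in env.P[328].items():
    for probability, next_state, reward, done in transitions:
        print(f"{action_names[action]:>8}: p={probability}, "
              f"next_state={next_state}, reward={reward}, done={done}")
```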
Let's see what happens:

```
env.s = 328  # set environment to illustration's state

epochs = 0
penalties, reward = 0, 0

frames = [] # for animation

done = False

while not done:
    action = env.action_space.sample()
    state, reward, done, info = env.step(action)

    if reward == -10:
        penalties += 1

    # Put each rendered frame into dict for animation
    frames.append({
        'frame': env.render(mode='ansi'),
        'state': state,
        'action': action,
        'reward': reward
        }
    )

    epochs += 1

print("Timesteps taken: {}".format(epochs))
print("Penalties incurred: {}".format(penalties))

from IPython.display import clear_output
from time import sleep

def print_frames(frames):
    for i, frame in enumerate(frames):
        clear_output(wait=True)
        #print(frame['frame'].getvalue())
        print(f"Timestep: {i + 1}")
        print(f"State: {frame['state']}")
        print(f"Action: {frame['action']}")
        print(f"Reward: {frame['reward']}")
        sleep(.1)

print_frames(frames)
```

Not good. Our agent takes thousands of timesteps and makes lots of wrong drop-offs to deliver just one passenger to the right destination. This is because we aren't *learning* from past experience. We can run this over and over, and it will never optimize. The agent has no memory of which action was best for each state, which is exactly the memory that Reinforcement Learning will give it.

### Enter Reinforcement Learning

We are going to use a simple RL algorithm called *Q-learning* which will give our agent some memory.

#### Intro to Q-learning

Essentially, Q-learning lets the agent use the environment's rewards to learn, over time, the best action to take in a given state. In our Taxi environment, we have the reward table, `P`, that the agent will learn from. It does this by receiving a reward for taking an action in the current state, then updating a *Q-value* to remember whether that action was beneficial.

The values stored in the Q-table are called *Q-values*, and they map to a `(state, action)` combination.

A Q-value for a particular state-action combination is representative of the "quality" of an action taken from that state. Better Q-values imply better chances of getting greater rewards.

For example, if the taxi is faced with a state that includes a passenger at its current location, it is highly likely that the Q-value for `pickup` is higher when compared to other actions, like `dropoff` or `north`.

Q-values are initialized to an arbitrary value, and as the agent exposes itself to the environment and receives different rewards by executing different actions, the Q-values are updated using the equation:

$$\Large Q({\small state}, {\small action}) \leftarrow (1 - \alpha) Q({\small state}, {\small action}) + \alpha \Big({\small reward} + \gamma \max_{a} Q({\small next \ state}, {\small all \ actions})\Big)$$

Where:

- $\Large \alpha$ (alpha) is the learning rate ($0 < \alpha \leq 1$) - Just like in supervised learning settings, $\alpha$ is the extent to which our Q-values are being updated in every iteration.
- $\Large \gamma$ (gamma) is the discount factor ($0 \leq \gamma \leq 1$) - determines how much importance we want to give to future rewards. A high value for the discount factor (close to **1**) captures the long-term effective reward, whereas a discount factor of **0** makes our agent consider only the immediate reward, hence making it greedy.

**What is this saying?**

We are assigning ($\leftarrow$), or updating, the Q-value of the agent's current *state* and *action* by first taking a weight ($1-\alpha$) of the old Q-value, then adding the learned value.
The learned value is a combination of the reward for taking the current action in the current state, and the discounted maximum reward from the next state we will be in once we take the current action.

Basically, we are learning the proper action to take in the current state by looking at the reward for the current state/action combo, and the max rewards for the next state. This will eventually cause our taxi to consider the route with the best rewards strung together.

The Q-value of a state-action pair is the sum of the instant reward and the discounted future reward (of the resulting state). The way we store the Q-values for each state and action is through a **Q-table**.

##### Q-Table

The Q-table is a matrix where we have a row for every state (500) and a column for every action (6). It's first initialized to 0, and then values are updated after training. Note that the Q-table has the same dimensions as the reward table, but it has a completely different purpose.

<img src="assets/q-matrix-initialized-to-learned.png" width=500px>

#### Summing up the Q-Learning Process

Breaking it down into steps, we get:

- Initialize the Q-table with all zeros.
- Start exploring actions: for the current state (S), select any one among all possible actions.
- Travel to the next state (S') as a result of that action (a).
- For all possible actions from the state (S'), select the one with the highest Q-value.
- Update the Q-table values using the equation.
- Set the next state as the current state.
- If the goal state is reached, end the episode, then repeat the process.

##### Exploiting learned values

After enough random exploration of actions, the Q-values tend to converge, serving our agent as an action-value function which it can exploit to pick the most optimal action from a given state.

There's a tradeoff between exploration (choosing a random action) and exploitation (choosing actions based on already learned Q-values). We want to prevent the agent from always taking the same route, and possibly overfitting, so we'll be introducing another parameter called $\Large \epsilon$ "epsilon" to cater to this during training.

Instead of just selecting the best learned Q-value action, we'll sometimes favor exploring the action space further. A higher epsilon value results in episodes with more penalties (on average), which makes sense because we are exploring and making random decisions more often.

### Implementing Q-learning in Python

#### Training the Agent

First, we'll initialize the Q-table to a $500 \times 6$ matrix of zeros:

```
import numpy as np
q_table = np.zeros([env.observation_space.n, env.action_space.n])
q_table
```

We can now create the training algorithm that will update this Q-table as the agent explores the environment over thousands of episodes.

In the first part of `while not done`, we decide whether to pick a random action or to exploit the already computed Q-values. This is done simply by using the `epsilon` value and comparing it to the `random.uniform(0, 1)` function, which returns an arbitrary number between 0 and 1.

We execute the chosen action in the environment to obtain the `next_state` and the `reward` from performing the action.
After that, we calculate the maximum Q-value for the actions corresponding to the `next_state`, and with that, we can easily update our Q-value to the `new_q_value`:

```
%%time
"""Training the agent"""

import random
from IPython.display import clear_output
import matplotlib.pyplot as plt
import seaborn as sns
from time import sleep
%matplotlib inline

# Hyperparameters
alpha = 0.1
gamma = 0.6
epsilon = 0.1

# For plotting metrics
all_epochs = []
all_penalties = []

for i in range(1, 1000):
    state = env.reset()

    epochs, penalties, reward = 0, 0, 0
    done = False

    while not done:
        if random.uniform(0, 1) < epsilon:
            action = env.action_space.sample() # Explore the action space
        else:
            action = np.argmax(q_table[state]) # Exploit learned Q-values

        next_state, reward, done, info = env.step(action)

        old_value = q_table[state, action] # current Q-value
        next_max = np.max(q_table[next_state])

        new_value = (1 - alpha) * old_value + alpha * (reward + gamma * next_max)
        q_table[state, action] = new_value

        if reward == -10:
            penalties += 1

        state = next_state
        epochs += 1

    if i % 100 == 0:
        clear_output(wait=True)
        print(f"Episode: {i}")

print("Training finished.\n")
```

Now that the Q-table has been built up over the training episodes, let's see what the Q-values are at our illustration's state:

```
q_table[328]
```

The max Q-value is "north" (-1.971), so it looks like Q-learning has effectively learned the best action to take in our illustration's state!

### Evaluating the agent

Let's evaluate the performance of our agent. We don't need to explore actions any further, so now the next action is always selected using the best Q-value:

```
"""Evaluate agent's performance after Q-learning"""

total_epochs, total_penalties = 0, 0
episodes = 100

for _ in range(episodes):
    state = env.reset()
    epochs, penalties, reward = 0, 0, 0

    done = False

    while not done:
        action = np.argmax(q_table[state]) # always exploit the learned Q-values
        state, reward, done, info = env.step(action)

        if reward == -10:
            penalties += 1

        epochs += 1

    total_penalties += penalties
    total_epochs += epochs

print(f"Results after {episodes} episodes:")
print(f"Average timesteps per episode: {total_epochs / episodes}")
print(f"Average penalties per episode: {total_penalties / episodes}")
```

We can see from the evaluation that the agent's performance improved significantly and it incurred no penalties, which means it performed the correct pickup/dropoff actions with 100 different passengers.

#### Comparing our Q-learning agent to no Reinforcement Learning

With Q-learning, the agent commits errors initially during exploration, but once it has explored enough (seen most of the states), it can act wisely, maximizing rewards by making smart moves. Let's see how much better our Q-learning solution is when compared to the agent making just random moves.

We evaluate our agents according to the following metrics:

- **Average number of penalties per episode:** The smaller the number, the better the performance of our agent. Ideally, we would like this metric to be zero or very close to zero.
- **Average number of timesteps per trip:** We want a small number of timesteps per episode as well, since we want our agent to take the minimum number of steps (i.e. the shortest path) to reach the destination.
- **Average rewards per move:** A larger reward means the agent is doing the right thing. That's why deciding rewards is a crucial part of Reinforcement Learning.
In our case, as both timesteps and penalties are negatively rewarded, a higher average reward means that the agent reaches the destination as fast as possible with the fewest penalties.

| Measure | Random agent's performance | Q-learning agent's performance |
|------------------------------------------|----------------------------|--------------------------------|
| Average rewards per move | -3.9012092102214075 | 0.6962843295638126 |
| Average number of penalties per episode | 920.45 | 0.0 |
| Average number of timesteps per trip | 2848.14 | 12.38 |

These metrics were computed over 100 episodes. And as the results show, our Q-learning agent nailed it!

#### Hyperparameters and optimizations

The values of `alpha`, `gamma`, and `epsilon` were mostly based on intuition and some trial and error, but there are better ways to come up with good values.

Ideally, all three should decrease over time because as the agent continues to learn, it actually builds up more resilient priors:

- $\Large \alpha$: (the learning rate) should decrease as you continue to gain a larger and larger knowledge base.
- $\Large \gamma$: as you get closer and closer to the deadline, your preference for near-term reward should increase, as you won't be around long enough to get the long-term reward, which means your gamma should decrease.
- $\Large \epsilon$: as we develop our strategy, we have less need of exploration and more exploitation to get more utility from our policy, so as trials increase, epsilon should decrease.

#### Tuning the hyperparameters

A simple way to programmatically come up with the best set of hyperparameter values is to create a comprehensive search function (similar to [grid search](https://en.wikipedia.org/wiki/Hyperparameter_optimization#Grid_search)) that selects the parameters resulting in the best `reward/time_steps` ratio. The reason for `reward/time_steps` is that we want to choose parameters which enable us to get the maximum reward as fast as possible. We may also want to track the number of penalties corresponding to each hyperparameter combination, because this can also be a deciding factor (we don't want our smart agent to violate rules at the cost of reaching the destination faster).

A fancier way to get the right combination of hyperparameter values would be to use Genetic Algorithms.

## Conclusion and What's Ahead

Alright! We began with understanding Reinforcement Learning with the help of real-world analogies. We then dived into the basics of Reinforcement Learning and framed a self-driving cab as a Reinforcement Learning problem. We then used OpenAI's Gym in Python to provide us with a related environment, where we could develop our agent and evaluate it.

Then we observed how terrible our agent was without using any algorithm to play the game, so we went ahead and implemented the Q-learning algorithm from scratch. The agent's performance improved significantly after Q-learning. Finally, we discussed better approaches for deciding the hyperparameters for our algorithm.

Q-learning is one of the easiest Reinforcement Learning algorithms. The problem with Q-learning, however, is that once the number of states in the environment is very high, it becomes difficult to implement with a Q-table, as its size would become very, very large. State-of-the-art techniques use deep neural networks instead of the Q-table (Deep Reinforcement Learning). The neural network takes in state information and actions at the input layer and learns to output the right action over time.
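As a rough illustration of that idea (only a sketch, not part of this tutorial's code), one common Deep Q-Network variant replaces the Q-table with a small network that takes the state as input and outputs one Q-value per action. In PyTorch it could look something like the following, where `QNetwork` and the layer sizes are illustrative assumptions:

```
import torch
import torch.nn as nn

class QNetwork(nn.Module):
    """Illustrative Q-network: maps a state vector to one Q-value per action."""
    def __init__(self, state_dim, n_actions, hidden=64):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(state_dim, hidden),
            nn.ReLU(),
            nn.Linear(hidden, n_actions),
        )

    def forward(self, state):
        # state: (batch, state_dim) -> Q-values: (batch, n_actions)
        return self.net(state)

# Greedy action selection then replaces the Q-table lookup:
# action = q_network(state_tensor).argmax(dim=-1)
```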
Deep learning techniques (like Convolutional Neural Networks) are also used to interpret the pixels on the screen and extract information out of the game (like scores), letting the agent then control the game.

We have discussed a lot about Reinforcement Learning and games, but Reinforcement Learning is not limited to games. It is used for managing stock portfolios and finances, for making humanoid robots, for manufacturing and inventory management, and to develop general AI agents, which are agents that can perform multiple things with a single algorithm, like the same agent playing multiple Atari games. OpenAI also has a platform called Universe for measuring and training an AI's general intelligence across myriad games, websites and other general applications.
``` from google.colab import drive drive.mount('/content/drive') path = '/content/drive/MyDrive/Research/AAAI/complexity/50_200/' import numpy as np import pandas as pd import torch import torchvision from torch.utils.data import Dataset, DataLoader from torchvision import transforms, utils import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from matplotlib import pyplot as plt %matplotlib inline torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False ``` # Generate dataset ``` mu1 = np.array([3,3,3,3,0]) sigma1 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) mu2 = np.array([4,4,4,4,0]) sigma2 = np.array([[16,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) mu3 = np.array([10,5,5,10,0]) sigma3 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) mu4 = np.array([-10,-10,-10,-10,0]) sigma4 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) mu5 = np.array([-21,4,4,-21,0]) sigma5 = np.array([[16,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) mu6 = np.array([-10,18,18,-10,0]) sigma6 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) mu7 = np.array([4,20,4,20,0]) sigma7 = np.array([[16,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) mu8 = np.array([4,-20,-20,4,0]) sigma8 = np.array([[16,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) mu9 = np.array([20,20,20,20,0]) sigma9 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) mu10 = np.array([20,-10,-10,20,0]) sigma10 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) np.random.seed(12) sample1 = np.random.multivariate_normal(mean=mu1,cov= sigma1,size=500) sample2 = np.random.multivariate_normal(mean=mu2,cov= sigma2,size=500) sample3 = np.random.multivariate_normal(mean=mu3,cov= sigma3,size=500) sample4 = np.random.multivariate_normal(mean=mu4,cov= sigma4,size=500) sample5 = np.random.multivariate_normal(mean=mu5,cov= sigma5,size=500) sample6 = np.random.multivariate_normal(mean=mu6,cov= sigma6,size=500) sample7 = np.random.multivariate_normal(mean=mu7,cov= sigma7,size=500) sample8 = np.random.multivariate_normal(mean=mu8,cov= sigma8,size=500) sample9 = np.random.multivariate_normal(mean=mu9,cov= sigma9,size=500) sample10 = np.random.multivariate_normal(mean=mu10,cov= sigma10,size=500) X = np.concatenate((sample1,sample2,sample3,sample4,sample5,sample6,sample7,sample8,sample9,sample10),axis=0) Y = np.concatenate((np.zeros((500,1)),np.ones((500,1)),2*np.ones((500,1)),3*np.ones((500,1)),4*np.ones((500,1)), 5*np.ones((500,1)),6*np.ones((500,1)),7*np.ones((500,1)),8*np.ones((500,1)),9*np.ones((500,1))),axis=0).astype(int) print(X[0], Y[0]) print(X[500], Y[500]) class SyntheticDataset(Dataset): """MosaicDataset dataset.""" def __init__(self, x, y): """ Args: x: list of instance y: list of instance label """ self.x = x self.y = y #self.fore_idx = fore_idx def __len__(self): return len(self.y) def __getitem__(self, idx): return self.x[idx] , self.y[idx] #, self.fore_idx[idx] trainset = SyntheticDataset(X,Y) classes = ('zero','one','two','three','four','five','six','seven','eight','nine') foreground_classes = {'zero','one','two'} fg_used = '012' fg1, fg2, fg3 = 0,1,2 all_classes = {'zero','one','two','three','four','five','six','seven','eight','nine'} background_classes = all_classes - foreground_classes print("background classes ",background_classes) trainloader = torch.utils.data.DataLoader(trainset, batch_size=100, 
shuffle=False) dataiter = iter(trainloader) background_data=[] background_label=[] foreground_data=[] foreground_label=[] batch_size=100 for i in range(50): images, labels = dataiter.next() for j in range(batch_size): if(classes[labels[j]] in background_classes): img = images[j].tolist() background_data.append(img) background_label.append(labels[j]) else: img = images[j].tolist() foreground_data.append(img) foreground_label.append(labels[j]) foreground_data = torch.tensor(foreground_data) foreground_label = torch.tensor(foreground_label) background_data = torch.tensor(background_data) background_label = torch.tensor(background_label) print(foreground_data[0], foreground_label[0] ) def create_mosaic_img(bg_idx,fg_idx,fg): """ bg_idx : list of indexes of background_data[] to be used as background images in mosaic fg_idx : index of image to be used as foreground image from foreground data fg : at what position/index foreground image has to be stored out of 0-8 """ image_list=[] j=0 for i in range(9): if i != fg: image_list.append(background_data[bg_idx[j]]) j+=1 else: image_list.append(foreground_data[fg_idx]) label = foreground_label[fg_idx] - fg1 # minus fg1 because our fore ground classes are fg1,fg2,fg3 but we have to store it as 0,1,2 #image_list = np.concatenate(image_list ,axis=0) image_list = torch.stack(image_list) return image_list,label desired_num = 6000 mosaic_list_of_images =[] # list of mosaic images, each mosaic image is saved as list of 9 images fore_idx =[] # list of indexes at which foreground image is present in a mosaic image i.e from 0 to 9 mosaic_label=[] # label of mosaic image = foreground class present in that mosaic list_set_labels = [] for i in range(desired_num): set_idx = set() np.random.seed(i) bg_idx = np.random.randint(0,3500,8) set_idx = set(background_label[bg_idx].tolist()) fg_idx = np.random.randint(0,1500) set_idx.add(foreground_label[fg_idx].item()) fg = np.random.randint(0,9) fore_idx.append(fg) image_list,label = create_mosaic_img(bg_idx,fg_idx,fg) mosaic_list_of_images.append(image_list) mosaic_label.append(label) list_set_labels.append(set_idx) len(mosaic_list_of_images), mosaic_list_of_images[0] ``` # load mosaic data ``` class MosaicDataset(Dataset): """MosaicDataset dataset.""" def __init__(self, mosaic_list, mosaic_label,fore_idx): """ Args: csv_file (string): Path to the csv file with annotations. root_dir (string): Directory with all the images. transform (callable, optional): Optional transform to be applied on a sample. 
""" self.mosaic = mosaic_list self.label = mosaic_label self.fore_idx = fore_idx def __len__(self): return len(self.label) def __getitem__(self, idx): return self.mosaic[idx] , self.label[idx] , self.fore_idx[idx] batch = 250 msd1 = MosaicDataset(mosaic_list_of_images[0:3000], mosaic_label[0:3000] , fore_idx[0:3000]) train_loader = DataLoader( msd1 ,batch_size= batch ,shuffle=True) batch = 250 msd2 = MosaicDataset(mosaic_list_of_images[3000:6000], mosaic_label[3000:6000] , fore_idx[3000:6000]) test_loader = DataLoader( msd2 ,batch_size= batch ,shuffle=True) ``` # models ``` class Focus_deep(nn.Module): ''' deep focus network averaged at zeroth layer with input-50-output architecture input : elemental data ''' def __init__(self,inputs,output,K,d): super(Focus_deep,self).__init__() self.inputs = inputs self.output = output self.K = K self.d = d self.linear1 = nn.Linear(self.inputs,50, bias=False) #,self.output) self.linear2 = nn.Linear(50,self.output, bias=False) torch.nn.init.xavier_normal_(self.linear1.weight) torch.nn.init.xavier_normal_(self.linear2.weight) def forward(self,z): batch = z.shape[0] x = torch.zeros([batch,self.K],dtype=torch.float64) y = torch.zeros([batch,self.d], dtype=torch.float64) x,y = x.to("cuda"),y.to("cuda") for i in range(self.K): x[:,i] = self.helper(z[:,i] )[:,0] # self.d*i:self.d*i+self.d log_x = F.log_softmax(x,dim=1) x = F.softmax(x,dim=1) # alphas x1 = x[:,0] for i in range(self.K): x1 = x[:,i] y = y+torch.mul(x1[:,None],z[:,i]) # self.d*i:self.d*i+self.d return y , x , log_x def helper(self,x): x = F.relu(self.linear1(x)) x = self.linear2(x) return x class Classification_deep(nn.Module): ''' input : elemental data deep classification module data averaged at zeroth layer with input-50-output architecture ''' def __init__(self,inputs,output): super(Classification_deep,self).__init__() self.inputs = inputs self.output = output self.linear1 = nn.Linear(self.inputs,200) self.linear2 = nn.Linear(200,self.output) torch.nn.init.xavier_normal_(self.linear1.weight) torch.nn.init.zeros_(self.linear1.bias) torch.nn.init.xavier_normal_(self.linear2.weight) torch.nn.init.zeros_(self.linear2.bias) def forward(self,x): x = F.relu(self.linear1(x)) x = self.linear2(x) return x torch.manual_seed(12) focus_net = Focus_deep(2,1,9,2).double() focus_net = focus_net.to("cuda") focus_net.linear1.weight.shape,focus_net.linear2.weight.shape focus_net.linear1.weight.data[25:,:] = focus_net.linear1.weight.data[:25,:] #torch.nn.Parameter(torch.tensor([last_layer]) ) (focus_net.linear1.weight[:25,:]== focus_net.linear1.weight[25:,:] ) focus_net.linear2.weight.data[:,25:] = -focus_net.linear2.weight.data[:,:25] #torch.nn.Parameter(torch.tensor([last_layer]) ) focus_net.linear2.weight focus_net.helper( torch.randn((1,5,2)).double().to("cuda") ) criterion = nn.CrossEntropyLoss() def my_cross_entropy(x, y,alpha,log_alpha,k): # log_prob = -1.0 * F.log_softmax(x, 1) # loss = log_prob.gather(1, y.unsqueeze(1)) # loss = loss.mean() loss = criterion(x,y) #alpha = torch.clamp(alpha,min=1e-10) b = -1.0* alpha * log_alpha b = torch.mean(torch.sum(b,dim=1)) closs = loss entropy = b loss = (1-k)*loss + ((k)*b) return loss,closs,entropy def calculate_attn_loss(dataloader,what,where,criter,k): what.eval() where.eval() r_loss = 0 cc_loss = 0 cc_entropy = 0 alphas = [] lbls = [] pred = [] fidices = [] with torch.no_grad(): for i, data in enumerate(dataloader, 0): inputs, labels,fidx = data lbls.append(labels) fidices.append(fidx) inputs = inputs.double() inputs, labels = 
inputs.to("cuda"),labels.to("cuda") avg,alpha,log_alpha = where(inputs) outputs = what(avg) _, predicted = torch.max(outputs.data, 1) pred.append(predicted.cpu().numpy()) alphas.append(alpha.cpu().numpy()) #ent = np.sum(entropy(alpha.cpu().detach().numpy(), base=2, axis=1))/batch # mx,_ = torch.max(alpha,1) # entropy = np.mean(-np.log2(mx.cpu().detach().numpy())) # print("entropy of batch", entropy) #loss = (1-k)*criter(outputs, labels) + k*ent loss,closs,entropy = my_cross_entropy(outputs,labels,alpha,log_alpha,k) r_loss += loss.item() cc_loss += closs.item() cc_entropy += entropy.item() alphas = np.concatenate(alphas,axis=0) pred = np.concatenate(pred,axis=0) lbls = np.concatenate(lbls,axis=0) fidices = np.concatenate(fidices,axis=0) #print(alphas.shape,pred.shape,lbls.shape,fidices.shape) analysis = analyse_data(alphas,lbls,pred,fidices) return r_loss/i,cc_loss/i,cc_entropy/i,analysis def analyse_data(alphas,lbls,predicted,f_idx): ''' analysis data is created here ''' batch = len(predicted) amth,alth,ftpt,ffpt,ftpf,ffpf = 0,0,0,0,0,0 for j in range (batch): focus = np.argmax(alphas[j]) if(alphas[j][focus] >= 0.5): amth +=1 else: alth +=1 if(focus == f_idx[j] and predicted[j] == lbls[j]): ftpt += 1 elif(focus != f_idx[j] and predicted[j] == lbls[j]): ffpt +=1 elif(focus == f_idx[j] and predicted[j] != lbls[j]): ftpf +=1 elif(focus != f_idx[j] and predicted[j] != lbls[j]): ffpf +=1 #print(sum(predicted==lbls),ftpt+ffpt) return [ftpt,ffpt,ftpf,ffpf,amth,alth] ``` # training ``` number_runs = 10 full_analysis =[] FTPT_analysis = pd.DataFrame(columns = ["FTPT","FFPT", "FTPF","FFPF"]) k = 0 for n in range(number_runs): print("--"*40) # instantiate focus and classification Model torch.manual_seed(n) where = Focus_deep(5,1,9,5).double() where.linear1.weight.data[25:,:] = where.linear1.weight.data[:25,:] where.linear2.weight.data[:,25:] = -where.linear2.weight.data[:,:25] where = where.double().to("cuda") print(where.helper( torch.randn((1,5,9)).double().to("cuda"))) what = Classification_deep(5,3).double() where = where.to("cuda") what = what.to("cuda") # instantiate optimizer optimizer_where = optim.Adam(where.parameters(),lr =0.001) optimizer_what = optim.Adam(what.parameters(), lr=0.001) #criterion = nn.CrossEntropyLoss() acti = [] analysis_data = [] loss_curi = [] epochs = 2000 # calculate zeroth epoch loss and FTPT values running_loss ,_,_,anlys_data= calculate_attn_loss(train_loader,what,where,criterion,k) loss_curi.append(running_loss) analysis_data.append(anlys_data) print('epoch: [%d ] loss: %.3f' %(0,running_loss)) # training starts for epoch in range(epochs): # loop over the dataset multiple times ep_lossi = [] running_loss = 0.0 what.train() where.train() for i, data in enumerate(train_loader, 0): # get the inputs inputs, labels,_ = data inputs = inputs.double() inputs, labels = inputs.to("cuda"),labels.to("cuda") # zero the parameter gradients optimizer_where.zero_grad() optimizer_what.zero_grad() # forward + backward + optimize avg, alpha,log_alpha = where(inputs) outputs = what(avg) my_loss,_,_ = my_cross_entropy(outputs,labels,alpha,log_alpha,k) # print statistics running_loss += my_loss.item() my_loss.backward() optimizer_where.step() optimizer_what.step() #break running_loss,ccloss,ccentropy,anls_data = calculate_attn_loss(train_loader,what,where,criterion,k) analysis_data.append(anls_data) if(epoch % 200==0): print('epoch: [%d] loss: %.3f celoss: %.3f entropy: %.3f' %(epoch + 1,running_loss,ccloss,ccentropy)) loss_curi.append(running_loss) #loss per epoch if 
running_loss<=0.01: print('breaking in epoch: ', epoch) break print('Finished Training run ' +str(n)) #break analysis_data = np.array(analysis_data) FTPT_analysis.loc[n] = analysis_data[-1,:4]/30 full_analysis.append((epoch, analysis_data)) correct = 0 total = 0 with torch.no_grad(): for data in test_loader: images, labels,_ = data images = images.double() images, labels = images.to("cuda"), labels.to("cuda") avg, alpha,log_alpha = where(images) outputs = what(avg) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 3000 test images: %f %%' % ( 100 * correct / total)) print(np.mean(np.array(FTPT_analysis),axis=0)) FTPT_analysis FTPT_analysis[FTPT_analysis['FTPT']+FTPT_analysis['FFPT'] > 90 ] print(np.mean(np.array(FTPT_analysis[FTPT_analysis['FTPT']+FTPT_analysis['FFPT'] > 90 ]),axis=0)) cnt=1 for epoch, analysis_data in full_analysis: analysis_data = np.array(analysis_data) # print("="*20+"run ",cnt,"="*20) plt.figure(figsize=(6,5)) plt.plot(np.arange(0,epoch+2,1),analysis_data[:,0]/30,label="FTPT") plt.plot(np.arange(0,epoch+2,1),analysis_data[:,1]/30,label="FFPT") plt.plot(np.arange(0,epoch+2,1),analysis_data[:,2]/30,label="FTPF") plt.plot(np.arange(0,epoch+2,1),analysis_data[:,3]/30,label="FFPF") plt.title("Training trends for run "+str(cnt)) plt.grid() # plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.legend() plt.xlabel("epochs", fontsize=14, fontweight = 'bold') plt.ylabel("percentage train data", fontsize=14, fontweight = 'bold') plt.savefig(path + "run"+str(cnt)+".png",bbox_inches="tight") plt.savefig(path + "run"+str(cnt)+".pdf",bbox_inches="tight") cnt+=1 FTPT_analysis.to_csv(path+"synthetic_zeroth.csv",index=False) ```
<a href="https://colab.research.google.com/github/MonitSharma/Learn-Quantum-Computing/blob/main/Circuit_Basics.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

```
!pip install qiskit
```

# Qiskit Basics

```
import numpy as np
from qiskit import QuantumCircuit

# building a circuit
qc = QuantumCircuit(3)

# adding gates
qc.h(0)
qc.cx(0,1)
qc.cx(0,2)

qc.draw('mpl')
```

## Simulating the Circuits

```
from qiskit.quantum_info import Statevector

# setting the initial state to 0
state = Statevector.from_int(0, 2**3)
state = state.evolve(qc)
state.draw('latex')

from qiskit.quantum_info import Statevector

# setting the initial state to 1
state = Statevector.from_int(1, 2**3)
state = state.evolve(qc)
state.draw('latex')
```

Below we use the visualization functions to plot the qsphere and a Hinton diagram representing the real and imaginary components of the state density matrix $\rho$.

```
state.draw('qsphere')
state.draw('hinton')
```

## Unitary Representation of a Circuit

The `quantum_info` module of Qiskit has an `Operator` class that can be used to build the unitary operator for the circuit.

```
from qiskit.quantum_info import Operator

U = Operator(qc)
U.data
```

## Open QASM backend

The simulators above are useful, as they provide information about the output state and the matrix representation of the circuit. Here we will learn about more simulators that help us in measuring the circuit.

```
qc2 = QuantumCircuit(3,3)
qc2.barrier(range(3))

# do the measurement
qc2.measure(range(3), range(3))
qc2.draw('mpl')

# now, if we want to add both the qc and qc2 circuits
circ = qc2.compose(qc, range(3), front = True)
circ.draw('mpl')
```

This circuit adds a classical register and three measurements that are used to map the outcomes of the qubits to the classical bits.

To simulate this circuit we use the 'qasm_simulator' in Qiskit Aer. Each single run will yield a bit string $000$ or $111$. To build up statistics about the distribution, we need to repeat the circuit many times. The number of repetitions is specified via the 'shots' keyword when running the circuit.

```
from qiskit import transpile

# import the qasm simulator
from qiskit.providers.aer import QasmSimulator

backend = QasmSimulator()

# first transpile the quantum circuit to low level QASM instructions
qc_compiled = transpile(circ, backend)

# execute the circuit
job_sim = backend.run(qc_compiled, shots=1024)

# get the result
result_sim = job_sim.result()
```

Now that the code has run, we can count the specific outputs it received and plot them as well.

```
counts = result_sim.get_counts(qc_compiled)
print(counts)

from qiskit.visualization import plot_histogram
plot_histogram(counts)
```
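As a quick sanity check (a minimal sketch, not part of the original notebook), we can compare the sampled counts against the ideal distribution of the same circuit; `Statevector.probabilities_dict` gives the exact probabilities, which for this GHZ-style circuit concentrate on `000` and `111`:

```
from qiskit.quantum_info import Statevector

# Ideal (noiseless) outcome probabilities for the 3-qubit circuit `qc` above.
ideal = Statevector.from_int(0, 2**3).evolve(qc).probabilities_dict()
print("ideal probabilities:", ideal)

# Empirical frequencies from the 1024-shot qasm simulation above.
shots = 1024
empirical = {bitstring: n / shots for bitstring, n in counts.items()}
print("sampled frequencies:", empirical)
```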
This script performs analyses to check how many mice pass the currenty set criterion for ephys. ``` import datajoint as dj dj.config['database.host'] = 'datajoint.internationalbrainlab.org' from ibl_pipeline import subject, acquisition, action, behavior, reference, data from ibl_pipeline.analyses.behavior import PsychResults, SessionTrainingStatus from ibl_pipeline.utils import psychofit as psy from ibl_pipeline.analyses import behavior as behavior_analysis import numpy as np import matplotlib.pyplot as plt import pandas as pd # Get list of subjects associated to the repeated site probe trajectory from ONE (original snippet from Gaelle Chapuis) from oneibl.one import ONE one = ONE() traj = one.alyx.rest('trajectories', 'list', provenance='Planned', x=-2243, y=-2000, # repeated site coordinate project='ibl_neuropixel_brainwide_01') sess = [p['session'] for p in traj] first_pass_map_repeated = [(s['subject'],s['start_time'][0:10]) for s in sess] # Download all ephys sessions from DataJoint sess_ephys = (acquisition.Session * subject.Subject * behavior_analysis.SessionTrainingStatus ) & 'task_protocol LIKE "%ephys%"' # & 'task_protocol LIKE "%biased%"' & 'session_start_time < "2019-09-30"') df = pd.DataFrame(sess_ephys) ``` The following code computes how many `ephys` sessions are considered `good_enough_for_brainwide_map`: - across *all* ephys sessions; - across the ephys sessions in the first-pass map for the repeated site. ``` session_dates = df['session_start_time'].apply(lambda x : x.strftime("%Y-%m-%d")) # First, count all mice total = len(df.index) good_enough = np.sum(df['good_enough_for_brainwide_map']) prc = good_enough / total * 100 print('Total # of ephys sessions: '+ str(total)) print('Total # of sessions good_enough_for_brainwide_map: ' + str(good_enough) + ' (' + "{:.1f}".format(prc) + ' %)') # Now, consider only mice in the first pass map, repeated site count = 0 for (mouse_name,session_date) in first_pass_map_repeated: tmp = df[(df['subject_nickname'] == mouse_name) & (session_dates == session_date)] count = count + np.sum(tmp['good_enough_for_brainwide_map']) total = len(first_pass_map_repeated) good_enough = count prc = good_enough / total * 100 print('Total # of ephys sessions in first pass map, repeated site: '+ str(total)) print('Total # of sessions good_enough_for_brainwide_map in first pass map, repeated site: ' + str(good_enough) + ' (' + "{:.1f}".format(prc) + ' %)') ``` The following code computes how many sessions are required for a mouse to reach certain levels of training or protocols, in particular: - from `trained` status to `biased` protocol - from `biased` protocol to `ready4ephys` status ``` mice_list = set(df['subject_nickname']) trained2biased = [] biased2ready4ephys = [] for mouse_name in mice_list: subj_string = 'subject_nickname LIKE "' + mouse_name + '"' sess_mouse = (acquisition.Session * subject.Subject * behavior_analysis.SessionTrainingStatus ) & subj_string df1 = pd.DataFrame(sess_mouse) # Find first session of training trained_start = np.argmax(df1['training_status'].apply(lambda x: 'trained' in x)) if 'trained' not in df1['training_status'][trained_start]: trained_start = None # Find first session of biased protocol biased_start = np.argmax(df1['task_protocol'].apply(lambda x: 'biased' in x)) if 'biased' not in df1['task_protocol'][biased_start]: biased_start = None # Find first session of ephys ready4ephys_start = np.argmax(df1['training_status'].apply(lambda x: 'ready4ephys' in x)) if 'ready4ephys' not in 
df1['training_status'][ready4ephys_start]: ready4ephys_start = None if ready4ephys_start != None: trained2biased.append(biased_start - trained_start) biased2ready4ephys.append(ready4ephys_start - biased_start) trained2biased = np.array(trained2biased) biased2ready4ephys = np.array(biased2ready4ephys) flag = trained2biased > 0 print('# Mice: ' + str(np.sum(flag))) print('# Sessions from "trained" to "biased": ' + "{:.2f}".format(np.mean(trained2biased[flag])) + ' +/- '+ "{:.2f}".format(np.std(trained2biased[flag]))) print('# Sessions from "biased" to "ready4ephys": ' + "{:.2f}".format(np.mean(biased2ready4ephys[flag])) + ' +/- '+ "{:.2f}".format(np.std(biased2ready4ephys[flag]))) ```
# Pipeline Analysis for CSM Model - Plot Heatmaps of the model results using Z-normalization - CEZ/OEZ Pooled Patient Analysis - CEZ/OEZ IRR Metric ``` import os import sys import collections import pandas as pd import numpy as np import warnings warnings.filterwarnings("ignore") import scipy.stats from sklearn.metrics import roc_curve, auc, precision_recall_curve, \ average_precision_score, confusion_matrix, accuracy_score from pprint import pprint import copy from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from scipy.cluster.hierarchy import linkage from scipy.cluster.hierarchy import dendrogram sys.path.append("../../") %matplotlib inline import matplotlib as mp import matplotlib.pyplot as plt import seaborn as sns import dabest from eztrack.edm.classifiers.evaluate.dataset import Dataset, Patient from eztrack.edm.classifiers.evaluate.pipeline import EvaluationFramework from eztrack.edm.classifiers.evaluate.model_selection import get_clinical_split, compute_category_regression, \ compute_splits_train, large_scale_train from eztrack.edv.results.plot_distributions import PlotDistributions from eztrack.edv.base.utils import plot_baseline, plot_boxplot, plot_pr, \ plot_roc, plot_confusion_matrix, plot_boxplot_withdf from eztrack.base.utils.data_science_utils import cutoff_youdens, split_inds_engel, \ split_inds_clindiff, split_inds_outcome, get_numerical_outcome, compute_minmaxfragilitymetric, compute_fragilitymetric,\ compute_znormalized_fragilitymetric, split_inds_modality from eztrack.edm.classifiers.model.cez_oez_analyzer import FragilitySplitAnalyzer from eztrack.pipeline.experiments.cez_vs_oez.center_cezvsoez import plot_results from eztrack.edp.objects.clinical.master_clinical import MasterClinicalSheet from eztrack.edp.loaders.dataset.clinical.excel_meta import ExcelReader # Import magic commands for jupyter notebook # - autoreloading a module # - profiling functions for memory usage and scripts %load_ext autoreload %autoreload 2 def get_per_patient_results(timewarpdict_dataset): # reorder them into patients timewarp_patient = collections.defaultdict(list) datasetids = [] for datasetid in sorted(timewarpdict_dataset.keys()): # extract the patient id patid = datasetid.split("_")[0] _datasetid = datasetid.split("_")[0] datasetids.append(_datasetid) # extract the data from each dataset and the corresponding cez/oez matrix data = timewarpdict_dataset[datasetid] cezmat = data['cezmat'] oezmat = data['oezmat'] if oezmat.shape[0] == 0 or cezmat.shape[0] == 0: print(cezmat.shape, oezmat.shape) print(patid, datasetid) continue # add to patient's list of datasets timewarp_patient[patid].append((cezmat, oezmat)) totaldatasets = 0 for pat in timewarp_patient.keys(): totaldatasets += len(timewarp_patient[pat]) return timewarp_patient, datasetids, totaldatasets datadir = "/Users/adam2392/Dropbox/phd_research/Fragility_Analysis_Project/" # datadir = "/home/adam2392/Documents/Dropbox/phd_research/Fragility_Analysis_Project/" excelfilename = "organized_clinical_datasheet_raw.xlsx" excelfilepath = os.path.join(datadir, excelfilename) outputexcelfilename = "organized_clinical_datasheet_formatted.xlsx" outputexcelfilepath = os.path.join(datadir, outputexcelfilename) print(os.path.exists(excelfilepath)) print(excelfilepath) clinreader = ExcelReader(excelfilepath) ieegdf, datasetdf, scalpdf = clinreader.read_formatted_df() mastersheet = MasterClinicalSheet(ieegdf, datasetdf, scalpdf) figdir = "/Users/adam2392/Downloads/journalfigs/" ``` # Load In 
Data ``` modality = 'ieeg' # modality = 'scalp' reference = "common_avg" reference = "monopolar" modelname = "impulse" networkmodelname = "" freqband = "" expname = "trimmed" datadir = f"/Users/adam2392/Downloads/output_new/{expname}/{modelname}{networkmodelname}/{reference}/{modality}/" resultfilepath = os.path.join(datadir, f"{modelname}_responses.npz") if not os.path.exists(resultfilepath): resultfilepath = os.path.join(datadir, f"networkstatic_responses.npz") allfiles = os.listdir(datadir) print(allfiles) # data that is only timewarped, but without threshold applied yet # datadir = "/Users/adam2392/Downloads/output_new/joined_results/timewarp_nothreshold/" # datadir = "/Users/adam2392/Downloads/output_new/common_avg_timewarp_nothresh/" ``` # Create Plots of Data First create for successful patients, then for failure patients. ``` COMBINE_SEPARATE_PATS = [ 'pt11', 'nl22', 'ummc007', 'tvb7', 'nl02', 'nl06', 'nl11', # no resection ] ignore_pats = [ # 'pt11', # 'jh107' 'la01-2','la01', 'la03', 'la05', # 'la09', 'la23', 'nl22', ] center = 'nih' dict_dataset = dict() centerdir = os.path.join(datadir, center) if freqband != "": centerdir = os.path.join(centerdir, freqband) resultfilepath = os.path.join(centerdir, f"{modelname}_responses.npz") if not os.path.exists(resultfilepath): resultfilepath = os.path.join(centerdir, f"networkstatic_responses.npz") if not os.path.exists(resultfilepath): resultfilepath = os.path.join(centerdir, f"impulsemodel_magnitude1_responses.npz") allfiles = os.listdir(os.path.join(centerdir)) # load in the dataset trainresult = np.load(resultfilepath, allow_pickle=True) dict_dataset.update(**trainresult['timewarpdict'].item()) dataset_patient, datasetids, numdatasets = get_per_patient_results(dict_dataset) print(dataset_patient.keys()) print(numdatasets) dict_dataset = dict() centers = [ # 'clevelandnl', 'cleveland', 'nih', 'jhu', 'ummc' ] for center in centers: centerdir = os.path.join(datadir, center) if freqband != "": centerdir = os.path.join(centerdir, freqband) resultfilepath = os.path.join(centerdir, f"{modelname}_responses.npz") # print(resultfilepath) if not os.path.exists(resultfilepath): resultfilepath = os.path.join(centerdir, f"networkstatic_responses.npz") if not os.path.exists(resultfilepath): resultfilepath = os.path.join(centerdir, f"impulsemodel_magnitude1_responses.npz") allfiles = os.listdir(os.path.join(centerdir)) # load in the datasete result = np.load(resultfilepath, allow_pickle=True) dict_dataset.update(**result['timewarpdict'].item()) print(dict_dataset.keys()) dataset_patient, datasetids, totaldatasets = get_per_patient_results(dict_dataset) print(totaldatasets) plotter = PlotDistributions(figdir) print(dataset_patient.keys()) jhcount = 0 umcount = 0 nihcount = 0 cccount = 0 for key in dataset_patient.keys(): if 'jh' in key: jhcount += 1 elif 'ummc' in key: umcount += 1 elif 'pt' in key: nihcount += 1 elif 'la' in key: cccount += 1 print(jhcount) print(umcount, nihcount, cccount) print(6+9+13+10) ``` # Dataset Summary ``` failcount = 0 successcount = 0 engel_count_dict = dict() for patient in patientlist: if patient.outcome == 'nr': continue elif patient.outcome == 'f': failcount += 1 else: successcount += 1 if str(patient.engelscore) not in engel_count_dict.keys(): engel_count_dict[str(patient.engelscore)] = 0 engel_count_dict[str(patient.engelscore)] += 1 print(failcount, successcount) print(engel_count_dict) print(4+19+8+2) cez_chs = [] other_chs = [] allpats = [] for pat in dataset_patient.keys(): datasets = dataset_patient[pat] if pat 
in ignore_pats: continue cezs = [] oezs = [] print(pat) # normalize print(len(datasets)) # for i in range(len(datasets)): # cezmat, oezmat = datasets[i] # # print(cezmat.shape, oezmat.shape) # mat = np.concatenate((cezmat, oezmat), axis=0) # mat = compute_minmaxfragilitymetric(mat) # cezmat = mat[:cezmat.shape[0], :] # oezmat = mat[cezmat.shape[0]:, :] # print(cezmat.shape, oezmat.shape) for i in range(len(datasets)): cezmat, oezmat = datasets[i] mat = np.concatenate((cezmat, oezmat), axis=0) # mat = compute_fragilitymetric(mat) cezmat = mat[:cezmat.shape[0], :] oezmat = mat[cezmat.shape[0]:, :] if pat in joinseppats: cezs.append(np.mean(cezmat, axis=0)) oezs.append(np.mean(oezmat, axis=0)) else: cezs.append(cezmat) oezs.append(oezmat) if pat not in joinseppats: cezs = np.nanmedian(np.array(cezs), axis=0) oezs = np.nanmedian(np.array(oezs), axis=0) # print(np.array(cezs).shape) # store the entire patient concatenated vector cez_chs.append(np.mean(cezs, axis=0)) other_chs.append(np.mean(oezs, axis=0)) allpats.append(pat) cez_chs = np.array(cez_chs) other_chs = np.array(other_chs) print(cez_chs.shape, other_chs.shape) # split by outcome succ_inds, fail_inds = split_inds_outcome(allpats, mastersheet) print(len(succ_inds), len(fail_inds)) print(totaldatasets) center = ", ".join(centers) print(center) sns.set(font_scale=1.75) cez_mat_fail = cez_chs[fail_inds,...] oez_mat_fail = other_chs[fail_inds,...] # take the average across all patients mean_onset = np.nanmean(cez_mat_fail, axis=0) mean_other = np.nanmean(oez_mat_fail, axis=0) stderr_onset = scipy.stats.sem(cez_mat_fail, nan_policy='omit', axis=0) stderr_other = scipy.stats.sem(oez_mat_fail, nan_policy='omit', axis=0) # mean_onset[mean_onset > 3] = 5 # mean_other[mean_other > 3] = 5 # stderr_onset[np.abs(stderr_onset) > 3] = 3 # stderr_other[np.abs(stderr_other) > 3] = 3 xs = [np.arange(len(mean_onset)), np.arange(len(mean_other))] ys = [mean_onset, mean_other] errors = [stderr_onset, stderr_other] labels = ['clinez (n={})'.format(len(cez_mat_fail)), 'others (n={})'.format(len(oez_mat_fail))] threshstr = "\n Thresh=0.7" # threshstr = "" titlestr="{center} {reference} Failure Fragile Channels".format(center=center, reference=reference) xlabel = "Normalized Window Around Seizure Onset (+/- 10 secs)" vertline = [30,130] # vertline = [offsetwin] fig, ax = plotter.plot_comparison_distribution(xs, ys, labels=labels, alpha=0.5, save=True, # ylim=[0,7.5], figure_name=titlestr, errors=errors, titlestr=titlestr, ylabel="DOA +/- stderr", xlabel="Time (a.u.)", vertlines=vertline) print(cez_chs.shape, other_chs.shape) cez_mat = cez_chs[succ_inds,...] oez_mat = other_chs[succ_inds,...] 
# take the average across all patients mean_onset = np.mean(cez_mat, axis=0) mean_other = np.mean(oez_mat, axis=0) stderr_onset = scipy.stats.sem(cez_mat, axis=0) stderr_other = scipy.stats.sem(oez_mat, axis=0) # mean_onset[mean_onset>5] = 5 # mean_other[mean_other>5] = 5 # stderr_onset[stderr_onset > 5] = 5 # stderr_other[stderr_other > 5] = 5 xs = [np.arange(len(mean_onset)), np.arange(len(mean_other))] ys = [mean_onset, mean_other] errors = [stderr_onset, stderr_other] labels = ['clinez (n={})'.format(len(cez_mat)), 'others (n={})'.format(len(oez_mat))] threshstr = "\n Thresh=0.7" # threshstr = "" titlestr="{center} {reference} Success Fragile Channels".format(center=center, reference=reference) xlabel = "Normalized Window Around Seizure Onset (+/- 10 secs)" vertline = [30,130] # vertline = [offsetwin] fig, ax = plotter.plot_comparison_distribution(xs, ys, labels=labels, save=True, # ylim=[0, 7], figure_name=titlestr, errors=errors, titlestr=titlestr, ylabel="DOA +/- stderr", xlabel="Time (a.u.)", vertlines=vertline) ``` # Create Pipeline Object ``` def plot_summary(succ_ezratios, fail_ezratios, clinical_baseline, engelscore_box, clindiff_box, fpr, tpr, precision, recall, average_precision, youdenind, youdenpred, titlestr, clf_auc, Y_pred_engel, Y_pred_clindiff): ylabel = "DOA Metric" # plotting for baselines baselinex_roc = [0, 1-(clinical_baseline-0.5)] baseliney_roc = [0+(clinical_baseline-0.5), 1] baselinex_pr = [0, 1] baseliney_pr = [clinical_baseline, clinical_baseline] # make box plot plt.style.use("classic") sns.set_style("white") fix, axs = plt.subplots(2,3, figsize=(25,15)) axs = axs.flatten() ax = axs[0] titlestr = f"Outcome Split N={numdatasets} P={numpats}" boxdict = [ [fail_ezratios, succ_ezratios], [ 'Fail', 'Success'] ] plot_boxplot(ax, boxdict, titlestr, ylabel) outcome_df = create_df_from_outcome(succ_ezratios, fail_ezratios) outcome_dabest = dabest.load(data=outcome_df, x='outcome', y="ezr", idx=('failure','success') ) # Produce a Cumming estimation plot. 
outcome_dabest.mean_diff.plot(); ax = axs[1] titlestr = f"Engel Score Split N={numdatasets} P={numpats}" plot_boxplot(ax, engelscore_box, titlestr, ylabel="") xticks = ax.get_xticks() ax.plot(xticks, Y_pred_engel, color='red', label=f"y={engel_intercept:.2f} + {engel_slope:.2f}x" ) ax.legend() ax = axs[2] titlestr = f"Clin Difficulty Split N={numdatasets} P={numpats}" plot_boxplot(ax, clindiff_box, titlestr, ylabel="") ax.plot(xticks, Y_pred_clindiff, color='red', label=f"y={clindiff_intercept:.2f} + {clindiff_slope:.2f}x") ax.legend() # make ROC Curve plot ax = axs[3] titlestr = f"ROC Curve N={numdatasets} P={numpats}" label = "ROC Curve (AUC = %0.2f)" % (clf_auc) plot_roc(ax, fpr, tpr, label, titlestr) plot_baseline(ax, baselinex_roc, baseliney_roc) ax.legend(loc='lower right') ax.plot(np.mean(baselinex_roc).squeeze(), np.mean(baseliney_roc).squeeze(), 'k*', linewidth=4, markersize=12, label=f"Clinical-Baseline {np.round(clinical_baseline, 2)}" ) ax.plot(fpr[youdenind], tpr[youdenind], 'r*', linewidth=4, markersize=12, label=f"Youden-Index {np.round(youdenacc, 2)}") ax.legend(loc='lower right') # make PR Curve ax = axs[4] label = 'PR Curve (AP = %0.2f)' % (average_precision) titlestr = f"PR-Curve N={numdatasets} P={numpats}" plot_pr(ax, recall, precision, label, titlestr) plot_baseline(ax, baselinex_pr, baseliney_pr) ax.legend(loc='lower right') # Confusion Matrix ax = axs[5] titlestr = f"Confusion matrix Youdens-cutoff" plot_confusion_matrix(ax, ytrue, youdenpred, classes=[0.,1.], title=titlestr, normalize=True) # titlestr = f"{modelname}{networkmodelname}-{freqband} {center} N={numdatasets} P={numpats}" # plt.savefig(os.path.join(figdir, normname, titlestr+".png"), # box_inches='tight') %%time # create patient list for all datasets patientlist = [] for patientid in dataset_patient.keys(): # initialize empty list to store datasets per patient datasetlist = [] if patientid in ignore_pats: continue # get metadata for patient center = mastersheet.get_patient_center(patientid) outcome = mastersheet.get_patient_outcome(patientid) engelscore = mastersheet.get_patient_engelscore(patientid) clindiff = mastersheet.get_patient_clinicaldiff(patientid) modality = mastersheet.get_patient_modality(patientid) for datasetname, result in dict_dataset.items(): # get the patient/dataset id patid = datasetname.split("_")[0] datasetid = datasetname.split(patid + "_")[1] # print(patid, datasetid) if patid != patientid: continue # format the matrix and the indices mat = np.concatenate((result['cezmat'], result['oezmat']), axis=0) cezinds = np.arange(0, result['cezmat'].shape[0]) # create dataset object dataset_obj = Dataset(mat=mat, patientid=patid, name=datasetid, datatype='ieeg', cezinds=cezinds, markeron=30, markeroff=130) datasetlist.append(dataset_obj) if patientid == 'pt2': print(mat.shape) ax = sns.heatmap(mat,cmap='inferno', yticklabels=[], # vmax=3, # vmin=-3 ) ax.axhline(len(cezinds), linewidth=5, color='white') ax.set_ylabel("CEZ vs OEZ Map") ax.axvline(30, linewidth=4, linestyle='--', color='red') ax.axvline(130, linewidth=4, linestyle='--', color='black') # create patient object patient_obj = Patient(datasetlist, name=patientid, center=center, outcome=outcome, engelscore=engelscore, clindiff=clindiff, modality=modality) patientlist.append(patient_obj) # print(patient_obj, len(datasetlist)) evalpipe = EvaluationFramework(patientlist) print(patient_obj) print(evalpipe.centers, evalpipe.modalities) print(evalpipe) COMBINE_SEPARATE_PATS = [ 'pt11', # 'nl22', 'ummc007', # 'tvb7', # 'nl02', 'nl06', 
'nl11', # no resection ] ignore_pats = [ # 'pt11', # 'jh107' # 'jh102', 'jh104', 'la01-2','la01', 'la03', 'la05', # 'la09', 'la23', 'nl22', ] # evalpipe.apply_normalization(normalizemethod=None) ezr_list = evalpipe.compute_ezratios( # threshold=0.5, ignore_pats=ignore_pats, combine_sep_pats=COMBINE_SEPARATE_PATS ) nr_inds = evalpipe.remove_nr_inds() surgery_inds = evalpipe.get_surgery_inds() ezratios = ezr_list[surgery_inds] patlist = evalpipe.patientlist[surgery_inds] # split by outcome succ_inds, fail_inds = split_inds_outcome(patlist, mastersheet) ytrue = get_numerical_outcome(patlist, mastersheet) engel_inds_dict = split_inds_engel(patlist, mastersheet) clindiff_inds_dict = split_inds_clindiff(patlist, mastersheet) roc_dict, cm = evalpipe.evaluate_roc_performance(ezratios, ytrue, normalize=True) pr_dict = evalpipe.evaluate_pr_performance(ezratios, ytrue, pos_label=1) # extract data from dictionaries fpr = roc_dict['fpr'] tpr = roc_dict['tpr'] clf_auc = roc_dict['auc'] youdenthreshold = roc_dict['youdenthresh'] youdenacc = roc_dict['youdenacc'] youdenind = roc_dict['youdenind'] precision = pr_dict['prec'] recall = pr_dict['recall'] average_precision = pr_dict['avgprec'] clinical_baseline = pr_dict['baseline'] # youden prediction youdenpred = ezratios >= youdenthreshold youdenpred = [int(y == True) for y in youdenpred] # evaluate box plot separation using wilcoxon rank-sum succ_ezratios, fail_ezratios, \ stat, pval = evalpipe.evaluate_metric_separation(ytrue, ezratios, pos_label=1, neg_label=0) print("Wilcoxon Rank-sum: ", stat, pval) print("Clinical baseline: ", clinical_baseline) print(sum(ytrue)) # pprint(pr_dict) engelscore_box = {} for i in sorted(engel_inds_dict.keys()): if i == -1: continue if np.isnan(i): continue this_fratio = ezratios[engel_inds_dict[i]] engelscore_box[f"ENG{int(i)}"] = this_fratio clindiff_box = {} for i in sorted(clindiff_inds_dict.keys()): this_fratio = ezratios[clindiff_inds_dict[i]] clindiff_box[f"CD{int(i)}"] = this_fratio print("Total amount of data: ", len(ezratios), len(patlist)) linear_regressor = LinearRegression() # create object for the class X = [] y = [] for idx, engelscore in enumerate(engelscore_box.keys()): print(engelscore) y.append(np.mean(engelscore_box[engelscore])) X.append(idx+1) X = np.array(X)[:, np.newaxis] linear_regressor.fit(X, y) # perform linear regression engel_intercept = linear_regressor.intercept_ engel_slope = linear_regressor.coef_[0] Y_pred_engel = linear_regressor.predict(X) # make predictions X = [] y = [] for idx, clindiff in enumerate(clindiff_box.keys()): print(clindiff) y.append(np.mean(clindiff_box[clindiff])) X.append(idx+1) X = np.array(X)[:, np.newaxis] linear_regressor.fit(X, y) # perform linear regression clindiff_intercept = linear_regressor.intercept_ clindiff_slope = linear_regressor.coef_[0] Y_pred_clindiff = linear_regressor.predict(X) # make predictions print(X, y) print("Slope and intercept: ", clindiff_slope, clindiff_intercept) sns.set(font_scale=2.5) centernames = "UMMC, JHH, CC" numpats = len(patlist) numdatasets = totaldatasets # titlestr = f"{modelname}{networkmodelname}-{freqband} {center} N={numdatasets} P={numpats}" titlestr= f"{modelname}{networkmodelname}-{freqband} {centernames} N={numdatasets} P={numpats}" titlestr = "" plot_summary(succ_ezratios, fail_ezratios, clinical_baseline, engelscore_box, clindiff_box, fpr, tpr, precision, recall, average_precision, youdenind, youdenpred, titlestr, clf_auc, Y_pred_engel, Y_pred_clindiff) print("Outlier min on fratio_succ: ", 
patlist[ezratios==min(succ_ezratios)]) print("Outlier max oon fratio_fail: ", patlist[ezratios==max(fail_ezratios)]) argsort_succ = np.sort(succ_ezratios) topinds = [ezratios.tolist().index(argsort_succ[i]) for i in range(10)] succ_bad_pats = patlist[topinds] print("\n\n Outlier of success patients:") print(succ_bad_pats) argsort_fail = np.sort(fail_ezratios)[::-1] topinds = [ezratios.tolist().index(argsort_fail[i]) for i in range(10)] fail_bad_pats = patlist[topinds] print("\n\n Outlier of failed patients:") print(fail_bad_pats) ``` # Train/Test Split ``` # traininds, testinds = train_test_split(np.arange(len(y)), test_size=0.6, random_state=98765) traininds, testinds = evalpipe.train_test_split(method='engel', trainsize=0.50) print(len(traininds), len(testinds)) ''' RUN TRAINING ''' ezratios = ezr_list[surgery_inds] # ezratios = ezratios[traininds] patlist = evalpipe.patientlist[surgery_inds] # patlist = patlist[traininds] numpats = len(patlist) print(len(patlist), len(ezratios)) # split by outcome succ_inds, fail_inds = split_inds_outcome(patlist, mastersheet) ytrue = get_numerical_outcome(patlist, mastersheet) engel_inds_dict = split_inds_engel(patlist, mastersheet) clindiff_inds_dict = split_inds_clindiff(patlist, mastersheet) succ_ezratios = ezratios[succ_inds] fail_ezratios = ezratios[fail_inds] # engel / clindiff metric split into dictionary engel_metric_dict = get_clinical_split(ezratios, 'engel', engel_inds_dict) clindiff_metric_dict = get_clinical_split(ezratios, 'clindiff', clindiff_inds_dict) # create dictionary split engel and clindiff classes engel_metric_dict = get_clinical_split(ezratios, 'engel', engel_inds_dict) clindiff_metric_dict = get_clinical_split(ezratios, 'clindiff', clindiff_inds_dict) Y_pred_engel, engel_intercept, engel_slope = compute_category_regression(engel_metric_dict) Y_pred_clindiff, clindiff_intercept, clindiff_slope = compute_category_regression(clindiff_metric_dict) ezrcolvals = np.concatenate((succ_ezratios, fail_ezratios), axis=-1)[:, np.newaxis] scorevals = np.array(['Success']*len(succ_ezratios) + ['Failure']*len(fail_ezratios))[:, np.newaxis] outcome_df = pd.DataFrame(data=ezrcolvals, columns=['ezr']) outcome_df['Outcome'] = scorevals ezrcolvals = [] scorevals = [] for key, vals in engel_metric_dict.items(): scorevals.extend([key] * len(vals)) ezrcolvals.extend(vals) engel_df = pd.DataFrame(data=ezrcolvals, columns=['ezr']) engel_df['Engel Score'] = scorevals ezrcolvals = [] scorevals = [] for key, vals in clindiff_metric_dict.items(): scorevals.extend([key] * len(vals)) ezrcolvals.extend(vals) clindiff_df = pd.DataFrame(data=ezrcolvals, columns=['ezr']) clindiff_df['Epilepsy Category'] = scorevals print("converted clinical categorizations into dataframes!") display(outcome_df.head()) display(engel_df.head()) display(clindiff_df.head()) outcome_df.to_csv("/Users/adam2392/Downloads/outcome_impulsemodel.csv") engel_df.to_csv("/Users/adam2392/Downloads/engel_impulsemodel.csv") clindiff_df.to_csv("/Users/adam2392/Downloads/clindiff_impulsemodel.csv") ylabel = "Degree of Agreement (CEZ)" outcome_dabest = dabest.load(data=outcome_df, x='Outcome', y="ezr", idx=outcome_df['Outcome'].unique() ) engel_dabest = dabest.load(data=engel_df, x='Engel Score', y="ezr", idx=engel_df['Engel Score'].unique() ) clindiff_dabest = dabest.load(data=clindiff_df, x='Epilepsy Category', y="ezr", idx=clindiff_df['Epilepsy Category'].unique() ) # make box plot plt.style.use("classic") sns.set(font_scale=1.75) sns.set_style("white") cols = 3 rows = 1 ylim = [0.3, 0.7] ylim = 
None fig, axs = plt.subplots(rows, cols, figsize=(24,8), constrained_layout=True) # ax1 = fig.add_subplot(cols, rows, 1) axs = axs.flatten() ax = axs[0] titlestr = f"Outcome Split N={numdatasets} P={numpats}" titlestr = "" plot_boxplot_withdf(ax, outcome_df, df_xlabel='Outcome', df_ylabel='ezr', color='black', ylabel=ylabel, titlestr=titlestr, ylim=ylim, yticks=np.linspace(0.3, 0.7, 5)) ax = axs[1] titlestr = f"Engel Score Split N={numdatasets} P={numpats}" titlestr = "" plot_boxplot_withdf(ax, engel_df, df_xlabel='Engel Score', df_ylabel='ezr', color='black', ylabel="", titlestr=titlestr, ylim=ylim, yticks=np.linspace(0.3, 0.7, 5)) xticks = ax.get_xticks() ax.plot(xticks, Y_pred_engel, color='red', label=f"y={engel_intercept:.2f} + {engel_slope:.2f}x") ax.legend() ax = axs[2] titlestr = f"Clin Difficulty Split N={numdatasets} P={numpats}" titlestr = "" plot_boxplot_withdf(ax, clindiff_df, df_xlabel='Epilepsy Category', df_ylabel='ezr',color='black', ylabel="", titlestr=titlestr, ylim=ylim, yticks=np.linspace(0.3, 0.7, 5)) xticks = ax.get_xticks() ax.plot(xticks, Y_pred_clindiff, color='red', label=f"y={clindiff_intercept:.2f} + {clindiff_slope:.2f}x") ax.legend() # fig.tight_layout() suptitle = f"Clinical Categories Split N={numdatasets}, P={numpats}" st = fig.suptitle(suptitle) figpath = os.path.join(figdir, suptitle+".png") plt.savefig(figpath, bbox_extra_artists=[st], bbox_inches='tight') # Produce a Cumming estimation plot. fig1 = outcome_dabest.median_diff.plot() ax1_list = fig1.axes ax1 = ax1_list[0] fig1.suptitle("SRR of Success vs Failure Outcomes", fontsize=20) fig1.tight_layout() # print(fig1, ax1) # print(ax1.) fig2 = engel_dabest.median_diff.plot() ax2_list = fig2.axes ax2 = ax2_list[0] fig2.suptitle("SRR of Outcomes Stratified By Engel Class", fontsize=20) fig2.tight_layout() print("Done") # clindiff_dabest.mean_diff.plot() ``` # Load in Previous Analysis ``` from eztrack.edv.plot_fragility_heatmap import PlotFragilityHeatmap from eztrack.edv.baseplot import BasePlotter plotter = BasePlotter(figdir) trimmed_dataset_dict = np.load(f"/Users/adam2392/Downloads/improved_allmap_embc_datasets.npy", allow_pickle=True) trimmed_dataset_ids = np.load(f"/Users/adam2392/Downloads/improved_allmap_embc_datasetids.npy", allow_pickle=True) trimmed_patient_ids = np.load(f"/Users/adam2392/Downloads/improved_allmap_embc_patientids.npy", allow_pickle=True) trimmed_chanlabels = np.load(f"/Users/adam2392/Downloads/improved_allmap_embc_chanlabels.npy", allow_pickle=True) trimmed_cezcontacts = np.load(f"/Users/adam2392/Downloads/improved_allmap_embc_cezcontacts.npy", allow_pickle=True) print(trimmed_dataset_dict.shape) print(len(trimmed_patient_ids)) # print(trimmed_cezcontacts[0]) for i, dataset in enumerate(trimmed_dataset_dict): patient_id = trimmed_patient_ids[i] dataset_id = trimmed_dataset_ids[i] print(dataset.shape) break ```
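The cell above only peeks at the first loaded dataset. A minimal sketch of iterating all of the reloaded arrays together is shown below; it assumes `trimmed_chanlabels[i]` and `trimmed_cezcontacts[i]` are per-dataset lists of channel names, which this excerpt does not confirm.

```
for mat, dataset_id, patient_id, labels, cez in zip(
        trimmed_dataset_dict, trimmed_dataset_ids,
        trimmed_patient_ids, trimmed_chanlabels, trimmed_cezcontacts):
    # Boolean mask of the clinically annotated (CEZ) channels for this dataset
    cez_mask = np.isin(np.asarray(labels), np.asarray(cez))
    print(patient_id, dataset_id, mat.shape, f"{cez_mask.sum()} CEZ channels")
```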
github_jupyter
# Basic Examples with Different Protocols ## Prerequisites * A kubernetes cluster with kubectl configured * curl * grpcurl * pygmentize ## Setup Seldon Core Use the setup notebook to [Setup Cluster](seldon_core_setup.ipynb) to setup Seldon Core with an ingress - either Ambassador or Istio. Then port-forward to that ingress on localhost:8003 in a separate terminal either with: * Ambassador: `kubectl port-forward $(kubectl get pods -n seldon -l app.kubernetes.io/name=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080` * Istio: `kubectl port-forward $(kubectl get pods -l istio=ingressgateway -n istio-system -o jsonpath='{.items[0].metadata.name}') -n istio-system 8003:80` ``` !kubectl create namespace seldon !kubectl config set-context $(kubectl config current-context) --namespace=seldon import json ``` ## Seldon Protocol REST Model ``` !pygmentize resources/model_seldon_rest.yaml !kubectl apply -f resources/model_seldon_rest.yaml !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=rest-seldon \ -o jsonpath='{.items[0].metadata.name}') X=!curl -s -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' \ -X POST http://localhost:8003/seldon/seldon/rest-seldon/api/v1.0/predictions \ -H "Content-Type: application/json" d=json.loads(X[0]) print(d) assert(d["data"]["ndarray"][0][0] > 0.4) !kubectl delete -f resources/model_seldon_rest.yaml ``` ## Seldon Protocol GRPC Model ``` !pygmentize resources/model_seldon_grpc.yaml !kubectl apply -f resources/model_seldon_grpc.yaml !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=grpc-seldon \ -o jsonpath='{.items[0].metadata.name}') X=!cd ../executor/proto && grpcurl -d '{"data":{"ndarray":[[1.0,2.0,5.0]]}}' \ -rpc-header seldon:grpc-seldon -rpc-header namespace:seldon \ -plaintext \ -proto ./prediction.proto 0.0.0.0:8003 seldon.protos.Seldon/Predict d=json.loads("".join(X)) print(d) assert(d["data"]["ndarray"][0][0] > 0.4) !kubectl delete -f resources/model_seldon_grpc.yaml ``` ## Tensorflow Protocol REST Model ``` !pygmentize resources/model_tfserving_rest.yaml !kubectl apply -f resources/model_tfserving_rest.yaml !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=rest-tfserving \ -o jsonpath='{.items[0].metadata.name}') X=!curl -s -d '{"instances": [1.0, 2.0, 5.0]}' \ -X POST http://localhost:8003/seldon/seldon/rest-tfserving/v1/models/halfplustwo/:predict \ -H "Content-Type: application/json" d=json.loads("".join(X)) print(d) assert(d["predictions"][0] == 2.5) !kubectl delete -f resources/model_tfserving_rest.yaml ``` ## Tensorflow Protocol GRPC Model ``` !pygmentize resources/model_tfserving_grpc.yaml !kubectl apply -f resources/model_tfserving_grpc.yaml !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=grpc-tfserving \ -o jsonpath='{.items[0].metadata.name}') X=!cd ../executor/proto && grpcurl \ -d '{"model_spec":{"name":"halfplustwo"},"inputs":{"x":{"dtype": 1, "tensor_shape": {"dim":[{"size": 3}]}, "floatVal" : [1.0, 2.0, 3.0]}}}' \ -rpc-header seldon:grpc-tfserving -rpc-header namespace:seldon \ -plaintext -proto ./prediction_service.proto \ 0.0.0.0:8003 tensorflow.serving.PredictionService/Predict d=json.loads("".join(X)) print(d) assert(d["outputs"]["x"]["floatVal"][0] == 2.5) !kubectl delete -f resources/model_tfserving_grpc.yaml ```
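The REST tests above call the ingress with `curl` from shell magic. A minimal Python alternative using `requests` is sketched below; it assumes one of the Seldon-protocol REST deployments (e.g. `rest-seldon` from the first section) is still applied and that the port-forward to `localhost:8003` is active.

```
import requests

payload = {"data": {"ndarray": [[1.0, 2.0, 5.0]]}}
resp = requests.post(
    "http://localhost:8003/seldon/seldon/rest-seldon/api/v1.0/predictions",
    json=payload,
    timeout=10,
)
resp.raise_for_status()
prediction = resp.json()
print(prediction)
assert prediction["data"]["ndarray"][0][0] > 0.4
```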
github_jupyter
Configurations: * install tensorflow 2.1 * install matplotlib * install pandas * install scjkit-learn * install nltk ``` import matplotlib.pyplot as plt import numpy as np import pandas as pd import tensorflow as tf import re from tensorflow import keras from keras.models import Sequential from keras.layers import Dense, Activation, Dropout from keras.preprocessing import text, sequence from keras import utils from keras.preprocessing.sequence import pad_sequences from sklearn.model_selection import train_test_split from keras.layers.embeddings import Embedding from keras.layers.core import SpatialDropout1D from keras.layers import LSTM from keras.callbacks import EarlyStopping from numpy.random import seed #Load Data df_train = pd.read_csv('../data/deep-learning-datasets/twitter-sentiment-analysis/train_E6oV3lV.csv') df_train.columns = ["id", "label", "text"] df_test = pd.read_csv('../data/deep-learning-datasets/twitter-sentiment-analysis/test_tweets_anuFYb8.csv') df_test.columns = ["id","text"] df_train # clean data REPLACE_BY_SPACE_RE = re.compile('[/(){}\[\]\|@,;]') BAD_SYMBOLS_RE = re.compile('[^0-9a-z #+_]') def clean_text(text): """ text: a string return: modified initial string """ text = text.lower() # lowercase text text = REPLACE_BY_SPACE_RE.sub(' ', text) # replace REPLACE_BY_SPACE_RE symbols by space in text. substitute the matched string in REPLACE_BY_SPACE_RE with space. text = BAD_SYMBOLS_RE.sub('', text) # remove symbols which are in BAD_SYMBOLS_RE from text. substitute the matched string in BAD_SYMBOLS_RE with nothing. text = ' '.join(word for word in text.split() if len(word) > 2) # remove stopwors from text return text def preprocess_text(df): df = df.reset_index(drop=True) df['text'] = df['text'].apply(clean_text) df['text'] = df['text'].str.replace('\d+', '') return df df_train = preprocess_text(df_train) df_test = preprocess_text(df_test) df_train # The maximum number of words to be used. (most frequent) MAX_NB_WORDS = 30000 # Max number of words in each complaint. MAX_SEQUENCE_LENGTH = 50 # This is fixed. EMBEDDING_DIM = 100 tokenizer = text.Tokenizer(num_words=MAX_NB_WORDS, filters='#!"$%&()*+,-./:;<=>?@[\]^_`{|}~', lower=True, split= ' ') tokenizer.fit_on_texts((df_train['text'].append(df_test['text'])).values) word_index = tokenizer.word_index word_index['study'] def fromTextToFeatures(df_text): # gives you a list of integer sequences encoding the words in your sentence X = tokenizer.texts_to_sequences(df_text.values) # split the X 1-dimensional sequence of word indexes into a 2-d listof items # Each item is split is a sequence of 50 value left-padded with zeros X = pad_sequences(X, maxlen=MAX_SEQUENCE_LENGTH) return X X = fromTextToFeatures(df_train['text']) print('Shape of data tensor:', X.shape) #X X_test_ex = fromTextToFeatures(df_test['text']) print('Shape of data tensor:', X_test_ex.shape) Y = pd.get_dummies(df_train['label']).values # asdas dasda sd asd asd asd [0, 1] # dfsdf asd sd fdsf sdf [1, 0] print('Shape of label tensor:', Y.shape) X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.10, random_state = 42) print(X_train.shape,Y_train.shape) print(X_test.shape,Y_test.shape) seed(100) model = Sequential() # The Embedding layer is used to create word vectors for incoming words. # It sits between the input and the LSTM layer, i.e. # the output of the Embedding layer is the input to the LSTM layer. 
model.add(Embedding(MAX_NB_WORDS, EMBEDDING_DIM, input_length=X.shape[1])) model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2)) model.add(Dense(2, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) epochs = 3 batch_size = 64 history = model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size,validation_split=0.1,callbacks=[EarlyStopping(monitor='val_loss', patience=3, min_delta=0.0001)]) accr = model.evaluate(X_test,Y_test) print('Test set\n Loss: {:0.3f}\n Accuracy: {:0.3f}'.format(accr[0],accr[1])) pred_y = model.predict(X_test) ```
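A minimal inference sketch on new raw tweets is shown below. It assumes `model`, `tokenizer`, `clean_text`, `pad_sequences` and `MAX_SEQUENCE_LENGTH` from the cells above are still in scope, and that the `pd.get_dummies` encoding maps column 0 to label 0 and column 1 to label 1 (true here since the labels are 0/1, but worth checking for other datasets).

```
new_tweets = pd.Series([
    "what a wonderful day, loving this community",
    "this is absolutely horrible and hateful",
])
cleaned = new_tweets.apply(clean_text)
seqs = tokenizer.texts_to_sequences(cleaned.values)
padded = pad_sequences(seqs, maxlen=MAX_SEQUENCE_LENGTH)
probs = model.predict(padded)
# Column index of the softmax output corresponds to the one-hot label column
predicted_labels = probs.argmax(axis=1)
print(list(zip(new_tweets, predicted_labels)))
```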
github_jupyter
# A case study in screening for new enzymatic reactions In this example, we show how to search the KEGG database for a reaction of interest based on user requirements. At specific points we highlight how our code could be used for arbitrary molecules that the user is interested in. This is crucial because the KEGG database is not exhaustive, and we only accessed a portion of the database that has no ambiguities (to avoid the need for manual filtering). Requirements to run this script: * rdkit (2019.09.2.0) * matplotlib (3.1.1) * numpy (1.17.4) * enzyme_screen * Clone source code and run this notebook in its default directory. # This notebook requires data from screening, which is not uploaded! ## The idea: We want to screen all collected reactions for a reaction that fits these constraints (automatic or manual application is noted): 1. Maximum component size within 5-7 Angstrom (automatic) 2. *One* component on *one* side of the reaction contains a nitrile group (automatic) 3. Value added from reactant to product (partially manual) e.g.: - cost of the reactants being much less than the products - products being unpurchasable and reactants being purchasable Constraint *2* affords potential reaction monitoring through the isolated FT-IR signal of the nitrile group. Constraint *3* is vague, but generally aims to determine some value-added by using an enzyme for a given reaction. This is often based on overcoming the cost of purchasing/synthesising the product through some non-enzymatic pathway by using an encapsulate enzyme. In this case, we use the primary literature on a selected reaction and some intuition to guide our efforts (i.e. we select a reaction (directionality determined from KEGG) where a relatively cheap (fair assumption) amino acid is the reactant). The alternative to this process would be to select a target reactant or product and search all reactions that include that target and apply similar constraints to test the validity of those reactions. ### Provide directory to reaction data and molecule data, and parameter file. ``` import numpy as np import matplotlib.pyplot as plt import pandas as pd import os import sys reaction_dir = ( '/data/atarzia/projects/psp_phd/production/rxn_collection' ) molecule_dir = ( '/data/atarzia/projects/psp_phd/molecules/molecule_DBs/production' ) # Handle import directories. module_path = os.path.abspath(os.path.join('../src')) if module_path not in sys.path: sys.path.append(module_path) import utilities param_file = '../data/param_file.txt' params = utilities.read_params(param_file) ``` ### Find reaction systems with max component sizes within threshold Using a threshold of 5 to 7 angstrom. Results in a plot of reaction distributions. ``` import plotting_fn as pfn threshold_min = 5 threshold_max = 7 # Read in reaction collection CSV: rs_properties.csv # from running RS_analysis.py. rs_properties = pd.read_csv( os.path.join(reaction_dir, 'rs_properties.csv') ) rs_within_threshold = rs_properties[ rs_properties['max_mid_diam'] < threshold_max ] rs_within_threshold = rs_within_threshold[ rs_within_threshold['max_mid_diam'] >= threshold_min ] print(f'{len(rs_within_threshold)} reactions in threshold') fig, ax = plt.subplots() alpha = 1.0 width = 0.25 X_bins = np.arange(0, 20, width) # All reactions. hist, bin_edges = np.histogram( a=list(rs_properties['max_mid_diam']), bins=X_bins ) ax.bar( bin_edges[:-1], hist, align='edge', alpha=alpha, width=width, color='lightgray', edgecolor='lightgray', label='all reactions' ) # Within threshold. 
hist, bin_edges = np.histogram( a=list(rs_within_threshold['max_mid_diam']), bins=X_bins ) ax.bar( bin_edges[:-1], hist, align='edge', alpha=alpha, width=width, color='firebrick', edgecolor='firebrick', label='within threshold' ) pfn.define_standard_plot( ax, xtitle='$d$ of largest component [$\mathrm{\AA}$]', ytitle='count', xlim=(0, 20), ylim=None ) fig.legend(fontsize=16) fig.savefig( os.path.join(reaction_dir, 'screen_example_distribution.pdf'), dpi=720, bbox_inches='tight' ) plt.show() ``` ### Find reaction systems with at least one nitrile functionality on one side of the reaction ``` import reaction from rdkit.Chem import AllChem as rdkit from rdkit.Chem import Fragments # Handle some warnings for flat molecules. from rdkit import RDLogger RDLogger.DisableLog('rdApp.*') # Needed to show molecules from rdkit.Chem import Draw from rdkit.Chem.Draw import IPythonConsole def has_nitrile(mol_file): """ Returns False if nitrile fragment is not found using RDKIT. """ mol = rdkit.MolFromMolFile(mol_file) no_frag = Fragments.fr_nitrile(mol) if no_frag > 0: return True else: return False # Define generator over reactions. generator = reaction.yield_rxn_syst( output_dir=reaction_dir, pars=params, ) # Iterate over reactions, checking for validity. target_reaction_ids = [] molecules_with_nitriles = [] for i, (count, rs) in enumerate(generator): if 'KEGG' not in rs.pkl: continue if rs.skip_rxn: continue if rs.components is None: continue # Check components for nitrile groups. reactants_w_nitriles = 0 products_w_nitriles = 0 for m in rs.components: mol_file = os.path.join( molecule_dir, m.name+'_opt.mol' ) if has_nitrile(mol_file): if mol_file not in molecules_with_nitriles: molecules_with_nitriles.append(mol_file) if m.role == 'reactant': reactants_w_nitriles += 1 elif m.role == 'product': products_w_nitriles += 1 # Get both directions. if products_w_nitriles == 1 and reactants_w_nitriles == 0: target_reaction_ids.append(rs.DB_ID) if products_w_nitriles == 0 and reactants_w_nitriles == 1: target_reaction_ids.append(rs.DB_ID) ``` ### Draw nitrile containing molecules ``` print( f'There are {len(molecules_with_nitriles)} molecules ' f'with nitrile groups, corresponding to ' f'{len(target_reaction_ids)} reactions ' 'out of all.' ) molecules = [ rdkit.MolFromSmiles(rdkit.MolToSmiles(rdkit.MolFromMolFile(i))) for i in molecules_with_nitriles ] mol_names = [ i.replace(molecule_dir+'/', '').replace('_opt.mol', '') for i in molecules_with_nitriles ] img = Draw.MolsToGridImage( molecules, molsPerRow=6, subImgSize=(100, 100), legends=mol_names, ) img ``` ## Update dataframe to have target reaction ids only. ``` target_reactions = rs_within_threshold[ rs_within_threshold['db_id'].isin(target_reaction_ids) ] print( f'There are {len(target_reactions)} reactions ' 'that fit all constraints so far.' ) target_reactions ``` ## Select reaction based on bertzCT and SAScore, plus intuition from visualisation Plotting the measures of reaction productivity is useful, but so is looking manually through the small subset. Both methods highlight R02846 (https://www.genome.jp/dbget-bin/www_bget?rn:R02846) as a good candidate: - High deltaSA and deltaBertzCT - The main reactant is a natural amino acid (cysteine). 
Note that the chirality is not defined in this specific KEGG Reaction, however, the chirality is defined as L-cysteine in the Enzyme entry (https://www.genome.jp/dbget-bin/www_bget?ec:4.4.1.9) ``` fig, ax = plt.subplots() ax.scatter( target_reactions['deltasa'], target_reactions['deltabct'], alpha=1.0, c='#ff3b3b', edgecolor='none', label='target reactions', s=100, ) pfn.define_standard_plot( ax, xtitle=r'$\Delta$ SAscore', ytitle=r'$\Delta$ BertzCT', xlim=(-10, 10), ylim=None, ) fig.legend(fontsize=16) fig.savefig( os.path.join( reaction_dir, 'screen_example_complexity_targets.pdf' ), dpi=720, bbox_inches='tight' ) plt.show() fig, ax = plt.subplots() ax.scatter( rs_properties['deltasa'], rs_properties['deltabct'], alpha=1.0, c='lightgray', edgecolor='none', label='all reactions', s=40, ) ax.scatter( rs_within_threshold['deltasa'], rs_within_threshold['deltabct'], alpha=1.0, c='#2c3e50', edgecolor='none', label='within threshold', s=40, ) ax.scatter( target_reactions['deltasa'], target_reactions['deltabct'], alpha=1.0, c='#ff3b3b', edgecolor='k', label='target reactions', marker='P', s=60, ) pfn.define_standard_plot( ax, xtitle=r'$\Delta$ SAscore', ytitle=r'$\Delta$ BertzCT', xlim=(-10, 10), ylim=(-850, 850), ) fig.legend(fontsize=16) fig.savefig( os.path.join( reaction_dir, 'screen_example_complexity_all.pdf' ), dpi=720, bbox_inches='tight' ) plt.show() ``` ## Visualise properties of chosen reaction Reaction: R02846 (https://www.genome.jp/dbget-bin/www_bget?rn:R02846) ``` # Read in reaction system. rs = reaction.get_RS( filename=os.path.join( reaction_dir, 'sRS-4_4_1_9-KEGG-R02846.gpkl' ), output_dir=reaction_dir, pars=params, verbose=True ) # Print properties and collate components. print(rs) if rs.skip_rxn: print(f'>>> {rs.skip_reason}') print( f'max intermediate diameter = {rs.max_min_mid_diam} angstrom' ) print( f'deltaSA = {rs.delta_SA}' ) print( f'deltaBertzCT = {rs.delta_bCT}' ) print('--------------------------\n') print('Components:') # Output molecular components and their properties. reacts = [] reactstr = [] prodstr = [] prods = [] for rsc in rs.components: prop_dict = rsc.read_prop_file() print(rsc) print(f"SA = {round(prop_dict['Synth_score'], 3)}") print(f"BertzCT = {round(prop_dict['bertzCT'], 3)}") print('\n') if rsc.role == 'product': prods.append( rdkit.MolFromMolFile(rsc.structure_file) ) prodstr.append(f'{rsc.name}') if rsc.role == 'reactant': reacts.append( rdkit.MolFromMolFile(rsc.structure_file) ) reactstr.append(f'{rsc.name}') img = Draw.MolsToGridImage( reacts, molsPerRow=2, subImgSize=(300, 300), legends=reactstr, ) img.save( os.path.join( reaction_dir, 'screen_example_reactants.png' ) ) img img = Draw.MolsToGridImage( prods, molsPerRow=2, subImgSize=(300, 300), legends=prodstr, ) img.save( os.path.join( reaction_dir, 'screen_example_products.png' ) ) img ``` ## Manually obtaining the cost of molecules In this example, we will assume C00283 and C00177 are obtainable/purchasable through some means and that only C00736 and C02512 are relevant to the productivity of the reaction. Note that the synthetic accessibility is 'large' for these molecules due to the two small molecules, while the change in BertzCT comes from the two larger molecules. - Get CAS number from KEGG Compound pages: - KEGG: C00736, CAS: 3374-22-9 - KEGG: C02512, CAS: 6232-19-5 - Use CAS number in some supplier website (using http://astatechinc.com/ here for no particular reason) - KEGG: C00736, Price: \\$69 for 10 gram = \\$6.9 per gram - KEGG: C02512, Price: \\$309 for 1 gram = \\$309 per gram
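Because the KEGG subset used here is not exhaustive, the same two ingredients of the screen (the nitrile check done above with `Fragments.fr_nitrile`, and the BertzCT/SA complexity measures behind `deltabct`/`deltasa`) can be reproduced for arbitrary user-supplied molecules. The sketch below is illustrative only: the SMARTS pattern is an alternative nitrile check, the two SMILES are example molecules (not taken from the screening output above), and the `sascorer` contrib import path is the commonly documented one and may need adjusting for a given RDKit installation.

```
import os
import sys
from rdkit import Chem
from rdkit.Chem import GraphDescriptors, RDConfig

# RDKit ships the Ertl SA score as a contrib module
sys.path.append(os.path.join(RDConfig.RDContribDir, 'SA_Score'))
import sascorer

NITRILE = Chem.MolFromSmarts('[NX1]#[CX2]')  # nitrile substructure pattern


def describe(smiles):
    mol = Chem.MolFromSmiles(smiles)
    return {
        'has_nitrile': mol.HasSubstructMatch(NITRILE),
        'bertzCT': GraphDescriptors.BertzCT(mol),
        'SAscore': sascorer.calculateScore(mol),
    }


reactant = describe('N[C@@H](CS)C(=O)O')    # L-cysteine (example)
product = describe('N[C@@H](CC#N)C(=O)O')   # a nitrile-bearing amino acid (example)
print(reactant)
print(product)
print('delta BertzCT:', product['bertzCT'] - reactant['bertzCT'])
print('delta SAscore:', product['SAscore'] - reactant['SAscore'])
```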
github_jupyter
# Задание 1.1 - Метод К-ближайших соседей (K-neariest neighbor classifier) В первом задании вы реализуете один из простейших алгоритмов машинного обучения - классификатор на основе метода K-ближайших соседей. Мы применим его к задачам - бинарной классификации (то есть, только двум классам) - многоклассовой классификации (то есть, нескольким классам) Так как методу необходим гиперпараметр (hyperparameter) - количество соседей, мы выберем его на основе кросс-валидации (cross-validation). Наша основная задача - научиться пользоваться numpy и представлять вычисления в векторном виде, а также ознакомиться с основными метриками, важными для задачи классификации. Перед выполнением задания: - запустите файл `download_data.sh`, чтобы скачать данные, которые мы будем использовать для тренировки - установите все необходимые библиотеки, запустив `pip install -r requirements.txt` (если раньше не работали с `pip`, вам сюда - https://pip.pypa.io/en/stable/quickstart/) Если вы раньше не работали с numpy, вам может помочь tutorial. Например этот: http://cs231n.github.io/python-numpy-tutorial/ ``` import numpy as np import matplotlib.pyplot as plt %matplotlib inline %load_ext autoreload %autoreload 2 from dataset import load_svhn from knn import KNN from metrics import binary_classification_metrics, multiclass_accuracy ``` # Загрузим и визуализируем данные В задании уже дана функция `load_svhn`, загружающая данные с диска. Она возвращает данные для тренировки и для тестирования как numpy arrays. Мы будем использовать цифры из датасета Street View House Numbers (SVHN, http://ufldl.stanford.edu/housenumbers/), чтобы решать задачу хоть сколько-нибудь сложнее MNIST. ``` train_X, train_y, test_X, test_y = load_svhn("data", max_train=1000, max_test=100) samples_per_class = 5 # Number of samples per class to visualize plot_index = 1 for example_index in range(samples_per_class): for class_index in range(10): plt.subplot(5, 10, plot_index) image = train_X[train_y == class_index][example_index] plt.imshow(image.astype(np.uint8)) plt.axis('off') plot_index += 1 ``` # Сначала реализуем KNN для бинарной классификации В качестве задачи бинарной классификации мы натренируем модель, которая будет отличать цифру 0 от цифры 9. ``` # First, let's prepare the labels and the source data # Only select 0s and 9s binary_train_mask = (train_y == 0) | (train_y == 9) binary_train_X = train_X[binary_train_mask] binary_train_y = train_y[binary_train_mask] == 0 binary_test_mask = (test_y == 0) | (test_y == 9) binary_test_X = test_X[binary_test_mask] binary_test_y = test_y[binary_test_mask] == 0 # Reshape to 1-dimensional array [num_samples, 32*32*3] binary_train_X = binary_train_X.reshape(binary_train_X.shape[0], -1) binary_test_X = binary_test_X.reshape(binary_test_X.shape[0], -1) # Create the classifier and call fit to train the model # KNN just remembers all the data knn_classifier = KNN(k=1) knn_classifier.fit(binary_train_X, binary_train_y) ``` ## Пришло время написать код! Последовательно реализуйте функции `compute_distances_two_loops`, `compute_distances_one_loop` и `compute_distances_no_loops` в файле `knn.py`. Эти функции строят массив расстояний между всеми векторами в тестовом наборе и в тренировочном наборе. В результате они должны построить массив размера `(num_test, num_train)`, где координата `[i][j]` соотвествует расстоянию между i-м вектором в test (`test[i]`) и j-м вектором в train (`train[j]`). 
**Обратите внимание** Для простоты реализации мы будем использовать в качестве расстояния меру L1 (ее еще называют [Manhattan distance](https://ru.wikipedia.org/wiki/%D0%A0%D0%B0%D1%81%D1%81%D1%82%D0%BE%D1%8F%D0%BD%D0%B8%D0%B5_%D0%B3%D0%BE%D1%80%D0%BE%D0%B4%D1%81%D0%BA%D0%B8%D1%85_%D0%BA%D0%B2%D0%B0%D1%80%D1%82%D0%B0%D0%BB%D0%BE%D0%B2)). ![image.png](attachment:image.png) ``` # TODO: implement compute_distances_two_loops in knn.py dists = knn_classifier.compute_distances_two_loops(binary_test_X) assert np.isclose(dists[0, 10], np.sum(np.abs(binary_test_X[0] - binary_train_X[10]))) # TODO: implement compute_distances_one_loop in knn.py dists = knn_classifier.compute_distances_one_loop(binary_test_X) assert np.isclose(dists[0, 10], np.sum(np.abs(binary_test_X[0] - binary_train_X[10]))) # TODO: implement compute_distances_no_loops in knn.py dists = knn_classifier.compute_distances_no_loops(binary_test_X) assert np.isclose(dists[0, 10], np.sum(np.abs(binary_test_X[0] - binary_train_X[10]))) # Lets look at the performance difference %timeit knn_classifier.compute_distances_two_loops(binary_test_X) %timeit knn_classifier.compute_distances_one_loop(binary_test_X) %timeit knn_classifier.compute_distances_no_loops(binary_test_X) # TODO: implement predict_labels_binary in knn.py prediction = knn_classifier.predict(binary_test_X) # TODO: implement binary_classification_metrics in metrics.py precision, recall, f1, accuracy = binary_classification_metrics(prediction, binary_test_y) print("KNN with k = %s" % knn_classifier.k) print("Accuracy: %4.2f, Precision: %4.2f, Recall: %4.2f, F1: %4.2f" % (accuracy, precision, recall, f1)) # Let's put everything together and run KNN with k=3 and see how we do knn_classifier_3 = KNN(k=3) knn_classifier_3.fit(binary_train_X, binary_train_y) prediction = knn_classifier_3.predict(binary_test_X) precision, recall, f1, accuracy = binary_classification_metrics(prediction, binary_test_y) print("KNN with k = %s" % knn_classifier_3.k) print("Accuracy: %4.2f, Precision: %4.2f, Recall: %4.2f, F1: %4.2f" % (accuracy, precision, recall, f1)) ``` # Кросс-валидация (cross-validation) Попробуем найти лучшее значение параметра k для алгоритма KNN! Для этого мы воспользуемся k-fold cross-validation (https://en.wikipedia.org/wiki/Cross-validation_(statistics)#k-fold_cross-validation). Мы разделим тренировочные данные на 5 фолдов (folds), и по очереди будем использовать каждый из них в качестве проверочных данных (validation data), а остальные -- в качестве тренировочных (training data). В качестве финальной оценки эффективности k мы усредним значения F1 score на всех фолдах. После этого мы просто выберем значение k с лучшим значением метрики. *Бонус*: есть ли другие варианты агрегировать F1 score по всем фолдам? Напишите плюсы и минусы в клетке ниже. 
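For reference, a minimal sketch of the three L1-distance computations exercised above, written as standalone functions. This is only one possible implementation, not necessarily the reference solution expected in `knn.py` (which stores the training data internally after `fit`).

```
import numpy as np

def l1_distances_two_loops(train_X, test_X):
    num_test, num_train = test_X.shape[0], train_X.shape[0]
    dists = np.zeros((num_test, num_train))
    for i in range(num_test):
        for j in range(num_train):
            dists[i, j] = np.sum(np.abs(test_X[i] - train_X[j]))
    return dists

def l1_distances_one_loop(train_X, test_X):
    dists = np.zeros((test_X.shape[0], train_X.shape[0]))
    for i in range(test_X.shape[0]):
        dists[i] = np.sum(np.abs(train_X - test_X[i]), axis=1)
    return dists

def l1_distances_no_loops(train_X, test_X):
    # Broadcasting builds a (num_test, num_train, D) intermediate,
    # which can be memory-hungry for large D, but avoids Python loops.
    return np.abs(test_X[:, np.newaxis, :] - train_X[np.newaxis, :, :]).sum(axis=2)
```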
``` # Find the best k using cross-validation based on F1 score num_folds = 5 train_folds_X = [] train_folds_y = [] # TODO: split the training data in 5 folds and store them in train_folds_X/train_folds_y train_indexes = range(0, binary_train_X.shape[0]) test_size = np.floor_divide(binary_train_X.shape[0], num_folds) k_choices = [1, 2, 3, 5, 8, 10, 15, 20, 25, 50] k_to_f1 = {} # dict mapping k values to mean F1 scores (int -> float) for k in k_choices: # TODO: perform cross-validation # Go through every fold and use it for testing and all other folds for training # Perform training and produce F1 score metric on the validation dataset # Average F1 from all the folds and write it into k_to_f1 f1_arr = [] for i_fold in range(num_folds): #split indexes if (i_fold == 0): test_slice, remainder = np.split(train_indexes, [test_size], axis=0) else: remainder[(i_fold-1)*test_size:i_fold*test_size], test_slice = test_slice, remainder[(i_fold-1)*test_size:i_fold*test_size].copy() # Reshape to 1-dimensional array [num_samples, 32*32*3] train_folds_X = binary_train_X[remainder] train_folds_y = binary_train_y[remainder] validation_folds_X = binary_train_X[test_slice] validation_folds_y = binary_train_y[test_slice] # train & predict knn_classifier = KNN(k=k) knn_classifier.fit(train_folds_X, train_folds_y) prediction = knn_classifier.predict(validation_folds_X) precision, recall, f1, accuracy = binary_classification_metrics(prediction, validation_folds_y) #print(precision, recall, f1, accuracy) f1_arr = np.append(f1_arr, f1) k_to_f1[k] = np.mean(f1_arr) print('----') for k in sorted(k_to_f1): print('k = %d, f1 = %f' % (k, k_to_f1[k])) ``` ### Проверим, как хорошо работает лучшее значение k на тестовых данных (test data) ``` # TODO Set the best k to the best value found by cross-validation best_k = 1 best_knn_classifier = KNN(k=best_k) best_knn_classifier.fit(binary_train_X, binary_train_y) prediction = best_knn_classifier.predict(binary_test_X) precision, recall, f1, accuracy = binary_classification_metrics(prediction, binary_test_y) print("Best KNN with k = %s" % best_k) print("Accuracy: %4.2f, Precision: %4.2f, Recall: %4.2f, F1: %4.2f" % (accuracy, precision, recall, f1)) ``` # Многоклассовая классификация (multi-class classification) Переходим к следующему этапу - классификации на каждую цифру. ``` # Now let's use all 10 classes train_X = train_X.reshape(train_X.shape[0], -1) test_X = test_X.reshape(test_X.shape[0], -1) knn_classifier = KNN(k=1) knn_classifier.fit(train_X, train_y) # TODO: Implement predict_labels_multiclass predict = knn_classifier.predict(test_X) # TODO: Implement multiclass_accuracy accuracy = multiclass_accuracy(predict, test_y) print("Accuracy: %4.2f" % accuracy) ``` Снова кросс-валидация. Теперь нашей основной метрикой стала точность (accuracy), и ее мы тоже будем усреднять по всем фолдам. 
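The fold bookkeeping above can be simplified with `np.array_split`, which also covers the multiclass cross-validation cell below whose loop body is left as a TODO. The sketch below assumes the `KNN` and `multiclass_accuracy` interfaces imported at the top of the notebook; it is one possible approach, not the required one.

```
import numpy as np

def cross_validate_accuracy(train_X, train_y, k_choices, num_folds=5):
    """Mean multiclass accuracy per k, using np.array_split to build folds."""
    fold_X = np.array_split(train_X, num_folds)
    fold_y = np.array_split(train_y, num_folds)
    k_to_accuracy = {}
    for k in k_choices:
        accuracies = []
        for i in range(num_folds):
            val_X, val_y = fold_X[i], fold_y[i]
            tr_X = np.concatenate(fold_X[:i] + fold_X[i + 1:])
            tr_y = np.concatenate(fold_y[:i] + fold_y[i + 1:])
            clf = KNN(k=k)
            clf.fit(tr_X, tr_y)
            accuracies.append(multiclass_accuracy(clf.predict(val_X), val_y))
        k_to_accuracy[k] = np.mean(accuracies)
    return k_to_accuracy

# Example usage with the variables defined above:
# k_to_accuracy = cross_validate_accuracy(train_X, train_y, k_choices)
```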
``` # Find the best k using cross-validation based on accuracy num_folds = 5 train_folds_X = [] train_folds_y = [] # TODO: split the training data in 5 folds and store them in train_folds_X/train_folds_y k_choices = [1, 2, 3, 5, 8, 10, 15, 20, 25, 50] k_to_accuracy = {} for k in k_choices: # TODO: perform cross-validation # Go through every fold and use it for testing and all other folds for validation # Perform training and produce accuracy metric on the validation dataset # Average accuracy from all the folds and write it into k_to_accuracy pass for k in sorted(k_to_accuracy): print('k = %d, accuracy = %f' % (k, k_to_accuracy[k])) ``` ### Финальный тест - классификация на 10 классов на тестовой выборке (test data) Если все реализовано правильно, вы должны увидеть точность не менее **0.2**. ``` # TODO Set the best k as a best from computed best_k = 1 best_knn_classifier = KNN(k=best_k) best_knn_classifier.fit(train_X, train_y) prediction = best_knn_classifier.predict(test_X) # Accuracy should be around 20%! accuracy = multiclass_accuracy(prediction, test_y) print("Accuracy: %4.2f" % accuracy) ```
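The notebook relies on `binary_classification_metrics` and `multiclass_accuracy` from `metrics.py`, which this assignment leaves as TODOs. A minimal sketch of what they could compute is given below, with the return order matching how the values are unpacked in the cells above.

```
import numpy as np

def binary_classification_metrics_sketch(prediction, ground_truth):
    prediction = np.asarray(prediction, dtype=bool)
    ground_truth = np.asarray(ground_truth, dtype=bool)
    tp = np.sum(prediction & ground_truth)
    fp = np.sum(prediction & ~ground_truth)
    fn = np.sum(~prediction & ground_truth)
    tn = np.sum(~prediction & ~ground_truth)
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0
    accuracy = (tp + tn) / prediction.size
    return precision, recall, f1, accuracy

def multiclass_accuracy_sketch(prediction, ground_truth):
    # Fraction of samples whose predicted class matches the true class
    return np.mean(np.asarray(prediction) == np.asarray(ground_truth))
```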
github_jupyter
# API demonstration for paper of v1.0 _the LSST-DESC CLMM team_ Here we demonstrate how to use `clmm` to estimate a WL halo mass from observations of a galaxy cluster when source galaxies follow a given distribution (The LSST DESC Science Requirements Document - arXiv:1809.01669, implemented in `clmm`). It uses several functionalities of the support `mock_data` module to produce mock datasets. - Setting things up, with the proper imports. - Computing the binned reduced tangential shear profile, for the 2 datasets, using logarithmic binning. - Setting up a model accounting for the redshift distribution. - Perform a simple fit using `scipy.optimize.curve_fit` included in `clmm` and visualize the results. ## Setup First, we import some standard packages. ``` import matplotlib.pyplot as plt import numpy as np plt.rcParams['font.family'] = ['gothambook','gotham','gotham-book','serif'] ``` ## Generating mock data `clmm` has a support code to generate a mock catalog given a input cosmology and cluster parameters. We will use this to generate a data sample to be used in this example: ``` from clmm import Cosmology import clmm.support.mock_data as mock np.random.seed(14) # For reproducibility # Set cosmology of mock data cosmo = Cosmology(H0=70.0, Omega_dm0=0.27-0.045, Omega_b0=0.045, Omega_k0=0.0) # Cluster info cluster_m = 1.e15 # Cluster mass - ($M200_m$) [Msun] concentration = 4 # Cluster concentration cluster_z = 0.3 # Cluster redshift cluster_ra = 0. # Cluster Ra in deg cluster_dec = 0. # Cluster Dec in deg # Catalog info field_size = 10 # i.e. 10 x 10 Mpc field at the cluster redshift, cluster in the center # Make mock galaxies mock_galaxies = mock.generate_galaxy_catalog( cluster_m=cluster_m, cluster_z=cluster_z, cluster_c=concentration, # Cluster data cosmo=cosmo, # Cosmology object zsrc='desc_srd', # Galaxy redshift distribution, zsrc_min=0.4, # Minimum redshift of the galaxies shapenoise=0.05, # Gaussian shape noise to the galaxy shapes photoz_sigma_unscaled=0.05, # Photo-z errors to source redshifts field_size=field_size, ngal_density=20 # number of gal/arcmin2 for z in [0, infty] )['ra', 'dec', 'e1', 'e2', 'z', 'ztrue', 'pzbins', 'pzpdf', 'id'] print(f'Catalog table with the columns: {", ".join(mock_galaxies.colnames)}') ngals_init = len(mock_galaxies) print(f'Initial number of galaxies: {ngals_init:,}') # Keeping only galaxies with "measured" redshift greater than cluster redshift mock_galaxies = mock_galaxies[(mock_galaxies['z']>cluster_z)] ngals_good = len(mock_galaxies) if ngals_good < ngals_init: print(f'Number of excluded galaxies (with photoz < cluster_z): {ngals_init-ngals_good:,}') # reset galaxy id for later use mock_galaxies['id'] = np.arange(ngals_good) # Check final density from clmm.utils import convert_units field_size_arcmin = convert_units(field_size, 'Mpc', 'arcmin', redshift=cluster_z, cosmo=cosmo) print(f'Background galaxy density = {ngals_good/field_size_arcmin**2:.2f} gal/arcmin2\n') ``` We can extract the column of this mock catalog to show explicitely how the quantities can be used on `clmm` functionality and how to add them to a `GalaxyCluster` object: ``` # Put galaxy values on arrays gal_ra = mock_galaxies['ra'] # Galaxies Ra in deg gal_dec = mock_galaxies['dec'] # Galaxies Dec in deg gal_e1 = mock_galaxies['e1'] # Galaxies elipticipy 1 gal_e2 = mock_galaxies['e2'] # Galaxies elipticipy 2 gal_z = mock_galaxies['z'] # Galaxies observed redshift gal_ztrue = mock_galaxies['ztrue'] # Galaxies true redshift gal_pzbins = mock_galaxies['pzbins'] # Galaxies P(z) bins 
gal_pzpdf = mock_galaxies['pzpdf'] # Galaxies P(z) gal_id = mock_galaxies['id'] # Galaxies ID ``` ## Measuring shear profiles From the source galaxy quantities, we can compute the elepticities and corresponding radial profile usimg `clmm.dataops` functions: ``` import clmm.dataops as da # Convert elipticities into shears gal_ang_dist, gal_gt, gal_gx = da.compute_tangential_and_cross_components(cluster_ra, cluster_dec, gal_ra, gal_dec, gal_e1, gal_e2, geometry="flat") # Measure profile profile = da.make_radial_profile([gal_gt, gal_gx, gal_z], gal_ang_dist, "radians", "Mpc", bins=da.make_bins(0.01, field_size/2., 50), cosmo=cosmo, z_lens=cluster_z, include_empty_bins=False) print(f'Profile table has columns: {", ".join(profile.colnames)},') print('where p_(0, 1, 2) = (gt, gx, z)') ``` The other possibility is to use the `GalaxyCluster` object. This is the main approach to handle data with `clmm`, and also the simpler way. For that you just have to provide the following information of the cluster: * Ra, Dec [deg] * Mass - ($M200_m$) [Msun] * Concentration * Redshift and the source galaxies: * Ra, Dec [deg] * 2 axis of eliptticities * Redshift ``` import clmm # Create a GCData with the galaxies galaxies = clmm.GCData([gal_ra, gal_dec, gal_e1, gal_e2, gal_z, gal_ztrue, gal_pzbins, gal_pzpdf, gal_id], names=['ra', 'dec', 'e1', 'e2', 'z', 'ztrue', 'pzbins', 'pzpdf', 'id']) # Create a GalaxyCluster cluster = clmm.GalaxyCluster("Name of cluster", cluster_ra, cluster_dec, cluster_z, mock_galaxies) # Convert elipticities into shears for the members cluster.compute_tangential_and_cross_components(geometry="flat") print(cluster.galcat.colnames) # Measure profile and add profile table to the cluster seps = convert_units(cluster.galcat['theta'], 'radians', 'mpc',cluster.z, cosmo) cluster.make_radial_profile(bins=da.make_bins(0.1, field_size/2., 25, method='evenlog10width'), bin_units="Mpc", cosmo=cosmo, include_empty_bins=False, gal_ids_in_bins=True, ) print(cluster.profile.colnames) ``` This results in an attribute `table` added to the `cluster` object. ``` from paper_formating import prep_plot prep_plot(figsize=(9, 9)) errorbar_kwargs = dict(linestyle='', marker='o', markersize=1, elinewidth=.5, capthick=.5) plt.errorbar(cluster.profile['radius'], cluster.profile['gt'], cluster.profile['gt_err'], c='k', **errorbar_kwargs) plt.xlabel('r [Mpc]', fontsize = 10) plt.ylabel(r'$g_t$', fontsize = 10) plt.xscale('log') plt.yscale('log') ``` ## Theoretical predictions We consider 3 models: 1. One model where all sources are considered at the same redshift 2. One model using the overall source redshift distribution to predict the reduced tangential shear 3. A more accurate model, relying on the fact that we have access to the individual redshifts of the sources, where the average reduced tangential shear is averaged independently in each bin, accounting for the acutal population of sources in each bin. 
All models rely on `clmm.predict_reduced_tangential_shear` to make a prediction that accounts for the redshift distribution of the galaxies in each radial bin: ### Model considering all sources located at the average redshift \begin{equation} g_{t,i}^{\rm{avg(z)}} = g_t(R_i, \langle z \rangle)\;, \label{eq:wrong_gt_model} \end{equation} ``` def predict_reduced_tangential_shear_mean_z(profile, logm): return clmm.compute_reduced_tangential_shear( r_proj=profile['radius'], # Radial component of the profile mdelta=10**logm, # Mass of the cluster [M_sun] cdelta=4, # Concentration of the cluster z_cluster=cluster_z, # Redshift of the cluster z_source=np.mean(cluster.galcat['z']), # Mean value of source galaxies redshift cosmo=cosmo, delta_mdef=200, halo_profile_model='nfw' ) ``` ### Model relying on the overall redshift distribution of the sources N(z), not using individual redshift information (eq. (6) from Applegate et al. 2014, MNRAS, 439, 48) \begin{equation} g_{t,i}^{N(z)} = \frac{\langle\beta_s\rangle \gamma_t(R_i, z\rightarrow\infty)}{1-\frac{\langle\beta_s^2\rangle}{\langle\beta_s\rangle}\kappa(R_i, z\rightarrow\infty)} \label{eq:approx_model} \end{equation} ``` z_inf = 1000 dl_inf = cosmo.eval_da_z1z2(cluster_z, z_inf) d_inf = cosmo.eval_da(z_inf) def betas(z): dls = cosmo.eval_da_z1z2(cluster_z, z) ds = cosmo.eval_da(z) return dls * d_inf / (ds * dl_inf) def predict_reduced_tangential_shear_approx(profile, logm): bs_mean = np.mean(betas(cluster.galcat['z'])) bs2_mean = np.mean(betas(cluster.galcat['z'])**2) gamma_t_inf = clmm.compute_tangential_shear( r_proj=profile['radius'], # Radial component of the profile mdelta=10**logm, # Mass of the cluster [M_sun] cdelta=4, # Concentration of the cluster z_cluster=cluster_z, # Redshift of the cluster z_source=z_inf, # Redshift value at infinity cosmo=cosmo, delta_mdef=200, halo_profile_model='nfw') convergence_inf = clmm.compute_convergence( r_proj=profile['radius'], # Radial component of the profile mdelta=10**logm, # Mass of the cluster [M_sun] cdelta=4, # Concentration of the cluster z_cluster=cluster_z, # Redshift of the cluster z_source=z_inf, # Redshift value at infinity cosmo=cosmo, delta_mdef=200, halo_profile_model='nfw') return bs_mean*gamma_t_inf/(1-(bs2_mean/bs_mean)*convergence_inf) ``` ### Model using individual redshift and radial information, to compute the averaged shear in each radial bin, based on the galaxies actually present in that bin. \begin{equation} g_{t,i}^{z, R} = \frac{1}{N_i}\sum_{{\rm gal\,}j\in {\rm bin\,}i} g_t(R_j, z_j) \label{eq:exact_model} \end{equation} ``` cluster.galcat['theta_mpc'] = convert_units(cluster.galcat['theta'], 'radians', 'mpc',cluster.z, cosmo) def predict_reduced_tangential_shear_exact(profile, logm): return np.array([np.mean( clmm.compute_reduced_tangential_shear( # Radial component of each source galaxy inside the radial bin r_proj=cluster.galcat[radial_bin['gal_id']]['theta_mpc'], mdelta=10**logm, # Mass of the cluster [M_sun] cdelta=4, # Concentration of the cluster z_cluster=cluster_z, # Redshift of the cluster # Redshift value of each source galaxy inside the radial bin z_source=cluster.galcat[radial_bin['gal_id']]['z'], cosmo=cosmo, delta_mdef=200, halo_profile_model='nfw' )) for radial_bin in profile]) ``` ## Mass fitting We estimate the best-fit mass using `scipy.optimize.curve_fit`. The choice of fitting $\log M$ instead of $M$ lowers the range of pre-defined fitting bounds from several order of magnitude for the mass to unity. 
From the associated error $\sigma_{\log M}$ we calculate the error to mass as $\sigma_M = M_{fit}\ln(10)\sigma_{\log M}$. #### First, identify bins with sufficient galaxy statistics to be kept for the fit For small samples, error bars should not be computed using the simple error on the mean approach available so far in CLMM) ``` mask_for_fit = cluster.profile['n_src'] > 5 data_for_fit = cluster.profile[mask_for_fit] ``` #### Perform the fits ``` from clmm.support.sampler import fitters def fit_mass(predict_function): popt, pcov = fitters['curve_fit'](predict_function, data_for_fit, data_for_fit['gt'], data_for_fit['gt_err'], bounds=[10.,17.]) logm, logm_err = popt[0], np.sqrt(pcov[0][0]) return {'logm':logm, 'logm_err':logm_err, 'm': 10**logm, 'm_err': (10**logm)*logm_err*np.log(10)} fit_mean_z = fit_mass(predict_reduced_tangential_shear_mean_z) fit_approx = fit_mass(predict_reduced_tangential_shear_approx) fit_exact = fit_mass(predict_reduced_tangential_shear_exact) print(f'Input mass = {cluster_m:.2e} Msun\n') print(f'Best fit mass for average redshift = {fit_mean_z["m"]:.3e} +/- {fit_mean_z["m_err"]:.3e} Msun') print(f'Best fit mass for N(z) model = {fit_approx["m"]:.3e} +/- {fit_approx["m_err"]:.3e} Msun') print(f'Best fit mass for individual redshift and radius = {fit_exact["m"]:.3e} +/- {fit_exact["m_err"]:.3e} Msun') ``` As expected, the reconstructed mass is biased when the redshift distribution is not accounted for in the model ## Visualization of the results For visualization purpose, we calculate the reduced tangential shear predicted by the model with estimated masses for noisy and ideal data. ``` def get_predicted_shear(predict_function, fit_values): gt_est = predict_function(data_for_fit, fit_values['logm']) gt_est_err = [predict_function(data_for_fit, fit_values['logm']+i*fit_values['logm_err']) for i in (-3, 3)] return gt_est, gt_est_err gt_mean_z, gt_err_mean_z = get_predicted_shear(predict_reduced_tangential_shear_mean_z, fit_mean_z) gt_approx, gt_err_approx = get_predicted_shear(predict_reduced_tangential_shear_approx, fit_approx) gt_exact, gt_err_exact = get_predicted_shear(predict_reduced_tangential_shear_exact, fit_exact) ``` Check reduced chi2 values of the best-fit model ``` chi2_mean_z_dof = np.sum((gt_mean_z-data_for_fit['gt'])**2/(data_for_fit['gt_err'])**2)/(len(data_for_fit)-1) chi2_approx_dof = np.sum((gt_approx-data_for_fit['gt'])**2/(data_for_fit['gt_err'])**2)/(len(data_for_fit)-1) chi2_exact_dof = np.sum((gt_exact-data_for_fit['gt'])**2/(data_for_fit['gt_err'])**2)/(len(data_for_fit)-1) print(f'Reduced chi2 (mean z model) = {chi2_mean_z_dof}') print(f'Reduced chi2 (N(z) model) = {chi2_approx_dof}') print(f'Reduced chi2 (individual (R,z) model) = {chi2_exact_dof}') ``` We compare to tangential shear obtained with theoretical mass. We plot the reduced tangential shear models first when redshift distribution is accounted for in the model then for the naive approach, with respective best-fit masses. 
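The fits in the mass-fitting section go through `fitters['curve_fit']` from `clmm.support.sampler`. Assuming that helper is a thin wrapper around `scipy.optimize.curve_fit` (an assumption; the wrapper itself is not shown here), an equivalent direct call, including the $\sigma_M = M_{fit}\ln(10)\sigma_{\log M}$ propagation described above, could look like this:

```
from scipy.optimize import curve_fit

popt, pcov = curve_fit(
    predict_reduced_tangential_shear_exact,    # model defined above: f(profile, logm)
    data_for_fit,                              # xdata is passed straight to the model
    np.asarray(data_for_fit['gt']),            # measured reduced tangential shear
    p0=(14.,),                                 # initial guess inside the bounds
    sigma=np.asarray(data_for_fit['gt_err']),  # per-bin uncertainties
    bounds=(10., 17.),                         # bounds on log10(M)
)
logm_fit, logm_err = popt[0], np.sqrt(pcov[0][0])
m_fit = 10**logm_fit
m_err = m_fit * np.log(10) * logm_err
print(f'direct curve_fit: M = {m_fit:.3e} +/- {m_err:.3e} Msun')
```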
``` from matplotlib.ticker import MultipleLocator prep_plot(figsize=(9 , 9)) gt_ax = plt.axes([.25, .42, .7, .55]) gt_ax.errorbar(data_for_fit['radius'],data_for_fit['gt'], data_for_fit['gt_err'], c='k', label=rf'$M_{{input}} = {cluster_m*1e-15}\times10^{{{15}}} M_\odot$', **errorbar_kwargs) # Points in grey have not been used for the fit gt_ax.errorbar(cluster.profile['radius'][~mask_for_fit], cluster.profile['gt'][~mask_for_fit], cluster.profile['gt_err'][~mask_for_fit], c='grey',**errorbar_kwargs) pow10 = 15 mlabel = lambda name, fits: fr'$M_{{fit}}^{{{name}}} = {fits["m"]/10**pow10:.3f}\pm{fits["m_err"]/10**pow10:.3f}\times 10^{{{pow10}}} M_\odot$' # Avg z gt_ax.loglog(data_for_fit['radius'], gt_mean_z,'-C0', label=mlabel('avg(z)', fit_mean_z),lw=.5) gt_ax.fill_between(data_for_fit['radius'], *gt_err_mean_z, lw=0, color='C0', alpha=.2) # Approx model gt_ax.loglog(data_for_fit['radius'], gt_approx,'-C1', label=mlabel('N(z)', fit_approx), lw=.5) gt_ax.fill_between(data_for_fit['radius'], *gt_err_approx, lw=0, color='C1', alpha=.2) # Exact model gt_ax.loglog(data_for_fit['radius'], gt_exact,'-C2', label=mlabel('z,R', fit_exact), lw=.5) gt_ax.fill_between(data_for_fit['radius'], *gt_err_exact, lw=0, color='C2', alpha=.2) gt_ax.set_ylabel(r'$g_t$', fontsize = 8) gt_ax.legend(fontsize=6) gt_ax.set_xticklabels([]) gt_ax.tick_params('x', labelsize=8) gt_ax.tick_params('y', labelsize=8) #gt_ax.set_yscale('log') errorbar_kwargs2 = {k:v for k, v in errorbar_kwargs.items() if 'marker' not in k} errorbar_kwargs2['markersize'] = 3 errorbar_kwargs2['markeredgewidth'] = .5 res_ax = plt.axes([.25, .2, .7, .2]) delta = (cluster.profile['radius'][1]/cluster.profile['radius'][0])**.25 res_err = data_for_fit['gt_err']/data_for_fit['gt'] res_ax.errorbar(data_for_fit['radius']/delta, gt_mean_z/data_for_fit['gt']-1, yerr=res_err, marker='.', c='C0', **errorbar_kwargs2) errorbar_kwargs2['markersize'] = 1.5 res_ax.errorbar(data_for_fit['radius'], gt_approx/data_for_fit['gt']-1, yerr=res_err, marker='s', c='C1', **errorbar_kwargs2) errorbar_kwargs2['markersize'] = 3 errorbar_kwargs2['markeredgewidth'] = .5 res_ax.errorbar(data_for_fit['radius']*delta, gt_exact/data_for_fit['gt']-1, yerr=res_err, marker='*', c='C2', **errorbar_kwargs2) res_ax.set_xlabel(r'$R$ [Mpc]', fontsize = 8) res_ax.set_ylabel(r'$g_t^{mod.}/g_t^{data}-1$', fontsize = 8) res_ax.set_xscale('log') res_ax.set_xlim(gt_ax.get_xlim()) res_ax.set_ylim(-0.65,0.65) res_ax.yaxis.set_minor_locator(MultipleLocator(.1)) res_ax.tick_params('x', labelsize=8) res_ax.tick_params('y', labelsize=8) for p in (gt_ax, res_ax): p.xaxis.grid(True, which='major', lw=.5) p.yaxis.grid(True, which='major', lw=.5) p.xaxis.grid(True, which='minor', lw=.1) p.yaxis.grid(True, which='minor', lw=.1) plt.savefig('r_gt.png') ```
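As a quick numerical check of the bias discussed above, the relative deviation of each best-fit mass from the input mass can be printed directly. This assumes the `fit_*` dictionaries and `cluster_m` from the fitting section are still in scope.

```
for name, fit in [('avg(z)', fit_mean_z), ('N(z)', fit_approx), ('z,R', fit_exact)]:
    bias = fit['m'] / cluster_m - 1
    print(f"{name:>6}: M_fit/M_input - 1 = {bias:+.3f} +/- {fit['m_err'] / cluster_m:.3f}")
```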
<img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/> # CI/CD - Make sure all notebooks respects our format policy **Tags:** #naas **Author:** [Maxime Jublou](https://www.linkedin.com/in/maximejublou/) # Input ### Import libraries ``` import json import glob from rich import print import pydash import re ``` ## Model ### Utility functions These functions are used by other to not repeat ourselves. ``` def tag_exists(tagname, cells): for cell in cells: if tagname in pydash.get(cell, 'metadata.tags', []): return True return False def regexp_match(regex, string): matches = re.finditer(regex, string, re.MULTILINE) return len(list(matches)) >= 1 def check_regexp(cells, regex, source): cell_str = pydash.get(cells, source, '') return regexp_match(regex, cell_str) def check_title_exists(cells, title): for cell in cells: if pydash.get(cell, 'cell_type') == 'markdown' and regexp_match(rf"^## *{title}", pydash.get(cell, 'source[0]')): return True return False ``` ### Check functions This functions are used to check if a notebook contains the rights cells with proper formatting. ``` def check_naas_logo(cells): logo_content = '<img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/>' if pydash.get(cells, '[0].cell_type') == 'markdown' and pydash.get(cells, '[0].source[0]', '').startswith(logo_content): return (True, '') return (False, f''' Requirements: - Cell number: 1 - Cell type: Markdown - Shape: {logo_content} ''') def check_title_match_regexp(cells): return (check_regexp(cells, r"markdown", '[1].cell_type') and check_regexp(cells, r"^#.*-.*", '[1].source[0]'), ''' Requirements: - Cell number: 2 - Cell type: Markdown - Shape: "# something - some other thing" ''') def check_tool_tags(cells): return (check_regexp(cells, r"markdown", '[2].cell_type') and check_regexp(cells, r"^\*\*Tags:\*\* (#[1-9,a-z,A-Z]*( *|$))*", '[2].source[0]'), ''' Requirements: - Cell number: 3 - Cell type: Markdown - Shape: "**Tags:** #atLeastOneTool" ''') def check_author(cells): return (check_regexp(cells, r"markdown", '[3].cell_type') and check_regexp(cells, r"^\*\*Author:\*\* *.*", '[3].source[0]'), ''' Requirements: - Cell number: 4 - Cell type: Markdown - Shape: "**Author:** At least one author name" ''') def check_input_title_exists(cells): return (check_title_exists(cells, 'Input'), ''' Requirements: - Cell number: Any - Cell type: Markdown - Shape: "## Input" ''') def check_model_title_exists(cells): return (check_title_exists(cells, 'Model'), ''' Requirements: - Cell number: Any - Cell type: Markdown - Shape: "## Model" ''') def check_output_title_exists(cells): return (check_title_exists(cells, 'Output'), ''' Requirements: - Cell number: Any - Cell type: Markdown - Shape: "## Output" ''') ``` ## Output ``` got_errors = False error_counter = 0 for file in glob.glob('../../**/*.ipynb', recursive=True): # Do not check notebooks in .github or at the root of the project. 
if '.github' in file or len(file.split('/')) == 3: continue notebook = json.load(open(file)) cells = notebook.get('cells') filename = "[dark_orange]" + file.replace("../../", "") + "[/dark_orange]" outputs = [f'Errors found in: {filename}'] should_display_debug = False for checkf in [ check_naas_logo, check_title_match_regexp, check_tool_tags, check_author, check_input_title_exists, check_model_title_exists, check_output_title_exists]: result, msg = checkf(cells) if result is False: should_display_debug = True status_msg = "[bright_green]OK[/bright_green]" if result is True else f"[bright_red]KO {msg}[/bright_red]" outputs.append(f'{checkf.__name__} ... {status_msg}') if should_display_debug: got_errors = True error_counter += 1 for msg in outputs: print(msg) print("\n") if got_errors == True: print(f'[bright_red]You have {error_counter} notebooks having errors!') exit(1) ```
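A possible extension, not part of the current policy: the same pattern can be reused to add further checks, for example requiring at least one code cell. A minimal sketch built on the helpers above (the function name and requirement text are made up for illustration):

```
def check_has_code_cell(cells):
    for cell in cells:
        if pydash.get(cell, 'cell_type') == 'code':
            return (True, '')
    return (False, '''
    Requirements:
    - Cell number: Any
    - Cell type: Code
    - Shape: at least one code cell in the notebook
    ''')
```

To enable it, the function would simply be appended to the list of check functions iterated over in the loop above.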
# Predict H1N1 and Seasonal Flu Vaccines ## Preprocessing ### Import libraries ``` import pandas as pd import numpy as np %matplotlib inline import matplotlib.pyplot as plt ``` ### Import data ``` features_raw_df = pd.read_csv("data/training_set_features.csv", index_col="respondent_id") labels_raw_df = pd.read_csv("data/training_set_labels.csv", index_col="respondent_id") print("features_raw_df.shape", features_raw_df.shape) features_raw_df.head() features_raw_df.dtypes print("labels_raw_df.shape", labels_raw_df.shape) labels_raw_df.head() labels_raw_df.dtypes features_df = features_raw_df.copy() labels_df = labels_raw_df.copy() ``` ### Exploratory Data Analysis ``` fig, ax = plt.subplots(2, 1, sharex=True) n_entries = labels_df.shape[0] (labels_df['h1n1_vaccine'].value_counts().div(n_entries) .plot.barh(title="Proportion of H1N1 Vaccine", ax=ax[0])) ax[0].set_ylabel("h1n1_vaccine") (labels_df['seasonal_vaccine'].value_counts().div(n_entries) .plot.barh(title="Proportion of Seasonal Vaccine", ax=ax[1])) ax[1].set_ylabel("seasonal_vaccine") fig.tight_layout() pd.crosstab( labels_df["h1n1_vaccine"], labels_df["seasonal_vaccine"], margins=True, normalize=True ) (labels_df["h1n1_vaccine"] .corr(labels_df["seasonal_vaccine"], method="pearson") ) ``` ### Features ``` df = features_df.join(labels_df) print(df.shape) df.head() h1n1_concern_vaccine = df[['h1n1_concern', 'h1n1_vaccine']].groupby(['h1n1_concern', 'h1n1_vaccine']).size().unstack() h1n1_concern_vaccine ax = h1n1_concern_vaccine.plot.barh() ax.invert_yaxis() h1n1_concern_counts = h1n1_concern_vaccine.sum(axis='columns') h1n1_concern_counts h1n1_concern_vaccine_prop = h1n1_concern_vaccine.div(h1n1_concern_counts, axis='index') h1n1_concern_vaccine_prop ax = h1n1_concern_vaccine_prop.plot.barh(stacked=True) ax.invert_yaxis() ax.legend(loc='center left', bbox_to_anchor=(1.05, 0.5), title='h1n1_vaccine') plt.show() def vaccination_rate_plot(vaccine, feature, df, ax=None): feature_vaccine = df[[feature, vaccine]].groupby([feature, vaccine]).size().unstack() counts = feature_vaccine.sum(axis='columns') proportions = feature_vaccine.div(counts, axis='index') ax = proportions.plot.barh(stacked=True, ax=ax) ax.invert_yaxis() ax.legend(loc='center left', bbox_to_anchor=(1.05, 0.5), title=vaccine) ax.legend().remove() vaccination_rate_plot('seasonal_vaccine', 'h1n1_concern', df) cols_to_plot = [ 'h1n1_concern', 'h1n1_knowledge', 'opinion_h1n1_vacc_effective', 'opinion_h1n1_risk', 'opinion_h1n1_sick_from_vacc', 'opinion_seas_vacc_effective', 'opinion_seas_risk', 'opinion_seas_sick_from_vacc', 'sex', 'age_group', 'race', ] fig, ax = plt.subplots(len(cols_to_plot), 2, figsize=(10,len(cols_to_plot)*2.5)) for idx, col in enumerate(cols_to_plot): vaccination_rate_plot('h1n1_vaccine', col, df, ax=ax[idx, 0]) vaccination_rate_plot('seasonal_vaccine', col, df, ax=ax[idx, 1]) ax[0, 0].legend(loc='lower center', bbox_to_anchor=(0.5, 1.05), title='h1n1_vaccine') ax[0, 1].legend(loc='lower center', bbox_to_anchor=(0.5, 1.05), title='seasonal_vaccine') fig.tight_layout() ``` ### Categorical columns ``` features_df = features_raw_df.copy() labels_df = labels_raw_df.copy() features_df.dtypes == object # All categorical columns considered apart from employment-related categorical_cols = features_df.columns[features_df.dtypes == "object"].values[:-2] categorical_cols categorical_cols = np.delete(categorical_cols, np.where(categorical_cols == 'hhs_geo_region')) categorical_cols features_df.employment_occupation.unique() features_df.hhs_geo_region.unique()
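# Note: hhs_geo_region was removed from categorical_cols above (opaque geographic codes); it and the two employment columns are dropped entirely in preprocess_categorical, defined further down, rather than one-hot encoded.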
features_df[categorical_cols].head() for col in categorical_cols: col_dummies = pd.get_dummies(features_df[col], drop_first = True) features_df = features_df.drop(col, axis=1) features_df = pd.concat([features_df, col_dummies], axis=1) features_df.head() features_df.isna().sum() def preprocess_categorical(df): categorical_cols = df.columns[df.dtypes == "object"].values[:-2] categorical_cols = np.delete(categorical_cols, np.where(categorical_cols == 'hhs_geo_region')) for col in categorical_cols: col_dummies = pd.get_dummies(df[col], drop_first = True) df = df.drop(col, axis=1) df = pd.concat([df, col_dummies], axis=1) df = df.drop(['hhs_geo_region', 'employment_industry', 'employment_occupation'], axis=1) return df ``` ## MACHINE LEARNING ### Machine Learning Model ``` from sklearn.preprocessing import StandardScaler from sklearn.impute import SimpleImputer from sklearn.compose import ColumnTransformer from sklearn.linear_model import LogisticRegression from sklearn.multioutput import MultiOutputClassifier from sklearn.pipeline import Pipeline from sklearn.model_selection import train_test_split from sklearn.metrics import roc_curve, roc_auc_score RANDOM_SEED = 6 features_raw_df.dtypes != "object" numeric_cols = features_raw_df.columns[features_raw_df.dtypes != "object"].values print(numeric_cols) ``` ### Features Preprocessing ``` # chain preprocessing into a Pipeline object numeric_preprocessing_steps = Pipeline([ ('standard_scaler', StandardScaler()), ('simple_imputer', SimpleImputer(strategy='median')) ]) # create the preprocessor stage of final pipeline preprocessor = ColumnTransformer( transformers = [ ("numeric", numeric_preprocessing_steps, numeric_cols) ], remainder = "passthrough" ) estimators = MultiOutputClassifier( estimator=LogisticRegression(penalty="l2", C=1) ) full_pipeline = Pipeline([ ("preprocessor", preprocessor), ("estimators", estimators), ]) features_df_trans = preprocess_categorical(features_df) X_train, X_test, y_train, y_test = train_test_split( features_df_trans, labels_df, test_size=0.33, shuffle=True, stratify=labels_df, random_state=RANDOM_SEED ) X_train # Train model full_pipeline.fit(X_train, y_train) # Predict on evaluation set # This competition wants probabilities, not labels preds = full_pipeline.predict_proba(X_test) preds print("test_probas[0].shape", preds[0].shape) print("test_probas[1].shape", preds[1].shape) y_pred = pd.DataFrame( { "h1n1_vaccine": preds[0][:, 1], "seasonal_vaccine": preds[1][:, 1], }, index = y_test.index ) print("y_pred.shape:", y_pred.shape) y_pred.head() fig, ax = plt.subplots(1, 2, figsize=(7, 3.5)) fpr, tpr, thresholds = roc_curve(y_test['h1n1_vaccine'], y_pred['h1n1_vaccine']) ax[0].plot(fpr, tpr) ax[0].plot([0, 1], [0, 1], color='grey', linestyle='--') ax[0].set_ylabel('TPR') ax[0].set_xlabel('FPR') ax[0].set_title(f"{'h1n1_vaccine'}: AUC = {roc_auc_score(y_test['h1n1_vaccine'], y_pred['h1n1_vaccine']):.4f}") fpr, tpr, thresholds = roc_curve(y_test['seasonal_vaccine'], y_pred['seasonal_vaccine']) ax[1].plot(fpr, tpr) ax[1].plot([0, 1], [0, 1], color='grey', linestyle='--') ax[1].set_xlabel('FPR') ax[1].set_title(f"{'seasonal_vaccine'}: AUC = {roc_auc_score(y_test['seasonal_vaccine'], y_pred['seasonal_vaccine']):.4f}") fig.tight_layout() roc_auc_score(y_test, y_pred) ``` ### Retrain on full Dataset ``` full_pipeline.fit(features_df_trans, labels_df); ``` ## PREDICTIONS FOR THE TEST SET ``` test_features_df = pd.read_csv('data/test_set_features.csv', index_col='respondent_id') test_features_df test_features_df_trans = 
preprocess_categorical(test_features_df) test_preds = full_pipeline.predict_proba(test_features_df_trans) submission_df = pd.read_csv('data/submission_format.csv', index_col='respondent_id') # Save predictions to submission data frame submission_df["h1n1_vaccine"] = test_preds[0][:, 1] submission_df["seasonal_vaccine"] = test_preds[1][:, 1] submission_df.head() submission_df.to_csv('data/my_submission.csv', index=True) ```
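A possible refinement, not used above: the dummy encoding is currently done with `pd.get_dummies` outside the pipeline, so unseen categories in new data can silently change the column layout. One alternative is to move the categorical handling into the `ColumnTransformer` itself. A minimal sketch, assuming the same `numeric_cols` and `numeric_preprocessing_steps` as before; the `categorical_cols`, `preprocessor_v2`, and `pipeline_v2` names below are illustrative:

```
from sklearn.preprocessing import OneHotEncoder

categorical_cols = features_raw_df.columns[features_raw_df.dtypes == "object"].values

categorical_preprocessing_steps = Pipeline([
    ('simple_imputer', SimpleImputer(strategy='most_frequent')),
    ('one_hot', OneHotEncoder(handle_unknown='ignore')),  # ignore categories unseen at fit time
])

preprocessor_v2 = ColumnTransformer(transformers=[
    ("numeric", numeric_preprocessing_steps, numeric_cols),
    ("categorical", categorical_preprocessing_steps, categorical_cols),
])

pipeline_v2 = Pipeline([
    ("preprocessor", preprocessor_v2),
    ("estimators", MultiOutputClassifier(LogisticRegression(penalty="l2", C=1))),
])
```

With this setup the raw `features_raw_df` (and the raw test set) could be passed to `fit`/`predict_proba` directly, without the separate `preprocess_categorical` step.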
# QUANTUM PHASE ESTIMATION This tutorial provides a detailed implementation of the Quantum Phase Estimation (QPE) algorithm using the Amazon Braket SDK. The QPE algorithm is designed to estimate the eigenvalues of a unitary operator $U$ [1, 2]; it is a very important subroutine to many quantum algorithms, most famously Shor's algorithm for factoring and the HHL algorithm (named after the physicists Harrow, Hassidim and Lloyd) for solving linear systems of equations on a quantum computer [1, 2]. Moreover, eigenvalue problems can be found across many disciplines and application areas, including (for example) principal component analysis (PCA) as used in machine learning or the solution of differential equations as relevant across mathematics, physics, engineering and chemistry. We first review the basics of the QPE algorithm. We then implement the QPE algorithm in code using the Amazon Braket SDK, and we illustrate the application thereof with simple examples. This notebook also showcases the Amazon Braket `circuit.subroutine` functionality, which allows us to use custom-built gates as if they were any other built-in gates. This tutorial is set up to run either on the local simulator or the managed simulators; changing between these devices merely requires changing one line of code as demonstrated as follows in cell [4]. ## TECHNICAL BACKGROUND OF QPE __Introduction__: A unitary matrix is a complex, square matrix whose adjoint (or conjugate transpose) is equal to its inverse. Unitary matrices have many nice properties, including the fact that their eigenvalues are always roots of unity (that is, phases). Given a unitary matrix $U$ (satisfying $U^{\dagger}U=\mathbb{1}=UU^{\dagger}$) and an eigenstate $|\psi \rangle$ with $U|\psi \rangle = e^{2\pi i\varphi}|\psi \rangle$, the Quantum Phase Estimation (QPE) algorithm provides an estimate $\tilde{\varphi} \approx \varphi$ for the phase $\varphi$ (with $\varphi \in [0,1]$ since the eigenvalues $\lambda = \exp(2\pi i\varphi)$ of a unitary have modulus one). The QPE works with high probability within an additive error $\varepsilon$ using $O(\log(1/\varepsilon))$ qubits (without counting the qubits used to encode the eigenstate) and $O(1/\varepsilon)$ controlled-$U$ operations [1]. __Quantum Phase Estimation Algorithm__: The QPE algorithm takes a unitary $U$ as input. For the sake of simplicity (we will generalize the discussion below), suppose that the algorithm also takes as input an eigenstate $|\psi \rangle$ fulfilling $$U|\psi \rangle = \lambda |\psi \rangle,$$ with $\lambda = \exp(2\pi i\varphi)$. QPE uses two registers of qubits: we refer to the first register as *precision* qubits (as the number of qubits $n$ in the first register sets the achievable precision of our results) and the second register as *query* qubits (as the second register hosts the eigenstate $|\psi \rangle$). Suppose we have prepared this second register in $|\psi \rangle$. We then prepare a uniform superposition of all basis vectors in the first register using a series of Hadamard gates. Next, we apply a series of controlled-unitaries $C-U^{2^{k}}$ for different powers of $k=0,1,\dots, n-1$ (as illustrated in the circuit diagram that follows). For example, for $k=1$ we get \begin{equation} \begin{split} (|0 \rangle + |1 \rangle) |\psi \rangle & \rightarrow |0 \rangle |\psi \rangle + |1 \rangle U|\psi \rangle \\ & = (|0 \rangle + e^{2\pi i \varphi}|1 \rangle) |\psi \rangle. 
\end{split} \end{equation} Note that the second register remains unaffected as it stays in the eigenstate $|\psi \rangle$. However, we managed to transfer information about the phase of the eigenvalue of $U$ (that is, $\varphi$) into the first *precision* register by encoding it as a relative phase in the state of the qubits in the first register. Similarly, for $k=2$ we obtain \begin{equation} \begin{split} (|0 \rangle + |1 \rangle) |\psi \rangle & \rightarrow |0 \rangle |\psi \rangle + |1 \rangle U^{2}|\psi \rangle \\ & = (|0 \rangle + e^{2\pi i 2\varphi}|1 \rangle) |\psi \rangle, \end{split} \end{equation} where this time we wrote $2\varphi$ into the precision register. The process is similar for all $k>2$. Introducing the following notation for binary fractions $$[0. \varphi_{l}\varphi_{l+1}\dots \varphi_{m}] = \frac{\varphi_{l}}{2^{1}} + \frac{\varphi_{l+1}}{2^{2}} + \dots + \frac{\varphi_{m}}{2^{m-l+1}},$$ one can show that the application of a controlled unitary $C-U^{2^{k}}$ leads to the following transformation \begin{equation} \begin{split} (|0 \rangle + |1 \rangle) |\psi \rangle & \rightarrow |0 \rangle |\psi \rangle + |1 \rangle U^{2^{k}}|\psi \rangle \\ & = (|0 \rangle + e^{2\pi i 2^{k}\varphi}|1 \rangle) |\psi \rangle \\ & = (|0 \rangle + e^{2\pi i [0.\varphi_{k+1}\dots \varphi_{n}]}|1 \rangle) |\psi \rangle, \end{split} \end{equation} where the first $k$ bits of precision in the binary expansion (that is, those bits to the left of the decimal) can be dropped, because $e^{2\pi i \theta} = 1$ for any whole number $\theta$. The QPE algorithm implements a series of these transformations for $k=0, 1, \dots, n-1$, using $n$ qubits in the precision register. In its entirety, this sequence of controlled unitaries leads to the transformation $$ |0, \dots, 0 \rangle \otimes |\psi \rangle \longrightarrow (|0 \rangle + e^{2\pi i [0.\varphi_{n}]}|1 \rangle) \otimes (|0 \rangle + e^{2\pi i [0.\varphi_{n-1}\varphi_{n}]}|1 \rangle) \otimes \dots \otimes (|0 \rangle + e^{2\pi i [0.\varphi_{1}\dots\varphi_{n}]}|1 \rangle) \otimes |\psi \rangle. $$ By inspection, one can see that the state of the register qubits above corresponds to a quantum Fourier transform of the state $|\varphi_1,\dots,\varphi_n\rangle$. Thus, the final step of the QPE algorithm is to run the *inverse* Quantum Fourier Transform (QFT) algorithm on the precision register to extract the phase information from this state. The resulting state is $$|\varphi_{1}, \varphi_{2}, \dots, \varphi_{n} \rangle \otimes |\psi\rangle.$$ Measuring the precision qubits in the computational basis then gives the classical bitstring $\varphi_{1}, \varphi_{2}, \dots, \varphi_{n}$, from which we can readily infer the phase estimate $\tilde{\varphi} = 0.\varphi_{1} \dots \varphi_{n}$ with the corresponding eigenvalue $\tilde{\lambda} = \exp(2\pi i \tilde{\varphi})$. __Simple example for illustration__: For concreteness, consider a simple example with the unitary given by the Pauli $X$ gate, $U=X$, for which $|\Psi \rangle = |+\rangle = (|0 \rangle + |1 \rangle)/\sqrt{2}$ is an eigenstate with eigenvalue $\lambda = 1$, i.e., $\varphi=0$. This state can be prepared with a Hadamard gate as $|\Psi \rangle = H|0 \rangle$. We take a precision register consisting of just two qubits ($n=2$).
Thus, after the first layer of Hadamard gates, the quantum state is $$|0,0,0 \rangle \rightarrow |+,+,+\rangle.$$ Next, the applications of the controlled-$U$ gates (equal to $C-X$ operations, or CNOT gates in this example) leave this state untouched, because $|+\rangle$ is an eigenstate of $X$ with eigenvalue $+1$. Finally, applying the inverse QFT leads to $$\mathrm{QFT}^{\dagger}|+++\rangle=\mathrm{QFT}^\dagger\frac{|00\rangle + |01\rangle + |10\rangle + |11\rangle}{2}\otimes |+\rangle = |00\rangle \otimes |+\rangle,$$ from which we deduce $\varphi = [0.00]=0$ and therefore $\lambda=1$, as expected. Here, in the last step we have used $|00\rangle + |01\rangle + |10\rangle + |11\rangle = (|0\rangle + e^{2\pi i[0.0]}|1\rangle)(|0\rangle + e^{2\pi i[0.00]}|1\rangle)$, which makes the effect of the inverse QFT more apparent. __Initial state of query register__: So far, we have assumed that the query register is prepared in an eigenstate $|\Psi\rangle$ of $U$. What happens if this is not the case? Let's reconsider the simple example given previously. Suppose now that the query register is instead prepared in the state $|\Psi\rangle = |1\rangle$. We can always express this state in the eigenbasis of $U$, that is, $|1\rangle = \frac{1}{\sqrt{2}}(|+\rangle - |-\rangle)$. By linearity, application of the QPE algorithm then gives (up to normalization) \begin{equation} \begin{split} \mathrm{QPE}(|0,0,\dots\rangle \otimes |1\rangle) & = \mathrm{QPE}(|0,0,\dots\rangle \otimes |+\rangle) - \mathrm{QPE}(|0,0,\dots\rangle \otimes |-\rangle) \\ & = |\varphi_{+}\rangle \otimes |+\rangle - |\varphi_{-}\rangle \otimes |-\rangle. \\ \end{split} \end{equation} When we measure the precision qubits in this state, 50% of the time we will observe the eigenphase $\varphi_{+}$ and 50% of the time we will measure $\varphi_{-}$. We illustrate this example numerically as follows. This example motivates the general case: we can pass a state that is not an eigenstate of $U$ to the QPE algorithm, but we may need to repeat our measurements several times in order to obtain an estimate of the desired phase. ## CIRCUIT IMPLEMENTATION OF QPE The QPE circuit can be implemented using Hadamard gates, controlled-$U$ unitaries, and the inverse QFT (denoted as $\mathrm{QFT}^{-1}$). The details of the calculation can be found in a number of resources (such as [1]); we omit them here. Following the previous discussion, the circuit that implements the QPE algorithm reads as below, where m is the size of the lower query register and n is the size of the upper precision register. ![image.png](attachment:image.png) ## IMPORTS and SETUP ``` # general imports import numpy as np import math import matplotlib.pyplot as plt # magic word for producing visualizations in notebook %matplotlib inline # AWS imports: Import Amazon Braket SDK modules from braket.circuits import Circuit, circuit from braket.devices import LocalSimulator from braket.aws import AwsDevice # local imports from utils_qpe import qpe, run_qpe %load_ext autoreload %autoreload 2 ``` __NOTE__: Enter your desired device and S3 location (bucket and key) in the following area. If you are working with the local simulator ```LocalSimulator()``` you do not need to specify any S3 location. However, if you are using the managed (cloud-based) device or any QPU devices, you must specify the S3 location where your results will be stored. In this case, you must replace the API call ```device.run(circuit, ...)``` in the example that follows with ```device.run(circuit, s3_folder, ...)```.
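For reference, a minimal sketch of that dispatch (the `run_qpe` helper listed in the Appendix performs the equivalent check internally via `device.name`); `my_circuit` below is just a placeholder for whatever circuit is being submitted:

```
# Illustrative only: choose the correct run call for the selected backend
shots = 1000
if device.name == 'DefaultSimulator':
    task = device.run(my_circuit, shots=shots)             # local simulator
else:
    task = device.run(my_circuit, s3_folder, shots=shots)  # managed simulator or QPU
```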
``` # set up device: local simulator or the managed cloud-based simulator # device = LocalSimulator() device = AwsDevice("arn:aws:braket:::device/quantum-simulator/amazon/sv1") # Enter the S3 bucket you created during onboarding into the code that follows my_bucket = "amazon-braket-Your-Bucket-Name" # the name of the bucket my_prefix = "Your-Folder-Name" # the name of the folder in the bucket s3_folder = (my_bucket, my_prefix) ``` ### Pauli Matrices: In some of our examples, we choose the unitary $U$ to be given by the **Pauli Matrices**, which we thus define as follows: ``` # Define Pauli matrices Id = np.eye(2) # Identity matrix X = np.array([[0., 1.], [1., 0.]]) # Pauli X Y = np.array([[0., -1.j], [1.j, 0.]]) # Pauli Y Z = np.array([[1., 0.], [0., -1.]]) # Pauli Z ``` ## IMPLEMENTATION OF THE QPE CIRCUIT In ```utils_qpe.py``` we provide simple helper functions to implement the quantum circuit for the QPE algorithm. Specifically, we demonstrate that such modular building blocks can be registered as subroutines, using ```@circuit.subroutine(register=True)```. Moreover, we provide a helper function (called ```get_qpe_phases```) to perform postprocessing based on the measurement results to extract the phase. The details of ```utils_qpe.py``` are shown in the Appendix. To implement the unitary $C-U^{2^k}$, one can use the fact that $C-U^{2} = (C-U)(C-U)$, so that $C-U^{2^{k}}$ can be constructed by repeatedly applying the core building block $C-U$. However, the circuit generated using this approach will have a significantly larger depth. In our implementation, we instead define the matrix $U^{2^k}$ and create the controlled $C-(U^{2^k})$ gate from that. ## VISUALIZATION OF THE QPE CIRCUIT To check our implementation of the QPE circuit, we visualize this circuit for a small number of qubits. ``` # set total number of qubits precision_qubits = [0, 1] query_qubits = [2] # prepare query register my_qpe_circ = Circuit().h(query_qubits) # set unitary unitary = X # show small QPE example circuit my_qpe_circ = my_qpe_circ.qpe(precision_qubits, query_qubits, unitary) print('QPE CIRCUIT:') print(my_qpe_circ) ``` As shown in the following code, the two registers can be distributed anywhere across the circuit, with arbitrary indices for the precision and the query registers. ``` # set qubits precision_qubits = [1, 3] query_qubits = [5] # prepare query register my_qpe_circ = Circuit().i(range(7)) my_qpe_circ.h(query_qubits) # set unitary unitary = X # show small QPE example circuit my_qpe_circ = my_qpe_circ.qpe(precision_qubits, query_qubits, unitary) print('QPE CIRCUIT:') print(my_qpe_circ) ``` Next, we set up the same circuit, this time implementing the unitary $C-U^{2^k}$ by repeatedly applying the core building block $C-U$. This operation can be done by setting the parameter ```control_unitary=False``` (default is ```True```). ``` # set qubits precision_qubits = [1, 3] query_qubits = [5] # prepare query register my_qpe_circ = Circuit().i(range(7)) my_qpe_circ.h(query_qubits) # set unitary unitary = X # show small QPE example circuit my_qpe_circ = my_qpe_circ.qpe(precision_qubits, query_qubits, unitary, control_unitary=False) print('QPE CIRCUIT:') print(my_qpe_circ) ``` In the circuit diagram, we can visually infer the exponents for $k=0,1$, at the expense of a larger circuit depth. ## NUMERICAL TEST EXPERIMENTS In the following section, we verify that our QPE implementation works as expected with a few test examples: 1.
We run QPE with $U=X$ and prepare the eigenstate $|\Psi\rangle = |+\rangle = H|0\rangle$ with phase $\varphi=0$ and eigenvalue $\lambda=1$. 2. We run QPE with $U=X$ and prepare the eigenstate $|\Psi\rangle = |-\rangle = HX|0\rangle$ with phase $\varphi=0.5$ and eigenvalue $\lambda=-1$. 3. We run QPE with $U=X$ and prepare $|\Psi\rangle = |1\rangle = X|0\rangle$ which is *not* an eigenstate of $U$. Because $|1\rangle = (|+\rangle - |-\rangle)/\sqrt{2}$, we expect to measure both $\varphi=0$ and $\varphi=0.5$ associated with the two eigenstates $|\pm\rangle$. 4. We run QPE with unitary $U=X \otimes Z$, and prepare the query register in the eigenstate $|\Psi\rangle = |+\rangle \otimes |1\rangle = H|0\rangle \otimes X|0\rangle$. Here, we expect to measure the phase $\varphi=0.5$ (giving the corresponding eigenvalue $\lambda=-1$). 5. We run QPE with a _random_ two-qubit unitary, diagonal in the computational basis, and prepare the query register in the eigenstate $|11\rangle$. In this case, we should be able to read off the eigenvalue and phase from $U$ and verify QPE gives the right answer (with high probability) up to a small error (that depends on the number of qubits in the precision register). ## HELPER FUNCTIONS FOR NUMERICAL TESTS Because we will run the same code repeatedly, let's first create a helper function we can use to keep the notebook clean. ``` def postprocess_qpe_results(out): """ Function to postprocess dictionary returned by run_qpe Args: out: dictionary containing results/information associated with QPE run as produced by run_qpe """ # unpack results circ = out['circuit'] measurement_counts = out['measurement_counts'] bitstring_keys = out['bitstring_keys'] probs_values = out['probs_values'] precision_results_dic = out['precision_results_dic'] phases_decimal = out['phases_decimal'] eigenvalues = out['eigenvalues'] # print the circuit print('Printing circuit:') print(circ) # print measurement results print('Measurement counts:', measurement_counts) # plot probabilities plt.bar(bitstring_keys, probs_values); plt.xlabel('bitstrings'); plt.ylabel('probability'); plt.xticks(rotation=90); # print results print('Results in precision register:', precision_results_dic) print('QPE phase estimates:', phases_decimal) print('QPE eigenvalue estimates:', np.round(eigenvalues, 5)) ``` ### NUMERICAL TEST EXAMPLE 1 First, apply the QPE algorithm to the simple single-qubit unitary $U=X$, with eigenstate $|\Psi\rangle = |+\rangle = H|0\rangle$. Here, we expect to measure the phase $\varphi=0$ (giving the corresponding eigenvalue $\lambda=1$). We show that this result stays the same as we increase the number of qubits $n$ for the top register. ``` # Set total number of precision qubits: 2 number_precision_qubits = 2 # Define the set of precision qubits precision_qubits = range(number_precision_qubits) # Define the query qubits. We'll have them start after the precision qubits query_qubits = [number_precision_qubits] # State preparation for eigenstate of U=X query = Circuit().h(query_qubits) # Run the test with U=X out = run_qpe(X, precision_qubits, query_qubits, query, device, s3_folder) # Postprocess results postprocess_qpe_results(out) ``` Next, check that we get the same result for a larger precision (top) register. ``` # Set total number of precision qubits: 3 number_precision_qubits = 3 # Define the set of precision qubits precision_qubits = range(number_precision_qubits) # Define the query qubits.
We'll have them start after the precision qubits query_qubits = [number_precision_qubits] # State preparation for eigenstate of U=X query = Circuit().h(query_qubits) # Run the test with U=X out = run_qpe(X, precision_qubits, query_qubits, query, device, s3_folder) # Postprocess results postprocess_qpe_results(out) ``` ### NUMERICAL TEST EXAMPLE 2 Next, apply the QPE algorithm to the simple single-qubit unitary $U=X$, with eigenstate $|\Psi\rangle = |-\rangle = HX|0\rangle$. Here, we expect to measure the phase $\varphi=0.5$ (giving the corresponding eigenvalue $\lambda=-1$). ``` # Set total number of precision qubits: 2 number_precision_qubits = 2 # Define the set of precision qubits precision_qubits = range(number_precision_qubits) # Define the query qubits. We'll have them start after the precision qubits query_qubits = [number_precision_qubits] # State preparation for eigenstate of U=X query = Circuit().x(query_qubits).h(query_qubits) # Run the test with U=X out = run_qpe(X, precision_qubits, query_qubits, query, device, s3_folder) # Postprocess results postprocess_qpe_results(out) ``` ### NUMERICAL TEST EXAMPLE 3 Next, we apply the QPE algorithm again to the simple single-qubit unitary $U=X$, but initialize the query register in the state $|\Psi\rangle = |1\rangle$ which is *not* an eigenstate of $U$. Here, following the previous discussion, we expect to measure the phases $\varphi=0, 0.5$ (giving the corresponding eigenvalues $\lambda=\pm 1$). Accordingly, here we set ```items_to_keep=2```. ``` # Set total number of precision qubits: 2 number_precision_qubits = 2 # Define the set of precision qubits precision_qubits = range(number_precision_qubits) # Define the query qubits. We'll have them start after the precision qubits query_qubits = [number_precision_qubits] # State preparation for |1>, which is not an eigenstate of U=X query = Circuit().x(query_qubits) # Run the test with U=X out = run_qpe(X, precision_qubits, query_qubits, query, device, s3_folder, items_to_keep=2) # Postprocess results postprocess_qpe_results(out) ``` ### NUMERICAL TEST EXAMPLE 4 Next, apply the QPE algorithm to the two-qubit unitary $U=X \otimes Z$, and prepare the query register in the eigenstate $|\Psi\rangle = |+\rangle \otimes |1\rangle = H|0\rangle \otimes X|0\rangle$. Here, we expect to measure the phase $\varphi=0.5$ (giving the corresponding eigenvalue $\lambda=-1$). ``` # set unitary matrix U u1 = np.kron(X, Id) u2 = np.kron(Id, Z) unitary = np.dot(u1, u2) print('Two-qubit unitary (XZ):\n', unitary) # get example eigensystem eig_values, eig_vectors = np.linalg.eig(unitary) print('Eigenvalues:', eig_values) # print('Eigenvectors:', eig_vectors) # Set total number of precision qubits: 2 number_precision_qubits = 2 # Define the set of precision qubits precision_qubits = range(number_precision_qubits) # Define the query qubits. We'll have them start after the precision qubits query_qubits = [number_precision_qubits, number_precision_qubits+1] # State preparation for eigenstate |+,1> of U=X \otimes Z query = Circuit().h(query_qubits[0]).x(query_qubits[1]) # Run the test with U=X out = run_qpe(unitary, precision_qubits, query_qubits, query, device, s3_folder) # Postprocess results postprocess_qpe_results(out) ``` ### NUMERICAL TEST EXAMPLE 5 In this example, we choose the unitary to be a _random_ two-qubit unitary, diagonal in the computational basis. We initialize the query register to be in the eigenstate $|11\rangle$ of $U$, which we can prepare using the fact that $|11\rangle = X\otimes X|00\rangle$.
In this case we should be able to read off the eigenvalue and phase from $U$ and verify that QPE gives the right answer. ``` # Generate a random 2 qubit unitary matrix: from scipy.stats import unitary_group # Fix random seed for reproducibility np.random.seed(seed=42) # Get random two-qubit unitary random_unitary = unitary_group.rvs(2**2) # Let's diagonalize this evals = np.linalg.eig(random_unitary)[0] # Since we want to be able to read off the eigenvalues of the unitary in question # let's choose our unitary to be diagonal in this basis unitary = np.diag(evals) # Check that this is indeed unitary, and print it out: print('Two-qubit random unitary:\n', np.round(unitary, 3)) print('Check for unitarity: ', np.allclose(np.eye(len(unitary)), unitary.dot(unitary.T.conj()))) # Print eigenvalues print('Eigenvalues:', np.round(evals, 3)) ``` When we execute the QPE circuit, we expect the following (approximate) result for the eigenvalue estimate: ``` print('Target eigenvalue:', np.round(evals[-1], 3)) # Set total number of precision qubits number_precision_qubits = 3 # Define the set of precision qubits precision_qubits = range(number_precision_qubits) # Define the query qubits. We'll have them start after the precision qubits query_qubits = [number_precision_qubits, number_precision_qubits+1] # State preparation for eigenstate |1,1> of diagonal U query = Circuit().x(query_qubits[0]).x(query_qubits[1]) # Run the test with U=X out = run_qpe(unitary, precision_qubits, query_qubits, query, device, s3_folder) # Postprocess results postprocess_qpe_results(out) # compare output to exact target values print('Target eigenvalue:', np.round(evals[-1], 3)) ``` We can easily improve the precision of our parameter estimate by increasing the number of qubits in the precision register, as shown in the following example. ``` # Set total number of precision qubits number_precision_qubits = 10 # Define the set of precision qubits precision_qubits = range(number_precision_qubits) # Define the query qubits. 
We'll have them start after the precision qubits query_qubits = [number_precision_qubits, number_precision_qubits+1] # State preparation for eigenstate |1,1> of diagonal U query = Circuit().x(query_qubits[0]).x(query_qubits[1]) # Run the test with U=X out = run_qpe(unitary, precision_qubits, query_qubits, query, device, s3_folder) # Postprocess results eigenvalues = out['eigenvalues'] print('QPE eigenvalue estimates:', np.round(eigenvalues, 5)) # compare output to exact target values print('Target eigenvalue:', np.round(evals[-1], 5)) ``` --- ## APPENDIX ``` # Check SDK version # alternative: braket.__version__ !pip show amazon-braket-sdk | grep Version ``` ## Details of the ```utiles_qpe.py``` module ### Imports, including inverse QFT ```python # general imports import numpy as np import math from collections import Counter from datetime import datetime import pickle # AWS imports: Import Braket SDK modules from braket.circuits import Circuit, circuit # local imports from utils_qft import inverse_qft ``` ### QPE Subroutine ```python @circuit.subroutine(register=True) def controlled_unitary(control, target_qubits, unitary): """ Construct a circuit object corresponding to the controlled unitary Args: control: The qubit on which to control the gate target_qubits: List of qubits on which the unitary U acts unitary: matrix representation of the unitary we wish to implement in a controlled way """ # Define projectors onto the computational basis p0 = np.array([[1., 0.], [0., 0.]]) p1 = np.array([[0., 0.], [0., 1.]]) # Instantiate circuit object circ = Circuit() # Construct numpy matrix id_matrix = np.eye(len(unitary)) controlled_matrix = np.kron(p0, id_matrix) + np.kron(p1, unitary) # Set all target qubits targets = [control] + target_qubits # Add controlled unitary circ.unitary(matrix=controlled_matrix, targets=targets) return circ @circuit.subroutine(register=True) def qpe(precision_qubits, query_qubits, unitary, control_unitary=True): """ Function to implement the QPE algorithm using two registers for precision (read-out) and query. Register qubits need not be contiguous. Args: precision_qubits: list of qubits defining the precision register query_qubits: list of qubits defining the query register unitary: Matrix representation of the unitary whose eigenvalues we wish to estimate control_unitary: Optional boolean flag for controlled unitaries, with C-(U^{2^k}) by default (default is True), or C-U controlled-unitary (2**power) times """ qpe_circ = Circuit() # Get number of qubits num_precision_qubits = len(precision_qubits) num_query_qubits = len(query_qubits) # Apply Hadamard across precision register qpe_circ.h(precision_qubits) # Apply controlled unitaries. 
Start with the last precision_qubit, and end with the first for ii, qubit in enumerate(reversed(precision_qubits)): # Set power exponent for unitary power = ii # Alterantive 1: Implement C-(U^{2^k}) if control_unitary: # Define the matrix U^{2^k} Uexp = np.linalg.matrix_power(unitary,2**power) # Apply the controlled unitary C-(U^{2^k}) qpe_circ.controlled_unitary(qubit, query_qubits, Uexp) # Alterantive 2: One can instead apply controlled-unitary (2**power) times to get C-U^{2^power} else: for _ in range(2**power): qpe_circ.controlled_unitary(qubit, query_qubits, unitary) # Apply inverse qft to the precision_qubits qpe_circ.inverse_qft(precision_qubits) return qpe_circ ``` ### QPE postprocessing helper functions ```python # helper function to remove query bits from bitstrings def substring(key, precision_qubits): """ Helper function to get substring from keys for dedicated string positions as given by precision_qubits. This function is necessary to allow for arbitary qubit mappings in the precision and query registers (that is, so that the register qubits need not be contiguous.) Args: key: string from which we want to extract the substring supported only on the precision qubits precision_qubits: List of qubits corresponding to precision_qubits. Currently assumed to be a list of integers corresponding to the indices of the qubits """ short_key = '' for idx in precision_qubits: short_key = short_key + key[idx] return short_key # helper function to convert binary fractional to decimal # reference: https://www.geeksforgeeks.org/convert-binary-fraction-decimal/ def binaryToDecimal(binary): """ Helper function to convert binary string (example: '01001') to decimal Args: binary: string which to convert to decimal fraction """ length = len(binary) fracDecimal = 0 # Convert fractional part of binary to decimal equivalent twos = 2 for ii in range(length): fracDecimal += ((ord(binary[ii]) - ord('0')) / twos); twos *= 2.0 # return fractional part return fracDecimal # helper function for postprocessing based on measurement shots def get_qpe_phases(measurement_counts, precision_qubits, items_to_keep=1): """ Get QPE phase estimate from measurement_counts for given number of precision qubits Args: measurement_counts: measurement results from a device run precision_qubits: List of qubits corresponding to precision_qubits. Currently assumed to be a list of integers corresponding to the indices of the qubits items_to_keep: number of items to return (topmost measurement counts for precision register) """ # Aggregate the results (that is, ignore the query register qubits): # First get bitstrings with corresponding counts for precision qubits only bitstrings_precision_register = [substring(key, precision_qubits) for key in measurement_counts.keys()] # Then keep only the unique strings bitstrings_precision_register_set = set(bitstrings_precision_register) # Cast as a list for later use bitstrings_precision_register_list = list(bitstrings_precision_register_set) # Now create a new dict to collect measurement results on the precision_qubits. # Keys are given by the measurement count substrings on the register qubits. Initialize the counts to zero. 
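# (This dictionary is later wrapped in a collections.Counter so that the most frequent precision-register bitstrings can be selected.)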
precision_results_dic = {key: 0 for key in bitstrings_precision_register_list} # Loop over all measurement outcomes for key in measurement_counts.keys(): # Save the measurement count for this outcome counts = measurement_counts[key] # Generate the corresponding shortened key (supported only on the precision_qubits register) count_key = substring(key, precision_qubits) # Add these measurement counts to the corresponding key in our new dict precision_results_dic[count_key] += counts # Get topmost values only c = Counter(precision_results_dic) topmost= c.most_common(items_to_keep) # get decimal phases from bitstrings for topmost bitstrings phases_decimal = [binaryToDecimal(item[0]) for item in topmost] # Get decimal phases from bitstrings for all bitstrings # number_precision_qubits = len(precision_qubits) # Generate binary decimal expansion # phases_decimal = [int(key, 2)/(2**number_precision_qubits) for key in precision_results_dic] # phases_decimal = [binaryToDecimal(key) for key in precision_results_dic] return phases_decimal, precision_results_dic ``` ### Run QPE experiments: ```python def run_qpe(unitary, precision_qubits, query_qubits, query_circuit, device, s3_folder, items_to_keep=1, shots=1000, save_to_pck=False): """ Function to run QPE algorithm end-to-end and return measurement counts. Args: precision_qubits: list of qubits defining the precision register query_qubits: list of qubits defining the query register unitary: Matrix representation of the unitary whose eigenvalues we wish to estimate query_circuit: query circuit for state preparation of query register items_to_keep: (optional) number of items to return (topmost measurement counts for precision register) device: Braket device backend shots: (optional) number of measurement shots (default is 1000) save_to_pck: (optional) save results to pickle file if True (default is False) """ # get size of precision register and total number of qubits number_precision_qubits = len(precision_qubits) num_qubits = len(precision_qubits) + len(query_qubits) # Define the circuit. Start by copying the query_circuit, then add the QPE: circ = query_circuit circ.qpe(precision_qubits, query_qubits, unitary) # Add desired results_types circ.probability() # Run the circuit with all zeros input. # The query_circuit subcircuit generates the desired input from all zeros. 
# The following code executes the correct device.run call, depending on whether the backend is local or managed (cloud-based) if device.name == 'DefaultSimulator': task = device.run(circ, shots=shots) else: task = device.run(circ, s3_folder, shots=shots) # get result for this task result = task.result() # get metadata metadata = result.task_metadata # get output probabilities (see result_types above) probs_values = result.values[0] # get measurement results measurements = result.measurements measured_qubits = result.measured_qubits measurement_counts = result.measurement_counts measurement_probabilities = result.measurement_probabilities # bitstrings format_bitstring = '{0:0' + str(num_qubits) + 'b}' bitstring_keys = [format_bitstring.format(ii) for ii in range(2**num_qubits)] # QPE postprocessing phases_decimal, precision_results_dic = get_qpe_phases(measurement_counts, precision_qubits, items_to_keep) eigenvalues = [np.exp(2*np.pi*1j*phase) for phase in phases_decimal] # aggregate results out = {'circuit': circ, 'task_metadata': metadata, 'measurements': measurements, 'measured_qubits': measured_qubits, 'measurement_counts': measurement_counts, 'measurement_probabilities': measurement_probabilities, 'probs_values': probs_values, 'bitstring_keys': bitstring_keys, 'precision_results_dic': precision_results_dic, 'phases_decimal': phases_decimal, 'eigenvalues': eigenvalues} if save_to_pck: # store results: dump output to pickle with timestamp in filename time_now = datetime.strftime(datetime.now(), '%Y%m%d%H%M%S') results_file = 'results-'+time_now+'.pck' pickle.dump(out, open(results_file, "wb")) # you can load results as follows # out = pickle.load(open(results_file, "rb")) return out ``` --- ## REFERENCES [1] Wikipedia: https://en.wikipedia.org/wiki/Quantum_phase_estimation_algorithm [2] Nielsen, Michael A., Chuang, Isaac L. (2010). Quantum Computation and Quantum Information (2nd ed.). Cambridge: Cambridge University Press.
# Module 4: APIs ## Spotify <img src="https://developer.spotify.com/assets/branding-guidelines/[email protected]" width=400></img> In this module we will use APIs to obtain information about the artists, albums, and tracks available on Spotify. But first... what is an **API**?<br> An API is an interface for programming applications (*Application Programming Interface*). In other words, it is a set of functions, methods, rules, and definitions that will allow us to develop applications (in this case a scraper) that communicate with Spotify's servers. APIs are designed and developed by companies that have an interest in applications (public or private) being built on top of their services. Spotify has public, well-documented APIs that we will be using throughout this project. #### REST A term you will surely come across when looking for information online is **REST** or *RESTful*. It stands for *representational state transfer*, and if an API is REST or RESTful, it means it respects certain architectural principles, such as a client/server communication protocol (which will be HTTP) and, among other things, a set of defined operations that we know as **methods**. We have already been using the GET method to make requests to web servers. #### Documentation As mentioned before, APIs are designed by the same companies that want applications (public or private) to be developed that consume their services or information. That is why the way an API is used will vary depending on the service we want to consume. Using the Spotify APIs is not the same as using the Twitter APIs. For this reason it is extremely important to read the available documentation, usually found in the developers section of each site. Here is the [link to Spotify's documentation](https://developer.spotify.com/documentation/) #### JSON JSON stands for *JavaScript Object Notation*; it is a format for describing objects that became so widely used that it is now considered language-independent. In fact, we will use it in this project even though we are working in Python, because it is the format in which we will receive the responses to the requests we make through the APIs. For us, it will be nothing more and nothing less than a dictionary with a few particularities that we will explore throughout the course.
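For example, here is a (hypothetical) fragment of JSON text and how it maps onto a Python dictionary:

```
import json

# Hypothetical JSON text, similar in spirit to what the Spotify API returns
raw = '{"name": "Iron Maiden", "type": "artist", "genres": ["heavy metal"], "popularity": 78}'

data = json.loads(raw)   # JSON text -> Python dict
print(data['name'])      # 'Iron Maiden'
print(data['genres'])    # ['heavy metal']
```

With `requests`, this conversion is done for us by calling `.json()` on the response object, as we will see next.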
Useful links for the class: - [Spotify Documentation - Artists](https://developer.spotify.com/documentation/web-api/reference/artists/) - [Iron Maiden on Spotify](https://open.spotify.com/artist/6mdiAmATAx73kdxrNrnlao) ``` import requests id_im = '6mdiAmATAx73kdxrNrnlao' url_base = 'https://api.spotify.com/v1' ep_artist = '/artists/{artist_id}' url_base+ep_artist.format(artist_id=id_im) r = requests.get(url_base+ep_artist.format(artist_id=id_im)) r.status_code r.json() token_url = 'https://accounts.spotify.com/api/token' params = {'grant_type': 'client_credentials'} headers = {'Authorization': 'Basic NDRiN2IzNmVjMTQ1NDY3ZjlhOWVlYWY3ZTQxN2NmOGI6N2I0YWE3YTBlZjQ4NDQwNDhhYjFkMjI0MzBhMWViMWY='} r = requests.post(token_url, data=params, headers=headers) r.status_code r.json() token = r.json()['access_token'] token header = {"Authorization": "Bearer {}".format(token)} r = requests.get(url_base+ep_artist.format(artist_id=id_im), headers=header) r.status_code r.json() url_busqueda = 'https://api.spotify.com/v1/search' search_params = {'q': "Iron+Maiden", 'type':'artist', 'market':'AR'} busqueda = requests.get(url_busqueda, headers=header, params=search_params) busqueda.status_code busqueda.json() import pandas as pd df = pd.DataFrame(busqueda.json()['artists']['items']) df.head() df.sort_values(by='popularity', ascending=False).iloc[0]['id'] import base64 def get_token(client_id, client_secret): encoded = base64.b64encode(bytes(client_id+':'+client_secret, 'utf-8')) params = {'grant_type':'client_credentials'} header={'Authorization': 'Basic ' + str(encoded, 'utf-8')} r = requests.post('https://accounts.spotify.com/api/token', headers=header, data=params) if r.status_code != 200: print('Request error.', r.json()) return None print('Token valid for {} seconds.'.format(r.json()['expires_in'])) return r.json()['access_token'] client_id = '44b7b36ec145467f9a9eeaf7e417cf8b' client_secret = '7b4aa7a0ef4844048ab1d22430a1eb1f' token = get_token(client_id, client_secret) header = {"Authorization": "Bearer {}".format(token)} id_im artist_im = requests.get(url_base+ep_artist.format(artist_id=id_im), headers=header) artist_im.status_code artist_im.json() params = {'country': 'AR'} albums_im = requests.get(url_base+ep_artist.format(artist_id=id_im)+'/albums', headers=header, params=params) albums_im.status_code albums_im.json()['items'] [(album['id'], album['name']) for album in albums_im.json()['items']] bnw_id = '1hDF0QPIHVTnSJtxyQVguB' album_ep = '/albums/{album_id}' album_params = {'market':'AR'} bnw = requests.get(url_base+album_ep.format(album_id=bnw_id)+'/tracks', headers=header, params=album_params) bnw bnw.json() bnw.json()['items'] [(track['id'], track['name']) for track in bnw.json()['items']] ``` ## Class 5 ``` def obtener_discografia(artist_id, token, return_name=False, page_limit=50, country=None): url = f'https://api.spotify.com/v1/artists/{artist_id}/albums' header = {'Authorization': f'Bearer {token}'} params = {'limit': page_limit, 'offset': 0, 'country': country} lista = [] r = requests.get(url, params=params, headers=header) if r.status_code != 200: print('Request error.', r.json()) return None if return_name: lista += [(item['id'], item['name']) for item in r.json()['items']] else: lista += [item['id'] for item in r.json()['items']] while r.json()['next']: r = requests.get(r.json()['next'], headers=header) # The remaining parameters are already included in the URL if return_name: lista += [(item['id'], item['name']) for item in r.json()['items']] else: lista += [item['id'] for item
in r.json()['items']] return lista def obtener_tracks(album_id, token, return_name=False, page_limit=50, market=None): url=f'https://api.spotify.com/v1/albums/{album_id}/tracks' header = {'Authorization': f'Bearer {token}'} params = {'limit': page_limit, 'offset': 0, 'market': market} lista = [] r = requests.get(url, params=params, headers=header) if r.status_code != 200: print('Request error.', r.json()) return None if return_name: lista += [(item['id'], item['name']) for item in r.json()['items']] else: lista += [item['id'] for item in r.json()['items']] while r.json()['next']: r = requests.get(r.json()['next'], headers=header) # The remaining parameters are already included in the URL if return_name: lista += [(item['id'], item['name']) for item in r.json()['items']] else: lista += [item['id'] for item in r.json()['items']] return lista ```
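A quick usage sketch for these two helpers (assuming `token` still holds a valid client-credentials token and reusing the Iron Maiden `id_im` defined earlier):

```
albums = obtener_discografia(id_im, token, country='AR')

tracks = []
for album_id in albums:
    tracks += obtener_tracks(album_id, token, return_name=True, market='AR')

print('Total tracks found:', len(tracks))
tracks[:5]
```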
## Computer Vision Learner [`vision.learner`](/vision.learner.html#vision.learner) is the module that defines the [`cnn_learner`](/vision.learner.html#cnn_learner) method, to easily get a model suitable for transfer learning. ``` from fastai.gen_doc.nbdoc import * from fastai.vision import * ``` ## Transfer learning Transfer learning is a technique where you use a model trained on a very large dataset (usually [ImageNet](http://image-net.org/) in computer vision) and then adapt it to your own dataset. The idea is that it has learned to recognize many features on all of this data, and that you will benefit from this knowledge, especially if your dataset is small, compared to starting from a randomly initialized model. It has been proved in [this article](https://arxiv.org/abs/1805.08974) on a wide range of tasks that transfer learning nearly always gives better results. In practice, you need to change the last part of your model to be adapted to your own number of classes. Most convolutional models end with a few linear layers (a part we will call the head). The last convolutional layer will have analyzed features in the image that went through the model, and the job of the head is to convert those into predictions for each of our classes. In transfer learning we will keep all the convolutional layers (called the body or the backbone of the model) with their weights pretrained on ImageNet but will define a new head initialized randomly. Then we will train the model we obtain in two phases: first we freeze the body weights and only train the head (to convert those analyzed features into predictions for our own data), then we unfreeze the layers of the backbone (gradually if necessary) and fine-tune the whole model (possibly using differential learning rates). The [`cnn_learner`](/vision.learner.html#cnn_learner) factory method helps you to automatically get a pretrained model from a given architecture with a custom head that is suitable for your data. ``` show_doc(cnn_learner) ``` This method creates a [`Learner`](/basic_train.html#Learner) object from the [`data`](/vision.data.html#vision.data) object and model inferred from it with the backbone given in `arch`. Specifically, it will cut the model defined by `arch` (randomly initialized if `pretrained` is False) at the last convolutional layer by default (or as defined in `cut`, see below) and add: - an [`AdaptiveConcatPool2d`](/layers.html#AdaptiveConcatPool2d) layer, - a [`Flatten`](/layers.html#Flatten) layer, - blocks of \[[`nn.BatchNorm1d`](https://pytorch.org/docs/stable/nn.html#torch.nn.BatchNorm1d), [`nn.Dropout`](https://pytorch.org/docs/stable/nn.html#torch.nn.Dropout), [`nn.Linear`](https://pytorch.org/docs/stable/nn.html#torch.nn.Linear), [`nn.ReLU`](https://pytorch.org/docs/stable/nn.html#torch.nn.ReLU)\] layers. The blocks are defined by the `lin_ftrs` and `ps` arguments. Specifically, the first block will have a number of inputs inferred from the backbone `arch` and the last one will have a number of outputs equal to `data.c` (which contains the number of classes of the data) and the intermediate blocks have a number of inputs/outputs determined by `lin_ftrs` (of course a block has a number of inputs equal to the number of outputs of the previous block). The default is to have an intermediate hidden size of 512 (which makes two blocks `model_activation` -> 512 -> `n_classes`). If you pass a float then the final dropout layer will have the value `ps`, and the remaining will be `ps/2`.
If you pass a list then the values are used for dropout probabilities directly. Note that the very last block doesn't have a [`nn.ReLU`](https://pytorch.org/docs/stable/nn.html#torch.nn.ReLU) activation, to allow you to use any final activation you want (generally included in the loss function in pytorch). Also, the backbone will be frozen if you choose `pretrained=True` (so only the head will train if you call [`fit`](/basic_train.html#fit)) so that you can immediately start phase one of training as described above. Alternatively, you can define your own `custom_head` to put on top of the backbone. If you want to specify where to split `arch` you should do so in the argument `cut` which can either be the index of a specific layer (the result will not include that layer) or a function that, when passed the model, will return the backbone you want. The final model obtained by stacking the backbone and the head (custom or defined as we saw) is then separated into groups for gradual unfreezing or differential learning rates. You can specify how to split the backbone in groups with the optional argument `split_on` (should be a function that returns those groups when given the backbone). The `kwargs` will be passed on to [`Learner`](/basic_train.html#Learner), so you can put here anything that [`Learner`](/basic_train.html#Learner) will accept ([`metrics`](/metrics.html#metrics), `loss_func`, `opt_func`...) ``` path = untar_data(URLs.MNIST_SAMPLE) data = ImageDataBunch.from_folder(path) learner = cnn_learner(data, models.resnet18, metrics=[accuracy]) learner.fit_one_cycle(1,1e-3) learner.save('one_epoch') show_doc(unet_learner) ``` This time the model will be a [`DynamicUnet`](/vision.models.unet.html#DynamicUnet) with an encoder based on `arch` (maybe `pretrained`) that is cut depending on `split_on`. `blur_final`, `norm_type`, `blur`, `self_attention`, `y_range`, `last_cross` and `bottle` are passed to the unet constructor, and the `kwargs` are passed to the initialization of the [`Learner`](/basic_train.html#Learner). ``` jekyll_warn("The models created with this function won't work with pytorch `nn.DataParallel`, you have to use distributed training instead!") ``` ### Get predictions Once you've actually trained your model, you may want to use it on a single image. This is done by using the following method. ``` show_doc(Learner.predict) img = learner.data.train_ds[0][0] learner.predict(img) ``` Here the predicted class for our image is '3', which corresponds to a label of 0. The probabilities the model found for each class are 99.65% and 0.35% respectively, so its confidence is pretty high. Note that if you want to load your trained model and use it in inference mode with the previous function, you should export your [`Learner`](/basic_train.html#Learner). ``` learner.export() ``` And then you can load it with an empty data object that has the same internal state like this: ``` learn = load_learner(path) ``` ### Customize your model You can customize [`cnn_learner`](/vision.learner.html#cnn_learner) for your own model's default `cut` and `split_on` functions by adding them to the dictionary `model_meta`. The key should be your model and the value should be a dictionary with the keys `cut` and `split_on` (see the source code for examples). The constructor will call [`create_body`](/vision.learner.html#create_body) and [`create_head`](/vision.learner.html#create_head) for you based on `cut`; you can also call them yourself, which is particularly useful for testing.
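For instance, a minimal sketch of overriding these settings per call rather than through `model_meta` (the `cut` index and `split_on` function below are illustrative, not the registered defaults for this architecture):

```
# Hypothetical override: cut the backbone two layers from the end and
# split the resulting model into two layer groups for discriminative learning rates
learn_custom = cnn_learner(data, models.resnet18, cut=-2,
                           split_on=lambda m: (m[0][6], m[1]),
                           metrics=[accuracy])
```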
``` show_doc(create_body) show_doc(create_head, doc_string=False) ``` Model head that takes `nf` features, runs through `lin_ftrs`, and ends with `nc` classes. `ps` is the probability of the dropouts, as documented above in [`cnn_learner`](/vision.learner.html#cnn_learner). ``` show_doc(ClassificationInterpretation, title_level=3) ``` This provides a confusion matrix and visualization of the most incorrect images. Pass in your [`data`](/vision.data.html#vision.data), calculated `preds`, actual `y`, and your `losses`, and then use the methods below to view the model interpretation results. For instance: ``` learn = cnn_learner(data, models.resnet18) learn.fit(1) preds,y,losses = learn.get_preds(with_loss=True) interp = ClassificationInterpretation(learn, preds, y, losses) ``` The following factory method gives a more convenient way to create an instance of this class: ``` show_doc(ClassificationInterpretation.from_learner, full_name='from_learner') ``` You can also use a shortcut `learn.interpret()` to do the same. ``` show_doc(Learner.interpret, full_name='interpret') ``` Note that this shortcut is a [`Learner`](/basic_train.html#Learner) object/class method that can be called as: `learn.interpret()`. ``` show_doc(ClassificationInterpretation.plot_top_losses, full_name='plot_top_losses') ``` The `k` items are arranged as a square, so it will look best if `k` is a square number (4, 9, 16, etc). The title of each image shows: prediction, actual, loss, probability of actual class. When `heatmap` is True (by default it's True) , Grad-CAM heatmaps (http://openaccess.thecvf.com/content_ICCV_2017/papers/Selvaraju_Grad-CAM_Visual_Explanations_ICCV_2017_paper.pdf) are overlaid on each image. `plot_top_losses` should be used with single-labeled datasets. See `plot_multi_top_losses` below for a version capable of handling multi-labeled datasets. ``` interp.plot_top_losses(9, figsize=(7,7)) show_doc(ClassificationInterpretation.top_losses) ``` Returns tuple of *(losses,indices)*. ``` interp.top_losses(9) show_doc(ClassificationInterpretation.plot_multi_top_losses, full_name='plot_multi_top_losses') ``` Similar to `plot_top_losses()` but aimed at multi-labeled datasets. It plots misclassified samples sorted by their respective loss. Since you can have multiple labels for a single sample, they can easily overlap in a grid plot. So it plots just one sample per row. Note that you can pass `save_misclassified=True` (by default it's `False`). In such case, the method will return a list containing the misclassified images which you can use to debug your model and/or tune its hyperparameters. ``` show_doc(ClassificationInterpretation.plot_confusion_matrix) ``` If [`normalize`](/vision.data.html#normalize), plots the percentages with `norm_dec` digits. `slice_size` can be used to avoid out of memory error if your set is too big. `kwargs` are passed to `plt.figure`. ``` interp.plot_confusion_matrix() show_doc(ClassificationInterpretation.confusion_matrix) interp.confusion_matrix() show_doc(ClassificationInterpretation.most_confused) ``` #### Working with large datasets When working with large datasets, memory problems can arise when computing the confusion matrix. For example, an error can look like this: RuntimeError: $ Torch: not enough memory: you tried to allocate 64GB. Buy new RAM! In this case it is possible to force [`ClassificationInterpretation`](/train.html#ClassificationInterpretation) to compute the confusion matrix for data slices and then aggregate the result by specifying slice_size parameter. 
``` interp.confusion_matrix(slice_size=10) interp.plot_confusion_matrix(slice_size=10) interp.most_confused(slice_size=10) ``` ## Undocumented Methods - Methods moved below this line will intentionally be hidden ## New Methods - Please document or move to the undocumented section
github_jupyter
# The Atoms of Computation Programming a quantum computer is now something that anyone can do in the comfort of their own home. But what to create? What is a quantum program anyway? In fact, what is a quantum computer? These questions can be answered by making comparisons to standard digital computers. Unfortunately, most people don’t actually understand how digital computers work either. In this article, we’ll look at the basics principles behind these devices. To help us transition over to quantum computing later on, we’ll do it using the same tools as we'll use for quantum. ## Contents 1. [Splitting information into bits](#bits) 2. [Computation as a Diagram](#diagram) 3. [Your First Quantum Circuit](#first-circuit) 4. [Example: Adder Circuit](#adder) 4.1 [Encoding an Input](#encoding) 4.2 [Remembering how to Add](#remembering-add) 4.3 [Adding with Qiskit](#adding-qiskit) Below is some Python code we'll need to run if we want to use the code in this page: ``` from qiskit import QuantumCircuit, assemble, Aer from qiskit.visualization import plot_histogram ``` ## 1. Splitting information into bits <a id="bits"></a> The first thing we need to know about is the idea of bits. These are designed to be the world’s simplest alphabet. With only two characters, 0 and 1, we can represent any piece of information. One example is numbers. You are probably used to representing a number through a string of the ten digits 0, 1, 2, 3, 4, 5, 6, 7, 8, and 9. In this string of digits, each digit represents how many times the number contains a certain power of ten. For example, when we write 9213, we mean $$ 9000 + 200 + 10 + 3 $$ or, expressed in a way that emphasizes the powers of ten $$ (9\times10^3) + (2\times10^2) + (1\times10^1) + (3\times10^0) $$ Though we usually use this system based on the number 10, we can just as easily use one based on any other number. The binary number system, for example, is based on the number two. This means using the two characters 0 and 1 to express numbers as multiples of powers of two. For example, 9213 becomes 10001111111101, since $$ 9213 = (1 \times 2^{13}) + (0 \times 2^{12}) + (0 \times 2^{11})+ (0 \times 2^{10}) +(1 \times 2^9) + (1 \times 2^8) + (1 \times 2^7) \\\\ \,\,\, + (1 \times 2^6) + (1 \times 2^5) + (1 \times 2^4) + (1 \times 2^3) + (1 \times 2^2) + (0 \times 2^1) + (1 \times 2^0) $$ In this we are expressing numbers as multiples of 2, 4, 8, 16, 32, etc. instead of 10, 100, 1000, etc. <a id="binary_widget"></a> ``` from qiskit_textbook.widgets import binary_widget binary_widget(nbits=5) ``` These strings of bits, known as binary strings, can be used to represent more than just numbers. For example, there is a way to represent any text using bits. For any letter, number, or punctuation mark you want to use, you can find a corresponding string of at most eight bits using [this table](https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/com.ibm.aix.networkcomm/conversion_table.htm). Though these are quite arbitrary, this is a widely agreed-upon standard. In fact, it's what was used to transmit this article to you through the internet. This is how all information is represented in computers. Whether numbers, letters, images, or sound, it all exists in the form of binary strings. Like our standard digital computers, quantum computers are based on this same basic idea. The main difference is that they use *qubits*, an extension of the bit to quantum mechanics. 
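Before moving on, here is a quick plain-Python sanity check of the binary expansion of 9213 given above (an aside for curiosity only; nothing later depends on it): ``` n = 9213 print(bin(n)) # '0b10001111111101', matching the expansion above bits = bin(n)[2:] # rebuild the number from its bits as multiples of powers of two print(sum(int(b) * 2**i for i, b in enumerate(reversed(bits)))) # 9213 ``` 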
In the rest of this textbook, we will explore what qubits are, what they can do, and how they do it. In this section, however, we are not talking about quantum at all. So, we just use qubits as if they were bits. ### Quick Exercises 1. Think of a number and try to write it down in binary. 2. If you have $n$ bits, how many different states can they be in? ## 2. Computation as a diagram <a id="diagram"></a> Whether we are using qubits or bits, we need to manipulate them in order to turn the inputs we have into the outputs we need. For the simplest programs with very few bits, it is useful to represent this process in a diagram known as a *circuit diagram*. These have inputs on the left, outputs on the right, and operations represented by arcane symbols in between. These operations are called 'gates', mostly for historical reasons. Here's an example of what a circuit looks like for standard, bit-based computers. You aren't expected to understand what it does. It should simply give you an idea of what these circuits look like. ![image1](images/classical_circuit.png) For quantum computers, we use the same basic idea but have different conventions for how to represent inputs, outputs, and the symbols used for operations. Here is the quantum circuit that represents the same process as above. ![image2](images/quantum_circuit.png) In the rest of this section, we will explain how to build circuits. At the end, you'll know how to create the circuit above, what it does, and why it is useful. ## 3. Your first quantum circuit <a id="first-circuit"></a> In a circuit, we typically need to do three jobs: First, encode the input, then do some actual computation, and finally extract an output. For your first quantum circuit, we'll focus on the last of these jobs. We start by creating a circuit with eight qubits and eight outputs. ``` n = 8 n_q = n n_b = n qc_output = QuantumCircuit(n_q,n_b) ``` This circuit, which we have called `qc_output`, is created by Qiskit using `QuantumCircuit`. The number `n_q` defines the number of qubits in the circuit. With `n_b` we define the number of output bits we will extract from the circuit at the end. The extraction of outputs in a quantum circuit is done using an operation called `measure`. Each measurement tells a specific qubit to give an output to a specific output bit. The following code adds a `measure` operation to each of our eight qubits. The qubits and bits are both labelled by the numbers from 0 to 7 (because that’s how programmers like to do things). The command `qc.measure(j,j)` adds a measurement to our circuit `qc` that tells qubit `j` to write an output to bit `j`. ``` for j in range(n): qc_output.measure(j,j) ``` Now that our circuit has something in it, let's take a look at it. ``` qc_output.draw() ``` Qubits are always initialized to give the output ```0```. Since we don't do anything to our qubits in the circuit above, this is exactly the result we'll get when we measure them. We can see this by running the circuit many times and plotting the results in a histogram. We will find that the result is always ```00000000```: a ```0``` from each qubit. 
``` sim = Aer.get_backend('qasm_simulator') # this is the simulator we'll use qobj = assemble(qc_output) # this turns the circuit into an object our backend can run result = sim.run(qobj).result() # we run the experiment and get the result from that experiment # from the results, we get a dictionary containing the number of times (counts) # each result appeared counts = result.get_counts() # and display it on a histogram plot_histogram(counts) ``` The reason for running many times and showing the result as a histogram is because quantum computers may have some randomness in their results. In this case, since we aren’t doing anything quantum, we get just the ```00000000``` result with certainty. Note that this result comes from a quantum simulator, which is a standard computer calculating what an ideal quantum computer would do. Simulations are only possible for small numbers of qubits (~30 qubits), but they are nevertheless a very useful tool when designing your first quantum circuits. To run on a real device you simply need to replace ```Aer.get_backend('qasm_simulator')``` with the backend object of the device you want to use. ## 4. Example: Creating an Adder Circuit <a id="adder"></a> ### 4.1 Encoding an input <a id="encoding"></a> Now let's look at how to encode a different binary string as an input. For this, we need what is known as a NOT gate. This is the most basic operation that you can do in a computer. It simply flips the bit value: ```0``` becomes ```1``` and ```1``` becomes ```0```. For qubits, it is an operation called ```x``` that does the job of the NOT. Below we create a new circuit dedicated to the job of encoding and call it `qc_encode`. For now, we only specify the number of qubits. ``` qc_encode = QuantumCircuit(n) qc_encode.x(7) qc_encode.draw() ``` Extracting results can be done using the circuit we have from before: `qc_output`. Adding the two circuits using `qc_encode + qc_output` creates a new circuit with everything needed to extract an output added at the end. ``` qc = qc_encode + qc_output qc.draw() ``` Now we can run the combined circuit and look at the results. ``` qobj = assemble(qc) counts = sim.run(qobj).result().get_counts() plot_histogram(counts) ``` Now our computer outputs the string ```10000000``` instead. The bit we flipped, which comes from qubit 7, lives on the far left of the string. This is because Qiskit numbers the bits in a string from right to left. Some prefer to number their bits the other way around, but Qiskit's system certainly has its advantages when we are using the bits to represent numbers. Specifically, it means that qubit 7 is telling us about how many $2^7$s we have in our number. So by flipping this bit, we’ve now written the number 128 in our simple 8-bit computer. Now try out writing another number for yourself. You could do your age, for example. Just use a search engine to find out what the number looks like in binary (if it includes a ‘0b’, just ignore it), and then add some 0s to the left side if you are younger than 64. ``` qc_encode = QuantumCircuit(n) qc_encode.x(1) qc_encode.x(5) qc_encode.draw() ``` Now we know how to encode information in a computer. The next step is to process it: To take an input that we have encoded, and turn it into an output that we need. ### 4.2 Remembering how to add <a id="remembering-add"></a> To look at turning inputs into outputs, we need a problem to solve. Let’s do some basic maths. 
In primary school, you will have learned how to take large mathematical problems and break them down into manageable pieces. For example, how would you go about solving the following? ``` 9213 + 1854 = ???? ``` One way is to do it digit by digit, from right to left. So we start with 3+4 ``` 9213 + 1854 = ???7 ``` And then 1+5 ``` 9213 + 1854 = ??67 ``` Then we have 2+8=10. Since this is a two digit answer, we need to carry the one over to the next column. ``` 9213 + 1854 = ?067 ¹ ``` Finally we have 9+1+1=11, and get our answer ``` 9213 + 1854 = 11067 ¹ ``` This may just be simple addition, but it demonstrates the principles behind all algorithms. Whether the algorithm is designed to solve mathematical problems or process text or images, we always break big tasks down into small and simple steps. To run on a computer, algorithms need to be compiled down to the smallest and simplest steps possible. To see what these look like, let’s do the above addition problem again but in binary. ``` 10001111111101 + 00011100111110 = ?????????????? ``` Note that the second number has a bunch of extra 0s on the left. This just serves to make the two strings the same length. Our first task is to do the 1+0 for the column on the right. In binary, as in any number system, the answer is 1. We get the same result for the 0+1 of the second column. ``` 10001111111101 + 00011100111110 = ????????????11 ``` Next, we have 1+1. As you’ll surely be aware, 1+1=2. In binary, the number 2 is written ```10```, and so requires two bits. This means that we need to carry the 1, just as we would for the number 10 in decimal. ``` 10001111111101 + 00011100111110 = ???????????011 ¹ ``` The next column now requires us to calculate ```1+1+1```. This means adding three numbers together, so things are getting complicated for our computer. But we can still compile it down to simpler operations, and do it in a way that only ever requires us to add two bits together. For this, we can start with just the first two 1s. ``` 1 + 1 = 10 ``` Now we need to add this ```10``` to the final ```1``` , which can be done using our usual method of going through the columns. ``` 10 + 01 = 11 ``` The final answer is ```11``` (also known as 3). Now we can get back to the rest of the problem. With the answer of ```11```, we have another carry bit. ``` 10001111111101 + 00011100111110 = ??????????1011 ¹¹ ``` So now we have another 1+1+1 to do. But we already know how to do that, so it’s not a big deal. In fact, everything left so far is something we already know how to do. This is because, if you break everything down into adding just two bits, there are only four possible things you’ll ever need to calculate. Here are the four basic sums (we’ll write all the answers with two bits to be consistent). ``` 0+0 = 00 (in decimal, this is 0+0=0) 0+1 = 01 (in decimal, this is 0+1=1) 1+0 = 01 (in decimal, this is 1+0=1) 1+1 = 10 (in decimal, this is 1+1=2) ``` This is called a *half adder*. If our computer can implement this, and if it can chain many of them together, it can add anything. ### 4.3 Adding with Qiskit <a id="adding-qiskit"></a> Let's make our own half adder using Qiskit. This will include a part of the circuit that encodes the input, a part that executes the algorithm, and a part that extracts the result. The first part will need to be changed whenever we want to use a new input, but the rest will always remain the same. 
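For comparison, here is a minimal classical half adder sketched in plain Python (an illustrative aside, not part of the quantum circuit we are about to build), using XOR for the sum bit and AND for the carry bit: ``` def half_adder(a, b): """Return (carry, sum) for two input bits.""" return a & b, a ^ b for a in (0, 1): for b in (0, 1): carry, total = half_adder(a, b) print(f"{a}+{b} = {carry}{total}") # prints 00, 01, 01, 10 -- the four basic sums above ``` 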
![half adder implemented on a quantum circuit](images/half-adder.svg) The two bits we want to add are encoded in the qubits 0 and 1. The above example encodes a ```1``` in both these qubits, and so it seeks to find the solution of ```1+1```. The result will be a string of two bits, which we will read out from the qubits 2 and 3. All that remains is to fill in the actual program, which lives in the blank space in the middle. The dashed lines in the image are just to distinguish the different parts of the circuit (although they can have more interesting uses too). They are made by using the `barrier` command. The basic operations of computing are known as logic gates. We’ve already used the NOT gate, but this is not enough to make our half adder. We could only use it to manually write out the answers. Since we want the computer to do the actual computing for us, we’ll need some more powerful gates. To see what we need, let’s take another look at what our half adder needs to do. ``` 0+0 = 00 0+1 = 01 1+0 = 01 1+1 = 10 ``` The rightmost bit in all four of these answers is completely determined by whether the two bits we are adding are the same or different. So for ```0+0``` and ```1+1```, where the two bits are equal, the rightmost bit of the answer comes out ```0```. For ```0+1``` and ```1+0```, where we are adding different bit values, the rightmost bit is ```1```. To get this part of our solution correct, we need something that can figure out whether two bits are different or not. Traditionally, in the study of digital computation, this is called an XOR gate. | Input 1 | Input 2 | XOR Output | |:-------:|:-------:|:------:| | 0 | 0 | 0 | | 0 | 1 | 1 | | 1 | 0 | 1 | | 1 | 1 | 0 | In quantum computers, the job of the XOR gate is done by the controlled-NOT gate. Since that's quite a long name, we usually just call it the CNOT. In Qiskit its name is ```cx```, which is even shorter. In circuit diagrams, it is drawn as in the image below. ``` qc_cnot = QuantumCircuit(2) qc_cnot.cx(0,1) qc_cnot.draw() ``` This is applied to a pair of qubits. One acts as the control qubit (this is the one with the little dot). The other acts as the *target qubit* (with the big circle). There are multiple ways to explain the effect of the CNOT. One is to say that it looks at its two input bits to see whether they are the same or different. Next, it overwrites the target qubit with the answer. The target becomes ```0``` if they are the same, and ```1``` if they are different. <img src="images/cnot_xor.svg"> Another way of explaining the CNOT is to say that it does a NOT on the target if the control is ```1```, and does nothing otherwise. This explanation is just as valid as the previous one (in fact, it’s the one that gives the gate its name). Try the CNOT out for yourself by trying each of the possible inputs. For example, here's a circuit that tests the CNOT with the input ```01```. ``` qc = QuantumCircuit(2,2) qc.x(0) qc.cx(0,1) qc.measure(0,0) qc.measure(1,1) qc.draw() ``` If you execute this circuit, you’ll find that the output is ```11```. We can think of this happening because of either of the following reasons. - The CNOT calculates whether the input values are different and finds that they are, which means that it wants to output ```1```. It does this by writing over the state of qubit 1 (which, remember, is on the left of the bit string), turning ```01``` into ```11```. - The CNOT sees that qubit 0 is in state ```1```, and so applies a NOT to qubit 1. 
This flips the ```0``` of qubit 1 into a ```1```, and so turns ```01``` into ```11```. Here is a table showing all the possible inputs and corresponding outputs of the CNOT gate: | Input (q1 q0) | Output (q1 q0) | |:-------------:|:--------------:| | 00 | 00 | | 01 | 11 | | 10 | 10 | | 11 | 01 | For our half adder, we don’t want to overwrite one of our inputs. Instead, we want to write the result on a different pair of qubits. For this, we can use two CNOTs. ``` qc_ha = QuantumCircuit(4,2) # encode inputs in qubits 0 and 1 qc_ha.x(0) # For a=0, remove this line. For a=1, leave it. qc_ha.x(1) # For b=0, remove this line. For b=1, leave it. qc_ha.barrier() # use cnots to write the XOR of the inputs on qubit 2 qc_ha.cx(0,2) qc_ha.cx(1,2) qc_ha.barrier() # extract outputs qc_ha.measure(2,0) # extract XOR value qc_ha.measure(3,1) qc_ha.draw() ``` We are now halfway to a fully working half adder. We just have the other bit of the output left to do: the one that will live on qubit 3. If you look again at the four possible sums, you’ll notice that there is only one case for which this is ```1``` instead of ```0```: ```1+1```=```10```. It happens only when both the bits we are adding are ```1```. To calculate this part of the output, we could just get our computer to look at whether both of the inputs are ```1```. If they are — and only if they are — we need to do a NOT gate on qubit 3. That will flip it to the required value of ```1``` for this case only, giving us the output we need. For this, we need a new gate: like a CNOT but controlled on two qubits instead of just one. This will perform a NOT on the target qubit only when both controls are in state ```1```. This new gate is called the *Toffoli*. For those of you who are familiar with Boolean logic gates, it is basically an AND gate. In Qiskit, the Toffoli is represented with the `ccx` command. ``` qc_ha = QuantumCircuit(4,2) # encode inputs in qubits 0 and 1 qc_ha.x(0) # For a=0, remove the this line. For a=1, leave it. qc_ha.x(1) # For b=0, remove the this line. For b=1, leave it. qc_ha.barrier() # use cnots to write the XOR of the inputs on qubit 2 qc_ha.cx(0,2) qc_ha.cx(1,2) # use ccx to write the AND of the inputs on qubit 3 qc_ha.ccx(0,1,3) qc_ha.barrier() # extract outputs qc_ha.measure(2,0) # extract XOR value qc_ha.measure(3,1) # extract AND value qc_ha.draw() ``` In this example, we are calculating ```1+1```, because the two input bits are both ```1```. Let's see what we get. ``` qobj = assemble(qc_ha) counts = sim.run(qobj).result().get_counts() plot_histogram(counts) ``` The result is ```10```, which is the binary representation of the number 2. We have built a computer that can solve the famous mathematical problem of 1+1! Now you can try it out with the other three possible inputs, and show that our algorithm gives the right results for those too. The half adder contains everything you need for addition. With the NOT, CNOT, and Toffoli gates, we can create programs that add any set of numbers of any size. These three gates are enough to do everything else in computing too. In fact, we can even do without the CNOT. Additionally, the NOT gate is only really needed to create bits with value ```1```. The Toffoli gate is essentially the atom of mathematics. It is the simplest element, from which every other problem-solving technique can be compiled. As we'll see, in quantum computing we split the atom. ``` import qiskit qiskit.__qiskit_version__ ```
github_jupyter
``` %cd -q data/actr_reco import matplotlib.pyplot as plt import tqdm import numpy as np with open("users.txt", "r") as f: users = f.readlines() hist = [] for user in tqdm.tqdm(users): user = user.strip() ret = !wc -l user_split/listening_events_2019_{user}.tsv lc, _ = ret[0].split(" ") hist.append(int(lc)) len(hist), sum(hist) plt.hist(hist, bins=100) plt.show() subset = [x for x in hist if x < 30_000 and x >= 1_000] len(subset) plt.hist(subset, bins=100) plt.show() plt.hist(subset, bins=5) plt.show() plt.hist(subset, bins=10) plt.show() plt.hist(subset, bins=10) ``` # Stratification ``` def stratification_numbers(data, min_value, max_value, bins, num_samples): subset = [x for x in data if x >= min_value and x < max_value] percentage = num_samples / len(subset) bin_size = int((max_value-min_value)/bins) num_per_bin = [] old_boundary = min_value for new_boundary in range(min_value+bin_size, max_value+1, bin_size): data_in_bin = [x for x in subset if x >= old_boundary and x < new_boundary] num_per_bin.append(len(data_in_bin)) old_boundary = new_boundary assert sum(num_per_bin) == len(subset) samples_per_bin = np.array(num_per_bin)*percentage floor_samples_per_bin = np.floor(samples_per_bin) error = int(round(sum(samples_per_bin) - sum(floor_samples_per_bin))) if error == 0: assert sum(floor_samples_per_bin) == num_samples return floor_samples_per_bin remainders = np.remainder(samples_per_bin, 1) to_adjust = np.argsort(remainders)[::-1][:error] for ta in to_adjust: floor_samples_per_bin[ta] += 1 assert sum(floor_samples_per_bin) == num_samples return floor_samples_per_bin samples_per_bin = stratification_numbers(hist, 1_000, 30_000, 10, num_samples=100) samples_per_bin, sum(samples_per_bin) stratification_numbers(hist, 1_000, 30_000, 10, 2) ``` # Iterative Stratified Sampling ``` test_hist = hist[len(test_users):] assert len(test_hist) == len(test_users) test_user_interaction = list(zip(test_users, test_hist)) test_user_interaction[:2] !wc -l user_split/listening_events_2019_61740.tsv def get_bin_boundaries_from_config(bin_config=None): if not bin_config: bin_config = {"min_value": 1_000, "max_value": 30_000, "bins": 10} bin_size = int((bin_config["max_value"]-bin_config["min_value"])/bin_config["bins"]) return list(range(bin_config["min_value"], bin_config["max_value"]+1, bin_size)) def check_in_bin(item_value, target_bin, bin_config=None): bin_boundaries = get_bin_boundaries_from_config() return item_value >= bin_boundaries[target_bin] and item_value < bin_boundaries[target_bin+1] assert check_in_bin(2400, 0) assert not check_in_bin(5000, 0) assert check_in_bin(29_000, 9) def get_next_for_bin(user_interactions, target_bin): iterlist = user_interactions.copy() for ui in user_interactions: if check_in_bin(ui[1], target_bin): iterlist.remove(ui) return ui[0], iterlist raise StopIteration("No remaing items for bin.") def list_index_difference(list1, list2): changed_indices = [] for index, (first, second) in enumerate(zip(list1, list2)): if first != second: changed_indices.append(index) return changed_indices assert list_index_difference([0,1], [0,0]) == [1] def iterative_sampling(user_interactions, max_size=1000, num_bins=10): iterlist = user_interactions.copy() bins = num_bins*[0] sampled_list = [] mult_index_changes = [] for i in tqdm.tqdm(range(1, max_size+1)): updated_bins = stratification_numbers(hist, 1_000, 30_000, 10, num_samples=i) changed_indices = list_index_difference(bins, updated_bins) if len(changed_indices) != 1: mult_index_changes.append(i) # print(f"Multi-index change at pos 
{i}: {changed_indices} (old: {bins} vs new: {updated_bins}") target_bin = changed_indices[0] # empirically increase the first change index, assuming items are in descending order bins[target_bin] += 1 item, iterlist = get_next_for_bin(iterlist, target_bin) sampled_list.append(item) print(len(mult_index_changes)) print(mult_index_changes[-3:]) print(bins) return sampled_list sampled_list = iterative_sampling(test_user_interaction, 150) len(sampled_list) # overlap len(set(test_users[:300]).intersection(set(sampled_list[:150]))) with open("sampled.txt", "w") as f: f.write("".join(sampled_list)) !head sampled.txt !wc -l sampled.txt ```
github_jupyter
``` import pandas as pd ``` ## Load in the "rosetta stone" file I made this file using QGIS, the open-source mapping software. I loaded in the US Census 2010 block-level shapefile for Hennipin County. I then used the block centroids, provided by the census, to colect them within each zone. Since the centroids, by nature, are a "half a block" from the nearest street, this is more reliable than a polygon-in-polygon calculation. I then inspected the map visually for outliers. I'll write up my steps for that soonest. ``` rosetta_df = pd.read_csv('../data/minneapolis/rosetta_nabes.csv') rosetta_df ``` ## Load in the population data I downloaded the population files from [census.data.gov](https://census.data.gov). Here are the [P3 and P5 census table files for Cook County](https://s3.amazonaws.com/media.johnkeefe.net/census-by-precinct/17031_Cook_County.zip). And here is the ["productDownload_2020-06-07T173132" zip file](https://s3.amazonaws.com/media.johnkeefe.net/census-by-precinct/productDownload_2020-06-07T173132.zip). It's a little messy, and the census doesn't label the files well, but I'm providing them as I got them. The CSVs you need are in there! Adjust your paths accordingly. ``` # census P3 for county by block p3_df = pd.read_csv('/Volumes/JK_Smarts_Data/precinct_project/MN/productDownload_2020-06-19T224000/DECENNIALSF12010.P3_data_with_overlays_2020-06-19T223910.csv') p3_df p3_df.reset_index() p3_df.drop(0, inplace=True) p5_df = pd.read_csv('/Volumes/JK_Smarts_Data/precinct_project/MN/productDownload_2020-06-19T224000/DECENNIALSF12010.P5_data_with_overlays_2020-06-19T223910.csv') p5_df.reset_index() p5_df.drop(0, inplace=True) p3_df.shape, p5_df.shape population_df = p3_df.merge(p5_df, on='GEO_ID') population_df.shape population_df rosetta_df.shape rosetta_df.dtypes population_df.dtypes population_df['GEOID10'] = population_df['GEO_ID'].str[9:].astype(int) population_df.drop(columns=['NAME_y'], inplace = True) ## Add demographic data to each chicago PD district block block_data = rosetta_df.merge(population_df, on="GEOID10", how="left") block_data.shape block_data # need to make all those columns numeric block_data[['P003001', 'P003002', 'P003003', 'P003004', 'P003005', 'P003006', 'P003007', 'P003008', 'P005001', 'P005002', 'P005003', 'P005004', 'P005005', 'P005006', 'P005007', 'P005008', 'P005009', 'P005010', 'P005011', 'P005012', 'P005013', 'P005014', 'P005015', 'P005016', 'P005017']] = block_data[['P003001', 'P003002', 'P003003', 'P003004', 'P003005', 'P003006', 'P003007', 'P003008', 'P005001', 'P005002', 'P005003', 'P005004', 'P005005', 'P005006', 'P005007', 'P005008', 'P005009', 'P005010', 'P005011', 'P005012', 'P005013', 'P005014', 'P005015', 'P005016', 'P005017']].apply(pd.to_numeric) block_data.to_csv('./temp_data/mpls_2010blocks_2020nabes_population.csv', index=False) ``` ----------------------- **Note**: I stopped here because I'm going to publish the rest using Datasette Done!
github_jupyter
# Sample for KFServing SDK This is a sample for KFServing SDK. The notebook shows how to use KFServing SDK to create, get, rollout_canary, promote and delete InferenceService. ``` from kubernetes import client from kfserving import KFServingClient from kfserving import constants from kfserving import utils from kfserving import V1alpha2EndpointSpec from kfserving import V1alpha2PredictorSpec from kfserving import V1alpha2TensorflowSpec from kfserving import V1alpha2InferenceServiceSpec from kfserving import V1alpha2InferenceService from kubernetes.client import V1ResourceRequirements ``` Define namespace where InferenceService needs to be deployed to. If not specified, below function defines namespace to the current one where SDK is running in the cluster, otherwise it will deploy to default namespace. ``` namespace = utils.get_default_target_namespace() ``` ## Define InferenceService Firstly define default endpoint spec, and then define the inferenceservice basic on the endpoint spec. ``` api_version = constants.KFSERVING_GROUP + '/' + constants.KFSERVING_VERSION default_endpoint_spec = V1alpha2EndpointSpec( predictor=V1alpha2PredictorSpec( tensorflow=V1alpha2TensorflowSpec( storage_uri='gs://kfserving-samples/models/tensorflow/flowers', resources=V1ResourceRequirements( requests={'cpu':'100m','memory':'1Gi'}, limits={'cpu':'100m', 'memory':'1Gi'})))) isvc = V1alpha2InferenceService(api_version=api_version, kind=constants.KFSERVING_KIND, metadata=client.V1ObjectMeta( name='flower-sample', namespace=namespace), spec=V1alpha2InferenceServiceSpec(default=default_endpoint_spec)) ``` ## Create InferenceService Call KFServingClient to create InferenceService. ``` KFServing = KFServingClient() KFServing.create(isvc) ``` ## Check the InferenceService ``` KFServing.get('flower-sample', namespace=namespace, watch=True, timeout_seconds=120) ``` ## Add Canary to InferenceService Firstly define canary endpoint spec, and then rollout 10% traffic to the canary version, watch the rollout process. ``` canary_endpoint_spec = V1alpha2EndpointSpec( predictor=V1alpha2PredictorSpec( tensorflow=V1alpha2TensorflowSpec( storage_uri='gs://kfserving-samples/models/tensorflow/flowers-2', resources=V1ResourceRequirements( requests={'cpu':'100m','memory':'1Gi'}, limits={'cpu':'100m', 'memory':'1Gi'})))) KFServing.rollout_canary('flower-sample', canary=canary_endpoint_spec, percent=10, namespace=namespace, watch=True, timeout_seconds=120) ``` ## Rollout more traffic to canary of the InferenceService Rollout traffice percent to 50% to canary version. ``` KFServing.rollout_canary('flower-sample', percent=50, namespace=namespace, watch=True, timeout_seconds=120) ``` ## Promote Canary to Default ``` KFServing.promote('flower-sample', namespace=namespace, watch=True, timeout_seconds=120) ``` ## Delete the InferenceService ``` KFServing.delete('flower-sample', namespace=namespace) ```
github_jupyter
# Assignment Submission for FMUP ## Kishlaya Jaiswal ### Chennai Mathematical Institute - MCS201909 --- # Solution 1 I have choosen the following stocks from Nifty50: - Kotak Mahindra Bank Ltd (KOTAKBANK) - Hindustan Unilever Ltd (HINDUNILVR) - Nestle India Limited (NESTLEIND) Note: - I am doing these computations on Apr 2, 2021, and hence using the closing price for this day as my strike price. - I am using the historical data for the month of February to find the volatility of each of these stocks (volatility computation is done at the end) ``` import QuantLib as ql # function to find the price and greeks for a given option # with it's strike/spot price and it's volatility def find_price_greeks(spot_price, strike_price, volatility, option_type): # construct the European Option payoff = ql.PlainVanillaPayoff(option_type, strike_price) exercise = ql.EuropeanExercise(maturity_date) european_option = ql.VanillaOption(payoff, exercise) # quote the spot price spot_handle = ql.QuoteHandle( ql.SimpleQuote(spot_price) ) flat_ts = ql.YieldTermStructureHandle( ql.FlatForward(calculation_date, risk_free_rate, day_count) ) dividend_yield = ql.YieldTermStructureHandle( ql.FlatForward(calculation_date, dividend_rate, day_count) ) flat_vol_ts = ql.BlackVolTermStructureHandle( ql.BlackConstantVol(calculation_date, calendar, volatility, day_count) ) # create the Black Scholes process bsm_process = ql.BlackScholesMertonProcess(spot_handle, dividend_yield, flat_ts, flat_vol_ts) # set the engine to use the above process european_option.setPricingEngine(ql.AnalyticEuropeanEngine(bsm_process)) return european_option tickers = ["KOTAKBANK", "HINDUNILVR", "NESTLEIND"] # spot price = closing price as on Mar 1, 2021 spot = {"KOTAKBANK":1845.35, "HINDUNILVR":2144.70, "NESTLEIND":16288.20} # strike price = closing price as on Apr 2, 2021 strike = {"KOTAKBANK":1804.45, "HINDUNILVR":2399.45, "NESTLEIND":17102.15} # historical volatility from the past month's data vol = {"KOTAKBANK":0.38, "HINDUNILVR":0.15, "NESTLEIND":0.18} # date of option purchase calculation_date = ql.Date(1,3,2021) # exercise date # this excludes the holidays in the Indian calendar calendar = ql.India() period = ql.Period(65, ql.Days) maturity_date = calendar.advance(calculation_date, period) # rate of interest risk_free_rate = 0.06 # other settings dividend_rate = 0.0 day_count = ql.Actual365Fixed() ql.Settings.instance().evaluationDate = calculation_date # store final variables for future calculations delta = {} gamma = {} vega = {} # print settings format_type_head = "{:<15}" + ("{:<12}" * 7) format_type = "{:<15}{:<12}" + ("{:<12.2f}" * 6) print(format_type_head.format("Name", "Type", "Price", "Delta", "Gamma", "Rho", "Theta", "Vega")) print() for ticker in tickers: option = find_price_greeks(spot[ticker], strike[ticker], vol[ticker], ql.Option.Call) print(format_type.format(ticker, "Call", option.NPV(), option.delta(), option.gamma(), option.rho(), option.theta(), option.vega())) delta[ticker] = option.delta() gamma[ticker] = option.gamma() vega[ticker] = option.vega() option = find_price_greeks(spot[ticker], strike[ticker], vol[ticker], ql.Option.Put) print(format_type.format(ticker, "Put", option.NPV(), option.delta(), option.gamma(), option.rho(), option.theta(), option.vega())) print() ``` ### Delta Gamma Vega neutrality First we make the Gamma and Vega neutral by taking - x units of KOTAKBANK - y units of HINDUNILVR - 1 unit of NESTLEIND To solve for x,y we have the following: ``` import numpy as np G1, G2, G3 = gamma["KOTAKBANK"], 
gamma["HINDUNILVR"], gamma["NESTLEIND"] V1, V2, V3 = vega["KOTAKBANK"], vega["HINDUNILVR"], vega["NESTLEIND"] # Solve the following equation: # G1 x + G2 y + G3 = 0 # V1 x + V2 y + V3 = 0 A = np.array([[G1, G2], [V1, V2]]) b = np.array([-G3, -V3]) z = np.linalg.solve(A, b) print("x = {:.2f}".format(z[0])) print("y = {:.2f}".format(z[1])) print() final_delta = z[0]*delta["KOTAKBANK"] + z[1]*delta["HINDUNILVR"] + delta["NESTLEIND"] print("Delta of portfolio is {:.2f}".format(final_delta)) ``` ## Final Strategy - Take a short position of 18.46 units of Kotak Mahindra Bank Ltd Call Option - Take a long position of 17.34 units of Hindustan Unilever Ltd Call Option - Take a long position of 1 unit of Nestle India Limited Call Option - Take a long position of 9.13 units of Nestle India Limited Stock This will yield a portfolio with Delta, Gamma and Vega neutral. # Solution 2 Using Taylor expansion, we get $$\Delta P = \frac{\partial P}{\partial y} \Delta y + \frac12 \frac{\partial^2 P}{\partial y^2}(\Delta y)^2$$ $$\implies \frac{\Delta P}{P} = -D \Delta y + \frac12 C (\Delta y)^2$$ where $D$ denotes duration and $C$ denotes convexity of a bond. We remark that the duration of the bonds we are comparing are same and fixed. --- <p>With that being said, let's say the interest rates fall, then we have $$\Delta y < 0 \implies - D \Delta y + C \Delta y^2 > 0 \implies \Delta P > 0$$ Now for the bond with greater convexity, $C \Delta y^2$ has a large value hence $\Delta P$ has to be large and so we get that "Greater convexity translates into greater price gains as interest rates fall" </p> --- Now suppose interest rates rise that is $\Delta y > 0$, then we $-D \Delta y < 0$, that is the price of the bonds decreases but the bond with greater convexity will add up for a large $C \Delta y^2$ also and so the price decrease will be less for bond with high convexity. 
This explains "Lessened price declines as interest rates rise" # Solution 3 ``` import QuantLib as ql # function to calculate coupon value def find_coupon(pv, r, m, n): discount_factor = (r/m) / (1 - (1 + r/m)**(-n*m)) C = pv * discount_factor return C # loan settings loan_amt = 0.8*1000000 rate = 0.12 pay = find_coupon(loan_amt, rate, 12, 5) month = ql.Date(15,8,2021) period = ql.Period('1m') # print settings print("Monthly coupon is: {:.2f}".format(pay)) print() format_type = "{:<15}" * 4 print(format_type.format("Date", "Interest", "Principal", "Remaining")) while loan_amt > 0: interest = loan_amt * rate / 12 principal = pay - interest loan_amt = loan_amt - principal print(format_type.format(month.ISO(), "{:.2f}".format(interest), "{:.2f}".format(principal), "{:.2f}".format(loan_amt))) if round(loan_amt) == 0: break month = month + period ``` ### Volatility Computation for Problem 1 ``` import math def get_volatility(csv): data = csv.split('\n')[1:] data = map(lambda x: x.split(','), data) closing_prices = list(map(lambda x: float(x[-2]), data)) n = len(closing_prices) log_returns = [] for i in range(1,n): log_returns.append(math.log(closing_prices[i]/closing_prices[i-1])) mu = sum(log_returns)/(n-1) tmp = map(lambda x: (x-mu)**2, log_returns) vol = math.sqrt(sum(tmp)/(n-1)) * math.sqrt(252) return vol kotak_csv = '''Date,Open,High,Low,Close,Adj Close,Volume 2021-02-01,1730.000000,1810.000000,1696.250000,1801.349976,1801.349976,220763 2021-02-02,1825.000000,1878.650024,1801.349976,1863.500000,1863.500000,337556 2021-02-03,1875.000000,1882.349976,1820.099976,1851.849976,1851.849976,147146 2021-02-04,1857.900024,1914.500000,1831.050049,1911.250000,1911.250000,188844 2021-02-05,1921.000000,1997.900024,1915.000000,1982.550049,1982.550049,786773 2021-02-08,1995.000000,2029.949951,1951.949951,1956.300049,1956.300049,212114 2021-02-09,1950.000000,1975.000000,1938.000000,1949.199951,1949.199951,62613 2021-02-10,1954.550049,1961.849976,1936.300049,1953.650024,1953.650024,143830 2021-02-11,1936.000000,1984.300049,1936.000000,1961.300049,1961.300049,120121 2021-02-12,1966.000000,1974.550049,1945.599976,1951.449951,1951.449951,86860 2021-02-15,1954.000000,1999.000000,1954.000000,1986.199951,1986.199951,135074 2021-02-16,1995.000000,2048.949951,1995.000000,2021.650024,2021.650024,261589 2021-02-17,2008.500000,2022.400024,1969.500000,1989.150024,1989.150024,450365 2021-02-18,1980.000000,1982.349976,1938.000000,1945.300049,1945.300049,193234 2021-02-19,1945.000000,1969.599976,1925.050049,1937.300049,1937.300049,49189 2021-02-22,1941.000000,1961.650024,1921.650024,1948.550049,1948.550049,44651 2021-02-23,1955.000000,1961.900024,1867.000000,1873.150024,1873.150024,118138 2021-02-24,1875.199951,1953.949951,1852.000000,1919.000000,1919.000000,454695 2021-02-25,1935.000000,1964.949951,1886.900024,1895.349976,1895.349976,195212 2021-02-26,1863.000000,1868.000000,1773.099976,1782.349976,1782.349976,180729''' hind_csv = '''Date,Open,High,Low,Close,Adj Close,Volume 2021-02-01,2265.000000,2286.000000,2226.550049,2249.149902,2249.149902,130497 2021-02-02,2271.000000,2275.000000,2207.699951,2231.850098,2231.850098,327563 2021-02-03,2234.000000,2256.699951,2218.199951,2232.600098,2232.600098,121232 2021-02-04,2234.000000,2258.449951,2226.949951,2247.050049,2247.050049,533609 2021-02-05,2252.000000,2285.000000,2241.000000,2270.350098,2270.350098,254911 2021-02-08,2275.000000,2287.000000,2233.000000,2237.800049,2237.800049,211465 2021-02-09,2247.000000,2254.000000,2211.199951,2216.649902,2216.649902,171285 
2021-02-10,2216.649902,2240.000000,2213.449951,2235.899902,2235.899902,185915 2021-02-11,2245.000000,2267.500000,2235.000000,2262.399902,2262.399902,121168 2021-02-12,2270.000000,2270.649902,2232.199951,2241.899902,2241.899902,33016 2021-02-15,2252.000000,2261.500000,2212.100098,2215.850098,2215.850098,91240 2021-02-16,2225.000000,2228.399902,2190.500000,2196.899902,2196.899902,101652 2021-02-17,2191.000000,2200.000000,2160.300049,2164.649902,2164.649902,138504 2021-02-18,2165.000000,2168.449951,2143.050049,2147.750000,2147.750000,110272 2021-02-19,2150.000000,2193.649902,2148.000000,2181.149902,2181.149902,150398 2021-02-22,2200.000000,2201.699951,2161.100098,2167.250000,2167.250000,98782 2021-02-23,2173.550049,2192.000000,2169.399902,2177.949951,2177.949951,22743 2021-02-24,2179.000000,2183.949951,2104.250000,2181.600098,2181.600098,329265 2021-02-25,2190.000000,2190.000000,2160.000000,2163.600098,2163.600098,357853 2021-02-26,2151.149902,2182.000000,2122.000000,2132.050049,2132.050049,158925''' nestle_csv = '''Date,Open,High,Low,Close,Adj Close,Volume 2021-02-01,17162.099609,17277.000000,16996.449219,17096.949219,17096.949219,3169 2021-02-02,17211.000000,17328.099609,16800.000000,17189.349609,17189.349609,3852 2021-02-03,17247.449219,17284.000000,17064.349609,17155.400391,17155.400391,2270 2021-02-04,17250.000000,17250.000000,17054.800781,17073.199219,17073.199219,13193 2021-02-05,17244.000000,17244.000000,17019.949219,17123.300781,17123.300781,2503 2021-02-08,17199.949219,17280.000000,17107.349609,17213.550781,17213.550781,7122 2021-02-09,17340.000000,17510.699219,17164.050781,17325.800781,17325.800781,2714 2021-02-10,17396.900391,17439.300781,17083.800781,17167.699219,17167.699219,3341 2021-02-11,17167.699219,17442.000000,17165.550781,17416.650391,17416.650391,2025 2021-02-12,17449.849609,17500.000000,17241.000000,17286.099609,17286.099609,3486 2021-02-15,17290.000000,17500.000000,17280.000000,17484.500000,17484.500000,1927 2021-02-16,17600.000000,17634.599609,17141.250000,17222.449219,17222.449219,7901 2021-02-17,16900.000000,16900.000000,16360.000000,16739.900391,16739.900391,28701 2021-02-18,17050.000000,17050.000000,16307.000000,16374.150391,16374.150391,13711 2021-02-19,16395.000000,16477.599609,16214.450195,16386.099609,16386.099609,5777 2021-02-22,16400.000000,16531.050781,16024.599609,16099.200195,16099.200195,9051 2021-02-23,16123.000000,16250.000000,16003.000000,16165.250000,16165.250000,6261 2021-02-24,16249.000000,16800.000000,15900.000000,16369.950195,16369.950195,18003 2021-02-25,16394.699219,16394.699219,16102.000000,16114.349609,16114.349609,18735 2021-02-26,16075.000000,16287.200195,16010.000000,16097.700195,16097.700195,13733''' print("Annualized Volatility of KOTAKBANK is {:.2f}%".format(get_volatility(kotak_csv)*100)) print("Annualized Volatility of HINDUNILVR is {:.2f}%".format(get_volatility(hind_csv)*100)) print("Annualized Volatility of NESTLEIND is {:.2f}%".format(get_volatility(nestle_csv)*100)) ```
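For reference, the estimator implemented in `get_volatility` above is the standard annualized historical volatility $$\sigma_{\text{annual}} = \sqrt{252}\;\sqrt{\frac{1}{N}\sum_{i=1}^{N}\left(r_i-\bar r\right)^2}, \qquad r_i = \ln\frac{P_i}{P_{i-1}},$$ where $P_i$ are the daily closing prices, $N$ is the number of daily log returns, and 252 is the assumed number of trading days per year. 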
github_jupyter
# QCoDeS Example with Lakeshore 325 Here provided is an example session with model 325 of the Lakeshore temperature controller ``` %matplotlib notebook import numpy as np import matplotlib.pyplot as plt from qcodes.instrument_drivers.Lakeshore.Model_325 import Model_325 lake = Model_325("lake", "GPIB0::12::INSTR") ``` ## Sensor commands ``` # Check that the sensor is in the correct status lake.sensor_A.status() # What temperature is it reading? lake.sensor_A.temperature() lake.sensor_A.temperature.unit # We can access the sensor objects through the sensor list as well assert lake.sensor_A is lake.sensor[0] ``` ## Heater commands ``` # In a closed loop configuration, heater 1 reads from... lake.heater_1.input_channel() lake.heater_1.unit() # Get the PID values print("P = ", lake.heater_1.P()) print("I = ", lake.heater_1.I()) print("D = ", lake.heater_1.D()) # Is the heater on? lake.heater_1.output_range() ``` ## Loading and updating sensor calibration values ``` curve = lake.sensor_A.curve curve_data = curve.get_data() curve_data.keys() fig, ax = plt.subplots() ax.plot(curve_data["Temperature (K)"], curve_data['log Ohm'], '.') plt.show() curve.curve_name() curve_x = lake.curve[23] curve_x_data = curve_x.get_data() curve_x_data.keys() temp = np.linspace(0, 100, 200) new_data = {"Temperature (K)": temp, "log Ohm": 1/(temp+1)+2} fig, ax = plt.subplots() ax.plot(new_data["Temperature (K)"], new_data["log Ohm"], '.') plt.show() curve_x.format("log Ohm/K") curve_x.set_data(new_data) curve_x.format() curve_x_data = curve_x.get_data() fig, ax = plt.subplots() ax.plot(curve_x_data["Temperature (K)"], curve_x_data['log Ohm'], '.') plt.show() ``` ## Go to a set point ``` import time import numpy from IPython.display import display from ipywidgets import interact, widgets from matplotlib import pyplot as plt def live_plot_temperature_reading(channel_to_read, read_period=0.2, n_reads=1000): """ Live plot the temperature reading from a Lakeshore sensor channel Args: channel_to_read Lakeshore channel object to read the temperature from read_period time in seconds between two reads of the temperature n_reads total number of reads to perform """ # Make a widget for a text display that is contantly being updated text = widgets.Text() display(text) fig, ax = plt.subplots(1) line, = ax.plot([], [], '*-') ax.set_xlabel('Time, s') ax.set_ylabel(f'Temperature, {channel_to_read.temperature.unit}') fig.show() plt.ion() for i in range(n_reads): time.sleep(read_period) # Update the text field text.value = f'T = {channel_to_read.temperature()}' # Add new point to the data that is being plotted line.set_ydata(numpy.append(line.get_ydata(), channel_to_read.temperature())) line.set_xdata(numpy.arange(0, len(line.get_ydata()), 1)*read_period) ax.relim() # Recalculate limits ax.autoscale_view(True, True, True) # Autoscale fig.canvas.draw() # Redraw lake.heater_1.control_mode("Manual PID") lake.heater_1.output_range("Low (2.5W)") lake.heater_1.input_channel("A") # The following seem to be good settings for our setup lake.heater_1.P(400) lake.heater_1.I(40) lake.heater_1.D(10) lake.heater_1.setpoint(15.0) # <- temperature live_plot_temperature_reading(lake.sensor_a, n_reads=400) ``` ## Querying the resistance and heater output ``` # to get the resistance of the system (25 or 50 Ohm) lake.heater_1.resistance() # to set the resistance of the system (25 or 50 Ohm) lake.heater_1.resistance(50) lake.heater_1.resistance() # output in percent (%) of current or power, depending on setting, which can be queried by 
lake.heater_1.output_metric() lake.heater_1.heater_output() # in %, 50 means 50% ```
github_jupyter
Below is code with a link to a happy or sad dataset which contains 80 images, 40 happy and 40 sad. Create a convolutional neural network that trains to 100% accuracy on these images, which cancels training upon hitting training accuracy of >.999 Hint -- it will work best with 3 convolutional layers. ``` import tensorflow as tf import os import zipfile from os import path, getcwd, chdir # DO NOT CHANGE THE LINE BELOW. If you are developing in a local # environment, then grab happy-or-sad.zip from the Coursera Jupyter Notebook # and place it inside a local folder and edit the path to that location path = f"{getcwd()}/../tmp2/happy-or-sad.zip" zip_ref = zipfile.ZipFile(path, 'r') zip_ref.extractall("/tmp/h-or-s") zip_ref.close() # GRADED FUNCTION: train_happy_sad_model def train_happy_sad_model(): # Please write your code only where you are indicated. # please do not remove # model fitting inline comments. DESIRED_ACCURACY = 0.999 class myCallback(tf.keras.callbacks.Callback): def on_epoch_end(self, callback, logs={}): if (logs.get('acc') > DESIRED_ACCURACY): print("\nReached {}% accuracy, stopping training".format(DESIRED_ACCURACY*100)) self.model.stop_training = True callbacks = myCallback() # This Code Block should Define and Compile the Model. Please assume the images are 150 X 150 in your implementation. model = tf.keras.models.Sequential([ # Your Code Here tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(150,150,3)), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(16, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(16, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(512, activation='relu'), tf.keras.layers.Dense(1, activation='sigmoid') ]) from tensorflow.keras.optimizers import RMSprop model.compile(loss='binary_crossentropy', optimizer=RMSprop(lr=0.001), metrics=['acc']) # This code block should create an instance of an ImageDataGenerator called train_datagen # And a train_generator by calling train_datagen.flow_from_directory from tensorflow.keras.preprocessing.image import ImageDataGenerator train_datagen = ImageDataGenerator(rescale=1./255) # Please use a target_size of 150 X 150. train_generator = train_datagen.flow_from_directory( '/tmp/h-or-s', target_size=(150, 150), batch_size=10, class_mode='binary' ) # Expected output: 'Found 80 images belonging to 2 classes' # This code block should call model.fit_generator and train for # a number of epochs. # model fitting history = model.fit_generator( train_generator, steps_per_epoch=8, epochs=15, verbose=1, callbacks=[callbacks] ) # model fitting return history.history['acc'][-1] # The Expected output: "Reached 99.9% accuracy so cancelling training!"" train_happy_sad_model() # Now click the 'Submit Assignment' button above. # Once that is complete, please run the following two cells to save your work and close the notebook %%javascript <!-- Save the notebook --> IPython.notebook.save_checkpoint(); %%javascript IPython.notebook.session.delete(); window.onbeforeunload = null setTimeout(function() { window.close(); }, 1000); ```
github_jupyter
# Capstone Part 2a - Classical ML Models (MFCCs with Offset) ___ ## Setup ``` # Basic packages import numpy as np import pandas as pd # For splitting the data into training and test sets from sklearn.model_selection import train_test_split # For scaling the data as necessary from sklearn.preprocessing import StandardScaler # For doing principal component analysis as necessary from sklearn.decomposition import PCA # For visualizations import matplotlib.pyplot as plt %matplotlib inline # For building a variety of models from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier from xgboost import XGBClassifier from sklearn.neighbors import KNeighborsClassifier # For hyperparameter optimization from sklearn.pipeline import Pipeline from sklearn.model_selection import GridSearchCV # For caching pipeline and grid search results from tempfile import mkdtemp # For model evaluation from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report # For getting rid of warning messages import warnings warnings.filterwarnings('ignore') # For pickling models import joblib # Loading in the finished dataframe from part 1 ravdess_mfcc_df = pd.read_csv('C:/Users/Patrick/Documents/Capstone Data/ravdess_mfcc.csv') ``` ___ # Building Models for Classifying Gender (Regardless of Emotion) ``` # Splitting the dataframe into features and target X = ravdess_mfcc_df.iloc[:, :-2] g = ravdess_mfcc_df['Gender'] ``` The convention is to name the target variable 'y', but I will be declaring many different target variables throughout the notebook, so I opted for 'g' for simplicity instead of 'y_g' or 'y_gen', for example. ``` # # Encoding the genders # gender_encoder = LabelEncoder() # g = gender_encoder.fit_transform(g) # # Checking the results # g # # Which number represents which gender? # for num in np.unique(g): # print(f'{num} represents {gender_encoder.inverse_transform([num])[0]}.') ``` Note: I realized that encoding the target is unnecessary; it is done automatically by the models. ``` # What test size should I use? print(f'Length of g: {len(g)}') print(f'30% of {len(g)} is {len(g)*0.3}') ``` I will use 30%. ``` # Splitting the data into training and test sets X_train, X_test, g_train, g_test = train_test_split(X, g, test_size=0.3, stratify=g, random_state=1) # Checking the shapes print(X_train.shape) print(X_test.shape) print(g_train.shape) print(g_test.shape) ``` I want to build a simple, initial classifier to get a sense of the performances I might get in more optimized models. To this end, I will build a logistic regression model without doing any cross-validation or hyperparameter optimization. ``` # Instantiate the model initial_logreg = LogisticRegression() # Fit to training set initial_logreg.fit(X_train, g_train) # Score on training set print(f'Model accuracy on training set: {initial_logreg.score(X_train, g_train)*100}%') # Score on test set print(f'Model accuracy on test set: {initial_logreg.score(X_test, g_test)*100}%') ``` These are extremely high accuracies. The model has most likely overfit to the training set, but the accuracy on the test set is still surprisingly high. Here are some possible explanations: - The dataset (RAVDESS) is relatively small, with only 1440 data points (1438 if I do not count the two very short clips that I excluded). This model is likely not very robust and has easily overfit to the training set. - The features I have extracted could be excellent predictors of gender. 
- This could be a very simple classification task. After all, there are only two classes, and theoretically, features extracted from male and female voice clips should have distinguishable patterns. I had originally planned to build more gender classification models for this dataset, but I will forgo this for now. In part 4, I will try using this model to classify clips from another dataset and examine its performance. ``` # Pickling the model for later use joblib.dump(initial_logreg, 'pickle1_gender_logreg.pkl') ``` ___ # Building Models for Classifying Emotion for Males ``` # Making a new dataframe that contains only male recordings ravdess_mfcc_m_df = ravdess_mfcc_df[ravdess_mfcc_df['Gender'] == 'male'].reset_index().drop('index', axis=1) ravdess_mfcc_m_df # Splitting the dataframe into features and target Xm = ravdess_mfcc_m_df.iloc[:, :-2] em = ravdess_mfcc_m_df['Emotion'] # # Encoding the emotions # emotion_encoder = LabelEncoder() # em = emotion_encoder.fit_transform(em) # # Checking the results # em # # Which number represents which emotion? # for num in np.unique(em): # print(f'{num} represents {emotion_encoder.inverse_transform([num])[0]}.') ``` Note: I realized that encoding the target is unnecessary; it is done automatically by the models. ``` # Splitting the data into training and test sets Xm_train, Xm_test, em_train, em_test = train_test_split(Xm, em, test_size=0.3, stratify=em, random_state=1) # Checking the shapes print(Xm_train.shape) print(Xm_test.shape) print(em_train.shape) print(em_test.shape) ``` As before, I will try building an initial model. ``` # Instantiate the model initial_logreg_em = LogisticRegression() # Fit to training set initial_logreg_em.fit(Xm_train, em_train) # Score on training set print(f'Model accuracy on training set: {initial_logreg_em.score(Xm_train, em_train)*100}%') # Score on test set print(f'Model accuracy on test set: {initial_logreg_em.score(Xm_test, em_test)*100}%') ``` The model has overfit to the training set yet again, and this time the accuracy on the test set leaves a lot to be desired. Let's evaluate the model further using a confusion matrix and a classification report. ``` # Having initial_logreg_em make predictions based on the test set features em_pred = initial_logreg_em.predict(Xm_test) # Building the confusion matrix as a dataframe emotions = ['angry', 'calm', 'disgusted', 'fearful', 'happy', 'neutral', 'sad', 'surprised'] em_confusion_df = pd.DataFrame(confusion_matrix(em_test, em_pred)) em_confusion_df.columns = [f'Predicted {emotion}' for emotion in emotions] em_confusion_df.index = [f'Actual {emotion}' for emotion in emotions] em_confusion_df # Classification report print(classification_report(em_test, em_pred)) ``` In a binary classification problem, there is one negative class and one positive class. This is not the case here, because this is a multiclass classification problem. In the table above, each row of precision and recall scores assumes the corresponding emotion is the positive class, and groups all other emotions as the negative class. Precision is the following measure: Of all the data points that the model classified as belonging to the positive class (i.e., the true and false positives), what proportion is correct (i.e., truly positive)? Recall is the following measure: Of all the data points that are truly positive (i.e., the true positives and false negatives as classified by the model), what proportion did the model correctly classify (i.e., the true positives)? 
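Equivalently, precision and recall can be recomputed directly from the confusion matrix above (a small sketch; with sklearn's convention, rows are actual classes and columns are predicted classes): ``` # Per-emotion precision and recall recomputed from the confusion matrix cm = confusion_matrix(em_test, em_pred) tp = np.diag(cm) # correctly classified clips for each emotion precision = tp / cm.sum(axis=0) # TP / (TP + FP): column sums are predicted counts recall = tp / cm.sum(axis=1) # TP / (TP + FN): row sums are actual counts pd.DataFrame({'precision': precision, 'recall': recall}, index=emotions) ``` 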
It appears that the initial model is strongest at classifying calm voice clips, and weakest at classifying neutral voice clips. In order of strongest to weakest: calm, angry, fearful, disgusted, surprised, happy, sad, and neutral. I will now try building new models and optimizing hyperparameters to obtain better performance. I will use a pipeline and multiple grid searches to accomplish this. Before I build all my models in bulk, I want to see if doing principal component analysis (PCA) could be beneficial. I will do PCA on both unscaled and scaled features, and plot the resulting explained variance ratios. I have two goals here: - Get a sense of whether scaling would be beneficial for model performance - Get a sense of how many principal components I should use ``` # PCA on unscaled features # Instantiate PCA and fit to Xm_train pca = PCA().fit(Xm_train) # Transform Xm_train Xm_train_pca = pca.transform(Xm_train) # Transform Xm_test Xm_test_pca = pca.transform(Xm_test) # Standard scaling # Instantiate the scaler and fit to Xm_train scaler = StandardScaler().fit(Xm_train) # Transform Xm_train Xm_train_scaled = scaler.transform(Xm_train) # Transform Xm_test Xm_test_scaled = scaler.transform(Xm_test) # PCA on scaled features # Instantiate PCA and fit to Xm_train_scaled pca_scaled = PCA().fit(Xm_train_scaled) # Transform Xm_train_scaled Xm_train_scaled_pca = pca_scaled.transform(Xm_train_scaled) # Transform Xm_test_scaled Xm_test_scaled_pca = pca_scaled.transform(Xm_test_scaled) # Plot the explained variance ratios plt.subplots(1, 2, figsize = (15, 5)) # Unscaled plt.subplot(1, 2, 1) plt.bar(np.arange(1, len(pca.explained_variance_ratio_)+1), pca.explained_variance_ratio_) plt.xlabel('Principal Component') plt.ylabel('Explained Variance Ratio') plt.title('PCA on Unscaled Features') plt.ylim(top = 0.5) # Equalizing the y-axes # Scaled plt.subplot(1, 2, 2) plt.bar(np.arange(1, len(pca_scaled.explained_variance_ratio_)+1), pca_scaled.explained_variance_ratio_) plt.xlabel('Principal Component') plt.ylabel('Explained Variance Ratio') plt.title('PCA on Scaled Features') plt.ylim(top = 0.5) # Equalizing the y-axes plt.tight_layout() plt.show() ``` Principal components are linear combinations of the original features, ordered by how much of the dataset's variance they explain. Looking at the two plots above, it appears that for the same number of principal components, those using unscaled features are able to explain more variance (i.e., capture more information) than those using scaled features. For example, looking at the first ~25 principal components of each plot, the bars of the left plot (unscaled) are higher and skewed more to the left than those of the right plot (scaled). Since the purpose of PCA is to reduce dimensionality of the data by keeping the components that explain the most variance and discarding the rest, the unscaled principal components might benefit my models more than the scaled principal components will. However, I have to be mindful of the underlying variance in my features. Some features have values in the -800s, while others are close to 0. ``` # Examining the variances var_df = pd.DataFrame(ravdess_mfcc_m_df.var()).T var_df ``` Since PCA is looking for high variance directions, it can become biased by the underlying variance in a given feature if I do not scale it down first. I can see that some features have much higher variance than others do, so there is likely a lot of bias in the unscaled principal components above. 
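To make this bias concrete, one quick check (a sketch, assuming `pca` fitted on the unscaled `Xm_train` above is still in memory) is to compare the highest-variance features with the features that dominate the first unscaled principal component:

```
# A rough check of the scaling bias: compare the features with the largest variance
# against the features with the largest absolute loadings in the first unscaled PC.
# Assumes pca was fit on the unscaled Xm_train in the cell above.
import numpy as np

feature_names = np.array(Xm_train.columns)
top_variance = feature_names[np.argsort(Xm_train.var().values)[::-1][:5]]
top_loadings = feature_names[np.argsort(np.abs(pca.components_[0]))[::-1][:5]]

print('Highest-variance features:    ', top_variance)
print('Largest loadings in first PC: ', top_loadings)
# If these two lists overlap heavily, the unscaled PCA is being driven by raw variance
# rather than by structure that is useful for classification.
```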
How much variance is explained by certain numbers of unscaled and scaled principal components? This will help me determine how many principal components to try in my grid searches later. ``` # Unscaled num_components = [503, 451, 401, 351, 301, 251, 201, 151, 101, 51] for n in num_components: print(f'Variance explained by {n-1} unscaled principal components: {np.round(np.sum(pca.explained_variance_ratio_[:n])*100, 2)}%') # Scaled num_components = [503, 451, 401, 351, 301, 251, 201, 151, 101, 51] for n in num_components: print(f'Variance explained by {n-1} scaled principal components: {np.round(np.sum(pca_scaled.explained_variance_ratio_[:n])*100, 2)}%') ``` I will now build a pipeline and multiple grid searches with five-fold cross-validation to optimize the hyperparameters. I will try five types of classifiers: logistic regression, support vector machine, random forest, XGBoost, and k-nearest neighbours. To get a better sense of how each type performs, I will make a grid search for each one. I will also try different numbers of principal components for unscaled and scaled features. ``` # Cache cachedir = mkdtemp() # Pipeline (these values are placeholders) my_pipeline = Pipeline(steps=[('scaler', StandardScaler()), ('dim_reducer', PCA()), ('model', LogisticRegression())], memory=cachedir) # Parameter grid for log reg logreg_param_grid = [ # l1 without PCA # unscaled and scaled * 9 regularization strengths = 18 models {'scaler': [None, StandardScaler()], 'dim_reducer': [None], 'model': [LogisticRegression(penalty='l1', n_jobs=-1)], 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]}, # l1 unscaled with PCA # 5 PCAs * 9 regularization strengths = 45 models {'scaler': [None], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(50, 251, 50), 'model': [LogisticRegression(penalty='l1', n_jobs=-1)], 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]}, # l1 scaled with PCA # 4 PCAs * 9 regularization strengths = 36 models {'scaler': [StandardScaler()], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(200, 351, 50), 'model': [LogisticRegression(penalty='l1', n_jobs=-1)], 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]}, # l2 (default) without PCA # unscaled and scaled * 9 regularization strengths = 18 models {'scaler': [None, StandardScaler()], 'dim_reducer': [None], 'model': [LogisticRegression(solver='lbfgs', n_jobs=-1)], 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]}, # l2 (default) unscaled with PCA # 5 PCAs * 9 regularization strengths = 45 models {'scaler': [None], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(50, 251, 50), 'model': [LogisticRegression(solver='lbfgs', n_jobs=-1)], 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]}, # l2 (default) scaled with PCA # 4 PCAs * 9 regularization strengths = 36 models {'scaler': [StandardScaler()], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(200, 351, 50), 'model': [LogisticRegression(solver='lbfgs', n_jobs=-1)], 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]} ] # Instantiate the log reg grid search logreg_grid_search = GridSearchCV(estimator=my_pipeline, param_grid=logreg_param_grid, cv=5, n_jobs=-1, verbose=5) # Fit the log reg grid search fitted_logreg_grid_em = logreg_grid_search.fit(Xm_train, em_train) # What was the best log reg? 
fitted_logreg_grid_em.best_estimator_ print(f"The best log reg's accuracy on the training set: {fitted_logreg_grid_em.score(Xm_train, em_train)*100}%") print(f"The best log reg's accuracy on the test set: {fitted_logreg_grid_em.score(Xm_test, em_test)*100}%") # Pickling the best log reg for later use joblib.dump(fitted_logreg_grid_em.best_estimator_, 'pickle2_male_emotion_logreg.pkl') # Parameter grid for SVM svm_param_grid = [ # unscaled and scaled * 9 regularization strengths = 18 models {'scaler': [None, StandardScaler()], 'dim_reducer': [None], 'model': [SVC()], 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]}, # unscaled # 5 PCAs * 9 regularization strengths = 45 models {'scaler': [None], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(50, 251, 50), 'model': [SVC()], 'model__C':[0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]}, # scaled # 4 PCAs * 9 regularization strengths = 36 models {'scaler': [StandardScaler()], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(200, 351, 50), 'model': [SVC()], 'model__C':[0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]} ] # Instantiate the SVM grid search svm_grid_search = GridSearchCV(estimator=my_pipeline, param_grid=svm_param_grid, cv=5, n_jobs=-1, verbose=5) # Fit the SVM grid search fitted_svm_grid_em = svm_grid_search.fit(Xm_train, em_train) # What was the best SVM? fitted_svm_grid_em.best_estimator_ print(f"The best SVM's accuracy on the training set: {fitted_svm_grid_em.score(Xm_train, em_train)*100}%") print(f"The best SVM's accuracy on the test set: {fitted_svm_grid_em.score(Xm_test, em_test)*100}%") # Pickling the best SVM for later use joblib.dump(fitted_svm_grid_em.best_estimator_, 'pickle3_male_emotion_svm.pkl') # Parameter grid for random forest (scaling is unnecessary) rf_param_grid = [ # 5 numbers of estimators * 5 max depths = 25 models {'scaler': [None], 'dim_reducer': [None], 'model': [RandomForestClassifier(n_jobs=-1)], 'model__n_estimators': np.arange(100, 501, 100), 'model__max_depth': np.arange(5, 26, 5)}, # 5 PCAs * 5 numbers of estimators * 5 max depths = 150 models {'scaler': [None], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(50, 251, 50), 'model': [RandomForestClassifier(n_jobs=-1)], 'model__n_estimators': np.arange(100, 501, 100), 'model__max_depth': np.arange(5, 26, 5)} ] # Instantiate the rf grid search rf_grid_search = GridSearchCV(estimator=my_pipeline, param_grid=rf_param_grid, cv=5, n_jobs=-1, verbose=5) # Fit the rf grid search fitted_rf_grid_em = rf_grid_search.fit(Xm_train, em_train) # What was the best rf? 
fitted_rf_grid_em.best_estimator_ print(f"The best random forest's accuracy on the training set: {fitted_rf_grid_em.score(Xm_train, em_train)*100}%") print(f"The best random forest's accuracy on the test set: {fitted_rf_grid_em.score(Xm_test, em_test)*100}%") # # Parameter grid for XGBoost (scaling is unnecessary) # xgb_param_grid = [ # # 5 numbers of estimators * 5 max depths = 25 models # {'scaler': [None], 'dim_reducer': [None], 'model': [XGBClassifier(n_jobs=-1)], 'model__n_estimators': np.arange(100, 501, 100), # 'model__max_depth': np.arange(5, 26, 5)}, # # 3 PCAs * 5 numbers of estimators * 5 max depths = 75 models # # I am trying fewer PCAs for XGBoost # {'scaler': [None], 'dim_reducer': [PCA()], 'dim_reducer__n_components': [200, 250, 300], 'model': [XGBClassifier(n_jobs=-1)], # 'model__n_estimators': np.arange(100, 501, 100), 'model__max_depth': np.arange(5, 26, 5)} # ] # # Instantiate the XGB grid search # xgb_grid_search = GridSearchCV(estimator=my_pipeline, param_grid=xgb_param_grid, cv=5, n_jobs=-1, verbose=5) # # Fit the XGB grid search # fitted_xgb_grid_em = xgb_grid_search.fit(Xm_train, em_train) ``` The above never finished so I decided to comment it out. I will try again without passing `n_jobs=-1` into `XGBClassifier()`, and with a higher number (10 instead of 5) for `verbose` in `GridSearchCV()`. ``` # Parameter grid for XGBoost (scaling is unnecessary) xgb_param_grid = [ # 5 numbers of estimators * 5 max depths = 25 models {'scaler': [None], 'dim_reducer': [None], 'model': [XGBClassifier()], 'model__n_estimators': np.arange(100, 501, 100), 'model__max_depth': np.arange(5, 26, 5)}, # 3 PCAs * 5 numbers of estimators * 5 max depths = 75 models # I am trying fewer PCAs for XGBoost {'scaler': [None], 'dim_reducer': [PCA()], 'dim_reducer__n_components': [200, 250, 300], 'model': [XGBClassifier()], 'model__n_estimators': np.arange(100, 501, 100), 'model__max_depth': np.arange(5, 26, 5)} ] # Instantiate the XGB grid search xgb_grid_search = GridSearchCV(estimator=my_pipeline, param_grid=xgb_param_grid, cv=5, n_jobs=-1, verbose=10) # Fit the XGB grid search fitted_xgb_grid_em = xgb_grid_search.fit(Xm_train, em_train) # What was the best XGB model? fitted_xgb_grid_em.best_estimator_ print(f"The best XGB model's accuracy on the training set: {fitted_xgb_grid_em.score(Xm_train, em_train)*100}%") print(f"The best XGB model's accuracy on the test set: {fitted_xgb_grid_em.score(Xm_test, em_test)*100}%") # Parameter grid for KNN knn_param_grid = [ # unscaled and scaled * 10 Ks = 20 models {'scaler': [None, StandardScaler()], 'dim_reducer': [None], 'model': [KNeighborsClassifier(n_jobs=-1)], 'model__n_neighbors': np.arange(3, 22, 2)}, # unscaled # 5 PCAs * 10 Ks = 50 models {'scaler': [None], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(50, 251, 50), 'model': [KNeighborsClassifier(n_jobs=-1)], 'model__n_neighbors': np.arange(3, 22, 2)}, # scaled # 4 PCAs * 10 Ks = 40 models {'scaler': [StandardScaler()], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(200, 351, 50), 'model': [KNeighborsClassifier(n_jobs=-1)], 'model__n_neighbors': np.arange(3, 22, 2)} ] # Instantiate the grid search knn_grid_search = GridSearchCV(estimator=my_pipeline, param_grid=knn_param_grid, cv=5, n_jobs=-1, verbose=5) # Fit the KNN grid search fitted_knn_grid_em = knn_grid_search.fit(Xm_train, em_train) # What was the best KNN model? 
fitted_knn_grid_em.best_estimator_ print(f"The best KNN model's accuracy on the training set: {fitted_knn_grid_em.score(Xm_train, em_train)*100}%") print(f"The best KNN model's accuracy on the test set: {fitted_knn_grid_em.score(Xm_test, em_test)*100}%") ``` ### Conclusions for classifying emotions for males - Of the five classifier types I tried in my grid searches, SVM had the highest accuracy on the test set (60.19%), followed by logistic regression (58.80%), XGBoost (51.39%), random forest (46.76%), and lastly, KNN (45.37%). - Based on these results, I have pickled the best SVM and logistic regression. In part 4, I will try them on a new, male-only dataset. - Except for the best KNN model, all the best models found in the grid searches had training accuracies of 100%, indicating that they overfit to the training set. - The best KNN model had a training accuracy of 76.29%, but this was still much higher than its test accuracy of 45.37%. - For the classifier types in which scaling the features matters (logistic regression, SVM, and KNN), all the best models made use of the standard scaler. - Of the five best-in-type models, random forest and KNN were the only two which made use of principal components. ___ # Building Models for Classifying Emotion for Females I will follow the same steps I took in classifying emotions for males, with one difference: This time I will not try XGBoost, due to its long computation time and comparatively low performance. ``` # Making a new dataframe that contains only female recordings ravdess_mfcc_f_df = ravdess_mfcc_df[ravdess_mfcc_df['Gender'] == 'female'].reset_index().drop('index', axis=1) ravdess_mfcc_f_df # Splitting the dataframe into features and target Xf = ravdess_mfcc_f_df.iloc[:, :-2] ef = ravdess_mfcc_f_df['Emotion'] # Splitting the data into training and test sets Xf_train, Xf_test, ef_train, ef_test = train_test_split(Xf, ef, test_size=0.3, stratify=ef, random_state=1) # Checking the shapes print(Xf_train.shape) print(Xf_test.shape) print(ef_train.shape) print(ef_test.shape) ``` Here is an initial model: ``` # Instantiate the model initial_logreg_ef = LogisticRegression() # Fit to training set initial_logreg_ef.fit(Xf_train, ef_train) # Score on training set print(f'Model accuracy on training set: {initial_logreg_ef.score(Xf_train, ef_train)*100}%') # Score on test set print(f'Model accuracy on test set: {initial_logreg_ef.score(Xf_test, ef_test)*100}%') ``` The model has overfit to the training set yet again. Interestingly, this initial accuracy on the female test set is noticeably higher than the initial accuracy on the male test set, which was 56.48%. Again, let's evaluate the model further using a confusion matrix and a classification report. ``` # Having initial_logreg_ef make predictions based on the test set features ef_pred = initial_logreg_ef.predict(Xf_test) # Building the confusion matrix as a dataframe emotions = ['angry', 'calm', 'disgusted', 'fearful', 'happy', 'neutral', 'sad', 'surprised'] ef_confusion_df = pd.DataFrame(confusion_matrix(ef_test, ef_pred)) ef_confusion_df.columns = [f'Predicted {emotion}' for emotion in emotions] ef_confusion_df.index = [f'Actual {emotion}' for emotion in emotions] ef_confusion_df # Classification report print(classification_report(ef_test, ef_pred)) ``` It appears that the initial model is strongest at classifying calm voice clips, and weakest at classifying fearful voice clips. In order of strongest to weakest: calm, neutral, happy, surprised, angry, disgusted, sad, and fearful. 
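The ranking above was read off the classification report by eye; it can also be extracted programmatically. A small sketch (ranking by per-class F1-score, which is one reasonable choice) using `ef_test` and `ef_pred` from the cells above:

```
# A minimal sketch: rank emotions by per-class F1-score from the classification report.
# Uses ef_test and ef_pred from the cells above.
from sklearn.metrics import classification_report

report = classification_report(ef_test, ef_pred, output_dict=True)
per_class_f1 = {label: scores['f1-score']
                for label, scores in report.items()
                if label not in ('accuracy', 'macro avg', 'weighted avg')}

for label, f1 in sorted(per_class_f1.items(), key=lambda item: item[1], reverse=True):
    print(f'{label}: F1 = {f1:.2f}')
```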
There is not as much variance in performance across the emotions when compared to that of the initial model for male emotions. Although I found that none of the best male emotion classifiers made use of PCA, I will still examine the explained variance ratios like I did before. ``` # PCA on unscaled features # Instantiate PCA and fit to Xf_train pca = PCA().fit(Xf_train) # Transform Xf_train Xf_train_pca = pca.transform(Xf_train) # Transform Xf_test Xf_test_pca = pca.transform(Xf_test) # Standard scaling # Instantiate the scaler and fit to Xf_train scaler = StandardScaler().fit(Xf_train) # Transform Xf_train Xf_train_scaled = scaler.transform(Xf_train) # Transform Xf_test Xf_test_scaled = scaler.transform(Xf_test) # PCA on scaled features # Instantiate PCA and fit to Xf_train_scaled pca_scaled = PCA().fit(Xf_train_scaled) # Transform Xf_train_scaled Xf_train_scaled_pca = pca_scaled.transform(Xf_train_scaled) # Transform Xf_test_scaled Xf_test_scaled_pca = pca_scaled.transform(Xf_test_scaled) # Plot the explained variance ratios plt.subplots(1, 2, figsize = (15, 5)) # Unscaled plt.subplot(1, 2, 1) plt.bar(np.arange(1, len(pca.explained_variance_ratio_)+1), pca.explained_variance_ratio_) plt.xlabel('Principal Component') plt.ylabel('Explained Variance Ratio') plt.title('PCA on Unscaled Features') plt.ylim(top = 0.5) # Equalizing the y-axes # Scaled plt.subplot(1, 2, 2) plt.bar(np.arange(1, len(pca_scaled.explained_variance_ratio_)+1), pca_scaled.explained_variance_ratio_) plt.xlabel('Principal Component') plt.ylabel('Explained Variance Ratio') plt.title('PCA on Scaled Features') plt.ylim(top = 0.5) # Equalizing the y-axes plt.tight_layout() plt.show() ``` These are the same trends I saw previously for male emotions. How much variance is explained by certain numbers of unscaled and scaled principal components? This will help me determine how many principal components to try in my grid searches later. ``` # Unscaled num_components = [503, 451, 401, 351, 301, 251, 201, 151, 101, 51] for n in num_components: print(f'Variance explained by {n-1} unscaled principal components: {np.round(np.sum(pca.explained_variance_ratio_[:n])*100, 2)}%') # Scaled num_components = [503, 451, 401, 351, 301, 251, 201, 151, 101, 51] for n in num_components: print(f'Variance explained by {n-1} scaled principal components: {np.round(np.sum(pca_scaled.explained_variance_ratio_[:n])*100, 2)}%') ``` Like before, I will now do a grid search for each classifier type, with five-fold cross-validation to optimize the hyperparameters. 
``` # Cache cachedir = mkdtemp() # Pipeline (these values are placeholders) my_pipeline = Pipeline(steps=[('scaler', StandardScaler()), ('dim_reducer', PCA()), ('model', LogisticRegression())], memory=cachedir) # Parameter grid for log reg logreg_param_grid = [ # l1 without PCA # unscaled and scaled * 9 regularization strengths = 18 models {'scaler': [None, StandardScaler()], 'dim_reducer': [None], 'model': [LogisticRegression(penalty='l1', n_jobs=-1)], 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]}, # l1 unscaled with PCA # 6 PCAs * 9 regularization strengths = 54 models {'scaler': [None], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(50, 301, 50), 'model': [LogisticRegression(penalty='l1', n_jobs=-1)], 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]}, # l1 scaled with PCA # 4 PCAs * 9 regularization strengths = 36 models {'scaler': [StandardScaler()], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(200, 351, 50), 'model': [LogisticRegression(penalty='l1', n_jobs=-1)], 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]}, # l2 (default) without PCA # unscaled and scaled * 9 regularization strengths = 18 models {'scaler': [None, StandardScaler()], 'dim_reducer': [None], 'model': [LogisticRegression(solver='lbfgs', n_jobs=-1)], 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]}, # l2 (default) unscaled with PCA # 6 PCAs * 9 regularization strengths = 54 models {'scaler': [None], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(50, 301, 50), 'model': [LogisticRegression(solver='lbfgs', n_jobs=-1)], 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]}, # l2 (default) scaled with PCA # 4 PCAs * 9 regularization strengths = 36 models {'scaler': [StandardScaler()], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(200, 351, 50), 'model': [LogisticRegression(solver='lbfgs', n_jobs=-1)], 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]} ] # Instantiate the log reg grid search logreg_grid_search = GridSearchCV(estimator=my_pipeline, param_grid=logreg_param_grid, cv=5, n_jobs=-1, verbose=5) # Fit the log reg grid search fitted_logreg_grid_ef = logreg_grid_search.fit(Xf_train, ef_train) # What was the best log reg? fitted_logreg_grid_ef.best_estimator_ print(f"The best log reg's accuracy on the training set: {fitted_logreg_grid_ef.score(Xf_train, ef_train)*100}%") print(f"The best log reg's accuracy on the test set: {fitted_logreg_grid_ef.score(Xf_test, ef_test)*100}%") # Parameter grid for SVM svm_param_grid = [ # unscaled and scaled * 9 regularization strengths = 18 models {'scaler': [None, StandardScaler()], 'dim_reducer': [None], 'model': [SVC()], 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]}, # unscaled # 6 PCAs * 9 regularization strengths = 54 models {'scaler': [None], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(50, 301, 50), 'model': [SVC()], 'model__C':[0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]}, # scaled # 4 PCAs * 9 regularization strengths = 36 models {'scaler': [StandardScaler()], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(200, 351, 50), 'model': [SVC()], 'model__C':[0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]} ] # Instantiate the SVM grid search svm_grid_search = GridSearchCV(estimator=my_pipeline, param_grid=svm_param_grid, cv=5, n_jobs=-1, verbose=5) # Fit the SVM grid search fitted_svm_grid_ef = svm_grid_search.fit(Xf_train, ef_train) # What was the best SVM? 
fitted_svm_grid_ef.best_estimator_ print(f"The best SVM's accuracy on the training set: {fitted_svm_grid_ef.score(Xf_train, ef_train)*100}%") print(f"The best SVM's accuracy on the test set: {fitted_svm_grid_ef.score(Xf_test, ef_test)*100}%") # Parameter grid for random forest (scaling is unnecessary) rf_param_grid = [ # 5 numbers of estimators * 5 max depths = 25 models {'scaler': [None], 'dim_reducer': [None], 'model': [RandomForestClassifier(n_jobs=-1)], 'model__n_estimators': np.arange(100, 501, 100), 'model__max_depth': np.arange(5, 26, 5)}, # 6 PCAs * 5 numbers of estimators * 5 max depths = 150 models {'scaler': [None], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(50, 301, 50), 'model': [RandomForestClassifier(n_jobs=-1)], 'model__n_estimators': np.arange(100, 501, 100), 'model__max_depth': np.arange(5, 26, 5)} ] # Instantiate the rf grid search rf_grid_search = GridSearchCV(estimator=my_pipeline, param_grid=rf_param_grid, cv=5, n_jobs=-1, verbose=5) # Fit the rf grid search fitted_rf_grid_ef = rf_grid_search.fit(Xf_train, ef_train) # What was the best rf? fitted_rf_grid_ef.best_estimator_ print(f"The best random forest's accuracy on the training set: {fitted_rf_grid_ef.score(Xf_train, ef_train)*100}%") print(f"The best random forest's accuracy on the test set: {fitted_rf_grid_ef.score(Xf_test, ef_test)*100}%") # Parameter grid for KNN knn_param_grid = [ # unscaled and scaled * 10 Ks = 20 models {'scaler': [None, StandardScaler()], 'dim_reducer': [None], 'model': [KNeighborsClassifier(n_jobs=-1)], 'model__n_neighbors': np.arange(3, 22, 2)}, # unscaled # 6 PCAs * 10 Ks = 60 models {'scaler': [None], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(50, 301, 50), 'model': [KNeighborsClassifier(n_jobs=-1)], 'model__n_neighbors': np.arange(3, 22, 2)}, # scaled # 4 PCAs * 10 Ks = 40 models {'scaler': [StandardScaler()], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(200, 351, 50), 'model': [KNeighborsClassifier(n_jobs=-1)], 'model__n_neighbors': np.arange(3, 22, 2)} ] # Instantiate the grid search knn_grid_search = GridSearchCV(estimator=my_pipeline, param_grid=knn_param_grid, cv=5, n_jobs=-1, verbose=5) # Fit the KNN grid search fitted_knn_grid_ef = knn_grid_search.fit(Xf_train, ef_train) # What was the best KNN model? fitted_knn_grid_ef.best_estimator_ print(f"The best KNN model's accuracy on the training set: {fitted_knn_grid_ef.score(Xf_train, ef_train)*100}%") print(f"The best KNN model's accuracy on the test set: {fitted_knn_grid_ef.score(Xf_test, ef_test)*100}%") ``` ### Conclusions for classifying emotions for females - Of the four classifier types I tried in my grid searches, logistic regression had the highest accuracy on the test set (71.29%), followed by SVM (70.83%), random forest (61.57%), and lastly, KNN (55.56%). - Except for the best KNN model, all the best models found in the grid searches had training accuracies of 100%, indicating that they overfit to the training set. - The best KNN model had a training accuracy of 59.33%, which was not much higher than its test accuracy of 55.56%. A much wider gap was found in the best KNN model for male emotions. - For the classifier types in which scaling the features matters (logistic regression, SVM, and KNN), the best logistic regression and SVM models made use of the standard scaler, while the best KNN model did not. - All the best-in-type models made use of principal components, except SVM. 
- Interestingly, the female emotion classifiers achieved higher accuracies than their male counterparts. It appears that for the RAVDESS dataset, the differences between female emotions are greater than the differences between male emotions.
- Based on this alone, I cannot extrapolate and conclude that women are more socially expressive than men, although this is an interesting thought.
github_jupyter
```
! pip install opencv-python

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import cv2

# tensorflow packages
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image

# Face Emotion Recognition
# Here I am using my trained model, which was trained and saved as an h5 file
faceDetection_model = r'D:\pavi\DeepLearningProjects\Face_Emosion_Recognition\pretrained_model\Face_Detection_TrainedModel\haarcascade_frontalface_default.xml'
Emotion_Detction_model = r'D:\pavi\DeepLearningProjects\Face_Emosion_Recognition\pretrained_model\Face_Emotion_model\FER_vggnet.h5'

vggnet = load_model(Emotion_Detction_model)
vggnet.summary()

# defining the emotion classes for classification
classes = np.array(("Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"))

# video capturing and classifying
faceCascade = cv2.CascadeClassifier(faceDetection_model)
video_capture = cv2.VideoCapture(0)

while True:
    ret, frame = video_capture.read()
    if not ret:
        # stop if no frame could be read from the camera
        break
    cv2.imshow('Original Video', frame)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    face = faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)

    # draw a rectangle around each face and cut out the face only
    for (x, y, w, h) in face:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
        face_img = gray[y:(y + h), x:(x + w)]
        # resize into a separate variable so the rectangle coordinate x is not overwritten
        roi = cv2.resize(face_img, (48, 48), interpolation=cv2.INTER_AREA)
        label_position = (x - 10, y - 10)
        if np.sum(roi) != 0:
            # preprocessing
            roi = roi.astype('float') / 255.0
            roi = image.img_to_array(roi)
            roi = np.expand_dims(roi, axis=0)
            # face_img = face_img.reshape(48,48)
            # prediction
            p = vggnet.predict(roi)
            a = np.argmax(p, axis=1)
            print('prediction', classes[a])
            label = str(classes[a][0])
            print(label)
            fontScale = 0.6
            thickness = 3
            cv2.putText(frame, label, label_position, cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale, (0, 255, 0), thickness, cv2.LINE_AA)
        else:
            cv2.putText(frame, 'No Face Detection', label_position, cv2.FONT_HERSHEY_SIMPLEX,
                        0.6, (0, 255, 0), 3, cv2.LINE_AA)
        # cv2.imshow('cropped image', face_img)

    # display the resulting frame
    cv2.imshow('Face Detected Video', frame)

    # break the capturing
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
```
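If no webcam is available, the same detection and classification steps can be exercised on a single still image. This is only a sketch: `test_face.jpg` is a hypothetical file name, and it reuses the model and cascade paths defined above.

```
# A minimal sketch: run the same detection + classification on one still image
# instead of the webcam stream. 'test_face.jpg' is a hypothetical example file.
import cv2
import numpy as np
from tensorflow.keras.models import load_model

faceCascade = cv2.CascadeClassifier(faceDetection_model)   # path defined above
vggnet = load_model(Emotion_Detction_model)                # path defined above
classes = np.array(("Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"))

img = cv2.imread('test_face.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
for (x, y, w, h) in faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5):
    roi = cv2.resize(gray[y:y + h, x:x + w], (48, 48), interpolation=cv2.INTER_AREA)
    roi = np.expand_dims(roi.astype('float') / 255.0, axis=(0, -1))   # shape (1, 48, 48, 1)
    print('prediction:', classes[np.argmax(vggnet.predict(roi), axis=1)][0])
```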
github_jupyter
``` from mplsoccer import Pitch, VerticalPitch from mplsoccer.dimensions import valid, size_varies import matplotlib.pyplot as plt import numpy as np import random np.random.seed(42) ``` # Test five points are same in both orientations ``` for pitch_type in valid: if pitch_type in size_varies: kwargs = {'pitch_length': 105, 'pitch_width': 68} else: kwargs = {} pitch = Pitch(pitch_type=pitch_type, line_zorder=2, **kwargs) pitch_vertical = VerticalPitch(pitch_type=pitch_type, line_zorder=2, **kwargs) fig, ax = plt.subplots(ncols=2, figsize=(12, 7)) fig.suptitle(pitch_type) x = np.random.uniform(low=pitch.dim.pitch_extent[0], high=pitch.dim.pitch_extent[1], size=5) y = np.random.uniform(low=pitch.dim.pitch_extent[2], high=pitch.dim.pitch_extent[3], size=5) pitch.draw(ax[0]) pitch.scatter(x, y, ax=ax[0], color='red', zorder=3) stats = pitch.bin_statistic(x, y) stats['statistic'][stats['statistic'] == 0] = np.nan hm = pitch.heatmap(stats, ax=ax[0]) txt = pitch.label_heatmap(stats, color='white', ax=ax[0]) pitch_vertical.draw(ax[1]) pitch_vertical.scatter(x, y, ax=ax[1], color='red', zorder=3) stats_vertical = pitch_vertical.bin_statistic(x, y) stats_vertical['statistic'][stats_vertical['statistic'] == 0] = np.nan hm_vertical = pitch_vertical.heatmap(stats_vertical, ax=ax[1]) txt_vertical = pitch_vertical.label_heatmap(stats, color='white', ax=ax[1]) ``` # Test five points are same in both orientations - positional ``` for pitch_type in valid: if pitch_type in size_varies: kwargs = {'pitch_length': 105, 'pitch_width': 68} else: kwargs = {} pitch = Pitch(pitch_type=pitch_type, line_zorder=2, **kwargs) pitch_vertical = VerticalPitch(pitch_type=pitch_type, line_zorder=2, **kwargs) fig, ax = plt.subplots(ncols=2, figsize=(12, 7)) fig.suptitle(pitch_type) x = np.random.uniform(low=pitch.dim.pitch_extent[0], high=pitch.dim.pitch_extent[1], size=5) y = np.random.uniform(low=pitch.dim.pitch_extent[2], high=pitch.dim.pitch_extent[3], size=5) pitch.draw(ax[0]) pitch.scatter(x, y, ax=ax[0], color='red', zorder=3) stats = pitch.bin_statistic_positional(x, y) hm = pitch.heatmap_positional(stats, ax=ax[0]) txt = pitch.label_heatmap(stats, color='white', ax=ax[0]) pitch_vertical.draw(ax[1]) pitch_vertical.scatter(x, y, ax=ax[1], color='red', zorder=3) stats_vertical = pitch_vertical.bin_statistic_positional(x, y) hm_vertical = pitch_vertical.heatmap_positional(stats_vertical, ax=ax[1]) txt_vertical = pitch_vertical.label_heatmap(stats, color='white', ax=ax[1]) ``` # Test edges - positional x ``` for pitch_type in valid: if pitch_type in size_varies: kwargs = {'pitch_length': 105, 'pitch_width': 68} else: kwargs = {} pitch = Pitch(pitch_type=pitch_type, line_zorder=2, pitch_color='None', axis=True, label=True, **kwargs) pitch_vertical = VerticalPitch(pitch_type=pitch_type, line_zorder=2, pitch_color='None', axis=True, label=True, **kwargs) fig, ax = plt.subplots(ncols=2, figsize=(12, 7)) fig.suptitle(pitch_type) x = pitch.dim.positional_x y = np.random.uniform(low=pitch.dim.pitch_extent[2], high=pitch.dim.pitch_extent[3], size=x.size) pitch.draw(ax[0]) pitch.scatter(x, y, ax=ax[0], color='red', zorder=3) stats = pitch.bin_statistic_positional(x, y) hm = pitch.heatmap_positional(stats, ax=ax[0], edgecolors='yellow') txt = pitch.label_heatmap(stats, color='white', ax=ax[0]) pitch_vertical.draw(ax[1]) pitch_vertical.scatter(x, y, ax=ax[1], color='red', zorder=3) stats_vertical = pitch_vertical.bin_statistic_positional(x, y) hm_vertical = pitch_vertical.heatmap_positional(stats_vertical, ax=ax[1], 
edgecolors='yellow') txt_vertical = pitch_vertical.label_heatmap(stats_vertical, color='white', ax=ax[1]) ``` # Test edges - positional y ``` for pitch_type in valid: if pitch_type in size_varies: kwargs = {'pitch_length': 105, 'pitch_width': 68} else: kwargs = {} pitch = Pitch(pitch_type=pitch_type, line_zorder=2, pitch_color='None', axis=True, label=True, **kwargs) pitch_vertical = VerticalPitch(pitch_type=pitch_type, line_zorder=2, pitch_color='None', axis=True, label=True, **kwargs) fig, ax = plt.subplots(ncols=2, figsize=(12, 7)) fig.suptitle(pitch_type) y = pitch.dim.positional_y x = np.random.uniform(low=pitch.dim.pitch_extent[0], high=pitch.dim.pitch_extent[1], size=y.size) pitch.draw(ax[0]) pitch.scatter(x, y, ax=ax[0], color='red', zorder=3) stats = pitch.bin_statistic_positional(x, y) hm = pitch.heatmap_positional(stats, ax=ax[0], edgecolors='yellow') txt = pitch.label_heatmap(stats, color='white', ax=ax[0]) pitch_vertical.draw(ax[1]) pitch_vertical.scatter(x, y, ax=ax[1], color='red', zorder=3) stats_vertical = pitch_vertical.bin_statistic_positional(x, y) hm_vertical = pitch_vertical.heatmap_positional(stats_vertical, ax=ax[1], edgecolors='yellow') txt_vertical = pitch_vertical.label_heatmap(stats_vertical, color='white', ax=ax[1]) ```
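Beyond the visual comparison, a quick numerical sanity check can be added at the end (a sketch, assuming the `pitch`, `pitch_vertical`, `x` and `y` objects from the last loop iteration are still in memory): binning the same points should give the same total count in both orientations.

```
# A rough sanity check: the same points binned on the horizontal and vertical pitch
# should give the same total count, whatever pitch type the last iteration used.
import numpy as np

stats_h = pitch.bin_statistic(x, y)
stats_v = pitch_vertical.bin_statistic(x, y)
print(np.nansum(stats_h['statistic']), np.nansum(stats_v['statistic']))
```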
github_jupyter
# Pipelines for classifiers using Balanced Accuracy For each dataset, classifier and folds: - Robust scaling - 2, 3, 5, 10-fold outer CV - balanced accurary as score We will use folders *datasets2* and *results2*. ``` %reload_ext autoreload %autoreload 2 %matplotlib inline # remove warnings import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) import numpy as np import pandas as pd import time import matplotlib.pyplot as plt from sklearn.pipeline import Pipeline from sklearn.model_selection import cross_val_score, GridSearchCV, StratifiedKFold, LeaveOneOut from sklearn.metrics import confusion_matrix,accuracy_score, roc_auc_score,f1_score, recall_score, precision_score from sklearn.utils import class_weight from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression, LassoCV from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier from xgboost import XGBClassifier from sklearn.svm import SVC from sklearn.gaussian_process.kernels import RBF from sklearn.svm import LinearSVC from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler from sklearn.feature_selection import RFECV, VarianceThreshold, SelectKBest, chi2 from sklearn.feature_selection import SelectFromModel, SelectPercentile, f_classif import os !ls ./datasets2/* !ls ./results2/* # get list of files in datasets2 = all datasets dsList = os.listdir('./datasets2') print('--> Found', len(dsList), 'dataset files') # create a list with all output variable names outVars = [] for eachdsFile in dsList: outVars.append( (eachdsFile[:-4])[3:] ) ``` ### Define script parameters ``` # define list of folds foldTypes = [2,3,5,10] # define a label for output files targetName = '_Outer' seed = 42 ``` ### Function definitions ``` def set_weights(y_data, option='balanced'): """Estimate class weights for umbalanced dataset If ‘balanced’, class weights will be given by n_samples / (n_classes * np.bincount(y)). If a dictionary is given, keys are classes and values are corresponding class weights. 
If None is given, the class weights will be uniform """ cw = class_weight.compute_class_weight(option, np.unique(y_data), y_data) w = {i:j for i,j in zip(np.unique(y_data), cw)} return w def getDataFromDataset(sFile, OutVar): # read details file print('\n-> Read dataset', sFile) df = pd.read_csv(sFile) #df = feather.read_dataframe(sFile) print('Shape', df.shape) # print(list(df.columns)) # select X and Y ds_y = df[OutVar] ds_X = df.drop(OutVar,axis = 1) Xdata = ds_X.values # get values of features Ydata = ds_y.values # get output values print('Shape X data:', Xdata.shape) print('Shape Y data:',Ydata.shape) # return data for X and Y, feature names as list return (Xdata, Ydata, list(ds_X.columns)) def Pipeline_OuterCV(Xdata, Ydata, label = 'my', class_weights = {0: 1, 1: 1}, folds = 3, seed = 42): # inputs: # data for X, Y; a label about data, number of folds, seeed # default: 3-fold CV # define classifiers names = ['KNN', 'SVM linear', 'SVM', 'LR', 'DT', 'RF', 'XGB'] classifiers = [KNeighborsClassifier(3), SVC(kernel="linear",random_state=seed,gamma='scale'), SVC(kernel = 'rbf', random_state=seed,gamma='auto'), LogisticRegression(solver='lbfgs',random_state=seed), DecisionTreeClassifier(random_state = seed), RandomForestClassifier(n_estimators=50,n_jobs=-1,random_state=seed), XGBClassifier(n_jobs=-1,seed=seed) ] # results dataframe: each column for a classifier df_res = pd.DataFrame(columns=names) # build each classifier print('* Building scaling+feature selection+outer '+str(folds)+'-fold CV for '+str(len(names))+' classifiers:', str(names)) total = time.time() # define a fold-CV for all the classifier outer_cv = StratifiedKFold(n_splits=folds,shuffle=True,random_state=seed) # use each ML for name, clf in zip(names, classifiers): start = time.time() # create pipeline: scaler + classifier estimators = [] # SCALER estimators.append(('Scaler', RobustScaler() )) # add Classifier estimators.append(('Classifier', clf)) # create pipeline model = Pipeline(estimators) # evaluate pipeline scores = cross_val_score(model, Xdata, Ydata, cv=outer_cv, scoring='balanced_accuracy', n_jobs=-1) df_res[name] = scores print('%s, MeanScore=%0.2f, Time:%0.1f mins' % (name, scores.mean(), (time.time() - start)/60)) # save results resFile = './results2/'+str(label)+str(targetName)+'_Outer-'+str(folds)+'-foldCV.csv' df_res.to_csv(resFile, index=False) print('* Scores saved', resFile) print('Total time:', (time.time() - total)/60, ' mins') # return scores for all classifiers as dataframe (each column a classifier) return df_res ``` ### Calculations ``` df_results = None # all results # apply MLs to each data for OutVar in outVars: sFile = './datasets2/ds.'+str(OutVar)+'.csv' # get data from file Xdata, Ydata, Features = getDataFromDataset(sFile,OutVar) # Calculate class weights class_weights = set_weights(Ydata) print("Class weights = ", class_weights) # try different folds for each subset -> box plots for folds in foldTypes: # calculate outer CV for different binary classifiers df_fold = Pipeline_OuterCV(Xdata, Ydata, label = OutVar, class_weights = class_weights, folds = folds, seed = seed) df_fold['Dataset'] = OutVar df_fold['folds'] = folds # add each result to a summary dataframe df_results = pd.concat([df_results,df_fold]) # save the results to file resFile = './results2/'+'ML_Outer-n-foldCV.csv' df_results.to_csv(resFile, index=False) ``` ### Mean scores ``` # calculate means of ACC scores for each ML df_means =df_results.groupby(['Dataset','folds'], as_index = False).mean()[['Dataset', 'folds','KNN', 'SVM 
linear', 'SVM', 'LR', 'DT', 'RF', 'XGB']] # save averaged values resFile_means = './results2/'+'ML_Outer-n-foldCV_means.csv' df_means.to_csv(resFile_means, index=False) ``` ### Best ML results ``` # find the maximum value rows for all MLs bestMLs = df_means[['KNN', 'SVM linear', 'SVM', 'LR', 'DT', 'RF', 'XGB']].idxmax() print(bestMLs) # get the best score by ML method for ML in ['KNN', 'SVM linear', 'SVM', 'LR', 'DT', 'RF', 'XGB']: print(ML, '\t', list(df_means.iloc[df_means[ML].idxmax()][['Dataset', 'folds', ML]])) # Add a new column with the original output name (get first 2 characters from Dataset column) getOutOrig = [] for each in df_means['Dataset']: getOutOrig.append(each[:2]) df_means['Output'] = getOutOrig df_means # save new results including extra column with output variable name resFile_means2 = './results2/'+'ML_Outer-n-foldCV_means2.csv' df_means.to_csv(resFile_means2, index=False) ``` ### Get the best ML for each type of output We are checking all 2, 3, 5, 10-fold CV results: ``` for outName in list(set(df_means['Output'])): print('*********************') print('OUTPUT =', outName) df_sel = df_means[df_means['Output'] == outName].copy() for ML in ['KNN', 'SVM linear', 'SVM', 'LR', 'DT', 'RF', 'XGB']: print(ML, '\t', list(df_sel.loc[df_sel[ML].idxmax(),:][['Dataset', 'folds', ML]])) df_sel.loc[df_sel[ML].idxmax(),:] ``` ### Get the best ML for each type of output for 10-fold CV ``` df_10fold = df_means[df_means['folds']==10].copy() df_10fold.head() for outName in list(set(df_10fold['Output'])): print('*********************') print('OUTPUT =', outName) df_sel = df_10fold[df_10fold['Output'] == outName].copy() print('MAX =',df_sel[['KNN', 'SVM linear', 'SVM', 'LR', 'DT', 'RF', 'XGB']].max().max()) for ML in ['KNN', 'SVM linear', 'SVM', 'LR', 'DT', 'RF', 'XGB']: print(ML, '\t', list(df_sel.loc[df_sel[ML].idxmax(),:][['Dataset', 'folds', ML]])) ``` ### Get the best ML for each type of output for 5-fold CV ``` df_5fold = df_means[df_means['folds']==5].copy() df_5fold.head() for outName in list(set(df_5fold['Output'])): print('*********************') print('OUTPUT =', outName) df_sel = df_5fold[df_5fold['Output'] == outName].copy() print('MAX =',df_sel[['KNN', 'SVM linear', 'SVM', 'LR', 'DT', 'RF', 'XGB']].max().max()) for ML in ['KNN', 'SVM linear', 'SVM', 'LR', 'DT', 'RF', 'XGB']: print(ML, '\t', list(df_sel.loc[df_sel[ML].idxmax(),:][['Dataset', 'folds', ML]])) ``` Get only the best values from all MLs for 5- and 10-fold CV: ``` print('5-fold CV') for outName in list(set(df_5fold['Output'])): df_sel = df_5fold[df_5fold['Output'] == outName].copy() print(outName,df_sel[['KNN', 'SVM linear', 'SVM', 'LR', 'DT', 'RF', 'XGB']].max().max()) print('10-fold CV') for outName in list(set(df_10fold['Output'])): df_sel = df_10fold[df_10fold['Output'] == outName].copy() print(outName,df_sel[['KNN', 'SVM linear', 'SVM', 'LR', 'DT', 'RF', 'XGB']].max().max()) ``` **Conclusion**: even with **5,10-CV** we are able to obtain classification models with **ACC > 0.70** and in one case with **ACC > 0.81**.
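The calculation loop's comment mentions box plots; here is one possible sketch (assuming `df_results` from the calculations section is available) of how the per-fold balanced-accuracy scores could be visualized, one box per classifier:

```
# A minimal sketch: box plots of the outer-CV scores per classifier,
# using the df_results dataframe built in the calculations section.
import matplotlib.pyplot as plt

ml_cols = ['KNN', 'SVM linear', 'SVM', 'LR', 'DT', 'RF', 'XGB']
fig, ax = plt.subplots(figsize=(10, 5))
df_results[ml_cols].boxplot(ax=ax)
ax.set_ylabel('Balanced accuracy (outer CV folds)')
ax.set_title('Outer cross-validation scores by classifier (all datasets and fold settings pooled)')
plt.show()
```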
github_jupyter
|<img style="float:left;" src="http://pierreproulx.espaceweb.usherbrooke.ca/images/usherb_transp.gif" > |Pierre Proulx, Eng., Professor|
|:---|:---|
|Département de génie chimique et de génie biotechnologique |** GCH200-Phénomènes d'échanges I **|

### Section 10.6, Heat conduction in a sphere

```
#
# Pierre Proulx
#
# Set up the display and the symbolic computation tools
#
import sympy as sp
from IPython.display import *
sp.init_printing(use_latex=True)
%matplotlib inline

# Parameters, variables and functions
r, k01, k12, k23, h0, h3 = sp.symbols('r k_1 k_2 k_3 h_0 h_3')
r0, r1, r2, r3, Ta, Tb = sp.symbols('r_0 r_1 r_2 r_3 T_a T_b')
q = sp.symbols('q')
T = sp.Function('T')(r)

eq1 = sp.Eq(k01/r**2*sp.Derivative(r**2*sp.Derivative(T, r), r), 0)
eq2 = sp.Eq(k12/r**2*sp.Derivative(r**2*sp.Derivative(T, r), r), 0)
eq3 = sp.Eq(k23/r**2*sp.Derivative(r**2*sp.Derivative(T, r), r), 0)

T1 = sp.dsolve(eq1).rhs
T2 = sp.dsolve(eq2)
T2 = T2.subs(sp.symbols('C1'), sp.symbols('C3'))
T2 = T2.subs(sp.symbols('C2'), sp.symbols('C4')).rhs
T3 = sp.dsolve(eq3)
T3 = T3.subs(sp.symbols('C1'), sp.symbols('C5'))
T3 = T3.subs(sp.symbols('C2'), sp.symbols('C6')).rhs
display(T1)
display(T2)
display(T3)

# Now apply the boundary conditions to find the 6 constants
cl1 = sp.Eq(T1.subs(r, r1) - T2.subs(r, r1), 0)     # equal temperatures at the interior interfaces
cl2 = sp.Eq(T2.subs(r, r2) - T3.subs(r, r2), 0)
# equal fluxes at the interior interfaces
cl3 = sp.Eq(k01*T1.diff(r).subs(r, r1) - k12*T2.diff(r).subs(r, r1), 0)
cl4 = sp.Eq(k12*T2.diff(r).subs(r, r2) - k23*T3.diff(r).subs(r, r2), 0)
# fluxes given by Newton's law of cooling at the walls
cl5 = sp.Eq(-k01*T1.diff(r).subs(r, r0) + h0*(T1.subs(r, r0) - Ta), 0)
cl6 = sp.Eq(-k23*T3.diff(r).subs(r, r3) + h3*(Tb - T3.subs(r, r3)), 0)
constantes = sp.solve((cl1, cl2, cl3, cl4, cl5, cl6), sp.symbols('C1 C2 C3 C4 C5 C6'))
T1 = T1.subs(constantes)
T2 = T2.subs(constantes)
T3 = T3.subs(constantes)

dico = {'k_1': 4, 'k_2': 25, 'k_3': 1,
        'h_0': 100, 'h_3': 20, 'r_0': 0.020, 'r_1': 0.025, 'r_2': 0.026, 'r_3': 0.035, 'T_a': 100, 'T_b': 20}
T1p = T1.subs(dico)
T2p = T2.subs(dico)
T3p = T3.subs(dico)

#
# Compute the heat flow rates at surfaces 0 and 3 (they must be equal) (watts)
# Note: for a sphere the surface area is 4*pi*r**2
#
taux3 = (h3*(T3 - Tb)*4*sp.pi*r3**2).subs(dico)     # to put the numerical values into
taux0 = (h0*(Ta - T1)*4*sp.pi*r0**2).subs(dico)     # the symbolic expression, use subs(dico)
#
print(taux3.subs(r, r3.subs(dico)), taux0.subs(r, r0.subs(dico)))

import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = 12, 10
# plt.figure(figsize=(12,10))
p = sp.plot((T1p, (r, r0.subs(dico), r1.subs(dico))),
            (T2p, (r, r1.subs(dico), r2.subs(dico))),
            (T3p, (r, r2.subs(dico), r3.subs(dico))),
            legend=True, ylabel='T(r)', xlabel='r', show=False)   # do not display yet
p[0].line_color = 'red'
p[0].label = 'from r = r_0 to r = r_1'
p[1].line_color = 'black'
p[1].label = 'from r = r_1 to r = r_2'
p[2].line_color = 'green'
p[2].label = 'from r = r_2 to r = r_3'
p.show()      # now we are ready to display
```
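As a cross-check on the printed heat flow (a standard result, not computed in the cell above): the steady-state rate through the composite spherical shell can also be written as the overall temperature difference divided by a sum of thermal resistances, a film resistance at each surface plus a conduction resistance through each shell,

$$
Q = \frac{T_a - T_b}{\dfrac{1}{4\pi r_0^2 h_0} + \dfrac{1}{4\pi k_1}\left(\dfrac{1}{r_0}-\dfrac{1}{r_1}\right) + \dfrac{1}{4\pi k_2}\left(\dfrac{1}{r_1}-\dfrac{1}{r_2}\right) + \dfrac{1}{4\pi k_3}\left(\dfrac{1}{r_2}-\dfrac{1}{r_3}\right) + \dfrac{1}{4\pi r_3^2 h_3}}
$$

Evaluating this expression with the numerical values in `dico` should reproduce the heat flow printed above.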
github_jupyter
# 1-1 Intro Python Practice ## Getting started with Python in Jupyter Notebooks ### notebooks, comments, print(), type(), addition, errors and art <font size="5" color="#00A0B2" face="verdana"> <B>Student will be able to</B></font> - use Python 3 in Jupyter notebooks - write working code using `print()` and `#` comments - write working code using `type()` and variables - combine strings using string addition (+) - add numbers in code (+) - troubleshoot errors - create character art # &nbsp; >**note:** the **[ ]** indicates student has a task to complete >**reminder:** to run code and save changes: student should upload or clone a copy of notebooks #### notebook use - [ ] insert a **code cell** below - [ ] enter the following Python code, including the comment: ```python # [ ] print 'Hello!' and remember to save notebook! print('Hello!') ``` Then run the code - the output should be: `Hello!` #### run the cell below - [ ] use **Ctrl + Enter** - [ ] use **Shift + Enter** ``` print('watch for the cat') ``` #### Student's Notebook editing - [ ] Edit **this** notebook Markdown cell replacing the word "Student's" above with your name - [ ] Run the cell to display the formatted text - [ ] Run any 'markdown' cells that are in edit mode, so they are easier to read #### [ ] convert \*this\* cell from markdown to a code cell, then run it print('Run as a code cell') ## # comments create a code comment that identifies this notebook, containing your name and the date #### use print() to - [ ] print [**your_name**] - [ ] print **is using python!** ``` # [ ] print your name # [ ] print "is using Python" ``` Output above should be: `Your Name is using Python!` #### use variables in print() - [ ] create a variable **your_name** and assign it a string containing your name - [ ] print **your_name** ``` # [ ] create a variable your_name and assign it a sting containing your name #[ ] print your_name ``` #### create more string variables - **[ ]** create variables as directed below - **[ ]** print the variables ``` # [ ] create variables and assign values for: favorite_song, shoe_size, lucky_number # [ ] print the value of each variable favorite_song, shoe_size, and lucky_number ``` #### use string addition - **[ ]** print the above string variables (favorite_song, shoe_size, lucky_number) combined with a description by using **string addition** >for example favorite_song displayed as: `favorite song is happy birthday` ``` # [ ] print favorite_song with description # [ ] print shoe_size with description # [ ] print lucky_number with description ``` ##### more string addition - **[ ]** make a single string (sentence) in a variable called favorite_lucky_shoe using **string addition** with favorite_song, shoe_size, lucky_number variables and other strings as needed - **[ ]** print the value of the favorite_lucky_shoe variable string > sample output: `For singing happy birthday 8.5 times, you will be fined $25` ``` # assign favorite_lucky_shoe using ``` ### print() art #### use `print()` and the asterisk **\*** to create the following shapes - [ ] diagonal line - [ ] rectangle - [ ] smiley face ``` # [ ] print a diagonal using "*" # [ ] rectangle using "*" # [ ] smiley using "*" ``` #### Using `type()` -**[ ]** calulate the *type* using `type()` ``` # [ ] display the type of 'your name' (use single quotes) # [ ] display the type of "save your notebook!" 
(use double quotes) # [ ] display the type of "25" (use quotes) # [ ] display the type of "save your notebook " + 'your name' # [ ] display the type of 25 (no quotes) # [ ] display the type of 25 + 10 # [ ] display the type of 1.55 # [ ] display the type of 1.55 + 25 ``` #### Find the type of variables - **[ ]** run the cell below to make the variables available to be used in other code - **[ ]** display the data type as directed in the cells that follow ``` # assignments ***RUN THIS CELL*** before starting the section student_name = "Gus" student_age = 16 student_grade = 3.5 student_id = "ABC-000-000" # [ ] display the current type of the variable student_name # [ ] display the type of student_age # [ ] display the type of student_grade # [ ] display the type of student_age + student_grade # [ ] display the current type of student_id # assign new value to student_id # [ ] display the current of student_id ``` #### number integer addition - **[ ]** create variables (x, y, z) with integer values ``` # [ ] create integer variables (x, y, z) and assign them 1-3 digit integers (no decimals - no quotes) ``` - **[ ]** insert a **code cell** below - **[ ]** create an integer variable named **xyz_sum** equal to the sum of x, y, and z - **[ ]** print the value of **xyz_sum** ``` ``` ### Errors - **[ ]** troubleshoot and fix the errors below ``` # [ ] fix the error print("Hello World!"") # [ ] fix the error print(strings have quotes and variables have names) # [ ] fix the error print( "I have $" + 5) # [ ] fix the error print('always save the notebook") ``` ## ASCII art - **[ ]** Display first name or initials as ASCII Art - **[ ]** Challenge: insert an additional code cell to make an ASCII picture ``` # [ ] ASCII ART # [ ] ASCII ART ``` [Terms of use](http://go.microsoft.com/fwlink/?LinkID=206977) &nbsp; [Privacy & cookies](https://go.microsoft.com/fwlink/?LinkId=521839) &nbsp; © 2017 Microsoft
github_jupyter
Week 7 Notebook: Optimizing Other Objectives =============================================================== This week, we will look at optimizing multiple objectives simultaneously. In particular, we will look at pivoting with adversarial neural networks {cite:p}`Louppe:2016ylz,ganin2014unsupervised,Sirunyan:2019nfw`. We will borrow the implementation from: <https://github.com/glouppe/paper-learning-to-pivot> ``` import tensorflow.keras as keras import numpy as np from sklearn.metrics import roc_curve, auc import matplotlib.pyplot as plt import uproot from tqdm.notebook import tqdm import yaml with open('definitions.yml') as file: # The FullLoader parameter handles the conversion from YAML # scalar values to Python the dictionary format definitions = yaml.load(file, Loader=yaml.FullLoader) features = definitions['features'] spectators = definitions['spectators'] labels = definitions['labels'] nfeatures = definitions['nfeatures'] nspectators = definitions['nspectators'] nlabels = definitions['nlabels'] ntracks = definitions['ntracks'] ``` ## Define discriminator, regression, and combined adversarial models The combined loss function is $$L = L_\mathrm{class} - \lambda L_\mathrm{reg}$$ - $L_\mathrm{class}$ is the loss function for the classification part (categorical cross entropy) - $L_\mathrm{reg}$ is the loss function for the adversarial part (in this case a regression) - $\lambda$ is a hyperparamter that controls how important the adversarial part of the loss is compared to the classification part, which we nominally set to 1 ``` from tensorflow.keras.models import Model from tensorflow.keras.layers import Input, Dense, BatchNormalization, Concatenate, GlobalAveragePooling1D import tensorflow.keras.backend as K # define Deep Sets model with Dense Keras layer inputs = Input(shape=(ntracks, nfeatures,), name='input') x = BatchNormalization(name='bn_1')(inputs) x = Dense(64, name='dense_1', activation='relu')(x) x = Dense(32, name='dense_2', activation='relu')(x) x = Dense(32, name='dense_3', activation='relu')(x) # sum over tracks x = GlobalAveragePooling1D(name='pool_1')(x) x = Dense(100, name='dense_4', activation='relu')(x) output = Dense(nlabels, name = 'output', activation='softmax')(x) keras_model_disc = Model(inputs=inputs, outputs=output) keras_model_disc.compile(optimizer='adam', loss='categorical_crossentropy') # regressor x = Dense(100, name='dense_5', activation='relu')(keras_model_disc(inputs)) x = Dense(100, name='dense_6', activation='relu')(x) output_reg = Dense(2, activation='linear', name='mass_pt_reg')(x) sgd_opt = keras.optimizers.SGD(momentum=0) keras_model_reg = Model(inputs=inputs, outputs=output_reg) keras_model_reg.compile(optimizer=sgd_opt, loss='mse') # combined model lam = 1 keras_model_adv = Model(inputs=inputs, outputs=[keras_model_disc(inputs), keras_model_reg(inputs)]) keras_model_adv.compile(optimizer=sgd_opt, loss=['categorical_crossentropy', 'mse'], loss_weights = [1, -lam]) print(keras_model_disc.summary()) print(keras_model_reg.summary()) print(keras_model_adv.summary()) ``` ## Load data ``` from DataGenerator import DataGenerator # load training and validation generators train_files = ['root://eospublic.cern.ch//eos/opendata/cms/datascience/HiggsToBBNtupleProducerTool/HiggsToBBNTuple_HiggsToBB_QCD_RunII_13TeV_MC/train/ntuple_merged_10.root'] val_files = ['root://eospublic.cern.ch//eos/opendata/cms/datascience/HiggsToBBNtupleProducerTool/HiggsToBBNTuple_HiggsToBB_QCD_RunII_13TeV_MC/train/ntuple_merged_11.root'] train_generator = 
DataGenerator(train_files, features, labels, spectators, batch_size=1024, n_dim=ntracks, remove_mass_pt_window=False, remove_unlabeled=True, max_entry=5000, return_spectators=True, scale_mass_pt=[100., 10000.]) val_generator = DataGenerator(val_files, features, labels, spectators, batch_size=1024, n_dim=ntracks, remove_mass_pt_window=False, remove_unlabeled=True, max_entry=5000, return_spectators=True, scale_mass_pt=[100., 10000.]) ``` ## Pretrain discriminator and regressor models ``` # pretrain discriminator keras_model_disc.trainable = True keras_model_disc.compile(optimizer='adam', loss='categorical_crossentropy') for n_epoch in tqdm(range(20)): for t in tqdm(train_generator, total=len(train_generator), leave=bool(n_epoch==19)): keras_model_disc.fit(t[0], t[1][0],verbose=0) # pretrain regressor keras_model_reg.trainable = True keras_model_disc.trainable = False keras_model_reg.compile(optimizer=sgd_opt, loss='mse') for n_epoch in tqdm(range(20)): for t in tqdm(train_generator, total=len(train_generator), leave=bool(n_epoch==19)): keras_model_reg.fit(t[0], t[1][1], verbose=0) ``` ## Main training loop During the main training loop, we do two things: 1. Train the discriminator model with the combined loss function $$L = L_\mathrm{class} - \lambda L_\mathrm{reg}$$ 1. Train the regression model to learn the mass from with the standard MSE loss function $$L_\mathrm{reg}$$ ``` # alternate training discriminator and regressor for n_epoch in tqdm(range(40)): for t in tqdm(train_generator, total=len(train_generator), leave=bool(n_epoch==39)): # train discriminator keras_model_reg.trainable = False keras_model_disc.trainable = True keras_model_adv.compile(optimizer=sgd_opt, loss=['categorical_crossentropy', 'mse'], loss_weights=[1, -lam]) keras_model_adv.fit(t[0], t[1], verbose=0) # train regressor keras_model_reg.trainable = True keras_model_disc.trainable = False keras_model_reg.compile(optimizer=sgd_opt, loss='mse') keras_model_reg.fit(t[0], t[1][1],verbose=0) keras_model_adv.save_weights('keras_model_adv_best.h5') ``` ## Test ``` # load testing file test_files = ['root://eospublic.cern.ch//eos/opendata/cms/datascience/HiggsToBBNtupleProducerTool/HiggsToBBNTuple_HiggsToBB_QCD_RunII_13TeV_MC/test/ntuple_merged_0.root'] test_generator = DataGenerator(test_files, features, labels, spectators, batch_size=8192, n_dim=ntracks, remove_mass_pt_window=True, remove_unlabeled=True, return_spectators=True, max_entry=200000) # basically, no maximum # run model inference on test data set predict_array_adv = [] label_array_test = [] spec_array_test = [] for t in tqdm(test_generator, total=len(test_generator)): label_array_test.append(t[1][0]) spec_array_test.append(t[1][1]) predict_array_adv.append(keras_model_adv.predict(t[0])[0]) predict_array_adv = np.concatenate(predict_array_adv, axis=0) label_array_test = np.concatenate(label_array_test, axis=0) spec_array_test = np.concatenate(spec_array_test, axis=0) # create ROC curves print(label_array_test.shape) print(spec_array_test.shape) print(predict_array_adv.shape) fpr_adv, tpr_adv, threshold_adv = roc_curve(label_array_test[:,1], predict_array_adv[:,1]) # plot ROC curves plt.figure() plt.plot(tpr_adv, fpr_adv, lw=2.5, label="Adversarial, AUC = {:.1f}%".format(auc(fpr_adv,tpr_adv)*100)) plt.xlabel(r'True positive rate') plt.ylabel(r'False positive rate') plt.semilogy() plt.ylim(0.001, 1) plt.xlim(0, 1) plt.grid(True) plt.legend(loc='upper left') plt.show() from utils import find_nearest plt.figure() for wp in [1.0, 0.5, 0.3, 0.1, 0.05]: idx, val = 
find_nearest(fpr_adv, wp) plt.hist(spec_array_test[:,0], bins=np.linspace(40, 200, 21), weights=label_array_test[:,0]*(predict_array_adv[:,1] > threshold_adv[idx]), alpha=0.4, density=True, label='QCD, {}% FPR cut'.format(int(wp*100)),linestyle='-') plt.legend() plt.xlabel(r'$m_{SD}$') plt.ylabel(r'Normalized probability') plt.xlim(40, 200) plt.figure() for wp in [1.0, 0.5, 0.3, 0.1, 0.05]: idx, val = find_nearest(fpr_adv, wp) plt.hist(spec_array_test[:,0], bins=np.linspace(40, 200, 21), weights=label_array_test[:,1]*(predict_array_adv[:,1] > threshold_adv[idx]), alpha=0.4, density=True, label='H(bb), {}% FPR cut'.format(int(wp*100)),linestyle='-') plt.legend() plt.xlabel(r'$m_{SD}$') plt.ylabel(r'Normalized probability') plt.xlim(40, 200) plt.show() plt.figure() plt.hist(predict_array_adv[:,1], bins = np.linspace(0, 1, 21), weights=label_array_test[:,1]*0.1, alpha=0.4, linestyle='-', label='H(bb)') plt.hist(predict_array_adv[:,1], bins = np.linspace(0, 1, 21), weights=label_array_test[:,0], alpha=0.4, linestyle='-', label='QCD') plt.legend() plt.show() plt.figure() plt.hist(spec_array_test[:,0], bins = np.linspace(40, 200, 21), weights = label_array_test[:,1]*0.1, alpha=0.4, linestyle='-', label='H(bb)') plt.hist(spec_array_test[:,0], bins = np.linspace(40, 200, 21), weights = label_array_test[:,0], alpha=0.4, linestyle='-', label='QCD') plt.legend() plt.show() ```
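As a quick aside, the sign convention of the combined loss can be checked on a toy example: compiling a two-output Keras model with `loss_weights=[1, -lam]` makes the reported total loss equal to $L_\mathrm{class} - \lambda L_\mathrm{reg}$. The sketch below is my own self-contained illustration (random data, arbitrary layer sizes), not part of the notebook, and the exact return format of `evaluate` can vary slightly between Keras versions.

```
import numpy as np
import tensorflow as tf
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Dense

lam = 1.0
inp = Input(shape=(4,))
h = Dense(8, activation='relu')(inp)
out_class = Dense(2, activation='softmax', name='class')(h)
out_reg = Dense(1, activation='linear', name='reg')(h)
toy = Model(inp, [out_class, out_reg])
toy.compile(optimizer='sgd',
            loss=['categorical_crossentropy', 'mse'],
            loss_weights=[1.0, -lam])

x = np.random.rand(16, 4).astype('float32')
y_class = tf.keras.utils.to_categorical(np.random.randint(0, 2, 16), 2)
y_reg = np.random.rand(16, 1).astype('float32')

total, l_class, l_reg = toy.evaluate(x, [y_class, y_reg], verbose=0)
# the total loss is the weighted sum of the per-output losses,
# i.e. 1 * l_class + (-lam) * l_reg
print(total, l_class - lam * l_reg)  # should agree up to rounding
```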
## TASK-1: Make a class to calculate the maximum height, time of flight and horizontal range of a projectile fired from the ground.
## TASK-2: Use lists to find the maximum height, time of flight and horizontal range for angles of projection varying from 1 degree to 90 degrees.
## TASK-3: Make a plot to show the variation of maximum height, time of flight and horizontal range with the angle of projection.
## TASK-4: Change the lists of [angle], [maximum height], [time of flight] and [horizontal range] into a dictionary and finally into a dataframe using pandas. Save the file on your PC as a csv file.
### Required formulas:
### Horizontal range: $R = u^2 \sin(2A)/g$
### Time of flight: $T = 2u\sin(A)/g$
### Maximum height: $H = u^2 \sin^2(A)/(2g)$
```
import math
import numpy as np

class Projectile():
    def __init__(self, u, A, g):
        self.u = u  # initial speed (m/s)
        self.A = A  # angle of projection (degrees)
        self.g = g  # gravitational acceleration (m/s^2)
    def HorizontalRange(self):
        # R = u^2 sin(2A) / g  (note: ** is exponentiation in Python, ^ is bitwise XOR)
        R = (self.u**2) * math.sin(2 * self.A * math.pi/180) / self.g
        return R
    def TimeofFlight(self):
        # T = 2 u sin(A) / g
        T = 2 * self.u * math.sin(self.A * math.pi/180) / self.g
        return T
    def MaximumHeight(self):
        # H = (u sin(A))^2 / (2g)
        H = (self.u * math.sin(self.A * math.pi/180))**2 / (2 * self.g)
        return H
    def update_A(self, A):
        self.A = A

u = 36   # in m/s
g = 9.8  # in m/s^2
P = Projectile(u, 0, g)

R = []  # empty list to collect horizontal range
T = []  # empty list to collect time of flight
H = []  # empty list to collect maximum height
N = []  # empty list to collect angle of projection

x = np.arange(0, 90 + 0.1, 0.1)
for i in x:
    N.append(i)
    P.update_A(i)
    r = P.HorizontalRange()
    t = P.TimeofFlight()
    h = P.MaximumHeight()
    R.append(r)  # append the computed range, not the angle
    T.append(t)
    H.append(h)

import matplotlib.pyplot as plt
plt.subplot(2, 2, 1)
plt.plot(N, R)
plt.xlabel('N')
plt.ylabel('R')
plt.title("Angle of projection with Horizontal Range")
plt.subplot(2, 2, 2)
plt.plot(N, T)
plt.xlabel('N')
plt.ylabel('T')
plt.title("Angle of projection with Time of Flight")
plt.subplot(2, 2, 3)
plt.plot(N, H)
plt.xlabel('N')
plt.ylabel('H')
plt.title("Angle of projection with Maximum Height")

data = {}  # empty dictionary
data.update({"Angle_of_projection": N, "Horizontal_Range": R, "Time_of_Flight": T, "Maximum_Height": H})
print(data)

import pandas as pd
Df = pd.DataFrame(data)
print(Df)
Df.to_csv('Projectile.csv')

df = pd.read_csv('Projectile.csv')
df.head()

plt.figure(figsize=[10, 10])
plt.subplot(2, 2, 1)
plt.semilogy(df.Angle_of_projection, df.Horizontal_Range)
plt.xlabel('N')
plt.ylabel('R')
plt.title('Angle of projection with Horizontal Range')
plt.subplot(2, 2, 2)
plt.semilogy(df.Angle_of_projection, df.Time_of_Flight)
plt.xlabel('N')
plt.ylabel('T')
plt.title('Angle of projection with Time of Flight')
plt.subplot(2, 2, 3)
plt.semilogy(df.Angle_of_projection, df.Maximum_Height)
plt.xlabel('N')
plt.ylabel('H')
plt.title('Angle of projection with Maximum Height')
```
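As a cross-check of the formulas above, here is a short vectorized sketch (my own addition, using the same $u$ and $g$): the horizontal range should peak at 45 degrees, while the time of flight and maximum height should peak at 90 degrees.

```
import numpy as np

u, g = 36.0, 9.8
A = np.radians(np.arange(0, 90.1, 0.1))

R = u**2 * np.sin(2 * A) / g
T = 2 * u * np.sin(A) / g
H = (u * np.sin(A))**2 / (2 * g)

print(np.degrees(A[np.argmax(R)]))  # ~45 degrees
print(np.degrees(A[np.argmax(T)]))  # 90 degrees
print(np.degrees(A[np.argmax(H)]))  # 90 degrees
```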
#Improving Computer Vision Accuracy using Convolutions In the previous lessons you saw how to do fashion recognition using a Deep Neural Network (DNN) containing three layers -- the input layer (in the shape of the data), the output layer (in the shape of the desired output) and a hidden layer. You experimented with the impact of different sized of hidden layer, number of training epochs etc on the final accuracy. For convenience, here's the entire code again. Run it and take a note of the test accuracy that is printed out at the end. ``` import tensorflow as tf mnist = tf.keras.datasets.fashion_mnist (training_images, training_labels), (test_images, test_labels) = mnist.load_data() training_images=training_images / 255.0 test_images=test_images / 255.0 model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation=tf.nn.relu), tf.keras.layers.Dense(10, activation=tf.nn.softmax) ]) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(training_images, training_labels, epochs=5) test_loss = model.evaluate(test_images, test_labels) ``` Your accuracy is probably about 89% on training and 87% on validation...not bad...But how do you make that even better? One way is to use something called Convolutions. I'm not going to details on Convolutions here, but the ultimate concept is that they narrow down the content of the image to focus on specific, distinct, details. If you've ever done image processing using a filter (like this: https://en.wikipedia.org/wiki/Kernel_(image_processing)) then convolutions will look very familiar. In short, you take an array (usually 3x3 or 5x5) and pass it over the image. By changing the underlying pixels based on the formula within that matrix, you can do things like edge detection. So, for example, if you look at the above link, you'll see a 3x3 that is defined for edge detection where the middle cell is 8, and all of its neighbors are -1. In this case, for each pixel, you would multiply its value by 8, then subtract the value of each neighbor. Do this for every pixel, and you'll end up with a new image that has the edges enhanced. This is perfect for computer vision, because often it's features that can get highlighted like this that distinguish one item for another, and the amount of information needed is then much less...because you'll just train on the highlighted features. That's the concept of Convolutional Neural Networks. Add some layers to do convolution before you have the dense layers, and then the information going to the dense layers is more focussed, and possibly more accurate. Run the below code -- this is the same neural network as earlier, but this time with Convolutional layers added first. 
It will take longer, but look at the impact on the accuracy: ``` import tensorflow as tf print(tf.__version__) mnist = tf.keras.datasets.fashion_mnist (training_images, training_labels), (test_images, test_labels) = mnist.load_data() training_images=training_images.reshape(60000, 28, 28, 1) training_images=training_images / 255.0 test_images = test_images.reshape(10000, 28, 28, 1) test_images=test_images/255.0 model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=(28, 28, 1)), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(64, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(10, activation='softmax') ]) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.summary() model.fit(training_images, training_labels, epochs=5) test_loss = model.evaluate(test_images, test_labels) ``` It's likely gone up to about 93% on the training data and 91% on the validation data. That's significant, and a step in the right direction! Try running it for more epochs -- say about 20, and explore the results! But while the results might seem really good, the validation results may actually go down, due to something called 'overfitting' which will be discussed later. (In a nutshell, 'overfitting' occurs when the network learns the data from the training set really well, but it's too specialised to only that data, and as a result is less effective at seeing *other* data. For example, if all your life you only saw red shoes, then when you see a red shoe you would be very good at identifying it, but blue suade shoes might confuse you...and you know you should never mess with my blue suede shoes.) Then, look at the code again, and see, step by step how the Convolutions were built: Step 1 is to gather the data. You'll notice that there's a bit of a change here in that the training data needed to be reshaped. That's because the first convolution expects a single tensor containing everything, so instead of 60,000 28x28x1 items in a list, we have a single 4D list that is 60,000x28x28x1, and the same for the test images. If you don't do this, you'll get an error when training as the Convolutions do not recognize the shape. ``` import tensorflow as tf mnist = tf.keras.datasets.fashion_mnist (training_images, training_labels), (test_images, test_labels) = mnist.load_data() training_images=training_images.reshape(60000, 28, 28, 1) training_images=training_images / 255.0 test_images = test_images.reshape(10000, 28, 28, 1) test_images=test_images/255.0 ``` Next is to define your model. Now instead of the input layer at the top, you're going to add a Convolution. The parameters are: 1. The number of convolutions you want to generate. Purely arbitrary, but good to start with something in the order of 32 2. The size of the Convolution, in this case a 3x3 grid 3. The activation function to use -- in this case we'll use relu, which you might recall is the equivalent of returning x when x>0, else returning 0 4. In the first layer, the shape of the input data. You'll follow the Convolution with a MaxPooling layer which is then designed to compress the image, while maintaining the content of the features that were highlighted by the convlution. By specifying (2,2) for the MaxPooling, the effect is to quarter the size of the image. 
Without going into too much detail here, the idea is that it creates a 2x2 array of pixels, and picks the biggest one, thus turning 4 pixels into 1. It repeats this across the image, and in so doing halves the number of horizontal, and halves the number of vertical pixels, effectively reducing the image by 25%. You can call model.summary() to see the size and shape of the network, and you'll notice that after every MaxPooling layer, the image size is reduced in this way. ``` model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(28, 28, 1)), tf.keras.layers.MaxPooling2D(2, 2), ``` Add another convolution ``` tf.keras.layers.Conv2D(64, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2) ``` Now flatten the output. After this you'll just have the same DNN structure as the non convolutional version ``` tf.keras.layers.Flatten(), ``` The same 128 dense layers, and 10 output layers as in the pre-convolution example: ``` tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(10, activation='softmax') ]) ``` Now compile the model, call the fit method to do the training, and evaluate the loss and accuracy from the test set. ``` model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(training_images, training_labels, epochs=5) test_loss, test_acc = model.evaluate(test_images, test_labels) print(test_acc) ``` # Visualizing the Convolutions and Pooling This code will show us the convolutions graphically. The print (test_labels[;100]) shows us the first 100 labels in the test set, and you can see that the ones at index 0, index 23 and index 28 are all the same value (9). They're all shoes. Let's take a look at the result of running the convolution on each, and you'll begin to see common features between them emerge. Now, when the DNN is training on that data, it's working with a lot less, and it's perhaps finding a commonality between shoes based on this convolution/pooling combination. ``` print(test_labels[:100]) import matplotlib.pyplot as plt f, axarr = plt.subplots(3,4) FIRST_IMAGE=0 SECOND_IMAGE=7 THIRD_IMAGE=26 CONVOLUTION_NUMBER = 1 from tensorflow.keras import models layer_outputs = [layer.output for layer in model.layers] activation_model = tf.keras.models.Model(inputs = model.input, outputs = layer_outputs) for x in range(0,4): f1 = activation_model.predict(test_images[FIRST_IMAGE].reshape(1, 28, 28, 1))[x] axarr[0,x].imshow(f1[0, : , :, CONVOLUTION_NUMBER], cmap='inferno') axarr[0,x].grid(False) f2 = activation_model.predict(test_images[SECOND_IMAGE].reshape(1, 28, 28, 1))[x] axarr[1,x].imshow(f2[0, : , :, CONVOLUTION_NUMBER], cmap='inferno') axarr[1,x].grid(False) f3 = activation_model.predict(test_images[THIRD_IMAGE].reshape(1, 28, 28, 1))[x] axarr[2,x].imshow(f3[0, : , :, CONVOLUTION_NUMBER], cmap='inferno') axarr[2,x].grid(False) ``` EXERCISES 1. Try editing the convolutions. Change the 32s to either 16 or 64. What impact will this have on accuracy and/or training time. 2. Remove the final Convolution. What impact will this have on accuracy or training time? 3. How about adding more Convolutions? What impact do you think this will have? Experiment with it. 4. Remove all Convolutions but the first. What impact do you think this will have? Experiment with it. 5. In the previous lesson you implemented a callback to check on the loss function and to cancel training once it hit a certain amount. See if you can implement that here! 
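For exercise 5, one possible shape of such a callback is sketched below. This is my own illustration rather than the lesson's reference solution, and the 0.4 loss threshold is an arbitrary choice.

```
import tensorflow as tf

class StopAtLoss(tf.keras.callbacks.Callback):
    """Stop training once the training loss drops below a chosen threshold."""
    def __init__(self, threshold=0.4):
        super().__init__()
        self.threshold = threshold

    def on_epoch_end(self, epoch, logs=None):
        if logs is not None and logs.get('loss', float('inf')) < self.threshold:
            print('\nLoss below {}, stopping training.'.format(self.threshold))
            self.model.stop_training = True

# usage: model.fit(training_images, training_labels, epochs=20, callbacks=[StopAtLoss(0.4)])
```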
``` import tensorflow as tf print(tf.__version__) mnist = tf.keras.datasets.mnist (training_images, training_labels), (test_images, test_labels) = mnist.load_data() training_images=training_images.reshape(60000, 28, 28, 1) training_images=training_images / 255.0 test_images = test_images.reshape(10000, 28, 28, 1) test_images=test_images/255.0 model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(28, 28, 1)), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(10, activation='softmax') ]) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(training_images, training_labels, epochs=10) test_loss, test_acc = model.evaluate(test_images, test_labels) print(test_acc) ```
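To see what a hand-written convolution kernel actually does to an image (the 3x3 edge-detection filter described earlier, with 8 in the centre and -1 for every neighbour), here is a small self-contained sketch. It is my own illustration using `scipy.signal.convolve2d` on one Fashion-MNIST image; it is not part of the lesson code.

```
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from scipy.signal import convolve2d

(train_imgs, _), _ = tf.keras.datasets.fashion_mnist.load_data()
img = train_imgs[0] / 255.0

# centre weight 8, all neighbours -1: highlights edges
kernel = np.array([[-1, -1, -1],
                   [-1,  8, -1],
                   [-1, -1, -1]])
edges = convolve2d(img, kernel, mode='same')

plt.subplot(1, 2, 1); plt.imshow(img, cmap='gray'); plt.title('original')
plt.subplot(1, 2, 2); plt.imshow(edges, cmap='gray'); plt.title('edge filter')
plt.show()
```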
DeepLarning Couse HSE 2016 fall: * Arseniy Ashuha, you can text me ```[email protected]```, * ```https://vk.com/ars.ashuha``` * partially reusing https://github.com/ebenolson/pydata2015 <h1 align="center"> Image Captioning </h1> In this seminar you'll be going through the image captioning pipeline. To begin with, let us download the dataset of image features from a pre-trained GoogleNet. ``` !wget https://www.dropbox.com/s/3hj16b0fj6yw7cc/data.tar.gz?dl=1 -O data.tar.gz !tar -xvzf data.tar.gz ``` ### Data preprocessing ``` %%time # Read Dataset import numpy as np import pickle img_codes = np.load("data/image_codes.npy") captions = pickle.load(open('data/caption_tokens.pcl', 'rb')) print "each image code is a 1000-unit vector:", img_codes.shape print img_codes[0,:10] print '\n\n' print "for each image there are 5-7 descriptions, e.g.:\n" print '\n'.join(captions[0]) #split descriptions into tokens for img_i in range(len(captions)): for caption_i in range(len(captions[img_i])): sentence = captions[img_i][caption_i] captions[img_i][caption_i] = ["#START#"]+sentence.split(' ')+["#END#"] # Build a Vocabulary from collections import Counter word_counts = Counter() <Compute word frequencies for each word in captions. See code above for data structure> vocab = ['#UNK#', '#START#', '#END#'] vocab += [k for k, v in word_counts.items() if v >= 5] n_tokens = len(vocab) assert 10000 <= n_tokens <= 10500 word_to_index = {w: i for i, w in enumerate(vocab)} PAD_ix = -1 UNK_ix = vocab.index('#UNK#') #good old as_matrix for the third time def as_matrix(sequences,max_len=None): max_len = max_len or max(map(len,sequences)) matrix = np.zeros((len(sequences),max_len),dtype='int32')+PAD_ix for i,seq in enumerate(sequences): row_ix = [word_to_index.get(word,UNK_ix) for word in seq[:max_len]] matrix[i,:len(row_ix)] = row_ix return matrix #try it out on several descriptions of a random image as_matrix(captions[1337]) ``` ### Mah Neural Network ``` # network shapes. CNN_FEATURE_SIZE = img_codes.shape[1] EMBED_SIZE = 128 #pls change me if u want LSTM_UNITS = 200 #pls change me if u want import theano import theano.tensor as T # Input Variable sentences = T.imatrix()# [batch_size x time] of word ids image_vectors = T.matrix() # [batch size x unit] of CNN image features sentence_mask = T.neq(sentences,PAD_ix) import lasagne from lasagne.layers import * #network inputs l_words = InputLayer((None,None),sentences ) l_mask = InputLayer((None,None),sentence_mask ) #embeddings for words l_word_embeddings = <apply word embedding. use EMBED_SIZE> #cudos for using some pre-trained embedding :) # input layer for image features l_image_features = InputLayer((None,CNN_FEATURE_SIZE),image_vectors ) #convert 1000 image features from googlenet to whatever LSTM_UNITS you have set #it's also a good idea to add some dropout here and there l_image_features_small = <convert l_image features to a shape equal to rnn hidden state. 
Also play with dropout/noize> assert l_image_features_small.output_shape == (None,LSTM_UNITS) # Concatinate image features and word embedings in one sequence decoder = a recurrent layer (gru/lstm) with following checklist: # * takes word embeddings as an input # * has LSTM_UNITS units in the final layer # * has cell_init (or hid init for gru) set to converted image features # * mask_input = input_mask # * don't forget the grad clipping (~5-10) #find out better recurrent architectures for bonus point # Decoding of rnn hiden states from broadcast import BroadcastLayer,UnbroadcastLayer #apply whatever comes next to each tick of each example in a batch. Equivalent to 2 reshapes broadcast_decoder_ticks = BroadcastLayer(decoder,(0,1)) print "broadcasted decoder shape = ",broadcast_decoder_ticks.output_shape #predict probabilities for next tokens predicted_probabilities_each_tick = <predict probabilities for each tick, using broadcasted_decoder_shape as an input. No reshaping needed here.> # maybe a more complicated architecture will work better? #un-broadcast back into (batch,tick,probabilities) predicted_probabilities = UnbroadcastLayer(predicted_probabilities_each_tick, broadcast_layer=broadcast_decoder_ticks) print "output shape = ",predicted_probabilities.output_shape #remove if you know what you're doing (e.g. 1d convolutions or fixed shape) assert predicted_probabilities.output_shape == (None, None, 10373) ``` ### Some tricks * If you train large network, it is usually a good idea to make a 2-stage prediction 1. (large recurrent state) -> (bottleneck e.g. 256) 2. (bottleneck) -> (vocabulary size) * this way you won't need to store/train (large_recurrent_state x vocabulary size) matrix * Also maybe use Hierarchical Softmax? * https://gist.github.com/justheuristic/581853c6d6b87eae9669297c2fb1052d ``` next_word_probas = <get network output> predictions_flat = next_word_probas[:,:-1].reshape((-1,n_tokens)) reference_answers = sentences[:,1:].reshape((-1,)) #write symbolic loss function to minimize over NN params loss = <compute elementwise loss function> #trainable NN weights weights = get_all_params(predicted_probabilities,trainable=True) updates = <parameter updates using your favorite algoritm> #compile a functions for training and evaluation #please not that your functions must accept image features as FIRST param and sentences as second one train_step = <function that takes input sentence and image mask, outputs loss and updates weights> val_step = <function that takes input sentence and image mask and outputs loss> #for val_step use deterministic=True if you have any dropout/noize ``` # Training * You first have to implement a batch generator * Than the network will get trained the usual way ``` captions = np.array(captions) from random import choice def generate_batch(images,captions,batch_size,max_caption_len=None): #sample random numbers for image/caption indicies random_image_ix = np.random.randint(0,len(images),size=batch_size) #get images batch_images = images[random_image_ix] #5-7 captions for each image captions_for_batch_images = captions[random_image_ix] #pick 1 from 5-7 captions for each image batch_captions = map(choice,captions_for_batch_images) #convert to matrix batch_captions_ix = as_matrix(batch_captions,max_len=max_caption_len) return batch_images, batch_captions_ix generate_batch(img_codes,captions,3) ``` ### Main loop * We recommend you to periodically evaluate the network using the next "apply trained model" block * its safe to interrupt training, run a few examples 
and start training again ``` batch_size=50 #adjust me n_epochs=100 #adjust me n_batches_per_epoch = 50 #adjust me n_validation_batches = 5 #how many batches are used for validation after each epoch from tqdm import tqdm for epoch in range(n_epochs): train_loss=0 for _ in tqdm(range(n_batches_per_epoch)): train_loss += train_step(*generate_batch(img_codes,captions,batch_size)) train_loss /= n_batches_per_epoch val_loss=0 for _ in range(n_validation_batches): val_loss += val_step(*generate_batch(img_codes,captions,batch_size)) val_loss /= n_validation_batches print('\nEpoch: {}, train loss: {}, val loss: {}'.format(epoch, train_loss, val_loss)) print("Finish :)") ``` ### apply trained model ``` #the same kind you did last week, but a bit smaller from pretrained_lenet import build_model,preprocess,MEAN_VALUES # build googlenet lenet = build_model() #load weights lenet_weights = pickle.load(open('data/blvc_googlenet.pkl'))['param values'] #python3: pickle.load(open('data/blvc_googlenet.pkl', 'rb'), encoding='latin1')['param values'] set_all_param_values(lenet["prob"], lenet_weights) #compile get_features cnn_input_var = lenet['input'].input_var cnn_feature_layer = lenet['loss3/classifier'] get_cnn_features = theano.function([cnn_input_var], lasagne.layers.get_output(cnn_feature_layer)) from matplotlib import pyplot as plt %matplotlib inline #sample image img = plt.imread('data/Dog-and-Cat.jpg') img = preprocess(img) #deprocess and show, one line :) from pretrained_lenet import MEAN_VALUES plt.imshow(np.transpose((img[0] + MEAN_VALUES)[::-1],[1,2,0]).astype('uint8')) ``` ## Generate caption ``` last_word_probas = <get network-predicted probas at last tick #TRY OUT deterministic=True if you want more steady results get_probs = theano.function([image_vectors,sentences], last_word_probas) #this is exactly the generation function from week5 classwork, #except now we condition on image features instead of words def generate_caption(image,caption_prefix = ("START",),t=1,sample=True,max_len=100): image_features = get_cnn_features(image) caption = list(caption_prefix) for _ in range(max_len): next_word_probs = <obtain probabilities for next words> assert len(next_word_probs.shape) ==1 #must be one-dimensional #apply temperature next_word_probs = next_word_probs**t / np.sum(next_word_probs**t) if sample: next_word = np.random.choice(vocab,p=next_word_probs) else: next_word = vocab[np.argmax(next_word_probs)] caption.append(next_word) if next_word=="#END#": break return caption for i in range(10): print ' '.join(generate_caption(img,t=5.)[1:-1]) ``` # Demo ### Find at least 10 images to test it on. * Seriously, that's part of an assignment. 
Go get at least 10 pictures to get captioned * Make sure it works okay on __simple__ images before going to something more comples * Photos, not animation/3d/drawings, unless you want to train CNN network on anime * Mind the aspect ratio (see what `preprocess` does to your image) ``` #apply your network on image sample you found # # ``` # grading * base 5 if it compiles and trains without exploding * +1 for finding representative set of reference examples * +2 for providing 10+ examples where network provides reasonable captions (at least sometimes :) ) * you may want to predict with sample=False and deterministic=True for consistent results * kudos for submitting network params that reproduce it * +2 for providing 10+ examples where network fails IF you also got previous 10 examples right * bonus points for experiments with architecture and initialization (see above) * bonus points for trying out other pre-trained nets for captioning * a whole lot of bonus points if you also train via metric learning * image -> vec * caption -> vec (encoder, not decoder) * loss = correct captions must be closer, wrong ones must be farther * prediction = choose caption that is closest to image * a freaking whole lot of points if you also obtain statistically signifficant results the other way round * take caption, get closest image
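To make the metric-learning bonus above a bit more concrete, below is a tiny plain-numpy sketch (my own illustration, not part of the course materials) of a margin-based ranking loss: matching image/caption embedding pairs should be more similar than mismatched ones, and prediction picks the caption closest to the image.

```
import numpy as np

rng = np.random.RandomState(0)
# toy batch of L2-normalised embeddings: image i is paired with caption i
img_vecs = rng.randn(4, 16)
cap_vecs = rng.randn(4, 16)
img_vecs /= np.linalg.norm(img_vecs, axis=1, keepdims=True)
cap_vecs /= np.linalg.norm(cap_vecs, axis=1, keepdims=True)

sim = np.dot(img_vecs, cap_vecs.T)   # cosine similarities, shape (4, 4)
pos = np.diag(sim)[:, None]          # similarities of the correct pairs
margin = 0.2

# hinge ranking loss: every wrong caption should be at least `margin` less similar
loss = np.maximum(0.0, margin + sim - pos)
np.fill_diagonal(loss, 0.0)          # don't penalise the correct pair
print(loss.mean())

# prediction: for each image, pick the caption with the highest similarity
print(sim.argmax(axis=1))
```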
# Edge Computing using Tensorflow and Neural Compute Stick ## " Generate piano sounds using EEG capturing rhythmic activity of brain" ### Contents #### 1. Motivation #### 2. Signal acquisition #### 3. Signal postprocessing #### 4. Synthesize music ##### 4.1 Training Data ##### 4.2 Training data preprocessing ##### 4.3 Neural Network architecture ##### 4.4 Training methodology #### 5. Error definition and Further development ### 1. Motivation The following work is inspired by EEG. EEG can be described in terms of rhythmic cortical electrical activity of brain triggered by perceived sensory stimuli , where those rythmic activity falls in certain frequency bands(delta to gamma). In sound engineering, signals with dominant frequencies makes a pitch and sequences of pitches creates rhythm. Combining this concepts intuitively shows, by detecting those dominant frequencies, it is possible to listen to our brain using the signals it generates for different stimuli. Using principles of sound synthesis and sampling along with deep neural networks(DNN), in this project, i made an attempt to extract the rhythm or pitch hidding within brain waves and reproduce it as piano music. ### 2. Signal acquisition: (Not available) EEG/EOG recordings are not available. For the sake of simplicity and bring general working prototype of the model, used some random auto generated signal for test. This is because, the trained DNN is not constrained within brain waves, but to any kind of signal with dominant frequencies. Piano data set available for none commercial use is used during training and evaluation phase. ### 3. Signal Postprocessing (idea proposed) Enough researches proved, "brain waves are rhytmic"[2] and they falls in frequency bandwidth from Delta(<4Hz) to Gamma (>30-100Hz). Human audible frequecy range 20 - 20KHz. Hence, increasing the acquired EEG freuencies by certain constant value and preprocess with sampling rate 44100 makes it resembles piano sounds (fundamental frequency range 27.5 - 4186.01Hz), which itself within human audible range. Later, save the processed brain signals as numpy arrays and convert them as .wav files to reproduce the sound. Using the .wav files to extract brain signal (now sound) informations (frequencies, sampling rate and pitch). In case, we succeed to regenerate the sounds, since we increased the signal frequency by constant (to fit our piano data), the sounds plays faster. Hence we need to reduce the frequency by the same value while replaying the sound that fits the original brain signal. ### 4. Synthesize music #### 4.1 Training data Piano chords dataset available to public for non commercial purposes [3]. Each piano .wav files in the data set are sampled at 44100 and have varying data length. Data is analysed and studied further in detail from the code blocks below. 
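Before the preprocessing steps, the frequency-scaling idea proposed in Section 3 can be sketched with a synthetic signal. The snippet below is my own illustration (there are no real EEG recordings here): a 10 Hz sine stands in for an alpha-band rhythm, and an arbitrary scaling factor moves it to 440 Hz so it can be written as an audible .wav at the 44100 Hz rate used by the piano dataset.

```
import numpy as np
from scipy.io import wavfile

fs = 44100       # sampling rate used by the piano dataset
duration = 2.0   # seconds
f_eeg = 10.0     # stand-in for a 10 Hz rhythmic EEG component
scale = 44.0     # arbitrary factor: 10 Hz -> 440 Hz (A4)

t = np.linspace(0, duration, int(fs * duration), endpoint=False)
eeg_like = np.sin(2 * np.pi * f_eeg * t)           # original "brain" rhythm
audible = np.sin(2 * np.pi * f_eeg * scale * t)    # same rhythm shifted into the audible range

wavfile.write('brain_tone.wav', fs, (audible * 32767).astype(np.int16))
```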
#### 4.2 Training data preprocessing ###### Import required python libraries and add the current working directory to python path and system paths Directory structure <br> <br> Wavenet/ -/dataset (downloaded piano chords) - /UMAPiano-DB-Poly-1/UMAPiano-DB-A0-NO-F.wav -/clipped_data (clipped paino sounds are here) -/wavenet_logs (tensorflow checkpoints and logs) ``` %matplotlib inline from __future__ import division import numpy as np import tensorflow as tf import scipy.io import matplotlib import matplotlib.pyplot as plt import os import sys import random import scipy.io.wavfile import scipy matplotlib.rcParams['figure.figsize'] = (8.0, 6.0) #-------------------------------------Add working directory to path----------------------------------------------- cwd = os.getcwd() sys.path.append(cwd) sys.path.insert(0,'E:/!CogSci/!!!WS2017/Edge_computing/Wavenet') sys.path.insert(0,'C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/dataset') sys.path.insert(0,'E:/!CogSci/!!!WS2017/Edge_computing/Wavenet/clipped_data') # Save the variables in a log/directory during training save_path = "C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/wavenet_logs" if not os.path.exists(save_path): os.makedirs(save_path) ``` Each piano file from the dataset is approximately 1-2 seconds in length. We used the scipy to read each music file and get their sampling rate and data as array and found that all audio files has sampling rate 44100 and the data length varies based on length of audio. To train DNN, we need all training data with same length and increase the sampling rate to prevent signal loss/corruption. Below code shows the acquisition of first information about the piano dataset. ``` # Location of the wav file in the file system. fileName1 = 'E:/!CogSci/!!!WS2017/Edge_computing/Wavenet/dataset/UMAPiano-DB-Poly-1/UMAPiano-DB-A0-NO-F.wav' fileName2 = 'E:/!CogSci/!!!WS2017/Edge_computing/Wavenet/dataset/UMAPiano-DB-Poly-1/UMAPiano-DB-A0-NO-M.wav' # Loads sample rate (bps) and signal data (wav). sample_rate1, data1 = scipy.io.wavfile.read(fileName1) sample_rate2, data2 = scipy.io.wavfile.read(fileName2) # Print in sdout the sample rate, number of items and duration in seconds of the wav file print("Sample rate1 %s data size1 %s duration1: %s seconds"%(sample_rate1,data1.shape,len(data1)/sample_rate1)) print("Sample rate2 %s data size2 %s duration2: %s seconds"%(sample_rate2,data2.shape,len(data2)/sample_rate2)) print("DATA SIZES ARE DIFFERENT NEEDS TO BE CONSIDERED") # Plot the wave file and get insight about the sample. Here we test first 100 samples of the wav file plt.plot(data1) plt.plot(data2) plt.show() ``` Looking at the plot above, it is clear that there is no signal informations at the head and tail of the piano data. We can clip them safely and that reduces computation and memory resources. Also, i changed all the data file names with numbers for convenient. Later, i checked the files with shortest and longest length to fix varying length problem in the code block below. 
``` """ dataset_path = 'E:/!CogSci/!!!WS2017/Edge_computing/Wavenet/dataset/UMAPiano-DB-Poly-1' dir_list_len = len(os.listdir(dataset_path)) print("Number of files in the Dataset ",dir_list_len) # Change file names to be easily recognized def change_filenames(dataset_path): i = 0 # Counter and target filename for old_name in os.listdir(dataset_path): # os.rename(dataset_path + "/" + old_name, dataset_path + "/" + str(i) + '.wav') os.rename(os.path.join(dataset_path, old_name), os.path.join(dataset_path, str(i) + '.wav')) i+=1 change_filenames(dataset_new) list_sizes_new =[] for data_new in os.listdir(dataset_new): _,data_new = scipy.io.wavfile.read(dataset_new+'/'+data_new) list_sizes_new.append(data_new.shape[0]) print("Maximum size %s and the music file is",np.argmax(list_sizes_new)) """ dataset_new = 'C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/dataset' list_sizes =[] for datas in os.listdir(dataset_new): _,data_new = scipy.io.wavfile.read(os.path.join(dataset_new,datas)) list_sizes.append(data_new.shape[0]) if data_new.shape[0]== 39224: print("Minimum sized file is",datas) if data_new.shape[0] == 181718: print("Max sized file is",datas) print("Maximum size %s "%(max(list_sizes))) print("Minimum size %s "%(min(list_sizes))) print("Dataset is in C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/dataset and all the files are numbered") # -------------------------Get some insights and information about the max and min sized data----------------------------- # Location of the wav file in the file system. fileName3 = 'C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/dataset/356.wav' fileName4 = 'C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/dataset/722.wav' # Loads sample rate (bps) and signal data (wav). sample_rate3, data3 = scipy.io.wavfile.read(fileName3) sample_rate4, data4 = scipy.io.wavfile.read(fileName4) # Print in sdout the sample rate, number of items and duration in seconds of the wav file print("Sample rate3 %s data size3 %s duration3: %s seconds"%(sample_rate3,data3.shape,len(data3)/sample_rate3)) print("Sample rate4 %s data size4 %s duration4: %s seconds"%(sample_rate4,data4.shape,len(data4)/sample_rate4)) print("Data sizes are different") # Plot the wave file and get insight about the sample. Here we test first 100 samples of the wav file plt.plot(data4) plt.show() print("Safe to clip first 10000 sample points out from the array and convert them back to .wav file") ``` As we can see that even the smallest piano file has 20k values of zeros at head and tail combined. Hence it is safe to clip the first and last 10k indices from all files and save them back to .wav files. We can also add small amount of noise to the training data at this step using the code below. We will discuss the reason later briefly. ``` #----------------------- .WAV training data preprocessing steps ---------------------- import IPython # Clip the first and last 10000 values which doesn't show any informations """ def clip_write_wav(dataset_path): i = 0 # Counter and target filename for datas in os.listdir(dataset_path): _,data = scipy.io.wavfile.read(dataset_path+'/'+datas) data= data[:-10000] # Slice out last 10000 elements in data data= data[10000:] # Slice out first 10000 elements in the data #IF ADD NOISE DO it here in the data which is an array. 
scipy.io.wavfile.write('C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/clipped_data/%i.wav'%i, 44100, data) i+=1 """ _dataset = 'C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/dataset' _target = 'C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/clipped_data' clip_points = 10000 _sampling_rate = 44100 # clip_write_wav(_dataset) # Uncomment this line to clip and write the wav files again # Verify required informations again sample_rate3, data3 = scipy.io.wavfile.read('C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/clipped_data/3.wav') print("Sample rate %s data size %s duration: %s seconds"%(sample_rate3,data3.shape,len(data3)/sample_rate3)) plt.plot(data3) plt.show() #Play the audio inline IPython.display.Audio('C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/clipped_data/3.wav') ``` The data are clipped and they have shorter neck and tail now. Now we will increase the sampling rate (using "write_wav" function below) and fix the varying length in data by choosing the data with longest length as reference and zero padd other data until their length matches the length of the largest file done while feeding DNN using "get_training_data" function below . <br> But the scipy read module doesn't preserve the indices of the files in the dataset, as we can see that the largest and smallest file names from code block above and below are different. So, i hard coded the size of smallest and largest and search for the corresponding files. ``` # ------------- Search for the largest and smallest files -------------- _dataset_new = 'C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/clipped_data' _list_sizes =[] for datas in os.listdir(_dataset_new): _,_data_new = scipy.io.wavfile.read(os.path.join(_dataset_new,datas)) _list_sizes.append(_data_new.shape[0]) if _data_new.shape[0]== 19224: print("Minimum sized file is",datas) if _data_new.shape[0] == 161718: print("Max sized file is",datas) print("Maximum size %s "%(max(_list_sizes))) print("Minimum size %s "%(min(_list_sizes))) print("Notice that io read and write doesnt preserve the index of files in the directory") # ------------------------ Upsample the data ----------------------------- """ def write_wav(dataset_path): i=0 for datas in os.listdir(dataset_path): _,data = scipy.io.wavfile.read(dataset_path+'/'+datas) #IF ADD NOISE DO it here in the data which is an array. 
scipy.io.wavfile.write('C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/upsampled_data/%i.wav'%i, 88000, data) i+=1 write_wav(_dataset_new) """ # ----------------- Verifying data integrity again ----------------------- sampled_datapath ='C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/upsampled_data' _list_sizes =[] for datas in os.listdir(sampled_datapath): sampling_rate,_data_new = scipy.io.wavfile.read(os.path.join(sampled_datapath,datas)) _list_sizes.append(_data_new.shape[0]) if _data_new.shape[0]== 19224: print("Minimum sized file is %s and sampling rate"%datas,sampling_rate) elif _data_new.shape[0] == 161718: print("Max sized file is %s and sampling rate"%datas,sampling_rate) print("Maximum size %s "%(max(_list_sizes))) print("Minimum size %s "%(min(_list_sizes))) # Verify required informations again sample_rate5, data5 = scipy.io.wavfile.read('C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/upsampled_data/3.wav') print("Sample rate %s data size %s duration: %s seconds"%(sample_rate5,data5.shape,len(data5)/sample_rate5)) plt.plot(data5) plt.show() #Play the audio inline IPython.display.Audio('C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/clipped_data/3.wav') ``` Since, we use stacks of CNN in the encoder, i decided to convert the data as matrix of size 512*512 for which we need each file to have 262144 entries. So, instead of using largest file as reference, i opted 262144 as a length limit for all files. Function "get_training_data" serve this purpose for us. ``` # Each audio file should have 262144 entries. Extend them all with zeros in the tail # Convert all audio files as matrices of 512x512 shape def get_training_data(dataset_path): training_data = [] for datas in os.listdir(dataset_path): _,data = scipy.io.wavfile.read(dataset_path+'/'+datas) # Add Zeros at the tail until 262144 temp_zeros = [0]*262144 temp_zeros[:len(data)] = data # Slice temp_zeros and add the data into the slice # Reshape the data as square matrix of 512*512 of size 262144 data_ = np.reshape(temp_zeros,(512,512)) training_data.append(data_) return training_data training_data = get_training_data('C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/upsampled_data') print(training_data[0].shape) # Expand the dims # The third dimension represents number of channels for i in range(len(training_data)): training_data[i] = training_data[i][:,:,np.newaxis] print(training_data[0].shape) ``` The training data is ready to be fed into the network. But we still require the pitch info about each training data, since the network architecture we use require them while training. Class "HarmonicPowerSpectrum" and the nesxt two code blocks are bandpass filtering the signal that ease pitch detection. ``` # Get pitch of corresponding data """ Steps to extract the pitches of input signal: Reference: https://stackoverflow.com/questions/43946112/slicing-audio-signal-to-detect-pitch 1. Detect the fundamental frequencies "f0 estimation" (For piano, lowest freq - 27.5 and highest - 4186.01 Hz) 2. Get ride of garbage transients and low frequency noise using bandpass filter 3. After filtering do the peak detection using fft to find the pitches """ # 1. Fundamental frequencies [27.5,4186.01] Hz # 2. 
Build bandpass fileter from scipy.signal import butter, lfilter def butter_bandpass(f0, fs, order): """Give the Sampling freq(fs),Bandpass window(f0) of filter, build the bandpass filter""" nyq = 0.5 * fs low = f0[0] / nyq high = f0[1] / nyq b, a = butter(order, [low, high], btype='band') # Numerator (b) and denominator (a) polynomials of the IIR filter return b, a def butter_bandpass_filter(sig, f0, fs, order): """ Apply bandpass filter to the given signal""" b, a = butter_bandpass(f0, fs,order) y = lfilter(b, a, sig) # Apply the filter to the signal return y # Verify filter signal sig = data5 f0= (27.5, 4186.01) # Fundamental freq of piano fs = sample_rate5 # sampling rate of .wav files in the preprocessed training dataset order = 1 b, a = butter_bandpass(f0, fs, order=1) # Numerator (b) and denominator (a) polynomials of the IIR filter filtered_sig= butter_bandpass_filter(sig, f0,fs,order=1) # Plot some range of samples from both raw signal and bandpass fitered signal. plt.plot(sig[10000:10500], label='training signal') plt.plot(filtered_sig[10000:10500], label='Bandpass filtered signal with order %d'% order) plt.legend(loc='upper left') # orders = [1,2,3,4,5] # for order in orders: # filtered_sig= butter_bandpass_filter(sig, f0,fs,order) # Bandpass filtered signal # plt.plot(data5[10000:10500], label='training signal') # plt.plot(filtered_sig[10000:10500], label='Bandpass filtered signal with order %d'% order) # plt.legend(loc='upper left') print("Bandpass filter with order 1 looks okay. We do not want to loose much informations in the data by filter it with higher orders") # Reference :https://github.com/pydanny/pydanny-event-notes/blob/master/Pycon2008/intro_to_numpy/files/pycon_demos/windowed_fft/short_time_fft_solution.py # Get frequency components of the data using Short time fourier transform from scipy.fftpack import fft, fftfreq, fftshift from scipy.signal import get_window from math import ceil from pylab import figure, imshow, clf, gray, xlabel, ylabel sig = data5 f0= (27.5, 4186.01) # Fundamental freq of piano fs = sample_rate5 # sampling rate of .wav files in the preprocessed training dataset def freq_comp(signal,sample_rate): # Define the sample spacing and window size. dT = 1.0/sample_rate T_window = 50e-3 # 50ms ; window time frame N_window = int(T_window * sample_rate) # 440 N_data = len(signal) # 1. Get the window profile window = get_window('hamming', N_window) # Multiply the segments of data using hamming window func # 2. 
Set up the FFT result = [] start = 0 while (start < N_data - N_window): end = start + N_window result.append(fftshift(fft(window*signal[start:end]))) start = end result.append(fftshift(fft(window*signal[-N_window:]))) result = np.array(result,result[0].dtype) return result freq_comp_unfiltered = freq_comp(sig,fs) freq_comp_filtered = freq_comp(filtered_sig,fs) plt.figure(1) plt.plot(freq_comp_unfiltered) plt.title("Unfiltered Frequency componenets of the training signal") plt.show() plt.figure(2) plt.plot(freq_comp_filtered) plt.title("Filtered frequency component of the training signal") plt.show() # # Display results # freqscale = fftshift(fftfreq(N_window,dT))[150:-150]/1e3 # figure(1) # clf() # imshow(abs(result[:,150:-150]),extent=(freqscale[-1],freqscale[0],(N_data*dT-T_window/2.0),T_window/2.0)) # xlabel('Frequency (kHz)') # ylabel('Time (sec.)') # gray() # Reference: http://musicweb.ucsd.edu/~trsmyth/analysis/Harmonic_Product_Spectrum.html # Get the fundamental frequency(peak frequency) of the training data import parabolic from pylab import subplot, plot, log, copy, show # def hps(sig,fs,maxharms): # """ # Estimate peak frequency using harmonic product spectrum (HPS) # """ # window = sig * scipy.signal.blackmanharris(len(sig)) # # Harmonic product spectrum: Measures the maximum coincidence for harmonics for each spectral frame # c = abs(np.fft.rfft(window)) # Compute the one-dimensional discrete Fourier Transform for real input. # plt.plot(c) # plt.title("Discrete fourier transform of signal") # plt.figure() # pitch = np.log(c) # plt.plot(pitch) # plt.title("Max Harmonics for the range same as fundamental frequencies") # # Search for a maximum value of a range of possible fundamental frequencies # # for x in range(2, maxharms): # # a = copy(c[::x]) # Should average or maximum instead of decimating # # c = c[:len(a)] # # i = np.argmax(abs(c)) # # c *= a # # plt.title("Max Harmonics for the range of %d times the fundamental frequencies"%x) # # plt.plot(maxharms, x) # # plt.plot(np.log(c)) # # show() # hps(butter_bandpass_filter(sig,f0, fs,order = 1),fs,maxharms=0) # print(" As usual we opt to choose the same range as fundamental frequecies to make sure we dont loss much informations") # Wrap them all in one class HarmonicPowerSpectrum class HarmonicPowerSpectrum(object): def __init__(self,sig,f0,fs,order,maxharms): self.sig = sig self.f0 = f0 self.fs = fs self.order = order self.maxharms = maxharms @property def butter_bandpass(self): """Give the Sampling freq(fs),Bandpass window(f0) of filter, build the bandpass filter""" nyq = 0.5 * fs # Nyquist frequency low = self.f0[0] / nyq high = self.f0[1] / nyq b, a = butter(self.order, [low, high], btype='band') # Numerator (b) and denominator (a) polynomials of the IIR filter return b, a @property def butter_bandpass_filter(self): """ Apply bandpass filter to the given signal""" b, a = self.butter_bandpass y = lfilter(b, a, self.sig) # Apply the filter to the signal return y @property def hps(self): """Estimate peak frequency using harmonic product spectrum (HPS)""" y = self.butter_bandpass_filter window = y * scipy.signal.blackmanharris(len(y)) #Create window to search harmonics in signal slices # Harmonic product spectrum: Measures the maximum coincidence for harmonics for each spectral frame c = abs(np.fft.rfft(window)) # Compute the one-dimensional discrete Fourier Transform for real input. 
z = np.log(c) # Fundamental frequency or pitch of the given signal return z z = HarmonicPowerSpectrum(sig, f0, fs, order = 1,maxharms=0) harm_pow_spec = z.hps plt.figure(1) plt.plot(harm_pow_spec) plt.title("Max Harmonics for the range same as fundamental frequencies Bp filtered in Order 0 and max harmonic psectum 0") freq_comp_hps = freq_comp(harm_pow_spec,fs) plt.figure(2) plt.plot(freq_comp_hps) plt.title("""Frequency components(in logarithmix scale) of harmonic spectrum of filtered training data. A harmonic set of two pitches contributing significantly to this piano chord""") plt.show() ``` Hence, i updated the get_training_data function to perform pitch detection using the HarmonicPowerSpectrum analyser as seen below. ``` # Each audio file should have 262144 entries. Extend them all with zeros in the tail # Convert all audio files as matrices of 512x512 shape def get_training_data(dataset_path, f0, fs, order = 1,maxharms=0): training_data = [] pitch_data = [] for datas in os.listdir(dataset_path): _,data = scipy.io.wavfile.read(dataset_path+'/'+datas) # Add Zeros at the tail until 162409 temp_zeros_data = [0]*262144 # print("Unpadded data len",len(data)) # print(len(temp_zeros)) temp_zeros_data[:len(data)] = data # Slice temp_zeros and add the data into the slice # print("Padded data len",len(temp_zeros)) # print(np.shape(temp_zeros)) # Reshape the data as square matrix of 403*403 of size 162409 data_ = np.reshape(temp_zeros_data,(512,512)) # Get pitch of the signal z = HarmonicPowerSpectrum(temp_zeros_data, f0, fs, order = 1,maxharms=0) harm_pow_spec = z.hps training_data.append(data_) pitch_data.append(harm_pow_spec) return training_data,pitch_data training_data,pitch_data = get_training_data('C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/upsampled_data',f0, fs, order = 1,maxharms=0) print(training_data[0].shape) # Expand the dims # The third dimension represents number of channels for i in range(len(training_data)): training_data[i] = training_data[i][:,:,np.newaxis] print(training_data[0].shape) ```
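The preprocessing text above mentions optionally adding a small amount of noise to the training data. One common way to do this is additive white Gaussian noise at a fixed signal-to-noise ratio; the helper below is my own sketch (the 30 dB default is an arbitrary choice), which could be applied to each array before writing the upsampled .wav files.

```
import numpy as np

def add_noise(signal, snr_db=30.0):
    """Return a copy of `signal` with additive white Gaussian noise at the given SNR (dB)."""
    signal = np.asarray(signal, dtype=np.float64)
    sig_power = np.mean(signal ** 2)
    noise_power = sig_power / (10.0 ** (snr_db / 10.0))
    noise = np.random.normal(0.0, np.sqrt(noise_power), size=signal.shape)
    return signal + noise

# e.g. noisy = add_noise(data5, snr_db=30.0) before scipy.io.wavfile.write(...)
```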
# Customizing and controlling xclim xclim's behaviour can be controlled globally or contextually through `xclim.set_options`, which acts the same way as `xarray.set_options`. For the extension of xclim with the addition of indicators, see the [Extending xclim](extendxclim.ipynb) notebook. ``` import xarray as xr import xclim from xclim.testing import open_dataset ``` Let's create fake data with some missing values and mask every 10th, 20th and 30th of the month.This represents 9.6-10% of masked data for all months except February where it is 7.1%. ``` tasmax = ( xr.tutorial.open_dataset("air_temperature") .air.resample(time="D") .max(keep_attrs=True) ) tasmax = tasmax.where(tasmax.time.dt.day % 10 != 0) ``` ## Checks Above, we created fake temperature data from a xarray tutorial dataset that doesn't have all the standard CF attributes. By default, when triggering a computation with an Indicator from xclim, warnings will be raised: ``` tx_mean = xclim.atmos.tx_mean(tasmax=tasmax, freq="MS") # compute monthly max tasmax ``` Setting `cf_compliance` to `'log'` mutes those warnings and sends them to the log instead. ``` xclim.set_options(cf_compliance="log") tx_mean = xclim.atmos.tx_mean(tasmax=tasmax, freq="MS") # compute monthly max tasmax ``` ## Missing values For example, one can globally change the missing method. Change the default missing method to "pct" and set its tolerance to 8%: ``` xclim.set_options(check_missing="pct", missing_options={"pct": {"tolerance": 0.08}}) tx_mean = xclim.atmos.tx_mean(tasmax=tasmax, freq="MS") # compute monthly max tasmax tx_mean.sel(time="2013", lat=75, lon=200) ``` Only February has non-masked data. Let's say we want to use the "wmo" method (and its default options), but only once, we can do: ``` with xclim.set_options(check_missing="wmo"): tx_mean = xclim.atmos.tx_mean( tasmax=tasmax, freq="MS" ) # compute monthly max tasmax tx_mean.sel(time="2013", lat=75, lon=200) ``` This method checks that there is less than `nm=5` invalid values in a month and that there are no consecutive runs of `nc>=4` invalid values. Thus, every month is now valid. Finally, it is possible for advanced users to register their own method. Xclim's missing methods are in fact based on class instances. Thus, to create a custom missing class, one should implement a subclass based on `xclim.core.checks.MissingBase` and overriding at least the `is_missing` method. The method should take a `null` argument and a `count` argument. - `null` is a `DataArrayResample` instance of the resampled mask of invalid values in the input dataarray. - `count` is the number of days in each resampled periods and any number of other keyword arguments. The `is_missing` method should return a boolean mask, at the same frequency as the indicator output (same as `count`), where True values are for elements that are considered missing and masked on the output. When registering the class with the `xclim.core.checks.register_missing_method` decorator, the keyword arguments will be registered as options for the missing method. One can also implement a `validate` static method that receives only those options and returns whether they should be considered valid or not. 
``` from xclim.core.missing import register_missing_method from xclim.core.missing import MissingBase from xclim.indices.run_length import longest_run @register_missing_method("consecutive") class MissingConsecutive(MissingBase): """Any period with more than max_n consecutive missing values is considered invalid""" def is_missing(self, null, count, max_n=5): return null.map(longest_run, dim="time") >= max_n @staticmethod def validate(max_n): return max_n > 0 ``` The new method is now accessible and usable with: ``` with xclim.set_options( check_missing="consecutive", missing_options={"consecutive": {"max_n": 2}} ): tx_mean = xclim.atmos.tx_mean( tasmax=tasmax, freq="MS" ) # compute monthly max tasmax tx_mean.sel(time="2013", lat=75, lon=200) ```
``` # Import plotting modules import matplotlib.pyplot as plt import seaborn as sns import numpy as np df = [4.7, 4.5, 4.9, 4.0, 4.6, 4.5, 4.7, 3.3, 4.6, 3.9, 3.5, 4.2, 4.0, 4.7, 3.6, 4.4, 4.5, 4.1, 4.5, 3.9, 4.8, 4.0, 4.9, 4.7, 4.3, 4.4, 4.8, 5.0, 4.5, 3.5, 3.8, 3.7, 3.9, 5.1, 4.5, 4.5, 4.7, 4.4, 4.1, 4.0, 4.4, 4.6, 4.0, 3.3, 4.2, 4.2, 4.2, 4.3, 3.0, 4.1] versicolor_petal_length = np.array(df) # Set default Seaborn style sns.set() # Plot histogram of versicolor petal lengths _ = plt.hist(versicolor_petal_length, ec='white') # Show histogram plt.show() # Plot histogram of versicolor petal lengths _ = plt.hist(versicolor_petal_length, ec='black') # Label axes _ = plt.xlabel('petal length (cm)') _ = plt.ylabel('count') # Show histogram plt.show() # Import numpy import numpy as np # Compute number of data points: n_data n_data = len(versicolor_petal_length) print(n_data) # Number of bins is the square root of number of data points: n_bins n_bins = np.sqrt(n_data) print(n_bins) # Convert number of bins to integer: n_bins n_bins = int(n_bins) # Plot the histogram _ = plt.hist(versicolor_petal_length, bins=n_bins, ec='black') # Label axes _ = plt.xlabel('petal length (cm)') _ = plt.ylabel('count') # Show histogram plt.show() import pandas as pd sepal1 = [5.1, 4.9, 4.7, 4.6, 5.0, 5.4, 4.6, 5.0, 4.4, 4.9, 5.4, 4.8, 4.8, 4.3, 5.8, 5.7, 5.4, 5.1, 5.7, 5.1, 5.4, 5.1, 4.6, 5.1, 4.8, 5.0, 5.0, 5.2, 5.2, 4.7, 4.8, 5.4, 5.2, 5.5, 4.9, 5.0, 5.5, 4.9, 4.4, 5.1, 5.0, 4.5, 4.4, 5.0, 5.1, 4.8, 5.1, 4.6, 5.3, 5.0, 7.0, 6.4, 6.9, 5.5, 6.5, 5.7, 6.3, 4.9, 6.6, 5.2, 5.0, 5.9, 6.0, 6.1, 5.6, 6.7, 5.6, 5.8, 6.2, 5.6, 5.9, 6.1, 6.3, 6.1, 6.4, 6.6, 6.8, 6.7, 6.0, 5.7, 5.5, 5.5, 5.8, 6.0, 5.4, 6.0, 6.7, 6.3, 5.6, 5.5, 5.5, 6.1, 5.8, 5.0, 5.6, 5.7, 5.7, 6.2, 5.1, 5.7, 6.3, 5.8, 7.1, 6.3, 6.5, 7.6, 4.9, 7.3, 6.7, 7.2, 6.5, 6.4, 6.8, 5.7, 5.8, 6.4, 6.5, 7.7, 7.7, 6.0, 6.9, 5.6, 7.7, 6.3, 6.7, 7.2, 6.2, 6.1, 6.4, 7.2, 7.4, 7.9, 6.4, 6.3, 6.1, 7.7, 6.3, 6.4, 6.0, 6.9, 6.7, 6.9, 5.8, 6.8, 6.7, 6.7, 6.3, 6.5, 6.2, 5.9] sepal2 = [3.5, 3.0, 3.2, 3.1, 3.6, 3.9, 3.4, 3.4, 2.9, 3.1, 3.7, 3.4, 3.0, 3.0, 4.0, 4.4, 3.9, 3.5, 3.8, 3.8, 3.4, 3.7, 3.6, 3.3, 3.4, 3.0, 3.4, 3.5, 3.4, 3.2, 3.1, 3.4, 4.1, 4.2, 3.1, 3.2, 3.5, 3.1, 3.0, 3.4, 3.5, 2.3, 3.2, 3.5, 3.8, 3.0, 3.8, 3.2, 3.7, 3.3, 3.2, 3.2, 3.1, 2.3, 2.8, 2.8, 3.3, 2.4, 2.9, 2.7, 2.0, 3.0, 2.2, 2.9, 2.9, 3.1, 3.0, 2.7, 2.2, 2.5, 3.2, 2.8, 2.5, 2.8, 2.9, 3.0, 2.8, 3.0, 2.9, 2.6, 2.4, 2.4, 2.7, 2.7, 3.0, 3.4, 3.1, 2.3, 3.0, 2.5, 2.6, 3.0, 2.6, 2.3, 2.7, 3.0, 2.9, 2.9, 2.5, 2.8, 3.3, 2.7, 3.0, 2.9, 3.0, 3.0, 2.5, 2.9, 2.5, 3.6, 3.2, 2.7, 3.0, 2.5, 2.8, 3.2, 3.0, 3.8, 2.6, 2.2, 3.2, 2.8, 2.8, 2.7, 3.3, 3.2, 2.8, 3.0, 2.8, 3.0, 2.8, 3.8, 2.8, 2.8, 2.6, 3.0, 3.4, 3.1, 3.0, 3.1, 3.1, 3.1, 2.7, 3.2, 3.3, 3.0, 2.5, 3.0, 3.4, 3.0] petal = [1.4, 1.4, 1.3, 1.5, 1.4, 1.7, 1.4, 1.5, 1.4, 1.5, 1.5, 1.6, 1.4, 1.1, 1.2, 1.5, 1.3, 1.4, 1.7, 1.5, 1.7, 1.5, 1.0, 1.7, 1.9, 1.6, 1.6, 1.5, 1.4, 1.6, 1.6, 1.5, 1.5, 1.4, 1.5, 1.2, 1.3, 1.5, 1.3, 1.5, 1.3, 1.3, 1.3, 1.6, 1.9, 1.4, 1.6, 1.4, 1.5, 1.4, 4.7, 4.5, 4.9, 4.0, 4.6, 4.5, 4.7, 3.3, 4.6, 3.9, 3.5, 4.2, 4.0, 4.7, 3.6, 4.4, 4.5, 4.1, 4.5, 3.9, 4.8, 4.0, 4.9, 4.7, 4.3, 4.4, 4.8, 5.0, 4.5, 3.5, 3.8, 3.7, 3.9, 5.1, 4.5, 4.5, 4.7, 4.4, 4.1, 4.0, 4.4, 4.6, 4.0, 3.3, 4.2, 4.2, 4.2, 4.3, 3.0, 4.1, 6.0, 5.1, 5.9, 5.6, 5.8, 6.6, 4.5, 6.3, 5.8, 6.1, 5.1, 5.3, 5.5, 5.0, 5.1, 5.3, 5.5, 6.7, 6.9, 5.0, 5.7, 4.9, 6.7, 4.9, 5.7, 6.0, 4.8, 4.9, 5.6, 5.8, 6.1, 6.4, 5.6, 5.1, 5.6, 6.1, 5.6, 5.5, 4.8, 5.4, 5.6, 5.1, 5.1, 5.9, 5.7, 5.2, 5.0, 5.2, 5.4, 5.1] petal2 = [0.2, 
0.2, 0.2, 0.2, 0.2, 0.4, 0.3, 0.2, 0.2, 0.1, 0.2, 0.2, 0.1, 0.1, 0.2, 0.4, 0.4, 0.3, 0.3, 0.3, 0.2, 0.4, 0.2, 0.5, 0.2, 0.2, 0.4, 0.2, 0.2, 0.2, 0.2, 0.4, 0.1, 0.2, 0.1, 0.2, 0.2, 0.1, 0.2, 0.2, 0.3, 0.3, 0.2, 0.6, 0.4, 0.3, 0.2, 0.2, 0.2, 0.2, 1.4, 1.5, 1.5, 1.3, 1.5, 1.3, 1.6, 1.0, 1.3, 1.4, 1.0, 1.5, 1.0, 1.4, 1.3, 1.4, 1.5, 1.0, 1.5, 1.1, 1.8, 1.3, 1.5, 1.2, 1.3, 1.4, 1.4, 1.7, 1.5, 1.0, 1.1, 1.0, 1.2, 1.6, 1.5, 1.6, 1.5, 1.3, 1.3, 1.3, 1.2, 1.4, 1.2, 1.0, 1.3, 1.2, 1.3, 1.3, 1.1, 1.3, 2.5, 1.9, 2.1, 1.8, 2.2, 2.1, 1.7, 1.8, 1.8, 2.5, 2.0, 1.9, 2.1, 2.0, 2.4, 2.3, 1.8, 2.2, 2.3, 1.5, 2.3, 2.0, 2.0, 1.8, 2.1, 1.8, 1.8, 1.8, 2.1, 1.6, 1.9, 2.0, 2.2, 1.5, 1.4, 2.3, 2.4, 1.8, 1.8, 2.1, 2.4, 2.3, 1.9, 2.3, 2.5, 2.3, 1.9, 2.0, 2.3, 1.8] species = ['setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica'] df = list(zip(sepal1, sepal2, petal, petal2, species)) df = pd.DataFrame(df) df.columns = ['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)', 'species'] # Create bee swarm plot with Seaborn's default settings _ = sns.swarmplot(x='species', y='petal length (cm)', data=df) # Label the axes _ = plt.xlabel('species') _ = plt.ylabel('petal length (cm)') # Show the plot plt.show() def ecdf(data): """Compute ECDF for a one-dimensional array of measurements.""" # Number of data points: n n = len(data) # x-data for the ECDF: x x = np.sort(data) # y-data for the ECDF: y y = np.arange(1, n+1) / n return x, y # Compute ECDF for versicolor data: x_vers, y_vers x_vers, y_vers = ecdf(versicolor_petal_length) # Generate plot _ = plt.plot(x_vers, y_vers, marker='.', linestyle='none') # Label the axes _ = plt.xlabel('petal length (cm)') _ = plt.ylabel('ECDF') # Display the plot plt.show() # Plotting the ECDF # You will 
now use your ecdf() function to compute the ECDF for the # petal lengths of Anderson's Iris versicolor flowers. You will then plot the ECDF. # Recall that your ecdf() function returns two arrays # so you will need to unpack them. An example of such unpacking is x, y = foo(data), for some function foo(). setosa_petal_length = [1.4, 1.4, 1.3, 1.5, 1.4, 1.7, 1.4, 1.5, 1.4, 1.5, 1.5, 1.6, 1.4, 1.1, 1.2, 1.5, 1.3, 1.4, 1.7, 1.5, 1.7, 1.5, 1. , 1.7, 1.9, 1.6, 1.6, 1.5, 1.4, 1.6, 1.6, 1.5, 1.5, 1.4, 1.5, 1.2, 1.3, 1.5, 1.3, 1.5, 1.3, 1.3, 1.3, 1.6, 1.9, 1.4, 1.6, 1.4, 1.5, 1.4] versicolor_petal_length = [4.7, 4.5, 4.9, 4.0, 4.6, 4.5, 4.7, 3.3, 4.6, 3.9, 3.5, 4.2, 4.0, 4.7, 3.6, 4.4, 4.5, 4.1, 4.5, 3.9, 4.8, 4.0, 4.9, 4.7, 4.3, 4.4, 4.8, 5.0, 4.5, 3.5, 3.8, 3.7, 3.9, 5.1, 4.5, 4.5, 4.7, 4.4, 4.1, 4.0, 4.4, 4.6, 4.0, 3.3, 4.2, 4.2, 4.2, 4.3, 3.0, 4.1] virginica_petal_length = [6. , 5.1, 5.9, 5.6, 5.8, 6.6, 4.5, 6.3, 5.8, 6.1, 5.1, 5.3, 5.5, 5. , 5.1, 5.3, 5.5, 6.7, 6.9, 5. , 5.7, 4.9, 6.7, 4.9, 5.7, 6. , 4.8, 4.9, 5.6, 5.8, 6.1, 6.4, 5.6, 5.1, 5.6, 6.1, 5.6, 5.5, 4.8, 5.4, 5.6, 5.1, 5.1, 5.9, 5.7, 5.2, 5. , 5.2, 5.4, 5.1] setosa_petal_length = np.array(setosa_petal_length) # do this for the other 2 .............................. # Compute ECDFs x_set, y_set = ecdf(setosa_petal_length) x_vers, y_vers = ecdf(versicolor_petal_length) x_virg, y_virg = ecdf(virginica_petal_length) # Plot all ECDFs on the same plot _ = plt.plot(x_set, y_set, marker='.', linestyle='none') _ = plt.plot(x_vers, y_vers, marker='.', linestyle='none') _ = plt.plot(x_virg, y_virg, marker='.', linestyle='none') # Annotate the plot _ = plt.legend(('setosa', 'versicolor', 'virginica'), loc='lower right') _ = plt.xlabel('petal length (cm)') _ = plt.ylabel('ECDF') # Display the plot plt.show() # ECDFs also allow you to compare two or more distributions # (though plots get cluttered if you have too many). Here, you will plot ECDFs # for the petal lengths of all three iris species. # You already wrote a function to generate ECDFs so you can put it to good use! # Compute the mean mean_length_vers = np.mean(versicolor_petal_length) # Print the results with some nice formatting print('I. versicolor:', mean_length_vers, 'cm') # The mean of all measurements gives an indication of # the typical magnitude of a measurement. It is computed using np.mean(). # Specify array of percentiles: percentiles percentiles = np.array([2.5, 25, 50, 75, 97.5]) # Compute percentiles: ptiles_vers ptiles_vers = np.percentile(versicolor_petal_length, percentiles) # Print the result print(ptiles_vers) # In this exercise, you will compute the percentiles of petal length of Iris versicolor. # Plot the ECDF _ = plt.plot(x_vers, y_vers, '.') _ = plt.xlabel('petal length (cm)') _ = plt.ylabel('ECDF') # Overlay percentiles as red x's _ = plt.plot(ptiles_vers, percentiles/100, marker='D', color='red', linestyle='none') # Show the plot plt.show() # To see how the percentiles relate to the ECDF, you will plot the percentiles of # Iris versicolor petal lengths you calculated in the last exercise on the ECDF plot you generated in chapter 1. # The percentile variables from the previous exercise are available in the workspace as ptiles_vers and percentiles. 
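# Added cross-check (not part of the original exercise): the ECDF computed above
# should agree with np.percentile. np.searchsorted finds where each percentile
# value sits in the sorted data, so the ECDF heights there should be close to
# percentiles / 100.
ecdf_heights = y_vers[np.searchsorted(x_vers, ptiles_vers)]
print('ECDF heights at the percentile values:', ecdf_heights)
print('Expected (percentiles / 100):         ', percentiles / 100)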
# Create box plot with Seaborn's default settings _ = sns.boxplot(x='species', y='petal length (cm)', data=df) # Label the axes _ = plt.xlabel('species') _ = plt.ylabel('petal length (cm)') # Show the plot plt.show() # Making a box plot for the petal lengths is unnecessary because the iris data set is # not too large and the bee swarm plot works fine. However, it is always good to get some practice. # Standard deviation is a reasonable metric for the typical spread of the data # Array of differences to mean: differences differences = versicolor_petal_length - np.mean(versicolor_petal_length) # Square the differences: diff_sq diff_sq = differences**2 # Compute the mean square difference: variance_explicit variance_explicit = np.mean(diff_sq) # Compute the variance using NumPy: variance_np variance_np = np.var(versicolor_petal_length) # Print the results print(variance_explicit, variance_np) # Compute the variance: variance variance = np.var(versicolor_petal_length) # Print the square root of the variance print(np.sqrt(variance)) # Print the standard deviation print(np.std(versicolor_petal_length)) # the standard deviation is the square root of the variance # the variance is how far a set of random data points are spread out from the mean # The variance measures how far each number in the set is from the mean # A low standard deviation means that most of the numbers are very close to the average. # A high standard deviation means that the numbers are spread out. # Covariance of a point is the mean of the product of those differences, with respect to the mean of the x and mean # of the y axis # If covariance is positive, the point is positively correlated, (if its above the x mean and y mean) # If x is high and y is low or vice versa, then the point is negatively correlated # covariance / std of x * std of y = Pearson correpation p versicolor_petal_width = np.array([1.4, 1.5, 1.5, 1.3, 1.5, 1.3, 1.6, 1. , 1.3, 1.4, 1. , 1.5, 1. , 1.4, 1.3, 1.4, 1.5, 1. , 1.5, 1.1, 1.8, 1.3, 1.5, 1.2, 1.3, 1.4, 1.4, 1.7, 1.5, 1. , 1.1, 1. , 1.2, 1.6, 1.5, 1.6, 1.5, 1.3, 1.3, 1.3, 1.2, 1.4, 1.2, 1. , 1.3, 1.2, 1.3, 1.3, 1.1, 1.3]) # Make a scatter plot _ = plt.plot(versicolor_petal_length, versicolor_petal_width, marker='.', linestyle='none') # Label the axes _ = plt.xlabel('petal length (cm)') _ = plt.ylabel('petal width (cm)') # Show the result plt.show() # When you made bee swarm plots, box plots, and ECDF plots in previous exercises, you compared # the petal lengths of different species of iris. But what if you want to compare # two properties of a single species? This is exactly what we will do in this exercise. We will make a scatter # plot of the petal length and width measurements of Anderson's Iris versicolor flowers. If the flower scales # (that is, it preserves its proportion as it grows), we would expect the length and width to be correlated. # the highest variance in the variable x, # the highest covariance, # negative covariance? # Compute the covariance matrix: covariance_matrix covariance_matrix = np.cov(versicolor_petal_length, versicolor_petal_width) # Print covariance matrix print(covariance_matrix) # Extract covariance of length and width of petals: petal_cov petal_cov = covariance_matrix[0,1] # Print the length/width covariance print(petal_cov) # The covariance may be computed using the Numpy function np.cov(). For example, we have two sets of # data x and y, np.cov(x, y) returns a 2D array where entries [0,1] and # [1,0] are the covariances. 
Entry [0,0] is the variance of the data in x, and entry [1,1] is the variance of # the data in y. This 2D output array is called the covariance matrix, since it organizes the self- and covariance. def pearson_r(x, y): """Compute Pearson correlation coefficient between two arrays.""" # Compute correlation matrix: corr_mat corr_mat = np.corrcoef(x, y) # Return entry [0,1] return corr_mat[0,1] # Compute Pearson correlation coefficient for I. versicolor r = pearson_r(versicolor_petal_width, versicolor_petal_length) # Print the result print(r) # Computing the Pearson correlation coefficient # As mentioned in the video, the Pearson correlation coefficient, also called the Pearson r, is # often easier to interpret than the covariance. It is computed using the np.corrcoef() function. Like np.cov(), # it takes two arrays as arguments and returns a 2D array. Entries [0,0] and [1,1] are necessarily equal to 1 # (can you think about why?), and the value we are after is entry [0,1]. # In this exercise, you will write a function, pearson_r(x, y) that takes in two arrays # and returns the Pearson correlation coefficient. You will then use this function to compute it for the # petal lengths and widths of I. versicolor. # Why do we do statistical inference? # To draw probabilistic conclusions about what we might expect if we collected the same data again. # To draw actionable conclusions from data. # To draw more general conclusions from relatively few data or observations. # Summary: Correct! Statistical inference involves taking your data to probabilistic # conclusions about what you would expect if you # took even more data, and you can make decisions based on these conclusions. # Seed the random number generator np.random.seed(42) # Initialize random numbers: random_numbers random_numbers = np.empty(100000) # Generate random numbers by looping over range(100000) for i in range(100000): random_numbers[i] = np.random.random() # Plot a histogram _ = plt.hist(random_numbers, ec='white') # Show the plot plt.show() def perform_bernoulli_trials(n, p): """Perform n Bernoulli trials with success probability p and return number of successes.""" # Initialize number of successes: n_success n_success = 0 # Perform trials for i in range(n): # Choose random number between zero and one: random_number random_number = np.random.random() # If less than p, it's a success so add one to n_success if random_number < p: n_success += 1 return n_success # The np.random module and Bernoulli trials # You can think of a Bernoulli trial as a flip of a possibly biased coin. Specifically, each coin flip # has a probability p of landing heads (success) and probability 1−p of landing tails (failure). # In this exercise, you will write a function to perform n Bernoulli trials, perform_bernoulli_trials(n, p), # which returns the number of successes out of n Bernoulli trials, each of which has probability p of success. # To perform each Bernoulli trial, # use the np.random.random() function, which returns a random number between zero and one. # Seed random number generator np.random.seed(42) # Initialize the number of defaults: n_defaults n_defaults = np.empty(1000) # Compute the number of defaults for i in range(1000): n_defaults[i] = perform_bernoulli_trials(100, 0.05) # Plot the histogram with default number of bins; label your axes _ = plt.hist(n_defaults, normed=True) _ = plt.xlabel('number of defaults out of 100 loans') _ = plt.ylabel('probability') # Show the plot plt.show() # How many defaults might we expect? 
# Let's say a bank made 100 mortgage loans. It is possible that anywhere between 0 and 100 of the loans will be defaulted upon. # You would like to know the probability of getting a given number of defaults, given that the probability of a # default is p = 0.05. To investigate this, you will do a simulation. You will perform 100 Bernoulli trials using # the perform_bernoulli_trials() function you wrote in the previous exercise and record how many defaults we get. Here, a success # is a default. (Remember that the word "success" just means that the Bernoulli trial evaluates to True, i.e., # did the loan recipient default?) You will do this for another 100 Bernoulli trials. And again and again until we # have tried it 1000 times. Then, you will plot a histogram describing the probability of the number of defaults. # Compute ECDF: x, y x, y = ecdf(n_defaults) # Plot the CDF with labeled axes _ = plt.plot(x, y, marker='.', linestyle='none') _ = plt.xlabel('number of defaults out of 100') _ = plt.ylabel('CDF') # Show the plot plt.show() # Compute the number of 100-loan simulations with 10 or more defaults: n_lose_money n_lose_money = np.sum(n_defaults >= 10) # Compute and print probability of losing money print('Probability of losing money =', n_lose_money / len(n_defaults)) # Take 10,000 samples out of the binomial distribution: n_defaults n_defaults = np.random.binomial(n=100, p=0.05, size=10000) # Compute CDF: x, y x, y = ecdf(n_defaults) # Plot the CDF with axis labels _ = plt.plot(x, y, marker='.', linestyle='none') _ = plt.xlabel('number of defaults out of 100 loans') _ = plt.ylabel('CDF') # Show the plot plt.show() # Sampling out of the Binomial distribution # Compute the probability mass function for the number of defaults we would expect for 100 loans as in the last # section, but instead of simulating all of the Bernoulli trials, perform the sampling using np.random.binomial(). # This is identical to the calculation you did in the last set of exercises using your custom-written # perform_bernoulli_trials() function, but far more computationally efficient. Given this extra efficiency, we will # take 10,000 samples instead of 1000. After # taking the samples, plot the CDF as last time. This CDF that you are plotting is that of the Binomial distribution. # Compute bin edges: bins bins = np.arange(0, max(n_defaults) + 1.5) - 0.5 # Generate histogram _ = plt.hist(n_defaults, normed=True, bins=bins) # Label axes _ = plt.xlabel('number of defaults out of 100 loans') _ = plt.ylabel('PMF') # Show the plot plt.show() # Plotting the Binomial PMF # As mentioned in the video, plotting a nice looking PMF requires a bit of matplotlib trickery that we will not # go into here. Instead, we will plot the PMF of the Binomial distribution as a histogram with skills you have already # learned. The trick is setting up the edges of the bins to pass to plt.hist() via the bins keyword argument. # We want the bins centered on the integers. So, the edges of the bins should be -0.5, 0.5, 1.5, 2.5, ... up to # max(n_defaults) + 1.5. You can generate an array like this using np.arange() and then subtracting 0.5 from the array. 
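# Added comparison (beyond the original exercise): overlay the analytical Binomial
# PMF from scipy.stats on the simulated histogram. With 10,000 samples the two
# should agree closely. (`normed=True` matches the matplotlib API used elsewhere
# in this notebook; on matplotlib >= 3.1 use `density=True` instead.)
from scipy.stats import binom
k = np.arange(0, int(max(n_defaults)) + 1)
_ = plt.hist(n_defaults, normed=True, bins=bins, alpha=0.5)
_ = plt.plot(k, binom.pmf(k, 100, 0.05), marker='.', linestyle='none', color='red')  # n=100, p=0.05
_ = plt.xlabel('number of defaults out of 100 loans')
_ = plt.ylabel('PMF')
plt.show()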
# Draw 10,000 samples out of Poisson distribution: samples_poisson samples_poisson = np.random.poisson(10, size=10000) # Print the mean and standard deviation print('Poisson: ', np.mean(samples_poisson), np.std(samples_poisson)) # Specify values of n and p to consider for Binomial: n, p n = [20, 100, 1000] p = [0.5, 0.1, 0.01] # Draw 10,000 samples for each n,p pair: samples_binomial for i in range(3): samples_binomial = np.random.binomial(n[i], p[i], size=10000) # Print results print('n =', n[i], 'Binom:', np.mean(samples_binomial), np.std(samples_binomial)) # Relationship between Binomial and Poisson distributions # You just heard that the Poisson distribution is a limit of the Binomial distribution for rare events. # This makes sense if you think about the stories. Say we do a Bernoulli trial every minute for an hour, # each with a success probability of 0.1. We would do 60 trials, and the number of successes is Binomially distributed, # and we would expect to get about 6 successes. This is just like the Poisson story we discussed in the video, # where we get on average 6 hits on a website per hour. So, the Poisson distribution with arrival rate equal # to np approximates a Binomial distribution for n Bernoulli trials with probability p of success # (with n large and p small). Importantly, the Poisson distribution is often simpler to work # with because it has only one parameter instead of two for the Binomial distribution. # Possible Answers # Discrete uniform # Binomial # Poisson # Both Binomial and Poisson, though Poisson is easier to model and compute. # Both Binomial and Poisson, though Binomial is easier to model and compute. # Correct! When we have rare events (low p, high n), the Binomial distribution is Poisson. # This has a single parameter, # the mean number of successes per time interval, in our case the mean number of no-hitters per season. # Draw 10,000 samples out of Poisson distribution: n_nohitters n_nohitters = np.random.poisson(251/115, size=10000) # Compute number of samples that are seven or greater: n_large n_large = np.sum(n_nohitters >= 7) # Compute probability of getting seven or more: p_large p_large = n_large / 10000 # Print the result print('Probability of seven or more no-hitters:', p_large) # 1990 and 2015 featured the most no-hitters of any season of baseball (there were seven). Given that # there are on average 251/115 no-hitters per season, what is the probability of having seven or more in a season? # a discrete quantity is like a dice roll # a continuous quantity is like light # The value of the CDF at # x = 10 is 0.75, so the probability that x < 10 is 0.75. Thus, the probability that x > 10 is 0.25. # Draw 100000 samples from Normal distribution with stds of interest: samples_std1, samples_std3, samples_std10 samples_std1 = np.random.normal(20, 1, size=100000) samples_std3 = np.random.normal(20, 3, size=100000) samples_std10 = np.random.normal(20, 10, size=100000) # Make histograms _ = plt.hist(samples_std1, bins=100, normed=True, histtype='step') _ = plt.hist(samples_std3, bins=100, normed=True, histtype='step') _ = plt.hist(samples_std10, bins=100, normed=True, histtype='step') # Make a legend, set limits and show plot _ = plt.legend(('std = 1', 'std = 3', 'std = 10')) plt.ylim(-0.01, 0.42) plt.show() # In this exercise, you will explore the Normal PDF and also learn a way to plot a PDF of a known distribution # using hacker statistics. Specifically, you will plot a Normal PDF for various values of the variance. 
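# Added check (not part of the original exercise): the sampled histogram should
# match the analytical Normal PDF; shown here for the std = 3 samples only.
# (`normed=True` matches the matplotlib API used elsewhere in this notebook.)
from scipy.stats import norm
x_grid = np.linspace(0, 40, 400)
_ = plt.hist(samples_std3, bins=100, normed=True, histtype='step')
_ = plt.plot(x_grid, norm.pdf(x_grid, loc=20, scale=3))
_ = plt.xlabel('x')
_ = plt.ylabel('PDF')
plt.show()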
# You can see how the different standard deviations result # in PDFS of different widths. The peaks are all centered at the mean of 20. # Generate CDFs x_std1, y_std1 = ecdf(samples_std1) x_std3, y_std3 = ecdf(samples_std3) x_std10, y_std10 = ecdf(samples_std10) # Plot CDFs _ = plt.plot(x_std1, y_std1, marker='.', linestyle='none') _ = plt.plot(x_std3, y_std3, marker='.', linestyle='none') _ = plt.plot(x_std10, y_std10, marker='.', linestyle='none') # Make a legend and show the plot _ = plt.legend(('std = 1', 'std = 3', 'std = 10'), loc='lower right') plt.show() # Now that you have a feel for how the Normal PDF looks, let's consider # its CDF. Using the samples you generated in the last exercise # (in your namespace as samples_std1, samples_std3, and samples_std10), generate and plot the CDFs. # The CDFs all pass through the mean at the 50th percentile; the # mean and median of a Normal distribution are equal. The width of the CDF varies with the standard deviation. belmont = [148.51, 146.65, 148.52, 150.7, 150.42000000000002, 150.88, 151.57, 147.54, 149.65, 148.74, 147.86, 148.75, 147.5, 148.26, 149.71, 146.56, 151.19, 147.88, 149.16, 148.82, 148.96, 152.02, 146.82, 149.97, 146.13, 148.1, 147.2, 146.0, 146.4, 148.2, 149.8, 147.0, 147.2, 147.8, 148.2, 149.0, 149.8, 148.6, 146.8, 149.6, 149.0, 148.2, 149.2, 148.0, 150.4, 148.8, 147.2, 148.8, 149.6, 148.4, 148.4, 150.2, 148.8, 149.2, 149.2, 148.4, 150.2, 146.6, 149.8, 149.0, 150.8, 148.6, 150.2, 149.0, 148.6, 150.2, 148.2, 149.4, 150.8, 150.2, 152.2, 148.2, 149.2, 151.0, 149.6, 149.6, 149.4, 148.6, 150.0, 150.6, 149.2, 152.6, 152.8, 149.6, 151.6, 152.8, 153.2, 152.4, 152.2] belmont_no_outliers = np.array(belmont) # Compute mean and standard deviation: mu, sigma mu = np.mean(belmont_no_outliers) sigma = np.std(belmont_no_outliers) # Sample out of a normal distribution with this mu and sigma: samples samples = np.random.normal(mu, sigma, size=10000) # Get the CDF of the samples and of the data x_theor, y_theor = ecdf(samples) x, y = ecdf(belmont_no_outliers) # Plot the CDFs and show the plot _ = plt.plot(x_theor, y_theor) _ = plt.plot(x, y, marker='.', linestyle='none') _ = plt.xlabel('Belmont winning time (sec.)') _ = plt.ylabel('CDF') plt.show() # Since 1926, the Belmont Stakes is a 1.5 mile-long race of 3-year old thoroughbred horses. # Secretariat ran the fastest Belmont Stakes in history in 1973. While that was the fastest year, 1970 was # the slowest because of unusually wet and sloppy conditions. With these two outliers removed from the data # set, compute the mean and standard deviation of the Belmont winners' times. Sample out of a Normal # distribution with this mean and standard deviation using the np.random.normal() function and plot a CDF. # Overlay the ECDF from the winning Belmont times. Are these close to Normally distributed? # Note: Justin scraped the data concerning the Belmont Stakes from the Belmont Wikipedia page. # The theoretical CDF and the ECDF of the data suggest that the winning Belmont times are, indeed, Normally # distributed. This also suggests that in the last 100 years or so, there have not been major # technological or training advances that have significantly affected the speed at which horses can run this race. # What are the chances of a horse matching or beating Secretariat's record? # Assume that the Belmont winners' times are Normally distributed (with the 1970 and 1973 years removed), what # is the probability that the winner of a given Belmont Stakes will run it as fast or faster than Secretariat? 
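# Added analytical cross-check (not part of the original exercise): under the
# Normal model, P(time <= 144 s) is just the CDF evaluated at 144 with the mu and
# sigma computed above. This should agree with the sampling estimate in the next cell.
from scipy.stats import norm
print('Analytical P(time <= 144 s):', norm.cdf(144, loc=mu, scale=sigma))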
# Take a million samples out of the Normal distribution: samples samples = np.random.normal(mu, sigma, size=1000000) # Compute the fraction that are faster than 144 seconds: prob prob = np.sum(samples <= 144) / len(samples) # Print the result print('Probability of besting Secretariat:', prob) # Great work! We had to take a million samples because the probability of # a fast time is very low and we had to be sure to sample enough. # We get that there is only a 0.06% chance of a horse running the Belmont as fast as Secretariat. # Matching a story and a distribution # How might we expect the time between Major League no-hitters to be distributed? # Be careful here: a few exercises ago, we considered the probability distribution # for the number of no-hitters in a season. # Now, we are looking at the probability distribution of the time between no hitters. # Possible Answers # Normal # Exponential # Poisson # Uniform # Waiting for the next Secretariat # Unfortunately, Justin was not alive when Secretariat ran the Belmont in 1973. # Do you think he will get to see a performance like that? # To answer this, you are interested in how many years you would expect to wait until you see another # performance like Secretariat's. How is the waiting time # until the next performance as good or better than Secretariat's distributed? Choose the best answer. # Possible Answers # Normal, because the distribution of Belmont winning times are Normally distributed. # Normal, because there is a most-expected waiting time, so there should be a single peak to the distribution. # Exponential: It is very unlikely for a horse to be faster than Secretariat, so the distribution should decay # away to zero for high waiting time. # Exponential: A horse as fast as Secretariat is a rare event, which can be modeled as a Poisson process, # and the waiting time between arrivals of a Poisson process is Exponentially distributed. # Correct! The Exponential distribution describes the waiting times between rare events, and Secretariat is rare! def successive_poisson(tau1, tau2, size=1): """Compute time for arrival of 2 successive Poisson processes.""" # Draw samples out of first exponential distribution: t1 t1 = np.random.exponential(tau1, size=size) # Draw samples out of second exponential distribution: t2 t2 = np.random.exponential(tau2, size=size) return t1 + t2 # If you have a story, you can simulate it! # Sometimes, the story describing our probability distribution does not # have a named distribution to go along with it. In these cases, fear not! # You can always simulate it. We'll do that in this and the next exercise. # In earlier exercises, we looked at the rare event of no-hitters in Major # League Baseball. Hitting the cycle is another rare baseball event. When a # batter hits the cycle, he gets all four kinds of hits, a single, double, # triple, and home run, in a single game. Like no-hitters, this can be modeled # as a Poisson process, so the time between hits of the cycle are also Exponentially distributed. # How long must we wait to see both a no-hitter and then a batter hit # the cycle? The idea is that we have to wait some time for the no-hitter, # and then after the no-hitter, we have to wait for hitting the cycle. Stated # another way, what is the total waiting time for the arrival of two different # Poisson processes? The total waiting time is the time waited for the no-hitter, # plus the time waited for the hitting the cycle. 
# Now, you will write a function to sample out of the distribution described by this story. # Distribution of no-hitters and cycles # Now, you'll use your sampling function to compute the waiting time to observe a no-hitter and hitting of the cycle. # The mean waiting time for a no-hitter is 764 games, and the mean waiting time for hitting the cycle is 715 games. # Draw samples of waiting times waiting_times = successive_poisson(764, 715, size=100000) # Make the histogram _ = plt.hist(waiting_times, bins=100, histtype='step', normed=True) # Label axes _ = plt.xlabel('total waiting time (games)') _ = plt.ylabel('PDF') # Show the plot plt.show() ```
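A compatibility note for rerunning this notebook today: the `normed=True` keyword passed to `plt.hist()` in several cells above was deprecated in later matplotlib 2.x releases and removed in matplotlib 3.1, where the equivalent argument is `density=True`. A minimal sketch of the substitution, using the `waiting_times` array from the final cell:

```
# Same PDF-style histogram as the last cell, written for matplotlib >= 3.1,
# where `normed` no longer exists and `density=True` is the replacement.
_ = plt.hist(waiting_times, bins=100, histtype='step', density=True)
_ = plt.xlabel('total waiting time (games)')
_ = plt.ylabel('PDF')
plt.show()
```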
# Introduction ## Research Question What is the information flow from visual stream to motor processing and how early in processing can we predict behavioural outcomes. - Can decoding models be trained by region - How accurate are the modeled regions at predicting a behaviour - Possible behaviours (correct vs. incorrect) - Movement of wheel ## Brief background The Steinmetz (2018) dataset reported that neurons with action correlates are found globally and that neurons in nearly every brain region are non-selectively activated in the moments leading up to movement onset, however it is currently not known how the information integration occurs across the motor areas and how that integration gives rise to motor behaviour. Neuron population coding has been robustly used to decode motor behaviours across various species (Georgopoulos et al., 1986), and recent literature has suggested that motor preparation and planning uses distributed populations in corticomotor areas to plan motor movements. However this previous work has been limited by the number of electrodes and therefore areas measured in a single task. The following assignment seeks to take advantage of the multi-array recording from the Steinmetz (2018) neuropixel data set to investigate temporal aspects of motor behaviours. # Data Analyses :brain: :mouse: :brain: ## Set Up ``` import pandas as pd import numpy as np import dataframe_image as dfi import pathlib from matplotlib import rcParams from matplotlib import pyplot as plt import emoji rcParams['figure.figsize'] = [15, 5] rcParams['font.size'] = 15 rcParams['axes.spines.top'] = False rcParams['axes.spines.right'] = False rcParams['figure.autolayout'] = True import os, requests fname = [] for i in range(3): fname.append('steinmetz_part%d.npz'%i) url = ['https://osf.io/agvxh/download'] url.append('https://osf.io/uv3mw/download') url.append('https://osf.io/ehmw2/download') for i in range(len(url)): if not os.path.isfile(fname[i]): try: r = requests.get(url[i]) except requests.ConnectionError: print("Data could not download!") else: if r.status_code != requests.codes.ok: print("Data could not download!") else: with open(fname[i], "wb") as fid: fid.write(r.content) steinmetz_data = np.array([]) for i in range(len(fname)): steinmetz_data = np.hstack((steinmetz_data, np.load('steinmetz_part%d.npz'%i, allow_pickle=True)['dat'])) ``` ## Exploring the data ``` # choose one recording session (20) to get labels session_20 = steinmetz_data[20] keys = session_20.keys() print(keys) for key in session_20.keys(): dataset_info = session_20[key] if isinstance (dataset_info, np.ndarray): print(key, dataset_info.shape, " - array") elif isinstance (dataset_info, list): print(key, len(dataset_info), " - list") else: print(key, type(dataset_info), " - other") brain_areas = [] for i in range(steinmetz_data.shape[0]): unique_area = np.unique(steinmetz_data[i]['brain_area']) # check this line for the for u in unique_area: brain_areas.append(u) ubs = list(np.unique(brain_areas)) table = pd.DataFrame(columns=['session', 'mouse_name', 'n_neuron'] + ubs) for i in range(steinmetz_data.shape[0]): this_session: dict = {} unique_barea = list(np.unique(steinmetz_data[i]['brain_area'])) this_session['session'] = i this_session['mouse_name'] = steinmetz_data[i]['mouse_name'] this_session['n_neuron'] = steinmetz_data[i]['spks'].shape[0] this_session['n_trial'] = steinmetz_data[i]['spks'].shape[1] for ubrea in unique_barea: n_neuron, n_trial, _ = (steinmetz_data[i]['spks'][steinmetz_data[i]['brain_area'] == 
ubrea]).shape this_session[ubrea] = n_neuron table = table.append(this_session, ignore_index=True) table = table.fillna(0) pathlib.Path('/Users/sophiabatchelor/Code/SteinmetzAnalyses/Images').mkdir(parents=True, exist_ok=True) dfi.export(table, '/Users/sophiabatchelor/Code/SteinmetzAnalyses/Images/steinmetz_all_data_table.png', max_cols=77) table ``` ## Investigate Spiking Reponses ``` # groupings of brain regions brain_regions = ["vis ctx", "thal", "hipp", "other ctx", "midbrain", "basal ganglia", "cortical subplate", "other"] brain_groupings = [["VISa", "VISam", "VISl", "VISp", "VISpm", "VISrl"], # visual cortex ["CL", "LD", "LGd", "LH", "LP", "MD", "MG", "PO", "POL", "PT", "RT", "SPF", "TH", "VAL", "VPL", "VPM"], # thalamus ["CA", "CA1", "CA2", "CA3", "DG", "SUB", "POST"], # hippocampal ["ACA", "AUD", "COA", "DP", "ILA", "MOp", "MOs", "OLF", "ORB", "ORBm", "PIR", "PL", "SSp", "SSs", "RSP"," TT"], # non-visual cortex ["APN", "IC", "MB", "MRN", "NB", "PAG", "RN", "SCs", "SCm", "SCig", "SCsg", "ZI"], # midbrain ["ACB", "CP", "GPe", "LS", "LSc", "LSr", "MS", "OT", "SNr", "SI"], # basal ganglia ["BLA", "BMA", "EP", "EPd", "MEA"] # cortical subplate ] mouse_dict = {} # create a dictionary for session, dat_i in enumerate(steinmetz_data): name = dat_i["mouse_name"] if name not in mouse_dict.keys(): mouse_dict[name] = [dat_i] else: lst = mouse_dict[name] lst.append(dat_i) mouse_dict[name] = lst assigned_region = "VISp" # analyse for all runs of a single mouse for mouse in ["Cori"]: mouse_data = mouse_dict[mouse] #list of the sessions corresponding to this mouse, [alldat[0], alldat[1], alldat[2]] num_sessions = len(mouse_dict[mouse]) thing = None for trial in mouse_data: spk_trial = trial['spks'] if assigned_region in trial["brain_area"]: spk_trial_region = spk_trial[trial["brain_area"] == assigned_region] # average over trials spk_trial_region_avg = np.mean(spk_trial_region, axis=1) # take only values that are average above 0.2 spk_trial_region_avg_good = spk_trial_region_avg[np.mean(spk_trial_region_avg, axis=1) >= 0.2,:] if thing is not None: thing = np.concatenate((thing, spk_trial_region_avg_good)) else: thing = spk_trial_region_avg_good plot = plt.figure() plt.plot(thing.T) plot.suptitle("High Spiking Neurons in Cori's Primary Visual Cortex") plt.xlabel("Timebins") plt.ylabel("Average Number of Spikes") plt.savefig('/Users/sophiabatchelor/Code/SteinmetzAnalyses/Images/Plots/cori_v1_spks.png') plt.show(plot) # Group The data by mouse for session, dat_i in enumerate(steinmetz_data): name = dat_i["mouse_name"] if name not in mouse_dict.keys(): mouse_dict[name] = [dat_i] else: lst = mouse_dict[name] lst.append(dat_i) mouse_dict[name] = lst names = [] for dat_i in steinmetz_data: name = dat_i["mouse_name"] if name not in names: names.append(name) print("Mice: {}".format(names)) assigned_regions = ['CA1', 'CA3',"VISp", "VISpm", "VISrl", "VISam", "VISa", "DG", "MD", "MOs", "MG", "MOp" ,] # change this to be whichever regions are of interest # !! NOTE !! 
the order matters ### Note ### # LIST OF AREAS # "VISp", "VISpm", "VISI", "VISrl", "VISam", "VISa", 'CA1', 'CA3', "DG", "CP", "SCm", "SCs", "SNr", "SSp", "ACA", "ILA", "GPe", "ACB", "APN", "BLA", "LD", "LGd", "LP", "LS", "MD", "MG", "MOp", "MOs", "MRN", "OLF", "ORB", "PAG", "PL", "PO", "POL", "POST", "RSP", "RT", "SUB", "ZI", "VPL", "VPM" # VISI is throwing an error for assigned_region in assigned_regions: all_mice_names = [] all_mice_lines = None for mouse in mouse_dict.keys(): mouse_data = mouse_dict[mouse] num_sessions = len(mouse_dict[mouse]) spk_all_sessions = None for session in mouse_data: spk_session = session['spks'] if assigned_region in session['brain_area']: spk_session_region = spk_session[session['brain_area'] == assigned_region] # average over trials spk_session_region_avg = np.mean(spk_session_region, axis=1) if spk_all_sessions is not None: spk_all_sessions = np.concatenate((spk_all_sessions, spk_session_region_avg)) else: spk_all_sessions = spk_session_region_avg # average over all neurons if spk_all_sessions is not None: name_i = mouse all_mice_names.append(name_i) mouse_i = np.mean(spk_all_sessions, axis=0) mouse_i = np.expand_dims(mouse_i, 0) if all_mice_lines is not None: all_mice_lines = np.concatenate((all_mice_lines, mouse_i), axis = 0) else: all_mice_lines = mouse_i plot = plt.figure(figsize=(10, 5)) plt.plot(all_mice_lines.T) # had to transpose so that time was on the x axis plot.suptitle("Average Spiking of {}".format(assigned_region)) plt.xlabel("Timebins") # change axis labels if you need reminders plt.ylabel("Average Number of Spikes per time bin") plt.legend(all_mice_names, loc = "upper right") pathlib.Path('/Users/sophiabatchelor/Code/SteinmetzAnalyses/Images/Plots').mkdir(parents=True, exist_ok=True) plt.savefig('/Users/sophiabatchelor/Code/SteinmetzAnalyses/Images/Plots/Plotof{}.png'.format(assigned_region)) plt.show() ``` ## Relationship between spiking and behaviour ``` # analyses for Lederberg : session 11 session_11 = steinmetz_data[11] dt = session_11['bin_size'] # 10ms bins NT = session_11['spks'].shape[-1] # ax = plt.subplot(1,5,1) response = session_11['response'] # right - nogo - left (-1, 0, 1) vis_right = session_11['contrast_right'] # 0 - low - high vis_left = session_11['contrast_left'] # 0 - low - high avg_gocue = (np.mean(session_11["gocue"])) plt.plot(dt * np.arange(NT), 1 / dt * session_11['spks'][:,response>=0].mean(axis=(0,1))) # left responses plt.plot(dt * np.arange(NT), 1 / dt * session_11['spks'][:,response<0].mean(axis=(0,1))) # right responses plt.plot(dt * np.arange(NT), 1 / dt * session_11['spks'][:,vis_right>0].mean(axis=(0,1))) # right stimuli plt.plot(dt * np.arange(NT), 1 / dt * session_11['spks'][:,vis_right==0].mean(axis=(0,1))) # left stimuli plt.axvline(avg_gocue, color='black') plt.title("Session 11 Spike Frequency") plt.xlabel("Time (sec)") # change axis labels if you need reminders plt.ylabel("Firing rate (Hz)") plt.legend(['left resp', 'right resp', 'right stim', 'left stim', 'stimuli onset'], fontsize=14) pathlib.Path('/Users/sophiabatchelor/Code/SteinmetzAnalyses/Images/Plots/ResponseSpikeAnalyses').mkdir(parents=True, exist_ok=True) plt.savefig('/Users/sophiabatchelor/Code/SteinmetzAnalyses/Images/Plots/ResponseSpikeAnalyses/session_11_spikes.png') plt.show() regions = ["vis ctx", "thal", "hipp", "other ctx", "midbrain", "basal ganglia", "cortical subplate", "other"] brain_groups = [["VISa", "VISam", "VISl", "VISp", "VISpm", "VISrl"], # visual cortex ["CL", "LD", "LGd", "LH", "LP", "MD", "MG", "PO", "POL", "PT", 
"RT", "SPF", "TH", "VAL", "VPL", "VPM"], # thalamus ["CA", "CA1", "CA2", "CA3", "DG", "SUB", "POST"], # hippocampal ["ACA", "AUD", "COA", "DP", "ILA", "MOp", "MOs", "OLF", "ORB", "ORBm", "PIR", "PL", "SSp", "SSs", "RSP"," TT"], # non-visual cortex ["APN", "IC", "MB", "MRN", "NB", "PAG", "RN", "SCs", "SCm", "SCig", "SCsg", "ZI"], # midbrain ["ACB", "CP", "GPe", "LS", "LSc", "LSr", "MS", "OT", "SNr", "SI"], # basal ganglia ["BLA", "BMA", "EP", "EPd", "MEA"] # cortical subplate ] num_good_areas = 4 # only the top 4 regions are in this particular mouse neurons = len(session_11['brain_area']) # gives the number of neurons good_areas = num_good_areas * np.ones(neurons, ) # note: last brain region is "other" for i in range(num_good_areas): good_areas[np.isin(session_11['brain_area'], brain_groups[i])] = i # assign a number to each region # Neural response to visual stimuli for i in range(num_good_areas): fig, axs = plt.subplots(sharey = True) plt.plot(1 / dt * session_11['spks'][good_areas == i][:,np.logical_and(vis_left == 0, vis_right > 0)].mean(axis=(0,1))) plt.plot(1 / dt * session_11['spks'][good_areas == i][:,np.logical_and(vis_left == 0, vis_right == 0)].mean(axis=(0,1))) plt.plot(1 / dt * session_11['spks'][good_areas == i][:,np.logical_and(vis_left > 0, vis_right == 0)].mean(axis=(0,1))) plt.plot(1 / dt * session_11['spks'][good_areas == i][:,np.logical_and(vis_left > 0, vis_right > 0)].mean(axis=(0,1))) fig.suptitle('{} response to visual stimuli'.format(regions[i])) plt.xlabel('Time (ms)') plt.ylabel('Spike rate (Hz)') plt.legend(['right cue', 'left cue', 'no_cue', 'spike response any cue'], fontsize=12) plt.savefig('/Users/sophiabatchelor/Code/SteinmetzAnalyses/Images/Plots/ResponseSpikeAnalyses/session11_{}_vep.png'.format(regions[i])) ``` ## Now let's model ``` print(emoji.emojize(':bug: :bug: :bug: :bug: :bug: :bug: :bug: :bug: :bug: :bug: :bug: :bug: :bug: :bug: :bug: :bug: :bug: :bug:')) from sklearn.linear_model import LogisticRegression from sklearn import metrics from sklearn.model_selection import train_test_split from sklearn.model_selection import cross_val_score session_data = steinmetz_data[11] num_timebins = session_data['spks'].shape[2] num_trials = session_data['spks'].shape[1] move = session_data['response'] # right - nogo - left (-1, 0, 1) regions = np.unique(session_data['brain_area']) spikes_in_a_region = {} for region in regions: spikes_in_a_region[region] = session_data['spks'][np.where(session_data['brain_area']==region)] session_spikes = session_data['spks'] quick_info = session_spikes.shape print("Number of neurons recorded in all sessions: {}, Number of Trials: {}, Number of timebins: {}".format(quick_info[0], quick_info[1], quick_info[2])) Y = (move != 0).astype(int) # boolean true Y # 1D array target_regions = spikes_in_a_region.keys() scores = np.zeros((len(target_regions),num_timebins)) for target_regions,(, spikes) in enumerate(spikes_in_a_region.items()): for t in range(num_timebins): X = spikes[:,:,t].T X.shape # check if the function is actually reading in the files okay alldata = np.array([]) for j in range(2): alldata = np.hstack((alldata, np.load('/Users/sophiabatchelor/Code/SteinmetzAnalyses/steinmetz_part%d.npz'%(j+1), allow_pickle=True)['dat'])) data = alldata[11] print(data.keys()) ## BUGS ALL THE WAY DOWN # Note: data isn't the same shape ### nextsteps: ### # - strip back the functions # - reshape or Transpose data def prepare_data(session=11): model_data = np.array([]) for j in range(2): model_data = np.hstack((model_data, 
np.load('/Users/sophiabatchelor/Code/SteinmetzAnalyses/steinmetz_part%d.npz'%(j+1), allow_pickle=True)['dat'])) data = model_data[session] num_trials = session_data['spks'].shape[1] n_timebins = data['spks'].shape[2] move = session_data['response'] # right - nogo - left (-1, 0, 1) regions = np.unique(data['brain_area']) spikes_per_region = dict() for region in regions: spikes_per_region[region] = data['spks'][np.where(data['brain_area']==region)] return spikes_per_region, labels, n_timebins def simple_decoder(session=11): model = LogisticRegression(penalty='l2',multi_class='ovr',solver='liblinear') spikes_per_region, Y, n_timebins = prepare_data(session=session) regions = spikes_per_region.keys() scores = np.zeros((len(regions),n_timebins)) for region,(_, spikes) in enumerate(spikes_per_region.items()): for t in range(n_timebins): X = spikes[:,:,t].T x = X.transpose() score = cross_val_score(model, x, Y, cv=5) scores[region,t] = np.mean(score) return scores def plot_scores(scores,session,save_name): spikes_per_region, _, n_timebins = prepare_data(session=session) regions = spikes_per_region.keys() fig = plt.figure(figsize=[10,5]) contour = plt.contourf(scores) cb = fig.colorbar(contour, shrink = 0.5, aspect = 5) cb.set_label('Accuracy') tick_marks = np.arange(len(regions)) plt.yticks(tick_marks, regions) plt.xticks(np.arange(0,n_timebins,20), np.arange(0,n_timebins*10,200)) plt.ylabel('Brain area') plt.xlabel('Time (ms)') plt.tight_layout() plt.show() # TODO create a dir in Images for the plot to be saved in # fig.savefig(<path> + save_name, format='png') if __name__=="__main__": scores = simple_decoder(session = 12) plot_scores(scores,12,'scores_s12.png') def plot_all_sessions(): n_sessions = 39 for i in range(n_sessions): scores = simple_decoder(session = i) plot_scores(scores,i,'scores_s%d.png'%i) if __name__=="__main__": scores = simple_decoder(session=12) plot_scores(scores,12,'scores_s12.png') # plot_all_sessions() for trial in range(num_trials): # this will run 340 times # find the avg spike per time bin # get the avg spk_per_time_bin list_of_spikes_in_a_trial = [] list_spk_avg_per_trial= [] for t in range(num_timebins): spikes_in_a_trial = session_spikes[t,t,:] list_of_spikes_in_a_trial.append(spikes_in_a_trial) trial_spk_avg = np.mean(spikes_in_a_trial) list_spk_avg_per_trial.append(trial_spk_avg) list_of_spikes_in_a_trial = [] list_spk_avg_per_trial= [] for t in range(num_timebins): spikes_in_a_trial = session_spikes[t,t,:] list_of_spikes_in_a_trial.append(spikes_in_a_trial) trial_spk_avg = np.mean(spikes_in_a_trial) list_spk_avg_per_trial.append(trial_spk_avg) len(list_of_spikes_in_a_trial) num_trials avg_spks_per_timebin = [] for a_session in range(num_sessions): spikes_in_bin = session_spikes[c,:,t] avg_per_bin = np.mean(spikes_in_bin) avg_spks_per_timebin.append(avg_per_bin) avg_spks_per_timebin for t in range(num_timebins): test_spks = test_set[t,t,:] test_spks print(test_spks.ndim) print(test_spks.shape) for t in range(num_timebins): test_bin_piece = test_set[:,:,t] test_bin_piece print(test_bin_piece.ndim) print(test_bin_piece.shape) hat1 = test_set[0,0,:] hat1 # 250 results -> these are the spikes in session hat2 = test_set[1,1,:] hat2 hat3 = eqtest_setp[2,2,:] hat3 np.mean(hat1) np.mean(hat2) np.mean(hat3) list_the_spikes_in_a_session = [] list_bin_means = [] for t in range(num_timebins): the_spikes_in_a_session = test_set[t,t,:] list_the_spikes_in_a_session.append(the_spikes_in_a_session) avg_per_bin = np.mean(the_spikes_in_a_session) list_bin_means.append(avg_per_bin) 
print(list_the_spikes_in_a_session) len(list_the_spikes_in_a_session) Lederb = table.iloc[11] Lederb list_bin_means ```
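The cells above flag the remaining problems themselves ("BUGS ALL THE WAY DOWN"): `prepare_data()` returns a `labels` variable that is never assigned, one scratch loop unpacks `(, spikes)` (a syntax error), and `simple_decoder()` transposes `X` a second time before `cross_val_score`, which mismatches the trial dimension of `Y`. Below is a minimal corrected sketch of the intended per-region, per-timebin movement decoder; it assumes only the `steinmetz_data` array loaded earlier, and the function name `decode_session` is illustrative rather than part of the original notebook.

```
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

def decode_session(session=11):
    """Cross-validated movement decoding per brain area and timebin (sketch)."""
    data = steinmetz_data[session]
    y = (data['response'] != 0).astype(int)      # 1 = movement (left or right), 0 = no-go
    n_timebins = data['spks'].shape[2]
    regions = np.unique(data['brain_area'])
    scores = np.zeros((len(regions), n_timebins))
    model = LogisticRegression(penalty='l2', multi_class='ovr', solver='liblinear')
    for r, region in enumerate(regions):
        region_spikes = data['spks'][data['brain_area'] == region]   # (neurons, trials, timebins)
        for t in range(n_timebins):
            X = region_spikes[:, :, t].T                             # (trials, neurons)
            scores[r, t] = cross_val_score(model, X, y, cv=5).mean()
    return regions, scores
```

Fitting one classifier per (brain area, timebin) pair keeps the research question from the introduction in view: how early in the trial each region's population becomes predictive of whether the mouse will move.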
<center> <img src="../../img/ods_stickers.jpg">

## Open Machine Learning Course
<center>Author: Michael Kazachok (@miklgr500)

# <center>The other side of tensorflow: KMeans

## <center>Introduction

<p style="text-indent:20px;">Many people know <strong>tensorflow</strong> as one of the best libraries for training neural networks, but tensorflow has grown considerably lately. New <a href='https://www.tensorflow.org/programmers_guide/estimators'>Estimators</a> have appeared, which are more convenient than the old paradigm on which they are built.</p>
<p style="text-indent:20px;">The <a href='https://www.tensorflow.org/'>tensorflow</a> site provides good installation instructions for each operating system and describes how to use a <a href='https://ru.wikipedia.org/wiki/GPGPU'>GPGPU</a>. I will not load this work with the details of tensorflow's internal "kitchen" (so I recommend at least reading the basics in the <a href='https://www.tensorflow.org/tutorials/'>official tutorial</a> and looking at <a href='https://github.com/aymericdamien/TensorFlow-Examples'>TensorFlow Tutorial and Examples for Beginners with Latest APIs</a>, whose examples will also help with further study of neural networks). Instead, I will walk through the clustering algorithms already built into this library (of which there are, in fact, only two so far).</p>
<p style="text-indent:20px;">We will use the Kaggle dataset from the <a href='https://www.kaggle.com/chicago/chicago-taxi-rides-2016'>Chicago Taxi Rides 2016</a> competition, which was used in one of the homework assignments (<span style='color:green'>I recommend using no more than two months of data</span>).</p>
<p style="text-indent:20px;">Applying the simplest clustering algorithm in tensorflow will be accompanied by a look at elegant visualization (which I saw this summer in the Kaggle <a href='https://www.kaggle.com/c/nyc-taxi-trip-duration'>New York City Taxi Trip</a> competition), presented by <a href='https://www.kaggle.com/drgilermo'>DrGuillermo</a> and <a href='https://www.kaggle.com/maheshdadhich'>BuryBuryZymon</a> in their kernels <a href='https://www.kaggle.com/drgilermo/dynamics-of-new-york-city-animation'>Dynamics of New York city - Animation</a> and <a href='https://www.kaggle.com/maheshdadhich/strength-of-visualization-python-visuals-tutorial'>Strength of visualization-python visuals tutorial</a>.</p>
<p style="text-indent:20px;"><i>P.S. The author was prompted to write this tutorial by the rather poor coverage of tensorflow's facilities for building the simple, well-known machine learning algorithms, which for certain tasks can be more effective than complex ones.</i></p>

## <center>Importing the libraries used in this work and loading the data

```
FIG_SIZE = (12,12)

PATH_DATA_JSON = '../../data/column_remapping.json'
PATH_DATA_CSV = '../../data/chicago_taxi_trips_2016_*.csv'
GIF_PATH = '../../img/animation.gif'
KMEANS_GIF_PATH = '../../img/kmeans_animation.gif'

NUM_CLUSTERS = 5
BATCH_SIZE = 5
NUM_STEPS = 50

LON_CONST = -87.623177
LAT_CONST = 41.881832

LON_ANI_CENTER = [-87.73, -87.60]
LAT_ANI_CENTER = [41.85, 42.00]

import json
import pandas as pd
from glob import glob
from joblib import Parallel, delayed

import folium
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import animation
from matplotlib.patches import Ellipse
from IPython.display import HTML
plt.rcParams.update({'figure.max_open_warning': 0})

import numpy as np
import tensorflow as tf

from geopy.geocoders import Nominatim

import io
import base64
from dateutil import parser

%load_ext watermark
```

Versions of the main libraries and system parameters.

```
%watermark -v -m -p numpy,pandas,matplotlib,tensorflow -g
```

Load the data for the first two months. Be careful with the paths to the data files.

```
# kernel used to load and preprocess
# the data for a single month
def preproc_kernel(path):
    with open(PATH_DATA_JSON) as json_file:
        column_remapping = json.load(json_file)
    df = pd.read_csv(path)
    # later on we only need the geo data
    # and the trip start time
    df = df.loc[:, [
        'trip_start_timestamp',
        'pickup_latitude',
        'pickup_longitude',
        'dropoff_latitude',
        'dropoff_longitude']].dropna()
    geo_labels = ['pickup_latitude', 'pickup_longitude',
                  'dropoff_latitude', 'dropoff_longitude']
    for g in geo_labels:
        df[g] = df[g].apply(lambda x: float(column_remapping[g].get(str(int(x)))))
    return df

dataset_files = sorted(glob(PATH_DATA_CSV))
# load the data in parallel on two cores,
# one file per core
dfs = Parallel(n_jobs=2)(delayed(preproc_kernel)(path) for path in dataset_files)
# concatenate the data
df = pd.concat(dfs, ignore_index=True)
df.head()
```

## <center>Data visualization

Let us make a preliminary visualization of all the geo data and find their boundaries.

```
# combine the geo data of pickup and dropoff points
longitude = list(df.pickup_longitude)+list(df.dropoff_longitude)
print('max_long:'+str(max(longitude)))
print('min_long:'+str(min(longitude)))

latitude = list(df.pickup_latitude)+list(df.dropoff_latitude)
print('max_lat:'+str(max(latitude)))
print('min_lat:'+str(min(latitude)))

loc_df = pd.DataFrame()
loc_df['longitude'] = longitude
loc_df['latitude'] = latitude

# visualize the combined geo data
fig, ax = plt.subplots(1,1, figsize = FIG_SIZE)
plt.plot(longitude,
         latitude,
         '.',
         color = 'orangered',
         markersize = 1.5,
         axes = ax,
         figure = fig
        )
ax.set_axis_off()
plt.show();
```

<p style="text-indent:20px;">Little can be said about the number of clusters from the plot above.
However, plotting the distributions of longitude and latitude makes the picture somewhat clearer.</p>

```
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, figsize=FIG_SIZE)
sns.distplot(loc_df['longitude'], bins=300, kde=False, ax=ax1)
sns.distplot(loc_df['latitude'], bins=300, kde=False, ax=ax2)
plt.show();
```

<p style="text-indent:20px;">The plots above show that most of the traffic falls almost exactly on the city center. It is also worth noting the clearly elevated traffic at longitude -87.90, and, to the right of the center in longitude, three more spots with pronounced traffic stand out. So in addition to the one main high-traffic center there are at least four more centers that could be separated into their own clusters. In total, five clusters with pronounced traffic can be identified.</p>

## <center>KMeans in tensorflow

<p style="text-indent:20px;">This is probably one of the most widely used clustering algorithms at the moment. I do not think the theory needs to be laid out here (given that it was covered in the <a href='https://habrahabr.ru/company/ods/blog/325654/'>course lecture</a>); if anyone wants to read more about this algorithm and about clustering in general, I can recommend <a href='http://www.machinelearning.ru/wiki/images/2/28/Voron-ML-Clustering-slides.pdf'>K.V. Vorontsov's lectures</a>.</p>

```
# build an array with the data in the required format,
# i.e. form [lon, lat] pairs.
# For the algorithm to work properly
# it is essential to remove
# the constant component
data = [[(lon-LON_CONST), (lat-LAT_CONST)] for lon, lat in zip(longitude, latitude)]
data = np.array(data)
```

<p style="text-indent:20px;">As the basis we take the <a href='https://www.tensorflow.org/versions/master/api_docs/python/tf/contrib/factorization/KMeans'>KMeans</a> algorithm already built into tensorflow (<a href='https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/contrib/factorization/python/ops/clustering_ops.py'>I love open source</a>). Those who have looked through the open code may have noticed that, out of its large set of methods, we can only call <i>training_graph(self)</i>. Check whether this method returns the <i>cluster_centers_var</i> variable in your version of tensorflow (in 1.3 it is not returned).</p>

```
def KMeans_clustering(num_clusters=NUM_CLUSTERS, flag_print=True):
    # create the placeholder X;
    # by using it instead of actual values
    # we tell the computation graph
    # that these values will be provided later,
    # during training and/or initialization
    X = tf.placeholder(tf.float32, shape=[None, 2])

    # build the computation graph for KMeans
    kmeans = tf.contrib.factorization.KMeans(
        inputs=X,
        num_clusters=num_clusters,
        initial_clusters="kmeans_plus_plus",
        mini_batch_steps_per_iteration=BATCH_SIZE,
        random_seed=29,
        use_mini_batch=True
    )
    (all_scores, cluster_idx, scores, cluster_centers_initialized,
     cluster_centers_var, init_op, train_op) = kmeans.training_graph()
    # since a tuple is returned initially,
    # take only its first element
    cluster_idx = cluster_idx[0]
    # compute the average distance
    # of the points to their cluster
    avg_distance = tf.reduce_mean(scores)

    # create the session and initialize
    init_vars = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init_vars)
    sess.run(init_op, feed_dict={X: data})

    # train the model step by step,
    # getting at each step
    # d: the average distance from a point
    # to the center of its cluster
    # ----------------------------
    # set the stopping criterion
    for i in range(1, NUM_STEPS+1):
        _, d, idx, cl_c = sess.run([train_op,
                                    avg_distance,
                                    cluster_idx,
                                    cluster_centers_var],
                                   feed_dict={X: data}
                                  )
        if (i%10==0)&(flag_print):
            print('Step %i, Average Distance %.8f'%(i, d))
    sess.close()
    return d,idx,cl_c
```

<p style="text-indent:20px;">Let us visualize how the algorithm works by initializing all clusters at the coordinate [LON_CONST, LAT_CONST], which is the city center.</p>

```
# make an animation of the training
num_clusters = 8

# array for initializing the clusters
# at the point [LON_CONST, LAT_CONST], but
# since all our data are shifted by
# that coordinate,
# the initialization has to be done
# at the point [0, 0]
init_cl = np.array([[0, 0] for i in range(num_clusters)],
                   dtype=np.float32
                  )

X = tf.placeholder(tf.float32, shape=[None, 2])

# build the computation graph for KMeans
kmeans = tf.contrib.factorization.KMeans(
    inputs=X,
    num_clusters=num_clusters,
    initial_clusters=init_cl,
    mini_batch_steps_per_iteration=2,
    random_seed=29,
    use_mini_batch=False
)
(all_scores, cluster_idx, scores, cluster_centers_initialized,
 cluster_centers_var, init_op, train_op) = kmeans.training_graph()
# since a tuple is returned initially,
# take only its first element
cluster_idx = cluster_idx[0]
avg_distance = tf.reduce_mean(scores)

# create the session and initialize
init_vars = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init_vars)
sess.run(init_op, feed_dict={X: data})

fig, ax = plt.subplots(1,1, figsize = FIG_SIZE)

# define the function that we will pass to animation.FuncAnimation;
# it re-draws the plot at every frame, but because
# mini_batch_steps_per_iteration=2 the picture changes every 2 steps,
# so there will be 10 visible updates in total.
# The frames are passed to FuncAnimation as a list, and FuncAnimation
# feeds the given values into animate_kmeans one by one
def animate_kmeans(step):
    _, d, idx, cl_c = sess.run([train_op,
                                avg_distance,
                                cluster_idx,
                                cluster_centers_var],
                               feed_dict={X: data}
                              )
    # to simplify working with the results after training,
    # a DataFrame is created and deleted at the end of the code;
    # this solution may not be entirely optimal,
    # it simply makes life easier for your humble servant =)
    loc_df['labels'] = idx
    cl_df = pd.DataFrame()
    cl_df['longitude'] = cl_c[:,0]+LON_CONST
    cl_df['latitude'] = cl_c[:,1]+LAT_CONST
    cl_df['labels'] = cl_df.index
    # be sure to clear the previous plot
    ax.clear()
    ax.set_title('Step: '+str(step))
    for l in cl_df['labels']:
        ax.plot(loc_df.loc[loc_df['labels'] == l, 'longitude'],
                loc_df.loc[loc_df['labels'] == l, 'latitude'],
                '.',
                markersize = 1.5
               )
        ax.plot(cl_df.loc[cl_df['labels'] == l, 'longitude'],
                cl_df.loc[cl_df['labels'] == l, 'latitude'],
                'ro'
               )
        ax.annotate(s=str(l),
                    xy=(cl_df.loc[cl_df['labels'] == l, 'longitude'],
                        cl_df.loc[cl_df['labels'] == l, 'latitude'])
                   )
    ax.set_axis_off()
    del cl_df

ani = animation.FuncAnimation(fig,
                              animate_kmeans,
                              list(range(0, 20)),
                              interval=500
                             )
# close the rendered figures
plt.close()

# directory where the gif will be saved
gif_path = KMEANS_GIF_PATH
# save the gif
ani.save(gif_path,
         writer='imagemagick',
         fps=1
        )
# open the saved gif and base64-encode it
# so it can be embedded in HTML as a data URL
video = io.open(gif_path, 'r+b'
               ).read()
encoded = base64.b64encode(video)
# render the animation in the notebook
HTML(data='''<img src="data:image/gif;base64,{0}"type="gif"/>'''.format(
            encoded.decode('ascii')))
```

<p style="text-indent:20px;">You can see that the picture updates every 2 steps because of mini_batch_steps_per_iteration=2. Feel free to play with the code above! Try a different initialization ("kmeans_plus_plus", "random"), play with the mini_batch parameters, or even change the number of clusters!</p>

<p style="text-indent:20px;">Let us find the optimal number of clusters using the method suggested in the lecture; while the computation runs you can brew a cup of coffee and study a new algorithm =)</p>

```
n_cluster = range(1,15,1)
avg_distance = []
for i in n_cluster:
    d,idx,cl_c = KMeans_clustering(num_clusters=i, flag_print=False)
    avg_distance.append(d)

plt.plot([i for i in n_cluster], avg_distance, color = 'seagreen')
plt.xlabel('number of cluster')
plt.ylabel('avg_distance')
plt.title('Optimal Number Of Cluster')
plt.show();
```

<p style="text-indent:20px;">The plot shows that nothing is really visible =). We are guessing again =). I would take 4 clusters, which agrees quite well with the earlier estimate, so let us take 5 clusters (in this case it is better to take the larger number, since that gives a more detailed picture of the traffic).</p>

```
NUM_CLUSTERS = 5
d,idx,cl_c = KMeans_clustering(num_clusters=NUM_CLUSTERS, flag_print=True)
```

<p style="text-indent:20px;">Let us add the cluster labels to loc_df and create a new DataFrame with the longitude, latitude and label of each cluster center.</p>

```
loc_df['labels'] = idx

cl_df = pd.DataFrame()
cl_df['longitude'] = cl_c[:,0]+LON_CONST
cl_df['latitude'] = cl_c[:,1]+LAT_CONST
cl_df['labels'] = cl_df.index
cl_df.tail()
```

## <center>Visualization of the resulting clusters

```
fig, ax = plt.subplots(1,1, figsize = FIG_SIZE)
for l in cl_df['labels']:
    plt.plot(loc_df.loc[loc_df['labels'] == l, 'longitude'],
             loc_df.loc[loc_df['labels'] == l, 'latitude'],
             '.',
             markersize = 1.5,
             axes = ax,
             figure = fig
            )
    plt.plot(cl_df.loc[cl_df['labels'] == l, 'longitude'],
             cl_df.loc[cl_df['labels'] == l, 'latitude'],
             'ro',
             axes = ax,
             figure = fig
            )
    ax.annotate(s=str(l),
                xy=(cl_df.loc[cl_df['labels'] == l, 'longitude'],
                    cl_df.loc[cl_df['labels'] == l, 'latitude'])
               )
ax.set_axis_off()
plt.show();

# let's see where our clusters ended up on the map
chikago_map = folium.Map(location=[LAT_CONST, LON_CONST],
                         zoom_start=10,
                         tiles='OpenStreetMap'
                        )
# place markers on the map of Chicago
for lon, lat in zip(cl_df['longitude'], cl_df['latitude']):
    folium.Marker(location=[lat, lon]).add_to(chikago_map)
chikago_map
```

<p style="text-indent:20px;">Notice that the two cluster centroids farthest from the main mass of pickup and dropoff points sit right next to the airports (1, 3), one belongs to the northern residential areas of Chicago (2), and two centroids can be attributed to the business and cultural parts of Chicago (4, 0).</p>

<p style="text-indent:20px;">It may seem strange that there is no pronounced centroid for the southern residential areas of Chicago, but if you learn a bit more about this city it becomes clear that it is not so strange after all. The southern neighborhoods of Chicago are Mexican and Irish districts where the standard of living is lower than in the northern part of the city.</p>

## <center>Visualizing the traffic between the centers

<p style="text-indent:20px;">To look at the hourly traffic between clusters we need to extract the pickup hour and assign cluster labels to the pickup and dropoff locations.</p>

```
df['pickup_hour'] = df['trip_start_timestamp'].apply(lambda x: parser.parse(x).hour)
df['pickup_cluster'] = loc_df.loc[:len(df)-1,'labels'].values
df['dropoff_cluster'] = loc_df.loc[len(df):, 'labels'].values
```

<p style="text-indent:20px;">Now let us make something beautiful (i.e. an animation of the traffic between clusters). Anyone who wants to understand matplotlib animation better can read the documentation on the <a href='https://matplotlib.org/api/animation_api.html'>official site</a>.</p>

```
def trafic_animation(lon_ani_lim=None, lat_ani_lim=None, strong=6):
    # by passing limits it is possible to restrict the area
    # shown in the animation;
    # the strong parameter is also important:
    # it is a scaling coefficient
    # that controls the width of the arrows
    if (lon_ani_lim==None)|(lat_ani_lim==None):
        lim_cl_df = cl_df
    elif (len(lon_ani_lim)!=2)|(len(lat_ani_lim)!=2):
        lim_cl_df = cl_df
    else:
        lim_cl_df = cl_df[
            ((cl_df['longitude']>lon_ani_lim[0])&(cl_df['longitude']<lon_ani_lim[1]))&
            ((cl_df['latitude']>lat_ani_lim[0])&(cl_df['latitude']<lat_ani_lim[1]))
        ]

    fig, ax = plt.subplots(1,1, figsize = FIG_SIZE)

    # function that will be passed to animation.FuncAnimation
    def animate(hour):
        # clear everything drawn previously
        ax.clear()
        # and draw everything again
        ax.set_title('Absolute Traffic - Hour' + str(int(hour)) + ':00')
        plt.figure(figsize = FIG_SIZE)
        # the static part stays the same,
        # but since we clear everything beforehand
        # it has to be drawn again
        for l in lim_cl_df['labels']:
            ax.plot(loc_df.loc[loc_df['labels'] == l, 'longitude'],
                    loc_df.loc[loc_df['labels'] == l, 'latitude'],
                    '.',
                    markersize = 1.5
                   )
            ax.plot(cl_df.loc[cl_df['labels'] == l, 'longitude'],
                    cl_df.loc[cl_df['labels'] == l, 'latitude'],
                    'ro'
                   )
            ax.annotate(s=str(l),
                        xy=(cl_df.loc[cl_df['labels'] == l, 'longitude'],
                            cl_df.loc[cl_df['labels'] == l, 'latitude'])
                       )
        # the dynamic part (the arrows);
        # it changes over time
        for first_label in lim_cl_df['labels']:
            for second_label in lim_cl_df['labels']:
                # count the number of rides in the given hour
                # from the first cluster to the second and vice versa
                num_of_rides = len(df[(df['pickup_cluster'] == first_label)&
                                      (df['dropoff_cluster'] == second_label)&
                                      (df['pickup_hour'] == hour)])
                # an arrow, like a vector, is drawn through two points:
                # the first is given by the starting coordinates,
                # and as the second we pass the offset from the first
                # point to the second along both axes
                dist_x = cl_df.longitude[cl_df['labels'] == first_label].values[0] - \
                         cl_df.longitude[cl_df['labels'] == second_label].values[0]
                dist_y = cl_df.latitude[cl_df['labels'] == first_label].values[0] - \
                         cl_df.latitude[cl_df['labels'] == second_label].values[0]
                # the number of rides is normalized by the total number of rides
                pct = np.true_divide(num_of_rides, len(df))
                # create the Arrow object itself
                # and draw it
                arr = plt.Arrow(cl_df.longitude[cl_df['labels'] == first_label].values,
                                cl_df.latitude[cl_df['labels'] == first_label].values,
                                -dist_x,
                                -dist_y,
                                edgecolor='white',
                                width=strong*pct
                               )
                ax.add_patch(arr)
                arr.set_facecolor('g')
        ax.set_axis_off()

    ani = animation.FuncAnimation(fig,
                                  animate,
                                  sorted(df['pickup_hour'].unique()),
                                  interval=1000
                                 )
    # close
отрисованных графиков plt.close() # дириктори сохранения гифки gif_path = GIF_PATH # сохранение гифки ani.save(gif_path, writer='imagemagick', fps=1 ) # открываем сохраненную гифку и производим ее дешифрование # для дальнейшего URL и подстановки их в HTML video = io.open(gif_path, 'r+b' ).read() encoded = base64.b64encode(video) return encoded encoded = trafic_animation() # производим отрисовку анимации HTML(data='''<img src="data:image/gif;base64,{0}"type="gif"/>'''.format( encoded.decode('ascii'))) # присмотримся к центру города encoded = trafic_animation(lon_ani_lim=LON_ANI_CENTER, lat_ani_lim=LAT_ANI_CENTER, strong=2 ) HTML(data='''<img src="data:image/gif;base64,{0}"type="gif"/>'''.format( encoded.decode('ascii'))) ``` <p style="text-indent:20px;">Прелесть такого рода визуализации в том, что ее может проинтерпритировать даже ребенок.</p> ## <center> Заключение <p style="text-indent:20px;">Tensorflow довольно мощное API, которое хорошо подходит не только для обучения нейронных сетей. Хотя стоит отметит скудность документации(по сравнению с sklearn) по некоторым частям библиотеки. Одна из таких частей и была рассмотренна в данном тьюториале. Я так же надеюсь вам понравилась визуализации и вы влюбились в нее так же как и я когда впервые ее увидел. Если такого рода тьюториал вам понравится, то я подумаю о переносе его в виде статьи на хабрахабр и создании цикла такого рода статей.</p> <p style="text-indent:20px;">Спасибо за внимание!</p>
github_jupyter
# NumPy NumPy is the fundamental package for scientific computing in Python. It is a Python library that provides a multidimensional array object, various derived objects (such as masked arrays and matrices), and an assortment of routines for fast operations on arrays, including mathematical, logical, shape manipulation, sorting, selecting, I/O, discrete Fourier transforms, basic linear algebra, basic statistical operations, random simulation and much more. - NumPy is a python library, stands for Numerical Python - Used for working with arrays. It is very useful in Numerical calculations - matrices, linear algebra, etc - The array object in NumPy is called ndarray (n-dimensional array). Arrays are frequently used in data sciences, where speed and accuracy matters. It is similar to list but it is way faster than that. - Elements in NumPy array cannot be heterogeneous like in lists. The elements in a NumPy array are all required to be of the same data type, and thus will be the same size in memory. - NumPy arrays have a fixed size at creation, unlike Python lists (which can grow dynamically). Changing the size of an ndarray will create a new array and delete the original. - NumPy library was written partially in Python, but most of the parts that require fast computation are written in C or C++. - For detailed information you can go through the [official documentation](https://numpy.org/doc/stable/user/absolute_beginners.html#numpy-the-absolute-basics-for-beginners) - [Source code for NumPy](https://github.com/numpy/numpy) ``` # To import the library use import numpy # add keyword numpy before using a = numpy.array([1,2,3,4,5]) # defines a as numpy object # array is enclosed in ([]) ``` NumPy is imported under the alias using the keyword "as" - import numpy as np This shortens the keyword required in syntax, instead of numpy.array we can type np.array ``` import numpy as np a = np.array([1,2,3,4,5]) b = [1,2,3,4,5] print(a) print(b) print(type(a)) # shows the type print(type(b)) ``` Notice the output of print(a), it is enclosed in square brackets like lists but not separated by commas like lists. Hence the output is a numpy array. ``` #Use Tuple to create numpy array import numpy as np a = np.array((1,2,3,4,5)) print(a) print(type(a)) # To create an ndarray, we can pass a list, tuple or any array-like object into the array() method. ``` ## Dimensions in Array A dimension in array is one level of array depth - nested arrays: are arrays that have arrays as elements. #### Check Number of Dimensions of array *ndim* attribute returns an integer that tells us how many dimensions an array has if a is defined as an array, to check the dimensions of a, the syntax is - a.ndim ### 0-D Arrays - 0-D Arrays or scalars are elements in array, each value in array is 0-D array. ``` import numpy as np a = np.array(9) # single element print(a) print(a.ndim) #prints the dimension of an array ``` ### 1-D Arrays An array that has 0D Arrays as its elements. ``` a = np.array([1,2,3,4,5]) print(a) print(a.ndim) ``` ### 2-D Arrays An array that has 1-D elements is called a 2D array Represents a matrix Note: NumPy also has a submodule dedicated for matrix operations called numpy.mat (go through [documentation](https://numpy.org/doc/stable/reference/generated/numpy.mat.html)) ``` import numpy as np a = np.array([[1,2,3],[4,5,6]]) print(a) print(a.ndim) ``` ### 3-D Arrays An array of 2D arrays is called a 3D array. 
``` import numpy as np a = np.array([[[1,2,3],[4,5,6],[7,8,9]],[[9,8,7],[6,5,4],[3,2,1]]]) print(a) print(a.ndim) # Common example to demonstrate dimensions import numpy as np a = np.array(45) b = np.array([1,2,3,4,5]) c = np.array([[1,2,3],[4,5,6]]) d = np.array([[1,2,3],[4,5,6],[7,8,9]]) e = np.array([[[1,2,3],[4,5,6]],[[1,2,3],[4,5,6]]]) # Pay very close attention to the number of square brackets. # One neat trick is the number of square brackets at the beginning is the dimensions of that array. print(a,'\n') print(b,'\n') print(c,'\n') print(d,'\n') print(e,'\n') print("The dimension of",'\n',a,"is --",a.ndim) print("The dimension of",'\n',b,"is --",b.ndim) print("The dimension of",'\n',c,"is --",c.ndim) print("The dimension of",'\n',d,"is --",d.ndim) print("The dimension of",'\n',e,"is --",e.ndim) # To make an array of desired dimensions a = np.array([1,2,3,4],ndmin=7) print(a) print("Number of dimensions: ",a.ndim) ``` ## Access Array Elements Array indexing is the same as accessing an array element. You can access an array element by referring to its index number. ``` import numpy as np a = np.array([1,2,3,4]) print(a[0]) # Remember! first element has 0 index in python '''To access elements from 2-D arrays we can use comma separated integers representing the dimension and the index of the element.''' a = np.array([[1,2,3,4,5],[6,7,8,9,10]]) #[1,2,3,4,5] = 0th dimension, [6,7,8,9,10] = 1st dimension print(a[0,1]) # first index = 0 ->selects 1st array, second index = 1 ->selects second element of first array print(a[1,3]) #syntax - a[dimension,element] a = np.array([[[1,2,3],[4,5,6]],[[7,8,9],[10,11,12]]]) print(a[0,1,1]) ''' first index = 0 -> Selects [[1,2,3],[4,5,6]] second index = 1 -> Selects [4,5,6] third index = 1 -> Selects 5 ''' print("Dimensions of a: ",a.ndim) a.shape ``` a has 2 elements `[[1,2,3],[4,5,6]]` & `[[7,8,9],[10,11,12]]` of which each has 2 elements `[1,2,3]` & `[4,5,6]` of 1st element; `[7,8,9]` & `[10,11,12]` of 2nd element of which each has 3 elements `1,2,3` .... and so on you get the point `a.shape` returns (2,2,3) which is the shape of an array ## Slicing Arrays Syntax [start_inclusive:end_exclusive] also [start:end:step] Leaving start or end index blank will mean start from beginning and go till end respectively ``` a = np.array([1,2,3,4,5,6,7,8,9]) print(a[1:5]) # From 1st index to 4th index a[:5] # From beginning to 4th index a[5:] a[2:6:2] # from index 2 to 5 in steps of 2 b = np.array([1,2,3,4,5,6,7]) c = np.array_split(b,3) # array_split(array,no. of splits) print(c) ``` ## Random NumPy has a function `random` which creates an array of given shape and populate it with random samples from a uniform distribution over `[0,1)` [Documentation](https://numpy.org/doc/stable/reference/random/generated/numpy.random.rand.html) ``` # import random from numpy so that we don't have to write np.random.rand() from numpy import random x = random.rand() #returns a random float between 0 and 1 x ``` `random.randint(low, high=None, size=None, dtype=int)` Return random integers from low (inclusive) to high (exclusive). Return random integers from the “discrete uniform” distribution of the specified dtype in the “half-open” interval [low, high). If high is None (the default), then results are from [0, low). 
[Documentation](https://numpy.org/doc/stable/reference/random/generated/numpy.random.randint.html#numpy-random-randint) ``` x = random.randint(100, size=(5)) #gives an array of 5 random integers between 0 and 100 (exclusive) x x = random.randint(100, size=(3,3)) # gives a 3 x 3 array x x = random.choice([3,5,7,9]) # chooses a random value from given array x x = random.choice([3,5,7,9],size=(3,3)) # creates a 3 x 3 array by choosing values randomly from given array x x = random.randint(100, size=(5)) y = np.random.shuffle(x) # note: shuffle works in place and returns None, so y will be None print(x) print(y) x = random.randint(1000,size=(10)) # 10 random values between 0 and 1000 print(x) print(np.var(x)) # Variance print(np.std(x)) # Standard Deviation print(np.average(x)) # Average ``` `np.random.randn()` returns a sample (or samples) from the "Standard Normal" Distribution. If positive int_like arguments are provided, `randn` generates an array of shape (d0,d1,...,dn), filled with random floats sampled from a univariate "normal" (Gaussian) distribution of mean 0 and variance 1. A single float randomly sampled from the distribution is returned if no argument is provided. ``` x = np.random.randn(10) x ``` `random.shuffle()` modifies a sequence in-place by shuffling its contents. This function only shuffles the array along the first axis of a multi-dimensional array. The order of sub-arrays is changed but their contents remain the same. ``` x = np.array([1,2,3,4,5,6,7,8,9,10]) random.shuffle(x) x ``` ## Products ``` p1 = np.inner(2,2) # gives inner product v_a = 9 + 6j v_b = 5 + 2j p2 = np.inner(v_a,v_b) # inner product of 2 complex numbers print(p1) print(p2) a1 = np.array([[2,6],[7,8]]) a2 = np.array([[5,10],[-2,3]]) p3 = np.inner(a1,a2) print(p3) # Cross Product p4 = np.cross(a1,a2) print(p4) # Dot Product p5 = np.dot(a1,a2) p5 ``` If we just want the indices where a certain condition is satisfied, we can use `np.where( )`. This function is used to filter out data. ``` x = np.array([0,1,2,3,4,5,6,7,8,9]) indices = np.where(x<5) x[indices] ``` **Functions like np.arange, np.linspace are very useful:** np.arange (read as 'a range') gives an array of numbers within a given range and stepsize np.linspace gives an array of linearly spaced numbers ``` np.arange(0,10,3) # syntax - (inclusive_start, exclusive_stop, stepsize) #This will give an array of values from 0 up to (but not including) 10 in steps of 3 np.arange(-np.pi, np.pi, 1) np.linspace(-np.pi, np.pi, 7) # linearly spaced values - difference between 2 consecutive values not necessarily 1 ``` **Notice** the difference between the `np.arange()` function and the `np.linspace()` function: `np.arange` gives evenly stepped values but doesn't include the stop value, whereas `np.linspace` first fixes the start and end values and divides the interval into linearly spaced numbers. This changes the output of the two functions significantly. In the syntax of the `np.arange` function the **last value denotes the difference between consecutive elements**. But in the `np.linspace` function the **last value denotes the number of elements desired in the given range**; the difference between elements is determined accordingly by the system. 
``` np.linspace(0,np.pi,10) #syntax - (inclusive_start, INCLUSIVE_stop, Number of elements) ``` ## NumPy Logarithms NumPy has functions to perform log at base 2, e and 10 - `log2()` - log to the base 2 - `log10()` - log to the base 10 - `log()` - natural log / base $\mathcal{e}$ ``` #log to the base 2 x = np.arange(1,10) print(x) print(np.log2(x)) # log to the base 10 print(np.log10(x)) # log to the base e or natural log (ln) print(np.log(x)) ``` ## NumPy LCM and GCD NumPy has the functions `np.lcm` and `np.gcd`. We can also use this functions to find lcm and gcd of each element in an array using the $reduce( )$ method ``` x = 4 y = 12 lcm = np.lcm(x,y) lcm gcd = np.gcd(x,y) gcd x = np.arange(2,10) y = np.lcm.reduce(x) # use reduce() when the element is an array y x = np.array([4,44,40,20,22]) np.gcd.reduce(x) x = np.random.randint(100,size=2) print(x) print(np.gcd.reduce(x)) ``` ## Convert Degrees into Radians and Radians to Degrees By default the values are in radians, but we can convert it into degrees and vice versa if required $$180^\circ=\pi\;rad$$ $$\therefore 1\;rad=\Big(\frac{180}{\pi}\Big)^\circ$$ ``` # Suppose we have array of values in degrees import numpy as np x = np.array([0,30,45,60,90,180,270,360]) radian = np.deg2rad(x) print(radian) degree = np.rad2deg(radian) print(degree) y = np.array([np.pi/2, np.pi, 4*np.pi/3, 2*np.pi]) y_degree = np.rad2deg(y) print(y_degree) ``` NumPy also has the function to find angles i.e inverse trig values arcsin( ), arccos( ), arctan( ) ``` x = np.arcsin(0.8) x_deg = np.rad2deg(x) print(x) print(round(x_deg,2)) # round(x_deg,2) rounds off the value of x_deg to 2 decimal places ```
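The other inverse trig functions follow the same pattern; here is a small additional sketch (the input values are arbitrary) combining `arccos` and `arctan` with `rad2deg`:
```
x = np.arccos(0.5)    # angle in radians whose cosine is 0.5
y = np.arctan(1)      # angle in radians whose tangent is 1
print(np.rad2deg(x))  # 60.0
print(np.rad2deg(y))  # 45.0
```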
github_jupyter
<!--BOOK_INFORMATION--> <img align="left" style="padding-right:10px;" src="https://github.com/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/figures/PDSH-cover-small.png?raw=1"> *This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).* *The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!* <!--NAVIGATION--> < [In-Depth: Decision Trees and Random Forests](05.08-Random-Forests.ipynb) | [Contents](Index.ipynb) | [In-Depth: Manifold Learning](05.10-Manifold-Learning.ipynb) > <a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/05.09-Principal-Component-Analysis.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a> # In Depth: Principal Component Analysis Up until now, we have been looking in depth at supervised learning estimators: those estimators that predict labels based on labeled training data. Here we begin looking at several unsupervised estimators, which can highlight interesting aspects of the data without reference to any known labels. In this section, we explore what is perhaps one of the most broadly used of unsupervised algorithms, principal component analysis (PCA). PCA is fundamentally a dimensionality reduction algorithm, but it can also be useful as a tool for visualization, for noise filtering, for feature extraction and engineering, and much more. After a brief conceptual discussion of the PCA algorithm, we will see a couple examples of these further applications. We begin with the standard imports: ``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt import seaborn as sns; sns.set() ``` ## Introducing Principal Component Analysis Principal component analysis is a fast and flexible unsupervised method for dimensionality reduction in data, which we saw briefly in [Introducing Scikit-Learn](05.02-Introducing-Scikit-Learn.ipynb). Its behavior is easiest to visualize by looking at a two-dimensional dataset. Consider the following 200 points: ``` rng = np.random.RandomState(1) X = np.dot(rng.rand(2, 2), rng.randn(2, 200)).T plt.scatter(X[:, 0], X[:, 1]) plt.axis('equal'); ``` By eye, it is clear that there is a nearly linear relationship between the x and y variables. This is reminiscent of the linear regression data we explored in [In Depth: Linear Regression](05.06-Linear-Regression.ipynb), but the problem setting here is slightly different: rather than attempting to *predict* the y values from the x values, the unsupervised learning problem attempts to learn about the *relationship* between the x and y values. In principal component analysis, this relationship is quantified by finding a list of the *principal axes* in the data, and using those axes to describe the dataset. 
Using Scikit-Learn's ``PCA`` estimator, we can compute this as follows: ``` from sklearn.decomposition import PCA pca = PCA(n_components=2) pca.fit(X) ``` The fit learns some quantities from the data, most importantly the "components" and "explained variance": ``` print(pca.components_) print(pca.explained_variance_) ``` To see what these numbers mean, let's visualize them as vectors over the input data, using the "components" to define the direction of the vector, and the "explained variance" to define the squared-length of the vector: ``` def draw_vector(v0, v1, ax=None): ax = ax or plt.gca() arrowprops=dict(arrowstyle='->', linewidth=2, shrinkA=0, shrinkB=0) ax.annotate('', v1, v0, arrowprops=arrowprops) # plot data plt.scatter(X[:, 0], X[:, 1], alpha=0.2) for length, vector in zip(pca.explained_variance_, pca.components_): v = vector * 3 * np.sqrt(length) draw_vector(pca.mean_, pca.mean_ + v) plt.axis('equal'); ``` These vectors represent the *principal axes* of the data, and the length of the vector is an indication of how "important" that axis is in describing the distribution of the data—more precisely, it is a measure of the variance of the data when projected onto that axis. The projection of each data point onto the principal axes are the "principal components" of the data. If we plot these principal components beside the original data, we see the plots shown here: ![](https://github.com/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/figures/05.09-PCA-rotation.png?raw=1) [figure source in Appendix](06.00-Figure-Code.ipynb#Principal-Components-Rotation) This transformation from data axes to principal axes is an *affine transformation*, which basically means it is composed of a translation, rotation, and uniform scaling. While this algorithm to find principal components may seem like just a mathematical curiosity, it turns out to have very far-reaching applications in the world of machine learning and data exploration. ### PCA as dimensionality reduction Using PCA for dimensionality reduction involves zeroing out one or more of the smallest principal components, resulting in a lower-dimensional projection of the data that preserves the maximal data variance. Here is an example of using PCA as a dimensionality reduction transform: ``` pca = PCA(n_components=1) pca.fit(X) X_pca = pca.transform(X) print("original shape: ", X.shape) print("transformed shape:", X_pca.shape) ``` The transformed data has been reduced to a single dimension. To understand the effect of this dimensionality reduction, we can perform the inverse transform of this reduced data and plot it along with the original data: ``` X_new = pca.inverse_transform(X_pca) plt.scatter(X[:, 0], X[:, 1], alpha=0.2) plt.scatter(X_new[:, 0], X_new[:, 1], alpha=0.8) plt.axis('equal'); ``` The light points are the original data, while the dark points are the projected version. This makes clear what a PCA dimensionality reduction means: the information along the least important principal axis or axes is removed, leaving only the component(s) of the data with the highest variance. The fraction of variance that is cut out (proportional to the spread of points about the line formed in this figure) is roughly a measure of how much "information" is discarded in this reduction of dimensionality. 
This reduced-dimension dataset is in some senses "good enough" to encode the most important relationships between the points: despite reducing the dimension of the data by 50%, the overall relationship between the data points are mostly preserved. ### PCA for visualization: Hand-written digits The usefulness of the dimensionality reduction may not be entirely apparent in only two dimensions, but becomes much more clear when looking at high-dimensional data. To see this, let's take a quick look at the application of PCA to the digits data we saw in [In-Depth: Decision Trees and Random Forests](05.08-Random-Forests.ipynb). We start by loading the data: ``` from sklearn.datasets import load_digits digits = load_digits() digits.data.shape ``` Recall that the data consists of 8×8 pixel images, meaning that they are 64-dimensional. To gain some intuition into the relationships between these points, we can use PCA to project them to a more manageable number of dimensions, say two: ``` pca = PCA(2) # project from 64 to 2 dimensions projected = pca.fit_transform(digits.data) print(digits.data.shape) print(projected.shape) ``` We can now plot the first two principal components of each point to learn about the data: ``` plt.scatter(projected[:, 0], projected[:, 1], c=digits.target, edgecolor='none', alpha=0.5, cmap=plt.cm.get_cmap('spectral', 10)) plt.xlabel('component 1') plt.ylabel('component 2') plt.colorbar(); ``` Recall what these components mean: the full data is a 64-dimensional point cloud, and these points are the projection of each data point along the directions with the largest variance. Essentially, we have found the optimal stretch and rotation in 64-dimensional space that allows us to see the layout of the digits in two dimensions, and have done this in an unsupervised manner—that is, without reference to the labels. ### What do the components mean? We can go a bit further here, and begin to ask what the reduced dimensions *mean*. This meaning can be understood in terms of combinations of basis vectors. For example, each image in the training set is defined by a collection of 64 pixel values, which we will call the vector $x$: $$ x = [x_1, x_2, x_3 \cdots x_{64}] $$ One way we can think about this is in terms of a pixel basis. That is, to construct the image, we multiply each element of the vector by the pixel it describes, and then add the results together to build the image: $$ {\rm image}(x) = x_1 \cdot{\rm (pixel~1)} + x_2 \cdot{\rm (pixel~2)} + x_3 \cdot{\rm (pixel~3)} \cdots x_{64} \cdot{\rm (pixel~64)} $$ One way we might imagine reducing the dimension of this data is to zero out all but a few of these basis vectors. For example, if we use only the first eight pixels, we get an eight-dimensional projection of the data, but it is not very reflective of the whole image: we've thrown out nearly 90% of the pixels! ![](https://github.com/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/figures/05.09-digits-pixel-components.png?raw=1) [figure source in Appendix](06.00-Figure-Code.ipynb#Digits-Pixel-Components) The upper row of panels shows the individual pixels, and the lower row shows the cumulative contribution of these pixels to the construction of the image. Using only eight of the pixel-basis components, we can only construct a small portion of the 64-pixel image. Were we to continue this sequence and use all 64 pixels, we would recover the original image. But the pixel-wise representation is not the only choice of basis. 
We can also use other basis functions, which each contain some pre-defined contribution from each pixel, and write something like $$ image(x) = {\rm mean} + x_1 \cdot{\rm (basis~1)} + x_2 \cdot{\rm (basis~2)} + x_3 \cdot{\rm (basis~3)} \cdots $$ PCA can be thought of as a process of choosing optimal basis functions, such that adding together just the first few of them is enough to suitably reconstruct the bulk of the elements in the dataset. The principal components, which act as the low-dimensional representation of our data, are simply the coefficients that multiply each of the elements in this series. This figure shows a similar depiction of reconstructing this digit using the mean plus the first eight PCA basis functions: ![](https://github.com/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/figures/05.09-digits-pca-components.png?raw=1) [figure source in Appendix](06.00-Figure-Code.ipynb#Digits-PCA-Components) Unlike the pixel basis, the PCA basis allows us to recover the salient features of the input image with just a mean plus eight components! The amount of each pixel in each component is the corollary of the orientation of the vector in our two-dimensional example. This is the sense in which PCA provides a low-dimensional representation of the data: it discovers a set of basis functions that are more efficient than the native pixel-basis of the input data. ### Choosing the number of components A vital part of using PCA in practice is the ability to estimate how many components are needed to describe the data. This can be determined by looking at the cumulative *explained variance ratio* as a function of the number of components: ``` pca = PCA().fit(digits.data) plt.plot(np.cumsum(pca.explained_variance_ratio_)) plt.xlabel('number of components') plt.ylabel('cumulative explained variance'); ``` This curve quantifies how much of the total, 64-dimensional variance is contained within the first $N$ components. For example, we see that with the digits the first 10 components contain approximately 75% of the variance, while you need around 50 components to describe close to 100% of the variance. Here we see that our two-dimensional projection loses a lot of information (as measured by the explained variance) and that we'd need about 20 components to retain 90% of the variance. Looking at this plot for a high-dimensional dataset can help you understand the level of redundancy present in multiple observations. ## PCA as Noise Filtering PCA can also be used as a filtering approach for noisy data. The idea is this: any components with variance much larger than the effect of the noise should be relatively unaffected by the noise. So if you reconstruct the data using just the largest subset of principal components, you should be preferentially keeping the signal and throwing out the noise. Let's see how this looks with the digits data. First we will plot several of the input noise-free data: ``` def plot_digits(data): fig, axes = plt.subplots(4, 10, figsize=(10, 4), subplot_kw={'xticks':[], 'yticks':[]}, gridspec_kw=dict(hspace=0.1, wspace=0.1)) for i, ax in enumerate(axes.flat): ax.imshow(data[i].reshape(8, 8), cmap='binary', interpolation='nearest', clim=(0, 16)) plot_digits(digits.data) ``` Now lets add some random noise to create a noisy dataset, and re-plot it: ``` np.random.seed(42) noisy = np.random.normal(digits.data, 4) plot_digits(noisy) ``` It's clear by eye that the images are noisy, and contain spurious pixels. 
Let's train a PCA on the noisy data, requesting that the projection preserve 50% of the variance: ``` pca = PCA(0.50).fit(noisy) pca.n_components_ ``` Here 50% of the variance amounts to 12 principal components. Now we compute these components, and then use the inverse of the transform to reconstruct the filtered digits: ``` components = pca.transform(noisy) filtered = pca.inverse_transform(components) plot_digits(filtered) ``` This signal preserving/noise filtering property makes PCA a very useful feature selection routine—for example, rather than training a classifier on very high-dimensional data, you might instead train the classifier on the lower-dimensional representation, which will automatically serve to filter out random noise in the inputs. ## Example: Eigenfaces Earlier we explored an example of using a PCA projection as a feature selector for facial recognition with a support vector machine (see [In-Depth: Support Vector Machines](05.07-Support-Vector-Machines.ipynb)). Here we will take a look back and explore a bit more of what went into that. Recall that we were using the Labeled Faces in the Wild dataset made available through Scikit-Learn: ``` from sklearn.datasets import fetch_lfw_people faces = fetch_lfw_people(min_faces_per_person=60) print(faces.target_names) print(faces.images.shape) ``` Let's take a look at the principal axes that span this dataset. Because this is a large dataset, we will use ``RandomizedPCA``—it contains a randomized method to approximate the first $N$ principal components much more quickly than the standard ``PCA`` estimator, and thus is very useful for high-dimensional data (here, a dimensionality of nearly 3,000). We will take a look at the first 150 components: ``` from sklearn.decomposition import RandomizedPCA pca = RandomizedPCA(150) pca.fit(faces.data) ``` In this case, it can be interesting to visualize the images associated with the first several principal components (these components are technically known as "eigenvectors," so these types of images are often called "eigenfaces"). As you can see in this figure, they are as creepy as they sound: ``` fig, axes = plt.subplots(3, 8, figsize=(9, 4), subplot_kw={'xticks':[], 'yticks':[]}, gridspec_kw=dict(hspace=0.1, wspace=0.1)) for i, ax in enumerate(axes.flat): ax.imshow(pca.components_[i].reshape(62, 47), cmap='bone') ``` The results are very interesting, and give us insight into how the images vary: for example, the first few eigenfaces (from the top left) seem to be associated with the angle of lighting on the face, and later principal vectors seem to be picking out certain features, such as eyes, noses, and lips. Let's take a look at the cumulative variance of these components to see how much of the data information the projection is preserving: ``` plt.plot(np.cumsum(pca.explained_variance_ratio_)) plt.xlabel('number of components') plt.ylabel('cumulative explained variance'); ``` We see that these 150 components account for just over 90% of the variance. That would lead us to believe that using these 150 components, we would recover most of the essential characteristics of the data. 
To make this more concrete, we can compare the input images with the images reconstructed from these 150 components: ``` # Compute the components and projected faces pca = RandomizedPCA(150).fit(faces.data) components = pca.transform(faces.data) projected = pca.inverse_transform(components) # Plot the results fig, ax = plt.subplots(2, 10, figsize=(10, 2.5), subplot_kw={'xticks':[], 'yticks':[]}, gridspec_kw=dict(hspace=0.1, wspace=0.1)) for i in range(10): ax[0, i].imshow(faces.data[i].reshape(62, 47), cmap='binary_r') ax[1, i].imshow(projected[i].reshape(62, 47), cmap='binary_r') ax[0, 0].set_ylabel('full-dim\ninput') ax[1, 0].set_ylabel('150-dim\nreconstruction'); ``` The top row here shows the input images, while the bottom row shows the reconstruction of the images from just 150 of the ~3,000 initial features. This visualization makes clear why the PCA feature selection used in [In-Depth: Support Vector Machines](05.07-Support-Vector-Machines.ipynb) was so successful: although it reduces the dimensionality of the data by nearly a factor of 20, the projected images contain enough information that we might, by eye, recognize the individuals in the image. What this means is that our classification algorithm needs to be trained on 150-dimensional data rather than 3,000-dimensional data, which depending on the particular algorithm we choose, can lead to a much more efficient classification. ## Principal Component Analysis Summary In this section we have discussed the use of principal component analysis for dimensionality reduction, for visualization of high-dimensional data, for noise filtering, and for feature selection within high-dimensional data. Because of the versatility and interpretability of PCA, it has been shown to be effective in a wide variety of contexts and disciplines. Given any high-dimensional dataset, I tend to start with PCA in order to visualize the relationship between points (as we did with the digits), to understand the main variance in the data (as we did with the eigenfaces), and to understand the intrinsic dimensionality (by plotting the explained variance ratio). Certainly PCA is not useful for every high-dimensional dataset, but it offers a straightforward and efficient path to gaining insight into high-dimensional data. PCA's main weakness is that it tends to be highly affected by outliers in the data. For this reason, many robust variants of PCA have been developed, many of which act to iteratively discard data points that are poorly described by the initial components. Scikit-Learn contains a couple interesting variants on PCA, including ``RandomizedPCA`` and ``SparsePCA``, both also in the ``sklearn.decomposition`` submodule. ``RandomizedPCA``, which we saw earlier, uses a non-deterministic method to quickly approximate the first few principal components in very high-dimensional data, while ``SparsePCA`` introduces a regularization term (see [In Depth: Linear Regression](05.06-Linear-Regression.ipynb)) that serves to enforce sparsity of the components. In the following sections, we will look at other unsupervised learning methods that build on some of the ideas of PCA. 
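As a quick illustration of the ``SparsePCA`` variant mentioned above, a minimal sketch of applying it to the digits data might look like the following; the number of components and the ``alpha`` regularization strength are arbitrary choices for demonstration, not tuned values:

```
from sklearn.decomposition import SparsePCA

# alpha controls how strongly sparsity of the components is enforced
spca = SparsePCA(n_components=8, alpha=1, random_state=0)
digits_sparse = spca.fit_transform(digits.data)

print(digits_sparse.shape)             # (1797, 8)
print((spca.components_ == 0).mean())  # fraction of loadings driven exactly to zero
```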
<!--NAVIGATION--> < [In-Depth: Decision Trees and Random Forests](05.08-Random-Forests.ipynb) | [Contents](Index.ipynb) | [In-Depth: Manifold Learning](05.10-Manifold-Learning.ipynb) > <a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/05.09-Principal-Component-Analysis.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
github_jupyter
# Santander Value Prediction Challenge According to Epsilon research, 80% of customers are more likely to do business with you if you provide **personalized service**. Banking is no exception. The digitalization of everyday lives means that customers expect services to be delivered in a personalized and timely manner… and often before they´ve even realized they need the service. In their 3rd Kaggle competition, Santander Group aims to go a step beyond recognizing that there is a need to provide a customer a financial service and **intends to determine the amount or value of the customer's transaction**. This means anticipating customer needs in a more concrete, but also simple and personal way. With so many choices for financial services, this need is greater now than ever before. In this competition, **Santander Group is asking Kagglers to help them identify the value of transactions for each potential customer**. This is a first step that Santander needs to nail in order to personalize their services at scale. The evaluation metric for this competition is Root Mean Squared Logarithmic Error. **RMSLE** **You are provided with an anonymized dataset containing numeric feature variables, the numeric target column, and a string ID column.** **The task is to predict the value of target column in the test set** ## Load Required Libraries ``` # #Python Libraries import numpy as np import scipy as sp import pandas as pd import statsmodels import pandas_profiling %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns import os import sys import time import json import random import requests import datetime import missingno as msno import math import sys import gc import os # #sklearn from sklearn.model_selection import train_test_split from sklearn.model_selection import cross_val_score from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import GridSearchCV from sklearn.model_selection import KFold, StratifiedKFold from sklearn.ensemble import RandomForestRegressor # #sklearn - preprocessing from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import LabelEncoder # #sklearn - metrics from sklearn.metrics import mean_squared_error from sklearn.metrics import mean_absolute_error from sklearn.metrics import r2_score from sklearn.metrics import roc_auc_score # #XGBoost & LightGBM import xgboost as xgb import lightgbm as lgb # #Missing value imputation from fancyimpute import KNN, MICE # #Hyperparameter Optimization from hyperopt.pyll.base import scope from hyperopt.pyll.stochastic import sample from hyperopt import STATUS_OK, Trials, fmin, hp, tpe pd.options.display.max_columns = 150 ``` ## EDA ``` !ls ../ !ls ../data df_train = pd.read_csv("../data/train.csv") df_test = pd.read_csv("../data/test.csv") df_train.shape df_test.shape df_train.head() ``` ID, target, everything else is anonymized ``` df_train.info() df_test.info() ``` ### Missing Data ``` df_train.isnull().sum(axis = 0).sum() df_test.isnull().sum(axis = 0).sum() ``` Yes!! No missing data ### Distributions ``` sns.distplot(df_train['target']) sns.distplot(np.log(1+df_train['target'])) ``` Now, the distribution looks much more normal. ### Hypothesis: Are any of the columns having a constant value? Since the dataset is so small and number of rows < number of columns. 
``` constant_train = df_train.loc[:, (df_train == df_train.iloc[0]).all()].columns.tolist() constant_test = df_test.loc[:, (df_test == df_test.iloc[0]).all()].columns.tolist() len(constant_train) len(constant_test) ``` There are 256 constant columns in the training dataset, but none in the test dataset. These constant columns are thus most likely an artifact of the way that the train and test sets were constructed. Let's remove them from out train set since they will not add any value. ``` columns_to_use = df_test.columns.tolist() # #Target variable is not considered del columns_to_use[0] # #Remove 'ID' columns_to_use = [x for x in columns_to_use if x not in constant_train] #Remove all 0 columns len(columns_to_use) ``` ### Measure of sparsity ``` ((df_train[columns_to_use].values.flatten())==0).mean() ``` 97% of values in the train set are zeros, indicating that it is a very sparse matrix ## Modelling ``` # #Log Transform the target variable y = np.log(1+df_train.target.values) X = lgb.Dataset(df_train[columns_to_use], y, feature_name = "auto") ``` ### Model 1 - LightGBM (My Favourite :P) ``` params = {'boosting_type': 'gbdt', 'objective': 'regression', 'metric': 'rmse', 'learning_rate': 0.01, 'num_leaves': 100, 'feature_fraction': 0.4, 'bagging_fraction': 0.6, 'max_depth': 5, 'min_child_weight': 10} clf = lgb.train(params, X, num_boost_round = 400, verbose_eval=True) preds = clf.predict(df_test[columns_to_use]) preds sample_submission = pd.read_csv("../data/sample_submission.csv") sample_submission.target = np.exp(preds)-1 sample_submission.to_csv('../submissions/model1_lightgbm_01.csv', index=False) sample_submission.shape nr_splits = 5 random_state = 1054 y_oof = np.zeros((y.shape[0])) total_preds = 0 kf = KFold(n_splits=nr_splits, shuffle=True, random_state=random_state) for i, (train_index, val_index) in enumerate(kf.split(y)): print('Fitting fold', i+1, 'out of', nr_splits) X_train, X_val = df_train[columns_to_use].iloc[train_index], df_train[columns_to_use].iloc[val_index] y_train, y_val = y[train_index], y[val_index] train = lgb.Dataset(X_train,y_train ,feature_name = "auto") val = lgb.Dataset(X_val ,y_val ,feature_name = "auto") clf = lgb.train(params,train,num_boost_round = 400,verbose_eval=True) total_preds += clf.predict(df_test[columns_to_use])/nr_splits pred_oof = clf.predict(X_val) y_oof[val_index] = pred_oof print('Fold error', np.sqrt(mean_squared_error(y_val, pred_oof))) print('Total error', np.sqrt(mean_squared_error(y, y_oof))) sample_submission.target = np.exp(total_preds)-1 sample_submission.to_csv('../submissions/model1_lightgbm_02.csv', index=False) sample_submission.head() ```
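As a rough follow-up (not shown in the original run), it can be interesting to see which of the surviving columns the booster actually relies on; a minimal sketch using `clf` from the last fold of the loop above:
```
# gain-based importance of each feature for the last fold's booster
imp = pd.DataFrame({'feature': columns_to_use,
                    'gain': clf.feature_importance(importance_type='gain')})
imp.sort_values('gain', ascending=False).head(20)
```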
github_jupyter
## PureFoodNet implementation ``` #libraries from tensorflow import keras from tensorflow.keras.optimizers import Adam, RMSprop from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten, Conv2D from tensorflow.keras.layers import MaxPool2D, BatchNormalization, GlobalAveragePooling2D from tensorflow.keras.regularizers import l2 from tensorflow.keras import backend as K from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger, EarlyStopping, ReduceLROnPlateau, LearningRateScheduler K.clear_session() class PureFoodNet: # The model def getModel(input_shape=(224,224,3), num_classes=3): model = Sequential() #Block 1 model.add(Conv2D(input_shape = input_shape, filters = 128, kernel_size = (5,5), strides = 2, padding = 'Same', name='block1_conv1', activation ='relu', kernel_initializer='he_normal')) model.add(Conv2D(filters = 128, kernel_size = (5,5), strides = 2, padding = 'Same', name='block1_conv2', activation ='relu',kernel_initializer='he_normal')) model.add(MaxPool2D(strides=(2, 2), name='block1_pool')) model.add(BatchNormalization()) model.add(Dropout(0.25)) #Block 2 model.add(Conv2D(filters = 256, kernel_size = (3,3),padding = 'Same', name='block2_conv1', activation ='relu',kernel_initializer='he_normal')) model.add(Conv2D(filters = 256, kernel_size = (3,3),padding = 'Same', name='block2_conv2', activation ='relu',kernel_initializer='he_normal')) model.add(Conv2D(filters = 256, kernel_size = (3,3),padding = 'Same', name='block2_conv3', activation ='relu',kernel_initializer='he_normal')) model.add(MaxPool2D(strides=(2, 2), name='block2_pool')) model.add(BatchNormalization()) model.add(Dropout(0.35)) #Block 3 model.add(Conv2D(filters = 512, kernel_size = (3,3),padding = 'Same', name='block3_conv1', activation ='relu',kernel_initializer='he_normal')) model.add(Conv2D(filters = 512, kernel_size = (3,3),padding = 'Same', name='block3_conv2', activation ='relu',kernel_initializer='he_normal')) model.add(Conv2D(filters = 512, kernel_size = (3,3),padding = 'Same', name='block3_conv3', activation ='relu',kernel_initializer='he_normal')) model.add(MaxPool2D(strides=(2, 2), name='block3_pool')) model.add(BatchNormalization()) model.add(Dropout(0.35)) #Block 4 model.add(GlobalAveragePooling2D()) model.add(Dense(512, activation = "relu", kernel_initializer='he_normal')) model.add(Dropout(0.4)) model.add(Dense(num_classes, activation = "softmax", kernel_initializer='he_normal', kernel_regularizer=l2())) return model img_width, img_height = 299, 299 train_data_dir = 'food-101/train/' validation_data_dir = 'food-101/test/' specific_classes = None #['apple_pie', 'greek_salad', 'baklava'] batch_size = 128 train_datagen = ImageDataGenerator( rescale=1. / 255, rotation_range=10, width_shift_range=0.05, height_shift_range=0.05, shear_range=0.2, zoom_range=0.2, channel_shift_range=10, horizontal_flip=True, fill_mode='constant' ) test_datagen = ImageDataGenerator(rescale=1. 
/ 255) train_generator = train_datagen.flow_from_directory( classes = specific_classes, directory = train_data_dir, target_size=(img_height, img_width), batch_size=batch_size, class_mode='categorical') validation_generator = test_datagen.flow_from_directory( classes = specific_classes, directory = validation_data_dir, target_size=(img_height, img_width), batch_size=batch_size, class_mode='categorical') nb_train_samples = train_generator.n nb_validation_samples = validation_generator.n n_classes = train_generator.num_classes model_name = 'PureFoodNet_299x299Nadam_2' epoch_num = 50 model = PureFoodNet.getModel(input_shape=train_generator.image_shape, num_classes = n_classes) model.summary() # learning rate scheduler def schedule(epoch): if epoch < 10: new_lr = .001 elif epoch < 14: new_lr = .0006 elif epoch < 17: new_lr = .0003 elif epoch < 20: new_lr = .0001 elif epoch < 23: new_lr = .00005 else: new_lr = .00001 print("\nLR at epoch {} = {} \n".format(epoch,new_lr)) return new_lr lr_scheduler = LearningRateScheduler(schedule) model.compile(optimizer='Nadam', loss='categorical_crossentropy', metrics=['accuracy','top_k_categorical_accuracy']) checkpointer = ModelCheckpoint(filepath='best_model_food101_'+model_name+'.hdf5', verbose=1, save_best_only=True) csv_logger = CSVLogger('hist_food101_'+model_name+'.log') hist = model.fit_generator(train_generator, steps_per_epoch = nb_train_samples // batch_size, validation_data = validation_generator, validation_steps = nb_validation_samples // batch_size, epochs = epoch_num, verbose = 1, callbacks = [csv_logger, checkpointer, lr_scheduler] ) ```
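A rough sketch of how the saved checkpoint could be used for single-image inference afterwards; the image path below is a placeholder, and the only preprocessing is the same 1/255 rescaling that the generators apply:
```
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
import numpy as np

# reload the best checkpoint written by ModelCheckpoint above
best_model = load_model('best_model_food101_' + model_name + '.hdf5')

# placeholder path - point this at any image from the test split
img = image.load_img('food-101/test/apple_pie/example.jpg', target_size=(img_height, img_width))
x = np.expand_dims(image.img_to_array(img) / 255.0, axis=0)

probs = best_model.predict(x)[0]
idx_to_class = {v: k for k, v in train_generator.class_indices.items()}
print(idx_to_class[int(np.argmax(probs))], float(probs.max()))
```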
github_jupyter
``` """ Today we will be looking at the 2 Naive Bayes classification algorithms SeaLion has to offer - gaussian and multinomial (more common). Both of them use the same underlying principles and as usual we'll explain them step by step. """ # first import import sealion as sl from sealion.naive_bayes import GaussianNaiveBayes, MultinomialNaiveBayes """ We'll first start with gaussian naive bayes. The way it works is by creating a normal (gaussian) curve to measure the probability of any certain feature occuring for a given class. It looks at the probability for a feature to be on each class possible. The way it makes its predictions on a given data point is by just looking at the probability of each feature in the point for each class, and as it after aggregating all of the probabilities for all of the features will predict the class with the highest probability. """ # we will use the iris dataset for this from sklearn.datasets import load_iris X, y = load_iris()['data'], load_iris()['target'] # and let's split from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.4, random_state = 3) # another thing to note : # with naive bayes, try to always have as balanced data for all classes as possible. # we can now setup the model gnb = GaussianNaiveBayes() gnb.fit(X_train, y_train) # fit the model gnb.evaluate(X_test, y_test) # we can evaluate it # WOAH! Looks like we do pretty well with this model. Let's see how much we got wrong. y_pred = gnb.predict(X_test) y_pred == y_test # 1 wrong. Super simple, right? # onto multinomial naive bayes """ Multinomial Naive Bayes is a type of naive bayes that will work with stuff like text classification, where you have a dataset where each observation/data point is just a word. This could look like : ["hello", "what", "do", "you", "want", "from", "me"] for a given data point. Each feature is the exact same here, so what if a model could look split all data into its classes, and then see the probability of finding a feature (i.e. "hello") for that class. For example if you have a dataset of 100 emails, 50 spam and 50 ham - you can split the 100 into a dataset of 50 spam and 50 ham and then count the number of times "hello" and all other features show up in each of those 50 class-datasets (doesn't matter where.) Then if you are given a new data point you can see the probability of seeing each of its features for each class, and choose the class with the highest probability. This is the underlying idea behind multinomial naive bayes. """ # let's get started # the spam dataset is available here : https://www.kaggle.com/uciml/sms-spam-collection-dataset import pandas as pd spam_df = pd.read_csv("spam.csv", engine = "python", encoding='ISO-8859-1') # we need to manually define the encoding spam_df # print it out # as usual data manipulation is honestly not as fun as the algorithms, so we're going to have to get our hands dirty X, y = spam_df['v2'], spam_df['v1'] X, y # let's print this stuff out # it looks like we have plenty of data # the first step is tokenize, where we take those strings in each data point and turn them into unique numbers. 
This # will apply throughout, so "hello" as 100 in one data point is the same for another VOCAB_SIZE = 10000 # we allow 10000 words from tensorflow.keras.preprocessing.text import Tokenizer tokenizer = Tokenizer(num_words = VOCAB_SIZE) tokenizer.fit_on_texts(X) X_seq = tokenizer.texts_to_sequences(X) from tensorflow.keras.preprocessing.sequence import pad_sequences # we'll also want to pad it, meaning that we make sure everything is the same length X_pad = pad_sequences(X_seq, maxlen = 100, truncating = "post", padding = "post") # and we will want to split it up now from sklearn.model_selection import train_test_split import numpy as np y = np.array(y) y[np.where(y == "ham")] = 0 y[np.where(y == "spam")] = 1 # spam is 1 X_train, X_test, y_train, y_test = train_test_split(X_pad, y, test_size = 0.15, random_state = 3) # let's print out X_train X_train # time to start using Multinomial Naive Bayes mnb = MultinomialNaiveBayes() mnb.fit(X_train, y_train) # time to evaluate mnb.evaluate(X_test, y_test) # dang ... but hmmm is it just predicting 0s? Is that why? mnb.predict(X_test)[:10] # looks like it did phenomenal. And of course, we're going to use a confusion matrix. from sealion.utils import confusion_matrix confusion_matrix(mnb.predict(X_test), y_test) # The only thing we get wrong is thinking something is fine when its not. I think that's better than # the opposite, where you miss something important and it goes into your spam folder... # Look's like that's the end for us. As usual, I hope you enjoyed this tutorial! ```
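As a final sketch beyond the original tutorial, this is roughly how a brand-new message could be scored, reusing the fitted tokenizer, the same padding settings, and the trained model (the example text is made up):
```
new_msg = ["Congratulations! You have won a free prize, call now"]
new_seq = tokenizer.texts_to_sequences(new_msg)
new_pad = pad_sequences(new_seq, maxlen = 100, truncating = "post", padding = "post")
print(mnb.predict(new_pad))  # 1 -> spam, 0 -> ham
```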
github_jupyter
<h1><center>Deep Learning Helping Navigate Robots</center></h1> <img src="https://storage.googleapis.com/kaggle-competitions/kaggle/13242/logos/thumb76_76.png?t=2019-03-12-23-33-31" width="300"></img> ### Dependencies ``` import warnings import cufflinks import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from keras import optimizers from keras.layers import Dense from keras.utils import to_categorical from keras.models import Sequential, Model from sklearn.metrics import confusion_matrix from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split %matplotlib inline warnings.filterwarnings("ignore") cufflinks.go_offline(connected=True) # Set seeds to make the experiment more reproducible. from tensorflow import set_random_seed from numpy.random import seed set_random_seed(0) seed(0) ``` ### Load data ``` train = pd.read_csv('../input/X_train.csv') labels = pd.read_csv('../input/y_train.csv') test = pd.read_csv('../input/X_test.csv') print('Train features shape', train.shape) display(train.head()) print('Train labels shape', labels.shape) display(labels.head()) print('Test shape', test.shape) display(test.head()) ``` ### Join train features with labels ``` train = train.join(labels, on='series_id', rsuffix='_') train.drop('series_id_', axis=1, inplace=True) print(train.shape) display(train.head()) ``` ### Plotly graphs may take a while to load. # EDA ## Surface distribution - Let's look at the label distribution of our data ``` f, ax = plt.subplots(figsize=(12, 8)) ax = sns.countplot(y='surface', data=train, palette="rocket", order=reversed(train['surface'].value_counts().index)) ax.set_ylabel("Surface type") plt.show() ``` ### Surface distribution by "group_id" ``` group_df = train.groupby(['group_id', 'surface'])['surface'].agg({'surface':['count']}).reset_index() group_df.columns = ['group_id', 'surface', 'count'] f, ax = plt.subplots(figsize=(18, 8)) ax = sns.barplot(x="group_id", y="count", data=group_df, palette="GnBu_d") for index, row in group_df.iterrows(): ax.text(row.name, row['count'], row['surface'], color='black', ha="center", rotation=60) plt.show() ``` ## Features distribution - Now is a good time to see how each type of feature behaves ### Orientation distribution ``` orientation_features = ['orientation_X', 'orientation_Y', 'orientation_Z', 'orientation_W'] train[orientation_features].iplot(kind='histogram', bins=200, subplots=True, shape=(len(orientation_features), 1)) train[orientation_features].iplot(kind='histogram', barmode='overlay', bins=200) train[orientation_features].iplot(kind='box') ``` The interesting part here is that "orientation_Y" and "orientation_X" are far more spread out than the other two. ### Angular velocity distribution ``` velocity_features = ['angular_velocity_X', 'angular_velocity_Y', 'angular_velocity_Z'] train[velocity_features].iplot(kind='histogram', bins=200, subplots=True, shape=(len(velocity_features), 1)) train[velocity_features].iplot(kind='histogram', barmode='overlay', bins=200) train[velocity_features].iplot(kind='box') ``` Here all the angular velocity features seem to be centered around 0, but "angular_velocity_Y" is less spread out than the others. 
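To back these visual impressions up with numbers, one quick check is to compare the spread of the features directly; a small sketch using pandas:
```
# numeric check of the spread seen in the plots above
print(train[orientation_features].std())
print(train[velocity_features].std())
```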
### Linear acceleration distribution ``` acceleration_features = ['linear_acceleration_X', 'linear_acceleration_Y', 'linear_acceleration_Z'] train[acceleration_features].iplot(kind='histogram', bins=200, subplots=True, shape=(len(acceleration_features), 1)) train[acceleration_features].iplot(kind='histogram', barmode='overlay', bins=200) train[acceleration_features].iplot(kind='box') ``` The linear acceleration features seem to be the most different between itself, all 3 features have different mean and spread. ### Preprocess the labels ``` target = train['surface'] n_labels = target.nunique() labels_names = target.unique() le = LabelEncoder() target = le.fit_transform(target.values) target = to_categorical(target) train.drop('surface', axis=1, inplace=True) ``` ### Train/validation split ``` features = ['orientation_X', 'orientation_Y', 'orientation_Z', 'orientation_W', 'angular_velocity_X', 'angular_velocity_Y', 'angular_velocity_Z', 'linear_acceleration_X', 'linear_acceleration_Y', 'linear_acceleration_Z'] X_train, X_val, Y_train, Y_val = train_test_split(train[features], target, test_size=0.2, random_state=0) print('Train shape', X_train.shape) print('Validation shape', X_val.shape) display(X_train.head()) ``` ### Model ``` epochs = 70 batch = 128 lr = 0.001 adam = optimizers.Adam(lr) model = Sequential() model.add(Dense(20, activation='relu', input_dim=X_train.shape[1])) model.add(Dense(20, activation='relu')) model.add(Dense(n_labels, activation="softmax")) model.compile(loss='categorical_crossentropy', optimizer=adam) model.summary() history = model.fit(X_train.values, Y_train, validation_data=(X_val.values, Y_val), epochs=epochs, verbose=2) ``` #### Model loss plot ``` history_pd = pd.DataFrame.from_dict(history.history) history_pd.iplot(kind='line') ``` #### Model confusion matrix ``` cnf_matrix = confusion_matrix(np.argmax(Y_train, axis=1), model.predict_classes(X_train)) cnf_matrix_norm = cnf_matrix.astype('float') / cnf_matrix.sum(axis=1)[:, np.newaxis] df_cm = pd.DataFrame(cnf_matrix_norm, index=labels_names, columns=labels_names) plt.figure(figsize=(20, 7)) ax = plt.axes() ax.set_title('Train') sns.heatmap(df_cm, annot=True, fmt='.2f', cmap="Blues", ax=ax) plt.show() cnf_matrix = confusion_matrix(np.argmax(Y_val, axis=1), model.predict_classes(X_val)) cnf_matrix_norm = cnf_matrix.astype('float') / cnf_matrix.sum(axis=1)[:, np.newaxis] df_cm = pd.DataFrame(cnf_matrix_norm, index=labels_names, columns=labels_names) plt.figure(figsize=(20, 7)) ax = plt.axes() ax.set_title('Validation') sns.heatmap(df_cm, annot=True, fmt='.2f', cmap="Blues", ax=ax) plt.show() ``` ### Test predictions ``` predictions = model.predict_classes(test[features].values) test['surface'] = le.inverse_transform(predictions) df = test[['series_id', 'surface']] df = df.groupby('series_id', as_index=False).agg(lambda x:x.value_counts().index[0]) df.to_csv('submission.csv', index=False) df.head(10) ```
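Before writing the submission, it can be worth a quick sanity check that the per-series majority vote is not collapsing onto one or two surfaces; a small sketch:
```
# distribution of predicted surfaces across the test series
df['surface'].value_counts()
```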
github_jupyter
## Explore The Data: Plot Categorical Features This section uses the Titanic dataset from [this](https://www.kaggle.com/c/titanic/overview) Kaggle competition. The dataset contains information about 891 people who were on board the ship when it sank on April 15th, 1912. As noted in the description on Kaggle's website, some people aboard the ship were more likely to survive the wreck than others. There were not enough lifeboats for everybody, so women, children, and the upper class were prioritized. Using the information about these 891 passengers, the challenge is to build a model to predict which people would survive based on the following fields: - **Name** (str) - Name of the passenger - **Pclass** (int) - Ticket class (1st, 2nd, or 3rd) - **Sex** (str) - Gender of the passenger - **Age** (float) - Age in years - **SibSp** (int) - Number of siblings and spouses aboard - **Parch** (int) - Number of parents and children aboard - **Ticket** (str) - Ticket number - **Fare** (float) - Passenger fare - **Cabin** (str) - Cabin number - **Embarked** (str) - Port of embarkation (C = Cherbourg, Q = Queenstown, S = Southampton) **This section focuses on exploring the `Name`, `Sex`, `Ticket`, `Cabin`, and `Embarked` features.** ### Read In Data ``` # Read in our data import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline import numpy as np import pandas as pd titanic = pd.read_csv('titanic.csv', usecols=['Survived', 'Name', 'Sex', 'Cabin', 'Embarked']) titanic.head() ``` ### Plot Categorical Features ``` # Create a title feature by parsing passenger name and create a cabin indicator variable titanic['Title_Raw'] = titanic['Name'].apply(lambda x: x.split(',')[1].split('.')[0].strip()) titanic['Title'] = titanic['Title_Raw'].apply(lambda x: x if x in ['Master', 'Miss', 'Mr', 'Mrs'] else 'Other') titanic['Cabin_ind'] = np.where(titanic['Cabin'].isnull(), 0, 1) titanic.head() ``` * We built the 'Title' column purely for visualization: as we saw, the only title groups that are both common and clearly related to survival are Mr, Miss, Mrs, and Master, so everything else is lumped into 'Other'. * The same reasoning applies to the cabin indicator: whether the cabin value is missing is strongly related to the survival rate. ``` # Generate categorical plots for features for col in ['Title', 'Sex', 'Cabin_ind', 'Embarked']: sns.catplot(x=col, y='Survived', data=titanic, kind='point', aspect=2, ) plt.ylim(0, 1) # Split embarked by whether the passenger had a cabin titanic.pivot_table('Survived', index='Cabin_ind', columns='Embarked', aggfunc='count') ```
github_jupyter
``` %matplotlib inline ``` # Tensors Tensors are a specialized data structure that are very similar to arrays and matrices. In PyTorch, we use tensors to encode the inputs and outputs of a model, as well as the model’s parameters. Tensors are similar to [NumPy’s](https://numpy.org/) ndarrays, except that tensors can run on GPUs or other hardware accelerators. In fact, tensors and NumPy arrays can often share the same underlying memory, eliminating the need to copy data (see `bridge-to-np-label`). Tensors are also optimized for automatic differentiation (we'll see more about that later in the Autograd unit). If you’re familiar with `ndarrays`, you’ll be right at home with the Tensor API. If not, follow along! Let's start by setting up our environment. ``` import torch import numpy as np ``` # Initializing a Tensor Tensors can be initialized in various ways. Take a look at the following examples: ## Directly from data Tensors can be created directly from data. The data type is automatically inferred. ``` data = [[1, 2],[3, 4]] x_data = torch.tensor(data) ``` ## From a NumPy array Tensors can be created from NumPy arrays (and vice versa - see `bridge-to-np-label`). ``` np_array = np.array(data) x_np = torch.from_numpy(np_array) ``` ## From another tensor: The new tensor retains the properties (shape, data type) of the argument tensor, unless explicitly overridden. ``` x_ones = torch.ones_like(x_data) # retains the properties of x_data print(f"Ones Tensor: \n {x_ones} \n") x_rand = torch.rand_like(x_data, dtype=torch.float) # overrides the datatype of x_data print(f"Random Tensor: \n {x_rand} \n") ``` ## With random or constant values: ``shape`` is a tuple of tensor dimensions. In the functions below, it determines the dimensionality of the output tensor. ``` shape = (2,3,) rand_tensor = torch.rand(shape) ones_tensor = torch.ones(shape) zeros_tensor = torch.zeros(shape) print(f"Random Tensor: \n {rand_tensor} \n") print(f"Ones Tensor: \n {ones_tensor} \n") print(f"Zeros Tensor: \n {zeros_tensor}") ``` # Attributes of a Tensor Tensor attributes describe their shape, data type, and the device on which they are stored. ``` tensor = torch.rand(3,4) print(f"Shape of tensor: {tensor.shape}") print(f"Datatype of tensor: {tensor.dtype}") print(f"Device tensor is stored on: {tensor.device}") ``` # Operations on Tensors Over 100 tensor operations, including arithmetic, linear algebra, matrix manipulation (transposing, indexing, slicing), sampling and more are comprehensively described [here](https://pytorch.org/docs/stable/torch.html). Each of these operations can be run on the GPU (at typically higher speeds than on a CPU). By default, tensors are created on the CPU. We need to explicitly move tensors to the GPU using `.to` method (after checking for GPU availability). Keep in mind that copying large tensors across devices can be expensive in terms of time and memory! ``` # We move our tensor to the GPU if available if torch.cuda.is_available(): tensor = tensor.to('cuda') ``` Try out some of the operations from the list. If you're familiar with the NumPy API, you'll find the Tensor API a breeze to use. ## Standard numpy-like indexing and slicing: ``` tensor = torch.ones(4, 4) print('First row: ',tensor[0]) print('First column: ', tensor[:, 0]) print('Last column:', tensor[..., -1]) tensor[:,1] = 0 print(tensor) ``` ## Joining tensors You can use `torch.cat` to concatenate a sequence of tensors along a given dimension. 
See also [torch.stack](https://pytorch.org/docs/stable/generated/torch.stack.html), another tensor joining op that is subtly different from ``torch.cat``. ``` t1 = torch.cat([tensor, tensor, tensor], dim=1) print(t1) ``` ## Arithmetic operations ``` # This computes the matrix multiplication between two tensors. y1, y2, y3 will have the same value y1 = tensor @ tensor.T y2 = tensor.matmul(tensor.T) y3 = torch.rand_like(tensor) torch.matmul(tensor, tensor.T, out=y3) # This computes the element-wise product. z1, z2, z3 will have the same value z1 = tensor * tensor z2 = tensor.mul(tensor) z3 = torch.rand_like(tensor) torch.mul(tensor, tensor, out=z3) ``` ## Single-element tensors If you have a one-element tensor, for example by aggregating all values of a tensor into one value, you can convert it to a Python numerical value using `item()`: ``` agg = tensor.sum() agg_item = agg.item() print(agg_item, type(agg_item)) ``` ## In-place operations Operations that store the result into the operand are called in-place. They are denoted by a ``_`` suffix. For example: ``x.copy_(y)``, ``x.t_()``, will change ``x``. > **Note:** In-place operations save some memory, but can be problematic when computing derivatives because of an immediate loss of history. Hence, their use is discouraged. ``` print(tensor, "\n") tensor.add_(5) print(tensor) ``` ## Bridge with NumPy Tensors on the CPU and NumPy arrays can share their underlying memory locations, and changing one will change the other. ### Tensor to NumPy array ``` t = torch.ones(5) print(f"t: {t}") n = t.numpy() print(f"n: {n}") ``` A change in the tensor reflects in the NumPy array. ``` t.add_(1) print(f"t: {t}") print(f"n: {n}") ``` ### NumPy array to Tensor ``` n = np.ones(5) t = torch.from_numpy(n) ``` Changes in the NumPy array reflects in the tensor. ``` np.add(n, 1, out=n) print(f"t: {t}") print(f"n: {n}") ```
github_jupyter
## Evaluate CNTK Fast-RCNN model directly from python This notebook demonstrates how to evaluate a single image using a CNTK Fast-RCNN model. For a full description of the model and the algorithm, please see the following <a href="https://docs.microsoft.com/en-us/cognitive-toolkit/Object-Detection-using-Fast-R-CNN" target="_blank">tutorial</a>. Below, you will see sample code for: 1. Preparing the input data for the network (including image size adjustments) 2. Evaluation of the input data using the model 3. Processing the evaluation result and presenting the selected regions back on the image. <b>Important</b>: Before running this notebook, please make sure that: <ol> <li>You have version >= 2.0 RC 1 of CNTK installed. Installation instructions are available <a href="https://docs.microsoft.com/en-us/cognitive-toolkit/Setup-CNTK-on-your-machine" target="_blank">here</a>. <li>This notebook uses the CNTK python APIs and should be run from the CNTK python environment.</li> <li>OpenCV and the other required python packages for the Fast-RCNN scenario are installed. Please follow the instructions <a href="https://docs.microsoft.com/en-us/cognitive-toolkit/Object-Detection-using-Fast-R-CNN#setup" target="_blank">in here</a> to install the required packages. </ol> ##### 1. Download the sample dataset and make sure that the model exists First things first - we will download the sample Grocery dataset (if it's not already there), and we'll also make sure that the Fast-RCNN model file exists. The script will use your local trained model (if available), or will download and use the pre-trained model if a local trained model isn't available. In case we run inside the CNTK test enviornment, the model and data are copied from the test data directory. We also set the device to cpu / gpu for the test environment. If you have both CPU and GPU on your machine, you can optionally switch the devices. By default, we choose the best available device. ``` %matplotlib inline # the above line enable us to draw the images inside the notebooks import os import sys from os import path import cntk # Check for an environment variable defined in CNTK's test infrastructure def is_test(): return 'CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY' in os.environ # Select the right target device when this notebook is being tested # Currently supported only for GPU # Setup data environment for pre-built data sources for testing if is_test(): if 'TEST_DEVICE' in os.environ: if os.environ['TEST_DEVICE'] == 'cpu': cntk.device.try_set_default_device(cntk.device.cpu()) else: cntk.device.try_set_default_device(cntk.device.gpu(0)) sys.path.append(os.path.join(*"../../../../Tests/EndToEndTests/CNTKv2Python/Examples".split("/"))) import prepare_test_data as T T.prepare_Grocery_data() T.prepare_fastrcnn_grocery_100_model() #Make sure the grocery dataset is installed sys.path.append('../../DataSets/Grocery') from install_grocery import download_grocery_data download_grocery_data() # Make sure the FRCNN model exists - check if the model was trained and exists, if not - download the existing model sys.path.append('../../PretrainedModels') from models_util import download_model_by_name download_model_by_name("Fast-RCNN_grocery100") model_path = '../../PretrainedModels/Fast-RCNN_grocery100.model' ``` ### 3. load the model and prepare it for evaluation: As a first step for using the Fast-RCNN model, we load the trained model file. 
The trained model accepts 3 inputs: The image data, the bounding box (region of interest, or ROI) proposals and the ground truth labels of the ROIs. Since we are evaluating a new image - we probably don't have the ground truth labels for the image, hence - we need to adjust the network to accept only the image and the ROIs as input. In order to do that we use the CNTK APIs to clone the network and change its input nodes. More information and examples regarding cloning nodes of a network are available in the <a href="https://docs.microsoft.com/en-us/cognitive-toolkit/Build-your-own-image-classifier-using-Transfer-Learning" target="_blank">Transfer Learning</a> tutorial. ``` from cntk import load_model from cntk import placeholder from cntk.logging.graph import find_by_name, get_node_outputs from cntk.ops import combine from cntk.ops.sequence import input_variable from cntk.ops.functions import CloneMethod # load the trained model trained_frcnn_model = load_model(model_path) # find the original features and rois input nodes features_node = find_by_name(trained_frcnn_model, "features") rois_node = find_by_name(trained_frcnn_model, "rois") # find the output "z" node z_node = find_by_name(trained_frcnn_model, 'z') # define new input nodes for the features (image) and rois image_input = input_variable(features_node.shape, name='features') roi_input = input_variable(rois_node.shape, name='rois') # Clone the desired layers with fixed weights and place holder for the new input nodes cloned_nodes = combine([z_node.owner]).clone( CloneMethod.freeze, {features_node: placeholder(name='features'), rois_node: placeholder(name='rois')}) # apply the cloned nodes to the input nodes frcnn_model = cloned_nodes(image_input, roi_input) print("Fast-RCNN Grocery model loaded succesfully!") ``` ### 4. Load an image and convert it to the network format Next, we load an image from the test set using OpenCV, and then resize according to the network input dimensions. (Which are set when the network is trained). When resizing, we preserve scale and pad the border areas with a constant value (114), which is later used for normalization by the network. 
``` import cv2 import numpy as np import matplotlib.pyplot as plt image_height = 1000 image_width = 1000 def resize_and_pad(img, width, height, pad_value=114): # port of the c++ code from CNTK: https://github.com/Microsoft/CNTK/blob/f686879b654285d06d75c69ee266e9d4b7b87bc4/Source/Readers/ImageReader/ImageTransformers.cpp#L316 img_width = len(img[0]) img_height = len(img) scale_w = img_width > img_height target_w = width target_h = height if scale_w: target_h = int(np.round(img_height * float(width) / float(img_width))) else: target_w = int(np.round(img_width * float(height) / float(img_height))) resized = cv2.resize(img, (target_w, target_h), 0, 0, interpolation=cv2.INTER_NEAREST) top = int(max(0, np.round((height - target_h) / 2))) left = int(max(0, np.round((width - target_w) / 2))) bottom = height - top - target_h right = width - left - target_w resized_with_pad = cv2.copyMakeBorder(resized, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[pad_value, pad_value, pad_value]) #tranpose(2,0,1) converts the image to the HWC format which CNTK accepts model_arg_rep = np.ascontiguousarray(np.array(resized_with_pad, dtype=np.float32).transpose(2,0,1)) return resized_with_pad, model_arg_rep def load_image_and_scale(image_path, width, height, pad_value=114): img = cv2.imread(image_path) return resize_and_pad(img, width, height, pad_value), img test_image_path = r"../../DataSets/Grocery/testImages/WIN_20160803_11_28_42_Pro.jpg" (test_img, test_img_model_arg), original_img = load_image_and_scale(test_image_path, image_width, image_height) plt.imshow(cv2.cvtColor(test_img, cv2.COLOR_BGR2RGB)) plt.axis("off") ``` ### 5. Generate ROIs for testing Now, we produce regions of interest (ROIs) proposals using selective search & grid methods, using the same method as in the script: A1_GenerateInputROIs.py. Each ROI is in the format of [x,y,w,h], where the coordinates real numbers in the range of 0 to 1, and scaled according to the resized and padded image. The ROIs array is padded with regions of [0,0,0,0] at the end to match the 2000 ROIs input format of the model. 
``` # Parameters taken from PARAMETERS.py # ROI generation roi_minDimRel = 0.04 roi_maxDimRel = 0.4 roi_minNrPixelsRel = 2 * roi_minDimRel * roi_minDimRel roi_maxNrPixelsRel = 0.33 * roi_maxDimRel * roi_maxDimRel roi_maxAspectRatio = 4.0 # maximum aspect Ratio of a ROI vertically and horizontally roi_maxImgDim = 200 # image size used for ROI generation ss_scale = 100 # selective search ROIS: parameter controlling cluster size for segmentation ss_sigma = 1.2 # selective search ROIs: width of gaussian kernal for segmentation ss_minSize = 20 # selective search ROIs: minimum component size for segmentation grid_nrScales = 7 # uniform grid ROIs: number of iterations from largest possible ROI to smaller ROIs grid_aspectRatios = [1.0, 2.0, 0.5] # uniform grid ROIs: aspect ratio of ROIs cntk_nrRois = 100 # 100 # how many ROIs to zero-pad cntk_padWidth = 1000 cntk_padHeight = 1000 from cntk_helpers import imArrayWidthHeight, getSelectiveSearchRois, imresizeMaxDim from cntk_helpers import getGridRois, filterRois, roiTransformPadScaleParams, roiTransformPadScale def get_rois_for_image(img, use_selective_search=True, use_grid_rois=True): roi_minDim = roi_minDimRel * roi_maxImgDim roi_maxDim = roi_maxDimRel * roi_maxImgDim roi_minNrPixels = roi_minNrPixelsRel * roi_maxImgDim*roi_maxImgDim roi_maxNrPixels = roi_maxNrPixelsRel * roi_maxImgDim*roi_maxImgDim imgOrig = img.copy() # get rois if use_selective_search: print ("Calling selective search..") rects, scaled_img, scale = getSelectiveSearchRois(imgOrig, ss_scale, ss_sigma, ss_minSize, roi_maxImgDim) #interpolation=cv2.INTER_AREA print ("Number of rois detected using selective search: " + str(len(rects))) else: rects = [] scaled_img, scale = imresizeMaxDim(imgOrig, roi_maxImgDim, boUpscale=True, interpolation=cv2.INTER_AREA) imgWidth, imgHeight = imArrayWidthHeight(scaled_img) # add grid rois if use_grid_rois: rectsGrid = getGridRois(imgWidth, imgHeight, grid_nrScales, grid_aspectRatios) print ("Number of rois on grid added: " + str(len(rectsGrid))) rects += rectsGrid # run filter print ("Number of rectangles before filtering = " + str(len(rects))) rois = filterRois(rects, imgWidth, imgHeight, roi_minNrPixels, roi_maxNrPixels, roi_minDim, roi_maxDim, roi_maxAspectRatio) if len(rois) == 0: #make sure at least one roi returned per image rois = [[5, 5, imgWidth-5, imgHeight-5]] print ("Number of rectangles after filtering = " + str(len(rois))) # scale up to original size and save to disk # note: each rectangle is in original image format with [x,y,x2,y2] original_rois = np.int32(np.array(rois) / scale) img_width = len(img[0]) img_height = len(img) # all rois need to be scaled + padded to cntk input image size targetw, targeth, w_offset, h_offset, scale = roiTransformPadScaleParams(img_width, img_height, cntk_padWidth, cntk_padHeight) rois = [] for original_roi in original_rois: x, y, x2, y2 = roiTransformPadScale(original_roi, w_offset, h_offset, scale) xrel = float(x) / (1.0 * targetw) yrel = float(y) / (1.0 * targeth) wrel = float(x2 - x) / (1.0 * targetw) hrel = float(y2 - y) / (1.0 * targeth) rois.append([xrel, yrel, wrel, hrel]) # pad rois if needed: if len(rois) < cntk_nrRois: rois += [[0, 0, 0, 0]] * (cntk_nrRois - len(rois)) elif len(rois) > cntk_nrRois: rois = rois[:cntk_nrRois] return np.array(rois), original_rois test_rois, original_rois = get_rois_for_image(original_img) roi_padding_index = len(original_rois) print("Number of rois for evaluation:", len(test_rois)) ``` ### 6. 
Evaluate the sample Here, we prepare the data to be in CNTK's expected arguments format and run it through the model used the model's **eval** method. We then process the result by trimming the padded ROIs part, and calculate the predicted labels and their probabilities. ``` from cntk_helpers import softmax2D # a dummy variable for labels the will be given as an input to the network but will be ignored dummy_labels = np.zeros((2000,17)) #Index the names of the arguments so we can get them by name args_indices = {} for i,arg in enumerate(frcnn_model.arguments): args_indices[arg.name] = i # prepare the arguments arguments = { frcnn_model.arguments[args_indices['features']]: [test_img_model_arg], frcnn_model.arguments[args_indices['rois']]: [test_rois], } # run it through the model output = frcnn_model.eval(arguments) # we now extract the "z" values from the output, which are the values of the layer that is just before # the softmax layer. # we take just the relevant part from that array rois_values = output[0][0][:roi_padding_index] # get the prediction for each roi by taking the index with the maximal value in each row rois_labels_predictions = np.argmax(rois_values, axis=1) # calculate the probabilities using softmax rois_probs = softmax2D(rois_values) # print the number of ROIs that were detected as non-background print("Number of detections: %d"%np.sum(rois_labels_predictions > 0)) ``` ### 7. Merge overlapping regions using Non-Maxima-Suppression Before inspecting the predictions, we need to merge overlapping regions that were detected using the Non-Maxima-Suppression algorithm that is implemented in the cntk_helpers module. ``` from cntk_helpers import applyNonMaximaSuppression nms_threshold = 0.1 non_padded_rois = test_rois[:roi_padding_index] max_probs = np.amax(rois_probs, axis=1).tolist() rois_prediction_indices = applyNonMaximaSuppression(nms_threshold, rois_labels_predictions, max_probs, non_padded_rois) print("Indices of selected regions:",rois_prediction_indices) ``` ### 8. Visualize the results As a final step, we use the OpenCV **rectangle** and **putText** methods in order to draw the selected regions on the original image alongside their corresponding predicted labels. ``` rois_with_prediction = test_rois[rois_prediction_indices] rois_prediction_labels = rois_labels_predictions[rois_prediction_indices] rois_predicion_scores = rois_values[rois_prediction_indices] original_rois_predictions = original_rois[rois_prediction_indices] # class names taken from PARAMETERS.py: classes = ('__background__', # always index 0 'avocado', 'orange', 'butter', 'champagne', 'eggBox', 'gerkin', 'joghurt', 'ketchup', 'orangeJuice', 'onion', 'pepper', 'tomato', 'water', 'milk', 'tabasco', 'mustard') original_img_cpy = original_img.copy() for roi,label in zip(original_rois_predictions, rois_prediction_labels): (x1,y1,x2,y2) = roi cv2.rectangle(original_img_cpy, (x1, y1), (x2, y2), (0, 255, 0), 5) cv2.putText(original_img_cpy,classes[label],(x1,y2 + 30), cv2.FONT_HERSHEY_DUPLEX, 2,(200,0,255),3,cv2.LINE_AA) print("Evaluation result:") plt.figure(figsize=(10, 10)) plt.imshow(cv2.cvtColor(original_img_cpy, cv2.COLOR_BGR2RGB), interpolation='nearest') plt.axis("off") ```
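A side note on step 7: `applyNonMaximaSuppression` is used above as a black box from `cntk_helpers`. For intuition, a greedy IoU-based non-maxima suppression can be sketched as below. This is only an illustrative re-implementation (not the `cntk_helpers` code); it assumes boxes given as `[x1, y1, x2, y2]` with one confidence score per box, and keeps the highest-scoring box while discarding boxes that overlap it by more than the threshold.

```
import numpy as np

def greedy_nms(boxes, scores, iou_threshold=0.1):
    # boxes: (N, 4) array of [x1, y1, x2, y2]; scores: (N,) confidences
    boxes = np.asarray(boxes, dtype=float)
    scores = np.asarray(scores, dtype=float)
    order = np.argsort(scores)[::-1]  # indices sorted by descending score
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        rest = order[1:]
        # intersection of the kept box with all remaining boxes
        xx1 = np.maximum(boxes[i, 0], boxes[rest, 0])
        yy1 = np.maximum(boxes[i, 1], boxes[rest, 1])
        xx2 = np.minimum(boxes[i, 2], boxes[rest, 2])
        yy2 = np.minimum(boxes[i, 3], boxes[rest, 3])
        inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
        area_i = (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
        areas = (boxes[rest, 2] - boxes[rest, 0]) * (boxes[rest, 3] - boxes[rest, 1])
        iou = inter / (area_i + areas - inter)
        order = rest[iou <= iou_threshold]  # drop boxes that overlap the kept box too much
    return keep

# toy example: the second box is suppressed by the first, the third survives
print(greedy_nms([[0, 0, 10, 10], [1, 1, 9, 9], [20, 20, 30, 30]], [0.9, 0.8, 0.7]))
```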
github_jupyter
# Predicting Review rating from review text # <span style="color:dodgerblue"> Naive Bayes Classifier Using 5 Classes (1,2,3,4 and 5 Rating)</span> ``` %pylab inline import warnings warnings.filterwarnings('ignore') from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" import pandas as pd import numpy as np import matplotlib.pyplot as plt import nltk from nltk.corpus import stopwords # Importing the reviews dataset reviews_dataset = pd.read_csv('reviews_restaurants_text.csv') # Creating X and Y for the classifier. X is the review text and Y is the rating x = reviews_dataset['text'] y = reviews_dataset['stars'] # Text preprocessing import string def text_preprocessing(text): no_punctuation = [ch for ch in text if ch not in string.punctuation] no_punctuation = ''.join(no_punctuation) return [w for w in no_punctuation.split() if w.lower() not in stopwords.words('english')] %%time # Estimated time: 30 min # Vectorization # Converting each review into a vector using bag-of-words approach from sklearn.feature_extraction.text import CountVectorizer vector = CountVectorizer(analyzer=text_preprocessing).fit(x) x = vector.transform(x) # Spitting data into training and test set from sklearn.model_selection import train_test_split X_train, X_test, Y_train, Y_test = train_test_split(x, y, test_size=0.20, random_state=0, shuffle =False) # Building Multinomial Naive Bayes modle and fit it to our training set from sklearn.naive_bayes import MultinomialNB classifier = MultinomialNB() classifier.fit(X_train, Y_train) # Using our trained classifier to predict the ratings from text # Testing our model on the test set preds = classifier.predict(X_test) print("Actual Ratings(Stars): ",end = "") display(Y_test[:15]) print("Predicted Ratings: ",end = "") print(preds[:15]) ``` ## Evaluating the model ## <span style="color:orangered"> Accuracy </span> ``` # Accuracy of the model from sklearn.metrics import accuracy_score accuracy_score(Y_test, preds) ``` ## <span style="color:orangered"> Precision and Recall of the model</span> ``` from sklearn.metrics import precision_score from sklearn.metrics import recall_score print ('Precision: ' + str(precision_score(Y_test, preds, average='weighted'))) print ('Recall: ' + str(recall_score(Y_test,preds, average='weighted'))) ``` ## <span style="color:orangered"> Classification Report </span> ``` # Evaluating the model from sklearn.metrics import confusion_matrix, classification_report print(confusion_matrix(Y_test, preds)) print('\n') print(classification_report(Y_test, preds)) ``` ## <span style="color:orangered">Confusion Matrix of the model</span> ``` # citation: http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py import itertools def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') print(cm) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. 
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') from sklearn import metrics class_names = ['1','2','3','4','5'] # Compute confusion matrix cnf_matrix = metrics.confusion_matrix(Y_test, preds ) np.set_printoptions(precision=2) # Plot non-normalized confusion matrix plt.figure() plot_confusion_matrix(cnf_matrix, classes=class_names, title='Confusion matrix, without normalization') # Plot normalized confusion matrix plt.figure() plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True, title='Normalized confusion matrix') plt.show() ``` # <span style="color:dodgerblue"> Naive Bayes Classifier Using 2 Classes <span style="color:dodgerblue"> (1 and 5 Rating: Positive & Negative Reviews)</span> ``` # Importing the datasets reviews = pd.read_csv('reviews_restaurants_text.csv') reviews['text'] = reviews['text'].str[2:-2] # Reducing the dataset to 2 classes i.e 1 and 5 star rating reviews['stars'][reviews.stars == 3] = 1 reviews['stars'][reviews.stars == 2] = 1 reviews['stars'][reviews.stars == 4] = 5 #Undersampling of the dataset to get a balanced dataset review1 = reviews[reviews['stars'] == 1] review5 = reviews[reviews['stars'] == 5][0:34062] frames = [review1, review5] reviews = pd.concat(frames) # Creating X and Y for the classifier. X is the review text and Y is the rating x2 = reviews['text'] y2 = reviews['stars'] # Vectorization # Converting each review into a vector using bag-of-words approach from sklearn.feature_extraction.text import CountVectorizer vector2 = CountVectorizer(analyzer=text_preprocessing).fit(x2) x2 = vector.transform(x2) # Spitting data into training and test set from sklearn.model_selection import train_test_split X2_train, X2_test, Y2_train, Y2_test = train_test_split(x2, y2, test_size=0.20, random_state=0) # Building Multinomial Naive Bayes modle and fit it to our training set from sklearn.naive_bayes import MultinomialNB classifier2 = MultinomialNB() classifier2.fit(X2_train, Y2_train) # Testing our model on the test set Y2_pred = classifier2.predict(X2_test) ``` ## <span style="color:orangered"> Classification Report </span> ``` # Evaluating the model from sklearn.metrics import confusion_matrix, classification_report print(confusion_matrix(Y2_test, Y2_pred)) print('\n') print(classification_report(Y2_test, Y2_pred)) ``` ## <span style="color:orangered"> Accuracy of the model </span> ``` # Accuracy of the model from sklearn.metrics import accuracy_score accuracy_score(Y2_test, Y2_pred) ``` ## <span style="color:orangered"> Precision and Recall of the model</span> ``` from sklearn.metrics import precision_score from sklearn.metrics import recall_score print ('Precision: ' + str(precision_score(Y2_test, Y2_pred, average='weighted'))) print ('Recall: ' + str(recall_score(Y2_test, Y2_pred, average='weighted'))) ``` ## <span style="color:orangered"> Confusion Matrix of the model </font> ``` class_names = ['Negative','Positive'] # Compute confusion matrix cnf_matrix = metrics.confusion_matrix(Y2_test, Y2_pred) np.set_printoptions(precision=2) # Plot non-normalized confusion matrix plt.figure() plot_confusion_matrix(cnf_matrix, classes=class_names, title='Confusion matrix, without normalization') # Plot normalized confusion matrix plt.figure() plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True, title='Normalized confusion matrix') 
plt.show() ```
github_jupyter
# The Exact Cover problem First, let us explain the Exact Cover problem. Consider a set U of natural numbers, together with a number of groups $V_{1}, V_{2}, \ldots, V_{N}$ whose elements are drawn from U. A single natural number may belong to more than one group. The Exact Cover problem is the problem of picking some of the groups $V_{i}$ so that no natural number appears more than once among the picked groups and, taken together, the picked groups contain exactly the same set of natural numbers as U. The variant that additionally minimizes the number of picked groups is called the Smallest Exact Cover problem. ## Setup ``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt import blueqat.wq as wq from blueqat import vqe ``` ## Building the QUBO We create the QUBO matrix of the problem we want to solve. Let the set of natural numbers be $U = \{1, \ldots, n\}$ and the groups be $V_{i} \subseteq U \ (i=1, \ldots, N)$. Whether the i-th group is picked is represented by $x_{i} \in \{1, 0\}$: it is 1 if the group is picked and 0 otherwise. We now look for a cost function $E_{A}$ that is minimized when each natural number (call it $\alpha$) is contained in exactly one of the picked groups. If we set $E_{A} = A \sum_{\alpha=1}^{n} \left( 1 - \sum_{i:\alpha \in V_{i}} x_{i} \right)^{2}$ then $E_{A} = 0$ whenever exactly one group is picked for each natural number $\alpha$. We now convert this to QUBO form. First, expand the square: $E_{A} = A \sum_{\alpha=1}^{n} \left\{ 1 - 2\sum_{i:\alpha \in V_{i}} x_{i} + \left( \sum_{i:\alpha \in V_{i}} x_{i} \right)^{2} \right\}$ Since this is a minimization of $E_{A}$, the constant first term inside the braces can be ignored. Using the fact that $x_{i} \in \{1,0\}$, the second term can be rewritten as $- 2\sum_{i:\alpha \in V_{i}} x_{i} = - 2\sum_{i=j,\ i:\alpha \in V_{i},\ j:\alpha \in V_{j}} x_{i} x_{j}$ The third term, split into the cases $i = j$ and $i \neq j$, becomes $\left( \sum_{i:\alpha \in V_{i}} x_{i} \right)^{2} = \sum_{i=j,\ i:\alpha \in V_{i},\ j:\alpha \in V_{j}} x_{i} x_{j} + 2 \sum_{i \neq j,\ i:\alpha \in V_{i},\ j:\alpha \in V_{j}} x_{i} x_{j}$ Putting these together, $E_{A} = A \sum_{\alpha=1}^{n} \left( - \sum_{i=j,\ i:\alpha \in V_{i},\ j:\alpha \in V_{j}} x_{i} x_{j} + 2 \sum_{i \neq j,\ i:\alpha \in V_{i},\ j:\alpha \in V_{j}} x_{i} x_{j} \right)$ which is now in QUBO form. ``` U = [1,2,3,4,5,6,7,8,9,10] A = 1 def get_qubo(V): Q = np.zeros( (len(V), len(V)) ) for i in range(len(V)): for j in range(len(V)): for k in range(len(U)): alpha = U[k] in_Vi = V[i].count(alpha) > 0 # is alpha contained in V[i]? in_Vj = V[j].count(alpha) > 0 # is alpha contained in V[j]? if i == j and in_Vi: Q[i][j] += -1 elif i < j and in_Vi and in_Vj: Q[i][j] += 2 return Q * A ``` We also define a function to display the results. ``` def display_answer(list_x, energies = None, show_graph = False): print("Result x:", list_x) text = "" for i in range(len(list_x)): if(list_x[i]): text += str(V[i]) print("Picked {} group(s): {}".format(sum(list_x), text)) if energies is not None: print("Energy:", energies[-1]) if show_graph: plt.plot(energies) plt.show() ``` Running the following, we can see that the correct answer is obtained. ``` V = [ [1,2], [3,4,5,6], [7,8,9,10], [1,3,5], [10] ] qubo = get_qubo(V) result = vqe.Vqe(vqe.QaoaAnsatz(wq.pauli(qubo), step=4)).run() answer = result.most_common(12) print(answer) display_answer(answer[0][0]) ``` ## Making V a little more complicated Let us make V a little more complicated (by adding two groups) and run it again. ``` V = [ [1,2], [3,4,5,6], [7,8,9,10], [1,3,5], [10], [7,9], [2,4,6,8] ] qubo = get_qubo(V) result = vqe.Vqe(vqe.QaoaAnsatz(wq.pauli(qubo), step=2)).run() answer = result.most_common(12) print(answer) display_answer(answer[0][0]) ``` Again, the correct answer is obtained. ### A nasty case Finally, we try a nasty case, where the correct answer is to pick {1,2}{3}{4}{5}{6}{7}{8}{9}{10}. Looking at the results, the correct answer is chosen most of the time, but occasionally an incorrect answer with slightly higher energy is chosen instead. ``` V = [ [1,2], [3], [4], [5], [6], [7], [8], [9], [10], [2,3,4,5,6,7,8,9,10]] for i in range(5): print("--- run {}".format(i+1)) qubo = get_qubo(V) result = vqe.Vqe(vqe.QaoaAnsatz(wq.pauli(qubo), step=6)).run() answer = result.most_common(12) display_answer(answer[0][0]) ```
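As a quick sanity check on `get_qubo`, independent of the VQE runs above, we can evaluate the QUBO energy $x^{T} Q x$ of a known exact cover directly. Since the constant term $A\,n$ was dropped from $E_{A}$, a valid exact cover should give an energy of $-A \cdot n$ (here $-10$). The snippet below is a minimal sketch of such a check.

```
def qubo_energy(x, Q):
    x = np.array(x)
    return float(x @ Q @ x)  # Q is upper triangular, so this matches the QUBO sum

# picking {1,2}, {3,4,5,6} and {7,8,9,10} is an exact cover of U, so the energy should be -10
V = [ [1,2], [3,4,5,6], [7,8,9,10], [1,3,5], [10] ]
print(qubo_energy([1, 1, 1, 0, 0], get_qubo(V)))
```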
github_jupyter
# Introduction to optimization The basic components * The objective function (also called the 'cost' function) ``` import numpy as np objective = np.poly1d([1.3, 4.0, 0.6]) print(objective) ``` * The "optimizer" ``` import scipy.optimize as opt x_ = opt.fmin(objective, [3]) print("solved: x={}".format(x_)) %matplotlib notebook x = np.linspace(-4,1,101) import matplotlib.pylab as mpl mpl.plot(x, objective(x)) mpl.plot(x_, objective(x_), 'ro') ``` Additional components * "Box" constraints ``` import scipy.special as ss import scipy.optimize as opt import numpy as np import matplotlib.pylab as mpl x = np.linspace(2, 7, 200) # 1st order Bessel j1x = ss.j1(x) mpl.plot(x, j1x) # use scipy.optimize's more modern "results object" interface result = opt.minimize_scalar(ss.j1, method="bounded", bounds=[2, 4]) j1_min = ss.j1(result.x) mpl.plot(result.x, j1_min,'ro') ``` * The gradient and/or hessian ``` import mystic.models as models print(models.rosen.__doc__) import mystic mystic.model_plotter(mystic.models.rosen, kwds='-f -d -x 1 -b "-3:3:.1, -1:5:.1, 1"') import scipy.optimize as opt import numpy as np # initial guess x0 = [1.3, 1.6, -0.5, -1.8, 0.8] result = opt.minimize(opt.rosen, x0) print(result.x) # number of function evaluations print(result.nfev) # again, but this time provide the derivative result = opt.minimize(opt.rosen, x0, jac=opt.rosen_der) print(result.x) # number of function evaluations and derivative evaluations print(result.nfev, result.njev) print('') # however, note for a different x0... for i in range(5): x0 = np.random.randint(-20,20,5) result = opt.minimize(opt.rosen, x0, jac=opt.rosen_der) print("{} @ {} evals".format(result.x, result.nfev)) ``` * The penalty functions $\psi(x) = f(x) + k*p(x)$ ``` # http://docs.scipy.org/doc/scipy/reference/tutorial/optimize.html#tutorial-sqlsp ''' Maximize: f(x) = 2*x0*x1 + 2*x0 - x0**2 - 2*x1**2 Subject to: x0**3 - x1 == 0 x1 >= 1 ''' import numpy as np def objective(x, sign=1.0): return sign*(2*x[0]*x[1] + 2*x[0] - x[0]**2 - 2*x[1]**2) def derivative(x, sign=1.0): dfdx0 = sign*(-2*x[0] + 2*x[1] + 2) dfdx1 = sign*(2*x[0] - 4*x[1]) return np.array([ dfdx0, dfdx1 ]) # unconstrained result = opt.minimize(objective, [-1.0,1.0], args=(-1.0,), jac=derivative, method='SLSQP', options={'disp': True}) print("unconstrained: {}".format(result.x)) cons = ({'type': 'eq', 'fun' : lambda x: np.array([x[0]**3 - x[1]]), 'jac' : lambda x: np.array([3.0*(x[0]**2.0), -1.0])}, {'type': 'ineq', 'fun' : lambda x: np.array([x[1] - 1]), 'jac' : lambda x: np.array([0.0, 1.0])}) # constrained result = opt.minimize(objective, [-1.0,1.0], args=(-1.0,), jac=derivative, constraints=cons, method='SLSQP', options={'disp': True}) print("constrained: {}".format(result.x)) ``` Optimizer classifications * Constrained versus unconstrained (and importantly LP and QP) ``` # from scipy.optimize.minimize documentation ''' **Unconstrained minimization** Method *Nelder-Mead* uses the Simplex algorithm [1]_, [2]_. This algorithm has been successful in many applications but other algorithms using the first and/or second derivatives information might be preferred for their better performances and robustness in general. Method *Powell* is a modification of Powell's method [3]_, [4]_ which is a conjugate direction method. It performs sequential one-dimensional minimizations along each vector of the directions set (`direc` field in `options` and `info`), which is updated at each iteration of the main minimization loop. 
The function need not be differentiable, and no derivatives are taken. Method *CG* uses a nonlinear conjugate gradient algorithm by Polak and Ribiere, a variant of the Fletcher-Reeves method described in [5]_ pp. 120-122. Only the first derivatives are used. Method *BFGS* uses the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) [5]_ pp. 136. It uses the first derivatives only. BFGS has proven good performance even for non-smooth optimizations. This method also returns an approximation of the Hessian inverse, stored as `hess_inv` in the OptimizeResult object. Method *Newton-CG* uses a Newton-CG algorithm [5]_ pp. 168 (also known as the truncated Newton method). It uses a CG method to the compute the search direction. See also *TNC* method for a box-constrained minimization with a similar algorithm. Method *Anneal* uses simulated annealing, which is a probabilistic metaheuristic algorithm for global optimization. It uses no derivative information from the function being optimized. Method *dogleg* uses the dog-leg trust-region algorithm [5]_ for unconstrained minimization. This algorithm requires the gradient and Hessian; furthermore the Hessian is required to be positive definite. Method *trust-ncg* uses the Newton conjugate gradient trust-region algorithm [5]_ for unconstrained minimization. This algorithm requires the gradient and either the Hessian or a function that computes the product of the Hessian with a given vector. **Constrained minimization** Method *L-BFGS-B* uses the L-BFGS-B algorithm [6]_, [7]_ for bound constrained minimization. Method *TNC* uses a truncated Newton algorithm [5]_, [8]_ to minimize a function with variables subject to bounds. This algorithm uses gradient information; it is also called Newton Conjugate-Gradient. It differs from the *Newton-CG* method described above as it wraps a C implementation and allows each variable to be given upper and lower bounds. Method *COBYLA* uses the Constrained Optimization BY Linear Approximation (COBYLA) method [9]_, [10]_, [11]_. The algorithm is based on linear approximations to the objective function and each constraint. The method wraps a FORTRAN implementation of the algorithm. Method *SLSQP* uses Sequential Least SQuares Programming to minimize a function of several variables with any combination of bounds, equality and inequality constraints. The method wraps the SLSQP Optimization subroutine originally implemented by Dieter Kraft [12]_. Note that the wrapper handles infinite values in bounds by converting them into large floating values. ''' ``` The typical optimization algorithm (local or global) is unconstrained. Constrained algorithms tend strongly to be local, and also often use LP/QP approximations. Hence, most optimization algorithms are good either for quick linear/quadratic approximation under some constraints, or are intended for nonlinear functions without constraints. Any information about the problem that impacts the potential solution can be seen as constraining information. Constraining information is typically applied as a penatly, or as a box constraint on an input. The user is thus typically forced to pick whether they want to apply constraints but treat the problem as a LP/QP approximation, or to ignore the constraining information in exchange for a nonliear solver. ``` import scipy.optimize as opt # constrained: linear (i.e. A*x + b) print(opt.cobyla.fmin_cobyla) print(opt.linprog) # constrained: quadratic programming (i.e. 
up to x**2) print(opt.fmin_slsqp) # http://cvxopt.org/examples/tutorial/lp.html ''' minimize: f = 2*x0 + x1 subject to: -x0 + x1 <= 1 x0 + x1 >= 2 x1 >= 0 x0 - 2*x1 <= 4 ''' import cvxopt as cvx from cvxopt import solvers as cvx_solvers A = cvx.matrix([ [-1.0, -1.0, 0.0, 1.0], [1.0, -1.0, -1.0, -2.0] ]) b = cvx.matrix([ 1.0, -2.0, 0.0, 4.0 ]) cost = cvx.matrix([ 2.0, 1.0 ]) sol = cvx_solvers.lp(cost, A, b) print(sol['x']) # http://cvxopt.org/examples/tutorial/qp.html ''' minimize: f = 2*x1**2 + x2**2 + x1*x2 + x1 + x2 subject to: x1 >= 0 x2 >= 0 x1 + x2 == 1 ''' import cvxopt as cvx from cvxopt import solvers as cvx_solvers Q = 2*cvx.matrix([ [2, .5], [.5, 1] ]) p = cvx.matrix([1.0, 1.0]) G = cvx.matrix([[-1.0,0.0],[0.0,-1.0]]) h = cvx.matrix([0.0,0.0]) A = cvx.matrix([1.0, 1.0], (1,2)) b = cvx.matrix(1.0) sol = cvx_solvers.qp(Q, p, G, h, A, b) print(sol['x']) ``` Notice how much nicer it is to see the optimizer "trajectory". Now, instead of a single number, we have the path the optimizer took in finding the solution. `scipy.optimize` has a version of this, with `options={'retall':True}`, which returns the solver trajectory. **EXERCISE:** Solve the constrained programming problem by any of the means above. Minimize: f = -1*x[0] + 4*x[1] Subject to: <br> -3*x[0] + 1*x[1] <= 6 <br> 1*x[0] + 2*x[1] <= 4 <br> x[1] >= -3 <br> where: -inf <= x[0] <= inf * Local versus global ``` import scipy.optimize as opt # probabilstic solvers, that use random hopping/mutations print(opt.differential_evolution) print(opt.basinhopping) import scipy.optimize as opt # bounds instead of an initial guess bounds = [(-10., 10)]*5 for i in range(10): result = opt.differential_evolution(opt.rosen, bounds) # result and number of function evaluations print(result.x, '@ {} evals'.format(result.nfev)) ``` Global optimizers tend to be much slower than local optimizers, and often use randomness to pick points within some box constraints instead of starting with an initial guess. The choice then is between algorithms that are non-deterministic and algorithms that are deterministic but depend very strongly on the selected starting point. Local optimization algorithms have names like "gradient descent" and "steepest descent", while global optimizations tend to use things like "stocastic" and "genetic" algorithms. * Not covered: other exotic types Other important special cases: * Least-squares fitting ``` import scipy.optimize as opt import scipy.stats as stats import numpy as np # Define the function to fit. def function(x, a, b, f, phi): result = a * np.exp(-b * np.sin(f * x + phi)) return result # Create a noisy data set around the actual parameters true_params = [3, 2, 1, np.pi/4] print("target parameters: {}".format(true_params)) x = np.linspace(0, 2*np.pi, 25) exact = function(x, *true_params) noisy = exact + 0.3*stats.norm.rvs(size=len(x)) # Use curve_fit to estimate the function parameters from the noisy data. initial_guess = [1,1,1,1] estimated_params, err_est = opt.curve_fit(function, x, noisy, p0=initial_guess) print("solved parameters: {}".format(estimated_params)) # err_est is an estimate of the covariance matrix of the estimates print("covarance: {}".format(err_est.diagonal())) import matplotlib.pylab as mpl mpl.plot(x, noisy, 'ro') mpl.plot(x, function(x, *estimated_params)) ``` Least-squares tends to be chosen when the user wants a measure of the covariance, typically as an error estimate. 
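Returning to the constrained programming exercise stated earlier in this section (minimize f = -1*x[0] + 4*x[1] subject to the three linear constraints), one possible approach is `scipy.optimize.linprog`, since both the objective and the constraints are linear. In the sketch below the two general inequalities go into `A_ub`/`b_ub` and the bound on x[1] is passed as a box constraint; this is just one of several ways to solve it with the tools above.

```
import scipy.optimize as opt

# minimize: f = -1*x[0] + 4*x[1]
c = [-1, 4]
# subject to: -3*x[0] + 1*x[1] <= 6 and 1*x[0] + 2*x[1] <= 4
A_ub = [[-3, 1], [1, 2]]
b_ub = [6, 4]
# x[1] >= -3, x[0] unbounded
bounds = [(None, None), (-3, None)]

result = opt.linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bounds)
print(result.x, result.fun)
```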
* Integer programming Integer programming (IP) or Mixed-integer programming (MIP) requires special optimizers that only select parameter values from the set of integers. These optimizers are typically used for things like cryptography, or other optimizations over a discrete set of possible solutions. Typical uses * Function minimization * Data fitting * Root finding ``` import numpy as np import scipy.optimize as opt def system(x,a,b,c): x0, x1, x2 = x eqs= [ 3 * x0 - np.cos(x1*x2) + a, # == 0 x0**2 - 81*(x1+0.1)**2 + np.sin(x2) + b, # == 0 np.exp(-x0*x1) + 20*x2 + c # == 0 ] return eqs # coefficients a = -0.5 b = 1.06 c = (10 * np.pi - 3.0) / 3 # initial guess x0 = [0.1, 0.1, -0.1] # Solve the system of non-linear equations. result = opt.root(system, x0, args=(a, b, c)) print("root:", result.x) print("solution:", result.fun) ``` * Parameter estimation ``` import numpy as np import scipy.stats as stats # Create clean data. x = np.linspace(0, 4.0, 100) y = 1.5 * np.exp(-0.2 * x) + 0.3 # Add a bit of noise. noise = 0.1 * stats.norm.rvs(size=100) noisy_y = y + noise # Fit noisy data with a linear model. linear_coef = np.polyfit(x, noisy_y, 1) linear_poly = np.poly1d(linear_coef) linear_y = linear_poly(x) # Fit noisy data with a quadratic model. quad_coef = np.polyfit(x, noisy_y, 2) quad_poly = np.poly1d(quad_coef) quad_y = quad_poly(x) import matplotlib.pylab as mpl mpl.plot(x, noisy_y, 'ro') mpl.plot(x, linear_y) mpl.plot(x, quad_y) #mpl.plot(x, y) ``` Standard diagnostic tools * Eyeball the plotted solution against the objective * Run several times and take the best result * Analyze a log of intermediate results, per iteration * Rare: look at the covariance matrix * Issue: how can you really be sure you have the results you were looking for? **EXERCISE:** Use any of the solvers we've seen thus far to find the minimum of the `zimmermann` function (i.e. use `mystic.models.zimmermann` as the objective). Use the bounds suggested below, if your choice of solver allows it. ``` import mystic.models as models print(models.zimmermann.__doc__) ``` **EXERCISE:** Do the same for the `fosc3d` function found at `mystic.models.fosc3d`, using the bounds suggested by the documentation, if your chosen solver accepts bounds or constraints. More to ponder: what about high-dimenstional and nonlinear constraints? Let's look at optimization "redesigned" in [mystic](mystic.ipynb)...
github_jupyter
# Other programming languages **Today we talk about various programming languages:** If you have learned one programming language, it is easy to learn the next. **Different kinds** of programming languages: 1. **Low-level, compiled (C/C++, Fortran):** You are in full control, but need to specify types, allocate memory and clean up after yourself 2. **High-level, interpreted (MATLAB, Python, Julia, R):** Types are inferred, memory is allocated automatically, and there is automatic garbage collection **Others:** 1. **[Wolfram Mathematica](https://www.wolfram.com/mathematica/)**: A mathematical programming language. The inspiration for **sympy**. 2. **[STATA](https://www.stata.com/)**: For many economists still the preferred statistical program, because it is so good at panel data and provides standard errors for a lot of the commonly used estimators. > **Note:** Data cleaning and structuring is increasingly done in **R** or **Python**, and **STATA** is then only used for estimation. **Comparison:** We solve the same Simulated Minimum Distance (SMD) problem in MATLAB, Python and Julia. **Observations:** 1. Any language can typically be used to solve a task. But some have a **comparative advantage**. 2. If a **syntax** in a language irritates you, you will write worse code. 3. A **community** in your field around a language is important. 4. **No language is the best at everything**. **Comparisons:** - Coleman et al. (2020): MATLAB, [Python and Julia: What to choose in economics?](https://lmaliar.ws.gc.cuny.edu/files/2019/01/CEPR-DP13210.pdf) - Fernández-Villaverde and Valencia (2019): [A Practical Guide to Parallelization in Economics](https://www.sas.upenn.edu/~jesusfv/Guide_Parallel.pdf) # High-level programming languages ## MATLAB The **godfather** of high-level scientific programming. *The main source of inspiration for numpy and Julia*. The **good** things: 1. Full scientific programming language 2. Especially good at optimization and (sparse) matrix algebra 3. Well-developed interface (IDE) and debugger 4. Integration with C++ through mex functions The **bad** things: 1. Not open source and costly outside of academia 2. Not always easy to parallelize natively 3. Not a complete programming language 4. Not in JupyterLab **Download:** Available in the Absalon software library. **Example:** `SMD_MATLAB.mlx` **More:** 1. **Mini-course in MATLAB:** See the folder `\MATLAB_course` 2. [NumPy for Matlab users](https://docs.scipy.org/doc/numpy/user/numpy-for-matlab-users.html) ## Python The **Swiss army knife** of programming languages. The **good** things: 1. All-round programming language 2. Full scientific programming (numpy+scipy) 3. Good at statistics (in particular data handling and machine learning) 4. Just-in-time (jit) compilation available (numba) 5. Easy to integrate with C++ (ctypes, cffi) The **bad** things: 1. Messy package system at times 2. Sometimes hard to jit-compile and parallelize **Example:** `SMD_Python.ipynb` ## Julia The **newcomer** of scientific programming languages. 1. All-round programming language 2. Automatic just-in-time compilation with native parallelization - almost as fast as C++ 3. Focused on scientific computing and high performance computing The **bad** things: 1. Young language, with a smallish, but growing, community 2.
Sometimes hard to ensure that the just-in-time compliation works efficiently **Example:** `SMD_Julia.ipynb` **Download Julia:** - [Open source version](https://julialang.org/downloads/) - [JuliaPro from Julia Computing (bundled with IDE and notebook support)](https://juliacomputing.com/products/juliapro) - [Documentation (language and about 1900 packages)](https://pkg.julialang.org/docs/) **Julia community:** - [Discourse](https://discourse.julialang.org) - [Slack](https://julialang.slack.com) For **introductory material on Julia for economists**, see [https://lectures.quantecon.org/jl/](https://lectures.quantecon.org/jl/). ## R The **statistician favorite choice** of programming language. 1. Great package system 2. The best statistical packages 3. Well-developed interface (IDE) (Rstudio) 4. Easy to integrate with C++ (Rcpp) The **bad** things: 1. Not designed to be a scientific programming langauge 2. Not a complete programming langauge **Download:** https://www.rstudio.com/ # Low-level programming languages ## Fortran What I have nightmares about... In the old days, it was a bit faster than C++. This is no longer true. ## C/C++ **The fastest you can get.** A very powerfull tool, but hard to learn, and impossible to master. ``` import numpy as np import ctypes as ct import callcpp # local library import psutil CPUs = psutil.cpu_count() CPUs_list = set(np.sort([1,2,4,*np.arange(8,CPUs+1,4)])) print(f'this computer has {CPUs} CPUs') ``` ## Calling C++ from Python > **Note I:** This section can only be run on a Windows computer with the free **Microsoft Visual Studio 2017 Community Edition** ([download here](https://visualstudio.microsoft.com/downloads/)) installed. > > **Note II:** Learning C++ is somewhat hard. These [tutorials](http://www.cplusplus.com/doc/tutorial/) are helpful. Pyton contains multiple ways of calling functions written in C++. Here I use **ctypes**. **C++ file:** example.cpp in the current folder. **Step 1:** Compile C++ to a .dll file ``` callcpp.compile_cpp('example') # compiles example.cpp ``` > **Details:** Write a file called ``compile.bat`` and run it in a terminal under the hood. **Step 2:** Link to .dll file ``` # funcs (list): list of functions with elements (functionname,[argtype1,argtype2,etc.]) funcs = [('myfun_cpp',[ct.POINTER(ct.c_double),ct.POINTER(ct.c_double),ct.POINTER(ct.c_double), ct.c_long,ct.c_long,ct.c_long])] # ct.POINTER(ct.c_double) to a double # ct.c_long interger cppfile = callcpp.link_cpp('example',funcs) ``` **Step 3:** Call function ``` def myfun_numpy_vec(x1,x2): y = np.empty((1,x1.size)) I = x1 < 0.5 y[I] = np.sum(np.exp(x2*x1[I]),axis=0) y[~I] = np.sum(np.log(x2*x1[~I]),axis=0) return y # setup x1 = np.random.uniform(size=10**6) x2 = np.random.uniform(size=np.int(100*CPUs/8)) # adjust the size of the problem x1_np = x1.reshape((1,x1.size)) x2_np = x2.reshape((x2.size,1)) # timing %timeit myfun_numpy_vec(x1_np,x2_np) def myfun_cpp(x1,x2,threads): y = np.empty(x1.size) p_x1 = np.ctypeslib.as_ctypes(x1) # pointer to x1 p_x2 = np.ctypeslib.as_ctypes(x2) # pointer to x2 p_y = np.ctypeslib.as_ctypes(y) # pointer to y cppfile.myfun_cpp(p_x1,p_x2,p_y,x1.size,x2.size,threads) return y assert np.allclose(myfun_numpy_vec(x1_np,x2_np),myfun_cpp(x1,x2,1)) for threads in CPUs_list: print(f'threads = {threads}') %timeit myfun_cpp(x1,x2,threads) print('') ``` **Observation:** Compare with results in lecture 12. Numba is roughly as fast as C++ here (I get different results across different computers). 
In larger problems, C++ is usually faster, and while Numba is limited in terms of which Python and Numpy features it supports, everything can be coded in C++. **Step 4:** Delink .dll file ``` callcpp.delink_cpp(cppfile,'example') ``` **More information:** See the folder "Numba and C++" in the [ConsumptionSavingNotebooks](https://github.com/NumEconCopenhagen/ConsumptionSavingNotebooks) repository. Incudes, an explanation on how to use the **NLopt optimizers** in C++.
github_jupyter
``` import networkx as nx from custom import load_data as cf from networkx.algorithms import bipartite from nxviz import CircosPlot import numpy as np import matplotlib.pyplot as plt %load_ext autoreload %autoreload 2 %matplotlib inline %config InlineBackend.figure_format = 'retina' ``` # Introduction Bipartite graphs are graphs that have two (bi-) partitions (-partite) of nodes. Nodes within each partition are not allowed to be connected to one another; rather, they can only be connected to nodes in the other partition. Bipartite graphs can be useful for modelling relations between two sets of entities. We will explore the construction and analysis of bipartite graphs here. ![bipartite graph](https://upload.wikimedia.org/wikipedia/commons/thumb/e/e8/Simple-bipartite-graph.svg/600px-Simple-bipartite-graph.svg.png) Let's load a [crime data](http://konect.uni-koblenz.de/networks/moreno_crime) bipartite graph and quickly explore it. > This bipartite network contains persons who appeared in at least one crime case as either a suspect, a victim, a witness or both a suspect and victim at the same time. A left node represents a person and a right node represents a crime. An edge between two nodes shows that the left node was involved in the crime represented by the right node. ``` G = cf.load_crime_network() list(G.edges(data=True))[0:5] list(G.nodes(data=True))[0:10] ``` # Projections Bipartite graphs can be projected down to one of the projections. For example, we can generate a person-person graph from the person-crime graph, by declaring that two nodes that share a crime node are in fact joined by an edge. ![bipartite graph](https://upload.wikimedia.org/wikipedia/commons/thumb/e/e8/Simple-bipartite-graph.svg/600px-Simple-bipartite-graph.svg.png) ## Exercise Find the bipartite projection function in the NetworkX `bipartite` module [docs](https://networkx.github.io/documentation/networkx-1.10/reference/algorithms.bipartite.html), and use it to obtain the `unipartite` projection of the bipartite graph. (5 min.) ``` person_nodes = [n for n in G.nodes() if G.nodes[n]['bipartite'] == 'person'] pG = bipartite.projection.projected_graph(G, person_nodes) list(pG.nodes(data=True))[0:5] ``` ## Exercise Try visualizing the person-person crime network by using a Circos plot. Ensure that the nodes are grouped by gender and then by number of connections. (5 min.) Again, recapping the Circos Plot API: ```python c = CircosPlot(graph_object, node_color='metadata_key1', node_grouping='metadata_key2', node_order='metadat_key3') c.draw() plt.show() # or plt.savefig('...') ``` ``` for n, d in pG.nodes(data=True): pG.nodes[n]['connectivity'] = len(list(pG.neighbors(n))) c = CircosPlot(pG, node_color='gender', node_grouping='gender', node_order='connectivity') c.draw() plt.savefig('images/crime-person.png', dpi=300) ``` ## Exercise Use a similar logic to extract crime links. (2 min.) ``` crime_nodes = [n for n in G.nodes() if G.nodes[n]['bipartite'] == 'crime'] cG = bipartite.projection.projected_graph(G, crime_nodes) ``` ## Exercise Can you plot how the crimes are connected, using a Circos plot? Try ordering it by number of connections. (5 min.) 
``` for n in cG.nodes(): cG.nodes[n]['connectivity'] = float(len(list(cG.neighbors(n)))) c = CircosPlot(cG, node_order='connectivity', node_color='connectivity') c.draw() plt.savefig('images/crime-crime.png', dpi=300) ``` ## Exercise NetworkX also implements centrality measures for bipartite graphs, which allows you to obtain their metrics without first converting to a particular projection. This is useful for exploratory data analysis. Try the following challenges, referring to the [API documentation](https://networkx.github.io/documentation/networkx-1.9/reference/algorithms.bipartite.html) to help you: 1. Which crimes have the most number of people involved? 1. Which people are involved in the most number of crimes? Exercise total: 5 min. ``` # Degree Centrality bpdc = bipartite.degree_centrality(G, person_nodes) sorted(bpdc.items(), key=lambda x: x[1], reverse=True)[0:5] bpdc['p1'] nx.degree_centrality(G)['p1'] ```
# Introduction In the [Intro to SQL micro-course](https://www.kaggle.com/learn/intro-to-sql), you learned how to use [**INNER JOIN**](https://www.kaggle.com/dansbecker/joining-data) to consolidate information from two different tables. Now you'll learn about a few more types of **JOIN**, along with how to use **UNIONs** to pull information from multiple tables. Along the way, we'll work with two imaginary tables, called `owners` and `pets`. ![two tables](https://i.imgur.com/dYVwS4T.png) Each row of the `owners` table identifies a different pet owner, where the `ID` column is a unique identifier. The `Pet_ID` column (in the `owners` table) contains the ID for the pet that belongs to the owner (this number matches the ID for the pet from the `pets` table). For example, - the `pets` table shows that Dr. Harris Bonkers is the pet with ID 1. - The `owners` table shows that Aubrey Little is the owner of the pet with ID 1. Putting these two facts together, Dr. Harris Bonkers is owned by Aubrey Little. Likewise, since Veronica Dunn does not have a corresponding `Pet_ID`, she does not have a pet. And, since 5 does not appear in the `Pet_ID` column, Maisie does not have an owner. # JOINs Recall that we can use an **INNER JOIN** to pull rows from both tables where the value in the `Pet_ID` column in the `owners` table has a match in the `ID` column of the `pets` table. ![...](https://i.imgur.com/C5wimKT.png) In this case, Veronica Dunn and Maisie are not included in the results. But what if we instead want to create a table containing all pets, regardless of whether they have owners? Or, what if we want to combine all of the rows in both tables? In these cases, we need only use a different type of **JOIN**. For instance, to create a table containing all rows from the `owners` table, we use a **LEFT JOIN**. In this case, "left" refers to the table that appears before the **JOIN** in the query. ("Right" refers to the table that is after the **JOIN**.) ![...](https://i.imgur.com/tnOqw2S.png) Replacing **INNER JOIN** in the query above with **LEFT JOIN** returns all rows where the two tables have matching entries, along with all of the rows in the left table (whether there is a match or not). If we instead use a **RIGHT JOIN**, we get the matching rows, along with all rows in the right table (whether there is a match or not). Finally, a **FULL JOIN** returns all rows from both tables. Note that in general, any row that does not have a match in both tables will have NULL entries for the missing values. You can see this in the image below. ![...](https://i.imgur.com/1Dvmg8S.png) # UNIONs As you've seen, **JOINs** horizontally combine results from different tables. If you instead would like to vertically concatenate columns, you can do so with a **UNION**. The example query below combines the `Age` columns from both tables. ![...](https://i.imgur.com/oa6VDig.png) Note that with a **UNION**, the data types of both columns must be the same, but the column names can be different. (So, for instance, we cannot take the **UNION** of the `Age` column from the `owners` table and the `Pet_Name` column from the `pets` table.) We use **UNION ALL** to include duplicate values - you'll notice that `9` appears in both the `owners` table and the `pets` table, and shows up twice in the concatenated results. If you'd like to drop duplicate values, you need only change **UNION ALL** in the query to **UNION DISTINCT**. # Example We'll work with the [Hacker News](https://www.kaggle.com/hacker-news/hacker-news) dataset. 
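Before we query real tables, here is a hedged sketch of what the **LEFT JOIN** and **UNION ALL** queries illustrated above might look like if the imaginary `owners` and `pets` tables actually existed. The `ID`, `Pet_ID`, `Age`, and `Pet_Name` columns are the ones described above; the `Name` column of `owners` is an assumption made only for illustration.

```
# Illustrative query text only -- `owners` and `pets` are the imaginary tables
# from the figures above, so these strings are not meant to be executed as-is.
left_join_query = """
                  SELECT o.Name AS Owner_Name, p.Pet_Name
                  FROM owners AS o
                  LEFT JOIN pets AS p
                      ON o.Pet_ID = p.ID
                  """

union_all_query = """
                  SELECT Age FROM owners
                  UNION ALL
                  SELECT Age FROM pets
                  """
```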
We begin by reviewing the first several rows of the `comments` table. (_The corresponding code is hidden, but you can un-hide it by clicking on the "Code" button below._) ``` #$HIDE_INPUT$ from google.cloud import bigquery # Create a "Client" object client = bigquery.Client() # Construct a reference to the "hacker_news" dataset dataset_ref = client.dataset("hacker_news", project="bigquery-public-data") # API request - fetch the dataset dataset = client.get_dataset(dataset_ref) # Construct a reference to the "comments" table table_ref = dataset_ref.table("comments") # API request - fetch the table table = client.get_table(table_ref) # Preview the first five lines of the table client.list_rows(table, max_results=5).to_dataframe() ``` You'll also work with the `stories` table. ``` # Construct a reference to the "stories" table table_ref = dataset_ref.table("stories") # API request - fetch the table table = client.get_table(table_ref) # Preview the first five lines of the table client.list_rows(table, max_results=5).to_dataframe() ``` Since you are already familiar with **JOINs** from the [Intro to SQL micro-course](https://www.kaggle.com/learn/intro-to-sql), we'll work with a relatively complex example of a JOIN that uses a [common table expression (CTE)](https://www.kaggle.com/dansbecker/as-with). The query below pulls information from the `stories` and `comments` tables to create a table showing all stories posted on January 1, 2012, along with the corresponding number of comments. We use a **LEFT JOIN** so that the results include stories that didn't receive any comments. ``` # Query to select all stories posted on January 1, 2012, with number of comments join_query = """ WITH c AS ( SELECT parent, COUNT(*) as num_comments FROM `bigquery-public-data.hacker_news.comments` GROUP BY parent ) SELECT s.id as story_id, s.by, s.title, c.num_comments FROM `bigquery-public-data.hacker_news.stories` AS s LEFT JOIN c ON s.id = c.parent WHERE EXTRACT(DATE FROM s.time_ts) = '2012-01-01' ORDER BY c.num_comments DESC """ # Run the query, and return a pandas DataFrame join_result = client.query(join_query).result().to_dataframe() join_result.head() ``` Since the results are ordered by the `num_comments` column, stories without comments appear at the end of the DataFrame. (Remember that **NaN** stands for "not a number".) ``` # None of these stories received any comments join_result.tail() ``` Next, we write a query to select all usernames corresponding to users who wrote stories or comments on January 1, 2014. We use **UNION DISTINCT** (instead of **UNION ALL**) to ensure that each user appears in the table at most once. ``` # Query to select all users who posted stories or comments on January 1, 2014 union_query = """ SELECT c.by FROM `bigquery-public-data.hacker_news.comments` AS c WHERE EXTRACT(DATE FROM c.time_ts) = '2014-01-01' UNION DISTINCT SELECT s.by FROM `bigquery-public-data.hacker_news.stories` AS s WHERE EXTRACT(DATE FROM s.time_ts) = '2014-01-01' """ # Run the query, and return a pandas DataFrame union_result = client.query(union_query).result().to_dataframe() union_result.head() ``` To get the number of users who posted on January 1, 2014, we need only take the length of the DataFrame. ``` # Number of users who posted stories or comments on January 1, 2014 len(union_result) ``` # Your turn Use what you've learned to **[pull information from multiple tables](#$NEXT_NOTEBOOK_URL$)**.
``` !pip install transformers datasets tweet-preprocessor ray[tune] hyperopt import pandas as pd import numpy as np import matplotlib.pyplot as plt import wordcloud import preprocessor as p # tweet-preprocessor import nltk import re import seaborn as sns import torch from transformers import BertTokenizer, BertForSequenceClassification, AdamW, get_linear_schedule_with_warmup from sklearn.metrics import accuracy_score, roc_auc_score, confusion_matrix from sklearn.model_selection import train_test_split, StratifiedKFold from scipy.special import softmax from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler from tqdm.notebook import tqdm from ray import tune from ray.tune import CLIReporter from ray.tune.schedulers import ASHAScheduler from ray.tune.suggest.hyperopt import HyperOptSearch from google.colab import drive drive.mount('/content/drive') # dataset_dem = pd.read_csv('/content/drive/MyDrive/democrat_tweets_v2.csv') # dataset_gop = pd.read_csv('/content/drive/MyDrive/republican_tweets_v2.csv') # dataset_dem["label"] = "Democrat" # dataset_gop["label"] = "Republican" # dataset_final = pd.concat([dataset_dem, dataset_gop]) # dataset_final.reset_index(drop=True, inplace=True) dataset_final = pd.read_csv("/content/drive/MyDrive/Copy of 2020_labled_political_tweets.csv.zip") # dataset_final=dataset_final[(dataset_final["party"].any()=="D")] dataset_final = dataset_final.iloc[0:2000] for index, row in dataset_final.iterrows(): if str(row['party']) !="D": if str(row["party"])!="R": dataset_final.drop(index, inplace=True) dataset_final.head() dataset_final.count # dataset=pd.read_csv("/content/drive/MyDrive/Copy of 2020_labled_political_tweets.csv.zip") # X=dataset.drop(["party"],axis=1) # y = dataset[["party"]] # X_train, X_val, y_train, y_val = train_test_split(X, # y, # test_size=0.20, # random_state=42) LABEL_MAP = { "D": 0, "R": 1 } def buildLabels(row): return LABEL_MAP.get(row["party"]) # def cleanTweet(row): # tweet = row["text"] # tweet = str(p.clean(tweet)) # tweet = re.sub(r'[^\w\s]', '', tweet) # punctuation # tweet = re.sub("^\d+\s|\s\d+\s|\s\d+$", " ", tweet) # numbers # return tweet dataset_final["party"] = dataset_final.apply(lambda row: buildLabels(row), axis=1) # dataset_final["clean_text"] = dataset_final.apply(lambda row: cleanTweet(row), # axis=1) dataset_final.head() dataset_clf = dataset_final[["text", "party"]] dataset_clf.reset_index(drop=True, inplace=True) X_train, X_val, y_train, y_val = train_test_split(dataset_clf.index.values, dataset_clf.party.values, test_size=0.20, random_state=42, stratify=dataset_clf.party.values) dataset_clf['data_type'] = ['not_set']*dataset_final.shape[0] dataset_clf.loc[X_train, 'data_type'] = 'train' dataset_clf.loc[X_val, 'data_type'] = 'test' dataset_train = dataset_clf.loc[dataset_clf.data_type == 'train'] dataset_test = dataset_clf.loc[dataset_clf.data_type == 'test'] dataset_train.head() def get_dataloaders(data, batch_size): tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True) # tokenize train and test data so BERT can understand it encoded_data_train = tokenizer.batch_encode_plus( data[data.data_type=='train'].text.values, add_special_tokens=True, return_attention_mask=True, padding=True, max_length=64, return_tensors='pt' ) encoded_data_test = tokenizer.batch_encode_plus( data[data.data_type=='test'].text.values, add_special_tokens=True, return_attention_mask=True, padding=True, max_length=64, return_tensors='pt' ) # destructure out the input_ids, attention masks, 
and labels from tokenizer & encoder output input_ids_train = encoded_data_train['input_ids'] attention_masks_train = encoded_data_train['attention_mask'] labels_train = torch.tensor(data[data.data_type=='train'].party.values) input_ids_test = encoded_data_test['input_ids'] attention_masks_test = encoded_data_test['attention_mask'] labels_test = torch.tensor(data[data.data_type=='test'].party.values) train_data = TensorDataset(input_ids_train, attention_masks_train, labels_train) test_data = TensorDataset(input_ids_test, attention_masks_test, labels_test) train_dataloader = DataLoader(train_data, sampler=RandomSampler(train_data), batch_size=batch_size) test_dataloader = DataLoader(test_data, sampler=SequentialSampler(test_data), batch_size=batch_size) return train_dataloader, test_dataloader def auc_score(preds, labels): soft_preds = softmax(preds, axis=1) # logit -> probability if np.shape(preds)[1] > 2: # check for multi-class return roc_auc_score(labels, soft_preds, multi_class='ovr') else: soft_preds = soft_preds[:,1] return roc_auc_score(labels, soft_preds) def acc_score_by_class(preds, labels): label_dict_inverse = {v: k for k, v in LABEL_MAP.items()} preds_flat = np.argmax(preds, axis=1).flatten() labels_flat = labels.flatten() for label in np.unique(labels_flat): y_preds = preds_flat[labels_flat==label] y_true = labels_flat[labels_flat==label] print(f'Class: {label_dict_inverse[label]}') print(f'Accuracy: {len(y_preds[y_preds==label])}/{len(y_true)}\n') def evaluate(model, dataloader, device): model.eval() loss_val_total = 0 predictions, true_vals = [], [] for batch in dataloader: # convert data to CUDA batch = tuple(b.to(device) for b in batch) inputs = { 'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[2], } with torch.no_grad(): outputs = model(**inputs) # get predictions loss = outputs[0] logits = outputs[1] loss_val_total += loss.item() logits = logits.detach().cpu().numpy() label_ids = inputs['labels'].cpu().numpy() predictions.append(logits) true_vals.append(label_ids) loss_val_avg = loss_val_total/len(dataloader) predictions = np.concatenate(predictions, axis=0) true_vals = np.concatenate(true_vals, axis=0) return loss_val_avg, predictions, true_vals def train_and_hyperparam_search(config, model_init, # function to init a clean version of the net data, # data as Pandas array cv # rounds of cross-validation ): losses = [] aucs = [] skf = StratifiedKFold(n_splits=cv, shuffle=True) for train_idx, test_idx in skf.split(data.text, data.party): model = model_init() device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model.to(device) print(f"Device: {device}") optimizer = AdamW(model.parameters(), lr=config['lr'], eps=config['eps'], weight_decay=config['weight_decay']) data.loc[train_idx, 'data_type'] = 'train' data.loc[test_idx, 'data_type'] = 'test' train_dataloader, test_dataloader = get_dataloaders(data, config['batch_size']) for epoch in range(1, config['epochs']+1): model.train() # enter training mode loss_train_total = 0 for batch in train_dataloader: model.zero_grad() # get CUDA data batch = tuple(b.to(device) for b in batch) inputs = { 'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[2], } outputs = model(**inputs) # evaluate # for reference, we are using cross-entropy loss here, # as implemented in https://huggingface.co/transformers/_modules/transformers/modeling_bert.html loss = outputs[0] loss_train_total += loss.item() loss.backward() # do backprop torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0) 
optimizer.step() loss_train_avg = loss_train_total/len(train_dataloader) print(f"Training loss for epoch {epoch}: {loss_train_avg}") val_loss, predictions, true_vals = evaluate(model, test_dataloader, device) auc = auc_score(predictions, true_vals) losses.append(val_loss) aucs.append(auc) tune.report(loss=np.mean(losses), auc=np.mean(aucs)) from functools import partial def model_init(): return BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2, output_attentions=False, output_hidden_states=False) config = { "lr": tune.choice([5e-5,3e-5,2e-5]), "eps": tune.loguniform(1e-10, 1e-7), "weight_decay": tune.loguniform(1e-10, 1e-5), "batch_size": tune.choice([4,8,16, 32]), "epochs": tune.choice([2, 3, 4]) } scheduler = ASHAScheduler( metric="auc", mode="max", max_t=10, grace_period=1, reduction_factor=2 ) reporter = CLIReporter(metric_columns=["loss", "auc", "training_iteration"]) hyperopt_search = HyperOptSearch(metric="auc", mode="max") result = tune.run( partial(train_and_hyperparam_search, model_init=model_init, data=dataset_clf, cv=3), resources_per_trial={"cpu": 2, "gpu": 1}, config=config, num_samples=8, scheduler=scheduler, search_alg=hyperopt_search, progress_reporter=reporter ) ```
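The search above only prints progress through the reporter. A minimal follow-up sketch, assuming the `ExperimentAnalysis` object returned by `tune.run` in this Ray version, for pulling out the best trial:

```
# Sketch: inspect the best configuration found by the search.
# (Exact attribute names can differ slightly between Ray releases.)
best_trial = result.get_best_trial(metric="auc", mode="max", scope="last")
print("Best trial config:", best_trial.config)
print("Best trial mean AUC:", best_trial.last_result["auc"])
print("Best trial mean loss:", best_trial.last_result["loss"])
```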
```
import pandas as pd
from sklearn.tree import DecisionTreeClassifier  # Import Decision Tree Classifier
from sklearn.model_selection import train_test_split  # Import train_test_split function
from sklearn import metrics  # Import scikit-learn metrics module for accuracy calculation
from fastapi import FastAPI
import uvicorn

data = pd.read_csv("clothing_weather.csv")
data

app = FastAPI()


@app.get("/")
async def root():
    """Weather Advisor welcome message."""
    return {"message": "Hello, welcome to Weather Advisor! Provide temperature, rain, and snow values to get an outfit recommendation."}


@app.get("/weatheradvisor/{temp}/{rain}/{snow}")
async def weatheradvisor(temp: int, rain: int, snow: int):
    y = predict(temp, rain, snow)
    message = getMessage(y[0], rain, snow)
    return "You should wear {0}".format(message)


def predict(temp: int, rain: int, snow: int):
    # Encode the yes/no columns as 1/0
    data["rain"] = data["rain"].replace("no", 0)
    data["rain"] = data["rain"].replace("yes", 1)
    data["snow"] = data["snow"].replace("no", 0)
    data["snow"] = data["snow"].replace("yes", 1)

    feature_cols = ['temp_f', 'rain', 'snow']
    X = data[feature_cols]  # Features
    y = data.overall        # Target variable
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)

    clf = DecisionTreeClassifier(criterion="entropy", max_depth=4)
    # Train Decision Tree Classifier
    clf = clf.fit(X_train, y_train)
    # Predict the response for the test dataset
    y_pred = clf.predict(X_test)
    print("Accuracy:", metrics.accuracy_score(y_test, y_pred))

    # Predict the outfit code for the requested conditions (note the 2-D input)
    y_pred = clf.predict([[temp, rain, snow]])
    return y_pred


def getMessage(pred, rain, snow):
    ans = ""
    outfit_code = {
        1: "a short sleeve shirt and shorts.",
        2: "a short sleeve shirt and long pants.",
        3: "a short sleeve shirt, shorts and a light jacket or sweatshirt.",
        4: "a short sleeve shirt, long pants, and a light jacket or sweatshirt.",
        5: "a long sleeve shirt, long pants, and a light jacket or sweatshirt.",
        6: "a short sleeve shirt, long pants, and a heavy jacket.",
        7: "a long sleeve shirt or sweater, long pants, and a heavy jacket.",
        8: "a long sleeve shirt and shorts."
    }
    if pred in outfit_code:
        ans = ans + outfit_code[pred]
    else:
        return "an error occurred"
    if rain == 1:
        ans = ans + " You may also want a rain jacket, rain boots, and/or an umbrella."
    if snow == 1:
        ans = ans + " You should also bring a scarf, gloves, and snow boots!"
    return ans
```
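The notebook only defines the app. A hedged sketch of how it could be served and queried follows; the host, port, and the example path values are assumptions, not from the original.

```
# Sketch: serve the app from a terminal or a separate process, for example
#   uvicorn main:app --reload        (if this code lives in main.py)
# or programmatically:
#   uvicorn.run(app, host="127.0.0.1", port=8000)
# then query it from another process:

import requests  # assumption: the requests package is available

# 50 degrees F with rain but no snow -- expect a recommendation that includes rain gear
resp = requests.get("http://127.0.0.1:8000/weatheradvisor/50/1/0")
print(resp.json())
```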
# Finding cellular regions with superpixel analysis **Overview:** Whole-slide images often contain artifacts like marker or acellular regions that need to be avoided during analysis. In this example we show how HistomicsTK can be used to develop saliency detection algorithms that segment the slide at low magnification to generate a map to guide higher magnification analyses. Here we show how superpixel analysis can be used to locate hypercellular regions that correspond to tumor-rich content. This uses Simple Linear Iterative Clustering (SLIC) to get superpixels at a low slide magnification to detect cellular regions. The first step of this pipeline detects tissue regions (i.e. individual tissue pieces) using the `get_tissue_mask` method of the `histomicstk.saliency` module. Then, each tissue piece is processed separately for accuracy and disk space efficiency. It is important to keep in mind that this does NOT rely on a tile iterator, but loads the entire tissue region (but NOT the whole slide) in memory and passes it on to `skimage.segmentation.slic` method. Not using a tile iterator helps keep the superpixel sizes large enough to correspond to tissue boundaries. Once superpixels are segmented, the image is deconvolved and features are extracted from the hematoxylin channel. Features include intensity and possibly also texture features. Then, a mixed component Gaussian mixture model is fit to the features, and median intensity is used to rank superpixel clusters by 'cellularity' (since we are working with the hematoxylin channel). Note that the decison to fit a gaussian mixture model instead of using K-means clustering is a design choice. If you'd like to experiment, feel free to try other methods of classifying superpixels into clusters using other approaches. Additional functionality includes contour extraction to get the final segmentation boundaries of cellular regions and to visualize them in HistomicsUI using one's preferred colormap. **Here are some sample results:** From left to right: Slide thumbnail, superpixel classifications, contiguous cellular/acellular regions ![cdetection](https://user-images.githubusercontent.com/22067552/65730355-7e92b600-e08f-11e9-918a-507f117f6d77.png) **Where to look?** ``` |_ histomicstk/ |_saliency/ |_cellularity_detection.py |_tests/ |_test_saliency.py ``` ``` import tempfile import girder_client import numpy as np from histomicstk.annotations_and_masks.annotation_and_mask_utils import ( delete_annotations_in_slide) from histomicstk.saliency.cellularity_detection_superpixels import ( Cellularity_detector_superpixels) import matplotlib.pylab as plt from matplotlib.colors import ListedColormap %matplotlib inline # color map vals = np.random.rand(256,3) vals[0, ...] 
= [0.9, 0.9, 0.9] cMap = ListedColormap(1 - vals) ``` ## Prepwork ``` APIURL = 'http://candygram.neurology.emory.edu:8080/api/v1/' SAMPLE_SLIDE_ID = "5d586d76bd4404c6b1f286ae" # SAMPLE_SLIDE_ID = "5d8c296cbd4404c6b1fa5572" gc = girder_client.GirderClient(apiUrl=APIURL) gc.authenticate(apiKey='kri19nTIGOkWH01TbzRqfohaaDWb6kPecRqGmemb') # This is where the run logs will be saved logging_savepath = tempfile.mkdtemp() # color normalization values from TCGA-A2-A3XS-DX1 cnorm_thumbnail = { 'mu': np.array([9.24496373, -0.00966569, 0.01757247]), 'sigma': np.array([0.35686209, 0.02566772, 0.02500282]), } # from the ROI in Amgad et al, 2019 cnorm_main = { 'mu': np.array([8.74108109, -0.12440419, 0.0444982]), 'sigma': np.array([0.6135447, 0.10989545, 0.0286032]), } # deleting existing annotations in target slide (if any) delete_annotations_in_slide(gc, SAMPLE_SLIDE_ID) ``` ## Initialize the cellularity detector ``` print(Cellularity_detector_superpixels.__init__.__doc__) ``` In this example, and as the default behavior, we use a handful of informative intensity features extracted from the hematoxylin channel after color deconvolution to fit a gaussian mixture model. Empirically (on a few test slides), this seems to give better results than using the full suite of intensity and texture features available. Feel free to experiment with this and find the optimum combination of features for your application. ``` # init cellularity detector cds = Cellularity_detector_superpixels( gc, slide_id=SAMPLE_SLIDE_ID, MAG=3.0, compactness=0.1, spixel_size_baseMag=256 * 256, max_cellularity=40, visualize_spixels=True, visualize_contiguous=True, get_tissue_mask_kwargs={ 'deconvolve_first': False, 'n_thresholding_steps': 2, 'sigma': 1.5, 'min_size': 500, }, verbose=2, monitorPrefix='test', logging_savepath=logging_savepath) ``` ## Set the color normalization values You can choose to reinhard color normalize the slide thumbnail and/or the tissue image at target magnificaion. You can either provide the mu and sigma values directly or provide the path to an image from which to infer these values. Please refer to the *color_normalization* module for reinhard normalization implementation details. In this example, we use a "high-sensitivity, low-specificity" strategy to detect tissue, followed by the more specific cellularity detection module. In other words, the *tissue_detection* module is used to detect all tissue, and only exclude whitespace and marker. Here we do NOT perform color normalization before tissue detection (empirically gives worse results), but we do normalize when detecting the cellular regions within the tissue. ``` # set color normalization for thumbnail # cds.set_color_normalization_values( # mu=cnorm_thumbnail['mu'], # sigma=cnorm_thumbnail['sigma'], what='thumbnail') # set color normalization values for main tissue cds.set_color_normalization_values( mu=cnorm_main['mu'], sigma=cnorm_main['sigma'], what='main') ``` ## Run the detector ``` print(cds.run.__doc__) tissue_pieces = cds.run() ``` ## Check the results The resultant list of objects correspond to the results for each "tissue piece" detected in the slide. You may explore various attributes like the offset coordinates, tissue mask, superpixel labeled mask, superpixel feature data, and superpixel cluster properties. 
``` plt.imshow(tissue_pieces[0].tissue_mask, cmap=cMap) plt.imshow(tissue_pieces[0].spixel_mask, cmap=cMap) tissue_pieces[0].fdata.head() tissue_pieces[0].cluster_props ``` ## Check the visualization on HistomicsUI Now you may go to the slide on Digital Slide Archive and check the posted annotations.
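One hedged way to confirm this programmatically, assuming the `annotation/item/{id}` endpoint exposed by the girder annotation plugin (the response field names below are also assumptions based on typical responses):

```
# Sketch: list the annotation documents posted to the slide by the detector.
posted_annotations = gc.get('annotation/item/' + SAMPLE_SLIDE_ID)
print('Number of posted annotation documents:', len(posted_annotations))
print([ann.get('annotation', {}).get('name') for ann in posted_annotations])
```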
``` import matplotlib.pyplot as plt import numpy as np import os import PIL import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.models import Sequential from tensorflow.keras.preprocessing.image import ImageDataGenerator import pathlib from tqdm import tqdm from abc import ABCMeta, abstractmethod ``` I have downloaded the dataset locally and mentioned paths below. Since dataset is huge (~30 GB), I am not pushing it to the repository. You can put the `data` dir inside dataset adjacent to this jupyter notebook in order to run it successfully. ``` train_data_dir = 'data/train' test_data_dir = 'data/test' train_data_path = pathlib.Path(train_data_dir) test_data_path = pathlib.Path(test_data_dir) ``` Below are all the classes given for tissue samples in `train` and `test` dataset. ``` tissue_classes = [ 'spleen', 'skin_1', 'skin_2', 'pancreas', 'lymph_node', 'small_intestine', 'endometrium_1', 'endometrium_2', 'liver', 'kidney', 'lung', 'colon' ] ``` Let us display an example image from each of the `12` classes of tissues in our dataset. ``` fig, ax = plt.subplots(nrows=4, ncols=3, figsize=(10, 10)) counter = 0 for row in ax: for col in row: images = list(train_data_path.glob(tissue_classes[counter] + '/*')) image = np.array(PIL.Image.open(str(images[0]))) col.set_title(tissue_classes[counter]) col.imshow(image) counter += 1 fig.tight_layout() plt.show() ``` From dataset, we have **1119** unique images for **training** and **600** unique images for **testing** data. Since we are working with very large dataset, it is not advisable to load all the data at once. It is not possible to do that since the data is huge. That is why, we have created data generator which will generate training/testing examples on demand. It will only generate a batch of examples at a time. Below class is the custom data generator we have created in order to ingest images into ML pipeline. 
``` class TissueDataGenerator(tf.keras.utils.Sequence): def __init__(self, data_dir, batch_size, class_labels, img_height=128, img_width=128, img_channels=3, preprocess_func=None, shuffle=True): self.file_ds = tf.data.Dataset.list_files(str(data_dir + '/*/*')) self.batch_size = batch_size self.class_labels = class_labels self.n_classes = len(class_labels) self.img_size = (img_height, img_width) self.img_n_channels = img_channels self.shuffle = shuffle self.preprocess_func = preprocess_func self.label_mapping = self.find_label_mappings() self.labeled_ds = self.file_ds.map(lambda f: tf.py_function(func=self.process_example, inp=[f], Tout=[tf.float32, tf.int32])) self.labeled_ds = self.labeled_ds.batch(self.batch_size) self.on_epoch_end() def find_label_mappings(self): mp = {} for i, label in enumerate(self.class_labels): mp[label] = i return mp def process_example(self, file_path): label = tf.strings.split(file_path, os.sep)[-2] label_map = self.label_mapping[str(label.numpy().decode('utf-8'))] label_encode = tf.keras.utils.to_categorical(label_map, self.n_classes) image = np.array(PIL.Image.open(str(file_path.numpy().decode('utf-8')))) image = tf.image.resize(image, self.img_size) if self.preprocess_func is not None: image = self.preprocess_func(image) return image, label_encode def __getitem__(self, index): 'Generate one batch of data' batch = next(self.iterator, None) if batch is None: self.on_epoch_end() batch = next(self.iterator) return batch def on_epoch_end(self): self.iterator = iter(self.labeled_ds) def __len__(self): return len(self.file_ds) // self.batch_size ``` During our research of finding best model for image classification, we usually experiment on various different kinds of models. Because of that, we usually rewrite some of the code redundantly. To prevent that, we have created abstract model class below. Whatever models we want to experiment on can inherit this class to get access to some of the common features we will use for all the model classes like compiling & training model, testing model, plotting metrics etc. 
``` class ModifiedModel: __metaclass__ = ABCMeta def __init__(self, input_shape, num_classes, optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'], verbose=True): if not isinstance(input_shape, list) and not isinstance(input_shape, tuple): raise TypeError('input_shape must be of type list or tuple.') input_shape = tuple(input_shape) if len(input_shape) != 3: raise TypeError('input_shape must contain exactly 3 dimensions.') self.input_shape = input_shape self.num_classes = num_classes self.optimizer = optimizer self.loss = loss self.metrics = metrics self.verbose = verbose self.history = None self.model = None @abstractmethod def build_model(self): pass def compile_model(self, **kwargs): self.raise_if_not_built() self.model.compile(optimizer=self.optimizer, loss=self.loss, metrics=self.metrics, **kwargs) def raise_if_not_built(self): if self.model is None: raise ValueError('object of model class has not created instance yet.') def train(self, train_generator, epochs, **kwargs): self.raise_if_not_built() self.history = self.model.fit(train_generator, epochs=epochs, **kwargs) def test(self, test_generator, **kwargs): self.raise_if_not_built() return self.model.evaluate(test_generator, **kwargs) def plot_metrics(self): if self.history is None: raise ValueError('model must be trained to generate metric plot.') if 'loss' not in self.history.history: raise ValueError('history must contain loss information.') if 'accuracy' not in self.history.history: raise ValueError('history must contain accuracy information') fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 5)) attrs = ['loss', 'accuracy'] counter = 0 for col in ax: info = self.history.history[attrs[counter]] col.plot(range(len(info)), info) col.set_title(attrs[counter]) col.set_xlabel('Epochs') col.set_ylabel(attrs[counter]) counter += 1 fig.tight_layout() plt.show() def display_score(self, score): if len(score) < 2: raise ValueError('score must have atleast 2 values') print('Loss: {}, Accuracy: {}'.format(score[0], score[1])) ``` Below are some of the parameters which will be common across all the experiments and that is why we have decided to initialize them at the top and all other experiments will consume these three parameters. **Note:** We haven't fixed shape of input images because the input image shape may differ based on the model we experiment on. Also, We haven't used original dimension `(3000, 3000, 3)` because of computational power restrictions. We are using smaller shapes of images as input as per the model requirements ``` batch_size = 4 num_channels = 3 epochs = 15 ``` ## Training Custom CNN model for image classification Custom model inherits the `ModifiedModel` class defined above. We have used multiple Conv - Max pooling blocks following softmax output. The input images resized to shape `(128, 128, 3)`. 
``` custom_img_height = 128 custom_img_width = 128 custom_train_gen = TissueDataGenerator(train_data_dir, batch_size=batch_size, class_labels=tissue_classes, img_height=custom_img_height, img_width=custom_img_width) custom_test_gen = TissueDataGenerator(test_data_dir, batch_size=batch_size, class_labels=tissue_classes, img_height=custom_img_height, img_width=custom_img_width) class CustomModel(ModifiedModel): def __init__(self, input_shape, num_classes, optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'], verbose=True): super().__init__(input_shape, num_classes, optimizer, loss, metrics, verbose) self.build_model() self.compile_model() def build_model(self): self.model = Sequential([ layers.Rescaling(1./255, input_shape=self.input_shape), layers.Conv2D(16, 3, padding='same', activation='relu'), layers.MaxPooling2D(), layers.Conv2D(32, 3, padding='same', activation='relu'), layers.MaxPooling2D(), layers.Conv2D(64, 3, padding='same', activation='relu'), layers.MaxPooling2D(), layers.Flatten(), layers.Dense(128, activation='relu'), layers.Dense(self.num_classes, activation = 'softmax') ]) customModel = CustomModel(input_shape=(custom_img_height, custom_img_width, num_channels), num_classes=len(tissue_classes)) customModel.model.summary() customModel.train(custom_train_gen, epochs=epochs) customModel.plot_metrics() custom_score = customModel.test(custom_test_gen) customModel.display_score(custom_score) ``` Now, we also are experimenting on some of the pretrained models like VGG, InceptionNet and EfficientNet. We have defined single class `PretrainedModel` below which will take instance of pretrained model and define it as functional unit in the classification model followed by multiple fully connected layers and softmax output. ``` class PretrainedModel(ModifiedModel): def __init__(self, input_shape, num_classes, pretrainedModel, optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'], verbose=True): super().__init__(input_shape, num_classes, optimizer, loss, metrics, verbose) self.pretrained = pretrainedModel self.build_model() self.compile_model() def build_model(self): for layer in self.pretrained.layers: layer.trainable = False self.model = Sequential([ self.pretrained, layers.Flatten(), layers.Dense(512, activation='relu'), layers.Dense(128, activation='relu'), layers.Dense(self.num_classes, activation = 'softmax') ]) ``` ## Transfer Learning on VGG16 We are using pretrained `VGG16` model as the first layer in our model and retraing only the layers which are added. The input images resized to shape `(224, 224, 3)`. 
``` vgg_img_height = 224 vgg_img_width = 224 vgg_train_gen = TissueDataGenerator(train_data_dir, batch_size=batch_size, class_labels=tissue_classes, img_height=vgg_img_height, img_width=vgg_img_width, preprocess_func=tf.keras.applications.vgg16.preprocess_input) vgg_test_gen = TissueDataGenerator(test_data_dir, batch_size=batch_size, class_labels=tissue_classes, img_height=vgg_img_height, img_width=vgg_img_width, preprocess_func=tf.keras.applications.vgg16.preprocess_input) vggModel = PretrainedModel(input_shape=(vgg_img_height, vgg_img_width, num_channels), num_classes=len(tissue_classes), pretrainedModel=tf.keras.applications.vgg16.VGG16()) vggModel.model.summary() vggModel.train(vgg_train_gen, epochs=epochs) vggModel.plot_metrics() vgg_score = vggModel.test(vgg_test_gen) vggModel.display_score(vgg_score) ``` ## Transfer Learning on InceptionV3 We are using pretrained `InceptionV3` model as the first layer in our model and retraing only the layers which are added. The input images resized to shape `(299, 299, 3)`. ``` inception_img_height = 299 inception_img_width = 299 inception_train_gen = TissueDataGenerator(train_data_dir, batch_size=batch_size, class_labels=tissue_classes, img_height=inception_img_height, img_width=inception_img_width, preprocess_func=tf.keras.applications.inception_v3.preprocess_input) inception_test_gen = TissueDataGenerator(test_data_dir, batch_size=batch_size, class_labels=tissue_classes, img_height=inception_img_height, img_width=inception_img_width, preprocess_func=tf.keras.applications.inception_v3.preprocess_input) inceptionModel = PretrainedModel(input_shape=(inception_img_height, inception_img_width, num_channels), num_classes=len(tissue_classes), pretrainedModel=tf.keras.applications.inception_v3.InceptionV3()) inceptionModel.model.summary() inceptionModel.train(inception_train_gen, epochs=epochs) inceptionModel.plot_metrics() inception_score = inceptionModel.test(inception_test_gen) inceptionModel.display_score(inception_score) ``` ## Transfer Learning on EfficientNetB7 We are using pretrained `EfficientNetB7` model as the first layer in our model and retraing only the layers which are added. The input images resized to shape `(128, 128, 3)`. ``` effnet_img_height = 128 effnet_img_width = 128 effnet_train_gen = TissueDataGenerator(train_data_dir, batch_size=batch_size, class_labels=tissue_classes, img_height=effnet_img_height, img_width=effnet_img_width, preprocess_func=tf.keras.applications.efficientnet.preprocess_input) effnet_test_gen = TissueDataGenerator(test_data_dir, batch_size=batch_size, class_labels=tissue_classes, img_height=effnet_img_height, img_width=effnet_img_width, preprocess_func=tf.keras.applications.efficientnet.preprocess_input) effnetModel = PretrainedModel(input_shape=(effnet_img_height, effnet_img_width, num_channels), num_classes=len(tissue_classes), pretrainedModel=tf.keras.applications.efficientnet.EfficientNetB7()) effnetModel.model.summary() effnetModel.train(effnet_train_gen, epochs=epochs) effnetModel.plot_metrics() effnet_score = effnetModel.test(effnet_test_gen) effnetModel.display_score(effnet_score) ``` Note that above three pretrained model accuracy will improve on training for more epochs but we were not able to do that because of less computational power and time constraint. ## t-SNE plot for visualizing data distributions Let us draw t-SNE plot of image features w.r.t. `customModel` that we created. 
``` img_height = 128 img_width = 128 model = customModel label2int = {} for i, t in enumerate(tissue_classes): label2int[t] = i def process_path(file_path): label = tf.strings.split(file_path, os.sep)[-2] label_map = label2int[str(label.numpy().decode('utf-8'))] image = np.array(PIL.Image.open(str(file_path.numpy().decode('utf-8')))) image = tf.image.resize(image, (img_height, img_width)) feature = model.model(np.array([image])) return feature.numpy()[0], label_map train_gen = TissueDataGenerator(train_data_dir, batch_size=batch_size, class_labels=tissue_classes, img_height=img_height, img_width=img_width) train_ds = train_gen.file_ds.map(lambda f: tf.py_function(func=process_path, inp=[f], Tout=[tf.float32, tf.int32])) test_gen = TissueDataGenerator(test_data_dir, batch_size=batch_size, class_labels=tissue_classes, img_height=img_height, img_width=img_width) test_ds = test_gen.file_ds.map(lambda f: tf.py_function(func=process_path, inp=[f], Tout=[tf.float32, tf.int32])) def extract_data(ds): images = None labels = None for img, lab in tqdm(ds): if images is None: images = np.array([img]) labels = np.array([lab]) else: images = np.append(images, [img], axis=0) labels = np.append(labels, [lab], axis=0) return images, labels train_images, train_labels = extract_data(train_ds) test_images, test_labels = extract_data(test_ds) from sklearn.manifold import TSNE import seaborn as sns import matplotlib.patheffects as PathEffects train_tsne = TSNE(n_components=2, random_state=41).fit_transform(train_images) test_tsne = TSNE(n_components=2, random_state=41).fit_transform(test_images) def tissue_scatter(x, colors): num_classes = len(np.unique(colors)) palette = np.array(sns.color_palette("hls", num_classes)) # create a scatter plot. f = plt.figure(figsize=(8, 8)) ax = plt.subplot(aspect='equal') sc = ax.scatter(x[:,0], x[:,1], lw=0, s=40, c=palette[colors.astype(np.int)]) plt.xlim(-25, 25) plt.ylim(-25, 25) ax.axis('off') ax.axis('tight') # add the labels for each digit corresponding to the label txts = [] for i in range(num_classes): # Position of each label at median of data points. xtext, ytext = np.median(x[colors == i, :], axis=0) txt = ax.text(xtext, ytext, str(i), fontsize=24) txt.set_path_effects([ PathEffects.Stroke(linewidth=5, foreground="w"), PathEffects.Normal()]) txts.append(txt) return f, ax, sc, txts tissue_scatter(train_tsne, train_labels) tissue_scatter(test_tsne, test_labels) ``` ## Reasons behind missclassification - One possible reason might be mixed pixels. The composition of the various objects in a single pixel makes identification of genuine class more difficult. - Original size of images are `(3000, 3000, 3)` but we have resized them down to very small size `(128, 128, 3)` for the model because which many details in image data might be lost. - We trained image only for 15 epochs becuase of limited time and computational power restriction.
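To see which tissue classes are actually being confused with one another, a confusion matrix over the test set is a natural next step. The sketch below is not from the original notebook; it assumes scikit-learn is available and reuses `customModel` and `custom_test_gen` defined above.

```
from sklearn.metrics import confusion_matrix  # assumption: scikit-learn installed

# Collect predictions and true labels batch by batch from the test generator.
y_true, y_pred = [], []
for i in range(len(custom_test_gen)):
    images, labels = custom_test_gen[i]
    probs = customModel.model.predict(images, verbose=0)
    y_pred.extend(np.argmax(probs, axis=1))
    y_true.extend(np.argmax(labels, axis=1))

cm = confusion_matrix(y_true, y_pred)

plt.figure(figsize=(8, 8))
plt.imshow(cm)
plt.xticks(range(len(tissue_classes)), tissue_classes, rotation=90)
plt.yticks(range(len(tissue_classes)), tissue_classes)
plt.xlabel('Predicted class')
plt.ylabel('True class')
plt.colorbar()
plt.show()
```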
# 📃 Solution for Exercise M2.01 The aim of this exercise is to make the following experiments: * train and test a support vector machine classifier through cross-validation; * study the effect of the parameter gamma of this classifier using a validation curve; * study if it would be useful in term of classification if we could add new samples in the dataset using a learning curve. To make these experiments we will first load the blood transfusion dataset. <div class="admonition note alert alert-info"> <p class="first admonition-title" style="font-weight: bold;">Note</p> <p class="last">If you want a deeper overview regarding this dataset, you can refer to the Appendix - Datasets description section at the end of this MOOC.</p> </div> ``` import pandas as pd blood_transfusion = pd.read_csv("../datasets/blood_transfusion.csv") data = blood_transfusion.drop(columns="Class") target = blood_transfusion["Class"] ``` We will use a support vector machine classifier (SVM). In its most simple form, a SVM classifier is a linear classifier behaving similarly to a logistic regression. Indeed, the optimization used to find the optimal weights of the linear model are different but we don't need to know these details for the exercise. Also, this classifier can become more flexible/expressive by using a so-called kernel making the model becomes non-linear. Again, no requirement regarding the mathematics is required to accomplish this exercise. We will use an RBF kernel where a parameter `gamma` allows to tune the flexibility of the model. First let's create a predictive pipeline made of: * a [`sklearn.preprocessing.StandardScaler`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html) with default parameter; * a [`sklearn.svm.SVC`](https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html) where the parameter `kernel` could be set to `"rbf"`. Note that this is the default. ``` from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC model = make_pipeline(StandardScaler(), SVC()) ``` Evaluate the statistical performance of your model by cross-validation with a `ShuffleSplit` scheme. Thus, you can use [`sklearn.model_selection.cross_validate`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_validate.html) and pass a [`sklearn.model_selection.ShuffleSplit`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.ShuffleSplit.html) to the `cv` parameter. Only fix the `random_state=0` in the `ShuffleSplit` and let the other parameters to the default. ``` from sklearn.model_selection import cross_validate, ShuffleSplit cv = ShuffleSplit(random_state=0) cv_results = cross_validate(model, data, target, cv=cv, n_jobs=-1) cv_results = pd.DataFrame(cv_results) cv_results print( f"Accuracy score of our model:\n" f"{cv_results['test_score'].mean():.3f} +/- " f"{cv_results['test_score'].std():.3f}" ) ``` As previously mentioned, the parameter `gamma` is one of the parameter controlling under/over-fitting in support vector machine with an RBF kernel. Compute the validation curve to evaluate the effect of the parameter `gamma`. You can vary its value between `10e-3` and `10e2` by generating samples on a logarithmic scale. Thus, you can use `np.logspace(-3, 2, num=30)`. Since we are manipulating a `Pipeline` the parameter name will be set to `svc__gamma` instead of only `gamma`. You can retrieve the parameter name using `model.get_params().keys()`. 
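As a quick check (output not shown in the original), the pipeline's parameter names can be listed and filtered:

```
# Minimal sketch: list the tunable parameter names of the pipeline and keep
# the ones related to gamma; this should surface 'svc__gamma'.
[name for name in model.get_params().keys() if "gamma" in name]
```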
We will go more into details regarding accessing and setting hyperparameter in the next section. ``` import numpy as np from sklearn.model_selection import validation_curve gammas = np.logspace(-3, 2, num=30) param_name = "svc__gamma" train_scores, test_scores = validation_curve( model, data, target, param_name=param_name, param_range=gammas, cv=cv, n_jobs=-1) ``` Plot the validation curve for the train and test scores. ``` import matplotlib.pyplot as plt plt.errorbar(gammas, train_scores.mean(axis=1), yerr=train_scores.std(axis=1), label='Training error') plt.errorbar(gammas, test_scores.mean(axis=1), yerr=test_scores.std(axis=1), label='Testing error') plt.legend() plt.xscale("log") plt.xlabel(r"Value of hyperparameter $\gamma$") plt.ylabel("Accuracy score") _ = plt.title("Validation score of support vector machine") ``` Looking at the curve, we can clearly identify the over-fitting regime of the SVC classifier when `gamma > 1`. The best setting is around `gamma = 1` while for `gamma < 1`, it is not very clear if the classifier is under-fitting but the testing score is worse than for `gamma = 1`. Now, you can perform an analysis to check whether adding new samples to the dataset could help our model to better generalize. Compute the learning curve (using [`sklearn.model_selection.learning_curve`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.learning_curve.html)) by computing the train and test scores for different training dataset size. Plot the train and test scores with respect to the number of samples. ``` from sklearn.model_selection import learning_curve train_sizes = np.linspace(0.1, 1, num=10) results = learning_curve( model, data, target, train_sizes=train_sizes, cv=cv, n_jobs=-1) train_size, train_scores, test_scores = results[:3] plt.errorbar(train_size, train_scores.mean(axis=1), yerr=train_scores.std(axis=1), label='Training error') plt.errorbar(train_size, test_scores.mean(axis=1), yerr=test_scores.std(axis=1), label='Testing error') plt.legend() plt.xlabel("Number of samples in the training set") plt.ylabel("Accuracy") _ = plt.title("Learning curve for support vector machine") ``` We observe that adding new samples in the dataset does not improve the testing score. We can only conclude that the standard deviation of the training error is decreasing when adding more samples which is not a surprise.