Columns: code (stringlengths 38 to 801k), repo_path (stringlengths 6 to 263)
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/sharon1321/studio/blob/master/homework.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/"} id="nrJ_U4F-VmEX" outputId="c3ba9d2d-ce78-4f6e-ee9b-39e4bcdcc477" # !mkdir images # !wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=1vHXk7Eg_PA8FgpFSyf8ZQkqgjoAVe7ng' -O images/1.jpg # !wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=1WHq4B_GmVKxGI6K7jxS9vK8GeOangpbr' -O images/2.jpg # !wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=1FWdNHvkkgxxf8h-Rp2CJKQ9rY8XVGhdy' -O images/3.jpg # !wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=1_JmOlY4E4FCNlydDkISbkquLKc3Nc-lT' -O images/4.jpg # + id="KpebuORyYRg5" # + id="-ANm2qz3W-mf"
homework.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # <NAME> # ## Computer Vision and IOT Intern @TSF # ### Prediction using Decision tree algorithm # ### Dataset : Iris.csv (https://bit.ly/34SRn3b) # **Algorithm** # # One of the most important considerations when choosing a machine learning algorithm is how interpretable it is. The ability to explain how an algorithm makes predictions is useful to not only you, but also to potential stakeholders. A very interpretable machine learning algorithm is a decision tree which you can think of as a series of questions designed to assign a class or predict a continuous value depending on the task. The example image is a decision tree designed for classification. # + # %matplotlib inline import matplotlib.pyplot as plt import pandas as pd from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier from sklearn import tree # - df=pd.read_csv('Iris.csv') df df.info() # + features = ['SepalLengthCm','SepalWidthCm','PetalLengthCm','PetalWidthCm'] # Create features matrix x = df.loc[:, features].values # - y=df.Species x_train,x_test,y_train,y_test=train_test_split(x, y, random_state=0) clf = DecisionTreeClassifier(max_depth = 2, random_state = 0) # + clf.fit(x_train, y_train) # - clf.predict(x_test[0:1]) from sklearn import metrics import seaborn as sns score = clf.score(x_test, y_test) print(score) print(metrics.classification_report(y_test,clf.predict(x_test))) # + cm = metrics.confusion_matrix(y_test, clf.predict(x_test)) plt.figure(figsize=(7,7)) sns.heatmap(cm, annot=True, fmt=".0f", linewidths=.5, square = True, cmap = 'Blues'); plt.ylabel('Actual label', fontsize = 17); plt.xlabel('Predicted label', fontsize = 17); plt.title('Accuracy Score: {}'.format(score), size = 17); plt.tick_params(labelsize= 15) # + # List of values to try for max_depth: max_depth_range = list(range(1, 6)) # List to store the average RMSE for each value of max_depth: accuracy = [] for depth in max_depth_range: clf = DecisionTreeClassifier(max_depth = depth, random_state = 0) clf.fit(x_train, y_train) score = clf.score(x_test, y_test) accuracy.append(score) # + #ploting accuracy score depth wise fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (10,7)); ax.plot(max_depth_range, accuracy, lw=2, color='k') ax.set_xlim([1, 5]) ax.set_ylim([.50, 1.00]) ax.grid(True, axis = 'both', zorder = 0, linestyle = ':', color = 'k') ax.tick_params(labelsize = 18) ax.set_xticks([1,2,3,4,5]) ax.set_xlabel('max_depth', fontsize = 24) ax.set_ylabel('Accuracy', fontsize = 24) fig.tight_layout() #fig.savefig('images/max_depth_vs_accuracy.png', dpi = 300) # + fig, axes = plt.subplots(nrows = 1, ncols = 1, figsize = (7,4), dpi = 150) tree.plot_tree(clf); # - # Putting the feature names and class names into variables fn = ['sepal length (cm)','sepal width (cm)','petal length (cm)','petal width (cm)'] cn = ['setosa', 'versicolor', 'virginica'] # + fig, axes = plt.subplots(nrows = 1, ncols = 1, figsize = (7,4), dpi = 300) tree.plot_tree(clf, feature_names = fn, class_names=cn, filled = True); #fig.savefig('images/plottreefncn.png') # - # ### Conclusion # - **After Importing, Fit our dataset in our model, accuracy is 89.47%.** # # - **We can clearly see model performance by confusion matrix and classification report.** # # - **By ploting accuracy 
score against depth, the optimal depth for the model is 3.** # ### Thank You!
.ipynb_checkpoints/Prediction_using_Decision Tree-checkpoint.ipynb
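The Prediction_using_Decision Tree notebook above sweeps max_depth from 1 to 5, refitting a DecisionTreeClassifier each time and recording test accuracy. A minimal, self-contained sketch of that same depth sweep, using scikit-learn's bundled iris data instead of the Iris.csv file the notebook reads, so the exact scores will differ slightly:

```python
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

# bundled iris data stands in for the notebook's Iris.csv; same four features
X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# refit one tree per candidate depth and report test accuracy
for depth in range(1, 6):
    clf = DecisionTreeClassifier(max_depth=depth, random_state=0)
    clf.fit(X_train, y_train)
    print(f"max_depth={depth}: accuracy={clf.score(X_test, y_test):.4f}")
```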
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # # Frequency-weighting for hypothetical group "mid-range" cetaceans. Functional hearing range according to Southal et al. (2007), table 2, p 430. # # Use this filter when there is no audiogram data for one of these genera: *Steno, Sousa, Sotalia, Tursiops, Stenella, Delphinus, Lagenodelphis, Lagenorhynchus, Lissodelphis, Grampus, Peponocephala, Feresa, Pseudorca, Orcinus, Globicephala, Orcaella, Physeter, Delphinapterus, Monodon, Ziphius, Berardius, Tasmacetus, Hyperoodon, Mesoplodon* # import the FFT scripts source("FFT.R") # ## Defining the filter values # Filter weights are computed by applying the M-weighting function described in Southall et al. (2007), appendix A, p. 500 # + # filter frequencies from 20Hz to 24kHz seq <- make.f.seq(0.02, 24) # functional hearing from 150 Hz to 160 kHz f.low = 0.15 f.high = 160 M.table <- make.M.table(seq, f.low, f.high) M.table plot.M(M.table, TRUE) # - # ## Implement the filter # Implement a filter using the values from table above. Q is set to 1.414 (1 octave) for a smoother filter curve. The filtered sound is normalized at -3dB. # # The generated command can be used to invoque sox by copy-pasting it in a Linux terminal. # sox command inputfile <- "data/whitenoise.wav" outputfile <- "data/M.ft_mid-frequency_cetaceans.mp3" Q <- 1.414 # 1 octave command <- sox_command(inputfile, outputfile, M.table, Q) command # ## References # <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2007. Overview. Aquatic mammals, 33(4), p.411.
notebook-home/Mid-frequency_cetaceans.ipynb
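The Mid-frequency_cetaceans notebook computes its filter weights with helpers (make.f.seq, make.M.table, sox_command) that live in FFT.R and are not reproduced here. For orientation only, a Python sketch of the M-weighting curve as described in Southall et al. (2007), appendix A, with f_low = 0.15 kHz and f_high = 160 kHz; the formula below is my paraphrase of that appendix, not the notebook's FFT.R implementation:

```python
import numpy as np

def m_weight_db(f_khz, f_low=0.15, f_high=160.0):
    """Approximate M-weighting (dB) for mid-frequency cetaceans.

    All frequencies in kHz; the curve is close to 0 dB inside the
    functional hearing band and rolls off outside it.
    """
    f2 = np.asarray(f_khz, dtype=float) ** 2
    return 20 * np.log10((f_high**2 * f2) / ((f2 + f_low**2) * (f2 + f_high**2)))

# frequencies from 20 Hz to 24 kHz, mirroring the notebook's make.f.seq(0.02, 24)
freqs = np.geomspace(0.02, 24, num=12)
for f, m in zip(freqs, m_weight_db(freqs)):
    print(f"{f:8.3f} kHz -> {m:7.2f} dB")
```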
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # coding: utf-8 import os import sys sys.path.append(os.pardir) # setting so that files in the parent directory can be imported import numpy as np import matplotlib.pyplot as plt from dataset.mnist import load_mnist from common.multi_layer_net import MultiLayerNet from common.optimizer import SGD (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True) # reduce the training data to reproduce overfitting x_train = x_train[:300] t_train = t_train[:300] # weight decay setting ======================= #weight_decay_lambda = 0 # use this to disable weight decay weight_decay_lambda = 0.1 # ==================================================== network = MultiLayerNet(input_size=784, hidden_size_list=[100, 100, 100, 100, 100, 100], output_size=10, weight_decay_lambda=weight_decay_lambda) optimizer = SGD(lr=0.01) max_epochs = 201 train_size = x_train.shape[0] batch_size = 100 train_loss_list = [] train_acc_list = [] test_acc_list = [] iter_per_epoch = max(train_size / batch_size, 1) epoch_cnt = 0 for i in range(1000000000): batch_mask = np.random.choice(train_size, batch_size) x_batch = x_train[batch_mask] t_batch = t_train[batch_mask] grads = network.gradient(x_batch, t_batch) optimizer.update(network.params, grads) if i % iter_per_epoch == 0: train_acc = network.accuracy(x_train, t_train) test_acc = network.accuracy(x_test, t_test) train_acc_list.append(train_acc) test_acc_list.append(test_acc) print("epoch:" + str(epoch_cnt) + ", train acc:" + str(train_acc) + ", test acc:" + str(test_acc)) epoch_cnt += 1 if epoch_cnt >= max_epochs: break # 3. draw the graph ========== markers = {'train': 'o', 'test': 's'} x = np.arange(max_epochs) plt.plot(x, train_acc_list, marker='o', label='train', markevery=10) plt.plot(x, test_acc_list, marker='s', label='test', markevery=10) plt.xlabel("epochs") plt.ylabel("accuracy") plt.ylim(0, 1.0) plt.legend(loc='lower right') plt.show()
ch6/overfit_weight_decay.ipynb
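In the overfit_weight_decay notebook above, weight_decay_lambda is handled inside MultiLayerNet from the book's common package, which is not shown. A plain-NumPy sketch of what an L2 weight-decay term does to the loss and to one SGD step, assuming the common 0.5 * lambda * ||W||^2 form of the penalty:

```python
import numpy as np

rng = np.random.default_rng(0)
W = rng.normal(size=(784, 100))          # one weight matrix of the network
grad_data = rng.normal(size=W.shape)     # gradient of the data loss w.r.t. W (stand-in values)
lam = 0.1                                # weight_decay_lambda in the notebook

# L2 weight decay adds 0.5*lam*||W||^2 to the loss, hence lam*W to the gradient
penalty = 0.5 * lam * np.sum(W ** 2)
grad_total = grad_data + lam * W

# plain SGD step with lr=0.01, matching SGD(lr=0.01) in the notebook
lr = 0.01
W -= lr * grad_total
print("L2 penalty term added to the loss:", penalty)
```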
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Titanic - Machine Learning from Disaster import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import missingno as msno train = pd.read_csv('train.csv') test = pd.read_csv('test.csv') # + [markdown] tags=[] # ## Variables Dictionary # - # - Variable | Definition | Key # - survival | Survival | 0 = No, 1 = Yes # - pclass | Ticket class | 1 = 1st, 2 = 2nd, 3 = 3rd # - sex | Sex # - Age | Age in years # - sibsp | # of siblings / spouses aboard the Titanic # - parch | # of parents / children aboard the Titanic # - ticket | Ticket number # - fare | Passenger fare # - cabin | Cabin number # - embarked | Port of Embarkation | C = Cherbourg, Q = Queenstown, S = Southampton # + [markdown] tags=[] # ## EDA # - train.head() train.shape train.info() train.describe() # + [markdown] tags=[] # #### Let's make buckets for our columns # # 1) Target = Survived # 2) Numerical = Pclass, Age, SibSp, Parch, Fare # 3) Categorical = Sex, Embarked # 4) Not sure = Name, Ticket, Cabin # + [markdown] tags=[] # ### Univariate Analysis # - # When exploring our dataset and its features, we have many options available to us. We can explore each feature individually, or compare pairs of features, finding the correlation between. Let's start with some simple Univariate (one feature) analysis. # # Features can be of multiple types: # # - Nominal: is for mutual exclusive, but not ordered, categories. # - Ordinal: is one where the order matters but not the difference between values. # - Interval: is a measurement where the difference between two values is meaningful. # - Ratio: has all the properties of an interval variable, and also has a clear definition of 0.0. # # There are multiple ways of manipulating each feature type, but for simplicity, we'll define only two feature types: # # - Numerical: any feature that contains numeric values. # - Categorical: any feature that contains categories, or text. # fig, ax = plt.subplots(2, 4, figsize=(16, 8)) sns.countplot(ax=ax[0, 0], data=train, x='Survived') sns.countplot(ax=ax[0, 1], x='Pclass', data=train) sns.countplot(ax=ax[0, 2], x='Sex', data=train) sns.histplot(ax=ax[0, 3], x=train['Age']) sns.countplot(ax=ax[1, 0], x='SibSp', data=train) sns.countplot(ax=ax[1, 1], x='Parch', data=train) sns.histplot(ax=ax[1, 2], x=train['Fare'], bins=20) sns.countplot(ax=ax[1, 3], x='Embarked', data=train) # + [markdown] tags=[] # ### Is there any missing values in our data? # - train.isnull().sum() msno.matrix(train, figsize=(16, 8)) # - Age contain 177 missing values, since the distribution of age is right skewed we might consider fill the missing value with median. # - Cabin contain 687 missing values, roughly 70-80 % missing, we might consider to drop it. # - Embarked only contain 2 missing values, great we can fill it with most frequent values. # ### Feature vs Target def biplot(feature, bins='auto'): fig, ax = plt.subplots(1, 2, figsize=(12, 6)) sns.countplot(ax=ax[0], x=feature, data=train) sns.histplot(ax=ax[1], x=feature, hue='Survived', data=train, kde=True, bins=bins) # #### 1. Pclass train.Pclass.describe() train.Pclass.value_counts() biplot('Pclass') # From plot above, we can see that first class have higher chance of survive, on the other hand the third class have higher chance not survive. # #### 2. 
Name train.Name.head() # After looking at the Name columns, my thinking was: Is a person title affecting survive rate ? # #### 3. Sex train.Sex.describe() train.Sex.value_counts() biplot('Sex') # Male are more likely to not survive, maybe they prioritized women first ? # #### 4. Age train.Age.describe() train.Age.value_counts() biplot('Age') # - People in age around 20 and 40 are have the higher chance of surviving # - Also we see that many children survived too # #### 5. SibSp train.SibSp.describe() train.SibSp.value_counts() biplot('SibSp') # - Having 0 sibling or spouse have higher chance of not surviving, maybe they prioritized to save other people rather than themself ? # - Having 1 sibling or spouse have higher chance of surviving, maybe they helping each other to survive ? # - Having more than 1 sibling or spouse suprisingly have higher chance of not surviving, maybe they helping other people too after finish help their sibling or spouse ? # #### 6. Parch train.Parch.describe() train.Parch.value_counts() biplot('Parch') # #### 7. Ticket len(train.Ticket.unique()) train.Ticket.head() # There are 681 unique values in Ticket I'm not sure how to encode that to reduce the dimensionality # #### 8. Fare train.Fare.head() train.Fare.describe() biplot('Fare', bins=10) # Looks like cheaper fare is not really safe, and higher price having higher chance to survive # #### 9. Cabin train.Cabin.unique()[:10] len(train.Cabin.unique()) train.Cabin.isnull().sum() # There just too many missing values in Cabin column we might just drop it later # #### 10. Embarked train.Embarked.describe() train.Embarked.value_counts() biplot('Embarked') # Maybe cheaper fare are people from Southampton and that's decrease the chance of surviving sns.histplot(x='Fare', hue='Embarked', data=train, bins=10) # Surely cheaper fare come from Southampton, now that explain it # + [markdown] tags=[] # ## Model Building # + [markdown] tags=[] # ### Selecting features and splitting data into features and target variable # - train.columns X = train.drop(['PassengerId', 'Survived', 'Name', 'Ticket', 'Cabin'], axis=1) y = train.Survived X.head() # ### Train test split from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=42) # ### Preprocessing from sklearn.pipeline import Pipeline from sklearn.impute import SimpleImputer from sklearn.preprocessing import MinMaxScaler, OneHotEncoder # + numerical_pipeline = Pipeline([ ('imputer', SimpleImputer(strategy='median')), ('scaler', MinMaxScaler()) ]) categorical_pipeline = Pipeline([ ('imputer', SimpleImputer(strategy='most_frequent')), ('onehot', OneHotEncoder()) ]) # - from sklearn.compose import ColumnTransformer preprocessor = ColumnTransformer([ ('numeric', numerical_pipeline, ['SibSp', 'Parch', 'Age', 'Fare']), ('categoric', categorical_pipeline, ['Sex', 'Pclass', 'Embarked']) ]) # ### Pipeline from sklearn.neighbors import KNeighborsClassifier pipeline = Pipeline([ ('prep', preprocessor), ('algo', KNeighborsClassifier()) ]) pipeline.fit(X_train, y_train) pipeline.score(X_test,y_test) # ### GridSearch CV from sklearn.model_selection import GridSearchCV pipeline.get_params() # + parameters = { 'algo__n_neighbors': range(1, 51, 2), 'algo__weights' : ['uniform', 'distance'], 'algo__p' : [1, 2] } model = GridSearchCV(pipeline, parameters, cv=5, n_jobs=-1, verbose=1) model.fit(X_train, y_train) # + # result = pd.DataFrame(model.cv_results_) # + # result.sort_values('rank_test_score').head() # - 
model.best_params_ model.score(X_train, y_train), model.best_score_, model.score(X_test, y_test) # ### Predicting Jack & Rose data = [ [1, 'female', 17, 1, 1, 40, 'S'], [3, 'male', 20, 0, 0, 8, 'S'] ] X_pred = pd.DataFrame(data, columns=X_train.columns, index=['Rose', 'Jack']) X_pred X_pred['Survived'] = model.predict(X_pred) X_pred # ### Save Model import pickle filename = 'knn_titanic.pkl' pickle.dump(model, open(filename, 'wb')) model.best_estimator_ # ## Predict test test.head() X_pred = test.drop(['PassengerId', 'Name', 'Ticket'], axis=1) model.predict(X_pred) pred = pd.DataFrame({ 'PassengerId':test.PassengerId, 'Survived':model.predict(X_pred) }) pred.head() pred.to_csv('gender_submission.csv', index=False)
Titanic Survival - KNN.ipynb
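The Titanic notebook above asks whether the title inside the Name column affects survival but leaves it unexplored. A small sketch of one way to pull the title out with pandas string methods; the regex and the Title column name are my own choices, not part of the notebook:

```python
import pandas as pd

train = pd.read_csv('train.csv')

# titles sit between the comma and the period, e.g. "Braund, Mr. Owen Harris" -> "Mr"
train['Title'] = train['Name'].str.extract(r',\s*([^.]+)\.', expand=False).str.strip()

# survival rate per title (rare titles could later be grouped into an "Other" bucket)
print(train.groupby('Title')['Survived']
           .agg(['count', 'mean'])
           .sort_values('count', ascending=False))
```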
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Python basics # *** # ### General comments # The first step in every Python script is to load those packages that we'll use during the analysis. A package is a set of tools that are not included in the built-in Python tools. # # There are four packages that are commonly used and we will usually load: # * __[NumPy](http://www.numpy.org/)__ is a fundamental package for scientific computing that includes N-dimensional array objects, linear algebra, Fourier transforms, random number capabilities... __NumPy__ uses a vector structure called *array*; data in an *array* must be always of the same nature, i.e., integer, floating point number, string... To import __NumPy__, use the following command: # > ```Python # import numpy as np # ``` # # * __[pandas](https://pandas.pydata.org/)__ is a pacakge that allows organizind data in a structure named *data frame*. *Data frames* resemble the usual Excel table, in the sense that columns represent variables and rows represent samples. All the elements of a column (variable) must be of the same nature (integer, string...), but different columns may differ in the type of data they contain. As Excel talbes, a _data frame_ has an index and heading that identifies rows and columns, respectively, that allow us to search for specific values. To import __pandas__, use the following command: # > ```Python # import pandas as pd # ``` # # * __[matplotlib](https://matplotlib.org/)__ is a package designed to plot graphs similar to those in Matlab. To import __matplotlib__, you need the following commands: # > ```Python # import matplotlib.pyplot as plt # # %matplotlib inline # plt.style.use('seaborn-whitegrid') # ``` # # * __[SciPy](https://www.scipy.org/)__ contains several numerical tools that are efficient and easty to apply, e.g., numerical integration and optimization. We will not load the complete set of tools in __SciPy__, but those we need: # > ```Python # from scipy.stats import genextreme # from scipy.optimize import curve_fit # ``` # # * [__os__](https://docs.python.org/3.4/library/os.html) is a package that allows us to change the working directory, create new directories, list the files contained in a directory, etc. To import it: # > ```Python # import os # ``` # + import numpy as np import pandas as pd from matplotlib import pyplot as plt # %matplotlib inline plt.style.use('seaborn-whitegrid') from scipy.stats import genextreme from scipy.optimize import curve_fit import os # - # In case you need to install some of those packages, you'll need to do the following (example to install SciPy):<br> # * Launch Anaconda Prompt<br> # * Type `conda install scipy` + `Enter`<br> # # We're going to install a variable inspector to be able to check the existing objects in our analysis:<br> # * Launch Anaconda Prompt<br> # * Type: # > `pip install jupyter_contrib_nbextensions` + `Enter`<br> # `jupyter contrib nbextension install --user` + `Enter`<br> # `jupyter nbextension enable varInspector/main` + `Enter`<br> # ### Basic data structures in Python # **Lists**<br> # Lists are a data structure that can contain data of any type (integer, float, strings...) in a single object. Lists are mutable, meaning that we can modify the values inside a list after its declaration. 
# create a list a = [1, 'hello', 1.5] # extract a value from the list # modify one of the values in the list # **Tuples**<br> # Tuples are a data structure similar to lists because they can also contain data of any type. Contrary to lists, tuples can no be modified after declared. # create a lista b = (2, 'red', np.nan) # extract a value from the tuple # modify one of the values in the tuple # **Arrays**<br> # This is a specific structure of the package *NumPy* that allows us to work with vectores and matrices, and perform calculations upon them easily. All the values in an array must be of the same data type. # create an array from the list 'a' # create an array c = np.array([1.5, 2.1, 4.5]) # extract values from the array # invert the array # modify a value in the array # calculate the mean of the array # **Pandas: _series_ and _data frames_**<br> # _Pandas_ is a package suitable for working with bidimensional (_data frames_) or unidimensional (_series_) tables. Pandas' structures use the tools in *NumPy* to perform easily several tasks with the table. In _Pandas_, all the data contained in a column of the table must be of the same type; different columns may have different types of data. # create a 'data frame' with name, age and weight d = [['Peter', 36, 71], ['Laura', 40, 58], ['John', 25, 65]] d = pd.DataFrame(data=d, columns=['name', 'age', 'weight']) d # a column in a data frame is a series # calculate the mean of the dataframe # **Dictionaries**<br> # A dictionary can store several data structures (from those above mentioned) in a single object. We need to set a _key_ to access any of the data structures included in the dictionary. # crear un diccionario que contenga todos los datos anteriormente creados # siendo la clave el tipo de estructura # create a dictionary that contains all the data structures previously created # in this example, the key will be the type of structure e = {'list': a, 'tuple': b, 'array': c, 'dataframe': d} # extract one of the structures from the dictionary
G1448-Hydrology/Precipitation/notebooks/Python_basics(incomplete).ipynb
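The Python_basics(incomplete) notebook above leaves several cells as bare comments ("extract a value from the list", "modify a value in the array", and so on). A short sketch of the kind of code those placeholders seem to call for, reusing the objects a, c and d defined in the notebook:

```python
import numpy as np
import pandas as pd

a = [1, 'hello', 1.5]                 # the list from the notebook
print(a[1])                           # extract a value from the list
a[0] = 10                             # modify one of the values in the list

c = np.array([1.5, 2.1, 4.5])         # the array from the notebook
print(c[0], c[-1])                    # extract values from the array
print(c[::-1])                        # invert the array
c[1] = 3.0                            # modify a value in the array
print(c.mean())                       # calculate the mean of the array

d = pd.DataFrame([['Peter', 36, 71], ['Laura', 40, 58], ['John', 25, 65]],
                 columns=['name', 'age', 'weight'])
print(d['age'])                       # a column in a data frame is a Series
print(d[['age', 'weight']].mean())    # mean of the numeric columns
```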
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from selenium import webdriver import datetime import urllib import requests import re import time # + class News_parse: def str_date_to_int(date_str): y = int(date_str[:4]) m = int(date_str[5:7]) d = int(date_str[8:10]) h = int(date_str[11:13]) mi = int(date_str[14:16]) # print("date slicing...") return y,m,d,h,mi def bubbleSort(alist): for passnum in range(len(alist)-1,0,-1): for i in range(passnum): if alist[i]<alist[i+1]: temp = alist[i] alist[i] = alist[i+1] alist[i+1] = temp def content_to_str(body_content): body_str = str(body_content) sign_counter = 0 start_sign_location = 0 end_sign_location = 0 para_counter =0 for x in range(len(body_str)): if body_str[x] == "=": sign_counter += 1 if sign_counter == 2 : sign_counter = x start_sign_location = sign_counter for w in range(x,len(body_str)): if body_str[w] == "\n": para_counter += 1 if para_counter == 4 : end_sign_location = w break break # print(start_sign_location) # print(end_sign_location) return body_str[start_sign_location+1:end_sign_location+1] def line_ele(strings): return strings.replace("\n"," ") def Parse(self): dt=datetime.datetime.now() target_date = datetime.datetime(dt.year,dt.month,dt.day-1,20,0) driver = webdriver.Chrome("C:\\Users\\infomax\\chromedriver_win32\\chromedriver") driver.implicitly_wait(2) driver.get("http://news.einfomax.co.kr/news/searchForm.html") article_list = {} name_list = ["오진우","곽세연","우성문"] target_date = target_date art_counts_1 =0 driver.find_element_by_xpath("""//*[@id="search-tabs1"]/form/fieldset[2]/div[2]/div/label[4]""").click() for name in name_list: #이름입력 driver.find_element_by_xpath("""//*[@id="sc_word"]""").send_keys(name) #검색 클릭 driver.find_element_by_xpath("""//*[@id="search-tabs1"]/form/footer/div/button""").click() # 리스트형 클릭 driver.find_element_by_xpath("""//*[@id="user-container"]/div[3]/div[2]/section/article/div[2]/header/div[2]/a[1]""").click() # 탑 기사 부터 처리 try: top_date = driver.find_element_by_xpath("""//*[@id="user-container"]/div[3]/div[2]/section/article/div[2]/section/div[1]/div/div[2]""") top_date.get_attribute('innerHTML') top_date_str = top_date.get_attribute('innerHTML') print(top_date_str) top_date_str2 = top_date_str[-16:].strip() y,m,d,h,mi = News_parse.str_date_to_int(top_date_str2) top_date_value = datetime.datetime(y,m,d,h,mi) if top_date_value > target_date: #탑에 걸려있는 기사에 접근 driver.find_element_by_xpath("""//*[@id="user-container"]/div[3]/div[2]/section/article/div[2]/section/div[1]/div/div[1]/a/strong""").click() targetURL = driver.current_url print(targetURL) article_head = driver.find_element_by_xpath("""//*[@id="user-container"]/div[3]/header/div/div""").text article_body_before =driver.find_element_by_xpath("""//*[@id="article-view-content-div"]""").text article_body_after = News_parse.content_to_str(article_body_before) art_counts_1 += 1 # print("기사개수: ",art_counts_1) print(article_head) # print(article_body_after) article_list[top_date_value] = [article_head,article_body_after,targetURL] driver.back() except ValueError: print(name,"탑기사 오류 확인할것") for art_num in range(1, 2): #날짜 파싱하기 - 바디기사 날짜 파싱 time.sleep(2) body_date = driver.find_element_by_xpath("""//*[@id="user-container"]/div[3]/div[2]/section/article/div[2]/section/div[2]/div["""+str(art_num)+"""]/div[2]""") body_date.get_attribute('innerHTML') body_date_str = 
body_date.get_attribute('innerHTML') # print(body_date_str) body_date_str2 = body_date_str[-16:].strip() y,m,d,h,mi = News_parse.str_date_to_int(body_date_str2) body_date_value = datetime.datetime(y,m,d,h,mi) body_date_value if body_date_value > target_date : #바디에 있는 기사에 접근 driver.find_element_by_xpath("""//*[@id="user-container"]/div[3]/div[2]/section/article/div[2]/section/div[2]/div["""+str(art_num)+"""]/div[1]/a/strong""").click() targetURL = driver.current_url art_counts_1 += 1 print("기사개수: ",art_counts_1) print(targetURL) article_head = driver.find_element_by_xpath("""//*[@id="user-container"]/div[3]/header/div/div""").text article_body_before =driver.find_element_by_xpath("""//*[@id="article-view-content-div"]""").text article_body_after = News_parse.content_to_str(article_body_before) print(article_head) # print(article_body_after) article_list[body_date_value] = [article_head,article_body_after,targetURL] driver.back() driver.back() driver.back() driver.find_element_by_xpath("""//*[@id="sc_word"]""").clear() article_to_list=list(article_list.keys()) print("담긴 기사개수 : ",len(article_to_list)) print("원래 기사개수 : ", art_counts_1) News_parse.bubbleSort(article_to_list) temp_art = [] full_article = "" for y in article_to_list: temp_art = article_list[y] full_article = full_article + " ▲" +temp_art[0]+"\n"+ " -" + News_parse.line_ele(temp_art[1]) + "\n " + temp_art[2] +"\n\n" print("---------이하 복사--------------------") print(full_article) # - News= News_parse() News.Parse() driver.close() class test: def a(x): print(x) def b(self): test.a("xxaa") test1 = test() test1.b()
crwal_1-Copy3.ipynb
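The crawler above converts article timestamps by slicing fixed character positions (str_date_to_int) and orders articles with a hand-written bubble sort. A sketch of the same two steps with the standard library; the '%Y-%m-%d %H:%M' format string is my guess at the site's timestamp layout, so adjust it to the actual markup:

```python
from datetime import datetime

# parse a timestamp such as '2020-01-15 09:30' in one call instead of slicing indices
def parse_article_date(date_str):
    return datetime.strptime(date_str.strip()[:16], '%Y-%m-%d %H:%M')

dates = ['2020-01-15 09:30', '2020-01-14 21:05', '2020-01-15 18:45']
parsed = [parse_article_date(s) for s in dates]

# newest first, replacing the manual bubble sort
for d in sorted(parsed, reverse=True):
    print(d)
```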
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # #### New to Plotly? # Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/). # <br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online). # <br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started! # # #### Version Check # Note: graph size attributes are available in version <b>1.9.2+</b><br> # Run `pip install plotly --upgrade` to update your Plotly version import plotly plotly.__version__ # ### Adjusting Height, Width, & Margins### # + import plotly.plotly as py import plotly.graph_objs as go data = [ go.Scatter( x=[0, 1, 2, 3, 4, 5, 6, 7, 8], y=[0, 1, 2, 3, 4, 5, 6, 7, 8] ) ] layout = go.Layout( autosize=False, width=500, height=500, margin=go.layout.Margin( l=50, r=50, b=100, t=100, pad=4 ), paper_bgcolor='#7f7f7f', plot_bgcolor='#c7c7c7' ) fig = go.Figure(data=data, layout=layout) py.iplot(fig, filename='size-margins') # - # ### Automatically Adjust Margins # Set [automargin](https://plot.ly/python/reference/#layout-xaxis-automargin) to `True` and Plotly will automatically increase the margin size to prevent ticklabels from being cut off or overlapping with axis titles. # + import plotly.plotly as py import plotly.graph_objs as go data = [ go.Bar( x=['Apples', 'Oranges', 'Watermelon', 'Pears'], y=[3, 2, 1, 4] ) ] layout = go.Layout( autosize=False, width=500, height=500, yaxis=go.layout.YAxis( title='Y-axis Title', ticktext=['Very long label','long label','3','label'], tickvals=[1, 2, 3, 4], tickmode='array', automargin=True, titlefont=dict(size=30), ), paper_bgcolor='#7f7f7f', plot_bgcolor='#c7c7c7' ) fig = go.Figure(data=data, layout=layout) py.iplot(fig, filename='automargin') # - # ### Dash Example # [Dash](https://plot.ly/products/dash/) is an Open Source Python library which can help you convert plotly figures into a reactive, web-based application. Below is a simple example of a dashboard created using Dash. Its [source code](https://github.com/plotly/simple-example-chart-apps/tree/master/dash-graphsizeplot) can easily be deployed to a PaaS. from IPython.display import IFrame IFrame(src= "https://dash-simple-apps.plotly.host/dash-graphsizeplot/", width="100%", height="650px", frameBorder="0") from IPython.display import IFrame IFrame(src= "https://dash-simple-apps.plotly.host/dash-graphsizeplot/code", width="100%", height=500, frameBorder="0") # #### Reference # See https://plot.ly/python/reference/#layout for more information and chart attribute options! # + from IPython.display import display, HTML display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />')) display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">')) # ! 
pip install git+https://github.com/plotly/publisher.git --upgrade import publisher publisher.publish( 'sizing.ipynb', 'python/setting-graph-size/', 'Setting Graph Size', 'How to manipulate the graph size in Python with Plotly.', title = 'Setting Graph Size', name = 'Setting Graph Size', has_thumbnail='true', thumbnail='thumbnail/sizing.png', language='python', order=2, display_as='file_settings', ipynb= '~notebook_demo/133')
_posts/python-v3/fundamentals/sizing/sizing.ipynb
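The sizing notebook above targets the legacy plotly.plotly online API (Plotly v3 and earlier). On a current Plotly release, the same width, height and margin settings should look roughly like the offline sketch below; this is an adaptation for newer versions, not the notebook's original code:

```python
import plotly.graph_objects as go

fig = go.Figure(go.Scatter(x=list(range(9)), y=list(range(9))))
fig.update_layout(
    autosize=False,
    width=500,
    height=500,
    margin=dict(l=50, r=50, b=100, t=100, pad=4),  # same margins as the notebook
    paper_bgcolor='#7f7f7f',
    plot_bgcolor='#c7c7c7',
)
fig.show()  # renders locally; no Chart Studio account needed
```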
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Dataset import tensorflow_datasets as tfds import tensorflow as tf import numpy as np # + def normalize_img(image, label): """Normalizes images: `uint8` -> `float32`.""" return tf.cast(image, tf.float32) / 255., label def get_dataset(): (ds_train, ds_test), ds_info = tfds.load( 'mnist', split=['train', 'test'], shuffle_files=False, as_supervised=True, with_info=True, ) ds_train = ds_train.map(normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE) ds_train = ds_train.cache() # ds_train = ds_train.shuffle(ds_info.splits['train'].num_examples) ds_train = ds_train.batch(128) ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE) ds_test = ds_test.map(normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE) ds_test = ds_test.batch(128) ds_test = ds_test.cache() ds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE) return ds_train, ds_test def convert_to_numpy(ds): ds_numpy = tfds.as_numpy(ds) train_images = None train_labels = None for ex in ds_numpy: if train_images is None: train_images = ex[0] train_labels = ex[1] else: train_images = np.concatenate((train_images, ex[0]), axis=0) train_labels = np.concatenate((train_labels, ex[1]), axis=0) return train_images, train_labels # - ds_train, ds_test = get_dataset() train_images, train_labels = convert_to_numpy(ds_train) test_images, test_labels = convert_to_numpy(ds_test) # ## Models # + import cvnn.layers as complex_layers def get_real_mnist_model(dropout=True): tf.random.set_seed(0) in1 = tf.keras.layers.Input(shape=(28, 28, 1)) flat = tf.keras.layers.Flatten(input_shape=(28, 28, 1))(in1) dense = tf.keras.layers.Dense(128, activation='cart_relu')(flat) if dropout: dense = tf.keras.layers.Dropout(rate=0.5, seed=116)(dense) out = tf.keras.layers.Dense(10, activation='softmax_real_with_abs', kernel_initializer="ComplexGlorotUniform")(dense) real_model = tf.keras.Model(in1, out) real_model.compile( loss='sparse_categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(0.001), metrics=['accuracy'], ) return real_model def get_complex_mnist_model(dropout=True): tf.random.set_seed(0) inputs = complex_layers.complex_input(shape=(28, 28, 1), dtype=np.float32) flat = complex_layers.ComplexFlatten(input_shape=(28, 28, 1), dtype=np.float32)(inputs) dense = complex_layers.ComplexDense(128, activation='cart_relu', dtype=np.float32)(flat) if dropout: dense = complex_layers.ComplexDropout(rate=0.5, seed=116)(dense) out = complex_layers.ComplexDense(10, activation='softmax_real_with_abs', dtype=np.float32)(dense) complex_model = tf.keras.Model(inputs, out) complex_model.compile( loss='sparse_categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(0.001), metrics=['accuracy'], ) return complex_model # - # ## Using two real models # + # Using numpy arrays complex_model = get_real_mnist_model() real_model = get_real_mnist_model() r_history = real_model.fit(train_images, train_labels, epochs=6, validation_data=(test_images, test_labels), verbose=False, shuffle=False) c_history = complex_model.fit(train_images, train_labels, epochs=6, validation_data=(test_images, test_labels), verbose=False, shuffle=False) assert r_history.history == c_history.history, f"{r_history.history} != {c_history.history}" # + # Using tf dataset complex_model = get_real_mnist_model() real_model = get_real_mnist_model() 
r_history = real_model.fit(ds_train, epochs=6, validation_data=ds_test, verbose=False, shuffle=False) c_history = complex_model.fit(ds_train, epochs=6, validation_data=ds_test, verbose=False, shuffle=False) assert r_history.history == c_history.history, f"{r_history.history} != {c_history.history}" # - # ## Using the cvnn # + # Using numpy arrays complex_model = get_complex_mnist_model() real_model = get_real_mnist_model() r_history = real_model.fit(train_images, train_labels, epochs=6, validation_data=(test_images, test_labels), verbose=False, shuffle=False) c_history = complex_model.fit(train_images, train_labels, epochs=6, validation_data=(test_images, test_labels), verbose=False, shuffle=False) assert r_history.history == c_history.history, f"{r_history.history} != {c_history.history}" # + # Using tf dataset complex_model = get_complex_mnist_model() real_model = get_real_mnist_model() r_history = real_model.fit(ds_train, epochs=6, validation_data=ds_test, verbose=False, shuffle=False) c_history = complex_model.fit(ds_train, epochs=6, validation_data=ds_test, verbose=False, shuffle=False) assert r_history.history == c_history.history, f"{r_history.history} != {c_history.history}" # - # ## Without dropout complex_model = get_complex_mnist_model(dropout=False) real_model = get_real_mnist_model(dropout=False) r_history = real_model.fit(ds_train, epochs=6, validation_data=ds_test, verbose=False, shuffle=False) c_history = complex_model.fit(ds_train, epochs=6, validation_data=ds_test, verbose=False, shuffle=False) assert r_history.history == c_history.history, f"{r_history.history} != {c_history.history}"
tests/dropout.ipynb
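The dropout test above asserts exact equality between the two Keras history dicts, which only holds because both models are seeded identically; any floating-point drift breaks the assert. A small sketch of a tolerance-based comparison that could be swapped in (the helper name is mine, not part of the cvnn test):

```python
import numpy as np

def histories_close(h1, h2, rtol=1e-6, atol=1e-8):
    """Compare two Keras History.history dicts metric by metric with a tolerance."""
    if h1.keys() != h2.keys():
        return False
    return all(np.allclose(h1[k], h2[k], rtol=rtol, atol=atol) for k in h1)

# usage with the notebook's objects would be:
# assert histories_close(r_history.history, c_history.history)
print(histories_close({'loss': [0.31, 0.12]}, {'loss': [0.31, 0.12 + 1e-9]}))
```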
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import os import numpy as np from sklearn.metrics import r2_score from sklearn.model_selection import TimeSeriesSplit meta = pd.read_csv("../input/meta_open.csv", index_col='uid', parse_dates=["datastart","dataend"], dayfirst=True) temporal = pd.read_csv("../input/temp_open_utc_complete.csv", index_col='timestamp', parse_dates=True).tz_localize('utc') buildingnames = temporal.columns[temporal.columns.str.contains("Office")] # + # Import all models we are using from sklearn.ensemble import RandomForestRegressor from sklearn.ensemble import AdaBoostRegressor from sklearn.ensemble import BaggingRegressor from sklearn.tree import DecisionTreeRegressor from sklearn.dummy import DummyRegressor from sklearn.tree import ExtraTreeRegressor from sklearn.ensemble import ExtraTreesRegressor from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.ensemble import GradientBoostingRegressor from sklearn.linear_model import HuberRegressor from sklearn.neighbors import KNeighborsRegressor from sklearn.neural_network import MLPRegressor from sklearn.linear_model import PassiveAggressiveRegressor from sklearn.linear_model import RANSACRegressor from sklearn.linear_model import SGDRegressor from sklearn.linear_model import TheilSenRegressor # Make array of models. Each model is an array of two elements. # First element is a model-name, second is a model itself models = [#['RandomForestRegressor', RandomForestRegressor(n_estimators = 1000, random_state = 42)], #['AdaBoostRegressor', AdaBoostRegressor(n_estimators = 1000, random_state = 42)], #['BaggingRegressor', BaggingRegressor(n_estimators = 1000, random_state = 42)], #['DecisionTreeRegressor', DecisionTreeRegressor(random_state = 42)], ['DummyRegressor', DummyRegressor()], #['ExtraTreeRegressor', ExtraTreeRegressor(random_state = 42)], #['ExtraTreesRegressor', ExtraTreesRegressor(n_estimators = 1000, random_state = 42)], #['GaussianProcessRegressor', GaussianProcessRegressor(random_state = 42)], #['GradientBoostingRegressor', GradientBoostingRegressor(n_estimators = 1000, random_state = 42)], #['HuberRegressor', HuberRegressor()], #['KNeighborsRegressor', KNeighborsRegressor()], #['MLPRegressor', MLPRegressor(random_state = 42)], #['PassiveAggressiveRegressor', PassiveAggressiveRegressor(random_state = 42)], #['RANSACRegressor', RANSACRegressor(random_state = 42)], #['SGDRegressor', SGDRegressor(random_state = 42)], #['TheilSenRegressor', TheilSenRegressor(random_state = 42)] ] # - # Produce file with metrics(MAPE, NMBE, CVRSME, RSQUARED) based on provided model # Results will be saved as modelName_metrics.csv def createMetrics(modelName, model): print('\n\n' + modelName + '\n_____________') # buidingindex buildingindex = 0 for singlebuilding in buildingnames[:3]: buildingindex+=1 print("Modelling: " + singlebuilding) # Get Data single_timezone = meta.T[singlebuilding].timezone single_start = meta.T[singlebuilding].datastart single_end = meta.T[singlebuilding].dataend single_building_data = pd.DataFrame(temporal[singlebuilding].tz_convert(single_timezone).truncate(before=single_start,after=single_end)) # split time series data samples # Create an array that's in the same order as the specific building loaded months = np.array([single_building_data.index.month.unique()])[0] n_splits = 3 tscv = 
TimeSeriesSplit(n_splits=n_splits) # Get weather file weatherfilename = meta.T[singlebuilding].newweatherfilename #print("Weatherfile: "+weatherfilename) weather = pd.read_csv(os.path.join("../input/",weatherfilename),index_col='timestamp', parse_dates=True, na_values='-9999') weather = weather.tz_localize(single_timezone, ambiguous = 'infer') outdoor_temp = pd.DataFrame(weather[[col for col in weather.columns if 'Temperature' in col]]).resample("H").mean() outdoor_temp = outdoor_temp.reindex(pd.DatetimeIndex(start=outdoor_temp.index[0], periods=len(single_building_data), freq="H")).fillna(method='ffill').fillna(method='bfill') # Cross validation step index = 0 for train_index, test_index in tscv.split(months): month_train, month_test = months[train_index], months[test_index] # Split into Training and Testing trainingdata = single_building_data[single_building_data.index.month.isin(month_train)] testdata = single_building_data[single_building_data.index.month.isin(month_test)] # Create training data array train_features = np.array(pd.concat([pd.get_dummies(trainingdata.index.hour), pd.get_dummies(trainingdata.index.dayofweek), pd.Series(outdoor_temp[outdoor_temp.index.month.isin(month_train)].TemperatureC.values)], axis=1)) train_labels = np.array(trainingdata[singlebuilding].values) # Create test data array test_features = np.array(pd.concat([pd.get_dummies(testdata.index.hour), pd.get_dummies(testdata.index.dayofweek), pd.Series(outdoor_temp[outdoor_temp.index.month.isin(month_test)].TemperatureC.values)], axis=1)) test_labels = np.array(testdata[singlebuilding].values) # Train the model on training data model.fit(train_features, train_labels); # Use the forest's predict method on the test data predictions = model.predict(test_features) # Calculate the absolute errors errors = abs(predictions - test_labels) # Calculate mean absolute percentage error (MAPE) and add to list MAPE = 100 * np.mean((errors / test_labels)) NMBE = 100 * (sum(test_labels - predictions) / (pd.Series(test_labels).count() * np.mean(test_labels))) CVRSME = 100 * ((sum((test_labels - predictions)**2) / (pd.Series(test_labels).count()-1))**(0.5)) / np.mean(test_labels) RSQUARED = r2_score(test_labels, predictions) #print("MAPE: "+str(MAPE)) #print("NMBE: "+str(NMBE)) #print("CVRSME: "+str(CVRSME)) #print("R SQUARED: "+str(RSQUARED)) #MAPE_data[singlebuilding] = MAPE #NMBE_data[singlebuilding] = NMBE #CVRSME_data[singlebuilding] = CVRSME #RSQUARED_data[singlebuilding] = RSQUARED index+=1 if(buildingindex == 1): temporary = pd.DataFrame(columns=["building", "MAPE", "NMBE", "CVRSME", "RSQUARED"]) temporary.to_csv('./results-timeseries/' + modelName + '_metrics_cross_validation_' + str(index) + '.csv', index=False) # Read dataframe with particular step (cross validation) metrics_prev = pd.read_csv('./results-timeseries/' + modelName + '_metrics_cross_validation_' + str(index) + '.csv') df = pd.DataFrame([[singlebuilding, MAPE, NMBE, CVRSME, RSQUARED]],columns=['building','MAPE','NMBE','CVRSME','RSQUARED']) # Append new row metrics = pd.concat([df,metrics_prev]) metrics.to_csv('./results-timeseries/' + modelName + '_metrics_cross_validation_' + str(index) + '.csv', index=False) MAPE_data = {} RSQUARED_data = {} NMBE_data = {} CVRSME_data = {} for elem in models: # modelName = elem[0], model = elem[1] createMetrics(elem[0], elem[1]) # + # pd.read_csv("../cross-validation/results-timeseries/")
model_notebooks/prototype-cross-validation.ipynb
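The cross-validation notebook above computes MAPE, NMBE, CVRSME and R-squared inline for every building and fold. Pulling those formulas into small functions makes them easier to check; this sketch simply restates the expressions used in the notebook on NumPy arrays:

```python
import numpy as np
from sklearn.metrics import r2_score

def mape(y_true, y_pred):
    # mean absolute percentage error, in percent
    return 100 * np.mean(np.abs(y_pred - y_true) / y_true)

def nmbe(y_true, y_pred):
    # normalized mean bias error, in percent
    n = len(y_true)
    return 100 * np.sum(y_true - y_pred) / (n * np.mean(y_true))

def cvrmse(y_true, y_pred):
    # coefficient of variation of the RMSE, in percent (n-1 in the denominator, as in the notebook)
    n = len(y_true)
    return 100 * np.sqrt(np.sum((y_true - y_pred) ** 2) / (n - 1)) / np.mean(y_true)

y_true = np.array([10.0, 12.0, 15.0, 11.0])
y_pred = np.array([9.5, 12.5, 14.0, 11.5])
print(mape(y_true, y_pred), nmbe(y_true, y_pred), cvrmse(y_true, y_pred), r2_score(y_true, y_pred))
```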
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Decision Trees and Random Forests in Python # This is the code for the lecture video which goes over tree methods in Python. Reference the video lecture for the full explanation of the code! # # I also wrote a [blog post](https://medium.com/@josemarcialportilla/enchanted-random-forest-b08d418cb411#.hh7n1co54) explaining the general logic of decision trees and random forests which you can check out. # # ## Import Libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline # ## Get the Data df = pd.read_csv('kyphosis.csv') df.head() # ## EDA # # We'll just check out a simple pairplot for this small dataset. sns.pairplot(df,hue='Kyphosis',palette='Set1') # ## Train Test Split # # Let's split up the data into a training set and a test set! from sklearn.model_selection import train_test_split X = df.drop('Kyphosis',axis=1) y = df['Kyphosis'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30) # ## Decision Trees # # We'll start just by training a single decision tree. from sklearn.tree import DecisionTreeClassifier dtree = DecisionTreeClassifier() dtree.fit(X_train,y_train) # ## Prediction and Evaluation # # Let's evaluate our decision tree. predictions = dtree.predict(X_test) from sklearn.metrics import classification_report,confusion_matrix print(classification_report(y_test,predictions)) print(confusion_matrix(y_test,predictions)) # ## Tree Visualization # # Scikit learn actually has some built-in visualization capabilities for decision trees, you won't use this often and it requires you to install the pydot library, but here is an example of what it looks like and the code to execute this: # + from IPython.display import Image from sklearn.externals.six import StringIO from sklearn.tree import export_graphviz import pydot features = list(df.columns[1:]) features # + dot_data = StringIO() export_graphviz(dtree, out_file=dot_data,feature_names=features,filled=True,rounded=True) graph = pydot.graph_from_dot_data(dot_data.getvalue()) Image(graph[0].create_png()) # - # ## Random Forests # # Now let's compare the decision tree model to a random forest. from sklearn.ensemble import RandomForestClassifier rfc = RandomForestClassifier(n_estimators=100) rfc.fit(X_train, y_train) rfc_pred = rfc.predict(X_test) print(confusion_matrix(y_test,rfc_pred)) print(classification_report(y_test,rfc_pred))
15-Decision-Trees-and-Random-Forests/.ipynb_checkpoints/01-Decision Trees and Random Forests in Python-checkpoint.ipynb
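The tree-visualization cell above imports StringIO from sklearn.externals.six, a shim that newer scikit-learn releases no longer ship, and the pydot route also needs Graphviz installed. On a recent scikit-learn, the built-in plot_tree avoids both; a sketch on stand-in iris data, since kyphosis.csv is not reproduced here:

```python
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris   # stand-in data; the notebook uses kyphosis.csv
from sklearn.tree import DecisionTreeClassifier, plot_tree

X, y = load_iris(return_X_y=True)
dtree = DecisionTreeClassifier().fit(X, y)

# draw the fitted tree without pydot or Graphviz
fig, ax = plt.subplots(figsize=(10, 6), dpi=120)
plot_tree(dtree, filled=True, rounded=True, ax=ax)
plt.show()
```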
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # import statsmodels.api as sm # import QUANTAXIS as QA import talib as ta import datetime #, time plt.style.use('ggplot') x = np.linspace(-10,10,100) y = 2*x + 1 + np.random.randn(100)*2 fig = plt.figure(figsize=(14,8)) plt.plot(x, y, 'rx');
WQ/ggplot -- ploting.ipynb
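The ggplot notebook above draws y = 2x + 1 plus noise but stops at the scatter plot. A natural follow-up, sketched here, is to recover the slope and intercept with a least-squares fit and overlay the fitted line:

```python
import numpy as np
import matplotlib.pyplot as plt

plt.style.use('ggplot')

x = np.linspace(-10, 10, 100)
y = 2 * x + 1 + np.random.randn(100) * 2

# degree-1 least-squares fit; slope should come out near 2, intercept near 1
slope, intercept = np.polyfit(x, y, 1)

plt.figure(figsize=(14, 8))
plt.plot(x, y, 'rx', label='noisy samples')
plt.plot(x, slope * x + intercept, 'b-', label=f'fit: {slope:.2f}x + {intercept:.2f}')
plt.legend()
plt.show()
```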
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: mlsa # language: python # name: mlsa # --- # # Workshop SL01: Classification # # ## Agenda # - Introduction to training and testing data distribution # - Common classification models # # ## Previously on the last 2 workshops # From the last 2 workshops we have covered the pre-processing of data before model training: # - Read data into dataframes # - Join multiple dataframes # - Encode string data into float/int # - Feature selection/engineering # # ## Exercise # - Think about how to tune hyperparameters for better performance (hint: Sklearn Documents) # ### Prepping the data # These are from last 2 workshops straight, to get the dataframe to work with. import pandas as pd import numpy as np from sklearn.metrics import classification_report, confusion_matrix, accuracy_score # + # read csv file into a dataframe df_id_train = pd.read_csv("train_identity.csv") df_tran_train = pd.read_csv("train_transaction.csv") df_id_test = pd.read_csv("test_identity.csv") df_tran_test = pd.read_csv("test_transaction.csv") # joining table df_train = pd.merge(df_tran_train,df_id_train, on='TransactionID' ,how='left') # target dataframe Y_train = df_train['isFraud'] Y_train = pd.DataFrame(Y_train) # dropping the irrelevant data for training list = ['isFraud','TransactionID','DeviceInfo'] X_train = df_train.drop(list, axis=1) # encoding strings obj_df = X_train.select_dtypes(include=['object']).copy() int_df = X_train.select_dtypes(include=['int64']).copy() float_df = X_train.select_dtypes(include=['float64']).copy() for column in obj_df.head(0): obj_df[column] = obj_df[column].astype('category') obj_df[column] = obj_df[column].cat.codes X_train = pd.concat([obj_df,int_df,float_df],axis=1, sort=False) # filling na X_train.fillna(value=-1,inplace=True) # - # Or we can just download the dataframe as csv for future use. We only need to download it once, so in the future we just need to read these csv into dataframes. # downloadig dataframe as csv X_train.to_csv (r'X_train.csv', index = None, header=True) Y_train.to_csv (r'Y_train.csv', index = None, header=True) X_train = pd.read_csv('X_train.csv') Y_train = pd.read_csv('Y_train.csv') # ### Testing/Training Set Distribution # # For **model selection** purpose we need to distribute the data into training and testing set, and compute model error on both sets, i.e. train error and test error. If we select model based on train error solely, we will have over-fitting problem because the model will just perform really well on training data but not on testing data. Test error is a better tool to judge whether the model will perform well on new data. Perhaps this graph will explain better. 
# # <img src="train-test-error.png"> # # Source: [In-depth introduction to machine learning in 15 hours of expert videos](https://www.r-bloggers.com/in-depth-introduction-to-machine-learning-in-15-hours-of-expert-videos/) # # spliting test/train data into 80:20 from sklearn.model_selection import train_test_split train_size = int(0.8*X_train.shape[0]) test_size = X_train.shape[0]-train_size X_train, X_test, Y_train, Y_test = train_test_split( X_train, Y_train, train_size=train_size, test_size=test_size, random_state=4) # this cell is optional: run this if you don't want to read the future warning messages # import warnings filter from warnings import simplefilter # ignore all future warnings simplefilter(action='ignore', category=FutureWarning) # this is to stop the format warning Y_train = np.array(Y_train).ravel() Y_test = np.array(Y_test).ravel() # ### Model Training # There are many models up our sleeves. We provide a list of models here for you to explore here: # - [Generalized Linear Models](https://scikit-learn.org/stable/modules/linear_model.html) (Logistic regression, [SGD](https://scikit-learn.org/stable/modules/sgd.html), Perceptron) # - [Linear and Quadratic Discriminant Analysis](https://scikit-learn.org/stable/modules/lda_qda.html#dimensionality-reduction-using-linear-discriminant-analysis) # - [Support Vector Machines](https://scikit-learn.org/stable/modules/svm.html) (SVC) # - [Nearest Neighbors](https://scikit-learn.org/stable/modules/neighbors.html#nearest-neighbors-classification) # - [Gaussian Processes](https://scikit-learn.org/stable/modules/gaussian_process.html#gaussian-process-classification-gpc) # - [Naive Bayes](https://scikit-learn.org/stable/modules/naive_bayes.html) # - [Trees](https://scikit-learn.org/stable/modules/tree.html) # - [Ensemble Methods](https://scikit-learn.org/stable/modules/ensemble.html) # - Neural Network (more about this in later workshops) # # If anyone in interested in the math, you can click on the links and read more. # # Later when we talk about regression we will notice the list for regression problems is pretty similar to this one. Actually a classification problem can be viewed as a regression problem, with the regression output being the probability of being classified into a specific category. # # #### 1) GLM # **Logistic regression** is a linear model for classification, with the probability of being a specific category being modeled using a logistic function. from sklearn.linear_model import LogisticRegression # training lr model lr = LogisticRegression() lr.fit(X_train, Y_train) # predict on test data Y_lr = lr.predict(X_test) # accuracy print (classification_report(Y_test, Y_lr,digits = 6)) print (confusion_matrix(Y_test, Y_lr)) print (accuracy_score(Y_test, Y_lr)) # Now this is very bad because the model is very bad at detecting fraud transactions (only a few out of over 4000 fraud transactions). Even with cross validation it is still pretty bad. (When we say this you can believe us because we tried and there are only slightly more successful fraud detections). # # Perhaps there is a way to fix this by putting more penalty on false negative? This is an exercie for you to find out how! ([hint](https://stackoverflow.com/questions/49151325/how-to-penalize-false-negatives-more-than-false-positives)) # # If you have forgotten how to read confusion matrix, here is a [link](https://towardsdatascience.com/understanding-confusion-matrix-a9ad42dcfd62) on how. 
<img src="cm.png"> # The **perceptron** is another simple classification algorithm that works well for large scale learning. The **passive-aggressive algorithms** are a family of algorithms for large-scale learning. They are similar to the Perceptron but do not require a learning rate and include a regularization parameter. from sklearn.linear_model import Perceptron # training model pct = Perceptron() pct.fit(X_train, Y_train) # predict on test data Y_pct = pct.predict(X_test) # accuracy print (classification_report(Y_test, Y_pct,digits = 6)) print (confusion_matrix(Y_test, Y_pct)) print (accuracy_score(Y_test, Y_pct)) from sklearn.linear_model import PassiveAggressiveClassifier # training model pac = PassiveAggressiveClassifier() pac.fit(X_train, Y_train) # predict on test data Y_pac = pac.predict(X_test) # accuracy print (classification_report(Y_test, Y_pac,digits = 6)) print (confusion_matrix(Y_test, Y_pac)) print (accuracy_score(Y_test, Y_pac)) # **SGD** or stochastic gradient descent is a simple yet very efficient approach to fit linear models (e.g. linear SVM, Logistic Regression etc.) by updating the model along with a decreasing strength schedule (aka learning rate). So SGD itself is not a model but an algorithm that minimises the loss function. In later workshop we will revisit SGD for fitting neural networks. Another great thing is that SGD allows minibatch (online/out-of-core) learning, see the `partial_fit` method. For best results using the default learning rate schedule, the data should be standardised (zero mean and unit variance). Hence, it is particularly useful when the number of samples (and the number of features) is very large. Learn more [here](https://scikit-learn.org/stable/modules/sgd.html). from sklearn.linear_model import SGDClassifier # training SGD model SGD = SGDClassifier() SGD.fit(X_train, Y_train) # predict on test data Y_SGD = SGD.predict(X_test) # accuracy print (classification_report(Y_test, Y_SGD,digits = 6)) print (confusion_matrix(Y_test, Y_SGD)) print (accuracy_score(Y_test, Y_SGD)) # Even though it's trained really fast, can you see the problem here? # #### 2) Linear and Quadratic Discriminant Analysis # # Linear Discriminant Analysis (LDA) and Quadratic Discriminant Analysis (QDA) are two classic classifiers, with, as their names suggest, a linear and a quadratic decision surface, respectively. 
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis # training model lda = LinearDiscriminantAnalysis() lda.fit(X_train, Y_train) # predict on test data Y_lda = lda.predict(X_test) # accuracy print (classification_report(Y_test, Y_lda,digits = 6)) print (confusion_matrix(Y_test, Y_lda)) print (accuracy_score(Y_test, Y_lda)) from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis # training model qda = QuadraticDiscriminantAnalysis() qda.fit(X_train, Y_train) # predict on test data Y_qda = qda.predict(X_test) # accuracy print (classification_report(Y_test, Y_qda,digits = 6)) print (confusion_matrix(Y_test, Y_qda)) print (accuracy_score(Y_test, Y_qda)) # #### 3) Support Vector Machines from sklearn import svm # training model # svc = svm.SVC(kernel='linear') # SVC using the libsvm is gonna take forever to run because the core of an SVM is a quadratic programming problem (QP) # (about hours, we haven't run it ourselves but you can try) # SGDclassifier has the same cost function as linear SVC by adjusting penalty and loss parameters # in fact, the default SGDClasifier is a linear SVM svc = SGDClassifier(loss='hinge', penalty='l2') svc.fit(X_train, Y_train) # predict on test data Y_svc = svc.predict(X_test) # accuracy print (classification_report(Y_test, Y_svc,digits = 6)) print (confusion_matrix(Y_test, Y_svc)) print (accuracy_score(Y_test, Y_svc)) # #### 4) Nearest Neighbour from sklearn import neighbors # training knn model knn = neighbors.KNeighborsClassifier() knn.fit(X_train, Y_train) # Predict on test data Y_knn = knn.predict(X_test) # accuracy print (classification_report(Y_test, Y_knn,digits = 6)) print (confusion_matrix(Y_test, Y_knn)) print (accuracy_score(Y_test, Y_knn)) # #### 5) Gaussian Processes # A generic supervised learning method designed to solve regression and probabilistic classification problems but loses efficiency in high dimensional spaces (when the number of features exceeds a few dozens). So this is not particularly useful for our dataset. from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.gaussian_process.kernels import RBF # training model mini_size = 2000 gpc = GaussianProcessClassifier(1.0 * RBF(1.0),n_jobs=-1) gpc.fit(X_train.iloc[0:mini_size], Y_train[0:mini_size]) # Predict on test data Y_gcp = gpc.predict(X_test.iloc[0:mini_size]) # accuracy print (classification_report(Y_test[0:mini_size], Y_gcp,digits = 6)) print (confusion_matrix(Y_test[0:mini_size], Y_gcp)) print (accuracy_score(Y_test[0:mini_size], Y_gcp)) # we have reduced the dataset to mini size otherwise the kernel will die # the warning message appears because the model never predicts a fraud setection # #### 6) Naive Bayes # [Scikit learn document](https://scikit-learn.org/stable/modules/naive_bayes.html#gaussian-naive-bayes): "Naive Bayes methods are a set of supervised learning algorithms based on applying Bayes theorem with the “naive” assumption of conditional independence between every pair of features given the value of the class variable. In spite of their apparently over-simplified assumptions, naive Bayes classifiers have worked quite well in many real-world situations, famously document classification and spam filtering." 
from sklearn.naive_bayes import GaussianNB # training model gnb = GaussianNB() gnb.fit(X_train, Y_train) # Predict on test data Y_gnb = gnb.predict(X_test) # accuracy print (classification_report(Y_test, Y_gnb,digits = 6)) print (confusion_matrix(Y_test, Y_gnb)) print (accuracy_score(Y_test, Y_gnb)) # #### 7) Trees # Tree models do not require any parameters but predict the target by applying a set of if-then-else decision rules to the features. The deeper the tree, the more complex the decision rules, from sklearn import tree # training model dt = tree.DecisionTreeClassifier(max_depth=50) dt.fit(X_train, Y_train) # Predict on test data Y_dt = dt.predict(X_test) # accuracy print (classification_report(Y_test, Y_dt,digits = 6)) print (confusion_matrix(Y_test, Y_dt)) print (accuracy_score(Y_test, Y_dt)) # #### 8) Ensemble Methods # Lastly, ensemble method is where we combine the predictions of estimators built with a given learning algorithm together. The goal of ensemble methods is to improve generalizability and robustness over a single estimator. # # There are two families of ensemble methods: # # - **Averaging methods**: build several estimators independently and then average their predictions. On average, the combined estimator is usually better than any of the single base estimator because its variance is reduced. # # - Examples: Bagging methods, Forests of randomized trees (Random Forests, Extremely Randomized Trees, Totally Random Trees Embedding), … # # - **Boosting methods**: base estimators are built sequentially and one tries to reduce the bias of the combined estimator. The motivation is to combine several weak models to produce a powerful ensemble. # # - Examples: AdaBoost, Gradient Tree Boosting, Voting Classifier … # # Bagging methods work best with strong and complex models (e.g. fully developed decision trees), whereas boosting methods usually work best with weak models (e.g. shallow decision trees). from sklearn.ensemble import BaggingClassifier # training model bag = BaggingClassifier(SGDClassifier()) bag.fit(X_train, Y_train) # Predict on test data Y_bag = bag.predict(X_test) # accuracy print (classification_report(Y_test, Y_bag,digits = 6)) print (confusion_matrix(Y_test, Y_bag)) print (accuracy_score(Y_test, Y_bag)) from sklearn.ensemble import RandomForestClassifier # training model rf = RandomForestClassifier(max_depth=50,n_estimators=20) rf.fit(X_train, Y_train) # Predict on test data Y_rf = rf.predict(X_test) # accuracy print (classification_report(Y_test, Y_rf,digits = 6)) print (confusion_matrix(Y_test, Y_rf)) print (accuracy_score(Y_test, Y_rf)) from sklearn.ensemble import AdaBoostClassifier # training model ab = AdaBoostClassifier(n_estimators=20) # default is DecisionTreeClassifier(max_depth=1) ab.fit(X_train, Y_train) # Predict on test data Y_ab = ab.predict(X_test) # accuracy print (classification_report(Y_test, Y_ab,digits = 6)) print (confusion_matrix(Y_test, Y_ab)) print (accuracy_score(Y_test, Y_ab)) from sklearn.ensemble import GradientBoostingClassifier # training model gb = GradientBoostingClassifier(n_estimators=20) gb.fit(X_train, Y_train) # Predict on test data Y_gb = gb.predict(X_test) # accuracy print (classification_report(Y_test, Y_gb,digits = 6)) print (confusion_matrix(Y_test, Y_gb)) print (accuracy_score(Y_test, Y_gb)) # Hope you enjoyed this session so far. 
As you can see, different models perform very differently, and it is worth taking some time to research which hyperparameters to tune and how to better prepare the data (feature selection, feature engineering, stacking, etc.) before feeding it to the model. [Here](https://www.kaggle.com/c/ieee-fraud-detection/discussion/111284#latest-655464) is the first-place solution for the IEEE fraud detection competition; it may give you some ideas on how to better manipulate data and train models.
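# To make the hyperparameter point concrete, here is a minimal sketch of a grid search over a random forest, reusing the notebook's `X_train`/`Y_train`. The grid values and the 3-fold cross-validation are illustrative choices only, and a full search on a dataset of this size can be slow.

# +
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV

# small illustrative grid; widen it (and increase cv) for a real search
param_grid = {'n_estimators': [20, 50], 'max_depth': [10, 30, 50]}
search = GridSearchCV(RandomForestClassifier(), param_grid, scoring='f1', cv=3, n_jobs=-1)
search.fit(X_train, Y_train)
print(search.best_params_, search.best_score_)
# -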
supervised-learning/workshop-SL01-Classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import kmapper as km from kmapper import jupyter # Creates custom CSS full-size Jupyter screen # + # Import the class import kmapper as km # Some sample data from sklearn import datasets data, labels = datasets.make_circles(n_samples=5000, noise=0.03, factor=0.3) # Initialize mapper = km.KeplerMapper(verbose=1) # Fit to and transform the data projected_data = mapper.fit_transform(data, projection=[0,1]) # X-Y axis # Create dictionary called 'graph' with nodes, edges and meta-information graph = mapper.map(projected_data, data, nr_cubes=10) # Visualize it html = mapper.visualize(graph, path_html="make_circles_keplermapper_output.html", title="make_circles(n_samples=5000, noise=0.03, factor=0.3)") # Inline display # jupyter.display(path_html="http://mlwave.github.io/tda/word2vec-gender-bias.html") jupyter.display(path_html="make_circles_keplermapper_output.html") # -
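# One knob worth experimenting with here is `nr_cubes`, which sets the resolution of the cover: more, smaller hypercubes usually give a larger and more fragmented Mapper graph. A small sketch reusing the objects defined above (the value 20 is only an illustrative choice; newer KeplerMapper releases configure the cover through a `Cover` object instead of this keyword).

# +
graph_fine = mapper.map(projected_data, data, nr_cubes=20)
html_fine = mapper.visualize(graph_fine,
                             path_html="make_circles_keplermapper_output_fine.html",
                             title="make_circles with a finer cover (nr_cubes=20)")
jupyter.display(path_html="make_circles_keplermapper_output_fine.html")
# -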
notebooks/KeplerMapper usage in Jupyter Notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] raw_mimetype="text/markdown" # # Documentation (using Sphinx) # - # ## Write Docstrings for Every Function, Class, and Module # <!-- This is the Item 84 of Effective Python 2nd edition from the section Collaboration. --> # Once trimmed, there will be no distinction between starting on first or second line. # # 1. prefer start on first line of the docstring # # 2. don't tab empty line of the docstring # ### Function # **Best practice** # + def palindrome(word): """Return True if the given word is a palindrome 'Stats' is an example of a palindrome """ return word == word[::-1] print(palindrome.__doc__) palindrome.__doc__ # - # ### Class # **Best practice** # + class Player: """Represents a player of the game Subclass may override the 'tick' method to provide custom animations for the player movement depending on their power level Public attributes: - power: Unused power-ups """ pass print(Player.__doc__) # - # ### Module # **Best practice** # + # words.py # #!/usr/bin/env python3 """Library for finding linguist patterns Testing how words ... Available functions: - palindrome: determine if word is a palindrome ... """ # - # ## Write Docstrings for Variable # <!-- We did this in the runhelper --> # ### Variable # You can add variable documentation for your IDE (it works for VSCode and pycharm). precision = 10**-4 """precision is the precision threshold in the simulation""" # This is not a python docstring. Sphinx will not be able to use it. # ## Writing sphinx docstrings # We automatically generate documentation from the [docstrings](https://www.python.org/dev/peps/pep-0257/) using sphinx [autodoc module](https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html). The [sphinx docstrings](https://sphinx-rtd-tutorial.readthedocs.io/en/latest/docstrings.html) format is described [here](https://sphinx-rtd-tutorial.readthedocs.io/en/latest/docstrings.html#the-sphinx-docstring-format). We omit repeating the type since it can be added as variable and function annotations; it may be added for product documentation. Please refer to the section on typing for the good typing practices. # **Avoid** # avoid napolean style def function_with_types_in_docstring(param1, param2): """Example function with types documented in the docstring. `PEP 484`_ type annotations are supported. If attribute, parameter, and return types are annotated according to `PEP 484`_, they do not need to be included in the docstring: Args: param1 (int): The first parameter. param2 (str): The second parameter. Returns: bool: The return value. True for success, False otherwise. .. _PEP 484: https://www.python.org/dev/peps/pep-0484/ """ pass # **Best practice** def function_with_docstring(param1: int, param2: str) -> bool: """Example function with types documented outside the docstring. We omit the types (provided by typing directly in the doc) :param param1: The first parameter :param param2: The second parameter :raises ValueError: Bad first parameter :return: The return value. True for success, False otherwise. """ pass
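# The same Sphinx field style also applies to classes. A small complementary sketch (the class and its attributes are made up for illustration): constructor parameters are documented in `__init__` with the same `:param:` fields, and instance attributes with `:ivar:`, so the `autodoc` output stays consistent with the function example above.

# +
class Simulation:
    """Run a fixed-step simulation.

    :ivar precision: convergence threshold used when deciding to stop
    :ivar max_steps: hard limit on the number of iterations
    """

    def __init__(self, precision: float, max_steps: int = 100) -> None:
        """
        :param precision: convergence threshold for stopping
        :param max_steps: hard limit on the number of iterations
        """
        self.precision = precision
        self.max_steps = max_steps
# -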
examples/documentation_using_sphinx.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img src='https://wizardsourcer.com/wp-content/uploads/2019/03/Stackoverflow.png' width="400"></img> # # ## <center><h1> A Data Analysis using Stackoverflow’s 2019 and 2020 Annual Developer Survey - Colombia.</h1></center> # # > ## By <NAME> # # ## Each stage of the CRISP-DM process. These stages are: # # # * [1. Business Understanding](#section1) # * [2. Data Understanding.](#section2) # * [3. Prepare Data.](#section3) # * [4. Data Modeling.](#section4) # * [5. Evaluate the Results.](#section5) # # # <a id="section1"></a> # ### <font color="#004D7F"> 1. Business Understanding </font> # In this notebook, I will be exploring the 2019 and 2020 Stack Overflow results in order to glean some information on professional developers and what to focus on to have the best career in Colombia. # # There will be three questions I will seek to answer in order to get this information: # # 1. What Programming Languages are most used to work and Which Programming Languages are most Required in Stackoverflow survey data of 2019 and 2020 ? # 2. How does Programming Languages used at work relates with Programming Languages, People Wants to Learn in Colombua According to Stackoverflow survey data of 2019 and 2020? # 3. What are the most wanted Programming Languages in Colombia According to Stackoverflow survey data of 2019 and 2020? # <a id="section2"></a> # ### <font color="#004D7F"> 2. Data understanding </font> # # In order to gain some understanding of the data. I have to do these steps: # * Handle categorical and missing data # * Analyze, Model, and Visualize # + # import python libraries to handle datasets import numpy as np import pandas as pd from collections import Counter #make plots import matplotlib.pyplot as plt import seaborn as sns #ignore warnings import warnings warnings.filterwarnings("ignore") # to display graphs in jupyter notebook # %matplotlib inline # to visualise al the columns in the dataframe pd.pandas.set_option('display.max_columns', None) # - # Loading Stackoverflow survey data of 2019 and 2020 # Ref link: https://insights.stackoverflow.com/survey df_2019 = pd.read_csv('survey_results_public_2019.csv') df_2020 = pd.read_csv('survey_results_public_2020.csv') df_2019.head() df_2020.head() # <a id="section3"></a> # ### <font color="#004D7F"> 3. 
Prepare Data </font> # rows, columns = df_2019.shape[0], df_2019.shape[1] print(f'Number of rows and columns of 2019 Developer Survey: {rows}, {columns}') rows, columns = df_2020.shape[0], df_2020.shape[1] print(f'Number of rows and columns of 2020 Developer Survey: {rows}, {columns}') #precentage missing values: 2019 Developer Survey print(df_2019.isnull().sum()/df_2019.shape[0]) #precentage missing values: 2020 Developer Survey print(df_2020.isnull().sum()/df_2020.shape[0]) #dtype check 2019 Developer Survey df_2019.dtypes #dtype check 2020 Developer Survey df_2020.dtypes #dist 2019 Developer Survey df_2019.hist(); #dist 2020 Developer Survey df_2020.hist(); #decribe 2019 Developer Survey df_2019.describe() #decribe 2020 Developer Survey df_2020.describe() #Missing Values in Data - Imput def imput (x): columns=x.columns.tolist() del_ = [] for i in range(len(x.isnull().sum())): if x.isnull().sum()[i]/x.shape[0] < 0.35: if x[columns[i]].dtypes == object: x[columns[i]].fillna(x[columns[i]].mode()[0] , inplace = True) else: x[columns[i]].fillna(x[columns[i]].median() , inplace = True) else: del_.append(columns[i]) x.drop(del_, axis = 1, inplace = True) #Missing Values in Data - Imput imput(df_2019) imput(df_2020) #precentage missing values: 2019 Developer Survey print(df_2019.isnull().sum()/df_2019.shape[0]) #precentage missing values: 2020 Developer Survey print(df_2020.isnull().sum()/df_2020.shape[0]) # # # ### What Programming Languages are most used to work and Which Programming Languages are most Required in Stackoverflow survey data of 2019 and 2020 ? # + #First make copy of dataframe and then Filter a dataframe by country def filter_country(df, column_filter, country, column1,column2 ): ''' Filter a dataframe by country(i.e. why i use dropna to drop all other country excpet Colombia) Returns filtred dataframe ''' df_copy = df df_copy = df_copy[df_copy[column_filter] == country].dropna(subset=[column1, column2]) return df_copy # Filtering the dataframe col_2019 = filter_country(df_2019, 'Country', 'Colombia', 'LanguageWorkedWith', 'LanguageDesireNextYear') col_2020 = filter_country(df_2020, 'Country', 'Colombia', 'LanguageWorkedWith', 'LanguageDesireNextYear') # - col2019, col2020 = col_2019.shape[0], col_2020.shape[0] print(f'Nro 2019 and 2020: {col2019}, {col2020}' ) # + def split_column(df, column): ''' Split column by ;, Returns a splited series. ''' df_copy = df columnSeries = df_copy[column].apply(lambda x: x.split(';')) return columnSeries # Splitting the dataframe by columns. worked_languages_2019 = split_column(col_2019, 'LanguageWorkedWith') wanted_languages_2019 = split_column(col_2019, 'LanguageDesireNextYear') worked_languages_2020= split_column(col_2020, 'LanguageWorkedWith') wanted_languages_2020 = split_column(col_2020, 'LanguageDesireNextYear') # + #Just Flating a nested list def flat(array_list): ''' Flat a nested list, Returns a flat list. ''' object_list = [] for row in array_list: for obj in row: object_list.append(obj.strip()) return object_list # Flatting nested list objects. 
list_worked_languages_2019 = flat(worked_languages_2019) list_wanted_languages_2019 = flat(wanted_languages_2019) list_worked_languages_2020 = flat(worked_languages_2020) list_wanted_languages_2020 = flat(wanted_languages_2020) # + def list_of_group(data_list, year): ''' Group by count to a list, Returns a result dict ''' grouped_list = dict(Counter(data_list)) grouped_dict = [{'Programming Language':key, 'Count': value, 'Year': year} for key, value in grouped_list.items()] return grouped_dict # Grouping the list and creating a dict. dict_worked_languages_2019 = list_of_group(list_worked_languages_2019, '2019') dict_wanted_languages_2019 = list_of_group(list_wanted_languages_2019, '2019') dict_worked_languages_2020 = list_of_group(list_worked_languages_2020, '2020') dict_wanted_languages_2020 = list_of_group(list_wanted_languages_2020, '2020') # - # <a id="section4"></a> # ### <font color="#004D7F"> 4. Data Modeling </font> # # + #Ref: https://stackoverflow.com/questions/23668427/pandas-three-way-joining-multiple-dataframes-on-columns def create_dataframe(data_dicts): ''' Create two dataframes and append them, Returns a appended dataframe. ''' df1 = pd.DataFrame(data_dicts[0]) df2 = pd.DataFrame(data_dicts[1]) df = df1.append(df2) return df worked_languages = create_dataframe([dict_worked_languages_2019, dict_worked_languages_2020]) wanted_languages = create_dataframe([dict_wanted_languages_2019, dict_wanted_languages_2020]) # + #Adding Percentage to worked_languages and wanted_languages dataframe def percentage(df, column): ''' Scale data, Returns data scaled. ''' df_copy = df series = [] for val in df_copy[column].unique(): series.append(df_copy[df_copy[column] == val]['Count'] / df_copy[df_copy[column] == val]['Count'].sum()) joined = pd.Series() for i_series in series: joined = joined.append(i_series) return joined worked_languages['Percentage'] = percentage(worked_languages, 'Year') wanted_languages['Percentage'] = percentage(wanted_languages, 'Year') # + # Get the top 12 languages top_12_work = worked_languages.sort_values(by=['Percentage'], ascending=False).head(12)['Programming Language'].unique() top_12_want = wanted_languages.sort_values(by=['Percentage'], ascending=False).head(12)['Programming Language'].unique() worked_chart = worked_languages[worked_languages['Programming Language'].isin(top_12_work)] wanted_chart = wanted_languages[wanted_languages['Programming Language'].isin(top_12_want)] # + plt.figure(figsize=(20,10)) sns.barplot(x = 'Programming Language', y = 'Percentage', hue = 'Year', data = worked_chart.sort_values(by='Percentage', ascending=False)) plt.xlabel("Programming Languages", fontsize = 14) plt.ylabel("Percentage", fontsize = 14) plt.legend(title_fontsize='40') plt.title('Most common Programming Languages used in Colombia', size = 16) plt.show() # - # ## Evaluate the Results # # * The Highest Rate of percentage among all Programming Languages JavaScript have highest growth rate in 2019 which is around 18% whereas this percentage is drop significantly in 2020 about 2% in Colombia. # # ### How does Programming Languages used at work relates with Programming Languages, People Wants to Learn in Colombia According to Stackoverflow survey data of 2019 and 2020? 
# ### Prepare Data # + row = [] for j,k in list(zip(worked_languages_2019, wanted_languages_2019)): for i in j: row.append({ 'Worked_Programming_Languages': i, 'Wanted_Programming_Languages': Counter(k) }) programming_language_transition = pd.DataFrame(row).groupby('Worked_Programming_Languages')\ .agg({'Wanted_Programming_Languages': 'sum'}).reset_index() # - # ### Data Modeling # + for row in programming_language_transition['Worked_Programming_Languages']: programming_language_transition[row] = 0 for index, row in programming_language_transition.iterrows(): try: total = sum([value for key, value in dict(row['Wanted_Programming_Languages']).items()]) for key, value in dict(row['Wanted_Programming_Languages']).items(): programming_language_transition[key].loc[index] = (value / total) except: continue # + prlt = programming_language_transition.drop('Wanted_Programming_Languages', axis=1)\ .set_index('Worked_Programming_Languages') plt.figure(figsize=(20, 10)) sns.heatmap(prlt, cmap = "Reds") plt.title('Programming Languages Heatmap', size = 20) plt.ylabel('') plt.show() # - # ### Evaluate the Results # # * With this Graph we can have some insights and they are: # * `JavaScript` is highly correlated with every Programming Language (except: Elixir, Erlang and Scala) and `HTML/CSS` also have same trends. # * There are around 27 Programming Language which has nearly no correlation among them with anyone. # * `JavaScript` has strongest correlation with `Clojure` (i.e. about `25% to 30%`). # # ### What are the most wanted Programming Languages in Colombia According to Stackoverflow survey data of 2019 and 2020? # + plt.figure(figsize=(16,10)) sns.barplot(x = 'Programming Language', y = 'Percentage', hue = 'Year', data = wanted_chart.sort_values(by='Percentage', ascending=False)) plt.title('Most wanted Programming Languages used in Colombia', size = 16) plt.xlabel("Programming Languages", fontsize = 12) plt.ylabel("Percentage", fontsize = 12) plt.legend(title_fontsize='40') plt.show() # - # ### Evaluate the Results # # * Most of the programming languages that have appeared as programming languages most used at work, also appeared in the ranking of most wanted programming languages, this show us that are many people wanting to learn these languages.
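# To back up that last observation quantitatively, one can intersect the two top-12 arrays computed earlier (`top_12_work` and `top_12_want`); a one-cell sketch:

# +
# languages that appear both in the top-12 worked-with and the top-12 wanted rankings
overlap = np.intersect1d(top_12_work, top_12_want)
print(len(overlap), 'languages appear in both rankings:', overlap)
# -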
Project - Stack Overflow Developer Survey.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns import numpy as np import pandas as pd import datetime # - data = np.load('masked.npz') training_data = data['training_data'] training_data.shape df = pd.DataFrame(data['covars'], columns=data['keys']) print(df.shape) df.head() df_sites = pd.read_csv('PAC2018_Sites.csv') dfs = pd.concat((df, df_sites['Scanner']), axis=1) dfs.head() # + tdata = np.load('masked-test.npz') test_data = tdata['test_data'] print(test_data.shape) tdf = pd.DataFrame(tdata['covars'], columns=tdata['keys']) print(tdf.shape) tdf.head() # - all_tdf = pd.concat((tdf, pd.DataFrame(np.setdiff1d(tdata['subs'], tdf.PAC_ID), columns=['PAC_ID'])), axis=0, sort=True) print(all_tdf.shape) all_tdf.head() cdata = np.load('train+test_cluster.npz') clusters = cdata['clusters'] from sklearn.pipeline import Pipeline from sklearn.model_selection import StratifiedShuffleSplit from sklearn.feature_selection import SelectFromModel, VarianceThreshold from sklearn.metrics import (classification_report,roc_auc_score) from sklearn.ensemble import ExtraTreesClassifier # + # replace 1/2 with 0/1 y = (df.Label.values - 1).astype(int) X = np.hstack((training_data, clusters[:len(y)], np.linalg.norm(training_data, axis=1)[:, None], np.mean(training_data, axis=1)[:, None], )) #dfs[['Age', 'Gender', 'TIV', 'Scanner']].values.astype(float))) print(X.shape) N1=500 N2=1000 clf = Pipeline([#('remove', VarianceThreshold(threshold=0.01)), ('select1', SelectFromModel(ExtraTreesClassifier(N1, n_jobs=20, class_weight="balanced"))), ('clf', ExtraTreesClassifier(N2, n_jobs=20, class_weight="balanced")), ]) sss = StratifiedShuffleSplit(n_splits=50, test_size=0.2, random_state=0) predictions = [[],[]] scores = [] importances = [] for train_index, test_index in sss.split(X, y): results = [] clf.fit(X[train_index], y[train_index]) outputs = clf.predict(X[test_index]) scores.append(roc_auc_score(y[test_index], outputs, average='weighted', sample_weight=None)) print(np.median(scores), len(clf.steps[-1][1].feature_importances_), str(datetime.datetime.now())) #importances.append(clf.steps[-2][1].inverse_transform(clf.steps[-1][1].feature_importances_[None, :])) predictions[0].extend(y[test_index]) predictions[1].extend(outputs) scores = np.array(scores) #importances = np.squeeze(np.array(importances)) auc_val = roc_auc_score(predictions[0], predictions[1], average='weighted', sample_weight=None) print('ROC_AUC_SCORE:', auc_val) print('CLASSIFICATION REPORT:\n', classification_report(predictions[0], predictions[1])) # - plt.hist(scores, 32, color='gray', alpha=0.5) plt.axvline(np.median(scores), color='red') plt.title('Median auc: {:.2f}'.format(np.median(scores))) ylim = plt.ylim() #plt.savefig('output.png') plt.boxplot(scores, vert=False, widths=0.3) plt.ylim(ylim); #plt.savefig('output2-maxvote.png') clf.fit(X, y) from sklearn.externals import joblib joblib.dump(clf, 'clf_classify_raw-nocovar.pkl') clf2 = joblib.load('clf_classify_raw-nocovar.pkl') Xt = np.hstack((test_data, clusters[len(y):], np.linalg.norm(test_data, axis=1)[:, None], np.mean(test_data, axis=1)[:, None], )) #tdf[['Age', 'Gender', 'TIV', 'Scanner']].values.astype(float))) pdf = pd.read_csv('pac_predictions.csv') print(pdf.shape) pdf.head() # do a max vote - would have done proper boosting, but the 
computation time was too long results = [] N = 50 for i in range(N): clf.fit(X,y) out = clf.predict(Xt) results.append(out.tolist()) results = np.array(results) results.shape pdf.Prediction = (np.sum(results, axis=0) > N/2) + 1 pdf.Prediction.value_counts() pdf.to_csv('clf_classify_raw-nocovar-maxvote-output.csv', index=None) # !cat clf_classify_raw-nocovar-maxvote-output.csv test = pd.read_csv('submission.csv') np.sum(test.Prediction != pdf.Prediction)
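# One might also want to know how stable the max vote is from run to run. A purely diagnostic sketch using the `results` array built above: the per-subject fraction of runs that predicted class 1 shows how many subjects sit close to the 50% decision boundary.

# +
# fraction of the N runs that voted for class 1, per test subject
vote_fraction = results.mean(axis=0)
plt.hist(vote_fraction, 20, color='gray', alpha=0.5)
plt.xlabel('fraction of runs predicting class 1')
plt.ylabel('number of subjects')
plt.title('Stability of the max vote across {} runs'.format(results.shape[0]))
# -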
02-classify.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import cv2 # + img = cv2.imread("face.jpg")#name of image is face #to maek the image greyscale #img = cv2.imread("face.jpg",cv2.IMREAD_GRAYSCALE) #cv2.imread("face.jpg",cv2.IMREAD_COLOR) img.shape #if you print(img) #printed as numpy array #if shape is printed by print(img.shape) #-> width,height,(3 for color) # + #cv2.imshow('<NAME>',img)#to give name to the dialogue box opened # + #cv2.waitKey(0) for holding of the box #-1 is the output when the image is closed # + #cv2.imwrite('abc.jpg',img)#for making it discolor #used when changing color of the pic #creates the final image from webcam #cv2.destroyAllWindows()#to close the picture # + cv2.rectangle(img,(244,310),(334,365),(0,0,255),3)#parameters names are image (nameOfImage, (start x,y) , (end x,y) , color, thickness) cv2.imshow('Blog',img) cv2.waitKey(0) cv2.destroyAllWindows() '''if -1 is as a parameter for thickness the box gets filled''' #for drawing a circle '''cv2.circle(img,(130,50),50,(0,0,255),3) (end x,y has only one parameter)''' #for drawing a line '''cv2.line(img,(50,200),(50,450),(0,0,255),10)''' # - # ### VIDEO WRITER BELOW # + #cam = cv2.VideoCapture(0)#0 for reading the video filer # + #FACES DETECTION CODE WHICH INCLUDE haarcascade_frontalface_default file as learning data # - # ## FACE RECOGNITION # + import cv2 fd = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') cam = cv2.VideoCapture(0) while True: ret , img = cam.read() gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) faces = fd.detectMultiScale(gray,1.3,5,minSize = (30,30)) for (x,y,w,h) in faces: cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2) cv2.imshow('Faces',img) if(cv2.waitKey(1) == ord('q')): break cam.release() cv2.destroyAllWindows() # - """while cam.isOpened():#checking the camera on or off index, img = cam.read()#returns image index if index: nimg = cv2.cvtColor(img,cv2.IMREAD_COLOR) cv2.imshow('frame',nimg)#frame reader for the size of image cv2.imwrite('abc.jpg',nimg) if cv2.waitKey(1) & 0xFF == ord('q'):#unicode value character 0xFF #helps in overwriting the close button break cam.release() cv2.destroyAllWindows()""" # + #working on the face recognition for images and videos # - # ### VIDEO WRITING # + #15 is the best apperture for videos #waitKey always display the video when the value is 0 otherwise it doesn't # - # ### RECORDING FROM THE VIDEO CAMERA import cv2 cam=cv2.VideoCapture(0) fourcc = cv2.VideoWriter_fourcc(*'XVID') out = cv2.VideoWriter('video12.avi',fourcc, 15.0, (640,480)) while cam.isOpened(): i, frame = cam.read() if i: gray = cv2.cvtColor(frame, cv2.IMREAD_COLOR) out.write(frame) cv2.imshow('Title',gray) if cv2.waitKey(1) & 0xFF == ord('q'): break cam.release() cv2.destroyAllWindows() # ### VIDEO CAMERA FEATURES RECOG. 
# + import cv2 face_cascade = cv2.CascadeClassifier(r"C:\Users\vaibhav\Desktop\PY_ML_2019\Facial Recog\haarcascade_frontalface_default.xml") eye_cascade = cv2.CascadeClassifier(r"C:\Users\vaibhav\Desktop\PY_ML_2019\Facial Recog\haarcascade_eye (1).xml") cap = cv2.VideoCapture(0) while True: ret, img = cap.read() gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray,1.3,5) for (x,y,w,h) in faces: cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) roi_gray = gray[y:y+h, x:x+w] print(roi_gray) roi_color = img[y:y+h, x:x+w] eyes = eye_cascade.detectMultiScale(roi_gray) for (ex,ey,ew,eh) in eyes: cv2.rectangle(roi_color, (ex,ey),(ex+ew, ey+eh), (0,255,0),2) cv2.imshow('img',img) k = cv2.waitKey(30) & 0Xff if k ==27: break cap.release() cv2.destroyAllWindows() # - # ### FEATURES RECOG. FROM DOWNLOADED PHOTO # + ### FOR PICTURE FEATURES CAPTURE import cv2 as cv face_cascade = cv.CascadeClassifier(r"C:\Users\vaibhav\Desktop\PY_ML_2019\Facial Recog\haarcascade_frontalface_default.xml") eye_cascade = cv.CascadeClassifier(r"C:\Users\vaibhav\Desktop\PY_ML_2019\Facial Recog\haarcascade_eye (1).xml") img = cv.imread('face3.jpg') gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray,1.3,5) for (x,y,w,h) in faces: cv.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) roi_gray = gray[y:y+h, x:x+w] roi_color = img[y:y+h, x:x+w] eyes = eye_cascade.detectMultiScale(roi_gray) for(ex,ey,ew,eh) in eyes: cv.rectangle(roi_color, (ex,ey),(ex+ew,ey+eh),(0,0,255),2) cv.imshow('img',img) k = cv.waitKey(30) & 0Xff if k == 27: break cv.imshow('img',img) cv.waitKey(0) # - #MODEL_MEAN_VALUE pre defined mean which is used in gender detection #blob binary large OBject # ### DRAWING AT THE IMAGE import cv2 import numpy as np img = cv2.imread('face3.jpg', cv2.IMREAD_COLOR) pts = np.array([[10,5],[20,30],[900,20],[50,10],[70,30]], np.int32) pts = pts.reshape((-1,1,2)) img = cv2.polylines(img,[pts],True,(0,255,255)) cv2.imshow('title',img) cv2.waitKey(0) cv2.destroyAllWindows() # ### Writing at an IMAGE # + # to write the text on an image import cv2 import numpy as np img = cv2.imread('face3.jpg', cv2.IMREAD_COLOR) font = cv2.FONT_HERSHEY_COMPLEX cv2.putText(img,'OpenCv', (30,40), font, 2 ,(0,0,255),1,cv2.LINE_8) cv2.imshow('title',img) cv2.waitKey(0) cv2.destroyAllWindows() # - #NOT USEFUL NOW """import cv2 import numpy as np #MOUSE CALLBACK FUNCTION def draw_circle(event,x,y,flags,param): if event == cv2.EVENT_LBUTTONDBLCLK: cv2.circle(img,(x,y),50,(0,25,75),-1) #CREATE A BLACK IMAGE, A WINDOW AND BIND THE FUNCTION image WINDOW img = np.zeros((512,512,3)) cv2.namedWindow('image') cv2.setMouseCallback('image',draw_circle) while(1): cv2.imshow('image',img) if cv2.waitKey(1) & 0Xff == 27: break cv2.destroyAllWindows()"""
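# As a small extension of the feature-detection examples above, the detected face regions can also be written out as separate image files. A self-contained sketch: it re-reads `face3.jpg` and assumes `haarcascade_frontalface_default.xml` is in the working directory, as in the earlier cells.

# +
import cv2

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
img = cv2.imread('face3.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for i, (x, y, w, h) in enumerate(faces):
    # crop the colour image to the detected bounding box and save it
    cv2.imwrite('face_crop_%d.jpg' % i, img[y:y+h, x:x+w])
print('%d face(s) saved' % len(faces))
# -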
FaceRecognation_system.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Lecture 15, Further topics and current research in optimization # + [markdown] slideshow={"slide_type": "slide"} # ## Black box surrogate-based global optimization # + [markdown] slideshow={"slide_type": "subslide"} # In the current course, all the models have been based on algebraic equations. # # However, in many cases, you do not have algebraic equations describing the problem, but instead you have a software or a piece of code that can calculate the values for you. # + [markdown] slideshow={"slide_type": "fragment"} # In many cases like this, you need to treat the model as a *"black box"*, which means that you only know what goes in and what comes out. # + [markdown] slideshow={"slide_type": "fragment"} # Your method is going to have to be intelligent in how to figure out which solutions to evaluate and which not. # + [markdown] slideshow={"slide_type": "subslide"} # In addition, these models may be highly nonconvex and, thus, you are going to have to use *global optimization methods*. # # The methods described in this course are so-called local optimization methods. Local optimization methdos are highly efficient in finding a local minimum of a problem, but they cannot guarantee global optimum. # + [markdown] slideshow={"slide_type": "fragment"} # Global optimization methods need to have some strategy for searching as much as possible of the search space. # + [markdown] slideshow={"slide_type": "fragment"} # In global optimization, there is the so-called **exploration vs. exploitation** ratio. Exploitation means that the method is basically acting as a local optimization method to find the nearest local optimum and exploration means that the method uses some strategy to try to find other local optima. # + [markdown] slideshow={"slide_type": "fragment"} # So-called soft-computing methdos are very popular, although others also exist. # + [markdown] slideshow={"slide_type": "subslide"} # Finally, these black box models are often *computationally expensive*, which means that you need to use a so-called surrogate to save function calls to the black box model. # + [markdown] slideshow={"slide_type": "fragment"} # In practice, this means that there is a clever way of # 1. deciding whether to evaluate a solution with the black box model or the surrogate model, and # 2. when to update the surrogate with solutions calculated using the black-box. # + [markdown] slideshow={"slide_type": "fragment"} # Usual surrogates are neural networks, radial basis functions and Kriging models. # - # E.g., a recent survey by a PhD student of mine: http://link.springer.com/article/10.1007/s00158-015-1226-z#/page-1 # + [markdown] slideshow={"slide_type": "slide"} # ## Connecting "Big Data" and optimization # ### Also called prescriptive analytics # + [markdown] slideshow={"slide_type": "subslide"} # Sometimes, the model of the problem is not based on an algebraic model, nor a computer program, but instead you have (e.g., measured) data about the phenomena concerning the problem. 
# + [markdown] slideshow={"slide_type": "fragment"} # **This raises completely new kind of problems.** # + [markdown] slideshow={"slide_type": "fragment"} # Dealing with "Big Data", you have to deal with the four v:s: # * volume: # * the data is actually big and you need to have specific tools for accessing it # * in addition, one needs to figure out what is the relevant data # * variety: # * the data is in completely different formats and you may have to deal with all of them (e.g., video, spread sheets, natural language), # * velocity: # * the data is constantly changing and more data is being gathered, # * veracity: # * the data is bad and untrusworthy, # * there is a lot of missing data. # + [markdown] slideshow={"slide_type": "fragment"} # Also, in this case, one often needs machine learning techiques to first make sense of the data and then to optimize based on that information gathered. # + [markdown] slideshow={"slide_type": "fragment"} # **In TIES583 the students can make their own project that deals with data and optimization** # # The course will be starting right after this course! # # Please register at https://korppi.jyu.fi/kotka/course/student/generalCourseInfo.jsp?course=192670. # + [markdown] slideshow={"slide_type": "fragment"} # E.g., a recent paper at http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6879615&tag=1 # + [markdown] slideshow={"slide_type": "slide"} # ## Multiobjective optimization and decision support systems # + [markdown] slideshow={"slide_type": "subslide"} # ** The whole point of optimization is to support decision making! ** # + [markdown] slideshow={"slide_type": "fragment"} # However, # * most decision problems have multiple conflicting objectives, and # * human beings are not rational decision makers. # + [markdown] slideshow={"slide_type": "fragment"} # First item needs methods to deal with multiple objectives. # # There are still a lot of unresolved questions in how the decision makers interact with optimization and, also, in just how to compute Pareto optimal solutions for complicated problems. # + [markdown] slideshow={"slide_type": "fragment"} # Second item needs a completely separate type of research. # # In fact, it has been shown that most of the decision making that humans do, is dictated by feelings. # # Thus, one needs to take into account human beings as complete beings. # # ** This is studied in behavioural operations research** # + [markdown] slideshow={"slide_type": "fragment"} # Multiobjective optimization e.g., in a recent paper by <NAME> and others http://dx.doi.org/10.1007/s11573-015-0786-0 # # Behavioral aspects have been studied e.g., in a recent paper http://www.sciencedirect.com/science/article/pii/S0167487015001427 # + [markdown] slideshow={"slide_type": "slide"} # ## Dealing with risk # + [markdown] slideshow={"slide_type": "subslide"} # ** Almost all real-life decisions include risk!** # + [markdown] slideshow={"slide_type": "fragment"} # How to deal with this risk, is a active research topic in optimization. # # Basically, there are two competing underlying approaches: # 1. scenario-based approaches, where the possible states involving the decision problem are modelled as different scenarios and # 2. probabilistic (and similar like fuzzy) approaches, where the possible states are modelled using a distribution (or similar). # + [markdown] slideshow={"slide_type": "fragment"} # There are also different risk measures that can be taken into account. 
# + [markdown] slideshow={"slide_type": "fragment"} # For example, there is one paper by <NAME> et al (incl. the lecturer) http://www.nrcresearchpress.com/doi/pdf/10.1139/cjfr-2014-0443, where the uncertainty is modelled using scenarios, but the twist is that there is a possibility of measuring the states, which removes a part or all of the uncertainty.
Lecture 15, Further topics and current research topics in optimization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # CRISPR TXTL Experiment: comparing 1 or more different RNA guides across a range of concentrations # # + experiment_name = 'RMS-experiment-1-dcas9-a' # set your reaction volume: reaction_volume = 5e-6 # 2 microliters # set your mastermix ratio master_mix_ratio = 0.60 # 75% # enter concentrations for your starting source solutions # enter your desired finnal concentrations chi_concentration = 2.5e-6 nuclease_dna_concentration = 1e-9 experiment_polymerase_dna_concentration = [0] experiment_reporter_dna_concentrations = [2.0e-9] experiment_guideRNA_dna_concentration = [0, 1e-9] experiment_sgrna_concentrations = [0.5e-6, 1e-6, 2e-6] # create labels for each of your guides to test targets = [ 'g1', 'g2', 'nt', ] # fixed concentrations experiment_starting_conc = { 'chi' : 30e-6, 'reporter' : 12e-9, 'nuclease' : 100e-9, 'polymerase' : 9.8e-9, 'no guide control' : 0, 'no reporter control' : 0, 'blank control' : 0, 'sgrna' : 50e-6, } experiment_rna_guides = targets # and finally set the number of replicates for each run replicates = 3 # plate layout dest_layout = (12,8) # 96 well # dest_layout = (24,16) # 384 src_layout = (24,16) # 384 # we assume you're using the standard source plate (15-65µL working range) b_use_ldv = False if b_use_ldv: transfer_range = (2.5e-6, 12e-6) else: transfer_range = (15e-6, 65e-6) # - # import python things here import pandas as pd # %pylab inline # + class EchoManager(): def __init__(self, src_layout, src_transfer_range, dest_layout, reaction_volume, master_mix_ratio=0.75, starting_concentrations={}): ##TODO refactor self.starting_concentration = starting_concentrations self.src_aliquots = {} self.aliquots = [] self.plate_row_index = 0 self.dst_plate_index = 0 self.min_transfer_size = 2.5 * 1e-3 # captured, but not needed self.reaction_volume = reaction_volume self.txtl_mix_volume = reaction_volume * master_mix_ratio self.available_input_volume = reaction_volume - self.txtl_mix_volume self.src_layout = src_layout self.dest_layout = dest_layout self.src_transfer_range = src_transfer_range self.final_source_df = None self.effective_transfer_volume = src_transfer_range[1] - src_transfer_range[0] def src_plating_index_to_str(self, index): row = floor(index/self.src_layout[0]) col = index % self.src_layout[0] return '%s%02d' % (chr(int(row + 65)), col+1) def dst_plating_index_to_str(self, index): row = floor(index/self.dest_layout[0]) col = index % self.dest_layout[0] return '%s%02d' % (chr(int(row + 65)), col+1) def add_rna_targets(self, targets): for t in targets: self.starting_concentration[t] = 24e-9 def generate_source_plating(self): final_source_plating = [] for k,v in self.src_aliquots.items(): for a in v: user_step = { 'reagent' : k, 'well' : a['well_str'], 'volume' : (a['active_volume_ul']), 'active_volume_ul' : (a['active_volume_ul']) / 1e-6, 'volume_plus' : a['active_volume_ul'] + self.src_transfer_range[0] } user_step['volume_ul'] = user_step['volume_plus'] / 1e-6 final_source_plating.append(user_step) self.final_source_df = pd.DataFrame(final_source_plating) return self.final_source_df.sort_values(by='well') def generate_transfers(self): transfer_df = pd.DataFrame(self.aliquots) self.transfer_df = transfer_df final_xfers = [] for n,transfer in transfer_df.iterrows(): for rgt in ['master', 'chi', 'h20', 'nuclease', 'rna', 'sgrna', 'reporter']: 
rgt_source_well = '%s_source' % rgt rgt_source_name = '%s_source_name' % rgt rgt_source_vol = '%s_ul' % rgt xfer = { 'condition' : transfer['name'], 'rgt' : rgt, 'source_name' : transfer[rgt_source_name], 'dst_volume' : transfer[rgt_source_vol] / 1e-9, # we output the csv in nL 'dst_plate_label' : 'dstPlate1', 'src_plate_label' : 'srcPlate1', 'src_well' : transfer[rgt_source_well], 'dst_well' : transfer['index_str'], 'pk' : transfer['index'] } if transfer[rgt_source_vol] > 0: final_xfers.append(xfer) RGT_RANK = { 'master' : 0, 'chi' : 1, 'h20' : 2, 'polymerase' : 3, 'reporter' : 4, 'nuclease' : 5, 'rna' : 6, 'sgrna' : 7 } def rank_by_reagent(row): if row['rgt'] in RGT_RANK: return RGT_RANK[row['rgt']] return 999 def plate_transfer_by_reagent(row): if row['rgt'] == 'master': return '384PP_AQ_CP' return '384PP_AQ_BP2' final_xfers_df = pd.DataFrame(final_xfers) final_xfers_df['rgt_rank'] = final_xfers_df.apply(rank_by_reagent, axis=1) final_xfers_df['Source Plate Type'] = final_xfers_df.apply(plate_transfer_by_reagent, axis=1) final_xfers_df = final_xfers_df.sort_values(by=['rgt_rank', 'pk']) self.final_xfers_df = final_xfers_df def generate_pre_chi_transfer(self): prechi = self.final_xfers_df[self.final_xfers_df.rgt_rank < 4][['condition','src_plate_label', 'src_well', 'dst_volume', 'dst_plate_label', 'dst_well', 'Source Plate Type']] prechi.columns = ['Sample ID', 'Source Plate Name', 'Source Well', 'Transfer Volume', 'Destination Plate Name', 'Destination Well', 'Source Plate Type'] return prechi def generate_post_chi_transfer(self): postchi = self.final_xfers_df[self.final_xfers_df.rgt_rank >= 4][['condition', 'src_plate_label', 'src_well', 'dst_volume', 'dst_plate_label', 'dst_well', 'Source Plate Type']] postchi.columns = ['Sample ID', 'Source Plate Name', 'Source Well', 'Transfer Volume', 'Destination Plate Name', 'Destination Well', 'Source Plate Type'] return postchi def create_aliquots_for_crispr_txtl(self, rna_guides, reporter_dna_concentrations, guideRNA_dna_concentration, sgrna_concentrations): for guide_name in rna_guides: for dna_conc in reporter_dna_concentrations: for rna_conc in guideRNA_dna_concentration: real_sg_concs = sgrna_concentrations if rna_conc > 0: real_sg_concs = [0] for sg_conc in real_sg_concs: nmol_reporter = dna_conc * reaction_volume nmol_rna = rna_conc * reaction_volume nmol_chi = chi_concentration * reaction_volume nmol_nuclease = nuclease_dna_concentration * reaction_volume # if nmol_rna > 0: # sg_conc = 0 nmol_sgrna = sg_conc * reaction_volume if self.starting_concentration['reporter'] > 0: ul_reporter = nmol_reporter / self.starting_concentration['reporter'] else: ul_reporter = 0 if self.starting_concentration[guide_name] > 0: ul_rna = nmol_rna / self.starting_concentration[guide_name] else: ul_rna = 0 # if self.starting_concentration['sgrna'] > 0: ul_sgrna = nmol_sgrna / self.starting_concentration['sgrna'] # else: # ul_sgrna = 0 ul_nuclease = nmol_nuclease / self.starting_concentration['nuclease'] ul_chi = nmol_chi / self.starting_concentration['chi'] ul_final_h20 = self.reaction_volume - self.txtl_mix_volume - (ul_reporter + ul_rna + ul_chi + ul_nuclease + ul_sgrna) for rep in range(replicates): aliquot_name = '%s - %0.2fnM-rep x %0.2fnM guide plasmid x %0.2fnM guide sgRNA (%d)' % (guide_name, dna_conc*1e9, rna_conc*1e9, sg_conc*1e9, rep) aliquot = { 'guide': guide_name, 'name' : aliquot_name, 'reporter_ul' : ul_reporter, 'master_ul' : self.txtl_mix_volume, 'nuclease_ul': ul_nuclease, 'rna_ul' : ul_rna, 'sgrna_ul' : ul_sgrna, 'chi_ul' : ul_chi, 
'h20_ul' : ul_final_h20, 'index' : self.dst_plate_index, 'index_str' : self.dst_plating_index_to_str(self.dst_plate_index) } # increment the plate index to the next loction self.dst_plate_index += 1 for rgt in ['master', 'chi', 'h20', 'nuclease', 'rna', 'sgrna', 'reporter']: # what is the reagent key to use? if 'rna' in rgt: aliquot_rgt_name = '%s %s' % (guide_name, rgt) else: aliquot_rgt_name = rgt # how much volume is needed for this reagent? transfer_needed = aliquot[rgt + '_ul'] # if the transfer is > 0uL # if transfer_needed > 0: # step 1: if no aliquots are present for this reagent, create a list of them if aliquot_rgt_name not in self.src_aliquots: self.src_aliquots[aliquot_rgt_name] = [] # step 2: if that list is empty, initialize the aliquot if len(self.src_aliquots[aliquot_rgt_name]) == 0 : new_src_aliquot = { 'reagent' : aliquot_rgt_name, 'well_str' : self.src_plating_index_to_str(self.plate_row_index), 'aliquot_index' : -1, 'active_volume_ul' : 0, 'physical_volume_ul' : 0 } self.plate_row_index += 1 self.src_aliquots[aliquot_rgt_name].append(new_src_aliquot) # step 3: check if the transfer requires more volume than is accessible in this aliquot current_aliquot_ptr = self.src_aliquots[aliquot_rgt_name][-1] # get the last aliquot in the list aliquot_index = self.src_aliquots[aliquot_rgt_name].index(current_aliquot_ptr) if (transfer_needed + current_aliquot_ptr['active_volume_ul']) > self.effective_transfer_volume: # no, we don't have enough space left, need to create a new aliquot # if the transfer would overfill the source, create a new one and put it there instead new_src_aliquot = { 'reagent' : aliquot_rgt_name, 'aliquot_index' : aliquot_index, 'well_str' : self.src_plating_index_to_str(self.plate_row_index), 'active_volume_ul' : transfer_needed, 'physical_volume_ul' : 0 } self.plate_row_index += 1 self.src_aliquots[aliquot_rgt_name].append(new_src_aliquot) current_aliquot_ptr = self.src_aliquots[aliquot_rgt_name][-1] # get the last aliquot in the list aliquot_index = self.src_aliquots[aliquot_rgt_name].index(current_aliquot_ptr) aliquot[rgt + '_source'] = current_aliquot_ptr['well_str'] aliquot[rgt + '_source_name'] = '%s %d %s' % (aliquot_rgt_name, current_aliquot_ptr['aliquot_index'], current_aliquot_ptr['well_str']) else: # yes we have enough, just add current_aliquot_ptr['active_volume_ul'] += transfer_needed current_aliquot_ptr['aliquot_index'] = aliquot_index aliquot[rgt + '_source'] = current_aliquot_ptr['well_str'] aliquot[rgt + '_source_name'] = '%s %d %s' % (aliquot_rgt_name, current_aliquot_ptr['aliquot_index'], current_aliquot_ptr['well_str']) # print("Using existing aliquot", current_aliquot_ptr) # print(aliquot) # print('------------------------------------------------------\n') self.aliquots.append(aliquot) # + # added 21 ul h20 # added 7 ul of 225 nM reporter = 56.25 nM echo = EchoManager(src_layout=src_layout, src_transfer_range=transfer_range, dest_layout=dest_layout, reaction_volume=reaction_volume, master_mix_ratio=master_mix_ratio, starting_concentrations=experiment_starting_conc ) echo.add_rna_targets(experiment_rna_guides) echo.create_aliquots_for_crispr_txtl(experiment_rna_guides, experiment_reporter_dna_concentrations, experiment_guideRNA_dna_concentration, experiment_sgrna_concentrations) # echo.create_aliquots_for_crispr_txtl(['no reporter control'], [0], experiment_guideRNA_dna_concentration) echo.create_aliquots_for_crispr_txtl(['no guide control'], experiment_reporter_dna_concentrations, [0], [0]) echo.create_aliquots_for_crispr_txtl(['blank 
control'], [0], [0], [0]) echo.generate_source_plating().sort_values(by='well') # - echo.generate_source_plating().groupby('reagent')['volume_ul'].sum() # + # A13 -> A22 # A19 -> A23 # A20 -> A24 # - echo.generate_transfers() echo.transfer_df[['name', 'index_str']].head() # + prechi = echo.generate_pre_chi_transfer() postchi = echo.generate_post_chi_transfer() echo.final_source_df.to_csv('./echo_planning/%s-source-plating.csv' % experiment_name, index=False) prechi.to_csv('./echo_planning/%s-prechi.csv' % experiment_name, index=False) postchi.to_csv('./echo_planning/%s-postchi.csv' % experiment_name, index=False) final_samples = pd.DataFrame(echo.aliquots) final_samples.to_csv('./echo_planning/%s-sample-list.csv' % experiment_name, index=False) # + # postchi.tail() # - for n,f in final_samples.iterrows(): print('%d\t%s \t %s' % (n, f['index_str'], f['name'])) echo.dst_plate_index
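# A quick sanity check one might append here (an editorial addition, not part of the original planning output): the per-well component volumes in `final_samples` should sum to the configured `reaction_volume` for every destination well.

# +
volume_columns = ['master_ul', 'chi_ul', 'h20_ul', 'nuclease_ul',
                  'rna_ul', 'sgrna_ul', 'reporter_ul']
# every row should add up to reaction_volume (within floating point tolerance)
well_totals = final_samples[volume_columns].sum(axis=1)
print(np.allclose(well_totals, reaction_volume))
# -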
CSHLSynBio2016/FinalProject/Experimental design and layout for a series of TXTL CRISPRi reactions A.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Ch `12`: Concept `01` # ### Ranking by neural network # + import tensorflow as tf import numpy as np import random # %matplotlib inline import matplotlib.pyplot as plt # + n_features = 2 def get_data(): data_a = np.random.rand(10, n_features) + 1 data_b = np.random.rand(10, n_features) plt.scatter(data_a[:, 0], data_a[:, 1], c='r', marker='x') plt.scatter(data_b[:, 0], data_b[:, 1], c='g', marker='o') plt.show() return data_a, data_b def get_data2(): data_a = np.asarray([[0.1, 0.9], [0.1, 0.8]]) data_b = np.asarray([[0.4,0.05], [0.45, 0.1]]) plt.scatter(data_a[:, 0], data_a[:, 1], c='r', marker='x') plt.scatter(data_b[:, 0], data_b[:, 1], c='g', marker='o') plt.xlim([0, 0.5]) plt.ylim([0, 1]) plt.axes().set_aspect('equal') plt.show() return data_a, data_b data_a, data_b = get_data() # - n_hidden = 10 # + with tf.name_scope("input"): x1 = tf.placeholder(tf.float32, [None, n_features], name="x1") x2 = tf.placeholder(tf.float32, [None, n_features], name="x2") dropout_keep_prob = tf.placeholder(tf.float32, name='dropout_prob') with tf.name_scope("hidden_layer"): with tf.name_scope("weights"): w1 = tf.Variable(tf.random_normal([n_features, n_hidden]), name="w1") tf.summary.histogram("w1", w1) b1 = tf.Variable(tf.random_normal([n_hidden]), name="b1") tf.summary.histogram("b1", b1) with tf.name_scope("output"): h1 = tf.nn.dropout(tf.nn.relu(tf.matmul(x1,w1) + b1), keep_prob=dropout_keep_prob) tf.summary.histogram("h1", h1) h2 = tf.nn.dropout(tf.nn.relu(tf.matmul(x2, w1) + b1), keep_prob=dropout_keep_prob) tf.summary.histogram("h2", h2) with tf.name_scope("output_layer"): with tf.name_scope("weights"): w2 = tf.Variable(tf.random_normal([n_hidden, 1]), name="w2") tf.summary.histogram("w2", w2) b2 = tf.Variable(tf.random_normal([1]), name="b2") tf.summary.histogram("b2", b2) with tf.name_scope("output"): s1 = tf.matmul(h1, w2) + b2 s2 = tf.matmul(h2, w2) + b2 # + with tf.name_scope("loss"): s12 = s1 - s2 s12_flat = tf.reshape(s12, [-1]) pred = tf.sigmoid(s12) lable_p = tf.sigmoid(-tf.ones_like(s12)) cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=tf.zeros_like(s12_flat), logits=s12_flat + 1) loss = tf.reduce_mean(cross_entropy) tf.summary.scalar("loss", loss) with tf.name_scope("train_op"): train_op = tf.train.AdamOptimizer(0.001).minimize(loss) # - sess = tf.InteractiveSession() summary_op = tf.summary.merge_all() writer = tf.summary.FileWriter("tb_files", sess.graph) init = tf.global_variables_initializer() sess.run(init) for epoch in range(0, 10000): loss_val, _ = sess.run([loss, train_op], feed_dict={x1:data_a, x2:data_b, dropout_keep_prob:0.5}) if epoch % 100 == 0 : summary_result = sess.run(summary_op, feed_dict={x1:data_a, x2:data_b, dropout_keep_prob:1}) writer.add_summary(summary_result, epoch) # print("Epoch {}: Loss {}".format(epoch, loss_val)) grid_size = 10 data_test = [] for y in np.linspace(0., 1., num=grid_size): for x in np.linspace(0., 1., num=grid_size): data_test.append([x, y]) # + def visualize_results(data_test): plt.figure() scores_test = sess.run(s1, feed_dict={x1:data_test, dropout_keep_prob:1}) scores_img = np.reshape(scores_test, [grid_size, grid_size]) plt.imshow(scores_img, origin='lower') plt.colorbar() # - visualize_results(data_test)
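# Since the whole point of the network is to produce a ranking, a short usage sketch: score the grid points with `s1` and sort them best-first (this only reuses the session and placeholders defined above).

# +
# higher score = higher rank; argsort on the negated scores gives best-first order
scores = sess.run(s1, feed_dict={x1: data_test, dropout_keep_prob: 1})
ranking = np.argsort(-scores.flatten())
print('top 5 ranked grid points:', [data_test[i] for i in ranking[:5]])
# -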
ch12_rank/Concept01_ranknet.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] toc="true"
# # Table of Contents
#  <p><div class="lev1 toc-item"><a href="#Meus-primeiros-programas-de-processamento-de-imagens-usando-Numpy" data-toc-modified-id="Meus-primeiros-programas-de-processamento-de-imagens-usando-Numpy-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>My first image processing programs using NumPy</a></div><div class="lev2 toc-item"><a href="#Aprendendo-a-programar-usando-o-NumPy" data-toc-modified-id="Aprendendo-a-programar-usando-o-NumPy-11"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Learning to program using NumPy</a></div>
# -

# # My first image processing programs using NumPy
#
# In this course we will explore the NumPy package, which added the **ndarray** type to Python. Rather than
# using a dedicated image processing package or library, where the image is a specific class and the
# functions are primarily intended for image processing, we will use NumPy's ndarray to represent our images.
#
# In this course we will also practise the matrix (array-oriented) style of programming, that is, trying to
# solve the problem in matrix form. With this we intend to show that this style of programming helps to reuse
# well-known matrix operations and manipulations, increases the clarity of the code, reduces the number of
# lines, and favours execution efficiency, given the ever-growing availability of hardware that processes
# vector operations.

# ## Learning to program using NumPy
#
# There are important concepts and functions to be learned in NumPy, and they will be introduced gradually as
# the course evolves. This week the focus is on slicing. For this, a few fundamental readings are required:
#
# - [Organisation of the ndarray](../master/tutorial_numpy_1_1.ipynb)
# - [1D slicing of the ndarray](../master/tutorial_numpy_1_2.ipynb)
# - [2D slicing of the ndarray](../master/tutorial_numpy_1_3.ipynb)
# - [Shallow and deep copies of the ndarray](../master/tutorial_numpy_1_4.ipynb)
# - [Matrix operations](tutorial_numpy_1_5.ipynb)
#
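# As a tiny warm-up for the readings above, here is a minimal slicing sketch (an editorial example, not part of the original reading list): a synthetic "image" is just a 2D ndarray, and basic slicing selects sub-regions as views, without copying the data.

# +
import numpy as np

f = np.arange(30).reshape(5, 6)   # a small synthetic 5x6 "image"
print(f[1, :])                    # row 1
print(f[:, 2])                    # column 2
print(f[1:4, 2:5])                # a 3x3 sub-image (a view, not a copy)
# -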
2S2018/02 Aprendendo Numpy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Detecting and mitigating racial bias in income estimation # # The goal of this tutorial is to introduce the basic functionality of AI Fairness 360 to an interested developer who may not have a background in bias detection and mitigation. # # *Note: This demo is very similar to the [Credit Scoring Tutorial](tutorial_credit_scoring.ipynb). It is meant as an alternative introduction using a different dataset and mitigation algorithm.* # # ### Biases and Machine Learning # A machine learning model makes predictions of an outcome for a particular instance. (Given an instance of a loan application, predict if the applicant will repay the loan.) The model makes these predictions based on a training dataset, where many other instances (other loan applications) and actual outcomes (whether they repaid) are provided. Thus, a machine learning algorithm will attempt to find patterns, or generalizations, in the training dataset to use when a prediction for a new instance is needed. (For example, one pattern it might discover is "if a person has salary > USD 40K and has outstanding debt < USD 5, they will repay the loan".) In many domains this technique, called supervised machine learning, has worked very well. # # However, sometimes the patterns that are found may not be desirable or may even be illegal. For example, a loan repay model may determine that age plays a significant role in the prediction of repayment because the training dataset happened to have better repayment for one age group than for another. This raises two problems: 1) the training dataset may not be representative of the true population of people of all age groups, and 2) even if it is representative, it is illegal to base any decision on a applicant's age, regardless of whether this is a good prediction based on historical data. # # AI Fairness 360 is designed to help address this problem with _fairness metrics_ and _bias mitigators_. Fairness metrics can be used to check for bias in machine learning workflows. Bias mitigators can be used to overcome bias in the workflow to produce a more fair outcome. # # The loan scenario describes an intuitive example of illegal bias. However, not all undesirable bias in machine learning is illegal it may also exist in more subtle ways. For example, a loan company may want a diverse portfolio of customers across all income levels, and thus, will deem it undesirable if they are making more loans to high income levels over low income levels. Although this is not illegal or unethical, it is undesirable for the company's strategy. # # As these two examples illustrate, a bias detection and/or mitigation toolkit needs to be tailored to the particular bias of interest. More specifically, it needs to know the attribute or attributes, called _protected attributes_, that are of interest: race is one example of a _protected attribute_ and age is a second. # # ### The Machine Learning Workflow # To understand how bias can enter a machine learning model, we first review the basics of how a model is created in a supervised machine learning process. 
# # # # ![image](images/Complex_NoProc_V3.jpg) # # # # # # # # # First, the process starts with a _training dataset_, which contains a sequence of instances, where each instance has two components: the features and the correct prediction for those features. Next, a machine learning algorithm is trained on this training dataset to produce a machine learning model. This generated model can be used to make a prediction when given a new instance. A second dataset with features and correct predictions, called a _test dataset_, is used to assess the accuracy of the model. # Since this test dataset is the same format as the training dataset, a set of instances of features and prediction pairs, often these two datasets derive from the same initial dataset. A random partitioning algorithm is used to split the initial dataset into training and test datasets. # # Bias can enter the system in any of the three steps above. The training data set may be biased in that its outcomes may be biased towards particular kinds of instances. The algorithm that creates the model may be biased in that it may generate models that are weighted towards particular features in the input. The test data set may be biased in that it has expectations on correct answers that may be biased. These three points in the machine learning process represent points for testing and mitigating bias. In AI Fairness 360 codebase, we call these points _pre-processing_, _in-processing_, and _post-processing_. # # ### AI Fairness 360 # We are now ready to utilize AI Fairness 360 (`aif360`) to detect and mitigate bias. We will use the Adult Census Income dataset, splitting it into a training and test dataset. We will look for bias in the creation of a machine learning model to predict if an individual's annual income exceeds $50,000 based on various personal attributes. The protected attribute will be "race", with "1" (white) and "0" (not white) being the values for the privileged and unprivileged groups, respectively. # For this first tutorial, we will check for bias in the initial training data, mitigate the bias, and recheck. More sophisticated machine learning workflows are given in the author tutorials and demo notebooks in the codebase. # # Here are the steps involved # #### Step 1: Write import statements # #### Step 2: Set bias detection options, load dataset, and split between train and test # #### Step 3: Compute fairness metric on original training dataset # #### Step 4: Mitigate bias by transforming the original dataset # #### Step 5: Compute fairness metric on transformed training dataset # # ### Step 1 Import Statements # As with any Python program, the first step will be to import the necessary packages. Below we import several components from the aif360 package. We import a custom version of the AdultDataset with certain features binned, metrics to check for bias, and classes related to the algorithm we will use to mitigate bias. We also import some other non-aif360 useful packages. 
# + import sys sys.path.append("../") import numpy as np from aif360.metrics import BinaryLabelDatasetMetric from aif360.algorithms.preprocessing.optim_preproc import OptimPreproc from aif360.algorithms.preprocessing.optim_preproc_helpers.data_preproc_functions\ import load_preproc_data_adult from aif360.algorithms.preprocessing.optim_preproc_helpers.distortion_functions\ import get_distortion_adult from aif360.algorithms.preprocessing.optim_preproc_helpers.opt_tools import OptTools from IPython.display import Markdown, display # - np.random.seed(1) # ### Step 2 Load dataset, specifying protected attribute, and split dataset into train and test # In Step 2 we load the initial dataset, setting the protected attribute to be race. We then splits the original dataset into training and testing datasets. Although we will use only the training dataset in this tutorial, a normal workflow would also use a test dataset for assessing the efficacy (accuracy, fairness, etc.) during the development of a machine learning model. Finally, we set two variables (to be used in Step 3) for the privileged (1) and unprivileged (0) values for the race attribute. These are key inputs for detecting and mitigating bias, which will be Step 3 and Step 4. # + dataset_orig = load_preproc_data_adult(['race']) dataset_orig_train, dataset_orig_test = dataset_orig.split([0.7], shuffle=True) privileged_groups = [{'race': 1}] # White unprivileged_groups = [{'race': 0}] # Not white # - # ### Step 3 Compute fairness metric on original training dataset # Now that we've identified the protected attribute 'race' and defined privileged and unprivileged values, we can use aif360 to detect bias in the dataset. One simple test is to compare the percentage of favorable results for the privileged and unprivileged groups, subtracting the former percentage from the latter. A negative value indicates less favorable outcomes for the unprivileged groups. This is implemented in the method called mean_difference on the BinaryLabelDatasetMetric class. The code below performs this check and displays the output: metric_orig_train = BinaryLabelDatasetMetric(dataset_orig_train, unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups) display(Markdown("#### Original training dataset")) print("Difference in mean outcomes between unprivileged and privileged groups = %f" % metric_orig_train.mean_difference()) # ### Step 4 Mitigate bias by transforming the original dataset # The previous step showed that the privileged group was getting 10.5% more positive outcomes in the training dataset. Since this is not desirable, we are going to try to mitigate this bias in the training dataset. As stated above, this is called _pre-processing_ mitigation because it happens before the creation of the model. # # AI Fairness 360 implements several pre-processing mitigation algorithms. We will choose the Optimized Preprocess algorithm [1], which is implemented in "OptimPreproc" class in the "aif360.algorithms.preprocessing" directory. This algorithm will transform the dataset to have more equity in positive outcomes on the protected attribute for the privileged and unprivileged groups. # # The algorithm requires some tuning parameters, which are set in the optim_options variable and passed as an argument along with some other parameters, including the 2 variables containg the unprivileged and privileged groups defined in Step 3. 
# # We then call the fit and transform methods to perform the transformation, producing a newly transformed training dataset (dataset_transf_train). Finally, we ensure alignment of features between the transformed and the original dataset to enable comparisons. # # [1] Optimized Pre-Processing for Discrimination Prevention, NIPS 2017, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME> # + optim_options = { "distortion_fun": get_distortion_adult, "epsilon": 0.05, "clist": [0.99, 1.99, 2.99], "dlist": [.1, 0.05, 0] } OP = OptimPreproc(OptTools, optim_options) OP = OP.fit(dataset_orig_train) dataset_transf_train = OP.transform(dataset_orig_train, transform_Y=True) dataset_transf_train = dataset_orig_train.align_datasets(dataset_transf_train) # - # ### Step 5 Compute fairness metric on transformed dataset # Now that we have a transformed dataset, we can check how effective it was in removing bias by using the same metric we used for the original training dataset in Step 3. Once again, we use the function mean_difference in the BinaryLabelDatasetMetric class: metric_transf_train = BinaryLabelDatasetMetric(dataset_transf_train, unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups) display(Markdown("#### Transformed training dataset")) print("Difference in mean outcomes between unprivileged and privileged groups = %f" % metric_transf_train.mean_difference()) # We see the mitigation step was very effective, the difference in mean outcomes is now -0.051074. So we went from a 10.5% advantage for the privileged group to a 5.1% advantage for the privileged group &mdash; a reduction in more than half! # ### Summary # The purpose of this tutorial is to give a new user to bias detection and mitigation a gentle introduction to some of the functionality of AI Fairness 360. A more complete use case would take the next step and see how the transformed dataset impacts the accuracy and fairness of a trained model. This is implemented in the demo notebook in the examples directory of toolkit, called demo_optim_data_preproc.ipynb. I highly encourage readers to view that notebook as it is generalization and extension of this simple tutorial. # # There are many metrics one can use to detect the pressence of bias. AI Fairness 360 provides many of them for your use. Since it is not clear which of these metrics to use, we also provide some guidance. Likewise, there are many different bias mitigation algorithms one can employ, many of which are in AI Fairness 360. Other tutorials will demonstrate the use of some of these metrics and mitigations algorithms. # # As mentioned earlier, both fairness metrics and mitigation algorithms can be performed at various stages of the machine learning pipeline. We recommend checking for bias as often as possible, using as many metrics are relevant for the application domain. We also recommend incorporating bias detection in an automated continous integration pipeline to ensure bias awareness as a software project evolves.
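# As a small extension of Step 5 (a sketch, not part of the original tutorial), BinaryLabelDatasetMetric also exposes disparate_impact(), the ratio of favorable-outcome rates between the unprivileged and privileged groups. It can be compared before and after the transformation in the same way as mean_difference(); a ratio of 1.0 means parity, and values well below 1.0 indicate the unprivileged group is disadvantaged.

print("Disparate impact (original training dataset) = %f" % metric_orig_train.disparate_impact())
print("Disparate impact (transformed training dataset) = %f" % metric_transf_train.disparate_impact())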
models/AIF360/examples/demo_optim_preproc_adult.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: temp # language: python # name: temp # --- # + from faster_rcnn.datasets.factory import get_imdb import faster_rcnn.roi_data_layer.roidb as rdl_roidb from faster_rcnn.roi_data_layer.layer import RoIDataLayer from faster_rcnn.fast_rcnn.config import cfg, cfg_from_file import cv2 import numpy as np from matplotlib import pyplot as plt # %matplotlib inline import matplotlib as mpl mpl.rcParams['figure.dpi']= 300 # - # load config cfg_file = 'experiments/cfgs/faster_rcnn_end2end.yml' cfg_from_file(cfg_file) # + # imdb_name = 'kittivoc_train' imdb_name = 'kittipose_train' # load data imdb = get_imdb(imdb_name) rdl_roidb.prepare_roidb(imdb) roidb = imdb.roidb # roidb is a database, containing bounding box info for all training images # - print len(roidb)# roidb is a list of dictionaries. Length is the entire number of images poses = np.concatenate([roidb[i]['poses'] for i in range(0,len(roidb))],axis=0) sig2 = np.var(poses,axis=0) mean = np.mean(poses,axis=0) np.var((poses-mean)/np.sqrt(sig2)) # + print roidb[0] img_name = roidb[0]['image'][0:-10] print img_name idx = roidb[0]['image'][-10:-3] print idx idx += "png" print idx disp_name = img_name + 'disparity/' + idx print disp_name # - data_layer = RoIDataLayer(roidb, imdb.num_classes) blobs = data_layer.forward() im_data = blobs['data'] # one image, shape = (1, 302, 1000, 3) im_info = blobs['im_info'] # stores H, W, scale gt_boxes = blobs['gt_boxes'] gt_ishard = blobs['gt_ishard'] dontcare_areas = blobs['dontcare_areas'] poses = blobs['gt_poses'] # + #print blobs['gt_poses'] #print blobs['gt_boxes'] #print blobs['im_name'] # - plt.imshow(im_data[0,:,:,:]) disp_data = blobs['data_disp'] print disp_data.shape # note: original disparity file has 3 identical channels # we only take one channel print disp_data.dtype plt.imshow(np.uint8(disp_data[0,:,:,0])) # must convert to uint8 in order to plot # read a sample rgb image fn = "/home/pculbert/Documents/faster_rcnn_pytorch/data/KITTIVOC/JPEGImages/000558.jpg" im = cv2.imread(fn) print im.shape plt.imshow(im) # + # read a sample disparity image # note that the disparity map is a 3 IDENTICAL channel rgb! # we only need to use one channel fn = "/home/pculbert/Documents/faster_rcnn_pytorch/data/KITTIVOC/JPEGImages/disparity/000558.png" im = cv2.imread(fn) print im.shape print im.dtype #plt.imshow(im) im0 = im[:,:,0] im1 = im[:,:,1] im2 = im[:,:,2] print np.sum(im0-im1) print np.sum(im0-im2) plt.imshow(im0) # only plot one channel # -
data_load_test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # --- # + # # !pip install psycopg2-binary # import libraries import pandas as pd from sqlalchemy import create_engine db_config = {'user': 'praktikum_student', # user name 'pwd': '<PASSWORD>', # password 'host': 'rc1b-wcoijxj3yxfsf3fs.mdb.yandexcloud.net', 'port': 6432, # connection port 'db': 'data-analyst-final-project-db'} # the name of the data base connection_string = 'postgresql://{}:{}@{}:{}/{}'.format(db_config['user'], db_config['pwd'], db_config['host'], db_config['port'], db_config['db']) engine = create_engine(connection_string, connect_args={'sslmode':'require'}) # - # ##### You've been given a database of one of the services competing in this market. It contains data on books, publishers, authors, and customer ratings and reviews of books. This information will be used to generate a value proposition for a new product. query = ''' SELECT * FROM books ''' result = pd.io.sql.read_sql(query, con = engine) display(result.sample(5)) query = ''' SELECT * FROM authors ''' result = pd.io.sql.read_sql(query, con = engine) display(result.sample(5)) query = ''' SELECT * FROM ratings ''' result = pd.io.sql.read_sql(query, con = engine) display(result) query = ''' SELECT * FROM reviews ''' result = pd.io.sql.read_sql(query, con = engine) display(result) query = ''' SELECT * FROM publishers ''' result = pd.io.sql.read_sql(query, con = engine) display(result.sample(5)) # ### Task # ##### Find the number of books released after January 1, 2000. query = ''' SELECT COUNT(book_id) FROM books WHERE publication_date > '2000-01-01' ''' result = pd.io.sql.read_sql(query, con = engine) display(result) # ##### Find the number of user reviews and the average rating for each book. query = ''' SELECT COUNT(DISTINCT review_id) as Num_of_Reviews, ROUND(AVG(rating),2) as avg_rating, rat.book_id FROM ratings as rat join reviews as rev ON rev.book_id = rat.book_id GROUP BY rat.book_id ''' result = pd.io.sql.read_sql(query, con = engine) display(result.head(5)) # ##### Identify the publisher that has released the greatest number of books with more than 50 pages (this will help you exclude brochures and similar publications from your analysis). query = ''' SELECT publisher, books_num FROM publishers p JOIN (SELECT publisher_id, COUNT(book_id) AS books_num FROM books WHERE num_pages > 50 GROUP BY publisher_id ORDER BY books_num DESC LIMIT 1) b on b.publisher_id = p.publisher_id ''' result = pd.io.sql.read_sql(query, con = engine) display(result) # Identify the author with the highest average book rating (look only at books with at least 50 ratings). query = ''' SELECT author, avg_highest_rating FROM authors a JOIN (SELECT AVG(rating) AS avg_highest_rating, author_id FROM ratings r JOIN books b on b.book_id = r.book_id WHERE num_pages > 50 GROUP BY author_id ORDER BY avg_highest_rating DESC LIMIT 1) sub on a.author_id = sub.author_id ''' result = pd.io.sql.read_sql(query, con = engine) display(result) # This is the right query, check it out again please # ##### Find the average number of text reviews among users who rated more than 50 books. 
query = '''
SELECT ROUND(AVG(sub.text_num), 3) AS avg_num_of_text_reviews
FROM (SELECT COUNT(DISTINCT rev.text) AS text_num,
             COUNT(DISTINCT rat.book_id) AS rated_books,
             rev.username
      FROM reviews rev
      JOIN ratings rat ON rev.username = rat.username
      GROUP BY rev.username) sub
WHERE rated_books > 50
'''
result = pd.io.sql.read_sql(query, con = engine)
display(result)

# I was in a rush, sorry. Anyway, I don't think the same review text appears more than once; only book_id repeats

# ##### Conclusions
# After running the queries I found that 819 books were released after January 1, 2000, and the highest average user rating is 5.
# Penguin Books is the publisher that released the greatest number of books with more than 50 pages (42 books).
# The author with the highest average book rating is <NAME>, and the average number of text reviews among users who rated more than 50 books is 24.
# I hope that I am done here. Thank you!
SQL_project/SQL_project.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Assignment: Count Most Frequenly used words in Veidenbaums.txt # + # Open File # Read Text # Split Text into word tokens # Count these tokens (we need to figure out how to) # Save/Print Results # + # File is under /data/Veidenbaums.txt # we are under /TextProcessing/CountingWords.ipynb # - # This means one level up and then again down into data # "../data/Veidenbaums.txt" # So called relative path filePath = "../data/Veidenbaums.txt" with open(filePath, encoding="utf-8") as fstream: mytext = fstream.read() len(mytext) mytext[:120] # we could try splitting already but we will get dirty data(words) mywords = mytext.split(" ") len(mywords) mywords[:5] # we will need to clean all lines which contain *** as ending characters # so lets try reading lines filePath = "../data/Veidenbaums.txt" with open(filePath, encoding="utf-8") as fstream: mylines = fstream.readlines() len(mylines) mylines[:15] cleanlines = [line for line in mylines if line[0]!='\n'] len(cleanlines) cleanlines[:5] # we do not want the lines which end with ***\n headlines = [line for line in cleanlines if line.endswith("***\n")] headlines[:5] # we do not need the headlines! # we do not want the lines which end with ***\n noheadlines = [line for line in cleanlines if not line.endswith("***\n")] noheadlines[:5] # we could save the results savePath = "../data/noHeadVeidenbaums.txt" with open(savePath, mode="w", encoding="utf-8") as fstream: fstream.writelines(noheadlines) # May 6th lets start with noheadlines myPath = "../data/noHeadVeidenbaums.txt" with open(myPath, encoding="utf-8") as fstream: noheadlines = fstream.readlines() len(noheadlines) # noheadlines = [line for line in noheadlines if not "Treimanim" in line] len(noheadlines) spaceChars = "\n-" stopChars = """!?.,"':;()…""" for char in stopChars: print(char) # One big text from many lines textNoHead = "".join(noheadlines) # we could have used fstream.read earlier textNoHead[:55] # take off spacy Characters replace with space (why space ? 
:) for char in spaceChars: print(f"Replacing {char} with space") textNoHead = textNoHead.replace(char, " ") # print(textNoHead[:75]) textNoHead[:75] for char in stopChars: print(f"Replacing {char} with nothing") textNoHead = textNoHead.replace(char, "") textNoHead[:55] savePath = "../data/noHeadVeidenbaumsOneLine.txt" with open(savePath, mode="w", encoding="utf-8") as fstream: fstream.write(textNoHead) textNoHead.index("Vēstule") # nothing found thats good textNoHead[5400:5430] # charSet that's Camelcase another style char_set = set(textNoHead) char_set ord("…") words = textNoHead.split() words[:5] # we need to convert to lower case # for word in words: words_lower = [word.lower() for word in words] words_lower[:5] len(words_lower) # + # if we want to do it ourselves # we could store it in a dictionary word and count # {'pēc':5, 'ideālie':1, 'cenšas':3} # - unique_words = set(words_lower) len(unique_words) # i create a dictionary of unique words and set counter to 0 my_counter_dict = {word:0 for word in list(unique_words)} my_counter_dict['pēc'] for word in words_lower: my_counter_dict[word] += 1 # each time i add 1 to right box(key) my_counter_dict['pēc'] my_list_tuples = [(key, value) for key,value in my_counter_dict.items()] my_list_tuples[:5] sorted(my_list_tuples)[:5] # not quite what we need because it sorts by the first item alphabetically # solution we pass a function to show how to sort my_most_common = sorted(my_list_tuples,key=lambda mytuple: mytuple[1], reverse=True) my_most_common[:10] # + # so sorting is possible but my recommendation is to use Counter # + # well and now I would to like sort # its possible then I need to create a list from dictionary and then sort by key value # solution use a library # - # Batteries are included no need to write our own counter from collections import Counter mycounter = Counter(words_lower) mycounter.most_common(10) type(mycounter.most_common(10)) # how to get only words 4 chars or longer ? :) long_words = [word for word in words_lower if len(word) >= 4 ] len(long_words) long_counter = Counter(long_words) long_counter.most_common(10) 'alus' in long_counter type(long_counter) long_counter.get('alus'), long_counter['alus'] #2nd would throw error if no beer existed # we only get 5 letter words here word_counter_5 = [mytuple for mytuple in long_counter.most_common() if mytuple[1] == 5] word_counter_5 import json with open('most_common.json', mode='w', encoding='utf-8') as fstream: json.dump(mycounter.most_common(), fstream, indent=2) # if we want to save our Latvian or other languages besides ENglish we set # turn off ascii # https://stackoverflow.com/questions/18337407/saving-utf-8-texts-in-json-dumps-as-utf8-not-as-u-escape-sequence with open('most_common.json', mode='w', encoding='utf-8') as fstream: json.dump(mycounter.most_common(), fstream, indent=2, ensure_ascii=False)
TextProcessing/CountingWords_05.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Exploring Ensemble methods import pandas as pd import numpy as np # ## Load LendingClub dataset loans = pd.read_csv('lending-club-data.csv') loans.columns # ## Modifying the target column loans['safe_loans'] = loans['bad_loans'].apply(lambda x : +1 if x==0 else -1) loans.drop('bad_loans',axis=1,inplace=True) # ## Selecting features target = 'safe_loans' features = ['grade', # grade of the loan (categorical) 'sub_grade_num', # sub-grade of the loan as a number from 0 to 1 'short_emp', # one year or less of employment 'emp_length_num', # number of years of employment 'home_ownership', # home_ownership status: own, mortgage or rent 'dti', # debt to income ratio 'purpose', # the purpose of the loan 'payment_inc_ratio', # ratio of the monthly payment to income 'delinq_2yrs', # number of delinquincies 'delinq_2yrs_zero', # no delinquincies in last 2 years 'inq_last_6mths', # number of creditor inquiries in last 6 months 'last_delinq_none', # has borrower had a delinquincy 'last_major_derog_none', # has borrower had 90 day or worse rating 'open_acc', # number of open credit accounts 'pub_rec', # number of derogatory public records 'pub_rec_zero', # no derogatory public records 'revol_util', # percent of available credit being used 'total_rec_late_fee', # total late fees received to day 'int_rate', # interest rate of the loan 'total_rec_int', # interest received to date 'annual_inc', # annual income of borrower 'funded_amnt', # amount committed to the loan 'funded_amnt_inv', # amount committed by investors for the loan 'installment', # monthly payment owed by the borrower ] # ## Skipping observations with missing values def onehot_transform(X, names=None,prefix_sep='.'): dummies_X = pd.get_dummies(X,prefix_sep=prefix_sep) if names is None: return dummies_X, dummies_X.columns.values else: return pd.DataFrame(dummies_X, columns=names).fillna(0) loans = loans[[target] + features].dropna() # ## Make sure the classes are balanced # + safe_loans_raw = loans[loans[target] == 1] risky_loans_raw = loans[loans[target] == -1] # Undersample the safe loans. percentage = len(risky_loans_raw)/float(len(safe_loans_raw)) safe_loans = safe_loans_raw.sample(frac = percentage, random_state = 1) risky_loans = risky_loans_raw loans_data = risky_loans.append(safe_loans) print("Percentage of safe loans :", len(safe_loans) / float(len(loans_data))) print("Percentage of risky loans :", len(risky_loans) / float(len(loans_data))) print("Total number of loans in our new dataset :", len(loans_data)) # - # ## Split data into training and validation sets train_idx = pd.read_json('module-8-assignment-1-train-idx.json',typ='series').values validation_idx = pd.read_json('module-8-assignment-1-validation-idx.json',typ='series').values train_data, validation_data = loans.iloc[train_idx],loans.iloc[validation_idx] train_data,names = onehot_transform(train_data,None) validation_set = onehot_transform(validation_data,names) features = names.tolist() features.remove(target) # ## Gradient boosted tree classifier # + from sklearn.ensemble import GradientBoostingClassifier model_5 = GradientBoostingClassifier(n_estimators = 5, max_depth=6,) model_5.fit(train_data[features],train_data[target]) # - # ## Making predictions # + # Select all positive and negative examples. 
validation_safe_loans = validation_set[validation_data[target] == 1] validation_risky_loans = validation_set[validation_data[target] == -1] # Select 2 examples from the validation set for positive & negative loans sample_validation_data_risky = validation_risky_loans[0:2] sample_validation_data_safe = validation_safe_loans[0:2] # Append the 4 examples into a single dataset sample_validation_data = sample_validation_data_safe.append(sample_validation_data_risky) sample_validation_data # - # ### Predicting on sample validation data model_5.predict(sample_validation_data[features]) model_5.predict_proba(sample_validation_data[features],)[:,1] # ## Evaluating the model on the validation data model_5.score(validation_set[features],validation_set[target]) from sklearn.metrics import confusion_matrix import matplotlib.pyplot as plt import itertools # %matplotlib inline def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') print(cm) thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, cm[i, j], horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') # + cnf_matrix = confusion_matrix(validation_set[target], model_5.predict(validation_set[features])) np.set_printoptions(precision=2) # Plot non-normalized confusion matrix plt.figure() plot_confusion_matrix(cnf_matrix, classes=validation_data[target].unique(), title='Confusion matrix, without normalization') # - 1652 1491 1491*10000+1652*20000 # ## Most positive and negative loans validation_data['predictions']= model_5.predict_proba(validation_set[features])[:,1] print("Your loans : %s\n" % validation_data['predictions'].head(4)) print("Expected answer : %s" % [0.4492515948736132, 0.6119100103640573, 0.3835981314851436, 0.3693306705994325]) validation_data.sort_values('predictions',ascending = False)['grade'][0:5] validation_data.sort_values('predictions',ascending = True)['grade'][0:5] # ## Effect of adding more trees model_10 = GradientBoostingClassifier(n_estimators=10,max_depth=6) model_10.fit(train_data[features],train_data[target]) model_50 = GradientBoostingClassifier(n_estimators=50,max_depth=6) model_50.fit(train_data[features],train_data[target]) model_100 = GradientBoostingClassifier(n_estimators=100,max_depth=6) model_100.fit(train_data[features],train_data[target]) model_200 = GradientBoostingClassifier(n_estimators=200,max_depth=6) model_200.fit(train_data[features],train_data[target]) model_500 = GradientBoostingClassifier(n_estimators=500,max_depth=6) model_500.fit(train_data[features],train_data[target]) print(model_10.score(validation_set[features],validation_set[target])) print(model_50.score(validation_set[features],validation_set[target])) print(model_100.score(validation_set[features],validation_set[target])) print(model_200.score(validation_set[features],validation_set[target])) print(model_500.score(validation_set[features],validation_set[target])) import 
matplotlib.pyplot as plt # %matplotlib inline def make_figure(dim, title, xlabel, ylabel, legend): plt.rcParams['figure.figsize'] = dim plt.title(title) plt.xlabel(xlabel) plt.ylabel(ylabel) if legend is not None: plt.legend(loc=legend, prop={'size':15}) plt.rcParams.update({'font.size': 16}) plt.tight_layout() train_err_10 = 1-model_10.score(train_data[features],train_data[target]) train_err_50 = 1-model_50.score(train_data[features],train_data[target]) train_err_100 = 1-model_100.score(train_data[features],train_data[target]) train_err_200 = 1-model_200.score(train_data[features],train_data[target]) train_err_500 = 1-model_500.score(train_data[features],train_data[target]) training_errors = [train_err_10, train_err_50, train_err_100, train_err_200, train_err_500] validation_err_10 = 1-model_10.score(validation_set[features],validation_set[target]) validation_err_50 = 1-model_50.score(validation_set[features],validation_set[target]) validation_err_100 = 1-model_100.score(validation_set[features],validation_set[target]) validation_err_200 = 1-model_200.score(validation_set[features],validation_set[target]) validation_err_500 = 1-model_500.score(validation_set[features],validation_set[target]) validation_errors = [validation_err_10, validation_err_50, validation_err_100, validation_err_200, validation_err_500] # + plt.plot([10, 50, 100, 200, 500], training_errors, linewidth=4.0, label='Training error') plt.plot([10, 50, 100, 200, 500], validation_errors, linewidth=4.0, label='Validation error') make_figure(dim=(10,5), title='Error vs number of trees', xlabel='Number of trees', ylabel='Classification error', legend='best') # -
Machine_Learning_WashingTon/Classification/Week5 Boosting/Exploring Ensemble Methods.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Load the LoanXCurrentMarket csv file # Work out the best 10 and worst 10 Loan based on the Spread to Maturity rating # Load all the daily csv files # Generate the trend for a selected Loan based on the Evaluated Price # - import pandas as pd import numpy as np # %matplotlib inline from matplotlib import pyplot as plt # load LoanXCurrentMarket_Download1 csv file to python memory loan = pd.read_csv('../sampledata/LoanXCurrentMarket_Download.csv') loan_update = loan.copy() loan_update.head(5) # + bool_series = pd.notnull(loan_update['Spread To Maturity']) # filtering data # displaying data only with Spread To Maturity != NaN loan_update[bool_series].head(5) # - # Best 10 Loans based on the 'Spread To Maturity' best_ten = loan_update[bool_series].sort_values('Spread To Maturity').head(10) # Worst 10 Loans based on the 'Spread To Maturity' worst_ten = loan_update[bool_series].sort_values('Spread To Maturity').tail(10) # Best 10 Loans Plot best_ten_plot = best_ten.plot.barh(x='LoanX ID', y='Spread To Maturity', rot=0) # Worst 10 Loans Plot worst_ten_plot = worst_ten.plot.barh(x='LoanX ID', y='Spread To Maturity', rot=0) # + # Import all the daily csv file import glob path = r'../sampledata/daily' # use your path all_files = glob.glob(path + "/*.csv") li = [] for filename in all_files: df = pd.read_csv(filename, index_col=None, header=0) li.append(df) frame = pd.concat(li, axis=0, ignore_index=True) # - # Pick one paticular Loan and remove the duplication entries LX010053 = frame[frame['LoanX ID']=='LX010053'].drop_duplicates() LX010053.head(100) # + # Generate trend for the selected loan fig, ax = plt.subplots(figsize=(15,7)) # use unstack() LX010053.groupby(['Close Date','LoanX ID']).count()['Evaluated Price'].unstack().plot(ax=ax)
scripts/.ipynb_checkpoints/LoanRate-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import sys sys.path.append('..') import pandas as pd import numpy as np from onehot import OneHotDummy # - # ## Load Data df = pd.read_csv("../data/train.csv") #df.describe() # ## Check it s = 'BsmtFullBath' obj = OneHotDummy(sparse=False, prefix=s) obj.fit(df[s]) #print(df[s].head()) df[s].describe() # ## Check 2 # + transformer = dict() cols = [ 'MSZoning', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'KitchenQual', 'Functional', 'FireplaceQu', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'Fence', 'MiscFeature', 'SaleType', 'SaleCondition', 'MSSubClass', 'MoSold', 'OverallQual', 'OverallCond', 'BsmtFullBath', 'BsmtHalfBath', 'FullBath', 'HalfBath', 'BedroomAbvGr', 'KitchenAbvGr', 'TotRmsAbvGrd', 'Fireplaces', 'GarageCars'] for i, s in enumerate(cols): obj = OneHotDummy(sparse=False, prefix=s) obj.fit(df[s]) transformer[s] = obj # - # ## Check 3 from grouplabelencode import grouplabelencode s = 'GarageCars' mapping = [1,2,3,4] encoded = grouplabelencode(df[s], mapping) pd.unique(encoded) # looks good now
examples/bugfix 0.1.2 from grouplabelencode.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:tf] * # language: python # name: conda-env-tf-py # --- # + import os import spectral import numpy as np import scipy.io as sio import matplotlib.pyplot as plt import keras from keras.layers import Conv2D, Conv3D, Flatten, Dense, Reshape, BatchNormalization from keras.layers import Dropout, Input from keras.models import Model from keras.optimizers import Adam from keras.callbacks import ModelCheckpoint from keras.utils import np_utils ### Drw Model figure---Model Visualization from keras.utils.vis_utils import plot_model from keras.callbacks import Callback,EarlyStopping from operator import truediv from plotly.offline import init_notebook_mode from sklearn.decomposition import PCA from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix, accuracy_score, classification_report, cohen_kappa_score, roc_auc_score init_notebook_mode(connected=True) #requirement for plot # %matplotlib inline ''' 魔法命令 %matplotlib inline 激活Matplotlib,为Ipython和Jupyter提供“内嵌后端”支持, 也就是作为一个静态图像嵌入Jupyer中,因此Matplotlib就不需要使用plt.show()来主动调用图像展示窗口 ''' % md # Data Loading #% ## GLOBAL VARIABLES # dataset1 = 'IP' # dataset2 = 'SA' # dataset3 = 'PU' dataset = 'IP' test_ratio = 0.7 windowSize = 25 # - #Load dataset def loadData(name): data_path = os.path.join(os.getcwd(),'data') #os.getcwd() if name == 'IP': data = sio.loadmat(os.path.join(data_path, 'Indian_pines_corrected.mat'))['indian_pines_corrected'] labels = sio.loadmat(os.path.join(data_path, 'Indian_pines_gt.mat'))['indian_pines_gt'] elif name == 'SA': data = sio.loadmat(os.path.join(data_path, 'Salinas_corrected.mat'))['salinas_corrected'] labels = sio.loadmat(os.path.join(data_path, 'Salinas_gt.mat'))['salinas_gt'] elif name == 'PU': data = sio.loadmat(os.path.join(data_path, 'PaviaU.mat'))['paviaU'] labels = sio.loadmat(os.path.join(data_path, 'PaviaU_gt.mat'))['paviaU_gt'] return data, labels # + code_folding=[] # Dataset split def splitTrainTestSet(X, y, testRatio, randomState=345): X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=testRatio, random_state=randomState, stratify=y) return X_train, X_test, y_train, y_test # - ### Using PCA for removing the spectral redundancy(冗余) ### The function for newX ??? def applyPCA(X, numComponents=75): newX = np.reshape(X, (-1, X.shape[2])) pca = PCA(n_components=numComponents, whiten=True) newX = pca.fit_transform(newX) newX = np.reshape(newX, (X.shape[0],X.shape[1], numComponents)) return newX, pca ### Pdding zeros def padWithZeros(X, margin=2): newX = np.zeros((X.shape[0] + 2 * margin, X.shape[1] + 2* margin, X.shape[2])) x_offset = margin y_offset = margin newX[x_offset:X.shape[0] + x_offset, y_offset:X.shape[1] + y_offset, :] = X return newX ### create data cube?? 
def createImageCubes(X, y, windowSize=5, removeZeroLabels = True): margin = int((windowSize - 1) / 2) zeroPaddedX = padWithZeros(X, margin=margin) # split patches patchesData = np.zeros((X.shape[0] * X.shape[1], windowSize, windowSize, X.shape[2])) patchesLabels = np.zeros((X.shape[0] * X.shape[1])) patchIndex = 0 for r in range(margin, zeroPaddedX.shape[0] - margin): for c in range(margin, zeroPaddedX.shape[1] - margin): patch = zeroPaddedX[r - margin:r + margin + 1, c - margin:c + margin + 1] patchesData[patchIndex, :, :, :] = patch patchesLabels[patchIndex] = y[r-margin, c-margin] patchIndex = patchIndex + 1 if removeZeroLabels: patchesData = patchesData[patchesLabels>0,:,:,:] patchesLabels = patchesLabels[patchesLabels>0] patchesLabels -= 1 return patchesData, patchesLabels X, y = loadData(dataset) X.shape, y.shape # |Dataset|DataShape|LabelShape| # |:----:|:----:|:----:| # |IP|(145, 145, 200)|(145, 145)| # |SA|(512, 217, 204)|(512, 217)| # |PU|(610, 340, 103)|(610, 340)| K = X.shape[2] K = 30 if dataset == 'IP' else 15 X,pca = applyPCA(X,numComponents=K) X.shape,pca # + X, y = createImageCubes(X, y, windowSize=windowSize) X.shape, y.shape # + # 3:7 Split Xtrain, Xtest, ytrain, ytest = splitTrainTestSet(X, y, test_ratio) Xtrain.shape, Xtest.shape, ytrain.shape, ytest.shape # + # 2:1 Split Xtrain, Xvalid, ytrain, yvalid = splitTrainTestSet(Xtrain, ytrain, 0.3333) Xtrain.shape, Xvalid.shape, ytrain.shape, yvalid.shape # - # # Model and Training Xtrain = Xtrain.reshape(-1, windowSize, windowSize, K, 1) Xtrain.shape ytrain = np_utils.to_categorical(ytrain) ytrain.shape # Xvalid = Xvalid.reshape(-1, windowSize, windowSize, K, 1) # Xvalid.shape # yvalid = np_utils.to_categorical(yvalid) # yvalid.shape S = windowSize L = K # IP SA:16 # PU:9 output_units = 9 if (dataset == 'PU' or dataset == 'PC') else 16 # + ## input layer input_layer = Input((S, S, L, 1)) ## convolutional layers ### filters---卷积核数;kernel_size---卷积核大小 conv_layer1 = Conv3D(filters=8, kernel_size=(3, 3, 7), activation='relu')(input_layer) conv_layer2 = Conv3D(filters=16, kernel_size=(3, 3, 5), activation='relu')(conv_layer1) conv_layer3 = Conv3D(filters=32, kernel_size=(3, 3, 3), activation='relu')(conv_layer2) ### AttributeError: 'KerasTensor' object has no attribute '_keras_shape' ### Try to use shape instead #print(conv_layer3._keras_shape) #conv3d_shape = conv_layer3._keras_shape conv3d_shape = conv_layer3.shape print("After three times convD,and before reshaping,\nKerasTensorShape:{}".format(conv3d_shape)) ### conv3D-->conv2D conv_layer3 = Reshape((conv3d_shape[1], conv3d_shape[2], conv3d_shape[3]*conv3d_shape[4]))(conv_layer3) print("After three times convD,and before reshaping,\nKerasTensorShape:{}".format(conv_layer3.shape)) conv_layer4 = Conv2D(filters=64, kernel_size=(3,3), activation='relu')(conv_layer3) ### Flatte层:将张量扁平化,即输入一维化,不影响张量大小. ### 常在Conv层和Dense层之间过渡. flatten_layer = Flatten()(conv_layer4) ## fully connected layers ### Dense层:全连接层. ### Dropout层:Dense层之后,防止过拟合,提高模型泛化性能. 
dense_layer1 = Dense(units=256, activation='relu')(flatten_layer) dense_layer1 = Dropout(0.4)(dense_layer1) dense_layer2 = Dense(units=128, activation='relu')(dense_layer1) dense_layer2 = Dropout(0.4)(dense_layer2) output_layer = Dense(units=output_units, activation='softmax')(dense_layer2) # - # define the model with input layer and output layer model = Model(inputs=input_layer, outputs=output_layer) # + model.summary() ### Model Visualization plot_model(model,to_file='ModelVisual.png',show_shapes=True) # - # compiling the model adam = Adam(lr=0.001, decay=1e-06) model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy']) # + pycharm={"name": "#%%\n"} ###Define personal Callback r model fitting class MyCallback(Callback): def __init__(self, predict_batch_size=256): super(MyCallback, self).__init__() self.predict_batch_size = predict_batch_size def on_batch_begin(self, batch, logs={}): pass def on_batch_end(self, batch, logs={}): pass def on_train_begin(self, logs={}): if not ('val_roc_auc' in self.params['metrics']): self.params['metrics'].append('val_roc_auc') def on_train_end(self, logs={}): pass def on_epoch_begin(self, epoch, logs={}): pass def on_epoch_end(self, epoch, logs={}): logs['roc_auc'] = float('-inf') if (self.validation_data): logs['roc_auc'] = roc_auc_score(self.validation_data[1], self.model.predict(self.validation_data[0], batch_size=self.predict_batch_size)) print('ROC_AUC - epoch:%d - score:%.6f' % (epoch + 1, logs['roc_auc'])) # + pycharm={"name": "#%%\n"} # checkpoint filepath = "best-model.hdf5" checkpoint = ModelCheckpoint(filepath, monitor='acc', verbose=1, save_best_only=True, mode='max') #callbacks_list = [checkpoint,MyCallback(),EarlyStopping(monitor='roc_auc', patience=20, verbose=2, mode='max')] callbacks_list = [checkpoint,EarlyStopping(monitor='accuracy', patience=20, verbose=2, mode='max')] #callbacks_list = [checkpoint] # + pycharm={"name": "#%%\n"} ###About 60 epochs to reach acceptable accuracy. 
history = model.fit(x=Xtrain, y=ytrain, batch_size=256, epochs=300, callbacks=callbacks_list) # + pycharm={"name": "#%%\n"} plt.figure(figsize=(7,7)) plt.grid() plt.plot(history.history['loss']) #plt.plot(history.history['val_loss']) plt.ylabel('Loss') plt.xlabel('Epochs') plt.legend(['Training','Validation'], loc='upper right') plt.savefig("loss_curve.pdf") plt.show() # + [markdown] pycharm={"name": "#%% md\n"} # plt.figure(figsize=(5,5)) # plt.ylim(0,1.1) # plt.grid() # plt.plot(history.history['acc']) # #plt.plot(history.history['val_acc']) # plt.ylabel('Accuracy') # plt.xlabel('Epochs') # plt.legend(['Training','Validation']) # plt.savefig("acc_curve.pdf") # plt.show() # - # # Validation # load best weights model.load_weights("best-model.hdf5") model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy']) Xtest = Xtest.reshape(-1, windowSize, windowSize, K, 1) Xtest.shape ytest = np_utils.to_categorical(ytest) ytest.shape # + Y_pred_test = model.predict(Xtest) y_pred_test = np.argmax(Y_pred_test, axis=1) classification = classification_report(np.argmax(ytest, axis=1), y_pred_test) print(classification) # - def AA_andEachClassAccuracy(confusion_matrix): counter = confusion_matrix.shape[0] list_diag = np.diag(confusion_matrix) list_raw_sum = np.sum(confusion_matrix, axis=1) each_acc = np.nan_to_num(truediv(list_diag, list_raw_sum)) average_acc = np.mean(each_acc) return each_acc, average_acc def reports (X_test,y_test,name): #start = time.time() Y_pred = model.predict(X_test) y_pred = np.argmax(Y_pred, axis=1) #end = time.time() #print(end - start) if name == 'IP': target_names = ['Alfalfa', 'Corn-notill', 'Corn-mintill', 'Corn' ,'Grass-pasture', 'Grass-trees', 'Grass-pasture-mowed', 'Hay-windrowed', 'Oats', 'Soybean-notill', 'Soybean-mintill', 'Soybean-clean', 'Wheat', 'Woods', 'Buildings-Grass-Trees-Drives', 'Stone-Steel-Towers'] elif name == 'SA': target_names = ['Brocoli_green_weeds_1','Brocoli_green_weeds_2','Fallow','Fallow_rough_plow','Fallow_smooth', 'Stubble','Celery','Grapes_untrained','Soil_vinyard_develop','Corn_senesced_green_weeds', 'Lettuce_romaine_4wk','Lettuce_romaine_5wk','Lettuce_romaine_6wk','Lettuce_romaine_7wk', 'Vinyard_untrained','Vinyard_vertical_trellis'] elif name == 'PU': target_names = ['Asphalt','Meadows','Gravel','Trees', 'Painted metal sheets','Bare Soil','Bitumen', 'Self-Blocking Bricks','Shadows'] classification = classification_report(np.argmax(y_test, axis=1), y_pred, target_names=target_names) oa = accuracy_score(np.argmax(y_test, axis=1), y_pred) confusion = confusion_matrix(np.argmax(y_test, axis=1), y_pred) each_acc, aa = AA_andEachClassAccuracy(confusion) kappa = cohen_kappa_score(np.argmax(y_test, axis=1), y_pred) score = model.evaluate(X_test, y_test, batch_size=32) Test_Loss = score[0]*100 Test_accuracy = score[1]*100 return classification, confusion, Test_Loss, Test_accuracy, oa*100, each_acc*100, aa*100, kappa*100 # + classification, confusion, Test_loss, Test_accuracy, oa, each_acc, aa, kappa = reports(Xtest,ytest,dataset) classification = str(classification) confusion = str(confusion) file_name = "classification_report.txt" with open(file_name, 'w') as x_file: x_file.write('{} Test loss (%)'.format(Test_loss)) x_file.write('\n') x_file.write('{} Test accuracy (%)'.format(Test_accuracy)) x_file.write('\n') x_file.write('\n') x_file.write('{} Kappa accuracy (%)'.format(kappa)) x_file.write('\n') x_file.write('{} Overall accuracy (%)'.format(oa)) x_file.write('\n') x_file.write('{} Average accuracy (%)'.format(aa)) 
x_file.write('\n') x_file.write('\n') x_file.write('{}'.format(classification)) x_file.write('\n') x_file.write('{}'.format(confusion)) # - def Patch(data,height_index,width_index): height_slice = slice(height_index, height_index+PATCH_SIZE) width_slice = slice(width_index, width_index+PATCH_SIZE) patch = data[height_slice, width_slice, :] return patch # load the original image X, y = loadData(dataset) height = y.shape[0] width = y.shape[1] PATCH_SIZE = windowSize numComponents = K X,pca = applyPCA(X, numComponents=numComponents) X = padWithZeros(X, PATCH_SIZE//2) # calculate the predicted image outputs = np.zeros((height,width)) for i in range(height): for j in range(width): target = int(y[i,j]) if target == 0 : continue else : image_patch=Patch(X,i,j) X_test_image = image_patch.reshape(1,image_patch.shape[0],image_patch.shape[1], image_patch.shape[2], 1).astype('float32') prediction = (model.predict(X_test_image)) prediction = np.argmax(prediction, axis=1) outputs[i][j] = prediction+1 ground_truth = spectral.imshow(classes = y,figsize =(7,7)) predict_image = spectral.imshow(classes = outputs.astype(int),figsize =(7,7)) spectral.save_rgb("predictions.jpg", outputs.astype(int), colors=spectral.spy_colors) # spectral.save_rgb(str(dataset)+"_ground_truth.jpg", y, colors=spectral.spy_colors)
.ipynb_checkpoints/Hybrid-Spectral-Net-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <p><font size="6"><b>01 - Pandas: Data Structures </b></font></p> # # # > *© 2016-2018, <NAME> and <NAME> (<mailto:<EMAIL>>, <mailto:<EMAIL>>). Licensed under [CC BY 4.0 Creative Commons](http://creativecommons.org/licenses/by/4.0/)* # # --- # + run_control={"frozen": false, "read_only": false} import pandas as pd # + run_control={"frozen": false, "read_only": false} # %matplotlib inline import numpy as np import matplotlib.pyplot as plt # - # # The pandas data structures: `DataFrame` and `Series` # # Pandas provides two fundamental data objects, for 1D (``Series``) and 2D data (``DataFrame``). # ## One-dimensional data: `Series` # # A Series is a basic holder for **one-dimensional labeled data**. It can be created much as a NumPy array is created: # + run_control={"frozen": false, "read_only": false} s = pd.Series([0.1, 0.2, 0.3, 0.4]) s # - # ### Attributes of a Series: `index` and `values` # # The series also has an **index**, which by default is the numbers *0* through *N - 1*: # + run_control={"frozen": false, "read_only": false} s.index # - # You can access the underlying numpy array representation with the `.values` attribute: # + run_control={"frozen": false, "read_only": false} s.values # - # We can access series values via the index, just like for NumPy arrays: # + run_control={"frozen": false, "read_only": false} s[0] # - # Unlike the NumPy array, though, this index can be something other than integers: # + run_control={"frozen": false, "read_only": false} s2 = pd.Series(np.arange(4), index=['a', 'b', 'c', 'd']) s2 # + run_control={"frozen": false, "read_only": false} s2['c'] # - # ### Pandas Series versus dictionaries # In this way, a ``Series`` object can be thought of as similar to an ordered dictionary mapping one typed value to another typed value. # # In fact, it's possible to construct a series directly from a Python dictionary: # + run_control={"frozen": false, "read_only": false} pop_dict = {'Germany': 81.3, 'Belgium': 11.3, 'France': 64.3, 'United Kingdom': 64.9, 'Netherlands': 16.9} population = pd.Series(pop_dict) population # - # We can index the populations like a dict as expected ... # + run_control={"frozen": false, "read_only": false} population['France'] # - # ... but with the power of numpy arrays. Many things you can do with numpy arrays, can also be applied on DataFrames / Series. # # Eg element-wise operations: # + run_control={"frozen": false, "read_only": false} population * 1000 # - # ## Two-dimensional data: `DataFrame` # A `DataFrame` is a **tabular data structure** (2D object to hold labelled data) comprised of rows and columns, akin to a spreadsheet, database table, or R's data.frame object. You can think of it as multiple Series objects which share the same index. # # <img align="left" width=50% src="../img/schema-dataframe.svg"> # For the examples here, we are going to create a small DataFrame with some data about a few countries. 
# # When creating a DataFrame manually, a common way to do this is from dictionary of arrays or lists: # + run_control={"frozen": false, "read_only": false} data = {'country': ['Belgium', 'France', 'Germany', 'Netherlands', 'United Kingdom'], 'population': [11.3, 64.3, 81.3, 16.9, 64.9], 'area': [30510, 671308, 357050, 41526, 244820], 'capital': ['Brussels', 'Paris', 'Berlin', 'Amsterdam', 'London']} countries = pd.DataFrame(data) countries # - # In practice, you will of course often import your data from an external source (text file, excel, database, ..), which we will see later. # # Note that in the IPython notebook, the dataframe will display in a rich HTML view. # You access a Series representing a column in the data, using typical `[]` indexing syntax and the column name: # + run_control={"frozen": false, "read_only": false} countries['area'] # - # ### Attributes of the DataFrame # # The DataFrame has a built-in concept of named rows and columns, the **`index`** and **`columns`** attributes: # + run_control={"frozen": false, "read_only": false} countries.index # - # By default, the index is the numbers *0* through *N - 1* # + run_control={"frozen": false, "read_only": false} countries.columns # - # To check the data types of the different columns: # + run_control={"frozen": false, "read_only": false} countries.dtypes # - # An overview of that information can be given with the `info()` method: # + run_control={"frozen": false, "read_only": false} countries.info() # - # A DataFrame has also a `values` attribute, but attention: when you have heterogeneous data, all values will be upcasted: # + run_control={"frozen": false, "read_only": false} countries.values # - # <div class="alert alert-info"> # # **NumPy** provides # # <ul> # <li>multi-dimensional, homogeneously typed arrays (single data type!)</li> # </ul> # <br> # # **Pandas** provides # # <ul> # <li>2D, heterogeneous data structure (multiple data types!)</li> # <li>labeled (named) row and column index</li> # </ul> # # # </div> # ## Some useful methods on these data structures # Exploration of the Series and DataFrame is essential (check out what you're dealing with). # + run_control={"frozen": false, "read_only": false} countries.head() # Top rows # + run_control={"frozen": false, "read_only": false} countries.tail() # Bottom rows # - # The ``describe`` method computes summary statistics for each numerical column: # + run_control={"frozen": false, "read_only": false} countries.describe() # - # **Sort**ing your data **by** a specific column is another important first-check: # + run_control={"frozen": false, "read_only": false} countries.sort_values(by='population') # - # The **`plot`** method can be used to quickly visualize the data in different ways: # + run_control={"frozen": false, "read_only": false} countries.plot() # - # However, for this dataset, it does not say that much: # + run_control={"frozen": false, "read_only": false} countries['population'].plot(kind='barh') # - # <div class="alert alert-success"> # <b>EXERCISE</b>: # # <ul> # <li>You can play with the `kind` keyword of the `plot` function in the figure above: 'line', 'bar', 'hist', 'density', 'area', 'pie', 'scatter', 'hexbin', 'box'</li> # </ul> # </div> # # Importing and exporting data # A wide range of input/output formats are natively supported by pandas: # # * CSV, text # * SQL database # * Excel # * HDF5 # * json # * html # * pickle # * sas, stata # * Parquet # * ... 
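# For example, writing and re-reading the small countries table follows the same pattern as every other format; the file name below is just an illustration.

# +
countries.to_csv("countries.csv", index=False)      # writer: DataFrame.to_<format>
countries_from_csv = pd.read_csv("countries.csv")   # reader: pd.read_<format>
countries_from_csv.head()
# -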
# + run_control={"frozen": false, "read_only": false} # pd.read_ # + run_control={"frozen": false, "read_only": false} # countries.to_ # - # <div class="alert alert-info"> # # # **Note: I/O interface** # # # <ul> # <li>All readers are `pd.read_...`</li> # <li>All writers are `DataFrame.to_...` </li> # </ul> # # # </div> # # Application on a real dataset # Throughout the pandas notebooks, many of exercises will use the titanic dataset. This dataset has records of all the passengers of the Titanic, with characteristics of the passengers (age, class, etc. See below), and an indication whether they survived the disaster. # # # The available metadata of the titanic data set provides the following information: # # VARIABLE | DESCRIPTION # ------ | -------- # survival | Survival (0 = No; 1 = Yes) # pclass | Passenger Class (1 = 1st; 2 = 2nd; 3 = 3rd) # name | Name # sex | Sex # age | Age # sibsp | Number of Siblings/Spouses Aboard # parch | Number of Parents/Children Aboard # ticket | Ticket Number # fare | Passenger Fare # cabin | Cabin # embarked | Port of Embarkation (C = Cherbourg; Q = Queenstown; S = Southampton) # # <div class="alert alert-success"> # <b>EXERCISE</b>: # # <ul> # <li>Read the CVS file (available at `../data/titanic.csv`) into a pandas DataFrame. Call the result `df`.</li> # </ul> # </div> # + clear_cell=true run_control={"frozen": false, "read_only": false} # # %load _solutions/pandas_01_data_structures1.py # - # <div class="alert alert-success"> # <b>EXERCISE</b>: # # <ul> # <li>Quick exploration: show the first 5 rows of the DataFrame.</li> # </ul> # </div> # + clear_cell=true run_control={"frozen": false, "read_only": false} # # %load _solutions/pandas_01_data_structures2.py # - # <div class="alert alert-success"> # <b>EXERCISE</b>: # # <ul> # <li>How many records (i.e. rows) has the titanic dataset?</li> # </ul> # </div> # + clear_cell=true run_control={"frozen": false, "read_only": false} # # %load _solutions/pandas_01_data_structures3.py # - # <div class="alert alert-success"> # <b>EXERCISE</b>: # # <ul> # <li>Select the 'Age' column (remember: we can use the [] indexing notation and the column label).</li> # </ul> # </div> # + clear_cell=true # # %load _solutions/pandas_01_data_structures4.py # - # <div class="alert alert-success"> # <b>EXERCISE</b>: # # <ul> # <li>Make a box plot of the Fare column.</li> # </ul> # </div> # + clear_cell=true # # %load _solutions/pandas_01_data_structures5.py # - # <div class="alert alert-success"> # <b>EXERCISE</b>: # # <ul> # <li>Sort the rows of the DataFrame by 'Age' column, with the oldest passenger at the top. Check the help of the `sort_values` function and find out how to sort from the largest values to the lowest values</li> # </ul> # </div> # + clear_cell=true # # %load _solutions/pandas_01_data_structures6.py # - # --- # # Acknowledgement # # # > This notebook is partly based on material of <NAME> (https://github.com/jakevdp/OsloWorkshop2014). #
Day_1_Scientific_Python/pandas/pandas_01_data_structures.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Hello
# Implement a program that prints a simple greeting for the user, as shown below.

# <img src='./img/ejercicio1.PNG'>

# ## Specification
# Create a file called <b>hello.py</b> containing a program that asks the user for their name and then prints <b>hello, "name"</b>, where "name" is the name provided.

# ## Usage
# Run your program as python hello.py and wait to be prompted for input. Type Emma and press enter. Your program should output hello, Emma.

# <img src='./img/ejercicio1.PNG'>

# ## Tests
# Make sure to test your code with each of the following.
#
# - Run your program as python hello.py and wait to be prompted for input. Type Emma and press enter. Your program should output hello, Emma.
# - Run your program as python hello.py and wait to be prompted for input. Type Rodrigo and press enter. Your program should output hello, Rodrigo.
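# One minimal way to satisfy the specification (a sketch; the exact prompt text is not fixed by the exercise) is shown below.

# +
# contents of hello.py
name = input("What's your name? ")
print("hello, " + name)
# -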
Modulo1/Ejercicios/.ipynb_checkpoints/Problema1-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Generalization and Evaluation
# Ref: https://www.youtube.com/playlist?list=PLBv09BD7ez_50pj5kcKFYee7QPcg3ImCV

# * The purpose of building predictors is to predict on future data
# * We want to do well on future data
# * It is easy to do well on the training data (with decision trees, kernel methods and kNN you can reach 100% training accuracy), but that does not mean you will do well on future data
# * A flexible model can fit the idiosyncrasies of the training data
# * Overfitting: when you create a predictor that fits your training data too well
#   * the predictor is too complex and flexible
#   * it fits noise in the training data
#   * it learns patterns that will not reappear
# * A predictor F overfits the data if we can find another predictor F' that makes more mistakes on the training data than F, but fewer mistakes on unseen data
# * A predictor underfits if it is too simple
# * A predictor F underfits the data if we can find another predictor F' that gives better accuracy (lower error) on both the training data and unseen data

# ![Under Over Fit](images/under_over_fit.png)

# * Algorithms have knobs (parameters) that can be tuned to control how tightly they fit
#   * Regression: order of the polynomial
#   * NB: number of attributes, limits on variance, epsilon
#   * DT: number of nodes in the tree, pruning confidence
#   * kNN: number of nearest neighbours
#   * SVM: kernel type, cost parameters
# * We need these knobs because different problem domains call for different amounts of flexibility
# * Training error: errors measured on the training data
# * Generalization error: errors measured on unseen data
#   * We don't know what the future classes or features will be
#   * But we do know what "range" they will come from, i.e. {x, y}
#   * e.g. for digit classification, x: all possible 20x20 black/white bitmaps, y: {0..9} digits

# # Estimating Generalization Error
#
# ![Gen Error](images/generalization_error.png)
#
# * Set aside a subset of the training data for testing
# * Learn a predictor without using any of the testing data
# * Calculate the error over the testing dataset
# * This gives an estimate of the true generalization error; it depends on how representative the testing data is of the future data
# * The more data you have in the testing set, the closer the testing error will be to the generalization error

# # Confidence interval of the future
# * What range of errors can we expect for future test sets?

# Incomplete ----------------
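# A small sketch of the two ideas above: estimate the generalization error on a held-out test set,
# and attach an approximate confidence interval to that estimate. The digits data here is only a
# stand-in example, and the interval uses the usual normal approximation to the binomial:

# +
import numpy as np
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

X, y = load_digits(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

clf = DecisionTreeClassifier(random_state=0).fit(X_train, y_train)  # unconstrained tree: ~100% training accuracy
train_error = 1 - clf.score(X_train, y_train)
test_error = 1 - clf.score(X_test, y_test)                          # estimate of the generalization error

n = len(y_test)
half_width = 1.96 * np.sqrt(test_error * (1 - test_error) / n)      # approximate 95% interval
print(f"training error {train_error:.3f}, test error {test_error:.3f} "
      f"+/- {half_width:.3f} (n = {n})")
# -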
meta_generalization_and_evaluation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import math import matplotlib import json import requests import time import numpy as np import pandas as pd import seaborn as sns import matplotlib.dates as dates from datetime import date, datetime, time, timedelta from matplotlib import pyplot as plt from pylab import rcParams from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error from sklearn.metrics import r2_score from tqdm import tqdm_notebook # %matplotlib inline # - pd.options.mode.chained_assignment = None # turn off warning message import misc_function2 as mf #request api of current daily stock of selected symbol funct = "TIME_SERIES_DAILY_ADJUSTED" sym = "BA" fromdt = "full" req_url = mf.stock_url(funct, sym, fromdt) print(req_url) #convert response to python json list data = requests.get(req_url).json()['Time Series (Daily)'] df = pd.DataFrame(data) df = df.T df.tail() # Rename column name with capitals #df = df['5. adjusted close'].drop #df = df['7. dividend amount'].drop #df = df['8. split coefficient'].drop del df['5. adjusted close'] del df['7. dividend amount'] del df['8. split coefficient'] df.columns = ['Open','High','Low','Close', 'Volume'] df = df[['Open','High','Low','Close', 'Volume']].stack().astype('float').unstack() # Convert Date column to datetime df.index = pd.to_datetime(df.index,format='%Y-%m-%d') df = df.sort_index() def SMA(values, n): """ Return simple moving average of `values`, at each step taking into account `n` previous values. """ return pd.Series(values).rolling(n).mean() # + from backtesting import Strategy from backtesting.lib import crossover class SmaCross(Strategy): # Define the two MA lags as *class variables* # for later optimization n1 = 15 n2 = 60 def init(self): # Precompute two moving averages self.sma1 = self.I(SMA, self.data.Close, self.n1) self.sma2 = self.I(SMA, self.data.Close, self.n2) def next(self): # If sma1 crosses above sma2, buy the asset if crossover(self.sma1, self.sma2): self.buy() # Else, if sma1 crosses below sma2, sell it elif crossover(self.sma2, self.sma1): self.position.close() from backtesting import Backtest bt = Backtest(df, SmaCross, cash=10000, commission=.002) bt.run() # - bt.plot() # ###
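# A small pandas-only cross-check of the crossover logic used by `SmaCross` above. This is a sketch:
# it assumes the `df` with the `Close` column built earlier and the same 15/60 windows:

# +
sma_fast = df['Close'].rolling(15).mean()
sma_slow = df['Close'].rolling(60).mean()

# a buy signal where the fast average crosses above the slow one, and the reverse for an exit
golden_cross = (sma_fast > sma_slow) & (sma_fast.shift(1) <= sma_slow.shift(1))
death_cross = (sma_fast < sma_slow) & (sma_fast.shift(1) >= sma_slow.shift(1))

print("first few golden-cross dates:", list(df[golden_cross].index[:5]))
# -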
.ipynb_checkpoints/Stock Manual Backtest-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Income prediction # # Recall that we have done a homework of data exploration on 'income.csv' to master the knowledge of Exploratory Data Analysis. In this homework, you are required to predict whether a person's income is high or low according to his relevant information including his age, education, occupation, race and so on. # # # The attribute information is: # # - **income**: the label of this dataset, belongs to \[high, low\] # - **age**: the age of a person, a continuous variable. # - **work_class**: work class, belongs to \[Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked\]. # - **education**: belongs to \[Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, - Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool\]. # - **education_degree**: the education level of a person, an ordinal number variable. # - **marital_status**: marital status, belongs to \[Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse\]. # - **job**: occupation, belongs to \[Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces\]. # - **relationship**: belongs to \[Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried\]. # - **race**: belongs to \[White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black\]. # - **sex**: belongs to \[Female, Male\]. # - **capital_gain**: capital gain, a continuous variable. # - **capital_loss**: capital loss, a continuous variable. # - **hours_per_week**: how long a person works every week, a continuous variable. # - **birthplace**: belongs to \[United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, - Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands\]. # Specifically, you are required to **fill the blanks of this notebook** based on your results. In this assignment, you will analyze how different features, models and hyper-parameters influence the performance. # ## 1. Load Data # + import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import sklearn # %matplotlib inline # # %config InlineBackend.figure_format = 'svg' # - df = pd.read_csv('income.csv') # ## 2. 
Exploratory Data Analysis # ### Take a brief look at the data using `head()` df.head() # ### Observe the basic statistical information of continuous attributes df.describe() # only describe the continuous variables # ### Count the NaN values df.isnull().sum() ### before # ### Remove NaN values due to small proportion to the whole dataset df = df.dropna() df.isnull().sum() ### after # ### Pick out categorical and continuous variables df.info() # ### Observe categorical attributes for col in df.select_dtypes([np.object]).columns: print('{}: {}\n'.format(col, df[col].unique())) # ### Merge values of similar semantics df.education.replace({ 'Preschool': 'dropout', '10th': 'dropout', '11th': 'dropout', '12th': 'dropout', '1st-4th': 'dropout', '5th-6th': 'dropout', '7th-8th': 'dropout', '9th': 'dropout', 'HS-Grad': 'HighGrad', 'HS-grad': 'HighGrad', 'Some-colloge': 'CommunityCollege', 'Assoc-acdm': 'CommunityCollege', 'Assoc-voc': 'CommunityCollege', 'Prof-school': 'Masters', }, inplace=True) # ## 3. Classification Models # + from sklearn.model_selection import train_test_split from sklearn import metrics # tentatively take 3 numerical attributes for convenience X = df[['education_degree', 'age', 'hours_per_week']].values Y = df[['income']].values # train, test split X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=88, stratify=Y) # - # ### KNN # + ## Example: Use KNN to predict income from sklearn.neighbors import KNeighborsClassifier knn = KNeighborsClassifier(n_neighbors=50) # change the shape of Y_train to (n_samples, ) using `.ravel()` knn.fit(X_train, Y_train.ravel()) knn_pred = knn.predict(X_test) print('The accuracy of the KNN is', metrics.accuracy_score(knn_pred, Y_test)) # - # ### Hyper-parameter tuning with `GridSearchCV()` # + from sklearn.model_selection import GridSearchCV param_grid = {'n_neighbors': np.arange(30, 70)} knn = KNeighborsClassifier() knn_cv = GridSearchCV(knn, param_grid, cv=5) # change the shape of Y_train to (n_samples, ) using `.ravel()` knn_cv.fit(X_train, Y_train.ravel()) print(knn_cv.best_params_) print(knn_cv.best_score_) # - # ### Your Tasks # # As far as you can see, we have built a KNN classification model and select the best hyper-parameters with `GridSearchCV()`. In this task, you are asked to build your own models using `scikit-learn` APIs. # # **Question 1 [10pts]**. Build a `Logistic Regression` model on training data and calculate accuracy over testing data. # # **Question 2 [10pts]**. Build a `Decision Tree` model on training data and calculate accuracy over testing data. # # **Question 3 [20pts]**. Use graphviz to visualize the decision tree of Question 2, and use a proper tool to visualize the decision boundary of the decision tree. # # **Question 4 [10pts]**. Build a `Random Forest` model with your customized parameters on training data and calculate accuracy over testing data. # # **Question 5 [20pts]**. For `Random Forest`, use `GridSearchCV()` to find the **optimal** hyper-parameter combination over: # - `n_estimator`: the number of trees in the forest # - `max_depth`: the maximum depth of the tree # - `max_leaf_nodes`: grow trees with ``max_leaf_nodes`` in best-first fashion. # # You should specify your own sets of values for these hyper-parameters. What's more, you are required to print the importance of each features of the dataset. # # (*tip: using the `feature_importances_` attributes of the `RandomForestClassifier()` as we have learned in class*) # # **Question 6 [10pts]**. 
Build a `AdaBoost` model on training data and calculate accuracy over testing data. # + # Question 1: Build a `Logistic Regression` model on training data and calculate accuracy over testing data. from sklearn.linear_model import LogisticRegression Lr = LogisticRegression(solver='liblinear') # change the shape of Y_train to (n_samples, ) using `.ravel()` Lr.fit(X_train, Y_train.ravel()) Lr_pred = Lr.predict(X_test) # print the accuracy (we can also use different kinds of solver to find the optimal one for this task) print('The accuracy of the Logistic Regression is', metrics.accuracy_score(Lr_pred, Y_test)) # + # Question 2: Build a `Decision Tree` model on training data and calculate accuracy over testing data. from sklearn import tree Tree = tree.DecisionTreeClassifier(criterion='gini') # train the model on the reaining set Tree.fit(X_train,Y_train.ravel()) # use the model to predict the values on test set Tree_pred = Tree.predict(X_test) # print the accuracy (we can also use different kinds criterion for this task - 'gini' & ''entropy) print('The accuracy of the Decision Tree is', metrics.accuracy_score(Tree_pred, Y_test)) # + # Question 3: Use graphviz to visualize the decision tree of Question 2, and use a proper tool to visualize the decision boundary of the decision tree. # # !pip install graphviz # # !pip install IPython # # !pip install pydotplus import graphviz from IPython.display import Image from sklearn import tree import pydotplus # There are two versions, I because of environment problems, I cannot visualize it, so I keep 2 versions # versoin 1 tree.export_graphviz(Tree) # versoin 2 # dot_data = tree.export_graphviz(Tree, out_file=None, #Tree is the classifier in Question #2 # feature_names=df.income, #name of corresponding features # class_names=df.capital_gain #name of corresponding classes # filled=True, rounded=True, # special_characters=True) # # defining the graph (maybe there are some problem with environment, and I have changed the # graph = pydotplus.graph_from_dot_data(dot_data) # graph.write_png('example.png') #save the image # Image(graph.create_png()) # + # Question 4: Build a `Random Forest` model with your customized parameters on training data and calculate accuracy over testing data. from sklearn.ensemble import RandomForestClassifier RF = RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=None, max_features='auto', max_leaf_nodes=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=100, n_jobs=1, oob_score=False, random_state=None, verbose=0, warm_start=False) # training the model RF.fit(X_train, Y_train.ravel()) # making predictions RF_pred = RF.predict(X_test) # print the accuracy (we can also use different combinition of parameters: criterion<gini...> & min_samples_leaf...) print('The accuracy of the Random Forest is', metrics.accuracy_score(RF_pred, Y_test)) # + # Question 5: Hyper-parameter serach over Random Forest and print feature importance list. 
# Search round 1 # Below are the initial round of training # We need to change the range of parameters step by step, to find the optimal ones # just like binary search, we need to narrow down the range gradually from sklearn.model_selection import GridSearchCV param_set = { 'n_estimators': range(90, 110, 5), 'max_depth': range(10,21,3), 'max_leaf_nodes': range(45,55,5), } # Gsearch = GridSearchCV( RF, param_grid = param_set, scoring='roc_auc', cv=5 ) RF = RandomForestClassifier() Gsearch = GridSearchCV( RF, param_grid = param_set, cv=5 ) Gsearch.fit(X_train, Y_train.ravel()) # Gsearch.grid_scores_, gsearch.best_params_, gsearch.best_score_ def print_best_score(gsearch,param_set): # print best score print("Best score: %0.3f" % gsearch.best_score_) print("Best parameters set:") # print the parameters best_parameters = gsearch.best_estimator_.get_params() for param_name in sorted(param_set.keys()): print("\t%s: %r" % (param_name, best_parameters[param_name])) print_best_score(Gsearch,param_set) # Output log # Best score: 0.795 # Best parameters set: # max_depth: 16 # max_leaf_nodes: 45 # n_estimators: 105 # + # Another round of training # I just did 2 rounds of searching, beacuase of my limited computing resources # each round of training takes about 50 mins on my PC # the method & strategy is clear, so the rest is not difficult, I will not further carry them out, due to the poor computing capacity from sklearn.model_selection import GridSearchCV param_set = { 'n_estimators': range(100, 111, 1), 'max_depth': range(13,22,1), 'max_leaf_nodes': range(35,45,2), } # Gsearch = GridSearchCV( RF, param_grid = param_set, scoring='roc_auc', cv=5 ) RF = RandomForestClassifier() Gsearch = GridSearchCV( RF, param_grid = param_set, cv=5 ) Gsearch.fit(X_train, Y_train.ravel()) # Gsearch.grid_scores_, gsearch.best_params_, gsearch.best_score_ def print_best_score(gsearch,param_set): # best score has improved 0.1%, compared with round # print("Best score: %0.3f" % gsearch.best_score_) print("Best parameters set:") # parameters best_parameters = gsearch.best_estimator_.get_params() for param_name in sorted(param_set.keys()): print("\t%s: %r" % (param_name, best_parameters[param_name])) print_best_score(Gsearch,param_set) # each time, we can check, if the output parameters are on the border of the range, if so, we need to expand the range in this direction # Best score: 0.796 # Best parameters set: # max_depth: 19 # max_leaf_nodes: 35 # n_estimators: 105 # + # Question 6: Build a `AdaBoost` model on training data and calculate accuracy over testing data. from sklearn.ensemble import AdaBoostClassifier # here the hyper-parameter is n_estimator, ew may as well take 100 Ada = AdaBoostClassifier(n_estimators=100, random_state=0) Ada.fit(X_train, Y_train.ravel()) Ada_pred = Ada.predict(X_test) # to improve performance, we can use loop to find the optimal parameter print('The accuracy of the Ada Boost is', metrics.accuracy_score(Ada_pred, Y_test)) # - # ## 4. 
Feature Engineering # # Before you start this part, we recommend you to read this [article](https://www.cnblogs.com/jasonfreak/p/5448385.html) # ### Using `LabelEncoder()`: map categorical features to [0, C) # + from sklearn.preprocessing import LabelEncoder encoded_df = df.apply(LabelEncoder().fit_transform) encoded_df.head() # - # ### Using `pandas.get_dummies()`: map categorical features into one-hot encoding # + cols = list(set(df.select_dtypes([np.object]).columns) - set(['income'])) onehot_df = pd.get_dummies(df, columns=cols) onehot_df.head() # - # The aforementioned machine learning models are built upon **3 distinct attributes** (`education_degree`, `age` and `hours_per_week`) with **10 more attributes unused**. You are required to utilize those unused columns using the feature engineering methods introduced above to address this issue.] # # **Question 7 [20pts]**. Compare the performance (accuracy) of different algorithms and different preprocessing methods on the dataset. Specifically, please fill the blanks in the table below: # # | Alg. | Original 3 columns | All columns with `LabelEncoder` | All columns with `OneHot` | # | :---: | :----: | :----: | :----: | # | Logistic Regression | &#xfeff; | &#xfeff; | &#xfeff; | # | Decision Tree | &#xfeff; | &#xfeff; | &#xfeff; | # | Random Forest | &#xfeff; | &#xfeff; | &#xfeff; | # | AdaBoost | &#xfeff; | &#xfeff; | &#xfeff; | # + # Question 7: Compare the performance (accuracy) of different algorithms and different preprocessing methods on the dataset from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.linear_model import LogisticRegression from sklearn import tree import warnings warnings.filterwarnings('ignore') encoded_feat = encoded_df.drop(columns=['income']).values encoded_labl = encoded_df[['income']].values encoded_X_train, encoded_X_test, encoded_Y_train, encoded_Y_test = train_test_split(encoded_feat, encoded_labl, test_size=0.3) onehot_feat = onehot_df.drop(columns=['income']).values onehot_labl = onehot_df[['income']].values onehot_X_train, onehot_X_test, onehot_Y_train, onehot_Y_test = train_test_split(onehot_feat, onehot_labl, test_size=0.3) X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3) LR = LogisticRegression() Tree = tree.DecisionTreeClassifier(criterion='gini') RF = RandomForestClassifier() Ada = AdaBoostClassifier() models = [LR, Tree, RF, Ada] model_names = ['LR', 'Tree', 'RF', 'Ada'] dataset = [[encoded_X_train, encoded_X_test, encoded_Y_train, encoded_Y_test], [onehot_X_train, onehot_X_test, onehot_Y_train, onehot_Y_test], [X_train, X_test, Y_train, Y_test]] for index in range(len(models)): model = models[index] name = model_names[index] for data in dataset: model.fit(data[0], data[2]) model_pred = model.predict(data[1]) print('The accuracy of {} is'.format(name), metrics.accuracy_score(model_pred, data[3])) print("The 3 accuracy are encoded, onehot and original in order.\n. To avoid filling in the table, I print them above") # -
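# Two small, hedged sketches for points left open above. First, Question 5 also asks to print the
# feature importances; assuming the fitted `Gsearch` from the last search round and the three
# numerical features used for `X`:

# +
best_rf = Gsearch.best_estimator_
for name, score in sorted(zip(['education_degree', 'age', 'hours_per_week'],
                              best_rf.feature_importances_), key=lambda t: -t[1]):
    print(f'{name}: {score:.3f}')
# -

# Second, one possible way to draw the decision boundary asked for in Question 3: train a separate,
# depth-limited tree (`tree2d`, a hypothetical name) on just two features so the boundary can be
# plotted on a 2-D grid. This is a sketch, not the classifier built in Question 2:

# +
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeClassifier

X2 = df[['age', 'hours_per_week']].values
y2 = LabelEncoder().fit_transform(df['income'])
tree2d = DecisionTreeClassifier(max_depth=4).fit(X2, y2)

xx, yy = np.meshgrid(np.linspace(X2[:, 0].min(), X2[:, 0].max(), 300),
                     np.linspace(X2[:, 1].min(), X2[:, 1].max(), 300))
zz = tree2d.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)

plt.contourf(xx, yy, zz, alpha=0.3)          # predicted class regions
plt.scatter(X2[:, 0], X2[:, 1], c=y2, s=2)   # actual samples
plt.xlabel('age')
plt.ylabel('hours_per_week')
plt.title('Decision tree boundary on two features')
plt.show()
# -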
DataScience/homework/hw7_tree&forest/hw7-tree&forest.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + # last edited Apr 12 # to do: something off with some timesteps being missed # - # ### Export Custom Time Series file for Ecosim / Ecospace # <NAME> # <br> Purpose: Export a hatchery forcing or time series file to .csv's or ASCII's for EWE # # - export the monthly timestep forcing file that Ecosim expects # - export the monthly timestep spatial forcing file that Ecospace expects # # Data In: # - HatcheryRel_TS_ForNextStep.csv - EPAD data from Carl (DFO / SEP)- from 'step 3' # # Data Out: # - CSV TBD <br> # - ASCII TBD <br> # # Notes: # - EPAD data from <NAME> and RMIS locations data from SOGDC # - Apr 12 - the average weight or the weight field is off! not sure why, more so with coho so I just went to EPAD data and got avg non-zero weight for 1980 - 1990 from Puntledge # - Apr 12 - the annual out should be average of monthly b_mt released! Not sum over yr # ## TOC: <a class="anchor" id="top"></a> # * [1. Read file](#section-1) # * [2. Fix dates / timesteps](#section-2) # * [3. Produce Ecosim TS file](#section-3) # * [4. Produce Ecospace TS file](#section-4) # * [5. Write to File](#section-5) # + import pandas as pd import numpy as np import datetime from dateutil.parser import parse # params start_year = 1950 end_year = 2020 aggregate_time = "year" # month or year aggregate_all_areas = "yes" # yes means aspatial aggregate_to_level = "species" # otherwise will use codes in EWE_GROUP_CODE species_groupcodes = pd.DataFrame(data = {'EWE_GROUP_CODE': ['CHINOOK-H-1','COHO-H-1'], 'SPECIES':['CHINOOK','COHO']}) study_area = 7000 # used to calculate biomass density (mt / km^2) # locations table from the SSMSP SOGDC (may have more lats / lons added than source at RMIS) localpath_in = "C:/Users/Greig/Sync/6. SSMSP Model/Model Greig/Data/1. Salmon/All Species Hatchery Releases/EPADHatcherReleasesGST" releases_df = pd.read_csv(localpath_in + "/MODIFIED/HatcheryRel_TS_ForNextstep.csv") # fix date releases_df['RELEASE_DATE'] = releases_df['release_avg_date'].astype('datetime64[ns]') releases_df['YEAR'] = pd.DatetimeIndex(releases_df['RELEASE_DATE']).year releases_df['MONTH'] = pd.DatetimeIndex(releases_df['RELEASE_DATE']).month releases_df['EWE_TIMESTEP'] = releases_df['MONTH'] + ((releases_df['YEAR'] - start_year) * 12) print(releases_df['BIOMASS_MT'].sum().round()) # Apr 12 2022 - fix mean weight of releases # something is wrong with avg_weight field. Coho are consistently released at # 20 g but cross-checks indicate weight from spreadsheet far too low. 
# (from EPAD Puntledge river releases, hatchery-reared, tossing avg_weight = 0) coho_weight = 0.020 # kg chin_weight = 0.0062 # kg releases_df.loc[releases_df['SPECIES_NAME']=='Chinook', 'BIOMASS_MT2'] = releases_df['TOTRELEASE_NO'] * chin_weight * 0.001 releases_df.loc[releases_df['SPECIES_NAME']=='Coho', 'BIOMASS_MT2'] = releases_df['TOTRELEASE_NO'] * coho_weight * 0.001 print(releases_df['BIOMASS_MT2'].sum().round()) releases_df # - # + if aggregate_all_areas == "yes": releases_df2 = releases_df.drop(['release_avg_date','FINAL_LAT','FINAL_LON','ROW_EWE','COL_EWE','SOURCE_ID','RELEASE_DATE'], axis=1) releases_df2 = releases_df2.groupby(['EWE_GROUP_CODE','SPECIES_NAME','EWE_TIMESTEP','YEAR', 'MONTH']).agg('sum').reset_index() releases_df = releases_df2 if aggregate_to_level == "species": releases_df2 = releases_df.drop(['EWE_GROUP_CODE'], axis=1) releases_df2 = releases_df2.groupby(['SPECIES_NAME','EWE_TIMESTEP','YEAR', 'MONTH']).agg('sum').reset_index() releases_df2['EWE_GROUP_CODE'] = releases_df2['SPECIES_NAME'] releases_df2 = pd.merge(releases_df2, species_groupcodes, on=['EWE_GROUP_CODE'], how='left') releases_df = releases_df2.drop(['SPECIES_NAME'], axis=1) releases_df['BIOMASS_MT2'] = releases_df['BIOMASS_MT2'] / study_area releases_df['BIOMASS_MT'] = releases_df['BIOMASS_MT'] / study_area releases_df # - # cross check for one year releases_df.loc[releases_df['YEAR']==1980].groupby(['EWE_GROUP_CODE','YEAR']).sum().reset_index() # + # round to 5 decimal places due to issues with floating point data # storage causing rounding to not work so # using decimal library https://stackoverflow.com/questions/56820/round-doesnt-seem-to-be-rounding-properly from decimal import * releases_df['BIOMASS_MT2']=releases_df['BIOMASS_MT2'].apply(lambda x: Decimal(str(x)).quantize(Decimal('.00001'), rounding=ROUND_UP)) releases_df['BIOMASS_MT']=releases_df['BIOMASS_MT'].apply(lambda x: Decimal(str(x)).quantize(Decimal('.00001'), rounding=ROUND_UP)) # add dummy variable containing all timesteps dummy = pd.Series(range(1,((end_year - start_year)*12))) dummy_df = (dummy.to_frame()) dummy_df['EWE_TIMESTEP'] = dummy_df[0] dummy_df['EWE_GROUP_CODE'] = "DUMMY" dummy_df['YEAR'] = (dummy_df['EWE_TIMESTEP'] // 12)+start_year dummy_df # - dummy_df = dummy_df[['EWE_GROUP_CODE','EWE_TIMESTEP','YEAR']] releases_df = releases_df.append(dummy_df, ignore_index = True) releases_df # + # ############################################################################### # For Ecosim ################################################################################# releasesEcosim = releases_df[['EWE_TIMESTEP','BIOMASS_MT2','EWE_GROUP_CODE','YEAR']] releasesEcosim = releasesEcosim.fillna(0) # sum by EWE_GROUP_CODE and timestep releasesEcosim = releasesEcosim.rename(columns={'EWE_TIMESTEP': 'TIMESTEP','EWE_GROUP_CODE': 'TITLE'}) # for timestep = monthly releasesEcosim_gp_mo = releasesEcosim.groupby(['TIMESTEP','TITLE','YEAR']).sum().reset_index() # pivot wide releasesEcosim_wide_mo = releasesEcosim_gp_mo.pivot_table( values=['BIOMASS_MT2'], index=['TIMESTEP', 'YEAR'], columns='TITLE', aggfunc=np.sum).reset_index() # reset the multilevel index via hack releasesEcosim_wide_mo['CHIN_H_MT'] = releasesEcosim_wide_mo[('BIOMASS_MT2', 'Chinook')].astype(float) releasesEcosim_wide_mo['COHO_H_MT'] = releasesEcosim_wide_mo[('BIOMASS_MT2', 'Coho')].astype(float) releasesEcosim_wide_mo['TIMESTEP'] = releasesEcosim_wide_mo[('TIMESTEP', '')].astype(float) releasesEcosim_wide_mo['YEAR'] = releasesEcosim_wide_mo[('YEAR', '')].astype(float) 
releasesEcosim_wide_mo = releasesEcosim_wide_mo[['YEAR','TIMESTEP','CHIN_H_MT','COHO_H_MT']] releasesEcosim_wide_mo.columns = [f"{x}_{y}" for x, y in releasesEcosim_wide_mo.columns.to_flat_index()] # releasesEcosim_wide = releasesEcosim_wide.drop(columns=[('BIOMASS_MT2', 'DUMMY')]) # fill NaNs with zeros (required by ecosim) releasesEcosim_wide_mo = releasesEcosim_wide_mo.fillna(0) #releasesEcosim_wide_mo = pd.DataFrame(releasesEcosim_wide_mo.to_records()) releasesEcosim_wide_mo #print(releasesEcosim_wide_mo.columns) # - # use average monthly for annual time series releasesEcosim_wide_yr = releasesEcosim_wide_mo.groupby(['YEAR_']).mean().reset_index() releasesEcosim_wide_yr = releasesEcosim_wide_yr[['YEAR_','CHIN_H_MT_','COHO_H_MT_']] # + # if aggregate_time == "year": # releasesEcosim_wide = releasesEcosim_wide.drop(columns="('TIMESTEP', '')", axis=1) # releasesEcosim_wide['Chinook'] = releasesEcosim_wide["('BIOMASS_MT2', 'Chinook')"].astype(float) # releasesEcosim_wide['Coho'] = releasesEcosim_wide["('BIOMASS_MT2', 'Coho')"].astype(float) # releasesEcosim_wide = releasesEcosim_wide.groupby("('YEAR', '')").mean().reset_index() # write to temp file releasesEcosim_wide_yr.to_csv(localpath_in + '/MODIFIED/temp_yr.csv', index=True) releasesEcosim_wide_mo.to_csv(localpath_in + '/MODIFIED/temp_mo.csv', index=True) # this repeats same avg value each month, for silly workaround repeated_yr_avg = pd.merge(releasesEcosim_wide_mo, releasesEcosim_wide_yr, on=['YEAR_'], how='left') repeated_yr_avg = repeated_yr_avg[['YEAR_','TIMESTEP_','CHIN_H_MT__y','COHO_H_MT__y']] repeated_yr_avg = repeated_yr_avg.rename(columns={'TIMESTEP_': 'TIMESTEP','YEAR_': 'YEAR', 'CHIN_H_MT__y': 'CHIN_H_MT', 'COHO_H_MT__y': 'COHO_H_MT'}) repeated_yr_avg.to_csv(localpath_in + '/MODIFIED/temp_yr_rep.csv', index=True) # =================================== # open temp file and insert header # =================================== #Title Combined_GST_FR_Escape_RelB_NuSEDS Chin_Hatch_RelB_CW Chin_1stYrM_1_CW Chin_1stYrM_2_CW Chin_C_Rel_CW #Weight 1 1 1 1 1 #Pool Code 14 18 16 15 14 #Type 0 0 5 5 61 #1979 11.26655002 3.84 3.449022245 3.449022245 0.35 #1980 11.07767237 6.93 3.021428984 3.021428984 0.371 #1981 11.23108247 8.75 3.354206073 3.354206073 0.2533 # codes for 'type' # relative biomass = 0 # absolute biomass = 1 # biomass forcing = -1 # fishing mortality = 4 # relative fishing mortality = 104 # total mortality = 5 # constant total mortality = -5 (forcing?) 
# catches = 6 # catches forcing = -6 # relative catches = 61 # average weight = 7 import copy f = open(localpath_in + '/MODIFIED/temp_yr.csv', "r") contents = f.readlines() f.close() line1 = contents[0].split(',') line1[0] = 'Title' line2 = copy.deepcopy(line1) line2[0] = 'Weight' i = 0 for line in line2: if i > 0: if i == (len(line2) - 1): line2[i] = '1\n' else: line2[i] = 1 i += 1 line3 = copy.deepcopy(line1) line3[0] = 'Type' i = 0 for line in line3: if i > 0: if i == (len(line3) - 1): line3[i] = '-1\n' else: line3[i] = -1 i += 1 line4 = copy.deepcopy(line1) line4[0] = 'Timestep' i = 0 for line in line4: if i > 0: if i == (len(line4) - 1): line4[i] = 'Interval\n' else: line4[i] = 'Interval' i += 1 s="" contents.insert(1,','.join(str(line) for line in line1)) contents.insert(2,','.join(str(line) for line in line2)) contents.insert(3, ','.join(str(line) for line in line3)) contents.insert(4, ','.join(str(line) for line in line4)) i = 0 with open(localpath_in + '/MODIFIED/HatcheryRel_Ecosim_TS_apr22_1.csv', 'w') as a_writer: for line in contents: if i > 0: a_writer.writelines(line) i += 1 f = open(localpath_in + '/MODIFIED/temp_yr_rep.csv', "r") contents = f.readlines() f.close() line1 = contents[0].split(',') line1[0] = 'Title' line2 = copy.deepcopy(line1) line2[0] = 'Weight' i = 0 for line in line2: if i > 0: if i == (len(line2) - 1): line2[i] = '1\n' else: line2[i] = 1 i += 1 line3 = copy.deepcopy(line1) line3[0] = 'Type' i = 0 for line in line3: if i > 0: if i == (len(line3) - 1): line3[i] = '-1\n' else: line3[i] = -1 i += 1 line4 = copy.deepcopy(line1) line4[0] = 'Timestep' i = 0 for line in line4: if i > 0: if i == (len(line4) - 1): line4[i] = 'Interval\n' else: line4[i] = 'Interval' i += 1 s="" contents.insert(1,','.join(str(line) for line in line1)) contents.insert(2,','.join(str(line) for line in line2)) contents.insert(3, ','.join(str(line) for line in line3)) contents.insert(4, ','.join(str(line) for line in line4)) i = 0 with open(localpath_in + '/MODIFIED/HatcheryRel_Ecosim_TS_apr22_3.csv', 'w') as a_writer: for line in contents: if i > 0: a_writer.writelines(line) i += 1 line1 = contents[0].split(',') line1[0] = 'Title' line2 = copy.deepcopy(line1) line2[0] = 'Weight' i = 0 for line in line2: if i > 0: if i == (len(line2) - 1): line2[i] = '1\n' else: line2[i] = 1 i += 1 line3 = copy.deepcopy(line1) line3[0] = 'Type' i = 0 for line in line3: if i > 0: if i == (len(line3) - 1): line3[i] = '-1\n' else: line3[i] = -1 i += 1 line4 = copy.deepcopy(line1) line4[0] = 'Timestep' i = 0 for line in line4: if i > 0: if i == (len(line4) - 1): line4[i] = 'Interval\n' else: line4[i] = 'Interval' i += 1 s="" contents.insert(1,','.join(str(line) for line in line1)) contents.insert(2,','.join(str(line) for line in line2)) contents.insert(3, ','.join(str(line) for line in line3)) contents.insert(4, ','.join(str(line) for line in line4)) i = 0 with open(localpath_in + '/MODIFIED/HatcheryRel_Ecosim_TS_apr22_2.csv', 'w') as a_writer: for line in contents: if i > 0: a_writer.writelines(line) i += 1 # - # ### Just junk below repeated_yr_avg = pd.merge(releasesEcosim_wide_mo, releasesEcosim_wide_yr, on=['YEAR_'], how='left') repeated_yr_avg[360:400] # + # check - by year to get annual est... 
releases_df3 = releases_df[['YEAR','EWE_GROUP_CODE','BIOMASS_MT']] # sum releases_df3 = releases_df3.groupby(['EWE_GROUP_CODE','YEAR']).agg('sum').reset_index() # mean #releases_df3 = releases_df3.groupby(['EWE_GROUP_CODE','YEAR']).agg('mean').reset_index() releases_df3.loc[releases_df3['EWE_GROUP_CODE']=='Chinook'] # - releases_df releasesEcosim_wide # + # # -
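# The three header-writing blocks above repeat the same logic; below is a sketch of a helper that
# could replace them (`write_ecosim_csv` is a hypothetical name, and the Type code is passed in so
# the same function covers other forcing types):

# +
def write_ecosim_csv(temp_path, out_path, ts_type=-1):
    """Prepend the four Ecosim header rows (Title / Weight / Type / Timestep)
    to a temp csv written above, dropping the original pandas header row."""
    with open(temp_path) as f:
        contents = f.readlines()
    title_row = contents[0].split(',')
    title_row[0] = 'Title'                        # keep the column names, relabel the index column
    n = len(title_row)
    weight_row = ['Weight'] + ['1'] * (n - 1)
    type_row = ['Type'] + [str(ts_type)] * (n - 1)
    interval_row = ['Timestep'] + ['Interval'] * (n - 1)
    header = [','.join(title_row)]                # title_row already ends with a newline
    header += [','.join(r) + '\n' for r in (weight_row, type_row, interval_row)]
    with open(out_path, 'w') as f:
        f.writelines(header)
        f.writelines(contents[1:])                # data rows, original header skipped

# usage, equivalent to the first block above (left commented out so the files above are not overwritten):
# write_ecosim_csv(localpath_in + '/MODIFIED/temp_yr.csv',
#                  localpath_in + '/MODIFIED/HatcheryRel_Ecosim_TS_apr22_1.csv')
# -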
notebooks/Data Prep - Salmon hatchery releases - 4 EWE TS (Py3).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="PbS4nN9WkSvj" import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold, cross_val_score from sklearn.metrics import accuracy_score, confusion_matrix from sklearn.neighbors import KNeighborsClassifier, NeighborhoodComponentsAnalysis, LocalOutlierFactor from sklearn.decomposition import PCA from sklearn.metrics import plot_confusion_matrix from sklearn.metrics import confusion_matrix import warnings warnings.filterwarnings("ignore") # + [markdown] id="5SrxUmTQk4so" # # Analyze Data # + colab={"base_uri": "https://localhost:8080/", "height": 313} id="1o_0WqlVkorj" outputId="609b6931-1fac-40c5-fe76-6d8a25516aa8" data = pd.read_csv("data.csv") data.drop(["id"], inplace = True, axis = 1) data["diagnosis"] = [1 if i.strip() == "M" else 0 for i in data.diagnosis] data = data.rename(columns = {"diagnosis":"target"}) sns.countplot(data["target"]) print("\nData shape", data.shape) describeData = data.describe().T # + colab={"base_uri": "https://localhost:8080/", "height": 232} id="C7pqWnlV37FJ" outputId="e4e71b6b-43bf-4d98-aa47-2370b896acc3" data.head() # + colab={"base_uri": "https://localhost:8080/", "height": 965} id="PUpWSoyR4JAW" outputId="c4a290e4-88f4-4dc2-ef38-c041d2cf00c4" data.describe().T # + colab={"base_uri": "https://localhost:8080/"} id="94d4fuAW4bQ9" outputId="9461f7a2-6f59-4903-f69a-a8b2ac98dd28" data.isnull().sum().any() # + [markdown] id="je4jAP-jk8QR" # # Correlation matrix # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="qZKXDeuAkouj" outputId="f7acff7a-837f-4a96-b4ca-2aaf5832ff4b" correlation_matrix = data.corr() mask = np.triu(np.ones_like(correlation_matrix, dtype=bool)) f, ax = plt.subplots(figsize=(25, 25)); # Generate a custom diverging colormap cmap = sns.diverging_palette(230, 20, as_cmap=True); # Draw the heatmap with the mask and correct aspect ratio sns.heatmap(correlation_matrix, mask=mask, cmap=cmap, annot = True, fmt = ".2f", square=True, linewidths=.5); # + [markdown] id="m2VakyvylMvP" # # Plots # + colab={"base_uri": "https://localhost:8080/", "height": 726} id="N3bOfKYjZJvI" outputId="e3b3d86a-b5ff-4a18-e008-acfa1f193755" #pairplot, kde = kernel density estimation #%% Observing variables that are at least 75% related to the Target Feature threshold = 0.75 filt = np.abs(correlation_matrix["target"]) > threshold corr_features = correlation_matrix.columns[filt].tolist() sns.pairplot(data[corr_features], diag_kind = "kde", markers = "+", hue ="target") plt.show() # + [markdown] id="V3tvM-gblXCD" # # Outliers # + id="YMBH5G4clKHZ" #%% Outlier outlier_threshold = -2 y = data.target x = data.drop(["target"], axis = 1) columns = x.columns.tolist() clf = LocalOutlierFactor() #-1 = outlier 1 = inlier outlier_predict = clf.fit_predict(x) feature_score = clf.negative_outlier_factor_ outlier_score = pd.DataFrame() outlier_score["score"] = feature_score filter_outlier = outlier_score["score"] < outlier_threshold outliers_index = outlier_score[filter_outlier].index.tolist() #drop outliers x = x.drop(outliers_index) y = y.drop(outliers_index).values # + [markdown] id="vxmzisFxldRK" # # Train-Test Split and Standardization # + 
id="a68gxNj3lKKd" # random state = the result of mixing is the same every time. x_train, x_test, y_train, y_test = train_test_split(x, y ,test_size = 0.3, random_state=42, stratify=y) # (x - mean) / std scaler = StandardScaler() x_train = scaler.fit_transform(x_train) x_test = scaler.transform(x_test) x_train_df = pd.DataFrame(x_train, columns = columns) x_train_df_describe = x_train_df.describe() x_train_df["target"] = y_train # + [markdown] id="SL27zRhe1ROc" # # Confusion Matrix Plot Function # # + id="rtA-5B6n0X7N" def plotConfusionMatrix(conf_matrix): group_names = ["True Negative","False Positive","False Negative","True Positive"] group_counts = ["{0:0.0f}".format(value) for value in conf_matrix.flatten()] labels = [f"{v1}\n{v2}" for v1, v2 in zip(group_names,group_counts)] labels = np.asarray(labels).reshape(2,2) ax= plt.subplot() sns.heatmap(conf_matrix, annot=labels,cmap='Blues', fmt="", linewidths=.5, ax = ax) ax.set_xlabel('Predicted labels'); ax.set_ylabel('True labels'); plt.show() print("\n\n") # + [markdown] id="CvHYC9Cflh3q" # # KNN # + colab={"base_uri": "https://localhost:8080/", "height": 707} id="i7ngXGkBlKM6" outputId="7a115475-7dc9-4bfd-b418-3021e76c37e0" knn = KNeighborsClassifier(n_neighbors = 7) knn.fit(x_train, y_train) y_pred_tr = knn.predict(x_train) y_pred = knn.predict(x_test) confusionMatrixTrain = confusion_matrix(y_train, y_pred_tr) confusionMatrix_test = confusion_matrix(y_test, y_pred) acc_tr = accuracy_score(y_train, y_pred_tr) acc_test = accuracy_score(y_test, y_pred) print("Test Score: {}, Train Score: {}\n".format(acc_test, acc_tr)) plt.title("Train Confusion Matrix") plotConfusionMatrix(confusionMatrixTrain) plt.title("Test Confusion Matrix") plotConfusionMatrix(confusionMatrix_test) # + [markdown] id="Nggs1nU_lmwj" # # Knn Tuned # + colab={"base_uri": "https://localhost:8080/", "height": 774} id="VNqrxoFvlKQD" outputId="9c7b1a70-2718-4ac7-ad72-36c192b52825" def knn_tune(x_train, x_test, y_train, y_test): k_range = list(range(3,31,2)) weight_options = ["uniform", "distance"] distance = [1,2] param_grid = dict(n_neighbors = k_range, weights = weight_options, p = distance) knn = KNeighborsClassifier() grid = GridSearchCV(knn, param_grid, cv = StratifiedKFold(n_splits=5, random_state=42), scoring = "f1", verbose = 1) grid.fit(x_train, y_train) print("Best training score: {} with parameters: {}".format(grid.best_score_, grid.best_params_)) knn = KNeighborsClassifier(**grid.best_params_) knn.fit(x_train, y_train) y_pred_test = knn.predict(x_test) y_pred_train = knn.predict(x_train) confusionMatrixTest = confusion_matrix(y_test, y_pred_test) confusionMatrixTrain = confusion_matrix(y_train, y_pred_train) accTest = accuracy_score(y_test, y_pred_test) accTrain = accuracy_score(y_train, y_pred_train) print("Test Score: {}, Train Score: {}\n".format(accTest, accTrain)) plt.title("Train Confusion Matrix") plotConfusionMatrix(confusionMatrixTrain) plt.title("Test Confusion Matrix") plotConfusionMatrix(confusionMatrixTest) return grid grid = knn_tune(x_train, x_test, y_train, y_test) # + [markdown] id="CK3NiUOa-IfV" # # Scaled data # + id="fs7wZwpZ-HjL" x_scaled = scaler.fit_transform(x) # + [markdown] id="0oyXqDnm9xeW" # # PCA # + colab={"base_uri": "https://localhost:8080/", "height": 607} id="6YphnYN09ze3" outputId="65918fa4-5da6-453b-eb81-5dba81bfec25" #PCA can give values like 3-4. The variance described will be increased. 
pca = PCA(n_components = 2) pca.fit(x_scaled) x_reduced_features = pca.transform(x_scaled) #x = p1, y = p2, hue = target feature, The 2 variables with the highest variance are plotted. plt.subplots(figsize=(10, 10)) sns.scatterplot(x = x_reduced_features[:,0], y = x_reduced_features[:,1], hue = y) plt.title("PCA Plot"); # + colab={"base_uri": "https://localhost:8080/"} id="MpluE1so-sGT" outputId="a02563d7-46cf-4e5a-c2f0-94d2df07c29b" #variance ratio explained by the variables print(pca.explained_variance_ratio_) #sum of variance ratio explained by the variables print(sum(pca.explained_variance_ratio_)) # + colab={"base_uri": "https://localhost:8080/", "height": 774} id="xn2TYgvO9zb8" outputId="d652d519-520f-432e-ae6e-d1ab1ed67ca3" #pca train an prediction x_train_pca, x_test_pca, y_train_pca, y_test_pca = train_test_split(x_reduced_features, y ,test_size = 0.3, random_state=42, stratify=y) grid_pca = knn_tune(x_train_pca, x_test_pca, y_train_pca, y_test_pca) # + [markdown] id="6l02N08Alt8q" # # NCA # + colab={"base_uri": "https://localhost:8080/", "height": 621} id="kYV597YTwT9K" outputId="2f56d623-dddf-47a0-a0c7-eee184b4196b" nca = NeighborhoodComponentsAnalysis(n_components = 2, random_state = 42) x_reduced_nca = nca.fit(x_scaled,y).transform(x_scaled) nca_data = pd.DataFrame(x_reduced_nca, columns = ["p1", "p2"]) nca_data["target"] = y plt.subplots(figsize=(10, 10)) sns.scatterplot(x = nca_data[:]["p1"], y = nca_data[:]["p2"], hue = y) plt.title("NCA plot"); # + colab={"base_uri": "https://localhost:8080/", "height": 774} id="2PZNh2zglqVY" outputId="3e2ffd8e-86c9-47fd-eb25-856a81c61bd4" x_train_nca, x_test_nca, y_train_nca, y_test_nca = train_test_split(x_reduced_nca, y ,test_size = 0.3, random_state=42, stratify=y) grid_nca = knn_tune(x_train_nca, x_test_nca, y_train_nca, y_test_nca)
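# A further sketch (an addition, not part of the original flow): wrapping scaling, PCA and KNN in a
# single scikit-learn Pipeline keeps all preprocessing inside the cross-validation folds of the grid
# search, so nothing is fitted on test data. It assumes the `x` and `y` defined after outlier removal.
from sklearn.pipeline import Pipeline

pipe = Pipeline([
    ("scale", StandardScaler()),
    ("pca", PCA()),
    ("knn", KNeighborsClassifier()),
])
pipe_params = {
    "pca__n_components": [2, 5, 10],
    "knn__n_neighbors": list(range(3, 31, 2)),
    "knn__weights": ["uniform", "distance"],
}
x_tr, x_te, y_tr, y_te = train_test_split(x, y, test_size=0.3, random_state=42, stratify=y)
grid_pipe = GridSearchCV(pipe, pipe_params, cv=StratifiedKFold(n_splits=5), scoring="f1")
grid_pipe.fit(x_tr, y_tr)
print("best params:", grid_pipe.best_params_)
print("test accuracy:", accuracy_score(y_te, grid_pipe.predict(x_te)))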
Brest_Cancer_Prediction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Keras `IMDB` dataset. # * This is a dataset of 25,000 movies reviews from IMDB, labeled by sentiment (positive/negative). Reviews have been preprocessed, and each review is encoded as a list of word indexes (integers). import tensorflow as tf import numpy as np from tensorflow.keras import datasets from tensorflow import keras dir(datasets) # > Loading the data. imdb = datasets.imdb.load_data(num_words=10000) (X_train, y_train),(X_test, y_test) = imdb print(X_train[0]) y_train[:2] # > Let's join the train and test sets. X = np.concatenate([X_train, X_test]) y = np.concatenate([y_train, y_test]) X, y # > So the `X_train[0]` is just a list of integers that doesn't make sense to a human for now, but we can say it # is a positive review about the movie according to the label we get. word_indices = datasets.imdb.get_word_index() word_indices # > Let's create a function that decords integers lists into sentences. word_indices_reversed = dict([(value, key) for (key, value) in word_indices.items()]) word_indices_reversed def decord(sent): INDEX_FROM=3 return " ".join([word_indices_reversed.get(i - INDEX_FROM, '#') for i in sent[0]])+"..." decord([X[1]]) # > Lets create a function that will encode a given sentence to `word_embedings_list`. def encode(sent): pass # > "Data preparation". # > We want to preapare the sentences to have a same width. This is sometimes called `pad_sequencing` we are just make all sentences to have the same width by trancating long sentencs and appending 0 to shorter sentences. def vectorize(sequences, dim=10000): res = np.zeros((len(sequences), dim)) for i, seq in enumerate(sequences): res[i, seq] = 1 return res X_data = vectorize(X) X_data[0], len(X_data[0]), len(X_data[1]) # > Converting the `X_data` and `y` to tensorflow_tensors. X_tensors = tf.convert_to_tensor(X_data) y_tensors = tf.convert_to_tensor(y) y_tensors, X_tensors, y_tensors.shape, X_tensors.shape # > Creating a `Functional NN` # ### `CNN` for sentiment classification. vocabulary_size = len(word_indices_reversed) model = keras.Sequential([ keras.layers.Embedding(vocabulary_size, 100, trainable= False, input_length = 10000 ), keras.layers.Conv1D(128, 5, activation='relu'), keras.layers.GlobalMaxPooling1D(), keras.layers.Dense(1, activation="sigmoid") ]) model.compile( loss = keras.losses.BinaryCrossentropy(from_logits=False), metrics=["acc"] ) model.summary() model.fit( X_tensors, y_tensors, epochs=2, validation_split=.3, batch_size=256 ) predictions = model.predict(X_tensors[:5]) # + predictions= tf.squeeze(tf.round(predictions)) predictions, y_tensors[:5] # -
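# The `encode` helper above was left as `pass`; here is a minimal sketch of one possible
# implementation (named `encode_sent` to keep it separate). It mirrors `decord` by re-applying the
# `INDEX_FROM = 3` offset, and uses the conventional Keras IMDB markers 1 (start) and 2 (unknown):

# +
def encode_sent(sent, num_words=10000):
    INDEX_FROM = 3
    ids = [1]                                # start-of-review marker
    for w in sent.lower().split():
        idx = word_indices.get(w)
        if idx is not None and idx + INDEX_FROM < num_words:
            ids.append(idx + INDEX_FROM)     # same shift that decord undoes
        else:
            ids.append(2)                    # out-of-vocabulary marker
    return ids

decord([encode_sent("this movie was brilliant")])
# -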
tf-rnn/01_IMDB_dataset/.ipynb_checkpoints/02_Sentiment_Analysis_IMDB_CNN-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="Cx58k_4qXxDg" colab_type="code" colab={} import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # + id="mAE95WohX-O4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="12776d04-16bc-4993-9699-185d3787bcf0" executionInfo={"status": "ok", "timestamp": 1583222194387, "user_tz": -60, "elapsed": 587, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "01376058965038047201"}} # cd "/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/dw_martix_car/" # + id="2phBMrq9YIAo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="89a2e3e0-087c-47a5-f010-e92c9c04d534" executionInfo={"status": "ok", "timestamp": 1583222197991, "user_tz": -60, "elapsed": 1913, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "01376058965038047201"}} # !pwd # + id="R35f0jddYWDA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="c4e75e24-5582-4807-cf41-89d876e2c634" executionInfo={"status": "ok", "timestamp": 1583222201078, "user_tz": -60, "elapsed": 2080, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "01376058965038047201"}} # ls data/car.h5 # + id="1zgzd_jaYe64" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="2d6d0ee8-755f-4702-f6a0-888f2c927104" executionInfo={"status": "ok", "timestamp": 1583222204253, "user_tz": -60, "elapsed": 2203, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "01376058965038047201"}} df = pd.read_hdf('data/car.h5') df.shape # + id="EgtplDzfYp5P" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="c2d7507b-da73-4128-90d4-2e5ac4f7dfe0" executionInfo={"status": "ok", "timestamp": 1583222229045, "user_tz": -60, "elapsed": 526, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "01376058965038047201"}} df.columns.values # + id="ln9j8iM-ZgWP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="d3d96dd3-ad7c-40bc-9884-8b34683009ed" executionInfo={"status": "ok", "timestamp": 1583222682107, "user_tz": -60, "elapsed": 817, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "01376058965038047201"}} df['price_value'].hist(bins=100); # + id="Be9lQuAObKId" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="8416537d-9f13-46a6-9f7a-e1d031e88a92" executionInfo={"status": "ok", "timestamp": 1583222709240, "user_tz": -60, "elapsed": 582, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "01376058965038047201"}} df['price_value'].max() # + id="QrDhD4gVbVku" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 186} outputId="17eba4ec-cfa2-4448-c6ed-ee7db878e07f" executionInfo={"status": "ok", "timestamp": 1583222722750, "user_tz": -60, "elapsed": 503, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "01376058965038047201"}} df['price_value'].describe() # + id="f1LgIjfcbY4-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 337} outputId="cc0223e7-f164-44b4-ee42-4943852b9671" executionInfo={"status": "ok", "timestamp": 1583222827914, "user_tz": -60, "elapsed": 466, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "01376058965038047201"}} 
df['param_marka-pojazdu'].unique() # + id="GVNekYQUbyk-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 600} outputId="da8a6713-37b7-443f-dbe7-54329c233209" executionInfo={"status": "ok", "timestamp": 1583223652531, "user_tz": -60, "elapsed": 2443, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "01376058965038047201"}} ( df. groupby('param_marka-pojazdu')['price_value'] .agg([np.mean,np.median,np.size]) .sort_values(by='size',ascending = False) .head(50) ).plot(kind='bar', figsize=(20,8), subplots=True); # + id="OeZIRn_8cYBv" colab_type="code" colab={} def group_and_barplot(feat_groupby, feat_agg='price_value', agg_funcs=[np.mean,np.median,np.size],feat_sort='mean',top=50,subplots=True): return ( df .groupby(feat_groupby)[feat_agg] .agg(agg_funcs) .sort_values(by=feat_sort,ascending = False) .head(top) ).plot(kind='bar', figsize=(15,7), subplots=subplots) # + id="0fZpuvTMgnz6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 546} outputId="107f1a47-3d6e-46b3-f33a-f9e905f21e81" executionInfo={"status": "ok", "timestamp": 1583226390026, "user_tz": -60, "elapsed": 2383, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "01376058965038047201"}} group_and_barplot('param_marka-pojazdu'); # + id="zOwtOK0zg0kU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 565} outputId="098c4329-3191-4567-ea77-563843f49430" executionInfo={"status": "ok", "timestamp": 1583226454859, "user_tz": -60, "elapsed": 2087, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "01376058965038047201"}} group_and_barplot('param_kraj-pochodzenia',feat_sort='size'); # + id="qkyB6O82o-P8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 519} outputId="d910799c-fa35-480f-c16a-bd9c4bbcbdbc" executionInfo={"status": "ok", "timestamp": 1583226473609, "user_tz": -60, "elapsed": 1492, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "01376058965038047201"}} group_and_barplot('param_kolor',feat_sort='mean'); # + id="xPUx0qnxpsYo" colab_type="code" colab={}
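# An extra sketch (not in the original notebook): the linear-scale histogram above is dominated by
# a long right tail (compare the max with the describe() output), so a log-scale view of
# price_value is often easier to read. Assumes `df`, `np` and `plt` as already loaded.
np.log1p(df['price_value']).hist(bins=100);
plt.xlabel('log(1 + price_value)');
plt.title('price_value on a log scale');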
day2_visualisation.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.6.0 # language: julia # name: julia-1.6 # --- # ## Background # # [**Symbolics.jl**](https://github.com/JuliaSymbolics/Symbolics.jl) is a fast and modern Computer Algebra System (CAS) written in the Julia Programming Language. It is an integral part of the [SciML](https://sciml.ai/) ecosystem of differential equation solvers and scientific machine learning packages. While **Symbolics.jl** is primarily designed for modern scientific computing (e.g., auto-differentiation, machine learning), it is a powerful CAS and can also be useful for *classic* scientific computing. One such application is using the *perturbation* theory to solve algebraic and differential equations. # # Perturbation methods are a collection of techniques to solve intractable problems that generally don't have a closed solution but depend on a tunable parameter and have closed or easy solutions for some values of the parameter. The main idea is to assume a solution as a power series in the tunable parameter (say $ϵ$), such that $ϵ = 0$ corresponds to an easy solution. # # We will discuss the general steps of the perturbation methods to solve algebraic (this tutorial) and differential equations (*Mixed Symbolic/Numerical Methods for Perturbation Theory - Differential Equations*). # # The hallmark of the perturbation method is the generation of long and convoluted intermediate equations, which are subjected to algorithmic and mechanical manipulations. Therefore, these problems are well suited for CAS. In fact, CAS softwares have been used to help with the perturbation calculations since the early 1970s. # # In this tutorial our goal is to show how to use a mix of symbolic manipulations (**Symbolics.jl**) and numerical methods (**DifferentialEquations.jl**) to solve simple perturbation problems. # # ## Solving the Quintic # # We start with the "hello world!" analog of the perturbation problems, solving the quintic (fifth-order) equations. We want to find a real valued $x$ such that $x^5 + x = 1$. According to the Abel's theorem, a general quintic equation does not have a closed form solution. Of course, we can easily solve this equation numerically; for example, by using the Newton's method. We use the following implementation of the Newton's method: # + using Symbolics, SymbolicUtils function solve_newton(f, x, x₀; abstol=1e-8, maxiter=50) xₙ = Float64(x₀) fₙ₊₁ = x - f / Symbolics.derivative(f, x) for i = 1:maxiter xₙ₊₁ = substitute(fₙ₊₁, Dict(x => xₙ)) if abs(xₙ₊₁ - xₙ) < abstol return xₙ₊₁ else xₙ = xₙ₊₁ end end return xₙ₊₁ end # - # In this code, `Symbolics.derivative(eq, x)` does exactly what it names implies: it calculates the symbolic derivative of `eq` (a **Symbolics.jl** expression) with respect to `x` (a **Symbolics.jl** variable). We use `Symbolics.substitute(eq, D)` to evaluate the update formula by substituting variables or sub-expressions (defined in a dictionary `D`) in `eq`. It should be noted that `substitute` is the workhorse of our code and will be used multiple times in the rest of these tutorials. `solve_newton` is written with simplicity and clarity, and not performance, in mind but suffices for our purpose. # # Let's go back to our quintic. We can define a Symbolics variable as `@variables x` and then solve the equation `solve_newton(x^5 + x - 1, x, 1.0)` (here, `x₀ = 0` is our first guess). 
The answer is 0.7549. Now, let's see how we can solve the same problem using the perturbation methods. # # We introduce a tuning parameter $\epsilon$ into our equation: $x^5 + \epsilon x = 1$. If $\epsilon = 1$, we get our original problem. For $\epsilon = 0$, the problem transforms to an easy one: $x^5 = 1$ which has an exact real solution $x = 1$ (and four complex solutions which we ignore here). We expand $x$ as a power series on $\epsilon$: # # $$ # x(\epsilon) = a_0 + a_1 \epsilon + a_2 \epsilon^2 + O(\epsilon^3) # \,. # $$ # # $a_0$ is the solution of the easy equation, therefore $a_0 = 1$. Substituting into the original problem, # # $$ # (a_0 + a_1 \epsilon + a_2 \epsilon^2)^5 + \epsilon (a_0 + a_1 \epsilon + a_2 \epsilon^2) - 1 = 0 # \,. # $$ # # Expanding the equations, we get # $$ # \epsilon (1 + 5 a_1) + \epsilon^2 (a_1 + 5 a_2 + 10 a1_2) + 𝑂(\epsilon^3) = 0 # \,. # $$ # # This equation should hold for each power of $\epsilon$. Therefore, # # $$ # 1 + 5 a_1 = 0 # \,, # $$ # # and # # $$ # a_1 + 5 a_2 + 10 a_1^2 = 0 # \,. # $$ # # This system of equations does not initially seem to be linear because of the presence of terms like $10 a_1^2$, but upon closer inspection is found to be in fact linear (this is a feature of the perturbation methods). In addition, the system is in a triangular form, meaning the first equation depends only on $a_1$, the second one on $a_1$ and $a_2$, such that we can replace the result of $a_1$ from the first one into the second equation and remove the non-linear term. We solve the first equation to get $a_1 = -\frac{1}{5}$. Substituting in the second one and solve for $a_2$: # # $$ # a_2 = \frac{(-\frac{1}{5} + 10(-(\frac{1}{5})²)}{5} = -\frac{1}{25} # \,. # $$ # # Finally, # # $$ # x(\epsilon) = 1 - \frac{\epsilon}{5} - \frac{\epsilon^2}{25} + O(\epsilon^3) # \,. # $$ # # Solving the original problem, $x(1) = 0.76$, compared to 0.7548 calculated numerically. We can improve the accuracy by including more terms in the expansion of $x$. However, the calculations, while straightforward, become messy and intractable to do manually very quickly. This is why a CAS is very helpful to solve perturbation problems. # # Now, let's see how we can do these calculations in Julia. Let $n$ be the order of the expansion. We start by defining the symbolic variables: n = 2 @variables ϵ a[1:n] # Then, we define x = 1 + a[1]*ϵ + a[2]*ϵ^2 # The next step is to substitute `x` in the problem equation eq = x^5 + ϵ*x - 1 # The expanded form of `eq` is expand(eq) # We need a way to get the coefficients of different powers of `ϵ`. Function `collect_powers(eq, x, ns)` returns the powers of variable `x` in expression `eq`. Argument `ns` is the range of the powers. function collect_powers(eq, x, ns; max_power=100) eq = substitute(expand(eq), Dict(x^j => 0 for j=last(ns)+1:max_power)) eqs = [] for i in ns powers = Dict(x^j => (i==j ? 1 : 0) for j=1:last(ns)) push!(eqs, substitute(eq, powers)) end eqs end # To return the coefficients of $ϵ$ and $ϵ^2$ in `eq`, we can write eqs = collect_powers(eq, ϵ, 1:2) # A few words on how `collect_powers` works, It uses `substitute` to find the coefficient of a given power of `x` by passing a `Dict` with all powers of `x` set to 0, except the target power which is set to 1. For example, the following expression returns the coefficient of `ϵ^2` in `eq`, substitute(expand(eq), Dict( ϵ => 0, ϵ^2 => 1, ϵ^3 => 0, ϵ^4 => 0, ϵ^5 => 0, ϵ^6 => 0, ϵ^7 => 0, ϵ^8 => 0) ) # Back to our problem. 
Having the coefficients of the powers of `ϵ`, we can set each equation in `eqs` to 0 (remember, we rearranged the problem such that `eq` is 0) and solve the system of linear equations to find the numerical values of the coefficients. **Symbolics.jl** has a function `Symbolics.solve_for` that can solve systems of linear equations. However, the presence of higher-order terms in `eqs` prevents `Symbolics.solve_for(eqs .~ 0, a)` from working properly. Instead, we can exploit the fact that our system is in a triangular form and start by solving `eqs[1]` for `a₁`, then substitute this in `eqs[2]` and solve for `a₂` (and continue the same process for higher-order terms). This *cascading* process is done by function `solve_coef(eqs, ps)`: function solve_coef(eqs, ps) vals = Dict() for i = 1:length(ps) eq = substitute(eqs[i], vals) vals[ps[i]] = Symbolics.solve_for(eq ~ 0, ps[i]) end vals end # Here, `eqs` is an array of expressions (assumed to be equal to 0) and `ps` is an array of variables. The result is a dictionary of *variable* => *value* pairs. We apply `solve_coef` to `eqs` to get the numerical values of the parameters: solve_coef(eqs, a) # Finally, we substitute back the values of `a` in the definition of `x` as a function of `𝜀`. Note that `𝜀` is a number (usually Float64), whereas `ϵ` is a symbolic variable. X = 𝜀 -> 1 + a[1]*𝜀 + a[2]*𝜀^2 # Therefore, the solution to our original problem becomes `X(1)`, which is equal to 0.76. We can use larger values of `n` to improve the accuracy of the estimate. # # | n | x | # |---|----------------| # |1 |0.8 | # |2 |0.76| # |3 |0.752| # |4 |0.752| # |5 |0.7533| # |6 |0.7543| # |7 |0.7548| # |8 |0.7550| # # Remember that the numerical value is 0.7549. The two functions `collect_powers` and `solve_coef` are used in all the examples in this and the next tutorial. # # ## Solving Kepler's Equation # # Historically, the perturbation methods were first invented to solve the orbital calculations of the Moon and the planets. In homage to this history, our second example has a celestial theme. Our goal is to solve Kepler's equation: # # $$ # E - e\sin(E) = M # \,. # $$ # # where $e$ is the *eccentricity* of the elliptical orbit, $M$ is the *mean anomaly*, and $E$ (unknown) is the *eccentric anomaly* (the angle between the position of a planet in an elliptical orbit and the point of periapsis). This equation is central to solving two-body Keplerian orbits. # # Similar to the first example, it is easy to solve this problem using Newton's method. For example, let $e = 0.01671$ (the eccentricity of the Earth) and $M = \pi/2$. Here, `solve_newton(x - e*sin(x) - M, x, M)` returns 1.5875 (compared to π/2 = 1.5708). Now, we try to solve the same problem using the perturbation techniques (see function `test_kepler`). # # For $e = 0$, we get $E = M$. Therefore, we can use $e$ as our perturbation parameter. For consistency with other problems, we also rename $e$ to $\epsilon$ and $E$ to $x$. # # From here on, we use the helper function `def_taylor` to define Taylor's series by calling it as `x = def_taylor(ϵ, a, 1)`, where the arguments are, respectively, the perturbation variable, an array of coefficients (starting from the coefficient of $\epsilon^1$), and an optional constant term.
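# For example, with `n = 3` coefficients and the constant term `M` (the case used below), `def_taylor(ϵ, a, M)` builds the ansatz
#
# $$
# x(\epsilon) = M + a_1 \epsilon + a_2 \epsilon^2 + a_3 \epsilon^3
# \,.
# $$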
def_taylor(x, ps) = sum([a*x^i for (i,a) in enumerate(ps)]) def_taylor(x, ps, p₀) = p₀ + def_taylor(x, ps) # We start by defining the variables (assuming `n = 3`): n = 3 @variables ϵ M a[1:n] x = def_taylor(ϵ, a, M) # We further simplify by substituting `sin` with its power series using the `expand_sin` helper function: expand_sin(x, n) = sum([(isodd(k) ? -1 : 1)*(-x)^(2k-1)/factorial(2k-1) for k=1:n]) # To test, expand_sin(0.1, 10) ≈ sin(0.1) # The problem equation is eq = x - ϵ * expand_sin(x, n) - M # We follow the same process as the first example. We collect the coefficients of the powers of `ϵ` eqs = collect_powers(eq, ϵ, 1:n) # and then solve for `a`: vals = solve_coef(eqs, a) # Finally, we substitute `vals` back in `x`: x′ = substitute(x, vals) X = (𝜀, 𝑀) -> substitute(x′, Dict(ϵ => 𝜀, M => 𝑀)) X(0.01671, π/2) # The result is 1.5876, compared to the numerical value of 1.5875. It is customary to order `X` based on the powers of `𝑀` instead of `𝜀`. We can calculate this series as `collect_powers(sol, M, 0:3) # `. The result (after manual cleanup) is # # ``` # (1 + 𝜀 + 𝜀^2 + 𝜀^3)*𝑀 # - (𝜀 + 4*𝜀^2 + 10*𝜀^3)*𝑀^3/6 # + (𝜀 + 16*𝜀^2 + 91*𝜀^3)*𝑀^5/120 # ``` # # Comparing the formula to the one for 𝐸 in the [Wikipedia article on the Kepler's equation](https://en.wikipedia.org/wiki/Kepler%27s_equation): # # $$ # E = \frac{1}{1-\epsilon}M # -\frac{\epsilon}{(1-\epsilon)^4} \frac{M^3}{3!} + \frac{(9\epsilon^2 # + \epsilon)}{(1-\epsilon)^7}\frac{M^5}{5!}\cdots # $$ # # The first deviation is in the coefficient of $\epsilon^3 M^5$.
notebook/perturbation/01-perturbation_algebraic.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # How load and visualise test cases # # This notebook shows how to load the test cases available via `src.data_loading.test_data()`. It then builds a graph and visualises it. # %load_ext autoreload # %autoreload 2 # %config IPCompleter.greedy=True # + import folium from src.data_loading import test_data from src.models import graph_tools, visualisation # - test_gdf = test_data.get_polygon_gdf("chernobyl_squares_touching") test_graph = graph_tools.create_nx_graph(test_gdf) m = visualisation.create_graph_visualisation( polygon_gdf=test_gdf, color_column='id', graph=test_graph, name="squares", folium_tile_list=['OpenStreetMap','esri'] ) m = visualisation.add_cez_to_map(m) folium.LayerControl().add_to(m) m
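# To share the interactive result outside the notebook, the map can also be
# written to a standalone HTML file (an optional step; it assumes `m` is the
# `folium.Map` built above, and the file name here is just an example).

m.save("test_cases_map.html")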
notebooks/exploratory/rdnfn-2-test-cases-in-action.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Go from exponential to Poisson # # # Also look to: <NAME>, <NAME>, <NAME>. Tractable nonparametric Bayesian inference in Poisson processes with Gaussian process intensities. Proceedings of the 26th Annual International Conference on Machine Learning; Montreal, Quebec, Canada. 1553376: ACM; 2009. p. 9-16. # # # Some thoughts 20171018 # # * Poisson process under the hood, so the time between is Exponential # * We can then derive the probability of missing a count due to time based on the probability between # * Can we then use this to figure out how many were likely missed? # # # # + # %matplotlib inline from pprint import pprint import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import pymc3 as mc import spacepy.toolbox as tb import spacepy.plot as spp import tqdm from scipy import stats import seaborn as sns sns.set(font_scale=1.5) # matplotlib.pyplot.rc('figure', figsize=(10,10)) # matplotlib.pyplot.rc('lines', lw=3) # matplotlib.pyplot.rc('font', size=20) # %matplotlib inline # - # ## Generate Poisson process data and generate exponential # For each interval choose $n$ events from a Poisson. Then draw from a uniform the location in the interval for each of the events. np.random.seed(8675309) nT = 400 cts = np.random.poisson(20, size=nT) edata = [] for i in range(nT): edata.extend(i + np.sort(np.random.uniform(low=0, high=1, size=cts[i]))) edata = np.asarray(edata) edata.shape plt.plot(edata, np.arange(len(edata))) plt.xlabel('Time of event') plt.ylabel('Event number') plt.title("Modeled underlying data") with mc.Model() as model: lam = mc.Uniform('lambda', 0, 1000) # this is the exponential parameter meas = mc.Exponential('meas', lam, observed=np.diff(edata)) lam2 = mc.Uniform('lam2', 0, 1000) poi = mc.Poisson('Poisson', lam2, observed=cts) start = mc.find_MAP() trace = mc.sample(10000, start=start, njobs=8) mc.traceplot(trace, combined=True, lines={'lambda':20, 'lam2':20}) mc.summary(trace) fig, ax = plt.subplots(ncols=1, nrows=2, sharex=True) sns.distplot(trace['lambda'], ax=ax[0]) sns.distplot(trace['lam2'], ax=ax[1]) plt.xlabel('Lambda') ax[0].set_ylabel('Exp') ax[1].set_ylabel('Poisson') ax[0].axvline(20, c='r', lw=1) ax[1].axvline(20, c='r', lw=1) plt.tight_layout() # This is consistent with a Poisson of parameter 20! But there seems to be an under prediction going on, wonder why? # # Go through Posterior Predictive Checks (http://docs.pymc.io/notebooks/posterior_predictive.html) and see if we are reprodicting the mean and variance. 
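# Before the posterior predictive checks, a quick analytic sanity check (a
# sketch using the `edata` and `cts` arrays generated above): for a homogeneous
# Poisson process with rate 20 per unit time, the inter-event gaps should be
# Exponential(20) with mean 1/20 = 0.05, and the per-interval counts should
# average 20.

# +
gaps = np.diff(edata)
print("mean gap between events:", gaps.mean(), "(expect ~0.05)")
print("mean count per unit interval:", cts.mean(), "(expect ~20)")
# -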
ppc = mc.sample_ppc(trace, samples=500, model=model, size=100) ax = plt.subplot() sns.distplot([n.mean() for n in ppc['Poisson']], kde=False, ax=ax) ax.axvline(cts.mean()) ax.set(title='Posterior predictive of the mean (Poisson)', xlabel='mean(x)', ylabel='Frequency'); ax = plt.subplot() sns.distplot([n.var() for n in ppc['Poisson']], kde=False, ax=ax) ax.axvline(cts.var()) ax.set(title='Posterior predictive of the variance (Poisson)', xlabel='var(x)', ylabel='Frequency'); ax = plt.subplot() sns.distplot([n.mean() for n in ppc['meas']], kde=False, ax=ax) ax.axvline(np.diff(edata).mean()) ax.set(title='Posterior predictive of the mean (Exponential)', xlabel='mean(x)', ylabel='Frequency'); ax = plt.subplot() sns.distplot([n.var() for n in ppc['meas']], kde=False, ax=ax) ax.axvline(np.diff(edata).var()) ax.set(title='Posterior predictive of the variance (Exponential)', xlabel='var(x)', ylabel='Frequency'); # We are reproducing the mean and variance well. # ## Given the data we generated, which will be treated as truth, what would we measure with various dead times, and does the correction match what we think it should? # # The correction should look like $n_1 = \frac{R_1}{1-R_1 \tau}$, where $n_1$ is the real rate, $R_1$ is the observed rate, and $\tau$ is the dead time. # # Take `edata` from above and step through from beginning to end, only keeping points that are at least a dead time away from the previous point. # + deadtime1 = 0.005 # small dead time deadtime2 = 0.1 # large dead time edata_td1 = [] edata_td1.append(edata[0]) edata_td2 = [] edata_td2.append(edata[0]) for ii, v in enumerate(edata[1:], 1): # skip the first point (already kept); start enumerate at 1 if v - edata_td1[-1] >= deadtime1: edata_td1.append(v) if v - edata_td2[-1] >= deadtime2: edata_td2.append(v) edata_td1 = np.asarray(edata_td1) edata_td2 = np.asarray(edata_td2) # + plt.figure(figsize=(8,6)) plt.plot(edata, np.arange(len(edata)), label='Real data') plt.plot(edata_td1, np.arange(len(edata_td1)), label='Small dead time') plt.plot(edata_td2, np.arange(len(edata_td2)), label='Large dead time') plt.xlabel('Time of event') plt.ylabel('Event number') plt.title("Modeled underlying data") plt.legend(bbox_to_anchor=(1, 1)) # - # ### And plot the rates per unit time # + plt.figure(figsize=(8,6)) h1, b1 = np.histogram(edata, np.arange(1000)) plt.plot(tb.bin_edges_to_center(b1), h1, label='Real data', c='k') h2, b2 = np.histogram(edata_td1, np.arange(1000)) plt.plot(tb.bin_edges_to_center(b2), h2, label='Small dead time', c='r') h3, b3 = np.histogram(edata_td2, np.arange(1000)) plt.plot(tb.bin_edges_to_center(b3), h3, label='Large dead time') plt.legend(bbox_to_anchor=(1, 1)) plt.xlim((0,400)) plt.ylabel('Rate') plt.xlabel('Time') # - # ## Can we use $n_1 = \frac{R_1}{1-R_1 \tau}$ to derive the relation and the spread in the distribution of $R_1$? 
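# To spell out the inversion: multiplying both sides of $n_1 = \frac{R_1}{1-R_1 \tau}$ by $1 - R_1 \tau$ gives $n_1 - n_1 R_1 \tau = R_1$, i.e. $R_1 (1 + n_1 \tau) = n_1$.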
# # Algebra changes the math to: $R_1=\frac{n_1}{1+n_1\tau}$ # ### Use the small dead time # + # assume R1 is Poisson with mc.Model() as model: tau = deadtime1 obsRate = mc.Uniform('obsRate', 0, 1000, shape=1) obsData = mc.Poisson('obsData', obsRate, observed=h2[:400], shape=1) realRate = mc.Deterministic('realRate', obsData/(1-obsData*tau)) start = mc.find_MAP() trace = mc.sample(10000, start=start, njobs=8) # - mc.traceplot(trace, combined=True, varnames=('obsRate', )) mc.summary(trace, varnames=('obsRate', )) # + sns.distplot(trace['realRate'].mean(axis=0), bins=10) plt.xlabel('realRate') plt.ylabel('Density') dt1_bounds = np.percentile(trace['realRate'], (2.5, 50, 97.5)) print('The estimate of the real rate given that we know the dead time is:', dt1_bounds, (dt1_bounds[2]-dt1_bounds[0])/dt1_bounds[1]) dat_bounds = np.percentile(h1[:400], (2.5, 50, 97.5)) print("This compares with if we measured without dead time as:", dat_bounds, (dat_bounds[2]-dat_bounds[0])/dat_bounds[1]) # - # ### Use the large dead time # + # assume R1 is Poisson with mc.Model() as model: tau = deadtime2 obsRate = mc.Uniform('obsRate', 0, 1000) obsData = mc.Poisson('obsData', obsRate, observed=h3[:400]) realRate = mc.Deterministic('realRate', obsData/(1-obsData*tau)) start = mc.find_MAP() trace = mc.sample(10000, start=start, njobs=8) # - mc.traceplot(trace, combined=True, varnames=('obsRate', )) mc.summary(trace, varnames=('obsRate', )) # + sns.distplot(trace['realRate'].mean(axis=0)) plt.xlabel('realRate') plt.ylabel('Density') dt2_bounds = np.percentile(trace['realRate'], (2.5, 50, 97.5)) print('The estimate of the real rate given that we know the dead time is:', dt2_bounds, (dt2_bounds[2]-dt2_bounds[0])/dt2_bounds[1]) dat_bounds = np.percentile(h1[:400], (2.5, 50, 97.5)) print("This compares with if we measured without dead time as:", dat_bounds, (dat_bounds[2]-dat_bounds[0])/dat_bounds[1]) # - # But this is totally broken!!! # # Output data files for each # + real = pd.Series(edata) td1 = pd.Series(edata_td1) td2 = pd.Series(edata_td2) real.to_csv('no_deadtime_times.csv') td1.to_csv('small_deadtime_times.csv') td2.to_csv('large_deadtime_times.csv') # + real = pd.Series(h1[h1>0]) td1 = pd.Series(h2[h2>0]) td2 = pd.Series(h3[h3>0]) real.to_csv('no_deadtime_rates.csv') td1.to_csv('small_deadtime_rates.csv') td2.to_csv('large_deadtime_rates.csv') # - # # Work on the random thoughts # + with mc.Model() as model: BoundedExp = mc.Bound(mc.Exponential, lower=deadtime2, upper=None) # we observe the following time between counts lam = mc.Uniform('lam', 0, 1000) time_between = BoundedExp('tb_ob', lam, observed=np.diff(edata_td2)) start = mc.find_MAP() trace = mc.sample(10000, njobs=8, start=start) # -
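# Returning to the dead-time relation above, a quick numerical cross-check (a
# sketch; it reuses `h1`, `h2`, `h3`, `deadtime1`, and `deadtime2` from the
# cells above): for a true rate n1, a non-paralyzable dead time tau should give
# an observed rate of roughly R1 = n1 / (1 + n1 * tau).

# +
n1 = h1[:400].mean()  # "true" rate estimated from the undistorted data, ~20
for tau, h_obs, label in [(deadtime1, h2, 'small'), (deadtime2, h3, 'large')]:
    predicted = n1 / (1 + n1 * tau)
    print('{} dead time: predicted rate {:.2f}, measured rate {:.2f}'.format(
        label, predicted, h_obs[:400].mean()))
# -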
Counting/Poisson and exponential.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # #!pip install -U tf-nightly-2.0-preview # - import numpy as np import matplotlib.pyplot as plt import tensorflow as tf from tensorflow import keras def plot_series(time, series, format="-", start=0, end=None, label=None): plt.plot(time[start:end], series[start:end], format, label=label) plt.xlabel("Time") plt.ylabel("Value") if label: plt.legend(fontsize=14) plt.grid(True) # Trend and Seasonality def trend(time, slope=0): return slope * time # Let's create a time series that just trends upward: # + time = np.arange(4 * 365 + 1) baseline = 10 series = trend(time, 0.1) plt.figure(figsize=(10, 6)) plot_series(time, series) plt.show() # - # Now let's generate a time series with a seasonal pattern: # + def seasonal_pattern(season_time): """Just an arbitrary pattern, you can change it if you wish""" return np.where(season_time < 0.4, np.cos(season_time * 2 * np.pi), 1 / np.exp(3 * season_time)) def seasonality(time, period, amplitude=1, phase=0): """Repeats the same pattern at each period""" season_time = ((time + phase) % period) / period return amplitude * seasonal_pattern(season_time) # + baseline = 10 amplitude = 40 series = seasonality(time, period=365, amplitude=amplitude) plt.figure(figsize=(10, 6)) plot_series(time, series) plt.show() # - # Now let's create a time series with both trend and seasonality: # + slope = 0.05 series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude) plt.figure(figsize=(10, 6)) plot_series(time, series) plt.show() # - # NOISE # In practice few real-life time series have such a smooth signal. They usually have some noise, and the signal-to-noise ratio can sometimes be very low. Let's generate some white noise: def white_noise(time, noise_level=1, seed=None): rnd = np.random.RandomState(seed) return rnd.randn(len(time)) * noise_level # + noise_level = 5 noise = white_noise(time, noise_level, seed=42) plt.figure(figsize=(10, 6)) plot_series(time, noise) plt.show() # - # Now let's add this white noise to the time series: # + series += noise plt.figure(figsize=(10, 6)) plot_series(time, series) plt.show() # - # All right, this looks realistic enough for now. Let's try to forecast it. We will split it into two periods: the training period and the validation period (in many cases, you would also want to have a test period). The split will be at time step 1000. 
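# As a quick reference point for anything we fit later, a naive "persistence"
# forecast (predict each step with the previous observed value) gives a
# baseline error over the validation period. (A sketch; it uses the same split
# point, time step 1000, as the cell below.)

# +
naive_forecast = series[1000 - 1:-1]
naive_mae = np.mean(np.abs(series[1000:] - naive_forecast))
print("naive persistence MAE:", naive_mae)
# -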
split_time = 1000 time_train = time[:split_time] x_train = series[:split_time] time_valid = time[split_time:] x_valid = series[split_time:] def autocorrelation(time, amplitude, seed=None): rnd = np.random.RandomState(seed) φ1 = 0.5 φ2 = -0.1 ar = rnd.randn(len(time) + 50) ar[:50] = 100 for step in range(50, len(time) + 50): ar[step] += φ1 * ar[step - 50] ar[step] += φ2 * ar[step - 33] return ar[50:] * amplitude def autocorrelation(time, amplitude, seed=None): rnd = np.random.RandomState(seed) φ = 0.8 ar = rnd.randn(len(time) + 1) for step in range(1, len(time) + 1): ar[step] += φ * ar[step - 1] return ar[1:] * amplitude series = autocorrelation(time, 10, seed=42) plot_series(time[:200], series[:200]) plt.show() series = autocorrelation(time, 10, seed=42) + trend(time, 2) plot_series(time[:200], series[:200]) plt.show() series = autocorrelation(time, 10, seed=42) + seasonality(time, period=50, amplitude=150) + trend(time, 2) plot_series(time[:200], series[:200]) plt.show() series = autocorrelation(time, 10, seed=42) + seasonality(time, period=50, amplitude=150) + trend(time, 2) series2 = autocorrelation(time, 5, seed=42) + seasonality(time, period=50, amplitude=2) + trend(time, -1) + 550 series[200:] = series2[200:] #series += noise(time, 30) plot_series(time[:300], series[:300]) plt.show() def impulses(time, num_impulses, amplitude=1, seed=None): rnd = np.random.RandomState(seed) impulse_indices = rnd.randint(len(time), size=10) series = np.zeros(len(time)) for index in impulse_indices: series[index] += rnd.rand() * amplitude return series series = impulses(time, 10, seed=42) plot_series(time, series) plt.show() def autocorrelation(source, φs): ar = source.copy() max_lag = len(φs) for step, value in enumerate(source): for lag, φ in φs.items(): if step - lag > 0: ar[step] += φ * ar[step - lag] return ar signal = impulses(time, 10, seed=42) series = autocorrelation(signal, {1: 0.99}) plot_series(time, series) plt.plot(time, signal, "k-") plt.show() signal = impulses(time, 10, seed=42) series = autocorrelation(signal, {1: 0.70, 50: 0.2}) plot_series(time, series) plt.plot(time, signal, "k-") plt.show() series_diff1 = series[1:] - series[:-1] plot_series(time[1:], series_diff1) # + from pandas.plotting import autocorrelation_plot autocorrelation_plot(series) # + from statsmodels.tsa.arima_model import ARIMA model = ARIMA(series, order=(5, 1, 0)) model_fit = model.fit(disp=0) print(model_fit.summary()) # - root = r'D:\Users\Arkady\Verint\Coursera_2019_Tensorflow_Specialization\Course4_Sequences_TimeSeries_Prediction' fpath = root + '/tmp/sunspots.csv' # + # #!wget --no-check-certificate \ # # https://storage.googleapis.com/laurencemoroney-blog.appspot.com/horse-or-human.zip \ # # -O /tmp/horse-or-human.zip #import os #import zipfile #local_zip = '/tmp/horse-or-human.zip' #zip_ref = zipfile.ZipFile(local_zip, 'r') #zip_ref.extractall('/tmp/horse-or-human') # to upload file from local computer to colab #import pandas as pd #from google.colab import files #uploaded = files.upload() # - import pandas as pd df = pd.read_csv(fpath, parse_dates=["Date"], index_col="Date") series = df["Monthly Mean Total Sunspot Number"].asfreq("1M") series.head() series.plot(figsize=(12, 5)) series["1995-01-01":].plot() series.diff(1).plot() plt.axis([0, 100, -50, 50]) # + from pandas.plotting import autocorrelation_plot autocorrelation_plot(series) # - autocorrelation_plot(series.diff(1)[1:]) autocorrelation_plot(series.diff(1)[1:].diff(11 * 12)[11*12+1:]) plt.axis([0, 500, -0.1, 0.1]) 
autocorrelation_plot(series.diff(1)[1:]) plt.axis([0, 50, -0.1, 0.1]) [series.autocorr(lag) for lag in range(1, 50)] # + from pandas.plotting import autocorrelation_plot series_diff = series for lag in range(50): series_diff = series_diff[1:] - series_diff[:-1] autocorrelation_plot(series_diff) # + import pandas as pd series_diff1 = pd.Series(series[1:] - series[:-1]) autocorrs = [series_diff1.autocorr(lag) for lag in range(1, 60)] plt.plot(autocorrs) plt.show() # -
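# The 11*12-month differencing above reflects the roughly 11-year solar cycle.
# A quick way to see that cycle directly (a sketch) is a 12-month centered
# rolling mean of the monthly sunspot series.

# +
series.rolling(window=12, center=True).mean().plot(figsize=(12, 5))
plt.title("12-month rolling mean of the monthly sunspot series")
plt.show()
# -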
legacy/arkady TF legacy/TF_2020_course4_week1_notebook1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/HarmanDotpy/ML-Assignment2/blob/main/Q1SVM_on_PCA_HEALTH_data_FINAL.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="OOAqlOJgBVtv" import numpy as np import pandas as pd import matplotlib.pyplot as plt import sklearn as sk from sklearn import datasets from sklearn import svm from sklearn import metrics from sklearn.metrics import classification_report from sklearn.model_selection import cross_validate # + [markdown] id="uCKZo2deBVt5" # ### Make data # + id="LVixgsmaBVt6" numcat = 2 categories = ['healthy', 'disease'] # + colab={"base_uri": "https://localhost:8080/"} id="hQeNzwpxBVt6" outputId="5608b88e-14d2-42c4-d88f-62bcbe45446e" df_h = pd.read_csv('/content/health_data.csv') print(df_h.head()) train_per = 0.7 #train test split #randomize indces, take the first 80% of the indeces and last 20 percent as test indices = np.random.permutation(df_h.shape[0]) train_ind, test_ind = indices[:int(train_per*df_h.shape[0])], indices[int(train_per*df_h.shape[0]):] # print(len(train_ind), len(test_ind)) #select the data corresponding to the train and test indices and save into 2 dataframes. Reset index afterwards train_df, test_df = df_h.loc[train_ind, :], df_h.loc[test_ind, :] train_df, test_df = train_df.reset_index(drop = True), test_df.reset_index(drop = True) # train_df.drop('index') # Data in numpy arrays (also separating train data by classes) X_train = train_df.drop('category', axis = 1).to_numpy() y_train = train_df.drop(['age', 'restbps', 'chol'], axis = 1).to_numpy().reshape((X_train.shape[0],)) # X_train_0, X_train_1 = train_df.loc[train_df['category'] == 0].drop('category', axis = 1).to_numpy(),train_df.loc[train_df['category'] == 1].drop('category', axis = 1).to_numpy() X_test, y_test = test_df.drop('category', axis = 1).to_numpy(), test_df['category'].to_numpy().reshape((-1, )) print(X_test.shape, y_test.shape, X_train.shape, y_train.shape) # + [markdown] id="MhjaZ8WwBVt7" # ### PCA to reduce to 2 dimensions # + colab={"base_uri": "https://localhost:8080/"} id="r1PjJW7xBVt7" outputId="e7834dfe-2dea-457f-cf34-ce8f99d6218f" indices = np.random.permutation(df_h.shape[0]) df_h = df_h.loc[indices] X = df_h.drop('category', axis = 1).to_numpy() #Normalize X = (X-X.mean(axis = 0))/X.var(axis = 0) y = df_h.drop(['age', 'restbps', 'chol'], axis = 1).to_numpy().reshape((X.shape[0],)) print(X) # + id="oxg1esAfBVt8" def scatter(x, colors): '''Libraries use for this function ONLY''' import matplotlib.patheffects as PathEffects # %matplotlib inline import seaborn as sns '''------------------------------------''' # sns.set_style('darkgrid') sns.set_palette('muted') sns.set_context("notebook", font_scale=1.5, rc={"lines.linewidth": 2.5}) '''----------------------------------------''' # choose a color palette with seaborn. num_classes = len(np.unique(colors)) print('Number of unique classes are = {}'.format(num_classes)) palette = np.array(sns.color_palette("hls", num_classes)) # print(palette) # create a scatter plot. 
f = plt.figure(figsize=(12, 12)) ax = plt.subplot(aspect='equal') sc = ax.scatter(x[:,0], x[:,1], lw=0, s=40, c=palette[colors.astype(np.int)]) plt.xlim(-25, 25) plt.ylim(-25, 25) # ax.axis('off') ax.axis('tight') # add the labels for each digit corresponding to the label txts = [] for i in range(num_classes): # Position of each label at median of data points. xtext, ytext = np.median(x[colors == i, :], axis=0) txt = ax.text(xtext, ytext, categories[i], fontsize=24) txt.set_path_effects([ PathEffects.Stroke(linewidth=5, foreground="w"), PathEffects.Normal()]) txts.append(txt) return f, ax, sc, txts # + colab={"base_uri": "https://localhost:8080/"} id="98omn0IWBVt8" outputId="7752a293-b7dc-44e7-8d0c-43c872b89189" '''The following libraries used only for PCA/tSNE as we are allowed to do so''' from sklearn.decomposition import PCA import time time_start = time.time() pca = PCA(n_components=2) pca_result = pca.fit_transform(X) print('PCA done! Time elapsed: {} seconds'.format(time.time()-time_start)) # + colab={"base_uri": "https://localhost:8080/"} id="CIjnupLqBVt9" outputId="4d45f538-c61b-4f14-d2b2-4e5190e469b0" pca_df = pd.DataFrame(columns = ['pca1','pca2']) pca_df['pca1'] = pca_result[:,0] pca_df['pca2'] = pca_result[:,1] print('Variance explained per principal component: {}'.format(pca.explained_variance_ratio_)) # + colab={"base_uri": "https://localhost:8080/", "height": 728} id="a3sytvYLBVt9" outputId="47e31797-5e03-439c-c923-a64a86235146" top_two_comp = pca_df[['pca1','pca2']] # taking first and second principal component fig, _,_,_ = scatter(top_two_comp.values,y) # Visualizing the PCA output # + id="utvwGnPMBVt-" import matplotlib.pyplot as plt # + id="5xZZn_3-BVt-" X_pca = top_two_comp.values #randomize indices # + id="4mu6URZ0BVt-" ##Now we have pca output data Xpca and y is same as before # + id="mf_HAQRHBVt_" X_train = X_pca[0:int(0.7*X_pca.shape[0])] y_train = y[0:int(0.7*X_pca.shape[0])] X_test = X_pca[int(0.7*X_pca.shape[0]):] y_test = y[int(0.7*X_pca.shape[0]):] # + colab={"base_uri": "https://localhost:8080/"} id="exg0hdZ_BVt_" outputId="7bddfe56-8916-430a-a679-bba890ee7bb8" print(X_train.shape) print(y_train.shape) # + [markdown] id="WdnpazOMBVt_" # ### RBF Kernel # + colab={"base_uri": "https://localhost:8080/"} id="7yPl5OvaBVuA" outputId="84941c9f-91a9-4b37-dce3-6d03f14625d5" #Training clf = svm.SVC(kernel='rbf') clf.fit(X_train, y_train) # + colab={"base_uri": "https://localhost:8080/"} id="n7eIrXivBVuA" outputId="530fd5b7-d0d6-4083-e184-50e75d401df6" #testing y_pred_test = clf.predict(X_test) print(y_pred_test) # + colab={"base_uri": "https://localhost:8080/"} id="mFUhzwa9BVuA" outputId="eb5c2210-6e0e-4671-c64a-e6f834966735" print(classification_report(y_test, y_pred_test, digits = 4)) # + [markdown] id="QB2BSz97BVuB" # ### Grid search over C and gamma # for each C plotting error formultiple gammas # + colab={"base_uri": "https://localhost:8080/", "height": 651} id="DENHWM-fBVuB" outputId="42729e66-3c8a-4cb9-abbe-682a943f14de" rangeC = [2*i for i in range(1, 10)] range_g = [i/5 for i in range(1, 10)]*1/(X_train.shape[1]*X_train.var())# the multiplied value is the default value of gamma # errors = [] bestC = 0 bestgamma = 0 besterror = 0 maxerror = 0 minerror = 100 for C in rangeC: errors = [] for gamma in range_g: #Training clf = svm.SVC(kernel='rbf', C = C, gamma = gamma) clf.fit(X_train, y_train) #testing y_pred_test = clf.predict(X_test) error = 1-(y_pred_test==y_test).mean() errors.append(error) # errors.append(error) if(besterror<=error): besterror = error bestC=C 
bestgamma = gamma if(error>maxerror): maxerror = error if(error<minerror): minerror = error # print(error) plt.plot(errors, range_g, label = 'C = {}'.format(np.around(C))) # plt.ylim(minerror, maxerror) plt.xlabel('value of gamma') plt.ylabel('error in classification') plt.legend() fig = plt.gcf() fig.set_size_inches(18.5, 10.5) plt.show() # + colab={"base_uri": "https://localhost:8080/"} id="d8k74eUwBVuB" outputId="64ad6d6d-084b-4ead-b5e0-0abbbc7057f9" CRBF, gammaRBF = bestC, bestgamma print(CRBF, gammaRBF) # + [markdown] id="iMiVIxJQBVuC" # ### Linear Kernel # + colab={"base_uri": "https://localhost:8080/"} id="95HZ6nP7BVuD" outputId="9daf776c-bb7d-435d-a0ac-17a22b233d07" #Training clf = svm.SVC(kernel='linear') clf.fit(X_train, y_train) # + id="UNjr9WopBVuD" #testing y_pred_test = clf.predict(X_test) # + colab={"base_uri": "https://localhost:8080/"} id="MzqdGYP9BVuD" outputId="cf2793ca-f413-479a-bf9e-1310dd8f50f9" print(classification_report(y_test, y_pred_test, digits = 4)) # + [markdown] id="V_QYjEQNBVuD" # ### Cross Validation # To find optimal value of C # + id="vLUkbRsnBVuE" error = [] rangeC = [i/10 for i in range(1,200)] for C in rangeC: cv_stats = cross_validate(svm.SVC(kernel='linear', C=C), X_train, y_train, cv = 5) error.append(1-cv_stats['test_score'].mean()) # if # + colab={"base_uri": "https://localhost:8080/", "height": 352} id="rJUCiimxBVuE" outputId="d2ee971d-1360-477d-a41e-09228b66d4d6" print(error) plt.plot(rangeC, error) plt.xlabel('value of C') plt.ylabel('error in classification') # + [markdown] id="pUNpmIpIBVuE" # ### Polynomial Kernel # + [markdown] id="z3WTHaRmBVuF" # ### Degree 2 # + colab={"base_uri": "https://localhost:8080/"} id="sj0TJIGZBVuF" outputId="1f3e580d-b597-4032-bb9d-d748817824f9" #Training clf = svm.SVC(kernel='poly', degree = 2, C = 10) clf.fit(X_train, y_train) # + id="kVPPtjAlBVuF" #testing y_pred_test = clf.predict(X_test) # + colab={"base_uri": "https://localhost:8080/"} id="g6avzyegBVuF" outputId="91d5a4d6-43a5-4fc2-f8e6-0085b982304e" print(classification_report(y_test, y_pred_test, digits = 4)) # + [markdown] id="xOnRgym7BVuG" # ### Cross Validation # To find optimal value of C # + id="uCm52_HhBVuG" error = [] rangeC = [i/10 for i in range(1,200)] for C in rangeC: cv_stats = cross_validate(svm.SVC(kernel='poly', degree = 2, C=C), X_train, y_train, cv = 5) error.append(1-cv_stats['test_score'].mean()) # + colab={"base_uri": "https://localhost:8080/", "height": 357} id="DoRgB0ChBVuG" outputId="1ff0c9ae-6589-4189-b126-1ab88307c983" print(error) plt.plot(rangeC, error) plt.xlabel('value of C') plt.ylabel('error in classification') # + [markdown] id="GQNY6CSsBVuG" # ### Degree 3 # + colab={"base_uri": "https://localhost:8080/"} id="bVpTuSW8BVuH" outputId="ab6a960e-c810-4f1d-c674-3da80824955c" #Training clf = svm.SVC(kernel='poly', degree = 3, C = 10) clf.fit(X_train, y_train) # + id="mcOl1GV4BVuH" #testing y_pred_test = clf.predict(X_test) # + colab={"base_uri": "https://localhost:8080/"} id="pxtc2Bo4BVuH" outputId="54468cdc-cb83-4490-f175-9333580c7190" print(classification_report(y_test, y_pred_test, digits = 4)) # + [markdown] id="ziVhyxHABVuH" # ### Cross Validation # To find optimal value of C # + id="tDyrn0wHBVuI" error = [] rangeC = [i/10 for i in range(1,200)] for C in rangeC: cv_stats = cross_validate(svm.SVC(kernel='poly', degree = 3, C=C), X_train, y_train, cv = 5) error.append(1-cv_stats['test_score'].mean()) # + colab={"base_uri": "https://localhost:8080/", "height": 352} id="2Kfj5P3xBVuI" 
outputId="fbd57870-55b2-41c5-e8f3-e7b601ca2494" print(error) plt.plot(rangeC, error) plt.xlabel('value of C') plt.ylabel('error in classification') # + [markdown] id="K50q2LQiBVuI" # ### Degree 4 # + colab={"base_uri": "https://localhost:8080/"} id="rga5TQXkBVuI" outputId="285cb2b7-1ee1-4515-eb2c-5160621a2d34" #Training clf = svm.SVC(kernel='poly', degree = 4, C = 10) clf.fit(X_train, y_train) # + id="8pL4Xps8BVuJ" #testing y_pred_test = clf.predict(X_test) # + colab={"base_uri": "https://localhost:8080/"} id="qwzJQR6pBVuJ" outputId="1d3844ac-bd48-46c3-902c-7fd4b2608571" print(classification_report(y_test, y_pred_test, digits = 4)) # + [markdown] id="3OZQvMJABVuJ" # ### Cross Validation # To find optimal value of C # + id="Zy_CONEjBVuJ" error = [] rangeC = [i/10 for i in range(1,200)] for C in rangeC: cv_stats = cross_validate(svm.SVC(kernel='poly', degree = 4, C=C), X_train, y_train, cv = 5) error.append(1-cv_stats['test_score'].mean()) # + colab={"base_uri": "https://localhost:8080/", "height": 352} id="nhwa0_xeBVuJ" outputId="10fec862-4a19-444e-e336-5c76a0b633fc" print(error) plt.plot(rangeC, error) plt.xlabel('value of C') plt.ylabel('error in classification') # + [markdown] id="xqqf3EdnBVuK" # # Decision boundaries # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="m-pdSAu7BVuK" outputId="a3fddfed-5224-4b2c-da90-4269dbf00b47" # approach - we make a grid and classify all points # step size in the grid h = .002 # we create an instance of SVM and fit out data. We do not scale our # data since we want to plot the support vectors C = 1.0 # SVM regularization parameter svc = svm.SVC(kernel='linear', C=0.5).fit(X_train, y_train) rbf_svc = svm.SVC(kernel='rbf', C=18, gamma = 149).fit(X_train, y_train) poly2_svc = svm.SVC(kernel='poly', degree=2, C=0.5).fit(X_train, y_train) poly3_svc = svm.SVC(kernel='poly', degree=3, C=7).fit(X_train, y_train) poly4_svc = svm.SVC(kernel='poly', degree=4, C=1).fit(X_train, y_train) # create a mesh to plot in x_min, x_max = X_train[:, 0].min(), X_train[:, 0].max() y_min, y_max = X_train[:, 1].min(), X_train[:, 1].max() xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # title for the plots titles = ['Linear', 'RBF', 'Poly.Deg = 2', 'Poly.Deg = 3', 'Poly.Deg = 4' ] for i, clf in enumerate((svc, rbf_svc, poly2_svc, poly3_svc, poly4_svc)): # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, x_max]x[y_min, y_max]. plt.subplot(3, 2, i + 1) plt.subplots_adjust(wspace=0.4, hspace=0.4) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8) # Plot also the training points plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=plt.cm.coolwarm) X_sv = clf.support_vectors_ plt.scatter(X_sv[:, 0], X_sv[:, 1], c='y', cmap=plt.cm.coolwarm) plt.xlabel('x1') plt.ylabel('x2') plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.xticks(()) plt.yticks(()) plt.title(titles[i]) fig = plt.gcf() fig.set_size_inches(20, 20) plt.show() # + id="71ksXPWsM3yd" # + id="RtRGQx37P_xp" # + [markdown] id="dqZIE7ruQB47" # # Decision boundaries - Without support vectors # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="kq0mMB0xQB5C" outputId="bdd096e3-5dda-4728-eaf0-1cd5bcce29c4" # approach - we make a grid and classify all points # step size in the grid h = .002 # we create an instance of SVM and fit out data. 
We do not scale our # data since we want to plot the support vectors C = 1.0 # SVM regularization parameter svc = svm.SVC(kernel='linear', C=0.5).fit(X_train, y_train) supind = svc.support_ X_new = np.delete(X_train, supind, axis = 0) y_new = np.delete(y_train, supind, axis = 0) svc = svm.SVC(kernel='linear', C=0.5).fit(X_train, y_train) svc = svm.SVC(kernel='linear', C=0.5).fit(X_new, y_new) plt.subplot(3, 2, 1) plt.subplots_adjust(wspace=0.4, hspace=0.4) Z = svc.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8) # Plot also the training points plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=plt.cm.coolwarm) X_sv = svc.support_vectors_ plt.scatter(X_sv[:, 0], X_sv[:, 1], c='y', cmap=plt.cm.coolwarm) plt.xlabel('x1') plt.ylabel('x2') plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.xticks(()) plt.yticks(()) plt.title('Linear') ############################################ svc = svm.SVC(kernel='rbf', C=18, gamma = 149).fit(X_train, y_train) supind = svc.support_ X_new = np.delete(X_train, supind, axis = 0) y_new = np.delete(y_train, supind, axis = 0) svc = svm.SVC(kernel='rbf', C=0.5).fit(X_new, y_new) plt.subplot(3, 2, 2) plt.subplots_adjust(wspace=0.4, hspace=0.4) Z = svc.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8) # Plot also the training points plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=plt.cm.coolwarm) X_sv = svc.support_vectors_ plt.scatter(X_sv[:, 0], X_sv[:, 1], c='y', cmap=plt.cm.coolwarm) plt.xlabel('x1') plt.ylabel('x2') plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.xticks(()) plt.yticks(()) plt.title('RBF') ############################################ svc = svm.SVC(kernel='poly', degree=2, C=0.5).fit(X_train, y_train) supind = svc.support_ X_new = np.delete(X_train, supind, axis = 0) y_new = np.delete(y_train, supind, axis = 0) # svc = svm.SVC(kernel='linear', C=0.5).fit(X_train, y_train) svc = svm.SVC(kernel='poly', C=0.5).fit(X_new, y_new) plt.subplot(3, 2, 3) plt.subplots_adjust(wspace=0.4, hspace=0.4) Z = svc.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8) # Plot also the training points plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=plt.cm.coolwarm) X_sv = svc.support_vectors_ plt.scatter(X_sv[:, 0], X_sv[:, 1], c='y', cmap=plt.cm.coolwarm) plt.xlabel('x1') plt.ylabel('x2') plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.xticks(()) plt.yticks(()) plt.title('Poly.Deg = 2') ############################################ svc = svm.SVC(kernel='poly', degree=3, C=7).fit(X_train, y_train) supind = svc.support_ X_new = np.delete(X_train, supind, axis = 0) y_new = np.delete(y_train, supind, axis = 0) # svc = svm.SVC(kernel='linear', C=0.5).fit(X_train, y_train) svc = svm.SVC(kernel='poly', C=0.5).fit(X_new, y_new) plt.subplot(3, 2, 4) plt.subplots_adjust(wspace=0.4, hspace=0.4) Z = svc.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8) # Plot also the training points plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=plt.cm.coolwarm) X_sv = svc.support_vectors_ plt.scatter(X_sv[:, 0], X_sv[:, 1], c='y', cmap=plt.cm.coolwarm) plt.xlabel('x1') plt.ylabel('x2') plt.xlim(xx.min(), 
xx.max()) plt.ylim(yy.min(), yy.max()) plt.xticks(()) plt.yticks(()) plt.title('Poly.Deg = 3') ############################################ svc = svm.SVC(kernel='poly', degree=4, C=1).fit(X_train, y_train) supind = svc.support_ X_new = np.delete(X_train, supind, axis = 0) y_new = np.delete(y_train, supind, axis = 0) # svc = svm.SVC(kernel='linear', C=0.5).fit(X_train, y_train) svc = svm.SVC(kernel='poly', C=0.5).fit(X_new, y_new) plt.subplot(3, 2, 5) plt.subplots_adjust(wspace=0.4, hspace=0.4) Z = svc.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8) # Plot also the training points plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=plt.cm.coolwarm) X_sv = svc.support_vectors_ plt.scatter(X_sv[:, 0], X_sv[:, 1], c='y', cmap=plt.cm.coolwarm) plt.xlabel('x1') plt.ylabel('x2') plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.xticks(()) plt.yticks(()) plt.title('Poly.Deg = 2') fig = plt.gcf() fig.set_size_inches(20, 20) plt.show() # + id="fqq9OtJDTaHm" ## Removing the support vectors should change the boundary and hence the optimal C will be different, but if we use the same C we get non optimal results as above # + id="-GeZm6soT5LG" # + id="2XThPaYtVp6z" # + id="QkZywtvGVp9R" # + [markdown] id="8NMRJXYeVqk-" # # SMO Algotrithm # + id="6ycvJp0ZVp_F" class SVM_solver(): def __init__(self, ker_type = 'linear', itermax = 1000, C= 10, epsilon = 0.0001, gamma = 10, poly = 1): self.kernel_type = ker_type self.C = C self.epsilon = epsilon self.gamma = gamma self.kernels = {'linear': self.k_linear, 'gaussian': self.k_gaussian, 'polynomial': self.k_polynomial} self.itermax = itermax self.poly = poly self.gamma = gamma def solve_SVM(self, X, y): # X.shape = (N, D) y.shape = (N,) N = X.shape[0] self.X = X y = 2*y-1 # print(y) self.y = y # print(self.y) # print(self.y) K = self.get_ker_mat(self.X, y, self.kernels[self.kernel_type]) #initializing mu's and b mu = np.zeros(N) b = 0 for count in range(self.itermax): #outer loop, currently just looping over the all the mu's but some heuristics #can be used for selecting good values of mu_i's and mu_j's #saving the old values of mu to check for convergence mu_old = np.copy(mu) for i in range(N): # print(b) xi, yi = X[i,:], y[i] E_i = self.calc_E(y, yi, mu, b, K[i]) if(self.KKTviolate(mu[i], E_i, yi) == True): j = self.choose_random(0, N, i) mu_i_old, mu_j_old = np.copy(mu[i]), np.copy(mu[j]) xj, yj = X[j,:], y[j] E_j = self.calc_E(y, yj, mu, b, K[j]) L, H = self.getLH(yi, yj, mu[i], mu[j], self.C) #skip if L and H are same if(L == H): continue eta = 2*K[i, j]-K[i, i]-K[j, j] # print(eta) if(eta==0): continue # print(eta) mu[j] = mu_j_old - yj*(E_i-E_j)/eta # print('eta = {}'.format(eta)) mu[j] = self.clip(L, mu[j] , H) #if not much change in mu_j then donot update this i if(np.absolute(mu[j] - mu_j_old)<1e-5): continue mu[i] = mu_i_old + yi*yj*(mu_j_old - mu[j]) b = self.get_b(b, E_i, E_j, yi, yj, xi, xj, K[i, i], K[j, j], K[i, j], mu[j], mu_j_old, mu[i], mu_i_old) # convergence criteria if(np.absolute(mu_old - mu).sum() <= self.epsilon): break # if(count%100==0): # print('iteration number = {}'.format(count)) return mu, b def get_ker_mat(self, X, y, Kernel): mat = np.zeros((X.shape[0], X.shape[0])) for i in range(X.shape[0]): for j in range(X.shape[0]): mat[i, j] = Kernel(X[i], X[j]) return mat def get_b(self, b, E_i, E_j, yi, yj, xi, xj, Ki, Kj, Kij, mu_j_new, mu_j_old, mu_i_new, mu_i_old): b1 = b - E_i - yi*(mu_i_new-mu_i_old)*Ki - 
yj*(mu_j_new-mu_j_old)*Kij b2 = b - E_j - yj*(mu_j_new-mu_j_old)*Kj - yi*(mu_i_new-mu_i_old)*Kij if(mu_i_new<self.C and mu_i_new>0): b = b1 elif(mu_j_new<self.C and mu_j_new>0): b = b2 else: b = (b1+b2)/2 return b def clip(self, L, mu, H): # print('mu = {}, clipmu = {}'.format(mu, np.clip(mu, L, H))) return np.clip(mu, L, H) def getLH(self, yi, yj, mui, muj, c): if(yi!=yj): return max(0, muj-mui), min(c, c+muj-mui) if(yi==yj): return max(0, muj+mui-c), min(c, muj+mui) def choose_random(self, l, h, i): random = i while(random == i): random = np.random.randint(l, h) return random def KKTviolate(self, mui, Ei, yi): if((Ei*yi<-1*1e-5 and mui<self.C) or (Ei*yi > 1e-5 and mui>0)): return True else: return False def calc_E(self, y, yi, mu, b, Ki): assert(y.shape == mu.shape == Ki.shape) f = np.sum(mu*y*Ki) + b return f-yi def k_linear(self, x1, x2): # print(x1.shape) return np.sum(x1*x2) def k_gaussian(self, x1, x2): return np.exp(-self.gamma*np.sum((x1 - x2)**2)) def k_polynomial(self, x1, x2): return (1+np.sum(x1*x2))**self.p def get_kermat_test(self, X_test, X_train): mat = np.zeros((X_test.shape[0], X_train.shape[0])) Kernel = self.kernels[self.kernel_type] for i in range(X_test.shape[0]): for j in range(X_train.shape[0]): mat[i, j] = Kernel(X_test[i], X_train[j]) return mat def predict(self, X_test, mu, b): yhat = np.zeros((X_test.shape[0])) k_test_mat = self.get_kermat_test(X_test, self.X) for i in range(X_test.shape[0]): fi = np.sum(mu*self.y*k_test_mat[i]) + b if(fi<0): yhat[i] = 0 else: yhat[i] = 1 return yhat # def getK(): # + id="gEcZaTBNV9ey" #Note for RBF KERNEL gamma = 1/2sigma^2 svm_model = SVM_solver(ker_type = 'linear', itermax = 10000, C = 1, epsilon = 0.1, gamma = 1, poly = 1)# gamma will only be used for the gaussian kernel mu, b = svm_model.solve_SVM(X_train, y_train) y_hat_test = svm_model.predict(X_test, mu, b) # get_stats(y_hat_test, y_test) # + colab={"base_uri": "https://localhost:8080/"} id="CS1-DpVBWBTA" outputId="ad22a774-dcd7-4e3f-a9ee-dab80a0e88be" print((y_hat_test==y_test).sum()/(y_test.shape[0])) # + id="pXl2oXhgWF0O" # + [markdown] id="fOFMFgmvWJbY" # ### Time as dataset size increases # + colab={"base_uri": "https://localhost:8080/"} id="jtMKcuPCWMZd" outputId="e83b595e-8fda-4d1c-8db0-fa368c0945e6" smotime = [] svmtime = [] smoacc = [] svmacc = [] num = 20 sizes = [num*i for i in range(1, X_train.shape[0]//num)] for size in sizes: print('size = {}'.format(size)) ## import time #RunSMO time_start = time.time() svm_model = SVM_solver(ker_type = 'linear', itermax = 1000, C = 1, epsilon = 0.001, gamma = 1, poly = 1)# gamma will only be used for the gaussian kernel mu, b = svm_model.solve_SVM(X_train[:size], y_train[:size]) time_end = time.time() smotime.append(time_end-time_start) y_hat_test = svm_model.predict(X_test, mu, b) smoacc.append((y_hat_test==y_test).sum()/(y_test.shape[0])) #RunSKLEARN SVM time_start = time.time() svc = svm.SVC(kernel='linear', C=0.5).fit(X_train[:size], y_train[:size]) # svc = svm.SVC(kernel='linear', C=0.5).fit(X_train, y_train) y_pred_test = svc.predict(X_test) (y_pred_test==y_test).sum()/(y_test.shape[0]) time_end = time.time() y_pred_test = svc.predict(X_test) svmacc.append((y_pred_test==y_test).sum()/(y_test.shape[0])) svmtime.append(time_end-time_start) # + id="xfTXW4nacXgS" # + colab={"base_uri": "https://localhost:8080/", "height": 624} id="ONGPYHLmZXn3" outputId="9d79b2f0-2f08-4f83-e39c-851ac79b2d80" plt.plot(sizes, smotime, label = 'smotime') plt.plot(sizes, svmtime, label = 'svmtime') plt.xlabel('dataset size') 
plt.ylabel('time taken') fig = plt.gcf() fig.set_size_inches(10, 10) plt.legend(fontsize = 20) plt.show() # + colab={"base_uri": "https://localhost:8080/"} id="pFwbFcvMZn--" outputId="b28138a5-5270-4497-d7fc-cab89b883e11" print(np.array(smotime).var()) print(np.array(smotime).mean()) print(np.array(svmtime).var()) print(np.array(svmtime).mean()) # + colab={"base_uri": "https://localhost:8080/", "height": 624} id="Ot_DYEMJbzNd" outputId="460b2f82-5032-4937-a7ba-11e3d3566c49" plt.plot(sizes, smoacc, label = 'smoacc') plt.plot(sizes, svmacc, label = 'svmacc') plt.xlabel('dataset size') plt.ylabel('accuracy') fig = plt.gcf() fig.set_size_inches(10, 10) plt.legend(fontsize = 20) plt.show() # + colab={"base_uri": "https://localhost:8080/"} id="r9NqqoKfb_u1" outputId="cd441520-93ed-43a9-9d70-f521f2219e5f" print(smoacc) # + colab={"base_uri": "https://localhost:8080/"} id="0-dXDphQcF9u" outputId="409d763d-80a7-459a-944f-4746f6d55692" print(svmacc) # + id="Tn_HA6TQcHPG"
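# The timing/accuracy comparison above used the linear kernel only. As an extra
# sanity check (a sketch, not a tuned experiment), the custom SMO solver can be
# run once with its Gaussian kernel and compared with sklearn's RBF SVC on the
# same split; the hyperparameters below are purely illustrative.

# +
smo_rbf = SVM_solver(ker_type='gaussian', itermax=1000, C=1, epsilon=0.001, gamma=1)
mu_rbf, b_rbf = smo_rbf.solve_SVM(X_train, y_train)
y_hat_rbf = smo_rbf.predict(X_test, mu_rbf, b_rbf)
print('custom SMO (gaussian) accuracy:', (y_hat_rbf == y_test).mean())

sk_rbf = svm.SVC(kernel='rbf', C=1, gamma=1).fit(X_train, y_train)
print('sklearn SVC (rbf) accuracy:', sk_rbf.score(X_test, y_test))
# -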
All_Notebooks_scripts/Q1SVM_on_PCA_HEALTH_data_FINAL.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="b-PlUKtVWoIX" # ### Contexto # Base de Dados de Churn # <br> # [IBM Sample Data Sets] # + [markdown] id="VKA1c8nkWoIa" # ### Conteúdo # + [markdown] id="VWyjN-8nWoIc" # Cada linha representa um cliente. # <br> # Cada coluna contém os atributos do cliente descritos na coluna Metadados. # # O conjunto de dados inclui informações sobre: # <br> # Clientes que saíram no último mês - a coluna é chamada de rotatividade # <br> # Serviços que cada cliente assinou - telefone, várias linhas, internet, segurança online, backup online, proteção de dispositivo, suporte técnico e streaming de TV e filmes # <br> # Informações da conta do cliente - há quanto tempo ele é cliente, contrato, forma de pagamento, faturamento sem papel, cobranças mensais e cobranças totais # <br> # Informações demográficas sobre clientes - sexo, faixa etária e se eles têm parceiros e dependentes # + [markdown] id="zSEaHUZKWoIf" # ### Análise Exploratória # + [markdown] id="Kyhx_OXqel11" # A seção abaixo visa aplicar técnicas de Estatística Descritiva para entendimento dos dados # + executionInfo={"elapsed": 2389, "status": "ok", "timestamp": 1601597902842, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="MLw5jkjiWoIi" #Importação das bibliotecas import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import matplotlib.ticker as mtick import os sns.set(style = 'white') # %matplotlib inline # + colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"elapsed": 2373, "status": "ok", "timestamp": 1601597902844, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="PZcKPe5TcRh4" outputId="67a2d4a7-41b4-4b95-ffa1-274908614f62" #Importação da biblioteca para integração com o Google Drive from google.colab import drive drive.mount('/content/drive') # + executionInfo={"elapsed": 2360, "status": "ok", "timestamp": 1601597902845, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="Mo-A4BC7WoIx" #Criação do DataFrame e carregamento dos dados df = pd.read_csv('/content/drive/My Drive/WA_Fn-UseC_-Telco-Customer-Churn.csv') # + colab={"base_uri": "https://localhost:8080/", "height": 309} executionInfo={"elapsed": 4730, "status": "ok", "timestamp": 1601597905225, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="MlgY6RTQWoI9" outputId="3301aeff-8904-4ccd-e190-21efd5ecc203" #Comando para verificar os primeiros registros df.head() # + colab={"base_uri": "https://localhost:8080/", "height": 493} executionInfo={"elapsed": 4720, "status": "ok", "timestamp": 1601597905226, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="BP5p-8XnWoJO" outputId="e64ab88d-4a67-4a9a-d7fd-ad57ef97760b" #Avaliando a estrutura dos dados df.info() # + colab={"base_uri": "https://localhost:8080/", "height": 68} executionInfo={"elapsed": 4710, "status": "ok", "timestamp": 1601597905227, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="fwCpNk8yWoJa" outputId="a26cbf05-4972-4f3b-c8e1-e1cc9ef91a16" #Verifica a variável "Gender" e sua distribuição 
df['gender'].value_counts() # + colab={"base_uri": "https://localhost:8080/", "height": 297} executionInfo={"elapsed": 4700, "status": "ok", "timestamp": 1601597905228, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="aadHMk8bWoJl" outputId="c410188d-c33b-4b0f-8b68-673a0bd0d453" #Medidas estatísticas das variáveis númericas df.describe() # + executionInfo={"elapsed": 4691, "status": "ok", "timestamp": 1601597905229, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="KOgg9-ASWoJy" #Conversão do campo "TotalCharges" para numérico df['TotalCharges'] = pd.to_numeric(df['TotalCharges'], errors='coerce') # + colab={"base_uri": "https://localhost:8080/", "height": 391} executionInfo={"elapsed": 4683, "status": "ok", "timestamp": 1601597905229, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="Lb-md9azWoJ_" outputId="2dac9cee-ebd5-4363-b536-b5b52f9adac6" #Verificação de valores nulos df.isnull().sum() # + executionInfo={"elapsed": 4674, "status": "ok", "timestamp": 1601597905230, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="Zf3hoB43WoKP" #Substituição de valores em branco e remoção de nulos do campo "TotalCharges" df['TotalCharges'] = df["TotalCharges"].replace(" ",np.nan) df.dropna(inplace = True) # + executionInfo={"elapsed": 4664, "status": "ok", "timestamp": 1601597905231, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="QUGQtRzUWoKa" #Conversão da variável preditora "Churn" em númerica df['Churn'].replace(to_replace='Yes', value=1, inplace=True) df['Churn'].replace(to_replace='No', value=0, inplace=True) # + [markdown] id="7YvlEFLrWoKm" # ### Análise Gráfica # + [markdown] id="3CnIbgzjgNgH" # A seção abaixo visa aplicar técnicas de Visualização de Dados para melhor compreensão dos mesmos # + colab={"base_uri": "https://localhost:8080/", "height": 301} executionInfo={"elapsed": 4658, "status": "ok", "timestamp": 1601597905232, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="QW-6io9rWoKp" outputId="e730c1ee-d1d4-43c7-baff-8f0113553d57" #Gráfico de barras para análise do gênero #Como podemos analisar nossa base de dados está igualmente dividida entre os gêneros colors = ['#4D3425','#E4512B'] ax = (df['gender'].value_counts()*100.0 /len(df)).plot(kind='bar', stacked = True, rot = 0, color = colors) ax.yaxis.set_major_formatter(mtick.PercentFormatter()) ax.set_ylabel('% Customers') ax.set_xlabel('gender') ax.set_ylabel('% Customers') ax.set_title('Gender Distribution') totals = [] for i in ax.patches: totals.append(i.get_width()) total = sum(totals) for i in ax.patches: ax.text(i.get_x()+.15, i.get_height()-3.5, \ str(round((i.get_height()/total), 1))+'%', fontsize=12, color='white', weight = 'bold') # + colab={"base_uri": "https://localhost:8080/", "height": 336} executionInfo={"elapsed": 4650, "status": "ok", "timestamp": 1601597905234, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="fDi4xaOHWoK2" outputId="00168c36-e0b4-48e5-a82e-7a4b27f448bb" #Gráfico de pizza para avaliação dos clientes mais velhos #Apenas 16% dos clientes são idosos. Assim, a maioria dos nossos clientes nos dados são jovens. 
ax = (df['SeniorCitizen'].value_counts()*100.0 /len(df))\ .plot.pie(autopct='%.1f%%', labels = ['No', 'Yes'],figsize =(5,5), fontsize = 12 ) ax.yaxis.set_major_formatter(mtick.PercentFormatter()) ax.set_ylabel('Senior Citizens',fontsize = 12) ax.set_title('% of Senior Citizens', fontsize = 12) # + colab={"base_uri": "https://localhost:8080/", "height": 394} executionInfo={"elapsed": 4640, "status": "ok", "timestamp": 1601597905235, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="4iyQMLd1WoK_" outputId="bfa217b0-1481-4fb0-f1f0-44146e6a3167" #Gráfico de barras empilhadas para análise de "Dependentes" e "Parceiros" #Cerca de 50% dos clientes possuem parceiro, enquanto apenas 30% do total de clientes possuem dependentes. df2 = pd.melt(df, id_vars=['customerID'], value_vars=['Dependents','Partner']) df3 = df2.groupby(['variable','value']).count().unstack() df3 = df3*100/len(df) colors = ['#4D3425','#E4512B'] ax = df3.loc[:,'customerID'].plot.bar(stacked=True, color=colors, figsize=(8,6),rot = 0, width = 0.2) ax.yaxis.set_major_formatter(mtick.PercentFormatter()) ax.set_ylabel('% Customers',size = 14) ax.set_xlabel('') ax.set_title('% Customers with dependents and partners',size = 14) ax.legend(loc = 'center',prop={'size':14}) for p in ax.patches: width, height = p.get_width(), p.get_height() x, y = p.get_xy() ax.annotate('{:.0f}%'.format(height), (p.get_x()+.25*width, p.get_y()+.4*height), color = 'white', weight = 'bold', size = 14) # + colab={"base_uri": "https://localhost:8080/", "height": 372} executionInfo={"elapsed": 4630, "status": "ok", "timestamp": 1601597905236, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="kG0W-WLTWoLJ" outputId="03f0caf2-e4f1-4ce8-b414-c925ad04907c" #Gráfico de distribuição da variável "Tenure" (Tempo de Contrato) #Com exceção das pontas do gráfico, o tempo de contrato é bem distribuído entre os intervalos ax = sns.distplot(df['tenure'], hist=True, kde=False, bins=int(180/5), color = 'darkblue', hist_kws={'edgecolor':'black'}, kde_kws={'linewidth': 4}) ax.set_ylabel('# of Customers') ax.set_xlabel('Tenure (months)') ax.set_title('# of Customers by their tenure') # + colab={"base_uri": "https://localhost:8080/", "height": 465} executionInfo={"elapsed": 4621, "status": "ok", "timestamp": 1601597905237, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="kNr19XD2WoLT" outputId="8ef13d45-34cc-42ad-8b28-e113071ffb8f" #Analisando os tipos de contrato plt.figure(figsize=(8,6)) sns.countplot(df['Contract']) ax.set_ylabel('Customers') ax.set_title('Contract Type') # + colab={"base_uri": "https://localhost:8080/", "height": 356} executionInfo={"elapsed": 4612, "status": "ok", "timestamp": 1601597905238, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="PWmZnqxRWoLb" outputId="d8cdd463-0918-4676-c2df-735bd5db4d54" #Gráfico boxplot para Análise de Churn por Tempo de Contrato sns.boxplot(df['Churn'], df['tenure']) # + colab={"base_uri": "https://localhost:8080/", "height": 356} executionInfo={"elapsed": 5460, "status": "ok", "timestamp": 1601597906098, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="pPNGfcZeWoLh" outputId="3d30a1de-9e34-4753-f8c6-0a511e049f29" #Gráfico boxplot para Análise de Churn por Total Cobrado sns.boxplot(df['Churn'], df['TotalCharges']) # + colab={"base_uri": 
"https://localhost:8080/", "height": 356} executionInfo={"elapsed": 5449, "status": "ok", "timestamp": 1601597906100, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="JfZQcDDHWoLo" outputId="2a1121f5-c45d-4ab2-bf9a-5248e63d7999" #Gráfico boxplot para Análise de Churn por Cobrança Mensal sns.boxplot(df['Churn'], df['MonthlyCharges']) # + colab={"base_uri": "https://localhost:8080/", "height": 356} executionInfo={"elapsed": 5438, "status": "ok", "timestamp": 1601597906101, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="1PEYKW3-WoLu" outputId="2913d0be-4bf2-4e69-ac72-fb7b9b09ff4f" #Gráfico boxplot para Análise de Cobrança Mensal por Tipo de Contrato sns.boxplot(df['Contract'], df['MonthlyCharges']) # + colab={"base_uri": "https://localhost:8080/", "height": 465} executionInfo={"elapsed": 5428, "status": "ok", "timestamp": 1601597906103, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="htZF6wgtWoL1" outputId="d991e650-cbb0-46c6-90d0-81cb42fabb7d" #Gráfico de Barras para Análise de Churn por Meios de Pagamento plt.figure(figsize=(10,6)) sns.countplot(df['PaymentMethod'], data = df, hue='Churn') # + colab={"base_uri": "https://localhost:8080/", "height": 356} executionInfo={"elapsed": 5418, "status": "ok", "timestamp": 1601597906104, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="GwE_2QZ_WoL9" outputId="0171704b-b333-41f6-c42f-ef3e1be47967" #Análise de Churn por Gênero sns.countplot(df['gender'], data = df, hue='Churn') # + colab={"base_uri": "https://localhost:8080/", "height": 411} executionInfo={"elapsed": 6818, "status": "ok", "timestamp": 1601597907516, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="ANSL3zmmWoMD" outputId="5cbcbeb6-7af4-4876-ca79-28e68151815c" #Gráfico de barras empilhadas por "Tipo de Contrato" #A análise revela que o tipo de contrato "Month-to-Month" é o que possuí o maior percentual de churn colors = ['#4D3425','#E4512B'] contract_churn = df.groupby(['Contract','Churn']).size().unstack() ax = (contract_churn.T*100.0 / contract_churn.T.sum()).T.plot(kind='bar', width = 0.3, stacked = True, rot = 0, figsize = (10,6), color = colors) ax.yaxis.set_major_formatter(mtick.PercentFormatter()) ax.legend(loc='best',prop={'size':14},title = 'Churn') ax.set_ylabel('% Customers',size = 14) ax.set_title('Churn por Tipo de Contrato',size = 14) for p in ax.patches: width, height = p.get_width(), p.get_height() x, y = p.get_xy() ax.annotate('{:.0f}%'.format(height), (p.get_x()+.25*width, p.get_y()+.4*height), color = 'white', weight = 'bold', size = 14) # + colab={"base_uri": "https://localhost:8080/", "height": 411} executionInfo={"elapsed": 6807, "status": "ok", "timestamp": 1601597907517, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="UlzR2aiJWoMI" outputId="bba72e47-8cf0-4d94-fe3e-3688710ed91e" #Gráfico de barras empilhadas por "Senioridade" #A análise revela que clientes mais velhos cancelam mais os contratos colors = ['#4D3425','#E4512B'] seniority_churn = df.groupby(['SeniorCitizen','Churn']).size().unstack() ax = (seniority_churn.T*100.0 / seniority_churn.T.sum()).T.plot(kind='bar', width = 0.2, stacked = True, rot = 0, figsize = (8,6), color = colors) ax.yaxis.set_major_formatter(mtick.PercentFormatter()) 
ax.legend(loc='center',prop={'size':14},title = 'Churn') ax.set_ylabel('% Customers') ax.set_title('Churn by Seniority Level',size = 14) for p in ax.patches: width, height = p.get_width(), p.get_height() x, y = p.get_xy() ax.annotate('{:.0f}%'.format(height), (p.get_x()+.25*width, p.get_y()+.4*height), color = 'white', weight = 'bold',size =14) # + [markdown] id="5xrUxK21WoMQ" # # Implementação dos Modelos # + [markdown] id="cMQCsvrUmOVt" # A partir dessa seção iremos implementar os modelos de Machine Learning, bem como selecionar as variáveis e aplicar algumas técnicas de Normalização e Enconding # + executionInfo={"elapsed": 6797, "status": "ok", "timestamp": 1601597907518, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="0It9AjHGWoMR" #Seleção das variáveis y = df['Churn'] X = df.drop(columns = ['Churn','customerID']) # + executionInfo={"elapsed": 6792, "status": "ok", "timestamp": 1601597907520, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="8fZVT-puWoMZ" #Aplicação de técnica de Enconding para tornar os campos númericos from sklearn.preprocessing import LabelEncoder df_temp = X.select_dtypes(exclude='number') # label encoding to all the columns label_encoders = {} for col in df_temp: le = LabelEncoder() le.fit(X[col]) X[col] = le.transform(X[col]) label_encoders[col] = le # + [markdown] id="i9Yqt2ZjWoMf" # ### Técnica de Scale # + executionInfo={"elapsed": 6786, "status": "ok", "timestamp": 1601597907521, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="VghxPAi6WoMh" #Normalização das variáveis independentes from sklearn.preprocessing import Normalizer scaler = Normalizer() scaler.fit(X) X_matrix = scaler.transform(X) X = pd.DataFrame(X_matrix,columns=X.columns) # + executionInfo={"elapsed": 6780, "status": "ok", "timestamp": 1601597907522, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="JtiI4VZlWoMm" #Importação de bibliotecas importantes from sklearn.metrics import classification_report,confusion_matrix from sklearn.model_selection import StratifiedKFold from sklearn.feature_selection import RFECV # + executionInfo={"elapsed": 6775, "status": "ok", "timestamp": 1601597907524, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="BC00ZtsdWoMr" #Separando dados de treino e teste from sklearn.model_selection import train_test_split seed = 1984 X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state = seed ) # + [markdown] id="sptaq8aZWoMw" # ### Regressão Logística # + colab={"base_uri": "https://localhost:8080/", "height": 358} executionInfo={"elapsed": 7862, "status": "ok", "timestamp": 1601597908619, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="l7tr_RgRWoMx" outputId="59d36f48-dcb9-438d-a8c3-298d778ef928" #Seleção de variáveis e treinamento do modelo from sklearn.linear_model import LogisticRegression model = LogisticRegression(random_state=seed) rfecv = RFECV(estimator=model, step=1, cv=StratifiedKFold(2), scoring='accuracy') rfecv.fit(X_train, y_train) print("Optimal number of features : %d" % rfecv.n_features_) plt.figure() plt.xlabel("Number of features selected") plt.ylabel("Cross validation score (nb of correct classifications)") plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_) plt.show() # + 
colab={"base_uri": "https://localhost:8080/", "height": 221} executionInfo={"elapsed": 7856, "status": "ok", "timestamp": 1601597908623, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="hg6t4RlLWoM4" outputId="798d8f6d-c179-43ac-edbb-55257b0acf13" #Avaliação do modelo predictions = rfecv.predict(X_test) print(classification_report(y_test,predictions)) print('Matriz de Confusão') print(confusion_matrix(y_test,predictions)) # + colab={"base_uri": "https://localhost:8080/", "height": 297} executionInfo={"elapsed": 8468, "status": "ok", "timestamp": 1601597909246, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="XOu8hz15WoM-" outputId="57711665-5cdb-4302-adb2-ff5d61d5bff4" #Análise das variáveis que o modelo levou em consideração df2 = pd.DataFrame(rfecv.support_) df3 = pd.DataFrame(X.columns) df3['X'] = df2 df3[df3['X'] == True] # + [markdown] id="WFNZgg63WoNC" # ### Decision Tree # + colab={"base_uri": "https://localhost:8080/", "height": 358} executionInfo={"elapsed": 10322, "status": "ok", "timestamp": 1601597911113, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="mFmr-SufWoND" outputId="faea522a-1957-4841-8753-42c4b58a1fd5" #Seleção de variáveis e treinamento do modelo from sklearn.tree import DecisionTreeClassifier model = DecisionTreeClassifier(random_state=seed) rfecv = RFECV(estimator=model, step=1, cv=StratifiedKFold(2), scoring='accuracy') rfecv.fit(X_train, y_train) print("Optimal number of features : %d" % rfecv.n_features_) plt.figure() plt.xlabel("Number of features selected") plt.ylabel("Cross validation score (nb of correct classifications)") plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_) plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 221} executionInfo={"elapsed": 10311, "status": "ok", "timestamp": 1601597911114, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="GyJT8dwuWoNK" outputId="ea3305a2-2e41-4f90-f510-bb177c08619d" #Avaliação do modelo predictions = rfecv.predict(X_test) print(classification_report(y_test,predictions)) print('Matriz de Confusão') print(confusion_matrix(y_test,predictions)) # + colab={"base_uri": "https://localhost:8080/", "height": 235} executionInfo={"elapsed": 10300, "status": "ok", "timestamp": 1601597911115, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="nYNXdwpQWoNQ" outputId="3344a375-0c24-4104-efe8-1cb5c7bc83cb" #Análise das variáveis que o modelo levou em consideração df2 = pd.DataFrame(rfecv.support_) df3 = pd.DataFrame(X.columns) df3['X'] = df2 df3[df3['X'] == True] # + [markdown] id="dofabK5-WoNW" # ### Random Forest # + colab={"base_uri": "https://localhost:8080/", "height": 358} executionInfo={"elapsed": 11815, "status": "ok", "timestamp": 1601597912642, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="8Aj-eCI5WoNX" outputId="98426bbc-95d2-4034-bffc-8101f8f43dc0" #Seleção de variáveis e treinamento do modelo from sklearn.ensemble import RandomForestClassifier model = RandomForestClassifier(n_estimators=5, random_state=seed) rfecv = RFECV(estimator=model, step=1, cv=StratifiedKFold(2), scoring='accuracy') rfecv.fit(X_train, y_train) print("Optimal number of features : %d" % rfecv.n_features_) plt.figure() plt.xlabel("Number of features selected") 
plt.ylabel("Cross validation score (nb of correct classifications)") plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_) plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 221} executionInfo={"elapsed": 11805, "status": "ok", "timestamp": 1601597912644, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="-sjQmAJ0WoNf" outputId="02e38637-aa2a-400f-b00e-d12a39f0d7b0" #Avaliação do modelo predictions = rfecv.predict(X_test) print(classification_report(y_test,predictions)) print('Matriz de Confusão') print(confusion_matrix(y_test,predictions)) # + colab={"base_uri": "https://localhost:8080/", "height": 545} executionInfo={"elapsed": 11795, "status": "ok", "timestamp": 1601597912645, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="0vRVmsSaWoNk" outputId="2abb2922-580b-4214-8b88-818cdb1bccca" #Análise das variáveis que o modelo levou em consideração df2 = pd.DataFrame(rfecv.support_) df3 = pd.DataFrame(X.columns) df3['X'] = df2 df3[df3['X'] == True] # + [markdown] id="mPoxPNGnWoNp" # ### XG Boost # + colab={"base_uri": "https://localhost:8080/", "height": 358} executionInfo={"elapsed": 37820, "status": "ok", "timestamp": 1601597938681, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="0nbGwchzWoNq" outputId="abd801da-247c-4dc5-b2a4-34ab31316ca4" #Seleção de variáveis e treinamento do modelo from xgboost import XGBClassifier model = XGBClassifier(random_state=seed) rfecv = RFECV(estimator=model, step=1, cv=StratifiedKFold(4), scoring='accuracy') rfecv.fit(X_train, y_train) print("Optimal number of features : %d" % rfecv.n_features_) plt.figure() plt.xlabel("Number of features selected") plt.ylabel("Cross validation score (nb of correct classifications)") plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_) plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 221} executionInfo={"elapsed": 37813, "status": "ok", "timestamp": 1601597938684, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="XSrRN_bzWoN5" outputId="373de031-79a1-400f-cad7-63be9c4a363d" #Avaliação do modelo predictions = rfecv.predict(X_test) print(classification_report(y_test,predictions)) print('Matriz de Confusão') print(confusion_matrix(y_test,predictions)) # + colab={"base_uri": "https://localhost:8080/", "height": 421} executionInfo={"elapsed": 37803, "status": "ok", "timestamp": 1601597938686, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="ILkTNWZ5WoOP" outputId="a82280cd-0afe-4a08-d194-2aaa4dc50f76" #Análise das variáveis que o modelo levou em consideração df1 = pd.DataFrame(rfecv.support_) df4 = pd.DataFrame(X.columns) df4['X'] = df1 df4[df4['X'] == True] # + executionInfo={"elapsed": 37794, "status": "ok", "timestamp": 1601597938688, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16108664930684528224"}, "user_tz": 180} id="62dwe5oLWoOT"
Codigos/ML_Modelos.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### X lines of Python # # # Wedge model # # This is part of [an Agile blog series](http://ageo.co/xlines00) called **x lines of Python**. # # We start with the usual preliminaries. import matplotlib.pyplot as plt import numpy as np # ## Make an earth model # # We'll start off with an earth model --- an array of 'cells', each of which has some rock properties. # # Line 1 sets up some basic variables, then in line 2 I've used a little matrix-forming trick, `np.tri(m, n, k)`, which creates an *m* &times; *n* matrix with ones below the *k*th diagonal, and zeros above it. The `dtype` specification just makes sure we end up with integers, which we need later for the indexing trick. # # Then line 3 just sets every row above `depth//3` (the `//` is integer division, because NumPy prefers integers for indexing arrays), to 0. length, depth = 40, 100 model = 1 + np.tri(depth, length, -depth//3, dtype=int) model[:depth//3,:] = 0 # We'll have a quick look with some very basic plotting commands. plt.imshow(model, cmap='viridis', aspect=0.2) plt.show() model[60] # Now we can make some Vp-rho pairs (rock 0, rock 1, rock 2) and select from those with `np.take`. This works like `vlookup` in Excel --- it says "read this array, `model` in this case, in which the values *i* are like 0, 1, ... n, and give me the *i*th element from this other array, `rocks` in this case. rocks = np.array([[2700, 2750], # Vp, rho [2400, 2450], [2800, 3000]]) # **Edit:** I was using `np.take` here, but ['fancy indexing'](http://docs.scipy.org/doc/numpy/user/basics.indexing.html) is shorter and more intuitive. We are just going to index `rocks` using the integers in `model`. That is, if `model` has a `1`, we take the second element, `[2400, 2450]`, from `rocks`. We'll end up with an array containing the rocks corresponding to each element of `earth`. earth = rocks[model] # Now apply `np.product` to those Vp-rho pairs to get impedance at every sample. # # This might look a bit magical, but we're just telling Python to apply the function `product()` to every set of numbers it encounters on the last axis (index `-1`). The array `earth` has shape (100, 40, 2), so you can think of it as a 100 row x 40 column 'section' in which each 'sample' is occupied by a Vp-rho pair. That pair is in the last axis. So product, which just takes a bunch of numbers and multiplies them, will return the impedance (the product of Vp and rho) at each sample location. We'll end up with a new 100 x 40 'section' with impedance at every sample. imp = np.apply_along_axis(np.product, -1, earth) # We could have saved a step by taking from `np.product(rocks, axis=1)` but I like the elegance of having an earth model with a set of rock properties at each sample location. That's how I think about the earth --- and it's similar to the concept of a geocellular model. # ## Model seismic reflections # # Now we have an earth model — giving us acoustic impedance everywhere in this 2D grid — we define a function to compute reflection coefficients for every trace. # # I love this indexing trick though I admit it looks weird the first time you see it. It's easier to appreciate for a 1D array. 
Let's look at the differences: # # >>> a = np.array([1,1,1,2,2,2,3,3,3]) # >>> a[1:] - a[:-1] # array([0, 0, 1, 0, 0, 1, 0, 0]) # # This is equivalent to: # # >>> np.diff(a, axis=0) # # But I prefer to spell it out so it's analogous to the sum on the denominator. # + rc = (imp[1:,:] - imp[:-1,:]) / (imp[1:,:] + imp[:-1,:]) plt.imshow(rc, cmap='Greys', aspect=0.2) plt.show() # - # We'll use a wavelet function from [`bruges`](https://github.com/agile-geoscience/bruges). This is not cheating! Well, I don't think it is... we could use `scipy.signal.ricker` but I can't figure out how to convert frequency into the 'width' parameter that function wants. Using the Ricker from `bruges` keeps things a bit simpler. # + import bruges w = bruges.filters.ricker(duration=0.100, dt=0.001, f=40) # - # Let's make sure it looks OK: plt.plot(w) plt.show() # Now one more application of `apply_along_axis`. We could use a loop to step over the traces, but the rule of thumb in Python is "if you are using a loop, you're doing it wrong.". So, we'll use `apply_along_axis`. # # It looks a bit more complicated this time, because we can't just pass a function like we did with `product` before. We want to pass in some more things, not just the trace that `apply_along_axis` is going to send it. So we use Python's 'unnamed function creator', `lambda` (in keeping with all things called `lambda`, it's a bad name that no-one can quite explain). # + synth = np.apply_along_axis(lambda t: np.convolve(t, w, mode='same'), axis=0, arr=rc) plt.imshow(synth, cmap="Greys", aspect=0.2) plt.show() # - # That's it! And it only needed 9 lines of Python! Not incldung boring old imports and plotting stuff. # # Here they are so you can count them: length, depth = 40, 100 model = 1 + np.tri(depth, length, -depth//3) model[:depth//3,:] = 0 rocks = np.array([[2700, 2750], [2400, 2450], [2800, 3000]]) earth = np.take(rocks, model.astype(int), axis=0) imp = np.apply_along_axis(np.product, -1, earth) rc = (imp[1:,:] - imp[:-1,:]) / (imp[1:,:] + imp[:-1,:]) w = bruges.filters.ricker(duration=0.100, dt=0.001, f=40) synth = np.apply_along_axis(lambda t: np.convolve(t, w, mode='same'), axis=0, arr=rc) # <hr /> # # <div> # <img src="https://avatars1.githubusercontent.com/u/1692321?s=50"><p style="text-align:center">© Agile Geoscience 2016</p> # </div>
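# If `bruges` is not available, the Ricker wavelet can also be written directly from its
# analytic form, r(t) = (1 - 2*pi**2*f**2*t**2) * exp(-pi**2*f**2*t**2). The cell below is an
# added sketch (not part of the original post); for a given peak frequency it should produce
# essentially the same wavelet as `bruges.filters.ricker`.

# +
def ricker(duration=0.100, dt=0.001, f=40):
    """Zero-phase Ricker wavelet with peak frequency f (Hz), sampled every dt seconds."""
    t = np.arange(-duration/2, duration/2, dt)
    pft2 = (np.pi * f * t)**2
    return (1 - 2*pft2) * np.exp(-pft2)

plt.plot(ricker(f=40))
plt.show()
# -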
notebooks/00_Synthetic_wedge_model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <h1> Repeatable splitting </h1> # # In this notebook, we will explore the impact of different ways of creating machine learning datasets. # # <p> # # Repeatability is important in machine learning. If you do the same thing now and 5 minutes from now and get different answers, then it makes experimentation difficult. In other words, you will find it difficult to gauge whether a change you made has resulted in an improvement or not. from google.cloud import bigquery # <h3> Create a simple machine learning model </h3> # # The dataset that we will use is <a href="https://bigquery.cloud.google.com/table/bigquery-samples:airline_ontime_data.flights">a BigQuery public dataset</a> of airline arrival data. Click on the link, and look at the column names. Switch to the Details tab to verify that the number of records is 70 million, and then switch to the Preview tab to look at a few rows. # <p> # We want to predict the arrival delay of an airline based on the departure delay. The model that we will use is a zero-bias linear model: # $$ delay_{arrival} = \alpha * delay_{departure} $$ # <p> # To train the model is to estimate a good value for $\alpha$. # <p> # One approach to estimate alpha is to use this formula: # $$ \alpha = \frac{\sum delay_{departure} delay_{arrival} }{ \sum delay_{departure}^2 } $$ # Because we'd like to capture the idea that this relationship is different for flights from New York to Los Angeles vs. flights from Austin to Indianapolis (shorter flight, less busy airports), we'd compute a different $alpha$ for each airport-pair. For simplicity, we'll do this model only for flights between Denver and Los Angeles. # <h2> Naive random split (not repeatable) </h2> compute_alpha = """ #standardSQL SELECT SAFE_DIVIDE(SUM(arrival_delay * departure_delay), SUM(departure_delay * departure_delay)) AS alpha FROM ( SELECT RAND() AS splitfield, arrival_delay, departure_delay FROM `bigquery-samples.airline_ontime_data.flights` WHERE departure_airport = 'DEN' AND arrival_airport = 'LAX' ) WHERE splitfield < 0.8 """ results = bigquery.Client().query(compute_alpha).to_dataframe() alpha = results['alpha'][0] print(alpha) # <h3> What is wrong with calculating RMSE on the training and test data as follows? </h3> compute_rmse = """ #standardSQL SELECT dataset, SQRT(AVG((arrival_delay - ALPHA * departure_delay)*(arrival_delay - ALPHA * departure_delay))) AS rmse, COUNT(arrival_delay) AS num_flights FROM ( SELECT IF (RAND() < 0.8, 'train', 'eval') AS dataset, arrival_delay, departure_delay FROM `bigquery-samples.airline_ontime_data.flights` WHERE departure_airport = 'DEN' AND arrival_airport = 'LAX' ) GROUP BY dataset """ bigquery.Client().query(compute_rmse.replace('ALPHA', str(alpha))).to_dataframe() # Hint: # * Are you really getting the same training data in the compute_rmse query as in the compute_alpha query? # * Do you get the same answers each time you rerun the compute_alpha and compute_rmse blocks? # <h3> How do we correctly train and evaluate? </h3> # <br/> # Here's the right way to compute the RMSE using the actual training and held-out (evaluation) data. Note how much harder this feels. # # Although the calculations are now correct, the experiment is still not repeatable. # # Try running it several times; do you get the same answer? 
train_and_eval_rand = """ #standardSQL WITH alldata AS ( SELECT IF (RAND() < 0.8, 'train', 'eval') AS dataset, arrival_delay, departure_delay FROM `bigquery-samples.airline_ontime_data.flights` WHERE departure_airport = 'DEN' AND arrival_airport = 'LAX' ), training AS ( SELECT SAFE_DIVIDE( SUM(arrival_delay * departure_delay) , SUM(departure_delay * departure_delay)) AS alpha FROM alldata WHERE dataset = 'train' ) SELECT MAX(alpha) AS alpha, dataset, SQRT(AVG((arrival_delay - alpha * departure_delay)*(arrival_delay - alpha * departure_delay))) AS rmse, COUNT(arrival_delay) AS num_flights FROM alldata, training GROUP BY dataset """ bigquery.Client().query(train_and_eval_rand).to_dataframe() # <h2> Using HASH of date to split the data </h2> # # Let's split by date and train. compute_alpha = """ #standardSQL SELECT SAFE_DIVIDE(SUM(arrival_delay * departure_delay), SUM(departure_delay * departure_delay)) AS alpha FROM `bigquery-samples.airline_ontime_data.flights` WHERE departure_airport = 'DEN' AND arrival_airport = 'LAX' AND ABS(MOD(FARM_FINGERPRINT(date), 10)) < 8 """ results = bigquery.Client().query(compute_alpha).to_dataframe() alpha = results['alpha'][0] print(alpha) # We can now use the alpha to compute RMSE. Because the alpha value is repeatable, we don't need to worry that the alpha in the compute_rmse will be different from the alpha computed in the compute_alpha. compute_rmse = """ #standardSQL SELECT IF(ABS(MOD(FARM_FINGERPRINT(date), 10)) < 8, 'train', 'eval') AS dataset, SQRT(AVG((arrival_delay - ALPHA * departure_delay)*(arrival_delay - ALPHA * departure_delay))) AS rmse, COUNT(arrival_delay) AS num_flights FROM `bigquery-samples.airline_ontime_data.flights` WHERE departure_airport = 'DEN' AND arrival_airport = 'LAX' GROUP BY dataset """ print(bigquery.Client().query(compute_rmse.replace('ALPHA', str(alpha))).to_dataframe().head()) # Note also that the RMSE on the evaluation dataset more from the RMSE on the training dataset when we do the split correctly. This should be expected; in the RAND() case, there was leakage between training and evaluation datasets, because there is high correlation between flights on the same day. # <p> # This is one of the biggest dangers with doing machine learning splits the wrong way -- <b> you will develop a false sense of confidence in how good your model is! </b> # Copyright 2018 Google Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
courses/machine_learning/deepdive/02_generalization/repeatable_splitting.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Analysis of bus maintenance options based on a simulation model

# +
from load_packages import load_packages

load_packages()

# +
from packages.bas_models.src.bus_generator import BusGenerator

generator = BusGenerator([0.4, 0.7])
buses = generator.generate(1)
flights = 10
probs = [
    [0.0,0.05],
    [0.1,0.05],
    [0.2,0.11],
    [0.3,0.11],
    [0.4,0.15],
    [0.5,0.15],
    [0.6,0.19],
    [0.7,0.19],
    [0.8,0.23],
    [0.9,0.23],
    [1.0,0.7]]

# +
from packages.bas_models.src.model import Model

def arrange_statistics(model: Model, timing: [int]):
    statistics = dict()
    for time in timing:
        statistics[time] = get_statistics(model, time)
    return statistics

def get_statistics(model: Model, days: int) -> float:
    # Average the total number of completed flights over the simulated days
    result = 0
    for day in range(0, days):
        buses = model.run(flights)
        result += sum(list(map(lambda x: x.flights, buses)))
        model.reset_buses()
    return result / days

# +
import numpy as np
import matplotlib.pyplot as plt

def show_bar(keys, values, precision: int = 3):
    keys = list(map(lambda x: str(x), keys))
    values = list(map(lambda x: round(x, precision), values))
    bars = plt.bar(keys, values)
    autolabel(bars)
    plt.show()

def autolabel(bars, xpos='center'):
    xpos = xpos.lower()
    ha = {'center': 'center', 'right': 'left', 'left': 'right'}
    offset = {'center': 0.5, 'right': 0.57, 'left': 0.43}
    for bar in bars:
        height = bar.get_height()
        plt.text(bar.get_x() + bar.get_width()*offset[xpos], 1.01*height,
                 '{}'.format(height), ha=ha[xpos], va='bottom')
# -

# # Model without repair of minor breakdowns

# +
from packages.bas_models.src.nonstop_model import NonstopModel

model = NonstopModel(buses)
statistics = arrange_statistics(model, [1,2,3,4,5,10,20,90])
show_bar(list(statistics.keys()), list(statistics.values()))

# +
from bus_models.statistics_with_probs import StatisticsWithProbs
from packages.bas_models.src.nonstop_model import NonstopModel

stats_with_probs = StatisticsWithProbs(NonstopModel([]), 90, 10)
statistics = stats_with_probs.arrange_statistics(probs)
show_bar(list(statistics.keys()), list(statistics.values()), 2)
# -

# # Model with repair of minor breakdowns

# +
from packages.bas_models.src.repair_model import RepairModel

model = RepairModel(buses)
statistics = arrange_statistics(model, [1,2,3,4,5,10,20,90])
show_bar(list(statistics.keys()), list(statistics.values()))

# +
from bus_models.statistics_with_probs import StatisticsWithProbs
from packages.bas_models.src.repair_model import RepairModel

stats_with_probs = StatisticsWithProbs(RepairModel([]), 90, 10)
statistics = stats_with_probs.arrange_statistics(probs)
show_bar(list(statistics.keys()), list(statistics.values()), 2)
# -

# # Conclusion
#
# Under these conditions, the model without repair of minor breakdowns is a better fit than the model with repair of minor breakdowns... One idea: if the probability of a complete bus breakdown
examples/bus_models.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from netCDF4 import Dataset import math import warnings import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.ticker as t from mpl_toolkits.basemap import Basemap,shiftgrid,addcyclic from cdo import Cdo,CDOException,CdoTempfileStore cdo=Cdo() mpl.rc("text", usetex=False) mpl.rc('axes',titlesize=20,labelsize=17,linewidth=1.2) mpl.rc('xtick',labelsize=15) mpl.rc('ytick',labelsize=15) mpl.rcParams['xtick.major.size']=5.5 mpl.rcParams['xtick.minor.size']=3.5 mpl.rcParams['ytick.major.size']=5.5 mpl.rcParams['ytick.minor.size']=3.5 mpl.rcParams['legend.fontsize']=15 warnings.filterwarnings("ignore") def graph_Antarctica(path_array,legend_array,name_files,var,year,mesi,\ title,ylabel,data172,temp,cost,position=(0.5,-0.16)): data_mean=np.zeros((len(path_array),12)) for m in range(0,len(path_array)): if year[m]<10: string="000"+str(year[m]) elif year[m]>=10 and year[m]<100: string="00"+str(year[m]) elif year[m]>=100 and year[m]<1000: string="0"+str(year[m]) else: string=str(year[m]) if temp==True: data=[np.reshape([float(i)*cost for i in " ".join(cdo.output(input="-addc,-273.15 -selmonth,"\ +str(j)+" -selname,"+var+" "+path_array[m]+"../"+name_files[m]+"_PLA."+string+".nc"))\ .split()],[nlat,nlon]) for j in range(1,13)] else: data=[np.reshape([float(i)*cost for i in " ".join(cdo.output(input="-selmonth,"+str(j)\ +" -selname,"+var+" "+path_array[m]+"../"+name_files[m]+"_PLA."\ +string+".nc")).split()],[nlat,nlon]) for j in range(1,13)] lats=Dataset(path_array[m]+"../"+name_files[m]+"_PLA."+string+".nc","r").variables["lat"][::-1] data2=np.zeros((len(data),nlon*6*mul)) #print(np.shape(data)) for k in range(0,len(data)): count=0 count_mean=0 for i in range(0,len(data[k])): for j in range(0,len(data[k][i])): if i>25*mul: if data172[i][j]>=0.5: #print(i,j) #print(count) data2[k][count]=data[k][i][j]*np.cos(np.pi*lats[i]/180) count_mean+=np.cos(np.pi*lats[i]/180) count+=1 data_mean[m]=[np.sum(i)/count_mean for i in data2] #print(count) graph(mesi,data_mean,title+" Annual Antarctica Cycle",legend_array,"upper center","Time [month]",\ ylabel,True,False,position=position) return data_mean def graph_60N(path_array,legend_array,name_files,var,year,mesi,title,ylabel,data172,temp,cost): data_mean=np.zeros((len(path_array),12)) for m in range(0,len(path_array)): if year[m]<10: string="000"+str(year[m]) elif year[m]>=10 and year[m]<100: string="00"+str(year[m]) elif year[m]>=100 and year[m]<1000: string="0"+str(year[m]) else: string=str(year[m]) if temp==True: data=[np.reshape([float(i)*cost for i in " ".join(cdo.output(input="-addc,-273.15 -selmonth,"\ +str(j)+" -selname,"+var+" "+path_array[m]+"../"+name_files[m]+"_PLA."+string+".nc"))\ .split()],[nlat,nlon]) for j in range(1,13)] else: data=[np.reshape([float(i)*cost for i in " ".join(cdo.output(input="-selmonth,"+str(j)\ +" -selname,"+var+" "+path_array[m]+"../"+name_files[m]+"_PLA."+string+".nc")).\ split()],[nlat,nlon]) for j in range(1,13)] lats=Dataset(path_array[m]+"../"+name_files[m]+"_PLA."+string+".nc","r").variables["lat"][::-1] data2=np.zeros((len(data),nlon*6*mul)) #print(np.shape(data),lats) for k in range(0,len(data)): count=0 count_mean=0 for i in range(0,len(data[k])): for j in range(0,len(data[k][i])): if i<6*mul: if data172[i][j]>=0.5: 
data2[k][count]=data[k][i][j]*data172[i][j]*np.cos(np.pi*lats[i]/180) count_mean+=data172[i][j]*np.cos(np.pi*lats[i]/180) count+=1 data_mean[m]=[np.sum(i)/count_mean for i in data2] print(count) graph(mesi,data_mean,title+" Annual North Cycle",legend_array,"upper center","Time [month]"\ ,ylabel,True,False) return data_mean def read_graph_file(name_file,path,month,title,mesi,column,bar_title,vmin,vmax,cbar_type): """name_file,path,month,title,mesi,column,bar_title,vmin,vmax,cbar_type """ if month==True: with open(path+name_file,'r') as f: str_data=f.readline() f.seek(0) data=[[float(num) for num in line.split()] for line in f] for i in range(0,int(len(data)*column/tot)): data_res=np.reshape(data[i*int(tot/column)+1+i:(i+1)*int(tot/column)+1+i],[nlat,nlon]) Title=title+" "+mesi[i] graphycs_v(data_res[::-1],Title,'cyl',cbar_type,False,bar_title,vmin,vmax) return data,str_data else: with open(path+name_file,'r') as f: str_data=f.readline() data=[[float(num) for num in line.split()] for line in f] data_res=np.reshape(data,[nlat,nlon]) graphycs_v(data_res[::-1],title,'cyl',cbar_type,False,bar_title,vmin,vmax) return data_res,str_data def graphycs_v(data_array,title,proj,bar,savefig,bar_title,vmn,vmx): fig = plt.figure(figsize=(8,8)) m = Basemap(projection=proj, llcrnrlat=-90, urcrnrlat=90,\ llcrnrlon=0, urcrnrlon=360, resolution='c', lon_0=0) m.drawcoastlines() m.drawparallels(np.arange(-90,91,30),labels=[1,0,0,0]) m.drawmeridians(np.arange(-180.,181.,60.),labels=[0,0,0,1]) m.imshow(data_array,cmap=bar,vmax=vmx,vmin=vmn) cbar = plt.colorbar(orientation='vertical', shrink=0.5) cbar.set_label(bar_title,rotation=-90,fontsize=14,labelpad=25) plt.title(title+"\n",fontsize=17,fontweight="bold") if savefig==True : fig.savefig("grafici/"+title,bbox_inches='tight') def graph(x,data_array,title,legend_array,loc_legend,xlabel,ylabel,save,zonal,position=(0.5,-0.16)): fig,ax=plt.subplots(figsize=(7,5)) for i in range(0,len(data_array)): ax.plot(x[i],data_array[i],label=legend_array[i]) legend=ax.legend(loc="best", shadow=True) ax.set_title(title+"\n",fontweight="bold") ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) ax.yaxis.set_minor_locator(t.AutoMinorLocator()) if zonal==True: ax.set_xlim(-90,90) ax.xaxis.set_minor_locator(t.MultipleLocator(10)) ax.set_xticks((90,60,30,0,-30,-60,-90)) ax.set_xticklabels(["90N","60N","30N","0","30S","60S","90S"]) ax.grid(linestyle='--') elif "Cycle" in title: ax.grid(axis='y',linestyle='--') else: ax.grid(axis='y',linestyle='--') #ax.xaxis.set_major_locator(t.MultipleLocator(20)) plt.show() if save==True: fig.savefig("grafici/"+title,bbox_inches='tight') data=[] def load_file1(cost,file_array,path_array): data=[] if "ZM" in file_array[0]: data=[[cost*i for i in np.reshape(Dataset(path_array[j]+file_array[j],"r").\ variables[file_array[j].split("_")[-1].replace(".nc","") ][:],[nlat])]\ for j in range(0,len(file_array))] else: #print(np.shape(Dataset(path_array[0]+file_array[0],"r").variables[file_array[0].split("_")[-1].replace(".nc","") ][:])) data=[[cost*i for i in np.reshape(Dataset(path_array[j]+file_array[j],"r").variables[file_array[j]\ .split("_")[-1].replace(".nc","") ][:],[int(file_array[j].split("_")[-2].replace("Y",""))])]\ for j in range(0,len(file_array))] #print("1") return data def load_file2(cost,file_array,path_array,typology,temp,step,start): if typology=="global": data=[[cost*float(j) for j in np.reshape([cdo.output(input="-timselmean,"+str(step[k])+","\ +str(start[k])+" -selmonth,"+str(i)\ +" "+path_array[k]+file_array[k]) for i in\ range(1,13)],[12])] for k 
in range(0,len(file_array))] elif typology=="south": if temp==True: data0=[[cdo.output(input="-gridboxmean,nlon,16 -timselmean,"+str(step[k])+\ ","+str(start[k])+" -addc,-273.15 -selmonth,"+str(i)+" "+\ path_array[k]+file_array[k])[0].split() for i in \ range(1,13)]\ for k in range(0,len(file_array))] else: data0=[[cdo.output(input="-gridboxmean,nlon,16 -timselmean,"+str(step[k])+\ ","+str(start[k])+" -selmonth,"+str(i)+" "+\ path_array[k]+file_array[k])[0].split() for i in range(1,13)]\ for k in range(0,len(file_array))] data=[[cost*float(data0[k][i][1]) for i in range(0,len(data0[k]))] for k in range(0,len(data0))] elif typology=="north": if temp==True: data0=[[cdo.output(input="-gridboxmean,nlon,16 -timselmean,"+str(step[k])+\ ","+str(start[k])+" -addc,-273.15 -selmonth,"+str(i)+" "+\ path_array[k]+file_array[k])[0].split() for i \ in range(1,13)]\ for k in range(0,len(file_array))] else: data0=[[cdo.output(input="-gridboxmean,nlon,16 -timselmean,"+str(step[k])+\ ","+str(start[k])+" -selmonth,"+str(i)+" "+\ path_array[k]+file_array[k])[0].split() for i in range(1,13)]\ for k in range(0,len(file_array))] data=[[cost*float(data0[k][i][0]) for i in range(0,len(data0[k]))] for k in range(0,len(data0))] else: print("input sbagliato") return data def graph_globe(data,lons,lats,proj,save,title,cbar_title,vmn,vmx,step_bar,cmap=plt.cm.jet_r): fig = plt.figure(figsize=(10,10)) m = Basemap(projection=proj, llcrnrlat=-90, urcrnrlat=90,\ llcrnrlon=0, urcrnrlon=360, resolution='c', lon_0=0) m.drawcoastlines() if proj=="cyl": m.drawparallels(np.arange(-90,91,30),labels=[1,0,0,0]) m.drawmeridians(np.arange(-180.,181.,60.),labels=[0,0,0,1]) m.imshow(data[::-1],cmap=cmap,vmin=vmn,vmax=vmx) cbar = plt.colorbar(orientation='vertical', shrink=0.5,ticks=np.linspace(vmn,vmx,step_bar)) else: m.drawmapboundary() m.drawparallels(np.arange(-90,90,30),labels=[1,0,0,0]) m.drawmeridians(np.arange(m.lonmin,m.lonmax+30,60),labels=[0,0,0,1]) var_cyclic, lons_cyclic = addcyclic(data,lons) var_cyclic, lons_cyclic = shiftgrid(180.,var_cyclic,lons_cyclic,start=False) lon2d, lat2d = np.meshgrid(lons_cyclic, lats) x,y = m(lon2d, lat2d) cs = m.contourf(x,y,var_cyclic, cmap=cmap,levels=t.MaxNLocator(nbins=step_bar)\ .tick_values(vmn,vmx),extend="both") cbar = plt.colorbar(cs,orientation='vertical', shrink=0.5,ticks=t.MaxNLocator\ (nbins=step_bar).tick_values(vmn,vmx)) cbar.set_label(cbar_title,rotation=-90,labelpad=25) plt.title(title+"\n",fontweight="bold") if save==True: fig.savefig("grafici/"+title,bbox_inches='tight') def print_value(data,starts,ends): for i in range(0,len(data)): print("Mean "+str(starts[i])+"-"+str(ends[i])+": "+str(np.mean(data[i][starts[i]:ends[i]]))\ +" dev.std: "+str(np.std(data[i][starts[i]:ends[i]]))) def Net_Flux_Sup(nome_file,type_file,path_arrays,lenght,load_file,typology,temp,step,start): var=["rls","rss","hfss","hfls","prsn"] path_array=[path_arrays for i in range(0,len(var))] if load_file==1: data=load_file1(1,[nome_file+type_file+i+".nc" for i in var],path_array) else: data=load_file2(1,[nome_file+type_file+i+".nc" for i in var],path_array,typology,temp,\ [step for i in range(0,len(var))],[start for i in range(0,len(var))] ) data0=np.reshape(data[0],[lenght])+np.reshape(data[1],[lenght])+np.reshape(data[2],[lenght])+\ np.reshape(data[3],[lenght])-1000*334000*np.reshape(data[4],[lenght]) return np.reshape(data0,[lenght]) def all_graph(var,title_var,ylabel,name_files,path_array,legend_array,starts,ends,step,lats,lons,mesi\ ,cost,temp,string="ab",position=(0.5,-0.16)): if "a" in string: #Global 
Annual Cycle in_global=[name_files[i]+"_YM_FM_"+str(ends[i])+"Y_"+var+".nc" for i in range(0,len(name_files))] data_global=load_file1(cost,in_global,path_array) graph(time,data_global,title_var+" Global Annual Mean",legend_array,"upper center","Time [year]"\ ,ylabel,True,False,position) print_value(data_global,starts,ends) else: data_global=0 if "b" in string: #Zonal in_zonal=[name_files[i]+"_YM_"+str(step[i])+"YM_ZM_"+str(ends[i])+"Y_"+var+".nc" for i in \ range(0,len(name_files))] data_zonal=load_file1(cost,in_zonal,path_array) graph(lats,data_zonal,title_var+" Zonal Mean",legend_array,"upper center","Latitude [°]",ylabel,\ True,True,position) else: data_zonal=0 if "c" in string: #1 year cycle #Global in_global_cycle=[name_files[i]+"_FM_"+str(ends[i])+"Y_"+var+".nc" for i in range(0,len(name_files))] data_global_cycle=load_file2(cost,in_global_cycle,path_array,"global",temp,step,starts) graph(mesi,data_global_cycle,title_var+" Global Annual Cycle",legend_array,"upper center","Time [month]",\ ylabel,True,False,position) else: data_global_cycle=0 if "n" in string: #North in_north_cycle=[i+"_all_"+var+".nc" for i in name_files] data_north_cycle=load_file2(cost,in_north_cycle,path_array,"north",temp,step,starts) graph(mesi,data_north_cycle,title_var+" North Annual Cycle",legend_array,"upper center","Time [month]",\ ylabel,True,False,position) else: data_north_cycle=0 if "s" in string: #South in_south_cycle=[i+"_all_"+var+".nc" for i in name_files] data_south_cycle=load_file2(cost,in_south_cycle,path_array,"south",temp,step,starts) graph(mesi,data_south_cycle,title_var+" South Annual Cycle",legend_array,"upper center","Time [month]",\ ylabel,True,False,position) else: data_south_cycle=0 return data_global,data_zonal,data_global_cycle,data_north_cycle,data_south_cycle def all_graph_res(var,title_var,ylabel,name_files,path_array,legend_array,starts,ends,step,lats,lons,mesi,cost,\ temp): #Global Annual Cycle in_global=[name_files[i]+"_YM_FM_"+str(ends[i])+"Y_"+var+".nc" for i in range(0,len(name_files))] data_global=load_file1(cost,in_global,path_array) graph(time,data_global,title_var+" Global Annual Mean",legend_array,"upper center","Time [year]",ylabel,\ True,False) print_value(data_global,starts,ends) #Zonal in_zonal=[name_files[i]+"_YM_"+str(step[i])+"YM_ZM_"+str(ends[i])+"Y_"+var+".nc" for i in range(0,len(name_files))] data_zonal=load_file1(cost,in_zonal,path_array) graph(lats,data_zonal,title_var+" Zonal Mean",legend_array,"upper left","Latitude [°]",ylabel,True,True) return data_global,data_zonal def all_graph_sum(var1,var2,title_var,ylabel,name_files,path_array,legend_array,starts,\ ends,step,lats,lons,mesi,cost,temp,string="ab",position=(0.5,-0.16)): if "a" in string: #Global Annual Cycle in_global1=[name_files[i]+"_YM_FM_"+str(ends[i])+"Y_"+var1+".nc" for i in range(0,len(name_files))] in_global2=[name_files[i]+"_YM_FM_"+str(ends[i])+"Y_"+var2+".nc" for i in range(0,len(name_files))] data_global=[np.add(load_file1(cost,in_global1,path_array)[i],load_file1(cost,in_global2,path_array)\ [i]) for i in range(0,len(name_files))] graph(time,data_global,title_var+" Global Annual Mean",legend_array,"upper center","Time [year]",\ ylabel,True,False,position) print_value(data_global,starts,ends) else: data_global=0 if "b" in string: #Zonal in_zonal1=[name_files[i]+"_YM_"+str(step[i])+"YM_ZM_"+str(ends[i])+"Y_"+var1+".nc" for i in range(0,len(name_files))] in_zonal2=[name_files[i]+"_YM_"+str(step[i])+"YM_ZM_"+str(ends[i])+"Y_"+var2+".nc" for i in range(0,len(name_files))] 
data_zonal=[np.add(load_file1(cost,in_zonal1,path_array)[i],load_file1(cost,in_zonal2,path_array)[i])\ for i in range(0,len(name_files))] #print(np.shape(data_zonal)) graph(lats,data_zonal,title_var+" Zonal Mean",legend_array,"upper center","Latitude [°]",ylabel,\ True,True,position) else: data_zonal=0 if "c" in string: #1 year cycle #Global in_global_cycle1=[name_files[i]+"_FM_"+str(ends[i])+"Y_"+var1+".nc" for i in range(0,len(name_files))] in_global_cycle2=[name_files[i]+"_FM_"+str(ends[i])+"Y_"+var2+".nc" for i in range(0,len(name_files))] data_global_cycle=[np.add(load_file2(cost,in_global_cycle1,path_array,"global",temp,step,starts)[i],\ load_file2(cost,in_global_cycle2,path_array,"global",temp,step,starts)[i])\ for i in range(0,len(name_files))] graph(mesi,data_global_cycle,title_var+" Global Annual Cycle",legend_array,"upper center","Time [month]",\ ylabel,True,False,position) else: data_global_cycle=0 if "n" in string: #North in_north_cycle1=[i+"_all_"+var1+".nc" for i in name_files] in_north_cycle2=[i+"_all_"+var2+".nc" for i in name_files] data_north_cycle=[np.add(load_file2(cost,in_north_cycle1,path_array,"north",\ temp,step,starts)[i],load_file2(cost,in_north_cycle2,path_array,\ "north",temp,step,starts)[i]) \ for i in range(0,len(name_files))] graph(mesi,data_north_cycle,title_var+" North Annual Cycle",legend_array,\ "upper center","Time [month]",ylabel,True,False,position) else: data_north_cycle=0 if "s" in string: #South in_south_cycle1=[i+"_all_"+var1+".nc" for i in name_files] in_south_cycle2=[i+"_all_"+var2+".nc" for i in name_files] data_south_cycle=[np.add(load_file2(cost,in_south_cycle1,path_array,"south",\ temp,step,starts)[i],load_file2(cost,in_south_cycle2,path_array,\ "south",temp,step,starts)[i]) \ for i in range(0,len(name_files))] graph(mesi,data_south_cycle,title_var+" South Annual Cycle",legend_array,\ "upper center","Time [month]",ylabel,True,False,position) else: data_south_cycle=0 return data_global,data_zonal,data_global_cycle,data_north_cycle,data_south_cycle def graph_level(var,title_var,ylabel,name_files,path_array,ends,steps,z_press,lat,cost,var_min,var_max): data_globe=[[cost*i for i in Dataset(path_array[j]+name_files[j]+"_YM_"+str(steps[j])+"YM_ZM_"+str(ends[j])+"Y_"+var+".nc").\ variables[var][:]]for j in range(0,len(name_files))] for i in range(0,len(name_files)): for j in range(i+1,len(name_files)): fig,ax=plt.subplots(figsize=(8,6)) x,y=np.meshgrid(lat,z_press) cs=ax.contourf(x,y,np.reshape(data_globe[i],(13,nlat))-np.reshape(data_globe[j],(13,nlat)),\ cmap=plt.cm.jet,levels=np.linspace(var_min,var_max,21),extend="both") #ax.xaxis.set_major_locator(plt.MultipleLocator(10)) ax.yaxis.set_ticks([1000,850,700,500,400,300,200,100,30]) ax.set_ylim(1000,30) ax.set_xlabel("Latitude [°]") ax.set_ylabel(" Pressure [hPa]") ax.set_xlim(-86,86) ax.set_xticks((80,60,40,20,0,-20,-40,-60,-80)) ax.set_xticklabels(["80N","60N","40N","20N","0","20S","40S","60S","80S"]) ax.grid() cbar = plt.colorbar(cs,orientation='vertical', shrink=0.9,ticks=np.linspace(var_min,var_max,15)) cbar.set_label(ylabel,rotation=-90,labelpad=25) #plt.gca().invert_yaxis() plt.title(title_var+" ("+name_files[i]+"-"+name_files[j]+") \n",fontweight="bold") plt.savefig("grafici/"+title_var+" ("+name_files[i]+"-"+name_files[j]+")") def all_graph_globe(var,title_var,ylabel,name_files,path_array,lon,lat,cost,proj_type,end,step,color=plt.cm.jet): data_globe=[[cost*i for i in Dataset(path_array[j]+name_files[j]+"_YM_"+str(step[j])+"YM_"+\ str(end[j])+"Y_"+var+".nc").variables[var][0]]for j in \ 
range(0,len(name_files))] #print(math.factorial(len(name_files))/(math.factorial(len(name_files)-2)*math.factorial(2))) min_max=np.zeros((int(math.factorial(len(name_files))/(math.factorial(len(name_files)-2)*\ math.factorial(2))),2)) #print(np.shape(data_globe)) #print(np.reshape(data_globe[0],[nlat,nlon])[0]) count=0 for i in range(0,len(name_files)): for j in range(i+1,len(name_files)): min_max[count]=[np.min(np.reshape(data_globe[i],[nlat,nlon])-np.reshape(data_globe[j],[nlat,nlon])),\ np.max(np.reshape(data_globe[i],[nlat,nlon])-np.reshape(data_globe[j],[nlat,nlon]))] count+=1 #print(min_max) max_lim=int(np.max(min_max)+1) min_lim=int(np.min(min_max)-1) lim=np.max([np.abs(min_lim),np.abs(max_lim)]) for i in range(0,len(name_files)): for j in range(i+1,len(name_files)): max_lim=int(np.max(np.reshape(data_globe[i],[nlat,nlon])-np.reshape(data_globe[j],[nlat,nlon]))+1) min_lim=int(np.min(np.reshape(data_globe[i],[nlat,nlon])-np.reshape(data_globe[j],[nlat,nlon]))-1) lim=np.max([np.abs(min_lim),np.abs(max_lim)]) graph_globe(np.reshape(data_globe[i],[nlat,nlon])-np.reshape(data_globe[j],[nlat,nlon]),lons[i],\ lats[i],proj_type,True,title_var+" ("+name_files[i]+"-"+name_files[j]+")",ylabel,\ -lim,lim,15,cmap=color) #print(np.max(np.reshape(data_globe[i],[nlat,nlon])-np.reshape(data_globe[j],[nlat,nlon])),np.min(np.reshape(data_globe[i],[nlat,nlon])-np.reshape(data_globe[j],[nlat,nlon]))) return data_globe def all_graph_globe2(data_globe,title_var,ylabel,lon,lat,proj_type): min_max=np.zeros((int(math.factorial(len(name_files))/(math.factorial(len(name_files)-2)\ *math.factorial(2))),2)) count=0 for i in range(0,len(data_globe)): for j in range(i+1,len(data_globe)): min_max[count]=[np.min(np.reshape(data_globe[i],[nlat,nlon])-np.reshape(data_globe[j],[nlat,nlon])),\ np.max(np.reshape(data_globe[i],[nlat,nlon])-np.reshape(data_globe[j],[nlat,nlon]))] count+=1 #print(min_max) max_lim=int(np.max(min_max)+1) min_lim=int(np.min(min_max)-1) lim=np.max([np.abs(min_lim),np.abs(max_lim)]) for i in range(0,len(data_globe)): for j in range(i+1,len(data_globe)): max_lim=int(np.max(np.reshape(data_globe[i],[nlat,nlon])-np.reshape(data_globe[j],[nlat,nlon]))+1) min_lim=int(np.min(np.reshape(data_globe[i],[nlat,nlon])-np.reshape(data_globe[j],[nlat,nlon]))-1) lim=np.max([np.abs(min_lim),np.abs(max_lim)]) graph_globe(np.reshape(data_globe[i],[nlat,nlon])-np.reshape(data_globe[j],[nlat,nlon]),lons[i],lats[i]\ ,proj_type,True,title_var+" ("+name_files[i]+"-"+name_files[j]+")",ylabel,-lim,\ lim,15) #print(np.max(np.reshape(data_globe[i],[nlat,nlon])-np.reshape(data_globe[j],[nlat,nlon])),np.min(np.reshape(data_globe[i],[nlat,nlon])-np.reshape(data_globe[j],[nlat,nlon]))) # -
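# The regional means above (graph_Antarctica, graph_60N) weight every grid box by the cosine
# of its latitude before averaging. The cell below is a tiny, self-contained illustration of
# that weighting with made-up numbers; it is an added sketch, not part of the original module.

# +
lat_demo = np.array([-85.0, -75.0, -65.0])          # grid-box centre latitudes (degrees)
field_demo = np.array([[1.0, 2.0],
                       [3.0, 4.0],
                       [5.0, 6.0]])                 # synthetic (lat, lon) field values
weights = np.cos(np.deg2rad(lat_demo))[:, None] * np.ones_like(field_demo)
area_weighted_mean = np.sum(field_demo * weights) / np.sum(weights)
print(area_weighted_mean)
# -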
file_comparison/T21/module_comparison.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # RadiusNeighborsClassifier # ### Required Packages # !pip install imblearn import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as se from imblearn.over_sampling import RandomOverSampler from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split from sklearn.neighbors import RadiusNeighborsClassifier from sklearn.metrics import classification_report,plot_confusion_matrix warnings.filterwarnings('ignore') # ### Initialization # # Filepath of CSV file #filepath file_path= "" # List of features which are required for model training . #x_values features=[] # Target feature for prediction. #y_value target='' # ### Data Fetching # # Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools. # # We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry. df=pd.read_csv(file_path) df.head() # ### Feature Selections # # It is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model. # # We will assign all the required input features to X and target/outcome to Y. X = df[features] Y = df[target] # ### Data Preprocessing # # Since the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes. # def NullClearner(df): if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])): df.fillna(df.mean(),inplace=True) return df elif(isinstance(df, pd.Series)): df.fillna(df.mode()[0],inplace=True) return df else:return df def EncodeX(df): return pd.get_dummies(df) def EncodeY(df): if len(df.unique())<=2: return df else: un_EncodedT=np.sort(pd.unique(df), axis=-1, kind='mergesort') df=LabelEncoder().fit_transform(df) EncodedT=[xi for xi in range(len(un_EncodedT))] print("Encoded Target: {} to {}".format(un_EncodedT,EncodedT)) return df x=X.columns.to_list() for i in x: X[i]=NullClearner(X[i]) X=EncodeX(X) Y=EncodeY(NullClearner(Y)) X.head() # #### Correlation Map # # In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns. f,ax = plt.subplots(figsize=(18, 18)) matrix = np.triu(X.corr()) se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix) plt.show() # #### Distribution Of Target Variable plt.figure(figsize = (10,6)) se.countplot(Y) # ### Data Splitting # # The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data. 
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123) # #### Handling Target Imbalance # # The challenge of working with imbalanced datasets is that most machine learning techniques will ignore, and in turn have poor performance on, the minority class, although typically it is performance on the minority class that is most important. # # One approach to addressing imbalanced datasets is to oversample the minority class. The simplest approach involves duplicating examples in the minority class.We will perform overspampling using imblearn library. x_train,y_train = RandomOverSampler(random_state=123).fit_resample(x_train, y_train) # ### Model # # RadiusNeighborsClassifier implements learning based on the number of neighbors within a fixed radius of each training point, where is a floating-point value specified by the user. # In cases where the data is not uniformly sampled, radius-based neighbors classification can be a better choice. # # #### Tuning parameters # # > **radius**: Range of parameter space to use by default for radius_neighbors queries. # # > **algorithm**: Algorithm used to compute the nearest neighbors: # # > **leaf_size**: Leaf size passed to BallTree or KDTree. # # > **p**: Power parameter for the Minkowski metric. # # > **metric**: the distance metric to use for the tree. # # > **outlier_label**: label for outlier samples # # > **weights**: weight function used in prediction. # # For more information refer: [API](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.RadiusNeighborsClassifier.html) # Build Model here model = RadiusNeighborsClassifier(n_jobs=-1) model.fit(x_train, y_train) # #### Model Accuracy # # score() method return the mean accuracy on the given test data and labels. # # In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100)) # #### Confusion Matrix # # A confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where results are known. plot_confusion_matrix(model,x_test,y_test,cmap=plt.cm.Blues) # #### Classification Report # A Classification report is used to measure the quality of predictions from a classification algorithm. How many predictions are True, how many are False. # # * **where**: # - Precision:- Accuracy of positive predictions. # - Recall:- Fraction of positives that were correctly identified. # - f1-score:- percent of positive predictions were correct # - support:- Support is the number of actual occurrences of the class in the specified dataset. print(classification_report(y_test,model.predict(x_test))) # #### Creator: <NAME> , Github: [Profile](https://github.com/Thilakraj1998)
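# The radius parameter can also be tuned rather than left at its default. The cell below is an
# added sketch (not part of the original template) and assumes the x_train / y_train split
# created above. Because the meaning of a given radius depends on feature scale, the sketch
# standardizes the inputs first; outlier_label='most_frequent' keeps predict() from failing
# when a test point has no neighbours inside the radius.

# +
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

pipe = make_pipeline(StandardScaler(),
                     RadiusNeighborsClassifier(outlier_label='most_frequent', n_jobs=-1))
param_grid = {'radiusneighborsclassifier__radius': [0.5, 1.0, 2.0, 5.0, 10.0]}
search = GridSearchCV(pipe, param_grid, cv=3, scoring='accuracy')
search.fit(x_train, y_train)
print(search.best_params_, search.best_score_)
# -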
Classification/Radius Neighbors/RadiusNeighborsClassifier.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Random forest classification # # ## RAPIDS single GPU # # <img src="https://rapids.ai/assets/images/RAPIDS-logo-purple.svg" width="400"> # + import os MODEL_PATH = 'models' if not os.path.exists(MODEL_PATH): os.makedirs(MODEL_PATH) numeric_feat = [ 'pickup_weekday', 'pickup_hour', 'pickup_week_hour', 'pickup_minute', 'passenger_count', ] categorical_feat = [ 'PULocationID', 'DOLocationID', ] features = numeric_feat + categorical_feat y_col = 'high_tip' # - # # Load data and feature engineering # # Load a full month for this exercise. Note we are loading the data with RAPIDS now (`cudf.read_csv` vs. `pd.read_csv`) # + import cudf import s3fs s3 = s3fs.S3FileSystem(anon=True) taxi = cudf.read_csv( s3.open('s3://nyc-tlc/trip data/yellow_tripdata_2019-01.csv', mode='rb'), parse_dates=['tpep_pickup_datetime', 'tpep_dropoff_datetime'] ) # - print(f'Num rows: {len(taxi)}, Size: {taxi.memory_usage(deep=True).sum() / 1e6} MB') # + def prep_df(df: cudf.DataFrame) -> cudf.DataFrame: ''' Generate features from a raw taxi dataframe. Use 32 bit precision for GPU processing ''' df = df[df.fare_amount > 0] # avoid divide-by-zero df['tip_fraction'] = df.tip_amount / df.fare_amount df['high_tip'] = (df['tip_fraction'] > 0.2) # class label df['pickup_weekday'] = df.tpep_pickup_datetime.dt.weekday df['pickup_hour'] = df.tpep_pickup_datetime.dt.hour df['pickup_week_hour'] = (df.pickup_weekday * 24) + df.pickup_hour df['pickup_minute'] = df.tpep_pickup_datetime.dt.minute df = df[features + [y_col]].astype('float32').fillna(-1) df[y_col] = df[y_col].astype('int32') return df taxi_train = prep_df(taxi) # - taxi_train.high_tip.value_counts() taxi_train.head() # # Train model from cuml.ensemble import RandomForestClassifier rfc = RandomForestClassifier(n_estimators=100, max_depth=10, seed=42) # %%time _ = rfc.fit(taxi_train[features], taxi_train[y_col]) # ## Save model # + import cloudpickle with open(f'{MODEL_PATH}/random_forest_rapids.pkl', 'wb') as f: cloudpickle.dump(rfc, f) # - # ## Calculate metrics on test set # # Use a different month for test set # + taxi_test = cudf.read_csv( s3.open('s3://nyc-tlc/trip data/yellow_tripdata_2019-02.csv', mode='rb'), parse_dates=['tpep_pickup_datetime', 'tpep_dropoff_datetime'] ) taxi_test = prep_df(taxi_test) # + from cuml.metrics import roc_auc_score preds = rfc.predict_proba(taxi_test[features])[1] roc_auc_score(taxi_test[y_col], preds)
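# ## Reload the saved model (sketch)
#
# A short added sketch, not part of the original notebook: reload the pickled classifier later
# and score new data with it. This assumes the same RAPIDS/GPU environment and the `taxi_test`
# frame prepared above.

# +
with open(f'{MODEL_PATH}/random_forest_rapids.pkl', 'rb') as f:
    rfc_loaded = cloudpickle.load(f)

preds_loaded = rfc_loaded.predict_proba(taxi_test[features])[1]
roc_auc_score(taxi_test[y_col], preds_loaded)
# -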
examples/examples-gpu/nyc-taxi/rf-rapids.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Importing the required modules
from keras import backend as K, models
from keras.models import *
from keras.layers import *
from keras.layers.normalization import BatchNormalization
from keras.applications import VGG16
from keras.regularizers import l2
from keras.activations import relu

import os
from os.path import join as join_

import numpy as np
from PIL import Image

# +
# Setting up the dataset
SET_DIR = 'dataset/'
NUM_CLASSES = len(os.listdir('dataset'))

# The shape which VGG16 accepts as input and thus each image is resized to
image_shape = (224, 224, 3)

# NUM_EXAMPLES is the number of (A,P,N) triplets chosen for the same class (N belongs to a different class of course)
NUM_EXAMPLES = 15

# Triplets list will contain anchor(A), positive(P) and negative(N) triplets.
triplets = []
# Keep three separate lists (A = P = N = [] would make all three names alias the same list)
A, P, N = [], [], []

# creating anchor, positive, negative triplets
for _ in range(NUM_EXAMPLES):
    for direc in os.listdir(SET_DIR):
        dir_path = SET_DIR + direc
        dir_contents = os.listdir(dir_path)
        length = len(dir_contents)

        anchor = np.asarray(Image.open(join_(dir_path, dir_contents[np.random.randint(0, length)])))/255
        # anchor.shape = (200, 180, 3)
        # Padding with zeros for each channel in RGB
        anchor = np.array([np.pad(a, ((22,22), (12,12)), 'constant') for a in anchor.T]).T

        positive = np.asarray(Image.open(join_(dir_path, dir_contents[np.random.randint(0, length)])))/255
        positive = np.array([np.pad(a, ((22,22), (12,12)), 'constant') for a in positive.T]).T

        neg_dir = os.listdir(SET_DIR)[np.random.randint(NUM_CLASSES)]
        while neg_dir == direc:
            neg_dir = os.listdir(SET_DIR)[np.random.randint(NUM_CLASSES)]

        length_negative = len(os.listdir(SET_DIR + neg_dir))
        negative = np.asarray(Image.open(
            join_(SET_DIR + neg_dir, os.listdir(SET_DIR + neg_dir)[np.random.randint(0, length_negative)])))/255
        negative = np.array([np.pad(a, ((22,22), (12,12)), 'constant') for a in negative.T]).T

        # append triplet
        triplets.append([anchor, positive, negative])
        A.append(anchor)
        P.append(positive)
        N.append(negative)
# -

def triplet_function(vects, alpha=0.2):
    x, y, z = vects
    sum_square_xy = K.sum(K.square(x - y), axis=1, keepdims=True)
    sum_square_xz = K.sum(K.square(x - z), axis=1, keepdims=True)
    return K.sum(K.maximum(sum_square_xy - sum_square_xz + alpha, 0), axis=0)

# +
# Using the VGG16 model defined in keras.applications
def VGG():
    image_input = Input(shape=(224, 224, 3))
    model = VGG16(input_tensor=image_input, weights='imagenet', include_top=True)
    model.layers[-1].activation = relu
    x_out = Dense(64)(model.layers[-1].output)
    new_model = Model(inputs=image_input, outputs=x_out)
    return new_model
# -

def get_model():
    anchor = Input(shape=image_shape, name='anchor')
    positive = Input(shape=image_shape, name='positive')
    negative = Input(shape=image_shape, name='negative')

    # Passing each image through the VGG model
    req_model = VGG()

    # Pass the images through the same model
    anchor_encoding = req_model(anchor)
    positive_encoding = req_model(positive)
    negative_encoding = req_model(negative)

    # Incorporating the triplet loss in the SimVecLayer
    SimVecLayer = Lambda(triplet_function, output_shape=(1,))
    sim_APN = SimVecLayer([anchor_encoding, positive_encoding, negative_encoding])

    return Model(inputs=[anchor, positive, negative], outputs=sim_APN)

# +
model = get_model()

# Compile the model with a loss and optimizer
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mae'])

model.summary()

# +
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot

SVG(model_to_dot(model).create(prog='dot', format='svg'))

# +
# Train the model (done over the intel cloud)
from keras.callbacks import EarlyStopping

A, P, N = np.array(A), np.array(P), np.array(N)

model.fit(x=[A, P, N], y=np.zeros((A.shape[0], 1)),
          epochs=100, verbose=1, batch_size=64,
          validation_split=0.3,
          callbacks=[EarlyStopping(monitor='val_loss', patience=5)])

model.save('model.h5')
# -
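# After training, identity verification would use the shared embedding network rather than
# the triplet head. The sketch below is only an assumption about how that could look: it pulls
# the nested VGG-based sub-model out of `model`, and the distance threshold is a placeholder
# that would need to be calibrated on held-out pairs.

# +
embedding_net = next(l for l in model.layers if isinstance(l, Model))

def same_identity(img_a, img_b, threshold=0.7):
    # img_a, img_b: preprocessed arrays of shape (224, 224, 3), scaled like the training images
    emb_a = embedding_net.predict(np.expand_dims(img_a, 0))
    emb_b = embedding_net.predict(np.expand_dims(img_b, 0))
    squared_distance = np.sum(np.square(emb_a - emb_b))
    return squared_distance < threshold
# -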
Siamese_train_store.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:.conda-2019_ml_ocean] # language: python # name: conda-env-.conda-2019_ml_ocean-py # --- # # Demo III - Visualization # + import pathlib import sys # define the top level directory PROJECT_PATH = pathlib.Path("/media/disk/erc/papers/2019_ML_OCN/") CODE_PATH = PROJECT_PATH.joinpath("ml4ocean") sys.path.append(str(CODE_PATH)) # ml4ocean packages from src.utils import get_paths from src.data.world import get_full_data, world_features from src.features.world import subset_independent_floats PATHS = get_paths() # standard pacakges import numpy as np import pandas as pd # plottling import matplotlib.pyplot as plt import cartopy import cartopy.crs as ccrs from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER # %matplotlib inline # %load_ext autoreload # %autoreload 2 # - # ### Data # !ls {PATHS.data_interim} inputs_df = pd.read_csv(f"{PATHS.data_interim.joinpath('inputs.csv')}") outputs_df = pd.read_csv(f"{PATHS.data_interim.joinpath('outputs.csv')}") soca2016_df = pd.read_csv(f"{PATHS.data_interim.joinpath('soca2016.csv')}") isprs2020_df = pd.read_csv(f"{PATHS.data_interim.joinpath('isprs2020.csv')}") # ## Plot I - Float Locations soca2016_locations = soca2016_df.copy().reset_index()[['wmo', 'lat', 'lon']].drop_duplicates() input_locations = inputs_df.copy().reset_index()[['wmo', 'lat', 'lon']].drop_duplicates() isprs2020_locations = isprs2020_df.copy().reset_index()[['wmo', 'lat', 'lon']].drop_duplicates() # #### First Attempt # + fig = plt.figure(figsize=(24, 12)) ax = plt.axes(projection=ccrs.Mollweide()) # SOCA2016 pts1 = plt.scatter( x=soca2016_locations.lon, y=soca2016_locations.lat, s=80, marker='+', color="darkorange", transform=ccrs.PlateCarree(), label='SOCA2016', zorder=3) # ISPRS2020 pts2 = plt.scatter( x=isprs2020_locations.lon, y=isprs2020_locations.lat, s=80, marker='+', color="yellow", transform=ccrs.PlateCarree(), label='ISPRS2020', zorder=3,) # INPUTS pts3 = plt.scatter( x=input_locations.lon, y=input_locations.lat, s=50, marker='+', color="#528B8B", transform=ccrs.PlateCarree(), label='Input Data') ax.coastlines() ax.add_feature(cartopy.feature.LAND, zorder=0, facecolor='dimgray') ax.add_feature(cartopy.feature.LAKES, zorder=1, facecolor='white', edgecolor='black') ax.add_feature(cartopy.feature.RIVERS, zorder=2, edgecolor='black') # gridlines gl = ax.gridlines( crs=ccrs.PlateCarree(), color='black', alpha=0.9, linewidth=1, linestyle='--', zorder=4 ) ax.legend(fontsize=30, ncol=2, markerscale=3, loc='upper right') plt.tight_layout() plt.show() # -
notebooks/global_data/3_visualization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="OVN9otKCCeCl" import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline import warnings warnings.filterwarnings('ignore') # + id="g1gQeMgaCnPJ" dataset = pd.read_excel('bb_agente_de_tecnologia_modified.xlsx', index_col=0) # + colab={"base_uri": "https://localhost:8080/", "height": 337} id="o7lJvefWC6oR" outputId="1f682fbb-e475-4269-8def-b51f75016d4b" dataset.head() # + id="sz6nBGB1C9Hg" dataset = dataset.drop('inscrição', 1) dataset = dataset.drop('situação', 1) # + colab={"base_uri": "https://localhost:8080/", "height": 297} id="yXlE_u1VDXzK" outputId="311db7b0-e371-4714-e363-7231fae17991" dataset.describe() # + colab={"base_uri": "https://localhost:8080/"} id="0qB4G1mDGLpJ" outputId="a5729cba-2c51-4c0d-8054-fced05a261ab" dataset.info() # + id="plZv7tlAIW4J" from datetime import date def calculate_age(born): today = date.today() return today.year - born.year - ((today.month, today.day) < (born.month, born.day)) # + id="WRLeF2b0JDxZ" s = dataset["nascimento"] age_list = pd.Series([]) for i in range(len(dataset.index)): age_list[i]=calculate_age(s[i]) dataset['Age'] = age_list.values # + colab={"base_uri": "https://localhost:8080/", "height": 235} id="s7xYG85fLmmv" outputId="042eb192-7f44-4da6-af8a-14361f06a355" dataset.head() # + colab={"base_uri": "https://localhost:8080/", "height": 585} id="Mgiv_obdGUtw" outputId="4be92aa7-db06-4939-ba7b-57698905660b" ##Plotting idades plt.figure(figsize=(20,8)) ax = sns.countplot(dataset['Age']) ax.set_xticklabels(ax.get_xticklabels(), rotation=50, ha="right") ax.set_title('Frequência de idades') plt.tight_layout() # + colab={"base_uri": "https://localhost:8080/", "height": 585} id="LEnqYnQXP6Hn" outputId="90dbc764-1254-4d63-c633-4140ac2c0b36" ##Plotting pontos: Redação plt.figure(figsize=(20,8)) ax = sns.countplot(dataset['redação']) ax.set_xticklabels(ax.get_xticklabels(), rotation=50, ha="right") ax.set_title('Frequência de pontos: Redação') plt.tight_layout() # + colab={"base_uri": "https://localhost:8080/", "height": 585} id="fhWQTFEaRe_v" outputId="eb20efc6-cb12-48a9-bb14-7285ee2bca1e" ##Plotting pontos: Língua Portuguesa plt.figure(figsize=(20,8)) ax = sns.countplot(dataset['l por']) ax.set_xticklabels(ax.get_xticklabels(), rotation=50, ha="right") ax.set_title('Frequência de pontos: Língua Portuguesa') plt.tight_layout() # + colab={"base_uri": "https://localhost:8080/", "height": 585} id="FntbmGLHR8m4" outputId="145c5b14-526d-42d6-a155-2ace1a6150f9" ##Plotting pontos: Língua Inglesa plt.figure(figsize=(20,8)) ax = sns.countplot(dataset['l ing']) ax.set_xticklabels(ax.get_xticklabels(), rotation=50, ha="right") ax.set_title('Frequência de pontos: Língua Inglesa') plt.tight_layout() # + colab={"base_uri": "https://localhost:8080/", "height": 585} id="QCisdIvPR86H" outputId="409e369a-343d-41a1-bf8a-35871132f39a" ##Plotting pontos: Matemática plt.figure(figsize=(20,8)) ax = sns.countplot(dataset['mat']) ax.set_xticklabels(ax.get_xticklabels(), rotation=50, ha="right") ax.set_title('Frequência de pontos: Matemática') plt.tight_layout() # + colab={"base_uri": "https://localhost:8080/", "height": 585} id="bsYQtV01R885" outputId="28a34141-5920-4ab0-9f3c-d5c10b042968" ##Plotting pontos: Atualidades do Mercado Financeiro plt.figure(figsize=(20,8)) ax = sns.countplot(dataset['atu']) 
ax.set_xticklabels(ax.get_xticklabels(), rotation=50, ha="right") ax.set_title('Frequência de pontos: Atualidades do Mercado Financeiro') plt.tight_layout() # + colab={"base_uri": "https://localhost:8080/", "height": 585} id="L5Fj9rN5R8_P" outputId="dc9a1af1-3000-46b7-9fbe-ae1dc77b48f4" ##Plotting pontos de conhecimentos básicos plt.figure(figsize=(20,8)) ax = sns.countplot(dataset['conh bas']) ax.set_xticklabels(ax.get_xticklabels(), rotation=50, ha="right") ax.set_title('Frequência de pontos dos conhecimentos básicos') plt.tight_layout() # + colab={"base_uri": "https://localhost:8080/", "height": 585} id="o3Na2OaeR9Bo" outputId="415b3623-aabe-46b2-938c-fc701c0a2664" ##Plotting pontos: Probabilidade e Estatística plt.figure(figsize=(20,8)) ax = sns.countplot(dataset['prob']) ax.set_xticklabels(ax.get_xticklabels(), rotation=50, ha="right") ax.set_title('Frequência de pontos: Probabilidade e Estatística') plt.tight_layout() # + colab={"base_uri": "https://localhost:8080/", "height": 585} id="y_EzyoV4R9D4" outputId="193e9f02-3e5d-4e5c-d7a0-ac89fd077523" ##Plotting pontos: Conhecimentos Bancários plt.figure(figsize=(20,8)) ax = sns.countplot(dataset['c ban']) ax.set_xticklabels(ax.get_xticklabels(), rotation=50, ha="right") ax.set_title('Frequência de pontos: Conhecimentos Bancários') plt.tight_layout() # + colab={"base_uri": "https://localhost:8080/", "height": 585} id="eBQhE6ZbR9IH" outputId="6558f752-cc1f-49dc-f49a-93365ab5538d" ##Plotting pontos: Tecnologia da Informação plt.figure(figsize=(20,8)) ax = sns.countplot(dataset['tec']) ax.set_xticklabels(ax.get_xticklabels(), rotation=50, ha="right") ax.set_title('Frequência de pontos: Tecnologia da Informação') plt.tight_layout() # + colab={"base_uri": "https://localhost:8080/", "height": 585} id="Dw_tlOdXSMMA" outputId="346bca8c-cc1f-4a07-cc80-81dc1bc4412e" ##Plotting pontos de conhecimento especificos plt.figure(figsize=(20,8)) ax = sns.countplot(dataset['conh esp']) ax.set_xticklabels(ax.get_xticklabels(), rotation=50, ha="right") ax.set_title('Frequência de pontos dos conhecimento especificos') plt.tight_layout() # + colab={"base_uri": "https://localhost:8080/", "height": 585} id="p6MZjIcFSMON" outputId="11db42c8-2bb5-4a2a-e285-b4685bc8ead3" ##Plotting quantidade total de pontos plt.figure(figsize=(20,8)) ax = sns.countplot(dataset['pontos']) ax.set_xticklabels(ax.get_xticklabels(), rotation=50, ha="right") ax.set_title('Frequência da quantidade total de pontos') plt.tight_layout() # + id="Rb4JfWOXS6X4" # Duplicando dataset e dropando colunas menos relevantes para analise seguinte dataset2 = dataset dataset2 = dataset2.drop('class ac', 1) dataset2 = dataset2.drop('class pcd', 1) dataset2 = dataset2.drop('class ppp', 1) # + colab={"base_uri": "https://localhost:8080/", "height": 504} id="YYoeauZJSnnQ" outputId="e41bef44-e762-4d05-9a96-cc583f397f00" # Plotting de correlações plt.figure(figsize=(20,8)) sns.heatmap(dataset2.corr(),annot=True, fmt = ".2f", linewidth = 1 , cmap="gist_heat")
plotagens.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # This example shows how to use a `GridInterpolationKernel` module on an `ExactGP` model. This regression module is designed for when the inputs of the function you're modeling are one-dimensional. # # The use of inducing points allows for scaling up the training data by making computational complexity linear instead of cubic. # # Function to be modeled is y=sin(4*pi*x) # # GridInterpolationKernel exploits the regular grid structure of linspace for Toeplitz covariances. # # This notebook doesn't use cuda, in general we recommend GPU use if possible and most of our notebooks utilize cuda as well. # # Kernel interpolation for scalable structured Gaussian processes (KISS-GP) was introduced in this paper: # http://proceedings.mlr.press/v37/wilson15.pdf # + import math import torch import gpytorch from matplotlib import pyplot as plt from torch import nn, optim from torch.autograd import Variable from gpytorch.kernels import RBFKernel, GridInterpolationKernel from gpytorch.means import ConstantMean from gpytorch.likelihoods import GaussianLikelihood from gpytorch.random_variables import GaussianRandomVariable # Make plots inline # %matplotlib inline # - # Training points are in [0,1] every 1/999 train_x = Variable(torch.linspace(0, 1, 1000)) # Function to model is sin(4*pi*x) # Gaussian noise from N(0,0.04) train_y = Variable(torch.sin(train_x.data * (4 * math.pi)) + torch.randn(train_x.size()) * 0.2) # + # We use exact GP inference for regression class GPRegressionModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood): super(GPRegressionModel, self).__init__(train_x, train_y, likelihood) self.mean_module = ConstantMean(constant_bounds=[-1e-5,1e-5]) # Put a grid interpolation kernel over the RBF kernel self.base_covar_module = RBFKernel(log_lengthscale_bounds=(-5, 6)) self.covar_module = GridInterpolationKernel(self.base_covar_module, grid_size=400, grid_bounds=[(0, 1)]) # Register kernel lengthscale as parameter self.register_parameter('log_outputscale', nn.Parameter(torch.Tensor([0])), bounds=(-5,6)) def forward(self,x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) covar_x = covar_x.mul(self.log_outputscale.exp()) return GaussianRandomVariable(mean_x, covar_x) # The likelihood output is a Gaussian with predictive mean and variance likelihood = GaussianLikelihood() # Initialize our model model = GPRegressionModel(train_x.data, train_y.data, likelihood) # + # Find optimal model hyperparameters model.train() likelihood.train() # Use the adam optimizer optimizer = torch.optim.Adam([ {'params': model.parameters()}, # Includes GaussianLikelihood parameters ], lr=0.1) # "Loss" for GPs - the marginal log likelihood mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model) training_iterations = 30 for i in range(training_iterations): # Zero backprop gradients optimizer.zero_grad() # Get output from model output = model(train_x) # Calc loss and backprop derivatives loss = -mll(output, train_y) loss.backward() print('Iter %d/%d - Loss: %.3f' % (i + 1, training_iterations, loss.data[0])) optimizer.step() # + # Put model & likelihood into eval mode model.eval() likelihood.eval() # Initalize plot f, observed_ax = plt.subplots(1, 1, figsize=(4, 3)) # Test points every 0.02 in [0,1] inclusive test_x = Variable(torch.linspace(0, 1, 51)) # To 
make the predictions as accurate as possible, we're going to use lots of iterations of Conjugate Gradients # This ensures that the matrix solves are as accurate as possible with gpytorch.settings.max_cg_iterations(100): observed_pred = likelihood(model(test_x)) # Define plotting function def ax_plot(ax, rand_var, title): # Get lower and upper predictive bounds lower, upper = rand_var.confidence_region() # Plot the training data as black stars ax.plot(train_x.data.numpy(), train_y.data.numpy(), 'k*') # Plot predictive means as blue line ax.plot(test_x.data.numpy(), rand_var.mean().data.numpy(), 'b') # Plot confidence bounds as lightly shaded region ax.fill_between(test_x.data.numpy(), lower.data.numpy(), upper.data.numpy(), alpha=0.5) ax.set_ylim([-3, 3]) ax.legend(['Observed Data', 'Mean', 'Confidence']) ax.set_title(title) ax_plot(observed_ax, observed_pred, 'Observed Values (Likelihood)')
examples/kissgp_gp_regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] tags=[] # # Fetching supplimentary model input from the Planetary Computer # # This notebook produces additional input layers for the training data used in the [sentinel 1 flood detection](https://www.drivendata.org/competitions/81/detect-flood-water/) competition run by DrivenData. If fetches JRC Global Surface Water and NASADEM elevation data from the Planetary Computer (PC) STAC API and creates pixel-aligned chips that match what will be used in the evaluation process for the competition. # # The notebook will iterate through chip paths and query the PC STAC API for the `nasadem` and `jrc-gsw` Collections. It then creates a set of GeoTiffs by "coregistering" the raster data with the chip GeoTIFF, so that all of the additional input layers have the same CRS, bounds, and resolution as the chip. These additional layers are then saved alongside the training chip. # + from dataclasses import dataclass import os from tempfile import TemporaryDirectory from typing import List, Any, Dict from shapely.geometry import box, mapping import rasterio from rasterio.warp import reproject, Resampling import pyproj from osgeo import gdal from pystac_client import Client import planetary_computer as pc # - # #### Extract training chips # # Download the `flood-train-images.tgz` file from [competition Data Download page](https://www.drivendata.org/competitions/81/detect-flood-water/data/) and upload it to the Hub in the same directory as this notebook. # # Then run: # !tar -xvf flood-train-images.tgz # to uncompress this. Afterwards you should see an `train_features` directory containing all of the training chips ending in `.tif`. # # Use this directory to define the location of the chips, or if you have already uncompressed the chips elsewhere set the location here: TRAINING_DATA_DIR = "train_features" # #### Gather chip paths # # These chip paths will be used later in the notebook to process the chips. These paths should be to only one GeoTIFF per chip; for example, if both `VV.tif` and `VH.tif` are available for a chip, use only one of these paths. The GeoTIFFs at these paths will be read to get the bounds, CRS and resolution that will be used to fetch auxiliary input data. These can be relative paths. The auxiliary input data will be saved in the same directory as the GeoTIFF files at these paths. chip_paths = [] for file_name in os.listdir(TRAINING_DATA_DIR): if file_name.endswith("_vv.tif"): chip_paths.append(os.path.join(TRAINING_DATA_DIR, file_name)) print(f"{len(chip_paths)} chips found.") # #### Create the STAC API client # # This will be used in the methods below to query the PC STAC API. STAC_API = "https://planetarycomputer.microsoft.com/api/stac/v1" catalog = Client.open(STAC_API) # #### Define functions and classes # Define a `ChipInfo` dataclass to encapsulate the required data for the target chip. This includes geospatial information that will be used to coregister the incoming jrc-gsw and nasadem data. 
# + @dataclass class ChipInfo: """ Holds information about a training chip, including geospatial info for coregistration """ path: str prefix: str crs: Any shape: List[int] transform: List[float] bounds: rasterio.coords.BoundingBox footprint: Dict[str, Any] def get_footprint(bounds, crs): """Gets a GeoJSON footprint (in epsg:4326) from rasterio bounds and CRS""" transformer = pyproj.Transformer.from_crs(crs, "epsg:4326", always_xy=True) minx, miny = transformer.transform(bounds.left, bounds.bottom) maxx, maxy = transformer.transform(bounds.right, bounds.top) return mapping(box(minx, miny, maxx, maxy)) def get_chip_info(chip_path): """Gets chip info from a GeoTIFF file""" with rasterio.open(chip_path) as ds: chip_crs = ds.crs chip_shape = ds.shape chip_transform = ds.transform chip_bounds = ds.bounds # Use the first part of the chip filename as a prefix prefix = os.path.basename(chip_path).split("_")[0] return ChipInfo( path=chip_path, prefix=prefix, crs=chip_crs, shape=chip_shape, transform=chip_transform, bounds=chip_bounds, footprint=get_footprint(chip_bounds, chip_crs), ) # - # This method reprojects coregisters raster data to the bounds, CRS and resolution described by the ChipInfo. def reproject_to_chip( chip_info, input_path, output_path, resampling=Resampling.nearest ): """ Reproject a raster at input_path to chip_info, saving to output_path. Use Resampling.nearest for classification rasters. Otherwise use something like Resampling.bilinear for continuous data. """ with rasterio.open(input_path) as src: kwargs = src.meta.copy() kwargs.update( { "crs": chip_info.crs, "transform": chip_info.transform, "width": chip_info.shape[1], "height": chip_info.shape[0], "driver": "GTiff", } ) with rasterio.open(output_path, "w", **kwargs) as dst: for i in range(1, src.count + 1): reproject( source=rasterio.band(src, i), destination=rasterio.band(dst, i), src_transform=src.transform, src_crs=src.crs, dst_transform=chip_info.transform, dst_crs=chip_info.crs, resampling=Resampling.nearest, ) # This method will take in a set of items and a asset key and write a [VRT](https://gdal.org/drivers/raster/vrt.html) using signed HREFs. This is useful when there's multiple results from the query, so we can treat the resulting rasters as a single set of raster data. It uses the `planetary_computer.sign` method to sign the HREFs with a SAS token generated by the PC [Data Auth API](https://planetarycomputer.microsoft.com/docs/concepts/sas/). def write_vrt(items, asset_key, dest_path): """Write a VRT with hrefs extracted from a list of items for a specific asset.""" hrefs = [pc.sign(item.assets[asset_key].href) for item in items] vsi_hrefs = [f"/vsicurl/{href}" for href in hrefs] gdal.BuildVRT(dest_path, vsi_hrefs).FlushCache() # This method ties it all together - for a given `ChipInfo`, Collection, and Asset, write an auxiliary input chip with the given file name. def create_chip_aux_file( chip_info, collection_id, asset_key, file_name, resampling=Resampling.nearest ): """ Write an auxiliary chip file. The auxiliary chip file includes chip_info for the Collection and Asset, and is saved in the same directory as the original chip with the given file_name. 
""" output_path = os.path.join( os.path.dirname(chip_info.path), f"{chip_info.prefix}_{file_name}" ) search = catalog.search(collections=[collection_id], intersects=chip_info.footprint) items = list(search.get_items()) with TemporaryDirectory() as tmp_dir: vrt_path = os.path.join(tmp_dir, "source.vrt") write_vrt(items, asset_key, vrt_path) reproject_to_chip(chip_info, vrt_path, output_path, resampling=resampling) return output_path # #### Configurate the auxiliary input files that we will generate. # Define a set of parameters to pass into create_chip_aux_file aux_file_params = [ ("nasadem", "elevation", "nasadem.tif", Resampling.bilinear), ("jrc-gsw", "extent", "jrc-gsw-extent.tif", Resampling.nearest), ("jrc-gsw", "occurrence", "jrc-gsw-occurrence.tif", Resampling.nearest), ("jrc-gsw", "recurrence", "jrc-gsw-recurrence.tif", Resampling.nearest), ("jrc-gsw", "seasonality", "jrc-gsw-seasonality.tif", Resampling.nearest), ("jrc-gsw", "transitions", "jrc-gsw-transitions.tif", Resampling.nearest), ("jrc-gsw", "change", "jrc-gsw-change.tif", Resampling.nearest), ] # #### Generate auxiliary input chips for NASADEM and JRC # + tags=[] # Iterate over the chips and generate all aux input files. count = len(chip_paths) for i, chip_path in enumerate(chip_paths): print(f"({i+1} of {count}) {chip_path}") chip_info = get_chip_info(chip_path) for collection_id, asset_key, file_name, resampling_method in aux_file_params: print(f" ... Creating chip data for {collection_id} {asset_key}") create_chip_aux_file( chip_info, collection_id, asset_key, file_name, resampling=resampling_method ) # -
generate_auxiliary_input.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Extract ROI from atlas # + import argparse import glob import os def cmd_line_args(): parser = argparse.ArgumentParser(description='Extract the ROI from your monkey brains using the appropriate ROI reference number') # Add a mutually exclusive group, either path to a file, a list of files, or a glob search string can be provided. input_file_specifier = parser.add_mutually_exclusive_group(required=True) input_file_specifier.add_argument('-f','--file', type=str,help='Provide the path to an atlas label image that has been warped to native space') input_file_specifier.add_argument('-l','--list', type=str, help='Provide the path to a file that contains a list of each warped atlas label image for every monkey you wish to extact from, with each line containing a path for each subject. e.g /my/path/sub-1/dwi/my_file *newline* /my/path/sub-2/dwi/my_file') input_file_specifier.add_argument('-g','--glob',type = str, help='Provide a search string that will find all warped atlas label images you wish to extract from. * wildcards are allowed, google \'glob python\' for information on how to format your search strings. A typical example would be: \"/my/path/sub-*/dwi/*registered_labels.nii.gz\". Note - search string MUST be placed between \"\"') parser.add_argument("-r","--roi", type=int, required = True, help="Provide an integer that specifies the ROI you wish to extract. A list of ROIs and their respective integers can be found in the document \'atlas_labels.txt'") parser.add_argument("-o","--output",type=str, required = True, help="Required, specify mask output name.") args = parser.parse_args() print(' ') if args.file is not None: list_of_files = args.file elif args.list is not None: import csv list_of_files = [] with open(args.list, 'r') as file: reader = csv.reader(file) for row in reader: list_of_files.append(row) else: list_of_files = sorted(glob.glob(args.glob)) print('Selected files are: ' + str(list_of_files)) print(' ') return(args,list_of_files,args.roi,args.output) def extract_roi(args,list_of_files,roi,output): print('Performing ROI extraction...') print(' ') roi = str(roi) if args.file: cmd = 'fslmaths ' + list_of_files + ' -thr ' + roi + ' -uthr ' + roi + ' -bin ' + list_of_files[0:-8] + output print(cmd) os.system(cmd) unzip = 'gunzip ' + list_of_files[0:-8] + output + '.nii.gz' os.system(unzip) elif args.list: for counter, ignore in enumerate(list_of_files): file = str(list_of_files[counter][0]) cmd = 'fslmaths ' + file + ' -thr ' + roi + ' -uthr ' + roi + ' -bin ' + file[0:-7] + output print(cmd) os.system(cmd) unzip = 'gunzip ' + file[0:-7] + output + '.nii.gz' os.system(unzip) else: for file in list_of_files: cmd = 'fslmaths ' + file + ' -thr ' + roi + ' -uthr ' + roi + ' -bin ' + file[0:-7] + output print(cmd) os.system(cmd) unzip = 'gunzip ' + file[0:-7] + output + '.nii.gz' os.system(unzip) print(' ') print('Completed') if __name__ == "__main__": args,list_of_files,roi,output = cmd_line_args() extract_roi(args,list_of_files,roi,output)
extract_roi.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # # Recognizing hand-written digits # # This example shows how scikit-learn can be used to recognize images of # hand-written digits, from 0-9. # # + # Author: <NAME> <gael dot varoquaux at normalesup dot org> # License: BSD 3 clause # Standard scientific Python imports import matplotlib.pyplot as plt # Import datasets, classifiers and performance metrics from sklearn import datasets, svm, metrics from sklearn.model_selection import train_test_split # - # ## Digits dataset # # The digits dataset consists of 8x8 # pixel images of digits. The ``images`` attribute of the dataset stores # 8x8 arrays of grayscale values for each image. We will use these arrays to # visualize the first 4 images. The ``target`` attribute of the dataset stores # the digit each image represents and this is included in the title of the 4 # plots below. # # Note: if we were working from image files (e.g., 'png' files), we would load # them using :func:`matplotlib.pyplot.imread`. # # # + digits = datasets.load_digits() _, axes = plt.subplots(nrows=1, ncols=4, figsize=(10, 3)) for ax, image, label in zip(axes, digits.images, digits.target): ax.set_axis_off() ax.imshow(image, cmap=plt.cm.gray_r, interpolation="nearest") ax.set_title("Training: %i" % label) # - # ## Classification # # To apply a classifier on this data, we need to flatten the images, turning # each 2-D array of grayscale values from shape ``(8, 8)`` into shape # ``(64,)``. Subsequently, the entire dataset will be of shape # ``(n_samples, n_features)``, where ``n_samples`` is the number of images and # ``n_features`` is the total number of pixels in each image. # # We can then split the data into train and test subsets and fit a support # vector classifier on the train samples. The fitted classifier can # subsequently be used to predict the value of the digit for the samples # in the test subset. # # # + # flatten the images n_samples = len(digits.images) data = digits.images.reshape((n_samples, -1)) # Create a classifier: a support vector classifier clf = svm.SVC(gamma=0.001) # Split data into 50% train and 50% test subsets X_train, X_test, y_train, y_test = train_test_split( data, digits.target, test_size=0.5, shuffle=False ) # Learn the digits on the train subset clf.fit(X_train, y_train) # Predict the value of the digit on the test subset predicted = clf.predict(X_test) # - # Below we visualize the first 4 test samples and show their predicted # digit value in the title. # # _, axes = plt.subplots(nrows=1, ncols=4, figsize=(10, 3)) for ax, image, prediction in zip(axes, X_test, predicted): ax.set_axis_off() image = image.reshape(8, 8) ax.imshow(image, cmap=plt.cm.gray_r, interpolation="nearest") ax.set_title(f"Prediction: {prediction}") # :func:`~sklearn.metrics.classification_report` builds a text report showing # the main classification metrics. # # print( f"Classification report for classifier {clf}:\n" f"{metrics.classification_report(y_test, predicted)}\n" ) # We can also plot a `confusion matrix <confusion_matrix>` of the # true digit values and the predicted digit values. # # # + disp = metrics.ConfusionMatrixDisplay.from_predictions(y_test, predicted) disp.figure_.suptitle("Confusion Matrix") print(f"Confusion matrix:\n{disp.confusion_matrix}") plt.show()
exercises/deep_learning/.ipynb_checkpoints/plot_digits_classification-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import cv2

interpreter = tf.lite.Interpreter(model_path="../releases/pose/mobilenetv1-075.tflite")
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
print(input_details)

# +
im = cv2.imread("image1.jpeg")
im = im.astype(np.float32)

def resize_add_padding(im, t_h, t_w):
    '''
    Resizes an image to a target size, adding padding if necessary to maintain the aspect ratio
    - Arguments:
        - im (np.array): shape (h, w, 3)
        - t_h (int): target height
        - t_w (int): target width
    '''
    min_idx = [t_h, t_w].index(min(t_h, t_w))
    ratio = [t_h, t_w][min_idx] / im.shape[min_idx]
    new_im = np.zeros((t_h, t_w, 3), dtype=im.dtype)
    res_h, res_w = int(im.shape[0] * ratio), int(im.shape[1] * ratio)
    res_im = cv2.resize(im, (res_w, res_h))
    new_im[:res_h, :res_w, :] = res_im
    return new_im

new_im = resize_add_padding(im, 353, 257)
plt.imshow(new_im.astype(int))
print(new_im.shape)

output_details = interpreter.get_output_details()
# -

new_im = np.expand_dims(new_im, 0)
interpreter.set_tensor(input_details[0]['index'], new_im)
interpreter.invoke()
output = interpreter.get_tensor(output_details[0]['index'])
output.shape

# Map input tensor names to their indices
name_to_index_d = {d['name']: d['index'] for d in input_details}

output_details
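# A rough way to turn the raw output into keypoint coordinates, assuming the first output
# tensor is a keypoint heatmap of shape (1, h, w, num_keypoints) as in PoseNet-style
# MobileNet checkpoints (check output_details to confirm for this file). This takes the
# arg-max of each heatmap channel and ignores the offset refinement, so it is only a sketch.

# +
heatmaps = output[0]  # (h, w, num_keypoints)
h, w, num_keypoints = heatmaps.shape

keypoints = []
for k in range(num_keypoints):
    flat_idx = np.argmax(heatmaps[:, :, k])
    row, col = np.unravel_index(flat_idx, (h, w))
    # scale grid coordinates back to the 353 x 257 network input
    keypoints.append((row * 353 / h, col * 257 / w))

print(keypoints)
# -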
temp/pose.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/priiiiiiii/let-s-upgrade/blob/master/Assignment_day_5.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="3J4NFD-Jaqm5" colab_type="text" # Assignment day 5 # + [markdown] id="h7G_60Tkasrx" colab_type="text" # Q1. Write a program to identify sub list [115] is there in the given list in the same order, if yes print "it's a match" If no then print "it's gone" # + id="jE9mLafLbqFT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 88} outputId="a3fbcebb-5726-4527-80f4-96638783b58d" test_list = [1,5,6,4,1,2,3,5] sub_list = [1, 1,5] print ("Original list : " + str(test_list)) print ("Original sub list : " + str(sub_list)) flag = 0 if(all(x in test_list for x in sub_list)): flag = 1 if (flag) : print ("It's match") else : print ("It's gone.") # + [markdown] id="wb1g8ZD8bwZI" colab_type="text" # Q2.Make a function for prime number and use filter to filter out all the prime number from 1-2500 # + id="mIe7KZPIev3a" colab_type="code" colab={} prime_numbers = 0 def is_prime_number(x): if x >= 2: for y in range(2,x): if not ( x % y ): return False else: return False return True # + id="iWApW2MvfAjW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="45f67283-75fd-4273-9c69-f03bcc00010c" is_prime_number(2) # + id="sGE6geshfUVa" colab_type="code" colab={} lst=list(range(1,2500)) # + id="M-gTY-pZfcb9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="f5674237-7d13-40a5-8aa7-f2bbc31ab9e1" print(lst) # + id="tQ5a4hlhhqfS" colab_type="code" colab={} lst_prime=filter(is_prime_number, lst) # + id="kMsWNgMVhzgq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="e48dc832-3f36-4b27-9633-c54d256174ee" print(list(lst_prime)) # + [markdown] id="C-Jvr99hgsRM" colab_type="text" # Q3. Make a lambda function for capitalting the whole sentence past using argument and map all the sentences in the list with the lambda function # + id="u-v34bhmiNnm" colab_type="code" colab={} lst=["hey this is sai", "i am in mumbai"] # + id="tfj-kqUDiTH8" colab_type="code" colab={} lst_new=map(lambda lst: lst.title() , lst) # + id="R2FNNy6kiZUc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="43aeb93f-f53f-47af-852e-ebc2aa8eaea5" print(list(lst_new))
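# Returning to Q1: the membership check above does not verify that the elements of
# sub_list appear in test_list in the same order. A sketch of an order-aware check
# (treating sub_list as a subsequence of test_list) could look like this.

# +
def is_ordered_sublist(sub, full):
    it = iter(full)
    return all(any(x == y for y in it) for x in sub)

print("It's match" if is_ordered_sublist(sub_list, test_list) else "It's gone.")
# -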
Assignment_day_5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # ## Classification of the Palmer penguins data # # This document is part of the showcase, where I replicate the same brief and simple analyses with different tools. # # This particular file focuses on simple classification of the Palmer penguins data from the tidytuesday project. # # The data can be found in <https://github.com/rfordatascience/tidytuesday/tree/master/data/2020/2020-07-28>. They consist of one documents: *penguins.csv* contains information and measurements about some penguins. # # For the specific analysis I will use **Python** and **scikit-learn** (plus **Jupyter notebook**). # # We start by loading the packages: import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import train_test_split # and the dataset: penguins = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-07-28/penguins.csv') # We can have a look at the schema of the data: penguins.info(verbose=True) # and the summary statistics: penguins.describe() # Our main goal is to try and build a model that classifies the species of the penguins based on their other characteristics. # # We start by checking for missing values: sns.heatmap(penguins.isnull(), cbar=False) penguins.isna().sum() # The plot shows that 5 of the features contain missing data (11 regard sex and 2 for each of the penguin measurements). In addition there are 2 penguins for which we have neither sex information nor the measurements, and 9 additional penguins for which we are only missing the sex. # # In practice, since this is a small amount of missing data we could drop all of them, but for the purposes of this showcase, we are going to drop only those that are missing the majority of information and impute the ones that are missing only the sex (later). penguins = penguins.dropna(axis = 0, thresh=4, how = "all") penguins.isna().sum() # Now we can plot the classes: penguins['species'].value_counts().plot.bar(color=['red', 'green', 'blue']) # This is not very bad for a small and simple dataset like this, but once again for this showcase we are going to try and balance the classes (later). # # We can also plot the scatterplots, conditional distributions and boxplots and also check the individual correlations (for the continuous features). # # We exclude the year each penguin was recorded. penguins = penguins.drop(columns='year') sns.pairplot(penguins, hue="species") # There are some pretty clear patterns patterns, so we are going to use all the features. # # First, we are going to split the data into features and label: species = penguins.pop('species') # Now we can split the dataset to training and testing: X_train, X_test, y_train, y_test = train_test_split(penguins, species, test_size=0.2, random_state=1, stratify=species) # The first model we are going to use is a support vector machine. 
We define the model and the pre-processing steps: # * Switch all nominal predictors to one-hot encoding # * k nearest neighbor imputation for the sex feature # * Normalize all numeric predictors # * Apply the classifier # + from sklearn.pipeline import Pipeline from sklearn import svm from sklearn.impute import KNNImputer from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import StandardScaler pipe_svc = Pipeline([ ('onehot', OneHotEncoder(handle_unknown='ignore', sparse=False)), ('imputer', KNNImputer(n_neighbors=2, weights="uniform")), ('scaler', StandardScaler(with_mean=False)), ('svc', svm.SVC())]) # - # We fit the training data into the pipeline: pipe_svc.fit(X_train, y_train) # We can check the predictions of the testing data: pipe_svc.score(X_test, y_test) # And finally, we can check some metrics: # + from sklearn.metrics import classification_report y_pred_svc = pipe_svc.predict(X_test) print(classification_report(y_test, y_pred_svc)) # - # We are also going to fit a simple decision tree model. We specify a new pipeline with the new algorithm and the same pre-processing steps: from sklearn import tree pipe_tree = Pipeline([ ('onehot', OneHotEncoder(handle_unknown='ignore', sparse=False)), ('imputer', KNNImputer(n_neighbors=2, weights="uniform")), ('scaler', StandardScaler(with_mean=False)), ('dec_tree', tree.DecisionTreeClassifier())]) # And fit on the training data data: pipe_tree.fit(X_train, y_train) # And once again test on the testing data: pipe_tree.score(X_test, y_test) # And check on the metrics: y_pred_tree = pipe_tree.predict(X_test) print(classification_report(y_test, y_pred_tree)) # In most aspects the support vector machine classifier performed significantly better, though it struggled with classifying the Chinstrap penguins.
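# To make the per-class behaviour easier to see than the aggregate score (for example,
# which species the models confuse Chinstrap with), we can also draw the confusion
# matrices for both classifiers. One way to do this with recent scikit-learn versions:

# +
from sklearn.metrics import ConfusionMatrixDisplay

ConfusionMatrixDisplay.from_predictions(y_test, y_pred_svc)
plt.show()

ConfusionMatrixDisplay.from_predictions(y_test, y_pred_tree)
plt.show()
# -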
PalmerPenguins_Python_scikit-learn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/adarsh5691/dmdw-lab-18cse004/blob/main/Assignment_5.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="rPwHmLbhkxMe" # # Assignment-5(Dissimilarity Matrix for Binary Attributes) # + id="yrEs4wgQklW1" import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sbs # + colab={"base_uri": "https://localhost:8080/", "height": 215} id="nWHD6rQDl4mI" outputId="f9e85f67-25bf-4907-eb09-10dc4cd05aff" url="https://raw.githubusercontent.com/adarsh5691/dmdw-lab-18cse004/main/student-mat.csv" df=pd.read_csv(url) df.head() # + colab={"base_uri": "https://localhost:8080/", "height": 195} id="hbS-hoKCmGTE" outputId="66f259be-bcf0-4252-90a7-855ebc29ffc3" #extract the dataset from the original dataset dfs=df[['schoolsup','famsup','paid','activities','nursery','romantic','internet','higher']] dfs.head() # + colab={"base_uri": "https://localhost:8080/", "height": 195} id="B3826yQJnfic" outputId="dc7a87de-112d-4152-fd78-9862dc67333a" #convert binary into 0,1 format dfs=dfs.replace('no',0) dfs=dfs.replace(to_replace='yes',value=1) dfs.head() # + colab={"base_uri": "https://localhost:8080/"} id="SZ3Dg272oO70" outputId="f1c0bd42-6259-4a29-84c4-3021d54710e0" # create obj and find the distance or the dissimilarity matrix using scipy n=np.array(dfs[['schoolsup','famsup']]) n=n.reshape(-1,2)# -1 => numpy will calculate whatever will be the no. and 2 => n.shape # + colab={"base_uri": "https://localhost:8080/"} id="7hS_Z6xLpAA8" outputId="37bc948d-a806-412b-9a37-84f6b1508865" m=np.array(dfs[['romantic','internet']]) m=m.reshape(-1,2) m.shape # + id="vLGzxNcjpKBx" from scipy.spatial import distance # + colab={"base_uri": "https://localhost:8080/"} id="cYoJiS2BpkcL" outputId="1fd97b86-0dcb-4114-ecf2-1e1394f6ea4b" dist_matrix=distance.cdist(n,m) dist_matrix.shape # + colab={"base_uri": "https://localhost:8080/"} id="_mYSZjzup9Df" outputId="59e91969-27e0-4b8e-e79e-fa10533f3ed1" print(dist_matrix) # + colab={"base_uri": "https://localhost:8080/", "height": 278} id="EtyuHTMxqfVe" outputId="ac2f2e51-5001-4865-dce4-5ab05a229570" sbs.heatmap(dist_matrix) plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 126} id="-BCZvRYbqrMI" outputId="783d8fcc-9ac2-4f20-b0e6-c9b6935f2884" #numerical attribute #extract df.head(2) # + colab={"base_uri": "https://localhost:8080/", "height": 195} id="mG9rleUwrW4C" outputId="3eb22143-6d66-4a71-ac5e-9939726e0afe" numeric=df[['age','Medu','Fedu','studytime','failures']] numeric.head() # + colab={"base_uri": "https://localhost:8080/"} id="mI0j8-Hbrwu1" outputId="634b1b39-58b9-407a-fc10-9de9bbc6247a" num1=np.array(numeric[['age','failures']]) num1.reshape(-1,2) num1.shape # + colab={"base_uri": "https://localhost:8080/"} id="9jHvN3sdsK9x" outputId="ff0a2bfd-9e2e-4cfc-fcd5-e3a109c1d542" num2=np.array(numeric[['Fedu','Medu']]) num2.reshape(-1,2) num2.shape # + colab={"base_uri": "https://localhost:8080/"} id="qqJnJa_SsW7G" outputId="f6a96876-a1b3-4240-d84d-98c2723b29ba" #Euclidean distance dist_matrix=distance.cdist(num1,num2) print(dist_matrix) # + colab={"base_uri": "https://localhost:8080/", "height": 278} id="ehpalU-esokb" 
outputId="dab2f3b1-12e7-471d-e06e-fff07eaa66a1" sbs.heatmap(dist_matrix) plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 195} id="eLjkKQxzsumI" outputId="13c3526b-0510-4c66-d098-9b6dd545b145" #Nominal Attributes(name or chars or string) nomi=df[['Mjob','Fjob','reason','guardian']] nomi.head() # + colab={"base_uri": "https://localhost:8080/", "height": 195} id="7_sX_woDtdU8" outputId="87f520a9-2992-4503-f95a-2fc86622d15c" nomi=nomi.replace('at_home','home') nomi.head() # + id="YoFh1hn3t22j" # 1st convert into categorical/ ordinal nomi=nomi.astype('category') # + id="so4t45IBuvRo" # labelencoder gives a unique and normalised nalue like from 0,1,2 etc from sklearn.preprocessing import LabelEncoder lb=LabelEncoder() # + id="fZCm_GJZvAzV" #fit the labelencoder and return the label value nomi['guardian']=lb.fit_transform(nomi['guardian']) nomi['Mjob']=lb.fit_transform(nomi['Mjob']) nomi['Fjob']=lb.fit_transform(nomi['Fjob']) nomi['reason']=lb.fit_transform(nomi['reason']) # + colab={"base_uri": "https://localhost:8080/", "height": 195} id="8fHpe0QTvofj" outputId="d5dcbdd2-655b-4f21-bdf0-67772ade0ec1" nomi.head() # + colab={"base_uri": "https://localhost:8080/"} id="YlFeXeEtwAYr" outputId="5a686ad4-bbac-4d55-b647-30bc580051ca" nom1=np.array(nomi) nom1.reshape(-1,2) nom1.shape # + colab={"base_uri": "https://localhost:8080/"} id="w9jIzQEuwoxr" outputId="5c17a64e-cc8c-4abd-84a7-643f18b15ba2" nom2=np.array(nomi) nom2.reshape(-1,2) nom2.shape # + colab={"base_uri": "https://localhost:8080/"} id="0XCWXrJZwuHA" outputId="cb5544ed-2e71-4963-c6de-4360d0ff9205" dist_matrix2=distance.cdist(nom1,nom2) dist_matrix2.shape # + colab={"base_uri": "https://localhost:8080/"} id="f7a4uH_wxltG" outputId="f1b000bf-4dee-4bc5-bf09-0fe6aca87e82" print(dist_matrix2) # + colab={"base_uri": "https://localhost:8080/", "height": 278} id="rBFCBPbxxutI" outputId="1bfe8223-f3cd-43ba-a465-5b0323e77690" sbs.heatmap(dist_matrix2) plt.show()
Assignment_5.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import sklearn.feature_extraction.text as skf
import sklearn.metrics.pairwise as skd
import sklearn.model_selection as skm
import sklearn.metrics as skms
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import TruncatedSVD
from tqdm.auto import tqdm
from sklearn.pipeline import Pipeline
from nltk.corpus import stopwords

import numpy as np
import pandas as pd
pd.set_option('display.max_colwidth', 1000)
# -

ru_stopwords = stopwords.words("russian")

data = pd.read_csv("./labeled.csv")
data.head()

data['toxic'].value_counts()

data.shape

data['comment'] = data['comment'].str.replace("\n"," ")

X, y = data['comment'].tolist(), data['toxic'].values.squeeze()

# ## Task 2

# %%time
vec = skf.TfidfVectorizer(ngram_range=(1, 3), min_df=10)
svd = TruncatedSVD(n_components=100, n_iter=100, random_state=42)  # no zeros requested, so no zeros

X_vec = vec.fit_transform(X)
X_vec = svd.fit_transform(X_vec)

# ### a - Compute the similarity between texts 3 and 12666 in the dataset (labeled.csv from the seminar)

skd.cosine_similarity([X_vec[2]], [X_vec[12665]])

# ### b - Find the 3 texts closest to text number 43; print the texts themselves and the similarity values, not just the indices of those texts.

def k_nearest(idx: int, k: int = 3):
    sims = np.array(list(map(
        lambda x: skd.cosine_similarity([x], [X_vec[idx]]),
        tqdm(X_vec)
    ))).squeeze()
    nearest = np.argsort(sims, kind='heapsort')[-k-1:]  ## search for k+1 neighbors so we can skip the first one, which is our own vector
    result = []
    for n_idx in nearest[::-1][1:]:  # skip the first result
        result.append({'idx': n_idx, 'similarity': sims[n_idx], 'text': X[n_idx]})
    return result

pd.DataFrame(k_nearest(42, k=3))  # dataframe for visualization purposes

# ## Task 3

X_train, X_test, y_train, y_test = skm.train_test_split(np.array(X), y, test_size=0.2, random_state=1993)

# #### First model - Naive Bayes + CountVectorizer

nb = Pipeline([
    (
        "vec",
        skf.CountVectorizer(
            lowercase=True,
            max_df=0.9,
            max_features=10000,
            min_df=2,
            ngram_range=(1,1),
            stop_words=None
        )
    ),
    (
        "clf",
        MultinomialNB(alpha=0.9, fit_prior=False)
    )
])

nb.fit(X_train, y_train)

print(skms.classification_report(y_test, nb.predict(X_test)))

# #### Second model - LogReg + TF-IDF

logreg = Pipeline([
    (
        'vec',
        skf.TfidfVectorizer(
            stop_words = None,
            ngram_range = (1, 1),
            min_df = 1,
            max_df = 1.0,
            max_features = 75000,
            lowercase = True
        )
    ),
    (
        'clf',
        LogisticRegression(
            C = 100.0,
            penalty = 'l2',
            fit_intercept = True
        )
    )
])

logreg.fit(X_train, y_train)

print(skms.classification_report(y_test, logreg.predict(X_test)))

# ### Printing the top 10 most toxic texts according to each model

# sort by probability of the toxic class
top_nb = np.argsort(nb.predict_proba(X)[:, 1])[-10:]
top_logreg = np.argsort(logreg.predict_proba(X)[:, 1])[-10:]

# From the texts shown below it is clear that the CountVectorizer + Naive Bayes model needs a noticeably larger number of toxic words before it assigns a text the toxic class (or a high probability of it). The TF-IDF + LogReg model, by contrast, also handles short texts well.
# Subjectively, texts 3 and 4 from the NB model are not actually toxic, but they contain many words (navalny, solovyov, dota, etc.) that most likely occur in toxic contexts in this corpus, which is probably why they were marked as toxic.

X = np.array(X)
for i, (text_nb, text_logreg) in enumerate(zip(X[top_nb], X[top_logreg])):
    print(("="*50) + f"[{i}]" + ("="*50))
    print("--------[NB-scored text]--------")
    print(text_nb)
    print("------[logreg-scored text]------")
    print(text_logreg)
assignment_4/Tasks2-3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Notoma # # Write articles for your static gen blog in Notion. # # # # %METADATA% # layout: default # nav_order: 1 # title: Introduction # <a href="https://codeclimate.com/github/nategadzhi/notoma/maintainability"><img src="https://api.codeclimate.com/v1/badges/70943357e5d2c54c153a/maintainability" /></a> # <a href="https://pypi.org/project/notoma/"><img src="https://img.shields.io/pypi/v/notoma" alt="pypi" /></a> # ![Linters](https://github.com/nategadzhi/notoma/workflows/Linters/badge.svg) # - [Documentation website](https://nategadzhi.github.io/notoma/) # - [Using the CLI](https://nategadzhi.github.io/notoma/using-the-cli) # - [Contributing](https://nategadzhi.github.io/notoma/contributing) # - [Supported Markdown Tags](https://nategadzhi.github.io/notoma/supported-markdown-tags) # --- # ## Install # Notoma is available via Pip or Homebrew: # ```bash # # Installing with pip, use this if you plan using Notoma as a python library. # pip install notoma # ``` # Installing with Homebrew on Mac OS. # ```bash # brew install nategadzhi/notoma/notoma # ``` # --- # ## What can you do with Notoma # Notoma provides commands to: # - Convert contents of your Notion Blog database to a bunch of Markdown files. # - *Coming soon*: Watch Notion Blog database for updates and regenerate Markdown files on any updates. # - *Coming soon*: Create a new Notion database for your Blog with all required fields. # Basic usage example: this command will convert only published posts from a Notion blog database to the `./posts/ directory`. # # ```bash # notoma convert --dest ./posts/ # ``` # # This example assumes that you have a `.env` config file with authentication and blog url parameters in it. # #### Authenticating in Notion # # Notoma uses an internal Notion API, and that, unfortunately, requires you to provide an authentication token `token_v2` that you can find in your notion.so cookes. # # You can provide `token_v2` option to every command line call, or store it in your environment, or [`.env` config file](.env.sample). # --- # ## Notion database structure # Notoma has very few expectations about how your Notion is structured. Here's a [public example database](https://www.notion.so/respawn/7b46cea379bd4d45b68860c2fa35a2d4?v=b4609f6aae0d4fc1adc65a73f72d0e21). # # Notoma requires that your Notion blog database has the following **properties**: # - **Published**: whether the article is published, or is still a draft # - **Title**: Will be used to create a file name for that article's Markdown equivalent file. *Won't be used in the article itself.* # # Notoma tries to parse other properties and add them as front matter into the resulting Markdown articles: # - **Published at** will be used as publicataion date for the article, if present. # - **Categories** will be used as `categories` front matter key, so it's expected to be a **multiple choice** propery. #
notebooks/index.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import numpy as np from IPython.display import display plt.style.use('seaborn') # - def ecdf(arr): x = sorted(arr) y = np.arange(len(arr)) / len(arr) return x, y def bootstrap_replicate_1d(data, func): """Generate bootstrap replicate of 1D data.""" bs_sample = np.random.choice(data, len(data)) return func(bs_sample) def draw_bs_reps(data, func, size=1): """Draw bootstrap replicates.""" # Initialize array of replicates: bs_replicates bs_replicates = np.empty(size) # Generate replicates for i in range(size): bs_replicates[i] = bootstrap_replicate_1d(data, func) return bs_replicates def diff_of_means(arr1, arr2): return abs(np.mean(arr1) - np.mean(arr2)) def draw_bs_pairs_linreg(x, y, size=1): """Perform pairs bootstrap for linear regression.""" # Set up array of indices to sample from: inds inds = np.arange(len(x)) # Initialize replicates: bs_slope_reps, bs_intercept_reps bs_slope_reps = np.empty(size) bs_intercept_reps = np.empty(size) # Generate replicates for i in range(size): bs_inds = np.random.choice(inds, size=len(inds)) bs_x, bs_y = x[bs_inds], y[bs_inds] bs_slope_reps[i], bs_intercept_reps[i] = np.polyfit(bs_x, bs_y, 1) return bs_slope_reps, bs_intercept_reps df_1975 = pd.read_csv('finch_beaks_1975.csv', skiprows=1, sep=',', names=['band','species','length','beak_depth']).drop('band', axis=1) df_1975['year'] = 1975 df_2012 = pd.read_csv('finch_beaks_2012.csv', skiprows=1, sep=',', names=['band','species','length','beak_depth']).drop('band', axis=1) df_2012['year'] = 2012 display(df_1975.head()) # ### EDA of beak depths of Darwin's finches # + df = pd.concat([df_1975, df_2012]) df = df.drop(['species', 'length'], axis=1) # Create bee swarm plot _ = sns.swarmplot(x='year', y='beak_depth', data=df) # Label the axes _ = plt.xlabel('year') _ = plt.ylabel('beak depth (mm)') # Show the plot plt.show() # - # ### ECDFs of beak depths # + bd_1975 = np.array([ 8.4 , 8.8 , 8.4 , 8. , 7.9 , 8.9 , 8.6 , 8.5 , 8.9 , 9.1 , 8.6 , 9.8 , 8.2 , 9. , 9.7 , 8.6 , 8.2 , 9. , 8.4 , 8.6 , 8.9 , 9.1 , 8.3 , 8.7 , 9.6 , 8.5 , 9.1 , 9. , 9.2 , 9.9 , 8.6 , 9.2 , 8.4 , 8.9 , 8.5 , 10.4 , 9.6 , 9.1 , 9.3 , 9.3 , 8.8 , 8.3 , 8.8 , 9.1 , 10.1 , 8.9 , 9.2 , 8.5 , 10.2 , 10.1 , 9.2 , 9.7 , 9.1 , 8.5 , 8.2 , 9. , 9.3 , 8. , 9.1 , 8.1 , 8.3 , 8.7 , 8.8 , 8.6 , 8.7 , 8. , 8.8 , 9. , 9.1 , 9.74, 9.1 , 9.8 , 10.4 , 8.3 , 9.44, 9.04, 9. , 9.05, 9.65, 9.45, 8.65, 9.45, 9.45, 9.05, 8.75, 9.45, 8.35]) bd_2012 = np.array([ 9.4 , 8.9 , 9.5 , 11. , 8.7 , 8.4 , 9.1 , 8.7 , 10.2 , 9.6 , 8.85, 8.8 , 9.5 , 9.2 , 9. , 9.8 , 9.3 , 9. , 10.2 , 7.7 , 9. , 9.5 , 9.4 , 8. , 8.9 , 9.4 , 9.5 , 8. , 10. , 8.95, 8.2 , 8.8 , 9.2 , 9.4 , 9.5 , 8.1 , 9.5 , 8.4 , 9.3 , 9.3 , 9.6 , 9.2 , 10. , 8.9 , 10.5 , 8.9 , 8.6 , 8.8 , 9.15, 9.5 , 9.1 , 10.2 , 8.4 , 10. , 10.2 , 9.3 , 10.8 , 8.3 , 7.8 , 9.8 , 7.9 , 8.9 , 7.7 , 8.9 , 9.4 , 9.4 , 8.5 , 8.5 , 9.6 , 10.2 , 8.8 , 9.5 , 9.3 , 9. , 9.2 , 8.7 , 9. , 9.1 , 8.7 , 9.4 , 9.8 , 8.6 , 10.6 , 9. , 9.5 , 8.1 , 9.3 , 9.6 , 8.5 , 8.2 , 8. , 9.5 , 9.7 , 9.9 , 9.1 , 9.5 , 9.8 , 8.4 , 8.3 , 9.6 , 9.4 , 10. , 8.9 , 9.1 , 9.8 , 9.3 , 9.9 , 8.9 , 8.5 , 10.6 , 9.3 , 8.9 , 8.9 , 9.7 , 9.8 , 10.5 , 8.4 , 10. , 9. 
, 8.7 , 8.8 , 8.4 , 9.3 , 9.8 , 8.9 , 9.8 , 9.1 ]) # Compute ECDFs x_1975, y_1975 = ecdf(bd_1975) x_2012, y_2012 = ecdf(bd_2012) # Plot the ECDFs _ = plt.plot(x_1975, y_1975, marker='.', linestyle='none') _ = plt.plot(x_2012,y_2012, marker='.', linestyle='none') # Set margins plt.margins(0.02) # Add axis labels and legend _ = plt.xlabel('beak depth (mm)') _ = plt.ylabel('ECDF') _ = plt.legend(('1975', '2012'), loc='lower right') # Show the plot plt.show() # - def draw_bs_reps(data, func, size=1): """Draw bootstrap replicates.""" # Initialize array of replicates: bs_replicates bs_replicates = np.empty(size) # Generate replicates for i in range(size): bs_replicates[i] = bootstrap_replicate_1d(data, func) return bs_replicates # ### Parameter estimates of beak depths bs_replicates_1975 - bs_replicates_2012 # + # Compute the difference of the sample means: mean_diff mean_diff = diff_of_means(bd_2012,bd_1975) # Get bootstrap replicates of means bs_replicates_1975 = draw_bs_reps(bd_1975, np.mean, size=10000) bs_replicates_2012 = draw_bs_reps(bd_2012, np.mean, size=10000) # Compute samples of difference of means: bs_diff_replicates bs_diff_replicates = bs_replicates_2012 - bs_replicates_1975 # Compute 95% confidence interval: conf_int conf_int = np.percentile(bs_diff_replicates, [2.5, 97.5]) # Print the results print('difference of means =', mean_diff, 'mm') print('95% confidence interval =', conf_int, 'mm') # - # ### Hypothesis test: Are beaks deeper in 2012? # + # Compute mean of combined data set: combined_mean combined_mean = np.mean(np.concatenate((bd_1975, bd_2012))) # Shift the samples bd_1975_shifted = bd_1975 - np.mean(bd_1975) + combined_mean bd_2012_shifted = bd_2012 - np.mean(bd_2012) + combined_mean # Get bootstrap replicates of shifted data sets bs_replicates_1975 = draw_bs_reps(bd_1975_shifted, np.mean, size=10000) bs_replicates_2012 = draw_bs_reps(bd_2012_shifted, np.mean, size=10000) # Compute replicates of difference of means: bs_diff_replicates bs_diff_replicates = bs_replicates_2012 - bs_replicates_1975 # Compute the p-value p = np.sum(bs_diff_replicates >= mean_diff) / len(bs_diff_replicates) # Print p-value print('p =', p) # - # ### EDA of beak length and depth # + bl_1975 = np.array([13.9 , 14. , 12.9 , 13.5 , 12.9 , 14.6 , 13. , 14.2 , 14. , 14.2 , 13.1 , 15.1 , 13.5 , 14.4 , 14.9 , 12.9 , 13. , 14.9 , 14. , 13.8 , 13. , 14.75, 13.7 , 13.8 , 14. , 14.6 , 15.2 , 13.5 , 15.1 , 15. , 12.8 , 14.9 , 15.3 , 13.4 , 14.2 , 15.1 , 15.1 , 14. , 13.6 , 14. , 14. , 13.9 , 14. , 14.9 , 15.6 , 13.8 , 14.4 , 12.8 , 14.2 , 13.4 , 14. , 14.8 , 14.2 , 13.5 , 13.4 , 14.6 , 13.5 , 13.7 , 13.9 , 13.1 , 13.4 , 13.8 , 13.6 , 14. , 13.5 , 12.8 , 14. , 13.4 , 14.9 , 15.54, 14.63, 14.73, 15.73, 14.83, 15.94, 15.14, 14.23, 14.15, 14.35, 14.95, 13.95, 14.05, 14.55, 14.05, 14.45, 15.05, 13.25]) bl_2012 = np.array([14.3 , 12.5 , 13.7 , 13.8 , 12. , 13. , 13. , 13.6 , 12.8 , 13.6 , 12.95, 13.1 , 13.4 , 13.9 , 12.3 , 14. , 12.5 , 12.3 , 13.9 , 13.1 , 12.5 , 13.9 , 13.7 , 12. , 14.4 , 13.5 , 13.8 , 13. , 14.9 , 12.5 , 12.3 , 12.8 , 13.4 , 13.8 , 13.5 , 13.5 , 13.4 , 12.3 , 14.35, 13.2 , 13.8 , 14.6 , 14.3 , 13.8 , 13.6 , 12.9 , 13. , 13.5 , 13.2 , 13.7 , 13.1 , 13.2 , 12.6 , 13. , 13.9 , 13.2 , 15. , 13.37, 11.4 , 13.8 , 13. , 13. , 13.1 , 12.8 , 13.3 , 13.5 , 12.4 , 13.1 , 14. , 13.5 , 11.8 , 13.7 , 13.2 , 12.2 , 13. , 13.1 , 14.7 , 13.7 , 13.5 , 13.3 , 14.1 , 12.5 , 13.7 , 14.6 , 14.1 , 12.9 , 13.9 , 13.4 , 13. , 12.7 , 12.1 , 14. , 14.9 , 13.9 , 12.9 , 14.6 , 14. , 13. , 12.7 , 14. , 14.1 , 14.1 , 13. 
, 13.5 , 13.4 , 13.9 , 13.1 , 12.9 , 14. , 14. , 14.1 , 14.7 , 13.4 , 13.8 , 13.4 , 13.8 , 12.4 , 14.1 , 12.9 , 13.9 , 14.3 , 13.2 , 14.2 , 13. , 14.6 , 13.1 , 15.2 ]) # Make scatter plot of 1975 data _ = plt.plot(bl_1975, bd_1975, marker='.', linestyle='None', color='blue', alpha=0.5) # Make scatter plot of 2012 data _ = plt.plot(bl_2012, bd_2012, marker='.', linestyle='None', color='red', alpha=0.5) # Label axes and make legend _ = plt.xlabel('beak length (mm)') _ = plt.ylabel('beak depth (mm)') _ = plt.legend(('1975', '2012'), loc='upper left') # Show the plot plt.show() # - # ### Linear regressions # + # Compute the linear regressions slope_1975, intercept_1975 = np.polyfit(bl_1975, bd_1975, 1) slope_2012, intercept_2012 = np.polyfit(bl_2012, bd_2012, 1) # Perform pairs bootstrap for the linear regressions bs_slope_reps_1975, bs_intercept_reps_1975 = \ draw_bs_pairs_linreg(bl_1975, bd_1975, size=1000) bs_slope_reps_2012, bs_intercept_reps_2012 = \ draw_bs_pairs_linreg(bl_2012, bd_2012, size=1000) # Compute confidence intervals of slopes slope_conf_int_1975 = np.percentile(bs_slope_reps_1975, [2.5, 97.5]) slope_conf_int_2012 = np.percentile(bs_slope_reps_2012, [2.5, 97.5]) intercept_conf_int_1975 = np.percentile(bs_intercept_reps_1975,[2.5, 97.5]) intercept_conf_int_2012 = np.percentile(bs_intercept_reps_2012, [2.5, 97.5]) # Print the results print('1975: slope =', slope_1975, 'conf int =', slope_conf_int_1975) print('1975: intercept =', intercept_1975, 'conf int =', intercept_conf_int_1975) print('2012: slope =', slope_2012, 'conf int =', slope_conf_int_2012) print('2012: intercept =', intercept_2012, 'conf int =', intercept_conf_int_2012) # - # ### Displaying the linear regression results # + # Make scatter plot of 1975 data _ = plt.plot(bl_1975, bd_1975, marker='.', linestyle='none', color='blue', alpha=0.5) # Make scatter plot of 2012 data _ = plt.plot(bl_2012, bd_2012, marker='.', linestyle='none', color='red', alpha=0.5) # Label axes and make legend _ = plt.xlabel('beak length (mm)') _ = plt.ylabel('beak depth (mm)') _ = plt.legend(('1975', '2012'), loc='upper left') # Generate x-values for bootstrap lines: x x = np.array([10, 17]) # Plot the bootstrap lines for i in range(100): plt.plot(x, bs_slope_reps_1975[i] * x + bs_intercept_reps_1975[i], linewidth=0.5, alpha=0.2, color='blue') plt.plot(x, bs_slope_reps_2012[i] * x + bs_intercept_reps_2012[i], linewidth=0.5, alpha=0.2, color='red') # Draw the plot again plt.show() # - # ### Beak length to depth ratio # + # Compute length-to-depth ratios ratio_1975 = bl_1975/bd_1975 ratio_2012 = bl_2012/bd_2012 # Compute means mean_ratio_1975 = np.mean(ratio_1975) mean_ratio_2012 = np.mean(ratio_2012) # Generate bootstrap replicates of the means bs_replicates_1975 = draw_bs_reps(ratio_1975, np.mean, size=10000) bs_replicates_2012 = draw_bs_reps(ratio_2012, np.mean, size=10000) # Compute the 99% confidence intervals conf_int_1975 = np.percentile(bs_replicates_1975, [0.5,99.5]) conf_int_2012 = np.percentile(bs_replicates_2012, [0.5,99.5]) # Print the results print('1975: mean ratio =', mean_ratio_1975, 'conf int =', conf_int_1975) print('2012: mean ratio =', mean_ratio_2012, 'conf int =', conf_int_2012) # - # ### EDA of heritability bd_parent_fortis = np.array([10.1 , 9.55 , 9.4 , 10.25 , 10.125, 9.7 , 9.05 , 7.4 , 9. , 8.65 , 9.625, 9.9 , 9.55 , 9.05 , 8.35 , 10.1 , 10.1 , 9.9 , 10.225, 10. 
, 10.55 , 10.45 , 9.2 , 10.2 , 8.95 , 10.05 , 10.2 , 9.5 , 9.925, 9.95 , 10.05 , 8.75 , 9.2 , 10.15 , 9.8 , 10.7 , 10.5 , 9.55 , 10.55 , 10.475, 8.65 , 10.7 , 9.1 , 9.4 , 10.3 , 9.65 , 9.5 , 9.7 , 10.525, 9.95 , 10.1 , 9.75 , 10.05 , 9.9 , 10. , 9.1 , 9.45 , 9.25 , 9.5 , 10. , 10.525, 9.9 , 10.4 , 8.95 , 9.4 , 10.95 , 10.75 , 10.1 , 8.05 , 9.1 , 9.55 , 9.05 , 10.2 , 10. , 10.55 , 10.75 , 8.175, 9.7 , 8.8 , 10.75 , 9.3 , 9.7 , 9.6 , 9.75 , 9.6 , 10.45 , 11. , 10.85 , 10.15 , 10.35 , 10.4 , 9.95 , 9.1 , 10.1 , 9.85 , 9.625, 9.475, 9. , 9.25 , 9.1 , 9.25 , 9.2 , 9.95 , 8.65 , 9.8 , 9.4 , 9. , 8.55 , 8.75 , 9.65 , 8.95 , 9.15 , 9.85 , 10.225, 9.825, 10. , 9.425, 10.4 , 9.875, 8.95 , 8.9 , 9.35 , 10.425, 10. , 10.175, 9.875, 9.875, 9.15 , 9.45 , 9.025, 9.7 , 9.7 , 10.05 , 10.3 , 9.6 , 10. , 9.8 , 10.05 , 8.75 , 10.55 , 9.7 , 10. , 9.85 , 9.8 , 9.175, 9.65 , 9.55 , 9.9 , 11.55 , 11.3 , 10.4 , 10.8 , 9.8 , 10.45 , 10. , 10.75 , 9.35 , 10.75 , 9.175, 9.65 , 8.8 , 10.55 , 10.675, 9.95 , 9.55 , 8.825, 9.7 , 9.85 , 9.8 , 9.55 , 9.275, 10.325, 9.15 , 9.35 , 9.15 , 9.65 , 10.575, 9.975, 9.55 , 9.2 , 9.925, 9.2 , 9.3 , 8.775, 9.325, 9.175, 9.325, 8.975, 9.7 , 9.5 , 10.225, 10.025, 8.2 , 8.2 , 9.55 , 9.05 , 9.6 , 9.6 , 10.15 , 9.875, 10.485, 11.485, 10.985, 9.7 , 9.65 , 9.35 , 10.05 , 10.1 , 9.9 , 8.95 , 9.3 , 9.95 , 9.45 , 9.5 , 8.45 , 8.8 , 8.525, 9.375, 10.2 , 7.625, 8.375, 9.25 , 9.4 , 10.55 , 8.9 , 8.8 , 9. , 8.575, 8.575, 9.6 , 9.375, 9.6 , 9.95 , 9.6 , 10.2 , 9.85 , 9.625, 9.025, 10.375, 10.25 , 9.3 , 9.5 , 9.55 , 8.55 , 9.05 , 9.9 , 9.8 , 9.75 , 10.25 , 9.1 , 9.65 , 10.3 , 8.9 , 9.95 , 9.5 , 9.775, 9.425, 7.75 , 7.55 , 9.1 , 9.6 , 9.575, 8.95 , 9.65 , 9.65 , 9.65 , 9.525, 9.85 , 9.05 , 9.3 , 8.9 , 9.45 , 10. , 9.85 , 9.25 , 10.1 , 9.125, 9.65 , 9.1 , 8.05 , 7.4 , 8.85 , 9.075, 9. , 9.7 , 8.7 , 9.45 , 9.7 , 8.35 , 8.85 , 9.7 , 9.45 , 10.3 , 10. , 10.45 , 9.45 , 8.5 , 8.3 , 10. , 9.225, 9.75 , 9.15 , 9.55 , 9. , 9.275, 9.35 , 8.95 , 9.875, 8.45 , 8.6 , 9.7 , 8.55 , 9.05 , 9.6 , 8.65 , 9.2 , 8.95 , 9.6 , 9.15 , 9.4 , 8.95 , 9.95 , 10.55 , 9.7 , 8.85 , 8.8 , 10. , 9.05 , 8.2 , 8.1 , 7.25 , 8.3 , 9.15 , 8.6 , 9.5 , 8.05 , 9.425, 9.3 , 9.8 , 9.3 , 9.85 , 9.5 , 8.65 , 9.825, 9. , 10.45 , 9.1 , 9.55 , 9.05 , 10. , 9.35 , 8.375, 8.3 , 8.8 , 10.1 , 9.5 , 9.75 , 10.1 , 9.575, 9.425, 9.65 , 8.725, 9.025, 8.5 , 8.95 , 9.3 , 8.85 , 8.95 , 9.8 , 9.5 , 8.65 , 9.1 , 9.4 , 8.475, 9.35 , 7.95 , 9.35 , 8.575, 9.05 , 8.175, 9.85 , 7.85 , 9.85 , 10.1 , 9.35 , 8.85 , 8.75 , 9.625, 9.25 , 9.55 , 10.325, 8.55 , 9.675, 9.15 , 9. , 9.65 , 8.6 , 8.8 , 9. 
, 9.95 , 8.4 , 9.35 , 10.3 , 9.05 , 9.975, 9.975, 8.65 , 8.725, 8.2 , 7.85 , 8.775, 8.5 , 9.4 ]) bd_parent_scandens = np.array([ 8.3318, 8.4035, 8.5317, 8.7202, 8.7089, 8.7541, 8.773 , 8.8107, 8.7919, 8.8069, 8.6523, 8.6146, 8.6938, 8.7127, 8.7466, 8.7504, 8.7805, 8.7428, 8.7164, 8.8032, 8.8258, 8.856 , 8.9012, 8.9125, 8.8635, 8.8258, 8.8522, 8.8974, 8.9427, 8.9879, 8.9615, 8.9238, 8.9351, 9.0143, 9.0558, 9.0596, 8.9917, 8.905 , 8.9314, 8.9465, 8.9879, 8.9804, 9.0219, 9.052 , 9.0407, 9.0407, 8.9955, 8.9992, 8.9992, 9.0747, 9.0747, 9.5385, 9.4781, 9.4517, 9.3537, 9.2707, 9.1199, 9.1689, 9.1425, 9.135 , 9.1011, 9.1727, 9.2217, 9.2255, 9.2821, 9.3235, 9.3198, 9.3198, 9.3198, 9.3273, 9.3725, 9.3989, 9.4253, 9.4593, 9.4442, 9.4291, 9.2632, 9.2293, 9.1878, 9.1425, 9.1275, 9.1802, 9.1765, 9.2481, 9.2481, 9.1991, 9.1689, 9.1765, 9.2406, 9.3198, 9.3235, 9.1991, 9.2971, 9.2443, 9.316 , 9.2934, 9.3914, 9.3989, 9.5121, 9.6176, 9.5535, 9.4668, 9.3725, 9.3348, 9.3763, 9.3839, 9.4216, 9.4065, 9.3348, 9.4442, 9.4367, 9.5083, 9.448 , 9.4781, 9.595 , 9.6101, 9.5686, 9.6365, 9.7119, 9.8213, 9.825 , 9.7609, 9.6516, 9.5988, 9.546 , 9.6516, 9.7572, 9.8854, 10.0023, 9.3914]) bd_offspring_fortis = np.array([10.7 , 9.78, 9.48, 9.6 , 10.27, 9.5 , 9. , 7.46, 7.65, 8.63, 9.81, 9.4 , 9.48, 8.75, 7.6 , 10. , 10.09, 9.74, 9.64, 8.49, 10.15, 10.28, 9.2 , 10.01, 9.03, 9.94, 10.5 , 9.7 , 10.02, 10.04, 9.43, 8.1 , 9.5 , 9.9 , 9.48, 10.18, 10.16, 9.08, 10.39, 9.9 , 8.4 , 10.6 , 8.75, 9.46, 9.6 , 9.6 , 9.95, 10.05, 10.16, 10.1 , 9.83, 9.46, 9.7 , 9.82, 10.34, 8.02, 9.65, 9.87, 9. , 11.14, 9.25, 8.14, 10.23, 8.7 , 9.8 , 10.54, 11.19, 9.85, 8.1 , 9.3 , 9.34, 9.19, 9.52, 9.36, 8.8 , 8.6 , 8. , 8.5 , 8.3 , 10.38, 8.54, 8.94, 10. , 9.76, 9.45, 9.89, 10.9 , 9.91, 9.39, 9.86, 9.74, 9.9 , 9.09, 9.69, 10.24, 8.9 , 9.67, 8.93, 9.3 , 8.67, 9.15, 9.23, 9.59, 9.03, 9.58, 8.97, 8.57, 8.47, 8.71, 9.21, 9.13, 8.5 , 9.58, 9.21, 9.6 , 9.32, 8.7 , 10.46, 9.29, 9.24, 9.45, 9.35, 10.19, 9.91, 9.18, 9.89, 9.6 , 10.3 , 9.45, 8.79, 9.2 , 8.8 , 9.69, 10.61, 9.6 , 9.9 , 9.26, 10.2 , 8.79, 9.28, 8.83, 9.76, 10.2 , 9.43, 9.4 , 9.9 , 9.5 , 8.95, 9.98, 9.72, 9.86, 11.1 , 9.14, 10.49, 9.75, 10.35, 9.73, 9.83, 8.69, 9.58, 8.42, 9.25, 10.12, 9.31, 9.99, 8.59, 8.74, 8.79, 9.6 , 9.52, 8.93, 10.23, 9.35, 9.35, 9.09, 9.04, 9.75, 10.5 , 9.09, 9.05, 9.54, 9.3 , 9.06, 8.7 , 9.32, 8.4 , 8.67, 8.6 , 9.53, 9.77, 9.65, 9.43, 8.35, 8.26, 9.5 , 8.6 , 9.57, 9.14, 10.79, 8.91, 9.93, 10.7 , 9.3 , 9.93, 9.51, 9.44, 10.05, 10.13, 9.24, 8.21, 8.9 , 9.34, 8.77, 9.4 , 8.82, 8.83, 8.6 , 9.5 , 10.2 , 8.09, 9.07, 9.29, 9.1 , 10.19, 9.25, 8.98, 9.02, 8.6 , 8.25, 8.7 , 9.9 , 9.65, 9.45, 9.38, 10.4 , 9.96, 9.46, 8.26, 10.05, 8.92, 9.5 , 9.43, 8.97, 8.44, 8.92, 10.3 , 8.4 , 9.37, 9.91, 10. , 9.21, 9.95, 8.84, 9.82, 9.5 , 10.29, 8.4 , 8.31, 9.29, 8.86, 9.4 , 9.62, 8.62, 8.3 , 9.8 , 8.48, 9.61, 9.5 , 9.37, 8.74, 9.31, 9.5 , 9.49, 9.74, 9.2 , 9.24, 9.7 , 9.64, 9.2 , 7.5 , 7.5 , 8.7 , 8.31, 9. , 9.74, 9.31, 10.5 , 9.3 , 8.12, 9.34, 9.72, 9. , 9.65, 9.9 , 10. , 10.1 , 8. , 9.07, 9.75, 9.33, 8.11, 9.36, 9.74, 9.9 , 9.23, 9.7 , 8.2 , 9.35, 9.49, 9.34, 8.87, 9.03, 9.07, 9.43, 8.2 , 9.19, 9. , 9.2 , 9.06, 9.81, 8.89, 9.4 , 10.45, 9.64, 9.03, 8.71, 9.91, 8.33, 8.2 , 7.83, 7.14, 8.91, 9.18, 8.8 , 9.9 , 7.73, 9.25, 8.7 , 9.5 , 9.3 , 9.05, 10.18, 8.85, 9.24, 9.15, 9.98, 8.77, 9.8 , 8.65, 10. , 8.81, 8.01, 7.9 , 9.41, 10.18, 9.55, 9.08, 8.4 , 9.75, 8.9 , 9.07, 9.35, 8.9 , 8.19, 8.65, 9.19, 8.9 , 9.28, 10.58, 9. , 9.4 , 8.91, 9.93, 10. , 9.37, 7.4 , 9. 
, 8.8 , 9.18, 8.3 , 10.08, 7.9 , 9.96, 10.4 , 9.65, 8.8 , 8.65, 9.7 , 9.23, 9.43, 9.93, 8.47, 9.55, 9.28, 8.85, 8.9 , 8.75, 8.63, 9. , 9.43, 8.28, 9.23, 10.4 , 9. , 9.8 , 9.77, 8.97, 8.37, 7.7 , 7.9 , 9.5 , 8.2 , 8.8 ]) bd_offspring_scandens = np.array([ 8.419 , 9.2468, 8.1532, 8.0089, 8.2215, 8.3734, 8.5025, 8.6392, 8.7684, 8.8139, 8.7911, 8.9051, 8.9203, 8.8747, 8.943 , 9.0038, 8.981 , 9.0949, 9.2696, 9.1633, 9.1785, 9.1937, 9.2772, 9.0722, 8.9658, 8.9658, 8.5025, 8.4949, 8.4949, 8.5633, 8.6013, 8.6468, 8.1532, 8.3734, 8.662 , 8.6924, 8.7456, 8.8367, 8.8595, 8.9658, 8.9582, 8.8671, 8.8671, 8.943 , 9.0646, 9.1405, 9.2089, 9.2848, 9.3759, 9.4899, 9.4519, 8.1228, 8.2595, 8.3127, 8.4949, 8.6013, 8.4646, 8.5329, 8.7532, 8.8823, 9.0342, 8.6392, 8.6772, 8.6316, 8.7532, 8.8291, 8.8975, 8.9734, 9.0494, 9.1253, 9.1253, 9.1253, 9.1785, 9.2848, 9.4595, 9.3608, 9.2089, 9.2544, 9.3684, 9.3684, 9.2316, 9.1709, 9.2316, 9.0342, 8.8899, 8.8291, 8.981 , 8.8975, 10.4089, 10.1886, 9.7633, 9.7329, 9.6114, 9.5051, 9.5127, 9.3684, 9.6266, 9.5354, 10.0215, 10.0215, 9.6266, 9.6038, 9.4063, 9.2316, 9.338 , 9.262 , 9.262 , 9.4063, 9.4367, 9.0342, 8.943 , 8.9203, 8.7835, 8.7835, 9.057 , 8.9354, 8.8975, 8.8139, 8.8671, 9.0873, 9.2848, 9.2392, 9.2924, 9.4063, 9.3152, 9.4899, 9.5962, 9.6873, 9.5203, 9.6646]) # + # Make scatter plots _ = plt.plot(bd_parent_fortis, bd_offspring_fortis, marker='.', linestyle='none', color='blue', alpha=0.5) _ = plt.plot(bd_parent_scandens, bd_offspring_scandens, marker='.', linestyle='none', color='red', alpha=0.5) # Label axes _ = plt.xlabel('parental beak depth (mm)') _ = plt.ylabel('offspring beak depth (mm)') # Add legend _ = plt.legend(('G. fortis', 'G. scandens'), loc='lower right') # Show plot plt.show() # - # ### Correlation of offspring and parental data def draw_bs_pairs(x, y, func, size=1): """Perform pairs bootstrap for a single statistic.""" # Set up array of indices to sample from: inds inds = np.arange(len(x)) # Initialize replicates: bs_replicates bs_replicates = np.empty(size) # Generate replicates for i in range(size): bs_inds = np.random.choice(inds, len(inds)) bs_x, bs_y = bs_inds bs_replicates[i] = func(bs_x, bs_y) return bs_replicates def draw_bs_pairs(x, y, func, size=1): """Perform pairs bootstrap for a single statistic.""" # Set up array of indices to sample from: inds inds = np.arange(len(x)) # Initialize replicates: bs_replicates bs_replicates = np.empty(size) # Generate replicates for i in range(size): bs_inds = np.random.choice(inds, len(inds)) bs_x, bs_y = bs_inds bs_replicates[i] = func(bs_x, bs_y) return bs_replicates # ### Measuring heritability # + def heritability(parents, offspring): """Compute the heritability from parent and offspring samples.""" covariance_matrix = np.cov(parents, offspring) return covariance_matrix[0,1] / covariance_matrix[0,0] # Compute the heritability heritability_scandens = heritability(bd_parent_scandens, bd_offspring_scandens) heritability_fortis = heritability(bd_parent_fortis, bd_offspring_fortis) # + # Initialize array of replicates: perm_replicates perm_replicates = np.empty(10000) # Draw replicates for i in range(10000): # Permute parent beak depths bd_parent_permuted = np.random.permutation(bd_parent_scandens) perm_replicates[i] = heritability(bd_parent_permuted, bd_offspring_scandens) # Compute p-value: p p = np.sum(perm_replicates >= heritability_scandens) / len(perm_replicates) # Print the p-value print('p-val =', p)
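# ### Confidence interval on heritability (pairs bootstrap)
# A minimal sketch, assuming the parent/offspring arrays and the `heritability()` function defined above. The helper name `draw_bs_pairs_stat` is illustrative; the important detail is that each bootstrap replicate indexes *both* arrays with the same resampled indices, so parent/offspring pairs stay matched.
# +
def draw_bs_pairs_stat(x, y, func, size=1):
    """Pairs bootstrap for an arbitrary statistic func(x, y)."""
    inds = np.arange(len(x))
    bs_replicates = np.empty(size)
    for i in range(size):
        # Resample indices with replacement and apply them to both arrays
        bs_inds = np.random.choice(inds, size=len(inds))
        bs_replicates[i] = func(x[bs_inds], y[bs_inds])
    return bs_replicates


# 95% confidence intervals around the heritability point estimates computed above
bs_h_scandens = draw_bs_pairs_stat(bd_parent_scandens, bd_offspring_scandens,
                                   heritability, size=1000)
bs_h_fortis = draw_bs_pairs_stat(bd_parent_fortis, bd_offspring_fortis,
                                 heritability, size=1000)

print('G. scandens heritability =', heritability_scandens,
      'conf int =', np.percentile(bs_h_scandens, [2.5, 97.5]))
print('G. fortis heritability =', heritability_fortis,
      'conf int =', np.percentile(bs_h_fortis, [2.5, 97.5]))
# -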
notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Proxy # A Proxy is an object that stands in place for another object, typically wrapping it up and forwarding calls to it while performing some additional functionality on top. class PartProxy: """ A proxy class for parts - keeping their running counts and a registry """ # Keep count of total number of parts parts_count = 0 register = {} # A proxy almost always wraps up the object it proxies to def __init__(self, part): self.part = part self.__class__.parts_count += 1 count = self.__class__.parts_count # Register the part! self.__class__.register[count] = part print(self.register) def __str__(self): return str(self.part) @classmethod def get_count(cls): return cls.parts_count def destroy(self): """ destroy the part """ # Unregister the part for key,item in self.__class__.register.items(): if item == self.part: del self.__class__.register[key] break del self.part self.__class__.parts_count -= 1 def __getattr__(self, name): """ Redirect attributes to proxied part """ try: return self.__dict__[name] except KeyError: return getattr(self.part, name) # + class PartsTypeProxyFactory(type): """ A type and proxy factory for mechanical part classes and instances respectively """ def __init__(cls, *args, **kwargs): print('__init__:',cls) type.__init__(cls, *args) def my_new(cls,name,bases=(),dct={}): print(cls,name) instance = object.__new__(cls) # instance = object.__new__(cls) return instance @classmethod def create(cls, *args, **kwargs): print('create:',args) return PartProxy(Part(*args, **kwargs)) @classmethod def __prepare__(cls, name, bases, **kwargs): return {'__new__': cls.my_new} class Part(metaclass=PartsTypeProxyFactory): """ A Mechanical Parts class """ def __init__(self, name, parent=None): print('__init__:',name,parent) self.name = name self.parent = parent def join(self, part): print('Joining with part', part) def __str__(self): return "{}, parent: {} ".format(self.name, self.parent) # - nut = PartsTypeProxyFactory.create('Nut') bolt = PartsTypeProxyFactory.create('Bolt', nut) screw = PartsTypeProxyFactory.create('Screw', bolt) PartProxy.get_count() PartProxy.register print(nut) print(bolt) print(screw) frame = PartsTypeProxyFactory.create('Frame') nut.join(frame) nut.destroy() PartProxy.get_count() bolt.destroy() PartProxy.get_count() # Check the parts register PartProxy.register screw.destroy() PartProxy.get_count() PartProxy.register frame.destroy() PartProxy.get_count() PartProxy.register # # Adapter # An adapter adapts an existing class (implementation) to a new interface (class). It does this by either using inheritance (class adapter) or by using aggregation (object adapter) # + from abc import abstractmethod class InvalidPolygonError(Exception): pass class Polygon: """ A generic polygon class """ def __init__(self, *sides): """ Initializer - accepts length of sides """ self.sides = sides def perimeter(self): """ Return perimeter """ return sum(self.sides) @abstractmethod def is_valid(self): """ Is this a valid polygon """ pass def is_regular(self): """ Is a regular polygon ? 
""" # Yes: if all sides are equal side = self.sides[0] return all([x==side for x in self.sides[1:]]) @abstractmethod def area(self): """ Calculate and return area """ pass # - class Rectangle(Polygon): """ Rectangle class from Polygon using class adapter """ def is_square(self): """ Return if I am a square """ if self.is_valid(): # Defaults to is_regular return self.is_regular() def is_valid(self): """ Is the rectangle valid """ # Should have 4 sides if len(self.sides) != 4: return False # Opposite sides should be same for a,b in [(0,2),(1,3)]: if self.sides[a] != self.sides[b]: return False return True def area(self): """ Return area of rectangle """ # Length x breadth if self.is_valid(): return self.sides[0]*self.sides[1] # + rect = Rectangle(10, 4, 10, 4) print(rect.is_valid()) print(rect.is_square()) print(rect.perimeter()) print(rect.area()) # + def rectangle_valid(self): """ Is the rectangle valid """ # Should have 4 sides if len(self.sides) != 4: return False # Opposite sides should be same for a,b in [(0,2),(1,3)]: if self.sides[a] != self.sides[b]: return False return True def triangle_valid(self): """ Is the triangle valid """ # Sum of 2 sides should be > 3rd side perimeter = self.perimeter() for side in self.sides: sum_two = perimeter - side if sum_two <= side: return False return True class PolygonType(type): """ A generic polygon type """ def my_init(self, *sides): """ Initializer - accepts length of sides """ self.sides = sides def my_perimeter(self): """ Return perimeter """ return sum(self.sides) def my_is_regular(self): """ Is a regular polygon ? """ # Yes: if all sides are equal side = self.sides[0] return all([x==side for x in self.sides[1:]]) @classmethod def __prepare__(mcs, name, bases, **kwargs): print('Metaclass=>',mcs, name) # Attach is_valid method using class name class_name = name.lower() valid_func = eval(class_name + '_valid') print('Attaching function',valid_func,'as is_valid method') return {'__init__': mcs.my_init, 'perimeter': mcs.my_perimeter, 'is_regular': mcs.my_is_regular, 'is_valid': valid_func} # + import itertools class Rectangle(metaclass=PolygonType): """ Rectangle class using metaclasses """ def area(self): """ Return area of rectangle """ print('Calculating area') # Length x breadth if self.is_valid(): return self.sides[0]*self.sides[1] class Triangle(metaclass=PolygonType): """ Triangle class using metaclasses """ def is_isosceles(self): """ Is the triangle isoscles """ if self.is_valid(): # Check if any 2 sides are equal for a,b in itertools.combinations(self.sides, 2): if a == b: return True return False def area(self): """ Calculate area """ # Using Heron's formula p = self.perimeter()/2.0 total = p for side in self.sides: total *= abs(p-side) return pow(total, 0.5) # - rect2 = Rectangle(10,4,10,4) print(rect2.is_valid()) print(rect2.area()) print(rect2.is_regular()) tri = Triangle(10, 10, 20) print(tri.is_valid()) tri = Triangle(12, 12, 6) print(tri.is_valid()) print(tri.is_isosceles()) print(tri.area())
kochi-python-oct-2018/.ipynb_checkpoints/Design Patterns - Structural-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pickle from statistics import mean import numpy as np import sys # + file_name_train_250 = "RL_trajectories_1000_250.pickle" file_obj_train_250 = open(file_name_train_250, 'rb') trajectories_train_250 = pickle.load(file_obj_train_250) trajectories_train_250 = [list(ele) for ele in trajectories_train_250] file_name_test_250 = "Human_trajectories_test_250.pickle" file_obj_test_250 = open(file_name_test_250, 'rb') trajectories_test_250 = pickle.load(file_obj_test_250) trajectories_test_250 = [list(ele) for ele in trajectories_test_250] file_name_train_500 = "RL_trajectories_1000_500.pickle" file_obj_train_500 = open(file_name_train_500, 'rb') trajectories_train_500 = pickle.load(file_obj_train_500) trajectories_train_500 = [list(ele) for ele in trajectories_train_500] file_name_test_500 = "Human_trajectories_test_500.pickle" file_obj_test_500 = open(file_name_test_500, 'rb') trajectories_test_500 = pickle.load(file_obj_test_500) trajectories_test_500 = [list(ele) for ele in trajectories_test_500] test_size = 60 # - def generate_policies(price_low_bound, price_upper_bound, step_size): policies = [] for i in range(price_low_bound, price_upper_bound, step_size): for j in range(price_low_bound, price_upper_bound, step_size): for k in range(price_low_bound, price_upper_bound, step_size): for l in range(price_low_bound, price_upper_bound, step_size): for m in range(4): for n in range(4): for o in range(4): for p in range(3): policy = (i, j, k, l, m, n, o, p) policies.append(policy) return policies policies = generate_policies(190, 240, 10) len(policies) def evaluate_policy(policy, data, best_buys_cost, best_buys_idx, get_final_output): if(len(policy)!=8): raise ValueError("Number of parameters in this policy is "+ str(len(policy))+" when it should be 8") bought = [] bought_cost_only = [] bought_idx_only = [] t1 = policy[0] t2 = policy[1] t3 = policy[2] t4 = policy[3] d1 = policy[4] d2 = policy[5] d3 = policy[6] d4 = policy[7] for trajectory in data: for idx, cost in enumerate(trajectory): local_cat = int(idx/4) local_idx = int(idx%4) if(idx == len(trajectory) - 1): bought.append((cost, idx)) bought_cost_only.append(cost) bought_idx_only.append(idx) break elif(local_cat == 0): if(cost > t1 and local_idx <= d1): continue else: bought.append((cost, idx)) bought_cost_only.append(cost) bought_idx_only.append(idx) break elif(local_cat == 1): if(cost > t2 and local_idx <= d2): continue else: bought.append((cost, idx)) bought_cost_only.append(cost) bought_idx_only.append(idx) break elif(local_cat == 2): if(cost > t3 and local_idx <= d3): continue else: bought.append((cost, idx)) bought_cost_only.append(cost) bought_idx_only.append(idx) break elif(local_cat == 3): if(cost > t4 and local_idx <= d4): continue else: bought.append((cost, idx)) bought_cost_only.append(cost) bought_idx_only.append(idx) break error = [a_i - b_i for a_i, b_i in zip(bought_cost_only, best_buys_cost)] correct_stops = (np.equal(bought_idx_only, best_buys_idx)).astype(int) correct_stops = sum(correct_stops) avg_error = sum(error)/len(data) if(get_final_output): return bought, avg_error, correct_stops return avg_error, correct_stops # + ##Cost best_buy_250_train = [min(ele) for ele in trajectories_train_250] best_buy_250_test = [min(ele) for ele in trajectories_test_250] best_buy_500_train = [min(ele) for ele in 
trajectories_train_500] best_buy_500_test = [min(ele) for ele in trajectories_test_500] ##Indices best_buy_250_train_idx = [ele.index(min(ele)) for ele in trajectories_train_250] best_buy_250_test_idx = [ele.index(min(ele)) for ele in trajectories_test_250] best_buy_500_train_idx = [ele.index(min(ele)) for ele in trajectories_train_500] best_buy_500_test_idx = [ele.index(min(ele)) for ele in trajectories_test_500] # + def policy_search(price_low_bound, price_upper_bound, data, best_buys_cost, best_buys_idx): all_errors = [] all_correct_stops = [] all_policies = generate_policies(price_low_bound, price_upper_bound, 10) for idx, policy in enumerate(all_policies): policy_avg_error, correct_stops = evaluate_policy(policy, data, best_buys_cost, best_buys_idx, False) sys.stdout.flush() sys.stdout.write("\rPolicies Evaluated: {}/{}".format(idx+1, len(all_policies))) all_errors.append(policy_avg_error) all_correct_stops.append(correct_stops) optimal_policy_by_avg_error = (min(all_errors), all_policies[all_errors.index(min(all_errors))]) optimal_policy_by_opt_stop = (max(all_correct_stops), all_policies[all_correct_stops.index(max(all_correct_stops))]) return optimal_policy_by_avg_error, optimal_policy_by_opt_stop # - optimal_policy_250 = policy_search(190, 240, trajectories_train_250, best_buy_250_train, best_buy_250_train_idx) print(optimal_policy_250) evaluate_policy(optimal_policy_250[0][1], trajectories_test_250, best_buy_250_test, best_buy_250_test_idx, False) final_result_avg_250, avg_error, correct_stops = evaluate_policy(optimal_policy_250[0][1], trajectories_test_250, best_buy_250_test, best_buy_250_test_idx, True) avg_error, correct_stops final_result_opt_stop_250, avg_error, correct_stops = evaluate_policy(optimal_policy_250[1][1], trajectories_test_250, best_buy_250_test, best_buy_250_test_idx, True) avg_error, correct_stops optimal_policy_500 = policy_search(390, 480, trajectories_train_500, best_buy_500_train, best_buy_500_train_idx) print("") print(optimal_policy_500) evaluate_policy(optimal_policy_500[0][1], trajectories_test_500, best_buy_500_test, best_buy_500_test_idx, False) final_result_avg_500, avg_error, correct_stops = evaluate_policy(optimal_policy_500[0][1], trajectories_test_500, best_buy_500_test, best_buy_500_test_idx, True) avg_error, correct_stops final_result_opt_stop_500, avg_error, correct_stops = evaluate_policy(optimal_policy_500[1][1], trajectories_test_500, best_buy_500_test, best_buy_500_test_idx, True) avg_error, correct_stops # # Final Result rl_four_split_avg = [final_result_avg_250, final_result_avg_500] rl_four_split_opt_stop = [final_result_opt_stop_250, final_result_opt_stop_500] def save_data(obj, filename): final_file_object = open(filename, 'wb') pickle.dump(obj, final_file_object) final_file_object.close() save_data(rl_four_split_avg, "rl_four_split_avg") save_data(rl_four_split_opt_stop, "rl_four_split_opt_stop")
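# For reference, the same eight-dimensional grid of (threshold, deadline) policies can be enumerated more compactly with `itertools.product`. This is an illustrative equivalent of `generate_policies` above (same bounds, step size, and tuple ordering); `generate_policies_product` is not used elsewhere in this notebook.
# +
import itertools


def generate_policies_product(price_low_bound, price_upper_bound, step_size):
    """Enumerate (t1, t2, t3, t4, d1, d2, d3, d4) tuples, equivalent to generate_policies."""
    prices = range(price_low_bound, price_upper_bound, step_size)
    return list(itertools.product(prices, prices, prices, prices,
                                  range(4), range(4), range(4), range(3)))


# Same number of policies as the nested-loop version above
print(len(generate_policies_product(190, 240, 10)))
# -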
RL/Four Split Complex Policy .ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: pythondata_3 # language: python # name: pythondata_3 # --- # Dependencies import json import pandas as pd import matplotlib.pyplot as plt import seaborn as sns lodging = pd.read_csv('../Results/Lodging_Rating.csv') del lodging['Unnamed: 0'] lodging.replace('NAN', value=0, inplace=True) lodging = lodging.rename(columns={'lodging Total Count':'Total Count', 'Facility lodging':'Lodging Facility'}) lodging['Rating']=lodging['Rating'].astype(float) lodging['Total Count']=lodging['Total Count'].astype(int) lodging.head() new_lodging = lodging.groupby(['City Name', 'Site Name']) lodging_count_df = pd.DataFrame(new_lodging['Site Name'].value_counts()) lodging_count_df = lodging_count_df.rename(columns={'Site Name': 'Total Count'}) lodging_count_df=lodging_count_df.reset_index(level=1) lodging_count_df = lodging_count_df.reset_index(level=0) lodging_count_df = lodging_count_df.reset_index(drop=True) lodging_count_df.head() lodging_count_final = lodging_count_df.groupby(['City Name']) lodging_count_final_df = pd.DataFrame(lodging_count_final['Total Count'].sum()) lodging_count_final_df = lodging_count_final_df.sort_values(['Total Count'])[::-1] lodging_count_final_df = lodging_count_final_df.reset_index() lodging_count_final_df['Type']='Lodging' lodging_count_final_df parking = pd.read_csv('../Results/Parking_Rating.csv') del parking['Unnamed: 0'] parking.replace('NAN', value=0, inplace=True) parking = parking.rename(columns={'parking Total Count':'Total Count', 'Facility parking':'Parking Facility'}) parking['Rating']=parking['Rating'].astype(float) parking['Total Count']=parking['Total Count'].astype(int) parking.head() new_parking = parking.groupby(['City Name', 'Site Name']) parking_count_df = pd.DataFrame(new_parking['Site Name'].value_counts()) parking_count_df = parking_count_df.rename(columns={'Site Name': 'Total Count'}) parking_count_df=parking_count_df.reset_index(level=1) parking_count_df = parking_count_df.reset_index(level=0) parking_count_df = parking_count_df.reset_index(drop=True) parking_count_df.head() parking_count_final = parking_count_df.groupby(['City Name']) parking_count_final_df = pd.DataFrame(parking_count_final['Total Count'].sum()) parking_count_final_df = parking_count_final_df.sort_values(['Total Count'])[::-1] parking_count_final_df = parking_count_final_df.reset_index() parking_count_final_df['Type']='Parking' parking_count_final_df new_accomodation_df = lodging_count_final_df.append(parking_count_final_df) new_accomodation_df.reset_index(drop=True) # + sns.factorplot(kind='bar',x='Type',y='Total Count',data=new_accomodation_df, hue='City Name', size=5, aspect=2.5) total_count = new_accomodation_df.groupby(['City Name'])['Total Count'].sum().sort_values()[::-1].reset_index() total_count_df = pd.DataFrame(total_count) print(total_count_df) ranks_dict = {} y=1 for name in total_count_df['City Name']: ranks_dict[name] = y y=y+1 print(ranks_dict) plt.title('City Lodging & Parking Ranking', fontsize=20, fontweight='bold') plt.xlabel(' ', fontsize=15) plt.ylabel('Total Count', fontsize=15) plt.xticks(fontsize=12) plt.yticks(fontsize=12) new_labels = ['#1 Chicago', '#2 New York ', '#3 Boston', '#4 Austin', '#5 Atlanta', '#6 Raleigh', '#7 Northern Virginia Area', '#8 Los Angeles', '#9 Washington DC'] plt.legend(new_labels, loc='upper left', frameon=False, title='Rank') 
plt.savefig('Save_Figs/Accomodation.png', bbox_inches='tight') plt.show() # -
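# Note: `DataFrame.append`, used above to stack the lodging and parking counts, is deprecated in newer pandas releases. A minimal equivalent sketch with `pd.concat`, assuming the two count frames built earlier; `accomodation_concat_df` is an illustrative name and is not used by the plot above.
# Equivalent of .append(...) followed by reset_index(drop=True)
accomodation_concat_df = pd.concat([lodging_count_final_df, parking_count_final_df],
                                   ignore_index=True)
accomodation_concat_df.head()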
Amenities_Niyati/Plots/old/Amazon_nearby_Amenities_Accomodation.ipynb
# --- # jupyter: # accelerator: GPU # colab: # collapsed_sections: [] # name: Habitat-sim Asset Viewer # private_outputs: true # provenance: [] # jupytext: # cell_metadata_filter: -all # formats: nb_python//py:percent,colabs//ipynb # notebook_metadata_filter: all # text_representation: # extension: .py # format_name: percent # format_version: '1.3' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # %% [markdown] # <a href="https://colab.research.google.com/github/facebookresearch/habitat-sim/blob/main/examples/tutorials/colabs/asset_viewer.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # %% [markdown] # #Habitat-sim Asset Viewer # # This utility provides a user with the ability to view assets rendered by the Habitat-Sim engine. # # %% # @title Installation { display-mode: "form" } # @markdown (double click to show code). # !curl -L https://raw.githubusercontent.com/facebookresearch/habitat-sim/main/examples/colab_utils/colab_install.sh | NIGHTLY=true bash -s # %% # @title Path Setup and Imports { display-mode: "form" } # @markdown (double click to show code). # %cd /content/habitat-sim ## [setup] import math import os import sys import git import numpy as np import habitat_sim from habitat_sim.utils import common as ut from habitat_sim.utils import viz_utils as vut try: import ipywidgets as widgets from IPython.display import display as ipydisplay # For using jupyter/ipywidget IO components HAS_WIDGETS = True except ImportError: HAS_WIDGETS = False if "google.colab" in sys.modules: os.environ["IMAGEIO_FFMPEG_EXE"] = "/usr/bin/ffmpeg" repo = git.Repo(".", search_parent_directories=True) dir_path = repo.working_tree_dir # %cd $dir_path data_path = os.path.join(dir_path, "data") # fmt: off output_directory = "examples/tutorials/asset_viewer_output/" # @param {type:"string"} # fmt: on output_path = os.path.join(dir_path, output_directory) if not os.path.exists(output_path): os.mkdir(output_path) # define some globals the first time we run. 
if "sim" not in globals(): global sim sim = None global obj_attr_mgr obj_attr_mgr = None global prim_attr_mgr obj_attr_mgr = None global stage_attr_mgr stage_attr_mgr = None global rigid_obj_mgr rigid_obj_mgr = None # %% # @title Define Configuration Utility Functions { display-mode: "form" } # @markdown (double click to show code) # @markdown This cell defines a number of utility functions used throughout the tutorial to make simulator reconstruction easy: # @markdown - make_cfg # @markdown - make_default_settings # @markdown - make_simulator_from_settings def make_cfg(settings): sim_cfg = habitat_sim.SimulatorConfiguration() sim_cfg.gpu_device_id = 0 sim_cfg.scene_id = settings["scene"] sim_cfg.enable_physics = settings["enable_physics"] # Optional; Specify the location of an existing scene dataset configuration # that describes the locations and configurations of all the assets to be used if "scene_dataset_config" in settings: sim_cfg.scene_dataset_config_file = settings["scene_dataset_config"] if "override_scene_light_defaults" in settings: sim_cfg.override_scene_light_defaults = settings[ "override_scene_light_defaults" ] if "scene_light_setup" in settings: sim_cfg.scene_light_setup = settings["scene_light_setup"] # Note: all sensors must have the same resolution sensor_specs = [] if settings["color_sensor_1st_person"]: color_sensor_1st_person_spec = habitat_sim.CameraSensorSpec() color_sensor_1st_person_spec.uuid = "color_sensor_1st_person" color_sensor_1st_person_spec.sensor_type = habitat_sim.SensorType.COLOR color_sensor_1st_person_spec.resolution = [ settings["height"], settings["width"], ] color_sensor_1st_person_spec.position = [0.0, settings["sensor_height"], 0.0] color_sensor_1st_person_spec.orientation = [ settings["sensor_pitch"], 0.0, 0.0, ] color_sensor_1st_person_spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE sensor_specs.append(color_sensor_1st_person_spec) if settings["depth_sensor_1st_person"]: depth_sensor_1st_person_spec = habitat_sim.CameraSensorSpec() depth_sensor_1st_person_spec.uuid = "depth_sensor_1st_person" depth_sensor_1st_person_spec.sensor_type = habitat_sim.SensorType.DEPTH depth_sensor_1st_person_spec.resolution = [ settings["height"], settings["width"], ] depth_sensor_1st_person_spec.position = [0.0, settings["sensor_height"], 0.0] depth_sensor_1st_person_spec.orientation = [ settings["sensor_pitch"], 0.0, 0.0, ] depth_sensor_1st_person_spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE sensor_specs.append(depth_sensor_1st_person_spec) if settings["semantic_sensor_1st_person"]: semantic_sensor_1st_person_spec = habitat_sim.CameraSensorSpec() semantic_sensor_1st_person_spec.uuid = "semantic_sensor_1st_person" semantic_sensor_1st_person_spec.sensor_type = habitat_sim.SensorType.SEMANTIC semantic_sensor_1st_person_spec.resolution = [ settings["height"], settings["width"], ] semantic_sensor_1st_person_spec.position = [ 0.0, settings["sensor_height"], 0.0, ] semantic_sensor_1st_person_spec.orientation = [ settings["sensor_pitch"], 0.0, 0.0, ] semantic_sensor_1st_person_spec.sensor_subtype = ( habitat_sim.SensorSubType.PINHOLE ) sensor_specs.append(semantic_sensor_1st_person_spec) if settings["color_sensor_3rd_person"]: color_sensor_3rd_person_spec = habitat_sim.CameraSensorSpec() color_sensor_3rd_person_spec.uuid = "color_sensor_3rd_person" color_sensor_3rd_person_spec.sensor_type = habitat_sim.SensorType.COLOR color_sensor_3rd_person_spec.resolution = [ settings["height"], settings["width"], ] color_sensor_3rd_person_spec.position = [ 0.0, 
settings["sensor_height"] + 0.2, 0.2, ] color_sensor_3rd_person_spec.orientation = [-math.pi / 4, 0, 0] color_sensor_3rd_person_spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE sensor_specs.append(color_sensor_3rd_person_spec) # Here you can specify the amount of displacement in a forward action and the turn angle agent_cfg = habitat_sim.agent.AgentConfiguration() agent_cfg.sensor_specifications = sensor_specs return habitat_sim.Configuration(sim_cfg, [agent_cfg]) def make_default_settings(): settings = { "width": 1280, # Spatial resolution of the observations "height": 720, "scene": "./data/scene_datasets/mp3d_example/17DRP5sb8fy/17DRP5sb8fy.glb", # Scene path "default_agent": 0, "sensor_height": 1.5, # Height of sensors in meters "sensor_pitch": -math.pi / 8.0, # sensor pitch (x rotation in rads) "color_sensor_1st_person": True, # RGB sensor "color_sensor_3rd_person": False, # RGB sensor 3rd person "depth_sensor_1st_person": False, # Depth sensor "semantic_sensor_1st_person": False, # Semantic sensor "seed": 1, "enable_physics": True, # enable dynamics simulation } return settings def make_simulator_from_settings(sim_settings): cfg = make_cfg(sim_settings) # clean-up the current simulator instance if it exists global sim global obj_attr_mgr global prim_attr_mgr global stage_attr_mgr global rigid_obj_mgr global metadata_mediator if sim != None: sim.close() # initialize the simulator sim = habitat_sim.Simulator(cfg) # Managers of various Attributes templates obj_attr_mgr = sim.get_object_template_manager() obj_attr_mgr.load_configs(str(os.path.join(data_path, "objects/example_objects"))) prim_attr_mgr = sim.get_asset_template_manager() stage_attr_mgr = sim.get_stage_template_manager() # Manager providing access to rigid objects rigid_obj_mgr = sim.get_rigid_object_manager() # get metadata_mediator metadata_mediator = sim.metadata_mediator # UI-populated handles used in various cells. Need to initialize to valid # value in case IPyWidgets are not available. # Holds the user's desired file-based object template handle global sel_file_obj_handle sel_file_obj_handle = obj_attr_mgr.get_file_template_handles()[0] # Holds the user's desired primitive-based object template handle global sel_prim_obj_handle sel_prim_obj_handle = obj_attr_mgr.get_synth_template_handles()[0] # Holds the user's desired primitive asset template handle global sel_asset_handle sel_asset_handle = prim_attr_mgr.get_template_handles()[0] # [/setup] # %% # @title Define Template Dictionary Utility Functions { display-mode: "form" } # @markdown (double click to show code) # @markdown This cell defines utility functions that expose Attribute template object properties. # This method builds a dictionary of k-v pairs of attribute property names and # values shared by all attribute template types. The values are tuples with the # first entry being the value and the second being whether the property is # editable and the third being the type. def build_dict_of_Default_attrs(template): res_dict = {} res_dict["handle"] = (template.handle, True, "string") # Read-only values res_dict["template_id"] = (template.template_id, False, "int") res_dict["template_class"] = (template.template_class, False, "string") res_dict["file_directory"] = (template.file_directory, False, "string") res_dict["num_user_configs"] = (template.num_user_configs, False, "int") return res_dict # This method builds a dictionary of k-v pairs of attribute property names and # values shared by templates of physically modeled constructs (scenes and # objects). 
The values are tuples with the first entry being the value and the # second being whether the property is editable and the third being the type. def build_dict_of_PhyObj_attrs(phys_obj_template): res_dict = build_dict_of_Default_attrs(phys_obj_template) res_dict["scale"] = (phys_obj_template.scale, True, "vector") res_dict["margin"] = (phys_obj_template.margin, True, "double") res_dict["friction_coefficient"] = ( phys_obj_template.friction_coefficient, True, "double", ) res_dict["restitution_coefficient"] = ( phys_obj_template.restitution_coefficient, True, "double", ) res_dict["render_asset_handle"] = ( phys_obj_template.render_asset_handle, True, "string", ) res_dict["collision_asset_handle"] = ( phys_obj_template.collision_asset_handle, True, "string", ) res_dict["force_flat_shading"] = ( phys_obj_template.force_flat_shading, True, "boolean", ) res_dict["shader_type"] = (phys_obj_template.shader_type, True, "int") res_dict["orient_up"] = (phys_obj_template.orient_up, True, "vector") res_dict["orient_front"] = (phys_obj_template.orient_front, True, "vector") res_dict["units_to_meters"] = (phys_obj_template.units_to_meters, True, "double") res_dict["render_asset_type"] = (phys_obj_template.render_asset_type, True, "int") res_dict["collision_asset_type"] = ( phys_obj_template.collision_asset_type, True, "int", ) # Read-only values res_dict["render_asset_is_primitive"] = ( phys_obj_template.render_asset_is_primitive, False, "boolean", ) res_dict["collision_asset_is_primitive"] = ( phys_obj_template.collision_asset_is_primitive, False, "boolean", ) res_dict["use_mesh_for_collision"] = ( phys_obj_template.use_mesh_for_collision, False, "boolean", ) res_dict["is_collidable"] = (phys_obj_template.is_collidable, True, "boolean") res_dict["is_dirty"] = (phys_obj_template.is_dirty, False, "boolean") return res_dict # This method will build a dict containing k-v pairs of attribute property names # and values for the passed object template. The values are tuples with the first # entry being the value,the second being whether the property is editable and # the third being the type. def build_dict_of_Object_attrs(obj_template): res_dict = build_dict_of_PhyObj_attrs(obj_template) res_dict["com"] = (obj_template.com, True, "vector") res_dict["compute_COM_from_shape"] = ( obj_template.compute_COM_from_shape, True, "boolean", ) res_dict["mass"] = (obj_template.mass, True, "double") res_dict["inertia"] = (obj_template.inertia, True, "vector") res_dict["linear_damping"] = (obj_template.linear_damping, True, "double") res_dict["angular_damping"] = (obj_template.angular_damping, True, "double") res_dict["bounding_box_collisions"] = ( obj_template.bounding_box_collisions, True, "boolean", ) res_dict["join_collision_meshes"] = ( obj_template.join_collision_meshes, True, "boolean", ) res_dict["is_visible"] = (obj_template.is_visible, True, "boolean") res_dict["semantic_id"] = (obj_template.semantic_id, True, "int") return res_dict # This method will build a dict containing k-v pairs of attribute property names # and values for the passed scene template. The values are tuples with the first # entry being the value,the second being whether the property is editable and # the third being the type. 
def build_dict_of_Stage_attrs(scene_template): res_dict = build_dict_of_PhyObj_attrs(scene_template) res_dict["gravity"] = (scene_template.gravity, True, "vector") res_dict["origin"] = (scene_template.origin, True, "vector") res_dict["semantic_asset_handle"] = ( scene_template.semantic_asset_handle, True, "string", ) res_dict["semantic_asset_type"] = (scene_template.semantic_asset_type, True, "int") res_dict["navmesh_asset_handle"] = ( scene_template.navmesh_asset_handle, True, "string", ) res_dict["house_filename"] = (scene_template.house_filename, True, "string") res_dict["frustum_culling"] = (scene_template.frustum_culling, True, "boolean") return res_dict # This method will build a dict containing k-v pairs of attribute property names # and values for the passed physics manager template. The values are tuples with # the first entry being the value,the second being whether the property is # editable and the third being the type. def build_dict_of_PhysicsSim_attrs(physics_template): res_dict = build_dict_of_Default_attrs(physics_template) res_dict["gravity"] = (physics_template.gravity, True, "vector") res_dict["timestep"] = (physics_template.timestep, True, "double") res_dict["max_substeps"] = (physics_template.max_substeps, True, "int") res_dict["friction_coefficient"] = ( physics_template.friction_coefficient, True, "double", ) res_dict["restitution_coefficient"] = ( physics_template.restitution_coefficient, True, "double", ) # Read-only values res_dict["simulator"] = (physics_template.simulator, False, "string") return res_dict # This method will build a dict containing k-v pairs of attribute property names # and values that are shared among all primitive asset attributes templates. # The values are tuples with the first entry being the value,the second being # whether the property is editable and the third being the type. def build_dict_of_prim_attrs(prim_template): res_dict = build_dict_of_Default_attrs(prim_template) res_dict["use_texture_coords"] = (prim_template.use_texture_coords, True, "boolean") res_dict["use_tangents"] = (prim_template.use_tangents, True, "boolean") res_dict["num_rings"] = (prim_template.num_rings, True, "int") res_dict["num_segments"] = (prim_template.num_segments, True, "int") res_dict["half_length"] = (prim_template.half_length, True) # Read-only values res_dict["prim_obj_class_name"] = ( prim_template.prim_obj_class_name, False, "string", ) res_dict["prim_obj_type"] = (prim_template.prim_obj_type, False, "int") res_dict["is_valid_template"] = (prim_template.is_valid_template, False, "boolean") return res_dict # This method will build a dict containing k-v pairs of attribute property names # and values for the passed capsule primitive template. The values are tuples with # the first entry being the value,the second being whether the property is # editable and the third being the type. def build_dict_of_Capsule_prim_attrs(capsule_template): res_dict = build_dict_of_prim_attrs(capsule_template) res_dict["hemisphere_rings"] = (capsule_template.hemisphere_rings, True, "int") res_dict["cylinder_rings"] = (capsule_template.cylinder_rings, True, "int") return res_dict # This method will build a dict containing k-v pairs of attribute property names # and values for the passed cone primitive template. The values are tuples with # the first entry being the value,the second being whether the property is # editable and the third being the type. 
def build_dict_of_Cone_prim_attrs(cone_template): res_dict = build_dict_of_prim_attrs(cone_template) res_dict["use_cap_end"] = (cone_template.use_cap_end, True, "boolean") return res_dict # This method will build a dict containing k-v pairs of attribute property names # and values for the passed cube primitive template. The values are tuples with # the first entry being the value,the second being whether the property is # editable and the third being the type. def build_dict_of_Cube_prim_attrs(cube_template): res_dict = build_dict_of_prim_attrs(cube_template) return res_dict # This method will build a dict containing k-v pairs of attribute property names # and values for the passed cylinder primitive template. The values are tuples with # the first entry being the value,the second being whether the property is # editable and the third being the type. def build_dict_of_Cylinder_prim_attrs(cylinder_template): res_dict = build_dict_of_prim_attrs(cylinder_template) res_dict["use_cap_ends"] = (cylinder_template.use_cap_ends, True, "boolean") return res_dict # This method will build a dict containing k-v pairs of attribute property names # and values for the passed icosphere primitive template. The values are tuples with # the first entry being the value,the second being whether the property is # editable and the third being the type. def build_dict_of_Icosphere_prim_attrs(icosphere_template): res_dict = build_dict_of_prim_attrs(icosphere_template) res_dict["subdivisions"] = (icosphere_template.subdivisions, True, "int") return res_dict # This method will build a dict containing k-v pairs of attribute property names # and values for the passed UV-Sphere primitive template. The values are tuples with # the first entry being the value,the second being whether the property is # editable and the third being the type. def build_dict_of_UVSphere_prim_attrs(uvsphere_template): res_dict = build_dict_of_prim_attrs(uvsphere_template) return res_dict # This method will deduce the appropriate template type and build the subsequent # dictionary containing containing k-v pairs of template property names # and values for the passed template. The values are tuples with # the first entry being the value,the second being whether the property is # editable and the third being the type. 
def build_dict_from_template(template): template_class = template.template_class if "ObjectAttributes" in template_class: return build_dict_of_Object_attrs(template) if "StageAttributes" in template_class: return build_dict_of_Stage_attrs(template) if "PhysicsManagerAttributes" in template_class: return build_dict_of_PhysicsSim_attrs(template) if "CapsulePrimitiveAttributes" in template_class: return build_dict_of_Capsule_prim_attrs(template) if "ConePrimitiveAttributes" in template_class: return build_dict_of_Cone_prim_attrs(template) if "CubePrimitiveAttributes" in template_class: return build_dict_of_Cube_prim_attrs(template) if "CylinderPrimitiveAttributes" in template_class: return build_dict_of_Cylinder_prim_attrs(template) if "IcospherePrimitiveAttributes" in template_class: return build_dict_of_Icosphere_prim_attrs(template) if "UVSpherePrimitiveAttributes" in template_class: return build_dict_of_UVSphere_prim_attrs(template) print("Unknown template type : %s " % template_class) return None # This will set a template's attributes from the passed dictionary def set_template_properties_from_dict(template, template_dict): for k, v in template_dict.items(): setattr(template, k, v[0]) return template # This will display all the properties of an attributes template def show_template_properties(template): template_dict = build_dict_from_template(template) print("Template {} has : ".format(template.handle)) for k, v in template_dict.items(): print( "\tProperty {} has value {} of type {} that is editable : {}".format( k, v[0], v[2], v[1] ) ) # @title Define Simulation Utility Functions { display-mode: "form" } # @markdown (double click to show code) # @markdown - simulate def simulate(sim, dt=1.0, get_frames=True): # simulate dt seconds at 60Hz to the nearest fixed timestep print("Simulating {:.3f} world seconds.".format(dt)) observations = [] start_time = sim.get_world_time() while sim.get_world_time() < start_time + dt: sim.step_physics(1.0 / 60.0) if get_frames: observations.append(sim.get_sensor_observations()) return observations # %% # @title Define Colab GUI Utility Functions { display-mode: "form" } # @markdown (double click to show code) # @markdown This cell provides utility functions to build and manage IPyWidget interactive components. 
# Event handler for dropdowns displaying file-based object handles def on_file_obj_ddl_change(ddl_values): global sel_file_obj_handle sel_file_obj_handle = ddl_values["new"] return sel_file_obj_handle # Event handler for dropdowns displaying prim-based object handles def on_prim_obj_ddl_change(ddl_values): global sel_prim_obj_handle sel_prim_obj_handle = ddl_values["new"] return sel_prim_obj_handle # Event handler for dropdowns displaying asset handles def on_prim_ddl_change(ddl_values): global sel_asset_handle sel_asset_handle = ddl_values["new"] return sel_asset_handle # Build a dropdown list holding obj_handles and set its event handler def set_handle_ddl_widget(obj_handles, handle_types, sel_handle, on_change): sel_handle = obj_handles[0] descStr = handle_types + " Template Handles:" style = {"description_width": "300px"} obj_ddl = widgets.Dropdown( options=obj_handles, value=sel_handle, description=descStr, style=style, disabled=False, layout={"width": "max-content"}, ) obj_ddl.observe(on_change, names="value") return obj_ddl, sel_handle def set_button_launcher(desc): button = widgets.Button( description=desc, layout={"width": "max-content"}, ) return button def make_sim_and_vid_button(prefix, dt=1.0): if not HAS_WIDGETS: return def on_sim_click(b): observations = simulate(sim, dt=dt) vut.make_video( observations, "color_sensor_1st_person", "color", output_path + prefix, open_vid=show_video, ) sim_and_vid_btn = set_button_launcher("Simulate and Make Video") sim_and_vid_btn.on_click(on_sim_click) ipydisplay(sim_and_vid_btn) def make_clear_all_objects_button(): if not HAS_WIDGETS: return def on_clear_click(b): rigid_obj_mgr.remove_all_objects() clear_objs_button = set_button_launcher("Clear all objects") clear_objs_button.on_click(on_clear_click) ipydisplay(clear_objs_button) # Builds widget-based UI components def build_widget_ui(obj_attr_mgr, prim_attr_mgr): # Holds the user's desired file-based object template handle global sel_file_obj_handle sel_file_obj_handle = "" # Holds the user's desired primitive-based object template handle global sel_prim_obj_handle sel_prim_obj_handle = "" # Holds the user's desired primitive asset template handle global sel_asset_handle sel_asset_handle = "" # Construct DDLs and assign event handlers # All file-based object template handles file_obj_handles = obj_attr_mgr.get_file_template_handles() # All primitive asset-based object template handles prim_obj_handles = obj_attr_mgr.get_synth_template_handles() # All primitive asset handles template handles prim_asset_handles = prim_attr_mgr.get_template_handles() # If not using widgets, set as first available handle if not HAS_WIDGETS: sel_file_obj_handle = file_obj_handles[0] sel_prim_obj_handle = prim_obj_handles[0] sel_prim_obj_handle = prim_asset_handles[0] return # Build widgets file_obj_ddl, sel_file_obj_handle = set_handle_ddl_widget( file_obj_handles, "File-based Object", sel_file_obj_handle, on_file_obj_ddl_change, ) prim_obj_ddl, sel_prim_obj_handle = set_handle_ddl_widget( prim_obj_handles, "Primitive-based Object", sel_prim_obj_handle, on_prim_obj_ddl_change, ) prim_asset_ddl, sel_asset_handle = set_handle_ddl_widget( prim_asset_handles, "Primitive Asset", sel_asset_handle, on_prim_ddl_change ) # Display DDLs ipydisplay(file_obj_ddl) ipydisplay(prim_obj_ddl) ipydisplay(prim_asset_ddl) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("--no-display", dest="display", action="store_false") parser.add_argument("--no-make-video", dest="make_video", 
action="store_false") parser.set_defaults(show_video=True, make_video=True) args, _ = parser.parse_known_args() show_video = args.display display = args.display make_video = args.make_video else: show_video = False make_video = False display = False # %% [markdown] # # View Assets in Habitat-sim # Use the code in this section to view assets in the Habitat-sim engine. # %% # [initialize] # @title Initialize Simulator and Load Objects { display-mode: "form" } sim_settings = make_default_settings() sim_settings["scene"] = "none" sim_settings["sensor_pitch"] = 0 sim_settings["override_scene_light_defaults"] = True sim_settings["scene_light_setup"] = "" # use 3rd person camera sim_settings["color_sensor_3rd_person"] = True make_simulator_from_settings(sim_settings) # [/initialize] # [specify_object] # @markdown Drag a stage or object asset into a directory at the left, # @markdown and then load it below by setting object_to_view_path # @markdown Put the full path to the asset you would like to view here : # fmt: off object_to_view_path = "./data/test_assets/scenes/simple_room.glb" # @param {type:"string"} # fmt: on # this is the name to save the resultant video with clip_short_name = object_to_view_path.split("/")[-1].split(".")[0] # [/specify_object] # %% [markdown] # ## Synthesize Carousel View # This cell will make a carousel view of the loaded stage. # %% # [build_carousel_view] # @markdown This cell loads the object, centers it, and then moves a camera in a circle around # @markdown the center of the scene, recording observations, which are subsequently stitched # @markdown together to build a video of the object # check if desired object actually exists if os.path.exists(object_to_view_path) and os.path.isfile(object_to_view_path): # Acquire the sensor being used visual_sensor = sim._sensors["color_sensor_3rd_person"] initial_sensor_position = np.array(visual_sensor._spec.position) initial_sensor_orientation = np.array(visual_sensor._spec.orientation) # load an object template and instantiate an object to view object_template = obj_attr_mgr.create_new_template(str(object_to_view_path), False) # if using a stage and it displays sideways, you may need to reorient it via its attributes for it to display properly. 
# @markdown If the asset being displayed is on its side, enable orientation_correction below : orientation_correction = False # @param {type: "boolean"} # This will correct the orientation (Dependent on PR : ) if orientation_correction: object_template.orient_up = (0.0, 0.0, 1.0) object_template.orient_front = (0.0, 1.0, 0.0) # modify template here if desired and then register it obj_temp_id = obj_attr_mgr.register_template(object_template) # create object obj = rigid_obj_mgr.add_object_by_template_id(obj_temp_id) # place object in center - must be done before setting to static # get bb of object obj_bbox = obj.root_scene_node.compute_cumulative_bb() # find center of bb and move to scene origin - this centers object obj.translation = -obj_bbox.center() # get max dim to use as scale for sensor placement bb_scale = max(obj_bbox.max) # determine sensor placement based on size of object sensor_pos = bb_scale * np.array([0, 1, 2]) # set object to be static obj.motion_type = habitat_sim.physics.MotionType.STATIC # initialize an agent and set its intial state agent = sim.initialize_agent(sim_settings["default_agent"]) agent_state = habitat_sim.AgentState() agent_state.position = np.array([0.0, 0.0, 0.0]) # in world space agent.set_state(agent_state) # set the sensor to be behind and above the agent's initial loc # distance is scaled by size of largest object dimension visual_sensor._spec.position = agent_state.position + sensor_pos visual_sensor._spec.orientation = np.array([-0.5, 0, 0]) visual_sensor._sensor_object.set_transformation_from_spec() # Create observations array observations = [] # @markdown Set how long the resutlant video should be, in seconds. The object will make 1 full revolution during this time. video_length = 4.8 # @param {type:"slider", min:1.0, max:20.0, step:0.1} # Sim time step time_step = 1.0 / 60.0 # Amount to rotate per frame to make 1 full rotation rot_amount = 2 * math.pi / (video_length / time_step) # simulate with updated camera at each frame start_time = sim.get_world_time() while sim.get_world_time() - start_time < video_length: sim.step_physics(time_step) # rotate the agent to rotate the camera agent_state.rotation *= ut.quat_from_angle_axis( rot_amount, np.array([0, 1.0, 0]) ) agent.set_state(agent_state) observations.append(sim.get_sensor_observations()) # video rendering of carousel view video_prefix = clip_short_name + "_scene_view" if make_video: vut.make_video( observations, "color_sensor_3rd_person", "color", output_path + video_prefix, open_vid=show_video, video_dims=[1280, 720], ) # reset the sensor state for other examples visual_sensor._spec.position = initial_sensor_position visual_sensor._spec.orientation = initial_sensor_orientation visual_sensor._sensor_object.set_transformation_from_spec() # remove added objects rigid_obj_mgr.remove_all_objects() else: print( "\nChosen File : '{}' does not exist or cannot be found. Aborting.\n".format( object_to_view_path ) ) # [/build_carousel_view] # %%
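# A quick numerical check of the carousel math above (a minimal sketch: video_length and
# time_step are re-declared here with the same example values rather than read from the
# cell above). Rotating the agent by rot_amount radians per physics step for
# video_length / time_step steps adds up to exactly one full revolution.
import math

video_length = 4.8      # seconds, matching the slider default above
time_step = 1.0 / 60.0  # 60 Hz physics step
n_steps = video_length / time_step
rot_amount = 2 * math.pi / n_steps
print(math.isclose(rot_amount * n_steps, 2 * math.pi))  # expected: True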
examples/tutorials/colabs/asset_viewer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 1. Reading multiple data files from glob import glob import os import pandas as pd def print_function(strFunction): ''' Print string function and its output nicely Parameter --------- strFunction: str String representation of a function Return ------ Nicely formatted print output ''' #print(func) #print(eval(func)) print('{0}: {1}'.format(strFunction, eval(strFunction))) print_function('os.getcwd()') print_function('os.listdir()') print_function('os.listdir("data")') def getcwd_csv(cwd): ''' Read all csv in the provided path as [pd.Dataframe, pd.Dataframe, ...] Parameter --------- cwd: str Path of current working directory Return ------ Return [pd.Dataframe, pd.Dataframe, ...] ''' import os import pandas as pd ori = os.getcwd() os.chdir(cwd) result = [] for file in os.listdir(): if 'csv' in file: print('Appending: {0}'.format(file)) result.append(pd.read_csv(file)) print('Finished appending, the list length: {0}'.format(len(result))) if len(result) == 0: print('There is 0 pd.Dataframe in the list') elif len(result) > 0: print('There is {0} pd.Dataframe(s) in the list'.format(len(result))) else: print('Something is wrong, please check getcwd_csv() function') os.chdir(ori) return result automobiles, oil_price, sp500, pittsburgh2013, exchange = [df for df in getcwd_csv('data')] print_function('os.getcwd()') print_function('os.listdir()') print_function('os.listdir("data")') automobiles, oil_price, sp500, pittsburgh2013, exchange = [pd.read_csv(df) for df in glob('data/*.csv')] automobiles.shape, oil_price.shape, sp500.shape, pittsburgh2013.shape, exchange.shape names1981, names1881 = [pd.read_csv(df) for df in glob('data/Baby names/*.csv')] names1881.shape, names1981.shape gdp_china, gdp_usa = [pd.read_csv(df) for df in glob('data/GDP/*.csv')] gdp_china.shape, gdp_usa.shape sales_feb_2015, feb_sales_service, feb_sales_software, sales_jan_2015, sales_mar_2015, feb_sales_hardware = [pd.read_csv(df) for df in glob('data/Sales/*.csv')] sales_feb_2015.shape, feb_sales_service.shape, feb_sales_software.shape, sales_jan_2015.shape, sales_mar_2015.shape, feb_sales_hardware.shape print(glob('data/Summer Olympic medals/*')) gold, silver_top5, gold_top5, bronze, silver, ioc_country_codes, bronze_top5 = [pd.read_csv(df) for df in glob('data/Summer Olympic medals/*.csv')] gold.shape, silver_top5.shape, gold_top5.shape, bronze.shape, silver.shape, ioc_country_codes.shape, bronze_top5.shape # ## 1.1. Reindexing dataframes def print_dataframes_statistics(dataframes): ''' Print the statistics of list of dataframes Parameter --------- dataframes: list list of Pandas dataframe Return ------ Print output ''' for dataframe in dataframes: print(dataframe.info()) print(dataframe.shape) print(dataframe.head()) print('----------------------------------------\n') # + dfs = [gold, silver_top5, gold_top5, bronze, silver, ioc_country_codes, bronze_top5] print_dataframes_statistics(dfs) # -
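# Before reindexing the medal tables, here is a minimal, self-contained sketch of the two
# operations this section is about; the toy country/total values below are illustrative only,
# not taken from the loaded files.

# +
import pandas as pd

toy = pd.DataFrame({'Total': [2088, 1195, 1052]},
                   index=['United States', 'Soviet Union', 'United Kingdom'])

# Conform the rows to another index; labels missing from `toy` come back as NaN
target_index = ['United Kingdom', 'United States', 'France']
print(toy.reindex(target_index))

# Or simply sort the existing index alphabetically
print(toy.sort_index())
# -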
pandas/merging-dataframes-with-pandas/merging-dataframes-with-pandas.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import warnings warnings.filterwarnings('ignore') import sklearn data = pd.read_csv('a.csv') data.head() cor = data.corr() cor = abs(cor['mortality_rate']) print(cor[cor > 0.3]) data.drop([33, 47], inplace=True) # Get rid of Guam/Puerto Rico y = data['mortality_rate'] # Labels states = data['state'] # If we want to look a state up later data.drop(columns=['mortality_rate', 'Locationdesc', 'country_region', 'last_update', 'lat', 'long', 'confirmed', 'deaths', 'recovered', 'active', 'people_tested', 'people_hospitalized', 'testing_rate', 'incident_rate', 'hospitalization_rate', 'state'], inplace=True) data.fillna(data.mean(), inplace=True) # + from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler scaled = StandardScaler().fit_transform(data) X = pd.DataFrame(scaled, columns=data.columns) # + from sklearn.linear_model import LassoCV from sklearn.linear_model import Ridge from sklearn.metrics import mean_squared_error from sklearn.neighbors import KNeighborsRegressor from sklearn.model_selection import cross_val_score from sklearn.model_selection import GridSearchCV from sklearn.ensemble import GradientBoostingRegressor from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.neural_network import MLPRegressor from sklearn.svm import LinearSVR from sklearn.metrics import make_scorer def evaluate_model(model, param_dict, passes=10): min_test_err = 1e10 best_hyperparams = {} corr_full_err = None scorer = make_scorer(mean_squared_error, greater_is_better=False) for i in range(passes): print('Pass {}/10 for model {}'.format(i + 1, model)) X_train, X_test, y_train, y_test = train_test_split(data, y, test_size=0.2) default_model = model() model_gs = GridSearchCV(default_model, param_dict, cv=3, n_jobs=16, verbose=1, scoring=scorer) model_gs.fit(X_train, y_train) optimal_model = model(**model_gs.best_params_) optimal_model.fit(X_train, y_train) y_pred = optimal_model.predict(X_test) err = mean_squared_error(y_test, y_pred) full_y_pred=optimal_model.predict(data) full_err = mean_squared_error(full_y_pred, y) #print('MSE for {}: {}'.format(model, err)) if err < min_test_err: min_test_err = err best_hyperparams = model_gs.best_params_ corr_full_err = full_err print('Model {} with hyperparams {} yielded test error {} - overall error {}'.format(model, best_hyperparams, min_test_err, corr_full_err)) # + evaluate_model(LassoCV, {'eps': [0.001, 0.002, 0.003], 'n_alphas':[200, 400, 600], 'tol': [0.001, 0.005, 0.01], 'max_iter': [4000, 7000]}) #evaluate_model(Ridge, {'alpha' : [(0.1, 0.3, 0.7, 1.0, 2.0, 5.0)]}) evaluate_model(KNeighborsRegressor, {'n_neighbors' : np.arange(1, 10)}) # + evaluate_model(GradientBoostingRegressor, { 'learning_rate': [0.1, 0.05, 0.02], 'n_estimators': [100, 200, 400, 800], 'max_depth': [1, 2, 3, 4, 5], 'max_features' : ['auto', 'sqrt', 'log2']}) evaluate_model(DecisionTreeRegressor, {'splitter': ['best', 'random'], 'criterion': ['mse', 'friedman_mse', 'mae'], 'max_depth': [None, 2, 3, 4, 5], 'max_features' : ['auto', 'sqrt', 'log2']}) evaluate_model(RandomForestRegressor, {'n_estimators': [100, 200, 400, 800], 'max_depth': [None, 2, 3, 4, 5], 'min_samples_split': [2, 3, 4], 'max_features' : ['auto', 'sqrt', 'log2']}) 
evaluate_model(MLPRegressor, {'hidden_layer_sizes': [(100,) * 3, (100,) * 10, (100,) * 30, (100,) * 100]})
# -

evaluate_model(LinearSVR, {'tol': [1e-4, 1e-5, 1e-6, 1e-7], 'C': [0.5, 1, 3, 5, 10],
                           'max_iter': [1000, 2000, 4000, 8000]}, passes=10)
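# A possible follow-up (a sketch, not part of the original analysis): once evaluate_model has
# printed the best hyperparameters for a model family, refit a single estimator with them and
# check its held-out error. The hyperparameter values below are placeholders; substitute
# whatever evaluate_model reported for your run.

# +
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(data, y, test_size=0.2, random_state=0)
final_model = RandomForestRegressor(n_estimators=200, max_depth=3, random_state=0)  # placeholder hyperparams
final_model.fit(X_train, y_train)
print(mean_squared_error(y_test, final_model.predict(X_test)))
# -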
src/my/covid2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Find an estimate for ${\alpha}$ (Term to Abbreviation Ratio) import Abbreviation_and_NC_Extraction import re import pandas as pd from string import punctuation # ## Load PURE Data from file: pure_data = pd.read_csv('pure_data.CSV', names=["dataset", "id", "req_texts"], sep='\t', encoding='utf8') ids = list(pure_data['id'].values) reqs = list(pure_data['req_texts'].values) dataset = list(pure_data['dataset'].values) # ## The Main Function: Independent compilation of the set of NCs and the set of abbreviations. Calculation of the ratio of their cardinalities as the final result. set_of_detected_ncs = set() for req in reqs: set_of_detected_ncs = set_of_detected_ncs.union(Abbreviation_and_NC_Extraction.nc_detect(req)) for term in list(set_of_detected_ncs)[:20]: print(term) print(len(set_of_detected_ncs)) set_of_detected_abbreviations = set() for req in reqs: extracted_abbreviations = Abbreviation_and_NC_Extraction.abbv_detect(req) for abbv in extracted_abbreviations: cleaned_abbv = re.sub(r"[\([{})\]]", "", abbv) set_of_detected_abbreviations.add(cleaned_abbv.strip(punctuation)) print(len(set_of_detected_abbreviations)) # ## Final result: ratio = len(set_of_detected_ncs)/len(set_of_detected_abbreviations) print(ratio)
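# A small, self-contained illustration of the abbreviation-cleaning step used above; the
# sample abbreviations are invented for demonstration purposes only.

# +
import re
from string import punctuation

for raw in ["(GSM)", "[API],", "{UML}."]:
    cleaned = re.sub(r"[\([{})\]]", "", raw).strip(punctuation)
    print(raw, "->", cleaned)
# -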
4) Section_5.4 -- Find an Estimate for Alpha, the Term to Abbreviation Ratio.ipynb
;; ---
;; jupyter:
;;   jupytext:
;;     text_representation:
;;       extension: .clj
;;       format_name: light
;;       format_version: '1.5'
;;       jupytext_version: 1.14.4
;;   kernelspec:
;;     display_name: Clojure (clojupyter=0.2.3=2)
;;     language: clojure
;;     name: conda-clojupyter
;; ---

;; ## Trying hard

(require '[clojupyter.misc.helper :as helper])

(helper/add-dependencies '[incanter "1.5.7"])

(use '(incanter core stats charts io)) ; include Incanter's facilities into working namespace
Untitled7.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import sidewalkify as swky import osmnx as ox import numpy as np import pandas as pd import networkx as nx import shapely as shapely import geopandas as gpd from tqdm._tqdm_notebook import tqdm_notebook tqdm_notebook.pandas() # ## Get the initial road map # Here's what osmnx thinks of as 'walking': ox.downloader._get_osm_filter("walk") brighton_streets = gpd.read_file("../../data/brighton/brighton_streets.shp") brighton_streets brighton_streets.plot() print(brighton_streets.crs) EPSG_4326 = brighton_streets.crs # + def multis_to_lines(ls_or_mls): if type(ls_or_mls) == shapely.geometry.LineString: return [ls_or_mls] else: return list(ls_or_mls) df_temp = pd.DataFrame(brighton_streets) df_temp['geometry'] = df_temp['geometry'].map(multis_to_lines) df_temp = df_temp.explode('geometry') brighton_streets = gpd.GeoDataFrame(df_temp, geometry='geometry') brighton_streets.crs = EPSG_4326 brighton_streets # - EPSG_26919 = "EPSG:26919" brighton_streets = brighton_streets.to_crs(EPSG_26919) sidewalk_paths = swky.graph.graph_workflow(brighton_streets) brighton_sidewalks = swky.draw.draw_sidewalks(sidewalk_paths) brighton_sidewalks.crs=EPSG_26919 brighton_sidewalks.to_file("brighton_sidewalks") brighton_streets.to_file("brighton_streets_26919") # + import math def linestring_start(linestring): (l1,l2) = list(linestring.coords)[0] return (l1,l2) def linestring_end(linestring): (l1,l2) = list(linestring.coords)[1] return (l1,l2) def linestring_heading(linestring): # 0 is true north, 90 is east # so heading = 90 - usual_angle (in standard form) (l1,l2) = linestring_start(linestring) (m1,m2) = linestring_end(linestring) angle_deg = math.atan2(m2-l2, m1-l1) * 360 / (2 * math.pi) heading_deg = int((90 - angle_deg) % 360) return(heading_deg) # probably need to rework this for GSV, ok brighton_streets['heading'] = brighton_streets.geometry.map(linestring_heading) # - # Let's segment. 
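# First, a quick sanity check of linestring_heading from the cell above, using toy
# segments rather than real street geometry: 0 should mean due north and 90 due east.

# +
from shapely.geometry import LineString

north = LineString([(0, 0), (0, 1)])
east = LineString([(0, 0), (1, 0)])
print(linestring_heading(north), linestring_heading(east))  # expected: 0 90
# -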
brighton_streets

brighton_streets = brighton_streets.to_crs(EPSG_4326)

brighton_streets['length'] = brighton_streets.to_crs("EPSG:26919").length

brighton_streets['ratio'] = brighton_streets.length/brighton_streets['length']

brighton_streets['ratio'].describe()

# 0.000011 seems reasonable
import math
CUT_LENGTH_METERS = 8
CUT_LENGTH = 0.000011 * CUT_LENGTH_METERS

sum((brighton_streets.length/CUT_LENGTH).map(math.ceil))

brighton_streets = brighton_streets.drop(columns=['ratio', 'highway', 'key'])

# +
from shapely.geometry import MultiLineString
from shapely.ops import linemerge

def safe_linemerge(line):
    # collapse any MultiLineString geometries into single LineStrings
    if type(line) == MultiLineString:
        return linemerge(line)
    else:
        return line

brighton_streets.geometry = brighton_streets.geometry.map(safe_linemerge)

# +
# Thank you <NAME> https://gist.github.com/sgillies/465156#file_cut.py
from shapely.geometry import LineString, Point

def cut(line, distance):
    # Cuts a line in two at a distance from its starting point
    if distance <= 0.0 or distance >= line.length:
        return [LineString(line)]
    coords = list(line.coords)
    for i, p in enumerate(coords):
        pd = line.project(Point(p))
        if pd == distance:
            return [
                LineString(coords[:i+1]),
                LineString(coords[i:])]
        if pd > distance:
            cp = line.interpolate(distance)
            return [
                LineString(coords[:i] + [(cp.x, cp.y)]),
                LineString([(cp.x, cp.y)] + coords[i:])]

# here's me
def recursive_cut(line, distance):
    if [line] == cut(line, distance):
        return [line]
    else:
        segment, rest = cut(line, distance)
        return [segment] + recursive_cut(rest, distance)

# +
# now we recursive_cut streets
brighton_streets_segmented = brighton_streets.copy()
brighton_streets_segmented['segments'] = brighton_streets.geometry.map(lambda x: recursive_cut(x, CUT_LENGTH))

df_temp = pd.DataFrame(brighton_streets_segmented)
df_temp = df_temp.explode('segments').drop(columns=['geometry'])
brighton_streets_segmented = gpd.GeoDataFrame(df_temp, geometry='segments')
brighton_streets_segmented.crs = EPSG_4326
# -

brighton_streets_segmented.length

# +
# get midpoints to figure out where to take photo
def midpoint(linestring):
    (x1, y1) = linestring_start(linestring)
    (x2, y2) = linestring_end(linestring)
    x_mid = (x2 + x1)/2
    y_mid = (y2 + y1)/2
    return Point(x_mid, y_mid)

brighton_streets_segmented['midpoints'] = brighton_streets_segmented.geometry.map(midpoint)
# -

list(brighton_streets_segmented.head(1)['segments'].values[0].coords)

# Plan:
# * feed each row to selenium
#   * two pics, left and right
#   * cut off for 'short' ones -- can add 'length_4269' to see where
# * project little lines out and clip/delete bad sidewalks

brighton_crosswalks = gpd.read_file("brighton_crosswalks/brighton_crosswalks.shp")
brighton_crosswalks

# !which python
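# A quick self-contained check of the cut/recursive_cut helpers defined above (toy line,
# not street data): a 10-unit line cut every 3 units should come back as four pieces of
# lengths 3, 3, 3 and 1.

# +
from shapely.geometry import LineString

toy_line = LineString([(0, 0), (10, 0)])
pieces = recursive_cut(toy_line, 3)
print(len(pieces))                            # expected: 4
print([round(p.length, 6) for p in pieces])   # expected: [3.0, 3.0, 3.0, 1.0]
# -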
jupyter/brighton/Brighton streets.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # # Regular Grid Variogram in Python for Engineers and Geoscientists # # ## with GSLIB's GAMV Program Converted to Python # # ### <NAME>, Associate Professor, University of Texas at Austin # # # #### Contacts: [Twitter/@GeostatsGuy](https://twitter.com/geostatsguy) | [GitHub/GeostatsGuy](https://github.com/GeostatsGuy) | [www.michaelpyrcz.com](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) # # This is a tutorial for / demonstration of **Irregularly Sampled Variogram Calculation in Python with GSLIB's GAMV program translated to Python, wrappers and reimplementations of other GSLIB: Geostatistical Library methods** (Deutsch and Journel, 1997). # # This exercise demonstrates the semivariogram calculation method in Python with wrappers and reimplimentation of GSLIB methods. The steps include: # # 1. generate a 2D model with sequential Gaussian simulation # 2. sample from the simulation # 3. calculate and visualize experimental semivariograms # # To accomplish this I have provide wrappers or reimplementation in Python for the following GSLIB methods: # # 1. sgsim - sequantial Gaussian simulation limited to 2D and unconditional # 2. hist - histograms plots reimplemented with GSLIB parameters using python methods # 3. locmap - location maps reimplemented with GSLIB parameters using python methods # 4. pixelplt - pixel plots reimplemented with GSLIB parameters using python methods # 5. locpix - my modification of GSLIB to superimpose a location map on a pixel plot reimplemented with GSLIB parameters using Python methods # 5. affine - affine correction adjust the mean and standard deviation of a feature reimplemented with GSLIB parameters using Python methods # # I have also started to translate the GSLIB support subfunctions to Python. Stay tuned. # # The GSLIB source and executables are available at http://www.statios.com/Quick/gslib.html. For the reference on using GSLIB check out the User Guide, GSLIB: Geostatistical Software Library and User's Guide by <NAME> and <NAME>. Overtime, more of the GSLIB programs will be translated to Python and there will be no need to have the executables. For this workflow you will need sgsim.exe from GSLIB.com for windows and Mac OS executables from https://github.com/GeostatsGuy/GSLIB_MacOS. # # I did this to allow people to use these GSLIB functions that are extremely robust in Python. Also this should be a bridge to allow so many familar with GSLIB to work in Python as a kept the parameterization and displays consistent with GSLIB. The wrappers are simple functions declared below that write the parameter files, run the GSLIB executable in the working directory and load and visualize the output in Python. This will be included on GitHub for anyone to try it out https://github.com/GeostatsGuy/. # # This was my first effort to translate the GSLIB Fortran to Python. It was pretty easy so I'll start translating other critical GSLIB functions. I've completed NSCORE, DECLUS, GAM and now GAMV as of now. # # #### Load the required libraries # # The following code loads the required libraries. 
import os # to set current working directory import numpy as np # arrays and matrix math import pandas as pd # DataFrames import matplotlib.pyplot as plt # plotting # If you get a package import error, you may have to first install some of these packages. This can usually be accomplished by opening up a command window on Windows and then typing 'python -m pip install [package-name]'. More assistance is available with the respective package docs. # # #### Declare functions # # Here are the wrappers and reimplementations of GSLIB method along with two utilities to load GSLIB's Geo-EAS from data files into DataFrames and 2D Numpy arrays. These are used in the testing workflow. # + # Some GeostatsPy Functions - by <NAME>, maintained at https://git.io/fNgR7. # A set of functions to provide access to GSLIB in Python. # GSLIB executables: nscore.exe, declus.exe, gam.exe, gamv.exe, vmodel.exe, kb2d.exe & sgsim.exe must be in the working directory import pandas as pd import os import numpy as np import matplotlib.pyplot as plt import random as rand image_type = 'tif'; dpi = 600 # utility to convert GSLIB Geo-EAS files to a 1D or 2D numpy ndarray for use with Python methods def GSLIB2ndarray(data_file,kcol,nx,ny): colArray = [] if ny > 1: array = np.ndarray(shape=(ny,nx),dtype=float,order='F') else: array = np.zeros(nx) with open(data_file) as myfile: # read first two lines head = [next(myfile) for x in range(2)] line2 = head[1].split() ncol = int(line2[0]) # get the number of columns for icol in range(0, ncol): # read over the column names head = [next(myfile) for x in range(1)] if icol == kcol: col_name = head[0].split()[0] if ny > 1: for iy in range(0,ny): for ix in range(0,nx): head = [next(myfile) for x in range(1)] array[ny-1-iy][ix] = head[0].split()[kcol] else: for ix in range(0,nx): head = [next(myfile) for x in range(1)] array[ix] = head[0].split()[kcol] return array,col_name # utility to convert GSLIB Geo-EAS files to a pandas DataFrame for use with Python methods def GSLIB2Dataframe(data_file): colArray = [] with open(data_file) as myfile: # read first two lines head = [next(myfile) for x in range(2)] line2 = head[1].split() ncol = int(line2[0]) for icol in range(0, ncol): head = [next(myfile) for x in range(1)] colArray.append(head[0].split()[0]) data = np.loadtxt(myfile, skiprows = 0) df = pd.DataFrame(data) df.columns = colArray return df # histogram, reimplemented in Python of GSLIB hist with MatPlotLib methods, displayed and as image file def hist(array,xmin,xmax,log,cumul,bins,weights,xlabel,title,fig_name): plt.figure(figsize=(8,6)) cs = plt.hist(array, alpha = 0.2, color = 'red', edgecolor = 'black', bins=bins, range = [xmin,xmax], weights = weights, log = log, cumulative = cumul) plt.title(title) plt.xlabel(xlabel); plt.ylabel('Frequency') plt.savefig(fig_name + '.' 
+ image_type,dpi=dpi) plt.show() return # histogram, reimplemented in Python of GSLIB hist with MatPlotLib methods (version for subplots) def hist_st(array,xmin,xmax,log,cumul,bins,weights,xlabel,title): cs = plt.hist(array, alpha = 0.2, color = 'red', edgecolor = 'black', bins=bins, range = [xmin,xmax], weights = weights, log = log, cumulative = cumul) plt.title(title) plt.xlabel(xlabel); plt.ylabel('Frequency') return # location map, reimplemention in Python of GSLIB locmap with MatPlotLib methods def locmap(df,xcol,ycol,vcol,xmin,xmax,ymin,ymax,vmin,vmax,title,xlabel,ylabel,vlabel,cmap,fig_name): ixy = 0 plt.figure(figsize=(8,6)) im = plt.scatter(df[xcol],df[ycol],s=None, c=df[vcol], marker=None, cmap=cmap, norm=None, vmin=vmin, vmax=vmax, alpha=0.8, linewidths=0.8, edgecolors="black") plt.title(title) plt.xlim(xmin,xmax) plt.ylim(ymin,ymax) plt.xlabel(xlabel) plt.ylabel(ylabel) cbar = plt.colorbar(im, orientation = 'vertical',ticks=np.linspace(vmin,vmax,10)) cbar.set_label(vlabel, rotation=270, labelpad=20) plt.savefig(fig_name + '.' + image_type,dpi=dpi) plt.show() return im # location map, reimplemention in Python of GSLIB locmap with MatPlotLib methods (version for subplots) def locmap_st(df,xcol,ycol,vcol,xmin,xmax,ymin,ymax,vmin,vmax,title,xlabel,ylabel,vlabel,cmap): ixy = 0 im = plt.scatter(df[xcol],df[ycol],s=None, c=df[vcol], marker=None, cmap=cmap, norm=None, vmin=vmin, vmax=vmax, alpha=0.8, linewidths=0.8, verts=None, edgecolors="black") plt.title(title) plt.xlim(xmin,xmax) plt.ylim(ymin,ymax) plt.xlabel(xlabel) plt.ylabel(ylabel) cbar = plt.colorbar(im, orientation = 'vertical',ticks=np.linspace(vmin,vmax,10)) cbar.set_label(vlabel, rotation=270, labelpad=20) return im # pixel plot, reimplemention in Python of GSLIB pixelplt with MatPlotLib methods def pixelplt(array,xmin,xmax,ymin,ymax,step,vmin,vmax,title,xlabel,ylabel,vlabel,cmap,fig_name): print(str(step)) xx, yy = np.meshgrid(np.arange(xmin, xmax, step),np.arange(ymax, ymin, -1*step)) plt.figure(figsize=(8,6)) im = plt.contourf(xx,yy,array,cmap=cmap,vmin=vmin,vmax=vmax,levels=np.linspace(vmin,vmax,100)) plt.title(title) plt.xlabel(xlabel) plt.ylabel(ylabel) cbar = plt.colorbar(im,orientation = 'vertical',ticks=np.linspace(vmin,vmax,10)) cbar.set_label(vlabel, rotation=270, labelpad=20) plt.savefig(fig_name + '.' + image_type,dpi=dpi) plt.show() return im # pixel plot, reimplemention in Python of GSLIB pixelplt with MatPlotLib methods(version for subplots) def pixelplt_st(array,xmin,xmax,ymin,ymax,step,vmin,vmax,title,xlabel,ylabel,vlabel,cmap): xx, yy = np.meshgrid(np.arange(xmin, xmax, step),np.arange(ymax, ymin, -1*step)) ixy = 0 x = [];y = []; v = [] # use dummy since scatter plot controls legend min and max appropriately and contour does not! 
cs = plt.contourf(xx,yy,array,cmap=cmap,vmin=vmin,vmax=vmax,levels = np.linspace(vmin,vmax,100)) im = plt.scatter(x,y,s=None, c=v, marker=None,cmap=cmap, vmin=vmin, vmax=vmax, alpha=0.8, linewidths=0.8, edgecolors="black") plt.title(title) plt.xlabel(xlabel) plt.ylabel(ylabel) plt.clim(vmin,vmax) cbar = plt.colorbar(im, orientation = 'vertical') cbar.set_label(vlabel, rotation=270, labelpad=20) return cs # pixel plot and location map, reimplementation in Python of a GSLIB MOD with MatPlotLib methods def locpix(array,xmin,xmax,ymin,ymax,step,vmin,vmax,df,xcol,ycol,vcol,title,xlabel,ylabel,vlabel,cmap,fig_name): xx, yy = np.meshgrid(np.arange(xmin, xmax, step),np.arange(ymax, ymin, -1*step)) ixy = 0 plt.figure(figsize=(8,6)) cs = plt.contourf(xx, yy, array, cmap=cmap,vmin=vmin, vmax=vmax,levels = np.linspace(vmin,vmax,100)) im = plt.scatter(df[xcol],df[ycol],s=None, c=df[vcol], marker=None, cmap=cmap, vmin=vmin, vmax=vmax, alpha=0.8, linewidths=0.8, edgecolors="black") plt.title(title) plt.xlabel(xlabel) plt.ylabel(ylabel) plt.xlim(xmin,xmax) plt.ylim(ymin,ymax) cbar = plt.colorbar(orientation = 'vertical') cbar.set_label(vlabel, rotation=270, labelpad=20) plt.savefig(fig_name + '.' + image_type,dpi=dpi) plt.show() return cs # pixel plot and location map, reimplementation in Python of a GSLIB MOD with MatPlotLib methods(version for subplots) def locpix_st(array,xmin,xmax,ymin,ymax,step,vmin,vmax,df,xcol,ycol,vcol,title,xlabel,ylabel,vlabel,cmap): xx, yy = np.meshgrid(np.arange(xmin, xmax, step),np.arange(ymax, ymin, -1*step)) ixy = 0 cs = plt.contourf(xx, yy, array, cmap=cmap,vmin=vmin, vmax=vmax,levels = np.linspace(vmin,vmax,100)) im = plt.scatter(df[xcol],df[ycol],s=None, c=df[vcol], marker=None, cmap=cmap, vmin=vmin, vmax=vmax, alpha=0.8, linewidths=0.8, edgecolors="black") plt.title(title) plt.xlabel(xlabel) plt.ylabel(ylabel) plt.xlim(xmin,xmax) plt.ylim(ymin,ymax) cbar = plt.colorbar(orientation = 'vertical') cbar.set_label(vlabel, rotation=270, labelpad=20) # affine distribution correction reimplemented in Python with numpy methods def affine(array,tmean,tstdev): mean = np.average(array) stdev = np.std(array) array = (tstdev/stdev)*(array - mean) + tmean return(array) def make_variogram(nug,nst,it1,cc1,azi1,hmaj1,hmin1,it2=1,cc2=0,azi2=0,hmaj2=0,hmin2=0): if cc2 == 0: nst = 1 var = dict([('nug', nug), ('nst', nst), ('it1', it1),('cc1', cc1),('azi1', azi1),('hmaj1', hmaj1), ('hmin1', hmin1), ('it2', it2),('cc2', cc2),('azi2', azi2),('hmaj2', hmaj2), ('hmin2', hmin2)]) if nug + cc1 + cc2 != 1: print('\x1b[0;30;41m make_variogram Warning: sill does not sum to 1.0, do not use in simulation \x1b[0m') if cc1 < 0 or cc2 < 0 or nug < 0 or hmaj1 < 0 or hmaj2 < 0 or hmin1 < 0 or hmin2 < 0: print('\x1b[0;30;41m make_variogram Warning: contributions and ranges must be all positive \x1b[0m') if hmaj1 < hmin1 or hmaj2 < hmin2: print('\x1b[0;30;41m make_variogram Warning: major range should be greater than minor range \x1b[0m') return var # sequential Gaussian simulation, 2D unconditional wrapper for sgsim from GSLIB (.exe must be in working directory) def GSLIB_sgsim_2d_uncond(nreal,nx,ny,hsiz,seed,var,output_file): import os import numpy as np nug = var['nug'] nst = var['nst']; it1 = var['it1']; cc1 = var['cc1']; azi1 = var['azi1']; hmaj1 = var['hmaj1']; hmin1 = var['hmin1'] it2 = var['it2']; cc2 = var['cc2']; azi2 = var['azi2']; hmaj2 = var['hmaj2']; hmin2 = var['hmin2'] max_range = max(hmaj1,hmaj2) hmn = hsiz * 0.5 hctab = int(max_range/hsiz)*2 + 1 sim_array = np.random.rand(nx,ny) file = 
open("sgsim.par", "w") file.write(" Parameters for SGSIM \n") file.write(" ******************** \n") file.write(" \n") file.write("START OF PARAMETER: \n") file.write("none -file with data \n") file.write("1 2 0 3 5 0 - columns for X,Y,Z,vr,wt,sec.var. \n") file.write("-1.0e21 1.0e21 - trimming limits \n") file.write("0 -transform the data (0=no, 1=yes) \n") file.write("none.trn - file for output trans table \n") file.write("1 - consider ref. dist (0=no, 1=yes) \n") file.write("none.dat - file with ref. dist distribution \n") file.write("1 0 - columns for vr and wt \n") file.write("-4.0 4.0 - zmin,zmax(tail extrapolation) \n") file.write("1 -4.0 - lower tail option, parameter \n") file.write("1 4.0 - upper tail option, parameter \n") file.write("0 -debugging level: 0,1,2,3 \n") file.write("nonw.dbg -file for debugging output \n") file.write(str(output_file) + " -file for simulation output \n") file.write(str(nreal) + " -number of realizations to generate \n") file.write(str(nx) + " " + str(hmn) + " " + str(hsiz) + " \n") file.write(str(ny) + " " + str(hmn) + " " + str(hsiz) + " \n") file.write("1 0.0 1.0 - nz zmn zsiz \n") file.write(str(seed) + " -random number seed \n") file.write("0 8 -min and max original data for sim \n") file.write("12 -number of simulated nodes to use \n") file.write("0 -assign data to nodes (0=no, 1=yes) \n") file.write("1 3 -multiple grid search (0=no, 1=yes),num \n") file.write("0 -maximum data per octant (0=not used) \n") file.write(str(max_range) + " " + str(max_range) + " 1.0 -maximum search (hmax,hmin,vert) \n") file.write(str(azi1) + " 0.0 0.0 -angles for search ellipsoid \n") file.write(str(hctab) + " " + str(hctab) + " 1 -size of covariance lookup table \n") file.write("0 0.60 1.0 -ktype: 0=SK,1=OK,2=LVM,3=EXDR,4=COLC \n") file.write("none.dat - file with LVM, EXDR, or COLC variable \n") file.write("4 - column for secondary variable \n") file.write(str(nst) + " " + str(nug) + " -nst, nugget effect \n") file.write(str(it1) + " " + str(cc1) + " " +str(azi1) + " 0.0 0.0 -it,cc,ang1,ang2,ang3\n") file.write(" " + str(hmaj1) + " " + str(hmin1) + " 1.0 - a_hmax, a_hmin, a_vert \n") file.write(str(it2) + " " + str(cc2) + " " +str(azi2) + " 0.0 0.0 -it,cc,ang1,ang2,ang3\n") file.write(" " + str(hmaj2) + " " + str(hmin2) + " 1.0 - a_hmax, a_hmin, a_vert \n") file.close() os.system('"sgsim.exe sgsim.par"') sim_array = GSLIB2ndarray(output_file,0,nx,ny) return(sim_array[0]) # extract regular spaced samples from a model def regular_sample(array,xmin,xmax,ymin,ymax,step,mx,my,name): x = []; y = []; v = []; iix = 0; iiy = 0; xx, yy = np.meshgrid(np.arange(xmin, xmax, step),np.arange(ymax, ymin, -1*step)) iiy = 0 for iy in range(0,ny): if iiy >= my: iix = 0 for ix in range(0,nx): if iix >= mx: x.append(xx[ix,iy]);y.append(yy[ix,iy]); v.append(array[ix,iy]) iix = 0; iiy = 0 iix = iix + 1 iiy = iiy + 1 df = pd.DataFrame(np.c_[x,y,v],columns=['X', 'Y', name]) return(df) def random_sample(array,xmin,xmax,ymin,ymax,step,nsamp,name): import random as rand x = []; y = []; v = []; iix = 0; iiy = 0; xx, yy = np.meshgrid(np.arange(xmin, xmax, step),np.arange(ymax-1, ymin-1, -1*step)) ny = xx.shape[0] nx = xx.shape[1] sample_index = rand.sample(range((nx)*(ny)), nsamp) for isamp in range(0,nsamp): iy = int(sample_index[isamp]/ny) ix = sample_index[isamp] - iy*nx x.append(xx[iy,ix]) y.append(yy[iy,ix]) v.append(array[iy,ix]) df = pd.DataFrame(np.c_[x,y,v],columns=['X', 'Y', name]) return(df) # - # Here's the GAMV program translated to Python. 
Note: it was simplified to run just one experimental semivariogram at a time (in a simgle direction) and only for 2D datasets. I have applied Numba to speedup the required double loop over the data. # + import math # for trig and constants from numba import jit # for precompile speed up of loops with NumPy ndarrays # GSLIB's GAMV program (Deutsch and Journel, 1998) converted from the original Fortran to Python # by <NAME>, the University of Texas at Austin (Jan, 2019) # Note simplified for 2D, semivariogram only and one direction at a time def gamv(df,xcol,ycol,vcol,tmin,tmax,xlag,xltol,nlag,azm,atol,bandwh,isill): # Parameters - consistent with original GSLIB # df - DataFrame with the spatial data, xcol, ycol, vcol coordinates and property columns # tmin, tmax - property trimming limits # xlag, xltol - lag distance and lag distance tolerance # nlag - number of lags to calculate # azm, atol - azimuth and azimuth tolerance # bandwh - horizontal bandwidth / maximum distance offset orthogonal to azimuth # isill - 1 for standardize sill # Load the data df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)] # trim values outside tmin and tmax nd = len(df_extract) x = df_extract[xcol].values y = df_extract[ycol].values vr = df_extract[vcol].values # Summary statistics for the data after trimming avg = vr.mean() stdev = vr.std() sills = stdev**2.0 ssq = sills vrmin = vr.min() vrmax = vr.max() #print('Number of Data ' + str(nd) +', Average ' + str(avg) + ' Variance ' + str(sills)) # Define the distance tolerance if it isn't already: if xltol < 0.0: xltol = 0.5 * xlag # Loop over combinatorial of data pairs to calculate the variogram dis, vario, npp = variogram_loop(x,y,vr,xlag,xltol,nlag,azm,atol,bandwh) # Standardize sill to one by dividing all variogram values by the variance for il in range(0,nlag+2): if isill == 1: vario[il] = vario[il] / sills # Apply 1/2 factor to go from variogram to semivariogram vario[il] = 0.5 * vario[il] # END - return variogram model information return dis, vario, npp @jit(nopython=True) # all NumPy array operations included in this function for precompile with NumBa def variogram_loop(x,y,vr,xlag,xltol,nlag,azm,atol,bandwh): # Allocate the needed memory: nvarg = 1 mxdlv = nlag + 2 # in gamv the npp etc. 
arrays go to nlag + 2 dis = np.zeros(mxdlv) lag = np.zeros(mxdlv) vario = np.zeros(mxdlv) hm = np.zeros(mxdlv) tm = np.zeros(mxdlv) hv = np.zeros(mxdlv) npp = np.zeros(mxdlv) ivtail = np.zeros(nvarg + 2) ivhead = np.zeros(nvarg + 2) ivtype = np.ones(nvarg + 2) ivtail[0] = 0; ivhead[0] = 0; ivtype[0] = 0; EPSLON = 1.0e-20 nd = len(x) # The mathematical azimuth is measured counterclockwise from EW and # not clockwise from NS as the conventional azimuth is: azmuth = (90.0-azm)*math.pi/180.0 uvxazm = math.cos(azmuth) uvyazm = math.sin(azmuth) if atol <= 0.0: csatol = math.cos(45.0*math.pi/180.0) else: csatol = math.cos(atol*math.pi/180.0) # Initialize the arrays for each direction, variogram, and lag: nsiz = nlag+2 dismxs = ((float(nlag) + 0.5 - EPSLON) * xlag) ** 2 # MAIN LOOP OVER ALL PAIRS: for i in range(0,nd): for j in range(0,nd): # Definition of the lag corresponding to the current pair: dx = x[j] - x[i] dy = y[j] - y[i] dxs = dx*dx dys = dy*dy hs = dxs + dys if hs <= dismxs: if hs < 0.0: hs = 0.0 h = np.sqrt(hs) # Determine which lag this is and skip if outside the defined distance # tolerance: if h <= EPSLON: lagbeg = 0 lagend = 0 else: lagbeg = -1 lagend = -1 for ilag in range(1,nlag+1): if h >= (xlag*float(ilag-1)-xltol) and h <= (xlag*float(ilag-1)+xltol): # reduced to -1 if lagbeg < 0: lagbeg = ilag lagend = ilag if lagend >= 0: # Definition of the direction corresponding to the current pair. All # directions are considered (overlapping of direction tolerance cones # is allowed): # Check for an acceptable azimuth angle: dxy = np.sqrt(max((dxs+dys),0.0)) if dxy < EPSLON: dcazm = 1.0 else: dcazm = (dx*uvxazm+dy*uvyazm)/dxy # Check the horizontal bandwidth criteria (maximum deviation # perpendicular to the specified direction azimuth): band = uvxazm*dy - uvyazm*dx # Apply all the previous checks at once to avoid a lot of nested if statements if (abs(dcazm) >= csatol) and (abs(band) <= bandwh): # Check whether or not an omni-directional variogram is being computed: omni = False if atol >= 90.0: omni = True # For this variogram, sort out which is the tail and the head value: iv = 0 # hardcoded just one varioigram it = ivtype[iv] if dcazm >= 0.0: vrh = vr[i] vrt = vr[j] if omni: vrtpr = vr[i] vrhpr = vr[j] else: vrh = vr[j] vrt = vr[i] if omni: vrtpr = vr[j] vrhpr = vr[i] # Reject this pair on the basis of missing values: # Data was trimmed at the beginning # The Semivariogram (all other types of measures are removed for now) for il in range(lagbeg,lagend+1): npp[il] = npp[il] + 1 dis[il] = dis[il] + h tm[il] = tm[il] + vrt hm[il] = hm[il] + vrh vario[il] = vario[il] + ((vrh-vrt)*(vrh-vrt)) if(omni): npp[il] = npp[il] + 1.0 dis[il] = dis[il] + h tm[il] = tm[il] + vrtpr hm[il] = hm[il] + vrhpr vario[il] = vario[il] + ((vrhpr-vrtpr)*(vrhpr-vrtpr)) # Get average values for gam, hm, tm, hv, and tv, then compute # the correct "variogram" measure: for il in range(0,nlag+2): i = il if npp[i] > 0: rnum = npp[i] dis[i] = dis[i] / (rnum) vario[i] = vario[i] / (rnum) hm[i] = hm[i] / (rnum) tm[i] = tm[i] / (rnum) return dis, vario, npp # - # Here's a simple test of the GAMV code with visualizations to check the results including the gridded data pixelplt, histogram and experimental semivariograms in 4 directions. # # #### Set the working directory # # I always like to do this so I don't lose files and to simplify subsequent read and writes (avoid including the full address each time). 
Also, in this case make sure to place the required (see above) GSLIB executables in this directory or a location identified in the environmental variable *Path*. os.chdir("c:/PGE337") # set the working directory # You will have to update the part in quotes with your own working directory and the format is different on a Mac (e.g. "~/PGE"). # # ##### Make a 2D spatial model # # The following are the basic parameters for the demonstration. This includes the number of cells in the 2D regular grid, the cell size (step) and the x and y min and max along with the color scheme. # # Then we make a single realization of a Gausian distributed feature over the specified 2D grid and then apply affine correction to ensure we have a reasonable mean and spread for our feature's distribution, assumed to be Porosity (e.g. no negative values) while retaining the Gaussian distribution. Any transform could be applied at this point. We are keeping this workflow simple. *This is our truth model that we will sample*. # # The parameters of *GSLIB_sgsim_2d_uncond* are (nreal,nx,ny,hsiz,seed,hrange1,hrange2,azi,output_file). nreal is the number of realizations, nx and ny are the number of cells in x and y, hsiz is the cell siz, seed is the random number seed, hrange and hrange2 are the variogram ranges in major and minor directions respectively, azi is the azimuth of the primary direction of continuity (0 is aligned with Y axis) and output_file is a GEO_DAS file with the simulated realization. The ouput is the 2D numpy array of the simulation along with the name of the property. # + nx = 100; ny = 100; cell_size = 10 # grid number of cells and cell size xmin = 0.0; ymin = 0.0; # grid origin xmax = xmin + nx * cell_size; ymax = ymin + ny * cell_size # calculate the extent of model seed = 74073 # random number seed for stochastic simulation vario = make_variogram(0.0,nst=1,it1=1,cc1=1.0,azi1=0,hmaj1=500,hmin1=500) mean = 10.0; stdev = 2.0 # Porosity mean and standard deviation #cmap = plt.cm.RdYlBu vmin = 4; vmax = 16; cmap = plt.cm.plasma # color min and max and using the plasma color map # calculate a stochastic realization with standard normal distribution sim = GSLIB_sgsim_2d_uncond(1,nx,ny,cell_size,seed,vario,"Por") sim = affine(sim,mean,stdev) # correct the distribution to a target mean and standard deviation. sampling_ncell = 10 # sample every 10th node from the model #samples = regular_sample(sim,xmin,xmax,ymin,ymax,sampling_ncell,30,30,'Realization') #samples_cluster = samples.drop([80,79,78,73,72,71,70,65,64,63,61,57,56,54,53,47,45,42]) # this removes specific rows (samples) #samples_cluster = samples_cluster.reset_index(drop=True) # we reset and remove the index (it is not sequential anymore) samples = random_sample(sim,xmin,xmax,ymin,ymax,cell_size,100,"Por") locpix(sim,xmin,xmax,ymin,ymax,cell_size,vmin,vmax,samples,'X','Y','Por','Porosity Realization and Regular Samples','X(m)','Y(m)','Porosity (%)',cmap,"Por_Samples") # - # Below I calculate the isotropic, 000 and 090 directional experimental semivariograms from our sample set. Then the variograms are visualized together on the same variogram plot. # + tmin = -9999.; tmax = 9999. 
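# Parameter choices for the three experimental variograms computed below (see the gamv
# docstring above): tmin/tmax are wide-open trimming limits; xlag = 100 with xltol = 50
# gives 100 m lag bins with +/- 50 m tolerance; nlag = 10 reaches out to about 1,000 m;
# atol = 90.0 makes the first call effectively omnidirectional, while atol = 22.5 restricts
# the 000 and 090 calls to +/- 22.5 degrees about their azimuths; bandwh = 9999 disables
# the horizontal bandwidth limit; isill = 1 standardizes the sill to 1.0.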
lagiso, varioiso, nppiso = gamv(samples,'X','Y','Por',tmin,tmax,xlag = 100,xltol = 50,nlag = 10,azm = 0,atol = 90.0,bandwh = 9999,isill = 1) lag000, vario000, npp000 = gamv(samples,'X','Y','Por',tmin,tmax,xlag = 100,xltol = 50,nlag = 10,azm = 0,atol = 22.5,bandwh = 9999,isill = 1) lag090, vario090, npp090 = gamv(samples,'X','Y','Por',tmin,tmax,xlag = 100,xltol = 50,nlag = 10,azm = 90,atol = 22.5,bandwh = 9999,isill = 1) plt.subplot(121) locpix_st(sim,xmin,xmax,ymin,ymax,cell_size,vmin,vmax,samples,'X','Y','Por','Porosity Realization and Random Samples','X(m)','Y(m)','Porosity (%)',cmap) plt.subplot(122) plt.scatter(lagiso,varioiso,s=nppiso/len(samples),marker='x',color = 'black',label = 'Iso') plt.scatter(lag000,vario000,s=npp000/len(samples),marker='o',color = 'blue',label = '000') plt.scatter(lag090,vario090,s=npp090/len(samples),marker='o',color = 'green',label = '090') plt.plot([0,1000],[1.0,1.0],color = 'black') plt.xlabel('Lag Distance(m)') plt.ylabel('Semivariogram') plt.title('Iregular Samples Experimental Variograms') plt.ylim(0,1.5) plt.xlim(0,1000) handles, labels = plt.gca().get_legend_handles_labels() plt.gca().legend(handles[::], labels[::]) plt.subplots_adjust(left=0.0, bottom=0.0, right=2.0, top=1.0, wspace=0.3, hspace=0.3) plt.show() # - # Here's the gamv program without the numba acceleration, just incase that is helpful. Warning it is quite slow with more than a couple hundred data samples. # + import math # GSLIB's GAM program (Deutsch and Journel, 1998) converted from the original Fortran to Python # by <NAME>, the University of Texas at Austin (Jan, 2019) def gamv(df,xcol,ycol,vcol,tmin,tmax,xlag,xltol,nlag,azm,atol,isill): # Parameters - consistent with original GSLIB # array - 2D gridded data / model # tmin, tmax - property trimming limits # xsiz, ysiz - grid cell extents in x and y directions # ixd, iyd - lag offset in grid cells # nlag - number of lags to calculate # isill - 1 for standardize sill # nvarg = 1 # for mulitple variograms repeat the program nxy = nx*ny mxdlv = nlag + 2 # in gamv the npp etc. 
arrays go to nlag + 2 dip = 0.0; dtol = 1.0 # hard code for 2D for now EPSLON = 1.0e-20 bandwh = 1.0e20 bandwd = 1.0e20 # Allocate the needed memory: dis = np.zeros(mxdlv) lag = np.zeros(mxdlv) vario = np.zeros(mxdlv) hm = np.zeros(mxdlv) tm = np.zeros(mxdlv) hv = np.zeros(mxdlv) npp = np.zeros(mxdlv) ivtail = np.zeros(nvarg + 2) ivhead = np.zeros(nvarg + 2) ivtype = np.ones(nvarg + 2) ivtail[0] = 0; ivhead[0] = 0; ivtype[0] = 0; # Load the data df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)] # trim values outside tmin and tmax nd = len(df_extract) x = df_extract[xcol] y = df_extract[ycol] vr = df_extract[vcol] # Summary statistics for the data after trimming avg = vr.mean() stdev = vr.std() sills = stdev**2.0 ssq = sills vrmin = vr.min() vrmax = vr.max() #print('Number of Data ' + str(nd) +', Average ' + str(avg) + ' Variance ' + str(sills)) # Define the distance tolerance if it isn't already: if xltol < 0.0: xltol = 0.5 * xlag # Removed loop over directions # The mathematical azimuth is measured counterclockwise from EW and # not clockwise from NS as the conventional azimuth is: azmuth = (90.0-azm)*math.pi/180.0 uvxazm = math.cos(azmuth) uvyazm = math.sin(azmuth) if atol <= 0.0: csatol = math.cos(45.0*math.pi/180.0) else: csatol = math.cos(atol*math.pi/180.0) # The declination is measured positive down from vertical (up) rather # than negative down from horizontal: declin = (90.0-dip)*math.pi/180.0 uvzdec = math.cos(declin) uvhdec = math.sin(declin) if dtol <= 0.0: csdtol = math.cos(45.0*math.pi/180.0) else: csdtol = math.cos(dtol*math.pi/180.0) # Initialize the arrays for each direction, variogram, and lag: nsiz = nlag+2 dismxs = ((float(nlag) + 0.5 - EPSLON) * xlag) ** 2 # MAIN LOOP OVER ALL PAIRS: irepo = max(1,min((nd/10),1000)) for i in range(0,nd): # if((int(i/irepo)*irepo) == i): # print( ' currently on seed point ' + str(i) + ' of '+ str(nd)) for j in range(0,nd): # Definition of the lag corresponding to the current pair: dx = x[j] - x[i] dy = y[j] - y[i] dxs = dx*dx dys = dy*dy hs = dxs + dys if hs <= dismxs: if hs < 0.0: hs = 0.0 h = np.sqrt(hs) # Determine which lag this is and skip if outside the defined distance # tolerance: if h <= EPSLON: lagbeg = 0 lagend = 0 else: lagbeg = -1 lagend = -1 for ilag in range(1,nlag+1): if h >= (xlag*float(ilag-1)-xltol) and h <= (xlag*float(ilag-1)+xltol): # reduced to -1 if lagbeg < 0: lagbeg = ilag lagend = ilag if lagend >= 0: # Definition of the direction corresponding to the current pair. 
All # directions are considered (overlapping of direction tolerance cones # is allowed): # Check for an acceptable azimuth angle: dxy = np.sqrt(max((dxs+dys),0.0)) if dxy < EPSLON: dcazm = 1.0 else: dcazm = (dx*uvxazm+dy*uvyazm)/dxy # Check the horizontal bandwidth criteria (maximum deviation # perpendicular to the specified direction azimuth): band = uvxazm*dy - uvyazm*dx # Apply all the previous checks at once to avoid a lot of nested if statements if (abs(dcazm) >= csatol) and (abs(band) <= bandwh): # Check whether or not an omni-directional variogram is being computed: omni = False if atol >= 90.0: omni = True # For this variogram, sort out which is the tail and the head value: iv = 0 # hardcoded just one varioigram it = ivtype[iv] if dcazm >= 0.0: vrh = vr[i] vrt = vr[j] if omni: vrtpr = vr[i] vrhpr = vr[j] else: vrh = vr[j] vrt = vr[i] if omni: vrtpr = vr[j] vrhpr = vr[i] # Reject this pair on the basis of missing values: # Data was trimmed at the beginning # The Semivariogram: for il in range(lagbeg,lagend+1): npp[il] = npp[il] + 1 dis[il] = dis[il] + h tm[il] = tm[il] + vrt hm[il] = hm[il] + vrh vario[il] = vario[il] + ((vrh-vrt)*(vrh-vrt)) if(omni): npp[il] = npp[il] + 1.0 dis[il] = dis[il] + h tm[il] = tm[il] + vrtpr hm[il] = hm[il] + vrhpr vario[il] = vario[il] + ((vrhpr-vrtpr)*(vrhpr-vrtpr)) # Get average values for gam, hm, tm, hv, and tv, then compute # the correct "variogram" measure: for il in range(0,nlag+2): i = il if npp[i] > 0: rnum = npp[i] dis[i] = dis[i] / (rnum) vario[i] = vario[i] / (rnum) hm[i] = hm[i] / (rnum) tm[i] = tm[i] / (rnum) # Attempt to standardize: if isill == 1: vario[i] = vario[i] / sills # semivariogram vario[i] = 0.5 * vario[i] return dis, vario, npp # - # I hope you find this code and demonstration useful. I'm always happy to discuss geostatistics, statistical modeling, uncertainty modeling and machine learning, # # *Michael* # # **<NAME>**, Ph.D., P.Eng. Associate Professor The Hildebrand Department of Petroleum and Geosystems Engineering, Bureau of Economic Geology, The Jackson School of Geosciences, The University of Texas at Austin # On Twitter I'm the **GeostatsGuy** and on YouTube my lectures are on the channel, **GeostatsGuy Lectures**.
examples/Gamv.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import matplotlib.pyplot as plt # # Данные для тестов def malanchev_dataset(inliers=2**10, outliers=2**5, seed=0): rng = np.random.default_rng(seed) x = np.concatenate([rng.uniform([0, 0], [0.5, 0.5], (inliers, 2)), rng.normal([1, 1], 0.1, (outliers, 2)), rng.normal([0, 1], 0.1, (outliers, 2)), rng.normal([1, 0], 0.1, (outliers, 2))]) return x plt.scatter(*malanchev_dataset().T); # # Оценка средней длины пути в дереве # + def _average_path_length(n): """ Average path length formula. """ return 2.0 * (np.log(n - 1.0) + np.euler_gamma) - 2.0 * (n - 1.0) / n def average_path_length(n): """ Average path length computation. Parameters ---------- n Either array of tree depths to computer average path length of or one tree depth scalar. Returns ------- Average path length. """ if np.isscalar(n): if n <= 1: apl = 0 elif n == 2: apl = 1 else: apl = _average_path_length(n) else: n = np.asarray(n) apl = np.zeros_like(n) apl[n > 1] = _average_path_length(n[n > 1]) apl[n == 2] = 1 return apl # - # # Изоляционный лес # + class IsolationForest: def __init__(self, trees=100, subsamples=256, depth=None, seed=0): self.subsamples = subsamples self.trees = trees self.depth = depth self.seedseq = np.random.SeedSequence(seed) self.rng = np.random.default_rng(seed) self.estimators = [] self.n = 0 def fit(self, data): n = data.shape[0] self.n = n self.subsamples = self.subsamples if n > self.subsamples else n self.depth = self.depth or int(np.ceil(np.log2(self.subsamples))) self.estimators = [None] * self.trees seeds = self.seedseq.spawn(self.trees) for i in range(self.trees): subs = self.rng.choice(n, self.subsamples) gen = IsolationForestGenerator(data[subs, :], self.depth, seeds[i]) self.estimators[i] = gen.pine return self def mean_paths(self, data): means = np.zeros(data.shape[0]) for ti in range(self.trees): path = self.estimators[ti].paths(data) means += path means /= self.trees return means def scores(self, data): means = self.mean_paths(data) return - 2 ** (-means / average_path_length(self.subsamples)) class Tree: def __init__(self, features, selectors, values): self.features = features self.len = selectors.shape[0] # Two complementary arrays. # Selectors select feature to branch on. 
self.selectors = selectors # Values either set the deciding feature value or set the closing path length self.values = values def _get_one_path(self, key): i = 1 while 2 * i < self.selectors.shape[0]: f = self.selectors[i] if f < 0: break if key[f] <= self.values[i]: i = 2 * i else: i = 2 * i + 1 return self.values[i] def paths(self, x): n = x.shape[0] paths = np.empty(n) for i in range(n): paths[i] = self._get_one_path(x[i, :]) return paths class IsolationForestGenerator: def __init__(self, sample, depth, seed=0): self.depth = depth self.features = sample.shape[1] self.length = 1 << (depth + 1) self.rng = np.random.default_rng(seed) self.selectors = np.full(self.length, -1, dtype=np.int32) self.values = np.full(self.length, 0, dtype=np.float64) self._populate(1, sample) self.pine = Tree(self.features, self.selectors, self.values) def _populate(self, i, sample): if sample.shape[0] == 1: self.values[i] = np.floor(np.log2(i)) return if self.length <= 2 * i: self.values[i] = np.floor(np.log2(i)) + \ average_path_length(sample.shape[0]) return selector = self.rng.integers(self.features) self.selectors[i] = selector minval = np.min(sample[:, selector]) maxval = np.max(sample[:, selector]) if minval == maxval: self.selectors[i] = -1 self.values[i] = np.floor(np.log2(i)) + \ average_path_length(sample.shape[0]) return value = self.rng.uniform(minval, maxval) self.values[i] = value self._populate(2 * i, sample[sample[:, selector] <= value]) self._populate(2 * i + 1, sample[sample[:, selector] > value]) # - # # Пример # + data = malanchev_dataset() isoforest = IsolationForest(trees=100, subsamples=16, depth=4) isoforest.fit(data) scores = isoforest.scores(data) sorting = np.argsort(scores) plt.scatter(*data[sorting[:96]].T, color='C1', label='anomaly') plt.scatter(*data[sorting[96:]].T, color='C0', label='regular') plt.legend() pass # - # # Тесты data = malanchev_dataset(inliers=2**13) # %%time isoforest = IsolationForest(trees=200, subsamples=1024, depth=10) isoforest.fit(data) pass # %%time scores = isoforest.scores(data) # Если посчитать для 10e6 точек (в часах): 48 / 2**13 * 10e6 / 60.0 / 60.0
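# The expression above extrapolates the measured wall time (the author's ~48 s figure for
# 2**13 points) linearly to 10 million points and converts seconds to hours; the same
# arithmetic is spelled out below (linear scaling in the number of points is an assumption,
# not a measurement).

# +
measured_seconds = 48
measured_points = 2**13
target_points = 10_000_000
estimated_hours = measured_seconds / measured_points * target_points / 3600
print(estimated_hours)  # roughly 16 hours
# -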
j07_casestudy/j07_isoforest_python_naive.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] papermill={"duration": 0.018392, "end_time": "2022-01-20T11:10:33.423706", "exception": false, "start_time": "2022-01-20T11:10:33.405314", "status": "completed"} tags=[] # # SageMaker Debugger Profiling Report # # SageMaker Debugger auto generated this report. You can generate similar reports on all supported training jobs. The report provides summary of training job, system resource usage statistics, framework metrics, rules summary, and detailed analysis from each rule. The graphs and tables are interactive. # # **Legal disclaimer:** This report and any recommendations are provided for informational purposes only and are not definitive. You are responsible for making your own independent assessment of the information. # # + papermill={"duration": 0.602833, "end_time": "2022-01-20T11:10:34.043846", "exception": false, "start_time": "2022-01-20T11:10:33.441013", "status": "completed"} tags=["hide-output", "hide-input"] import json import pandas as pd import glob import matplotlib.pyplot as plt import numpy as np import datetime from smdebug.profiler.utils import us_since_epoch_to_human_readable_time, ns_since_epoch_to_human_readable_time from smdebug.core.utils import setup_profiler_report # + papermill={"duration": 0.219005, "end_time": "2022-01-20T11:10:34.280811", "exception": false, "start_time": "2022-01-20T11:10:34.061806", "status": "completed"} tags=["hide-input"] import bokeh from bokeh.io import output_notebook, show from bokeh.layouts import column, row from bokeh.plotting import figure from bokeh.models.widgets import DataTable, DateFormatter, TableColumn from bokeh.models import ColumnDataSource, PreText from math import pi from bokeh.transform import cumsum import warnings from bokeh.models.widgets import Paragraph from bokeh.models import Legend from bokeh.util.warnings import BokehDeprecationWarning, BokehUserWarning warnings.simplefilter('ignore', BokehDeprecationWarning) warnings.simplefilter('ignore', BokehUserWarning) output_notebook(hide_banner=True) # + papermill={"duration": 0.0307, "end_time": "2022-01-20T11:10:34.329898", "exception": false, "start_time": "2022-01-20T11:10:34.299198", "status": "completed"} tags=["parameters", "hide-input", "hide-output"] processing_job_arn = "" # + papermill={"duration": 0.023699, "end_time": "2022-01-20T11:10:34.371748", "exception": false, "start_time": "2022-01-20T11:10:34.348049", "status": "completed"} tags=["injected-parameters"] # Parameters processing_job_arn = "arn:aws:sagemaker:us-east-1:264082167679:processing-job/pytorch-training-2022-01-2-profilerreport-2bfb3dcb" # + papermill={"duration": 0.02351, "end_time": "2022-01-20T11:10:34.413446", "exception": false, "start_time": "2022-01-20T11:10:34.389936", "status": "completed"} tags=["hide-input", "hide-output"] setup_profiler_report(processing_job_arn) # + papermill={"duration": 0.027437, "end_time": "2022-01-20T11:10:34.459232", "exception": false, "start_time": "2022-01-20T11:10:34.431795", "status": "completed"} tags=["hide-input"] def create_piechart(data_dict, title=None, height=400, width=400, x1=0, x2=0.1, radius=0.4, toolbar_location='right'): plot = figure(plot_height=height, plot_width=width, toolbar_location=toolbar_location, tools="hover,wheel_zoom,reset,pan", tooltips="@phase:@value", title=title, x_range=(-radius-x1, 
radius+x2)) data = pd.Series(data_dict).reset_index(name='value').rename(columns={'index':'phase'}) data['angle'] = data['value']/data['value'].sum() * 2*pi data['color'] = bokeh.palettes.viridis(len(data_dict)) plot.wedge(x=0, y=0., radius=radius, start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'), line_color="white", source=data, fill_color='color', legend='phase' ) plot.legend.label_text_font_size = "8pt" plot.legend.location = 'center_right' plot.axis.axis_label=None plot.axis.visible=False plot.grid.grid_line_color = None plot.outline_line_color = "white" return plot # + papermill={"duration": 0.024345, "end_time": "2022-01-20T11:10:34.501881", "exception": false, "start_time": "2022-01-20T11:10:34.477536", "status": "completed"} tags=["hide-input"] from IPython.display import display, HTML, Markdown, Image def pretty_print(df): raw_html = df.to_html().replace("\\n","<br>").replace('<tr>','<tr style="text-align: left;">') return display(HTML(raw_html)) # + [markdown] papermill={"duration": 0.018229, "end_time": "2022-01-20T11:10:34.538587", "exception": false, "start_time": "2022-01-20T11:10:34.520358", "status": "completed"} tags=[] # ## Training job summary # + papermill={"duration": 0.024663, "end_time": "2022-01-20T11:10:34.582022", "exception": false, "start_time": "2022-01-20T11:10:34.557359", "status": "completed"} tags=["hide-input"] def load_report(rule_name): try: report = json.load(open('/opt/ml/processing/output/rule/profiler-output/profiler-reports/'+rule_name+'.json')) return report except FileNotFoundError: print (rule_name + ' not triggered') # + papermill={"duration": 0.03334, "end_time": "2022-01-20T11:10:34.633904", "exception": false, "start_time": "2022-01-20T11:10:34.600564", "status": "completed"} tags=["hide-input", "hide-output"] job_statistics = {} report = load_report('MaxInitializationTime') if report: if "first" in report['Details']["step_num"] and "last" in report['Details']["step_num"]: first_step = report['Details']["step_num"]["first"] last_step = report['Details']["step_num"]["last"] tmp = us_since_epoch_to_human_readable_time(report['Details']['job_start'] * 1000000) date = datetime.datetime.strptime(tmp, '%Y-%m-%dT%H:%M:%S:%f') day = date.date().strftime("%m/%d/%Y") hour = date.time().strftime("%H:%M:%S") job_statistics["Start time"] = f"{hour} {day}" tmp = us_since_epoch_to_human_readable_time(report['Details']['job_end'] * 1000000) date = datetime.datetime.strptime(tmp, '%Y-%m-%dT%H:%M:%S:%f') day = date.date().strftime("%m/%d/%Y") hour = date.time().strftime("%H:%M:%S") job_statistics["End time"] = f"{hour} {day}" job_duration_in_seconds = int(report['Details']['job_end'] - report['Details']['job_start']) job_statistics["Job duration"] = f"{job_duration_in_seconds} seconds" if "first" in report['Details']["step_num"] and "last" in report['Details']["step_num"]: tmp = us_since_epoch_to_human_readable_time(first_step) date = datetime.datetime.strptime(tmp, '%Y-%m-%dT%H:%M:%S:%f') day = date.date().strftime("%m/%d/%Y") hour = date.time().strftime("%H:%M:%S") job_statistics["Training loop start"] = f"{hour} {day}" tmp = us_since_epoch_to_human_readable_time(last_step) date = datetime.datetime.strptime(tmp, '%Y-%m-%dT%H:%M:%S:%f') day = date.date().strftime("%m/%d/%Y") hour = date.time().strftime("%H:%M:%S") job_statistics["Training loop end"] = f"{hour} {day}" training_loop_duration_in_seconds = int((last_step - first_step) / 1000000) job_statistics["Training loop duration"] = f"{training_loop_duration_in_seconds} seconds" 
initialization_in_seconds = int(first_step/1000000 - report['Details']['job_start']) job_statistics["Initialization time"] = f"{initialization_in_seconds} seconds" finalization_in_seconds = int(np.abs(report['Details']['job_end'] - last_step/1000000)) job_statistics["Finalization time"] = f"{finalization_in_seconds} seconds" initialization_perc = int(initialization_in_seconds / job_duration_in_seconds * 100) job_statistics["Initialization"] = f"{initialization_perc} %" training_loop_perc = int(training_loop_duration_in_seconds / job_duration_in_seconds * 100) job_statistics["Training loop"] = f"{training_loop_perc} %" finalization_perc = int(finalization_in_seconds / job_duration_in_seconds * 100) job_statistics["Finalization"] = f"{finalization_perc} %" # + papermill={"duration": 0.066686, "end_time": "2022-01-20T11:10:34.719240", "exception": false, "start_time": "2022-01-20T11:10:34.652554", "status": "completed"} tags=["hide-input"] if report: text = """The following table gives a summary about the training job. The table includes information about when the training job started and ended, how much time initialization, training loop and finalization took.""" if len(job_statistics) > 0: df = pd.DataFrame.from_dict(job_statistics, orient='index') start_time = us_since_epoch_to_human_readable_time(report['Details']['job_start'] * 1000000) date = datetime.datetime.strptime(start_time, '%Y-%m-%dT%H:%M:%S:%f') day = date.date().strftime("%m/%d/%Y") hour = date.time().strftime("%H:%M:%S") duration = job_duration_in_seconds text = f"""{text} \n Your training job started on {day} at {hour} and ran for {duration} seconds.""" #pretty_print(df) if "first" in report['Details']["step_num"] and "last" in report['Details']["step_num"]: if finalization_perc < 0: job_statistics["Finalization%"] = 0 if training_loop_perc < 0: job_statistics["Training loop"] = 0 if initialization_perc < 0: job_statistics["Initialization"] = 0 else: text = f"""{text} \n Your training job started on {day} at {hour} and ran for {duration} seconds.""" if len(job_statistics) > 0: df2 = df.reset_index() df2.columns = ["0", "1"] source = ColumnDataSource(data=df2) columns = [TableColumn(field='0', title=""), TableColumn(field='1', title="Job Statistics"),] table = DataTable(source=source, columns=columns, width=450, height=380) plot = None if "Initialization" in job_statistics: piechart_data = {} piechart_data["Initialization"] = initialization_perc piechart_data["Training loop"] = training_loop_perc piechart_data["Finalization"] = finalization_perc plot = create_piechart(piechart_data, height=350, width=500, x1=0.15, x2=0.15, radius=0.15, toolbar_location=None) if plot != None: paragraph = Paragraph(text=f"""{text}""", width = 800) show(column(paragraph, row(table, plot))) else: paragraph = Paragraph(text=f"""{text}. No step information was profiled from your training job. 
The time spent on initialization and finalization cannot be computed.""" , width = 800) show(column(paragraph, row(table))) # + [markdown] papermill={"duration": 0.019382, "end_time": "2022-01-20T11:10:34.758347", "exception": false, "start_time": "2022-01-20T11:10:34.738965", "status": "completed"} tags=[] # ## System usage statistics # + papermill={"duration": 0.025692, "end_time": "2022-01-20T11:10:34.803478", "exception": false, "start_time": "2022-01-20T11:10:34.777786", "status": "completed"} tags=["hide-input"] report = load_report('OverallSystemUsage') # + papermill={"duration": 0.030149, "end_time": "2022-01-20T11:10:34.853171", "exception": false, "start_time": "2022-01-20T11:10:34.823022", "status": "completed"} tags=["hide-input"] text1 = '' if report: if "GPU" in report["Details"]: for node_id in report["Details"]["GPU"]: gpu_p95 = report["Details"]["GPU"][node_id]["p95"] gpu_p50 = report["Details"]["GPU"][node_id]["p50"] cpu_p95 = report["Details"]["CPU"][node_id]["p95"] cpu_p50 = report["Details"]["CPU"][node_id]["p50"] if gpu_p95 < 70 and cpu_p95 < 70: text1 = f"""{text1}The 95th percentile of the total GPU utilization on node {node_id} is only {int(gpu_p95)}%. The 95th percentile of the total CPU utilization is only {int(cpu_p95)}%. Node {node_id} is underutilized. You may want to consider switching to a smaller instance type.""" elif gpu_p95 < 70 and cpu_p95 > 70: text1 = f"""{text1}The 95th percentile of the total GPU utilization on node {node_id} is only {int(gpu_p95)}%. However, the 95th percentile of the total CPU utilization is {int(cpu_p95)}%. GPUs on node {node_id} are underutilized, likely because of CPU bottlenecks.""" elif gpu_p50 > 70: text1 = f"""{text1}The median total GPU utilization on node {node_id} is {int(gpu_p50)}%. GPUs on node {node_id} are well utilized.""" else: text1 = f"""{text1}The median total GPU utilization on node {node_id} is {int(gpu_p50)}%. The median total CPU utilization is {int(cpu_p50)}%.""" else: for node_id in report["Details"]["CPU"]: cpu_p95 = report["Details"]["CPU"][node_id]["p95"] if cpu_p95 > 70: text1 = f"""{text1}The 95th percentile of the total CPU utilization on node {node_id} is {int(cpu_p95)}%. CPUs on node {node_id} are well utilized.""" text1 = Paragraph(text=f"""{text1}""", width=1100) text2 = Paragraph(text=f"""The following table shows statistics of resource utilization per worker (node), such as the total CPU and GPU utilization, and the memory utilization on CPU and GPU. The table also includes the total I/O wait time and the total amount of data sent or received in bytes.
The table shows min and max values as well as p99, p90 and p50 percentiles.""", width=900) # + papermill={"duration": 0.049705, "end_time": "2022-01-20T11:10:34.922857", "exception": false, "start_time": "2022-01-20T11:10:34.873152", "status": "completed"} tags=["hide-input"] pd.set_option('display.float_format', lambda x: '%.2f' % x) rows = [] units = {"CPU": "percentage", "CPU memory": "percentage", "GPU": "percentage", "Network": "bytes", "GPU memory": "percentage", "I/O": "percentage"} if report: for metric in report['Details']: for node_id in report['Details'][metric]: values = report['Details'][metric][node_id] rows.append([node_id, metric, units[metric], values['max'], values['p99'], values['p95'], values['p50'], values['min']]) df = pd.DataFrame(rows) df.columns = ['Node', 'metric', 'unit', 'max', 'p99', 'p95', 'p50', 'min'] df2 = df.reset_index() source = ColumnDataSource(data=df2) columns = [TableColumn(field='Node', title="node"), TableColumn(field='metric', title="metric"), TableColumn(field='unit', title="unit"), TableColumn(field='max', title="max"), TableColumn(field='p99', title="p99"), TableColumn(field='p95', title="p95"), TableColumn(field='p50', title="p50"), TableColumn(field='min', title="min"),] table = DataTable(source=source, columns=columns, width=800, height=df2.shape[0]*30) show(column( text1, text2, row(table))) # + papermill={"duration": 0.121906, "end_time": "2022-01-20T11:10:35.065270", "exception": false, "start_time": "2022-01-20T11:10:34.943364", "status": "completed"} tags=["hide-input"] report = load_report('OverallFrameworkMetrics') if report: if 'Details' in report: display(Markdown(f"""## Framework metrics summary""")) plots = [] text = '' if 'phase' in report['Details']: text = f"""The following two pie charts show the time spent on the TRAIN phase, the EVAL phase, and others. The 'others' includes the time spent between steps (after one step has finished and before the next step has started). Ideally, most of the training time should be spent on the TRAIN and EVAL phases. If TRAIN/EVAL were not specified in the training script, steps will be recorded as GLOBAL.""" if 'others' in report['Details']['phase']: others = float(report['Details']['phase']['others']) if others > 25: text = f"""{text} Your training job spent quite a significant amount of time ({round(others,2)}%) in phase "others". You should check what is happening in between the steps.""" plot = create_piechart(report['Details']['phase'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="The ratio between the time spent on the TRAIN/EVAL phase and others") plots.append(plot) if 'forward_backward' in report['Details']: event = max(report['Details']['forward_backward'], key=report['Details']['forward_backward'].get) perc = report['Details']['forward_backward'][event] text = f"""{text} The pie chart on the right shows a more detailed breakdown. 
It shows that {int(perc)}% of the time was spent in event "{event}".""" if perc > 70: text = f"""There is quite a significant difference between the time spent on forward and backward pass.""" else: text = f"""{text} It shows that {int(perc)}% of the training time was spent on "{event}".""" plot = create_piechart(report['Details']['forward_backward'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="The ratio between forward and backward pass") plots.append(plot) if len(plots) > 0: paragraph = Paragraph(text=text, width=1100) show(column(paragraph, row(plots))) plots = [] text='' if 'ratio' in report['Details'] and len(report['Details']['ratio']) > 0: key = list(report['Details']['ratio'].keys())[0] ratio = report['Details']['ratio'][key] text = f"""The following piechart shows a breakdown of the CPU/GPU operators. It shows that {int(ratio)}% of training time was spent on executing the "{key}" operator.""" plot = create_piechart(report['Details']['ratio'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="The ratio between the time spent on CPU/GPU operators") plots.append(plot) if 'general' in report['Details']: event = max(report['Details']['general'], key=report['Details']['general'].get) perc = report['Details']['general'][event] plot = create_piechart(report['Details']['general'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="General framework operations") plots.append(plot) if len(plots) > 0: paragraph = Paragraph(text=text, width=1100) show(column(paragraph, row(plots))) plots = [] text = '' if 'horovod' in report['Details']: display(Markdown(f"""#### Overview: Horovod metrics""")) event = max(report['Details']['horovod'], key=report['Details']['horovod'].get) perc = report['Details']['horovod'][event] text = f"""{text} The following pie chart shows a detailed breakdown of the Horovod metrics profiled from your training job. The most expensive function was "{event}" with {int(perc)}%.""" plot = create_piechart(report['Details']['horovod'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="Horovod metrics ") paragraph = Paragraph(text=text, width=1100) show(column(paragraph, row(plot))) # + papermill={"duration": 0.075606, "end_time": "2022-01-20T11:10:35.163289", "exception": false, "start_time": "2022-01-20T11:10:35.087683", "status": "completed"} tags=["hide-input"] pd.set_option('display.float_format', lambda x: '%.2f' % x) rows = [] values = [] if report: if 'CPU_total' in report['Details']: display(Markdown(f"""#### Overview: CPU operators""")) event = max(report['Details']['CPU'], key=report['Details']['CPU'].get) perc = report['Details']['CPU'][event] for function in report['Details']['CPU']: percentage = round(report['Details']['CPU'][function],2) time = report['Details']['CPU_total'][function] rows.append([percentage, time, function]) df = pd.DataFrame(rows) df.columns = ['percentage', 'time', 'operator'] df = df.sort_values(by=['percentage'], ascending=False) source = ColumnDataSource(data=df) columns = [TableColumn(field='percentage', title="Percentage"), TableColumn(field='time', title="Cumulative time in microseconds"), TableColumn(field='operator', title="CPU operator"),] table = DataTable(source=source, columns=columns, width=550, height=350) text = Paragraph(text=f"""The following table shows a list of operators that ran on the CPUs. 
The most expensive operator on the CPUs was "{event}" with {int(perc)} %.""") plot = create_piechart(report['Details']['CPU'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, ) show(column(text, row(table, plot))) # + papermill={"duration": 0.078176, "end_time": "2022-01-20T11:10:35.264800", "exception": false, "start_time": "2022-01-20T11:10:35.186624", "status": "completed"} tags=["hide-input"] pd.set_option('display.float_format', lambda x: '%.2f' % x) rows = [] values = [] if report: if 'GPU_total' in report['Details']: display(Markdown(f"""#### Overview: GPU operators""")) event = max(report['Details']['GPU'], key=report['Details']['GPU'].get) perc = report['Details']['GPU'][event] for function in report['Details']['GPU']: percentage = round(report['Details']['GPU'][function],2) time = report['Details']['GPU_total'][function] rows.append([percentage, time, function]) df = pd.DataFrame(rows) df.columns = ['percentage', 'time', 'operator'] df = df.sort_values(by=['percentage'], ascending=False) source = ColumnDataSource(data=df) columns = [TableColumn(field='percentage', title="Percentage"), TableColumn(field='time', title="Cumulative time in microseconds"), TableColumn(field='operator', title="GPU operator"),] table = DataTable(source=source, columns=columns, width=450, height=350) text = Paragraph(text=f"""The following table shows a list of operators that your training job ran on GPU. The most expensive operator on GPU was "{event}" with {int(perc)} %""") plot = create_piechart(report['Details']['GPU'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, ) show(column(text, row(table, plot))) # + [markdown] papermill={"duration": 0.023798, "end_time": "2022-01-20T11:10:35.312872", "exception": false, "start_time": "2022-01-20T11:10:35.289074", "status": "completed"} tags=[] # ## Rules summary # + papermill={"duration": 0.032055, "end_time": "2022-01-20T11:10:35.368806", "exception": false, "start_time": "2022-01-20T11:10:35.336751", "status": "completed"} tags=["hide-input"] description = {} description['CPUBottleneck'] = 'Checks if the CPU utilization is high and the GPU utilization is low. \ It might indicate CPU bottlenecks, where the GPUs are waiting for data to arrive \ from the CPUs. The rule evaluates the CPU and GPU utilization rates, and triggers the issue \ if the time spent on the CPU bottlenecks exceeds a threshold percent of the total training time. The default threshold is 50 percent.' description['IOBottleneck'] = 'Checks if the data I/O wait time is high and the GPU utilization is low. \ It might indicate IO bottlenecks where GPU is waiting for data to arrive from storage. \ The rule evaluates the I/O and GPU utilization rates and triggers the issue \ if the time spent on the IO bottlenecks exceeds a threshold percent of the total training time. The default threshold is 50 percent.' description['Dataloader'] = 'Checks how many data loaders are running in parallel and whether the total number is equal the number \ of available CPU cores. The rule triggers if number is much smaller or larger than the number of available cores. \ If too small, it might lead to low GPU utilization. If too large, it might impact other compute intensive operations on CPU.' description['GPUMemoryIncrease'] = 'Measures the average GPU memory footprint and triggers if there is a large increase.' description['BatchSize'] = 'Checks if GPUs are underutilized because the batch size is too small. 
\ To detect this problem, the rule analyzes the average GPU memory footprint, \ the CPU and the GPU utilization. ' description['LowGPUUtilization'] = 'Checks if the GPU utilization is low or fluctuating. \ This can happen due to bottlenecks, blocking calls for synchronizations, \ or a small batch size.' description['MaxInitializationTime'] = 'Checks if the time spent on initialization exceeds a threshold percent of the total training time. \ The rule waits until the first step of training loop starts. The initialization can take longer \ if downloading the entire dataset from Amazon S3 in File mode. The default threshold is 20 minutes.' description['LoadBalancing'] = 'Detects workload balancing issues across GPUs. \ Workload imbalance can occur in training jobs with data parallelism. \ The gradients are accumulated on a primary GPU, and this GPU might be overused \ with regard to other GPUs, resulting in reducing the efficiency of data parallelization.' description['StepOutlier'] = 'Detects outliers in step duration. The step duration for forward and backward pass should be \ roughly the same throughout the training. If there are significant outliers, \ it may indicate a system stall or bottleneck issues.' # + papermill={"duration": 0.030966, "end_time": "2022-01-20T11:10:35.424097", "exception": false, "start_time": "2022-01-20T11:10:35.393131", "status": "completed"} tags=["hide-input"] recommendation = {} recommendation['CPUBottleneck'] = 'Consider increasing the number of data loaders \ or applying data pre-fetching.' recommendation['IOBottleneck'] = 'Pre-fetch data or choose different file formats, such as binary formats that \ improve I/O performance.' recommendation['Dataloader'] = 'Change the number of data loader processes.' recommendation['GPUMemoryIncrease'] = 'Choose a larger instance type with more memory if footprint is close to maximum available memory.' recommendation['BatchSize'] = 'The batch size is too small, and GPUs are underutilized. Consider running on a smaller instance type or increasing the batch size.' recommendation['LowGPUUtilization'] = 'Check if there are bottlenecks, minimize blocking calls, \ change distributed training strategy, or increase the batch size.' recommendation['MaxInitializationTime'] = 'Initialization takes too long. \ If using File mode, consider switching to Pipe mode in case you are using TensorFlow framework.' recommendation['LoadBalancing'] = 'Choose a different distributed training strategy or \ a different distributed training framework.' recommendation['StepOutlier'] = 'Check if there are any bottlenecks (CPU, I/O) correlated to the step outliers.' 
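# + [markdown] tags=[]
# The `description` and `recommendation` tables above are keyed by the same rule names as the JSON
# files that this report reads. As a minimal sketch of how they can be reused outside the generated
# report, the cell below combines them with the `load_report` helper defined earlier to print a
# plain-text digest for a single rule. The helper name `print_rule_digest` is illustrative only and
# is not part of SageMaker Debugger or the smdebug library.

# + tags=["hide-input"]
def print_rule_digest(rule_name):
    """Print a short plain-text summary for one Debugger built-in rule."""
    rule_report = load_report(rule_name)  # prints a notice and returns None if the JSON file is absent
    if rule_report is None:
        return
    print(f"Rule:           {rule_name}")
    print(f"Description:    {description.get(rule_name, 'n/a')}")
    print(f"Recommendation: {recommendation.get(rule_name, 'n/a')}")
    print(f"Triggered:      {rule_report['RuleTriggered']} times over {rule_report['Datapoints']} datapoints")
    print(f"Parameters:     {rule_report['RuleParameters']}")


# Example (uncomment inside the processing container, where the profiler-reports JSON files exist):
# print_rule_digest('LowGPUUtilization')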
# + papermill={"duration": 0.041158, "end_time": "2022-01-20T11:10:35.489403", "exception": false, "start_time": "2022-01-20T11:10:35.448245", "status": "completed"} tags=["hide-input"] files = glob.glob('/opt/ml/processing/output/rule/profiler-output/profiler-reports/*json') summary = {} for i in files: rule_name = i.split('/')[-1].replace('.json','') if rule_name == "OverallSystemUsage" or rule_name == "OverallFrameworkMetrics": continue rule_report = json.load(open(i)) summary[rule_name] = {} summary[rule_name]['Description'] = description[rule_name] summary[rule_name]['Recommendation'] = recommendation[rule_name] summary[rule_name]['Number of times rule triggered'] = rule_report['RuleTriggered'] #summary[rule_name]['Number of violations'] = rule_report['Violations'] summary[rule_name]['Number of datapoints'] = rule_report['Datapoints'] summary[rule_name]['Rule parameters'] = rule_report['RuleParameters'] df = pd.DataFrame.from_dict(summary, orient='index') df = df.sort_values(by=['Number of times rule triggered'], ascending=False) display(Markdown(f"""The following table shows a profiling summary of the Debugger built-in rules. The table is sorted by the rules that triggered the most frequently. During your training job, the {df.index[0]} rule was the most frequently triggered. It processed {df.values[0,3]} datapoints and was triggered {df.values[0,2]} times.""")) with pd.option_context('display.colheader_justify','left'): pretty_print(df) # + papermill={"duration": 0.03357, "end_time": "2022-01-20T11:10:35.548249", "exception": false, "start_time": "2022-01-20T11:10:35.514679", "status": "completed"} tags=["hide-input"] analyse_phase = "training" if job_statistics and "initialization_in_seconds" in job_statistics: if job_statistics["initialization_in_seconds"] > job_statistics["training_loop_duration_in_seconds"]: analyse_phase = "initialization" time = job_statistics["initialization_in_seconds"] perc = job_statistics["initialization_%"] display(Markdown(f"""The initialization phase took {int(time)} seconds, which is {int(perc)}%* of the total training time. Since the training loop has taken the most time, we dive deep into the events occurring during this phase""")) display(Markdown("""## Analyzing initialization\n\n""")) time = job_statistics["training_loop_duration_in_seconds"] perc = job_statistics["training_loop_%"] display(Markdown(f"""The training loop lasted for {int(time)} seconds which is {int(perc)}% of the training job time. Since the training loop has taken the most time, we dive deep into the events occured during this phase.""")) if analyse_phase == 'training': display(Markdown("""## Analyzing the training loop\n\n""")) # + papermill={"duration": 0.031068, "end_time": "2022-01-20T11:10:35.604945", "exception": false, "start_time": "2022-01-20T11:10:35.573877", "status": "completed"} tags=["hide-input"] if analyse_phase == "initialization": display(Markdown("""### MaxInitializationTime\n\nThis rule helps to detect if the training initialization is taking too much time. \nThe rule waits until first step is available. The rule takes the parameter `threshold` that defines how many minutes to wait for the first step to become available. 
Default is 20 minutes.\nYou can run the rule locally in the following way: """)) _ = load_report("MaxInitializationTime") # + papermill={"duration": 0.070764, "end_time": "2022-01-20T11:10:35.701305", "exception": false, "start_time": "2022-01-20T11:10:35.630541", "status": "completed"} tags=["hide-input"] if analyse_phase == "training": display(Markdown("""### Step duration analysis""")) report = load_report('StepOutlier') if report: parameters = report['RuleParameters'] params = report['RuleParameters'].split('\n') stddev = params[3].split(':')[1] mode = params[1].split(':')[1] n_outlier = params[2].split(':')[1] triggered = report['RuleTriggered'] datapoints = report['Datapoints'] text = f"""The StepOutlier rule measures step durations and checks for outliers. The rule returns True if duration is larger than {stddev} times the standard deviation. The rule also takes the parameter mode, that specifies whether steps from training or validation phase should be checked. In your processing job mode was specified as {mode}. Typically the first step is taking significantly more time and to avoid the rule triggering immediately, one can use n_outliers to specify the number of outliers to ignore. n_outliers was set to {n_outlier}. The rule analysed {datapoints} datapoints and triggered {triggered} times. """ paragraph = Paragraph(text=text, width=900) show(column(paragraph)) if report and len(report['Details']['step_details']) > 0: for node_id in report['Details']['step_details']: tmp = report['RuleParameters'].split('threshold:') threshold = tmp[1].split('\n')[0] n_outliers = report['Details']['step_details'][node_id]['number_of_outliers'] mean = report['Details']['step_details'][node_id]['step_stats']['mean'] stddev = report['Details']['step_details'][node_id]['stddev'] phase = report['Details']['step_details'][node_id]['phase'] display(Markdown(f"""**Step durations on node {node_id}:**""")) display(Markdown(f"""The following table is a summary of the statistics of step durations measured on node {node_id}. The rule has analyzed the step duration from {phase} phase. The average step duration on node {node_id} was {round(mean, 2)}s. The rule detected {n_outliers} outliers, where step duration was larger than {threshold} times the standard deviation of {stddev}s \n""")) step_stats_df = pd.DataFrame.from_dict(report['Details']['step_details'][node_id]['step_stats'], orient='index').T step_stats_df.index = ['Step Durations in [s]'] pretty_print(step_stats_df) display(Markdown(f"""The following histogram shows the step durations measured on the different nodes. 
You can turn on or turn off the visualization of histograms by selecting or unselecting the labels in the legend.""")) plot = figure(plot_height=450, plot_width=850, title=f"""Step durations""") colors = bokeh.palettes.viridis(len(report['Details']['step_details'])) for index, node_id in enumerate(report['Details']['step_details']): probs = report['Details']['step_details'][node_id]['probs'] binedges = report['Details']['step_details'][node_id]['binedges'] plot.quad( top=probs, bottom=0, left=binedges[:-1], right=binedges[1:], line_color="white", fill_color=colors[index], fill_alpha=0.7, legend=node_id) plot.add_layout(Legend(), 'right') plot.y_range.start = 0 plot.xaxis.axis_label = f"""Step durations in [s]""" plot.yaxis.axis_label = "Occurrences" plot.grid.grid_line_color = "white" plot.legend.click_policy="hide" plot.legend.location = 'center_right' show(plot) if report['RuleTriggered'] > 0: text=f"""To get a better understanding of what may have caused those outliers, we correlate the timestamps of step outliers with other framework metrics that happened at the same time. The left chart shows how much time was spent in the different framework metrics aggregated by event phase. The chart on the right shows the histogram of normal step durations (without outliers). The following chart shows how much time was spent in the different framework metrics when step outliers occurred. In this chart framework metrics are not aggregated byphase.""" plots = [] if 'phase' in report['Details']: text = f"""{text} The chart (in the middle) shows whether step outliers mainly happened during TRAIN or EVAL phase. """ plot = create_piechart(report['Details']['phase'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="The ratio between the time spent on the TRAIN/EVAL phase") plots.append(plot) if 'forward_backward' in report['Details'] and len(report['Details']['forward_backward']) > 0: event = max(report['Details']['forward_backward'], key=report['Details']['forward_backward'].get) perc = report['Details']['forward_backward'][event] text = f"""{text} The pie chart on the right shows a detailed breakdown. It shows that {int(perc)}% of the training time was spent on event "{event}".""" plot = create_piechart(report['Details']['forward_backward'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="The Ratio between forward and backward pass") plots.append(plot) if len(plots) > 0: paragraph = Paragraph(text=text, width=900) show(column(paragraph, row(plots))) plots = [] text = "" if 'ratio' in report['Details'] and len(report['Details']['ratio']) > 0: key = list(report['Details']['ratio'].keys())[0] ratio = report['Details']['ratio'][key] text = f"""The following pie chart shows a breakdown of the CPU/GPU operators executed during the step outliers. 
It shows that {int(ratio)}% of the training time was spent on executing operators in "{key}".""" plot = create_piechart(report['Details']['ratio'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="The ratio between CPU/GPU operators") plots.append(plot) if 'general' in report['Details'] and len(report['Details']['general']) > 0: event = max(report['Details']['general'], key=report['Details']['general'].get) perc = report['Details']['general'][event] plot = create_piechart(report['Details']['general'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="General metrics recorded in framework ") plots.append(plot) if len(plots) > 0: paragraph = Paragraph(text=text, width=900) show(column(paragraph, row(plots))) plots = [] text = "" if 'horovod' in report['Details'] and len(report['Details']['horovod']) > 0: event = max(report['Details']['horovod'], key=report['Details']['horovod'].get) perc = report['Details']['horovod'][event] text = f"""The following pie chart shows a detailed breakdown of the Horovod metrics that have been recorded when step outliers happened. The most expensive function was {event} with {int(perc)}%""" plot = create_piechart(report['Details']['horovod'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="General metrics recorded in framework ") paragraph = Paragraph(text=text, width=900) show(column(paragraph, row(plot))) # + papermill={"duration": 0.064289, "end_time": "2022-01-20T11:10:35.792062", "exception": false, "start_time": "2022-01-20T11:10:35.727773", "status": "completed"} tags=["hide-input"] if analyse_phase == "training": display(Markdown("""### GPU utilization analysis\n\n""")) display(Markdown("""**Usage per GPU** \n\n""")) report = load_report('LowGPUUtilization') if report: params = report['RuleParameters'].split('\n') threshold_p95 = params[0].split(':')[1] threshold_p5 = params[1].split(':')[1] window = params[2].split(':')[1] patience = params[3].split(':')[1] violations = report['Violations'] triggered = report['RuleTriggered'] datapoints = report['Datapoints'] text=Paragraph(text=f"""The LowGPUUtilization rule checks for a low and fluctuating GPU usage. If the GPU usage is consistently low, it might be caused by bottlenecks or a small batch size. If usage is heavily fluctuating, it can be due to bottlenecks or blocking calls. The rule computed the 95th and 5th percentile of GPU utilization on {window} continuous datapoints and found {violations} cases where p95 was above {threshold_p95}% and p5 was below {threshold_p5}%. If p95 is high and p5 is low, it might indicate that the GPU usage is highly fluctuating. If both values are very low, it would mean that the machine is underutilized. During initialization, the GPU usage is likely zero, so the rule skipped the first {patience} data points. The rule analysed {datapoints} datapoints and triggered {triggered} times.""", width=800) show(text) if len(report['Details']) > 0: timestamp = us_since_epoch_to_human_readable_time(report['Details']['last_timestamp']) date = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S:%f') day = date.date().strftime("%m/%d/%Y") hour = date.time().strftime("%H:%M:%S") text = Paragraph(text=f"""Your training job is underutilizing the instance. You may want to consider to either switch to a smaller instance type or to increase the batch size. The last time that the LowGPUUtilization rule was triggered in your training job was on {day} at {hour}. The following boxplots are a snapshot from the timestamps. 
They show the utilization per GPU (without outliers). To get a better understanding of the workloads throughout the whole training, you can check the workload histogram in the next section.""", width=800) show(text) del report['Details']['last_timestamp'] for node_id in report['Details']: plot = figure(plot_height=350, plot_width=1000, toolbar_location='right', tools="hover,wheel_zoom,reset,pan", title=f"Node {node_id}", x_range=(0,17), ) for index, key in enumerate(report['Details'][node_id]): display(Markdown(f"""**GPU utilization of {key} on node {node_id}:**""")) text = "" gpu_max = report['Details'][node_id][key]['gpu_max'] p_95 = report['Details'][node_id][key]['gpu_95'] p_5 = report['Details'][node_id][key]['gpu_5'] text = f"""{text} The max utilization of {key} on node {node_id} was {gpu_max}%""" if p_95 < int(threshold_p95): text = f"""{text} and the 95th percentile was only {p_95}%. {key} on node {node_id} is underutilized""" if p_5 < int(threshold_p5): text = f"""{text} and the 5th percentile was only {p_5}%""" if p_95 - p_5 > 50: text = f"""{text} The difference between 5th percentile {p_5}% and 95th percentile {p_95}% is quite significant, which means that utilization on {key} is fluctuating quite a lot.\n""" upper = report['Details'][node_id][key]['upper'] lower = report['Details'][node_id][key]['lower'] p75 = report['Details'][node_id][key]['p75'] p25 = report['Details'][node_id][key]['p25'] p50 = report['Details'][node_id][key]['p50'] plot.segment(index+1, upper, index+1, p75, line_color="black") plot.segment(index+1, lower, index+1, p25, line_color="black") plot.vbar(index+1, 0.7, p50, p75, fill_color="#FDE725", line_color="black") plot.vbar(index+1, 0.7, p25, p50, fill_color="#440154", line_color="black") plot.rect(index+1, lower, 0.2, 0.01, line_color="black") plot.rect(index+1, upper, 0.2, 0.01, line_color="black") plot.xaxis.major_label_overrides[index+1] = key plot.xgrid.grid_line_color = None plot.ygrid.grid_line_color = "white" plot.grid.grid_line_width = 0 plot.xaxis.major_label_text_font_size="10px" text=Paragraph(text=f"""{text}""", width=900) show(text) plot.yaxis.axis_label = "Utilization in %" plot.xaxis.ticker = np.arange(index+2) show(plot) # + papermill={"duration": 0.096111, "end_time": "2022-01-20T11:10:35.915803", "exception": false, "start_time": "2022-01-20T11:10:35.819692", "status": "completed"} tags=["hide-input"] if analyse_phase == "training": display(Markdown("""**Workload balancing**\n\n""")) report = load_report('LoadBalancing') if report: params = report['RuleParameters'].split('\n') threshold = params[0].split(':')[1] patience = params[1].split(':')[1] triggered = report['RuleTriggered'] datapoints = report['Datapoints'] paragraph = Paragraph(text=f"""The LoadBalancing rule helps to detect issues in workload balancing between multiple GPUs. It computes a histogram of GPU utilization values for each GPU and compares then the similarity between histograms. The rule checked if the distance of histograms is larger than the threshold of {threshold}. During initialization utilization is likely zero, so the rule skipped the first {patience} data points. """, width=900) show(paragraph) if len(report['Details']) > 0: for node_id in report['Details']: text = f"""The following histogram shows the workload per GPU on node {node_id}. You can enable/disable the visualization of a workload by clicking on the label in the legend. 
""" if len(report['Details']) == 1 and len(report['Details'][node_id]['workloads']) == 1: text = f"""{text} Your training job only used one GPU so there is no workload balancing issue.""" plot = figure(plot_height=450, plot_width=850, x_range=(-1,100), title=f"""Workloads on node {node_id}""") colors = bokeh.palettes.viridis(len(report['Details'][node_id]['workloads'])) for index, gpu_id2 in enumerate(report['Details'][node_id]['workloads']): probs = report['Details'][node_id]['workloads'][gpu_id2] plot.quad( top=probs, bottom=0, left=np.arange(0,98,2), right=np.arange(2,100,2), line_color="white", fill_color=colors[index], fill_alpha=0.8, legend=gpu_id2 ) plot.y_range.start = 0 plot.xaxis.axis_label = f"""Utilization""" plot.yaxis.axis_label = "Occurrences" plot.grid.grid_line_color = "white" plot.legend.click_policy="hide" paragraph = Paragraph(text=text) show(column(paragraph, plot)) if "distances" in report['Details'][node_id]: text = f"""The rule identified workload balancing issues on node {node_id} where workloads differed by more than threshold {threshold}. """ for index, gpu_id2 in enumerate(report['Details'][node_id]['distances']): for gpu_id1 in report['Details'][node_id]['distances'][gpu_id2]: distance = round(report['Details'][node_id]['distances'][gpu_id2][gpu_id1], 2) text = f"""{text} The difference of workload between {gpu_id2} and {gpu_id1} is: {distance}.""" paragraph = Paragraph(text=f"""{text}""", width=900) show(column(paragraph)) # + papermill={"duration": 0.11675, "end_time": "2022-01-20T11:10:36.062939", "exception": false, "start_time": "2022-01-20T11:10:35.946189", "status": "completed"} tags=["hide-input"] if analyse_phase == "training": display(Markdown("""### Dataloading analysis\n\n""")) report = load_report('Dataloader') if report: params = report['RuleParameters'].split("\n") min_threshold = params[0].split(':')[1] max_threshold = params[1].split(':')[1] triggered = report['RuleTriggered'] datapoints = report['Datapoints'] text=f"""The number of dataloader workers can greatly affect the overall performance of your training job. The rule analyzed the number of dataloading processes that have been running in parallel on the training instance and compares it against the total number of cores. The rule checked if the number of processes is smaller than {min_threshold}% or larger than {max_threshold}% the total number of cores. Having too few dataloader workers can slowdown data preprocessing and lead to GPU underutilization. Having too many dataloader workers may hurt the overall performance if you are running other compute intensive tasks on the CPU. The rule analysed {datapoints} datapoints and triggered {triggered} times.""" paragraph = Paragraph(text=f"{text}", width=900) show(paragraph) text = "" if 'cores' in report['Details']: cores = int(report['Details']['cores']) dataloaders = report['Details']['dataloaders'] if dataloaders < cores: text=f"""{text} Your training instance provided {cores} CPU cores, however your training job only ran on average {dataloaders} dataloader workers in parallel. We recommend you to increase the number of dataloader workers.""" if dataloaders > cores: text=f"""{text} Your training instance provided {cores} CPU cores, however your training job ran on average {dataloaders} dataloader workers. 
We recommed you to decrease the number of dataloader workers.""" if 'pin_memory' in report['Details'] and report['Details']['pin_memory'] == False: text=f"""{text} Using pinned memory also improves performance because it enables fast data transfer to CUDA-enabled GPUs. The rule detected that your training job was not using pinned memory. In case of using PyTorch Dataloader, you can enable this by setting pin_memory=True.""" if 'prefetch' in report['Details'] and report['Details']['prefetch'] == False: text=f"""{text} It appears that your training job did not perform any data pre-fetching. Pre-fetching can improve your data input pipeline as it produces the data ahead of time.""" paragraph = Paragraph(text=f"{text}", width=900) show(paragraph) colors=bokeh.palettes.viridis(10) if "dataloading_time" in report['Details']: median = round(report['Details']["dataloading_time"]['p50'],4) p95 = round(report['Details']["dataloading_time"]['p95'],4) p25 = round(report['Details']["dataloading_time"]['p25'],4) binedges = report['Details']["dataloading_time"]['binedges'] probs = report['Details']["dataloading_time"]['probs'] text=f"""The following histogram shows the distribution of dataloading times that have been measured throughout your training job. The median dataloading time was {median}s. The 95th percentile was {p95}s and the 25th percentile was {p25}s""" plot = figure(plot_height=450, plot_width=850, toolbar_location='right', tools="hover,wheel_zoom,reset,pan", x_range=(binedges[0], binedges[-1]) ) plot.quad( top=probs, bottom=0, left=binedges[:-1], right=binedges[1:], line_color="white", fill_color=colors[0], fill_alpha=0.8, legend="Dataloading events" ) plot.y_range.start = 0 plot.xaxis.axis_label = f"""Dataloading in [s]""" plot.yaxis.axis_label = "Occurrences" plot.grid.grid_line_color = "white" plot.legend.click_policy="hide" paragraph = Paragraph(text=f"{text}", width=900) show(column(paragraph, plot)) # + papermill={"duration": 0.068194, "end_time": "2022-01-20T11:10:36.163110", "exception": false, "start_time": "2022-01-20T11:10:36.094916", "status": "completed"} tags=["hide-input"] if analyse_phase == "training": display(Markdown(""" ### Batch size""")) report = load_report('BatchSize') if report: params = report['RuleParameters'].split('\n') cpu_threshold_p95 = int(params[0].split(':')[1]) gpu_threshold_p95 = int(params[1].split(':')[1]) gpu_memory_threshold_p95 = int(params[2].split(':')[1]) patience = int(params[3].split(':')[1]) window = int(params[4].split(':')[1]) violations = report['Violations'] triggered = report['RuleTriggered'] datapoints = report['Datapoints'] text = Paragraph(text=f"""The BatchSize rule helps to detect if GPU is underutilized because of the batch size being too small. To detect this the rule analyzes the GPU memory footprint, CPU and GPU utilization. The rule checked if the 95th percentile of CPU utilization is below cpu_threshold_p95 of {cpu_threshold_p95}%, the 95th percentile of GPU utilization is below gpu_threshold_p95 of {gpu_threshold_p95}% and the 95th percentile of memory footprint \ below gpu_memory_threshold_p95 of {gpu_memory_threshold_p95}%. In your training job this happened {violations} times. \ The rule skipped the first {patience} datapoints. The rule computed the percentiles over window size of {window} continuous datapoints.\n The rule analysed {datapoints} datapoints and triggered {triggered} times. 
""", width=800) show(text) if len(report['Details']) >0: timestamp = us_since_epoch_to_human_readable_time(report['Details']['last_timestamp']) date = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S:%f') day = date.date().strftime("%m/%d/%Y") hour = date.time().strftime("%H:%M:%S") del report['Details']['last_timestamp'] text = Paragraph(text=f"""Your training job is underutilizing the instance. You may want to consider either switch to a smaller instance type or to increase the batch size. The last time the BatchSize rule triggered in your training job was on {day} at {hour}. The following boxplots are a snapshot from the timestamps. They the total CPU utilization, the GPU utilization, and the GPU memory usage per GPU (without outliers).""", width=800) show(text) for node_id in report['Details']: xmax = max(20, len(report['Details'][node_id])) plot = figure(plot_height=350, plot_width=1000, toolbar_location='right', tools="hover,wheel_zoom,reset,pan", title=f"Node {node_id}", x_range=(0,xmax) ) for index, key in enumerate(report['Details'][node_id]): upper = report['Details'][node_id][key]['upper'] lower = report['Details'][node_id][key]['lower'] p75 = report['Details'][node_id][key]['p75'] p25 = report['Details'][node_id][key]['p25'] p50 = report['Details'][node_id][key]['p50'] plot.segment(index+1, upper, index+1, p75, line_color="black") plot.segment(index+1, lower, index+1, p25, line_color="black") plot.vbar(index+1, 0.7, p50, p75, fill_color="#FDE725", line_color="black") plot.vbar(index+1, 0.7, p25, p50, fill_color="#440154", line_color="black") plot.rect(index+1, lower, 0.2, 0.01, line_color="black") plot.rect(index+1, upper, 0.2, 0.01, line_color="black") plot.xaxis.major_label_overrides[index+1] = key plot.xgrid.grid_line_color = None plot.ygrid.grid_line_color = "white" plot.grid.grid_line_width = 0 plot.xaxis.major_label_text_font_size="10px" plot.xaxis.ticker = np.arange(index+2) plot.yaxis.axis_label = "Utilization in %" show(plot) # + papermill={"duration": 0.076896, "end_time": "2022-01-20T11:10:36.272482", "exception": false, "start_time": "2022-01-20T11:10:36.195586", "status": "completed"} tags=["hide-input"] if analyse_phase == "training": display(Markdown("""### CPU bottlenecks\n\n""")) report = load_report('CPUBottleneck') if report: params = report['RuleParameters'].split('\n') threshold = int(params[0].split(':')[1]) cpu_threshold = int(params[1].split(':')[1]) gpu_threshold = int(params[2].split(':')[1]) patience = int(params[3].split(':')[1]) violations = report['Violations'] triggered = report['RuleTriggered'] datapoints = report['Datapoints'] if report['Violations'] > 0: perc = int(report['Violations']/report['Datapoints']*100) else: perc = 0 if perc < threshold: string = 'below' else: string = 'above' text = f"""The CPUBottleneck rule checked when the CPU utilization was above cpu_threshold of {cpu_threshold}% and GPU utilization was below gpu_threshold of {gpu_threshold}%. During initialization utilization is likely to be zero, so the rule skipped the first {patience} datapoints. With this configuration the rule found {violations} CPU bottlenecks which is {perc}% of the total time. 
This is {string} the threshold of {threshold}% The rule analysed {datapoints} data points and triggered {triggered} times.""" paragraph = Paragraph(text=text, width=900) show(paragraph) if report: plots = [] text = "" if report['RuleTriggered'] > 0: low_gpu = report['Details']['low_gpu_utilization'] cpu_bottleneck = {} cpu_bottleneck["GPU usage above threshold"] = report["Datapoints"] - report["Details"]["low_gpu_utilization"] cpu_bottleneck["GPU usage below threshold"] = report["Details"]["low_gpu_utilization"] - len(report["Details"]) cpu_bottleneck["Low GPU usage due to CPU bottlenecks"] = len(report["Details"]["bottlenecks"]) n_bottlenecks = round(len(report['Details']['bottlenecks'])/datapoints * 100, 2) text = f"""The following chart (left) shows how many datapoints were below the gpu_threshold of {gpu_threshold}% and how many of those datapoints were likely caused by a CPU bottleneck. The rule found {low_gpu} out of {datapoints} datapoints which had a GPU utilization below {gpu_threshold}%. Out of those datapoints {n_bottlenecks}% were likely caused by CPU bottlenecks. """ plot = create_piechart(cpu_bottleneck, height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="Low GPU usage caused by CPU bottlenecks") plots.append(plot) if 'phase' in report['Details']: text = f"""{text} The chart (in the middle) shows whether CPU bottlenecks mainly happened during train/validation phase. """ plot = create_piechart(report['Details']['phase'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="The ratio between time spent on TRAIN/EVAL phase") plots.append(plot) if 'forward_backward' in report['Details'] and len(report['Details']['forward_backward']) > 0: event = max(report['Details']['forward_backward'], key=report['Details']['forward_backward'].get) perc = report['Details']['forward_backward'][event] text = f"""{text} The pie charts on the right shows a more detailed breakdown. It shows that {int(perc)}% of the training time was spent on event {event}""" plot = create_piechart(report['Details']['forward_backward'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="The ratio between forward and backward pass") plots.append(plot) if len(plots) > 0: paragraph = Paragraph(text=text, width=900) show(column(paragraph, row(plots))) plots = [] text = "" if 'ratio' in report['Details'] and len(report['Details']['ratio']) > 0: key = list(report['Details']['ratio'].keys())[0] ratio = report['Details']['ratio'][key] text = f"""The following pie chart shows a breakdown of the CPU/GPU operators that happened during CPU bottlenecks. 
It shows that {int(ratio)}% of the training time was spent on executing operators in "{key}".""" plot = create_piechart(report['Details']['ratio'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="The ratio between CPU/GPU operators") plots.append(plot) if 'general' in report['Details'] and len(report['Details']['general']) > 0: event = max(report['Details']['general'], key=report['Details']['general'].get) perc = report['Details']['general'][event] plot = create_piechart(report['Details']['general'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="General metrics recorded in framework ") plots.append(plot) if len(plots) > 0: paragraph = Paragraph(text=text, width=900) show(column(paragraph, row(plots))) plots = [] text = "" if 'horovod' in report['Details'] and len(report['Details']['horovod']) > 0: event = max(report['Details']['horovod'], key=report['Details']['horovod'].get) perc = report['Details']['horovod'][event] text = f"""The following pie chart shows a detailed breakdown of the Horovod metrics that have been recorded when the CPU bottleneck happened. The most expensive function was {event} with {int(perc)}%""" plot = create_piechart(report['Details']['horovod'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="General metrics recorded in framework ") paragraph = Paragraph(text=text, width=900) show(column(paragraph, row(plot))) # + papermill={"duration": 0.077623, "end_time": "2022-01-20T11:10:36.383843", "exception": false, "start_time": "2022-01-20T11:10:36.306220", "status": "completed"} tags=["hide-input"] if analyse_phase == "training": display(Markdown("""### I/O bottlenecks\n\n""")) report = load_report('IOBottleneck') if report: params = report['RuleParameters'].split('\n') threshold = int(params[0].split(':')[1]) io_threshold = int(params[1].split(':')[1]) gpu_threshold = int(params[2].split(':')[1]) patience = int(params[3].split(':')[1]) violations = report['Violations'] triggered = report['RuleTriggered'] datapoints = report['Datapoints'] if report['Violations'] > 0: perc = int(report['Violations']/report['Datapoints']*100) else: perc = 0 if perc < threshold: string = 'below' else: string = 'above' text = f"""The IOBottleneck rule checked when I/O wait time was above io_threshold of {io_threshold}% and GPU utilization was below gpu_threshold of {gpu_threshold}. During initialization utilization is likely to be zero, so the rule skipped the first {patience} datapoints. With this configuration the rule found {violations} I/O bottlenecks which is {perc}% of the total time. This is {string} the threshold of {threshold}%. The rule analysed {datapoints} datapoints and triggered {triggered} times.""" paragraph = Paragraph(text=text, width=900) show(paragraph) if report: plots = [] text = "" if report['RuleTriggered'] > 0: low_gpu = report['Details']['low_gpu_utilization'] cpu_bottleneck = {} cpu_bottleneck["GPU usage above threshold"] = report["Datapoints"] - report["Details"]["low_gpu_utilization"] cpu_bottleneck["GPU usage below threshold"] = report["Details"]["low_gpu_utilization"] - len(report["Details"]) cpu_bottleneck["Low GPU usage due to I/O bottlenecks"] = len(report["Details"]["bottlenecks"]) n_bottlenecks = round(len(report['Details']['bottlenecks'])/datapoints * 100, 2) text = f"""The following chart (left) shows how many datapoints were below the gpu_threshold of {gpu_threshold}% and how many of those datapoints were likely caused by a I/O bottleneck. 
The rule found {low_gpu} out of {datapoints} datapoints which had a GPU utilization below {gpu_threshold}%. Out of those datapoints {n_bottlenecks}% were likely caused by I/O bottlenecks. """ plot = create_piechart(cpu_bottleneck, height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="Low GPU usage caused by I/O bottlenecks") plots.append(plot) if 'phase' in report['Details']: text = f"""{text} The chart (in the middle) shows whether I/O bottlenecks mainly happened during the training or validation phase. """ plot = create_piechart(report['Details']['phase'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="The ratio between the time spent on the TRAIN/EVAL phase") plots.append(plot) if 'forward_backward' in report['Details'] and len(report['Details']['forward_backward']) > 0: event = max(report['Details']['forward_backward'], key=report['Details']['forward_backward'].get) perc = report['Details']['forward_backward'][event] text = f"""{text} The pie charts on the right shows a more detailed breakdown. It shows that {int(perc)}% of the training time was spent on event "{event}".""" plot = create_piechart(report['Details']['forward_backward'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="The ratio between forward and backward pass") plots.append(plot) if len(plots) > 0: paragraph = Paragraph(text=text, width=900) show(column(paragraph, row(plots))) plots = [] text = "" if 'ratio' in report['Details'] and len(report['Details']['ratio']) > 0: key = list(report['Details']['ratio'].keys())[0] ratio = report['Details']['ratio'][key] text = f"""The following pie chart shows a breakdown of the CPU/GPU operators that happened during I/O bottlenecks. It shows that {int(ratio)}% of the training time was spent on executing operators in "{key}".""" plot = create_piechart(report['Details']['ratio'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="Ratio between CPU/GPU operators") plots.append(plot) if 'general' in report['Details'] and len(report['Details']['general']) > 0: event = max(report['Details']['general'], key=report['Details']['general'].get) perc = report['Details']['general'][event] plot = create_piechart(report['Details']['general'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="General metrics recorded in framework ") plots.append(plot) if len(plots) > 0: paragraph = Paragraph(text=text, width=900) show(column(paragraph, row(plots))) plots = [] text = "" if 'horovod' in report['Details'] and len(report['Details']['horovod']) > 0: event = max(report['Details']['horovod'], key=report['Details']['horovod'].get) perc = report['Details']['horovod'][event] text = f"""The following pie chart shows a detailed breakdown of the Horovod metrics that have been recorded when I/O bottleneck happened. 
The most expensive function was {event} with {int(perc)}%""" plot = create_piechart(report['Details']['horovod'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="General metrics recorded in framework ") paragraph = Paragraph(text=text, width=900) show(column(paragraph, row(plot))) # + papermill={"duration": 0.1234, "end_time": "2022-01-20T11:10:36.541740", "exception": false, "start_time": "2022-01-20T11:10:36.418340", "status": "completed"} tags=["hide-input"] if analyse_phase == "training": display(Markdown("""### GPU memory\n\n""")) report = load_report('GPUMemoryIncrease') if report: params = report['RuleParameters'].split('\n') increase = float(params[0].split(':')[1]) patience = params[1].split(':')[1] window = params[2].split(':')[1] violations = report['Violations'] triggered = report['RuleTriggered'] datapoints = report['Datapoints'] text=Paragraph(text=f"""The GPUMemoryIncrease rule helps to detect large increase in memory usage on GPUs. The rule checked if the moving average of memory increased by more than {increase}%. So if the moving average increased for instance from 10% to {11+increase}%, the rule would have triggered. During initialization utilization is likely 0, so the rule skipped the first {patience} datapoints. The moving average was computed on a window size of {window} continuous datapoints. The rule detected {violations} violations where the moving average between previous and current time window increased by more than {increase}%. The rule analysed {datapoints} datapoints and triggered {triggered} times.""", width=900) show(text) if len(report['Details']) > 0: timestamp = us_since_epoch_to_human_readable_time(report['Details']['last_timestamp']) date = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S:%f') day = date.date().strftime("%m/%d/%Y") hour = date.time().strftime("%H:%M:%S") text = Paragraph(text=f"""Your training job triggered memory spikes. The last time the GPUMemoryIncrease rule triggered in your training job was on {day} at {hour}. The following boxplots are a snapshot from the timestamps. 
They show for each node and GPU the corresponding memory utilization (without outliers).""", width=900) show(text) del report['Details']['last_timestamp'] for node_id in report['Details']: plot = figure(plot_height=350, plot_width=1000, toolbar_location='right', tools="hover,wheel_zoom,reset,pan", title=f"Node {node_id}", x_range=(0,17), ) for index, key in enumerate(report['Details'][node_id]): display(Markdown(f"""**Memory utilization of {key} on node {node_id}:**""")) text = "" gpu_max = report['Details'][node_id][key]['gpu_max'] text = f"""{text} The max memory utilization of {key} on node {node_id} was {gpu_max}%.""" p_95 = int(report['Details'][node_id][key]['p95']) p_5 = report['Details'][node_id][key]['p05'] if p_95 < int(50): text = f"""{text} The 95th percentile was only {p_95}%.""" if p_5 < int(5): text = f"""{text} The 5th percentile was only {p_5}%.""" if p_95 - p_5 > 50: text = f"""{text} The difference between 5th percentile {p_5}% and 95th percentile {p_95}% is quite significant, which means that memory utilization on {key} is fluctuating quite a lot.""" text = Paragraph(text=f"""{text}""", width=900) show(text) upper = report['Details'][node_id][key]['upper'] lower = report['Details'][node_id][key]['lower'] p75 = report['Details'][node_id][key]['p75'] p25 = report['Details'][node_id][key]['p25'] p50 = report['Details'][node_id][key]['p50'] plot.segment(index+1, upper, index+1, p75, line_color="black") plot.segment(index+1, lower, index+1, p25, line_color="black") plot.vbar(index+1, 0.7, p50, p75, fill_color="#FDE725", line_color="black") plot.vbar(index+1, 0.7, p25, p50, fill_color="#440154", line_color="black") plot.rect(index+1, lower, 0.2, 0.01, line_color="black") plot.rect(index+1, upper, 0.2, 0.01, line_color="black") plot.xaxis.major_label_overrides[index+1] = key plot.xgrid.grid_line_color = None plot.ygrid.grid_line_color = "white" plot.grid.grid_line_width = 0 plot.xaxis.major_label_text_font_size="10px" plot.xaxis.ticker = np.arange(index+2) plot.yaxis.axis_label = "Utilization in %" show(plot)
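# For illustration, the next cell sketches how a moving-average increase check in the spirit of the GPUMemoryIncrease rule described above could be implemented. It is a simplified, stand-alone example: the `utilization` values and the `window`, `patience`, and `increase` parameters are made-up placeholders, not values taken from this training job.

# + tags=["hide-input"]
# Simplified sketch of a moving-average increase check (illustrative only).
import numpy as np

utilization = np.array([0, 0, 5, 20, 22, 21, 40, 41, 43, 44, 80, 82])  # memory utilization in %
window = 3      # datapoints per moving-average window
patience = 2    # skip the first moving-average values (initialization phase)
increase = 5    # trigger when the moving average grows by more than 5 percentage points

moving_avg = np.convolve(utilization, np.ones(window) / window, mode='valid')
violations = 0
for prev, curr in zip(moving_avg[patience:-1], moving_avg[patience + 1:]):
    if curr - prev > increase:
        violations += 1
print(f"{violations} violations in {len(utilization)} datapoints")
# -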
model-profiling-2/second-run/ProfilerReport/profiler-output/profiler-report.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="EiNSggykoe1v" # # Introdução # No projeto deste curso, utilizaremos dois conjuntos de dados diferentes: # # o MovieLens, do instituto de pesquisa Grouplens, que agrupa avaliações de filmes # o TMDB 5000, disponível no site da comunidade Kaggle, que contém informações como faturamento, orçamento, ano de lançamento, país de origem, entre outras # + [markdown] id="SfLx-9rwqWGZ" # # Preparar ambiente # + id="zaSg2Gemn2Ip" import pandas as pd import seaborn as sns #Cores sns.set_palette("RdBu") #Estilos sns.set_style("darkgrid") # + [markdown] id="PLBQWrJuqWrU" # # 1. Exploração de dados inicial # + id="PiJa8HtOqHMQ" colab={"base_uri": "https://localhost:8080/", "height": 311} outputId="4f4e6108-c931-4886-ffec-32e896a61204" uri='https://raw.githubusercontent.com/FelipeRamosOliveira/DataFrames/master/tmdb_5000_movies.csv' tmdb=pd.read_csv(uri) tmdb.head(2) # + [markdown] id="ocQ0rC4TvQ3v" # ## Estatistica descritiva # + id="Q20TE8uSqo60" colab={"base_uri": "https://localhost:8080/", "height": 284} outputId="f45baa92-bb34-4816-d69c-8962028c37cf" tmdb.describe() # + [markdown] id="JAdosgmpvWyp" # ## Histogramas e boxplot # + id="aGqVsV5mrVGG" colab={"base_uri": "https://localhost:8080/", "height": 312} outputId="a59f17e3-6314-4457-8be7-66537599f99a" # Histograma ax = sns.distplot(tmdb.vote_average) ax.set(xlabel='Nota média', ylabel='Densidade') ax.set_title('Média de votos em filmes no TMBD 5000') # + id="dPphzNhUrebn" colab={"base_uri": "https://localhost:8080/", "height": 312} outputId="b6fd8c49-3579-452f-c4d2-8c61ca68a5cf" # Histograma não normalizado ax = sns.distplot(tmdb.vote_average, norm_hist= False, kde= False) ax.set(xlabel='Nota média', ylabel='Frequência') ax.set_title('Média de votos em filmes no TMBD 5000') # + id="DoPjQ_ensGXm" colab={"base_uri": "https://localhost:8080/", "height": 312} outputId="8d5efef6-4f85-40ae-d2f8-1aeed96cd5ac" # Boxplot ax = sns.boxplot(tmdb.vote_average) ax.set(xlabel='Nota média do filme') ax.set_title('Distribuição de nota média dos filmes do TMDB 5000') # + [markdown] id="4aLNh1ZLvkEh" # ## Identificando e tratando problemas # + id="nLwglGtZsbQs" colab={"base_uri": "https://localhost:8080/", "height": 294} outputId="8daf0fe6-0881-4df7-9326-731a743dc00e" # Notas médias iguais a zero zeros=tmdb.query('vote_average == 0') zeros.head(2) # + [markdown] id="WukOI6OUtowz" # * Note que os filmes com média 0 ou 10 são aqueles com poucos votantes (zero, algumas vezes) # + id="yWnBNAUIs9Jx" colab={"base_uri": "https://localhost:8080/", "height": 284} outputId="26a101f7-7153-4bc7-ed2b-c5a9610134f9" # Estipular um número mínimo de votantes tmdb_com_mais_de_10_votos = tmdb.query('vote_count >= 10') tmdb_com_mais_de_10_votos.describe() # + id="7Yt15n1TtlxA" colab={"base_uri": "https://localhost:8080/", "height": 312} outputId="c00dda44-ae26-4ad0-8d1f-a4b74ddd3772" # Nova distribuição ax = sns.distplot(tmdb_com_mais_de_10_votos.vote_average, norm_hist= False, kde= False) ax.set(xlabel='Nota média', ylabel='Frequência') ax.set_title('Média de votos em filmes no TMBD 5000') # + id="MpILVAa2uA25" colab={"base_uri": "https://localhost:8080/", "height": 312} outputId="846eefe1-2da3-4848-a3af-7b6da9859a12" # Nova densidade ax = sns.distplot(tmdb_com_mais_de_10_votos.vote_average) ax.set(xlabel='Nota média', ylabel='Densidade') ax.set_title('Média de votos em 
filmes no TMBD 5000') # + id="Bk4pqQVXuwm1" colab={"base_uri": "https://localhost:8080/", "height": 312} outputId="c92f9891-7c5a-4309-f4d2-9b745999390c" # Novo boxplot ax = sns.boxplot(tmdb_com_mais_de_10_votos.vote_average) ax.set(xlabel='Nota média do filme') ax.set_title('Distribuição de nota média dos filmes do TMDB 5000') # + [markdown] id="gFeq6DX-Hz-K" # ## Visualizando e limpando o movielens # + id="PmsRPK8kvASE" colab={"base_uri": "https://localhost:8080/"} outputId="67b17eb5-f331-4448-b2e4-60728c7355f8" uri='https://raw.githubusercontent.com/FelipeRamosOliveira/DataFrames/master/ratings.csv' notas=pd.read_csv(uri) notas.head() # + id="7QFMVGnJHsHd" colab={"base_uri": "https://localhost:8080/"} outputId="dfa91f80-16ff-4826-c956-5d2a640aefc0" nota_media_por_filme = notas.groupby("movieId").mean()["rating"] nota_media_por_filme.head() # + id="I9uHZyAfIDKb" colab={"base_uri": "https://localhost:8080/"} outputId="0d494a7a-4f9c-4df7-847c-88a1ce5a1bd5" ax = sns.distplot(nota_media_por_filme.values) ax.set(xlabel='Nota média', ylabel='Densidade') ax.set_title('Média de votos em filmes no MovieLens') # + id="K_GQvC4kIQDh" colab={"base_uri": "https://localhost:8080/"} outputId="4c2ff898-9f2e-4345-f968-44ec7fd7d69e" quantidade_de_votos_por_filme = notas.groupby("movieId").count() quantidade_de_votos_por_filme.query("rating >= 10") # + id="aA51VQFAIh8Z" colab={"base_uri": "https://localhost:8080/"} outputId="efe73296-81c0-4907-eebd-44255d8d53d3" quantidade_de_votos_por_filme = notas.groupby("movieId").count() filmes_com_pelo_menos_10_votos = quantidade_de_votos_por_filme.query("rating >= 10").index filmes_com_pelo_menos_10_votos.values # + id="lMzH8BNGI_fs" colab={"base_uri": "https://localhost:8080/"} outputId="165f35f9-72ec-46af-9175-75a778196abb" nota_media_dos_filmes_com_pelo_menos_10_votos = nota_media_por_filme.loc[filmes_com_pelo_menos_10_votos.values] nota_media_dos_filmes_com_pelo_menos_10_votos.head() # + id="5r0B3CMyJiHk" colab={"base_uri": "https://localhost:8080/"} outputId="6431086c-4eb7-4ff9-9d20-46a62acb6ca2" ax = sns.distplot(nota_media_dos_filmes_com_pelo_menos_10_votos,bins=10) ax.set(xlabel='Nota média', ylabel='Densidade') ax.set_title('Média de votos em filmes no MovieLens') # + id="Cxa1MQZjJpWo" colab={"base_uri": "https://localhost:8080/"} outputId="6f9958c2-da6f-4d2e-ba73-0d906f863660" ax = sns.boxplot(x=nota_media_dos_filmes_com_pelo_menos_10_votos.values) ax.set(xlabel='Nota média do filme') ax.set_title('Distribuição de nota média dos filmes do MovieLens') # + [markdown] id="pZY1zKLRKVAj" # ## Visualizando a CDF # + id="iTezSsfgJtSk" colab={"base_uri": "https://localhost:8080/"} outputId="b34cbcc8-1843-44c2-acd8-3bc5253f51b6" ax = sns.distplot(nota_media_dos_filmes_com_pelo_menos_10_votos, hist_kws = {'cumulative':True}, kde_kws = {'cumulative':True}) ax.set(xlabel='Nota média', ylabel='Proporção acumulada de filmes') ax.set_title('Média de votos em filmes no MovieLens') # + id="fIVnTdWOKbEg" colab={"base_uri": "https://localhost:8080/"} outputId="74afac81-cf79-419f-e8ef-b607ba4c7126" ax = sns.distplot(tmdb_com_mais_de_10_votos.vote_average, hist_kws = {'cumulative':True}, kde_kws = {'cumulative':True}) ax.set(xlabel='Nota média', ylabel='Proporção acumulada de filmes') ax.set_title('Média de votos em filmes no TMDB 5000') # + [markdown] id="DZtDDKoqL2k0" # # 2. 
Explorando as distribuições das amostras # + id="IhHj15jUKlmG" colab={"base_uri": "https://localhost:8080/", "height": 312} outputId="880fe9a1-92e7-49b5-af49-20fa96c468df" # número de votos de cada filme ax = sns.distplot(tmdb_com_mais_de_10_votos.vote_count) ax.set(xlabel='Número de votos', ylabel='Densidade') ax.set_title('Número de votos em filmes no TMDB 5000 com 10 ou mais votos') # + id="1nJ0n7JRMEnJ" colab={"base_uri": "https://localhost:8080/", "height": 312} outputId="e1d3d9e3-c5f7-4ddb-fbb4-f50237489d86" # distribuição em um histograma do orçamento ax = sns.distplot(tmdb.query("budget > 0").budget) ax.set(xlabel='Gastos', ylabel='Densidade') ax.set_title('Gastos em filmes no TMDB 5000') # + id="tq4o8YulMXdD" colab={"base_uri": "https://localhost:8080/", "height": 312} outputId="6551d65a-63ee-4294-db93-6e21b295578f" # distribuição em um histograma da popularidade ax = sns.distplot(tmdb.popularity) ax.set(xlabel='Popularidade', ylabel='Densidade') ax.set_title('Popularidade dos filmes no TMDB 5000') # + id="2bVCs3TWNCfG" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="4160fbc1-a77c-44ba-fd4b-06a1433e12f9" # verificar a quantidade de valores null tmdb.runtime.isnull().sum() # + id="-E2sh_JNNO8v" colab={"base_uri": "https://localhost:8080/", "height": 312} outputId="a85e3228-dc38-4251-e0e7-f2cd07397347" # distribuição em um histograma do tempo de duração ax = sns.distplot(tmdb.runtime.dropna()) ax.set(xlabel='Duração (min)', ylabel='Densidade') ax.set_title('Duração dos filmes no TMDB 5000') # + id="1S3DmjhxNoZ9" colab={"base_uri": "https://localhost:8080/", "height": 312} outputId="0b30758d-f127-4aee-ec6a-7fbb164bffa6" ax = sns.distplot(tmdb.query("runtime>0").runtime.dropna()) ax.set(xlabel='Tempo de duração', ylabel='Densidade') ax.set_title('Duração dos filmes no TMDB 5000') # + id="CT36Ox4COJ6w" colab={"base_uri": "https://localhost:8080/", "height": 312} outputId="8ff6a9b4-e345-4450-86ea-4625ab2a82d0" # histograma cumulativo ax = sns.distplot(tmdb.query("runtime>0").runtime.dropna(), hist_kws={'cumulative':True}, kde_kws={'cumulative':True},bins=15) ax.set(xlabel='Tempo de duração', ylabel='Densidade') ax.set_title('Duração dos filmes no TMDB 5000') # + id="6RM3lKPsOSGy" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e797d5e6-9065-42d5-b3ea-2e54c8f1b894" tmdb.query("runtime>0").runtime.dropna().quantile(q=0.8) # + [markdown] id="D2hHU7DmO-_T" # # 3.Testes de uma amostra # + id="WJIqI16hOnAQ" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="cfaa7370-986c-4dd2-af6c-1410dcbd16d3" round(nota_media_dos_filmes_com_pelo_menos_10_votos.mean(),2) # + id="GaNRozIqPDBy" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="370817ee-7b57-4b06-a147-8f42be7f8c5f" nota_media_dos_filmes_com_pelo_menos_10_votos[0:5].mean() # + id="yDPDJhUqPT9Z" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ad831722-3279-4d4d-cfe0-537e5f9a88ff" len(nota_media_dos_filmes_com_pelo_menos_10_votos) # + [markdown] id="LzjOFdEzQyX8" # ## O efeito do tamanho de uma amostra # + id="VSkwksbFPc2q" colab={"base_uri": "https://localhost:8080/"} outputId="62a4c92d-cafe-4924-ebbd-9a642522f811" medias = list() for i in range(1, len(nota_media_dos_filmes_com_pelo_menos_10_votos)): medias.append(nota_media_dos_filmes_com_pelo_menos_10_votos[0:i].mean()) medias # + id="hE4Ie5p3PfUp" colab={"base_uri": "https://localhost:8080/"} outputId="23d4f5e3-6e8d-485d-bd21-a598d77e598c" import matplotlib.pyplot as plt medias = list() for 
i in range(1, len(nota_media_dos_filmes_com_pelo_menos_10_votos)): medias.append(nota_media_dos_filmes_com_pelo_menos_10_votos[0:i].mean()) plt.plot(medias) # + id="UgUNZjsXPh5v" colab={"base_uri": "https://localhost:8080/"} outputId="8504dcfe-f984-4650-b868-94ddcf3efd56" import matplotlib.pyplot as plt import numpy as np np.random.seed(75243) temp = nota_media_dos_filmes_com_pelo_menos_10_votos.sample(frac=1) medias = list() for i in range(1, len(temp)): medias.append(temp[0:i].mean()) plt.plot(medias) # + id="ZoXYk5aLPk43" colab={"base_uri": "https://localhost:8080/"} outputId="29dca58b-b510-4b75-98d3-2e998c76f486" np.random.seed(75243) temp = nota_media_dos_filmes_com_pelo_menos_10_votos.sample(frac=1) medias = list() medias = [temp[0:i].mean() for i in range(1, len(temp))] plt.plot(medias) # + [markdown] id="JYt-HKzHSCD1" # ## Intervalo de confiança da média com t e ztest # # + id="EFlesHVMPm5a" colab={"base_uri": "https://localhost:8080/"} outputId="424d71d0-e382-4bba-8f5a-2403eaa4f0e9" from statsmodels.stats.weightstats import zconfint zconfint(nota_media_dos_filmes_com_pelo_menos_10_votos) # + id="rA40ROOzSGa6" colab={"base_uri": "https://localhost:8080/"} outputId="61029318-2022-4bf2-87a1-277801e19c71" from statsmodels.stats.weightstats import DescrStatsW descr_todos_com_10_votos = DescrStatsW(nota_media_dos_filmes_com_pelo_menos_10_votos) descr_todos_com_10_votos # + id="6kFgo0z8l7iM" colab={"base_uri": "https://localhost:8080/"} outputId="0baeb2e1-423f-473e-8961-c57cdc365a92" descr_todos_com_10_votos.tconfint_mean() # + [markdown] id="FXh35318nQO0" # # 4.Outros testes # + id="1-8_ApLLl9D_" colab={"base_uri": "https://localhost:8080/", "height": 77} outputId="cf04a805-9c2d-4a9c-a405-e1d42ba4abf0" uri='https://raw.githubusercontent.com/FelipeRamosOliveira/DataFrames/master/movies.csv' filmes = pd.read_csv(uri) filmes.query("movieId==1") # + id="NUTDKFsJnso-" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="7d5e59a1-6c76-47e5-a824-592b4b12b0d4" notas1 = notas.query("movieId ==1") notas1.head() # + id="8sRsQf6AoFg_" colab={"base_uri": "https://localhost:8080/", "height": 312} outputId="5cac70c9-a2bb-4f77-f11a-cf16b26f6d21" ax = sns.distplot(notas1.rating) ax.set(xlabel='Notas do Toy Story', ylabel='Densidade') ax.set_title('Distribuição das notas para o Toy Story') plt.xlim(0, 5) # + id="0COk8cL6oIBL" colab={"base_uri": "https://localhost:8080/", "height": 312} outputId="a80e1074-ddde-4f48-c8b6-9dc80381e81f" ax = sns.boxplot(notas1.rating) ax.set(xlabel='Notas') ax.set_title('Distribuição das notas para o Toy Story') # + id="t8gRJFW8oLBA" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0a816b38-eb2f-458b-cf23-e843f45a1d9a" notas1.rating.mean() # + id="tDZ0jUvwoOs2" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="4bb840e5-4de2-42cc-eb28-4ef5b635555e" zconfint(notas1.rating) # + id="9obIFp-GoQj8" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0f6536bd-88bf-4c58-ebdb-3e2205cdd287" from statsmodels.stats.weightstats import ztest ztest(notas1.rating, value = 3.4320503405352603) # + id="gwKe6uCpoTlY" # + [markdown] id="OT8rh5D1099H" # # Problemas de amostras pequenas # # + id="xwyeAysE1AAH" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="cd75e33e-e0ba-4a3c-d2c6-3fefdecd2db4" np.random.seed(75241) temp = notas1.sample(frac=1).rating medias = [temp[0:i].mean() for i in range(1, len(temp))] plt.plot(medias) # + id="JauHpuxx1Gm7" colab={"base_uri": "https://localhost:8080/", 
"height": 1000} outputId="e45bcba5-4aee-4a9a-ce41-04114aeb6ffb" np.random.seed(75241) temp = notas1.sample(frac=1).rating def calcula_teste(i): media = temp[0:i].mean() stat, p = ztest(temp[0:i], value = 3.4320503405352603) return (media, p) medias = [calcula_teste(i) for i in range(2, len(temp))] medias # + id="ecg9i0B61N4R" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="12534fad-5b0c-485b-b7a0-d68f72f89d8e" def calcula_teste(i): media = temp[0:i].mean() stat, p = ztest(temp[0:i], value = 3.4320503405352603) return (i, media, p) medias = [calcula_teste(i) for i in range(2, len(temp))] medias # + id="nTtyaOCADx6h" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="f12d96e4-b584-4d8e-85ec-16fa607cd837" medias = np.array([calcula_teste(i) for i in range(2, len(temp))]) medias plt.plot(medias[:,1]) # + id="Bg3nh877D6nX" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="d6d4c7a1-df99-4592-c3a8-f381473f39b8" valores = np.array([calcula_teste(i) for i in range(2, len(temp))]) medias plt.plot(valores[:,0],valores[:,1]) # + id="Le9R2FI8ECKr" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="309a9619-b5f9-460a-c235-aa06a623b940" plt.plot(valores[:,0],valores[:,1]) plt.plot(valores[:,0],valores[:,2]) # + id="3KsNUy10EJaX" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="57fe886b-cad2-4949-a333-796629a3682e" plt.plot(valores[:,0],valores[:,1]) plt.plot(valores[:,0],valores[:,2]) plt.hlines(y = 0.05, xmin = 2, xmax = len(temp), colors = 'r') # + [markdown] id="Y4Mecm2-E6ga" # # 5.Testes para duas amostras # + id="5agAq6ikENxG" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2665539f-79b9-4d40-912a-f60521493ba9" zconfint(notas1.rating, notas.rating) # + id="j3d-hBIoFNzS" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="023d5f8d-267f-4e63-dd9c-7093e73f2df0" print (ztest(notas1.rating, notas.rating)) zconfint(notas1.rating, notas.rating) # + id="uRju8XEgFQVo" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="04a46f3f-05f2-4c80-eea7-03b7ac4d3cd3" print (ztest(notas1.rating, notas.rating)) zconfint(notas.rating, notas1.rating) # + id="Ih0aHh7LFTR-" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="bad497d2-0246-424f-a33e-70a149666949" from scipy.stats import ttest_ind ttest_ind(notas.rating, notas1.rating) # + id="_eLK1T9qFWP-" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="cf58ef65-4dab-4b0f-ba7f-2ed937cde0fc" descr_todas_as_notas = DescrStatsW(notas.rating) descr_toystory = DescrStatsW(notas1.rating) descr_todas_as_notas.get_compare(descr_toystory) # + id="sMgp6_nyFZKk" colab={"base_uri": "https://localhost:8080/", "height": 75} outputId="7197450c-234a-4e66-d53a-69ae64013ecd" descr_todas_as_notas = DescrStatsW(notas.rating) descr_toystory = DescrStatsW(notas1.rating) comparacao = descr_todas_as_notas.get_compare(descr_toystory) comparacao.summary() # + id="A4wITmRhFcec" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="44e4c1f8-ed3b-4744-ab94-dc18f3b1e3a7" import statsmodels as sm sm.__version__ # + id="g7BHv2mNFhtS" colab={"base_uri": "https://localhost:8080/", "height": 75} outputId="0fc7fe4e-d4e9-4ca8-b1b5-277f2292aaaf" descr_todas_as_notas = DescrStatsW(notas.rating) descr_toystory = DescrStatsW(notas1.rating) comparacao = descr_todas_as_notas.get_compare(descr_toystory) comparacao.summary(use_t=True) # + id="0bPujcKHFpgI" colab={"base_uri": "https://localhost:8080/", 
"height": 298} outputId="a8dd28b9-f885-478d-cfb5-4066ef60a211" import matplotlib.pyplot as plt plt.boxplot([notas.rating, notas1.rating], labels=["Todas as notas", "Toy Story"]) plt.title("Distribuição das notas de acordo com filmes") # + id="LDRxVmMgMqDb" colab={"base_uri": "https://localhost:8080/", "height": 298} outputId="803eddba-fe40-473a-c401-a20713fe2e6e" import matplotlib.pyplot as plt plt.boxplot([notas.rating, notas1[3:12].rating], labels=["Todas as notas", "Toy Story (do 3 ao 12)"]) plt.title("Distribuição das notas de acordo com filmes") # + id="IfludIt-MynG" colab={"base_uri": "https://localhost:8080/", "height": 75} outputId="32fd22f8-fb42-431e-da0f-5c758d1287dd" descr_todas_as_notas = DescrStatsW(notas.rating) descr_toystory = DescrStatsW(notas1[3:12].rating) comparacao = descr_todas_as_notas.get_compare(descr_toystory) comparacao.summary(use_t=True) # + [markdown] id="k1Ky7IfjNnA4" # # 6.Comparando filmes # + id="wZiqQsMyM13M" colab={"base_uri": "https://localhost:8080/", "height": 298} outputId="77cdb39c-f071-48e2-ae1d-678de19459c7" otas1 = notas.query("movieId == 1") notas593 = notas.query("movieId == 593") notas72226 = notas.query("movieId == 72226") plt.boxplot([notas1.rating, notas593.rating, notas72226.rating], labels=["Toy Story", "Silence of the Lambs,", "Fantastic Mr. Fox"]) plt.title("Distribuição das notas de acordo com filmes") # + id="XP72O294Ns3r" colab={"base_uri": "https://localhost:8080/", "height": 284} outputId="9b87b675-d7cb-4ac5-b9cf-94fcec674c3f" notas72226.describe() # + id="3kP7yOdvNw5D" colab={"base_uri": "https://localhost:8080/", "height": 296} outputId="a78330cc-40bc-4821-f35d-6f97ea762300" sns.boxplot(x = "movieId", y = "rating", data = notas.query("movieId in (1, 593, 72226)")) # + id="1oGk0kTENzcY" colab={"base_uri": "https://localhost:8080/", "height": 75} outputId="a35d20b3-605b-4e3c-81f9-3868f72a5ad3" descr_1 = DescrStatsW(notas1.rating) descr_593 = DescrStatsW(notas593.rating) comparacao = descr_1.get_compare(descr_593) comparacao.summary() # + id="Tw6URgx3N2Rc" colab={"base_uri": "https://localhost:8080/", "height": 75} outputId="a583ccdf-e250-4712-ad7e-3e2aafb69beb" descr_72226 = DescrStatsW(notas72226.rating) descr_593 = DescrStatsW(notas593.rating) comparacao = descr_72226.get_compare(descr_593) comparacao.summary() # + id="1L9CWYPgOBdh" colab={"base_uri": "https://localhost:8080/", "height": 75} outputId="c6978b2a-6fd0-45fd-a7f0-79db9101a039" comparacao = descr_1.get_compare(descr_72226) comparacao.summary() # + id="TFDOtRpTOHWm" colab={"base_uri": "https://localhost:8080/", "height": 166} outputId="8eaa1066-fa7c-4d68-ab51-ec4a146ac2d9" notas.query("movieId in (1, 593, 72226)").groupby("movieId").count() # + id="F9lAPMd1OLx5"
Python/Data Science With Python/Statistical Tests 101.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] nbsphinx="hidden" # # Random Signals # # *This jupyter notebook is part of a [collection of notebooks](../index.ipynb) on various topics of Digital Signal Processing. # - # ## Introduction # # Random signals are signals whose values are not (or only to a limited extend) predictable. Frequently used alternative terms are # # * stochastic signals # * non-deterministic signals # # Random signals play an important role in various fields of signal processing and communications. This is due to the fact that only random signals carry information. A signal which is observed by a receiver has to be unknown to some degree in order to represent novel [information](https://en.wikipedia.org/wiki/Information). # # Random signals are often classified as useful/desired and disturbing/interfering signals. For instance # # * useful signals: data, speech, music, images, ... # * disturbing signals: thermal noise at a resistor, amplifier noise, quantization noise, ... # # Practical signals are frequently modeled as a combination of useful signals and additive noise. # # As the values of a random signal cannot be foreseen, the properties of random signals are described by the their statistical characteristics. One measure is for instance the average value of a random signal. # **Example - Random Signals** # # The following audio examples illustrate the characteristics of some deterministic and random signals. Lower the volume of your headphones or loudspeakers before playing back the examples. # # 1. Cosine signal # # <audio src="./cosine.wav" controls>Your browser does not support the audio element.</audio>[./cosine.wav](./cosine.wav) # 2. Noise # # <audio src="./noise.wav" controls>Your browser does not support the audio element.</audio>[./noise.wav](./noise.wav) # 3. Cosine signal superpositioned by noise # # <audio src="./cosine_noise.wav" controls>Your browser does not support the audio element.</audio>[./cosine_noise.wav](./cosine_noise.wav) # 4. Speech signal # # <audio src="../data/speech.wav" controls>Your browser does not support the audio element.</audio>[../data/speech.wav](../data/speech.wav) # 5. Speech signal superpositioned by noise # # <audio src="./speech_noise.wav" controls>Your browser does not support the audio element.</audio>[./speech_noise.wav](./speech_noise.wav) # **Excercise** # # * Which example can be considered as deterministic, random signal or combination of both? # # Solution: The cosine signal is the only deterministic signal. Noise and speech are random signals, as their samples can not (or only to a limited extend) be predicted from previous samples. The superposition of the cosine and noise signals is a combination of a deterministic and a random signal. # ### Processing of Random Signals # # In contrary to the assumption of deterministic signals in traditional signal processing, [statistical signal processing](https://en.wikipedia.org/wiki/Statistical_signal_processing) treats signals explicitly as random signals. Two prominent application examples involving random signals are # #### Measurement of physical quantities # # The measurement of physical quantities is often subject to additive noise and distortions. The additive noise models e.g. the sensor noise. The distortions, by e.g. 
the transmission properties of an amplifier, may be modeled by a system. # # ![Model for the measurement of physical quantities](measurement_channel.png) # # $\mathcal{H}$ denotes an arbitrary (not necessarily LTI) system. The aim of statistical signal processing is to estimate the physical quantity from the observed sensor data, given some knowledge on the disturbing system and the statistical properties of the noise. # #### Communication channel # # In communications engineering a message is sent over a channel distorting the signal by e.g. multipath propagation. Additive noise is present at the receiver due to background and amplifier noise. # # ![Model for the transmission of a message over a communication channel](communication_channel.png) # # The aim of statistical signal processing is to estimate the send message from the received message, given some knowledge on the disturbing system and the statistical properties of the noise. # ### Random Processes # # A random process is a [stochastic process](https://en.wikipedia.org/wiki/Stochastic_process) which generates an ensemble of random signals. A random process # # * provides a mathematical model for an ensemble of random signals and # * generates different sample functions with specific common properties. # # It is important to differentiate between an # # * *ensemble*: collection of all possible signals of a random process and an # * *sample function*: one specific random signal. # # An example for a random process is speech produced by humans. Here the ensemble is composed from the speech signals produced by all humans on earth, one particular speech signal produced by one person at a specific time is a sample function. # **Example - Sample functions of a random process** # # The following example shows sample functions of a continuous amplitude real-valued random process. All sample functions have the same properties with respect to certain statistical measures. # + # %matplotlib inline import numpy as np import matplotlib.pyplot as plt N = 5 # number of sample functions # draw N sample functions from a random process np.random.seed(0) x = np.random.normal(size=(N, 32)) # plot sample functions fig = plt.figure(figsize=(10, 12)) for n in range(N): plt.subplot(N, 1, n+1) plt.tight_layout() plt.stem(x[n,:], basefmt='k-') plt.title('Sample Function %d' %n) plt.xlabel(r'$k$') plt.ylabel(r'$x_%d[k]$' %n) plt.axis([-1, 32, -3, 3]) plt.grid() # - # **Exercise** # # * What is different, what is common between the sample functions? # # Solution: You may have observed that the amplitude values of the individual sample functions $x_n[k]$ differ for a fixed time instant $k$. However, the sample functions seem to share some common properties. For instance, positive and negative values seem to occur with approximately the same probability. # ### Properties of Random Processes and Random Signals # # It was already argued above, that it is not meaningful to describe a random signal by the amplitude values of a particular sample function. Instead, random signals are characterized by specific statistical measures. In statistical signal processing it is common to use # # * amplitude distributions and # * ensemble averages/moments # # for this purpose. These measures will be introduced in the remainder.
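# As a short preview of these measures, the following cell contrasts an ensemble average (taken across sample functions at one fixed time index $k$) with a temporal average (taken along a single sample function). It reuses the normally distributed example process from above, just with more sample functions so that the averages are easier to interpret.

# +
np.random.seed(0)
x = np.random.normal(size=(1000, 32))  # 1000 sample functions with 32 samples each

k = 10
ensemble_average = np.mean(x[:, k])  # average across the ensemble at fixed time index k
temporal_average = np.mean(x[0, :])  # average along time for one sample function

print('ensemble average at k = {}: {:.3f}'.format(k, ensemble_average))
print('temporal average of x_0[k]: {:.3f}'.format(temporal_average))
# -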
Lectures_Advanced-DSP/random_signals/introduction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Reading the csv file into pandas dataframe # + import types import pandas as pd def __iter__(self): return 0 # @hidden_cell # add missing __iter__ method, so pandas accepts body as file-like object if not hasattr("Admission_Predict.csv", "__iter__"): "Admission_Predict.csv".__iter__ = types.MethodType( __iter__, body ) df = pd.read_csv("Admission_Predict.csv") # - # <p> To see what our data set looks like we used <b>head()</b> to display the first five rows of the dataframe.</p> df.head() # <p> We also checked the number of rows and columns in our dataframe using the <b>shape</b> method.</p> df.shape # We decided to drop the Serial No. column using <b>.drop()</b> as it serves no real purpose for data analysis. df.drop(['Serial No.'], axis=1, inplace=True) df.head() df.shape # We used the describe method to uderstand our dataframe better. df.describe() # It's always important to know the correlation between each element in a data set in order to uderstand the degree of association between two variables. Thus, we also checked the correlation between the elements in our data set using corr(). df.corr() # From the above matrix we can see that there is a strong positive correlation between GRE score and TOEFL score, GRE score and CGPA, GRE score and Chance of Admit, TOEFL score and CGPA, TOEFL score and Chance of Admit and lastly; CGPA and Chance of Admit. # ### Identify and handle missing value # <h4>Evaluating for Missing Data</h4> # # We used Python's built-in functions to identify these missing values. There are two methods to detect missing data: # <ol> # <li><b>.isnull()</b></li> # <li><b>.notnull()</b></li> # </ol> # The output is a boolean value indicating whether the value that is passed into the argument is in fact missing data. missing_data=df.isnull() missing_data.head(5) # <p>"True" stands for missing value, while "False" stands for not missing value.</p> # <h4>Count missing values in each column</h4> # <p> # Using a for loop in Python, we figured out the number of missing values in each column. As mentioned above, "True" represents a missing value, "False" means the value is present in the dataset. In the body of the for loop the method ".value_counts()" counts the number of "True" values. # </p> for column in missing_data.columns.values.tolist(): print(column) print (missing_data[column].value_counts()) print("") # <p> Based on the summary above, none of the columns containing missing data</p> df.columns = df.columns.to_series().apply(lambda x: x.strip()) df[["SOP", "LOR"]] = df[["SOP", "LOR"]].astype("int") df.dtypes # #### Detecting outliers using boxplots import seaborn as sns sns.boxplot(df['GRE Score']) sns.boxplot(df['TOEFL Score']) sns.boxplot(df['University Rating']) sns.boxplot(df['SOP']) sns.boxplot(df['LOR']) sns.boxplot(df['CGPA']) sns.boxplot(df['Research']) sns.boxplot(df['Chance of Admit']) # As seen from the box plots there seem to be outliers in LOR and Chance of Admit Values. We used Interquartile range rule for outliers to remove outliers. 
Q1 = df.quantile(0.25) Q3 = df.quantile(0.75) IQR = Q3 - Q1 print(IQR) print(df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR)) df1 = df[~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR))).any(axis=1)] df1.shape df1.head() sns.boxplot(df1['LOR']) sns.boxplot(df1['Chance of Admit']) df=df1.copy() df.shape # Since we are aiming to predict the chance of admit. We choose our three modeling algorithms to be: Linear Regression, Polynomial Regerssion and Ridge Regression. # Hypothesis 1: How important are GRE, TOEFL scores and CGPA important in getting an admit? import matplotlib.pyplot as plt import pylab as pl import numpy as np # %matplotlib inline # Selecting required features for analysis sdf= df[["GRE Score","TOEFL Score","CGPA","Chance of Admit"]] sdf.head() # Visualizing each attribute wrt Chance of admit using histogram & scatterplot. viz = sdf[["GRE Score","TOEFL Score","CGPA","Chance of Admit"]] viz.hist() plt.show() plt.scatter(sdf["GRE Score"], sdf["Chance of Admit"], color='green') plt.xlabel("GRE Score") plt.ylabel("Chance of Admit") plt.show() plt.scatter(sdf["TOEFL Score"], sdf["Chance of Admit"], color='green') plt.xlabel("TOEFL Score") plt.ylabel("Chance of Admit") plt.show() plt.scatter(sdf["CGPA"], sdf["Chance of Admit"], color='green') plt.xlabel("CGPA") plt.ylabel("Chance of Admit") plt.show() # Creating Train & Test dataset: # We split our dataset into train and test sets, 80% of the entire data for training, and the 20% for testing. We created a mask to select random rows using np.random.rand() function msk = np.random.rand(len(df)) < 0.8 train = sdf[msk] test = sdf[~msk] # ### Model building # 1) Multiple regression model from sklearn import linear_model regr1 = linear_model.LinearRegression() train_x1 = np.asanyarray(train[["GRE Score","TOEFL Score","CGPA"]]) train_y1 = np.asanyarray(train[["Chance of Admit"]]) regr1.fit (train_x1, train_y1) # The coefficients print ('Coefficients: ', regr1.coef_) print ('Intercept: ',regr1.intercept_) # Evaluation # + from sklearn.metrics import r2_score test_x1 = np.asanyarray(test[["GRE Score","TOEFL Score","CGPA"]]) test_y1 = np.asanyarray(test[["Chance of Admit"]]) test_y_pred1 = regr1.predict(test_x1) print("Mean absolute error: %.2f" % np.mean(np.absolute(test_y_pred1 - test_y1))) print("Residual sum of squares (MSE): %.2f" % np.mean((test_y_pred1 - test_y1) ** 2)) print("R2-score: %.2f" % r2_score(test_y_pred1 , test_y1) ) # - # 2) Polynomial Regression # + from sklearn.preprocessing import PolynomialFeatures from sklearn import linear_model train_x = np.asanyarray(train[["GRE Score","TOEFL Score","CGPA"]]) train_y = np.asanyarray(train[["Chance of Admit"]]) test_x = np.asanyarray(test[["GRE Score","TOEFL Score","CGPA"]]) test_y = np.asanyarray(test[["Chance of Admit"]]) poly = PolynomialFeatures(degree=4) train_x_poly = poly.fit_transform(train_x) train_x_poly # - # <b>fit_transform()</b> takes the x values, and output a list of the data raised from power of 0 to 4 (since in our case we set the degree of our polynomial to 4). # We used <b>LinearRegression()</b> function to treat polynomial regression as linear regression so that we could analyze this model easily. 
clf = linear_model.LinearRegression() train_y_ = clf.fit(train_x_poly, train_y) # The coefficients print ('Coefficients: ', clf.coef_) print ('Intercept: ',clf.intercept_) # Evaluation # + from sklearn.metrics import r2_score test_x_poly = poly.fit_transform(test_x) test_y_ = clf.predict(test_x_poly) print("Mean absolute error: %.2f" % np.mean(np.absolute(test_y_ - test_y))) print("Residual sum of squares (MSE): %.2f" % np.mean((test_y_ - test_y) ** 2)) print("R2-score: %.2f" % r2_score(test_y_ , test_y) ) # - # 3) Regression tree # + #Snippets of this code has been taken from geeks for geeks from sklearn.tree import DecisionTreeRegressor regressor = DecisionTreeRegressor(random_state = 0) X = np.asanyarray(train[["GRE Score","TOEFL Score","CGPA"]]) y = np.asanyarray(train[["Chance of Admit"]]) regressor.fit(X, y) # - # Evaluation # + from sklearn.metrics import r2_score test_X = np.asanyarray(test[["GRE Score","TOEFL Score","CGPA"]]) test_y = np.asanyarray(test[["Chance of Admit"]]) test_y_pred = regressor.predict(test_X) print("Mean absolute error: %.2f" % np.mean(np.absolute(test_y_pred - test_y))) print("Residual sum of squares (MSE): %.2f" % np.mean((test_y_pred - test_y) ** 2)) print("R2-score: %.2f" % r2_score(test_y_pred , test_y) ) # - # Finding the best model d = {'Model': ['Multiple Linear Regression','Polynomial Regression','Regression Tree'], 'MAE': [0.05,0.05,0.16], 'MSE': [0.0,0.0,0.04],'R2-Score':[0.77,0.76,0.67]} df_bestmodel = pd.DataFrame(data=d) df_bestmodel df_bestmodel.set_index('Model') # As we can see from the above dataframe all three models are good models to predict the chance of admit. But the best model amongst the 3 models is Multiple linear regression model since it has lowest mean absolute error as well as lowest mean squared error and the highest R2-Score. Polynomial regression has no mean squared error but it has higher mean absolute error and lower R2-Score when compared to multiple linear regression model. In case of regression tree it has higher mean absolute error and higher mean squared error than multiple linear regression and polynomial regression. It also has lowest R2-Score, hence it is not the best model when compared with other two. Thus, after comparing all three models we concluded that multiple linear regression is the best model. Also, we found out that GRE,TOEFL Scores and CGPA are really important variables in obtaining an admit from the university as higher GRE/TOEFL scores and higher CGPA would lead to greater chances of getting admit from a university. # Hypothesis 2: Does higher rating of SOP and LOR leads to greater chance of admit? # Selecting required features for analysis sdf= df[["SOP","LOR","Chance of Admit"]] sdf.head() # Visualizing each attribute wrt Chance of admit using histogram & scatterplot. viz = sdf[["SOP","LOR","Chance of Admit"]] viz.hist() plt.show() plt.scatter(sdf["SOP"], sdf["Chance of Admit"], color='green') plt.xlabel("SOP") plt.ylabel("Chance of Admit") plt.show() plt.scatter(sdf["LOR"], sdf["Chance of Admit"], color='green') plt.xlabel("LOR") plt.ylabel("Chance of Admit") plt.show() # Creating Train & Test dataset: We split our dataset into train and test sets, 80% of the entire data for training, and the 20% for testing. 
We created a mask to select random rows using np.random.rand() function msk = np.random.rand(len(df)) < 0.8 train = sdf[msk] test = sdf[~msk] # ### Model building # 1) Multiple linear Regression from sklearn import linear_model regr1 = linear_model.LinearRegression() train_x1 = np.asanyarray(train[["SOP","LOR"]]) train_y1 = np.asanyarray(train[["Chance of Admit"]]) regr1.fit (train_x1, train_y1) # The coefficients print ('Coefficients: ', regr1.coef_) print ('Intercept: ',regr1.intercept_) # Evaluation # + from sklearn.metrics import r2_score test_x1 = np.asanyarray(test[["SOP","LOR"]]) test_y1 = np.asanyarray(test[["Chance of Admit"]]) test_y_pred1 = regr1.predict(test_x1) print("Mean absolute error: %.2f" % np.mean(np.absolute(test_y_pred1 - test_y1))) print("Residual sum of squares (MSE): %.2f" % np.mean((test_y_pred1 - test_y1) ** 2)) print("R2-score: %.2f" % r2_score(test_y_pred1 , test_y1) ) # - # 2.1) Polynomial Regression (Degree=4) # + from sklearn.preprocessing import PolynomialFeatures from sklearn import linear_model train_x = np.asanyarray(train[["SOP","LOR"]]) train_y = np.asanyarray(train[["Chance of Admit"]]) test_x = np.asanyarray(test[["SOP","LOR"]]) test_y = np.asanyarray(test[["Chance of Admit"]]) poly = PolynomialFeatures(degree=4) train_x_poly = poly.fit_transform(train_x) train_x_poly # - # fit_transform() takes the x values, and output a list of the data raised from power of 0 to 4 (since in our case we set the degree of our polynomial to 4). # # We used LinearRegression() function to treat polynomial regression as linear regression so that we could analyze this model easily. clf = linear_model.LinearRegression() train_y_ = clf.fit(train_x_poly, train_y) # The coefficients print ('Coefficients: ', clf.coef_) print ('Intercept: ',clf.intercept_) # Evaluation # + from sklearn.metrics import r2_score test_x_poly = poly.fit_transform(test_x) test_y_ = clf.predict(test_x_poly) print("Mean absolute error: %.2f" % np.mean(np.absolute(test_y_ - test_y))) print("Residual sum of squares (MSE): %.2f" % np.mean((test_y_ - test_y) ** 2)) print("R2-score: %.2f" % r2_score(test_y_ , test_y) ) # - # 2.2) Polynomial Regression (Degree=12) # + from sklearn.preprocessing import PolynomialFeatures from sklearn import linear_model train_x = np.asanyarray(train[["SOP","LOR"]]) train_y = np.asanyarray(train[["Chance of Admit"]]) test_x = np.asanyarray(test[["SOP","LOR"]]) test_y = np.asanyarray(test[["Chance of Admit"]]) poly = PolynomialFeatures(degree=12) train_x_poly = poly.fit_transform(train_x) train_x_poly # - clf = linear_model.LinearRegression() train_y_ = clf.fit(train_x_poly, train_y) # The coefficients print ('Coefficients: ', clf.coef_) print ('Intercept: ',clf.intercept_) # + from sklearn.metrics import r2_score test_x_poly = poly.fit_transform(test_x) test_y_ = clf.predict(test_x_poly) print("Mean absolute error: %.2f" % np.mean(np.absolute(test_y_ - test_y))) print("Residual sum of squares (MSE): %.2f" % np.mean((test_y_ - test_y) ** 2)) print("R2-score: %.2f" % r2_score(test_y_ , test_y) ) # - # We can see by comparing our polynomial regression models that building a model with degree 12 has lower R2-Score (-0.01) but the MAE and MSE is really high which means that the model captures noise and it is a case of overfitting. Hence, we choose model with degree 4 for our final comparison. 
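# As a rough illustration of why the degree-12 model overfits, the next cell counts how many polynomial terms PolynomialFeatures generates for our two inputs (SOP and LOR) at different degrees; the dummy input row is just a placeholder used to read off the number of columns.

# +
# Number of polynomial terms for 2 input variables as the degree grows.
from sklearn.preprocessing import PolynomialFeatures
import numpy as np

dummy_row = np.zeros((1, 2))  # same shape as one [SOP, LOR] row
for degree in [2, 4, 8, 12]:
    n_terms = PolynomialFeatures(degree=degree).fit_transform(dummy_row).shape[1]
    print("degree {:>2}: {} polynomial terms".format(degree, n_terms))
# -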
# 3) Regression Tree # + #Snippets of this code has been taken from geeks for geeks from sklearn.tree import DecisionTreeRegressor regressor = DecisionTreeRegressor(random_state = 0) X = np.asanyarray(train[["SOP","LOR"]]) y = np.asanyarray(train[["Chance of Admit"]]) regressor.fit(X, y) # - # Evaluation # + from sklearn.metrics import r2_score test_X = np.asanyarray(test[["SOP","LOR"]]) test_y = np.asanyarray(test[["Chance of Admit"]]) test_y_pred = regressor.predict(test_X) print("Mean absolute error: %.2f" % np.mean(np.absolute(test_y_pred - test_y))) print("Residual sum of squares (MSE): %.2f" % np.mean((test_y_pred - test_y) ** 2)) print("R2-score: %.2f" % r2_score(test_y_pred , test_y) ) # - # Finding the best model d = {'Model': ['Multiple Linear Regression','Polynomial Regression','Regression Tree'], 'MAE': [0.08,0.08,0.15], 'MSE': [0.01,0.01,0.04],'R2-Score':[-0.12,-0.06,-0.07]} df_bestmodel = pd.DataFrame(data=d) df_bestmodel df_bestmodel.set_index('Model') # As we saw from our scatter plot matrices there seem to be a little or no correlation between the SOP/LOR and chance of admit. Hence, looking at our 3 models we got a very low R2-Score. The relatively high R2-score was observed for Polynomial Regression (-0.06). Polynomial regression also has lower mean absolute error and mean squared error when compared to Regression tree. Hence, amongst our three models Polynomial Regression would be the best one. But over-all it can be concluded that higher rating of SOP and LOR does not necessarily lead to higher chances of admit. Maybe SOP and LOR coupled with other variables might lead to more accurate predictions but just SOP and LOR are not ideal variables to predict the chance of obtaining an admit from the university. # Hypothesis 3: What are the chances of getting an admit from the university considering all variables (i.e. GRE Score,TOEFL Score,University Rating, SOP, LOR, CGPA, Research)? # Selecting required features for analysis sdf= df[["GRE Score","TOEFL Score","University Rating","SOP","LOR","CGPA","Research","Chance of Admit"]] sdf.head() # Creating Train & Test dataset: We split our dataset into train and test sets, 80% of the entire data for training, and the 20% for testing. 
We created a mask to select random rows using np.random.rand() function msk = np.random.rand(len(df)) < 0.8 train = sdf[msk] test = sdf[~msk] # 1) Multiple linear Regression from sklearn import linear_model regr1 = linear_model.LinearRegression() train_x1 = np.asanyarray(train[["GRE Score","TOEFL Score","University Rating","SOP","LOR","CGPA","Research"]]) train_y1 = np.asanyarray(train[["Chance of Admit"]]) regr1.fit (train_x1, train_y1) # The coefficients print ('Coefficients: ', regr1.coef_) print ('Intercept: ',regr1.intercept_) # Evaluation # + from sklearn.metrics import r2_score test_x1 = np.asanyarray(test[["GRE Score","TOEFL Score","University Rating","SOP","LOR","CGPA","Research"]]) test_y1 = np.asanyarray(test[["Chance of Admit"]]) test_y_pred1 = regr1.predict(test_x1) print("Mean absolute error: %.2f" % np.mean(np.absolute(test_y_pred1 - test_y1))) print("Residual sum of squares (MSE): %.2f" % np.mean((test_y_pred1 - test_y1) ** 2)) print("R2-score: %.2f" % r2_score(test_y_pred1 , test_y1) ) # - # 2) Polynomial Reggression # + from sklearn.preprocessing import PolynomialFeatures from sklearn import linear_model train_x = np.asanyarray(train[["GRE Score","TOEFL Score","University Rating","SOP","LOR","CGPA","Research"]]) train_y = np.asanyarray(train[["Chance of Admit"]]) test_x = np.asanyarray(test[["GRE Score","TOEFL Score","University Rating","SOP","LOR","CGPA","Research"]]) test_y = np.asanyarray(test[["Chance of Admit"]]) poly = PolynomialFeatures(degree=3) train_x_poly = poly.fit_transform(train_x) train_x_poly # - clf = linear_model.LinearRegression() train_y_ = clf.fit(train_x_poly, train_y) # The coefficients print ('Coefficients: ', clf.coef_) print ('Intercept: ',clf.intercept_) # Evaluation # + from sklearn.metrics import r2_score test_x_poly = poly.fit_transform(test_x) test_y_ = clf.predict(test_x_poly) print("Mean absolute error: %.2f" % np.mean(np.absolute(test_y_ - test_y))) print("Residual sum of squares (MSE): %.2f" % np.mean((test_y_ - test_y) ** 2)) print("R2-score: %.2f" % r2_score(test_y_ , test_y) ) # - # 3) Regression Trees # + #Snippets of this code has been taken from geeks for geeks from sklearn.tree import DecisionTreeRegressor regressor = DecisionTreeRegressor(random_state = 0) X = np.asanyarray(train[["GRE Score","TOEFL Score","University Rating","SOP","LOR","CGPA","Research"]]) y = np.asanyarray(train[["Chance of Admit"]]) regressor.fit(X, y) # - # Evaluation # + from sklearn.metrics import r2_score test_X = np.asanyarray(test[["GRE Score","TOEFL Score","University Rating","SOP","LOR","CGPA","Research"]]) test_y = np.asanyarray(test[["Chance of Admit"]]) test_y_pred = regressor.predict(test_X) print("Mean absolute error: %.2f" % np.mean(np.absolute(test_y_pred - test_y))) print("Residual sum of squares (MSE): %.2f" % np.mean((test_y_pred - test_y) ** 2)) print("R2-score: %.2f" % r2_score(test_y_pred , test_y) ) # - # Finding the best model d = {'Model': ['Multiple Linear Regression','Polynomial Regression','Regression Tree'], 'MAE': [0.05,0.06,0.15], 'MSE': [0.00,0.01,0.03],'R2-Score':[0.66,0.31,0.50]} df_bestmodel = pd.DataFrame(data=d) df_bestmodel df_bestmodel.set_index('Model') # As we can see from the above dataframe all three models are decent models to predict the chance of admit. But the best model amongst the 3 models is Multiple linear regression model since it has lowest mean absolute error as well as lowest mean squared error and the highest R2-Score. 
Polynomial regression achieves a lower mean absolute error and a lower mean squared error than the regression tree, but also a lower R2-score. The regression tree, in turn, has a higher mean absolute error, a higher mean squared error, and a lower R2-score than multiple linear regression, so it is not the best of the three. After comparing all three models, we therefore concluded that multiple linear regression is the best model. We also found that using all of the variables does not improve prediction accuracy: the fit was better when we used only GRE Score, TOEFL Score, and CGPA.
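# As a final technical note: the 80/20 split used throughout was drawn with a random mask, so the exact train/test rows change from run to run. A sketch of a reproducible alternative with scikit-learn is shown below; it assumes the same `sdf` feature frame used above.

# +
# Reproducible 80/20 split as an alternative to the np.random.rand() mask.
from sklearn.model_selection import train_test_split

train_df, test_df = train_test_split(sdf, test_size=0.2, random_state=42)
print(len(train_df), "training rows,", len(test_df), "test rows")
# -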
_Graduate Student Admission.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from neural_net import train, load, predict train() # + import numpy as np net = load() test = np.array([[0, 2, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 3], [0, 0, 0, 4, 0]]) predict(net, test) # - from utils import generate_array from graphics import display_grid from grid import Grid import numpy as np train_xs = np.load("grids_data.npy") train_ys = np.load("actions_data.npy") print(train_xs.shape, train_ys.shape) action_dict = {(0, 0):0, (-1, 0):1, (0, 1):2, (1, 0):3, (0, -1):4} C = int(np.max(train_xs))+1 print(C) import torch import torch.nn as nn import torch.nn.functional as F # + class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(C, 10, 5, padding=2) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(10, 30, 3, padding=1) self.fc1 = nn.Linear(30 * 1 * 1, 5) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 30 * 1 * 1) x = self.fc1(x) return x net = Net() # + import torch.optim as optim criterion = nn.CrossEntropyLoss() optimizer = optim.Adam(net.parameters(),lr=0.01)#optim.SGD(net.parameters(), lr=0.1, momentum=0.9) # input shape b x c x h x w to net # so need to unsqueeze to make channel dimension of 1, could also make each agent it's own channel instead B, H, W = train_xs.shape train_xs = torch.from_numpy(train_xs).unsqueeze(1).to(torch.long) onehot_train_xs = torch.zeros([B, C, H, W], dtype = torch.float32) print(train_xs.shape, onehot_train_xs.shape) onehot_train_xs.scatter_(1, train_xs, torch.ones(onehot_train_xs.shape)) #print(onehot_train_xs[0]) train_ys = torch.from_numpy(train_ys).to(torch.long) # - for epoch in range(1000): # loop over the dataset multiple times running_loss = 0.0 inputs, labels = onehot_train_xs, train_ys # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs = net(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # print statistics running_loss += loss.item() if epoch%30==0: print('Epoch:{}, loss: {:.3f}'.format(epoch + 1, running_loss)) running_loss = 0.0 # + def predict(input_state, C=5): ''' input_state: HxW(5x5) numpy array returns 1x5(num actions) numpy array of pre-softmax probabilities for taking an action ''' H, W = input_state.shape onehot_test_xs = torch.zeros([1, C, H, W]) test_x = torch.from_numpy(input_state).unsqueeze(0).unsqueeze(1).to(torch.long) onehot_test_xs.scatter_(1, test_x, torch.ones(onehot_test_xs.shape)) outputs = net(onehot_test_xs) return outputs.detach().numpy() test = np.array([[0, 2, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 3], [0, 0, 0, 4, 0]]) predict(test) # -
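# To make the raw network output easier to read, the cell below maps the pre-softmax scores returned by predict() back to a grid move via the action_dict defined above; the softmax is applied here only to report a probability, since argmax on the raw scores picks the same action.

# +
# Decode the network output into a move using action_dict from above.
import numpy as np

logits = predict(test)                      # shape (1, 5), pre-softmax scores
probs = np.exp(logits - logits.max())
probs = probs / probs.sum()                 # softmax over the 5 actions

index_to_action = {v: k for k, v in action_dict.items()}
best = int(np.argmax(logits))
print("predicted move:", index_to_action[best],
      "with probability {:.2f}".format(probs[0, best]))
# -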
investigation_notebooks/train_nn_tuomas.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python2 # --- import gpflow import numpy as np import matplotlib.pyplot as plt import seaborn as sns #sns.set_context('talk', font_scale= 1.5) sns.set_style("white") sns.set_context("paper") # %matplotlib inline import sys sys.path.append('../') from GPHetero import hetero_kernels, hetero_likelihoods, hetero_gpmc class Ex1Func(object): def __init__(self, sigma=lambda x: 0.5): self.sigma = sigma def __call__(self, x): if x[0] < 0.5: return x[0]**2. + self.sigma(x[0]) * np.random.randn() if x[0] == 0.5: return 1. if x[0] > 0.5: return 2. - (x[0]-0.5) ** 2. from pyDOE import * dim = 1 n = 6 noise=0 sigma = eval('lambda x: ' + str(noise)) objective = Ex1Func(sigma=sigma) X = lhs(dim, n , criterion='center') Y = np.array([objective(x) for x in X])[:, None] Xnorm = (X - 0.5) / 0.5 fig, ax = plt.subplots() ax.plot(X, Y, 'x', markeredgewidth=2, color='black') ax.set_xlabel('$x$', fontsize=16) ax.set_ylabel('$f(x)$', fontsize=16) plt.xticks(fontsize=16) plt.yticks(fontsize=16) #build the model k = gpflow.kernels.RBF(input_dim=1) k.lengthscales.prior = gpflow.priors.Gamma(1, 1) noisekern = gpflow.kernels.RBF(input_dim=1) nonstat = hetero_kernels.NonStationaryLengthscaleRBF() mean_func = gpflow.mean_functions.Constant(0) m = hetero_gpmc.GPMCAdaptiveLengthscaleMultDim(Xnorm, Y, k, nonstat, mean_func) for i in xrange(dim): print i m.kerns["ell" + str(i)].lengthscales.prior = gpflow.priors.Gamma(1., 1.) m.kerns["ell" + str(i)].variance.prior = gpflow.priors.Gamma(1., 1.) m.mean_funcs["ell" + str(i)].c = 0. m.mean_funcs["ell" + str(i)].c.fixed = True m.nonstat.signal_variance.prior = gpflow.priors.Gamma(1., 1.) 
m.likelihood.variance = 1e-6 m.likelihood.variance.fixed = True m.optimize(maxiter=50000) # start near MAP mcmc_samples = 1000 samples = m.sample(mcmc_samples, verbose=True, epsilon=0.0005, thin = 2, burn = 1000, Lmax = 20) xnew = np.linspace(0,1,50)[:,None] xx = (xnew - 0.5)/0.5 sample_df = m.get_samples_df(samples) sample_df sample_df = m.get_samples_df(samples) y_pos = [] num_post_samp = 1000 xnew = np.linspace(0, 1, 100)[:,None] xx = (xnew - 0.5) / 0.5 mean_f_mat = np.zeros(shape=(sample_df.shape[0], xx.shape[0])) mean_l_mat = np.zeros(shape=(sample_df.shape[0], xx.shape[0])) var_f_mat = np.zeros(shape=(sample_df.shape[0], xx.shape[0])) for i, s in sample_df.iterrows(): m.set_parameter_dict(s) mean_f, var_f = m.predict(xx) mean_f_mat[i, :] = mean_f[:, 0] var_f_mat[i, :] = np.diag(var_f) mean_l, var_l = m.predict_l(xx) mean_l_mat[i, :] = np.exp(mean_l[0][:, 0]) y_pos.append((m.posterior_samples(xx, num_post_samp))) std_f_mat = np.sqrt(var_f_mat) Lowerf = mean_f_mat - 2 * std_f_mat Upperf = mean_f_mat + 2 * std_f_mat y_pos = np.vstack(y_pos) y_m = np.percentile(y_pos, 50, axis=0) y_l = np.percentile(y_pos, 2.5, axis=0) y_u = np.percentile(y_pos, 97.5, axis=0) Y_true = np.array([objective(x) for x in xnew])[:, None] import seaborn as sns sns.set_style("white") sns.set_context("paper") plt.fill_between(xnew[:, 0], y_l, y_u, color=sns.color_palette()[1], alpha=0.25, zorder=3) plt.plot(xnew, y_m, color = sns.color_palette()[1]) plt.scatter(X, Y, marker='X', s=80, color='black') plt.plot(xnew, Y_true, '.', color = sns.color_palette()[0]) plt.xticks(fontsize=16) plt.yticks(fontsize=16) plt.xlabel('$x$', fontsize=16) plt.ylabel('$f(x)$', fontsize=16) def visualize_l(Xgrid, M): """ Visualize 1D utility funciton values Xgrid : grid states Mgrid : mean of GP at those finite grid points Vargrid : variance of GP at those finite grid points """ line, = plt.plot(Xgrid, M, lw = 2, color = sns.color_palette()[1], alpha = 0.25) plt.xlabel('$x$', fontsize=16) plt.ylabel('$ell(x)$', fontsize=16) plt.xticks(fontsize=16) plt.yticks(fontsize=16) return mini = 0 maxi = 5 for i in xrange(mini,maxi): visualize_l(xnew, mean_l_mat[i,:]) def visualize(Xgrid, Mgrid, Vargrid): """ Visualize 1D utility funciton values Xgrid : grid states Mgrid : mean of GP at those finite grid points Vargrid : variance of GP at those finite grid points """ signal_std = np.sqrt(Vargrid) Stdgrid = np.sqrt(Vargrid) lower = Mgrid - 2*Stdgrid upper = Mgrid + 2*Stdgrid line, = plt.plot(Xgrid, Mgrid, lw = 2, color=sns.color_palette()[1], alpha = 0.5) plt.fill_between(Xgrid[:,0], lower, upper, color = sns.color_palette()[1], alpha = 0.05) plt.scatter(X, Y, marker='X', s=80, color='black') plt.plot(xnew, Y_true, '.', color = sns.color_palette()[0]) plt.xticks(fontsize=16) plt.yticks(fontsize=16) plt.xlabel('$x$', fontsize=16) plt.ylabel('$f(x)$', fontsize=16) return # + mini = 105 maxi = 109 for i in xrange(mini,maxi): visualize(xnew, mean_f_mat[i,:], var_f_mat[i,:]) # - mean_l_mat = np.zeros(shape = (samples.shape[0], xx.shape[0])) var_l_mat = np.zeros(shape = (samples.shape[0], xx.shape[0])) # + for i, s in sample_df.iterrows(): m.set_parameter_dict(s) mean1, v1 = m.predict_l(xx) var1 = v1[0][:,:] mean_l_mat[i,:] = mean1[0][:,0] var_l_mat[i,:] = np.diag(var1) std_l_mat = np.sqrt(var_l_mat) # - LowerlogL = mean_l_mat - 2*std_l_mat UpperlogL = mean_l_mat + 2*std_l_mat MeanL = np.exp(mean_l_mat) LowerL = np.exp(LowerlogL) UpperL = np.exp(UpperlogL) def visualize_l(Xgrid, M, L, U): """ Visualize 1D utility funciton values Xgrid : grid states 
M : mean of the lengthscale GP at the grid points L, U : lower / upper credible bounds of the lengthscale at the grid points """ line, = plt.plot(Xgrid, M, lw = 2, color = sns.color_palette()[1], alpha = 0.5) plt.fill_between(Xgrid[:,0], L, U, color = sns.color_palette()[1], alpha = 0.05) plt.xlabel('$x$', fontsize=16) plt.ylabel('$ell(x)$', fontsize=16) plt.xticks(fontsize=16) plt.yticks(fontsize=16) return mini = 105 maxi = 400 for i in xrange(mini,maxi): visualize_l(xnew, MeanL[i,:], LowerL[i,:], UpperL[i,:])
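# A quick sanity check of the fit (a minimal sketch that assumes the arrays `y_l`, `y_u` and `Y_true` from the cells above are still in scope): the fraction of true function values covered by the 95% posterior band.

# +
# Fraction of true function values that fall inside the 95% posterior band
inside = (Y_true[:, 0] >= y_l) & (Y_true[:, 0] <= y_u)
print("Empirical coverage of the 95% band: %.2f" % inside.mean())
# -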
notebooks/presentation_fully_bayesian_nonstat.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import matplotlib.pyplot as plt # %matplotlib inline import numpy as np x = np.linspace(0,5,11) y = x ** 2 x y # Functional plt.plot(x,y,'r-') plt.show() # Functional plt.plot(x,y,'r-') plt.xlabel('X Label') plt.ylabel('Y Label') plt.title('Title') # + plt.subplot(1,2,1) plt.plot(x,y,'r') plt.subplot(1,2,2) plt.plot(y,x,'b') # + # OO fig = plt.figure() axes = fig.add_axes([0.1,0.1,0.8,0.8]) axes.plot(x,y) axes.set_xlabel('X Label') axes.set_ylabel('Y Label') axes.set_title('Set Title') # + fig = plt.figure() axes1 = fig.add_axes([0.1,0.1,0.8,0.8]) axes2 = fig.add_axes([0.2,0.15,0.4,0.3]) axes1.plot(x,y) axes1.set_title('LARGER PLOT') axes2.plot(x,y) axes2.set_title('SMALLER PLOT') # - fig = plt.figure() axes1 = fig.add_axes([0.1,0.1,0.8,0.8]) axes1.plot(x,y)
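# OO: several axes at once with plt.subplots (a short extra sketch reusing the same x and y)

# +
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
axes[0].plot(x, y, 'r')
axes[0].set_title('x vs y')
axes[1].plot(y, x, 'b')
axes[1].set_title('y vs x')
fig.tight_layout()
# -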
Aula 42 - Matplotlib - Parte 1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Regularization # # Welcome to the second assignment of this week. Deep Learning models have so much flexibility and capacity that **overfitting can be a serious problem**, if the training dataset is not big enough. Sure it does well on the training set, but the learned network **doesn't generalize to new examples** that it has never seen! # # **You will learn to:** Use regularization in your deep learning models. # # Let's get started! # ## Table of Contents # - [1 - Packages](#1) # - [2 - Problem Statement](#2) # - [3 - Loading the Dataset](#3) # - [4 - Non-Regularized Model](#4) # - [5 - L2 Regularization](#5) # - [Exercise 1 - compute_cost_with_regularization](#ex-1) # - [Exercise 2 - backward_propagation_with_regularization](#ex-2) # - [6 - Dropout](#6) # - [6.1 - Forward Propagation with Dropout](#6-1) # - [Exercise 3 - forward_propagation_with_dropout](#ex-3) # - [6.2 - Backward Propagation with Dropout](#6-2) # - [Exercise 4 - backward_propagation_with_dropout](#ex-4) # - [7 - Conclusions](#7) # <a name='1'></a> # ## 1 - Packages # + # import packages import numpy as np import matplotlib.pyplot as plt import sklearn import sklearn.datasets import scipy.io from reg_utils import sigmoid, relu, plot_decision_boundary, initialize_parameters, load_2D_dataset, predict_dec from reg_utils import compute_cost, predict, forward_propagation, backward_propagation, update_parameters from testCases import * from public_tests import * # %matplotlib inline plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # %load_ext autoreload # %autoreload 2 # - # <a name='2'></a> # ## 2 - Problem Statement # You have just been hired as an AI expert by the French Football Corporation. They would like you to recommend positions where France's goal keeper should kick the ball so that the French team's players can then hit it with their head. # # <img src="images/field_kiank.png" style="width:600px;height:350px;"> # # <caption><center><font color='purple'><b>Figure 1</b>: Football field. The goal keeper kicks the ball in the air, the players of each team are fighting to hit the ball with their head </font></center></caption> # # # They give you the following 2D dataset from France's past 10 games. # <a name='3'></a> # ## 3 - Loading the Dataset train_X, train_Y, test_X, test_Y = load_2D_dataset() # Each dot corresponds to a position on the football field where a football player has hit the ball with his/her head after the French goal keeper has shot the ball from the left side of the football field. # - If the dot is blue, it means the French player managed to hit the ball with his/her head # - If the dot is red, it means the other team's player hit the ball with their head # # **Your goal**: Use a deep learning model to find the positions on the field where the goalkeeper should kick the ball. # **Analysis of the dataset**: This dataset is a little noisy, but it looks like a diagonal line separating the upper left half (blue) from the lower right half (red) would work well. # # You will first try a non-regularized model. Then you'll learn how to regularize it and decide which model you will choose to solve the French Football Corporation's problem. 
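# Before training, it is worth confirming the data layout the model below expects: features as rows and examples as columns. A quick check (a small sketch, assuming `load_2D_dataset()` returns arrays in this `(features, examples)` layout):

# +
print("train_X shape:", train_X.shape)   # expected: (2, number of training examples)
print("train_Y shape:", train_Y.shape)   # expected: (1, number of training examples)
print("fraction of blue (label 1) dots:", np.mean(train_Y))
# -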
# <a name='4'></a> # ## 4 - Non-Regularized Model # # You will use the following neural network (already implemented for you below). This model can be used: # - in *regularization mode* -- by setting the `lambd` input to a non-zero value. We use "`lambd`" instead of "`lambda`" because "`lambda`" is a reserved keyword in Python. # - in *dropout mode* -- by setting the `keep_prob` to a value less than one # # You will first try the model without any regularization. Then, you will implement: # - *L2 regularization* -- functions: "`compute_cost_with_regularization()`" and "`backward_propagation_with_regularization()`" # - *Dropout* -- functions: "`forward_propagation_with_dropout()`" and "`backward_propagation_with_dropout()`" # # In each part, you will run this model with the correct inputs so that it calls the functions you've implemented. Take a look at the code below to familiarize yourself with the model. def model(X, Y, learning_rate = 0.3, num_iterations = 30000, print_cost = True, lambd = 0, keep_prob = 1): """ Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID. Arguments: X -- input data, of shape (input size, number of examples) Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (output size, number of examples) learning_rate -- learning rate of the optimization num_iterations -- number of iterations of the optimization loop print_cost -- If True, print the cost every 10000 iterations lambd -- regularization hyperparameter, scalar keep_prob - probability of keeping a neuron active during drop-out, scalar. Returns: parameters -- parameters learned by the model. They can then be used to predict. """ grads = {} costs = [] # to keep track of the cost m = X.shape[1] # number of examples layers_dims = [X.shape[0], 20, 3, 1] # Initialize parameters dictionary. parameters = initialize_parameters(layers_dims) # Loop (gradient descent) for i in range(0, num_iterations): # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID. if keep_prob == 1: a3, cache = forward_propagation(X, parameters) elif keep_prob < 1: a3, cache = forward_propagation_with_dropout(X, parameters, keep_prob) # Cost function if lambd == 0: cost = compute_cost(a3, Y) else: cost = compute_cost_with_regularization(a3, Y, parameters, lambd) # Backward propagation. assert (lambd == 0 or keep_prob == 1) # it is possible to use both L2 regularization and dropout, # but this assignment will only explore one at a time if lambd == 0 and keep_prob == 1: grads = backward_propagation(X, Y, cache) elif lambd != 0: grads = backward_propagation_with_regularization(X, Y, cache, lambd) elif keep_prob < 1: grads = backward_propagation_with_dropout(X, Y, cache, keep_prob) # Update parameters. parameters = update_parameters(parameters, grads, learning_rate) # Print the loss every 10000 iterations if print_cost and i % 10000 == 0: print("Cost after iteration {}: {}".format(i, cost)) if print_cost and i % 1000 == 0: costs.append(cost) # plot the cost plt.plot(costs) plt.ylabel('cost') plt.xlabel('iterations (x1,000)') plt.title("Learning rate =" + str(learning_rate)) plt.show() return parameters # Let's train the model without any regularization, and observe the accuracy on the train/test sets. parameters = model(train_X, train_Y) print ("On the training set:") predictions_train = predict(train_X, train_Y, parameters) print ("On the test set:") predictions_test = predict(test_X, test_Y, parameters) # The train accuracy is 94.8% while the test accuracy is 91.5%. 
This is the **baseline model** (you will observe the impact of regularization on this model). Run the following code to plot the decision boundary of your model. plt.title("Model without regularization") axes = plt.gca() axes.set_xlim([-0.75,0.40]) axes.set_ylim([-0.75,0.65]) plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y) # The non-regularized model is obviously overfitting the training set. It is fitting the noisy points! Lets now look at two techniques to reduce overfitting. # <a name='5'></a> # ## 5 - L2 Regularization # # The standard way to avoid overfitting is called **L2 regularization**. It consists of appropriately modifying your cost function, from: # $$J = -\frac{1}{m} \sum\limits_{i = 1}^{m} \large{(}\small y^{(i)}\log\left(a^{[L](i)}\right) + (1-y^{(i)})\log\left(1- a^{[L](i)}\right) \large{)} \tag{1}$$ # To: # $$J_{regularized} = \small \underbrace{-\frac{1}{m} \sum\limits_{i = 1}^{m} \large{(}\small y^{(i)}\log\left(a^{[L](i)}\right) + (1-y^{(i)})\log\left(1- a^{[L](i)}\right) \large{)} }_\text{cross-entropy cost} + \underbrace{\frac{1}{m} \frac{\lambda}{2} \sum\limits_l\sum\limits_k\sum\limits_j W_{k,j}^{[l]2} }_\text{L2 regularization cost} \tag{2}$$ # # Let's modify your cost and observe the consequences. # # <a name='ex-1'></a> # ### Exercise 1 - compute_cost_with_regularization # Implement `compute_cost_with_regularization()` which computes the cost given by formula (2). To calculate $\sum\limits_k\sum\limits_j W_{k,j}^{[l]2}$ , use : # ```python # np.sum(np.square(Wl)) # ``` # Note that you have to do this for $W^{[1]}$, $W^{[2]}$ and $W^{[3]}$, then sum the three terms and multiply by $ \frac{1}{m} \frac{\lambda}{2} $. # + deletable=false nbgrader={"cell_type": "code", "checksum": "88e54417c158ef5260e3107ab846463e", "grade": false, "grade_id": "cell-02a896d283f479aa", "locked": false, "schema_version": 3, "solution": true, "task": false} # GRADED FUNCTION: compute_cost_with_regularization def compute_cost_with_regularization(A3, Y, parameters, lambd): """ Implement the cost function with L2 regularization. See formula (2) above. Arguments: A3 -- post-activation, output of forward propagation, of shape (output size, number of examples) Y -- "true" labels vector, of shape (output size, number of examples) parameters -- python dictionary containing parameters of the model Returns: cost - value of the regularized loss function (formula (2)) """ m = Y.shape[1] W1 = parameters["W1"] W2 = parameters["W2"] W3 = parameters["W3"] cross_entropy_cost = compute_cost(A3, Y) # This gives you the cross-entropy part of the cost #(≈ 1 lines of code) # L2_regularization_cost = # YOUR CODE STARTS HERE L2_regularization_cost = lambd/(2*m)*(np.sum(np.square(W1))+np.sum(np.square(W2))+np.sum(np.square(W3))) # YOUR CODE ENDS HERE cost = cross_entropy_cost + L2_regularization_cost return cost # + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "c8efc715a4d6127a214a1b9f97e9f4cb", "grade": true, "grade_id": "cell-8a99b24d8ecfe0c3", "locked": true, "points": 10, "schema_version": 3, "solution": false, "task": false} A3, t_Y, parameters = compute_cost_with_regularization_test_case() cost = compute_cost_with_regularization(A3, t_Y, parameters, lambd=0.1) print("cost = " + str(cost)) compute_cost_with_regularization_test(compute_cost_with_regularization) # - # Of course, because you changed the cost, you have to change backward propagation as well! All the gradients have to be computed with respect to this new cost. 
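# As a quick standalone check of the regularization term (a toy sketch on small random matrices, not the model's actual parameters), `np.sum(np.square(W))` is just the double sum $\sum_k\sum_j W_{k,j}^2$:

# +
np.random.seed(3)
toy_W = [np.random.randn(2, 3), np.random.randn(3, 2)]     # stand-ins for W1, W2, ...
toy_lambd, toy_m = 0.7, 5
vectorized = toy_lambd / (2 * toy_m) * sum(np.sum(np.square(W)) for W in toy_W)
looped = toy_lambd / (2 * toy_m) * sum(W[k, j] ** 2 for W in toy_W
                                       for k in range(W.shape[0])
                                       for j in range(W.shape[1]))
print(vectorized, looped)   # the two numbers should agree
# -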
# # <a name='ex-2'></a> # ### Exercise 2 - backward_propagation_with_regularization # Implement the changes needed in backward propagation to take into account regularization. The changes only concern dW1, dW2 and dW3. For each, you have to add the regularization term's gradient ($\frac{d}{dW} ( \frac{1}{2}\frac{\lambda}{m} W^2) = \frac{\lambda}{m} W$). # + deletable=false nbgrader={"cell_type": "code", "checksum": "eb2dfa385aa47fe2e2edf5c6821618e6", "grade": false, "grade_id": "cell-c6f6ed3630e04d4b", "locked": false, "schema_version": 3, "solution": true, "task": false} # GRADED FUNCTION: backward_propagation_with_regularization def backward_propagation_with_regularization(X, Y, cache, lambd): """ Implements the backward propagation of our baseline model to which we added an L2 regularization. Arguments: X -- input dataset, of shape (input size, number of examples) Y -- "true" labels vector, of shape (output size, number of examples) cache -- cache output from forward_propagation() lambd -- regularization hyperparameter, scalar Returns: gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables """ m = X.shape[1] (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache dZ3 = A3 - Y #(≈ 1 lines of code) # dW3 = 1./m * np.dot(dZ3, A2.T) + None # YOUR CODE STARTS HERE dW3 = 1. / m * np.dot(dZ3, A2.T) + (lambd / m) * W3 # YOUR CODE ENDS HERE db3 = 1. / m * np.sum(dZ3, axis=1, keepdims=True) dA2 = np.dot(W3.T, dZ3) dZ2 = np.multiply(dA2, np.int64(A2 > 0)) #(≈ 1 lines of code) # dW2 = 1./m * np.dot(dZ2, A1.T) + None # YOUR CODE STARTS HERE dW2 = 1. / m * np.dot(dZ2, A1.T) + (lambd / m) * W2 # YOUR CODE ENDS HERE db2 = 1. / m * np.sum(dZ2, axis=1, keepdims=True) dA1 = np.dot(W2.T, dZ2) dZ1 = np.multiply(dA1, np.int64(A1 > 0)) #(≈ 1 lines of code) # dW1 = 1./m * np.dot(dZ1, X.T) + None # YOUR CODE STARTS HERE dW1 = 1. / m * np.dot(dZ1, X.T) + (lambd / m) * W1 # YOUR CODE ENDS HERE db1 = 1. / m * np.sum(dZ1, axis=1, keepdims=True) gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,"dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1} return gradients # + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "bd8e0024ad54c2facd2fb8e11d21d2a0", "grade": true, "grade_id": "cell-9826510f7bfdd0f8", "locked": true, "points": 10, "schema_version": 3, "solution": false, "task": false} t_X, t_Y, cache = backward_propagation_with_regularization_test_case() grads = backward_propagation_with_regularization(t_X, t_Y, cache, lambd = 0.7) print ("dW1 = \n"+ str(grads["dW1"])) print ("dW2 = \n"+ str(grads["dW2"])) print ("dW3 = \n"+ str(grads["dW3"])) backward_propagation_with_regularization_test(backward_propagation_with_regularization) # - # Let's now run the model with L2 regularization $(\lambda = 0.7)$. The `model()` function will call: # - `compute_cost_with_regularization` instead of `compute_cost` # - `backward_propagation_with_regularization` instead of `backward_propagation` parameters = model(train_X, train_Y, lambd = 0.7) print ("On the train set:") predictions_train = predict(train_X, train_Y, parameters) print ("On the test set:") predictions_test = predict(test_X, test_Y, parameters) # Congrats, the test set accuracy increased to 93%. You have saved the French football team! # # You are not overfitting the training data anymore. Let's plot the decision boundary. 
plt.title("Model with L2-regularization") axes = plt.gca() axes.set_xlim([-0.75,0.40]) axes.set_ylim([-0.75,0.65]) plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y) # **Observations**: # - The value of $\lambda$ is a hyperparameter that you can tune using a dev set. # - L2 regularization makes your decision boundary smoother. If $\lambda$ is too large, it is also possible to "oversmooth", resulting in a model with high bias. # # **What is L2-regularization actually doing?**: # # L2-regularization relies on the assumption that a model with small weights is simpler than a model with large weights. Thus, by penalizing the square values of the weights in the cost function you drive all the weights to smaller values. It becomes too costly for the cost to have large weights! This leads to a smoother model in which the output changes more slowly as the input changes. # # <br> # <font color='blue'> # # **What you should remember:** the implications of L2-regularization on: # - The cost computation: # - A regularization term is added to the cost. # - The backpropagation function: # - There are extra terms in the gradients with respect to weight matrices. # - Weights end up smaller ("weight decay"): # - Weights are pushed to smaller values. # <a name='6'></a> # ## 6 - Dropout # # Finally, **dropout** is a widely used regularization technique that is specific to deep learning. # **It randomly shuts down some neurons in each iteration.** Watch these two videos to see what this means! # # <!-- # To understand drop-out, consider this conversation with a friend: # - Friend: "Why do you need all these neurons to train your network and classify images?". # - You: "Because each neuron contains a weight and can learn specific features/details/shape of an image. The more neurons I have, the more featurse my model learns!" # - Friend: "I see, but are you sure that your neurons are learning different features and not all the same features?" # - You: "Good point... Neurons in the same layer actually don't talk to each other. It should be definitly possible that they learn the same image features/shapes/forms/details... which would be redundant. There should be a solution." # !--> # # # <center> # <video width="620" height="440" src="images/dropout1_kiank.mp4" type="video/mp4" controls> # </video> # </center> # <br> # <caption><center><font color='purple'><b>Figure 2 </b>: <b>Drop-out on the second hidden layer.</b> <br> At each iteration, you shut down (= set to zero) each neuron of a layer with probability $1 - keep\_prob$ or keep it with probability $keep\_prob$ (50% here). The dropped neurons don't contribute to the training in both the forward and backward propagations of the iteration. </font></center></caption> # # <center> # <video width="620" height="440" src="images/dropout2_kiank.mp4" type="video/mp4" controls> # </video> # </center> # # <caption><center><font color='purple'><b>Figure 3</b>:<b> Drop-out on the first and third hidden layers. </b><br> $1^{st}$ layer: we shut down on average 40% of the neurons. $3^{rd}$ layer: we shut down on average 20% of the neurons. </font></center></caption> # # # When you shut some neurons down, you actually modify your model. The idea behind drop-out is that at each iteration, you train a different model that uses only a subset of your neurons. With dropout, your neurons thus become less sensitive to the activation of one other specific neuron, because that other neuron might be shut down at any time. 
# # <a name='6-1'></a> # ### 6.1 - Forward Propagation with Dropout # # <a name='ex-3'></a> # ### Exercise 3 - forward_propagation_with_dropout # # Implement the forward propagation with dropout. You are using a 3 layer neural network, and will add dropout to the first and second hidden layers. We will not apply dropout to the input layer or output layer. # # **Instructions**: # You would like to shut down some neurons in the first and second layers. To do that, you are going to carry out 4 Steps: # 1. In lecture, we dicussed creating a variable $d^{[1]}$ with the same shape as $a^{[1]}$ using `np.random.rand()` to randomly get numbers between 0 and 1. Here, you will use a vectorized implementation, so create a random matrix $D^{[1]} = [d^{[1](1)} d^{[1](2)} ... d^{[1](m)}] $ of the same dimension as $A^{[1]}$. # 2. Set each entry of $D^{[1]}$ to be 1 with probability (`keep_prob`), and 0 otherwise. # # **Hint:** Let's say that keep_prob = 0.8, which means that we want to keep about 80% of the neurons and drop out about 20% of them. We want to generate a vector that has 1's and 0's, where about 80% of them are 1 and about 20% are 0. # This python statement: # `X = (X < keep_prob).astype(int)` # # is conceptually the same as this if-else statement (for the simple case of a one-dimensional array) : # # ``` # for i,v in enumerate(x): # if v < keep_prob: # x[i] = 1 # else: # v >= keep_prob # x[i] = 0 # ``` # Note that the `X = (X < keep_prob).astype(int)` works with multi-dimensional arrays, and the resulting output preserves the dimensions of the input array. # # Also note that without using `.astype(int)`, the result is an array of booleans `True` and `False`, which Python automatically converts to 1 and 0 if we multiply it with numbers. (However, it's better practice to convert data into the data type that we intend, so try using `.astype(int)`.) # # 3. Set $A^{[1]}$ to $A^{[1]} * D^{[1]}$. (You are shutting down some neurons). You can think of $D^{[1]}$ as a mask, so that when it is multiplied with another matrix, it shuts down some of the values. # 4. Divide $A^{[1]}$ by `keep_prob`. By doing this you are assuring that the result of the cost will still have the same expected value as without drop-out. (This technique is also called inverted dropout.) # + deletable=false nbgrader={"cell_type": "code", "checksum": "249ddfb0abac7c799948d3e600db7a4c", "grade": false, "grade_id": "cell-a81658747a0683be", "locked": false, "schema_version": 3, "solution": true, "task": false} # GRADED FUNCTION: forward_propagation_with_dropout def forward_propagation_with_dropout(X, parameters, keep_prob = 0.5): """ Implements the forward propagation: LINEAR -> RELU + DROPOUT -> LINEAR -> RELU + DROPOUT -> LINEAR -> SIGMOID. 
Arguments: X -- input dataset, of shape (2, number of examples) parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3": W1 -- weight matrix of shape (20, 2) b1 -- bias vector of shape (20, 1) W2 -- weight matrix of shape (3, 20) b2 -- bias vector of shape (3, 1) W3 -- weight matrix of shape (1, 3) b3 -- bias vector of shape (1, 1) keep_prob - probability of keeping a neuron active during drop-out, scalar Returns: A3 -- last activation value, output of the forward propagation, of shape (1,1) cache -- tuple, information stored for computing the backward propagation """ np.random.seed(1) # retrieve parameters W1 = parameters["W1"] b1 = parameters["b1"] W2 = parameters["W2"] b2 = parameters["b2"] W3 = parameters["W3"] b3 = parameters["b3"] # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID Z1 = np.dot(W1, X) + b1 A1 = relu(Z1) #(≈ 4 lines of code) # Steps 1-4 below correspond to the Steps 1-4 described above. # D1 = # Step 1: initialize matrix D1 = np.random.rand(..., ...) # D1 = # Step 2: convert entries of D1 to 0 or 1 (using keep_prob as the threshold) # A1 = # Step 3: shut down some neurons of A1 # A1 = # Step 4: scale the value of neurons that haven't been shut down # YOUR CODE STARTS HERE D1 = np.random.rand(A1.shape[0], A1.shape[1]) # Step 1: initialize matrix D1 = np.random.rand(..., ...) D1 = (D1 < keep_prob) # Step 2: convert entries of D1 to 0 or 1 (using keep_prob as the threshold) A1 = A1 * D1 # Step 3: shut down some neurons of A1 A1 = A1 / keep_prob # Step 4: scale the value of neurons that haven't been shut down # YOUR CODE ENDS HERE Z2 = np.dot(W2, A1) + b2 A2 = relu(Z2) #(≈ 4 lines of code) # D2 = # Step 1: initialize matrix D2 = np.random.rand(..., ...) # D2 = # Step 2: convert entries of D2 to 0 or 1 (using keep_prob as the threshold) # A2 = # Step 3: shut down some neurons of A2 # A2 = # Step 4: scale the value of neurons that haven't been shut down # YOUR CODE STARTS HERE D2 = np.random.rand(A2.shape[0], A2.shape[1]) # Step 1: initialize matrix D2 = np.random.rand(..., ...) D2 = (D2 < keep_prob) # Step 2: convert entries of D2 to 0 or 1 (using keep_prob as the threshold) A2 = A2 * D2 # Step 3: shut down some neurons of A2 A2 = A2 / keep_prob # Step 4: scale the value of neurons that haven't been shut down # YOUR CODE ENDS HERE Z3 = np.dot(W3, A2) + b3 A3 = sigmoid(Z3) cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) return A3, cache # + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "caec66931ac05dbe474596e75f3a14cd", "grade": true, "grade_id": "cell-be6195c629f586bf", "locked": true, "points": 20, "schema_version": 3, "solution": false, "task": false} t_X, parameters = forward_propagation_with_dropout_test_case() A3, cache = forward_propagation_with_dropout(t_X, parameters, keep_prob=0.7) print ("A3 = " + str(A3)) forward_propagation_with_dropout_test(forward_propagation_with_dropout) # - # <a name='6-2'></a> # ### 6.2 - Backward Propagation with Dropout # # <a name='ex-4'></a> # ### Exercise 4 - backward_propagation_with_dropout # Implement the backward propagation with dropout. As before, you are training a 3 layer network. Add dropout to the first and second hidden layers, using the masks $D^{[1]}$ and $D^{[2]}$ stored in the cache. # # **Instruction**: # Backpropagation with dropout is actually quite easy. You will have to carry out 2 Steps: # 1. You had previously shut down some neurons during forward propagation, by applying a mask $D^{[1]}$ to `A1`. 
In backpropagation, you will have to shut down the same neurons, by reapplying the same mask $D^{[1]}$ to `dA1`. # 2. During forward propagation, you had divided `A1` by `keep_prob`. In backpropagation, you'll therefore have to divide `dA1` by `keep_prob` again (the calculus interpretation is that if $A^{[1]}$ is scaled by `keep_prob`, then its derivative $dA^{[1]}$ is also scaled by the same `keep_prob`). # # + deletable=false nbgrader={"cell_type": "code", "checksum": "ee4145889a9c078fcf6aef51aceb3ba9", "grade": false, "grade_id": "cell-5b97731b540b0b87", "locked": false, "schema_version": 3, "solution": true, "task": false} # GRADED FUNCTION: backward_propagation_with_dropout def backward_propagation_with_dropout(X, Y, cache, keep_prob): """ Implements the backward propagation of our baseline model to which we added dropout. Arguments: X -- input dataset, of shape (2, number of examples) Y -- "true" labels vector, of shape (output size, number of examples) cache -- cache output from forward_propagation_with_dropout() keep_prob - probability of keeping a neuron active during drop-out, scalar Returns: gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables """ m = X.shape[1] (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) = cache dZ3 = A3 - Y dW3 = 1./m * np.dot(dZ3, A2.T) db3 = 1./m * np.sum(dZ3, axis=1, keepdims=True) dA2 = np.dot(W3.T, dZ3) #(≈ 2 lines of code) # dA2 = # Step 1: Apply mask D2 to shut down the same neurons as during the forward propagation # dA2 = # Step 2: Scale the value of neurons that haven't been shut down # YOUR CODE STARTS HERE dA2 = dA2 * D2 # Step 1: Apply mask D2 to shut down the same neurons as during the forward propagation dA2 = dA2 / keep_prob # Step 2: Scale the value of neurons that haven't been shut down # YOUR CODE ENDS HERE dZ2 = np.multiply(dA2, np.int64(A2 > 0)) dW2 = 1./m * np.dot(dZ2, A1.T) db2 = 1./m * np.sum(dZ2, axis=1, keepdims=True) dA1 = np.dot(W2.T, dZ2) #(≈ 2 lines of code) # dA1 = # Step 1: Apply mask D1 to shut down the same neurons as during the forward propagation # dA1 = # Step 2: Scale the value of neurons that haven't been shut down # YOUR CODE STARTS HERE dA1 = dA1 * D1 # Step 1: Apply mask D1 to shut down the same neurons as during the forward propagation dA1 = dA1 / keep_prob # Step 2: Scale the value of neurons that haven't been shut down # YOUR CODE ENDS HERE dZ1 = np.multiply(dA1, np.int64(A1 > 0)) dW1 = 1./m * np.dot(dZ1, X.T) db1 = 1./m * np.sum(dZ1, axis=1, keepdims=True) gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,"dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1} return gradients # + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "c30bb7a9f59c7d421c8627d5d9252b29", "grade": true, "grade_id": "cell-958c189ce5b16569", "locked": true, "points": 10, "schema_version": 3, "solution": false, "task": false} t_X, t_Y, cache = backward_propagation_with_dropout_test_case() gradients = backward_propagation_with_dropout(t_X, t_Y, cache, keep_prob=0.8) print ("dA1 = \n" + str(gradients["dA1"])) print ("dA2 = \n" + str(gradients["dA2"])) backward_propagation_with_dropout_test(backward_propagation_with_dropout) # - # Let's now run the model with dropout (`keep_prob = 0.86`). It means at every iteration you shut down each neurons of layer 1 and 2 with 14% probability. The function `model()` will now call: # - `forward_propagation_with_dropout` instead of `forward_propagation`. 
# - `backward_propagation_with_dropout` instead of `backward_propagation`. # + parameters = model(train_X, train_Y, keep_prob = 0.86, learning_rate = 0.3) print ("On the train set:") predictions_train = predict(train_X, train_Y, parameters) print ("On the test set:") predictions_test = predict(test_X, test_Y, parameters) # - # Dropout works great! The test accuracy has increased again (to 95%)! Your model is not overfitting the training set and does a great job on the test set. The French football team will be forever grateful to you! # # Run the code below to plot the decision boundary. plt.title("Model with dropout") axes = plt.gca() axes.set_xlim([-0.75,0.40]) axes.set_ylim([-0.75,0.65]) plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y) # **Note**: # - A **common mistake** when using dropout is to use it both in training and testing. You should use dropout (randomly eliminate nodes) only in training. # - Deep learning frameworks like [tensorflow](https://www.tensorflow.org/api_docs/python/tf/nn/dropout), [PaddlePaddle](http://doc.paddlepaddle.org/release_doc/0.9.0/doc/ui/api/trainer_config_helpers/attrs.html), [keras](https://keras.io/layers/core/#dropout) or [caffe](http://caffe.berkeleyvision.org/tutorial/layers/dropout.html) come with a dropout layer implementation. Don't stress - you will soon learn some of these frameworks. # # <font color='blue'> # # **What you should remember about dropout:** # - Dropout is a regularization technique. # - You only use dropout during training. Don't use dropout (randomly eliminate nodes) during test time. # - Apply dropout both during forward and backward propagation. # - During training time, divide each dropout layer by keep_prob to keep the same expected value for the activations. For example, if keep_prob is 0.5, then we will on average shut down half the nodes, so the output will be scaled by 0.5 since only the remaining half are contributing to the solution. Dividing by 0.5 is equivalent to multiplying by 2. Hence, the output now has the same expected value. You can check that this works even when keep_prob is other values than 0.5. # <a name='7'></a> # ## 7 - Conclusions # **Here are the results of our three models**: # # <table> # <tr> # <td> # <b>model</b> # </td> # <td> # <b>train accuracy</b> # </td> # <td> # <b>test accuracy</b> # </td> # </tr> # <td> # 3-layer NN without regularization # </td> # <td> # 95% # </td> # <td> # 91.5% # </td> # <tr> # <td> # 3-layer NN with L2-regularization # </td> # <td> # 94% # </td> # <td> # 93% # </td> # </tr> # <tr> # <td> # 3-layer NN with dropout # </td> # <td> # 93% # </td> # <td> # 95% # </td> # </tr> # </table> # Note that regularization hurts training set performance! This is because it limits the ability of the network to overfit to the training set. But since it ultimately gives better test accuracy, it is helping your system. # Congratulations for finishing this assignment! And also for revolutionizing French football. :-) # <font color='blue'> # # **What we want you to remember from this notebook**: # - Regularization will help you reduce overfitting. # - Regularization will drive your weights to lower values. # - L2 regularization and Dropout are two very effective regularization techniques.
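# As a final numerical aside, the note above that dividing by `keep_prob` keeps the expected activation value unchanged can be verified directly (a toy Monte Carlo sketch, independent of the model):

# +
np.random.seed(1)
toy_A = np.random.randn(3, 5) + 2.0
for kp in [0.5, 0.8, 0.95]:
    masked_means = [np.mean((toy_A * (np.random.rand(*toy_A.shape) < kp)) / kp)
                    for _ in range(10000)]
    print(kp, "true mean:", np.mean(toy_A), "average dropped-out mean:", np.mean(masked_means))
# -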
C02W01/1.2 Regularization/Regularization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"} # # Carving Unit Tests # # So far, we have always generated _system input_, i.e. data that the program as a whole obtains via its input channels. If we are interested in testing only a small set of functions, having to go through the system can be very inefficient. This chapter introduces a technique known as _carving_, which, given a system test, automatically extracts a set of _unit tests_ that replicate the calls seen during the unit test. The key idea is to _record_ such calls such that we can _replay_ them later – as a whole or selectively. On top, we also explore how to synthesize API grammars from carved unit tests; this means that we can _synthesize API tests without having to write a grammar at all._ # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} # **Prerequisites** # # * Carving makes use of dynamic traces of function calls and variables, as introduced in the [chapter on configuration fuzzing](ConfigurationFuzzer.ipynb). # * Using grammars to test units was introduced in the [chapter on API fuzzing](APIFuzzer.ipynb). # + slideshow={"slide_type": "skip"} import bookutils # + slideshow={"slide_type": "skip"} import APIFuzzer # + [markdown] slideshow={"slide_type": "slide"} # ## Synopsis # <!-- Automatically generated. Do not edit. --> # # To [use the code provided in this chapter](Importing.ipynb), write # # ```python # >>> from fuzzingbook.Carver import <identifier> # ``` # # and then make use of the following features. # # # This chapter provides means to _record and replay function calls_ during a system test. Since individual function calls are much faster than a whole system run, such "carving" mechanisms have the potential to run tests much faster. # # ### Recording Calls # # The `CallCarver` class records all calls occurring while it is active. It is used in conjunction with a `with` clause: # # ```python # >>> with CallCarver() as carver: # >>> y = my_sqrt(2) # >>> y = my_sqrt(4) # ``` # After execution, `called_functions()` lists the names of functions encountered: # # ```python # >>> carver.called_functions() # ['my_sqrt', '__exit__'] # ``` # The `arguments()` method lists the arguments recorded for a function. This is a mapping of the function name to a list of lists of arguments; each argument is a pair (parameter name, value). # # ```python # >>> carver.arguments('my_sqrt') # [[('x', 2)], [('x', 4)]] # ``` # Complex arguments are properly serialized, such that they can be easily restored. # # ### Synthesizing Calls # # While such recorded arguments already could be turned into arguments and calls, a much nicer alternative is to create a _grammar_ for recorded calls. This allows to synthesize arbitrary _combinations_ of arguments, and also offers a base for further customization of calls. # # The `CallGrammarMiner` class turns a list of carved executions into a grammar. 
# # ```python # >>> my_sqrt_miner = CallGrammarMiner(carver) # >>> my_sqrt_grammar = my_sqrt_miner.mine_call_grammar() # >>> my_sqrt_grammar # {'<start>': ['<call>'], # '<call>': ['<my_sqrt>'], # '<my_sqrt-x>': ['4', '2'], # '<my_sqrt>': ['my_sqrt(<my_sqrt-x>)']} # ``` # This grammar can be used to synthesize calls. # # ```python # >>> fuzzer = GrammarCoverageFuzzer(my_sqrt_grammar) # >>> fuzzer.fuzz() # 'my_sqrt(2)' # ``` # These calls can be executed in isolation, effectively extracting unit tests from system tests: # # ```python # >>> eval(fuzzer.fuzz()) # 2.0 # ``` # # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"} # ## System Tests vs Unit Tests # # Remember the URL grammar introduced for [grammar fuzzing](Grammars.ipynb)? With such a grammar, we can happily test a Web browser again and again, checking how it reacts to arbitrary page requests. # # Let us define a very simple "web browser" that goes and downloads the content given by the URL. # + slideshow={"slide_type": "skip"} import urllib.parse # + slideshow={"slide_type": "fragment"} def webbrowser(url): """Download the http/https resource given by the URL""" import requests # Only import if needed r = requests.get(url) return r.text # + [markdown] slideshow={"slide_type": "subslide"} # Let us apply this on [fuzzingbook.org](https://www.fuzzingbook.org/) and measure the time, using the [Timer class](Timer.ipynb): # + slideshow={"slide_type": "skip"} from Timer import Timer # + slideshow={"slide_type": "fragment"} with Timer() as webbrowser_timer: fuzzingbook_contents = webbrowser( "http://www.fuzzingbook.org/html/Fuzzer.html") print("Downloaded %d bytes in %.2f seconds" % (len(fuzzingbook_contents), webbrowser_timer.elapsed_time())) # + slideshow={"slide_type": "fragment"} fuzzingbook_contents[:100] # + [markdown] slideshow={"slide_type": "subslide"} # A full webbrowser, of course, would also render the HTML content. We can achieve this using these commands (but we don't, as we do not want to replicate the entire Web page here): # # # ```python # from IPython.display import HTML, display # HTML(fuzzingbook_contents) # ``` # + [markdown] slideshow={"slide_type": "subslide"} # Having to start a whole browser (or having it render a Web page) again and again means lots of overhead, though – in particular if we want to test only a subset of its functionality. In particular, after a change in the code, we would prefer to test only the subset of functions that is affected by the change, rather than running the well-tested functions again and again. # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"} # Let us assume we change the function that takes care of parsing the given URL and decomposing it into the individual elements – the scheme ("http"), the network location (`"www.fuzzingbook.com"`), or the path (`"/html/Fuzzer.html"`). This function is named `urlparse()`: # + slideshow={"slide_type": "skip"} from urllib.parse import urlparse # + slideshow={"slide_type": "fragment"} urlparse('https://www.fuzzingbook.com/html/Carver.html') # + [markdown] slideshow={"slide_type": "subslide"} # You see how the individual elements of the URL – the _scheme_ (`"http"`), the _network location_ (`"www.fuzzingbook.com"`), or the path (`"//html/Carver.html"`) are all properly identified. Other elements (like `params`, `query`, or `fragment`) are empty, because they were not part of our input. 
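# + [markdown]
# The result is a named tuple, so the components can also be read by name, and `geturl()` reassembles the URL – a quick illustration:

# +
parts = urlparse('https://www.fuzzingbook.com/html/Carver.html')
print(parts.scheme, parts.netloc, parts.path)
print(parts.geturl())
# -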
# + [markdown] slideshow={"slide_type": "fragment"} # The interesting thing is that executing only `urlparse()` is orders of magnitude faster than running all of `webbrowser()`. Let us measure the factor: # + slideshow={"slide_type": "subslide"} runs = 1000 with Timer() as urlparse_timer: for i in range(runs): urlparse('https://www.fuzzingbook.com/html/Carver.html') avg_urlparse_time = urlparse_timer.elapsed_time() / 1000 avg_urlparse_time # + [markdown] slideshow={"slide_type": "fragment"} # Compare this to the time required by the webbrowser # + slideshow={"slide_type": "fragment"} webbrowser_timer.elapsed_time() # + [markdown] slideshow={"slide_type": "fragment"} # The difference in time is huge: # + slideshow={"slide_type": "fragment"} webbrowser_timer.elapsed_time() / avg_urlparse_time # + [markdown] slideshow={"slide_type": "subslide"} # Hence, in the time it takes to run `webbrowser()` once, we can have _tens of thousands_ of executions of `urlparse()` – and this does not even take into account the time it takes the browser to render the downloaded HTML, to run the included scripts, and whatever else happens when a Web page is loaded. Hence, strategies that allow us to test at the _unit_ level are very promising as they can save lots of overhead. # + [markdown] slideshow={"slide_type": "slide"} # ## Carving Unit Tests # # Testing methods and functions at the unit level requires a very good understanding of the individual units to be tested as well as their interplay with other units. Setting up an appropriate infrastructure and writing unit tests by hand thus is demanding, yet rewarding. There is, however, an interesting alternative to writing unit tests by hand. The technique of _carving_ automatically _converts system tests into unit tests_ by means of recording and replaying function calls: # # 1. During a system test (given or generated), we _record_ all calls into a function, including all arguments and other variables the function reads. # 2. From these, we synthesize a self-contained _unit test_ that reconstructs the function call with all arguments. # 3. This unit test can be executed (replayed) at any time with high efficiency. # # In the remainder of this chapter, let us explore these steps. # + [markdown] button=false new_sheet=true run_control={"read_only": false} slideshow={"slide_type": "slide"} # ## Recording Calls # # Our first challenge is to record function calls together with their arguments. (In the interest of simplicity, we restrict ourself to arguments, ignoring any global variables or other non-arguments that are read by the function.) To record calls and arguments, we use the mechanism [we introduced for coverage](Coverage.ipynb): By setting up a tracer function, we track all calls into individual functions, also saving their arguments. 
Just like `Coverage` objects, we want to use `Carver` objects to be able to be used in conjunction with the `with` statement, such that we can trace a particular code block: # # ```python # with Carver() as carver: # function_to_be_traced() # c = carver.calls() # ``` # # The initial definition supports this construct: # + [markdown] slideshow={"slide_type": "subslide"} # \todo{Get tracker from [dynamic invariants](DynamicInvariants.ipynb)} # + slideshow={"slide_type": "skip"} import sys # + slideshow={"slide_type": "subslide"} class Carver(object): def __init__(self, log=False): self._log = log self.reset() def reset(self): self._calls = {} # Start of `with` block def __enter__(self): self.original_trace_function = sys.gettrace() sys.settrace(self.traceit) return self # End of `with` block def __exit__(self, exc_type, exc_value, tb): sys.settrace(self.original_trace_function) # + [markdown] slideshow={"slide_type": "subslide"} # The actual work takes place in the `traceit()` method, which records all calls in the `_calls` attribute. First, we define two helper functions: # + slideshow={"slide_type": "skip"} import inspect # + slideshow={"slide_type": "fragment"} def get_qualified_name(code): """Return the fully qualified name of the current function""" name = code.co_name module = inspect.getmodule(code) if module is not None: name = module.__name__ + "." + name return name # + slideshow={"slide_type": "subslide"} def get_arguments(frame): """Return call arguments in the given frame""" # When called, all arguments are local variables local_variables = frame.f_locals.copy() arguments = [(var, frame.f_locals[var]) for var in local_variables] arguments.reverse() # Want same order as call return arguments # + slideshow={"slide_type": "subslide"} class CallCarver(Carver): def add_call(self, function_name, arguments): """Add given call to list of calls""" if function_name not in self._calls: self._calls[function_name] = [] self._calls[function_name].append(arguments) # Tracking function: Record all calls and all args def traceit(self, frame, event, arg): if event != "call": return None code = frame.f_code function_name = code.co_name qualified_name = get_qualified_name(code) arguments = get_arguments(frame) self.add_call(function_name, arguments) if qualified_name != function_name: self.add_call(qualified_name, arguments) if self._log: print(simple_call_string(function_name, arguments)) return None # + [markdown] slideshow={"slide_type": "subslide"} # Finally, we need some convenience functions to access the calls: # + slideshow={"slide_type": "subslide"} class CallCarver(CallCarver): def calls(self): """Return a dictionary of all calls traced.""" return self._calls def arguments(self, function_name): """Return a list of all arguments of the given function as (VAR, VALUE) pairs. 
Raises an exception if the function was not traced.""" return self._calls[function_name] def called_functions(self, qualified=False): """Return all functions called.""" if qualified: return [function_name for function_name in self._calls.keys() if function_name.find('.') >= 0] else: return [function_name for function_name in self._calls.keys() if function_name.find('.') < 0] # + [markdown] slideshow={"slide_type": "subslide"} # ### Recording my_sqrt() # + [markdown] slideshow={"slide_type": "fragment"} # Let's try out our new `Carver` class – first on a very simple function: # + slideshow={"slide_type": "skip"} from Intro_Testing import my_sqrt # + slideshow={"slide_type": "fragment"} with CallCarver() as sqrt_carver: my_sqrt(2) my_sqrt(4) # + [markdown] slideshow={"slide_type": "fragment"} # We can retrieve all calls seen... # + slideshow={"slide_type": "fragment"} sqrt_carver.calls() # + slideshow={"slide_type": "subslide"} sqrt_carver.called_functions() # + [markdown] slideshow={"slide_type": "fragment"} # ... as well as the arguments of a particular function: # + slideshow={"slide_type": "fragment"} sqrt_carver.arguments("my_sqrt") # + [markdown] slideshow={"slide_type": "fragment"} # We define a convenience function for nicer printing of these lists: # + slideshow={"slide_type": "fragment"} def simple_call_string(function_name, argument_list): """Return function_name(arg[0], arg[1], ...) as a string""" return function_name + "(" + \ ", ".join([var + "=" + repr(value) for (var, value) in argument_list]) + ")" # + slideshow={"slide_type": "subslide"} for function_name in sqrt_carver.called_functions(): for argument_list in sqrt_carver.arguments(function_name): print(simple_call_string(function_name, argument_list)) # + [markdown] slideshow={"slide_type": "fragment"} # This is a syntax we can directly use to invoke `my_sqrt()` again: # + slideshow={"slide_type": "fragment"} eval("my_sqrt(x=2)") # + [markdown] slideshow={"slide_type": "subslide"} # ### Carving urlparse() # + [markdown] slideshow={"slide_type": "fragment"} # What happens if we apply this to `webbrowser()`? # + slideshow={"slide_type": "fragment"} with CallCarver() as webbrowser_carver: webbrowser("http://www.example.com") # + [markdown] slideshow={"slide_type": "fragment"} # We see that retrieving a URL from the Web requires quite some functionality: # + slideshow={"slide_type": "fragment"} function_list = webbrowser_carver.called_functions(qualified=True) len(function_list) # + slideshow={"slide_type": "fragment"} print(function_list[:50]) # + [markdown] slideshow={"slide_type": "fragment"} # Among several other functions, we also have a call to `urlparse()`: # + slideshow={"slide_type": "subslide"} urlparse_argument_list = webbrowser_carver.arguments("urllib.parse.urlparse") urlparse_argument_list # + [markdown] slideshow={"slide_type": "subslide"} # Again, we can convert this into a well-formatted call: # + slideshow={"slide_type": "fragment"} urlparse_call = simple_call_string("urlparse", urlparse_argument_list[0]) urlparse_call # + [markdown] slideshow={"slide_type": "fragment"} # Again, we can re-execute this call: # + slideshow={"slide_type": "fragment"} eval(urlparse_call) # + [markdown] slideshow={"slide_type": "fragment"} # We now have successfully carved the call to `urlparse()` out of the `webbrowser()` execution. 
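# + [markdown]
# Since the carved `urlparse()` calls only involve primitive argument values, we can already replay all of them in one go – in effect a small regression suite (a sketch reusing `webbrowser_carver` and `simple_call_string()` from above):

# +
for argument_list in webbrowser_carver.arguments("urllib.parse.urlparse"):
    call = simple_call_string("urlparse", argument_list)
    print(call, "->", eval(call).netloc)
# -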
# + [markdown] slideshow={"slide_type": "slide"} # ## Replaying Calls # + [markdown] slideshow={"slide_type": "subslide"} # Replaying calls in their entirety and in all generality is tricky, as there are several challenges to be addressed. These include: # # 1. We need to be able to _access_ individual functions. If we access a function by name, the name must be in scope. If the name is not visible (for instance, because it is a name internal to the module), we must make it visible. # # 2. Any _resources_ accessed outside of arguments must be recorded and reconstructed for replay as well. This can be difficult if variables refer to external resources such as files or network resources. # # 3. _Complex objects_ must be reconstructed as well. # + [markdown] slideshow={"slide_type": "subslide"} # These constraints make carving hard or even impossible if the function to be tested interacts heavily with its environment. To illustrate these issues, consider the `email.parser.parse()` method that is invoked in `webbrowser()`: # + slideshow={"slide_type": "fragment"} email_parse_argument_list = webbrowser_carver.arguments("email.parser.parse") # + [markdown] slideshow={"slide_type": "fragment"} # Calls to this method look like this: # + slideshow={"slide_type": "fragment"} email_parse_call = simple_call_string( "email.parser.Parser.parse", email_parse_argument_list[0]) email_parse_call # + [markdown] slideshow={"slide_type": "fragment"} # We see that `email.parser.Parser.parse()` is part of a `email.parser.Parser` object (`self`) and it gets a `StringIO` object (`fp`). Both are non-primitive values. How could we possibly reconstruct them? # + [markdown] slideshow={"slide_type": "subslide"} # ### Serializing Objects # # The answer to the problem of complex objects lies in creating a _persistent_ representation that can be _reconstructed_ at later points in time. This process is known as _serialization_; in Python, it is also known as _pickling_. The `pickle` module provides means to create a serialized representation of an object. Let us apply this on the `email.parser.Parser` object we just found: # + slideshow={"slide_type": "skip"} import pickle # + slideshow={"slide_type": "fragment"} email_parse_argument_list # + slideshow={"slide_type": "fragment"} parser_object = email_parse_argument_list[0][2][1] parser_object # + slideshow={"slide_type": "subslide"} pickled = pickle.dumps(parser_object) pickled # + [markdown] slideshow={"slide_type": "fragment"} # From this string representing the serialized `email.parser.Parser` object, we can recreate the Parser object at any time: # + slideshow={"slide_type": "fragment"} unpickled_parser_object = pickle.loads(pickled) unpickled_parser_object # + [markdown] slideshow={"slide_type": "fragment"} # The serialization mechanism allows us to produce a representation for all objects passed as parameters (assuming they can be pickled, that is). We can now extend the `simple_call_string()` function such that it automatically pickles objects. Additionally, we set it up such that if the first parameter is named `self` (i.e., it is a class method), we make it a method of the `self` object. # + slideshow={"slide_type": "subslide"} def call_value(value): value_as_string = repr(value) if value_as_string.find('<') >= 0: # Complex object value_as_string = "pickle.loads(" + repr(pickle.dumps(value)) + ")" return value_as_string # + slideshow={"slide_type": "subslide"} def call_string(function_name, argument_list): """Return function_name(arg[0], arg[1], ...) 
as a string, pickling complex objects""" if len(argument_list) > 0: (first_var, first_value) = argument_list[0] if first_var == "self": # Make this a method call method_name = function_name.split(".")[-1] function_name = call_value(first_value) + "." + method_name argument_list = argument_list[1:] return function_name + "(" + \ ", ".join([var + "=" + call_value(value) for (var, value) in argument_list]) + ")" # + [markdown] slideshow={"slide_type": "fragment"} # Let us apply the extended `call_string()` method to create a call for `email.parser.parse()`, including pickled objects: # + slideshow={"slide_type": "subslide"} call = call_string("email.parser.Parser.parse", email_parse_argument_list[0]) print(call) # + [markdown] slideshow={"slide_type": "fragment"} # With this call involving the pickled object, we can now re-run the original call and obtain a valid result: # + slideshow={"slide_type": "skip"} import email # + slideshow={"slide_type": "fragment"} eval(call) # + [markdown] slideshow={"slide_type": "subslide"} # ### All Calls # # So far, we have seen only one call of `webbrowser()`. How many of the calls within `webbrowser()` can we actually carve and replay? Let us try this out and compute the numbers. # + slideshow={"slide_type": "skip"} import traceback # + slideshow={"slide_type": "skip"} import enum import socket # + slideshow={"slide_type": "fragment"} all_functions = set(webbrowser_carver.called_functions(qualified=True)) call_success = set() run_success = set() # + slideshow={"slide_type": "subslide"} exceptions_seen = set() for function_name in webbrowser_carver.called_functions(qualified=True): for argument_list in webbrowser_carver.arguments(function_name): try: call = call_string(function_name, argument_list) call_success.add(function_name) result = eval(call) run_success.add(function_name) except Exception as exc: exceptions_seen.add(repr(exc)) # print("->", call, file=sys.stderr) # traceback.print_exc() # print("", file=sys.stderr) continue # + slideshow={"slide_type": "subslide"} print("%d/%d calls (%.2f%%) successfully created and %d/%d calls (%.2f%%) successfully ran" % ( len(call_success), len(all_functions), len( call_success) * 100 / len(all_functions), len(run_success), len(all_functions), len(run_success) * 100 / len(all_functions))) # + [markdown] slideshow={"slide_type": "fragment"} # About a quarter of the calls succeed. Let us take a look into some of the error messages we get: # + slideshow={"slide_type": "subslide"} for i in range(10): print(list(exceptions_seen)[i]) # + [markdown] slideshow={"slide_type": "subslide"} # We see that: # # * **A large majority of calls could be converted into call strings.** If this is not the case, this is mostly due to having unserialized objects being passed. # * **About a quarter of the calls could be executed.** The error messages for the failing runs are varied; the most frequent being that some internal name is invoked that is not in scope. # + [markdown] slideshow={"slide_type": "fragment"} # Our carving mechanism should be taken with a grain of salt: We still do not cover the situation where external variables and values (such as global variables) are being accessed, and the serialization mechanism cannot recreate external resources. Still, if the function of interest falls among those that _can_ be carved and replayed, we can very effectively re-run its calls with their original arguments. 
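# + [markdown]
# The serialization step is one of the main sources of failures: objects that wrap external resources or that have no importable name (such as lambdas or generators) cannot be pickled. A small illustration (the exact exception type depends on the object and the Python version):

# +
for obj in [lambda x: x * 2, (i for i in range(3))]:
    try:
        pickle.dumps(obj)
        print(type(obj).__name__, "can be pickled")
    except Exception as exc:
        print(type(obj).__name__, "cannot be pickled:", type(exc).__name__)
# -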
# + [markdown] slideshow={"slide_type": "slide"} # ## Mining API Grammars from Carved Calls # # So far, we have used carved calls to replay exactly the same invocations as originally encountered. However, we can also _mutate_ carved calls to effectively fuzz APIs with previously recorded arguments. # # The general idea is as follows: # # 1. First, we record all calls of a specific function from a given execution of the program. # 2. Second, we create a grammar that incorporates all these calls, with separate rules for each argument and alternatives for each value found; this allows us to produce calls that arbitrarily _recombine_ these arguments. # # Let us explore these steps in the following sections. # + [markdown] slideshow={"slide_type": "subslide"} # ### From Calls to Grammars # # Let us start with an example. The `power(x, y)` function returns $x^y$; it is but a wrapper around the equivalent `math.pow()` function. (Since `power()` is defined in Python, we can trace it – in contrast to `math.pow()`, which is implemented in C.) # + slideshow={"slide_type": "skip"} import math # + slideshow={"slide_type": "fragment"} def power(x, y): return math.pow(x, y) # + [markdown] slideshow={"slide_type": "fragment"} # Let us invoke `power()` while recording its arguments: # + slideshow={"slide_type": "fragment"} with CallCarver() as power_carver: z = power(1, 2) z = power(3, 4) # + slideshow={"slide_type": "fragment"} power_carver.arguments("power") # + [markdown] slideshow={"slide_type": "subslide"} # From this list of recorded arguments, we could now create a grammar for the `power()` call, with `x` and `y` expanding into the values seen: # + slideshow={"slide_type": "skip"} from Grammars import START_SYMBOL, is_valid_grammar, new_symbol, extend_grammar # + slideshow={"slide_type": "fragment"} POWER_GRAMMAR = { "<start>": ["power(<x>, <y>)"], "<x>": ["1", "3"], "<y>": ["2", "4"] } assert is_valid_grammar(POWER_GRAMMAR) # + [markdown] slideshow={"slide_type": "fragment"} # When fuzzing with this grammar, we then get arbitrary combinations of `x` and `y`; aiming for coverage will ensure that all values are actually tested at least once: # + slideshow={"slide_type": "skip"} from GrammarCoverageFuzzer import GrammarCoverageFuzzer # + slideshow={"slide_type": "subslide"} power_fuzzer = GrammarCoverageFuzzer(POWER_GRAMMAR) [power_fuzzer.fuzz() for i in range(5)] # + [markdown] slideshow={"slide_type": "fragment"} # What we need is a method to automatically convert the arguments as seen in `power_carver` to the grammar as seen in `POWER_GRAMMAR`. This is what we define in the next section. # + [markdown] slideshow={"slide_type": "subslide"} # ### A Grammar Miner for Calls # # We introduce a class `CallGrammarMiner`, which, given a `Carver`, automatically produces a grammar from the calls seen. To initialize, we pass the carver object: # + slideshow={"slide_type": "fragment"} class CallGrammarMiner(object): def __init__(self, carver, log=False): self.carver = carver self.log = log # + [markdown] slideshow={"slide_type": "subslide"} # #### Initial Grammar # # The initial grammar produces a single call. 
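# + [markdown]
# As a warm-up, the same conversion can be sketched by hand for the `power()` calls recorded above: per parameter name, collect the set of values seen and turn them into grammar alternatives (the general, automated version follows in the next section):

# +
mined_rules = {}
for argument_list in power_carver.arguments("power"):
    for (var, value) in argument_list:
        mined_rules.setdefault("<" + var + ">", set()).add(repr(value))

mined_power_grammar = {
    "<start>": ["power(<x>, <y>)"],
    "<x>": sorted(mined_rules["<x>"]),
    "<y>": sorted(mined_rules["<y>"]),
}
assert is_valid_grammar(mined_power_grammar)
mined_power_grammar
# -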
The possible `<call>` expansions are to be constructed later: # + slideshow={"slide_type": "skip"} import copy # + slideshow={"slide_type": "fragment"} class CallGrammarMiner(CallGrammarMiner): CALL_SYMBOL = "<call>" def initial_grammar(self): return extend_grammar( {START_SYMBOL: [self.CALL_SYMBOL], self.CALL_SYMBOL: [] }) # + slideshow={"slide_type": "subslide"} m = CallGrammarMiner(power_carver) initial_grammar = m.initial_grammar() initial_grammar # + [markdown] slideshow={"slide_type": "subslide"} # #### A Grammar from Arguments # # Let us start by creating a grammar from a list of arguments. The method `mine_arguments_grammar()` creates a grammar for the arguments seen during carving, such as these: # + slideshow={"slide_type": "fragment"} arguments = power_carver.arguments("power") arguments # + [markdown] slideshow={"slide_type": "fragment"} # The `mine_arguments_grammar()` method iterates through the variables seen and creates a mapping `variables` of variable names to a set of values seen (as strings, going through `call_value()`). In a second step, it then creates a grammar with a rule for each variable name, expanding into the values seen. # + slideshow={"slide_type": "subslide"} class CallGrammarMiner(CallGrammarMiner): def var_symbol(self, function_name, var, grammar): return new_symbol(grammar, "<" + function_name + "-" + var + ">") def mine_arguments_grammar(self, function_name, arguments, grammar): var_grammar = {} variables = {} for argument_list in arguments: for (var, value) in argument_list: value_string = call_value(value) if self.log: print(var, "=", value_string) if value_string.find("<") >= 0: var_grammar["<langle>"] = ["<"] value_string = value_string.replace("<", "<langle>") if var not in variables: variables[var] = set() variables[var].add(value_string) var_symbols = [] for var in variables: var_symbol = self.var_symbol(function_name, var, grammar) var_symbols.append(var_symbol) var_grammar[var_symbol] = list(variables[var]) return var_grammar, var_symbols # + slideshow={"slide_type": "subslide"} m = CallGrammarMiner(power_carver) var_grammar, var_symbols = m.mine_arguments_grammar( "power", arguments, initial_grammar) # + slideshow={"slide_type": "fragment"} var_grammar # + [markdown] slideshow={"slide_type": "fragment"} # The additional return value `var_symbols` is a list of argument symbols in the call: # + slideshow={"slide_type": "fragment"} var_symbols # + [markdown] slideshow={"slide_type": "subslide"} # #### A Grammar from Calls # # To get the grammar for a single function (`mine_function_grammar()`), we add a call to the function: # + slideshow={"slide_type": "subslide"} class CallGrammarMiner(CallGrammarMiner): def function_symbol(self, function_name, grammar): return new_symbol(grammar, "<" + function_name + ">") def mine_function_grammar(self, function_name, grammar): arguments = self.carver.arguments(function_name) if self.log: print(function_name, arguments) var_grammar, var_symbols = self.mine_arguments_grammar( function_name, arguments, grammar) function_grammar = var_grammar function_symbol = self.function_symbol(function_name, grammar) if len(var_symbols) > 0 and var_symbols[0].find("-self") >= 0: # Method call function_grammar[function_symbol] = [ var_symbols[0] + "." 
+ function_name + "(" + ", ".join(var_symbols[1:]) + ")"] else: function_grammar[function_symbol] = [ function_name + "(" + ", ".join(var_symbols) + ")"] if self.log: print(function_symbol, "::=", function_grammar[function_symbol]) return function_grammar, function_symbol # + slideshow={"slide_type": "subslide"} m = CallGrammarMiner(power_carver) function_grammar, function_symbol = m.mine_function_grammar( "power", initial_grammar) function_grammar # + [markdown] slideshow={"slide_type": "fragment"} # The additionally returned `function_symbol` holds the name of the function call just added: # + slideshow={"slide_type": "fragment"} function_symbol # + [markdown] slideshow={"slide_type": "subslide"} # #### A Grammar from all Calls # # Let us now repeat the above for all function calls seen during carving. To this end, we simply iterate over all function calls seen: # + slideshow={"slide_type": "fragment"} power_carver.called_functions() # + slideshow={"slide_type": "subslide"} class CallGrammarMiner(CallGrammarMiner): def mine_call_grammar(self, function_list=None, qualified=False): grammar = self.initial_grammar() fn_list = function_list if function_list is None: fn_list = self.carver.called_functions(qualified=qualified) for function_name in fn_list: if function_list is None and (function_name.startswith("_") or function_name.startswith("<")): continue # Internal function # Ignore errors with mined functions try: function_grammar, function_symbol = self.mine_function_grammar( function_name, grammar) except: if function_list is not None: raise if function_symbol not in grammar[self.CALL_SYMBOL]: grammar[self.CALL_SYMBOL].append(function_symbol) grammar.update(function_grammar) assert is_valid_grammar(grammar) return grammar # + [markdown] slideshow={"slide_type": "subslide"} # The method `mine_call_grammar()` is the one that clients can and should use – first for mining... # + slideshow={"slide_type": "fragment"} m = CallGrammarMiner(power_carver) power_grammar = m.mine_call_grammar() power_grammar # + [markdown] slideshow={"slide_type": "fragment"} # ...and then for fuzzing: # + slideshow={"slide_type": "fragment"} power_fuzzer = GrammarCoverageFuzzer(power_grammar) [power_fuzzer.fuzz() for i in range(5)] # + [markdown] slideshow={"slide_type": "subslide"} # With this, we have successfully extracted a grammar from a recorded execution; in contrast to "simple" carving, our grammar allows us to _recombine_ arguments and thus to fuzz at the API level. # + [markdown] slideshow={"slide_type": "slide"} # ## Fuzzing Web Functions # # Let us now apply our grammar miner on a larger API – the `urlparse()` function we already encountered during carving. 
# + slideshow={"slide_type": "fragment"} with CallCarver() as webbrowser_carver: webbrowser("https://www.fuzzing<EMAIL>") webbrowser("http://www.example.com") # + [markdown] slideshow={"slide_type": "fragment"} # We can mine a grammar from the calls encountered: # + slideshow={"slide_type": "fragment"} m = CallGrammarMiner(webbrowser_carver) webbrowser_grammar = m.mine_call_grammar() # + [markdown] slideshow={"slide_type": "fragment"} # This is a rather large grammar: # + slideshow={"slide_type": "fragment"} call_list = webbrowser_grammar['<call>'] len(call_list) # + slideshow={"slide_type": "subslide"} print(call_list[:20]) # + [markdown] slideshow={"slide_type": "fragment"} # Here's the rule for the `urlsplit()` function: # + slideshow={"slide_type": "fragment"} webbrowser_grammar["<urlsplit>"] # + [markdown] slideshow={"slide_type": "fragment"} # Here are the arguments. Note that although we only passed `http://www.fuzzingbook.org` as a parameter, we also see the `https:` variant. That is because opening the `http:` URL automatically redirects to the `https:` URL, which is then also processed by `urlsplit()`. # + slideshow={"slide_type": "fragment"} webbrowser_grammar["<urlsplit-url>"] # + [markdown] slideshow={"slide_type": "subslide"} # There also is some variation in the `scheme` argument: # + slideshow={"slide_type": "fragment"} webbrowser_grammar["<urlsplit-scheme>"] # + [markdown] slideshow={"slide_type": "fragment"} # If we now apply a fuzzer on these rules, we systematically cover all variations of arguments seen, including, of course, combinations not seen during carving. Again, we are fuzzing at the API level here. # + slideshow={"slide_type": "subslide"} urlsplit_fuzzer = GrammarCoverageFuzzer( webbrowser_grammar, start_symbol="<urlsplit>") for i in range(5): print(urlsplit_fuzzer.fuzz()) # + [markdown] slideshow={"slide_type": "fragment"} # Just as seen with carving, running tests at the API level is orders of magnitude faster than executing system tests. Hence, this calls for means to fuzz at the method level: # + slideshow={"slide_type": "skip"} from urllib.parse import urlsplit # + slideshow={"slide_type": "skip"} from Timer import Timer # + slideshow={"slide_type": "subslide"} with Timer() as urlsplit_timer: urlsplit('http://www.fuzzingbook.org/', 'http', True) urlsplit_timer.elapsed_time() # + slideshow={"slide_type": "fragment"} with Timer() as webbrowser_timer: webbrowser("http://www.fuzzingbook.org") webbrowser_timer.elapsed_time() # + slideshow={"slide_type": "fragment"} webbrowser_timer.elapsed_time() / urlsplit_timer.elapsed_time() # + [markdown] slideshow={"slide_type": "subslide"} # But then again, the caveats encountered during carving apply, notably the requirement to recreate the original function environment. If we also alter or recombine arguments, we get the additional risk of _violating an implicit precondition_ – that is, invoking a function with arguments the function was never designed for. Such _false alarms_, resulting from incorrect invocations rather than incorrect implementations, must then be identified (typically manually) and wed out (for instance, by altering or constraining the grammar). The huge speed gains at the API level, however, may well justify this additional investment. # + [markdown] slideshow={"slide_type": "slide"} # ## Synopsis # # This chapter provides means to _record and replay function calls_ during a system test. 
Since individual function calls are much faster than a whole system run, such "carving" mechanisms have the potential to run tests much faster. # + [markdown] slideshow={"slide_type": "subslide"} # ### Recording Calls # # The `CallCarver` class records all calls occurring while it is active. It is used in conjunction with a `with` clause: # + slideshow={"slide_type": "fragment"} with CallCarver() as carver: y = my_sqrt(2) y = my_sqrt(4) # + [markdown] slideshow={"slide_type": "fragment"} # After execution, `called_functions()` lists the names of functions encountered: # + slideshow={"slide_type": "fragment"} carver.called_functions() # + [markdown] slideshow={"slide_type": "fragment"} # The `arguments()` method lists the arguments recorded for a function. This is a mapping of the function name to a list of lists of arguments; each argument is a pair (parameter name, value). # + slideshow={"slide_type": "subslide"} carver.arguments('my_sqrt') # + [markdown] slideshow={"slide_type": "fragment"} # Complex arguments are properly serialized, such that they can be easily restored. # + [markdown] slideshow={"slide_type": "subslide"} # ### Synthesizing Calls # # While such recorded arguments already could be turned into arguments and calls, a much nicer alternative is to create a _grammar_ for recorded calls. This allows to synthesize arbitrary _combinations_ of arguments, and also offers a base for further customization of calls. # + [markdown] slideshow={"slide_type": "fragment"} # The `CallGrammarMiner` class turns a list of carved executions into a grammar. # + slideshow={"slide_type": "subslide"} my_sqrt_miner = CallGrammarMiner(carver) my_sqrt_grammar = my_sqrt_miner.mine_call_grammar() my_sqrt_grammar # + [markdown] slideshow={"slide_type": "fragment"} # This grammar can be used to synthesize calls. # + slideshow={"slide_type": "fragment"} fuzzer = GrammarCoverageFuzzer(my_sqrt_grammar) fuzzer.fuzz() # + [markdown] slideshow={"slide_type": "fragment"} # These calls can be executed in isolation, effectively extracting unit tests from system tests: # + slideshow={"slide_type": "fragment"} eval(fuzzer.fuzz()) # + [markdown] button=false new_sheet=true run_control={"read_only": false} slideshow={"slide_type": "slide"} # ## Lessons Learned # # * _Carving_ allows for effective replay of function calls recorded during a system test. # * A function call can be _orders of magnitude faster_ than a system invocation. # * _Serialization_ allows to create persistent representations of complex objects. # * Functions that heavily interact with their environment and/or access external resources are difficult to carve. # * From carved calls, one can produce API grammars that arbitrarily combine carved arguments. # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"} # ## Next Steps # # In the next chapter, we will discuss [how to reduce failure-inducing inputs](Reducer.ipynb). # + [markdown] slideshow={"slide_type": "slide"} # ## Background # # Carving was invented by Elbaum et al. \cite{Elbaum2006} and originally implemented for Java. In this chapter, we follow several of their design choices (including recording and serializing method arguments only). # # The combination of carving and fuzzing at the API level is described in \cite{Kampmann2018}. 
# + [markdown] button=false new_sheet=true run_control={"read_only": false} slideshow={"slide_type": "slide"} # ## Exercises # # ### Exercise 1: Carving for Regression Testing # # So far, during carving, we only have looked into reproducing _calls_, but not into actually checking the _results_ of these calls. This is important for _regression testing_ – i.e. checking whether a change to code does not impede existing functionality. We can build this by recording not only _calls_, but also _return values_ – and then later compare whether the same calls result in the same values. This may not work on all occasions; values that depend on time, randomness, or other external factors may be different. Still, for functionality that abstracts from these details, checking that nothing has changed is an important part of testing. # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution="hidden" solution2="hidden" solution2_first=true solution_first=true # Our aim is to design a class `ResultCarver` that extends `CallCarver` by recording both calls and return values. # # In a first step, create a `traceit()` method that also tracks return values by extending the `traceit()` method. The `traceit()` event type is `"return"` and the `arg` parameter is the returned value. Here is a prototype that only prints out the returned values: # + slideshow={"slide_type": "subslide"} class ResultCarver(CallCarver): def traceit(self, frame, event, arg): if event == "return": if self._log: print("Result:", arg) super().traceit(frame, event, arg) # Need to return traceit function such that it is invoked for return # events return self.traceit # + slideshow={"slide_type": "subslide"} with ResultCarver(log=True) as result_carver: my_sqrt(2) # + [markdown] slideshow={"slide_type": "subslide"} solution2="hidden" solution2_first=true # #### Part 1: Store function results # # Extend the above code such that results are _stored_ in a way that associates them with the currently returning function (or method). To this end, you need to keep track of the _current stack of called functions_. 
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"} solution="hidden" solution2="hidden"
# **Solution.** Here's a solution, building on the above:

# + slideshow={"slide_type": "skip"} solution2="hidden"
class ResultCarver(CallCarver):
    def reset(self):
        super().reset()
        self._call_stack = []
        self._results = {}

    def add_result(self, function_name, arguments, result):
        key = simple_call_string(function_name, arguments)
        self._results[key] = result

    def traceit(self, frame, event, arg):
        if event == "call":
            code = frame.f_code
            function_name = code.co_name
            qualified_name = get_qualified_name(code)
            self._call_stack.append(
                (function_name, qualified_name, get_arguments(frame)))

        if event == "return":
            result = arg
            (function_name, qualified_name, arguments) = self._call_stack.pop()
            self.add_result(function_name, arguments, result)
            if function_name != qualified_name:
                self.add_result(qualified_name, arguments, result)
            if self._log:
                print(
                    simple_call_string(
                        function_name,
                        arguments),
                    "=",
                    result)

        # Keep on processing current calls
        super().traceit(frame, event, arg)

        # Need to return traceit function such that it is invoked for return
        # events
        return self.traceit

# + slideshow={"slide_type": "skip"} solution2="hidden"
with ResultCarver(log=True) as result_carver:
    my_sqrt(2)
result_carver._results

# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution="hidden" solution2="hidden" solution2_first=true solution_first=true
# #### Part 2: Access results
#
# Give it a method `result()` that returns the value recorded for that particular function name and argument:
#
# ```python
# class ResultCarver(CallCarver):
#     def result(self, function_name, argument):
#         """Returns the result recorded for function_name(argument)"""
# ```

# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# **Solution.** This is mostly done in the code for part 1:

# + slideshow={"slide_type": "skip"} solution2="hidden"
class ResultCarver(ResultCarver):
    def result(self, function_name, argument):
        # Note: look up the key for the argument list that was passed in,
        # not the unrelated global `arguments` variable
        key = simple_call_string(function_name, argument)
        return self._results[key]

# + [markdown] slideshow={"slide_type": "subslide"} solution2="hidden" solution2_first=true
# #### Part 3: Produce assertions
#
# For the functions called during `webbrowser()` execution, create a set of _assertions_ that check whether the result returned is still the same. Test this for `urllib.parse.urlparse()` and `urllib.parse.urlsplit()`.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"} solution="hidden" solution2="hidden" # **Solution.** Not too hard now: # + slideshow={"slide_type": "skip"} solution2="hidden" with ResultCarver() as webbrowser_result_carver: webbrowser("http://www.example.com") # + slideshow={"slide_type": "skip"} solution2="hidden" for function_name in ["urllib.parse.urlparse", "urllib.parse.urlsplit"]: for arguments in webbrowser_result_carver.arguments(function_name): try: call = call_string(function_name, arguments) result = webbrowser_result_carver.result(function_name, arguments) print("assert", call, "==", call_value(result)) except Exception: continue # + [markdown] slideshow={"slide_type": "skip"} solution2="hidden" # We can run these assertions: # + slideshow={"slide_type": "skip"} solution2="hidden" from urllib.parse import SplitResult, ParseResult, urlparse, urlsplit # + slideshow={"slide_type": "skip"} solution2="hidden" assert urlparse( url='http://www.example.com', scheme='', allow_fragments=True) == ParseResult( scheme='http', netloc='www.example.com', path='', params='', query='', fragment='') assert urlsplit( url='http://www.example.com', scheme='', allow_fragments=True) == SplitResult( scheme='http', netloc='www.example.com', path='', query='', fragment='') # + [markdown] slideshow={"slide_type": "skip"} solution2="hidden" # We can now add these carved tests to a _regression test suite_ which would be run after every change to ensure that the functionality of `urlparse()` and `urlsplit()` is not changed. # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution2="hidden" solution2_first=true # ### Exercise 2: Abstracting Arguments # # When mining an API grammar from executions, set up an abstraction scheme to widen the range of arguments to be used during testing. If the values for an argument, all conform to some type `T`. abstract it into `<T>`. For instance, if calls to `foo(1)`, `foo(2)`, `foo(3)` have been seen, the grammar should abstract its calls into `foo(<int>)`, with `<int>` being appropriately defined. # # Do this for a number of common types: integers, positive numbers, floating-point numbers, host names, URLs, mail addresses, and more. # + [markdown] slideshow={"slide_type": "skip"} solution2="hidden" # **Solution.** Left to the reader.
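# + [markdown] slideshow={"slide_type": "skip"}
# As a starting point (this is only a sketch, not a full solution), one could post-process a mined grammar and replace every argument rule whose expansions are all integer literals by a shared `<int>` rule. The helper `abstract_int_arguments()` below is hypothetical and only handles the integer case; other types (floats, host names, URLs, mail addresses, ...) would follow the same pattern with different matching predicates.

# + slideshow={"slide_type": "skip"}
INT_GRAMMAR = {
    "<int>": ["<digit><int>", "<digit>"],
    "<digit>": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
}

def abstract_int_arguments(grammar):
    """Return a copy of `grammar` where all-integer argument rules expand to <int>."""
    abstract_grammar = dict(grammar)
    for symbol, expansions in grammar.items():
        if expansions and all(isinstance(exp, str) and exp.lstrip("-").isdigit()
                              for exp in expansions):
            abstract_grammar[symbol] = ["<int>"]
            abstract_grammar.update(INT_GRAMMAR)
    return abstract_grammar

abstract_power_grammar = abstract_int_arguments(power_grammar)
assert is_valid_grammar(abstract_power_grammar)

abstract_power_fuzzer = GrammarCoverageFuzzer(abstract_power_grammar)
[abstract_power_fuzzer.fuzz() for i in range(5)]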
docs/beta/notebooks/Carver.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/FairozaAmira/AI_Programming_1_e/blob/master/Lesson06/Tuples.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="q7kOGK91dw4B" # # Solution for Tuples Exercise # + [markdown] colab_type="text" id="vGcrD88kdw4D" # 1. Create a tuples with variable, t that has 4, 9, 10 as its elements. # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="LKHvBvK5dw4E" outputId="458eb980-c0bf-4885-82fc-8b897b412ee9" t = (4,9,10) print(t) # - type(t) t = 4,9, 10 print(t) type(t) # + [markdown] colab_type="text" id="kdiuC2Y2dw4J" # 2. Find the length of t # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="RcXLuFYgdw4J" outputId="97bdb19a-7d2e-4581-f625-811c1ee6d290" len(t) # + [markdown] colab_type="text" id="aAgNbFTWdw4L" # 3. What is the value of the first element of t? # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="3nxuZFlFdw4M" outputId="cfa4f062-6266-4b42-d541-29e6bd3347b7" t[0] # + [markdown] colab_type="text" id="6R7XsqbIdw4Q" # 4. Is it possible to change the value of an element in tuple? If it is possible, change t[1] to 5. # + colab={"base_uri": "https://localhost:8080/", "height": 173} colab_type="code" id="bvEJofYmdw4R" outputId="3b89a17e-9899-4c14-9e2a-9b033648adfd" t[1] = 5 # + [markdown] colab_type="text" id="RRBCu2Updw4T" # 5. Is it possible to add an element in tuple? If it is so, add element 12 . # + colab={"base_uri": "https://localhost:8080/", "height": 173} colab_type="code" id="KNhYqubqdw4U" outputId="db39dbac-01b3-4299-895a-554b1a335179" t.append(12) # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="Z8RgTR0bdw4V" outputId="73da790d-4626-4f73-b2c6-1e7359f101ae" t2 = (12, ) t += t2 print(t) # + colab={} colab_type="code" id="2HD2A1xadw4X"
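# + [markdown]
# As a small aside (not part of the original exercise), the usual workaround for "changing" an element of a tuple is to convert it to a list, modify the list, and build a new tuple from it:

# +
t = (4, 9, 10)
lst = list(t)     # lists are mutable, so the element can be changed here
lst[1] = 5
t = tuple(lst)    # rebuild an (immutable) tuple from the modified list
print(t)          # prints (4, 5, 10)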
Data Structure/Tuples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="Cb4espuLKJiA" # ##### Copyright 2021 The TensorFlow Authors. # + cellView="form" id="DjZQV2njKJ3U" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="mTL0TERThT6z" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/tutorials/audio/transfer_learning_audio"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/audio/transfer_learning_audio.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/audio/transfer_learning_audio.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/audio/transfer_learning_audio.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # <td> # <a href="https://tfhub.dev/google/yamnet/1"><img src="https://www.tensorflow.org/images/hub_logo_32px.png" />See TF Hub model</a> # </td> # </table> # + [markdown] id="K2madPFAGHb3" # # Transfer Learning with YAMNet for environmental sound classification # # [YAMNet](https://tfhub.dev/google/yamnet/1) is an audio event classifier that can predict audio events from [521 classes](https://github.com/tensorflow/models/blob/master/research/audioset/yamnet/yamnet_class_map.csv), like laughter, barking, or a siren. # # In this tutorial you will learn how to: # # - Load and use the YAMNet model for inference. # - Build a new model using the YAMNet embeddings to classify cat and dog sounds. # - Evaluate and export your model. # # + [markdown] id="5Mdp2TpBh96Y" # ## Import TensorFlow and other libraries # # + [markdown] id="zCcKYqu_hvKe" # Start by installing [TensorFlow I/O](https://www.tensorflow.org/io), which will make it easier for you to load audio files off disk. # + id="urBpRWDHTHHU" # !pip install tensorflow_io # + id="7l3nqdWVF-kC" import os from IPython import display import matplotlib.pyplot as plt import numpy as np import pandas as pd import tensorflow as tf import tensorflow_datasets as tfds import tensorflow_hub as hub import tensorflow_io as tfio # + [markdown] id="v9ZhybCnt_bM" # ## About YAMNet # # YAMNet is an audio event classifier that takes audio waveform as input and makes independent predictions for each of 521 audio events from the [AudioSet](https://research.google.com/audioset/) ontology. # # Internally, the model extracts "frames" from the audio signal and processes batches of these frames. 
This version of the model uses frames that are 0.96s long and extracts one frame every 0.48s. # # The model accepts a 1-D float32 Tensor or NumPy array containing a waveform of arbitrary length, represented as mono 16 kHz samples in the range `[-1.0, +1.0]`. This tutorial contains code to help you convert a `.wav` file into the correct format. # # The model returns 3 outputs, including the class scores, embeddings (which you will use for transfer learning), and the log mel spectrogram. You can find more details [here](https://tfhub.dev/google/yamnet/1), and this tutorial will walk you through using these in practice. # # One specific use of YAMNet is as a high-level feature extractor: the `1024-D` embedding output of YAMNet can be used as the input features of another shallow model which can then be trained on a small amount of data for a particular task. This allows the quick creation of specialized audio classifiers without requiring a lot of labeled data and without having to train a large model end-to-end. # # You will use YAMNet's embeddings output for transfer learning and train one or more [Dense](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense) layers on top of this. # # First, you will try the model and see the results of classifying audio. You will then construct the data pre-processing pipeline. # # ### Loading YAMNet from TensorFlow Hub # # You are going to use YAMNet from [Tensorflow Hub](https://tfhub.dev/) to extract the embeddings from the sound files. # # Loading a model from TensorFlow Hub is straightforward: choose the model, copy its URL and use the `load` function. # # Note: to read the documentation of the model, you can use the model url in your browser. # + id="06CWkBV5v3gr" yamnet_model_handle = 'https://tfhub.dev/google/yamnet/1' yamnet_model = hub.load(yamnet_model_handle) # + [markdown] id="GmrPJ0GHw9rr" # With the model loaded and following the [models's basic usage tutorial](https://www.tensorflow.org/hub/tutorials/yamnet) you'll download a sample wav file and run the inference. # # + id="C5i6xktEq00P" testing_wav_file_name = tf.keras.utils.get_file('miaow_16k.wav', 'https://storage.googleapis.com/audioset/miaow_16k.wav', cache_dir='./', cache_subdir='test_data') print(testing_wav_file_name) # + [markdown] id="mBm9y9iV2U_-" # You will need a function to load the audio files. They will also be used later when working with the training data. # # Note: The returned `wav_data` from `load_wav_16k_mono` is already normalized to values in `[-1.0, 1.0]` (as stated in the model's [documentation](https://tfhub.dev/google/yamnet/1)). # + id="Xwc9Wrdg2EtY" # Util functions for loading audio files and ensure the correct sample rate @tf.function def load_wav_16k_mono(filename): """ read in a waveform file and convert to 16 kHz mono """ file_contents = tf.io.read_file(filename) wav, sample_rate = tf.audio.decode_wav( file_contents, desired_channels=1) wav = tf.squeeze(wav, axis=-1) sample_rate = tf.cast(sample_rate, dtype=tf.int64) wav = tfio.audio.resample(wav, rate_in=sample_rate, rate_out=16000) return wav # + id="FRqpjkwB0Jjw" testing_wav_data = load_wav_16k_mono(testing_wav_file_name) _ = plt.plot(testing_wav_data) # Play the audio file. display.Audio(testing_wav_data,rate=16000) # + [markdown] id="6z6rqlEz20YB" # ### Load the class mapping # # It's important to load the class names that YAMNet is able to recognize. The mapping file is present at `yamnet_model.class_map_path()`, in the `csv` format. 
# + id="6Gyj23e_3Mgr" class_map_path = yamnet_model.class_map_path().numpy().decode('utf-8') class_names =list(pd.read_csv(class_map_path)['display_name']) for name in class_names[:20]: print(name) print('...') # + [markdown] id="5xbycDnT40u0" # ### Run inference # # YAMNet provides frame-level class-scores (i.e., 521 scores for every frame). In order to determine clip-level predictions, the scores can be aggregated per-class across frames (e.g., using mean or max aggregation). This is done below by `scores_np.mean(axis=0)`. Finally, in order to find the top-scored class at the clip-level, we take the maximum of the 521 aggregated scores. # # + id="NT0otp-A4Y3u" scores, embeddings, spectrogram = yamnet_model(testing_wav_data) class_scores = tf.reduce_mean(scores, axis=0) top_class = tf.argmax(class_scores) infered_class = class_names[top_class] print(f'The main sound is: {infered_class}') print(f'The embeddings shape: {embeddings.shape}') # + [markdown] id="YBaLNg5H5IWa" # Note: The model correctly inferred an animal sound. Your goal is to increase accuracy for specific classes. Also, notice that the the model generated 13 embeddings, 1 per frame. # + [markdown] id="fmthELBg1A2-" # ## ESC-50 dataset # # The [ESC-50 dataset](https://github.com/karolpiczak/ESC-50#repository-content), well described [here](https://www.karolpiczak.com/papers/Piczak2015-ESC-Dataset.pdf), is a labeled collection of 2000 environmental audio recordings (each 5 seconds long). The data consists of 50 classes, with 40 examples per class. # # Next, you will download and extract it. # # + id="MWobqK8JmZOU" _ = tf.keras.utils.get_file('esc-50.zip', 'https://github.com/karoldvl/ESC-50/archive/master.zip', cache_dir='./', cache_subdir='datasets', extract=True) # + [markdown] id="qcruxiuX1cO5" # ### Explore the data # # The metadata for each file is specified in the csv file at `./datasets/ESC-50-master/meta/esc50.csv` # # and all the audio files are in `./datasets/ESC-50-master/audio/` # # You will create a pandas dataframe with the mapping and use that to have a clearer view of the data. # # + id="jwmLygPrMAbH" esc50_csv = './datasets/ESC-50-master/meta/esc50.csv' base_data_path = './datasets/ESC-50-master/audio/' pd_data = pd.read_csv(esc50_csv) pd_data.head() # + [markdown] id="7d4rHBEQ2QAU" # ### Filter the data # # Given the data on the dataframe, you will apply some transformations: # # - filter out rows and use only the selected classes (dog and cat). If you want to use any other classes, this is where you can choose them. # - change the filename to have the full path. This will make loading easier later. # - change targets to be within a specific range. In this example, dog will remain 0, but cat will become 1 instead of its original value of 5. # + id="tFnEoQjgs14I" my_classes = ['dog', 'cat'] map_class_to_id = {'dog':0, 'cat':1} filtered_pd = pd_data[pd_data.category.isin(my_classes)] class_id = filtered_pd['category'].apply(lambda name: map_class_to_id[name]) filtered_pd = filtered_pd.assign(target=class_id) full_path = filtered_pd['filename'].apply(lambda row: os.path.join(base_data_path, row)) filtered_pd = filtered_pd.assign(filename=full_path) filtered_pd.head(10) # + [markdown] id="BkDcBS-aJdCz" # ### Load the audio files and retrieve embeddings # # Here you'll apply the `load_wav_16k_mono` and prepare the wav data for the model. # # When extracting embeddings from the wav data, you get an array of shape `(N, 1024)` where `N` is the number of frames that YAMNet found (one for every 0.48 seconds of audio). 
# + [markdown] id="AKDT5RomaDKO" # Your model will use each frame as one input so you need to to create a new column that has one frame per row. You also need to expand the labels and fold column to proper reflect these new rows. # # The expanded fold column keeps the original value. You cannot mix frames because, when doing the splits, you might end with parts of the same audio on different splits and that would make our validation and test steps less effective. # + id="u5Rq3_PyKLtU" filenames = filtered_pd['filename'] targets = filtered_pd['target'] folds = filtered_pd['fold'] main_ds = tf.data.Dataset.from_tensor_slices((filenames, targets, folds)) main_ds.element_spec # + id="rsEfovDVAHGY" def load_wav_for_map(filename, label, fold): return load_wav_16k_mono(filename), label, fold main_ds = main_ds.map(load_wav_for_map) main_ds.element_spec # + id="k0tG8DBNAHcE" # applies the embedding extraction model to a wav data def extract_embedding(wav_data, label, fold): ''' run YAMNet to extract embedding from the wav data ''' scores, embeddings, spectrogram = yamnet_model(wav_data) num_embeddings = tf.shape(embeddings)[0] return (embeddings, tf.repeat(label, num_embeddings), tf.repeat(fold, num_embeddings)) # extract embedding main_ds = main_ds.map(extract_embedding).unbatch() main_ds.element_spec # + [markdown] id="ZdfPIeD0Qedk" # ### Split the data # # You will use the `fold` column to split the dataset into train, validation and test. # # The fold values are so that files from the same original wav file are keep on the same split, you can find more information on the [paper](https://www.karolpiczak.com/papers/Piczak2015-ESC-Dataset.pdf) describing the dataset. # # The last step is to remove the `fold` column from the dataset since we're not going to use it anymore on the training process. # # + id="1ZYvlFiVsffC" cached_ds = main_ds.cache() train_ds = cached_ds.filter(lambda embedding, label, fold: fold < 4) val_ds = cached_ds.filter(lambda embedding, label, fold: fold == 4) test_ds = cached_ds.filter(lambda embedding, label, fold: fold == 5) # remove the folds column now that it's not needed anymore remove_fold_column = lambda embedding, label, fold: (embedding, label) train_ds = train_ds.map(remove_fold_column) val_ds = val_ds.map(remove_fold_column) test_ds = test_ds.map(remove_fold_column) train_ds = train_ds.cache().shuffle(1000).batch(32).prefetch(tf.data.AUTOTUNE) val_ds = val_ds.cache().batch(32).prefetch(tf.data.AUTOTUNE) test_ds = test_ds.cache().batch(32).prefetch(tf.data.AUTOTUNE) # + [markdown] id="v5PaMwvtcAIe" # ## Create your model # # You did most of the work! # Next, define a very simple Sequential Model to start with -- one hiden layer and 2 outputs to recognize cats and dogs. # # + id="JYCE0Fr1GpN3" my_model = tf.keras.Sequential([ tf.keras.layers.Input(shape=(1024), dtype=tf.float32, name='input_embedding'), tf.keras.layers.Dense(512, activation='relu'), tf.keras.layers.Dense(len(my_classes)) ], name='my_model') my_model.summary() # + id="l1qgH35HY0SE" my_model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer="adam", metrics=['accuracy']) callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3, restore_best_weights=True) # + id="T3sj84eOZ3pk" history = my_model.fit(train_ds, epochs=20, validation_data=val_ds, callbacks=callback) # + [markdown] id="OAbraYKYpdoE" # Lets run the evaluate method on the test data just to be sure there's no overfitting. 
# + id="H4Nh5nec3Sky" loss, accuracy = my_model.evaluate(test_ds) print("Loss: ", loss) print("Accuracy: ", accuracy) # + [markdown] id="cid-qIrIpqHS" # You did it! # + [markdown] id="nCKZonrJcXab" # ## Test your model # # Next, try your model on the embedding from the previous test using YAMNet only. # # + id="79AFpA3_ctCF" scores, embeddings, spectrogram = yamnet_model(testing_wav_data) result = my_model(embeddings).numpy() infered_class = my_classes[result.mean(axis=0).argmax()] print(f'The main sound is: {infered_class}') # + [markdown] id="k2yleeev645r" # ## Save a model that can directly take a wav file as input # # Your model works when you give it the embeddings as input. # # In a real situation you'll want to give it the sound data directly. # # To do that you will combine YAMNet with your model into one single model that you can export for other applications. # # To make it easier to use the model's result, the final layer will be a `reduce_mean` operation. When using this model for serving, as you will see bellow, you will need the name of the final layer. If you don't define one, TF will auto define an incremental one that makes it hard to test as it will keep changing everytime you train the model. When using a raw tf operation you can't assign a name to it. To address this issue, you'll create a custom layer that just apply `reduce_mean` and you will call it 'classifier'. # # + id="QUVCI2Suunpw" class ReduceMeanLayer(tf.keras.layers.Layer): def __init__(self, axis=0, **kwargs): super(ReduceMeanLayer, self).__init__(**kwargs) self.axis = axis def call(self, input): return tf.math.reduce_mean(input, axis=self.axis) # + id="zE_Npm0nzlwc" saved_model_path = './dogs_and_cats_yamnet' input_segment = tf.keras.layers.Input(shape=(), dtype=tf.float32, name='audio') embedding_extraction_layer = hub.KerasLayer(yamnet_model_handle, trainable=False, name='yamnet') _, embeddings_output, _ = embedding_extraction_layer(input_segment) serving_outputs = my_model(embeddings_output) serving_outputs = ReduceMeanLayer(axis=0, name='classifier')(serving_outputs) serving_model = tf.keras.Model(input_segment, serving_outputs) serving_model.save(saved_model_path, include_optimizer=False) # + id="y-0bY5FMme1C" tf.keras.utils.plot_model(serving_model) # + [markdown] id="btHQDN9mqxM_" # Load your saved model to verify that it works as expected. # + id="KkYVpJS72WWB" reloaded_model = tf.saved_model.load(saved_model_path) # + [markdown] id="4BkmvvNzq49l" # And for the final test: given some sound data, does your model return the correct result? # + id="xeXtD5HO28y-" reloaded_results = reloaded_model(testing_wav_data) cat_or_dog = my_classes[tf.argmax(reloaded_results)] print(f'The main sound is: {cat_or_dog}') # + [markdown] id="ZRrOcBYTUgwn" # If you want to try your new model on a serving setup, you can use the 'serving_default' signature. # + id="ycC8zzDSUG2s" serving_results = reloaded_model.signatures['serving_default'](testing_wav_data) cat_or_dog = my_classes[tf.argmax(serving_results['classifier'])] print(f'The main sound is: {cat_or_dog}') # + [markdown] id="da7blblCHs8c" # ## (Optional) Some more testing # # The model is ready. # # Let's compare it to YAMNet on the test dataset. 
# + id="vDf5MASIIN1z" test_pd = filtered_pd.loc[filtered_pd['fold'] == 5] row = test_pd.sample(1) filename = row['filename'].item() print(filename) waveform = load_wav_16k_mono(filename) print(f'Waveform values: {waveform}') _ = plt.plot(waveform) display.Audio(waveform, rate=16000) # + id="eYUzFxYJIcE1" # Run the model, check the output. scores, embeddings, spectrogram = yamnet_model(waveform) class_scores = tf.reduce_mean(scores, axis=0) top_class = tf.argmax(class_scores) infered_class = class_names[top_class] top_score = class_scores[top_class] print(f'[YAMNet] The main sound is: {infered_class} ({top_score})') reloaded_results = reloaded_model(waveform) your_top_class = tf.argmax(reloaded_results) your_infered_class = my_classes[your_top_class] class_probabilities = tf.nn.softmax(reloaded_results, axis=-1) your_top_score = class_probabilities[your_top_class] print(f'[Your model] The main sound is: {your_infered_class} ({your_top_score})') # + [markdown] id="g8Tsym8Rq-0V" # ## Next steps # # You just created a model that can classify sounds from dogs or cats. With the same idea and proper data you could, for example, build a bird recognizer based on their singing. # # Let us know what you come up with! Share your project with us on social media. #
site/en/tutorials/audio/transfer_learning_audio.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Preamble # + import numpy as np import pandas as pd from pylab import meshgrid from scipy import exp,optimize,log,floor from scipy.signal import argrelextrema from scipy.optimize import fsolve from scipy.integrate import ode backend = 'dopri5' import csv # Timer import time # - # # Parameters values # ### Main pathway dynamics α = 0.3; θ = 0.45 κ = 40.0 # Robustness parameter for the main pathway L = 0.2 # Parameter desribing the treatment action on the main pathway # Reduction and translocation factors to the expression of the main pathway due to the treatment A = lambda σ: 1-σ*(1-θ)*(1-L) Θ = lambda σ: θ+(1-θ)*σ*L # Production function as a step-like function and corresponding potential function # + # Heaviside function # Heaviside = lambda x: 1.0 * (x >= 0) f = lambda y, σ: A(σ)*(α+(1-α)*Heaviside(y-Θ(σ))) U = lambda y, σ: -A(σ)*(α+(1.-α)*Heaviside(y-Θ(σ)))*(y-Θ(σ))+(y**2-Θ(σ)**2)/2.0 # - # Corresponding potential bariers (will be required for the dynamics) Eplus = lambda σ: U(Θ(σ),σ)-U(f(1.,σ),σ) Eminus = lambda σ: U(Θ(σ),σ)-U(f(0.,σ),σ) # Difference in potential bariers ΔE = lambda σ: Eplus(σ)-Eminus(σ) # ### Fitting some parameter values according with <NAME> et al 2013 *eLife* d = 0.13 # death rate per day b = (0.1*(exp(κ*ΔE(1))+1)-0.14*(exp(κ*ΔE(0))+1))/(exp(κ*ΔE(1))-exp(κ*ΔE(0))) χ = 1-(0.14*(exp(κ*ΔE(0))+1)-b*exp(κ*ΔE(0)))/b print("Birth rate: %.4f" % b) print("Penalty χ: %.4f" % χ) # ### Other parameters c = 0.04 # cost of resistance ε = 0.01 # initial fraction of resistant cells # # Periodic treatment # # ** ¡It takes quite a substantial amount of time! ** T = 2.0*12*30 # two years def func(μ,μbar,treatment_periodicity,σ): ode_rhs = lambda t, X, σ: [b*(χ/(exp(κ*ΔE(σ))+1)-c)*X[0]*(1-X[0])+μ*(1-X[0])/(exp(κ*ΔE(σ))+1)-μbar*exp(-κ*Eminus(σ))*X[0],\ b*(1-χ*(1-X[0])/(exp(κ*ΔE(σ))+1)-c*X[0])*X[1]-d*X[1]] n = floor(T/(2*treatment_periodicity)) x = n*treatment_periodicity + (treatment_periodicity if (T/treatment_periodicity-2*n>1) else T-2*n*treatment_periodicity) σx = σ*T/2/x #here σx is the double of one half of the applied treatment intensity solver = ode(ode_rhs).set_integrator(backend).set_initial_value([ε,1.]) tme = 0.0; treatment = True while (tme+treatment_periodicity < T): tme += treatment_periodicity solver.set_f_params(int(treatment)*σx).integrate(tme) treatment = not treatment solver.set_f_params(int(treatment)*σx).integrate(T) return solver.y # + File = open("../figures/draft/sensitivity_periodic_mubar_fixed_smaller_step.csv", 'w') File.write("T,Tbar,period,sigma,Resistance,FoldChange\n") writer = csv.writer(File,lineterminator='\n') step_periods = .1; step_σFixed = .01 Periods, σFixed = np.meshgrid(np.arange(step_periods,30.0+step_periods,step_periods), np.arange(0.7,1.0+step_σFixed,step_σFixed)) periods_n, σFixed_n = Periods.shape mubart = 60.0 for mut in np.arange(120,2,-2): time0=time.time() FoldChangeMin = 1e10 for xk in range(periods_n): for yk in range(σFixed_n): Resistance, FoldChange = func(1./mut,1./mubart,Periods[xk,yk],σFixed[xk,yk]) if FoldChange<FoldChangeMin: FoldChangeMin = FoldChange output = [mut,mubart,Periods[xk,yk],σFixed[xk,yk],Resistance,FoldChange] print("This proccess took %0.1f minutes" % ((time.time()-time0)/60.)) writer.writerow(output) print(output) File.close() # -
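# A quick consistency check (not part of the original script): assuming the values 0.14 and 0.10 per day used in the fit above are the intended effective birth rates without and with treatment, the fitted b and χ should reproduce them exactly.

# +
# Effective birth rate of sensitive cells with no resistant fraction: b*(1 - χ/(exp(κ*ΔE(σ))+1))
print("Effective birth rate at σ=0: %.4f" % (b*(1 - χ/(np.exp(κ*ΔE(0)) + 1))))  # expected ≈ 0.14
print("Effective birth rate at σ=1: %.4f" % (b*(1 - χ/(np.exp(κ*ΔE(1)) + 1))))  # expected ≈ 0.10
# -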
scripts/.ipynb_checkpoints/C3-Copy2. Sensitivity for periodic treatment, mu fixed smaller step [Python]-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="jYjbFqJ6Jshs" # #**This is Colaboratory is owned by:** # ###1. <NAME> (J045) # ###2. <NAME> (J074) # ###3. <NAME> (J078) # # ##**Subject: Deep Learning Final Project** # # ##**Topic: Breast Cancer Treatment Response Classification Based Flask App** # + [markdown] id="Jee5A9u4VrjG" # ## **Importing Libraries and Pre-processing** # + colab={"base_uri": "https://localhost:8080/"} id="ZEgFmGRlJ-H9" outputId="635801d2-0d5a-4980-b163-9e96da1feb41" # !pip install pydicom # + id="8vJyYOOjVvNT" import pandas as pd import numpy as np from keras.models import Sequential from keras.layers import Dense from keras.wrappers.scikit_learn import KerasClassifier from keras.utils import np_utils from sklearn import linear_model from sklearn.model_selection import cross_val_score, KFold, GridSearchCV, RandomizedSearchCV from sklearn.preprocessing import LabelEncoder from sklearn.metrics import accuracy_score, mean_squared_error, r2_score from sklearn.pipeline import Pipeline import matplotlib.pyplot as plt import seaborn as sns import time import os import cv2 import skimage.transform as st import matplotlib.pyplot as plt import pydicom from pydicom.data import get_testdata_files # + colab={"base_uri": "https://localhost:8080/"} id="Dd5aSJjYT37-" outputId="92ec8526-f370-4886-c0d7-cd63d4f3c67e" from google.colab import drive drive.mount('/content/drive') # + id="IviKAuXYami2" dataset = pydicom.dcmread('/content/drive/MyDrive/SM 2 Project/TRAIN_NEW/PCR/1-01 (1).dcm') # + id="K05plkG3K-Mr" colab={"base_uri": "https://localhost:8080/"} outputId="1a486c24-4d3e-43d9-9c4a-4e6355779a10" # plot the image using matplotlib plt.imshow(dataset.pixel_array, cmap=plt.cm.bone) plt.show() # + colab={"base_uri": "https://localhost:8080/"} id="6JmEYQ1Vijyh" outputId="333e8683-e1c7-4a26-b422-08fd5af839fb" dataset.pixel_array # + colab={"base_uri": "https://localhost:8080/"} id="-zrPeD7SMXm1" outputId="4e89bc58-01a5-4bcc-8ee7-7197c529b3d4" dataset.pixel_array.shape # + id="AXQEwuqWd6HH" del dataset # + [markdown] id="i-ds7E_KfJsY" # # # --- # # # # --- # # # # --- # # # + [markdown] id="PzEisD4ws4Cq" # ##Loading TRAIN-PCR-DataSet # + id="IrTiGX8HsOgm" folder="/content/drive/My Drive/SM 2 Project/TRAIN_FINAL/PCR" train_pcr_images = [] y_train_pcr=[] for filename in os.listdir(folder): img = np.array(pydicom.dcmread(os.path.join(folder,filename)).pixel_array) if img is not None: img=np.round_(img/4096,decimals = 1) img = np.reshape(img,(512,512)) train_pcr_images.append(img) y_train_pcr.append(0) # + id="N5NogkgJsOgs" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="2659bb5e-6240-4217-af86-4cfd2801e080" #To show some images in a group fig = plt.figure(figsize=(10, 5)) # plot several images for i in range(10):#Showing 10 images ax = fig.add_subplot(2, 5, i + 1, xticks=[], yticks=[]) ax.imshow(train_pcr_images[i].reshape(512, 512), cmap=plt.cm.bone) # + [markdown] id="zDg_7RmetB1Q" # ##Loading TRAIN-NON PCR-DataSet # + id="wD8KfwuUr-b2" folder="/content/drive/My Drive/SM 2 Project/TRAIN_FINAL/NON-PCR" train_nonpcr_images = [] y_train_nonpcr=[] for filename in os.listdir(folder): img = np.array(pydicom.dcmread(os.path.join(folder,filename)).pixel_array) if img is not None: img=np.round_(img/4096,decimals = 1) img=np.reshape(img,(512,512)) train_nonpcr_images.append(img) 
y_train_nonpcr.append(1) # + id="rIbflvtgr-b-" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="f7cfa665-29a3-4a9d-b555-aefeffd156cd" #To show some images in a group fig = plt.figure(figsize=(10, 5)) # plot several images for i in range(10):#Showing 10 images ax = fig.add_subplot(2, 5, i + 1, xticks=[], yticks=[]) ax.imshow(train_nonpcr_images[i].reshape(512, 512), cmap=plt.cm.bone) # + id="0CkRo6GP0pLV" colab={"base_uri": "https://localhost:8080/"} outputId="9c25c010-67f6-48fb-e4bf-a1d834ff9220" a=np.array(train_pcr_images) print(a.shape) # + id="V3NdGKFeoO2Q" del train_pcr_images # + colab={"base_uri": "https://localhost:8080/"} id="C8pt72Wq5yiu" outputId="1cbbb1f9-a44b-44a8-d6f0-3a4e41922289" b=np.array(train_nonpcr_images) print(b.shape) # + id="Ijkm4gS7o0y1" del train_nonpcr_images # + colab={"base_uri": "https://localhost:8080/"} id="VEynBoNY5z8q" outputId="7f595f1f-7458-4f98-bb48-da7bc89f69ff" X_train=np.vstack((a, b)) print(X_train.shape) # + id="YaZ3yqFg053M" del a del b del img # + [markdown] id="aiN7Wik7tHTW" # ##Loading TEST-PCR-DataSet # + id="Nw9v2KpHrXuy" folder="/content/drive/My Drive/SM 2 Project/TEST_FINAL/pcr" test_pcr_images = [] y_test_pcr=[] for filename in os.listdir(folder): img = np.array(pydicom.dcmread(os.path.join(folder,filename)).pixel_array) if img is not None: img=np.round_(img/4096,decimals = 1) img=np.reshape(img,(512,512)) test_pcr_images.append(img) y_test_pcr.append(0) # + id="V-ZmTlWFrXu4" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="ee99cb6b-7177-4053-8813-415306b41195" #To show some images in a group fig = plt.figure(figsize=(10, 5)) # plot several images for i in range(10):#Showing 10 images ax = fig.add_subplot(2, 5, i + 1, xticks=[], yticks=[]) ax.imshow(test_pcr_images[i].reshape(512, 512), cmap=plt.cm.bone) # + [markdown] id="lGnzgyh8tMR6" # ##Loading TEST-NON PCR-DataSet # + id="zNEpl68TrrbH" folder="/content/drive/My Drive/SM 2 Project/TEST_FINAL/non-pcr" test_nonpcr_images = [] y_test_nonpcr=[] for filename in os.listdir(folder): img = np.array(pydicom.dcmread(os.path.join(folder,filename)).pixel_array) if img is not None: img=np.round_(img/4096,decimals = 1) img=np.reshape(img,(512,512)) test_nonpcr_images.append(img) y_test_nonpcr.append(1) # + id="6mHym074rrbP" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="7d553159-aea1-4566-895b-fe65e026360c" #To show some images in a group fig = plt.figure(figsize=(10, 5)) # plot several images for i in range(10):#Showing 10 images ax = fig.add_subplot(2, 5, i + 1, xticks=[], yticks=[]) ax.imshow(test_nonpcr_images[i].reshape(512, 512), cmap=plt.cm.bone) # + id="UlAKf9Yi1CAC" colab={"base_uri": "https://localhost:8080/"} outputId="45b8009b-6d8b-4c39-8ef1-1d77ee7f8bc9" a=np.array(test_pcr_images) print(a.shape) # + colab={"base_uri": "https://localhost:8080/"} id="6CxfX1hg9QBI" outputId="8a0966cf-b896-46c8-d736-93d5732eb1bd" b=np.array(test_nonpcr_images) print(b.shape) # + id="n08d7gEetU_G" del test_pcr_images del test_nonpcr_images # + colab={"base_uri": "https://localhost:8080/"} id="kBNo0Om_9RgX" outputId="16ee2170-0a48-4da4-c4b8-f25d46bbab17" X_test=np.vstack((a, b)) print(X_test.shape) # + id="7Q9G_I7E1b0J" del a del b del img # + [markdown] id="pMfieuXua98h" # # # --- # # # # --- # # # # --- # # # + id="ACL1zAjY2O3K" colab={"base_uri": "https://localhost:8080/"} outputId="af15d397-1dbb-49b5-c118-c1877609082a" #-----------------------------TRAIN--------------------------------- y_train_pcr_array=np.array(y_train_pcr) 
print(y_train_pcr_array.shape) y_train_nonpcr_array=np.array(y_train_nonpcr) print(y_train_nonpcr_array.shape) Y_train=np.concatenate((y_train_pcr_array,y_train_nonpcr_array)) print(Y_train.shape) print(Y_train) #--------------------------TEST---------------------------------------- y_test_pcr_array=np.array(y_test_pcr) print(y_test_pcr_array.shape) y_test_nonpcr_array=np.array(y_test_nonpcr) print(y_test_nonpcr_array.shape) Y_test=np.concatenate((y_test_pcr_array,y_test_nonpcr_array)) print(Y_test.shape) print(Y_test) # + id="ytcPGGp85YbC" del y_train_pcr_array del y_train_nonpcr_array del y_test_pcr_array del y_test_nonpcr_array del y_train_pcr del y_train_nonpcr del y_test_pcr del y_test_nonpcr # + id="QLaLSNf7WQIA" from keras import models from keras import layers from keras.utils import to_categorical # + id="TBd1XNWwiWPX" colab={"base_uri": "https://localhost:8080/"} outputId="d4f2e580-7ddd-414e-95cf-f4d3bf26204b" X_train.shape # + id="5ffAYjaXjn5I" Y_train = np.reshape(Y_train, (X_train.shape[0],1)) # + id="pEmRTpT5j0KX" colab={"base_uri": "https://localhost:8080/"} outputId="76a8658a-f98e-473b-c9da-46eb7df46889" Y_train.shape # + id="VIy-wg4qldo1" colab={"base_uri": "https://localhost:8080/"} outputId="837bd7b3-3e94-4170-d020-63c44b83ba52" Y_train # + colab={"base_uri": "https://localhost:8080/"} id="2wSAyvX0p1K-" outputId="db58d36b-86aa-4fe0-eb06-ab8168dc87a2" #pre-processing import sys def sizeof_fmt(num, suffix='B'): for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']: if abs(num) < 1024.0: return "%3.1f %s%s" % (num, unit, suffix) num /= 1024.0 return "%.1f %s%s" % (num, 'Yi', suffix) for name, size in sorted(((name, sys.getsizeof(value)) for name, value in locals().items()), key= lambda x: -x[1])[:10]: print("{:>30}: {:>8}".format(name, sizeof_fmt(size))) # + id="nVfiTEeHlByF" import keras from keras import regularizers from keras import optimizers initializer = keras.initializers.glorot_normal() from keras.callbacks import ModelCheckpoint, EarlyStopping filepath = "best-weights-improvement.h5" checkpoint = ModelCheckpoint(filepath, monitor ='val_accuracy', verbose=1, save_best_only=True, mode='max') es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=20, min_delta=0.01, baseline=5) callbacks_list = [checkpoint, es] # + colab={"base_uri": "https://localhost:8080/"} id="zPo1z_DhuFyR" outputId="5664d72e-adf2-4852-a102-21e4b2bf6ab3" print(X_train.shape) X_train1= np.expand_dims(X_train, 0) del X_train X_train_final = np.moveaxis(X_train1, 0, 3) del X_train1 print(X_train_final.shape) print(Y_train.shape) # + id="yyJ1zr9cule5" Y_test = np.reshape(Y_test, (X_test.shape[0],1)) # + colab={"base_uri": "https://localhost:8080/"} id="HZS3zH1BuhfS" outputId="9bb92aac-63fa-4052-9b9f-15e76b8b7c1e" a= np.expand_dims(X_test, 0) del X_test X_test_final= np.moveaxis(a, 0, 3) del a print(X_test_final.shape) print(Y_test.shape) # + [markdown] id="bDCeb3wJzfSu" # ## **MobileNet** # + colab={"base_uri": "https://localhost:8080/"} id="HjzWXKjHQL0n" outputId="9e4a5e8c-b916-473e-f51b-f54b14434b94" # build the MobileNet network from keras.models import Model from keras.layers import Input from keras import applications from keras import applications from keras.preprocessing.image import ImageDataGenerator from keras import optimizers from keras.models import Sequential from keras.layers import Dropout, Flatten, Dense input_tensor = Input(shape=(512,512,1)) base_model = applications.mobilenet.MobileNet(weights=None,include_top= False,input_tensor=input_tensor) top_model = 
Sequential() top_model.add(Flatten(input_shape=base_model.output_shape[1:])) top_model.add(Dense(32, activation='relu',kernel_initializer=initializer)) top_model.add(Dropout(0.25)) top_model.add(Dense(1, activation='sigmoid')) model = Model(inputs= base_model.input, outputs= top_model(base_model.output)) # note that it is necessary to start with a fully-trained # classifier, including the top classifier, # in order to successfully do fine-tuning # add the model on top of the convolutional base # compile the model with adam model.compile(loss='binary_crossentropy',optimizer='adam', metrics=['accuracy']) model.summary() history=model.fit(X_train_final,Y_train,epochs=50,callbacks=callbacks_list,validation_data=(X_test_final, Y_test)) # + id="4xFHmhXiReLF" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="156e65af-7400-4fc2-c07e-dd672fbea8a3" #representing accuracy graphically import matplotlib.pyplot as plt plt.plot(history.history['accuracy']) plt.plot(history.history['val_accuracy']) plt.title("Model accuracy") plt.ylabel("Accuracy") plt.xlabel("Epochs/iterations") plt.legend(['Train','Validation'], loc="upper left") plt.show() # + id="KaC4zQ8MRpyq" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="89a2a5c6-cfe5-482a-86b2-82eccf68eb8a" #representing loss graphically import matplotlib.pyplot as plt plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title("Model LOSS") plt.ylabel("Loss") plt.xlabel("Epochs/iterations") plt.legend(['Train','Validation'], loc="upper left") plt.show() # + id="qJj7-IawZEQd" from tensorflow import keras model_new = keras.models.load_model("best-weights-improvement.h5") # + id="IuiY5pZmWZgB" colab={"base_uri": "https://localhost:8080/"} outputId="92b677b3-8457-4a61-be18-c5deccd889d9" model_new.evaluate(X_test_final, Y_test) # + id="GBhmphXZ-tEp" colab={"base_uri": "https://localhost:8080/"} outputId="44273fdd-31ab-4e27-fcf7-5521fffeabd0" final_predictions=[] predictions=model_new.predict(X_test_final) for x in predictions: if x >0.5: z=1 else: z=0 final_predictions.append(z) pred=np.array(final_predictions) print(pred) # + id="9navmtMQy8Dx" del input_tensor del base_model del top_model del model # + [markdown] id="dWz1zJcyvtZ0" # ## **Xception** # + colab={"base_uri": "https://localhost:8080/"} id="D0FBTVoOzna4" outputId="14723852-26de-4227-ba91-fd2891c9c6f7" # build the Xception network from keras.models import Model from keras.layers import Input from keras import applications from keras import applications from keras.preprocessing.image import ImageDataGenerator from keras import optimizers from keras.models import Sequential from keras.layers import Dropout, Flatten, Dense input_tensor = Input(shape=(512,512,1)) base_model = applications.Xception(weights=None,include_top= False,input_tensor=input_tensor) top_model = Sequential() top_model.add(Flatten(input_shape=base_model.output_shape[1:])) top_model.add(Dense(32, activation='relu',kernel_initializer=initializer)) top_model.add(Dropout(0.25)) top_model.add(Dense(1, activation='sigmoid')) model = Model(inputs= base_model.input, outputs= top_model(base_model.output)) # note that it is necessary to start with a fully-trained # classifier, including the top classifier, # in order to successfully do fine-tuning # add the model on top of the convolutional base #set the first 35 layers (up to the last conv block) to non-trainable (weights will not be updated) for layer in model.layers[:35]: layer.trainable = False # compile the model with adam 
model.compile(loss='binary_crossentropy',optimizer='adam', metrics=['accuracy']) model.summary() history=model.fit(X_train_final,Y_train,epochs=50,callbacks=callbacks_list,validation_data=(X_test_final, Y_test)) # + colab={"base_uri": "https://localhost:8080/", "height": 295} id="oOwErqqFzna5" outputId="3e72ef1f-0608-4b36-aa07-ef3a78c40de4" #representing accuracy graphically import matplotlib.pyplot as plt plt.plot(history.history['accuracy']) plt.plot(history.history['val_accuracy']) plt.title("Model accuracy") plt.ylabel("Accuracy") plt.xlabel("Epochs/iterations") plt.legend(['Train','Validation'], loc="upper left") plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 295} id="RypNkh5nzna7" outputId="15e7a4f2-5e98-45c0-f577-e7c69a288b41" #representing loss graphically import matplotlib.pyplot as plt plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title("Model LOSS") plt.ylabel("Loss") plt.xlabel("Epochs/iterations") plt.legend(['Train','Validation'], loc="upper left") plt.show() # + id="I1J1vQTZzna7" from tensorflow import keras model_new = keras.models.load_model("best-weights-improvement.h5") # + colab={"base_uri": "https://localhost:8080/"} id="mHmDEajKzna8" outputId="df9d9df3-105b-49ca-f844-e30e0687fa2d" model_new.evaluate(X_test_final, Y_test) # + colab={"base_uri": "https://localhost:8080/"} id="sHnJAZI9zna9" outputId="14423540-1e7b-44ea-c633-e7c8fd64d882" final_predictions=[] predictions=model_new.predict(X_test_final) for x in predictions: if x >0.5: z=1 else: z=0 final_predictions.append(z) pred=np.array(final_predictions) print(pred) # + id="ykCLYjDizna9" del input_tensor del base_model del top_model del model del history # + [markdown] id="tbPljBojuGk1" # Thank you! # + [markdown] id="CAEO-WOyoma7" # # # --- # # --- # # --- # # # # # #
Pre_Trained_Models_DL.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="X24v90qcPhq2" colab_type="text"
# # Blood Cell Classification
# #### <NAME>
# Using AI, we are able to train a program to perform a task without explicitly programming it; instead, we let the program learn the task from labeled examples. The program below uses a convolutional neural network to classify what type of white blood cell an image shows. The convolutional neural network imitates the structure of the neurons that process images in the brain and uses techniques that reduce the neuron count while maintaining positional relationships in the data by processing it through multiple layers. The four white blood cell types that the program is trained to classify are Eosinophil, Lymphocyte, Monocyte, and Neutrophil. Eosinophils make up 2 to 4 percent of white blood cells (WBCs) and excrete acids to combat parasites; Lymphocytes make up 20 to 30 percent of WBCs and migrate in and out of blood; Monocytes make up 2 to 8 percent of WBCs and enter peripheral tissues to become tissue macrophages, which can engulf large particles and pathogens; and Neutrophils make up 50 to 70 percent of WBCs, and their cytoplasm is packed with pale granules containing lysosomal enzymes and bacteria-killing compounds. In the future, this program can help speed up routine blood tests.

# + id="iHtq_5lrPhq3" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1598673330677, "user_tz": 240, "elapsed": 611, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14769114701246385476"}}
# %matplotlib inline

# + id="Uha77-GhPhq7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 368} executionInfo={"status": "error", "timestamp": 1598673333459, "user_tz": 240, "elapsed": 3385, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14769114701246385476"}} outputId="9c9e7113-5391-49da-aa99-6b4920420eda"
import numpy as np
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Activation, Flatten, Input
from keras.layers import Conv2D, GlobalAveragePooling2D, LeakyReLU
from keras.utils import np_utils
from keras.optimizers import adam, SGD, rmsprop
from keras.applications import MobileNet
from string import ascii_uppercase
import matplotlib.pyplot as plt
from pandas_ml import ConfusionMatrix
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from PIL import Image
from glob import glob
import cv2

# + [markdown] id="9UmS3MpcPhq-" colab_type="text"
# # Data Preparation
#
# *Uploading and formatting images for training and testing the convolutional neural network*
#

# + id="lUfauRpIPhq_" colab_type="code" colab={} executionInfo={"status": "aborted", "timestamp": 1598673333452, "user_tz": 240, "elapsed": 3376, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14769114701246385476"}}
# TODO
classes = []
for x in glob("data/train/*"):
    classes.append(x[11:])
num_classes = len(classes)
print(classes)

# + id="uK-Npmi7PhrB" colab_type="code" colab={} executionInfo={"status": "aborted", "timestamp": 1598673333453, "user_tz": 240, "elapsed": 3376, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14769114701246385476"}}
read_img = lambda path: cv2.resize(cv2.imread(path), (224, 224))

#TODO load in
dataset for training x_train = [] y_train = [] label = 0 for folder in glob("data/train/*"): for img in glob(folder + "/*"): x_train.append(read_img(img)) y_train.append(label) label += 1 x_train = np.asarray(x_train) y_train = np.asarray(y_train) x_test = [] y_test = [] label = 0 for folder in glob("data/test/*"): for img in glob(folder + "/*"): x_test.append(read_img(img)) y_test.append(label) label += 1 x_test = np.asarray(x_test) y_test = np.asarray(y_test) x_train, y_train = shuffle(x_train, y_train) # Converts labels for train and test set to one hot encodings y_train = np_utils.to_categorical(y_train, num_classes) y_test = np_utils.to_categorical(y_test, num_classes) x_train.shape, y_train.shape, x_test.shape, y_test.shape # + [markdown] id="jKgtjnYLPhrE" colab_type="text" # # A collection of image of White Blood Cells # + id="Wgh-SUg5PhrE" colab_type="code" colab={} executionInfo={"status": "aborted", "timestamp": 1598673333453, "user_tz": 240, "elapsed": 3375, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14769114701246385476"}} w=x_train.shape[1] h=x_train.shape[2] fig=plt.figure(figsize=(8, 8)) columns = 4 rows = 5 for i in range(1, columns*rows +1): img = np.random.randint(10, size=(h,w)) fig.add_subplot(rows, columns, i) plt.imshow(x_train[i]) # + id="j0diWrKzPhrG" colab_type="code" colab={} executionInfo={"status": "aborted", "timestamp": 1598673333454, "user_tz": 240, "elapsed": 3375, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14769114701246385476"}} #TODO model_name = "Blood CNN.h5" load_checkpoint = False # + id="FqNWoOIqPhrI" colab_type="code" colab={} executionInfo={"status": "aborted", "timestamp": 1598673333454, "user_tz": 240, "elapsed": 3374, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14769114701246385476"}} #Load existing model if load_checkpoint: model = load_model(model_name) #Create new model else: model_base = MobileNet(include_top=False,input_shape=x_train.shape[1:]) model = Sequential() model.add(model_base) model.add(GlobalAveragePooling2D()) model.add(Dropout(0,5)) model.add(Dense(num_classes,activation='softmax')) model.summary() # + id="duJVlu9APhrM" colab_type="code" colab={} executionInfo={"status": "aborted", "timestamp": 1598673333455, "user_tz": 240, "elapsed": 3374, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14769114701246385476"}} #TODO opt = SGD(lr=0.01) model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy']) # + [markdown] id="oCel4p4xPhrO" colab_type="text" # # Training the Program # + id="xT1bVBVAPhrP" colab_type="code" colab={} executionInfo={"status": "aborted", "timestamp": 1598673333455, "user_tz": 240, "elapsed": 3373, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14769114701246385476"}} while True: model.fit(x_train, y_train, batch_size= 16, epochs=1, verbose=1) model.save(model_name) # + [markdown] id="nNqWY5XcPhrR" colab_type="text" # # Evaluation and Testing # + [markdown] id="fYEoFAicPhrS" colab_type="text" # # Loss vs Accuracy # + id="vsN8kGF5PhrS" colab_type="code" colab={} executionInfo={"status": "aborted", "timestamp": 1598673333456, "user_tz": 240, "elapsed": 3373, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14769114701246385476"}} score = model.evaluate(x_test, y_test, verbose=0) "Loss: %s, Accuracy: %s" % (score[0], score[1]) # + [markdown] id="EUDa6id0PhrV" colab_type="text" # # Proof of Concept # + id="5QBpychPPhrW" colab_type="code" colab={} executionInfo={"status": "aborted", "timestamp": 
1598673333456, "user_tz": 240, "elapsed": 3372, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14769114701246385476"}} i = 5 plt.imshow(x_test[i]) prediction = model.predict(np.expand_dims(x_test[i], axis=0)) "Expected: %s, Predicted: %s" % (classes[y_test[i].argmax()], classes[prediction.argmax()]) # + [markdown] id="Bk3ZVkBlPhrZ" colab_type="text" # # The Confusion Matrix # * Where is the program making the most mistakes in its classification* # + id="Z08TnbN_PhrZ" colab_type="code" colab={} executionInfo={"status": "aborted", "timestamp": 1598673333457, "user_tz": 240, "elapsed": 3372, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14769114701246385476"}} ConfusionMatrix([classes[one_hot.argmax()] for one_hot in y_test], [classes[pred.argmax()] for pred in model.predict(x_test)]).plot() # + id="TxjtiDOxPhrc" colab_type="code" colab={} executionInfo={"status": "aborted", "timestamp": 1598673333457, "user_tz": 240, "elapsed": 3370, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14769114701246385476"}} # + id="IZMnOl7sPhre" colab_type="code" colab={} executionInfo={"status": "aborted", "timestamp": 1598673333458, "user_tz": 240, "elapsed": 3370, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14769114701246385476"}} # + id="8bc8a8ujPhrg" colab_type="code" colab={} executionInfo={"status": "aborted", "timestamp": 1598673333458, "user_tz": 240, "elapsed": 3369, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14769114701246385476"}}
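# + [markdown]
# # Confusion Matrix without pandas_ml
# *The `ConfusionMatrix` cell above depends on `pandas_ml`, which may not install cleanly alongside recent pandas releases. The cell below is only a minimal sketch (not part of the original notebook) showing how the same matrix could be drawn with scikit-learn and matplotlib; it assumes `model`, `x_test`, `y_test` and `classes` are defined as in the cells above.*

# +
from sklearn.metrics import confusion_matrix

y_true_labels = [classes[one_hot.argmax()] for one_hot in y_test]
y_pred_labels = [classes[pred.argmax()] for pred in model.predict(x_test)]

# Rows are the actual classes, columns the predicted classes.
cm = confusion_matrix(y_true_labels, y_pred_labels, labels=classes)

fig, ax = plt.subplots(figsize=(6, 6))
ax.imshow(cm, cmap='Blues')
ax.set_xticks(range(len(classes)))
ax.set_yticks(range(len(classes)))
ax.set_xticklabels(classes, rotation=45, ha='right')
ax.set_yticklabels(classes)
ax.set_xlabel('Predicted')
ax.set_ylabel('Actual')
for i in range(len(classes)):
    for j in range(len(classes)):
        ax.text(j, i, cm[i, j], ha='center', va='center')
plt.show()
# -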
Neural Network Classifier.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Demonstration of the PyGromosTools implementation of the Gromos++ programs
#
#
# ## Ran_Box
# The program ran_box can generate liquid conformations from a single molecule topology and coordinate file. The following notebook shows an example of how to use this program.

import os
from pygromos.gromos.pyGromosPP.ran_box import ran_box
from pygromos.files.coord.cnf import Cnf

# + pycharm={"name": "#%%\n"}
# define the paths to the single molecule topology and the single molecule conformation file
root_dir = os.getcwd()+"/example_files/tool_examples"
in_cnf_path = root_dir +"/ran_box.cnf"
in_top_path = root_dir +"/ran_box.top"
# -

# define some properties for the new liquid conformation
nmols=150 #42 # the number of molecules in the liquid box
dens=1000 # the density of the liquid

# + pycharm={"name": "#%%\n"}
# execute ran_box
out_cnf = ran_box(in_top_path= in_top_path,
                  in_cnf_path= in_cnf_path,
                  out_cnf_path= root_dir+"/out_ran_"+str(nmols)+"mol.cnf",
                  nmolecule = nmols,
                  dens = dens)
# -

# write the conformation to a pdb file
cnfF = Cnf(out_cnf)
cnfF.write_pdb(out_cnf.replace(".cnf", ".pdb"))

# + pycharm={"name": "#%%\n"}
# print the new liquid conformation. The pdb can be visualized with software packages like pymol.
cnfF
# -

cnfF.visualize()
examples/example_PyGromosPP.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import numpy as np import pandas as pd import pickle # ## Load Processed Vectorized Data data = pickle.load(open('data_face_emotions.pickle', mode='rb')) data.keys() # ## Splitting Data X = np.array(data['data']) y = np.array(data['label']) X.shape,y.shape X = X.reshape(-1,128) X.shape from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X,y,train_size=0.8, random_state=0) X_train.shape,X_test.shape, y_train.shape, y_test.shape # ## Training Models from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import VotingClassifier from sklearn.metrics import classification_report, accuracy_score, f1_score # ### Logistic Regression model_log = LogisticRegression() model_log.fit(X_train,y_train) # + y_pred_train = model_log.predict(X_train) y_pred_test = model_log.predict(X_test) #accuracy score acc_train = accuracy_score(y_train,y_pred_train) acc_test = accuracy_score(y_test,y_pred_test) # f1-score f1_score_train = f1_score(y_train,y_pred_train,average='macro') f1_score_test= f1_score(y_test,y_pred_test,average='macro') # - # ### Logistic Regression Evaluation Metrics print(f'accuracy score for training set = {acc_train}') print(f'accuracy score for test set = {acc_test}') print(f'f1 score for training set = {f1_score_train}') print(f'f1 score score for test set = {f1_score_test}') #function to evaluate different trained models def evaluation(model,x_train,y_train,x_test,y_test): y_pred_train = model.predict(X_train) y_pred_test = model.predict(X_test) #accuracy score acc_train = accuracy_score(y_train,y_pred_train) acc_test = accuracy_score(y_test,y_pred_test) # f1-score f1_score_train = f1_score(y_train,y_pred_train,average='macro') f1_score_test= f1_score(y_test,y_pred_test,average='macro') print(f'accuracy score for training set = {acc_train}') print(f'accuracy score for test set = {acc_test}') print(f'f1 score for training set = {f1_score_train}') print(f'f1 score score for test set = {f1_score_test}') # ### SVM model_svc = SVC(probability=True) model_svc.fit(X_train,y_train) evaluation(model_svc,X_train,y_train,X_test,y_test) #svc performs better than Log # ### Random Forest model_rf = RandomForestClassifier() model_rf.fit(X_train,y_train) evaluation(model_rf,X_train,y_train,X_test,y_test) #overfit # ### Voting Classifier model_voting = VotingClassifier(estimators=[ ('log',LogisticRegression()), ('svm',SVC(probability=True)), ('rf',RandomForestClassifier()) ], voting='soft', weights=[2,3,1]) model_voting.fit(X_train,y_train) evaluation(model_voting,X_train,y_train,X_test,y_test) from sklearn.model_selection import GridSearchCV model_grid = GridSearchCV(model_voting, param_grid={ 'svm__C':[3,5,7,10], 'svm__gamma':[0.1,0.3,0.5], 'rf__n_estimators':[5,10,20], 'rf__max_depth':[3,5,7], 'voting':['soft','hard'] },scoring='accuracy',cv=3,n_jobs=1,verbose=2) model_grid.fit(X_train,y_train) model_grid.best_params_ # + #model_grid.best_estimator_ # - model_grid.best_score_ model_best_estimator = model_grid.best_estimator_ # ### Save Model pickle.dump(model_best_estimator,open('models/machinelearning_face_emotion2.pkl',mode='wb'))
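# ### Reusing the Saved Model
# A minimal sketch (not part of the original notebook) of how the pickled model could be loaded and applied later. It assumes the same 128-dimensional face-embedding vectors used for training; `sample_embedding` below is just a test row reused for illustration.

# +
loaded_model = pickle.load(open('models/machinelearning_face_emotion2.pkl', mode='rb'))

# One 128-d embedding, shaped (1, 128) as expected by scikit-learn estimators.
sample_embedding = X_test[0].reshape(1, -1)
print(loaded_model.predict(sample_embedding))
# -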
notebooks/Facial_Emotion_Model.ipynb
# +
# Dependencies
import pandas as pd
# import numpy as np
# import matplotlib.pyplot as plt
import random

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.compose import ColumnTransformer
from sklearn.svm import SVR, SVC
from sklearn.metrics import mean_squared_error, f1_score
# -

# Generate a unique seed
my_code = "Рахматуллаев и Тимуров"
seed_limit = 2 ** 32
my_seed = int.from_bytes(my_code.encode(), "little") % seed_limit

# Read the data from the file
example_data = pd.read_csv("datasets/Fish.csv")
example_data.head()

# Determine the size of the validation and test sets
val_test_size = round(0.2*len(example_data))
print(val_test_size)

# Create the training, validation and test sets
random_state = my_seed
train_val, test = train_test_split(example_data, test_size=val_test_size, random_state=random_state)
train, val = train_test_split(train_val, test_size=val_test_size, random_state=random_state)
print(len(train), len(val), len(test))

# +
# Rescale the values in the numeric columns to the interval [0,1].
# Only the training set is used to fit the scaler.
num_columns = ['Weight', 'Length1', 'Length2', 'Length3', 'Height', 'Width']
ct = ColumnTransformer(transformers=[('numerical', MinMaxScaler(), num_columns)], remainder='passthrough')
ct.fit(train)
# -

# Transform the values and convert the result to a DataFrame
sc_train = pd.DataFrame(ct.transform(train))
sc_test = pd.DataFrame(ct.transform(test))
sc_val = pd.DataFrame(ct.transform(val))

# Set the column names
column_names = num_columns + ['Species']
sc_train.columns = column_names
sc_test.columns = column_names
sc_val.columns = column_names
sc_train

# +
# Task 1 - analysis of support vector machines for a regression problem
# https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVR.html#sklearn.svm.SVR
# kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'}, default='rbf'
# Only for kernel = 'poly' : degree, int, default=3

# +
# Pick 4 numeric variables: three of them will be predictors, one the dependent variable
n = 4
labels = random.sample(num_columns, n)

y_label = labels[0]
x_labels = labels[1:]

print(x_labels)
print(y_label)

# +
# Select the required columns
x_train = sc_train[x_labels]
x_test = sc_test[x_labels]
x_val = sc_val[x_labels]

y_train = sc_train[y_label]
y_test = sc_test[y_label]
y_val = sc_val[y_label]
# -

x_train

# Create 4 models with different kernels: 'linear', 'poly', 'rbf', 'sigmoid'.
# Solve the resulting regression problem with the created models and compare their effectiveness.
# If necessary, tune the regularization parameter C : float, default=1.0
# Indicate which model solves the problem best.
r_model_1 = SVR(kernel='linear', C=0.8)
r_model_2 = SVR(kernel='poly', degree=3, C=1.0)
r_model_3 = SVR(kernel='rbf', C=1.0)
r_model_4 = SVR(kernel='sigmoid', C=0.6)

r_models = []
r_models.append(r_model_1)
r_models.append(r_model_2)
r_models.append(r_model_3)
r_models.append(r_model_4)

# Train the models
for model in r_models:
    model.fit(x_train, y_train)

# Evaluate the models on the validation set
mses = []
for model in r_models:
    val_pred = model.predict(x_val)
    mse = mean_squared_error(y_val, val_pred)
    mses.append(mse)
    print(mse)

# Choose the best model (the lower the MSE, the better)
i_min = mses.index(min(mses))
best_r_model = r_models[i_min]
best_r_model.get_params()

# Compute the error of the best model on the test set.
test_pred = best_r_model.predict(x_test)
mse = mean_squared_error(y_test, test_pred)
print(mse)

# +
# Task 2 - analysis of support vector machines for a classification problem
# https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html#sklearn.svm.SVC
# kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'}, default='rbf'
# Only for kernel = 'poly' : degree, int, default=3

# +
# Pick 2 numeric variables that will serve as the features of the data points
# The class label is always 'Species'
n = 2
x_labels = random.sample(num_columns, n)
y_label = 'Species'

print(x_labels)
print(y_label)

# +
# Select the required columns
x_train = sc_train[x_labels]
x_test = sc_test[x_labels]
x_val = sc_val[x_labels]

y_train = sc_train[y_label]
y_test = sc_test[y_label]
y_val = sc_val[y_label]
# -

x_train

# Create 4 models with different kernels: 'linear', 'poly', 'rbf', 'sigmoid'.
# Solve the resulting classification problem with the created models and compare their effectiveness.
# If necessary, tune the regularization parameter C : float, default=1.0
# Indicate which model solves the problem best.
c_model = SVC()
c_model_1 = SVC(kernel='linear', C=0.8)
c_model_2 = SVC(kernel='poly', degree=3, C=1.0)
c_model_3 = SVC(kernel='rbf', C=1.0)
c_model_4 = SVC(kernel='sigmoid', C=0.6)

c_models = []
c_models.append(c_model_1)
c_models.append(c_model_2)
c_models.append(c_model_3)
c_models.append(c_model_4)

# Train the models
for model in c_models:
    model.fit(x_train, y_train)

# Evaluate the models on the validation set
f1s = []
for model in c_models:
    val_pred = model.predict(x_val)
    f1 = f1_score(y_val, val_pred, average='weighted')
    f1s.append(f1)
    print(f1)

# Choose the best model (for the F1 score, higher is better, so take the maximum)
i_best = f1s.index(max(f1s))
best_c_model = c_models[i_best]
best_c_model.get_params()

# Compute the score of the best model on the test set.
test_pred = best_c_model.predict(x_test)
f1 = f1_score(y_test, test_pred, average='weighted')
print(f1)
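# As an alternative to the manual validation loop above, the kernel and the regularization
# parameter C can be chosen with cross-validated grid search. This is only an illustrative
# sketch (the parameter grid below is hypothetical), reusing x_train and y_train from the
# classification task.

# +
from sklearn.model_selection import GridSearchCV

param_grid = {'kernel': ['linear', 'poly', 'rbf', 'sigmoid'],
              'C': [0.5, 1.0, 2.0]}
grid = GridSearchCV(SVC(), param_grid, scoring='f1_weighted', cv=5)
grid.fit(x_train, y_train)

print(grid.best_params_)
print(grid.best_score_)
# -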
2021 Весенний семестр/Практическое задание 2/ПЗ-2_Рахматуллаев.Ж.Ж_Тимуров.У.Т._ИСТ-18-2/ПЗ-2_Рахматуллаев.Ж.Ж_Тимуров.У.Т._ИСТ-18-2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- load('../sim_data/block_compLasso.RData') # + dir = '../sim_data' dim.list = list() size = c(50, 100, 500, 1000) idx = 0 for (P in size){ for (N in size){ idx = idx + 1 dim.list[[idx]] = c(P=P, N=N) } } rou.list = seq(0.1, 0.9, 0.2) files = NULL for (rou in rou.list){ for (dim in dim.list){ p = dim[1] n = dim[2] files = cbind(files, paste0(dir, '/sim_block_corr', rou, paste('P', p, 'N', n, sep='_'), '.RData', sep='')) } } # - avg_FDR = NULL for (i in 1:length(files)){ sim_file = files[i] load(sim_file, dat <- new.env()) sub = dat$sim_array[[i]] p = sub$p # take true values from 1st replicate of each simulated data coef = sub$beta coef.true = which(coef != 0) tt = results_block_compLasso[[i]]$Stab.table FDR = NULL for (r in 1:nrow(tt)){ FDR = c(FDR, length(setdiff(which(tt[r, ] !=0), coef.true))/sum(tt[r, ])) } avg_FDR = c(avg_FDR, mean(FDR, na.rm=T)) } # + table_block = NULL tmp_num_select = rep(0, length(results_block_compLasso)) for (i in 1:length(results_block_compLasso)){ table_block = rbind(table_block, results_block_compLasso[[i]][c('n', 'p', 'rou', 'FP', 'FN', 'MSE', 'Stab')]) tmp_num_select[i] = mean(rowSums(results_block_compLasso[[i]]$Stab.table)) } table_block = as.data.frame(table_block) table_block$num_select = tmp_num_select table_block$FDR = round(avg_FDR,2) # - head(table_block) # + # export result result.table_block <- apply(table_block,2,as.character) rownames(result.table_block) = rownames(table_block) result.table_block = as.data.frame(result.table_block) # extract numbers only for 'n' & 'p' result.table_block$n = tidyr::extract_numeric(result.table_block$n) result.table_block$p = tidyr::extract_numeric(result.table_block$p) result.table_block$ratio = result.table_block$p / result.table_block$n result.table_block = result.table_block[c('n', 'p', 'rou', 'ratio', 'Stab', 'MSE', 'FP', 'FN', 'num_select', 'FDR')] colnames(result.table_block)[1:4] = c('N', 'P', 'Corr', 'Ratio') # - # convert interested measurements to be numeric result.table_block$Stab = as.numeric(as.character(result.table_block$Stab)) result.table_block$MSE_mean = as.numeric(substr(result.table_block$MSE, start=1, stop=4)) result.table_block$FP_mean = as.numeric(substr(result.table_block$FP, start=1, stop=4)) result.table_block$FN_mean = as.numeric(substr(result.table_block$FN, start=1, stop=4)) result.table_block$FN_mean[is.na(result.table_block$FN_mean)] = 0 result.table_block$num_select = as.numeric(as.character(result.table_block$num_select)) # check whether missing values exists result.table_block[rowSums(is.na(result.table_block)) > 0,] # recover values result.table_block$FP_mean[is.na(result.table_block$FP_mean)] = 6 result.table_block$MSE_mean[is.na(result.table_block$MSE_mean)] = 1 result.table_block[c(21,26,51), ] head(result.table_block) tail(result.table_block) ## export write.table(result.table_block, '../results_summary/sim_block_compLasso.txt', sep='\t', row.names=F) # + library(ggplot2) library(ggpubr) result.table_block$N = as.factor(result.table_block$N) fig_block_stab = ggplot(result.table_block, aes(x=P, y=Stab, color=N)) + geom_point(aes(size = Corr, alpha=Corr)) + theme(legend.position = "none") + scale_size_discrete(range = c(1,4)) + scale_alpha_discrete(range = c(1, 0.4)) + ylab('Stability') fig_block_mse = ggplot(result.table_block, aes(x=P, y=MSE_mean, color=N)) + 
geom_point(aes(size = Corr, alpha=Corr)) + theme(legend.position="none") + scale_size_discrete(range = c(1,4)) + scale_alpha_discrete(range = c(1, 0.4)) + ylab('MSE') fig_block_fp = ggplot(result.table_block, aes(x=P, y=FP_mean, color=N)) + geom_point(aes(size = Corr, alpha=Corr)) + theme(legend.position = "none") + scale_size_discrete(range = c(1,4)) + scale_alpha_discrete(range = c(1, 0.4)) + ylab('False Positives') fig_block_fn = ggplot(result.table_block, aes(x=P, y=FN_mean, color=N)) + geom_point(aes(size = Corr, alpha=Corr)) + theme(legend.position = "none") + scale_size_discrete(range = c(1,4)) + scale_alpha_discrete(range = c(1, 0.4)) + ylab('False Negatives') fig = ggarrange(fig_block_stab, fig_block_mse, fig_block_fp, fig_block_fn, ncol=2, nrow=2, common.legend = TRUE, legend="right") fig = annotate_figure(fig, top = text_grob("Block_compLasso")) ggexport(fig, filename = "../figures_sim/figure_block_compLasso.pdf", height=6, width=6) # - result.table_block[with(result.table_block, order(N, P, Corr)),]
simulations/notebooks_simulations/sim_block_compLasso.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Riskfolio-Lib Tutorial: # <br>__[Financionerioncios](https://financioneroncios.wordpress.com)__ # <br>__[Orenji](https://www.orenj-i.net)__ # <br>__[Riskfolio-Lib](https://riskfolio-lib.readthedocs.io/en/latest/)__ # <br>__[<NAME>](https://www.linkedin.com/in/dany-cajas/)__ # <a href='https://ko-fi.com/B0B833SXD' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://cdn.ko-fi.com/cdn/kofi1.png?v=2' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a> # # ## Tutorial 5: Multi Assets Algorithmic Trading Backtesting with Backtrader # # ## 1. Downloading the data: # + import pandas as pd import datetime import yfinance as yf import backtrader as bt import numpy as np import warnings warnings.filterwarnings("ignore") # Date range start = '2010-01-01' end = '2020-12-31' # Tickers of assets assets = ['JCI', 'TGT', 'CMCSA', 'CPB', 'MO', 'APA', 'MMC', 'JPM', 'ZION', 'PSA', 'BAX', 'BMY', 'LUV', 'PCAR', 'TXT', 'TMO', 'DE', 'MSFT', 'HPQ', 'SEE', 'VZ', 'CNP', 'NI', 'T', 'BA','SPY'] assets.sort() # Downloading data prices = yf.download(assets, start=start, end=end) display(prices.head()) prices = prices.dropna() # + ############################################################ # Showing data ############################################################ display(prices.head()) # - # ## 2. Building the Backtest Function with Backtrader # # ### 2.1 Defining Backtest Function # + ############################################################ # Defining the backtest function ############################################################ def backtest(datas, strategy, start, end, plot=False, **kwargs): cerebro = bt.Cerebro() # Here we add transaction costs and other broker costs cerebro.broker.setcash(1000000.0) cerebro.broker.setcommission(commission=0.005) # Commission 0.5% cerebro.broker.set_slippage_perc(0.005, # Slippage 0.5% slip_open=True, slip_limit=True, slip_match=True, slip_out=False) for data in datas: cerebro.adddata(data) # Here we add the indicators that we are going to store cerebro.addanalyzer(bt.analyzers.SharpeRatio, riskfreerate=0.0) cerebro.addanalyzer(bt.analyzers.Returns) cerebro.addanalyzer(bt.analyzers.DrawDown) cerebro.addstrategy(strategy, **kwargs) cerebro.addobserver(bt.observers.Value) cerebro.addobserver(bt.observers.DrawDown) results = cerebro.run(stdstats=False) if plot: cerebro.plot(iplot=False, start=start, end=end) return (results[0].analyzers.drawdown.get_analysis()['max']['drawdown'], results[0].analyzers.returns.get_analysis()['rnorm100'], results[0].analyzers.sharperatio.get_analysis()['sharperatio']) # - # ### 2.2 Building Data Feeds for Backtesting # + ############################################################ # Create objects that contain the prices of assets ############################################################ # Creating Assets bt.feeds assets_prices = [] for i in assets: if i != 'SPY': prices_ = prices.drop(columns='Adj Close').loc[:, (slice(None), i)].dropna() prices_.columns = ['Close', 'High', 'Low', 'Open', 'Volume'] assets_prices.append(bt.feeds.PandasData(dataname=prices_, plot=False)) # Creating Benchmark bt.feeds prices_ = prices.drop(columns='Adj Close').loc[:, (slice(None), 'SPY')].dropna() prices_.columns = ['Close', 'High', 'Low', 'Open', 'Volume'] benchmark = bt.feeds.PandasData(dataname=prices_, 
plot=False) display(prices_.head()) # - # ## 3. Building Strategies with Backtrader # # ### 3.1 Buy and Hold SPY # + ############################################################ # Building the Buy and Hold strategy ############################################################ class BuyAndHold(bt.Strategy): def __init__(self): self.counter = 0 def next(self): if self.counter >= 1004: if self.getposition(self.data).size == 0: self.order_target_percent(self.data, target=0.99) self.counter += 1 # - # If you have an error related to 'warnings' modules when you try to plot, you must modify the 'locator.py' file from backtrader library following the instructions in this __[link](https://community.backtrader.com/topic/981/importerror-cannot-import-name-min_per_hour-when-trying-to-plot/8)__. # + ############################################################ # Run the backtest for the selected period ############################################################ # %matplotlib inline import matplotlib.pyplot as plt plt.rcParams["figure.figsize"] = (10, 6) # (w, h) plt.plot() # We need to do this to avoid errors in inline plot start = 1004 end = prices.shape[0] - 1 dd, cagr, sharpe = backtest([benchmark], BuyAndHold, start=start, end=end, plot=True) # + ############################################################ # Show Buy and Hold Strategy Stats ############################################################ print(f"Max Drawdown: {dd:.2f}%") print(f"CAGR: {cagr:.2f}%") print(f"Sharpe: {sharpe:.3f}") # - # ### 3.2 Rebalancing Quarterly using Riskfolio-Lib # + ############################################################ # Calculate assets returns ############################################################ pd.options.display.float_format = '{:.4%}'.format data = prices.loc[:, ('Adj Close', slice(None))] data.columns = assets data = data.drop(columns=['SPY']).dropna() returns = data.pct_change().dropna() display(returns.head()) # + ############################################################ # Selecting Dates for Rebalancing ############################################################ # Selecting last day of month of available data index = returns.groupby([returns.index.year, returns.index.month]).tail(1).index index_2 = returns.index # Quarterly Dates index = [x for x in index if float(x.month) % 3.0 == 0 ] # Dates where the strategy will be backtested index_ = [index_2.get_loc(x) for x in index if index_2.get_loc(x) > 1000] # + ############################################################ # Building Constraints ############################################################ asset_classes = {'Assets': ['JCI','TGT','CMCSA','CPB','MO','APA','MMC','JPM', 'ZION','PSA','BAX','BMY','LUV','PCAR','TXT','TMO', 'DE','MSFT','HPQ','SEE','VZ','CNP','NI','T','BA'], 'Industry': ['Consumer Discretionary','Consumer Discretionary', 'Consumer Discretionary', 'Consumer Staples', 'Consumer Staples','Energy','Financials', 'Financials','Financials','Financials', 'Health Care','Health Care','Industrials','Industrials', 'Industrials','Health care','Industrials', 'Information Technology','Information Technology', 'Materials','Telecommunications Services','Utilities', 'Utilities','Telecommunications Services','Financials']} asset_classes = pd.DataFrame(asset_classes) asset_classes = asset_classes.sort_values(by=['Assets']) constraints = {'Disabled': [False, False, False], 'Type': ['All Assets', 'All Classes', 'All Classes'], 'Set': ['', 'Industry', 'Industry'], 'Position': ['', '', ''], 'Sign': ['<=', '<=', '>='], 'Weight': [0.10, 0.20, 
0.03], 'Type Relative': ['', '', ''], 'Relative Set': ['', '', ''], 'Relative': ['', '', ''], 'Factor': ['', '', '']} constraints = pd.DataFrame(constraints) display(constraints) # + ############################################################ # Building constraint matrixes for Riskfolio Lib ############################################################ import riskfolio.ConstraintsFunctions as cf A, B = cf.assets_constraints(constraints, asset_classes) # + # %%time ############################################################ # Building a loop that estimate optimal portfolios on # rebalancing dates ############################################################ import riskfolio.Portfolio as pf models = {} # rms = ['MV', 'MAD', 'MSV', 'FLPM', 'SLPM', # 'CVaR', 'WR', 'MDD', 'ADD', 'CDaR'] rms = ['MV', 'CVaR', 'WR', 'CDaR'] for j in rms: weights = pd.DataFrame([]) for i in index_: Y = returns.iloc[i-1000:i,:] # taking last 4 years (250 trading days per year) # Building the portfolio object port = pf.Portfolio(returns=Y) # Add portfolio constraints port.ainequality = A port.binequality = B # Calculating optimum portfolio # Select method and estimate input parameters: method_mu='hist' # Method to estimate expected returns based on historical data. method_cov='hist' # Method to estimate covariance matrix based on historical data. port.assets_stats(method_mu=method_mu, method_cov=method_cov, d=0.94) # Estimate optimal portfolio: port.solvers = ['MOSEK'] port.alpha = 0.05 model='Classic' # Could be Classic (historical), BL (Black Litterman) or FM (Factor Model) rm = j # Risk measure used, this time will be variance obj = 'Sharpe' # Objective function, could be MinRisk, MaxRet, Utility or Sharpe hist = True # Use historical scenarios for risk measures that depend on scenarios rf = 0 # Risk free rate l = 0 # Risk aversion factor, only useful when obj is 'Utility' w = port.optimization(model=model, rm=rm, obj=obj, rf=rf, l=l, hist=hist) if w is None: w = weights.tail(1).T weights = pd.concat([weights, w.T], axis = 0) models[j] = weights.copy() models[j].index = index_ # + ############################################################ # Building the Asset Allocation Class ############################################################ class AssetAllocation(bt.Strategy): def __init__(self): j = 0 for i in assets: setattr(self, i, self.datas[j]) j += 1 self.counter = 0 def next(self): if self.counter in weights.index.tolist(): for i in assets: w = weights.loc[self.counter, i] self.order_target_percent(getattr(self, i), target=w) self.counter += 1 # + ############################################################ # Backtesting Mean Variance Strategy ############################################################ assets = returns.columns.tolist() weights = models['MV'] dd, cagr, sharpe = backtest(assets_prices, AssetAllocation, start=start, end=end, plot=True) # + ############################################################ # Show Mean Variance Strategy Stats ############################################################ print(f"Max Drawdown: {dd:.2f}%") print(f"CAGR: {cagr:.2f}%") print(f"Sharpe: {sharpe:.3f}") # + ############################################################ # Plotting the composition of the last MV portfolio ############################################################ import riskfolio.PlotFunctions as plf w = pd.DataFrame(models['MV'].iloc[-1,:]) ax = plf.plot_pie(w=w, title='Sharpe Mean Variance', others=0.05, nrow=25, cmap = "tab20", height=6, width=10, ax=None) # + 
############################################################ # Composition per Industry ############################################################ w_classes = pd.concat([asset_classes.set_index('Assets'), w], axis=1) w_classes = w_classes.groupby(['Industry']).sum() w_classes.columns = ['weights'] display(w_classes) # + ############################################################ # Backtesting Mean CVaR Strategy ############################################################ assets = returns.columns.tolist() weights = models['CVaR'] dd, cagr, sharpe = backtest(assets_prices, AssetAllocation, start=start, end=end, plot=True) # + ############################################################ # Show CVaR Strategy Stats ############################################################ print(f"Max Drawdown: {dd:.2f}%") print(f"CAGR: {cagr:.2f}%") print(f"Sharpe: {sharpe:.3f}") # + ############################################################ # Plotting the composition of the last CVaR portfolio ############################################################ w = pd.DataFrame(models['CVaR'].iloc[-1,:]) ax = plf.plot_pie(w=w, title='Sharpe Mean CVaR', others=0.05, nrow=25, cmap = "tab20", height=6, width=10, ax=None) # + ############################################################ # Composition per Industry ############################################################ w_classes = pd.concat([asset_classes.set_index('Assets'), w], axis=1) w_classes = w_classes.groupby(['Industry']).sum() w_classes.columns = ['weights'] display(w_classes) # + ############################################################ # Backtesting Mean Worst Realization Strategy ############################################################ assets = returns.columns.tolist() weights = models['WR'] dd, cagr, sharpe = backtest(assets_prices, AssetAllocation, start=start, end=end, plot=True) # + ############################################################ # Show Worst Realization Strategy Stats ############################################################ print(f"Max Drawdown: {dd:.2f}%") print(f"CAGR: {cagr:.2f}%") print(f"Sharpe: {sharpe:.3f}") # + ############################################################ # Plotting the composition of the last WR portfolio ############################################################ w = pd.DataFrame(models['WR'].iloc[-1,:]) ax = plf.plot_pie(w=w, title='Sharpe Mean WR', others=0.05, nrow=25, cmap = "tab20", height=6, width=10, ax=None) # + ############################################################ # Composition per Industry ############################################################ w_classes = pd.concat([asset_classes.set_index('Assets'), w], axis=1) w_classes = w_classes.groupby(['Industry']).sum() w_classes.columns = ['weights'] display(w_classes) # + ############################################################ # Backtesting Mean CDaR Strategy ############################################################ assets = returns.columns.tolist() weights = models['CDaR'] dd, cagr, sharpe = backtest(assets_prices, AssetAllocation, start=start, end=end, plot=True) # + ############################################################ # Show CDaR Strategy Stats ############################################################ print(f"Max Drawdown: {dd:.2f}%") print(f"CAGR: {cagr:.2f}%") print(f"Sharpe: {sharpe:.3f}") # + ############################################################ # Plotting the composition of the last CDaR portfolio ############################################################ w = 
pd.DataFrame(models['CDaR'].iloc[-1,:]) ax = plf.plot_pie(w=w, title='Sharpe Mean CDaR', others=0.05, nrow=25, cmap = "tab20", height=6, width=10, ax=None) # + ############################################################ # Composition per Industry ############################################################ w_classes = pd.concat([asset_classes.set_index('Assets'), w], axis=1) w_classes = w_classes.groupby(['Industry']).sum() w_classes.columns = ['weights'] display(w_classes) # - # ## 4. Conclusion # # In this example, the best strategy in terms of performance is __WR__ . The ranking of strategies in base of performance follows: # # 1. WR (7.03%): Worst Scenario or Minimax Model. # 1. SPY (6.53%): Buy and Hold SPY. # 1. CVaR (5.73%): Conditional Value at Risk. # 1. MV (5.68%): Mean Variance. # 1. CDaR (4.60%): Conditional Drawdown at Risk. # # On the other hand, the best strategy in terms of Sharpe Ratio is __MV__ . The ranking of strategies in base of Sharpe Ratio follows: # # 1. MV (0.701): Mean Variance. # 1. CVaR (0.694): Conditional Value at Risk. # 1. WR (0.681): Worst Scenario or Minimax Model. # 1. SPY (0.679): Buy and Hold SPY. # 1. CDaR (0.622): Conditional Drawdown at Risk.
examples/Tutorial 5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + Test params: 2, 2, 0.05, 1, 0.5 /Users/fanxu/anaconda2/envs/my-rdkit-env/lib/python2.7/site-packages/ipykernel_launcher.py:18: FutureWarning: 'argmin' is deprecated. Use 'idxmin' instead. The behavior of 'argmin' will be corrected to return the positional minimum in the future. Use 'series.values.argmin' to get the position of the minimum now. test-merror 0.116666 for 29 rounds Test params: 2, 2, 0.05, 1, 1 test-merror 0.1085652 for 116 rounds Test params: 2, 2, 0.05, 2, 0.5 test-merror 0.1046756 for 132 rounds Test params: 2, 2, 0.05, 2, 1 test-merror 0.1182866 for 17 rounds Test params: 2, 2, 0.1, 1, 0.5 test-merror 0.1004634 for 87 rounds Test params: 2, 2, 0.1, 1, 1 test-merror 0.1030542 for 95 rounds Test params: 2, 2, 0.1, 2, 0.5 test-merror 0.1017588 for 100 rounds Test params: 2, 2, 0.1, 2, 1 test-merror 0.1150464 for 27 rounds Test params: 2, 2, 0.15, 1, 0.5 test-merror 0.1007872 for 66 rounds Test params: 2, 2, 0.15, 1, 1 test-merror 0.1011102 for 85 rounds Test params: 2, 2, 0.15, 2, 0.5 test-merror 0.1166668 for 11 rounds Test params: 2, 2, 0.15, 2, 1 test-merror 0.0998146 for 93 rounds Test params: 2, 2, 0.2, 1, 0.5 test-merror 0.1017596 for 38 rounds Test params: 2, 2, 0.2, 1, 1 test-merror 0.1001388 for 75 rounds Test params: 2, 2, 0.2, 2, 0.5 test-merror 0.0962486 for 74 rounds Test params: 2, 2, 0.2, 2, 1 test-merror 0.0972226 for 82 rounds Test params: 2, 4, 0.05, 1, 0.5 test-merror 0.1049984 for 31 rounds Test params: 2, 4, 0.05, 1, 1 test-merror 0.1066198 for 34 rounds Test params: 2, 4, 0.05, 2, 0.5 test-merror 0.0981958 for 108 rounds Test params: 2, 4, 0.05, 2, 1 test-merror 0.105648 for 64 rounds Test params: 2, 4, 0.1, 1, 0.5 test-merror 0.093983 for 68 rounds Test params: 2, 4, 0.1, 1, 1 test-merror 0.0975456 for 42 rounds Test params: 2, 4, 0.1, 2, 0.5 test-merror 0.100463 for 44 rounds Test params: 2, 4, 0.1, 2, 1 test-merror 0.0965744 for 65 rounds Test params: 2, 4, 0.15, 1, 0.5 test-merror 0.0933316 for 49 rounds Test params: 2, 4, 0.15, 1, 1 test-merror 0.0962502 for 40 rounds Test params: 2, 4, 0.15, 2, 0.5 test-merror 0.094306 for 54 rounds Test params: 2, 4, 0.15, 2, 1 test-merror 0.0949542 for 41 rounds Test params: 2, 4, 0.2, 1, 0.5 test-merror 0.0952776 for 24 rounds Test params: 2, 4, 0.2, 1, 1 test-merror 0.0956006 for 31 rounds Test params: 2, 4, 0.2, 2, 0.5 test-merror 0.0985182 for 34 rounds Test params: 2, 4, 0.2, 2, 1 test-merror 0.0981946 for 28 rounds Test params: 2, 6, 0.05, 1, 0.5 test-merror 0.1014342 for 30 rounds Test params: 2, 6, 0.05, 1, 1 test-merror 0.1040268 for 20 rounds Test params: 2, 6, 0.05, 2, 0.5 test-merror 0.1040276 for 39 rounds Test params: 2, 6, 0.05, 2, 1 test-merror 0.1014362 for 58 rounds Test params: 2, 6, 0.1, 1, 0.5 test-merror 0.0956018 for 59 rounds Test params: 2, 6, 0.1, 1, 1 test-merror 0.095602 for 40 rounds Test params: 2, 6, 0.1, 2, 0.5 test-merror 0.0985198 for 40 rounds Test params: 2, 6, 0.1, 2, 1 test-merror 0.103704 for 18 rounds Test params: 2, 6, 0.15, 1, 0.5 test-merror 0.0965732 for 30 rounds Test params: 2, 6, 0.15, 1, 1 test-merror 0.0949526 for 31 rounds Test params: 2, 6, 0.15, 2, 0.5 test-merror 0.0975456 for 33 rounds Test params: 2, 6, 0.15, 2, 1 test-merror 0.1017588 for 25 rounds Test params: 2, 6, 0.2, 1, 0.5 test-merror 0.0962482 for 21 rounds Test params: 2, 6, 0.2, 
1, 1 test-merror 0.093334 for 25 rounds Test params: 2, 6, 0.2, 2, 0.5 test-merror 0.097548 for 36 rounds Test params: 2, 6, 0.2, 2, 1 test-merror 0.097222 for 39 rounds Test params: 6, 2, 0.05, 1, 0.5 test-merror 0.116666 for 29 rounds Test params: 2, 2, 0.05, 1, 1 test-merror 0.1085652 for 116 rounds Test params: 2, 2, 0.05, 2, 0.5 --------------------------------------------------------------------------- KeyboardInterrupt Traceback (most recent call last) <ipython-input-37-f47408f9405e> in <module>() 14 bst = xgb.train(param, dtrain, num_round) 15 ---> 16 cv = xgb.cv(param, dtrain, 999, nfold=5, early_stopping_rounds=10) 17 mean = cv['test-merror-mean'].min() 18 boost_rounds = cv['test-merror-mean'].argmin() /Users/fanxu/anaconda2/envs/my-rdkit-env/lib/python2.7/site-packages/xgboost/training.pyc in cv(params, dtrain, num_boost_round, nfold, stratified, folds, metrics, obj, feval, maximize, early_stopping_rounds, fpreproc, as_pandas, verbose_eval, show_stdv, seed, callbacks, shuffle) 405 for fold in cvfolds: 406 fold.update(i, obj) --> 407 res = aggcv([f.eval(i, feval) for f in cvfolds]) 408 409 for key, mean, std in res: /Users/fanxu/anaconda2/envs/my-rdkit-env/lib/python2.7/site-packages/xgboost/training.pyc in eval(self, iteration, feval) 220 def eval(self, iteration, feval): 221 """"Evaluate the CVPack for one iteration.""" --> 222 return self.bst.eval_set(self.watchlist, iteration, feval) 223 224 /Users/fanxu/anaconda2/envs/my-rdkit-env/lib/python2.7/site-packages/xgboost/core.pyc in eval_set(self, evals, iteration, feval) 949 if not isinstance(d[1], STRING_TYPES): 950 raise TypeError('expected string, got {}'.format(type(d[1]).__name__)) --> 951 self._validate_features(d[0]) 952 953 dmats = c_array(ctypes.c_void_p, [d[0].handle for d in evals]) /Users/fanxu/anaconda2/envs/my-rdkit-env/lib/python2.7/site-packages/xgboost/core.pyc in _validate_features(self, data) 1271 else: 1272 # Booster can't accept data with different feature names -> 1273 if self.feature_names != data.feature_names: 1274 dat_missing = set(self.feature_names) - set(data.feature_names) 1275 my_missing = set(data.feature_names) - set(self.feature_names) /Users/fanxu/anaconda2/envs/my-rdkit-env/lib/python2.7/site-packages/xgboost/core.pyc in feature_names(self) 629 """ 630 if self._feature_names is None: --> 631 return ['f{0}'.format(i) for i in range(self.num_col())] 632 else: 633 return self._feature_names KeyboardInterrupt:
fangli/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import matplotlib.pyplot as plt # %load_ext autoreload # %autoreload 2 # %matplotlib inline # - dim = 4 structure = np.zeros((dim, dim), dtype=int) for i in range(dim): for j in range(i+1): structure[i, j] = i+1 structure from depimpact.model import Vine vine = Vine(structure) vine.build_new() # + class Tree(object): def __init__(self, dim, rank): self.dim = dim self.rank = rank self.nodes = [] self.edges = [] class Vine(object): """ """ def __init__(self, structure): self.structure = structure dim = structure.shape[0] trees = [] for i in range(dim-1): tree = Tree(dim=dim, rank=i) trees.append(tree) self.trees = trees self.dim = dim def build(self): dim = self.dim structure = self.structure tmp = structure.diagonal().tolist() self.trees[0].nodes = [([k], []) for k in tmp] # Explore the structure matrix for col in range(dim-1): # The other pairs rows = range(1+col, dim)[::-1] for k_tree, row in enumerate(rows): tree = self.trees[k_tree] i = structure[col, col] j = structure[row, col] conditionned = [i, j] conditionning = structure[row+1:, col].tolist() edge = (conditionned, conditionning) tree.edges.append(edge) for k_tree in range(dim-2): self.trees[k_tree+1].nodes = self.trees[k_tree].edges vine = Vine(structure) vine.build() # + fig, ax = plt.subplots(figsize=(10, 7)) fontsize = 12 radius = 0.3 trees = vine.trees for k_tree, tree in enumerate(trees): for k_node, node in enumerate(tree.nodes): xy = (k_node+k_tree*0.5+1, dim-k_tree) circle = plt.Circle(xy, radius, alpha=0.4, facecolor='r', edgecolor='k', ls='-', linewidth=2, fill=True) if k_tree == 0: text = '{}'.format(node[0][0]) elif k_tree == 1: text = '{},{}'.format(node[0][0], node[0][1]) else: text = '{},{}|{}'.format(node[0][0], node[0][1], node[1][0]) ax.text(xy[0]-len(text)/fontsize/2, xy[1]-0.05, text) ax.add_artist(circle) ax.set_xlim(0, dim+1) ax.set_ylim(1, dim+1) fig.tight_layout() # - class Tree(object): def __init__(self, structure): pass trees = T nodes = list(range(1, dim+1)) for col in range(dim-1): tree = {} tree['nodes'] = nodes edges = [] for row_j in range(1+col, dim)[::-1]: i = structure[col, col] j = structure[row_j, col] conditionned = [i, j] conditionning = structure[row_j+1:, col].tolist() edge = (conditionned, conditionning) edges.append(edge) tree['edges'] = edges trees.append(tree) print(edges) tree_nodes = list(range(1, dim+1)) for i_tree in range(dim-1): conditionned_set = [] conditionning_set = [] for k in range(dim-i_tree-1): i = structure[k, k] row_j = dim-i_tree-1 j = structure[row_j, k] conditionned_set.append([i, j]) for l in range(dim-row_j): conditionning_set.append(structure[row_j-l, k]) tree_edges = [conditionned_set, conditionning_set] print(tree_edges)
notebooks/vine-structure-plot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import pandas as pd import matplotlib.pyplot as plt import missingno as miss from scipy import stats import seaborn as sns from sklearn.model_selection import KFold from sklearn.ensemble import RandomForestClassifier from sklearn import svm from sklearn.model_selection import cross_val_score,cross_val_predict from sklearn.model_selection import StratifiedKFold from sklearn.metrics import accuracy_score from sklearn.exceptions import FitFailedWarning from sklearn import preprocessing from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.linear_model import SGDClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import VotingClassifier from sklearn.model_selection import train_test_split from sklearn.svm import LinearSVC from sklearn.preprocessing import StandardScaler from sklearn.pipeline import make_pipeline from sklearn.ensemble import StackingClassifier from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV from sklearn.ensemble import RandomForestRegressor import warnings warnings.simplefilter(action='ignore', category=FutureWarning) train=pd.read_csv("/Users/saranlalmokandasan/Downloads/train_ctrUa4K.csv") test=pd.read_csv("/Users/saranlalmokandasan/Downloads/test_lAUu6dG.csv") testcol=test["Loan_ID"] train=train[train.LoanAmount<600] train.Self_Employed.fillna("No",inplace=True) train.Credit_History.fillna("1.0",inplace=True) train.loc[train.Credit_History!=0,'Credit_History']=1 train.Loan_Amount_Term.fillna("360",inplace=True) train.Dependents.fillna("0",inplace=True) train.Dependents.replace("3+",3,inplace=True) train.Gender.fillna('Male',inplace=True) train.Married.fillna('Y',inplace=True) train.LoanAmount.fillna(np.median(train.LoanAmount),inplace=True) train.insert(10,"Loan_Risk","other") train.loc[(train["Credit_History"]==1)&(train["Dependents"]=="0")&(train["Self_Employed"]=="No")&(train["Property_Area"]=="Semiurban"),"Loan_Risk"]=0 train.loc[train["Loan_Risk"]=="other","Loan_Risk"]=1 train.drop("Loan_ID",axis=1,inplace=True) train.insert(2,"Debt_Income",(train.LoanAmount.astype(float)/(train.ApplicantIncome+train.CoapplicantIncome))) ''' train.insert(5,"Total_Income",train["ApplicantIncome"]+train["CoapplicantIncome"]) train.insert(4,"Status","other") train.loc[train["Total_Income"]>=20000,"Status"]="Very High" train.loc[(train["Total_Income"]>=10000)&(train["Total_Income"]<20000),"Status"]="High" train.loc[(train["Total_Income"]>=7000)&(train["Total_Income"]<10000),"Status"]="Medium" train.loc[(train["Total_Income"]>=5000)&(train["Total_Income"]<7000),"Status"]="Low" train.loc[(train["Total_Income"]>=1400)&(train["Total_Income"]<5000),"Status"]="Very Low" train.drop(columns=["ApplicantIncome","CoapplicantIncome"],axis=1,inplace=True) ''' categorical_feature_mask = train.dtypes==object categorical_cols = train.columns[categorical_feature_mask].tolist() le = preprocessing.LabelEncoder() train[categorical_cols] = train[categorical_cols].apply(lambda col: le.fit_transform(col.astype(str))) test.Self_Employed.fillna("No",inplace=True) test.Gender.fillna('Male',inplace=True) test.Married.fillna('Y',inplace=True) test.Dependents.fillna("0",inplace=True) test.Loan_Amount_Term.fillna("360",inplace=True) 
test.Credit_History.fillna(1.0,inplace=True) train.loc[train.Credit_History!=0,'Credit_History']=1 test.LoanAmount.fillna(np.mean(test.LoanAmount),inplace=True) test.insert(10,"Loan_Risk","other") test.loc[(test["Credit_History"]==1)&(test["Dependents"]=="0")&(test["Self_Employed"]=="No")&(train["Property_Area"]=="Semiurban"),"Loan_Risk"]=0 test.loc[test["Loan_Risk"]=="other","Loan_Risk"]=1 test.drop("Loan_ID",axis=1,inplace=True) test.insert(2,"Debt_Income",(test.LoanAmount/(test.ApplicantIncome+(test.CoapplicantIncome)))) ''' test.insert(5,"Total_Income",test["ApplicantIncome"]+test["CoapplicantIncome"]) test.insert(4,"Status","other") test.loc[test["Total_Income"]>=20000,"Status"]="Very High" test.loc[(test["Total_Income"]>=10000)&(test["Total_Income"]<20000),"Status"]="High" test.loc[(test["Total_Income"]>=7000)&(test["Total_Income"]<10000),"Status"]="Medium" test.loc[(test["Total_Income"]>=5000)&(test["Total_Income"]<7000),"Status"]="Low" test.loc[(test["Total_Income"]>=1400)&(test["Total_Income"]<5000),"Status"]="Very Low" test.drop(columns=["ApplicantIncome","CoapplicantIncome"],axis=1,inplace=True) ''' categorical_feature_masks=test.dtypes==object categorical_cols=test.columns[categorical_feature_masks].tolist() test[categorical_cols]=test[categorical_cols].apply(lambda col:le.fit_transform(col.astype(str))) X=train.iloc[:,0:13] y=train.Loan_Status classifier = RandomForestClassifier(n_estimators=1000, random_state=0) accuracy = cross_val_score(classifier, X, y, scoring='accuracy', cv = 6) print("The Accuracy of Random Forrest is : {}".format(accuracy.mean())) lr=LogisticRegression(solver='liblinear',max_iter = 1000) accuracy=cross_val_score(lr,X,y,scoring='accuracy',cv=6) print("The Accuracy of Logistic Regression is : {}".format(accuracy.mean())) nm=GaussianNB() accuracy=cross_val_score(nm,X,y,scoring='accuracy',cv=6) print("The Accuracy of Naive Bayes is : {}".format(accuracy.mean())) classi=SGDClassifier() accuracy=cross_val_score(classi,X,y,scoring='accuracy',cv=6) print("The Accuracy of SGDClassifier is : {}".format(accuracy.mean())) neigh = KNeighborsClassifier(n_neighbors=3) accuracy=cross_val_score(neigh,X,y,scoring='accuracy',cv=6) print("The Accuracy of knn is : {}".format(accuracy.mean())) x_train,x_test,y_train,y_test=train_test_split(X,y,test_size=0.2,stratify=y) model1 = LogisticRegression(random_state=1,solver='liblinear',max_iter = 1000) model2 = RandomForestClassifier(random_state=1) model3= nm=GaussianNB() model = VotingClassifier(estimators=[('lr', model1), ('dt', model2),('nei',model3)], voting='hard') model.fit(X,y) model.predict(test) print("The Accuracy score of Voting Classifier is {}".format(model.score(x_test,y_test))) estimators = [('rf', LogisticRegression(solver='lbfgs',max_iter = 1000)), ('svr', make_pipeline(StandardScaler(),LinearSVC(random_state=42,dual=False)))] clf = StackingClassifier(estimators=estimators, final_estimator=RandomForestClassifier(n_estimators=10, random_state=42)) clf.fit(x_train, y_train) print("The Accuracy score of Stacking Classifier is {}".format(clf.score(x_test,y_test))) rfc=RandomForestClassifier(criterion= 'gini', max_depth= 5, max_features= 'auto', n_estimators= 200,random_state=42) rfc.fit(X,y) rfc.predict(test) lr=LogisticRegression(solver='liblinear',max_iter = 1000) lr.fit(X,y) lr.predict(test) submissions=pd.DataFrame({"Loan_ID":testcol, "Loan_Status":final}) submissions.Loan_Status.replace({1:"Y",0:"N"},inplace=True) submissions.to_csv('submissions.csv',index=False) # Create first pipeline for base without 
reducing features. # pipe = Pipeline([('classifier', RandomForestClassifier())]) # Create param grid. #param_grid = [ # { # 'penalty' : ['l1', 'l2'], # 'C' : np.logspace(-4, 4, 20), #'solver' : ['liblinear','newton-cg','lbfgs','sag','saga' ]}, #] # Create grid search object #clfs = GridSearchCV(LogisticRegression(max_iter = 2000), param_grid = param_grid, cv = 5, verbose=True, n_jobs=-1) # Fit on data clfs=LogisticRegression(max_iter=2000,solver='liblinear',penalty='l2',C= 0.615848211066026) clfs= clfs.fit(x_train, y_train) final=clfs.predict(test) submissions=pd.DataFrame({"Loan_ID":testcol, "Loan_Status":final}) submissions.Loan_Status.replace({1:"Y",0:"N"},inplace=True) submissions.to_csv('submissions.csv',index=False) print("Hi") # -
Bank Prediction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # A memorable speech
#
# You have a trilingual English-French-Spanish version, in TSV format (*tab-separated values*), of the speech Barack Obama gave on June 4, 2009 at Cairo University, and you want to turn it into a file in TMX format.
#
# First, load the lines of the file into a variable `rows`:

# +
import csv

with open('../files/obama-le-caire.tsv') as csvfile:
    fieldnames = ['en', 'fr', 'es']
    reader = csv.DictReader(csvfile, delimiter='\t', fieldnames=fieldnames)
    rows = [row for row in reader]
# -

# Now design your program, remembering to declare the [DTD of the TMX format](../files/tmx.dtd), which is also available in the *files* directory:

# your code here
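# Below is one possible minimal sketch (not the official solution): it builds a TMX 1.4 tree with `xml.etree.ElementTree` and writes the XML and DOCTYPE declarations by hand, since ElementTree does not emit a DOCTYPE itself. The header attributes and the output file name are assumptions; adjust the DTD path to wherever *tmx.dtd* lives relative to the output file.

# +
import xml.etree.ElementTree as ET

tmx = ET.Element('tmx', version='1.4')
header = ET.SubElement(tmx, 'header', {
    'creationtool': 'python', 'creationtoolversion': '3',
    'segtype': 'sentence', 'o-tmf': 'tsv', 'adminlang': 'en',
    'srclang': 'en', 'datatype': 'plaintext'})
body = ET.SubElement(tmx, 'body')

# One translation unit per TSV row, with one variant per language.
for row in rows:
    tu = ET.SubElement(body, 'tu')
    for lang in ('en', 'fr', 'es'):
        tuv = ET.SubElement(tu, 'tuv', {'xml:lang': lang})
        seg = ET.SubElement(tuv, 'seg')
        seg.text = row[lang]

with open('obama-le-caire.tmx', 'w', encoding='utf-8') as output:
    output.write('<?xml version="1.0" encoding="utf-8"?>\n')
    output.write('<!DOCTYPE tmx SYSTEM "../files/tmx.dtd">\n')
    output.write(ET.tostring(tmx, encoding='unicode'))
# -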
4.grammars/exercises/1.cairo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:tf]
#     language: python
#     name: conda-env-tf-py
# ---

# # Make predictions of stellar 'group' on CASSIS spectra
# Trained on SWS Atlas data.

# +
import glob

import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras

from swsnet.dataframe_utils import read_spectrum
# -

def load_model(file_path):
    """Returns a keras model (compressed as .h5)."""
    try:
        model = keras.models.load_model(file_path)
    except Exception as e:
        raise e
    return model

# ## Load keras model
# Stored as .h5 file.

model = load_model('sws_model_01.h5')

model.summary()

# ## Read in metadata (pd.DataFrame)

data_dir = '../../data/cassis/'
metadata_pickle = data_dir + 'metadata_step1_normalized.pkl'
meta = pd.read_pickle(metadata_pickle)

meta.head()

# # Perform predictions

def predict_group(spectrum):
    """Return the probabilities (from model) that the source belongs to each group."""
    f = spectrum['flux'].values
    probabilities = model.predict(np.array([f]))
    return probabilities

#

# +
results_list = []

# Iterate over all spectra.
for index, row in enumerate(meta.itertuples()):
    if index % 200 == 0:
        print(index)

    file_path = getattr(row, 'file_path')
    aorkey = getattr(row, 'aorkey')

    spectrum = read_spectrum(data_dir + file_path)
    probabilities = predict_group(spectrum)

    wrap = [index, aorkey, file_path, *list(*probabilities)]
    results_list.append(wrap)

print('Done.')
# -

results_list[0]

np.savetxt('results.txt', np.array(results_list), delimiter=',', fmt='%s',
           header='index, aorkey, file_path, PROBABILITIES (groups 0 - 4) shifted by one downwards.')
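# ## Inspecting the results
# A minimal sketch (not in the original notebook) that puts the collected results into a DataFrame and adds the most probable group for each source. The five probability columns and their names are assumptions based on the header written above ("groups 0 - 4").

# +
prob_cols = ['p_group0', 'p_group1', 'p_group2', 'p_group3', 'p_group4']
results = pd.DataFrame(results_list, columns=['index', 'aorkey', 'file_path'] + prob_cols)

# Index of the highest probability; see the header note above for how this maps to group labels.
results['predicted_group'] = results[prob_cols].astype(float).values.argmax(axis=1)
results.head()
# -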
models/sws_model_01/predict.ipynb