# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Question 1:
#
# Write a decorator that can wrap any function you build. For example, wrap a
# Fibonacci function so that the decorator caches (memoises) results for the
# inputs it has already seen.

def decor(f):
    cache = {}

    def series(x):
        # compute f(x) only once; reuse the cached result afterwards
        if x not in cache:
            cache[x] = f(x)
        return cache[x]
    return series


@decor  # rebinding `fibo` here lets the recursive calls hit the cache too
def fibo(r):
    if r == 0:
        return 0
    elif r == 1:
        return 1
    else:
        return fibo(r - 1) + fibo(r - 2)


print(fibo(18))

# # Question 2:
#
# Develop a Python program that opens a file in read-only mode, tries to write
# something to it, and handles the resulting error using exception handling.

file = open("Assignment.txt", "w")
file.write(" Hi, this is Abhishek. I love LetsUpgrade. Thank you for teaching so many things. ")
file.close()

file = open("Assignment.txt", "r")
txtRead = file.read()
print(txtRead)
file.close()

try:
    file = open("Assignment.txt", "r")
    file.write(" I'm studying online with LetsUpgrade. ")
    print("\nWrite operation succeeded in read-only mode.")
    file.close()
except Exception as e:
    print("\nAn error occurred.")
    print("\nError message is:", e)
finally:
    print("\nI will be a Rowdy in Python")
Day - 8/Day - 8 Assignment.ipynb
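A note on the memoising decorator in the notebook above: the standard library already provides the same behaviour. A minimal sketch using `functools.lru_cache` (not part of the original assignment):

```python
from functools import lru_cache

# lru_cache(maxsize=None) keeps every result, mirroring the hand-written
# dictionary-based decorator above.
@lru_cache(maxsize=None)
def fib(n):
    if n < 2:
        return n
    return fib(n - 1) + fib(n - 2)

print(fib(18))  # 2584
```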
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import scanpy.api as sc from sklearn.decomposition import PCA from quasildr.graphdr import graphdr data = pd.read_csv('./data/other/hochgerner_2018.data.gz',sep='\t',index_col=0) anno = pd.read_csv('./data/other/hochgerner_2018.anno',sep='\t',header=None) adata = sc.AnnData(data.values.T, data.columns.values,data.index.values) adata.var_names_make_unique() sc.pp.normalize_per_cell(adata) sc.pp.log1p(adata) sc.pp.scale(adata) sc.tl.pca(adata) pca50 = PCA(50).fit_transform(adata.X) pca50 = pca50/pca50[:,0].std() dr = graphdr(pca50, n_neighbors=15,_lambda=10, refine_threshold=10,refine_iter=6, no_rotation=True,rescale=True) # + #Hochgerner et al. 2018 GraphDR import plotnine plotnine.options.figure_size=(20,15) from plotnine import * import warnings warnings.simplefilter("ignore") p = ggplot(pd.DataFrame({'x':-dr[:,0], 'y':dr[:,1], 'c': anno.iloc[:,1]}))+geom_point(aes(x='x', y='y', color='c'),size=0.01, alpha=1)+ guides(color=False)+ theme_void()+scale_color_hue(l=0.6,h=15/360,s=0.8) p.save('./figures/hochgerner_2018.graphdr.pdf', width=12, height=10.5) p # - #Plot PCA p = ggplot(pd.DataFrame({'x':-pca50[:,0], 'y':pca50[:,1], 'c': anno.iloc[:,1]}))+geom_point(aes(x='x', y='y', color='c'),size=0.01)+ guides(color=False)+ theme_void()+scale_color_hue(l=0.6,h=15/360,s=0.8) p.save('./figures/hochgerner_2018.pca.pdf', width=8, height=7) #Plot tSNE np.random.seed(0) sc.tl.tsne(adata) p = ggplot(pd.DataFrame({'x': adata.obsm['X_tsne'][:,0], 'y': adata.obsm['X_tsne'][:,1], 'c': anno.iloc[:,1]}))+geom_point(aes(x='x', y='y', color='c'),size=0.01)+ guides(color=False)+ theme_void()+scale_color_hue(l=0.6,h=15/360,s=0.8) p.save('./figures/hochgerner_2018.tsne.pdf', width=8, height=7) #Zeisel et al. 2018 GraphDR import loompy ds = loompy.connect('./data/other/zeisel_2018.data.loom') data = pd.DataFrame(ds[:,:],index= ds.row_attrs['Gene'],columns=ds.col_attrs['CellID']) adata = sc.AnnData(data.values.T, ds.col_attrs['CellID'],ds.row_attrs['Gene']) adata.var_names_make_unique() sc.pp.recipe_zheng17(adata) sc.pp.pca(adata) sc.tl.tsne(adata) pca50 = PCA(50).fit_transform(adata.X) pca50 = pca50/pca50[:,0].std() # + anno = pd.read_csv('./data/other/zeisel_2018.anno',sep='\t',header=None) dr = graphdr(pca50, n_neighbors=10,_lambda=10, refine_threshold=4,refine_iter=2, no_rotation=True,rescale=True) import plotnine plotnine.options.figure_size=(30,30) p = ggplot(pd.DataFrame({'x':-dr[:,0], 'y':dr[:,1], 'c': anno.iloc[:,1]}))+geom_point(aes(x='x', y='y', color='c'),size=0.01, alpha=0.1)+ guides(color=False)+ theme_void() p.save('./figures/zeisel_2018.graphdr.pdf', width=16, height=14) p # - #Plot PCA p = ggplot(pd.DataFrame({'x':-pca50[:,0], 'y':pca50[:,1], 'c': anno.iloc[:,1]}))+geom_point(aes(x='x', y='y', color='c'),size=0.01)+ guides(color=False)+ theme_void() p.save('./figures/zeisel_2018.pca.pdf', width=16, height=14) #Plot tSNE np.random.seed(0) sc.tl.tsne(adata) p = ggplot(pd.DataFrame({'x': adata.obsm['X_tsne'][:,0], 'y': adata.obsm['X_tsne'][:,1], 'c': anno.iloc[:,1]}))+geom_point(aes(x='x', y='y', color='c'),size=0.01)+ guides(color=False)+ theme_void() p.save('./figures/zeisel_2018.tsne.pdf', width=16, height=14)
Manuscript/GraphDR_examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Machine Learning Model Building Pipeline: Feature Engineering # # In the following videos, we will take you through a practical example of each one of the steps in the Machine Learning model building pipeline that we described in the previous lectures. There will be a notebook for each one of the Machine Learning Pipeline steps: # # 1. Data Analysis # 2. Feature Engineering # 3. Feature Selection # 4. Model Building # # **This is the notebook for step 2: Feature Engineering** # # We will use the house price dataset available on [Kaggle.com](https://www.kaggle.com/c/house-prices-advanced-regression-techniques/data). See below for more details. # # =================================================================================================== # # ## Predicting Sale Price of Houses # # The aim of the project is to build a machine learning model to predict the sale price of homes based on different explanatory variables describing aspects of residential houses. # # ### Why is this important? # # Predicting house prices is useful to identify fruitful investments, or to determine whether the price advertised for a house is over or underestimated, before making a buying judgment. # # ### What is the objective of the machine learning model? # # We aim to minimise the difference between the real price, and the estimated price by our model. We will evaluate model performance using the mean squared error (mse) and the root squared of the mean squared error (rmse). # # ### How do I download the dataset? # # To download the House Price dataset go this website: # https://www.kaggle.com/c/house-prices-advanced-regression-techniques/data # # Scroll down to the bottom of the page, and click on the link 'train.csv', and then click the 'download' blue button towards the right of the screen, to download the dataset. Rename the file as 'houseprice.csv' and save it to a directory of your choice. # # **Note the following:** # - You need to be logged in to Kaggle in order to download the datasets. # - You need to accept the terms and conditions of the competition to download the dataset # - If you save the file to the same directory where you saved this jupyter notebook, then you can run the code as it is written here. # # ==================================================================================================== # ## House Prices dataset: Feature Engineering # # In the following cells, we will engineer / pre-process the variables of the House Price Dataset from Kaggle. We will engineer the variables so that we tackle: # # 1. Missing values # 2. Temporal variables # 3. Non-Gaussian distributed variables # 4. Categorical variables: remove rare labels # 5. Categorical variables: convert strings to numbers # 5. Standarise the values of the variables to the same range # # ### Setting the seed # # It is important to note that we are engineering variables and pre-processing data with the idea of deploying the model if we find business value in it. Therefore, from now on, for each step that includes some element of randomness, it is extremely important that we **set the seed**. This way, we can obtain reproducibility between our research and our development code. # # This is perhaps one of the most important lessons that you need to take away from this course: **Always set the seeds**. 
# # Let's go ahead and load the dataset. # + # to handle datasets import pandas as pd import numpy as np # for plotting import matplotlib.pyplot as plt # %matplotlib inline # to divide train and test set from sklearn.model_selection import train_test_split # feature scaling from sklearn.preprocessing import MinMaxScaler # to visualise al the columns in the dataframe pd.pandas.set_option('display.max_columns', None) # - # load dataset data = pd.read_csv('houseprice.csv') print(data.shape) data.head() # ### Separate dataset into train and test # # Before beginning to engineer our features, it is important to separate our data intro training and testing set. This is to avoid over-fitting. This step involves randomness, therefore, we need to set the seed. # + # Let's separate into train and test set # Remember to set the seed (random_state for this sklearn function) X_train, X_test, y_train, y_test = train_test_split(data, data.SalePrice, test_size=0.1, random_state=0) # we are setting the seed here X_train.shape, X_test.shape # - # ### Missing values # # For categorical variables, we will fill missing information by adding an additional category: "missing" # + # make a list of the categorical variables that contain missing values vars_with_na = [var for var in data.columns if X_train[var].isnull().sum()>1 and X_train[var].dtypes=='O'] # print the variable name and the percentage of missing values for var in vars_with_na: print(var, np.round(X_train[var].isnull().mean(), 3), ' % missing values') # - # function to replace NA in categorical variables def fill_categorical_na(df, var_list): X = df.copy() X[var_list] = df[var_list].fillna('Missing') return X # + # replace missing values with new label: "Missing" X_train = fill_categorical_na(X_train, vars_with_na) X_test = fill_categorical_na(X_test, vars_with_na) # check that we have no missing information in the engineered variables X_train[vars_with_na].isnull().sum() # - # check that test set does not contain null values in the engineered variables [vr for var in vars_with_na if X_train[var].isnull().sum()>0] # For numerical variables, we are going to add an additional variable capturing the missing information, and then replace the missing information in the original variable by the mode, or most frequent value: # + # make a list of the numerical variables that contain missing values vars_with_na = [var for var in data.columns if X_train[var].isnull().sum()>1 and X_train[var].dtypes!='O'] # print the variable name and the percentage of missing values for var in vars_with_na: print(var, np.round(X_train[var].isnull().mean(), 3), ' % missing values') # + # replace the missing values for var in vars_with_na: # calculate the mode mode_val = X_train[var].mode()[0] # train X_train[var+'_na'] = np.where(X_train[var].isnull(), 1, 0) X_train[var].fillna(mode_val, inplace=True) # test X_test[var+'_na'] = np.where(X_test[var].isnull(), 1, 0) X_test[var].fillna(mode_val, inplace=True) # check that we have no more missing values in the engineered variables X_train[vars_with_na].isnull().sum() # - # check that we have the added binary variables that capture missing information X_train[['LotFrontage_na', 'MasVnrArea_na', 'GarageYrBlt_na']].head() # check that test set does not contain null values in the engineered variables [vr for var in vars_with_na if X_test[var].isnull().sum()>0] # ### Temporal variables # # We remember from the previous lecture, that there are 4 variables that refer to the years in which something was built or something specific 
happened. We will capture the time elapsed between the that variable and the year the house was sold: # + # let's explore the relationship between the year variables and the house price in a bit of more details def elapsed_years(df, var): # capture difference between year variable and year the house was sold df[var] = df['YrSold'] - df[var] return df # - for var in ['YearBuilt', 'YearRemodAdd', 'GarageYrBlt']: X_train = elapsed_years(X_train, var) X_test = elapsed_years(X_test, var) # check that test set does not contain null values in the engineered variables [vr for var in ['YearBuilt', 'YearRemodAdd', 'GarageYrBlt'] if X_test[var].isnull().sum()>0] # ### Numerical variables # # We will log transform the numerical variables that do not contain zeros in order to get a more Gaussian-like distribution. This tends to help Linear machine learning models. for var in ['LotFrontage', 'LotArea', '1stFlrSF', 'GrLivArea', 'SalePrice']: X_train[var] = np.log(X_train[var]) X_test[var]= np.log(X_test[var]) # check that test set does not contain null values in the engineered variables [var for var in ['LotFrontage', 'LotArea', '1stFlrSF', 'GrLivArea', 'SalePrice'] if X_test[var].isnull().sum()>0] # same for train set [var for var in ['LotFrontage', 'LotArea', '1stFlrSF', 'GrLivArea', 'SalePrice'] if X_train[var].isnull().sum()>0] # ### Categorical variables # # First, we will remove those categories within variables that are present in less than 1% of the observations: # let's capture the categorical variables first cat_vars = [var for var in X_train.columns if X_train[var].dtype == 'O'] # + def find_frequent_labels(df, var, rare_perc): # finds the labels that are shared by more than a certain % of the houses in the dataset df = df.copy() tmp = df.groupby(var)['SalePrice'].count() / len(df) return tmp[tmp>rare_perc].index for var in cat_vars: frequent_ls = find_frequent_labels(X_train, var, 0.01) X_train[var] = np.where(X_train[var].isin(frequent_ls), X_train[var], 'Rare') X_test[var] = np.where(X_test[var].isin(frequent_ls), X_test[var], 'Rare') # - # Next, we need to transform the strings of these variables into numbers. We will do it so that we capture the monotonic relationship between the label and the target: # + # this function will assign discrete values to the strings of the variables, # so that the smaller value corresponds to the smaller mean of target def replace_categories(train, test, var, target): ordered_labels = train.groupby([var])[target].mean().sort_values().index ordinal_label = {k:i for i, k in enumerate(ordered_labels, 0)} train[var] = train[var].map(ordinal_label) test[var] = test[var].map(ordinal_label) # - for var in cat_vars: replace_categories(X_train, X_test, var, 'SalePrice') # check absence of na [var for var in X_train.columns if X_train[var].isnull().sum()>0] # check absence of na [var for var in X_test.columns if X_test[var].isnull().sum()>0] # + # let me show you what I mean by monotonic relationship between labels and target def analyse_vars(df, var): df = df.copy() df.groupby(var)['SalePrice'].median().plot.bar() plt.title(var) plt.ylabel('SalePrice') plt.show() for var in cat_vars: analyse_vars(X_train, var) # - # We can now see monotonic relationships between the labels of our variables and the target (remember that the target is log-transformed, that is why the differences seem so small). # ### Feature Scaling # # For use in linear models, features need to be either scaled or normalised. 
In the next section, I will scale features between the min and max values: train_vars = [var for var in X_train.columns if var not in ['Id', 'SalePrice']] len(train_vars) X_train[['Id', 'SalePrice']].reset_index(drop=True) # + # fit scaler scaler = MinMaxScaler() # create an instance scaler.fit(X_train[train_vars]) # fit the scaler to the train set for later use # transform the train and test set, and add on the Id and SalePrice variables X_train = pd.concat([X_train[['Id', 'SalePrice']].reset_index(drop=True), pd.DataFrame(scaler.transform(X_train[train_vars]), columns=train_vars)], axis=1) X_test = pd.concat([X_test[['Id', 'SalePrice']].reset_index(drop=True), pd.DataFrame(scaler.transform(X_test[train_vars]), columns=train_vars)], axis=1) # - X_train.head() # That concludes the feature engineering section for this dataset. # # **Remember: the aim of this course and this particular project is to show you how to put models in production. Surely there are additional things you can do on this dataset, to extract additional value from the features.** # # **In order to capitalise on the deployment aspect of things, we deliberately kept the engineering side simple, yet include many of the traditional engineering steps, so you get a full flavour of building and deploying a machine learning model.** # check absence of missing values X_train.isnull().sum() # + # let's now save the train and test sets for the next notebook! X_train.to_csv('xtrain.csv', index=False) X_test.to_csv('xtest.csv', index=False) # - # That is all for this notebook. We hope you enjoyed it and see you in the next one!
jupyter_notebooks/Section2_MLPipelineOverview/02.7_ML_Pipeline_Step2-FeatureEngineering.ipynb
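The `replace_categories` step in the notebook above is what creates the monotonic relationship between category codes and the target. A self-contained toy sketch of the same idea (the `Quality` column and its values are made up for illustration):

```python
import pandas as pd

# Toy data: 'Quality' is categorical, 'SalePrice' is the (log) target.
df = pd.DataFrame({
    'Quality':   ['Low', 'High', 'Medium', 'High', 'Low', 'Medium'],
    'SalePrice': [11.0, 12.5, 11.8, 12.7, 10.9, 11.9],
})

# Order the labels by their mean target, then number them 0, 1, 2, ... in that
# order, so the integer code grows monotonically with the average SalePrice.
ordered_labels = df.groupby('Quality')['SalePrice'].mean().sort_values().index
ordinal_map = {label: i for i, label in enumerate(ordered_labels)}
df['Quality_encoded'] = df['Quality'].map(ordinal_map)

print(ordinal_map)   # {'Low': 0, 'Medium': 1, 'High': 2}
print(df)
```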
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Python CNTK Distributed GPU # # ## Introduction # # This example uses the CIFAR-10 dataset to demonstrate how to train a convolutional neural network (CNN) on a multi-node multi-GPU cluster. You can run this recipe on a single or multiple nodes. # # ## Details # # - For demonstration purposes, CIFAR-10 data preparation script and ConvNet_CIFAR10_DataAug_Distributed.py with its dependency will be deployed at Azure File Share; # - Standard output of the job and the model will be stored on Azure File Share; # - CIFAR-10 dataset(http://www.cs.toronto.edu/~kriz/cifar.html) has been preprocessed available at Azure [blob](https://batchaisamples.blob.core.windows.net/samples/CIFAR-10_dataset.tar?st=2017-09-29T18%3A29%3A00Z&se=2099-12-31T08%3A00%3A00Z&sp=rl&sv=2016-05-31&sr=b&sig=nFXsAp0Eq%2BoS5%2BKAEPnfyEGlCkBcKIadDvCPA%2BcX6lU%3D). # - The official CNTK example [ConvNet_CIFAR10_DataAug_Distributed.py](https://github.com/Microsoft/CNTK/blob/master/Examples/Image/Classification/ConvNet/Python/ConvNet_CIFAR10_DataAug_Distributed.py) is used [ConvNet_CIFAR10_DataAug_Distributed.py](/recipes/CNTK-GPU-Python/ConvNet_CIFAR10_DataAug_Distributed.py). The example has been updated to allow specify input and output directories via command line arguments. # ## Instructions # # ### Install Dependencies and Create Configuration file. # Follow [instructions](/recipes) to install all dependencies and create configuration file. # ### Read Configuration and Create Batch AI client # + nbpresent={"id": "bfa11f00-8866-4051-bbfe-a9646e004910"} from __future__ import print_function from datetime import datetime import os import sys from azure.storage.file import FileService import azure.mgmt.batchai.models as models # utilities.py contains helper functions used by different notebooks sys.path.append('../..') import utilities cfg = utilities.Configuration('../../configuration.json') client = utilities.create_batchai_client(cfg) utilities.create_resource_group(cfg) # - # ## 1. Prepare Training Script in Azure Storage # ### Create File Share # # For this example we will create a new File Share with name `batchaisample` under your storage account. This will be used to share the *training script file* and *output file*. # # **Note** You don't need to create new file share for every cluster. We are doing this in this sample to simplify resource management for you. azure_file_share_name = 'batchaisample' file_service = FileService(cfg.storage_account_name, cfg.storage_account_key) file_service.create_share(azure_file_share_name, fail_on_exist=False) # ### Deploy Sample Script to Azure File Share # # For each job we will create a folder containing a copy of [ConvNet_CIFAR10_DataAug_Distributed.py](./ConvNet_CIFAR10_DataAug_Distributed.py) and [CIFAR-10_data_prepare.sh](./CIFAR-10_data_prepare.sh). This allows to run the same job with different scripts. cntk_script_path = "cntk_samples" file_service.create_directory( azure_file_share_name, cntk_script_path, fail_on_exist=False) file_service.create_file_from_path( azure_file_share_name, cntk_script_path, 'ConvNet_CIFAR10_DataAug_Distributed.py', 'ConvNet_CIFAR10_DataAug_Distributed.py') file_service.create_file_from_path( azure_file_share_name, cntk_script_path, 'CIFAR-10_data_prepare.sh', 'CIFAR-10_data_prepare.sh') # ## 2. 
Create Azure Batch AI Compute Cluster # ### Configure Compute Cluster # # - For this example we will use a GPU cluster of `STANDARD_NC6` nodes. Number of nodes in the cluster is configured with `nodes_count` variable; # - We will mount file share at folder with name `afs`. Full path of this folder on a computer node will be `$AZ_BATCHAI_MOUNT_ROOT/afs`; # - We will call the cluster `nc6`; # # So, the cluster will have the following parameters: # + azure_file_share = 'afs' nodes_count = 2 cluster_name = 'nc6' volumes = models.MountVolumes( azure_file_shares=[ models.AzureFileShareReference( account_name=cfg.storage_account_name, credentials=models.AzureStorageCredentialsInfo( account_key=cfg.storage_account_key), azure_file_url = 'https://{0}.file.core.windows.net/{1}'.format( cfg.storage_account_name, azure_file_share_name), relative_mount_path=azure_file_share) ] ) parameters = models.ClusterCreateParameters( location=cfg.location, vm_size='STANDARD_NC6', virtual_machine_configuration=models.VirtualMachineConfiguration( image_reference=models.ImageReference( publisher='microsoft-ads', offer='linux-data-science-vm-ubuntu', sku='linuxdsvmubuntu', version='latest')), user_account_settings=models.UserAccountSettings( admin_user_name=cfg.admin, admin_user_password=<PASSWORD>, admin_user_ssh_public_key=cfg.admin_ssh_key), scale_settings=models.ScaleSettings( manual=models.ManualScaleSettings(target_node_count=nodes_count) ), node_setup=models.NodeSetup( mount_volumes=volumes, ) ) # - # ### Create Compute Cluster _ = client.clusters.create(cfg.resource_group, cluster_name, parameters).result() # ### Monitor Cluster Creation # # Monitor the just created cluster. utilities.py contains a helper function to print out detail status of the cluster. cluster = client.clusters.get(cfg.resource_group, cluster_name) utilities.print_cluster_status(cluster) # ## 3. Run Azure Batch AI Training Job # ### Configure Input Directories # # The job needs to know where to find ConvNet_CIFAR10_DataAug_Distributed.py. We will create an input directory for this: input_directories = [ models.InputDirectory( id='SCRIPT', path='$AZ_BATCHAI_MOUNT_ROOT/{0}/{1}'.format(azure_file_share, cntk_script_path))] # The job will be able to reference those directories using environment variables: # - ```AZ_BATCHAI_INPUT_SCRIPT``` : refers to the mounted path of Azure File Share # ### Configure Output Directories # We will store standard and error output of the job in File Share: std_output_path_prefix = '$AZ_BATCHAI_MOUNT_ROOT/{0}'.format(azure_file_share) # The model output will be stored in File Share: output_directories = [ models.OutputDirectory( id='MODEL', path_prefix='$AZ_BATCHAI_MOUNT_ROOT/{0}'.format(azure_file_share), path_suffix='Models')] # The job will be able to reference this directory as `$AZ_BATCHAI_OUTPUT_MODEL` and we will be able to enumerate files in this directory using `MODEL` id. # ### Configure Job # - The job will use `microsoft/cntk:2.1-gpu-python3.5-cuda8.0-cudnn6.0` container. # - Will use job preparation task to execute CIFAR-10 data preparation script (CIFAR-10_data_prepare.sh). The data set will be downloaded and processed on compute nodes locally (under AZ_BATCHAI_JOB_TEMP directory); # - Will use configured previously input and output directories; # - Will run ConvNet_CIFAR10_DataAug_Distributed.py providing CIFAR-10 Dataset path as the first parameter and desired mode output as the second one. 
# - For illustration purpose, we will only run 5 epoches # - By removing container_settings, the job will be ran on the host VMs if you are using DSVM. # # **Note** You must agree to the following licenses before using this container: # - [CNTK License](https://github.com/Microsoft/CNTK/blob/master/LICENSE.md) # parameters = models.job_create_parameters.JobCreateParameters( location=cfg.location, cluster=models.ResourceId(id=cluster.id), node_count=nodes_count, input_directories=input_directories, std_out_err_path_prefix=std_output_path_prefix, output_directories=output_directories, job_preparation=models.JobPreparation(command_line='bash $AZ_BATCHAI_INPUT_SCRIPT/CIFAR-10_data_prepare.sh'), container_settings=models.ContainerSettings( image_source_registry=models.ImageSourceRegistry(image='microsoft/cntk:2.1-gpu-python3.5-cuda8.0-cudnn6.0')), cntk_settings = models.CNTKsettings( language_type='python', python_script_file_path='$AZ_BATCHAI_INPUT_SCRIPT/ConvNet_CIFAR10_DataAug_Distributed.py', command_line_args='--datadir $AZ_BATCHAI_JOB_TEMP --outputdir $AZ_BATCHAI_OUTPUT_MODEL -n 5', process_count=nodes_count)) # ### Create a training Job and wait for Job completion # job_name = datetime.utcnow().strftime('cntk_%m_%d_%Y_%H%M%S') job = client.jobs.create(cfg.resource_group, job_name, parameters).result() print('Create Job: {}'.format(job.name)) # ### Wait for Job to Finish # The job will start running when the cluster will have enough idle nodes. The following code waits for job to start running printing the cluster state. During job run, the code prints current content of stderr.txt (the ConvNet_CIFAR10_DataAug_Distributed.py was changed to merge stdout and stderr output.) # # **Note** Execution may take several minutes to complete. utilities.wait_for_job_completion(client, cfg.resource_group, job_name, cluster_name, 'stdouterr', 'stdout.txt') # ### Download stdout.txt and stderr.txt files for the Job files = client.jobs.list_output_files(cfg.resource_group, job_name, models.JobsListOutputFilesOptions(outputdirectoryid='stdouterr')) for f in list(files): if f.download_url: utilities.download_file(f.download_url, f.name) print('All files downloaded') # ### Enumerate Model Output # Previously we configured the job to use output directory with `ID='MODEL'` for model output. We can enumerate the output using the following code. files = client.jobs.list_output_files(cfg.resource_group, job_name, models.JobsListOutputFilesOptions(outputdirectoryid='MODEL')) for f in list(files): print(f.name, f.download_url or 'directory') # ## 4. Clean Up (Optional) # ### Delete the Job _ = client.jobs.delete(cfg.resource_group, job_name) # ### Delete the Cluster # When you are finished with the sample and don't want to submit any more jobs you can delete the cluster using the following code. _ = client.clusters.delete(cfg.resource_group, cluster_name) # ### Delete File Share # When you are finished with the sample and don't want to submit any more jobs you can delete the file share completely with all files using the following code. service = FileService(cfg.storage_account_name, cfg.storage_account_key) service.delete_share(azure_file_share_name)
recipes/CNTK/CNTK-GPU-Python-Distributed/CNTK-GPU-Python-Distrbuted.ipynb
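The notebook above configures an input directory with id `SCRIPT` and an output directory with id `MODEL`; on the compute nodes these are exposed to the job as environment variables. A small sketch of how the training-script side might read them (the fallback defaults are illustrative only):

```python
import os

# Directory ids configured in the job become AZ_BATCHAI_* environment variables
# on the compute node; the defaults below are placeholders for local testing.
script_dir = os.environ.get('AZ_BATCHAI_INPUT_SCRIPT', '.')
model_dir = os.environ.get('AZ_BATCHAI_OUTPUT_MODEL', './Models')
data_dir = os.environ.get('AZ_BATCHAI_JOB_TEMP', '/tmp')

print('scripts from:', script_dir)
print('models to   :', model_dir)
print('dataset in  :', data_dir)
```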
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.8 64-bit (''atcs-practical'': conda)' # name: python388jvsc74a57bd00b7cfd85d2fc43f76dbccc53202d8ba6a9a8cca408d693df58307e7c75a304a7 # --- # %pwd #look at the current work dir # %cd .. import torch from torchtext.legacy.datasets.nli import SNLI from torchtext.legacy.data import Field from torchtext.legacy import data import torchtext from torch.utils.data import DataLoader TEXT = Field(lower=True, include_lengths=False, batch_first=True,tokenize='spacy',tokenizer_language="en_core_web_sm") LABEL = Field(sequential=False) #Read the snli data and rewrite it with only 1000 top so is easier to load it and iterate coding from os import listdir from os.path import isfile, join path_data = "./data/snli/snli_1.0/" onlyfiles = [f for f in listdir(path_data) if (isfile(join(path_data, f)) and 'snli' in f)] print(onlyfiles) max_lines = 50000 small_data_path = "./subdata/snli/snli_1.0/" if not os.path.exists(small_data_path): os.makedirs(small_data_path) print("Writing files with up to",max_lines," max lines in path:") print(small_data_path) for f in onlyfiles: max_lines = 50000 if 'train' in f else 10000 with open(path_data+f) as myfile: subset_data = [next(myfile) for x in range(max_lines)] with open(small_data_path+f, 'w') as new_f: for item in subset_data: new_f.write(item) # make splits for data train, dev, test = SNLI.splits(TEXT, LABEL, root= './subdata') # + train, dev, test = SNLI.splits(TEXT, LABEL, root= './subdata') glove = torchtext.vocab.GloVe(name='840B', dim=300) TEXT.build_vocab(train, vectors=glove) # - TEXT.build_vocab(train, vectors=glove) LABEL.build_vocab(train, specials_first=False) n_vocab = len(TEXT.vocab.itos) # n_vocab import torch torch.device # TEXT.vocab.stoi # + import torch import pytorch_lightning as pl from torch import nn from torch.nn import functional as F from modules.AverageEmbeddings import AverageEmbeddings from torchmetrics import MetricCollection, Accuracy, Precision, Recall import wandb metrics = MetricCollection([Accuracy()]) # - metrics = dict(metrics) train_iter, dev_iter, test_iter = data.BucketIterator.splits( (train, dev, test), batch_sizes=(16, 256, 256), device=0) next(iter(train_iter)) # + tags=[] # %load_ext autoreload # %autoreload 2 from pytorch_lightning import Trainer from modules.AverageEmbeddings import AverageEmbeddings from modules.Classifier import Classifier model = Classifier(emb_vec=TEXT.vocab.vectors) trainer = Trainer() trainer.fit(model, train_iter,dev_iter) # + tags=[] # %load_ext autoreload # %autoreload 2 from modules.Classifier import Classifier from pytorch_lightning import LightningModule from pytorch_lightning import Trainer from modules.AverageEmbeddings import AverageEmbeddings from modules.Classifier import Classifier model = Classifier() # train, dev, test = SNLI.splits(TEXT, LABEL, root= './subdata') # glove = torchtext.vocab.GloVe(name='840B', dim=300) # TEXT.build_vocab(train, vectors=glove) checkpath = "trained_models/lstm/elated-waterfall-212/lstm.ckpt" checkpath = 'trained_models/awe/gold/awe.ckpt' pretrained_model = model.load_from_checkpoint(checkpath, model_name='awe',disable_nonlinear=True) trainer = Trainer(fast_dev_run=True) trainer.fit(model, train_iter,dev_iter) # trainer.fit(model, train_iter,dev_iter) # - pretrained_model import torch torch.cuda.is_available() # + from pytorch_lightning.callbacks import ModelCheckpoint 
checkpoint_callback = ModelCheckpoint( monitor='val_loss', # dirpath='checkpoints/', filename='checkpoint-{epoch:02d}-{val_loss:.2f}', save_top_k=3, mode='min', ) trainer = Trainer(callbacks=[checkpoint_callback],auto_select_gpus=True,fast_dev_run=False) trainer.fit(model, train_iter,dev_iter) # + from modules.Classifier import Classifier from pytorch_lightning import LightningModule from pytorch_lightning import Trainer from modules.AverageEmbeddings import AverageEmbeddings from modules.Classifier import Classifier # model = Classifier(emb_vec=TEXT.vocab.vectors) pretrained_model = model.load_from_checkpoint("lightning_logs/version_44/checkpoints/epoch=0-step=7257.ckpt",emb_vec=TEXT.vocab.vectors) trainer = Trainer(fast_dev_run=True) trainer.fit(model, train_iter,dev_iter) # trainer.fit(model, train_iter,dev_iter) # - # proto = '-fast' if args.prototype else '-full' # path = args.save_results + args.model +proto + '.json' import json path= 'results_senteval/awe-fast.json' with open(path) as file: results = json.load(file) # use `json.loads` to do the reverse results.keys() def compute_macro_score(results): total_dev_acc = 0 len_metrics = len(results.keys()) for k in results.keys(): devacc = results[k]['devacc'] total_dev_acc += devacc return total_dev_acc/len_metrics def compute_micro_score(results): total_dev_acc = 0 total_samples = 0 len_metrics = len(results.keys()) for k in results.keys(): devacc = results[k]['devacc']*results[k]['ndev'] total_samples += results[k]['ndev'] total_dev_acc += devacc len_metrics*=total_samples return total_dev_acc/total_samples compute_macro_score(results) compute_micro_score(results)
dev_notebooks/testing.ipynb
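The two aggregation helpers at the end of the notebook above (macro vs. micro average of SentEval dev accuracies) differ only in whether tasks are weighted by their dev-set size. A toy illustration with made-up task names and numbers:

```python
# Toy SentEval-style results: per-task dev accuracy and dev-set size.
results = {
    'MR':   {'devacc': 78.0, 'ndev': 1000},
    'CR':   {'devacc': 82.0, 'ndev': 500},
    'SUBJ': {'devacc': 90.0, 'ndev': 2000},
}

macro = sum(r['devacc'] for r in results.values()) / len(results)
micro = (sum(r['devacc'] * r['ndev'] for r in results.values())
         / sum(r['ndev'] for r in results.values()))

print(f'macro: {macro:.2f}, micro: {micro:.2f}')  # macro 83.33, micro 85.43
```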
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np with open('day8.input') as fp: datastr = fp.read().strip() data = np.array([int(c) for c in datastr]) # ## Part 1 ## nrows, ncols = 6, 25 layer_size = nrows*ncols nlayers = data.size/layer_size if nlayers != int(nlayers): raise ValueError(f'Not an integer number of layers: {nlayers}') nlayers = int(nlayers) data.shape = (nlayers, nrows, ncols) def count_val(arr, val): return sum(1 for x in arr.ravel() if x == val) min_zeros_layer_num = np.argmin([count_val(data[i, :, :], 0) for i in range(nlayers)]) min_zeros_layer = data[min_zeros_layer_num, :, :] count_val(min_zeros_layer, 1)*count_val(min_zeros_layer, 2) # ## Part 2 ## test_data = np.array([int(c) for c in '0222112222120000']) test_data.shape = (4, 2, 2) test_data[:, 0, 0] def stack(arr): nlayers, nrows, ncols = arr.shape stacked_arr = np.ones((nrows, ncols)) + 1 for i in range(nrows): for j in range(ncols): pixel_stack = arr[:, i, j] for pixel in pixel_stack: if pixel == 2: continue stacked_arr[i, j] = pixel break return stacked_arr stack(test_data) stacked = stack(data) pt = {1: '#', 0: ' '} for i in range(nrows): print(''.join([pt[x] for x in stacked[i, :]]))
day8.ipynb
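The `stack` function in the notebook above walks over every pixel in Python; the same "first non-transparent layer wins" rule can also be expressed with NumPy indexing. A sketch (it reproduces the worked `test_data` example; pixels that are transparent in every layer keep the value 2):

```python
import numpy as np

def stack_vectorized(arr):
    # Index of the first layer that is not transparent (!= 2) for each pixel...
    first_visible = (arr != 2).argmax(axis=0)            # shape (nrows, ncols)
    # ...then pick that layer's value per pixel.
    return np.take_along_axis(arr, first_visible[None, ...], axis=0)[0]

test_data = np.array([int(c) for c in '0222112222120000']).reshape(4, 2, 2)
print(stack_vectorized(test_data))   # [[0 1]
                                     #  [1 0]]
```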
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/martinfinis/HelloCodeSchoolProject/blob/master/Quest_Simple_Image_Classification_with_Neural_Networks_Martin.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="h1PGdjzAafpp" import numpy as np import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline import tensorflow as tf from tensorflow import keras from tensorflow.keras.optimizers import Adam from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, Activation, BatchNormalization,ReLU from tensorflow.keras.preprocessing.image import ImageDataGenerator # + id="zzK4gGoqp_eI" import matplotlib.pyplot as plt def history_plot_from_loss_acc(history, title): plt.figure(figsize=(10,5)) train_loss_list = history.history['loss'] train_accuracy = history.history['accuracy'] val_loss_list = history.history['val_loss'] val_accuracy_list = history.history['val_accuracy'] x = history.epoch plt.subplot(1,2,1) plt.plot(x, train_loss_list) plt.plot(x, val_loss_list) plt.legend(['train_loss', 'val_loss']) plt.title(title+"_ LOSS") plt.subplot(1,2,2) plt.plot(x, train_accuracy) plt.plot(x, val_accuracy_list) plt.legend(['train_acc','val_accuracy']) plt.title(title+"_ ACCURACY") plt.show() # + [markdown] id="BMT9RcchaTWA" # #load the data # + id="6z7dBuLYaEYA" (X_train, y_train),(X_test, y_test) = keras.datasets.fashion_mnist.load_data() # + [markdown] id="9jgn0PI3hvPH" # # view the data # + colab={"base_uri": "https://localhost:8080/"} id="693cmxaZhD6r" outputId="d5f12843-1a5d-4338-fbba-6c094931fa96" type(X_train),type(y_train) # + colab={"base_uri": "https://localhost:8080/"} id="CkE828oviuY-" outputId="49d663e9-d709-4781-e306-730cc01b6806" X_train.shape,y_train.shape,X_test.shape, y_test.shape # + colab={"base_uri": "https://localhost:8080/"} id="gUJxMrw3jCpf" outputId="7d98479d-6ef5-49eb-8541-a8d3b875b59d" np.unique(y_train[:]) # + [markdown] id="ezEQtCOiRV6s" # ### display Images # + id="uwUNUIXPjgdH" class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] class_label = dict(zip(range(10), class_names)) # + id="f78WrDLsRV6t" colab={"base_uri": "https://localhost:8080/", "height": 879} outputId="e3d201bf-c067-461e-ed94-9c7a1b5962e0" import matplotlib.pyplot as plt plt.figure(figsize=(15,15)) for i in range(25): plt.subplot(5,5, i+1) plt.imshow(X_train[i]) plt.title(class_label.get(y_train[i])) plt.show() # + [markdown] id="LoNxSD9UOAKW" # # build model # + [markdown] id="du2GtLZmOEN9" # ## model without ImageDataGenerator # + colab={"base_uri": "https://localhost:8080/"} id="zKfaxUUnkvlj" outputId="e7cf9711-1ad8-407a-f2df-98969675eb6e" learning_rate = 0.001 dec_rate = 0 #1/4353 * 0.2 optim = Adam(lr=learning_rate, decay=dec_rate) model = Sequential() model.add(Conv2D(32, (3,3), padding='valid',activation='relu', input_shape=(28,28,1))) model.add(MaxPooling2D()) model.add(Conv2D(64,(3,3), padding='valid',activation='relu')) model.add(MaxPooling2D()) model.add(Conv2D(128,(3,3), padding='valid',activation='relu')) model.add(Flatten()) model.add(Dense(64,activation='relu')) model.add(Dense(10,activation='softmax')) model.summary() # 
+ id="ERGdVZBi7CEn" # reshape input X_train = X_train.reshape(-1, 28, 28, 1) X_test = X_test.reshape(-1, 28, 28, 1) # + id="vZlnMSDhpnCp" model.compile(loss='sparse_categorical_crossentropy',optimizer='adam',metrics=['accuracy']) # + colab={"base_uri": "https://localhost:8080/"} id="MYYWhVEnBcVU" outputId="7e3e32f6-3b8c-4e8b-f882-298ae86aaeda" history = model.fit(X_train, y_train, epochs=10, validation_split=0.2) # + colab={"base_uri": "https://localhost:8080/"} id="Nu_lTyfo_7B9" outputId="601ea0e8-f7a3-40be-b244-060ffbfab446" model.evaluate(X_test, y_test) # + colab={"base_uri": "https://localhost:8080/", "height": 336} id="pftgogb8__I1" outputId="b278491e-fd3f-4e55-ae58-9f35917872ff" history_plot_from_loss_acc(history, "sparse_categorical_crossentropy") # + [markdown] id="3PJNuIseNjR-" # ## model_2 with ImageDataGenerator # + id="Lrg_Si-wpqMP" datagen = ImageDataGenerator(rescale=1./255.,validation_split=0.2) datagen_test = ImageDataGenerator(rescale=1./255. ) train_generator = datagen.flow(X_train,y_train,batch_size=32,shuffle=True,seed=42,subset='training')# TODO,color_mode='grayscale' valid_generator = datagen.flow(X_train,y_train,batch_size=32,shuffle=True,seed=42,subset='validation') test_generator = datagen_test.flow(X_test,y_test,batch_size=32,seed=42) # + colab={"base_uri": "https://localhost:8080/"} id="JtPsUrW2psFQ" outputId="6159f4d5-566a-48f2-b5ed-fedaaed4005d" STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size STEP_SIZE_VALID=valid_generator.n//valid_generator.batch_size STEP_SIZE_TEST = test_generator.n//test_generator.batch_size STEP_SIZE_TRAIN,STEP_SIZE_VALID,STEP_SIZE_TEST # + colab={"base_uri": "https://localhost:8080/"} id="llOlO9zkNuEo" outputId="afd911e8-b42d-4b6a-8e92-8f4864838194" learning_rate = 0.001 dec_rate = 0 #1/4353 * 0.2 optim = Adam(lr=learning_rate, decay=dec_rate) model_2 = Sequential() model_2.add(Conv2D(32, (3,3), padding='valid',activation='relu', input_shape=(28,28,1))) model_2.add(MaxPooling2D()) model_2.add(Conv2D(64,(3,3), padding='valid',activation='relu')) model_2.add(MaxPooling2D()) model_2.add(Conv2D(128,(3,3), padding='valid',activation='relu')) model_2.add(Flatten()) model_2.add(Dense(64,activation='relu')) model_2.add(Dense(10,activation='softmax')) model_2.summary() # + id="FoGNwOvECoRf" model_2.compile(loss='sparse_categorical_crossentropy',optimizer='adam',metrics=['accuracy']) # + colab={"base_uri": "https://localhost:8080/"} id="nHEYPiJOpuwv" outputId="99117fd7-d4ca-4c8a-d151-38c506f1e7f0" history_2 = model_2.fit(train_generator, steps_per_epoch=STEP_SIZE_TRAIN, validation_data = valid_generator, validation_steps = STEP_SIZE_TEST, epochs=10) # + colab={"base_uri": "https://localhost:8080/"} id="SEADiE0hAqOx" outputId="6edfd4cf-027c-435c-dbec-60470b83b5d0" model_2.evaluate(test_generator) # + colab={"base_uri": "https://localhost:8080/", "height": 336} id="0gMOoo85Apm4" outputId="1e22765d-d73c-4703-8380-d63d786c7caf" history_plot_from_loss_acc(history_2, "ImageDataGenerator") # + [markdown] id="nBm0mZMQVK8x" # # model_3 add BatchNormalization # + colab={"base_uri": "https://localhost:8080/"} id="4bnekVFZVRuR" outputId="ca693463-cfa6-4708-e7fb-90b55e4f5651" learning_rate = 0.001 dec_rate = 0 #1/4353 * 0.2 optim = Adam(lr=learning_rate, decay=dec_rate) model_3 = Sequential() model_3.add(Conv2D(32, (3,3), padding='valid', input_shape=(28,28,1))) model_3.add(BatchNormalization()) model_3.add(ReLU()) model_3.add(MaxPooling2D()) model_3.add(Conv2D(64,(3,3), padding='valid')) model_3.add(BatchNormalization()) model_3.add(ReLU()) 
model_3.add(MaxPooling2D()) model_3.add(Conv2D(128,(3,3), padding='valid')) model_3.add(BatchNormalization()) model_3.add(ReLU()) model_3.add(Flatten()) model_3.add(Dense(64)) model_3.add(BatchNormalization()) model_3.add(ReLU()) model_3.add(Dense(10,activation='softmax')) model_3.summary() # + id="fPls8l0oWQcz" model_3.compile(loss='sparse_categorical_crossentropy',optimizer='adam',metrics=['accuracy']) # + colab={"base_uri": "https://localhost:8080/"} id="1gFW2MWjWQc1" outputId="3f3b5df8-4991-479c-a9d2-fe870383b50e" history_3 = model_3.fit(X_train, y_train, epochs=15, validation_split=0.2) # + colab={"base_uri": "https://localhost:8080/"} id="09cH1BQ6WQc4" outputId="b338dba1-e03b-4f8a-a993-353cf8023a10" model_3.evaluate(X_test, y_test) # + colab={"base_uri": "https://localhost:8080/", "height": 336} id="4Mgx_f43WQc5" outputId="5297b335-47b3-4649-8a61-08dd357da21e" history_plot_from_loss_acc(history_3, "model_3 BatchNormalization") # + [markdown] id="M6fkkhzzTfe2" # # model_4 with dropout and Batchnormalization # + colab={"base_uri": "https://localhost:8080/"} id="kESeHVGFbYS6" outputId="be74353d-20c5-4de0-b91a-0716e5109b3e" drop_1 = 0.3 # Dropout rate drop_2 = 0.5 # Dropout rate for 1st Dense layer learning_rate = 0.001 dec_rate = 0 #1/4353 * 0.2 optim = Adam(lr=learning_rate, decay=dec_rate) model_4 = Sequential() model_4.add(Conv2D(32, (3,3), padding='valid', input_shape=(28,28,1))) model_4.add(BatchNormalization()) model_4.add(ReLU()) model_4.add(MaxPooling2D()) model_4.add(Dropout(drop_1)) model_4.add(Conv2D(64,(3,3), padding='valid')) model_4.add(BatchNormalization()) model_4.add(ReLU()) model_4.add(MaxPooling2D()) model_4.add(Dropout(drop_1)) model_4.add(Conv2D(128,(3,3), padding='valid')) model_4.add(BatchNormalization()) model_4.add(ReLU()) model_4.add(Flatten()) model_4.add(Dropout(drop_2)) model_4.add(Dense(64)) model_4.add(BatchNormalization()) model_4.add(ReLU()) model_4.add(Dense(10,activation='softmax')) model_4.summary() # + id="QVg6hkXFdgf2" model_4.compile(loss='sparse_categorical_crossentropy',optimizer='adam',metrics=['accuracy']) # + colab={"base_uri": "https://localhost:8080/"} id="FhZtEa-Ldgf2" outputId="0fa66b67-3df0-4dda-c27e-e7d090938613" history_4 = model_4.fit(X_train, y_train, epochs=15, validation_split=0.2) # + colab={"base_uri": "https://localhost:8080/"} id="e3B5n0MQdgf3" outputId="0268da44-bd55-4f16-8974-13e965829c41" model_4.evaluate(X_test, y_test) # + colab={"base_uri": "https://localhost:8080/", "height": 336} id="NJikJYRTdgf3" outputId="60239a67-64d1-4631-9398-dbfae2c64f2f" history_plot_from_loss_acc(history_4, "model_4 BatchNormalization and dropout") # + [markdown] id="yS-COFj1eUcs" # # model_5 different model architecture, add for each layer one Conv2D and padding same # + colab={"base_uri": "https://localhost:8080/"} id="4tlFgeKaeYjE" outputId="7d57918d-2cc3-4cbc-fbb4-4350dc26b1c7" drop_1 = 0.3 # Dropout rate drop_2 = 0.5 # Dropout rate for 1st Dense layer learning_rate = 0.001 dec_rate = 0 #1/4353 * 0.2 optim = Adam(lr=learning_rate, decay=dec_rate) model_5 = Sequential() model_5.add(Conv2D(32, (3,3), padding='same', input_shape=(28,28,1))) model_5.add(Conv2D(32, (3,3), padding='same')) model_5.add(BatchNormalization()) model_5.add(ReLU()) model_5.add(MaxPooling2D()) model_5.add(Dropout(drop_1)) model_5.add(Conv2D(64,(3,3), padding='same')) model_5.add(Conv2D(64,(3,3), padding='same')) model_5.add(BatchNormalization()) model_5.add(ReLU()) model_5.add(MaxPooling2D()) model_5.add(Dropout(drop_1)) model_5.add(Conv2D(128,(3,3), padding='same')) 
model_5.add(Conv2D(128,(3,3), padding='same')) model_5.add(BatchNormalization()) model_5.add(ReLU()) model_5.add(Flatten()) model_5.add(Dropout(drop_2)) model_5.add(Dense(64)) model_5.add(BatchNormalization()) model_5.add(ReLU()) model_5.add(Dense(10,activation='softmax')) model_5.summary() # + id="NOJqNAL1fiZc" model_5.compile(loss='sparse_categorical_crossentropy',optimizer='adam',metrics=['accuracy']) # + colab={"base_uri": "https://localhost:8080/"} id="4N4t_oBQfiZc" outputId="843dc373-6a45-45be-cc11-ba79c1c8cded" history_5 = model_5.fit(X_train, y_train, epochs=15, validation_split=0.2) # + colab={"base_uri": "https://localhost:8080/"} id="avp7U2IafiZc" outputId="349176f8-48e4-497a-ae95-d96d69cbcdcb" model_5.evaluate(X_test, y_test) # + colab={"base_uri": "https://localhost:8080/", "height": 381} id="IxPCj3v-fiZd" outputId="58fe34d8-ebe4-49f9-9782-cd507617471d" history_plot_from_loss_acc(history_5, "model_5 different model architecture,\n add for each layer \none Conv2D and padding valid\n") # + [markdown] id="_kfHv3WN_YR7" # # show a few images with their ture and predicted labels # + [markdown] id="9CBf6eLSZzGP" # ## first 25 prediction # + colab={"base_uri": "https://localhost:8080/"} id="W_JVwKI4_zlQ" outputId="9ba08c28-181b-4bce-befb-5b1bd9355c09" X_test_subset =X_test[0:25] y_test_subset =y_test[0:25] # + colab={"base_uri": "https://localhost:8080/"} id="u18YdG2dINLm" outputId="870b1a3b-f0e5-4e07-a4a4-913ec145fd5b" y_predict = np.argmax(model.predict(X_test_subset), axis=-1) y_predict # + id="IA77Dy3OHPUE" def plot_prediction(X_test,y_test,class_label,y_predict): plt.figure(figsize=(20,20)) plt.subplots_adjust(left=0.125, bottom=0.1, right=0.9, top=0.9) for i in range(25): plt.subplot(5,5, i+1) plt.imshow(X_test[i].reshape(28,28)) plt.title("real "+class_label.get(y_test[i])+"\n predict "+class_label.get(y_test[i])) plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="sf8jvrLHKrjT" outputId="aa74f0b5-2a7e-445d-d70d-7a9babd95bfb" plot_prediction(X_test_subset,y_test_subset,class_label,y_predict) # + [markdown] id="Fwatd1K3Z62_" # ## wrong prediction # + id="vh0PTTbVOQAX" import pandas as pd # + id="PbNCh89PSInb" def plot_prediction(df,X_test,class_label): m = int(df.shape[0]/5)+1 plt.figure(figsize=(20,m*4)) plt.subplots_adjust(left=0.125, bottom=0.1, right=0.9, top=0.9) i = 1 for index, row in df.iterrows(): plt.subplot(m,5, i) plt.imshow(X_test[index].reshape(28,28)) plt.title("label: "+class_label.get(row.y_test)+"\n predict: "+class_label.get(row.y_predict)) i += 1 plt.show() # + id="E917Fn_u_XMI" y_predict_all = np.argmax(model.predict(X_test), axis=-1) # + id="dQhgrxUiN-tv" df = pd.DataFrame(data=list(zip(y_test,y_predict_all)),columns=['y_test','y_predict']) # + id="z7HrRKf0QTQ5" df_wrong = df[df['y_test'] != df['y_predict']] # + colab={"base_uri": "https://localhost:8080/", "height": 533} id="iijg7LvyTr4M" outputId="be11fd78-a58b-4462-b0ec-bc16c2f04444" plot_prediction(df_wrong.head(10),X_test,class_label) # + [markdown] id="88JJVhr_ayfI" # ## right prediction # + id="HYYl6CYFaNG5" df_right = df[df['y_test'] == df['y_predict']] # + colab={"base_uri": "https://localhost:8080/", "height": 986} id="apGRmIdraO_P" outputId="bad3b624-c8ba-43dc-d467-fdbd645b95e8" plot_prediction(df_right.sample(20),X_test,class_label)
Quest_Simple_Image_Classification_with_Neural_Networks_Martin.ipynb
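Beyond the overall accuracy reported by `model.evaluate` in the notebook above, a per-class accuracy makes it easy to see which garments get confused. A toy sketch of the idea, using a miniature stand-in for the `y_test`/`y_predict` DataFrame built near the end of that notebook:

```python
import pandas as pd

# Stand-in for the real df of true vs. predicted labels; class_label maps
# label index -> class name (first three Fashion-MNIST classes shown).
class_label = {0: 'T-shirt/top', 1: 'Trouser', 2: 'Pullover'}
df = pd.DataFrame({'y_test':    [0, 0, 1, 2, 2, 2],
                   'y_predict': [0, 1, 1, 2, 0, 2]})

per_class_acc = (
    (df['y_test'] == df['y_predict'])   # correct / incorrect per sample
    .groupby(df['y_test'])              # grouped by the true class
    .mean()                             # fraction correct per class
    .rename(index=class_label)
)
print(per_class_acc.sort_values())
```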
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.6.10 64-bit (''ma'': conda)' # metadata: # interpreter: # hash: 739f6139ea16146f6825468ed5e82eb0c1c232f377b4e45bfd13eaa0a4a5ceb5 # name: python3 # --- # # Training and Evaluation # + # system imports import os from datetime import datetime # additional imports import pandas as pd import numpy as np from tqdm.auto import tqdm from sklearn.preprocessing import LabelEncoder, StandardScaler from sklearn.model_selection import train_test_split, KFold # internal imports from utils import plot_confusion_matrix # - # ## Hyperparameters hparams = { 'dataset': 'data/prepared_data_balanced.csv', 'epochs': 15, 'batch_size': 16, 'lr': 1e-3, 'features': [ 'chroma_stft', 'rmse', 'spectral_centroid', 'spectral_bandwidth', 'rolloff', 'zero_crossing_rate', 'mfcc1', 'mfcc2', 'mfcc3', 'mfcc4', 'mfcc5', 'mfcc6', 'mfcc7', 'mfcc8', 'mfcc9', 'mfcc10', 'mfcc11', 'mfcc12', 'mfcc13', 'mfcc14', 'mfcc15', 'mfcc16', 'mfcc17', 'mfcc18', 'mfcc19', 'mfcc20' ] } # ## Prepare Data # + df_features = pd.read_csv(hparams['dataset']) X = np.array(df_features[hparams['features']], dtype=np.float32) encoder = LabelEncoder() y = encoder.fit_transform(df_features['label']) print('classes:', encoder.classes_) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) # scale data scaler = StandardScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test) print('X_train.shape:', X_train.shape) print('y_train.shape:', y_train.shape) # + def train_eval_classifier(clf): clf.fit(X_train, y_train) predictions = clf.predict(X_train) accuracy_train = np.sum(predictions == y_train) / len(y_train) print("Train Accuracy:", accuracy_train) predictions = clf.predict(X_test) accuracy_test = np.sum(predictions == y_test) / len(y_test) print("Test Accuracy:", accuracy_test) plot_confusion_matrix(y_test, predictions, encoder.classes_) def k_fold_train_eval_classifier(clf): k_folds = 4 kfold = KFold(n_splits=k_folds, shuffle=True, random_state=42) indices = np.arange(len(y)) results_train = [] results_test = [] print(f'K-FOLD CROSS VALIDATION RESULTS FOR {k_folds} FOLDS') print('--------------------------------------------') print('| | Train Accuracy | Test Accuracy |') print('--------------------------------------------') for fold, (train_ids, test_ids) in enumerate(kfold.split(indices)): X_train = X[train_ids] y_train = y[train_ids] X_test = X[test_ids] y_test = y[test_ids] # train classifier clf.fit(X_train, y_train) # evaluate classifier on train dataset predictions = clf.predict(X_train) train_accuracy = np.sum(predictions == y_train) / len(y_train) results_train.append(train_accuracy) # evaluate classifier on test dataset predictions = clf.predict(X_test) eval_accuracy = np.sum(predictions == y_test) / len(y_test) results_test.append(eval_accuracy) print(f'| Fold {fold} | {train_accuracy*100:.2f} % | {eval_accuracy*100:.2f} % |') print('--------------------------------------------') print(f'| Average | {np.mean(results_train)*100:.2f} % | {np.mean(results_test)*100:.2f} % |') plot_confusion_matrix(y_test, predictions, encoder.classes_) # - # ## Naive Bayes # + from sklearn.naive_bayes import GaussianNB clf = GaussianNB() k_fold_train_eval_classifier(clf) # - # ## Support Verctor Machine # + from sklearn import svm clf = svm.NuSVC(kernel='poly') k_fold_train_eval_classifier(clf) # - # ## RandomForestClassifier # + 
tags=[] from sklearn.ensemble import RandomForestClassifier clf = RandomForestClassifier(max_depth=10, n_estimators=100, random_state=42) k_fold_train_eval_classifier(clf)
005_evaluate_other_classifier.ipynb
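In the k-fold helper of the notebook above, the classifier sees the raw (unscaled) features inside each fold. If scaling is wanted there, one option is to put the scaler and the model into a scikit-learn `Pipeline`, so the scaler is re-fit on each training fold and never sees the held-out fold. A self-contained sketch on synthetic data (26 features, mirroring the feature list above):

```python
from sklearn.datasets import make_classification
from sklearn.model_selection import KFold, cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import NuSVC

# Synthetic stand-in for the audio feature matrix.
X_toy, y_toy = make_classification(n_samples=200, n_features=26, random_state=42)

# Scaling happens inside the pipeline, i.e. inside each cross-validation fold.
clf = make_pipeline(StandardScaler(), NuSVC(kernel='poly'))
scores = cross_val_score(clf, X_toy, y_toy,
                         cv=KFold(n_splits=4, shuffle=True, random_state=42))
print(scores, scores.mean())
```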
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="V8k9buNKTfud" # # Label and feature engineering # # This lab is *optional*. It demonstrates advanced SQL queries for time-series engineering. For real-world problems, this type of feature engineering code is essential. If you are pursuing a time-series project for open project week, feel free to use this code as a template. # # --- # # Learning objectives: # # 1. Learn how to use BigQuery to build time-series features and labels for forecasting # 2. Learn how to visualize and explore features. # 3. Learn effective scaling and normalizing techniques to improve our modeling results # # **Note: In the previous lab we explored the data, if you haven’t run the previous notebook, go back to [optional_1_data_exploration.ipynb](../solutions/optional_1_data_exploration.ipynb) and run it.** # # Now that we have explored the data, let's start building our features, so we can build a model. # # <h3><font color="#4885ed">Feature Engineering</font> </h3> # # Use the `price_history` table, we can look at past performance of a given stock, to try to predict it's future stock price. In this notebook we will be focused on cleaning and creating features from this table. # # There are typically two different approaches to creating features with time-series data. # # **One approach** is aggregate the time-series into "static" features, such as "min_price_over_past_month" or "exp_moving_avg_past_30_days". Using this approach, we can use a deep neural network or a more "traditional" ML model to train. Notice we have essentially removed all sequention information after aggregating. This assumption can work well in practice. # # A **second approach** is to preserve the ordered nature of the data and use a sequential model, such as a recurrent neural network. This approach has a nice benefit that is typically requires less feature engineering. Although, training sequentially models typically takes longer. # # In this notebook, we will build features and also create rolling windows of the ordered time-series data. # # <h3><font color="#4885ed">Label Engineering</font> </h3> # # We are trying to predict if the stock will go up or down. In order to do this we will need to "engineer" our label by looking into the future and using that as the label. We will be using the [`LAG`](https://cloud.google.com/bigquery/docs/reference/standard-sql/functions-and-operators#lag) function in BigQuery to do this. Visually this looks like: # # ![](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/blogs/gcp_forecasting/rolling_window.gif?raw=true) # + [markdown] colab_type="text" id="WBBSZf_uTdGy" # ## Import libraries; setup # + colab={} colab_type="code" id="kC9RZRlqTfuj" jupyter={"outputs_hidden": true} PROJECT = 'your-gcp-project' # Replace with your project ID. # + colab={} colab_type="code" id="IjsuN9heTfue" jupyter={"outputs_hidden": true} import pandas as pd from google.cloud import bigquery from IPython.core.magic import register_cell_magic from IPython import get_ipython bq = bigquery.Client(project = PROJECT) # + colab={} colab_type="code" id="xyaeBdzMTdG2" jupyter={"outputs_hidden": true} # Allow you to easily have Python variables in SQL query. 
@register_cell_magic('with_globals') def with_globals(line, cell): contents = cell.format(**globals()) if 'print' in line: print(contents) get_ipython().run_cell(contents) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="VHEy7L2EW-ug" jupyter={"outputs_hidden": true} outputId="b8a94157-c6d9-425d-b124-cead5d7d9d3f" def create_dataset(): dataset = bigquery.Dataset(bq.dataset("stock_market")) try: bq.create_dataset(dataset) # Will fail if dataset already exists. print("Dataset created") except: print("Dataset already exists") create_dataset() # + [markdown] colab_type="text" id="Ip9SZU7CTful" # ## Create time-series features and determine label based on market movement # + [markdown] colab_type="text" id="WduqaabdTfum" # ### Summary of base tables # - # **TODO**: How many rows are in our base tables `price_history` and `snp500`? # + colab={"base_uri": "https://localhost:8080/", "height": 77} colab_type="code" id="GEmgSKBNTdG_" jupyter={"outputs_hidden": true} outputId="845d17a4-83a0-4314-d888-8cc29ee90c2b" # %%with_globals # %%bigquery --project {PROJECT} --# TODO # + colab={"base_uri": "https://localhost:8080/", "height": 77} colab_type="code" id="Utn87x_ATdHC" jupyter={"outputs_hidden": true} outputId="9d4a74d7-1ad0-4510-adf0-91c7ee7e87b5" # %%with_globals # %%bigquery --project {PROJECT} --# TODO # + [markdown] colab_type="text" id="at7EL7pITfuq" # ### Label engineering # + [markdown] colab_type="text" id="pQ7R1VcWeJq9" # Ultimately, we need to end up with a single label for each day. The label takes on 3 values: {`down`, `stay`, `up`}, where `down` and `up` indicates the normalized price (more on this below) went down 1% or more and up 1% or more, respectively. `stay` indicates the stock remained within 1%. # # The steps are: # # 1. Compare close price and open price # 2. Compute price features using analytics functions # 3. Compute normalized price change (%) # 4. Join with S&P 500 table # 5. Create labels (`up`, `down`, `stay`) # # + [markdown] colab_type="text" id="1FvPgI6UOoQO" # <h3><font color="#4885ed">Compare close price and open price</font> </h3> # # For each row, get the close price of yesterday and the open price of tomorrow using the [`LAG`](https://cloud.google.com/bigquery/docs/reference/legacy-sql#lag) function. We will determine tomorrow's close - today's close. # + [markdown] colab_type="text" id="vBJzyVtCTfur" # #### Shift to get tomorrow's close price. # - # **Learning objective 1** # + colab={} colab_type="code" id="IreuNo_pTfus" jupyter={"outputs_hidden": true} # %%with_globals print # %%bigquery df --project {PROJECT} CREATE OR REPLACE TABLE `stock_market.price_history_delta` AS ( WITH shifted_price AS ( SELECT *, (LAG(close, 1) OVER (PARTITION BY symbol order by Date DESC)) AS tomorrow_close FROM `stock_src.price_history` WHERE Close > 0 ) SELECT a.*, (tomorrow_close - Close) AS tomo_close_m_close FROM shifted_price a ) # + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="adrk4vc1TdHM" jupyter={"outputs_hidden": true} outputId="6d6fbba1-9b94-4b96-fd49-6ceb39fa4ccb" # %%with_globals # %%bigquery --project {PROJECT} SELECT * FROM stock_market.price_history_delta ORDER by Date LIMIT 100 # + [markdown] colab_type="text" id="8UnOKtvdTdHO" # **TODO**: Historically, we know that the stock market has been going up. Can you think of a way to verify this using our newly created table `price_history_delta`? 
# - # **Learning objective 2** # + colab={"base_uri": "https://localhost:8080/", "height": 77} colab_type="code" id="XY2MetOeTfux" jupyter={"outputs_hidden": true} outputId="9310bac5-2d0e-468b-fa8e-a981f09b4b1c" # %%with_globals print # %%bigquery --project {PROJECT} SELECT --# TODO: verify the stock market is going up -- on average. FROM stock_market.price_history_delta # + [markdown] colab_type="text" id="efb9PCBdTfu0" # ### Add time series features # + [markdown] colab_type="text" id="S_vRjdyhOqZi" # <h3><font color="#4885ed">Compute price features using analytics functions</font> </h3> # # In addition, we will also build time-series features using the min, max, mean, and std (can you think of any over functions to use?). To do this, let's use [analytic functions]() in BigQuery (also known as window functions). # ``` # An analytic function is a function that computes aggregate values over a group of rows. Unlike aggregate functions, which return a single aggregate value for a group of rows, analytic functions return a single value for each row by computing the function over a group of input rows. # ``` # Using the `AVG` analytic function, we can compute the average close price of a given symbol over the past week (5 business days): # ```python # (AVG(close) OVER (PARTITION BY symbol # ORDER BY Date # ROWS BETWEEN 5 PRECEDING AND 1 PRECEDING)) / close # AS close_avg_prior_5_days # ``` # - # **Learning objective 1** # # **TODO**: Please fill in the `# TODO`s in the below query # + colab={"base_uri": "https://localhost:8080/", "height": 840} colab_type="code" id="pBi_CruzTfu0" jupyter={"outputs_hidden": true} outputId="7a7c57f8-cb57-4c81-9786-c785c9c4c518" def get_window_fxn(agg_fxn, n_days): """Generate a time-series feature. E.g., Compute the average of the price over the past 5 days.""" SCALE_VALUE = 'close' sql = ''' ({agg_fxn}(close) OVER (PARTITION BY (# TODO) ORDER BY (# TODO) ROWS BETWEEN {n_days} (# TODO)))/{scale} AS close_{agg_fxn}_prior_{n_days}_days'''.format( agg_fxn=agg_fxn, n_days=n_days, scale=SCALE_VALUE) return sql WEEK = 5 MONTH = 20 YEAR = 52*5 agg_funcs = ('MIN', 'MAX', 'AVG', 'STDDEV') lookbacks = (WEEK, MONTH, YEAR) sqls = [] for fxn in agg_funcs: for lookback in lookbacks: sqls.append(get_window_fxn(fxn, lookback)) time_series_features_sql = ','.join(sqls) # SQL string. def preview_query(): print(time_series_features_sql[0:1000]) preview_query() # + colab={"base_uri": "https://localhost:8080/", "height": 31} colab_type="code" id="4WX4VFSvTfu2" jupyter={"outputs_hidden": true} outputId="41cdcb4d-ccbd-4e12-9c6c-88de2d4538f5" # %%with_globals print # %%bigquery --project {PROJECT} CREATE OR REPLACE TABLE stock_market.price_features_delta AS SELECT * FROM (SELECT *, {time_series_features_sql}, -- Also get the raw time-series values; will be useful for the RNN model. (ARRAY_AGG(close) OVER (PARTITION BY symbol ORDER BY Date ROWS BETWEEN 260 PRECEDING AND 1 PRECEDING)) AS close_values_prior_260, ROW_NUMBER() OVER (PARTITION BY symbol ORDER BY Date) AS days_on_market FROM stock_market.price_history_delta) WHERE days_on_market > {YEAR} # + jupyter={"outputs_hidden": true} # %%bigquery --project {PROJECT} SELECT * FROM stock_market.price_features_delta ORDER BY symbol, Date LIMIT 10 # + [markdown] colab_type="text" id="EjGaQYuRTfu6" # #### Compute percentage change, then self join with prices from S&P index. # # We will also compute price change of S&P index, GSPC. We do this so we can compute the normalized percentage change. 
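# + [markdown]
# Before writing the BigQuery version, it can help to see the "join every symbol's daily
# change against the index's change on the same date" idea in plain pandas. The cell below
# is a small, self-contained sketch on made-up numbers -- it is not part of the lab
# pipeline, and the column names (`Date`, `symbol`, `scaled_change`) simply mirror the
# tables built in this notebook.

# +
import pandas as pd

# Toy per-symbol daily scaled changes, including the index symbol 'gspc'.
toy = pd.DataFrame({
    'Date': ['2019-01-02', '2019-01-02', '2019-01-03', '2019-01-03'],
    'symbol': ['aapl', 'gspc', 'aapl', 'gspc'],
    'scaled_change': [0.012, 0.004, -0.020, -0.015],
})

# Pull out the index rows and merge them back onto every row by date (a "self join").
sp = (toy.loc[toy['symbol'] == 'gspc', ['Date', 'scaled_change']]
         .rename(columns={'scaled_change': 's_p_scaled_change'}))
normalized = toy.merge(sp, on='Date', how='left')
normalized['normalized_change'] = (normalized['scaled_change']
                                   - normalized['s_p_scaled_change'])
print(normalized)
# -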
# + [markdown] colab_type="text" id="zL55y-YnOvOu" # <h3><font color="#4885ed">Compute normalized price change (%)</font> </h3> # # Before we can create our labels we need to normalize the price change using the S&P 500 index. The normalization using the S&P index fund helps ensure that the future price of a stock is not due to larger market effects. Normalization helps us isolate the factors contributing to the performance of a stock_market. # # Let's use the normalization scheme from by subtracting the scaled difference in the S&P 500 index during the same time period. # # In Python: # ```python # # Example calculation. # scaled_change = (50.59 - 50.69) / 50.69 # scaled_s_p = (939.38 - 930.09) / 930.09 # normalized_change = scaled_change - scaled_s_p # assert normalized_change == ~1.2% # ``` # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="0uz1Qo0STfu7" jupyter={"outputs_hidden": true} outputId="67e1be37-8729-47d5-fde9-9c4f87da82b5" scaled_change = (50.59 - 50.69) / 50.69 scaled_s_p = (939.38 - 930.09) / 930.09 normalized_change = scaled_change - scaled_s_p print(''' scaled change: {:2.3f} scaled_s_p: {:2.3f} normalized_change: {:2.3f} '''.format(scaled_change, scaled_s_p, normalized_change)) # + [markdown] colab_type="text" id="HY9AJAN3Tfu-" # ### Compute normalized price change (shown above). # + [markdown] colab_type="text" id="m6OhYVoITdHd" # Let's join scaled price change (tomorrow_close / close) with the [gspc](https://en.wikipedia.org/wiki/S%26P_500_Index) symbol (symbol for the S&P index). Then we can normalize using the scheme described above. # - # **Learning objective 3** # # **TODO**: Please fill in the `# TODO` in the code below. # + colab={} colab_type="code" id="_W71_cb4TdHe" jupyter={"outputs_hidden": true} snp500_index = 'gspc' # + colab={"base_uri": "https://localhost:8080/", "height": 31} colab_type="code" id="b1PNvxhuTfu_" jupyter={"outputs_hidden": true} outputId="c9b87b11-cff2-452f-8c29-147290f95e1f" # %%with_globals print # %%bigquery --project {PROJECT} CREATE OR REPLACE TABLE stock_market.price_features_norm_per_change AS WITH all_percent_changes AS ( SELECT *, (tomo_close_m_close / Close) AS scaled_change FROM `stock_market.price_features_delta` ), s_p_changes AS (SELECT scaled_change AS s_p_scaled_change, date FROM all_percent_changes WHERE symbol="{snp500_index}") SELECT all_percent_changes.*, s_p_scaled_change, (# TODO) AS normalized_change FROM all_percent_changes LEFT JOIN s_p_changes --# Add S&P change to all rows ON all_percent_changes.date = s_p_changes.date # + [markdown] colab_type="text" id="5lcs6_BtTfvB" # #### Verify results # + colab={} colab_type="code" id="0G1SbI8kTdHl" jupyter={"outputs_hidden": true} # %%with_globals print # %%bigquery df --project {PROJECT} SELECT * FROM stock_market.price_features_norm_per_change LIMIT 10 # + colab={"base_uri": "https://localhost:8080/", "height": 299} colab_type="code" id="BeNiVymgTdHn" jupyter={"outputs_hidden": true} outputId="f7534321-713c-483d-ba4f-d096c59296fa" df.head() # + [markdown] colab_type="text" id="8TFFeA5sOm2Y" # <h3><font color="#4885ed">Join with S&P 500 table and Create labels: {`up`, `down`, `stay`}</font> </h3> # # Join the table with the list of S&P 500. This will allow us to limit our analysis to S&P 500 companies only. # # Finally we can create labels. 
The following SQL statement should do: # # ```sql # CASE WHEN normalized_change < -0.01 THEN 'DOWN' # WHEN normalized_change > 0.01 THEN 'UP' # ELSE 'STAY' # END # ``` # - # **Learning objective 1** # + colab={} colab_type="code" id="iv8i3e8GTdHq" jupyter={"outputs_hidden": true} down_thresh = -0.01 up_thresh = 0.01 # - # **TODO**: Please fill in the `CASE` function below. # + colab={} colab_type="code" id="-Kf5POU6TfvM" jupyter={"outputs_hidden": true} # %%with_globals print # %%bigquery df --project {PROJECT} CREATE OR REPLACE TABLE stock_market.percent_change_sp500 AS SELECT *, CASE --# TODO END AS direction FROM stock_market.price_features_norm_per_change features INNER JOIN `stock_src.snp500` USING (symbol) # + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" id="jQzSbN2yTdH0" jupyter={"outputs_hidden": true} outputId="7a58fcb5-9a31-4c1b-fddb-ef3a5b379f2e" # %%with_globals print # %%bigquery --project {PROJECT} SELECT direction, COUNT(*) as cnt FROM stock_market.percent_change_sp500 GROUP BY direction # + colab={} colab_type="code" id="OLYTEUstTfva" jupyter={"outputs_hidden": true} # %%with_globals print # %%bigquery df --project {PROJECT} SELECT * FROM stock_market.percent_change_sp500 LIMIT 20 # + colab={"base_uri": "https://localhost:8080/", "height": 202} colab_type="code" id="iiARkRzPTdH5" jupyter={"outputs_hidden": true} outputId="2ed8fc43-f8c7-4827-9033-12b142917c9a" df.columns # - # The dataset is still quite large and the majority of the days the market `STAY`s. Let's focus our analysis on dates where [earnings per share](https://en.wikipedia.org/wiki/Earnings_per_share) (EPS) information is released by the companies. The EPS data has 3 key columns surprise, reported_EPS, and consensus_EPS: # + jupyter={"outputs_hidden": true} # %%with_globals print # %%bigquery --project {PROJECT} SELECT * FROM `stock_src.eps` LIMIT 10 # - # The surprise column indicates the difference between the expected (consensus expected eps by analysts) and the reported eps. We can join this table with our derived table to focus our analysis during earnings periods: # + jupyter={"outputs_hidden": true} # %%with_globals print # %%bigquery --project {PROJECT} CREATE OR REPLACE TABLE stock_market.eps_percent_change_sp500 AS SELECT a.*, b.consensus_EPS, b.reported_EPS, b.surprise FROM stock_market.percent_change_sp500 a INNER JOIN `stock_src.eps` b ON a.Date = b.date AND a.symbol = b.symbol # + colab={} colab_type="code" id="OLYTEUstTfva" jupyter={"outputs_hidden": true} # %%with_globals print # %%bigquery --project {PROJECT} SELECT * FROM stock_market.eps_percent_change_sp500 LIMIT 20 # + jupyter={"outputs_hidden": true} # %%with_globals print # %%bigquery --project {PROJECT} SELECT direction, COUNT(*) as cnt FROM stock_market.eps_percent_change_sp500 GROUP BY direction # + [markdown] colab_type="text" id="COPWKR1WTfvd" # ## Feature exploration # + [markdown] colab_type="text" id="T5HLcwy1Tfve" # Now that we have created our recent movements of the company’s stock price, let's visualize our features. This will help us understand the data better and possibly spot errors we may have made during our calculations. # # As a reminder, we calculated the scaled prices 1 week, 1 month, and 1 year before the date that we are predicting at. # + [markdown] colab_type="text" id="RDROJ7qMh7oz" # Let's write a re-usable function for aggregating our features. 
# - # **Learning objective 2** # + colab={} colab_type="code" id="Q7dT9NTSTfvf" def get_aggregate_stats(field, round_digit=2): """Run SELECT ... GROUP BY field, rounding to nearest digit.""" df = bq.query(''' SELECT {field}, COUNT(*) as cnt FROM (SELECT ROUND({field}, {round_digit}) AS {field} FROM stock_market.eps_percent_change_sp500) rounded_field GROUP BY {field} ORDER BY {field}'''.format(field=field, round_digit=round_digit, PROJECT=PROJECT)).to_dataframe() return df.dropna() # + colab={"base_uri": "https://localhost:8080/", "height": 338} colab_type="code" id="xgmCvlMtTfvh" jupyter={"outputs_hidden": true} outputId="5db92f71-8cce-4475-fad8-a991e2fc208f" field = 'close_AVG_prior_260_days' CLIP_MIN, CLIP_MAX = 0.1, 4. df = get_aggregate_stats(field) values = df[field].clip(CLIP_MIN, CLIP_MAX) counts = 100 * df['cnt'] / df['cnt'].sum() # Percentage. ax = values.hist(weights=counts, bins=30, figsize=(10, 5)) ax.set(xlabel=field, ylabel="%"); # - # **TODO** Use the `get_aggregate_stats` from above to visualize the `normalized_change` column. # + colab={} colab_type="code" id="UDcnYJrCTfvj" jupyter={"outputs_hidden": true} outputId="7e949d16-8c9a-416c-de2d-28ba97a2aa65" field = 'normalized_change' # TODO # + [markdown] colab_type="text" id="DuV7glaEh7o_" # Let's look at results by day-of-week, month, etc. # + colab={} colab_type="code" id="l7egsYhcTfvm" jupyter={"outputs_hidden": true} VALID_GROUPBY_KEYS = ('DAYOFWEEK', 'DAY', 'DAYOFYEAR', 'WEEK', 'MONTH', 'QUARTER', 'YEAR') DOW_MAPPING = {1: 'Sun', 2: 'Mon', 3: 'Tues', 4: 'Wed', 5: 'Thur', 6: 'Fri', 7: 'Sun'} def groupby_datetime(groupby_key, field): if groupby_key not in VALID_GROUPBY_KEYS: raise Exception('Please use a valid groupby_key.') sql = ''' SELECT {groupby_key}, AVG({field}) as avg_{field} FROM (SELECT {field}, EXTRACT({groupby_key} FROM date) AS {groupby_key} FROM stock_market.eps_percent_change_sp500) foo GROUP BY {groupby_key} ORDER BY {groupby_key} DESC'''.format(groupby_key=groupby_key, field=field, PROJECT=PROJECT) print(sql) df = bq.query(sql).to_dataframe() if groupby_key == 'DAYOFWEEK': df.DAYOFWEEK = df.DAYOFWEEK.map(DOW_MAPPING) return df.set_index(groupby_key).dropna() # + colab={"base_uri": "https://localhost:8080/", "height": 403} colab_type="code" id="z7mxvIqYTfvp" jupyter={"outputs_hidden": true} outputId="df531585-b724-4907-a84a-03483efc9a7d" field = 'normalized_change' df = groupby_datetime('DAYOFWEEK', field) ax = df.plot(kind='barh', color='orange', alpha=0.7) ax.grid(which='major', axis='y', linewidth=0) # + colab={"base_uri": "https://localhost:8080/", "height": 403} colab_type="code" id="BRI70WJpTfvs" jupyter={"outputs_hidden": true} outputId="b7525484-9b43-407b-f180-7de5ab7225a4" field = 'close' df = groupby_datetime('DAYOFWEEK', field) ax = df.plot(kind='barh', color='orange', alpha=0.7) ax.grid(which='major', axis='y', linewidth=0) # + colab={"base_uri": "https://localhost:8080/", "height": 403} colab_type="code" id="40m-6nMKTfvw" jupyter={"outputs_hidden": true} outputId="4c4bd05b-2278-4eb7-a741-d39076ec59d3" field = 'normalized_change' df = groupby_datetime('MONTH', field) ax = df.plot(kind='barh', color='blue', alpha=0.7) ax.grid(which='major', axis='y', linewidth=0) # + colab={"base_uri": "https://localhost:8080/", "height": 403} colab_type="code" id="Zj2pOrAiTfvz" jupyter={"outputs_hidden": true} outputId="4afba896-7e82-458a-86b0-4562dd31b5a4" field = 'normalized_change' df = groupby_datetime('QUARTER', field) ax = df.plot(kind='barh', color='green', alpha=0.7) ax.grid(which='major', axis='y', 
linewidth=0) # + colab={"base_uri": "https://localhost:8080/", "height": 418} colab_type="code" id="ohYWA_YsTfv4" jupyter={"outputs_hidden": true} outputId="7b597812-82f6-41dd-98c4-f3ed1ec5dc6c" field = 'close' df = groupby_datetime('YEAR', field) ax = df.plot(kind='line', color='purple', alpha=0.7) ax.grid(which='major', axis='y', linewidth=0) # + colab={"base_uri": "https://localhost:8080/", "height": 418} colab_type="code" id="BBTC2VunTfv2" jupyter={"outputs_hidden": true} outputId="3b21cb7d-4f80-4faf-a2c9-e67191969556" field = 'normalized_change' df = groupby_datetime('YEAR', field) ax = df.plot(kind='line', color='purple', alpha=0.7) ax.grid(which='major', axis='y', linewidth=0) # + [markdown] colab_type="text" id="P7UJ0W-5WvZC" # BONUS: How do our features correlate with the label `direction`? Build some visualizations. What features are most important? You can visualize this and do it statistically using the [`CORR`](https://cloud.google.com/bigquery/docs/reference/standard-sql/statistical_aggregate_functions) function. # + [markdown] colab_type="text" id="oRCY1E6CTfxD" # Copyright 2019 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
courses/machine_learning/deepdive2/time_series_prediction/labs/optional_2_feature_engineering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import matplotlib.pyplot as plt from matplotlib import rc rc('font', **{'family': 'serif', 'serif': ['Computer Modern']}) rc('text', usetex=True) from priors import * # ## Aligned-spin chi_effective priors # # Let's demonstrate the priors on chi_effective corresponding to a uniform, aligned prior on component spins. # + # Set up three subplots to hold three examples, showing the result of varying mass ratio and # maximum dimensionless spin fig = plt.figure(figsize=(15,4)) ax1 = fig.add_subplot(131) ax2 = fig.add_subplot(132) ax3 = fig.add_subplot(133) # Choose maximum dimensionless spin and mass ratio -- we'll condition on these two parameters aMax=1 q=0.8 ntrials=100000 # Now draw random aligned component spin values from their prior (subject to this choice of aMax) # and compute chi_effectives s1s = aMax*(2.*np.random.random(ntrials)-1.) s2s = aMax*(2.*np.random.random(ntrials)-1.) xeff = (s1s + q*s2s)/(1.+q) # Alternatively, defined a grid of chi_effectives and use our analytic expression for the prior from priors.py xs = np.linspace(-1,1,300) p_xeff = chi_effective_prior_from_aligned_spins(q,aMax,xs) # Plot both on top of one another! ax1.hist(xeff,density=True,bins=50) ax1.plot(xs,p_xeff,color='black') ax1.xaxis.grid(True,which='major',ls=':',color='grey') ax1.yaxis.grid(True,which='major',ls=':',color='grey') ax1.tick_params(labelsize=14) ax1.set_xlabel(r'$\chi_{\rm eff}$',fontsize=18) ax1.set_ylabel(r'$p(\chi_{\rm eff})$',fontsize=18) ax1.annotate(r'$q={0}$'.format(q),(0.75,0.85),xycoords='axes fraction',fontsize=16) ax1.annotate(r'$a_{{\rm max}} = {0}$'.format(aMax),(0.75,0.75),xycoords='axes fraction',fontsize=16) # Repeat a second time, but now under a different choice of mass ratio aMax=1 q=0.1 ntrials=100000 s1s = aMax*(2.*np.random.random(ntrials)-1.) s2s = aMax*(2.*np.random.random(ntrials)-1.) xeff = (s1s + q*s2s)/(1.+q) xs = np.linspace(-1,1,300) p_xeff = chi_effective_prior_from_aligned_spins(q,aMax,xs) ax2.hist(xeff,density=True,bins=50) ax2.plot(xs,p_xeff,color='black') ax2.xaxis.grid(True,which='major',ls=':',color='grey') ax2.yaxis.grid(True,which='major',ls=':',color='grey') ax2.tick_params(labelsize=14) ax2.set_xlabel(r'$\chi_{\rm eff}$',fontsize=18) ax2.annotate(r'$q={0}$'.format(q),(0.75,0.85),xycoords='axes fraction',fontsize=16,backgroundcolor=(1,1,1,0.95)) ax2.annotate(r'$a_{{\rm max}} = {0}$'.format(aMax),(0.75,0.75),xycoords='axes fraction',fontsize=16,backgroundcolor=(1,1,1,0.95)) ax2.set_ylim(0,0.65) # ...and a third time, now varying the maximum spin magnitude aMax=0.4 q=0.8 ntrials=30000 s1s = aMax*(2.*np.random.random(ntrials)-1.) s2s = aMax*(2.*np.random.random(ntrials)-1.) 
xeff = (s1s + q*s2s)/(1.+q) xs = np.linspace(-1,1,300) p_xeff = chi_effective_prior_from_aligned_spins(q,aMax,xs) ax3.hist(xeff,density=True,bins=50) ax3.plot(xs,p_xeff,color='black') ax3.xaxis.grid(True,which='major',ls=':',color='grey') ax3.yaxis.grid(True,which='major',ls=':',color='grey') ax3.tick_params(labelsize=14) ax3.set_xlabel(r'$\chi_{\rm eff}$',fontsize=18) ax3.annotate(r'$q={0}$'.format(q),(0.7,0.85),xycoords='axes fraction',fontsize=16) ax3.annotate(r'$a_{{\rm max}} = {0}$'.format(aMax),(0.7,0.75),xycoords='axes fraction',fontsize=16) plt.tight_layout() #plt.savefig('demo_chi_eff_aligned.pdf',bbox_inches='tight') plt.show() # - # ## Isotropic chi_effective priors # # Now demonstrate the priors on chi_effective corresponding to a uniform, *isotropic* prior on component spins. # + # As above, set up three different cases so we can test the various piecewise cases appearing in our analytic # definition of p(chi_eff|q) fig = plt.figure(figsize=(15,4)) ax1 = fig.add_subplot(131) ax2 = fig.add_subplot(132) ax3 = fig.add_subplot(133) # Choose a conditional value of aMax and mass ratio q aMax=1 q=0.8 # Make random draws from our component spin magnitudes (a1s and a2s) and cosine tilts (u1s and u2s) # and numerically construct the chi_effective prior ntrials=100000 a1s = aMax*np.random.random(ntrials) a2s = aMax*np.random.random(ntrials) u1s = 2.*np.random.random(ntrials)-1. u2s = 2.*np.random.random(ntrials)-1. xeff = (a1s*u1s + q*a2s*u2s)/(1.+q) # Alternatively, use our analytic function defined in priors.py xs = np.linspace(-1,1,300) p_xeff = chi_effective_prior_from_isotropic_spins(q,aMax,xs) # Plot! ax1.hist(xeff,density=True,bins=50) ax1.plot(xs,p_xeff,color='black') ax1.xaxis.grid(True,which='major',ls=':',color='grey') ax1.yaxis.grid(True,which='major',ls=':',color='grey') ax1.tick_params(labelsize=14) ax1.set_xlabel(r'$\chi_{\rm eff}$',fontsize=18) ax1.set_ylabel(r'$p(\chi_{\rm eff})$',fontsize=18) ax1.annotate(r'$q={0}$'.format(q),(0.075,0.85),xycoords='axes fraction',fontsize=16) ax1.annotate(r'$a_{{\rm max}} = {0}$'.format(aMax),(0.075,0.75),xycoords='axes fraction',fontsize=16) # Again, under a different choice of aMax and q aMax=1 q=0.1 ntrials=100000 a1s = aMax*np.random.random(ntrials) a2s = aMax*np.random.random(ntrials) u1s = 2.*np.random.random(ntrials)-1. u2s = 2.*np.random.random(ntrials)-1. xeff = (a1s*u1s + q*a2s*u2s)/(1.+q) xs = np.linspace(-1,1,300) p_xeff = chi_effective_prior_from_isotropic_spins(q,aMax,xs) ax2.hist(xeff,density=True,bins=50) ax2.plot(xs,p_xeff,color='black') ax2.xaxis.grid(True,which='major',ls=':',color='grey') ax2.yaxis.grid(True,which='major',ls=':',color='grey') ax2.tick_params(labelsize=14) ax2.set_xlabel(r'$\chi_{\rm eff}$',fontsize=18) ax2.annotate(r'$q={0}$'.format(q),(0.075,0.85),xycoords='axes fraction',fontsize=16) ax2.annotate(r'$a_{{\rm max}} = {0}$'.format(aMax),(0.075,0.75),xycoords='axes fraction',fontsize=16) # ...and a third time aMax=0.4 q=0.8 ntrials=30000 a1s = aMax*np.random.random(ntrials) a2s = aMax*np.random.random(ntrials) u1s = 2.*np.random.random(ntrials)-1. u2s = 2.*np.random.random(ntrials)-1. 
xeff = (a1s*u1s + q*a2s*u2s)/(1.+q) xs = np.linspace(-1,1,300) p_xeff = chi_effective_prior_from_isotropic_spins(q,aMax,xs) ax3.hist(xeff,density=True,bins=50) ax3.plot(xs,p_xeff,color='black') ax3.xaxis.grid(True,which='major',ls=':',color='grey') ax3.yaxis.grid(True,which='major',ls=':',color='grey') ax3.tick_params(labelsize=14) ax3.set_xlabel(r'$\chi_{\rm eff}$',fontsize=18) ax3.annotate(r'$q={0}$'.format(q),(0.075,0.85),xycoords='axes fraction',fontsize=16) ax3.annotate(r'$a_{{\rm max}} = {0}$'.format(aMax),(0.075,0.75),xycoords='axes fraction',fontsize=16) plt.tight_layout() #plt.savefig('demo_chi_eff.pdf',bbox_inches='tight') plt.show() # - # ## Isotropic chi_p priors # # Finally, demonstrate the priors on chi_p corresponding to a uniform and isotropic prior on component spins. # + # Again set up three different cases fig = plt.figure(figsize=(15,4)) ax1 = fig.add_subplot(131) ax2 = fig.add_subplot(132) ax3 = fig.add_subplot(133) # Case 1 aMax=1 q=0.8 ntrials=100000 a1s = aMax*np.random.random(ntrials) a2s = aMax*np.random.random(ntrials) u1s = 2.*np.random.random(ntrials)-1. u2s = 2.*np.random.random(ntrials)-1. sin1s = np.sqrt(1.-u1s**2) sin2s = np.sqrt(1.-u2s**2) xp = np.maximum(a1s*sin1s,((3.+4.*q)/(4.+3.*q))*q*a2s*sin2s) xs = np.linspace(0,1,300) p_xp = chi_p_prior_from_isotropic_spins(q,aMax,xs) ax1.hist(xp,density=True,bins=50) ax1.plot(xs,p_xp,color='black') ax1.xaxis.grid(True,which='major',ls=':',color='grey') ax1.yaxis.grid(True,which='major',ls=':',color='grey') ax1.tick_params(labelsize=14) ax1.set_xlabel(r'$\chi_{\rm p}$',fontsize=18) ax1.set_ylabel(r'$p(\chi_{\rm p})$',fontsize=18) ax1.annotate(r'$q={0}$'.format(q),(0.75,0.85),xycoords='axes fraction',fontsize=16) ax1.annotate(r'$a_{{\rm max}} = {0}$'.format(aMax),(0.75,0.75),xycoords='axes fraction',fontsize=16) aMax=1 q=0.2 ntrials=100000 a1s = aMax*np.random.random(ntrials) a2s = aMax*np.random.random(ntrials) u1s = 2.*np.random.random(ntrials)-1. u2s = 2.*np.random.random(ntrials)-1. sin1s = np.sqrt(1.-u1s**2) sin2s = np.sqrt(1.-u2s**2) xp = np.maximum(a1s*sin1s,((3.+4.*q)/(4.+3.*q))*q*a2s*sin2s) xs = np.linspace(0,1,300) p_xp = chi_p_prior_from_isotropic_spins(q,aMax,xs) ax2.hist(xp,density=True,bins=50) ax2.plot(xs,p_xp,color='black') ax2.xaxis.grid(True,which='major',ls=':',color='grey') ax2.yaxis.grid(True,which='major',ls=':',color='grey') ax2.tick_params(labelsize=14) ax2.set_xlabel(r'$\chi_{\rm p}$',fontsize=18) ax2.annotate(r'$q={0}$'.format(q),(0.75,0.85),xycoords='axes fraction',fontsize=16) ax2.annotate(r'$a_{{\rm max}} = {0}$'.format(aMax),(0.75,0.75),xycoords='axes fraction',fontsize=16) aMax=0.4 q=0.8 ntrials=100000 a1s = aMax*np.random.random(ntrials) a2s = aMax*np.random.random(ntrials) u1s = 2.*np.random.random(ntrials)-1. u2s = 2.*np.random.random(ntrials)-1. sin1s = np.sqrt(1.-u1s**2) sin2s = np.sqrt(1.-u2s**2) xp = np.maximum(a1s*sin1s,((3.+4.*q)/(4.+3.*q))*q*a2s*sin2s) xs = np.linspace(0,1,300) p_xp = chi_p_prior_from_isotropic_spins(q,aMax,xs) ax3.hist(xp,density=True,bins=50) ax3.plot(xs,p_xp,color='black') ax3.xaxis.grid(True,which='major',ls=':',color='grey') ax3.yaxis.grid(True,which='major',ls=':',color='grey') ax3.tick_params(labelsize=14) ax3.set_xlabel(r'$\chi_{\rm p}$',fontsize=18) ax3.annotate(r'$q={0}$'.format(q),(0.75,0.85),xycoords='axes fraction',fontsize=16) ax3.annotate(r'$a_{{\rm max}} = {0}$'.format(aMax),(0.75,0.75),xycoords='axes fraction',fontsize=16) plt.tight_layout() #plt.savefig('demo_chi_p.pdf',bbox_inches='tight') plt.show() # -
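# The three sections above repeat nearly identical sampling and plotting code for each
# (q, aMax) panel. The cell below is an optional refactoring sketch, not part of the
# original demo: it assumes the `priors` functions already imported at the top of this
# notebook and shows one way to collapse a panel into a reusable helper.

# +
def plot_prior_panel(ax, q, aMax, sample_fn, prior_fn, label,
                     ntrials=100000, xmin=-1., xmax=1., ann_x=0.7):
    # Histogram Monte Carlo draws of an effective-spin quantity against its analytic prior.
    samples = sample_fn(q, aMax, ntrials)
    xs = np.linspace(xmin, xmax, 300)
    ax.hist(samples, density=True, bins=50)
    ax.plot(xs, prior_fn(q, aMax, xs), color='black')
    ax.xaxis.grid(True, which='major', ls=':', color='grey')
    ax.yaxis.grid(True, which='major', ls=':', color='grey')
    ax.tick_params(labelsize=14)
    ax.set_xlabel(label, fontsize=18)
    ax.annotate(r'$q={0}$'.format(q), (ann_x, 0.85), xycoords='axes fraction', fontsize=16)
    ax.annotate(r'$a_{{\rm max}} = {0}$'.format(aMax), (ann_x, 0.75),
                xycoords='axes fraction', fontsize=16)

def sample_chi_eff_isotropic(q, aMax, ntrials):
    # Draw uniform, isotropic component spins and return the implied chi_effective samples.
    a1s = aMax*np.random.random(ntrials)
    a2s = aMax*np.random.random(ntrials)
    u1s = 2.*np.random.random(ntrials)-1.
    u2s = 2.*np.random.random(ntrials)-1.
    return (a1s*u1s + q*a2s*u2s)/(1.+q)

# Example: reproduce one isotropic chi_effective panel with the helper.
fig, ax = plt.subplots(figsize=(5, 4))
plot_prior_panel(ax, q=0.8, aMax=1.,
                 sample_fn=sample_chi_eff_isotropic,
                 prior_fn=chi_effective_prior_from_isotropic_spins,
                 label=r'$\chi_{\rm eff}$')
plt.tight_layout()
plt.show()
# -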
Demo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Create dataset bundle
# This notebook will create the dataset bundle for the Skin lesion tutorial in Knowledge Center.
#
# Download the training data and ground truth for the segmentation task from the __[competition site](https://challenge.kitware.com/#phase/5abcb19a56357d0139260e53)__ or via the links below:
#
# * https://storage.googleapis.com/bucket-8732/SkinLesionSegmentation/ISIC2018_Task1-2_Training_Input.zip
# * https://storage.googleapis.com/bucket-8732/SkinLesionSegmentation/ISIC2018_Task1_Training_GroundTruth.zip

# +
import functools
from glob import glob
import os
import pandas as pd
import sidekick
from sklearn.model_selection import train_test_split
# -

# ---
# **NOTE!**
#
# For information about how to install sidekick, see https://github.com/Peltarion/sidekick
#
# ---
#

input_path = '.'
output_path = './data.zip'

tr_images_rel_path = glob(input_path + '/ISIC2018_Task1-2_Training_Input/*.jpg')
gt_images_rel_path = glob(input_path + '/ISIC2018_Task1_Training_GroundTruth/*.png')
print('Training images: {}, Ground Truth: {}'.format(len(tr_images_rel_path), len(gt_images_rel_path)))

tr_images_rel_path = sorted(glob(os.path.join('*Training_Input*', '*.jpg')))
gt_images_rel_path = sorted(glob(os.path.join('*GroundTruth', '*.png')))
print('Training images: {}, Ground Truth: {}'.format(len(tr_images_rel_path), len(gt_images_rel_path)))

df = pd.DataFrame({'image': tr_images_rel_path, 'mask': gt_images_rel_path})

# +
def create_subsets(df):
    train_data, validate_data = train_test_split(df, test_size=0.20, random_state=42)
    print('Training samples: ' + str(len(train_data.values)))
    print('Validation samples: ' + str(len(validate_data.values)))
    train_data.insert(loc=2, column='subset', value='T')
    validate_data.insert(loc=2, column='subset', value='V')
    return train_data.append(validate_data, ignore_index=True)

df = create_subsets(df)
# -

df.head(5)

# +
# Create preprocessor for images, cropping to specified size
image_processor = functools.partial(sidekick.process_image, mode='resize', size=(64, 64), file_format='png')

# Create dataset
sidekick.create_dataset(
    output_path,
    df,
    path_columns=['image', 'mask'],
    preprocess={
        'image': image_processor,
        'mask': image_processor
    }
)
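# -

# Pairing images with masks by sorting two separate globs assumes the two listings line up
# one-to-one. The optional check below (not part of the original preprocessing) verifies the
# pairing by comparing file stems; it assumes the usual ISIC naming, where a mask filename
# is the image filename plus a `_segmentation` suffix -- adjust the suffix if your copy
# differs.

# +
def mask_matches_image(image_path, mask_path, mask_suffix='_segmentation'):
    # True if the mask filename equals the image filename plus the expected suffix.
    image_stem = os.path.splitext(os.path.basename(image_path))[0]
    mask_stem = os.path.splitext(os.path.basename(mask_path))[0]
    return mask_stem == image_stem + mask_suffix

mismatches = [(img, msk) for img, msk in zip(df['image'], df['mask'])
              if not mask_matches_image(img, msk)]
print('Mismatched image/mask pairs:', len(mismatches))
# -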
skin_lesion_segmentation/skin_lesion_image_segmentation_preprocess.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Anomaly Detection of Retail Store Sales # # This hands-on mini-project will enable you to reinforce your learnings pertaining to anomaly detection in this unit. By now, you must already be aware of the key objective of anomaly detection. Just to refresh your memory, anomaly detection is the identification of outliers or rare event items in a dataset which potentially exhibit abnormal behavior or properties as compared to the rest of the datapoints. # # There are a wide variety of anomaly detection methods including supervised, unsupervised and semi-supervised. Typically you can perform anomaly detection on univariate data, multivariate data as well as data which is temporal in nature. In this mini-project you will leverage state-of-the-art anomaly detection models from frameworks like [__`scikit-learn`__](https://scikit-learn.org/stable/modules/outlier_detection.html) and [__`PyOD`__](https://pyod.readthedocs.io/en/latest/index.html). # # # By the end of this mini-project, you will have successfully applied these techniques to find out potential outliers pertaining to sales transactional data in a retail store dataset and also learnt how to visualize outliers similar to the following plot. # # ![](outlier_ex.png) # # We will be performing anomaly detection on both univariate and multivariate data and leverage the following anomaly detection techniques. # # - Simple Statistical Models (mean & standard deviation: the three-sigma rule) # - Isolation Forest # - Clustering-Based Local Outlier Factor # - Auto-encoders # # 1. Getting and Loading the Dataset # # The first step towards solving any data science or machine learning problem is to obtain the necessary data. In this scenario, we will be dealing with a popular retail dataset known as the [SuperStore Sales Dataset](https://community.tableau.com/docs/DOC-1236) which consists of transactional data pertaining to a retail store. # # #### Please download the required dataset from [here](https://community.tableau.com/docs/DOC-1236) if necessary, although it will also be provided to you along with this notebook for this mini-project # # Once we have the necessary data, we will load up the dataset and perform some initial exploratory data analysis # # 2. Exploratory Data Analysis # # It's time to do some basic exploratory analysis on the retail store transactional data. We start by loading up the dataset into a pandas dataframe. # + import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import matplotlib import warnings warnings.filterwarnings('ignore') # %matplotlib inline df = pd.read_excel("./Superstore.xls") df.info() # - # We don't have any major missing values in our dataset and we can now look at a sample subset of the data df.head() # ## Visualize Sales vs. Order Date # # Let's look more closely at the __`Sales`__ attribute of the dataset in the next few cells. 
We'll start by looking at typical sales over time fig, ax = plt.subplots(1, 1, figsize=(12, 6)) sns.lineplot(x=df['Order Date'], y=df['Sales']); # ## Visualize Sales Distribution # # # Let's now look at the data distribution for __`Sales`__ sns.distplot(df['Sales']) plt.title("Sales Distribution"); df['Sales'].describe() # We can definitely see the presence of potential outliers in terms of the min or max values as compared to the meat of the distribution in the interquartile range as observed in the distribution statistics # ## Q 2.1: Visualize Profit vs. Order Date # # Let's now look closely at the __`Profit`__ attribute of the dataset in the next few cells. We'll start by looking at typical profits over time. # # __Your turn: Plot `Order Date` vs. `Profit` using a line plot__ fig, ax = plt.subplots(1, 1, figsize=(12, 6)) sns.lineplot(x=df['Order Date'], y=df['Profit']); # ## Q 2.2: Visualize Profit Distribution # # Let's now look at the data distribution for __`Profit`__ # # __Your turn: Plot the distribution for `Profit`__ sns.distplot(df['Profit']) plt.title("Profit Distribution"); # __Your turn: Get the essential descriptive statistics for `Profit` using an appropriate function__ df['Profit'].describe() # __Your turn: Do you notice anything interesting about the distribution?__ # We have both positive and negative values in profits since it indicates either a profit or a loss based on the sales and original price of the items. # ## Visualize Discount vs. Profit sns.scatterplot(x="Discount", y="Profit", data=df); # In the above visual, we look at a scatter plot showing the distribution of profits w.r.t discounts given # # 3. Univariate Anomaly Detection # # Univariate is basically analysis done on a single attribute or feature. In this section, we will perform anomaly detection on a single attribute using the following methods. # # - Statistical Process Control Methods (mean + 3sigma thresholding) # - Isolation Forest # # We will start off by demonstrating both these techniques on the __`Sales`__ attribute and later on, you will implement similar techniques on the __`Profit`__ attribute. # ## 3.1: Univariate Anomaly Detection on Sales using Statistical Modeling # # Here we start off by implementing anomaly detecting using statistical modeling on the __`Sales`__ attribute # ### Obtain Upper Limit Threshold for Sales # # Here we are concerned about transactions with high sales values so we compute the upper limit using the $\mu$ + 3$\sigma$ rule where $\mu$ is the mean of the distribution and $\sigma$ is the standard deviation of the distribution. 
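# As a toy worked example (numbers made up, not taken from this dataset): if the mean sale
# were $\mu = 100$ and the standard deviation were $\sigma = 50$, the upper limit would be
# $100 + 3 \times 50 = 250$, and any sale above 250 would be treated as a potential outlier.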
# + mean_sales = df['Sales'].mean() sigma_sales = df['Sales'].std() three_sigma_sales = 3*sigma_sales threshold_sales_value = mean_sales + three_sigma_sales print('Threshold Sales:', threshold_sales_value) # - # ### Visualize Outlier Region # + fig, ax = plt.subplots(1, 1, figsize=(12, 6)) sns.distplot(df['Sales']) plt.axvspan(threshold_sales_value, df['Sales'].max(), facecolor='r', alpha=0.3) plt.title("Sales Distribution with Outlier Region"); # - # ### Filter and Sort Outliers # # Here we filter out the outlier observations and sort by descending order and view the top 5 outlier values sales_outliers_df = df['Sales'][df['Sales'] > threshold_sales_value] print('Total Sales Outliers:', len(sales_outliers_df)) sales_outliers_sorted = sales_outliers_df.sort_values(ascending=False) sales_outliers_sorted.head(5) # ### View Top 10 Outlier Transactions (df.loc[sales_outliers_sorted.index.tolist()][['City', 'Category', 'Sub-Category', 'Product Name', 'Sales', 'Quantity', 'Discount', 'Profit']]).head(10) # ### View Bottom 10 Outlier Transactions (df.loc[sales_outliers_sorted.index.tolist()][['City', 'Category', 'Sub-Category', 'Product Name', 'Sales', 'Quantity', 'Discount', 'Profit']]).tail(10) # ## Q 3.2: Univariate Anomaly Detection on Profit using Statistical Modeling # # In this section you will use the learning from Section 3.1 and implement anomaly detecting using statistical modeling on the __`Profit`__ attribute. Since we have both +ve (profits) and -ve (losses) values in the distribution, we will try to find anomalies for each. # ### Obtain Upper Limit Threshold for Profit # # __Your turn:__ Compute the upper and lower limits using the 𝜇 + 3 𝜎 rule where 𝜇 is the mean of the distribution and 𝜎 is the standard deviation of the distribution. # + # threshold_sales_value = mean_sales + three_sigma_sales mean_profit = df['Profit'].mean() sigma_profit = df['Profit'].std() three_sigma_profit = 3*sigma_profit threshold_profit_upper_limit = mean_profit + three_sigma_profit threshold_profit_lower_limit = mean_profit - three_sigma_profit print('Thresholds Profit:', threshold_profit_lower_limit, threshold_profit_upper_limit) # - # ### Visualize Outlier Regions # # __Your turn:__ Visualize the upper and lower outlier regions in the distribution similar to what you did in 3.1 # + fig, ax = plt.subplots(1, 1, figsize=(12, 6)) sns.distplot(df['Profit']) plt.axvspan(threshold_sales_value, df['Profit'].max(), facecolor='orange', alpha=0.3) plt.title("Profit Distribution with Outlier Region"); # - # ### Filter and Sort Outliers # # __Your turn:__ Filter out the outlier observations and sort by descending order and view the top 5 outlier values profit_outliers_df =df['Profit'][df['Profit'] > threshold_profit_upper_limit] print('Total Profit Outliers:', len(profit_outliers_df)) profit_outliers_sorted = profit_outliers_df.sort_values(ascending=False) profit_outliers_df.head(5) # We need to identify also the losses outliers loses_outliers_df =df['Profit'][df['Profit'] < threshold_profit_lower_limit] print('Total Losses Outliers:', len(loses_outliers_df)) loses_outliers_sorted = loses_outliers_df.sort_values(ascending=False) loses_outliers_df.head(5) # ### View Top 10 Outlier Transactions # # __Your turn:__ View the top ten transactions based on highest profits (df.loc[profit_outliers_sorted.index.tolist()][['City', 'Category', 'Sub-Category', 'Product Name', 'Sales', 'Quantity', 'Discount', 'Profit']]).head(10) # ### Q: Do you notice any interesting insights based on these transactions? 
# __A:__ Most of these are purchases for Copiers and Binders , looks like Canon products yielded some good profits` # ### View Bottom 10 Outlier Transactions # # __Your turn:__ View the bottom ten transactions based on lowest profits (highest losses) (df.loc[loses_outliers_sorted.index.tolist()][['City', 'Category', 'Sub-Category', 'Product Name', 'Sales', 'Quantity', 'Discount', 'Profit']]).tail(10) # ### Q: Do you notice any interesting insights based on these transactions? # __A:__ Most of these are purchases for Machines and Binders , looks like Cibify 3D Printers yielded high losses # ## 3.3: Univariate Anomaly Detection on Sales using Isolation Forest # # You might have already learnt about this model from the curriculum. Just to briefly recap, the Isolation Forest model, 'isolates' observations by randomly selecting a feature and then randomly selecting a split value between the maximum and minimum values of the selected feature. # # Recursive partitioning can be represented by a tree structure. Hence, the number of splittings required to isolate a sample is equivalent to the path length from the root node to the terminating node. This path length, averaged over a forest of such random trees, is a measure of normality and our decision function. # # Random partitioning produces noticeably shorter paths for anomalies. Hence, when a forest of random trees collectively produce shorter path lengths for particular samples, they are highly likely to be anomalies. # # More details are available in this [User Guide](https://scikit-learn.org/stable/modules/outlier_detection.html#isolation-forest) # ### Initialize and Train Model # # Here we initialize the isolation forest model with some hyperparameters assuming the proportion of outliers to be 1% of the total data (using the `contamination` setting) # + from sklearn.ensemble import IsolationForest sales_ifmodel = IsolationForest(n_estimators=100, contamination=0.01) sales_ifmodel.fit(df[['Sales']]) # - # ### Visualize Outlier Region # # Here we visualize the outlier region in the data distribution xx = np.linspace(df['Sales'].min(), df['Sales'].max(), len(df)).reshape(-1,1) anomaly_score = sales_ifmodel.decision_function(xx) outlier = sales_ifmodel.predict(xx) plt.figure(figsize=(12, 6)) plt.plot(xx, anomaly_score, label='anomaly score') plt.fill_between(xx.T[0], np.min(anomaly_score), np.max(anomaly_score), where=outlier==-1, color='r', alpha=.4, label='outlier region') plt.legend() plt.ylabel('anomaly score') plt.xlabel('Sales'); # ### Filter and Sort Outliers # # Here we predict outliers in our dataset using our trained model and filter out the outlier observations and sort by descending order and view the top 5 outlier values # + outlier_predictions = sales_ifmodel.predict(df[['Sales']]) sales_outliers_df = df[['Sales']] sales_outliers_df['Outlier'] = outlier_predictions sales_outliers_df = sales_outliers_df[sales_outliers_df['Outlier'] == -1]['Sales'] print('Total Sales Outliers:', len(sales_outliers_df)) sales_outliers_sorted = sales_outliers_df.sort_values(ascending=False) sales_outliers_sorted.head(5) # - # ### View Top 10 Outlier Transactions (df.loc[sales_outliers_sorted.index.tolist()][['City', 'Category', 'Sub-Category', 'Product Name', 'Sales', 'Quantity', 'Discount', 'Profit']]).head(10) # ### View Bottom 10 Outlier Transactions (df.loc[sales_outliers_sorted.index.tolist()][['City', 'Category', 'Sub-Category', 'Product Name', 'Sales', 'Quantity', 'Discount', 'Profit']]).tail(10) # ## Q 3.4: Univariate Anomaly Detection on 
Profit using Isolation Forest # # In this section you will use the learning from Section 3.3 and implement anomaly detecting using isolation on the __`Profit`__ attribute. Since we have both +ve (profits) and -ve (losses) values in the distribution, we will try to find anomalies for each. # ### Initialize and Train Model # # __Your Turn:__ Initialize the isolation forest model with similar hyperparameters as Section 3.3 and also assuming the proportion of outliers to be 1% of the total data (using the contamination setting) # + from sklearn.ensemble import IsolationForest profit_ifmodel = IsolationForest(n_estimators=100, contamination=0.01) profit_ifmodel.fit(df[['Profit']]) # - # ### Visualize Outlier Regions # # __Your turn:__ Visualize the upper and lower outlier regions in the distribution similar to what you did in 3.3 xx = np.linspace(df['Profit'].min(), df['Profit'].max(), len(df)).reshape(-1,1) anomaly_score = profit_ifmodel.decision_function(xx) outlier = profit_ifmodel.predict(xx) plt.figure(figsize=(12, 6)) plt.plot(xx, anomaly_score, label='anomaly score') plt.fill_between(xx.T[0], np.min(anomaly_score), np.max(anomaly_score), where=outlier==-1, color='r', alpha=.4, label='outlier region') plt.legend() plt.ylabel('anomaly score') plt.xlabel('Profit'); # ### Filter and Sort Outliers # # __Your Turn:__ Predict outliers in our dataset using our trained model and filter out the outlier observations and sort by descending order and view the top 5 outlier values similar to 3.3 # + outlier_predictions = profit_ifmodel.predict(df[['Profit']]) profit_outliers_df = df[['Profit']] profit_outliers_df['Outlier'] = outlier_predictions profit_outliers_df = profit_outliers_df[profit_outliers_df['Outlier'] == -1]['Profit'] print('Total Profit Outliers:', len(profit_outliers_df)) profit_outliers_sorted = profit_outliers_df.sort_values(ascending=False) profit_outliers_sorted.head(5) # - # ### View Top 10 Outlier Transactions # # __Your turn:__ View the top ten transactions based on highest profits (df.loc[profit_outliers_sorted.index.tolist()][['City', 'Category', 'Sub-Category', 'Product Name', 'Sales', 'Quantity', 'Discount', 'Profit']]).head(10) # ### View Bottom 10 Outlier Transactions # # __Your turn:__ View the bottom ten transactions based on lowest profits (highest losses) (df.loc[profit_outliers_sorted.index.tolist()][['City', 'Category', 'Sub-Category', 'Product Name', 'Sales', 'Quantity', 'Discount', 'Profit']]).tail(10) # ### Q: Do you observe any similarity in the results with the previous method? # __A:__ Yes # Another interesting approach to check out would be the [Generalized ESD Test for Outliers](https://www.itl.nist.gov/div898/handbook/eda/section3/eda35h3.htm) # # # # 4. Multivariate Anomaly Detection # # Multivariate is basically analysis done on more than one attribute or feature at a time. In this section, we will perform anomaly detection on two attributes (__`Discount`__ & __`Profit`__) using the following methods. # # - Clustering Based Local Outlier Factor (CBLOF) # - Isolation Forest # - Auto-Encoders # # You will learn how to train these models to detect outliers and also visualize these outliers. For this section we will be using the __[`pyod`](https://pyod.readthedocs.io/en/latest/)__ package so make sure you have it installed. 
# !pip install pyod # ## Extract Subset Data for Outlier Detection cols = ['Discount', 'Profit'] subset_df = df[cols] subset_df.head() # ## Feature Scaling # + from sklearn.preprocessing import MinMaxScaler mms = MinMaxScaler(feature_range=(0, 1)) subset_df[cols] = mms.fit_transform(subset_df) subset_df.head() # - # ## 4.1: Multivariate Anomaly Detection with Clustering Based Local Outlier Factor (CBLOF) # # The CBLOF model takes as an input the dataset and the cluster model that was generated by a clustering algorithm. It classifies the clusters into small clusters and large clusters using the parameters alpha and beta. The anomaly score is then calculated based on the size of the cluster the point belongs to as well as the distance to the nearest large cluster. # # By default, kMeans is used for clustering algorithm. You can read more in the [official documentation](https://pyod.readthedocs.io/en/latest/pyod.models.html#module-pyod.models.cblof) # ### Initialize and Train Model # # Here we initialize the CBLOF model with some hyperparameters assuming the proportion of outliers to be 1% of the total data (using the `contamination` setting) # + from pyod.models import cblof cblof_model = cblof.CBLOF(contamination=0.01, random_state=42) cblof_model.fit(subset_df) # - # ### Filter and Sort Outliers # # Here we predict outliers in our dataset using our trained model and filter out the outlier observations and sort by descending order and view the top 5 outlier values # + outlier_predictions = cblof_model.predict(subset_df) outliers_df = subset_df.copy(deep=True) outliers_df['Outlier'] = outlier_predictions outliers_df = outliers_df[outliers_df['Outlier'] == 1] print('Total Outliers:', len(outliers_df)) outliers_sorted = outliers_df.sort_values(by=['Profit', 'Discount'], ascending=False) outliers_sorted.head(5) # - # ### View Bottom 10 Outlier Transactions (df.loc[outliers_sorted.index.tolist()][['City', 'Category', 'Sub-Category', 'Product Name', 'Sales', 'Quantity', 'Discount', 'Profit']]).tail(10) # We can definitely see some huge losses incurred based on giving higher discounts even if the sales amount was high which is interesting as well as concerning. # ## Q 4.2: Multivariate Anomaly Detection with Isolation Forest # # Here you will detect anomalies using the Isolation Forest model and use the learnings from 4.1. Here you will use the [`pyod`](https://pyod.readthedocs.io/en/latest/pyod.models.html#module-pyod.models.iforest) version of [Isolation Forest](https://pyod.readthedocs.io/en/latest/pyod.models.html#module-pyod.models.iforest) which is basically a wrapper over the `scikit-learn` version but with more functionalities. 
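# One convenience of `pyod` is that its detectors share a common interface: after `fit()`,
# a model exposes `decision_scores_`, `threshold_` and `labels_` for the training data, in
# addition to `predict()` / `decision_function()` for new data. As a quick illustration
# (an add-on, using the CBLOF model already fitted in 4.1):

# +
print('First 5 training outlier scores:', cblof_model.decision_scores_[:5])
print('Score threshold at contamination=0.01:', cblof_model.threshold_)
print('Training points flagged as outliers:', int(cblof_model.labels_.sum()))
# -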
# ### Initialize and Train Model # # __Your Turn:__ Initialize the isolation forest model with similar hyperparameters as before and also assuming the proportion of outliers to be 1% of the total data (using the contamination setting) # + from pyod.models import iforest if_model = iforest.IForest(contamination=0.01, random_state=42) if_model.fit(subset_df) # - # ### Filter and Sort Outliers # # __Your Turn:__ Predict outliers in our dataset using our trained model and filter out the outlier observations and sort by descending order and view the top 5 outlier values similar to 4.1 # + outlier_predictions = if_model.predict(subset_df) outliers_df = subset_df.copy(deep=True) outliers_df['Outlier'] = outlier_predictions outliers_df = outliers_df[outliers_df['Outlier'] == 1] print('Total Outliers:', len(outliers_df)) outliers_sorted = outliers_df.sort_values(by=['Profit', 'Discount'], ascending=False) outliers_sorted.head(5) # - # ### View Bottom 10 Outlier Transactions # # __Your turn:__ View the bottom ten transactions (df.loc[outliers_sorted.index.tolist()][['City', 'Category', 'Sub-Category', 'Product Name', 'Sales', 'Quantity', 'Discount', 'Profit']]).tail(10) # ### Q: Do you notice any differences in the results with the previous model? # We do notice some transactions with 80% discount and high losses # ## Q 4.3: Multivariate Anomaly Detection with Auto-encoders # # Here you will detect anomalies using the Auto-encoder model and use the learnings from 4.1. Here you will use the [Auto-encoder](https://pyod.readthedocs.io/en/latest/pyod.models.html#module-pyod.models.auto_encoder) model from `pyod` which is a deep learning model often used for learning useful data representations in an unsupervised fashion without any labeled data. # # ![](outlier_ae.png) # # Similar to PCA, AE could be used to detect outlier objects in the data by calculating the reconstruction errors # ### Initialize Model # # Here we initiaze an auto-encoder network with a few hidden layers so that we could train it for a 100 epochs # + from pyod.models import auto_encoder ae_model = auto_encoder.AutoEncoder(hidden_neurons=[2, 32, 32, 2], hidden_activation='relu', output_activation='sigmoid', epochs=100, batch_size=32, contamination=0.01) # - # ### Train Model # # __Your turn:__ Train the model by calling the `fit()` function on the right data ae_model.fit(subset_df) # ### Filter and Sort Outliers # # __Your Turn:__ Predict outliers in our dataset using our trained model and filter out the outlier observations and sort by descending order and view the top 5 outlier values similar to 4.1 # + outlier_predictions = ae_model.predict(subset_df) outliers_df = subset_df.copy(deep=True) outliers_df['Outlier'] = outlier_predictions outliers_df = outliers_df[outliers_df['Outlier'] == 1] print('Total Outliers:', len(outliers_df)) outliers_sorted = outliers_df.sort_values(by=['Profit', 'Discount'], ascending=False) outliers_sorted.head(5) # - # ### View Bottom 10 Outlier Transactions # # __Your turn:__ View the bottom ten transactions (df.loc[outliers_sorted.index.tolist()][['City', 'Category', 'Sub-Category', 'Product Name', 'Sales', 'Quantity', 'Discount', 'Profit']]).tail(10) # ## 4.4: Visualize Anomalies and Compare Anomaly Detection Models # # Here we will look at the visual plots of anomalies as detected by the above three models def visualize_anomalies(model, xx, yy, data_df, ax_obj, subplot_title): # predict raw anomaly score scores_pred = model.decision_function(data_df) * -1 # prediction of a datapoint category outlier or 
inlier y_pred = model.predict(data_df) n_inliers = len(y_pred) - np.count_nonzero(y_pred) n_outliers = np.count_nonzero(y_pred == 1) out_df = data_df.copy(deep=True) out_df['Outlier'] = y_pred.tolist() # discount - inlier feature 1, profit - inlier feature 2 inliers_discount = out_df[out_df['Outlier'] == 0]['Discount'].values inliers_profit = out_df[out_df['Outlier'] == 0]['Profit'].values # discount - outlier feature 1, profit - outlier feature 2 outliers_discount = out_df[out_df['Outlier'] == 1]['Discount'].values outliers_profit = out_df[out_df['Outlier'] == 1]['Profit'].values # Use threshold value to consider a datapoint inlier or outlier # threshold = stats.scoreatpercentile(scores_pred,100 * outliers_fraction) threshold = np.percentile(scores_pred, 100 * outliers_fraction) # decision function calculates the raw anomaly score for every point Z = model.decision_function(np.c_[xx.ravel(), yy.ravel()]) * -1 Z = Z.reshape(xx.shape) # fill blue map colormap from minimum anomaly score to threshold value ax_obj.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),cmap=plt.cm.Blues_r) # draw red contour line where anomaly score is equal to thresold a = ax_obj.contour(xx, yy, Z, levels=[threshold],linewidths=2, colors='red') # fill orange contour lines where range of anomaly score is from threshold to maximum anomaly score ax_obj.contourf(xx, yy, Z, levels=[threshold, Z.max()],colors='orange') b = ax_obj.scatter(inliers_discount, inliers_profit, c='white',s=20, edgecolor='k') c = ax_obj.scatter(outliers_discount, outliers_profit, c='black',s=20, edgecolor='k') ax_obj.legend([a.collections[0], b,c], ['learned decision function', 'inliers','outliers'], prop=matplotlib.font_manager.FontProperties(size=10),loc='upper right') ax_obj.set_xlim((0, 1)) ax_obj.set_ylim((0, 1)) ax_obj.set_xlabel('Discount') ax_obj.set_ylabel('Sales') ax_obj.set_title(subplot_title) # + outliers_fraction = 0.01 xx , yy = np.meshgrid(np.linspace(0, 1, 100), np.linspace(0, 1, 100)) fig, ax = plt.subplots(1, 3, figsize=(20, 6)) ax_objs = [ax[0], ax[1], ax[2]] models = [cblof_model, if_model, ae_model] plot_titles = ['Cluster-based Local Outlier Factor (CBLOF)', 'Isolation Forest', 'Auto-Encoder'] for ax_obj, model, plot_title in zip(ax_objs, models, plot_titles): visualize_anomalies(model=model, xx=xx, yy=yy, data_df=subset_df, ax_obj=ax_obj, subplot_title=plot_title) plt.axis('tight'); # -
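# The contour plots above give a qualitative comparison. As an optional follow-up (not part
# of the original mini-project), a quick quantitative check is to count how often the three
# fitted detectors agree on which transactions to flag.

# +
predictions = {
    'CBLOF': cblof_model.predict(subset_df),
    'Isolation Forest': if_model.predict(subset_df),
    'Auto-Encoder': ae_model.predict(subset_df),
}

# pyod's predict() returns 1 for outliers and 0 for inliers.
for name, preds in predictions.items():
    print('{}: {} outliers flagged'.format(name, int(np.sum(preds == 1))))

# Transactions flagged by all three detectors at once.
agree_all = ((predictions['CBLOF'] == 1)
             & (predictions['Isolation Forest'] == 1)
             & (predictions['Auto-Encoder'] == 1))
print('Flagged by all three models:', int(agree_all.sum()))
# -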
anomaly-detection/mec-16.4.1-anomaly-detection-mini-project/Mini_Project_Anomaly_Detection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Project 3: Natural Language Processing # ## Imports # + import pandas as pd import numpy as np import requests import seaborn as sns import matplotlib.pyplot as plt from tqdm import tqdm import time import sklearn.metrics as metrics from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from nltk.tokenize import WordPunctTokenizer # - # ## Gathering posts # + # URL for the pushshift api post_url = 'https://api.pushshift.io/reddit/search/submission' # + # creates a function to pull posts from the api def get_posts(url, subreddit, timespan): ''' url: the API URL subreddit: the name of the subreddit (after r/) as a string timespan: the number of days into the past you want to collect posts ''' # create an empty data frame for posts to be added to df = pd.DataFrame() # set standard parameters with the size == 100 and the subreddit # set to the subreddit passed into the function params = { 'size':100, 'subreddit': subreddit } # loops through each number (i) in range(timespan), adding them as # the before (i) and after (i+1) parameters in the params # dictionary to be used for the api call for i in tqdm(range(timespan)): params['before'] = f'{i}d' params['after']= f'{i+1}d' # requests data from the api for each day and concatenates it to # the data frame instantiated above req = requests.get(url, params) data = req.json() posts = data['data'] day_posts = pd.DataFrame(posts) df = pd.concat([df, day_posts],ignore_index = True) # add a .5 sec delay to each request so as to not hit the # API too quickly time.sleep(.5) # returns the populated data frame return df # + # calls the api to pull and return posts for the subreddit r/QAnonCasualties # for the sake of time/consistency, I have commented out this function # the pull was saved as a csv in this repo # qanon_casualties = get_posts(post_url, 'QAnonCasualties', 500) # + # saves the above cell as a csv, which is used below. # qanon_casualties.to_csv('qanon_casualties.csv') # + # calls the api to pull and return posts for the subreddit r/OffMyChest # for the sake of time/consistency, I have commented out this function # the pull was saved as a csv in this repo # offmychest = get_posts(post_url, 'offmychest', 200) # + # saves the above cell as a csv, which is used below. 
# offmychest.to_csv('offmychest.csv') # + # reads in the previously saved csvs with the same variable name # as those used to call the api initially qanon_casualties = pd.read_csv('./data/qanon_casualties.csv').drop(columns = 'Unnamed: 0') offmychest = pd.read_csv('./data/offmychest.csv').drop(columns = 'Unnamed: 0') # drops in place any null values from the 'selftext' column qanon_casualties.dropna(subset=['selftext'], inplace = True) offmychest.dropna(subset=['selftext'], inplace = True) # - # ## Combining dataframes, cleaning, vectorizing and setting up X, y # + # combine the two datasets from the reddit pull df = pd.concat([qanon_casualties, offmychest], ignore_index=True) # drops any null values, and saves the data frame back as just # the body text and the target (i.e., 'selftext', 'subreddit') df = df[['selftext', 'subreddit']].dropna() # removes any cells that are only either [removed] or [deleted] df = df[df['selftext'] != '[removed]'] df = df[df['selftext'] != '[deleted]'] # drops duplicates in place df.selftext.drop_duplicates(inplace = True) # + # a function to redact any words that may affect the model def redact(row): ''' pass in a value and the function will remove the pre-determined leak words that may affect how the model performs ''' leak_words = ['QAnon', 'qanon', 'q anon', 'Q anon', 'Q Anon', 'Q ', 'conspiracy', 'conspiracies'] for word in leak_words: row = row.replace(word,'') return row # + # maps the redact function to the selftext column and saves it back df.selftext = df.selftext.map(redact) # - # ## EDA # # + # sets standard font sizes across graphs title_dict = {'fontsize': 18} label_dict = {'fontsize':14} # + # gets cleaned data for both of the subreddits qanon = df[df['subreddit'] == 'QAnonCasualties'] omc = df[df['subreddit'] == 'offmychest'] # - qanon # + # graph word counts for QAnonCasualties q_word_counts = qanon.selftext.apply(lambda x: len(x.split())) print('Average word counts:', q_word_counts.mean()) plt.figure(figsize = (12, 6)) plt.title('Distribution of r/QAnonCasualties post word counts', fontdict=title_dict) plt.xlabel('Word counts', fontdict=label_dict) plt.ylabel('Frequency', fontdict=label_dict) plt.hist(q_word_counts); # + # instantiate count vectorizer and fit/transform to the qanon data cvec = CountVectorizer(stop_words='english', max_features=5000) qanon_cvec = pd.DataFrame(cvec.fit_transform(qanon.selftext).todense(), columns = cvec.get_feature_names()) # + # plot the top 20 most commonly used words for qanon data plt.figure(figsize = (12,6)) plt.title('Most commonly used words in r/QAnonCasualties posts', fontdict=title_dict) plt.ylabel('Words', fontdict=label_dict) plt.xlabel('Frequency', fontdict=label_dict) plt.barh(qanon_cvec.sum().sort_values(ascending=False)[:20].index, qanon_cvec.sum().sort_values(ascending=False)[:20].values); # + # graph word counts for offmychest omc_word_counts = omc.selftext.apply(lambda x: len(x.split())) print('Average word counts:', omc_word_counts.mean()) plt.figure(figsize = (12, 6)) plt.title('Distribution of r/offmychest post word counts', fontdict=title_dict) plt.xlabel('Word counts', fontdict=label_dict) plt.ylabel('Frequency', fontdict=label_dict) plt.hist(omc_word_counts, color = 'g'); # + # instantiate count vectorizer and fit/transform to the offmychest data cvec = CountVectorizer(stop_words='english', max_features=5000) omc_cvec = pd.DataFrame(cvec.fit_transform(omc.selftext).todense(), columns = cvec.get_feature_names()) # + # plot the top 20 most commonly used words for offmychest data 
plt.figure(figsize = (12,6)) plt.title('Most commonly used words in r/offmychest posts', fontdict=title_dict) plt.ylabel('Words', fontdict=label_dict) plt.xlabel('Frequency', fontdict=label_dict) plt.barh(omc_cvec.sum().sort_values(ascending=False)[:20].index, omc_cvec.sum().sort_values(ascending=False)[:20].values, color = 'g'); # - # ## Vectorizing unstructured data # + # instantiates the Tfidf vectorizer with english stop words tf = TfidfVectorizer(stop_words='english', strip_accents='unicode', max_features=5000) # fits the vectorizer to the unstructured data (i.e., the 'selftext' column) tf.fit(df['selftext']) # transforms the unstructured data into vectors and # returns it as a data frame that can be used for modeling tfvec = pd.DataFrame(tf.transform(df['selftext']).todense(), columns=tf.get_feature_names()) # + # sets X = the features (i.e., the vectorized data) # sets y = the target (i.e., the subreddit) X = tfvec y = df['subreddit'] # + # baseline y.value_counts(normalize = True) # baseline accuracy is 0.736737 # - X_train, X_test, y_train, y_test = train_test_split(X, y, stratify = y) # ## Log Reg # + # instantiates the logistic regression logr = LogisticRegression() # + # fit the training data and return the train/test scores for logistic regression logr.fit(X_train, y_train) print('Train:', logr.score(X_train, y_train)) print('Test:', logr.score(X_test, y_test)) # logistic regression returns the following scores as the baseline # Train: 0.9608389122578351 # Test: 0.9489959125644215 # + # plot the confusion matrix metrics.plot_confusion_matrix(logr, X_test, y_test, normalize = 'true'); # - # ## Random Forests # + # instantiates a random forest classifier rf = RandomForestClassifier() # + # fit the training data to the random forests classifier and # return the train/test scores rf.fit(X_train, y_train) print('Train:', rf.score(X_train, y_train)) print('Test:', rf.score(X_test, y_test)) # Random forests classifier returns the following scores: # Train: 0.9992298121926655 # Test: 0.9363781766483028 # The model is overfit to the training data, but it does not do # significantly worse than the logistic regression on the test # data # - metrics.plot_confusion_matrix(rf, X_test, y_test, normalize='true'); # ## KNN # + # instantiates a K-nearest Neighbors classifier knn = KNeighborsClassifier(n_jobs = -1) # + # fits the data to X_train and y_train knn.fit(X_train, y_train) # returns the performance of the KNN model for the training # and the testing data print('Train score:', knn.score(X_train, y_train)) print('Test score:', knn.score(X_test, y_test)) # Train score: 0.7867172225842763 # Test score: 0.7517327172560867 # KNN performs significantly worse than logistic regression # and random forests # - metrics.plot_confusion_matrix(knn, X_test, y_test, normalize='true');
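# ## Optional: cross-validated model comparison
#
# The models above are scored on a single train/test split even though `cross_val_score`
# is already imported. The cell below is an illustrative sketch (not part of the original
# analysis) comparing the same three classifiers with 5-fold cross-validation on the
# training set; `max_iter` is raised only to avoid convergence warnings, and the scores
# will not match the single-split numbers exactly.

# +
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier

# assumes X_train and y_train from the train_test_split cell above
candidate_models = {
    'Logistic Regression': LogisticRegression(max_iter=1000),
    'Random Forest': RandomForestClassifier(),
    'KNN': KNeighborsClassifier(n_jobs=-1),
}

for label, candidate in candidate_models.items():
    scores = cross_val_score(candidate, X_train, y_train, cv=5)
    print(f'{label}: mean accuracy {scores.mean():.3f} (+/- {scores.std():.3f})')
# -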
comparing-text-in-subreddits-nlp-code.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introduction to Programming with Python # --- # ## What is Python and why would I use it? # Python is a programming language. # # A programming language is a way of writing commands so that an interpreter or compiler can turn them into machine instructions. # # We like using Python in Software Carpentry Workshops for lots of reasons # # - Widely used in science # - It's easy to read and write # - Huge supporting community - lots of ways to learn and get help # - This Jupyter Notebook. Not a lot of languages have this kind of thing (name comes from Julia, Python, and R). # Even if you aren't using Python in your work, you can use Python to learn the fundamentals of programming that will apply accross languages # ### Characters # # Python uses certain characters as part of its syntax. Here is what they are called: # # * `[` : left `square bracket` # * `]` : right `square bracket` # * `(` : left `paren` (parentheses) # * `)` : right `paren` # * `{` : left `curly brace` # * `}` : right `curly brace` # * `<` : left `angle bracket` # * `>` : right `angle bracket` # * `-` `dash` (not hyphen. Minus only when used in an equation or formula) # * `"` : `double quote` # * `'` : `single quote` (apostrophe) # # What are the fundamentals? # ## VARIABLES # # * We store values inside variables. # * We can refer to variables in other parts of our programs. # * In Python, the variable is created when a value is assigned to it. # * Values are assigned to variable names using the equals sign (=). # * A variable can hold two types of things. Basic data types and objects(ways to structure data and code). # * In Python, all variables are objects. # # Some data types you will find in almost every language include: # # - Strings (characters, words, sentences or paragraphs): 'a' 'b' 'c' 'abc' '0' '3' ';' '?' # - Integers (whole numbers): 1 2 3 100 10000 -100 # - Floating point or Float (decimals): 10.0 56.9 -3.765 # - Booleans: True, False # # Here, Python assigns an age to a variable `age` and a name in quotation marks to a variable `first_name`. age = 42 first_name = "Ahmed" # #### Of Note: # Variable names: # * Cannot start with a digit # * Cannot contain spaces, quotation marks, or other punctuation # You can display what is inside `age` by using the print command # `print()` # with the value placed inside the parenthesis print(age) # --- # ## EXERCISE: # 1. Create two new variables called age and first_name with your own age and name # 1. Print each variable out to dispaly it's value # # You can also combine values in a single print command by separating them with commas # Insert your variable values into the print statement below print(, 'is', , 'years old') # * `print` automatically puts a single space between items to separate them. # * And wraps around to a new line at the end. # ### Using Python built-in type() function # # If you are not sure of what your variables' types are, you can call a python function called type() in the same manner as you used print() function. # Python is an object-oriented language, so any defined variable has a type. Default common types are str, int, float, list, and tuple. 
We will cover list and tuple later print(type(age)) print(type(first_name)) # ### STRING TYPE # One or more characters strung together and enclosed in quotes (single or double): "Hello World!" greeting = "Hello World!" print ("The greeting is:", greeting) greeting = 'Hello World!' print ('The greeting is:', greeting) # #### Need to use single quotes in your string? # Use double quotes to make your string. greeting = "Hello 'World'!" print ("The greeting is:", greeting) # #### Need to use both? greeting1 = "'Hello'" greeting2 = '"World"!' print ("The greeting is:", greeting1, greeting2) # #### Concatenation bear = "wild" down = "cats" print (bear+down) # --- # ## EtherPad # Why isn't `greeting` enclosed in quotes in the statements above? # # Post your answers to the EtherPad, or vote for existing answers # # --- # #### Use an index to get a single character from a string. # * The characters (individual letters, numbers, and so on) in a string are ordered. # * For example, the string ‘AB’ is not the same as ‘BA’. Because of this ordering, we can treat the string as a list of characters. # * Each position in the string (first, second, etc.) is given a number. This number is called an index or sometimes a subscript. # * Indices are numbered from 0. # * Use the position’s index in square brackets to get the character at that position. # + # String : H e l i u m # Index Location: 0 1 2 3 4 5 atom_name = 'helium' print(atom_name[0], atom_name[3]) # - # ### NUMERIC TYPES # * Numbers are stored as numbers (no quotes) and are either integers (whole) or real numbers (decimal). # * In programming, numbers with decimal precision are called floating-point, or float. # * Floats use more processing than integers so use them wisely! # * Floats and ints come in various sizes but Python switches between them transparently. # + my_integer = 10 my_float = 10.99998 my_value = my_integer print("My numeric value:", my_value) print("Type:", type(my_value)) # - # ### BOOLEAN TYPE # * Boolean values are binary, meaning they can only either true or false. # * In python True and False (no quotes) are boolean values # + is_true = True is_false = False print("My true boolean variable:", is_true) # - # --- # ## EtherPad # What data type is `'1024'`? # <ol style="list-style-type:lower-alpha"> # <li>String</li> # <li>Int</li> # <li>Float</li> # <li>Boolean</li> # </ol> # # Post your answers to the EtherPad, or vote for existing answers # # --- # ## Variables can be used in calculations. # # * We can use variables in calculations just as if they were values. # * Remember, we assigned 42 to `age` a few lines ago. age = age + 3 print('Age in three years:', age) # * This now sets our age value 45. We can also add strings together. When you add strings it's called "concatenating" name = "Sonoran" full_name = name + " Desert" print(full_name) # * Notice how I included a space in the quotes before "Desert". If we hadn't, we would have had "SonoranDesert" # * Can we subtract, multiply, or divide strings? # + #Create a new variable called last_name with your own last name. 
#Create a second new variable called full_name that is a combination of your first and last name # - # ## DATA STRUCTURES # Python has many objects that can be used to structure data including: # # - Lists # - Tuples # - Sets # - Dictionaries # ### LISTS # Lists are collections of values held together in brackets: list_of_characters = ['a', 'b', 'c'] print (list_of_characters) # Create a new list called list_of_numbers with four numbers in it # * Just like strings, we can access any value in the list by it's position in the list. # * **IMPORTANT:** Indexes start at 0 # ~~~ # list: ['a', 'b', 'c', 'd'] # index location: 0 1 2 3 # ~~~ # Print out the second value in the list list_of_numbers # Once you have created a list you can add more items to it with the append method list_of_numbers.append(5) print(list_of_numbers) # #### Aside: Sizes of data structures # # To determine how large (how many values/entries/elements/etc.) any Python data structure has, use the `len()` function len(list_of_numbers) # Note that you cannot compute the length of a numeric variable: len(age) # This will give an error: `TypeError: object of type 'int' has no len()` # However, `len()` can compute the lengths of strings # + print(len('this is a sentence')) # You can also get the lengths of strings in a list list_of_strings = ["Python is Awesome!", "Look! I'm programming.", "E = mc^2"] # This will get the length of "Look! I'm programming." print(len(list_of_strings[1])) # - # ### TUPLES # Tuples are like a List, `cannot be changed (immutable)`. # # Tuples can be used to represent any collection of data. They work well for things like coordinates. tuple_of_x_y_coordinates = (3, 4) print (tuple_of_x_y_coordinates) # Tuples can have any number of values # + coordinates = (1, 7, 38, 9, 0) print (coordinates) icecream_flavors = ("strawberry", "vanilla", "chocolate") print (icecream_flavors) # - # ... and any types of values. # # Once created, you `cannot add more items to a tuple` (but you can add items to a list). If we try to append, like we did with lists, we get an error icecream_flavors.append('bubblegum') # ### THE DIFFERENCE BETWEEN TUPLES AND LISTS # Lists are good for manipulating data sets. It's easy for the computer to add, remove and sort items. Sorted tuples are easier to search and index. This happens because tuples reserve entire blocks of memory to make finding specific locations easier while lists use addressing and force the computer to step through the whole list. # ![array%20vs%20list.png](array%20vs%20list.png) # Let's say you want to get to the last item. The tuple can calculate the location because: # # (address)=(size of data)×(inex of the item)+(original address) # # This is how zero indexing works. The computer can do the calculation and jump directly to the address. The list would need to go through every item in the list to get there. # # Now lets say you wanted to remove the third item. Removing it from the tuple requires it to be resized and coppied. Python would even make you do this manually. Removing the third item in the list is as simple as making the second item point to the fourth. Python makes this as easy as calling a method on the tuple object. 
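# For a concrete (illustrative) comparison of the two: removing the third item from a
# list is a single method call, while "removing" it from a tuple means building a new tuple.

# +
numbers_list = [10, 20, 30, 40]
numbers_tuple = (10, 20, 30, 40)

# Lists can be changed in place
numbers_list.pop(2)  # removes the third item (index 2)
print("list after pop:", numbers_list)

# Tuples cannot be changed, so a new tuple has to be built instead
numbers_tuple = numbers_tuple[:2] + numbers_tuple[3:]
print("rebuilt tuple:", numbers_tuple)
# -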
# ### SETS # Sets are similar to lists and tuples, but can only contain unique values and are held in braces # # # For example a list could contain multiple exact values # + # In the gapminder data that we will use, we will have data entries for the continents # of each country in the dataset my_list = ['Africa', 'Europe', 'North America', 'Africa', 'Europe', 'North America'] print("my_list is", my_list) # A set would only allow for unique values to be held my_set = {'Africa', 'Europe', 'North America', 'Africa', 'Europe', 'North America'} print("my_set is", my_set) # - # Just list lists, you can append to a set using the add() function # + my_set.add('Asia') # Now let's try to append one that is in: my_set.add('Europe') # - # ### DICTIONARIES # * Dictionaries are collections of things that you can lookup like in a real dictionary: # * Dictionarys can organized into key and value pairs separated by commas (like lists) and surrounded by braces. # * E.g. {key1: value1, key2: value2} # * We call each association a "key-value pair". # # dictionary_of_definitions = {"aardvark" : "The aardvark is a medium-sized, burrowing, nocturnal mammal native to Africa.", "boat" : "A boat is a thing that floats on water"} # We can find the definition of aardvark by giving the dictionary the "key" to the definition we want in brackets. # # In this case the key is the word we want to lookup print ("The definition of aardvark is:", dictionary_of_definitions["aardvark"]) # Print out the definition of a boat # Just like lists and sets, you can add to dictionaries by doing the following: dictionary_of_definitions['ocean'] = "An ocean is a very large expanse of sea, in particular each of the main areas into which the sea is divided geographically." print(dictionary_of_definitions) # --- # ## EtherPad # Which one of these is not a valid entry in a dictionary? # # 1. `"key"`: `"value"` # 2. `"GCBHSA"`: `"ldksghdklfghfdlgkfdhgfldkghfgfhd"` # 3. `"900"` : `"key"` : `"value"` # 4. `Books` : `10000` # # Post your answer to the EtherPad, or vote for an existing answer # --- # ## EXERCISE: # 1. Create a dictionary called `zoo` with at least three animal types with a different count for each animal. # 1. `print` out the count of the second animal in your dictionary # # --- # ## Statements # # OK great. Now what can we do with all of this? # # We can plug everything together with a bit of logic and python language and make a program that can do things like: # # * process data # # * parse files # # * data analysis # What kind of logic are we talking about? # # We are talking about something called a "logical structure" which starts at the top (first line) and reads down the page in order # # In python a logical structure are often composed of statements. Statements are powerful operators that control the flow of your script. There are two main types: # # * conditionals (if, while) # * loops (for) # # ### Conditionals # Conditionals are how we make a decision in the program. # In python, conditional statements are called if/else statements. # # # * If statement use boolean values to define flow. # * E.g. If something is True, do this. Else, do this # + it_is_daytime = False # this is the variable that holds the current condition of it_is_daytime which is True or False if it_is_daytime: print ("Have a nice day.") else: print ("Have a nice night.") # before running this cell # what will happen if we change it_is_daytime to True? # what will happen if we change it_is_daytime to False? 
# - # * Often if/else statement use a comparison between two values to determine True or False # * These comparisons use "comparison operators" such as ==, >, and <. # * \>= and <= can be used if you need the comparison to be inclusive. # * **NOTE**: Two equal signs is used to compare values, while one equals sign is used to assign a value # * E.g. # # 1 > 2 is False<br/> # 2 > 2 is False<br/> # 2 >= 2 is True<br/> # 'abc' == 'abc' is True # + user_name = "Ben" if user_name == "Marnee": print ("Marnee likes to program in Python.") else: print ("We do not know who you are.") # - # * What if a condition has more than two choices? Does it have to use a boolean? # * Python if-statments will let you do that with elif # * `elif` stands for "else if" # # + if user_name == "Marnee": print ("Marnee likes to program in Python.") elif user_name == "Ben": print ("Ben likes maps.") elif user_name == "Brian": print ("Brian likes plant genomes") else: print ("We do not know who you are") # for each possibility of user_name we have an if or else-if statment to check the value of the name # and print a message accordingly. # - # What does the following statement print? # # my_num = 42 # my_num = 8 + my_num # new_num = my_num / 2 # if new_num >= 30: # print("Greater than thirty") # elif my_num == 25: # print("Equals 25") # elif new_num <= 30: # print("Less than thirty") # else: # print("Unknown") # --- # ## EXERCISE: # * 1. Check to see if you have more than three entries in the `zoo` dictionary you created earlier. If you do, print "more than three". If you don't, print "less than three" # # --- # ### Loops # Loops tell a program to do the same thing over and over again until a certain condition is met. # In python two main loop types are for loops and while loops. # #### For Loops # We can loop over collections of things like lists or dictionaries or we can create a looping structure. # + # LOOPING over a collection # LIST # If I want to print a list of fruits, I could write out each print statment like this: print("apple") print("banana") print("mango") # or I could create a list of fruit # loop over the list # and print each item in the list list_of_fruit = ["apple", "banana", "mango"] # this is how we write the loop # "fruit" here is a variable that will hold each item in the list, the fruit, as we loop # over the items in the list print (">>looping>>") for fruit in list_of_fruit: print (fruit) # - # LOOPING a set number of times # We can do this with range # range automatically creates a list of numbers in a range # here we have a list of 10 numbers starting with 0 and increasing by one until we have 10 numbers # What will be printed for x in range(0,10): print (x) # + # LOOPING over a collection # DICTIONARY # We can do the same thing with a dictionary and each association in the dictionary fruit_price = {"apple" : 0.10, "banana" : 0.50, "mango" : 0.75} for key, value in fruit_price.items(): print ("%s price is %s" % (key, value)) # - # --- # ## EXERCISE: # 1\. For each entry in your `zoo` dictionary, print that entry/key # 2\. For each entry in your zoo dictionary, print that value # --- # #### While Loops # Similar to if statements, while loops use a boolean test to either continue looping or break out of the loop. 
# + # While Loops my_num = 10 while my_num > 0: print("My number", my_num) my_num = my_num - 1 # - # NOTE: While loops can be dangerous, because if you forget to to include an operation that modifies the variable being tested (above, we're subtracting 1 at the end of each loop), it will continue to run forever and you script will never finish. # That's it. With just these data types, structures, and logic, you can build a program # # Let's do that next with functions # # -- COMMIT YOUR WORK TO GITHUB -- # # Key Points # # * Python is an open-source programming language that can be used to do science! # * We store information in variables # * There are a variety of data types and objects for storing data # * You can do math on numeric variables, you can concatenate strings # * There are different Python default data structures including: lists, tuples, sets and dictionaries # * Programming uses conditional statements for flow control such as: if/else, for loops and while loops
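# As a short recap (an added illustration, not part of the original lesson), the snippet
# below combines a dictionary, a for loop, and an if/else statement:

# +
zoo = {"lions": 3, "penguins": 12, "otters": 5}

for animal, count in zoo.items():
    if count > 4:
        print(animal, "has more than four:", count)
    else:
        print(animal, "has four or fewer:", count)
# -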
python-lessons/01 - Introduction to Programming with Python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # # Debiased CFSv2 # # ## Debiases CFSv2 ensemble forecast over specified range of years and leads # + import os, sys from subseasonal_toolkit.utils.notebook_util import isnotebook if isnotebook(): # Autoreload packages that are modified # %load_ext autoreload # %autoreload 2 else: from argparse import ArgumentParser import numpy as np import pandas as pd from scipy.spatial.distance import cdist, euclidean from datetime import datetime, timedelta from ttictoc import tic, toc from subseasonal_data.utils import get_measurement_variable from subseasonal_toolkit.utils.general_util import printf from subseasonal_toolkit.utils.experiments_util import get_id_name, get_th_name, get_first_year, get_forecast_delta from subseasonal_toolkit.utils.models_util import (get_submodel_name, start_logger, log_params, get_forecast_filename, save_forecasts) from subseasonal_toolkit.utils.eval_util import get_target_dates, mean_rmse_to_score, save_metric from sklearn.linear_model import * from subseasonal_data import data_loaders # + # # Specify model parameters # model_name = "deb_cfsv2" if not isnotebook(): # If notebook run as a script, parse command-line arguments parser = ArgumentParser() parser.add_argument("pos_vars",nargs="*") # gt_id and horizon parser.add_argument('--target_dates', '-t', default="std_test") parser.add_argument('--first_year', '-fy', default=1999, help="first year (inclusive) to use for debiasing") parser.add_argument('--last_year', '-ly', default=2010, help="last year (inclusive) to use for debiasing") parser.add_argument('--first_lead', '-fl', default=0, help="first cfsv2 lead to average into forecast (0-29)") parser.add_argument('--last_lead', '-ll', default=29, help="last cfsv2 lead to average into forecast (0-29)") args, opt = parser.parse_known_args() # Assign variables gt_id = get_id_name(args.pos_vars[0]) # "contest_precip" or "contest_tmp2m" horizon = get_th_name(args.pos_vars[1]) # "12w", "34w", or "56w" target_dates = args.target_dates first_lead = int(args.first_lead) last_lead = int(args.last_lead) first_year = int(args.first_year) last_year = int(args.last_year) else: # Otherwise, specify arguments interactively gt_id = "us_tmp2m_1.5x1.5" horizon = "34w" target_dates = "std_ecmwf" first_year = 1999 last_year = 2010 if horizon == "34w": first_lead = 15 last_lead = 15 elif horizon == "56w": first_lead = 29 last_lead = 29 # # Choose regression parameters # # Record standard settings of these parameters if gt_id.endswith("1.5x1.5"): prefix = "iri_cfsv2" else: prefix = "subx_cfsv2" if "tmp2m" in gt_id: base_col = prefix+'_tmp2m' elif "precip" in gt_id: base_col = prefix+'_precip' # # Process model parameters # # Get list of target date objects target_date_objs = pd.Series(get_target_dates(date_str=target_dates, horizon=horizon)) # Identify measurement variable name measurement_variable = get_measurement_variable(gt_id) # 'tmp2m' or 'precip' # Column name for ground truth gt_col = measurement_variable LAST_SAVE_YEAR = get_first_year(prefix) # Don't save forecasts for years earlier than LAST_SAVE_YEAR # Record model and submodel names submodel_name = get_submodel_name( model_name, first_year=first_year, last_year=last_year, first_lead=first_lead, last_lead=last_lead) if not isnotebook(): # Save output to log file logger = start_logger(model=model_name,submodel=submodel_name,gt_id=gt_id, horizon=horizon,target_dates=target_dates) # 
Store parameter values in log params_names = ['gt_id', 'horizon', 'target_dates', 'first_year', 'last_year', 'first_lead', 'last_lead', 'base_col' ] params_values = [eval(param) for param in params_names] log_params(params_names, params_values) # + # Load and process CFSv2 data printf("Loading cfsv2 data and averaging leads") # Choose data shift based on horizon base_shift = get_forecast_delta(horizon) tic() mask = None if gt_id.startswith("us_"): suffix = "-us" else: suffix = "" if gt_id.endswith("1.5x1.5"): suffix += "1_5" else: mask = data_loaders.get_us_mask() data = data_loaders.get_forecast(prefix+"-"+measurement_variable+suffix, mask_df=mask, shift=base_shift) cols = [prefix+"_"+gt_id.split("_")[1]+"-{}.5d_shift{}".format(col,base_shift) for col in range(first_lead, last_lead+1)] data[base_col] = data[cols].mean(axis=1) toc() printf('Pivoting dataframe to have one row per start_date') tic() data = data[['lat','lon','start_date',base_col]].set_index(['lat','lon','start_date']).unstack(['lat','lon']) toc() # Load ground truth tic() gt = data_loaders.get_ground_truth(gt_id).loc[:,['lat','lon','start_date',gt_col]] toc() printf('Pivoting ground truth to have one row per start_date') tic() gt = gt.loc[gt.start_date.isin(data.index),['lat','lon','start_date',gt_col]].set_index(['lat','lon','start_date']).unstack(['lat','lon']) toc() printf("Merging ground truth") tic() data = data.join(gt, how="left") del gt toc() # Identify the month-day combination for each date treating 2/29 as 2/28 monthdays = pd.Series([(d.month,d.day) if d.month != 2 or d.day != 29 else (2,28) for d in data.index],index=data.index) # Compute debiasing correction printf('Compute debiasing correction (ground-truth - base prediction) by month-day combination') tic() debias = (data[gt_col] - data[base_col]) debias = debias[(debias.index >= str(first_year)) & (debias.index <= str(last_year))] debias = debias.groupby(by=monthdays[debias.index]).mean() toc() # + # Make predictions for each target date printf('Creating dataframe to store performance') tic() rmses = pd.Series(index=target_date_objs, dtype=np.float64) toc() printf('Forming debiased predictions for target dates') tic() # Form predictions for target dates in data matrix valid_targets = data.index.intersection(target_date_objs) target_monthdays = monthdays.loc[valid_targets] preds = data.loc[valid_targets, base_col] + debias.loc[target_monthdays].values preds.index.name = "start_date" # Order valid targets by day of week valid_targets = valid_targets[valid_targets.weekday.argsort(kind='stable')] toc() for target_date_obj in valid_targets: # Skip if forecast already produced for this target target_date_str = datetime.strftime(target_date_obj, '%Y%m%d') forecast_file = get_forecast_filename( model=model_name, submodel=submodel_name, gt_id=gt_id, horizon=horizon, target_date_str=target_date_str) if os.path.isfile(forecast_file): printf(f"prior forecast exists for target={target_date_obj}") pred = pd.read_hdf(forecast_file).set_index(['lat','lon']).pred else: printf(f'Processing {model_name} forecast for {target_date_obj}') tic() # Add correction to base prediction pred = preds.loc[target_date_obj,:] # Save prediction to file in standard format if target_date_obj.year >= LAST_SAVE_YEAR: save_forecasts( preds.loc[[target_date_obj],:].unstack().rename("pred").reset_index(), model=model_name, submodel=submodel_name, gt_id=gt_id, horizon=horizon, target_date_str=target_date_str) toc() # Evaluate and store error if we have ground truth data tic() if target_date_obj in 
data.index: rmse = np.sqrt(np.square(pred - data.loc[target_date_obj,gt_col]).mean()) rmses.loc[target_date_obj] = rmse printf("-rmse: {}, score: {}".format(rmse, mean_rmse_to_score(rmse))) mean_rmse = rmses.mean() printf("-mean rmse: {}, running score: {}".format(mean_rmse, mean_rmse_to_score(mean_rmse))) toc() printf("Save rmses in standard format") rmses = rmses.sort_index().reset_index() rmses.columns = ['start_date','rmse'] save_metric(rmses, model=model_name, submodel=submodel_name, gt_id=gt_id, horizon=horizon, target_dates=target_dates, metric="rmse") # -
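# ## Toy illustration of the debiasing step
#
# The cell below is a self-contained sketch (synthetic numbers, not the CFSv2 data) of the
# correction computed above: the average of (ground truth - forecast) is taken per
# month-day group and added back to the forecast. For simplicity the toy uses "MM-DD"
# strings as the group key and skips the 2/29-as-2/28 special case handled in the pipeline.

# +
import numpy as np
import pandas as pd

np.random.seed(0)
dates = pd.date_range("2000-01-01", "2002-12-31", freq="D")
toy = pd.DataFrame({
    "forecast": np.random.randn(len(dates)) + 1.0,  # synthetic forecast, biased high by ~1
    "truth": np.random.randn(len(dates)),           # synthetic ground truth
}, index=dates)

# Average (truth - forecast) per month-day combination
monthday = pd.Series(toy.index.strftime("%m-%d"), index=toy.index)
correction = (toy["truth"] - toy["forecast"]).groupby(monthday).mean()

# Add each date's month-day correction back to its forecast
debiased = toy["forecast"] + monthday.map(correction)
print("mean bias before debiasing:", round((toy["truth"] - toy["forecast"]).mean(), 3))
print("mean bias after debiasing: ", round((toy["truth"] - debiased).mean(), 3))
# -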
subseasonal_toolkit/models/deb_cfsv2/deb_cfsv2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # # Advection correction # # This tutorial shows how to use the optical flow routines of pysteps to implement # the advection correction procedure described in Anagnostou and Krajewski (1999). # # Advection correction is a temporal interpolation procedure that is often used # when estimating rainfall accumulations to correct for the shift of rainfall patterns # between consecutive radar rainfall maps. This shift becomes particularly # significant for long radar scanning cycles and in presence of fast moving # precipitation features. # # <div class="alert alert-info"><h4>Note</h4><p>The code for the advection correction using pysteps was originally # written by `<NAME> <https://github.com/wolfidan>`_.</p></div> # # + from datetime import datetime import matplotlib.pyplot as plt import numpy as np from pysteps import io, motion, rcparams from pysteps.utils import conversion, dimension from pysteps.visualization import plot_precip_field from scipy.ndimage import map_coordinates # - # ## Read the radar input images # # First, we import a sequence of 36 images of 5-minute radar composites # that we will use to produce a 3-hour rainfall accumulation map. # We will keep only one frame every 10 minutes, to simulate a longer scanning # cycle and thus better highlight the need for advection correction. # # You need the pysteps-data archive downloaded and the pystepsrc file # configured with the data_source paths pointing to data folders. # # # Selected case date = datetime.strptime("201607112100", "%Y%m%d%H%M") data_source = rcparams.data_sources["mch"] # ### Load the data from the archive # # # + root_path = data_source["root_path"] path_fmt = data_source["path_fmt"] fn_pattern = data_source["fn_pattern"] fn_ext = data_source["fn_ext"] importer_name = data_source["importer"] importer_kwargs = data_source["importer_kwargs"] timestep = data_source["timestep"] # Find the input files from the archive fns = io.archive.find_by_date( date, root_path, path_fmt, fn_pattern, fn_ext, timestep=5, num_next_files=35 ) # Read the radar composites importer = io.get_method(importer_name, "importer") R, __, metadata = io.read_timeseries(fns, importer, **importer_kwargs) # Convert to mm/h R, metadata = conversion.to_rainrate(R, metadata) # Upscale to 2 km (simply to reduce the memory demand) R, metadata = dimension.aggregate_fields_space(R, metadata, 2000) # Keep only one frame every 10 minutes (i.e., every 2 timesteps) # (to highlight the need for advection correction) R = R[::2] # - # ## Advection correction # # Now we need to implement the advection correction for a pair of successive # radar images. The procedure is based on the algorithm described in Anagnostou # and Krajewski (Appendix A, 1999). # # To evaluate the advection occurred between two successive radar images, we are # going to use the Lucas-Kanade optical flow routine available in pysteps. 
# # def advection_correction(R, T=5, t=1): """ R = np.array([qpe_previous, qpe_current]) T = time between two observations (5 min) t = interpolation timestep (1 min) """ # Evaluate advection oflow_method = motion.get_method("LK") fd_kwargs = {"buffer_mask": 10} # avoid edge effects V = oflow_method(np.log(R), fd_kwargs=fd_kwargs) # Perform temporal interpolation Rd = np.zeros((R[0].shape)) x, y = np.meshgrid( np.arange(R[0].shape[1], dtype=float), np.arange(R[0].shape[0], dtype=float) ) for i in range(t, T + t, t): pos1 = (y - i / T * V[1], x - i / T * V[0]) R1 = map_coordinates(R[0], pos1, order=1) pos2 = (y + (T - i) / T * V[1], x + (T - i) / T * V[0]) R2 = map_coordinates(R[1], pos2, order=1) Rd += (T - i) * R1 + i * R2 return t / T ** 2 * Rd # Finally, we apply the advection correction to the whole sequence of radar # images and produce the rainfall accumulation map. # # R_ac = R[0].copy() for i in range(R.shape[0] - 1): R_ac += advection_correction(R[i : (i + 2)], T=10, t=1) R_ac /= R.shape[0] # ## Results # # We compare the two accumulation maps. The first map on the left is # computed without advection correction and we can therefore see that the shift # between successive images 10 minutes apart produces irregular accumulations. # Conversely, the rainfall accumulation of the right is produced using advection # correction to account for this spatial shift. The final result is a smoother # rainfall accumulation map. # # plt.figure(figsize=(9, 4)) plt.subplot(121) plot_precip_field(R.mean(axis=0), title="3-h rainfall accumulation") plt.subplot(122) plot_precip_field(R_ac, title="Same with advection correction") plt.tight_layout() plt.show() # ### Reference # # <NAME>., and <NAME>. 1999. "Real-Time Radar Rainfall # Estimation. Part I: Algorithm Formulation." Journal of Atmospheric and # Oceanic Technology 16: 189–97. # https://doi.org/10.1175/1520-0426(1999)016<0189:RTRREP>2.0.CO;2 # #
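# ## Toy example of the warping step
#
# The cell below is an illustrative sketch, independent of the radar data above, of how
# ``scipy.ndimage.map_coordinates`` shifts a field along a motion vector, which is the
# core operation used inside ``advection_correction``. The 5x5 field and the motion
# vector (u, v) = (1, 0) are made up for the example.
#
#

# +
import numpy as np
from scipy.ndimage import map_coordinates

field = np.zeros((5, 5))
field[2, 1] = 1.0  # a single "rain cell" at row 2, column 1

x, y = np.meshgrid(np.arange(5, dtype=float), np.arange(5, dtype=float))
u, v = 1.0, 0.0  # one pixel to the right per timestep

# Sampling the field at (y - v, x - u) moves the feature forward by (u, v)
shifted = map_coordinates(field, (y - v, x - u), order=1)
print(np.argwhere(shifted > 0.5))  # the cell is now at row 2, column 2
# -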
notebooks/advection_correction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import json from collections import Counter def get_data_ngrams(data, n_grams): grams = set([]) for line in data: line = line.split() for i in range(len(line) - n_grams + 1): grams.add(" ".join(line[i : i + n_grams])) return grams def plagiarism_check(n_grams, training, test): training_grams = get_data_ngrams(training, n_grams) test_grams = get_data_ngrams(test, n_grams) common = training_grams.intersection(test_grams) return 100 * (len(common) / len(test_grams)) # ### Plagiarism Results for ACL Onthology Dataset # # | n | 1 | 2 | 3 | 4 | 5 | 6 | # |--------------|------|----|------|------|------|------| # | Human | 81.4 | 59 | 31.6 | 13.5 | 5.7 | 2.7 | # | <NAME> | 100 | 78 | 47.4 | 21.5 | 8.82 | 3.43 | # | WEPGen | 100 | 82 | 52 | 24 | 9.5 | 3.68 | # # ### Plagiarism Results for XMLA Dataset # # | n | 1 | 2 | 3 | 4 | 5 | 6 | # |--------------|-----|------|------|------|-------|-----| # | Human | 86 | 71 | 44 | 21.4 | 10 | 5.1 | # | <NAME> | 100 | 81.6 | 58.4 | 30 | 13.72 | 6 | # | WEPGen | 100 | 89 | 66 | 37 | 17.2 | 7.5 | # # + test_data = [] with open("arxiv-dataset/arxiv-original.txt") as f: for line in f: j = json.loads(line) test_data.append(j["abstract"]) training_data = [] with open("arxiv-dataset/train_arxiv.txt") as f: for line in f: j = json.loads(line) training_data.append(j["abstract"]) for n_grams in range(1, 8): print("n-grams: {}, common: {}%".format(n_grams, plagiarism_check(n_grams, training_data, test_data))) # -
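# ### Worked toy example
#
# A small illustration (synthetic sentences, not the arXiv data) of what the n-gram
# overlap percentage reported by `plagiarism_check` means: the share of the test set's
# unique n-grams that also appear somewhere in the training set.

# +
toy_training = ["the cat sat on the mat"]
toy_test = ["the cat sat on a chair"]

for n in range(1, 4):
    overlap = plagiarism_check(n, toy_training, toy_test)
    print("n = {}, common: {:.1f}%".format(n, overlap))
# -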
scripts/plagiarism_checker.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from sklearn import datasets dataSets = datasets.make_regression(n_samples=30000, n_features= 10 ,noise=100) x = list() for i in dataSets[0]: tmp = list(i) tmp.insert(0,1) x.append(tmp) y = dataSets[1] print(len(x[0])) line = list() cost = list() # - from sklearn import datasets dataSets = datasets.load_boston() #print(dataSets.keys()) x = [i[:12] for i in dataSets['data'].tolist()] y = [i[-1] for i in dataSets['data'].tolist()] x.pop() y.pop() print((x[0])) # + # 求假设函数的预测值 def h(theta, x): ans = 0.0 for i in range(len(x)): ans += (theta[i] * x[i]) return ans # 求代价函数的值 def get_cost(theta, x, y): cost = 0 m = len(y) for i in range(len(x)): cost += pow((h(theta, x[i]) - y[i] ), 2) /(2.0 * m) return cost def timer(func): import time def wrapper(*args): start = time.time() func(*args) end = time.time() print("总用时为:{}s".format(round(end - start, 2))) return wrapper def feature_scaling(x): from numpy import matrix #寻找每个特征值的最大值以使用特征缩放 x_mat = matrix(x).T max_x = (x_mat.max(axis=0)).tolist()[0] print(len(max_x)) for i in range(len(x)): for j in range(len(x[0])): x[i][j] = float(x[i][j]) / max_x[i] return x # + @timer def g_descent(x, y, alpha = 0.3): global cost #设置参数theta初始值 theta = [0 for i in range(len(x[0]))] iterator = 1 cost_value = 0 last_cost = 0 diff = 0.2 cost = [list(),list()] m = len(y) #控制迭代次数,当下降前后代价值变化小于10e-18时认为梯度下降以收敛 while iterator < 1500 and diff > pow(10, -18): iterator += 1 cost_value = 0 delta_theta = [0 for i in range(len(x[0]))] for i in range(m): cost_value += (pow((h(theta, x[i]) - y[i]), 2) / (2.0 * m)) #计算每个特征量对应的参数变化值 for j in range(len(x[i])): delta_theta[j] += (h(theta, x[i]) - y[i]) * x[i][j] diff = abs(last_cost - cost_value) last_cost = cost_value cost[0].append(iterator / 1000) cost[1].append(diff / 100) # 梯度更新参数 for i in range(len(theta)): theta[i] -= (delta_theta[i] * alpha *(1.0 / m)) print("theta:" ,theta,"迭代次数:" ,iterator,"最终代价值:", cost_value) global line line_x = [i[1] for i in x] line_y = [(theta[0] + theta[1] * x[i][1]) for i in range(m)] line = [line_x, line_y] return theta theta = g_descent(feature_scaling(x), y, 0.1) # + @timer def normal_equation(x, y): import numpy as np x_matrix = np.matrix(x) y_mat = np.matrix(y) # 正规方程求解公式 theta = ((x_matrix.T * x_matrix).I) * x_matrix.T * y_mat.T print("theta:", theta, "最终代价值:", get_cost(theta, x, y)) normal_equation(x, y) # + import matplotlib.pyplot as plt fig = plt.figure(1, dpi=140) # # 数据散点图 # plt.subplot(211) # plt.title("linear regression") # #plt.scatter([i[1] for i in x], y) # # 拟合曲线 # #plt.plot(line[0], line[1], color='red') # #plt.tight_layout() # # 代价函数变化图 # plt.subplot(212) plt.plot(cost[0], cost[1]) plt.title("value of cost") plt.tight_layout() plt.show() # - for i in range(100): print(cost[1][i])
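# Optional cross-check (an added sketch, not part of the original notebook): scikit-learn's
# LinearRegression solves the same least-squares problem as normal_equation above.
# fit_intercept=False is used because any intercept term is expected to come from a bias
# column inside x itself, exactly as the handwritten solvers assume.

# +
from sklearn.linear_model import LinearRegression

sk_model = LinearRegression(fit_intercept=False).fit(x, y)
print("sklearn theta:", sk_model.coef_)
print("cost with sklearn theta:", get_cost(sk_model.coef_, x, y))
# -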
Algorithm_notbook/LinearRegression(WroteByMyself).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Creating a stacked bar chart with pandas

# Learning pandas stacked bar plotting
from IPython.display import IFrame, YouTubeVideo, SVG, HTML
YouTubeVideo('26Nmks3vctU', 400,300)

import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter  # for custom number format on x and y axis
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'

print(f'pandas version: {pd.__version__}')

pd.set_option('display.max_rows', 10)

# ### PISA score from Wikipedia
# https://en.wikipedia.org/wiki/Programme_for_International_Student_Assessment

df=pd.read_csv('https://github.com/prasertcbs/basic-dataset/raw/master/pisa2015.csv')
df

df[:5].plot(kind='bar', stacked=True);

df.set_index('Country', inplace=True)
df

df[:5].plot(kind='bar', stacked=True, title='PISA score 2015');

df=pd.read_csv('https://github.com/prasertcbs/basic-dataset/raw/master/pisa2015.csv', index_col='Country')
df

df[:5].plot(kind='barh', stacked=True, title='PISA score 2015');

df[:5].plot(kind='barh', stacked=True, figsize=(8, 4));

df[:5][['Maths', 'Science']].plot(kind='barh', stacked=True);

# https://matplotlib.org/2.0.0/examples/color/named_colors.html
ax=df[:5][['Maths', 'Science']].plot(kind='barh', stacked=True, color=['gold', 'c'], title='PISA Score 2015');
ax.set_xlabel('score')
ax.set_ylabel('');

df['Total'] = df.Maths + df.Science + df.Reading
df

df.nlargest(10, 'Total')[['Maths', 'Science', 'Reading']].plot(kind='barh', stacked=True)

df.loc[['Japan', 'Taiwan', 'Vietnam', 'Thailand'], ['Maths', 'Science', 'Reading']].plot(kind='barh', stacked=True)

# ## Formatting the numbers shown on an axis, e.g. adding a thousands separator (,)

ax=df.loc[['Japan', 'Taiwan', 'Vietnam', 'Thailand'], ['Maths', 'Science', 'Reading']].plot(kind='barh', stacked=True, color=['maroon', 'gold', 'c'], figsize=(10,5))
ax.xaxis.set_major_formatter(FuncFormatter(lambda v, p: "{:,.0f}".format(v)))
ax.set_xlabel('score')
ax.set_ylabel('');
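# ### Optional extra: 100% (normalized) stacked bar
#
# An added illustration (not in the original tutorial): dividing each row by its row total
# before plotting gives a 100% stacked bar, which compares the composition of the scores
# rather than their absolute totals.

# +
subset = df.nlargest(5, 'Total')[['Maths', 'Science', 'Reading']]
share = subset.div(subset.sum(axis=1), axis=0)
ax = share.plot(kind='barh', stacked=True, figsize=(8, 4), title='Share of total PISA score 2015')
ax.xaxis.set_major_formatter(FuncFormatter(lambda v, p: '{:.0%}'.format(v)))
ax.set_xlabel('share of total score')
ax.set_ylabel('');
# -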
learn_jupyter/7_pandas_stacked_bar_plot.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import numpy as np
import matplotlib.pyplot as plt
import librosa
import librosa.display

# Load the audio file at an 8 kHz sampling rate
y, sr = librosa.load("auds/speech.wav", sr=8000)

# Plot the original speech signal
plt.figure(figsize=(5, 2))
plt.title('Original Speech Signal')
plt.plot(y, label="original file")
plt.xlabel("Time")
plt.grid(True)


def plot_spectrogram(signal, hop_length, win_length, title=None, ax=None):
    # Magnitude STFT of the given signal with the requested window settings
    n_fft = 1024
    D = np.abs(librosa.stft(signal, n_fft=n_fft, hop_length=hop_length,
                            win_length=win_length, window='hamming'))
    S_Db = librosa.amplitude_to_db(D, ref=np.max)
    # Pass hop_length so the time axis is scaled correctly
    colormesh = librosa.display.specshow(S_Db, sr=sr, hop_length=hop_length,
                                         x_axis='s', y_axis='linear', ax=ax)
    ax.set(title=title)
    return colormesh


fig, ax = plt.subplots(1, 3, figsize=(15, 5), gridspec_kw={"width_ratios": [7, 7, .2]})
plot_spectrogram(y, 10, 40, title='Wideband spectrogram', ax=ax[0])
colormesh = plot_spectrogram(y, 100, 400, title='Narrowband spectrogram', ax=ax[1])
fig.colorbar(colormesh, cax=ax[2], format="%+2.f dB")
plt.tight_layout()
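# A quick numeric check (added for illustration) of the time-frequency trade-off behind
# the two spectrograms above: the short analysis window gives fine time resolution but
# coarse frequency resolution (wideband), and the long window does the opposite (narrowband).

# +
for win_length in (40, 400):  # samples, as used in the calls above
    window_ms = 1000 * win_length / sr
    freq_resolution = sr / win_length  # approximate spacing of resolvable frequencies, in Hz
    print(f"win_length={win_length:4d} samples -> {window_ms:5.1f} ms window, "
          f"~{freq_resolution:6.1f} Hz frequency resolution")
# -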
speech processing/2 - Wideband and Narrow Spectrogram .ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # # Compute all-to-all connectivity in sensor space # # # Computes the Phase Lag Index (PLI) between all gradiometers and shows the # connectivity in 3D using the helmet geometry. The left visual stimulation data # are used which produces strong connectvitiy in the right occipital sensors. # # + # Author: <NAME> <<EMAIL>> # # License: BSD (3-clause) import numpy as np from scipy import linalg import mne from mne import io from mne.connectivity import spectral_connectivity from mne.datasets import sample print(__doc__) # - # Set parameters # # # + data_path = sample.data_path() raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif' # Setup for reading the raw data raw = io.read_raw_fif(raw_fname) events = mne.read_events(event_fname) # Add a bad channel raw.info['bads'] += ['MEG 2443'] # Pick MEG gradiometers picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True, exclude='bads') # Create epochs for the visual condition event_id, tmin, tmax = 3, -0.2, 1.5 # need a long enough epoch for 5 cycles epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6)) # Compute connectivity for band containing the evoked response. # We exclude the baseline period fmin, fmax = 3., 9. sfreq = raw.info['sfreq'] # the sampling frequency tmin = 0.0 # exclude the baseline period con, freqs, times, n_epochs, n_tapers = spectral_connectivity( epochs, method='pli', mode='multitaper', sfreq=sfreq, fmin=fmin, fmax=fmax, faverage=True, tmin=tmin, mt_adaptive=False, n_jobs=1) # the epochs contain an EOG channel, which we remove now ch_names = epochs.ch_names idx = [ch_names.index(name) for name in ch_names if name.startswith('MEG')] con = con[idx][:, idx] # con is a 3D array where the last dimension is size one since we averaged # over frequencies in a single band. 
Here we make it 2D con = con[:, :, 0] # Now, visualize the connectivity in 3D from mayavi import mlab # noqa mlab.figure(size=(600, 600), bgcolor=(0.5, 0.5, 0.5)) # Plot the sensor locations sens_loc = [raw.info['chs'][picks[i]]['loc'][:3] for i in idx] sens_loc = np.array(sens_loc) pts = mlab.points3d(sens_loc[:, 0], sens_loc[:, 1], sens_loc[:, 2], color=(1, 1, 1), opacity=1, scale_factor=0.005) # Get the strongest connections n_con = 20 # show up to 20 connections min_dist = 0.05 # exclude sensors that are less than 5cm apart threshold = np.sort(con, axis=None)[-n_con] ii, jj = np.where(con >= threshold) # Remove close connections con_nodes = list() con_val = list() for i, j in zip(ii, jj): if linalg.norm(sens_loc[i] - sens_loc[j]) > min_dist: con_nodes.append((i, j)) con_val.append(con[i, j]) con_val = np.array(con_val) # Show the connections as tubes between sensors vmax = np.max(con_val) vmin = np.min(con_val) for val, nodes in zip(con_val, con_nodes): x1, y1, z1 = sens_loc[nodes[0]] x2, y2, z2 = sens_loc[nodes[1]] points = mlab.plot3d([x1, x2], [y1, y2], [z1, z2], [val, val], vmin=vmin, vmax=vmax, tube_radius=0.001, colormap='RdBu') points.module_manager.scalar_lut_manager.reverse_lut = True mlab.scalarbar(points, title='Phase Lag Index (PLI)', nb_labels=4) # Add the sensor names for the connections shown nodes_shown = list(set([n[0] for n in con_nodes] + [n[1] for n in con_nodes])) for node in nodes_shown: x, y, z = sens_loc[node] mlab.text3d(x, y, z, raw.ch_names[picks[node]], scale=0.005, color=(0, 0, 0)) view = (-88.7, 40.8, 0.76, np.array([-3.9e-4, -8.5e-3, -1e-2])) mlab.view(*view)
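# What the PLI measures (toy example)
#
# The cell below is a small illustrative sketch, separate from the MEG pipeline above. It
# applies the defining formula PLI = |mean(sign(sin(delta_phi)))| to synthetic phase
# differences: a consistent non-zero lag across epochs gives a PLI near 1, while a lag
# that is random from epoch to epoch gives a PLI near 0.

# +
rng = np.random.RandomState(42)
n_epochs = 200

constant_lag = np.full(n_epochs, np.pi / 4)        # same 45 degree lag every epoch
random_lag = rng.uniform(-np.pi, np.pi, n_epochs)  # lag changes from epoch to epoch

for name, lag in [("constant lag", constant_lag), ("random lag", random_lag)]:
    pli = np.abs(np.mean(np.sign(np.sin(lag))))
    print("PLI with %s: %.2f" % (name, pli))
# -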
stable/_downloads/13161608f5f01f8a66c6efa4aab22d71/plot_sensor_connectivity.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # HW3 Question2 import pandas as pd from sklearn.model_selection import cross_val_score, train_test_split, RandomizedSearchCV from sklearn.model_selection import GridSearchCV, cross_val_score, KFold from sklearn.preprocessing import LabelEncoder import matplotlib.pyplot as plt # %matplotlib inline import seaborn as sns import numpy as np from sklearn.metrics import confusion_matrix, classification_report from sklearn.metrics import accuracy_score, roc_curve, auc from sklearn.metrics import make_scorer from sklearn.externals.six import StringIO dot_data = StringIO() from sklearn.preprocessing import StandardScaler, OneHotEncoder, label_binarize import warnings warnings.filterwarnings('ignore') from scipy import interp from itertools import cycle from sklearn import tree from sklearn import linear_model from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB from sklearn.svm import SVC import scikitplot as skplt import matplotlib as mpl from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import GradientBoostingClassifier #import lightgbm from xgboost import XGBClassifier data = pd.read_csv('/Users/yuxuanwang/Desktop/competition/model/3contrat.csv',index_col=['Time']) data.head() from sklearn.ensemble import RandomForestRegressor df = data.reset_index(drop = False) df columns2 = df.columns.tolist() columns2 columns2 = [c for c in columns2 if c not in['Time','Close']] columns2 target ='Close' target train2 = df.sample(frac=0.8, random_state=1) test2 = df.loc[~df.index.isin(train2.index)] print(train2.shape) #need to have same number of features only difference should be obs print(test2.shape) model = RandomForestRegressor(random_state=1) model.fit(train2[columns2], train2[target]) predictions_rf = model.predict(test2[columns2]) df.columns # + features=df.columns[2:] importances = model.feature_importances_ indices = np.argsort(importances) plt.figure(1) plt.title('Feature Importances') plt.barh(range(len(indices)), importances[indices], color='b', align='center') plt.yticks(range(len(indices)), features[indices]) plt.xlabel('Relative Importance') # - features np.set_printoptions(precision = 5, suppress = True) print(importances) # split data into features and target X = data.iloc[:, 1:] Y = data.iloc[:, 0] X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42) X.shape # normalization sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) index = pd.DataFrame(X_train).columns # # Feature Selection Using Different Methods from sklearn.linear_model import LogisticRegression from sklearn.feature_selection import SelectFromModel # Using Lasso X_train_lasso = SelectFromModel(LogisticRegression(penalty="l1", C=0.1)).fit_transform(X_train, y_train) reg = LogisticRegression(penalty="l1", C=0.1).fit(X_train, y_train) coef = pd.Series(reg.coef_[0], index = pd.DataFrame(X_train).columns) X_test_lasso = X_test[:, coef != 0] # Using XGBoost X_train_xgboost = SelectFromModel(XGBClassifier()).fit_transform(X_train, y_train) # Using RFE from sklearn.feature_selection import RFE X_train_RFE = RFE(estimator=LogisticRegression(), n_features_to_select=30).fit_transform(X_train, y_train) # + # Compare the feature selection methods log_base_clf 
= LogisticRegression() # No feature selection base_scores = cross_val_score(log_base_clf, X_train, y_train, cv=10) print('Score with no feature selection: ' + str(base_scores.mean()) + ' +/- ' + str(base_scores.std())) # Lasso feature set lasso_scores = cross_val_score(log_base_clf, X_train_lasso, y_train, cv=10) print('Score with Lasso feature selection: ' + str(lasso_scores.mean()) + ' +/- ' + str(lasso_scores.std())) # Xgboost feature set xgboost_scores = cross_val_score(log_base_clf, X_train_xgboost, y_train, cv=10) print('Score with Xgboost feature selection: ' + str(xgboost_scores.mean()) + ' +/- ' + str(xgboost_scores.std())) # RFE feature set rfe_scores = cross_val_score(log_base_clf, X_train_RFE, y_train, cv=10) print('Score with RFE feature selection: ' + str(rfe_scores.mean()) + ' +/- ' + str(rfe_scores.std())) # - # # Prediction Using Different Models # ## Model Selection Using Nested CV # ### 1. Logistic Regression # + # Number of random trials NUM_TRIALS = 5 # Model Initialization log_clf = linear_model.LogisticRegression() nested_scores_log = np.zeros(NUM_TRIALS) p_grid_log = {'penalty': ['l1', 'l2'], 'C': [0.001,0.01,0.1,1,10,100]} # Logistic Regression Overall Performance for i in range(NUM_TRIALS): inner_cv = KFold(n_splits=4, shuffle=True, random_state=i) outer_cv = KFold(n_splits=4, shuffle=True, random_state=i) # Non_nested parameter search and scoring clf = RandomizedSearchCV(estimator=log_clf, param_distributions=p_grid_log, cv=inner_cv) clf.fit(X_train_lasso, y_train) # Nested CV with parameter optimization nested_score = cross_val_score(clf, X=X_train_lasso, y=y_train, cv=outer_cv) nested_scores_log[i] = nested_score.mean() # - # ### 2. XGBoost # + xg_clf = XGBClassifier() nested_scores_xg = np.zeros(NUM_TRIALS) p_grid_xgb = {'n_estimators': range(50, 400, 50), 'max_depth':range(3,10,2), 'gamma':[i/10.0 for i in range(0,5)], 'reg_alpha':[0, 0.001, 0.005, 0.01, 0.05]} # XGBoost Overall Performance for i in range(NUM_TRIALS): inner_cv = KFold(n_splits=4, shuffle=True, random_state=i) outer_cv = KFold(n_splits=4, shuffle=True, random_state=i) # Non_nested parameter search and scoring clf = RandomizedSearchCV(estimator=xg_clf, param_distributions=p_grid_xgb, cv=inner_cv) clf.fit(X_train_lasso, y_train) # Nested CV with parameter optimization nested_score = cross_val_score(clf, X=X_train_lasso, y=y_train, cv=outer_cv) nested_scores_xg[i] = nested_score.mean() # - # ### 3. Random Forest # + rf_clf = RandomForestClassifier() nested_scores_rf = np.zeros(NUM_TRIALS) p_grid_rf = {'max_depth': [10, 20, 30, 40, 50, 60], 'max_features': ['auto', 'sqrt'], 'n_estimators': [100, 200, 400]} # Random Forest Overall Performance for i in range(NUM_TRIALS): inner_cv = KFold(n_splits=4, shuffle=True, random_state=i) outer_cv = KFold(n_splits=4, shuffle=True, random_state=i) # Non_nested parameter search and scoring clf = RandomizedSearchCV(estimator=rf_clf, param_distributions=p_grid_rf, cv=inner_cv) clf.fit(X_train_lasso, y_train) # Nested CV with parameter optimization nested_score = cross_val_score(clf, X=X_train_lasso, y=y_train, cv=outer_cv) nested_scores_rf[i] = nested_score.mean() # - # ### 4. 
LightGBM # + lgb_clf = lightgbm.LGBMClassifier() nested_scores_lgb = np.zeros(NUM_TRIALS) p_grid_lgb = { 'learning_rate': [0.01, 0.05, 0.1, 1], 'n_estimators': [20, 40, 60, 80], 'num_leaves': range(10, 100, 2)} # LightGBM Overall Performance for i in range(NUM_TRIALS): inner_cv = KFold(n_splits=4, shuffle=True, random_state=i) outer_cv = KFold(n_splits=4, shuffle=True, random_state=i) # Non_nested parameter search and scoring clf = RandomizedSearchCV(estimator=lgb_clf, param_distributions=p_grid_lgb, cv=inner_cv) clf.fit(X_train_lasso, y_train) # Nested CV with parameter optimization nested_score = cross_val_score(clf, X=X_train_lasso, y=y_train, cv=outer_cv) nested_scores_lgb[i] = nested_score.mean() # - # ### Model Performance Comparison # + performance_dic = {'Logit': nested_scores_log.mean(), 'LightGBM': nested_scores_lgb.mean(), 'XGBoost': nested_scores_xg.mean(), 'Random Forest': nested_scores_rf.mean()} std_dic = {'Logit': nested_scores_log.std(), 'LightGBM': nested_scores_lgb.std(), 'XGBoost': nested_scores_xg.std(), 'Random Forest': nested_scores_rf.std()} performance = pd.DataFrame(list(performance_dic.items()), columns=['Model', 'Accuracy']) performance['Std'] = performance['Model'].map(std_dic) performance.index = performance.Model performance.drop('Model', axis=1) # - # **According the results above, Xgboost algorithm gives us the highest accuracy.** # # So we dive deeper into the Xgboost model. # + xg_reg = XGBClassifier() p_grid_xgb = {'n_estimators': range(50, 400, 50), 'max_depth':range(3,10,2), 'gamma':[i/10.0 for i in range(0,5)], 'reg_alpha':[0, 0.001, 0.005, 0.01, 0.05]} grid = RandomizedSearchCV(xg_reg, p_grid_xgb, cv = 10, scoring = 'accuracy') grid.fit(X_train_lasso, y_train) grid.best_params_ # - xgb = XGBClassifier(reg_alpha=0.005, n_estimators=350, max_depth=7, gamma=0.3) xgb.fit(X_train_lasso, y_train) y_pred = xgb.predict(X_test_lasso) accuracy_score(y_test, y_pred) # ### 5. 
Neural Network from kerastuner.tuners.bayesian import BayesianOptimization from kerastuner.tuners import RandomSearch from tensorflow import keras from tensorflow.keras import layers # + # split data into features and target XNN = data.iloc[:, :-1] YNN = data.iloc[:, -1] X_train_nn, X_test_nn, y_train_nn, y_test_nn \ = train_test_split(XNN, YNN, test_size=0.2, random_state=100, stratify=YNN) X_train_nn, X_val_nn, y_train_nn, y_val_nn \ = train_test_split(X_train_nn, y_train_nn, test_size=0.2, random_state=100, stratify=y_train_nn) # - # Normalization sc = StandardScaler() X_train_nn = sc.fit_transform(X_train_nn) X_val_nn = sc.transform(X_val_nn) X_test_nn = sc.transform(X_test_nn) def build_model(hp): model = keras.Sequential() for i in range(hp.Int('num_layers', 2, 20)): model.add(layers.Dense(units=hp.Int('units_' + str(i), 32, 512, 32), activation='relu')) model.add(layers.Dense(1, activation='sigmoid')) model.compile( optimizer=keras.optimizers.Adam( hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])), loss='binary_crossentropy', metrics=['accuracy']) return model tuner = BayesianOptimization( build_model, objective='val_accuracy', max_trials=5, executions_per_trial=3, directory='cost_insensitive_4') tuner.search(x=X_train_nn, y=y_train_nn.to_numpy(), epochs=4, validation_data=(X_val_nn, y_val_nn.to_numpy())) tuner.results_summary() best_model = tuner.get_best_models(num_models=1)[0] full_X_train_nn = np.concatenate([X_train_nn, X_val_nn]) full_y_train_nn = np.concatenate([y_train_nn, y_val_nn]) best_model.fit(full_X_train_nn, full_y_train_nn, epochs=20, batch_size=32) y_pred_nn = best_model.predict_classes(X_test_nn) accuracy_score(y_test_nn, y_pred_nn) # ### Evaluate XGBOOST Performance print(confusion_matrix(y_test, y_pred)) print(classification_report(y_test, y_pred)) y_probas = y_pred = xgb.predict_proba(X_test_lasso) skplt.metrics.plot_roc(y_test, y_probas) skplt.metrics.plot_lift_curve(y_test, y_probas) plt.title('Lift Curve of XGBoost') # ### Cost-Accuracy Combined Evaluation # + def misclassification_cost(y_test, y_pred): tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel() cost = -10*fp + -1*fn return cost cost_score = make_scorer(misclassification_cost, greater_is_better=False) # + # Logistic Regression log_clf = LogisticRegression() grid = GridSearchCV(log_clf, p_grid_log, cv = 10, scoring = cost_score) grid.fit(X_train_lasso, y_train) log_best_clf = LogisticRegression(C=10, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, l1_ratio=None, max_iter=100, multi_class='warn', n_jobs=None, penalty='l1', random_state=None, solver='warn', tol=0.0001, verbose=0, warm_start=False) log_best_clf.fit(X_train_lasso, y_train) cost_score(log_best_clf, X_test_lasso, y_test) # - # Random Forest rf_clf = RandomForestClassifier() grid = RandomizedSearchCV(rf_clf, p_grid_rf, cv = 10, scoring = cost_score) grid.fit(X_train_lasso, y_train) rf_best_clf = RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=20, max_features='sqrt', max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=200, n_jobs=None, oob_score=False, random_state=None, verbose=0, warm_start=False) rf_best_clf.fit(X_train_lasso, y_train) cost_score(rf_best_clf, X_test_lasso, y_test) # XGBoost xgb_clf = XGBClassifier() grid = RandomizedSearchCV(xgb_clf, p_grid_xgb, cv = 10, scoring = cost_score) grid.fit(X_train_lasso, y_train) xgb_best_clf = XGBClassifier(base_score=0.5, 
booster='gbtree', colsample_bylevel=1, colsample_bynode=1, colsample_bytree=1, gamma=0.3, learning_rate=0.1, max_delta_step=0, max_depth=9, min_child_weight=1, missing=None, n_estimators=50, n_jobs=1, nthread=None, objective='binary:logistic', random_state=0, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None, silent=None, subsample=1, verbosity=1) xgb_best_clf.fit(X_train_lasso, y_train) cost_score(xgb_best_clf, X_test_lasso, y_test) # LightGBM lgb_clf = lightgbm.LGBMClassifier() grid = RandomizedSearchCV(lgb_clf, p_grid_lgb, cv = 10, scoring = cost_score) grid.fit(X_train_lasso, y_train) lgb_best_clf = lightgbm.LGBMClassifier(boosting_type='gbdt', class_weight=None, colsample_bytree=1.0, importance_type='split', learning_rate=0.01, max_depth=-1, min_child_samples=20, min_child_weight=0.001, min_split_gain=0.0, n_estimators=20, n_jobs=-1, num_leaves=64, objective=None, random_state=None, reg_alpha=0.0, reg_lambda=0.0, silent=True, subsample=1.0, subsample_for_bin=200000, subsample_freq=0) lgb_best_clf.fit(X_train_lasso, y_train) cost_score(lgb_best_clf, X_test_lasso, y_test) y_pred = rf_best_clf.predict(X_test_lasso) accuracy_score(y_test, y_pred) print(confusion_matrix(y_test, y_pred)) print(classification_report(y_test, y_pred)) y_probas = y_pred = xgb.predict_proba(X_test_lasso) skplt.metrics.plot_roc(y_test, y_probas) skplt.metrics.plot_lift_curve(y_test, y_probas) plt.title('Lift Curve of RandomForest')
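# Note on the cost scorer used above (a hedged aside, not part of the original analysis):
# `misclassification_cost` already returns larger-is-better values (0 for a perfect
# classifier, increasingly negative as false positives and false negatives accumulate), and
# `make_scorer(..., greater_is_better=False)` negates the function once more, so the scorer
# it produces grows with the number of costly errors; a search that maximizes it would then
# favour the wrong direction. A tiny sanity check of the sign convention:

# +
from sklearn.metrics import make_scorer

y_true_demo = [0, 0, 1, 1]
perfect = [0, 0, 1, 1]
all_wrong = [1, 1, 0, 0]

# Raw cost function: 0 for the perfect prediction, -22 when every label is flipped,
# i.e. higher is already better.
print(misclassification_cost(y_true_demo, perfect),
      misclassification_cost(y_true_demo, all_wrong))

# A consistent scorer either keeps the negative "reward" with greater_is_better=True ...
reward_score = make_scorer(misclassification_cost, greater_is_better=True)
# ... or defines the cost as a positive number (10*fp + 1*fn) and keeps greater_is_better=False.
# -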
feature selection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # Example: Covertype Data Set # The following example uses the (processed) Covertype dataset from [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Covertype). # # It is a dataset with both categorical (`wilderness_area` and `soil_type`) and continuous (the rest) features. The target is the `cover_type` column: # + covertype_dataset = spark.read.parquet("covertype_dataset.snappy.parquet") covertype_dataset.printSchema() # - # The 10 first rows: covertype_dataset.limit(10).toPandas() # In order for Spark's `DecisionTreeClassifier` to work with the categorical features (as well as the target), we first need to use [`pyspark.ml.feature.StringIndexer`](https://spark.apache.org/docs/latest/api/python/pyspark.ml.html#pyspark.ml.feature.StringIndexer)s to generate a numeric representation for those columns: # + from pyspark.ml.feature import StringIndexer string_indexer_wilderness = StringIndexer(inputCol="wilderness_area", outputCol="wilderness_area_indexed") string_indexer_soil = StringIndexer(inputCol="soil_type", outputCol="soil_type_indexed") string_indexer_cover = StringIndexer(inputCol="cover_type", outputCol="cover_type_indexed") # - # To generate the new *StringIndexerModels*, we call `.fit()` on each `StringIndexer` instance: # + string_indexer_wilderness_model = string_indexer_wilderness.fit(covertype_dataset) string_indexer_soil_model = string_indexer_soil.fit(covertype_dataset) string_indexer_cover_model = string_indexer_cover.fit(covertype_dataset) # - # And we create the new columns: covertype_dataset_indexed_features = string_indexer_cover_model.transform(string_indexer_soil_model .transform(string_indexer_wilderness_model .transform(covertype_dataset) ) ) # New columns can be seen at the right: covertype_dataset_indexed_features.limit(10).toPandas() # Now, we just have to `VectorAssemble` our features to create the feature vector: # + from pyspark.ml.feature import VectorAssembler feature_columns = ["elevation", "aspect", "slope", "horizontal_distance_to_hydrology", "vertical_distance_to_hydrology", "horizontal_distance_to_roadways", "hillshade_9am", "hillshade_noon", "hillshade_3pm", "horizontal_distance_to_fire_points", "wilderness_area_indexed", "soil_type_indexed"] feature_assembler = VectorAssembler(inputCols=feature_columns, outputCol="features") # - # And we have our dataset prepared for ML: covertype_dataset_prepared = feature_assembler.transform(covertype_dataset_indexed_features) covertype_dataset_prepared.printSchema() # Let's build a simple `pyspark.ml.classification.DecisionTreeClassifier`: # + from pyspark.ml.classification import DecisionTreeClassifier dtree = DecisionTreeClassifier(featuresCol="features", labelCol="cover_type_indexed", maxDepth=3, maxBins=50) # - # We fit it, and we get our `DecisionTreeClassificationModel`: # + dtree_model = dtree.fit(covertype_dataset_prepared) dtree_model # - # The `.toDebugString` attribute prints the decision rules for the tree, but it is not very user-friendly: print(dtree_model.toDebugString) # Perhaps `spark_tree_plotting` may be helpful here ;) # + from spark_tree_plotting import plot_tree tree_plot = plot_tree(dtree_model, featureNames=feature_columns, categoryNames={"wilderness_area_indexed":string_indexer_wilderness_model.labels, 
"soil_type_indexed":string_indexer_soil_model.labels}, classNames=string_indexer_cover_model.labels, filled=True, # With color! roundedCorners=True, # Rounded corners in the nodes roundLeaves=True # Leaves will be ellipses instead of rectangles ) # + from IPython.display import Image Image(tree_plot)
examples/Example_covertype_dataset.ipynb
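# An optional follow-up to the covertype example above (a sketch only, not part of the
# original notebook): the three `StringIndexer`s, the `VectorAssembler` and the
# `DecisionTreeClassifier` can be chained into a single `pyspark.ml.Pipeline`, so the whole
# preparation and fit happen in one call on the raw dataset:

# +
from pyspark.ml import Pipeline

pipeline = Pipeline(stages=[string_indexer_wilderness,
                            string_indexer_soil,
                            string_indexer_cover,
                            feature_assembler,
                            dtree])

# Fit all stages in order on the raw data and inspect a few predictions
pipeline_model = pipeline.fit(covertype_dataset)
pipeline_model.transform(covertype_dataset).select("cover_type", "prediction").limit(5).toPandas()
# -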
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Extended Kalman filter for Nomoto model # An Extended Kalman filter with a Nomoto model as the predictor will be developed. # The filter is run on simulated data as well as real model test data. # + tags=["hide-cell"] # %load_ext autoreload # %autoreload 2 import pandas as pd import numpy as np import matplotlib.pyplot as plt from numpy.linalg import inv import sympy as sp import src.visualization.book_format as book_format book_format.set_style() from src.substitute_dynamic_symbols import lambdify from sympy import Matrix from sympy.physics.mechanics import (dynamicsymbols, ReferenceFrame, Particle, Point) from IPython.display import display, Math, Latex from src.substitute_dynamic_symbols import run, lambdify from sympy.physics.vector.printing import vpprint, vlatex from src.data import mdl from src.extended_kalman_filter import extended_kalman_filter # - # ## Nomoto model for ship manoeuvring dynamics # The Nomoto model can be written as: # + tags=["remove-input"] r,r1d,r2d = sp.symbols('r \dot{r} \ddot{r}') psi,psi1d = sp.symbols('psi \dot{\psi}') h,u = sp.symbols('h u') x, x1d = sp.symbols('x \dot{x}') A,B,C,D,E, Phi = sp.symbols('A B C D E Phi') w = sp.symbols('w') K, delta, T_1, T_2 = sp.symbols('K delta T_1 T_2') eq_nomoto = sp.Eq(K*delta, r + T_1*r1d + T_2*r2d) Math(vlatex(eq_nomoto)) # - # where $r$ is yaw rate with its time derivatives and $\delta$ is the rudder angle. $K$, $T_{1}$ # and $T_{1}$ are the coefficients describing the hydrodynamics of the ship. # # For slow manoeuvres this equation can be further simplified by removing the $\ddot{r}$ term into a first order Nomoto model: # + tags=["remove-input"] eq_nomoto_simple = eq_nomoto.subs(r2d,0) Math(vlatex(eq_nomoto_simple)) # - # ### Simulation model # + tags=["remove-input"] f_hat = sp.Function('\hat{f}')(x,u,w) eq_system = sp.Eq(x1d, f_hat) eq_system # - # Where the state vector $x$: # + tags=["remove-input"] eq_x = sp.Eq(x, sp.UnevaluatedExpr(Matrix([psi,r]))) eq_x # - # and input vector $u$: # and $w$ is zero mean Gausian process noise # For the nomoto model the time derivatives for the states can be expressed as: # + tags=["remove-input"] eq_psi1d = sp.Eq(psi1d,r) eq_psi1d # + tags=["remove-input"] eq_r1d = sp.Eq(r1d,sp.solve(eq_nomoto_simple,r1d)[0]) eq_r1d # - def lambda_f_constructor(K, T_1): def lambda_f(x, u): delta = u f = np.array([[x[1], (K*delta-x[1])/T_1]]).T return f return lambda_f jac = sp.eye(2,2) + Matrix([r,eq_r1d.rhs]).jacobian([psi,r])*h jac Matrix([r, eq_r1d.rhs]).jacobian([delta]) def lambda_jacobian_constructor(h,T_1): def lambda_jacobian(x, u): jac = np.array( [ [1, h], [0, 1-h/T_1], ] ) return jac return lambda_jacobian # ## Simulation # Simulation with this model where rudder angle shifting between port and starboard # + tags=["cell_hide", "hide-cell"] T_1_ = 1.8962353076056344 K_ = 0.17950970687951323 h_ = 0.02 lambda_f = lambda_f_constructor(K=K_, T_1=T_1_) lambda_jacobian = lambda_jacobian_constructor(h=h_, T_1=T_1_) # - def simulate(E, ws, t, us): simdata = [] x_=np.deg2rad(np.array([[0,0]]).T) for u_,w_ in zip(us,ws): x_=x_ + h_*lambda_f(x=x_.flatten(), u=u_) simdata.append(x_.flatten()) simdata = np.array(simdata) df = pd.DataFrame(simdata, columns=["psi","r"], index=t) df['delta'] = us return df # + tags=["cell_hide", "hide-cell"] N_ = 4000 t_ = np.arange(0,N_*h_,h_) 
us = np.deg2rad(np.concatenate((-10*np.ones(int(N_/4)), 10*np.ones(int(N_/4)), -10*np.ones(int(N_/4)), 10*np.ones(int(N_/4))))) np.random.seed(42) E = np.array([[0, 1]]).T process_noise = np.deg2rad(0.01) ws = process_noise*np.random.normal(size=N_) df = simulate(E=E, ws=ws, t=t_, us=us) measurement_noise = np.deg2rad(0.5) df['epsilon'] = measurement_noise*np.random.normal(size=N_) df['psi_measure'] = df['psi'] + df['epsilon'] df['psi_deg'] = np.rad2deg(df['psi']) df['psi_measure_deg'] = np.rad2deg(df['psi_measure']) df['delta_deg'] = np.rad2deg(df['delta']) # + tags=["hide_input", "remove-input"] fig,ax=plt.subplots() df.plot(y='psi_deg', ax=ax) df.plot(y='psi_measure_deg', ax=ax, zorder=-1) df.plot(y='delta_deg', ax=ax, zorder=-1) df.plot(y='r') ax.set_title('Simulation with measurement and process noise') ax.set_xlabel('Time [s]'); # - # ## Kalman filter # Implementation of the Kalman filter. The code is inspired of this Matlab implementation: [ExEKF.m](https://github.com/cybergalactic/MSS/blob/master/mssExamples/ExEKF.m). # + tags=["hide-cell"] x0=np.deg2rad(np.array([[0,0]]).T) P_prd = np.diag(np.deg2rad([1, 0.1])) Qd = np.deg2rad(np.diag([0, 0.5])) Rd = np.deg2rad(1) ys = df['psi_measure'].values E_ = np.array( [[0,0], [0,1]], ) C_ = np.array([[1, 0]]) Cd_ = C_ Ed_ = h_ * E_ time_steps = extended_kalman_filter(x0=x0, P_prd=P_prd, lambda_f=lambda_f, lambda_jacobian=lambda_jacobian,h=h_, us=us, ys=ys, E=E_, Qd=Qd, Rd=Rd, Cd=Cd_) x_hats = np.array([time_step["x_hat"] for time_step in time_steps]).T time = np.array([time_step["time"] for time_step in time_steps]).T Ks = np.array([time_step["K"] for time_step in time_steps]).T # + tags=["remove-input"] n=len(P_prd) fig,axes=plt.subplots(nrows=n) keys = ['psi','r'] for i,key in enumerate(keys): ax=axes[i] df.plot(y=key, ax=ax, label="True") if key=='psi': df.plot(y='psi_measure', ax=ax, label="Measured", zorder=-1) ax.plot(time, x_hats[i, :], "-", label="kalman") ax.set_ylabel(key) ax.legend() # + tags=["remove-input"] fig,ax=plt.subplots() for i,key in enumerate(keys): ax.plot(time,Ks[i,:],label=key) ax.set_title('Kalman gains') ax.legend(); ax.set_ylim(0,0.1); # - # # Real data # Using the developed Kalman filter on some real model test data # ## Load test # + tags=["remove-input"] id=22773 df, units, meta_data = mdl.load(dir_path = '../data/raw', id=id) df.index = df.index.total_seconds() df.index-=df.index[0] # + tags=["remove-input"] from src.visualization.plot import track_plot fig,ax=plt.subplots() fig.set_size_inches(10,10) track_plot(df=df, lpp=meta_data.lpp, x_dataset='x0', y_dataset='y0', psi_dataset='psi', beam=meta_data.beam, ax=ax); # + tags=["hide-input"] ys = df['psi'].values h_m=h_ = df.index[1]-df.index[0] x0=np.deg2rad(np.array([[0,0]]).T) us = df['delta'].values P_prd = np.diag(np.deg2rad([1, 0.1])) Qd = np.deg2rad(np.diag([0, 10])) Rd = np.deg2rad(0.5) time_steps = extended_kalman_filter(x0=x0, P_prd=P_prd, lambda_f=lambda_f, lambda_jacobian=lambda_jacobian,h=h_, us=us, ys=ys, E=E_, Qd=Qd, Rd=Rd, Cd=Cd_) x_hats = np.array([time_step["x_hat"] for time_step in time_steps]).T time = np.array([time_step["time"] for time_step in time_steps]).T Ks = np.array([time_step["K"] for time_step in time_steps]).T # + tags=["remove-input"] n=len(P_prd) fig,axes=plt.subplots(nrows=n) ax=axes[0] df.plot(y='psi', ax=ax, label="Measured", zorder=-1) df['-delta']=-df['delta'] df.plot(y='-delta', ax=ax, label='$-\delta$', zorder=-10) ax.plot(time, x_hats[0, :], "-", label="kalman", zorder=10) ax.set_ylabel('$\Psi$') ax.legend() ax=axes[1] 
ax.plot(time, x_hats[1, :], "-", label="kalman") ax.set_ylabel('$r$') ax.legend();
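# The `extended_kalman_filter` imported from `src.extended_kalman_filter` is not shown in
# this notebook. For reference, a minimal NumPy sketch of one corrector/predictor iteration,
# consistent with the quantities used above (Cd, Ed, Qd, Rd, lambda_f, lambda_jacobian, h) and
# with the ExEKF.m structure cited earlier, could look like the function below. The project's
# actual implementation may differ, for example in how the process noise is discretized.

# +
def ekf_iteration(x_prd, P_prd, u, y, lambda_f, lambda_jacobian, h, Cd, Ed, Qd, Rd):
    n = len(x_prd)
    # Corrector: Kalman gain and measurement update
    S = Cd @ P_prd @ Cd.T + Rd                                   # innovation covariance (1x1 here)
    K = P_prd @ Cd.T @ np.linalg.inv(np.atleast_2d(S))           # Kalman gain
    IKC = np.eye(n) - K @ Cd
    eps = np.atleast_2d(y) - Cd @ x_prd                          # innovation
    x_hat = x_prd + K @ eps
    P_hat = IKC @ P_prd @ IKC.T + K @ np.atleast_2d(Rd) @ K.T    # Joseph form for numerical robustness
    # Predictor: propagate estimate and covariance one step with the Nomoto model
    Ad = lambda_jacobian(x_hat.flatten(), u)                     # discrete-time transition Jacobian
    x_prd_next = x_hat + h * lambda_f(x_hat.flatten(), u)
    P_prd_next = Ad @ P_hat @ Ad.T + Ed @ Qd @ Ed.T
    return x_hat, x_prd_next, P_prd_next
# -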
notebooks/15.40_EKF_nomoto.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Deep Neural Network [Keras] # --- # - Author: <NAME> # - GitHub: [github.com/diegoinacio](https://github.com/diegoinacio) # - Notebook: [neural_network_deep_Keras.ipynb](https://github.com/diegoinacio/machine-learning-notebooks/blob/master/Deep-Learning-Models/neural_network_deep_Keras.ipynb) # --- # Implementation of *Deep Neural Network* using Keras library. # + # %matplotlib inline import matplotlib import matplotlib.pyplot as plt import numpy as np from PIL import Image import tensorflow as tf # - plt.rcParams['figure.figsize'] = (16, 8) # + n1, n2 = 128, 128 # Read Image x = Image.open('sourceimages/mandril.png') # Rescale image to a lower resolution x = x.resize((n1, n2), Image.ANTIALIAS) x = np.asarray(x)/255 n1, n2, c = x.shape ### split channels ### r, g, b = x[:,:,0], x[:,:,1], x[:,:,2] # - # Transform data and produce X_train Y_train = np.array([r.ravel(), g.ravel(), b.ravel()]).T t, s = np.mgrid[0:n1, 0:n2] s = (s - s.mean())/s.std() t = (t - t.mean())/t.std() # X_train is the normalized spatial coordinates X_train = np.array([s.ravel(), t.ravel()], dtype=np.float32).T # + fig, [axA, axB] = plt.subplots(1, 2, figsize=(20, 10)) st = np.stack([s, t, s*0], axis=2) st = (st - st.min())/(st.max() - st.min()) st[:,:,2] = 0; axA.imshow(st); axA.axis('off') axA.text(5, 10, f'({s.min():.3f}, {t.min():.3f})', color='white', size=18) axA.text(86, 10, f'({s.max():.3f}, {t.min():.3f})', color='white', size=18) axA.text(5, 120, f'({s.min():.3f}, {t.max():.3f})', color='white', size=18) axA.text(86, 120, f'({s.max():.3f}, {t.max():.3f})', color='white', size=18) axA.set_title('X_Train') axB.imshow(x) axB.set_title('Y_Train') plt.show() # + # Produce X_test to another scale # Upscale the image approximation N1 = N2 = 512 t, s = np.mgrid[0:N1, 0:N2] s = (s - s.mean())/s.std() t = (t - t.mean())/t.std() X_test = np.array([s.ravel(), t.ravel()], dtype=np.float32).T print('X_train:', X_train.shape) print('Y_train:', Y_train.shape) print('X_test:', X_test.shape) # - # ## Deep Neural Network # --- # ![perceptron](sourceimages/neural_network_deep.png "Deep Neural Network") # + EPOCHS = 100 # epochs # List of number of neurons for each hidden layer NEURONS = [32, 64, 128, 256, 512, 1024] # model # Input (2): x and y coordinates # Output (3): RGB model = tf.keras.Sequential([ tf.keras.layers.Input(2), *[tf.keras.layers.Dense(n, activation='relu') for n in NEURONS], tf.keras.layers.Dense(3, activation='sigmoid') ]) model.compile( loss='mean_squared_error', optimizer='adam', metrics=['accuracy'] ) # loss and accuracy storage loss_plot = []; accA_plot = [] for epoch in range(EPOCHS + 1): model.fit(X_train, Y_train, epochs=2, verbose=0) loss_plot += [e*100 for e in model.history.history['loss']] accA_plot += [e*100 for e in model.history.history['accuracy']] if (not epoch % 10) and (epoch != 0): print(f'epoch: {epoch:04d} | loss: {loss_plot[-1]:.3f} | accuracy: {accA_plot[-1]:06.2f} %') # + fig, [axA, axB] = plt.subplots(2, 1, sharex=True) axA.plot(loss_plot) axA.set_ylabel('loss') axB.plot(accA_plot) axB.set_ylabel('accuracy') plt.xlabel('epochs') plt.show() # + fig, [axA, axB] = plt.subplots(1, 2, figsize=(20, 10)) axA.imshow(x) axA.set_title(r'$Y_{train}$', size=20) # Predict using X_train (128x128x3) Y_predA = model.predict(X_train) Y_predA = Y_predA.reshape(n1, n2, c) 
axB.imshow(Y_predA) axB.set_title(r'$\hat{Y}_{train}$', size=20) plt.show() # + fig, [axA, axB] = plt.subplots(1, 2, figsize=(20, 10)) axA.imshow(x) axA.set_title(r'$Y_{train}$', size=20) # Predict using X_test (512x512x3) Y_predB = model.predict(X_test) Y_predB = Y_predB.reshape(N1, N2, c) axB.imshow(Y_predB) axB.set_title(r'$\hat{Y}_{test}$', size=20) plt.show()
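# The coordinate normalization above is written out twice (once for the 128x128 training grid
# and once for the 512x512 test grid). As a small optional refactor, shown only as a sketch,
# the same logic can be wrapped in a helper and reused for any resolution:

# +
def coord_grid(height, width):
    # Standardized (s, t) coordinates, matching the normalization used for X_train / X_test
    t, s = np.mgrid[0:height, 0:width]
    s = (s - s.mean()) / s.std()
    t = (t - t.mean()) / t.std()
    return np.array([s.ravel(), t.ravel()], dtype=np.float32).T

# e.g. render an intermediate 256x256 approximation with the trained model
Y_pred_256 = model.predict(coord_grid(256, 256)).reshape(256, 256, c)
plt.imshow(Y_pred_256); plt.axis('off'); plt.show()
# -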
Deep-Learning-Models/neural_network_deep_Keras.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Building a Simple Chatbot from Scratch in Python (using NLTK) # # ![Alt text](https://cdn-images-1.medium.com/max/800/1*pPcVfZ7i-gLMabUol3zezA.gif) # # History of chatbots dates back to 1966 when a computer program called ELIZA was invented by Weizenbaum. It imitated the language of a psychotherapist from only 200 lines of code. You can still converse with it here: [Eliza](http://psych.fullerton.edu/mbirnbaum/psych101/Eliza.htm?utm_source=ubisend.com&utm_medium=blog-link&utm_campaign=ubisend). # # On similar lines let's create a very basic chatbot utlising the Python's NLTK library.It's a very simple bot with hardly any cognitive skills,but still a good way to get into NLP and get to know about chatbots. # # For detailed analysis, please see the accompanying blog titled:**[Building a Simple Chatbot in Python (using NLTK](https://medium.com/analytics-vidhya/building-a-simple-chatbot-in-python-using-nltk-7c8c8215ac6e) # # ## NLP # NLP is a way for computers to analyze, understand, and derive meaning from human language in a smart and useful way. By utilizing NLP, developers can organize and structure knowledge to perform tasks such as automatic summarization, translation, named entity recognition, relationship extraction, sentiment analysis, speech recognition, and topic segmentation. # ## Import necessary libraries import io import random import string # to process standard python strings import warnings import numpy as np from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import cosine_similarity import warnings warnings.filterwarnings('ignore') # ## Downloading and installing NLTK # NLTK(Natural Language Toolkit) is a leading platform for building Python programs to work with human language data. It provides easy-to-use interfaces to over 50 corpora and lexical resources such as WordNet, along with a suite of text processing libraries for classification, tokenization, stemming, tagging, parsing, and semantic reasoning, wrappers for industrial-strength NLP libraries. # # [Natural Language Processing with Python](http://www.nltk.org/book/) provides a practical introduction to programming for language processing. # # For platform-specific instructions, read [here](https://www.nltk.org/install.html) # # pip install nltk # ### Installing NLTK Packages # # # import nltk from nltk.stem import WordNetLemmatizer nltk.download('popular', quiet=True) # for downloading packages #nltk.download('punkt') # first-time use only #nltk.download('wordnet') # first-time use only # ## Reading in the corpus # # For our example,we will be using the Wikipedia page for chatbots as our corpus. Copy the contents from the page and place it in a text file named ‘chatbot.txt’. However, you can use any corpus of your choice. f=open('chatbot.txt','r',errors = 'ignore') raw=f.read() raw = raw.lower()# converts to lowercase # # The main issue with text data is that it is all in text format (strings). However, the Machine learning algorithms need some sort of numerical feature vector in order to perform the task. So before we start with any NLP project we need to pre-process it to make it ideal for working. 
Basic text pre-processing includes: # # * Converting the entire text into **uppercase** or **lowercase**, so that the algorithm does not treat the same words in different cases as different # # * **Tokenization**: Tokenization is just the term used to describe the process of converting the normal text strings into a list of tokens i.e words that we actually want. Sentence tokenizer can be used to find the list of sentences and Word tokenizer can be used to find the list of words in strings. # # _The NLTK data package includes a pre-trained Punkt tokenizer for English._ # # * Removing **Noise** i.e everything that isn’t in a standard number or letter. # * Removing the **Stop words**. Sometimes, some extremely common words which would appear to be of little value in helping select documents matching a user need are excluded from the vocabulary entirely. These words are called stop words # * **Stemming**: Stemming is the process of reducing inflected (or sometimes derived) words to their stem, base or root form — generally a written word form. Example if we were to stem the following words: “Stems”, “Stemming”, “Stemmed”, “and Stemtization”, the result would be a single word “stem”. # * **Lemmatization**: A slight variant of stemming is lemmatization. The major difference between these is, that, stemming can often create non-existent words, whereas lemmas are actual words. So, your root stem, meaning the word you end up with, is not something you can just look up in a dictionary, but you can look up a lemma. Examples of Lemmatization are that “run” is a base form for words like “running” or “ran” or that the word “better” and “good” are in the same lemma so they are considered the same. # # # ## Tokenisation sent_tokens = nltk.sent_tokenize(raw)# converts to list of sentences word_tokens = nltk.word_tokenize(raw)# converts to list of words # ## Preprocessing # # We shall now define a function called LemTokens which will take as input the tokens and return normalized tokens. # + lemmer = nltk.stem.WordNetLemmatizer() #WordNet is a semantically-oriented dictionary of English included in NLTK. def LemTokens(tokens): return [lemmer.lemmatize(token) for token in tokens] remove_punct_dict = dict((ord(punct), None) for punct in string.punctuation) def LemNormalize(text): return LemTokens(nltk.word_tokenize(text.lower().translate(remove_punct_dict))) # - # ## Keyword matching # # Next, we shall define a function for a greeting by the bot i.e if a user’s input is a greeting, the bot shall return a greeting response.ELIZA uses a simple keyword matching for greetings. We will utilize the same concept here. GREETING_INPUTS = ("hello", "hi", "greetings", "sup", "what's up","hey",) GREETING_RESPONSES = ["hi", "hey", "*nods*", "hi there", "hello", "I am glad! You are talking to me"] def greeting(sentence): for word in sentence.split(): if word.lower() in GREETING_INPUTS: return random.choice(GREETING_RESPONSES) # ## Generating Response # # ### Bag of Words # After the initial preprocessing phase, we need to transform text into a meaningful vector (or array) of numbers. The bag-of-words is a representation of text that describes the occurrence of words within a document. It involves two things: # # * A vocabulary of known words. # # * A measure of the presence of known words. # # Why is it is called a “bag” of words? 
That is because any information about the order or structure of words in the document is discarded and the model is only **concerned with whether the known words occur in the document, not where they occur in the document.** # # The intuition behind the Bag of Words is that documents are similar if they have similar content. Also, we can learn something about the meaning of the document from its content alone. # # For example, if our dictionary contains the words {Learning, is, the, not, great}, and we want to vectorize the text “Learning is great”, we would have the following vector: (1, 1, 0, 0, 1). # # # ### TF-IDF Approach # A problem with the Bag of Words approach is that highly frequent words start to dominate in the document (e.g. larger score), but may not contain as much “informational content”. Also, it will give more weight to longer documents than shorter documents. # # One approach is to rescale the frequency of words by how often they appear in all documents so that the scores for frequent words like “the” that are also frequent across all documents are penalized. This approach to scoring is called Term Frequency-Inverse Document Frequency, or TF-IDF for short, where: # # **Term Frequency: is a scoring of the frequency of the word in the current document.** # # ``` # TF = (Number of times term t appears in a document)/(Number of terms in the document) # ``` # # **Inverse Document Frequency: is a scoring of how rare the word is across documents.** # # ``` # IDF = 1+log(N/n), where, N is the number of documents and n is the number of documents a term t has appeared in. # ``` # ### Cosine Similarity # # Tf-idf weight is a weight often used in information retrieval and text mining. This weight is a statistical measure used to evaluate how important a word is to a document in a collection or corpus # # ``` # Cosine Similarity (d1, d2) = Dot product(d1, d2) / ||d1|| * ||d2|| # ``` # where d1,d2 are two non zero vectors. # # # To generate a response from our bot for input questions, the concept of document similarity will be used. We define a function response which searches the user’s utterance for one or more known keywords and returns one of several possible responses. If it doesn’t find the input matching any of the keywords, it returns a response:” I am sorry! I don’t understand you” # + def response(user_response): robo_response='' sent_tokens.append(user_response) TfidfVec = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english') tfidf = TfidfVec.fit_transform(sent_tokens) vals = cosine_similarity(tfidf[-1], tfidf) idx=vals.argsort()[0][-2] flat = vals.flatten() flat.sort() req_tfidf = flat[-2] if(req_tfidf==0): robo_response=robo_response+"I am sorry! I don't understand you" return robo_response else: robo_response = robo_response+sent_tokens[idx] return robo_response # - # Finally, we will feed the lines that we want our bot to say while starting and ending a conversation depending upon user’s input. flag=True print("ROBO: My name is Robo. I will answer your queries about Chatbots. If you want to exit, type Bye!") while(flag==True): user_response = input() user_response=user_response.lower() if(user_response!='bye'): if(user_response=='thanks' or user_response=='thank you' ): flag=False print("ROBO: You are welcome..") else: if(greeting(user_response)!=None): print("ROBO: "+greeting(user_response)) else: print("ROBO: ",end="") print(response(user_response)) sent_tokens.remove(user_response) else: flag=False print("ROBO: Bye! take care..")
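# To make the TF-IDF and cosine-similarity mechanics described above concrete, here is a tiny
# standalone example on three toy sentences (an illustration only; it does not use the chatbot corpus):

# +
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

toy_docs = ["Learning is great",
            "Learning is not great",
            "Chatbots derive meaning from human language"]

toy_vec = TfidfVectorizer()
toy_tfidf = toy_vec.fit_transform(toy_docs)          # 3 x vocabulary_size sparse matrix

print(sorted(toy_vec.vocabulary_))                   # the learned vocabulary
print(cosine_similarity(toy_tfidf[0], toy_tfidf))    # document 0 compared with all documents
# The two "Learning ..." sentences share most of their terms and get a high similarity,
# while the unrelated third sentence scores (near) zero.
# -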
src/chatbot/Chatbot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/ChihabEddine98/DL_course/blob/main/VGG19_CIFAR10_transfer_learning.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/"} id="Q3uhG7xSoFIq" outputId="1ee31075-d9a1-45d1-f9d4-afc8f70ac60a" import tensorflow as tf import numpy as np import matplotlib.pyplot as plt model = tf.keras.applications.vgg16.VGG16(include_top=False, classes=10, input_shape = (32,32,3)) (X_train, y_train), (X_test, y_test) = tf.keras.datasets.cifar10.load_data() X_train = tf.keras.applications.vgg16.preprocess_input(X_train) X_test = tf.keras.applications.vgg16.preprocess_input(X_test) y_train = tf.keras.utils.to_categorical(y_train) y_test = tf.keras.utils.to_categorical(y_test) model.trainable = False input_layer = model.input x = model(input_layer) x = tf.keras.layers.Flatten()(x) x = tf.keras.layers.Dense(256, activation = 'relu')(x) x = tf.keras.layers.Dense(10, activation = 'softmax')(x) new_model = tf.keras.models.Model(inputs = input_layer, outputs = x) new_model.summary() # + colab={"base_uri": "https://localhost:8080/"} id="8KU3mKlCodXF" outputId="40e9723b-643c-42b3-f4ec-9f24e0929b23" new_model.compile(loss ='categorical_crossentropy', optimizer = 'adam', metrics = ['acc']) history = new_model.fit(X_train, y_train, validation_split = 0.1, epochs = 5) new_model.evaluate(X_test, y_test) # + id="0_eHwOWQo0oM"
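# A common next step after training the frozen base above, shown here only as a hedged sketch
# that is not part of the original notebook, is fine-tuning: unfreeze the last VGG16
# convolutional block and continue training with a much smaller learning rate so the
# pretrained weights are not destroyed. Layer names such as 'block5_conv1' follow the
# standard Keras VGG16 naming.
model.trainable = True
for layer in model.layers:
    layer.trainable = layer.name.startswith('block5')   # only the top block stays trainable

new_model.compile(loss='categorical_crossentropy',
                  optimizer=tf.keras.optimizers.Adam(learning_rate=1e-5),
                  metrics=['acc'])
history_ft = new_model.fit(X_train, y_train, validation_split=0.1, epochs=3)
new_model.evaluate(X_test, y_test)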
lec6_transfer_learning/VGG19_CIFAR10_transfer_learning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import sympy as sym from functools import reduce from sympy.matrices import Matrix, MatrixSymbol sym.init_printing() y = MatrixSymbol('y', 1, 1) x = MatrixSymbol('x', 1, 3) #x = Matrix([[1,2,3]]) theta = Matrix([[1],[2],[3]]) prediction = x * theta loss = (prediction - y)**2 loss sym.diff(loss, x)
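# Independent of the symbolic result above, the expected gradient d/dx (x·theta - y)^2 =
# 2 (x·theta - y) theta^T can be checked numerically with finite differences (a small
# self-contained sanity check added as a sketch):

# +
import numpy as np

theta_n = np.array([1.0, 2.0, 3.0])
x_n = np.array([1.0, 2.0, 3.0])
y_n = 10.0

def loss_n(x_vec):
    return (x_vec @ theta_n - y_n) ** 2

analytic = 2 * (x_n @ theta_n - y_n) * theta_n       # -> [ 8., 16., 24.]
eps = 1e-6
numeric = np.array([(loss_n(x_n + eps * e) - loss_n(x_n - eps * e)) / (2 * eps)
                    for e in np.eye(3)])
print(analytic, numeric)                             # the two should agree to ~1e-6
# -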
smoothness-writeup/notebooks/Matrix Test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd

stocks = pd.read_csv('http://bit.ly/smallstocks')

stocks

stocks.index

# Creating a groupby
stocks.groupby('Symbol').Close.mean()

# Creating a MultiIndex
s = stocks.groupby(['Symbol', 'Date']).Close.mean()
s

# Note that this MultiIndex has two levels, so it can be turned into a DataFrame
s.index

# Building the DataFrame from the two-level MultiIndex
s.unstack()

# Another way to get the same DataFrame
df = stocks.pivot_table(values='Close', index = 'Symbol', columns='Date')
df

# Slicing a Series with a MultiIndex
s.loc['AAPL']

# It works much like slicing a DataFrame
s.loc['AAPL', '2016-10-04']

s.loc[:, '2016-10-04']

# Creating a MultiIndex on a DataFrame
stocks.set_index(['Symbol', 'Date'], inplace=True)
stocks

# To keep it organized, just sort the index
stocks.sort_index(inplace=True)
stocks

# Slicing the DataFrame
stocks.loc['AAPL']

# In the index position, the index values are passed as a tuple
stocks.loc[('AAPL', '2016-10-04'), :]

# To select multiple index values, pass a list in the call
stocks.loc[(['AAPL', 'MSFT'], '2016-10-04'), :]

# To select every value of one index level, use slice(None)
stocks.loc[(slice(None), ['2016-10-04', '2016-10-03']), 'Close']
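# An equivalent and often more readable spelling of the slice(None) selection above uses
# pd.IndexSlice (shown here as a small illustration):

idx = pd.IndexSlice
stocks.loc[idx[:, ['2016-10-04', '2016-10-03']], 'Close']   # same rows as the slice(None) version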
data_science/pandas/MultiIndex.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: python3-azureml # kernelspec: # display_name: Python 3.6 - AzureML # language: python # name: python3-azureml # --- # ## Scikit-Learn PCA # ### Using BREASTCANCER_VIEW from DWC. This view has 569 records # + [markdown] nteract={"transient": {"deleting": false}} # ## Install fedml_azure package # + gather={"logged": 1633631268533} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}} pip install fedml_azure-1.0.0-py3-none-any.whl --force-reinstall # - # ## Import the libraries needed in this notebook # + gather={"logged": 1633631268931} from fedml_azure import DwcAzureTrain # - # ## Set up # ### Creating a Training object and setting the workspace, compute target, and environment. # # Before running the below cell, ensure that you have a workspace and replace the subscription_id, resource_group, and workspace_name with your information. # # The whl file for the fedml_azure library must be passed to the pip_wheel_files key in the environment_args and to use scikit-learn, you must pass the name to conda_packages as well. # + gather={"logged": 1633631271595} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}} #creation of training object and creating workspace in constructor. training = DwcAzureTrain(workspace_args={"subscription_id": "cb97564e-cea8-45a4-9c5c-a3357e8f7ee4", "resource_group": "Sample2_AzureML_Resource", "workspace_name": "Sample2_AzureML_Worskpace" }, environment_type='CondaPackageEnvironment', environment_args={'name':'test-env-pca', 'conda_packages':['scikit-learn'],'pip_wheel_files':['fedml_azure-1.0.0-py3-none-any.whl']}, experiment_args={'name':'test-2'}, compute_type='AmlComputeCluster', compute_args={'vm_size':'Standard_D12_v2', 'vm_priority':'lowpriority', 'compute_name':'cpu-clu-pca', 'min_nodes':0, 'max_nodes':1, 'idle_seconds_before_scaledown':1700 }) # - # ### Here we are updating the experiment for this training job. This is optional. # + gather={"logged": 1633631271760} #self,script_directory,experiment=None,experiment_args=None,compute_type=None,compute=None,compute_args=None,environment=None,environment_type=None,environment_args=None,is_compute_create_required=False training.update_experiment(experiment_args={'name': 'test-1'}) # - # ### Then, we need to generate the run config. This is needed to package the configuration specified so we can submit a job for training. # # Before running the following cell, you should have a config.json file with the specified values to allow you to access to DWC. Provide this file path to config_file_path in the below cell. # # You should also have the follow view BREASTCANCER_VIEW created in your DWC. 
To gather this data, please refer to https://www.kaggle.com/uciml/breast-cancer-wisconsin-data # # https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.scriptrunconfig?view=azure-ml-py # + gather={"logged": 1633631271878} #generating the run config src=training.generate_run_config(config_file_path='dwc_configs/config.json', config_args={ 'source_directory':'Scikit-Learn-Dimensionality-Reduction', 'script':'pca_script.py', 'arguments':['--model_file_name','regression.pkl', '--table_name', 'BREASTCANCER_VIEW', '--num_components', '3'] } ) # - # ### Submitting the job for training # + gather={"logged": 1633631608500} #submitting the training run run=training.submit_run(src) # - # ## Register the model for deployment # + gather={"logged": 1633541164478} model = training.register_model(run=run, model_args={'model_name':'sklearn_pca_model', "model_path":'outputs/regression.pkl'}, resource_config_args={"cpu":1, "memory_in_gb":0.5}, is_sklearn_model=True ) print('Name:', model.name) print('Version:', model.version)
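# The training entry point `pca_script.py` inside 'Scikit-Learn-Dimensionality-Reduction' is
# not shown in this notebook. Purely as an illustrative sketch (kept as a string so it is not
# executed here), a script consistent with the arguments passed to generate_run_config above
# might be structured as follows. How the BREASTCANCER_VIEW data is actually read from DWC is
# project-specific, so `load_dwc_view` below is a hypothetical placeholder, not a real
# fedml_azure call.

# +
example_pca_script = r"""
import argparse
import pickle
from sklearn.decomposition import PCA

parser = argparse.ArgumentParser()
parser.add_argument('--model_file_name', type=str)
parser.add_argument('--table_name', type=str)
parser.add_argument('--num_components', type=int)
args = parser.parse_args()

df = load_dwc_view(args.table_name)      # hypothetical helper returning the view as a DataFrame
X = df.select_dtypes('number')           # keep numeric columns only

pca = PCA(n_components=args.num_components).fit(X)

# 'outputs/' is the run folder that register_model reads the model from (see model_path above)
with open('outputs/' + args.model_file_name, 'wb') as f:
    pickle.dump(pca, f)
"""
# -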
Azure/sample-notebooks/DimensionalityReduction/PCA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import json bquad_path = r'./bquad.json' with open(bquad_path, 'r', encoding='UTF-8') as f: bquad = json.load(f) # - bquad.keys() # ## Bquad contains around 13k QA pairs from 6000+ articles of 1000+ titles. # + data = bquad['data'] print(f'Titles: {len(data)}') paras, qas = 0, 0 for i in range(len(data)): paras+= len(data[i]['paragraphs']) for j in range(len(data[i]['paragraphs'])): qas+= len(data[i]['paragraphs'][j]['qas']) print(f'Paragraphs: {paras}') print(f'QA Pairs: {qas}') # - titles = [data[i]['title'] for i in range(len(data))] print(titles) data[-1].keys() data[-1]['paragraphs'][-1].keys() data[261]['title'] data[261]['paragraphs'][0].keys() data[261]['paragraphs'] data[1630]['paragraphs'][0]['qas'] # + ids = [] for i in range(len(data)): paras+= len(data[i]['paragraphs']) for j in range(len(data[i]['paragraphs'])): for k in range(len(data[i]['paragraphs'][j]['qas'])): ids.append(data[i]['paragraphs'][j]['qas'][k]['id']) print(f'{len(ids)} unique ids') ids[:10] # -
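# A small helper (a sketch using only the keys inspected above) to flatten the nested
# structure into one row per question-answer pair, which also makes it easy to verify that
# the ids really are unique:

# +
import pandas as pd

rows = []
for article in data:
    for p_idx, para in enumerate(article['paragraphs']):
        for qa in para['qas']:
            rows.append({'title': article['title'], 'paragraph': p_idx, 'qa_id': qa['id']})

flat = pd.DataFrame(rows)
print(flat.shape)                # one row per QA pair
print(flat['qa_id'].is_unique)   # True only if no id is repeated
flat.head()
# -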
explore_bquad.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 线性回归 --- 从0开始 # # 虽然强大的深度学习框架可以减少很多重复性工作,但如果你过于依赖它提供的便利抽象,那么你可能不会很容易地理解到底深度学习是如何工作的。所以我们的第一个教程是如何只利用ndarray和autograd来实现一个线性回归的训练。 # # ## 线性回归 # # 给定一个数据点集合`X`和对应的目标值`y`,线性模型的目标是找一根线,其由向量`w`和位移`b`组成,来最好地近似每个样本`X[i]`和`y[i]`。用数学符号来表示就是我们将学`w`和`b`来预测, # # $$\boldsymbol{\hat{y}} = X \boldsymbol{w} + b$$ # # 并最小化所有数据点上的平方误差 # # $$\sum_{i=1}^n (\hat{y}_i-y_i)^2.$$ # # 你可能会对我们把古老的线性回归作为深度学习的一个样例表示很奇怪。实际上线性模型是最简单但也可能是最有用的神经网络。一个神经网络就是一个由节点(神经元)和有向边组成的集合。我们一般把一些节点组成层,每一层使用下一层的节点作为输入,并输出给上面层使用。为了计算一个节点值,我们将输入节点值做加权和,然后再加上一个激活函数。对于线性回归而言,它是一个两层神经网络,其中第一层是(下图橙色点)输入,每个节点对应输入数据点的一个维度,第二层是单输出节点(下图绿色点),它使用身份函数($f(x)=x$)作为激活函数。 # # ![](../img/simple-net-linear.png) # # ## 创建数据集 # # 这里我们使用一个人工数据集来把事情弄简单些,因为这样我们将知道真实的模型是什么样的。具体来说我们使用如下方法来生成数据 # # `y[i] = 2 * X[i][0] - 3.4 * X[i][1] + 4.2 + noise` # # 这里噪音服从均值0和标准差为0.01的正态分布。 # + attributes={"classes": [], "id": "", "n": "2"} from mxnet import ndarray as nd from mxnet import autograd num_inputs = 2 num_examples = 1000 true_w = [2, -3.4] true_b = 4.2 X = nd.random_normal(shape=(num_examples, num_inputs)) y = true_w[0] * X[:, 0] + true_w[1] * X[:, 1] + true_b y += .01 * nd.random_normal(shape=y.shape) # - # 注意到`X`的每一行是一个长度为2的向量,而`y`的每一行是一个长度为1的向量(标量)。 # + attributes={"classes": [], "id": "", "n": "3"} print(X[0], y[0]) # - # ## 数据读取 # # 当我们开始训练神经网络的时候,我们需要不断读取数据块。这里我们定义一个函数它每次返回`batch_size`个随机的样本和对应的目标。我们通过python的`yield`来构造一个迭代器。 # + attributes={"classes": [], "id": "", "n": "4"} import random batch_size = 10 def data_iter(): # 产生一个随机索引 idx = list(range(num_examples)) random.shuffle(idx) for i in range(0, num_examples, batch_size): j = nd.array(idx[i:min(i+batch_size,num_examples)]) yield nd.take(X, j), nd.take(y, j) # - # 下面代码读取第一个随机数据块 # + attributes={"classes": [], "id": "", "n": "5"} for data, label in data_iter(): print(data, label) break # - # ## 初始化模型参数 # # 下面我们随机初始化模型参数 # + attributes={"classes": [], "id": "", "n": "6"} w = nd.random_normal(shape=(num_inputs, 1)) b = nd.zeros((1,)) params = [w, b] # - # 之后训练时我们需要对这些参数求导来更新它们的值,所以我们需要创建它们的梯度。 # + attributes={"classes": [], "id": "", "n": "7"} for param in params: param.attach_grad() # - # ## 定义模型 # # 线性模型就是将输入和模型做乘法再加上偏移: # + attributes={"classes": [], "id": "", "n": "8"} def net(X): return nd.dot(X, w) + b # - # ## 损失函数 # # 我们使用常见的平方误差来衡量预测目标和真实目标之间的差距。 # + attributes={"classes": [], "id": "", "n": "9"} def square_loss(yhat, y): # 注意这里我们把y变形成yhat的形状来避免自动广播 return (yhat - y.reshape(yhat.shape)) ** 2 # - # ## 优化 # # 虽然线性回归有显试解,但绝大部分模型并没有。所以我们这里通过随机梯度下降来求解。每一步,我们将模型参数沿着梯度的反方向走特定距离,这个距离一般叫学习率。(我们会之后一直使用这个函数,我们将其保存在[utils.py](../utils.py)。) # + attributes={"classes": [], "id": "", "n": "10"} def SGD(params, lr): for param in params: param[:] = param - lr * param.grad # - # ## 训练 # # 现在我们可以开始训练了。训练通常需要迭代数据数次,一次迭代里,我们每次随机读取固定数个数据点,计算梯度并更新模型参数。 # + attributes={"classes": [], "id": "", "n": "11"} epochs = 5 learning_rate = .001 for e in range(epochs): total_loss = 0 for data, label in data_iter(): with autograd.record(): output = net(data) loss = square_loss(output, label) loss.backward() SGD(params, learning_rate) total_loss += nd.sum(loss).asscalar() print("Epoch %d, average loss: %f" % (e, total_loss/num_examples)) # - # 训练完成后我们可以比较学到的参数和真实参数 # + attributes={"classes": [], "id": "", "n": "12"} true_w, w # + attributes={"classes": [], "id": "", "n": "13"} true_b, b # - # ## 结论 # # 
我们现在看到仅仅使用NDArray和autograd我们可以很容易地实现一个模型。 # # ## 练习 # # 尝试用不同的学习率查看误差下降速度(收敛率) # # **吐槽和讨论欢迎点**[这里](https://discuss.gluon.ai/t/topic/743)
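# A possible starting point for the exercise above (a sketch, reusing the data_iter / net /
# square_loss / SGD definitions from this notebook): re-initialize the parameters and run one
# epoch per candidate learning rate, then compare the average losses. With a learning rate
# that is too large the loss is expected to diverge (become nan).

# +
for lr in [1, .1, .01, .001]:
    w[:] = nd.random_normal(shape=w.shape)
    b[:] = 0
    total_loss = 0
    for data, label in data_iter():
        with autograd.record():
            loss = square_loss(net(data), label)
        loss.backward()
        SGD(params, lr)
        total_loss += nd.sum(loss).asscalar()
    print("lr %g, average loss after one epoch: %f" % (lr, total_loss / num_examples))
# -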
chapter_supervised-learning/.ipynb_checkpoints/linear-regression-scratch-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Data Science Internship at Widhya # ## Mission: Movies Recommendation Using Python # #### Importing Required Libraries import pandas as pd from sklearn.feature_extraction.text import CountVectorizer from sklearn.metrics.pairwise import cosine_similarity # ### Microtask 1 : Data Collection # #### Reading dataset df = pd.read_csv('movie_dataset.csv') # #### Preview of dataset df.head() df.tail() # #### Dataset Information df.info() # #### Shape of dataset df.shape # #### Tagline of movie at 15th position df.loc[14, "tagline"] # ### Microtask 2 : Feature Selection and Data Cleaning # #### Selecting required features features = ['keywords', 'cast', 'genres', 'director'] for feature in features: df[feature] = df[feature].fillna('') # #### creating Function: 'combined_features' def combined_features(row): return row['keywords']+" "+row['cast']+" "+row['genres']+" "+row['director'] df["combined_features"] = df.apply(combined_features, axis =1) # #### preview after adding new feature df.head() # ### Microtask 3 : Find Similiraity # #### Vectorizing data cv=CountVectorizer() count_vector=cv.fit_transform(df['combined_features']) print("Count Vector:", count_vector.toarray()) # #### Identifying Similarities x=[] y=0 for i in count_vector: x.append([cosine_similarity(i,count_vector[0]),y]) y+=1 x[0] cosine_sim = cosine_similarity(count_vector) print(cosine_sim) # ### Microtask 4 : Recommendation based on a Movie # #### Here I have selected 'Batman' movie movie = "Batman" # #### Creating function for getting index position from title of movie def get_index_from_title(title): return df[df.title == title]["index"].values[0] movie_index = get_index_from_title(movie) movie_index # #### Here I have selected Index = 1359 index_movie=1359 # #### Creating function for getting title of movie from index position. def get_title_from_index(index): return df[df.index == index]["title"].values[0] movie_title = get_title_from_index(index_movie) movie_title # #### Getting Similar Movies based on similarity score similar_movies = list(enumerate(cosine_sim[movie_index])) similar_movies # #### Movies recommendations based on Similarity score after sorting similar_movies_after_sort = sorted(similar_movies, key=lambda x:x[1], reverse=True) similar_movies_after_sort # #### Identifying similiar Movies to selected movie # + print("Movies Similar to", movie, ":\n") i=0 for movie in similar_movies_after_sort: print(i + 1, get_title_from_index(movie[0])) i=i+1 if i>15: break # - # #### Creating a Similar Movies Identifier function def similar_movies_identifier(): movie=input('Please Enter Movie Title:') def get_index_from_title(title): return df[df.title == title]["index"].values[0] movie_index = get_index_from_title(movie) def get_title_from_index(index): return df[df.index == index]["title"].values[0] movie_title = get_title_from_index(index_movie) similar_movies = list(enumerate(cosine_sim[movie_index])) similar_movies_after_sort = sorted(similar_movies, key=lambda x:x[1], reverse=True) print("Top 10 similar movies to selected movie:", movie, "\n") i=0 for movie in similar_movies_after_sort: print(i + 1, get_title_from_index(movie[0])) i=i+1 if i>10: break # #### Similar movies Samples similar_movies_identifier() similar_movies_identifier() similar_movies_identifier() # ### Thank you :)
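# The ranking above can also be written directly with numpy.argsort on a row of the
# precomputed similarity matrix (a small alternative sketch to the enumerate/sort approach):

# +
import numpy as np

def top_k_similar(title, k=10):
    idx = get_index_from_title(title)
    order = np.argsort(cosine_sim[idx])[::-1]    # most similar first; position 0 is the movie itself
    return [get_title_from_index(i) for i in order[1:k + 1]]

top_k_similar("Batman", k=10)
# -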
Movies Recommendation using python/Movies Recommendation Using Python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load #import numpy as np # linear algebra #import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory #import os ''' for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) ''' # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # - import os workdir_path = '/kaggle/input/rsna-miccai-brain-tumor-radiogenomic-classification' os.chdir(workdir_path) # !ls import torch import torchvision import pandas as pd import tensorflow as tf from PIL import Image import numpy as np import collections import matplotlib.pyplot as plt # %matplotlib inline import re sorted(os.listdir('train/00000')) # Declaração de constantes TRAIN_FOLDER = 'train' TEST_FOLDER = 'test' mri_types = collections.namedtuple('mri_types', ['FLAIR', 'T1W', 'T1WCE', 'T2W']) MRI_TYPES = mri_types('FLAIR', 'T1w', 'T1wCE', 'T2w') # ### TODO-Criar bloco com funções acessórias def loader(folder, patient_id, mri): for files in os.listdir(os.path.join(folder, patient_id, mri)): print(patient_id, files) ''' Função para ler todas as imagens de cada paciente Estimativa de 17 horas para ler todos os arquivos. 
Necessário criar outra função def image_loader(folder:str): for dirpath, dirnames, filenames in os.walk(folder): for i in range(0, len(dirnames)): for patient, empty, photos in os.walk(os.path.join(dirpath, dirnames[i])): for files in photos: print('faltam', len(dirnames)-i, 'pacientes' ) print('faltam ', len(photos)-t, 'fotos na pasta', patient ) file_path = os.path.join(patient, files) try: yield file_path im.close() except ValueError as err: print(err) except TypeError as err: print (err) except AttributeError as err: print(err) ''' # ## TODO-Análise exploratória do dataset df_label = pd.read_csv("train_labels.csv", dtype={'BraTS21ID':str, 'MGMT_value':int}) df_label.MGMT_value.unique() df_label.describe() df_label.head() # + # Lista os pacientes que fazem parte do diretório de treinamento train_patients = [subdirs for subdirs in os.listdir(TRAIN_FOLDER)] print('Número de pacientes no diretório de treino', len(train_patients)) # Lista os pacientes que fazem parte do diretório de teste test_patients = [subdirs for subdirs in os.listdir(TEST_FOLDER)] print('Número de pacientes no diretório de teste', len(test_patients)) print('Total de pacientes', len(test_patients)+len(train_patients)) # - # ### Função de exclusão de casos com falha no dataset # Exclusão dos pacientes 00109, 00123, 00709 devido a falha do dataset patients_delete = ('00109', '00123', '00709') try: for patient in patients_delete: df_label = df_label[df_label.BraTS21ID != patient] train_patients.remove(patient) except Exception as err: print('erro: ', err) print('Número de pacientes no diretório de treino', len(train_patients)) def train_path(patient): for mri in MRI_TYPES: path = [TRAIN_FOLDER, patient, mri, sorted(os.listdir(os.path.join(TRAIN_FOLDER, patient, mri)), key=lambda file: int(re.sub('[^0-9]', '', file))) ] print(path) return path train_patients = sorted(train_patients) train_patients # Após ordenação dos pacientes e a ordenação correta dos arquivos de imagem, aparentemente o código abaixo funcionou mais rápido. image_path = list(map(train_path, train_patients)) # ## TODO - Ordenar caminhos das imagens para otimização. # Fazer através de dataframe ou alterando a linha anterior para loop for no lugar de map sorted(image_path, key=lambda x: x[2]) # + # Após verificar que as imagens não estavam ordenadas corretamente, fiz testes # até chegar na função lambda abaixo para ordenar de forma correta. # inserido na definição da função train_path() #sorted(image_path[0][3], key=lambda number: int(re.sub('[^0-9]', '', number))) # - #
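# The competition images are DICOM files, so a reader based on pydicom (not used above, added
# only as a sketch; pydicom is assumed to be available in the Kaggle environment) may be a
# more direct starting point than opening the files with PIL:

# +
import pydicom
import numpy as np

def load_series(patient, mri_type='FLAIR', folder=TRAIN_FOLDER):
    # Read one patient's series as a (n_slices, H, W) volume, using the same
    # numeric file ordering as train_path() above.
    series_dir = os.path.join(folder, patient, mri_type)
    files = sorted(os.listdir(series_dir), key=lambda f: int(re.sub('[^0-9]', '', f)))
    slices = [pydicom.dcmread(os.path.join(series_dir, f)).pixel_array for f in files]
    return np.stack(slices)

volume = load_series(train_patients[0])
print(volume.shape, volume.dtype)
# -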
superado/notebook1a3375ef89.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # keras_net_dev # # Developing the CNN in Keras/Theano. # + # A bit of setup # Usual imports import time import math import numpy as np import os import matplotlib.pyplot as plt # Notebook plotting magic # %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # For auto-reloading external modules # %load_ext autoreload # %autoreload 2 # My modules import generate_data as d def rel_error(x, y): """ Returns relative error """ return np.max(np.abs(x - y) / (np.maximum(1e-8. np.abs(x) + np.abs(y)))) # - import generate_data # My data function # Get some small dataset train, val, test = generate_data.get_data( datadir=os.path.abspath('../salami-audio/'), salamidir=os.path.abspath('../salami-data-public/'), outputdir=os.path.abspath('./bindata/'), n_songs_train=1, n_songs_val=1, n_songs_test=1, seed=None ) # Show our data dictionary print 'Training data:' for item in train: print '\t', item, ':\t', train[item] # + # For now, just load in all the data X_train = np.memmap( train['Xfile'], dtype='float32', mode='r', offset=0, shape=tuple(train['Xshape']) ) y_train = np.memmap( train['yfile'], dtype='float32', mode='r', offset=0, shape=tuple(train['yshape']) ) X_val = np.memmap( val['Xfile'], dtype='float32', mode='r', offset=0, shape=tuple(val['Xshape']) ) y_val = np.memmap( val['yfile'], dtype='float32', mode='r', offset=0, shape=tuple(val['yshape']) ) print "Train: ", X_train.shape, y_train.shape print "Val: ", X_val.shape, y_val.shape # + # Make it even smaller, for debugging X_train = X_train[512:1024] y_train = y_train[512:1024] X_val = X_val[64:128] y_val = y_val[64:128] print "Train: ", X_train.shape, y_train.shape print "Val: ", X_val.shape, y_val.shape # - # Is it well behaved? 
print "Inf values : ", np.any(np.isinf(X_train)) print "Nan values : ", np.any(np.isnan(X_train)) plt.rcParams['image.cmap'] = 'jet' plt.figure() for iPlot in xrange(1,5): plt.subplot(2,2,iPlot) samp = np.random.randint(0,X_train.shape[0]) plt.imshow(X_train[samp,0], origin="lower") plt.colorbar() plt.title("Segmentation rating (label): {0:1.5f}".format(y_train[samp,0])) plt.show() plt.plot(y_train) # + # Import keras import keras from keras.models import Sequential from keras.layers import Dense, Dropout, Activation, Flatten from keras.layers import Convolution2D, MaxPooling2D from keras.layers.normalization import BatchNormalization from keras.regularizers import l2 from keras.optimizers import SGD # VGG-like convnet, from Keras examples, http://keras.io/examples/ model= Sequential() reg_amount = 0.01 model.add(Convolution2D( 16, 3, 3, border_mode='valid', input_shape=(1, 128, 129), init='glorot_normal', W_regularizer=l2(reg_amount), b_regularizer=l2(reg_amount) )) model.add(Activation('relu')) model.add(BatchNormalization()) model.add(Convolution2D( 16, 3, 3, init='glorot_normal', W_regularizer=l2(reg_amount), b_regularizer=l2(reg_amount) )) model.add(Activation('relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Convolution2D( 16, 3, 3, border_mode='valid', init='glorot_normal', W_regularizer=l2(reg_amount), b_regularizer=l2(reg_amount) )) model.add(Activation('relu')) model.add(BatchNormalization()) model.add(Convolution2D( 16, 3, 3, init='glorot_normal', W_regularizer=l2(reg_amount), b_regularizer=l2(reg_amount) )) model.add(Activation('relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) # Note: Keras does automatic shape inference. model.add(Dense( 256, init='glorot_normal', W_regularizer=l2(reg_amount), b_regularizer=l2(reg_amount) )) model.add(Activation('relu')) model.add(BatchNormalization()) model.add(Dropout(0.5)) model.add(Dense( 1, init='glorot_normal', W_regularizer=l2(reg_amount), b_regularizer=l2(reg_amount) )) model.add(Activation('linear')) sgd = SGD(lr=1e-4, decay=1e-6, momentum=0.9, nesterov=True) model.compile(loss='msle', optimizer=sgd) # - # plot the model from keras.utils.visualize_util import plot plot(model, to_file='model.png') # + from keras.callbacks import ModelCheckpoint # Callback for model checkpointing checkpointer = ModelCheckpoint(filepath="./bindata/weights.hdf5", verbose=1, save_best_only=True) # Callback for loss history class LossHistory(keras.callbacks.Callback): def on_train_begin(self, logs={}): self.losses = [] def on_batch_end(self, batch, logs={}): self.losses.append(logs.get('loss')) # + history = LossHistory() model.reset_states() hist = model.fit( X_train, y_train, batch_size=32, nb_epoch=1, show_accuracy=True, verbose=1, callbacks=[checkpointer, history], validation_data=(X_val, y_val) ) # - for weights in model.get_weights(): print "====================================" print weights plt.plot(history.losses) import keras_net as kn # + kn.TRAIN_CAP = 256 hist = kn.main( num_epochs=1, n_songs_train=1, n_songs_val=1, n_songs_test=1, batch_size=256, learning_rate=0.0001, datadir='/home/tim/Projects/convnet-music-structure/salami-audio', salamidir='/home/tim/Projects/convnet-music-structure/salami-data-public', outputdir='/home/tim/Projects/convnet-music-structure/src/bindata', reg_amount=0.01 ) # - whos # Test! 
X_test = np.memmap( test['Xfile'], dtype='float32', mode='r', offset=0, shape=tuple(test['Xshape']) ) y_test = np.memmap( test['yfile'], dtype='float32', mode='r', offset=0, shape=tuple(test['yshape']) ) sid = test['sids'][0] # + TRAIN_CAP = None import threading class DataGenerator(object): ''' Generate minibatches from serialized data. ''' def __init__(self, datadict, batch_size=32, shuffle=False, seed=None): self.lock = threading.Lock() self.data = datadict self.batch_size = batch_size self.shuffle = shuffle self.seed = seed self = self.flow( datadict, batch_size=batch_size, shuffle=False, seed=None, save_to_dir=None, save_prefix="", save_format="jpeg") def next(self): # for python 2.x # Keep under lock only the mechainsem which advance the indexing of each batch # see # http://anandology.com/blog/using-iterators-and-generators/ with self.lock: index_array, current_index, current_batch_size = next(self.flow_generator) # The transformation of images is not under thread lock so it can be done in parallel offsetmul = self.data['Xshape'][1] * self.data['Xshape'][2] * self.data['Xshape'][3] x_path = os.path.abspath( os.path.join(self.data['datadir'], self.data['Xfile']) ) y_path = os.path.abspath( os.path.join(self.data['datadir'], self.data['yfile']) ) bX = np.memmap( x_path, dtype='float32', mode='r', shape=(current_batch_size, self.data['Xshape'][1], self.data['Xshape'][2], self.data['Xshape'][3]), offset=current_index*offsetmul ) bY = np.memmap( y_path, dtype='float32', mode='r', shape=(current_batch_size, 1), offset=current_index ) return bX, bY def flow(self, datadict, batch_size=32, shuffle=False, seed=None, save_to_dir=None, save_prefix="", save_format="jpeg"): assert datadict['Xshape'][0] == datadict['yshape'][0] self.save_to_dir = save_to_dir self.save_prefix = save_prefix self.save_format = save_format self.flow_generator = self._flow_index(datadict['Xshape'][0], batch_size, shuffle, seed) return self def _flow_index(self, N, batch_size=32, shuffle=False, seed=None): # Check cap if TRAIN_CAP: N = min(N, TRAIN_CAP) b = 0 total_b = 0 while 1: if b == 0: if seed is not None: np.random.seed(seed + total_b) if shuffle: index_array = np.random.permutation(N) else: index_array = np.arange(N) current_index = (b * batch_size) % N if N >= current_index + batch_size: current_batch_size = batch_size else: current_batch_size = N - current_index if current_batch_size == batch_size: b += 1 else: b = 0 total_b += 1 yield index_array[current_index: current_index + current_batch_size], current_index, current_batch_size # + batch_size = 32 # TEST MODEL ############################################################### test_batch_gen = DataGenerator( test, batch_size=batch_size, shuffle=False, seed=None ) n_batches = int(math.ceil(test['yshape'][0]/float(batch_size))) y_pred = np.zeros((n_batches*batch_size, 1)) y_true = np.zeros((n_batches*batch_size, 1)) i_start = 0 for iBatch in xrange(n_batches): bX, by = test_batch_gen.next() n_batch = bX.shape[0] y_pred[i_start:i_start+n_batch] = np.array(model.predict_on_batch(bX)) y_true[i_start:i_start+n_batch] = by[:] i_start += n_batch plt.figure(3) plt.plot(y_pred, label="Prediction") plt.plot(y_true, label="Ground truth") plt.grid() plt.legend() plt.savefig( os.path.abspath(os.path.join(outputdir, 'test.pdf')), bbox_inches='tight' ) plt.show() # -
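# +
# A short optional addition: summarize the test loop above with a single mean-squared error,
# and (if desired) restore the best validation weights saved by the ModelCheckpoint callback
# before re-running it. This assumes the architecture held in `model` is unchanged.
test_mse = np.mean((y_pred - y_true) ** 2)
print("Test MSE: {0:.5f}".format(test_mse))

model.load_weights('./bindata/weights.hdf5')   # best weights seen during training
# -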
src/keras_net_dev.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Chapter 1 : Getting Started # + [markdown] deletable=true editable=true # # vector addition the Python way. # # ### The first vector to be added contains the squares of 0 up to n. # ### The second vector contains the cubes of 0 up to n. # ### The program prints the last 2 elements of the sum and the elapsed time. # + deletable=true editable=true import sys from datetime import datetime import numpy as np # + deletable=true editable=true def numpysum(n): a = np.arange(n) ** 2 b = np.arange(n) ** 3 c = a + b return c # + deletable=true editable=true def pythonsum(n): a = list(range(n)) b = list(range(n)) c = [] for i in range(len(a)): a[i] = i ** 2 b[i] = i ** 3 c.append(a[i] + b[i]) return c # + [markdown] deletable=true editable=true # ## size is an integer that specifies the size of the vectors. # + deletable=true editable=true size = 10000 # + deletable=true editable=true start = datetime.now() c = pythonsum(size) delta = datetime.now() - start print("The last 2 elements of the sum", c[-2:]) print("PythonSum elapsed time in microseconds", delta.microseconds) start = datetime.now() c = numpysum(size) delta = datetime.now() - start print("The last 2 elements of the sum", c[-2:]) print("NumPySum elapsed time in microseconds", delta.microseconds) # + [markdown] deletable=true editable=true # # printing modules in numpy, scipy and pandas # + deletable=true editable=true import pkgutil as pu import pydoc import numpy as np import scipy as sp import pandas as pd import matplotlib as mpl print("NumPy version", np.__version__) print("SciPy version", sp.__version__) print("pandas version", pd.__version__) print("Matplotlib version", mpl.__version__) def clean(astr): s = astr # remove multiple spaces s = ' '.join(s.split()) s = s.replace('=','') return s def print_desc(prefix, pkg_path): for pkg in pu.iter_modules(path=pkg_path): name = prefix + "." + pkg[1] if pkg[2] == True: try: docstr = pydoc.plain(pydoc.render_doc(name)) docstr = clean(docstr) start = docstr.find("DESCRIPTION") docstr = docstr[start: start + 140] print(name, docstr) except: continue print("\n") print_desc("numpy", np.__path__) print("\n") print_desc("scipy", sp.__path__) print("\n") print_desc("pandas", pd.__path__) print("\n") print_desc("matplotlib", mpl.__path__) # + [markdown] deletable=true editable=true # # Matplotlib plots # + deletable=true editable=true from sklearn.datasets import load_iris from sklearn.datasets import load_boston from matplotlib import pyplot as plt # + deletable=true editable=true iris = load_iris() print(iris.DESCR) # + deletable=true editable=true data=iris.data plt.plot(data[:,0],data[:,1],".") plt.show() # + deletable=true editable=true boston = load_boston() print(boston.DESCR) # + deletable=true editable=true data=boston.data plt.plot(data[:,2],data[:,4],"+") plt.show()
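# For timings that are less sensitive to one-off noise than a single datetime delta, the
# standard-library timeit module can repeat each call several times (a small optional sketch):

# +
import timeit

size = 10000
for name, fn in [("pythonsum", pythonsum), ("numpysum", numpysum)]:
    best = min(timeit.repeat(lambda: fn(size), number=10, repeat=3))
    print(name, "best of 3 runs of 10 calls:", best, "seconds")
# -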
Chapter01/ch-01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/stephenbeckr/randomized-algorithm-class/blob/master/Demos/demo14_MonteCarlo_and_improvements.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="U5PNBZA4E3dt" # # Monte Carlo and variants # # Discusses Monte Carlo in the context of integration: # # - There are many ways to integrate functions # - Deterministic "quadrature" rules are fancy Riemann Sums, and will work *very well* if the integrand is smooth and in low dimensions. They break down when the integrand is highly oscillatory, and/or for high-dimensional integrals. Special versions targeted for oscillatory integrals is the subject of current applied math research. # - Monte Carlo integration interprets the integral as an expectation of a random variable, and draws samples to approximate the true mean with a sample mean. For a smooth function in low dimensions, Monte Carlo integration is a bad idea because classical quadrature rules are much, much better # - Monte Carlo is slow/inaccurate, but the inaccuracy is independent of the dimension of the integral. So for large enough dimensions, it makes sense (while in large dimensions, making a deterministic grid is impossible since it will be too large) # - Since Monte Carlo is useful sometimes, there are many known techniques to make it better. We examine two: # - **Quasi Monte Carlo**, which uses low-discrepancy sequences, and inherits some of the advantages and disadvantages from both Monte Carlo and grid/quadrature methods. Refs: # - <NAME> and <NAME>. [Digital nets and sequences: discrepancy theory and quasi-Monte Carlo integration](https://web.maths.unsw.edu.au/~josefdick/preprints/DP_book_preprint.pdf). Cambridge University Press, 2010 # - Art Owen's ["Monte Carlo Book: the Quasi-Monte Carlo parts"](https://artowen.su.domains/mc/qmcstuff.pdf) from [Monte Carlo theory, methods and examples (incomplete draft)](https://artowen.su.domains/mc/) by Art Owen # - [scipy.stats.qmc documentation](https://docs.scipy.org/doc/scipy/reference/stats.qmc.htm) which is quite useful, and according to this [commit](https://github.com/scipy/scipy/commit/b24017ea594a0e32e711c99015fbb27432a96ff0#diff-a94e84f2e5470e07eaf65ca735fe2f698d24edc24a1bed2768a8842a12c9d8ea) appears to have been written by Art Owen # - Full of good advice, such as make sure to use $n=2^d$ samples; if the number of samples is not a power of 2, performance can be much worse # - [wikipedia low-discrepancy sequences](https://en.wikipedia.org/wiki/Low-discrepancy_sequence#Construction_of_low-discrepancy_sequences) # - ["High-dimensional integration: The quasi-Monte Carlo way"](https://web.maths.unsw.edu.au/~josefdick/preprints/DKS2013_Acta_Num_Version.pdf) by <NAME> and Sloan (Acta Numerica, 2013) # - QMC (and randomized QMC, RQMC) can improve the **convergence rate** # - QMC code: # - [`scipy.stats.qmc`](https://docs.scipy.org/doc/scipy/reference/stats.qmc.htm) which is from 2020 # - [QMCPy](https://qmcpy.org/), Version 1.0 from 2021 # - Similar, but not the same, as QMC, is [**Sparse grid**](https://en.wikipedia.org/wiki/Sparse_grid) via **Smolyak's quadrature rule** # - **Control variates** as a means of **variance reduction**. 
Refs: # - [wikipedia control variates](https://en.wikipedia.org/wiki/Control_variates) # - There are many types of variance reduction. Other methods, not discussed here, include antithetic variates, [importance sampling](https://en.wikipedia.org/wiki/Importance_sampling) and [stratified sampling](https://en.wikipedia.org/wiki/Stratified_sampling). # - Variance reduction techniques do *not* improve convergence rate, but improve the **constants** # # <NAME>, University of Colorado, April 2019, ipynb version Nov 2021 # + [markdown] id="Ye6ZHFO8xy7z" # The `qmc` module was added in version 1.7.0 of `scipy` (around July 2021), so we may need to upgrade our `scipy`. Let's see what version colab provides us with: # + id="l2_6p2OnYKCy" outputId="bf2c77fc-95fd-47c6-d443-e04176d8fb87" colab={"base_uri": "https://localhost:8080/"} import scipy print(scipy.__version__) # Nov 2021 on colab, this is 1.4.1 # + id="3C3mGb0Xx9Je" # !pip install scipy==1.7 # + id="dtIAm7rGxVG-" colab={"base_uri": "https://localhost:8080/"} outputId="c8d9b45c-45a1-42b7-f84e-dfa14914cfc9" import scipy print(scipy.__version__) # + id="gjRJTTnskSSd" import numpy as np from scipy.special import sici from numpy import sinc, pi rng=np.random.default_rng() from numpy.linalg import norm import scipy.stats.qmc as qmc import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams["lines.linewidth"] = 2 mpl.rcParams["figure.figsize"] = [8,5] # + [markdown] id="Fw_KaFbtkVJT" # ## Integrate $\sin(x)/x$ from 0 to 1 (e.g. Si(1), Si is Sine Integral) # # The sine integral, Si(z), is the integral of $\sin(x)/x$ from 0 to z where we define $\sin(0)/0$ to be 1 (consistent with the limit) # # This integral is not known in closed form. See [Trigonometric_integral#Sine_integral](https://en.wikipedia.org/wiki/Trigonometric_integral#Sine_integral) on wikipedia. # # How can we approximate it? There are specialized techniques that are faster and more accurate than what we will discuss here, but we'll treat it via the integral definition and try to numerically evaluate the integral. 
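# + [markdown]
# As a quick baseline for this smooth, one-dimensional integrand, SciPy's
# adaptive quadrature (`scipy.integrate.quad`) already reaches essentially
# machine precision; a minimal sketch:

# +
from scipy.integrate import quad
from scipy.special import sici

# integrand sin(x)/x written via numpy's normalized sinc, as in the cells below
val, abserr = quad(lambda x: sinc(x / pi), 0, 1)
print(f"quad estimate {val:.12f}, reported abs. error {abserr:.1e}")
print(f"difference from sici(1): {val - sici(1)[0]:.2e}")
# -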
# + colab={"base_uri": "https://localhost:8080/"} id="lcqRsEWLk4YH" outputId="f127998c-cd4c-4845-d3a0-72f5dffa4bf8" si = sici(1)[0] print(f"The sine integral Si(1) is {si}") # + [markdown] id="OfRT5ZeiIyi6" # Let's try some classical [quadrature rules](https://en.wikipedia.org/wiki/Numerical_integration) to integrate $\int_a^bf(x)\,dx$ # + colab={"base_uri": "https://localhost:8080/"} id="mN_N8Bj0lBSc" outputId="c5985924-e9b2-4e3b-a03f-5a18032f68ea" f = lambda x : sinc(x/pi) a = 0 b = 1 N = int(4e1) + 1 # simpler to have it odd (for Simpson's rule) xgrid, h = np.linspace(a,b,num=N,retstep=True) # spacing is h composite_mid = h*np.sum( f( xgrid[1:]-h/2) ) # open formula fx = f(xgrid) composite_trap = h*(np.sum(fx) - fx[0]/2 - fx[-1]/2 ) composite_simp = h/3*(fx[0]+fx[-1]+4*np.sum(fx[1::2]) + 2*np.sum(fx[2:-1:2])) print( si - composite_mid) print( si - composite_trap) print( si - composite_simp) # + [markdown] id="vgxijUm1JqZu" # Getting ready for quasi-Monte Carlo, let's visualize discrepancy of random numbers on $[0,1]$ # + colab={"base_uri": "https://localhost:8080/", "height": 324} id="yTcd-yQmJvQ-" outputId="b13e2e39-9dc9-4229-93af-4052582c5a5e" N = 2**8 # for the fancy QMC, we want powers of 2 setA = np.sort( rng.uniform(size=N) ) # uniform # Try something slight lower discrepancy and very easy to construct # (note: this is a *random* quasi-MC method) setB = np.hstack( (.5*setA[::2], .5 + .5*setA[1::2]) ) sampler = qmc.Sobol(d=1,scramble=True) setC = sampler.random_base2(m=int(np.log2(N))).ravel() setC.sort() # for visualization purposes plt.plot( setA, label='uniform random' ) plt.plot( setB, label='lower discrepancy' ) plt.plot( setC, label='Sobol sequence (proper way)' ) plt.plot( [0,N-1], [0,1], '--') plt.xlim((0,50)) plt.ylim((0,.2)) plt.legend() plt.show() # + id="OGuY6N-m1BWJ" colab={"base_uri": "https://localhost:8080/", "height": 623} outputId="cc937dcf-b8b6-4e96-b507-e857037f2084" # == Another way to plot it == kernel_size = 10 kernel = np.ones(kernel_size) / kernel_size smooth = lambda data : np.convolve(data, kernel, mode='same') plt.fill_between( np.arange(N), 0, smooth( setA - np.linspace(0,1,num=N) ), \ label='uniform random', alpha=0.5 ) plt.fill_between( np.arange(N), 0, smooth( setB - np.linspace(0,1,num=N) ), label='lower discrepancy', alpha=0.5 ) plt.fill_between( np.arange(N), 0, smooth( setC - np.linspace(0,1,num=N) ), label='Sobol sequence', alpha=0.5 ) plt.legend() plt.show() # == Another way to plot it == plt.hist( np.diff(setA), label='uniform random', alpha=0.5, bins=30 ) plt.hist( np.diff(setB), label='lower discrepancy', alpha=0.5, bins=30 ) plt.hist( np.diff(setC), label='Sobol sequence', alpha=0.5, bins=30 ) plt.legend() plt.show() # + [markdown] id="19kxHDMX1FQa" # #### Visualize this in 2D # # Also compare with a [sparse grid implementation](https://github.com/mfouesneau/sparsegrid) and [Latin hypercube sampling](https://en.wikipedia.org/wiki/Latin_hypercube_sampling) (with shuffling aka balanced sampling) # + id="SmB_XhW4YD3W" # !wget -q https://github.com/mfouesneau/sparsegrid/raw/master/sparsegrid.py # + id="A-92PK_x1MWZ" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="12eb4bac-9da5-4e56-a492-247cd9c5f507" m = 9 N = 2**m setA = rng.uniform(size=(N,2)) sampler = qmc.Sobol(d=2,scramble=False) setB = sampler.random_base2(m=m) sampler = qmc.Sobol(d=2,scramble=True) setC = sampler.random_base2(m=m) # Let's do balanced sampling of a Latin Hypercube x1 = np.linspace(0,1,num=N) x2 = np.linspace(0,1,num=N) rng.shuffle(x1) 
rng.shuffle(x1) setD = np.vstack( (x1,x2) ).T # and look at a sparse grid import sparsegrid from sparsegrid import SparseInterpolator dim = 2 # Dimensionality of function to interpolate nn = 6 indxi3 = sparsegrid.get_multi_index_sequence(nn, dim) nnodes, x_coord = sparsegrid._initialize_nodes(nn, interpolation_type) indxi4, pnt = sparsegrid._compute_sparse_grid(dim, nnodes, indxi3, x_coord) setE = pnt plt.figure(figsize=(7,7)) plt.title('Uniform([0,1]^2), for MC') plt.plot( setA[:,0], setA[:,1], '.', label='Uniform([0,1]^2)') plt.show() plt.figure(figsize=(7,7)) plt.title('Sobol sequence, for QMC') plt.plot( setB[:,0], setB[:,1], '.r', label='Sobol') plt.show() plt.figure(figsize=(7,7)) plt.title('Sobol sequence, scrambled, for rQMC') plt.plot( setC[:,0], setC[:,1], '.g', label='Sobol') plt.show() plt.figure(figsize=(7,7)) plt.title('balanced sampling for Latin Hypercube') plt.plot( setD[:,0], setD[:,1], '.', label='Latin Hypercube') plt.show() plt.figure(figsize=(7,7)) plt.title('Sparse Grid') plt.plot( setE[:,0], setE[:,1], '.') plt.show() # + id="pe8MZRXR2reB" colab={"base_uri": "https://localhost:8080/"} outputId="4e921d59-4a1f-4487-c43a-5a94ec7059d7" print('== Discrepancy: lower is better ==') print(f'Discrepancy for uniform is\t\t {qmc.discrepancy(setA):.1e}') print(f'Discrepancy for Sobol is\t\t {qmc.discrepancy(setB):.1e}') print(f'Discrepancy for scrambled Sobol is\t {qmc.discrepancy(setC):.1e}') print(f'Discrepancy for Latin Hypercube is\t {qmc.discrepancy(setD):.1e}') print(f'Discrepancy for sparse Grid is\t\t {qmc.discrepancy(setE):.1e}') # + [markdown] id="pI9Cpoa_NC4x" # Now use MC and quasi-MC to evaluate the integral # + colab={"base_uri": "https://localhost:8080/"} id="Ifz9PwYeNJvi" outputId="a243729b-727b-4f80-8aa1-76b5d0c0bda4" m = 10 N = 2**m # Sobol sequences like powers of 2 setA = rng.uniform(size=N) # uniform sampler = qmc.Sobol(d=1,scramble=True) setB = sampler.random_base2(m=m).ravel() int_MC = np.mean( f(setA) ) # simple! int_QMC = np.mean( f(setB) ) # simple! print(f"Via Monte Carlo, error is\t\t{si-int_MC:.3e}") print(f"Via Quasi-Monte Carlo, error is\t\t{si-int_QMC:.3e}") ## and for comparison with quadrature ... # since N is even, let's use N+1 points to make Simpson's rule nice xgrid, h = np.linspace(a,b,num=N+1,retstep=True) # spacing is h composite_mid = h*np.sum( f( xgrid[1:]-h/2) ) # open formula fx = f(xgrid) composite_trap = h*(np.sum(fx) - fx[0]/2 - fx[-1]/2 ) composite_simp = h/3*(fx[0]+fx[-1]+4*np.sum(fx[1::2]) + 2*np.sum(fx[2:-1:2])) print(f"Via comp. midpt rule, error is\t\t{si-composite_mid:.3e}") print(f"Via comp. trap rule, error is\t\t{si-composite_trap:.3e}") print(f"Via comp. simpson's rule, error is\t{si-composite_simp:.3e}") # + [markdown] id="nFzuwMBo30vE" # ### Let's look at the error rate of MC vs QMC # + id="R6-AwYMu30L0" colab={"base_uri": "https://localhost:8080/"} outputId="f5d251b1-ca10-4522-b444-d26e31f15fc1" nReps = 1000 mMax = 14 errMC = [] errQMC = [] nList = [] for m in range(6,mMax): N = 2**m nList.append(N) print(f"m is {m:2d} of {mMax} total") # Find avg error for MC err = [] for reps in range(nReps): x = rng.uniform(size=N) err.append( np.mean(f(x)) - si ) errMC.append( np.mean(np.abs(err)) ) # Repeat for QMC err = [] for reps in range(nReps): sampler = qmc.Sobol(d=1,scramble=True) x = sampler.random_base2(m=m).ravel() err.append( np.mean(f(x)) - si ) errQMC.append( np.mean(np.abs(err)) ) # + [markdown] id="vl3h5waK60LA" # Let's look at the convergence rates. 
For MC, we should be fairly close to theory, namely $1/\sqrt{n}$. For QMC, since this is a mix of MC and quadrature, we can do better than worse-case if the integrant is **smooth**, which it is in our case. (There are QMC results involving the smoothness of a function). # # For QMC, we should get at least $1/n$ if not more (though in higher dimensions, the dimension starts to play a mild role, whereas it doesn't for pure MC) # + id="CLdzfO7Q6BPw" colab={"base_uri": "https://localhost:8080/", "height": 346} outputId="4a7dc54c-cbeb-4c64-84e5-7a74518b63fd" nList = np.asarray(nList) plt.loglog( nList, errMC, 'o-', label='MC' ) plt.loglog( nList, errQMC, '*-', label='QMC' ) plt.loglog( nList, .03/np.sqrt(nList), '--', label='$O(1/\sqrt{n})$') plt.loglog( nList, .001/nList, '-.', label='$O(1/n)$') plt.loglog( nList, .1/nList**2, '-.', label='$O(1/n^2)$') plt.xlabel('Number of points $n$') plt.legend() plt.show() # + [markdown] id="HXwvPFZ0N9dg" # #### Now add in control variates # We're trying to integrate # $$ \int_0^1 \left( f(x) = \frac{\sin(x)}{x}\right) \,dx$$ # which has no closed form. But we can integrate polynomials in closed form. Let's approximate $f$ by its Maclaurin series, # $$ f(x) \approx g(x) = 1 - x^2/6 $$ # and we can compute # $$ \nu = \int_0^1 g(x)\,dx = \frac{17}{18} $$ # using basic calculus. # # + colab={"base_uri": "https://localhost:8080/"} id="wUm5fyOROkEr" outputId="ca52d535-e72b-41fa-cfe4-84aeda39bbaa" g = lambda x : 1 - x**2/6 nu = 17/18 x = rng.uniform(size=N) fx = f(x) gx = g(x) int_MC = np.mean( fx ) # Estimate the covariance and variance of gx Cov = np.cov( np.vstack( (fx,gx-nu) ) ) print("Scaled covariance matrix:") print(Cov/norm(Cov.flatten(),ord=np.Inf)) c = -Cov[0,1]/Cov[1,1] # Or this is slightly more accurate, using that we know mean(gx) exactly: c = -np.dot(fx-int_MC,gx-nu)/(norm(gx-nu)**2) print(f"Using c value of {c:.5f}") # c = -1 # this is also reasonable int_MC_CV = int_MC + c*( np.mean(gx) - nu ) print(f"Via Monte Carlo, error is\t\t\t{si-int_MC:.3e}") print(f"Via Monte Carlo w/ control variates, error is\t{si-int_MC_CV:.3e}") # Just the Taylor series alone isn't as accurage: print(f" And approximating integral with nu, error is\t{si-nu:.3e}") # Look at variance print(f"Variance of MC is \t\t{np.mean( (fx - si)**2 ):.2e}") print(f"Variance w. control variates is\t{np.mean( (fx + c*(gx-nu) - si)**2 ):.2e}") # + [markdown] id="KLND3WFydn3h" # Plot the error as a function of number of samples # # With the control variates, we don't change the $1/\sqrt{n}$ decay rate, but we do improve the constant factor # + colab={"base_uri": "https://localhost:8080/", "height": 324} id="e4gexxBBcaHf" outputId="a29b12ec-a1ce-4ebe-b53f-c9a15fc29c59" N = int(1e6) x = rng.uniform(size=N) fx = f(x) gx = g(x) er = np.abs( np.cumsum(fx)/np.arange(1,N+1) - si ) plt.loglog( er , label='monte carlo' ) er = np.abs( np.cumsum(fx+c*(gx-nu))/np.arange(1,N+1) - si ) plt.loglog( er , label='MC w/ control variate' ) plt.legend() plt.show() # + [markdown] id="0wT1TO60BcM1" # # Estimate the value of $\pi$ (skip this) # i.e., 2D integration of an indicator function # # We use the fact that the area of the unit circle is $\pi$, and so will look at the the ratio of the area of the unit circle to that of $[-1,1]^2$. Or equivalently, we can work in just the first quadrant. 
# # # Note: this part of the demo isn't as exciting as I'd hoped, so I'd suggest skipping it # + colab={"base_uri": "https://localhost:8080/"} id="IBBdHDMMBeWq" outputId="efb0437b-caf7-4508-a6a0-64eeefbf4aad" n = int(1e7) # Note: shape 2xn is faster than nx2 when n > 1e7 X = rng.uniform( size=(2,n) ) nrm = norm( X, ord=2, axis=0) Y = nrm <= 1 # Our final estimate for pi is just # 0's / total number, scaled by 4 # est = 4*np.count_nonzero(Y)/n est = 4*np.mean(Y) # another way print(f"Monte Carlo estimate of pi is {est:.6f}") # + colab={"base_uri": "https://localhost:8080/", "height": 341} id="WaJDkRemCJNq" outputId="87f084ae-3bd8-4b60-f5a0-b66b4edd9afe" # Let's also look at how this converges over time mc = np.cumsum( Y )/np.arange(1,n+1) err = np.abs( 4*mc - np.pi ) plt.loglog( err ) plt.show() var = np.mean( (4*Y - np.pi)**2 ) print(f"Variance is {var:.2e}") # + [markdown] id="gHpNJ445FDDZ" # #### Let's add a control variate # # Let's add in a polyhedral approximation, consisting of the lines connecting the points $(0,1)$, $(1/\sqrt{2},1/\sqrt{2})$ and $(1,0)$. # # Decomposing this into triangles, and we can figure out that it's area (in the first quadrant) is $1/\sqrt{2}$. # # This control variate isn't that good, so we won't see great results unfortunately. # + colab={"base_uri": "https://localhost:8080/"} id="Fp7XbI5iFBrD" outputId="1774b693-dfde-44bb-82d2-75d0ac2cf25c" slope = 1 - np.sqrt(2) intrcpt = 1 Za = X[0,:] <= slope*X[1,:] + intrcpt Zb = X[1,:] <= slope*X[0,:] + intrcpt # it's symmetric Z = Za & Zb sample_mean = np.mean(Z) true_mean = 1/np.sqrt(2) # to use a control variate, you need to know this print(f"Sample mean is {sample_mean}, true mean is {true_mean}") # ... just checking. # + colab={"base_uri": "https://localhost:8080/"} id="CW8eqbxiF5rw" outputId="cc9691fd-5b7b-4340-9287-ceb22688b1b9" # Estimate parameter "c", c = -Cov(Y,Z)/Var(Y) # The parameter c is high (close to 1), indicating good correlation Cov = np.cov( np.vstack( (Y,Z) ) ) print("Scaled covariance matrix:") print(Cov/norm(Cov.flatten(),ord=np.Inf)) c = -Cov[0,1]/Cov[1,1] print(c) # + colab={"base_uri": "https://localhost:8080/"} id="D_IYcPFAHP8q" outputId="be49e288-8fb6-4159-99ba-4f911bc741ca" CV = Y + c*( Z - true_mean) est = 4*np.mean(CV) print(f"Monte Carlo w. control variate estimate of pi is {est:.6f}") # + colab={"base_uri": "https://localhost:8080/", "height": 358} id="_1RD9nOUH6we" outputId="5f879712-6dff-4cb5-cd90-09e8f1dc9248" mc = np.cumsum( CV )/np.arange(1,n+1) err = np.abs( 4*mc - np.pi ) plt.loglog( err, label='w/ control variate' ) mc = np.cumsum( Y )/np.arange(1,n+1) err = np.abs( 4*mc - np.pi ) plt.loglog( err, label='basic MC' ) plt.legend() plt.show() var = np.mean( (4*Y - np.pi)**2 ) print(f"Variance is {var:.2e}") var = np.mean( (4*CV - np.pi)**2 ) print(f"Variance (using control variate) is {var:.2e}") # + id="1g6vJqIYj54v"
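# The introduction lists antithetic variates among the variance-reduction
# methods not discussed; a minimal sketch for the same Si(1) integrand,
# reusing f, si and rng from the cells above. Since f is monotone on [0,1],
# pairing u with 1-u gives negatively correlated evaluations.
N = 2**14
u = rng.uniform(size=N)
v = rng.uniform(size=2*N)
est_plain = np.mean(f(v))                    # 2N independent draws
est_anti = np.mean(0.5*(f(u) + f(1 - u)))    # N antithetic pairs, also 2N evaluations
print(f"plain MC error      {est_plain - si:.2e}")
print(f"antithetic error    {est_anti - si:.2e}")
# each pair costs two evaluations, so antithetic wins when this ratio is below 0.5
print(f"variance ratio (pair avg / single draw): {np.var(0.5*(f(u)+f(1-u))) / np.var(f(v)):.3f}")
# -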
Demos/demo14_MonteCarlo_and_improvements.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] id="fVxf5ue7RxW5" # # The eigenfaces example: chaining PCA and SVMs # ============================================= # # The goal of this example is to show how an unsupervised method and a # supervised one can be chained for better prediction. # # Here we'll take a look at a simple facial recognition example. Ideally, # we would use a dataset consisting of a subset of the `Labeled Faces in # the Wild <http://vis-www.cs.umass.edu/lfw/>`__ data that is available # with :func:`sklearn.datasets.fetch_lfw_people`. The labelled face in the wild face dataset. # # However, this is a relatively large download (~200MB) so we will do the tutorial on a simpler, less rich dataset. # # # + colab={"base_uri": "https://localhost:8080/"} id="Ji8l6GbiRxW_" outputId="3a6224ea-921e-447e-c3ed-78d04be825a6" from sklearn import datasets faces = datasets.fetch_olivetti_faces() faces.data.shape # + [markdown] id="La__4Oa0RxXo" # Let's visualize these faces to see what we're working with # + colab={"base_uri": "https://localhost:8080/", "height": 342} id="gyACcsH2RxXv" outputId="6bb760c3-8ac3-41bd-9fb3-ee8b1c3f79f3" from matplotlib import pyplot as plt import random fig = plt.figure(figsize=(8, 6)) # plot several images for i in range(15): r = random.randint(0, 400) ax = fig.add_subplot(3, 5, i + 1, xticks=[], yticks=[]) ax.imshow(faces.images[r], cmap=plt.cm.bone) # + [markdown] id="Jn3nVfylRxYN" # Note is that these faces have already been localized and scaled to a common size. # # This is an important preprocessing piece for facial recognition, and is a process that can require a large collection of training data. # # This can be done in scikit-learn, but the challenge is gathering a sufficient amount of training data for the algorithm to work. # # We'll perform a Support Vector classification of the images. We'll do a typical train-test split on the images: # # # + colab={"base_uri": "https://localhost:8080/"} id="hDgt10c1RxYn" outputId="3c1cfecf-6282-4a8c-ea25-daf6de24aaee" from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(faces.data, faces.target, random_state=137) print(X_train.shape, X_test.shape) # + [markdown] id="U_zhGfTaRxY-" # Preprocessing: Principal Component Analysis # ------------------------------------------- # # We can use PCA to reduce these features to a manageable size, while maintaining most of the information # in the dataset. # # # + colab={"base_uri": "https://localhost:8080/"} id="zGxiRg_qRxZA" outputId="9fb9c845-c077-4308-fadf-674ffee17d8b" from sklearn import decomposition pca = decomposition.PCA(n_components=150, whiten=True) pca.fit(X_train) # + [markdown] id="Yhy0wHC5RxZT" # One interesting part of PCA is that it computes the "mean" face, which # can be interesting to examine: # # # + colab={"base_uri": "https://localhost:8080/", "height": 285} id="bwbJKHYXRxZY" outputId="a06f7975-5a76-4063-f12e-805150f96d76" plt.imshow(pca.mean_.reshape(faces.images[0].shape), cmap=plt.cm.bone) # + [markdown] id="oIvW5QygRxZv" # The principal components measure deviations about this mean along # orthogonal axes. 
# # # + colab={"base_uri": "https://localhost:8080/"} id="LgOkmKjcRxZy" outputId="e618139b-f4c6-44a2-ad27-8a20a9321dec" print(pca.components_.shape) # + [markdown] id="mn8IC0lORxZ-" # It is also interesting to visualize these principal components: # # # + colab={"base_uri": "https://localhost:8080/", "height": 340} id="9ePFr1ZARxaC" outputId="ed7ff1c7-d3aa-4a26-c455-daac3630a244" fig = plt.figure(figsize=(16, 6)) for i in range(30): ax = fig.add_subplot(3, 10, i + 1, xticks=[], yticks=[]) ax.imshow(pca.components_[i].reshape(faces.images[0].shape), cmap=plt.cm.bone) # + [markdown] id="SBkPczznRxaT" # The components ("eigenfaces") are ordered by their importance from # top-left to bottom-right. We see that the first few components seem to # primarily take care of lighting conditions; the remaining components # pull out certain identifying features: the nose, eyes, eyebrows, etc. # # With this projection computed, we can now project our original training # and test data onto the PCA basis: # # # + colab={"base_uri": "https://localhost:8080/"} id="0a8FG3HFRxaW" outputId="c66beba5-4f21-46a4-8a98-c39ad7187d11" X_train_pca = pca.transform(X_train) X_test_pca = pca.transform(X_test) print(X_train_pca.shape) # + colab={"base_uri": "https://localhost:8080/"} id="bwJ9tns3Rxar" outputId="b5ada946-999c-4cae-f8f0-acdc70b7510f" print(X_test_pca.shape) # + [markdown] id="rit_f47IRxbC" # These projected components correspond to factors in a linear combination # of component images such that the combination approaches the original # face. # # Doing the Learning: Support Vector Machines # ------------------------------------------- # # Now we'll perform support-vector-machine classification on this reduced # dataset: # # # + colab={"base_uri": "https://localhost:8080/"} id="jtFRD3fORxbH" outputId="89fc8ead-4f52-44d9-d760-eb5ca8bfa627" from sklearn import svm clf = svm.SVC(C=5., gamma=0.001) clf.fit(X_train_pca, y_train) # + [markdown] id="7kvWiXT-Rxbf" # Finally, we can evaluate how well this classification did. First, we # might plot a few of the test-cases with the labels learned from the # training set: # # # + colab={"base_uri": "https://localhost:8080/", "height": 355} id="Y7aXBtRKRxbp" outputId="8e11d205-b514-4453-d76b-c5a82902adbc" import numpy as np fig = plt.figure(figsize=(8, 6)) for i in range(15): ax = fig.add_subplot(3, 5, i + 1, xticks=[], yticks=[]) ax.imshow(X_test[i].reshape(faces.images[0].shape), cmap=plt.cm.bone) y_pred = clf.predict(X_test_pca[i, np.newaxis])[0] color = ('black' if y_pred == y_test[i] else 'red') ax.set_title(y_pred, fontsize='small', color=color) # + [markdown] id="yFzw1hvKRxcB" # The classifier is correct on an impressive number of images given the # simplicity of its learning model! Using a linear classifier on 150 # features derived from the pixel-level data, the algorithm correctly # identifies a large number of the people in the images. # # Again, we can quantify this effectiveness using one of several measures # from :mod:`sklearn.metrics`. First we can do the classification # report, which shows the precision, recall and other measures of the # "goodness" of the classification: # # # + colab={"base_uri": "https://localhost:8080/"} id="pIRPKH2jRxcE" outputId="ad9e9ce6-fe40-4cd2-ce22-e1c9a673136c" from sklearn import metrics y_pred = clf.predict(X_test_pca) print(metrics.classification_report(y_test, y_pred)) # + [markdown] id="jztgVH4fRxci" # Another interesting metric is the *confusion matrix*, which indicates # how often any two items are mixed-up. 
The confusion matrix of a perfect
# classifier would only have nonzero entries on the diagonal, with zeros
# on the off-diagonal:
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="jPcMhOBoRxcl" outputId="198063a0-74aa-434a-f672-dea7476c76c1"
print(metrics.confusion_matrix(y_test, y_pred))

# + [markdown] id="f8aB9bwGRxc_"
# Pipelining
# ----------
#
# Above we used PCA as a pre-processing step before applying our support
# vector machine classifier. Plugging the output of one estimator directly
# into the input of a second estimator is a commonly used pattern; for
# this reason scikit-learn provides a ``Pipeline`` object which automates
# this process. The above problem can be re-expressed as a pipeline as
# follows:
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="Wei0jY67RxdC" outputId="cc62bd4e-882e-4231-d011-f7815ecd5223"
from sklearn.pipeline import Pipeline

clf = Pipeline([('pca', decomposition.PCA(n_components=150, whiten=True)),
                ('svm', svm.LinearSVC(C=1.0))])

clf.fit(X_train, y_train)

y_pred = clf.predict(X_test)
print(metrics.confusion_matrix(y_test, y_pred))
plt.show()
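# + [markdown]
# One practical benefit of the pipeline is that the settings of both steps can
# be tuned together. A minimal sketch with ``GridSearchCV`` over the step names
# defined above (the grid values are illustrative, not tuned):

# +
from sklearn.model_selection import GridSearchCV

param_grid = {'pca__n_components': [50, 100, 150],
              'svm__C': [0.1, 1.0, 10.0]}
search = GridSearchCV(clf, param_grid, cv=3)
search.fit(X_train, y_train)
print(search.best_params_)
print("held-out accuracy: %.3f" % search.score(X_test, y_test))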
Lab 11/PCA_SVM_EigenFaces.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.9.2 64-bit
#     metadata:
#       interpreter:
#         hash: c4d2db86565210e44e1312e025fd2a01c5965d45dad733b18a9ee28031514e1f
#     name: python3
# ---

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("Logistic Regression").getOrCreate()
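# A minimal sketch of the workflow this session would drive (the toy rows and
# the column names x1/x2/label below are made up for illustration):
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.classification import LogisticRegression

df = spark.createDataFrame(
    [(1.0, 2.0, 0.0), (2.0, 1.0, 1.0), (3.0, 4.0, 0.0), (4.0, 3.0, 1.0)],
    ["x1", "x2", "label"],
)
assembler = VectorAssembler(inputCols=["x1", "x2"], outputCol="features")
lr = LogisticRegression(featuresCol="features", labelCol="label")
model = lr.fit(assembler.transform(df))
model.transform(assembler.transform(df)).select("label", "prediction").show()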
.ipynb_checkpoints/Logistic Regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import ee

# This script will create a tif inside a data folder on my Gdrive

ee.Initialize()

landsat = ee.Image('LANDSAT/LC8_L1T_TOA/LC81230322014135LGN00').select(['B4', 'B3', 'B2'])

geometry = ee.Geometry.Rectangle([116.2621, 39.8412, 116.4849, 40.01236])

config = {
    'image': landsat,
    # depending on the earthengine-api version, the region can also be passed
    # as the geometry itself or as geometry.coordinates().getInfo()
    'region': geometry['coordinates'],
    'folder': 'data',
    'maxPixels': 10**10,
    'fileNamePrefix': 'testLandsat',
}

myTask = ee.batch.Export.image.toDrive(**config)

myTask.start()

myTask.status()

tasks = ee.batch.Task.list()
tasks

# Known issue: the export creates the 'data' folder even if it already exists
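# Exports run asynchronously, so the status check above typically shows READY or
# RUNNING right after start(). A small polling sketch (the 30 s interval is arbitrary):
import time

while myTask.active():
    print('Export still running, state:', myTask.status().get('state'))
    time.sleep(30)
print('Final state:', myTask.status().get('state'))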
ResourceWatch/GEE_task.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Otros tipos de datos, funciones, objetos # ## Diccionarios # # Un diccionario o mapa es una estructura de datos que guarda información en pares `clave:valor`. Las operaciones típicas son agregar un valor (con su clave) y extraer el valor asociado a una clave. En Python los diccionarios se declaran entre llaves `{}`, con pares `clave: valor` separados por comas: from datetime import date diccionario = {"nombre": "Javier", "edad": 27, "Fecha": date.today()} print(type(diccionario)) print(diccionario) # También se pueden declarar con dict() diccionario = dict([("nombre", "Javier"), ("edad", 27), ("Fecha", date.today())]) print(type(diccionario)) print(diccionario) # Como ven en el ejemplo, los valores de un diccionario pueden ser de cualquier tipo. Por otro lado, las claves tienen que ser *inmutables*; strings y números funcionan como claves, listas no. ejemplo = {[1,2,3]: 1} # Ni las claves ni los valores tienen porqué ser todos del mismo tipo ejemplo_2 = {0: "Hola", 1: date.today(), "nombre": "Javier"} print(ejemplo_2) # Para acceder el valor de una clave, lo hacemos como lo hacíamos con listas (acá el índice es la clave): print(ejemplo_2[1]) print(ejemplo_2["nombre"]) # También se puede usar el método `get()`: print(ejemplo_2.get(1)) print(ejemplo_2.get("nombre")) # Si la clave no existe devuelve una excepción ejemplo_2["clave"] # De la misma manera agregamos un nuevo par: ejemplo_2["nueva_clave"] = "valor" ejemplo_2 # Para borrar un par pueden usar el método `pop()` o `del`. `pop()` además de eliminar el par te devuelve el valor asociado a la clave: valor = ejemplo_2.pop("nueva_clave") print(valor) ejemplo_2 del(ejemplo_2["nombre"]) ejemplo_2 # El método `items()` devuelve un iterable con tuplas `(clave, valor)`, `keys()` devuelve las claves, `values()` los valores. ejemplo_2.items() ejemplo_2.keys() ejemplo_2.values() for clave, valor in ejemplo_2.items(): print(clave, valor) for clave in ejemplo_2.keys(): print(clave) # Un comentario sobre iterables: nunca es buena idea, adentro de un loop, modificar el iterable sobre el que se hace el loop. Si creen que necesitan hacer algo así, háganlo con otra variable que sea una copia del iterable original. copia = ejemplo_2.copy() print(copia) ejemplo_2["nueva_clave"] = "valor" print(ejemplo_2) print(copia) a = [1,2,3,4] b = a.copy() a.append(5) print(a) print(b) # Para chequear que una clave existe se usa `in`, la cantidad de entradas del diccionario se obtiene con `len`. print(0 in ejemplo_2) print("inexistente" in ejemplo_2) print(len(ejemplo_2)) # ## Funciones # # Una función es esencialmente un bloque de código que recipe un input y devuelve un resultado. # En general, cuando empiecen a hacer ejercicios van a tener ciertos bloques de código que cumplen un rol específico y se ejecutan muchas veces. En esos casos es común poner ese bloque en una función, para que el código esté más ordenado y sea más facil de leer. # Ejemplo: supongan que en el código que están escribiendo están trabajando mucho con strings y necesitan saber muy seguido la última palabra de los strings. 
def ultima_palabra(texto): palabras = texto.split() return palabras[-1] ultima_palabra("Esto es una oración") ultima_palabra("Esto también") # Definir esta función no es estrictamente necesario, ustedes podrían, cada vez que necesitan la última palabra, escribir el código # ``` # texto.split()[-1] # ``` # Sin embargo esto es menos claro que la linea `ultima_palabra(texto)`. Esa es un poco la utilidad de las funciones, separar el código en partes más claras y fáciles de leer. # Para definir una función se usa `def`, seguido del nombre de la función y entre paréntesis sus argumentos (inputs). Después de los dos puntos, todo lo que le siga en un bloque indentado es el cuerpo de la función (el código que se ejecuta al llamarla). Si queremos que la función devuelva un valor, usamos `return valor`; esto termina la ejecución de la función. def al_cuadrado(numero): return numero ** 2 al_cuadrado(2) al_cuadrado(4) def potencia(numero, n): return numero ** n print(potencia(2, 2)) print(potencia(2, 3)) print(potencia(3, 3)) # En Python las funciones pueden devolver más de un valor: def primero_y_ultimo(lista): return lista[0], lista[-1] # El resultado de la función es una tupla resultado = primero_y_ultimo([1, 2, 3, 4, 5]) print(resultado) print(type(resultado)) primero, ultimo = resultado print(primero) print(ultimo) # Pueden hacer que un argumento tenga un valor *default*, que se usa si al llamar la función no se especifica su valor. # Si no le pasás n, la función eleva al cuadrado def potencia(numero, n=2): return numero ** n print(potencia(10)) print(potencia(10, 3)) # ## Objetos # Sacado en buena parte de https://realpython.com/python3-object-oriented-programming/ # Python provee todas las funcionalidades básicas de la programación orientada a objetos ([OOP](https://es.wikipedia.org/wiki/Programaci%C3%B3n_orientada_a_objetos), por sus siglas en inglés). La idea básica de este paradigma es que uno tiene _objetos_ asociados a _clases_ (por ejemplo, las clases `str` (string), `list`, `int`, etc); de este modo, la variable `a = 1` es un objeto de la clase `int`, `b = [1, 2, 3]` un objeto de la clase `list`. A veces también se dice que `a` es una _instancia_ de la clase `int`. # Los tipos básicos de python son clases predefinidas. print(type('IEEE')) print(type([])) print(type(1)) print(type(diccionario)) # Las clases pueden pensarse como un *template* de un tipo de objeto, donde uno define los _atributos_ y _métodos_ de los objetos de ese tipo. Los atributos pueden pensarse como _propiedades_ de los objetos de esa clase, los métodos como _comportamientos_. Por ejemplo, un objeto de tipo *email* podría tener como atributos sus destinatarios, el título y el cuerpo del mail y como métodos agregar archivos adjuntos y enviar. lista = [1, 2, 3] # Si agarran la lista de arriba y en una celda de código escriben `lista.` y apretan `tab` (si están en un Collab, es control + espacio, o command + espacio si usan mac, a veces también es automático si le dan unos segundos), van a ver que les aparece un desplegable con funciones para autocompletar. Estas funciones son los métodos de clase `list`, algunos de los cuales ya vimos. Pueden probar hacer lo mismo con un diccionario, o en general con cualquier objeto de una clase; de hecho, el desplegable les va a mostrar no sólo los métodos, sino tambien los atributos cuando los haya. # Hasta ahora sólo hablamos de clases que Python ya define por su cuenta, pero en OOP la idea es que uno puede definir sus propias clases, para después poder crear objetos de estas. 
En el siguiente ejemplo, definimos la clase `Perro`. class Perro: especie = "Dogo" def __init__(self, nombre, edad): self.nombre = nombre self.edad = edad # Para definir la clase, usamos `class` seguido del nombre de la clase (por convención los nombres de clases suelen empezar en mayúscula); despues del `:`, declaramos todo lo que nos importa (atributos, métodos, etc). # # En este ejemplo, lo que hicimos fue definir la clase `Perro`, que tiene un atributo `especie`, cuyo valor es "Dogo". La linea siguiente define la función `__init__` de la clase, que en muchos otros lenguajes orientados a objetos es lo que se llama el _constructor_ de la clase <sup>*</sup>. Esta es una función especial de las clases, que se llama cuando uno instancia un objeto de una clase. En este caso, lo que estamos haciendo es decir que si uno escribe `Perro(nombre, edad)`, eso crea un objeto de tipo perro, con los atributos `nombre` y edad igual a los que le pasamos. # # El parámetro `self` es quizás la parte más confusa; esencialmente, `self` refiere a la instancia de la clase `Perro` que se acaba de crear. Es decir, cuando uno llama al constructor `Perro(nombre, edad)`, Python instancia un objeto de tipo perro y se lo pasa a `__init__` como el parámetro `self`; los otros paramétros los pasamos nosotros. # # # <sup> *</sup>: Si uno se pone formal,`__init__` no es exactamente un constructor como en otros lenguajes OOP, pero acá no nos importa esa distinción. perro = Perro("Bowie", 5) print(perro) # Una vez instanciado nuestro perro, podemos acceder a sus atributos usando un punto `.` print(perro.especie) print(perro.nombre) print(perro.edad) # Estos valores se pueden cambiar: perro.especie = "Bulldog" print(perro.especie) # Si quisiéramos poder especificar la especie del perro al crearlo (en vez de que sea Dogo por default), lo ponemos adentro de la función `__init__`: class Perro: def __init__(self, nombre, edad, especie): self.nombre = nombre self.edad = edad self.especie = especie perro = Perro("Bowie", 5, "Labrador") print(perro.especie) # Bien, tenemos atributos, nos falta poder definir métodos. Por ejemplo, queremos un método que nos devuelva una descripción completa del perro. class Perro: def __init__(self, nombre, edad, especie): self.nombre = nombre self.edad = edad self.especie = especie def descripcion(self): return f"{self.nombre} tiene {self.edad} años y es de raza {self.especie}" def dice(self, sonido): return f"{self.nombre} dice {sonido}" # El primer método devuelve una descripcíon completa del perro como string. Para llamarlo sobre un perro, usamos también un punto `.`: bowie = Perro("Bowie", 5, "Labrador") bowie.descripcion() bowie.dice("Woof!") # Cuando uno define métodos, aparece de nuevo el parámetro `self`; como antes, esto refiere a la instancia de `Perro` que está llamando al método. En los ejemplos de arriba, `self` es `bowie`, y cuando hacemos `bowie.descripcion()` estamos llamando a la función `descripcion` de la clase `Perro` con `bowie` como parámetro. De hecho, si quisiéramos podríamos llamar al método de esta manera: # Esto es lo mismo que bowie.descripcion() print(Perro.descripcion(bowie)) # Esto es lo mismo que bowie.dice("Woof!") print(Perro.dice(bowie, "Woof!")) # Por supuesto, la primera notación es mucho más cómoda, así que nadie usa la otra. # En general, puede ser útil tener un método como descripción que devuelva información legible del objeto en cuestión. Sin embargo, lo que hicimos con `descripcion()` no es la mejor forma de hacerlo. 
Estaría bueno que si uno hace `print(perro)`, nos salga la descripción: print(bowie) # Las clases predefinidas de Python ya hacen esto; por ejemplo, si uno tiene un diccionario y hace `print(diccionario)`, el resultado no es como lo de arriba. Esto es porque estas clases tienen un método especial `__str__()`, que es el que dice cómo se imprime un objeto. Si le cambiamos el nombre a `descripcion()`, podemos hacer lo mismo con nuestra clase: class Perro: def __init__(self, nombre, edad, especie): self.nombre = nombre self.edad = edad self.especie = especie def __str__(self): return f"{self.nombre} tiene {self.edad} años y es de raza {self.especie}" def dice(self, sonido): return f"{self.nombre} dice {sonido}" bowie = Perro("Bowie", 5, "Labrador") print(bowie) # Existen muchos de estos métodos especiales de Python que permiten customizar nuestras clases, todos ellos empiezan y terminan con `__`. Por ejemplo, si quisiéramos usar el operador `>` para decir que un perro es más chico o más grande que otro (en edad), podemos definir el método `__gt__` (gt es por *greater than*): class Perro: def __init__(self, nombre, edad, especie): self.nombre = nombre self.edad = edad self.especie = especie def __str__(self): return f"{self.nombre} tiene {self.edad} años y es de raza {self.especie}" def dice(self, sonido): return f"{self.nombre} dice {sonido}" def __gt__(self, otro_perro): return self.edad > otro_perro.edad bowie = Perro("Bowie", 5, "Labrador") perro_2 = Perro("Luna", 8, "Beagle") bowie > perro_2
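# Como `max` compara con el operador `>`, basta con haber definido `__gt__`
# para obtener el perro de mayor edad de una lista. Un ejemplo mínimo
# (el tercer perro es inventado, sólo para ilustrar):

perros = [bowie, perro_2, Perro("Rocco", 3, "Caniche")]
perro_mayor = max(perros)
print(perro_mayor)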
clase03/clase_3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set_style("white") df = pd.read_csv("results/stats.csv") df.num_kmers += 1 df.time += 1 g = sns.scatterplot(x="num_kmers", y="time", hue="alg", style="k", data=df, marker=2) g.set_yscale("log") g.set_xscale("log") plt.title("Random data, time per window, all omega") plt.xlabel("# of generated k-mers") plt.ylabel("Time, microseconds") #plt.savefig("fig/all.pdf", format="pdf") # + sns.set(rc={'figure.figsize':(11.7,8.27)}) sns.set_style("white") dfx = df[df["omega"] == 1.0] g = sns.scatterplot(x="num_kmers", y="time", hue="alg", style="k", data=dfx, marker=2) g.set_yscale("log") g.set_xscale("log") plt.title("Random data, time per window, omega = 1.0") plt.xlabel("# of generated k-mers") plt.ylabel("Time, microseconds") #plt.savefig("fig/omega1.0.pdf", format="pdf") # - dfx = df[df["omega"] == 1.25] g = sns.scatterplot(x="num_kmers", y="time", hue="alg", style="k", data=dfx, marker=2) g.set_yscale("log") g.set_xscale("log") plt.title("Random data, time per window, omega = 1.25") plt.xlabel("# of generated k-mers") plt.ylabel("Time, microseconds") #plt.savefig("fig/omega1.25.pdf", format="pdf") dfx = df[df["omega"] == 1.5] g = sns.scatterplot(x="num_kmers", y="time", hue="alg", style="k", data=dfx, marker=2) g.set_yscale("log") g.set_xscale("log") plt.title("Random data, time per window, omega = 1.5") plt.xlabel("# of generated k-mers") plt.ylabel("Time, microseconds") #plt.savefig("fig/omega1.5.pdf", format="pdf") dfx = df[df["omega"] == 1.75] g = sns.scatterplot(x="num_kmers", y="time", hue="alg", style="k", data=dfx, marker=2) g.set_yscale("log") g.set_xscale("log") plt.title("Random data, time per window, omega = 1.75") plt.xlabel("# of generated k-mers") plt.ylabel("Time, microseconds") #plt.savefig("fig/omega1.75.pdf", format="pdf") dfx = df[df["omega"] == 2.0] g = sns.scatterplot(x="num_kmers", y="time", hue="alg", style="k", data=dfx, marker=2) g.set_yscale("log") g.set_xscale("log") plt.title("Random data, time per window, omega = 2.0") plt.xlabel("# of generated k-mers") plt.ylabel("Time, microseconds") #plt.savefig("fig/omega2.0.pdf", format="pdf") # + #g = sns.FacetGrid(df, col="omega", hue="alg") #g.map(sns.scatterplot, "num_kmers", "time", alpha=.5).set(yscale = 'log', xscale = 'log') # -
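# The per-omega cells above repeat the same plotting code; the same figures can
# be produced in one loop over the omega values used above (a small sketch,
# same data frame and plot settings):

# +
for omega in [1.0, 1.25, 1.5, 1.75, 2.0]:
    plt.figure(figsize=(11.7, 8.27))
    g = sns.scatterplot(x="num_kmers", y="time", hue="alg", style="k",
                        data=df[df["omega"] == omega], marker=2)
    g.set_yscale("log")
    g.set_xscale("log")
    plt.title(f"Random data, time per window, omega = {omega}")
    plt.xlabel("# of generated k-mers")
    plt.ylabel("Time, microseconds")
    plt.show()
# -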
alg_plots.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (moneyball) # language: python # name: moneyball # --- # # Feature Engineering: Feature Creation # ### Read in text # + import pandas as pd data = pd.read_csv("SMSSpamCollection.tsv", sep='\t') data.columns = ['label', 'body_text'] # - # ### Create feature for text message length # + # character length without white spaces data['body_len'] = data['body_text'].apply(lambda x: len(x) - x.count(" ")) data.head() # - # ### Create feature for % of text that is punctuation # + import string def count_punct(text): # return 1 if found punctuation count = sum([1 for char in text if char in string.punctuation]) return round(count/(len(text) - text.count(" ")), 3)*100 data['punct%'] = data['body_text'].apply(lambda x: count_punct(x)) data.head() # - # ### Evaluate created features from matplotlib import pyplot import numpy as np # %matplotlib inline
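# With the features in place, a quick check is to compare their distributions
# for spam vs. ham; a minimal sketch for `body_len` (the bin edges are an
# arbitrary choice):

# +
bins = np.linspace(0, 200, 40)

pyplot.hist(data[data['label'] == 'spam']['body_len'], bins, alpha=0.5, density=True, label='spam')
pyplot.hist(data[data['label'] == 'ham']['body_len'], bins, alpha=0.5, density=True, label='ham')
pyplot.legend(loc='upper left')
pyplot.title('Body length distribution by label')
pyplot.show()
# -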
nlp/Ex_Files_NLP_Python_ML_EssT/Exercise Files/Ch04/04_02/End/04_02.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: tfq # language: python # name: tfq # --- # ## 11.3 양자인공지능 알고리즘 구현 # ### 11.3.2 양자인공지능 학습하기 # #### 양자인공지능 패키지 설치하기 # !conda install python=3.7 # !pip install tensorflow==2.4.1 # !pip install tensorflow-quantum # #### 관련 패키지 부르기 # + import tensorflow_quantum as tfq import cirq from cirq.contrib.svg import SVGCircuit import tensorflow as tf from tensorflow.keras.layers import Dense from tensorflow.keras import Sequential, Input, Model import sympy import numpy as np # %matplotlib inline import matplotlib.pyplot as plt # - # #### 입력 데이터와 기대 출력 레이블 만들기 X_Classic = np.array([[0], [1]], dtype=np.float32) # + Qubit = cirq.GridQubit(0, 0) Initial_rotation_phase = np.random.uniform(0, 2 * np.pi) X_Quantum_circuit = cirq.Circuit( cirq.ry(Initial_rotation_phase)(Qubit) ) X_Quantum = tfq.convert_to_tensor( [X_Quantum_circuit]*len(X_Classic)) Y = np.array([[1], [-1]], dtype=np.float32) # - # #### 전통인공지능 부분 만들기: 파라미터 제어 뉴럴넷 # + def make_classical_NN(x): model = Sequential() model.add(Dense(2, activation='relu')) model.add(Dense(1)) return model(x) Classical_NN_In = Input(shape=(1,), dtype=tf.dtypes.float32, name='commands_input') Classical_NN = make_classical_NN(Classical_NN_In) # - # #### 파라미터를 포함하는 양자회로 만들기 W = sympy.symbols('W') Quantum_circuit = cirq.Circuit(cirq.ry(W)(Qubit)) SVGCircuit(Quantum_circuit) # ### 하이브리드 인공지능 만들기: 양자 및 전통 인공지능 복합형 Quantum_In = Input(shape=(),dtype=tf.string, name='circuits_input') Quantum_layer = tfq.layers.ControlledPQC(Quantum_circuit, operators = cirq.Z(Qubit)) expectation = Quantum_layer([Quantum_In, Classical_NN]) model = Model(inputs=[Quantum_In, Classical_NN_In], outputs=expectation) tf.keras.utils.plot_model(model, show_shapes=True, dpi=70) optimizer = tf.keras.optimizers.Adam(learning_rate=0.05) loss = tf.keras.losses.MeanSquaredError() model.compile(optimizer=optimizer, loss=loss) history = model.fit(x=[X_Quantum, X_Classic], y=Y, epochs=50, verbose=0) plt.plot(history.history['loss']) plt.title("Training for Quantum-Classic AI") plt.xlabel("Epoch") plt.ylabel("Loss") plt.grid() plt.show() # - 모델의 예측 결과 확인 Y_pred = model([X_Quantum, X_Classic]).numpy() Err = Y - Y_pred print('Predicted_Y =', Y_pred.flatten()) print('Y - Predicted_Y =', Err.flatten()) # --- # #### 전체 코드 # + import tensorflow_quantum as tfq import cirq from cirq.contrib.svg import SVGCircuit import tensorflow as tf from tensorflow.keras.layers import Dense from tensorflow.keras import Sequential, Input, Model import sympy import numpy as np # %matplotlib inline import matplotlib.pyplot as plt ## 입력 데이터와 기대 출력 레이블 만들기 X_Classic = np.array([[0], [1]], dtype=np.float32) Qubit = cirq.GridQubit(0, 0) Initial_rotation_phase = np.random.uniform(0, 2 * np.pi) X_Quantum_circuit = cirq.Circuit( cirq.ry(Initial_rotation_phase)(Qubit) ) X_Quantum = tfq.convert_to_tensor( [X_Quantum_circuit]*len(X_Classic)) Y = np.array([[1], [-1]], dtype=np.float32) ## 전통인공지능 부분 만들기: 파라미터 제어 뉴럴넷 def make_classical_NN(x): model = Sequential() model.add(Dense(2, activation='relu')) model.add(Dense(1)) return model(x) Classical_NN_In = Input(shape=(1,), dtype=tf.dtypes.float32, name='commands_input') Classical_NN = make_classical_NN(Classical_NN_In) ## 파라미터를 포함하는 양자회로 만들기 W = sympy.symbols('W') Quantum_circuit = cirq.Circuit(cirq.ry(W)(Qubit)) SVGCircuit(Quantum_circuit) ## 하이브리드 인공지능 부분 만들기: 양자 및 전통 인공지능 복합형 Quantum_In = Input(shape=(),dtype=tf.string, 
name='circuits_input') Quantum_layer = tfq.layers.ControlledPQC(Quantum_circuit, operators = cirq.Z(Qubit)) expectation = Quantum_layer([Quantum_In, Classical_NN]) model = Model(inputs=[Quantum_In, Classical_NN_In], outputs=expectation) ## 모델 학습 및 결과 확인하기 optimizer = tf.keras.optimizers.Adam(learning_rate=0.05) loss = tf.keras.losses.MeanSquaredError() model.compile(optimizer=optimizer, loss=loss) history = model.fit(x=[X_Quantum, X_Classic], y=Y, epochs=50, verbose=0) plt.plot(history.history['loss']) plt.title("Training for Quantum-Classic AI") plt.xlabel("Epoch") plt.ylabel("Loss") plt.grid() plt.show() Y_pred = model([X_Quantum, X_Classic]).numpy() Err = Y - Y_pred print('Predicted_Y =', Y_pred.flatten()) print('Y - Predicted_Y =', Err.flatten()) # -
nb_ex11_3_qai_hybrid.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Self-Driving Car Engineer Nanodegree # # # ## Project 2: **Advanced Lane Finding** # *** # The goals / steps of this project are the following: # # * Compute the camera calibration matrix and distortion coefficients given a set of chessboard images. # * Apply a distortion correction to raw images. # * Use color transforms, gradients, etc., to create a thresholded binary image. # * Apply a perspective transform to rectify binary image ("birds-eye view"). # * Detect lane pixels and fit to find the lane boundary. # * Determine the curvature of the lane and vehicle position with respect to center. # * Warp the detected lane boundaries back onto the original image. # * Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position. # # ## Import Packages #importing some useful packages import matplotlib.pyplot as plt import matplotlib.image as mpimg import numpy as np import cv2 import os # %matplotlib inline # ## Read in a Sample Image # + #reading in an image image = mpimg.imread('test_images/straight_lines1.jpg') #printing out some stats and plotting print('This image is:', type(image), 'with dimensions:', image.shape) plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray') # - # ## Helper Functions from Project 1 # + import math def grayscale(img): """ Applies the Grayscale transform This will return an image with only one color channel but NOTE: to see the returned image as grayscale (assuming your grayscaled image is called 'gray') you should call plt.imshow(gray, cmap='gray') """ return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) # Or use BGR2GRAY if you read an image with cv2.imread() # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) def region_of_interest(img, vertices): """ Applies an image mask. Only keeps the region of the image defined by the polygon formed from `vertices`. The rest of the image is set to black. `vertices` should be a numpy array of integer points. """ #defining a blank mask to start with mask = np.zeros_like(img) #defining a 3 channel or 1 channel color to fill the mask with depending on the input image if len(img.shape) > 2: channel_count = img.shape[2] # i.e. 3 or 4 depending on your image ignore_mask_color = (255,) * channel_count else: ignore_mask_color = 255 #filling pixels inside the polygon defined by "vertices" with the fill color cv2.fillPoly(mask, vertices, ignore_mask_color) #returning the image only where mask pixels are nonzero masked_image = cv2.bitwise_and(img, mask) masked_color = np.dstack((masked_image, masked_image, masked_image)) * 255 return masked_image, masked_color def weighted_img(img, initial_img, α=0.8, β=1., γ=0.): """ `img` is the output of the hough_lines(), An image with lines drawn on it. Should be a blank image (all black) with lines drawn on it. `initial_img` should be the image before any processing. The result image is computed as follows: initial_img * α + img * β + γ NOTE: initial_img and img must be the same shape! 
""" return cv2.addWeighted(initial_img, α, img, β, γ) # - # ## Camera Calibration # ### Extract Chessboard Corners # + import glob # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0) nrows = 6 ncols = 9 objp = np.zeros((nrows*ncols,3), np.float32) objp[:,:2] = np.mgrid[0:ncols, 0:nrows].T.reshape(-1,2) # Arrays to store object points and image points from all the images. objpoints = [] # 3d points in real world space imgpoints = [] # 2d points in image plane. # Make a list of calibration images images = glob.glob('camera_cal/calibration*.jpg') # Step through the list and search for chessboard corners for idx, fname in enumerate(images): img = cv2.imread(fname) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # Find the chessboard corners ret, corners = cv2.findChessboardCorners(gray, (ncols,nrows), None) # If found, add object points, image points if ret == True: objpoints.append(objp) imgpoints.append(corners) # Draw and display the corners cv2.drawChessboardCorners(img, (ncols,nrows), corners, ret) #write_name = 'corners_found'+str(idx)+'.jpg' #cv2.imwrite(write_name, img) # #Uncomment the two lines below to view the result #cv2.imshow('img', img) #cv2.waitKey(500) cv2.destroyAllWindows() # - # ### Compute Camera Calibration Matrix # + import pickle # %matplotlib inline # Test undistortion on an image img = cv2.imread('camera_cal/calibration1.jpg') img_size = (img.shape[1], img.shape[0]) # Do camera calibration given object points and image points ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None) dst = cv2.undistort(img, mtx, dist, None, mtx) cv2.imwrite('output_images/undistorted_chessboard.jpg',dst) # Save the camera calibration result for later use dist_pickle = {} dist_pickle["mtx"] = mtx dist_pickle["dist"] = dist pickle.dump( dist_pickle, open( "camera_cal/wide_dist_pickle.p", "wb" ) ) #dst = cv2.cvtColor(dst, cv2.COLOR_BGR2RGB) # Visualize undistortion f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10)) ax1.imshow(img) ax1.set_title('Original Image', fontsize=30) ax2.imshow(dst) ax2.set_title('Undistorted Image', fontsize=30) # - # ### Derivation of polynomial transformation from image coordinates to real coordinates # # While most of the content in the helper functions is based on code and theory described in the lessons, # it might be useful to describe the logic used to tranform polynomials from the image coordinates to the # real coordinates since this was not explicitly described in the course lessons. This section therefore, # provides a brief description of the approach used to perform the tranformation. # # Let the equation of the polynomial in the image coordinates be: # # \begin{equation} # x_i = A_i y_i^{2} + B_i y_i + C_i # \end{equation} # # In the above equation, $x$ is the dependent variable and $y$ is the independent # variable. Let the equation of the corresponding polynomial in the real coordinates be: # # \begin{equation} # x_r = A_r y_r^{2} + B_r y_r + C_r # \end{equation} # # Let $\lambda_x$ and $\lambda_y$ be the $x$ and $y$ distances (in meters) per pixel. 
The $x$ and $y$ # coordinates can therefore be transformed as: # # \begin{equation} # x_r = \lambda_{x} x_i \\ # y_r = \lambda_{y} x_i # \end{equation} # # If we substitute the above equations in to the polynomial expression in # real coordinates, we get: # # \begin{equation} # x_i \lambda_x = A_i\lambda_y^2 y_i^{2} + B_i \lambda_y y_i + C_i \\ # x_i = \frac{A_i\lambda_y^{2}}{\lambda_x} y_i^{2} + \frac{B_i \lambda_y}{\lambda_x} y_i + \frac{C_i}{\lambda_x} # \end{equation} # # Comparing the above equation with the equation for the image coordinates and equating the coefficients, we get: # # \begin{equation} # A_r = \frac{A_i\lambda_y^{2}}{\lambda_x} \\ # B_r = \frac{B_i \lambda_y}{\lambda_x} \\ # C_r = \frac{C_i}{\lambda_x} # \end{equation} # ### Helper Functions for Project 2 # + def edge_thresholds(img): # Extract R-channel R = img[:,:,0] # Extract S-channel hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS) s_channel = hls[:,:,2] # Grayscale image gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) # Sobel x sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0) # Take the derivative in x abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx)) # Threshold x gradient thresh_min = 50 thresh_max = 100 sxbinary = np.zeros_like(scaled_sobel) sxbinary[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1 # Threshold R channel thresh_R = (150, 255) binary_R = np.zeros_like(R) binary_R[(R > thresh_R[0]) & (R <= thresh_R[1])] = 1 # Threshold S channel s_thresh_min = 100 s_thresh_max = 255 s_binary = np.zeros_like(s_channel) s_binary[(s_channel >= s_thresh_min) & (s_channel <= s_thresh_max)] = 1 # Combine multiple binary images combined_binary = np.zeros_like(sxbinary) combined_binary[((s_binary == 1) & binary_R == 1) | (sxbinary == 1) ] = 1 #combined_binary[((s_binary == 1) & binary_R == 1)] = 1 #combined_binary[((s_binary == 1)) | (sxbinary == 1) ] = 1 color_image = np.dstack((combined_binary, combined_binary, combined_binary)) * 255 return binary_R, s_binary, sxbinary, combined_binary, color_image def topview(image): """ Perform perspective transformation to obtain a top view of the image """ src = np.float32([[600, 444], [675, 444], [1041, 676], [268, 676]]) offsetv = 0 offseth = 300 img_size = (image.shape[1], image.shape[0]) dst = np.float32([[offseth, offsetv], [img_size[0]-offseth, offsetv], [img_size[0]-offseth, img_size[1]-offsetv], [offseth, img_size[1]-offsetv]]) M = cv2.getPerspectiveTransform(src, dst) Minv = cv2.getPerspectiveTransform(dst, src) warped = cv2.warpPerspective(image, M, image.shape[1::-1], flags=cv2.INTER_LINEAR) return warped, M, Minv def fit_poly(img_shape, leftx, lefty, rightx, righty): # Fit a second order polynomial to each with np.polyfit() ### left_fit = np.polyfit(lefty, leftx, 2) right_fit = np.polyfit(righty, rightx, 2) # Generate x and y values for plotting ploty = np.linspace(0, img_shape[0]-1, img_shape[0]) #Calc both polynomials using ploty, left_fit and right_fit ### left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2] right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2] middle_fitx = 0.5*(left_fitx + right_fitx) middle_fit = np.polyfit(middle_fitx, ploty, 2) return left_fitx, right_fitx, middle_fitx, ploty, left_fit, right_fit, middle_fit def find_lane_pixels(binary_warped): # Take a histogram of the bottom half of the image histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0) # Create an output image to draw on and 
visualize the result stacked_windows_image = np.dstack((binary_warped, binary_warped, binary_warped)) # Find the peak of the left and right halves of the histogram # These will be the starting point for the left and right lines midpoint = np.int(histogram.shape[0]//2) leftx_base = np.argmax(histogram[:midpoint]) rightx_base = np.argmax(histogram[midpoint:]) + midpoint # Choose the number of sliding windows nwindows = 9 # Set the width of the windows +/- margin margin = 100 # Set minimum number of pixels found to recenter window minpix = 50 # Set height of windows - based on nwindows above and image shape window_height = np.int(binary_warped.shape[0]//nwindows) # Identify the x and y positions of all nonzero pixels in the image nonzero = binary_warped.nonzero() nonzeroy = np.array(nonzero[0]) nonzerox = np.array(nonzero[1]) # Current positions to be updated later for each window in nwindows leftx_current = leftx_base rightx_current = rightx_base # Create empty lists to receive left and right lane pixel indices left_lane_inds = [] right_lane_inds = [] # Step through the windows one by one for window in range(nwindows): # Identify window boundaries in x and y (and right and left) win_y_low = binary_warped.shape[0] - (window+1)*window_height win_y_high = binary_warped.shape[0] - window*window_height win_xleft_low = leftx_current - margin win_xleft_high = leftx_current + margin win_xright_low = rightx_current - margin win_xright_high = rightx_current + margin # Draw the windows on the visualization image cv2.rectangle(stacked_windows_image,(win_xleft_low,win_y_low), (win_xleft_high,win_y_high),(0,255,0), 2) cv2.rectangle(stacked_windows_image,(win_xright_low,win_y_low), (win_xright_high,win_y_high),(0,255,0), 2) good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0] good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0] # Append these indices to the lists left_lane_inds.append(good_left_inds) right_lane_inds.append(good_right_inds) #If number of pixes in windows exceeds minpix pixels, recenter next window if len(good_left_inds) > minpix: leftx_current = np.int(np.mean(nonzerox[good_left_inds])) if len(good_right_inds) > minpix: rightx_current = np.int(np.mean(nonzerox[good_right_inds])) # Concatenate the arrays of indices (previously was a list of lists of pixels) try: left_lane_inds = np.concatenate(left_lane_inds) right_lane_inds = np.concatenate(right_lane_inds) except ValueError: # Avoids an error if the above is not implemented fully pass stacked_windows_image[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0] stacked_windows_image[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255] # Extract left and right line pixel positions leftx = nonzerox[left_lane_inds] lefty = nonzeroy[left_lane_inds] rightx = nonzerox[right_lane_inds] righty = nonzeroy[right_lane_inds] return leftx, lefty, rightx, righty, left_lane_inds, right_lane_inds, stacked_windows_image def search_around_poly(binary_warped): # Grab activated pixels nonzero = binary_warped.nonzero() nonzeroy = np.array(nonzero[0]) nonzerox = np.array(nonzero[1]) # Extract the lane points leftx, lefty, rightx, righty, left_lane_inds, right_lane_inds, stacked_windows_image = find_lane_pixels(binary_warped) # Fit new polynomials left_fitx, right_fitx, middle_fitx, ploty, left_fit, right_fit, middle_fit = fit_poly(binary_warped.shape, leftx, 
lefty, rightx, righty) # Create an image to draw on and an image to show the selection window binary_warped_color = np.dstack((binary_warped, binary_warped, binary_warped))*255 window_img = np.zeros_like(binary_warped_color) # Identify the region between the left and right lanes left_region_limits = np.array([np.transpose(np.vstack([left_fitx, ploty]))]) right_region_limits = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))]) region_pts = np.hstack((left_region_limits, right_region_limits)) # Draw the polynomial fit for middle lane line mid_lower_limits = np.array([np.transpose(np.vstack([middle_fitx-5, ploty]))]) mid_upper_limits = np.array([np.flipud(np.transpose(np.vstack([middle_fitx+5, ploty])))]) mid_region_pts = np.hstack((mid_lower_limits, mid_upper_limits)) # Draw the polynomial fit for left lane line left_lower_limits = np.array([np.transpose(np.vstack([left_fitx-5, ploty]))]) left_upper_limits = np.array([np.flipud(np.transpose(np.vstack([left_fitx+5, ploty])))]) left_region_pts = np.hstack((left_lower_limits, left_upper_limits)) # Draw the polynomial fit for right lane line right_lower_limits = np.array([np.transpose(np.vstack([right_fitx-5, ploty]))]) right_upper_limits = np.array([np.flipud(np.transpose(np.vstack([right_fitx+5, ploty])))]) right_region_pts = np.hstack((right_lower_limits, right_upper_limits)) # Write left, middle and right polynomial fits to image using different colors cv2.fillPoly(stacked_windows_image, np.int_([mid_region_pts]), (255,255, 0)) #yellow cv2.fillPoly(stacked_windows_image, np.int_([left_region_pts]), (255, 105, 180)) #pink cv2.fillPoly(stacked_windows_image, np.int_([right_region_pts]), (128, 0, 128)) #purple # Draw the lane onto the warped blank image cv2.fillPoly(window_img, np.int_([region_pts]), (0,255, 0)) result = cv2.addWeighted(binary_warped_color, 1, window_img, 0.3, 0) # Draw the lane onto the warped blank image result[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0] result[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255] return result, stacked_windows_image, left_fit, right_fit, middle_fit, \ left_fitx, right_fitx, middle_fitx, ploty def convert_to_real(poly_fit, xm_per_pix, ym_per_pix, poly_degree): """ Convert from image coordinates to real coordinates using scaling factors """ poly_fit_real = [0.0] * (poly_degree+1) for d in range(poly_degree+1): poly_fit_real[d] = poly_fit[d] * xm_per_pix / (ym_per_pix ** (poly_degree - d)) return poly_fit_real def measure_curvature(poly_fit, y_eval): ''' Calculates the curvature of polynomial given the polynomial coefficients ''' poly_curverad = ((1.0 + (2.0*poly_fit[0]*y_eval + poly_fit[1])**2)**1.5)/(2.0*poly_fit[0]) return poly_curverad def process_single_image(image, write_to_file = False): alpha = 1 beta = 1 gamma = 0 # Specify corners of the quadrilateral masked region of interest imshape = image.shape mask_vertices = np.array([[(0.05*imshape[1],imshape[0]), (0.47*imshape[1], 0.6*imshape[0]), (0.53*imshape[1], 0.6*imshape[0]), (0.95*imshape[1], imshape[0])]], dtype=np.int32) # Pixel to real world conversion factors ym_per_pix = 30/720 # meters per pixel in y dimension xm_per_pix = 3.7/700 # meters per pixel in x dimension # Write the original image to disk for reference if write_to_file: cv2.imwrite('output_images/original_image.png', cv2.cvtColor(image, cv2.COLOR_RGB2BGR)) # Undistort the original image by applying camera calibrtion matrix undst = cv2.undistort(image, mtx, dist, None, mtx) if write_to_file: 
cv2.imwrite('output_images/undistorted.png', cv2.cvtColor(undst, cv2.COLOR_RGB2BGR)) # Use a combination of thresholds and color gradients to obtain an image # that retains the lane edges while eliminating irrelevant edges as much as possible binary_R, s_binary, sxbinary, edges_binary, edges_color = edge_thresholds(undst) if write_to_file: cv2.imwrite('output_images/edges.png', edges_color) R_color = np.dstack((binary_R, binary_R, binary_R))*255 s_color = np.dstack((s_binary, s_binary, s_binary))*255 sx_color = np.dstack((sxbinary, sxbinary, sxbinary))*255 cv2.imwrite('output_images/R.jpg', R_color) cv2.imwrite('output_images/s.jpg', s_color) cv2.imwrite('output_images/sx.jpg', sx_color) # Remove all edges outside the region of interest defined by the vertices masked_image, masked_color = region_of_interest(edges_binary, mask_vertices) if write_to_file: cv2.imwrite('output_images/masked_image.png', masked_color) # Transform the masked image to obtain a top view of the image through perspective transform topview_image, M, Minv = topview(masked_image) topview_color = np.dstack((topview_image, topview_image, topview_image)) * 255 if write_to_file: cv2.imwrite('output_images/topview.png', topview_color) # Search the top view image to identify the lane region and fit polynomials to # the left, right and middle of the lane topview_region, stacked_windows_image, left_fit, right_fit, middle_fit, \ left_fitx, right_fitx, middle_fitx, ploty = search_around_poly(topview_image) if write_to_file: cv2.imwrite('output_images/topview_region.png', topview_region) cv2.imwrite('output_images/windowed_image.png', cv2.cvtColor(stacked_windows_image, cv2.COLOR_RGB2BGR)) # Convert polynomial coefficients from image to real coordinate system left_fit_real = convert_to_real(left_fit, xm_per_pix, ym_per_pix, 2) right_fit_real = convert_to_real(right_fit, xm_per_pix, ym_per_pix, 2) middle_fit_real = convert_to_real(middle_fit, xm_per_pix, ym_per_pix, 2) # Compute curvature for left, right and middle lines left_curverad = measure_curvature(left_fit_real, (image.shape[0])*ym_per_pix) right_curverad = measure_curvature(right_fit_real, (image.shape[0])*ym_per_pix) middle_curverad = measure_curvature(middle_fit_real, (image.shape[0])*ym_per_pix) if False: print('Left lane curvature (m) = ', left_curverad) print('Right lane curvature (m) = ', right_curverad) print('Middle lane curvature (m) = ', middle_curverad) # Transform the warped image back into the original image warped_back = cv2.warpPerspective(topview_region, Minv, topview_region.shape[1::-1], flags=cv2.INTER_LINEAR) if write_to_file: cv2.imwrite('output_images/lane_region.png', cv2.cvtColor(warped_back, cv2.COLOR_RGB2BGR)) # Compute mid point of the lane mid_lane = np.array([[middle_fitx[-1], ploty[-1]]], dtype = "float32") mid_lane = np.array([mid_lane]) midpoint_lane = cv2.perspectiveTransform(mid_lane, Minv) # Compute midpoint of the image (assume this is the center of the car) mid_image = np.array([[image.shape[1]//2, ploty[-1]]], dtype = "float32") mid_image = np.array([mid_image]) midpoint_image = cv2.perspectiveTransform(mid_image, Minv) offset = (midpoint_lane[0][0][0] - midpoint_image[0][0][0])*xm_per_pix # Combined the lane region markings to the original image result = weighted_img(warped_back, image, alpha, 1, gamma) if write_to_file: cv2.imwrite('output_images/weighted_image.png', cv2.cvtColor(result, cv2.COLOR_RGB2BGR)) # Write radius of curvature and offset onto the image font = cv2.FONT_HERSHEY_SIMPLEX radius_loc = 
(int(0.1*image.shape[0]),int(0.05*image.shape[1])) offset_loc = (int(0.1*image.shape[0]),int(0.1*image.shape[1])) fontScale = 1.5 fontColor = (255,255,255) lineType = 2 radius_text = 'Radius of curvature = ' + str(middle_curverad) + '(m)' if offset > 0: offset_text = 'Vehicle is {0:0.2f}m right of center'.format(offset) else: offset_text = 'Vehicle is {0:0.2f}m left of center'.format(-offset) cv2.putText(result,radius_text, radius_loc, font, fontScale, fontColor, lineType) cv2.putText(result, offset_text, offset_loc, font, fontScale, fontColor, lineType) if write_to_file: cv2.imwrite('output_images/annotated_image.png', cv2.cvtColor(result, cv2.COLOR_RGB2BGR)) return result # - # ### Test Sample Image #for imgpath in os.listdir("test_images/"): for imgpath in ["straight_lines1.jpg"]: fullpath = os.path.join("test_images", imgpath) basename, ext = os.path.splitext(imgpath) imgpath_output = basename + '_output' + ext fullpath_output = os.path.join("output_images", imgpath_output) image = mpimg.imread(fullpath) result = process_single_image(image, write_to_file = True) cv2.imwrite(fullpath_output, cv2.cvtColor(result, cv2.COLOR_RGB2BGR)) plt.imshow(result) # ### Create Video # Import everything needed to edit/save/watch video clips from moviepy.editor import VideoFileClip from IPython.display import HTML # Process video by proecessing the individual images that constitute the video video_input = 'project_video.mp4' video_output = 'output_images/project_video_output.mp4' clip1 = VideoFileClip(video_input).subclip(0, None) project_clip = clip1.fl_image(process_single_image) #NOTE: this function expects color images!! # %time project_clip.write_videofile(video_output, audio=False) # Embed video HTML(""" <video width="960" height="540" controls> <source src="{0}"> </video> """.format(video_output))
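# ### Sanity check: pixel-to-real polynomial conversion

# As a quick, self-contained check of the coordinate scaling implemented in `convert_to_real`
# (which maps pixel-space coefficients A, B, C to real-space coefficients A·λx/λy², B·λx/λy, C·λx),
# the sketch below evaluates a hypothetical pixel-space polynomial at one point, scales that point
# to meters directly, and confirms the converted polynomial gives the same answer. The coefficient
# values and the sample y-position are illustrative only.

# +
xm_per_pix = 3.7/700  # meters per pixel in x dimension (same values used in process_single_image)
ym_per_pix = 30/720   # meters per pixel in y dimension

example_fit = [2e-4, -0.05, 400.0]  # hypothetical pixel-space coefficients [A, B, C]
example_fit_real = convert_to_real(example_fit, xm_per_pix, ym_per_pix, 2)

y_pix = 600.0
x_pix = example_fit[0]*y_pix**2 + example_fit[1]*y_pix + example_fit[2]

# Route 1: scale the evaluated pixel coordinate to meters
x_real_direct = x_pix * xm_per_pix
# Route 2: evaluate the converted polynomial at the scaled y coordinate
y_real = y_pix * ym_per_pix
x_real_poly = example_fit_real[0]*y_real**2 + example_fit_real[1]*y_real + example_fit_real[2]

# Both routes should agree (up to floating point error)
assert abs(x_real_direct - x_real_poly) < 1e-6
print(x_real_direct, x_real_poly)
# -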
P2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Classifying Surnames with a Convolutional Neural Network # ## Imports # + from argparse import Namespace from collections import Counter import json import os import string import numpy as np import pandas as pd import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.utils.data import Dataset, DataLoader from tqdm.notebook import tqdm as tqdm_notebook # - # ### Vectorize Data class Vocabulary(object): """Class to process text and extract vocabulary for mapping""" def __init__(self, token_to_idx=None, add_unk=True, unk_token="<UNK>"): """ Args: token_to_idx (dict): a pre-existing map of tokens to indices add_unk (bool): a flag that indicates whether to add the UNK token unk_token (str): the UNK token to add into the Vocabulary """ if token_to_idx is None: token_to_idx = {} self._token_to_idx = token_to_idx self._idx_to_token = {idx: token for token, idx in self._token_to_idx.items()} self._add_unk = add_unk self._unk_token = unk_token self.unk_index = -1 if add_unk: self.unk_index = self.add_token(unk_token) def to_serializable(self): """ returns a dictionary that can be serialized """ return {'token_to_idx': self._token_to_idx, 'add_unk': self._add_unk, 'unk_token': self._unk_token} @classmethod def from_serializable(cls, contents): """ instantiates the Vocabulary from a serialized dictionary """ return cls(**contents) def add_token(self, token): """Update mapping dicts based on the token. Args: token (str): the item to add into the Vocabulary Returns: index (int): the integer corresponding to the token """ try: index = self._token_to_idx[token] except KeyError: index = len(self._token_to_idx) self._token_to_idx[token] = index self._idx_to_token[index] = token return index def add_many(self, tokens): """Add a list of tokens into the Vocabulary Args: tokens (list): a list of string tokens Returns: indices (list): a list of indices corresponding to the tokens """ return [self.add_token(token) for token in tokens] def lookup_token(self, token): """Retrieve the index associated with the token or the UNK index if token isn't present. 
Args: token (str): the token to look up Returns: index (int): the index corresponding to the token Notes: `unk_index` needs to be >=0 (having been added into the Vocabulary) for the UNK functionality """ if self.unk_index >= 0: return self._token_to_idx.get(token, self.unk_index) else: return self._token_to_idx[token] def lookup_index(self, index): """Return the token associated with the index Args: index (int): the index to look up Returns: token (str): the token corresponding to the index Raises: KeyError: if the index is not in the Vocabulary """ if index not in self._idx_to_token: raise KeyError("the index (%d) is not in the Vocabulary" % index) return self._idx_to_token[index] def __str__(self): return "<Vocabulary(size=%d)>" % len(self) def __len__(self): return len(self._token_to_idx) class SurnameVectorizer(object): """ The Vectorizer which coordinates the Vocabularies and puts them to use""" def __init__(self, surname_vocab, nationality_vocab, max_surname_length): """ Args: surname_vocab (Vocabulary): maps characters to integers nationality_vocab (Vocabulary): maps nationalities to integers max_surname_length (int): the length of the longest surname """ self.surname_vocab = surname_vocab self.nationality_vocab = nationality_vocab self._max_surname_length = max_surname_length def vectorize(self, surname): """ Args: surname (str): the surname Returns: one_hot_matrix (np.ndarray): a matrix of one-hot vectors """ one_hot_matrix_size = (len(self.surname_vocab), self._max_surname_length) one_hot_matrix = np.zeros(one_hot_matrix_size, dtype=np.float32) for position_index, character in enumerate(surname): character_index = self.surname_vocab.lookup_token(character) one_hot_matrix[character_index][position_index] = 1 return one_hot_matrix @classmethod def from_dataframe(cls, surname_df): """Instantiate the vectorizer from the dataset dataframe Args: surname_df (pandas.DataFrame): the surnames dataset Returns: an instance of the SurnameVectorizer """ surname_vocab = Vocabulary(unk_token="@") nationality_vocab = Vocabulary(add_unk=False) max_surname_length = 0 for index, row in surname_df.iterrows(): max_surname_length = max(max_surname_length, len(row.surname)) for letter in row.surname: surname_vocab.add_token(letter) nationality_vocab.add_token(row.nationality) return cls(surname_vocab, nationality_vocab, max_surname_length) @classmethod def from_serializable(cls, contents): surname_vocab = Vocabulary.from_serializable(contents['surname_vocab']) nationality_vocab = Vocabulary.from_serializable(contents['nationality_vocab']) return cls(surname_vocab=surname_vocab, nationality_vocab=nationality_vocab, max_surname_length=contents['max_surname_length']) def to_serializable(self): return {'surname_vocab': self.surname_vocab.to_serializable(), 'nationality_vocab': self.nationality_vocab.to_serializable(), 'max_surname_length': self._max_surname_length} # + class SurnameDataset(Dataset): def __init__(self, surname_df, vectorizer): """ Args: name_df (pandas.DataFrame): the dataset vectorizer (SurnameVectorizer): vectorizer instatiated from dataset """ self.surname_df = surname_df self._vectorizer = vectorizer self.train_df = self.surname_df[self.surname_df.split=='train'] self.train_size = len(self.train_df) self.val_df = self.surname_df[self.surname_df.split=='val'] self.validation_size = len(self.val_df) self.test_df = self.surname_df[self.surname_df.split=='test'] self.test_size = len(self.test_df) self._lookup_dict = {'train': (self.train_df, self.train_size), 'val': (self.val_df, 
self.validation_size), 'test': (self.test_df, self.test_size)} self.set_split('train') # Class weights class_counts = surname_df.nationality.value_counts().to_dict() def sort_key(item): return self._vectorizer.nationality_vocab.lookup_token(item[0]) sorted_counts = sorted(class_counts.items(), key=sort_key) frequencies = [count for _, count in sorted_counts] self.class_weights = 1.0 / torch.tensor(frequencies, dtype=torch.float32) @classmethod def load_dataset_and_make_vectorizer(cls, surname_csv): """Load dataset and make a new vectorizer from scratch Args: surname_csv (str): location of the dataset Returns: an instance of SurnameDataset """ surname_df = pd.read_csv(surname_csv) train_surname_df = surname_df[surname_df.split=='train'] return cls(surname_df, SurnameVectorizer.from_dataframe(train_surname_df)) @classmethod def load_dataset_and_load_vectorizer(cls, surname_csv, vectorizer_filepath): """Load dataset and the corresponding vectorizer. Used in the case in the vectorizer has been cached for re-use Args: surname_csv (str): location of the dataset vectorizer_filepath (str): location of the saved vectorizer Returns: an instance of SurnameDataset """ surname_df = pd.read_csv(surname_csv) vectorizer = cls.load_vectorizer_only(vectorizer_filepath) return cls(surname_df, vectorizer) @staticmethod def load_vectorizer_only(vectorizer_filepath): """a static method for loading the vectorizer from file Args: vectorizer_filepath (str): the location of the serialized vectorizer Returns: an instance of SurnameDataset """ with open(vectorizer_filepath) as fp: return SurnameVectorizer.from_serializable(json.load(fp)) def save_vectorizer(self, vectorizer_filepath): """saves the vectorizer to disk using json Args: vectorizer_filepath (str): the location to save the vectorizer """ with open(vectorizer_filepath, "w") as fp: json.dump(self._vectorizer.to_serializable(), fp) def get_vectorizer(self): """ returns the vectorizer """ return self._vectorizer def set_split(self, split="train"): """ selects the splits in the dataset using a column in the dataframe """ self._target_split = split self._target_df, self._target_size = self._lookup_dict[split] def __len__(self): return self._target_size def __getitem__(self, index): """the primary entry point method for PyTorch datasets Args: index (int): the index to the data point Returns: a dictionary holding the data point's features (x_data) and label (y_target) """ row = self._target_df.iloc[index] surname_matrix = \ self._vectorizer.vectorize(row.surname) nationality_index = \ self._vectorizer.nationality_vocab.lookup_token(row.nationality) return {'x_surname': surname_matrix, 'y_nationality': nationality_index} def get_num_batches(self, batch_size): """Given a batch size, return the number of batches in the dataset Args: batch_size (int) Returns: number of batches in the dataset """ return len(self) // batch_size def generate_batches(dataset, batch_size, shuffle=True, drop_last=True, device="cpu"): """ A generator function which wraps the PyTorch DataLoader. It will ensure each tensor is on the write device location. 
""" dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last) for data_dict in dataloader: out_data_dict = {} for name, tensor in data_dict.items(): out_data_dict[name] = data_dict[name].to(device) yield out_data_dict # - # ### Classifier class SurnameClassifier(nn.Module): def __init__(self, initial_num_channels, num_classes, num_channels): """ Args: initial_num_channels (int): size of the incoming feature vector num_classes (int): size of the output prediction vector num_channels (int): constant channel size to use throughout network """ super(SurnameClassifier, self).__init__() self.convnet = nn.Sequential( nn.Conv1d(in_channels=initial_num_channels, out_channels=num_channels, kernel_size=3), nn.ELU(), nn.Conv1d(in_channels=num_channels, out_channels=num_channels, kernel_size=3, stride=2), nn.ELU(), nn.Conv1d(in_channels=num_channels, out_channels=num_channels, kernel_size=3, stride=2), nn.ELU(), nn.Conv1d(in_channels=num_channels, out_channels=num_channels, kernel_size=3), nn.ELU() ) self.fc = nn.Linear(num_channels, num_classes) def forward(self, x_surname, apply_softmax=False): """The forward pass of the classifier Args: x_surname (torch.Tensor): an input data tensor. x_surname.shape should be (batch, initial_num_channels, max_surname_length) apply_softmax (bool): a flag for the softmax activation should be false if used with the Cross Entropy losses Returns: the resulting tensor. tensor.shape should be (batch, num_classes) """ features = self.convnet(x_surname).squeeze(dim=2) prediction_vector = self.fc(features) if apply_softmax: prediction_vector = F.softmax(prediction_vector, dim=1) return prediction_vector # ### Training def make_train_state(args): return {'stop_early': False, 'early_stopping_step': 0, 'early_stopping_best_val': 1e8, 'learning_rate': args.learning_rate, 'epoch_index': 0, 'train_loss': [], 'train_acc': [], 'val_loss': [], 'val_acc': [], 'test_loss': -1, 'test_acc': -1, 'model_filename': args.model_state_file} def update_train_state(args, model, train_state): """Handle the training state updates. Components: - Early Stopping: Prevent overfitting. - Model Checkpoint: Model is saved if the model is better :param args: main arguments :param model: model to train :param train_state: a dictionary representing the training state values :returns: a new train_state """ # Save one model at least if train_state['epoch_index'] == 0: torch.save(model.state_dict(), train_state['model_filename']) train_state['stop_early'] = False # Save model if performance improved elif train_state['epoch_index'] >= 1: loss_tm1, loss_t = train_state['val_loss'][-2:] # If loss worsened if loss_t >= train_state['early_stopping_best_val']: # Update step train_state['early_stopping_step'] += 1 # Loss decreased else: # Save the best model if loss_t < train_state['early_stopping_best_val']: torch.save(model.state_dict(), train_state['model_filename']) # Reset early stopping step train_state['early_stopping_step'] = 0 # Stop early ? 
train_state['stop_early'] = \ train_state['early_stopping_step'] >= args.early_stopping_criteria return train_state def compute_accuracy(y_pred, y_target): y_pred_indices = y_pred.max(dim=1)[1] n_correct = torch.eq(y_pred_indices, y_target).sum().item() return n_correct / len(y_pred_indices) * 100 # + args = Namespace( # Data and Path information surname_csv="data/surnames/surnames_with_splits.csv", vectorizer_file="vectorizer.json", model_state_file="model.pth", save_dir="model_storage", # Model hyper parameters hidden_dim=100, num_channels=256, # Training hyper parameters seed=1337, learning_rate=0.001, batch_size=128, num_epochs=100, early_stopping_criteria=5, dropout_p=0.1, # Runtime options cuda=False, reload_from_files=False, expand_filepaths_to_save_dir=True, catch_keyboard_interrupt=True ) if args.expand_filepaths_to_save_dir: args.vectorizer_file = os.path.join(args.save_dir, args.vectorizer_file) args.model_state_file = os.path.join(args.save_dir, args.model_state_file) print("Expanded filepaths: ") print("\t{}".format(args.vectorizer_file)) print("\t{}".format(args.model_state_file)) # Check CUDA if not torch.cuda.is_available(): args.cuda = False args.device = torch.device("cuda" if args.cuda else "cpu") print("Using CUDA: {}".format(args.cuda)) def set_seed_everywhere(seed, cuda): np.random.seed(seed) torch.manual_seed(seed) if cuda: torch.cuda.manual_seed_all(seed) def handle_dirs(dirpath): if not os.path.exists(dirpath): os.makedirs(dirpath) # Set seed for reproducibility set_seed_everywhere(args.seed, args.cuda) # handle dirs handle_dirs(args.save_dir) # + if args.reload_from_files: # training from a checkpoint dataset = SurnameDataset.load_dataset_and_load_vectorizer(args.surname_csv, args.vectorizer_file) else: # create dataset and vectorizer dataset = SurnameDataset.load_dataset_and_make_vectorizer(args.surname_csv) dataset.save_vectorizer(args.vectorizer_file) vectorizer = dataset.get_vectorizer() classifier = SurnameClassifier(initial_num_channels=len(vectorizer.surname_vocab), num_classes=len(vectorizer.nationality_vocab), num_channels=args.num_channels) classifer = classifier.to(args.device) dataset.class_weights = dataset.class_weights.to(args.device) loss_func = nn.CrossEntropyLoss(weight=dataset.class_weights) optimizer = optim.Adam(classifier.parameters(), lr=args.learning_rate) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, mode='min', factor=0.5, patience=1) train_state = make_train_state(args) # - dataset.set_split("train") vars(dataset).keys() print(classifier) # + epoch_bar = tqdm_notebook(desc='training routine', total=args.num_epochs, position=0) dataset.set_split('train') train_bar = tqdm_notebook(desc='split=train', total=dataset.get_num_batches(args.batch_size), position=1, leave=True) dataset.set_split('val') val_bar = tqdm_notebook(desc='split=val', total=dataset.get_num_batches(args.batch_size), position=1, leave=True) try: for epoch_index in range(args.num_epochs): train_state['epoch_index'] = epoch_index # Iterate over training dataset # setup: batch generator, set loss and acc to 0, set train mode on dataset.set_split('train') batch_generator = generate_batches(dataset, batch_size=args.batch_size, device=args.device) running_loss = 0.0 running_acc = 0.0 classifier.train() for batch_index, batch_dict in enumerate(batch_generator): # the training routine is these 5 steps: # -------------------------------------- # step 1. zero the gradients optimizer.zero_grad() # step 2. 
compute the output y_pred = classifier(batch_dict['x_surname']) # step 3. compute the loss loss = loss_func(y_pred, batch_dict['y_nationality']) loss_t = loss.item() running_loss += (loss_t - running_loss) / (batch_index + 1) # step 4. use loss to produce gradients loss.backward() # step 5. use optimizer to take gradient step optimizer.step() # ----------------------------------------- # compute the accuracy acc_t = compute_accuracy(y_pred, batch_dict['y_nationality']) running_acc += (acc_t - running_acc) / (batch_index + 1) # update bar train_bar.set_postfix(loss=running_loss, acc=running_acc, epoch=epoch_index) train_bar.update() train_state['train_loss'].append(running_loss) train_state['train_acc'].append(running_acc) # Iterate over val dataset # setup: batch generator, set loss and acc to 0; set eval mode on dataset.set_split('val') batch_generator = generate_batches(dataset, batch_size=args.batch_size, device=args.device) running_loss = 0. running_acc = 0. classifier.eval() for batch_index, batch_dict in enumerate(batch_generator): # compute the output y_pred = classifier(batch_dict['x_surname']) # step 3. compute the loss loss = loss_func(y_pred, batch_dict['y_nationality']) loss_t = loss.item() running_loss += (loss_t - running_loss) / (batch_index + 1) # compute the accuracy acc_t = compute_accuracy(y_pred, batch_dict['y_nationality']) running_acc += (acc_t - running_acc) / (batch_index + 1) val_bar.set_postfix(loss=running_loss, acc=running_acc, epoch=epoch_index) val_bar.update() train_state['val_loss'].append(running_loss) train_state['val_acc'].append(running_acc) train_state = update_train_state(args=args, model=classifier, train_state=train_state) scheduler.step(train_state['val_loss'][-1]) if train_state['stop_early']: break train_bar.n = 0 val_bar.n = 0 epoch_bar.update() except KeyboardInterrupt: print("Exiting loop") # + classifier.load_state_dict(torch.load(train_state['model_filename'])) classifier = classifier.to(args.device) dataset.class_weights = dataset.class_weights.to(args.device) loss_func = nn.CrossEntropyLoss(dataset.class_weights) dataset.set_split('test') batch_generator = generate_batches(dataset, batch_size=args.batch_size, device=args.device) running_loss = 0. running_acc = 0. 
classifier.eval() for batch_index, batch_dict in enumerate(batch_generator): # compute the output y_pred = classifier(batch_dict['x_surname']) # compute the loss loss = loss_func(y_pred, batch_dict['y_nationality']) loss_t = loss.item() running_loss += (loss_t - running_loss) / (batch_index + 1) # compute the accuracy acc_t = compute_accuracy(y_pred, batch_dict['y_nationality']) running_acc += (acc_t - running_acc) / (batch_index + 1) train_state['test_loss'] = running_loss train_state['test_acc'] = running_acc # - print("Test loss: {};".format(train_state['test_loss'])) print("Test Accuracy: {}".format(train_state['test_acc'])) # ### Inference def predict_nationality(surname, classifier, vectorizer): """Predict the nationality from a new surname Args: surname (str): the surname to classifier classifier (SurnameClassifer): an instance of the classifier vectorizer (SurnameVectorizer): the corresponding vectorizer Returns: a dictionary with the most likely nationality and its probability """ vectorized_surname = vectorizer.vectorize(surname) vectorized_surname = torch.tensor(vectorized_surname).unsqueeze(0) result = classifier(vectorized_surname, apply_softmax=True) probability_values, indices = result.max(dim=1) index = indices.item() predicted_nationality = vectorizer.nationality_vocab.lookup_index(index) probability_value = probability_values.item() return {'nationality': predicted_nationality, 'probability': probability_value} new_surname = input("Enter a surname to classify: ") classifier = classifier.cpu() prediction = predict_nationality(new_surname, classifier, vectorizer) print("{} -> {} (p={:0.2f})".format(new_surname, prediction['nationality'], prediction['probability'])) # + def predict_topk_nationality(surname, classifier, vectorizer, k=5): """Predict the top K nationalities from a new surname Args: surname (str): the surname to classifier classifier (SurnameClassifer): an instance of the classifier vectorizer (SurnameVectorizer): the corresponding vectorizer k (int): the number of top nationalities to return Returns: list of dictionaries, each dictionary is a nationality and a probability """ vectorized_surname = vectorizer.vectorize(surname) vectorized_surname = torch.tensor(vectorized_surname).unsqueeze(dim=0) prediction_vector = classifier(vectorized_surname, apply_softmax=True) probability_values, indices = torch.topk(prediction_vector, k=k) # returned size is 1,k probability_values = probability_values[0].detach().numpy() indices = indices[0].detach().numpy() results = [] for kth_index in range(k): nationality = vectorizer.nationality_vocab.lookup_index(indices[kth_index]) probability_value = probability_values[kth_index] results.append({'nationality': nationality, 'probability': probability_value}) return results new_surname = input("Enter a surname to classify: ") k = int(input("How many of the top predictions to see? ")) if k > len(vectorizer.nationality_vocab): print("Sorry! That's more than the # of nationalities we have.. defaulting you to max size :)") k = len(vectorizer.nationality_vocab) predictions = predict_topk_nationality(new_surname, classifier, vectorizer, k=k) print("Top {} predictions:".format(k)) print("===================") for prediction in predictions: print("{} -> {} (p={:0.2f})".format(new_surname, prediction['nationality'], prediction['probability']))
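# ### Note: why the final `.squeeze(dim=2)` works

# The helper below is not part of the original model; it is a small sketch that traces how the
# sequence length shrinks through the four Conv1d layers in `SurnameClassifier.convnet`, using
# the standard Conv1d output-length formula. The starting length of 17 is an illustrative stand-in
# for the vectorizer's `_max_surname_length`; with that value the final feature map has length 1,
# which is what lets `forward()` collapse it with `.squeeze(dim=2)` before the linear layer.

# +
def conv1d_out_length(length, kernel_size=3, stride=1, padding=0, dilation=1):
    """Output length of a 1-D convolution (same formula as the torch.nn.Conv1d docs)."""
    return (length + 2 * padding - dilation * (kernel_size - 1) - 1) // stride + 1

# kernel sizes / strides of the four Conv1d layers defined in SurnameClassifier.convnet
conv_layers = [dict(kernel_size=3, stride=1),
               dict(kernel_size=3, stride=2),
               dict(kernel_size=3, stride=2),
               dict(kernel_size=3, stride=1)]

length = 17  # illustrative max surname length
for i, cfg in enumerate(conv_layers, start=1):
    length = conv1d_out_length(length, **cfg)
    print("after conv layer {}: sequence length = {}".format(i, length))
# -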
chapter_4/Surname-CNN/4_4_Classifying_Surnames_with_a_CNN.ipynb
# ##### Copyright 2020 Google LLC. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # simple_cp_program # <table align="left"> # <td> # <a href="https://colab.research.google.com/github/google/or-tools/blob/master/examples/notebook/constraint_solver/simple_cp_program.ipynb"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/colab_32px.png"/>Run in Google Colab</a> # </td> # <td> # <a href="https://github.com/google/or-tools/blob/master/ortools/constraint_solver/samples/simple_cp_program.py"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/github_32px.png"/>View source on GitHub</a> # </td> # </table> # First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab. # !pip install ortools # + # Copyright 2010-2018 Google LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # [START program] """Simple Constraint optimization example.""" # [START import] from ortools.constraint_solver import pywrapcp # [END import] """Entry point of the program.""" # Instantiate the solver. # [START solver] solver = pywrapcp.Solver('CPSimple') # [END solver] # Create the variables. # [START variables] num_vals = 3 x = solver.IntVar(0, num_vals - 1, 'x') y = solver.IntVar(0, num_vals - 1, 'y') z = solver.IntVar(0, num_vals - 1, 'z') # [END variables] # Constraint 0: x != y. # [START constraints] solver.Add(x != y) print('Number of constraints: ', solver.Constraints()) # [END constraints] # Solve the problem. # [START solve] decision_builder = solver.Phase([x, y, z], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE) # [END solve] # Print solution on console. # [START print_solution] count = 0 solver.NewSearch(decision_builder) while solver.NextSolution(): count += 1 solution = 'Solution {}:\n'.format(count) for var in [x, y, z]: solution += ' {} = {}'.format(var.Name(), var.Value()) print(solution) solver.EndSearch() print('Number of solutions found: ', count) # [END print_solution] # [START advanced] print('Advanced usage:') print('Problem solved in ', solver.WallTime(), 'ms') print('Memory usage: ', pywrapcp.Solver.MemoryUsage(), 'bytes') # [END advanced]
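# As an alternative to the manual `NewSearch`/`NextSolution` loop above, the same enumeration can
# be written with a solution collector. This is a sketch based on the classic CP solver's
# collector API (it reuses the `x`, `y`, `z` variables and `decision_builder` defined above).

# +
solution = solver.Assignment()
solution.Add([x, y, z])
collector = solver.AllSolutionCollector(solution)

solver.Solve(decision_builder, [collector])
print('Number of solutions found: ', collector.SolutionCount())
for index in range(collector.SolutionCount()):
    print('Solution {}: x = {} y = {} z = {}'.format(
        index + 1,
        collector.Value(index, x),
        collector.Value(index, y),
        collector.Value(index, z)))
# -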
examples/notebook/constraint_solver/simple_cp_program.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from tkinter import * from tkinter import messagebox import pandas as pd from tkinter import ttk from ttkwidgets.autocomplete import AutocompleteCombobox from surprise import SVD from surprise import Dataset,Reader from surprise.model_selection import cross_validate from surprise import accuracy from surprise.model_selection import train_test_split movies=pd.read_csv('ml-100k/movies.csv') ratings=pd.read_csv('ml-100k/ratings.csv') def recommend(): s.quit() movie=[i.get() for i in movlist] rat=[i.get() for i in tkvar] mid=[name_to_mid(i) for i in movie] # print(ids) # print(rats) rating=add_user_ratings(mid, rat,ratings) svd=calc_rating(rating) movs,r=rec_movies(movies, svd) s.destroy() win = Tk() win.title("Movies") win.geometry('400x300') win.configure(bg='tan2') print('test') Label(win,bg='tan2',fg='white',padx=5,pady=4, text="Recommended Movies:").grid(row=0, column=2) Label(win,bg='tan2',fg='white',padx=5,pady=4, text="Predicted rating:").grid(row=0, column=3) for i,j in enumerate(movs): Label(win,bg='tan2',fg='white',padx=5,pady=4, text=j).grid(row=i+1, column=2) Label(win,bg='tan2',fg='white',padx=5,pady=4, text=r[i]).grid(row=i+1, column=3) win.mainloop() def rec_movies(movies, svd): movies['est'] = movies['movieId'].apply(lambda x: svd.predict(672, x).est) movies = movies.sort_values('est', ascending=False) # movies.head(10) return (list(movies.head(10)['title']),list(movies.head(10)['est'])) def calc_rating(rating): reader = Reader(rating_scale=(1, 5)) data = Dataset.load_from_df( ratings[['userId', 'movieId', 'rating']], reader) trainset, testset = train_test_split(data, test_size=.25) svd = SVD() svd.fit(trainset) return svd def add_user_ratings(mid, rat,ratings): new_user = pd.DataFrame({'userId': [672]*10, 'movieId': mid, 'rating': rat }) ratings=ratings.append(new_user) return ratings def name_to_mid(movie): return int(movies.movieId[movies.title == movie]) s = Tk() s.title("Movie Recommendation System") s.geometry('600x800') s.configure(bg='tan2') movlist=[] popupMenu=[] tkvar=[] for i in range(10): Label(s, text="Movie Name: ",fg='white',bg='tan2').grid(row=i, column=1) n=StringVar() movlist.append(AutocompleteCombobox(s, width = 27,textvariable =n)) # movlist.append(ttk.Combobox(s, width = 27, textvariable = n) ) movlist[i].set_completion_list(tuple(movies.title)) # movlist[i]['values'] = tuple(movies.title) movlist[i].grid(column = 2, row = i) choices = { 1,2,3,4,5} tkvar.append(StringVar(s)) tkvar[i].set(3) # set the default option popupMenu.append(OptionMenu(s, tkvar[i], *choices,)) Label(s, text="Choose a rating: ",bg='tan2',fg='white').grid(row = i, column = 4) popupMenu[i].grid(row = i, column =5) Button(s, text="Submit",bg='orange1',borderwidth=4,fg='white', command=recommend).grid(row=12, column=3,padx=5,pady=4) s.mainloop() # -
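# The same recommendation logic can be exercised without the Tkinter GUI. The sketch below is a
# standalone variant (not part of the original app): it fits SVD on the full ratings table and
# predicts ratings for one user on a handful of movie ids. The function name and the example
# movie ids are placeholders.

# +
def predict_for_user(ratings_df, user_id, movie_ids):
    """Fit SVD on all ratings and return predicted ratings for the given user/movies."""
    reader = Reader(rating_scale=(1, 5))
    data = Dataset.load_from_df(ratings_df[['userId', 'movieId', 'rating']], reader)
    algo = SVD()
    algo.fit(data.build_full_trainset())
    return {mid: algo.predict(user_id, mid).est for mid in movie_ids}

# Example usage (movie ids 1, 2 and 3 are placeholders):
# print(predict_for_user(ratings, 672, [1, 2, 3]))
# -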
ml-latest/pranav_rec.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Fictitious Names

# ### Introduction:
#
# This time you will create the data yourself.
#
# Special thanks to [<NAME>](http://chrisalbon.com/) for sharing the dataset and materials.
# All the credit for this exercise belongs to him.
#
# For background on the different kinds of joins, see [this visual explanation of SQL joins](https://blog.codinghorror.com/a-visual-explanation-of-sql-joins/).
#
# ### Step 1. Import the necessary libraries

import pandas as pd

# ### Step 2. Create the 3 DataFrames based on the following raw data

# +
raw_data_1 = {
        'subject_id': ['1', '2', '3', '4', '5'],
        'first_name': ['Alex', 'Amy', 'Allen', 'Alice', 'Ayoung'],
        'last_name': ['Anderson', 'Ackerman', 'Ali', 'Aoni', 'Atiches']}

raw_data_2 = {
        'subject_id': ['4', '5', '6', '7', '8'],
        'first_name': ['Billy', 'Brian', 'Bran', 'Bryce', 'Betty'],
        'last_name': ['Bonder', 'Black', 'Balwner', 'Brice', 'Btisan']}

raw_data_3 = {
        'subject_id': ['1', '2', '3', '4', '5', '7', '8', '9', '10', '11'],
        'test_id': [51, 15, 15, 61, 16, 14, 15, 1, 61, 16]}
# -

# ### Step 3. Assign each to a variable called data1, data2, data3

# +
data1 = pd.DataFrame(raw_data_1, columns = ['subject_id', 'first_name', 'last_name'])
data2 = pd.DataFrame(raw_data_2, columns = ['subject_id', 'first_name', 'last_name'])
data3 = pd.DataFrame(raw_data_3, columns = ['subject_id','test_id'])
data3
# -

# ### Step 4. Join the two dataframes along rows and assign to all_data

all_data = pd.concat([data1, data2])
all_data

# ### Step 5. Join the two dataframes along columns and assign to all_data_col

all_data_col = pd.concat([data1, data2], axis = 1)
all_data_col

# ### Step 6. Print data3

data3

# ### Step 7. Merge all_data and data3 along the subject_id value

pd.merge(all_data, data3, on='subject_id')

# ### Step 8. Merge only the data that has the same 'subject_id' in both data1 and data2

pd.merge(data1, data2, on='subject_id', how='inner')

# ### Step 9. Merge all values in data1 and data2, keeping matching records from both sides where available.

pd.merge(data1, data2, on='subject_id', how='outer')
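# As a quick way to see which table each merged row came from, the outer merge can be annotated
# with an indicator column (an illustrative follow-up, not part of the original exercise).

pd.merge(data1, data2, on='subject_id', how='outer', indicator=True)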
05_Merge/Fictitous Names/Exercises_with_solutions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # Basic Libraries # + import matplotlib.image as mi import matplotlib.pyplot as plt import numpy as np # These are some basic libraries which have been used. # - # # Reading Image # + img=mi.imread('Image/rabbit.png') plt.imshow(img) plt.show() img.shape # This is where we are reading image. # As mentioned in report itself, that this system has been desgined for 3-channeled image so,keep in mind while reading image. # + # As we can see 3 in the last it denotes the no. of channel present in image. # - # # Size of Image # + height=img.shape[0] width=img.shape[1] # This is where we are finding the size of image. # - # # Color to grayscale img2D=np.zeros((height,width)) # + for i in range(height): for j in range(width): img2D[i][j]=(0.3*img[i][j][0] + 0.59*img[i][j][1] + 0.11*img[i][j][2]) # This is the weighted method which has been employed to convert color to grayscale. # We could have used another method known as average method but that gives somewhat black image. # - plt.imshow(img2D,cmap='gray') plt.show() img2D.shape # # Derivatives of pixel intensity:- Ix and Iy #derivative masks or kernels fx=np.array([(-1,0,1),(-1,0,1),(-1,0,1)]) fy=np.array([(1,1,1),(0,0,0),(-1,-1,-1)]) print("Deivative mask for x direction:") print() print(fx) print() print("Deivative mask for y direction:") print() print(fy) # + def derivative(img2D,height,width,kernel): Id=np.zeros((height,width)) m=0 n=0 sum_result=0 for i in range(1,height-1): for j in range(1,width-1): for k in range(i-1,i+2): for l in range(j-1,j+2): sum_result+=img2D[k][l]*kernel[m][n] n=n+1 n=0 m=m+1 m=0 Id[i][j]=sum_result/3 sum_result=0 return Id # This is where we are finding the derivative of image. # - # # Ix:Gradient of image in x direction # + Ix=derivative(img2D,height,width,fx) plt.imshow(Ix) plt.show() # The above function has been called to find the derivative of image in x direction. # - # # Iy:Gradient of image in y direction # + Iy=derivative(img2D,height,width,fy) plt.imshow(Iy) plt.show() # In the same way for y direction only the difference being use of different mask for x and y as shown above. # - # # Calculating Auto-correlation matrix # Weighing function w(x,y)=g(x,y,σ)=exp(−(x**2+y**2)/2(σ**2))/2Π(σ**2) # + def weighingfn(x,y,σ): result=np.exp(-(x**2+y**2)/(2*(σ**2)))/(2*np.pi*(σ**2))**0.5 return result # This weighing function has been used, it is guassian function here i have used sigma=1. # + def gaussian_filter(derivative_product,σ): sums=0 kernel=np.zeros((3,3)) for y in range(-1,2,1): for x in range(-1,2,1): kernel[y+1][x+1]=weighingfn(x,y,σ) sums+=kernel[y+1][x+1] for i in range(3): for j in range(3): kernel[i][j]/=sums print(kernel) # This is where we r generating the guassian kernel of size 3 X 3. Idd=np.zeros((height,width)) m=0 n=0 sum_result=0 for i in range(1,height-1): for j in range(1,width-1): for k in range(i-1,i+2): for l in range(j-1,j+2): sum_result+=derivative_product[k][l]*kernel[m][n] n=n+1 n=0 m=m+1 m=0 Idd[i][j]=sum_result/3 sum_result=0 return Idd # This whole shadowed part is responsible for convolution part. # - # Computing the sum of the products of derivatives at each pixel # + Ixx=gaussian_filter(Ix**2,1) # This is where we are calling the above fn for finding elements of auto-correlation matrix M. # - Iyy=gaussian_filter(Iy**2,1) # Same for this. 
Ixy=gaussian_filter(Ix*Iy,1)

# Auto-correlation Matrix M=[[Ixx,Ixy],[Ixy,Iyy]]

# +
M=[[Ixx,Ixy],[Ixy,Iyy]]
print(M)

# This is the auto-correlation matrix M, assembled from the smoothed derivative products computed above.
# -

# # Harris Response Calculation

# +
response=[]

# Find the determinant and trace of the auto-correlation matrix M; these are used to compute the corner response.
# k is a sensitivity factor that separates corners from edges and takes a value close to zero.
k=0.05

# determinant
detM = (Ixx*Iyy)-(Ixy**2)

# trace
traceM = Ixx + Iyy

response=detM-k*(traceM**2)

# Here we compute the Harris response value of each pixel from the auto-correlation matrix M.
# -

# # Corner and Edge

# +
image_for_corner=np.copy(img)
image_for_edge=np.copy(img)

for rowindex,response_value in enumerate(response):
    for colindex,r in enumerate(response_value):
        if r>0:
            # It is a corner
            image_for_corner[rowindex,colindex]=[0,1,0]
        elif r<0:
            # It is an edge
            image_for_edge[rowindex,colindex]=[1,0,0]

fig,axis=plt.subplots(nrows=1,ncols=2,figsize=(10,10))
axis[0].set_title("Corners found")
axis[0].imshow(image_for_corner)
axis[1].set_title("Edge found")
axis[1].imshow(image_for_edge)
plt.show()

# After computing M and the response values, we mark the corner points and edges.
# Because the image read here is densely textured, the raw response marks many pixels,
# so the result is not as clean as it could be.
# In the copy-move forged part of the image below, we obtain approximately the same corner
# points and edges as in the copied source region.
# Running this on another image: if any part had been copy-move forged, we would again expect
# approximately the same number of corner points and edges as in the original region.
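# To get a sparser set of corner points than the per-pixel colouring above, the response map can
# be thresholded relative to its maximum before plotting. This is a small sketch; the 1% factor
# is an arbitrary choice that would need tuning per image.

# +
threshold = 0.01 * response.max()
corner_rows, corner_cols = np.where(response > threshold)

plt.figure(figsize=(6, 6))
plt.imshow(img)
plt.scatter(corner_cols, corner_rows, s=4, c='lime', marker='o')
plt.title("Strongest Harris corners (response > 1% of max)")
plt.show()
# -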
Code/.ipynb_checkpoints/main-checkpoint.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.4.1 # language: julia # name: julia-1.4 # --- # # Binary logistic regression with Polya-Gamma augmentation # + using LinearAlgebra using Plots pyplot(); using Pkg Pkg.activate("../") using Revise using ExpFamilyDistributions using BayesianModels # - # ### Synthetic data # + μ1 = [1., 1.] Σ1 = [0.7 0.5; 0.5 0.7] μ2 = [-0.5, 0.5] Σ2 = [1 -0.5; -0.5 0.3] μ3 = [0.5, 4.] Σ3 = 0.5 * [2. 0.25; 0.25 0.5] function samplenormal(μ, Σ, size) L = cholesky(Σ).L μ .+ L * randn(2, size) end sizecoeff = 1 x1 = hcat(samplenormal(μ1, Σ1, 3* sizecoeff), samplenormal(μ2, Σ2, 2 * sizecoeff)) x2 = samplenormal(μ3, Σ3, 7 * sizecoeff) X = hcat(x1, x2) z = vcat(zeros(Int64, size(x1, 2)), ones(Int64, size(x2, 2))) p = plot( title="Synthetic data", size=(500, 500), xlims=(-4, 4), ylims=(-2, 6), aspect_ratio=:equal, xlabel="x1", ylabel="x2" ) scatter!( x1[1, :], x1[2, :], label="z = 0", marker = (:diamond, 5, 0.5, :red, stroke(0)) ) scatter!( x2[1, :], x2[2, :], label="z = 1", marker = (:circle, 5, 0.5, :blue, stroke(0)), ) # - function fit!(model, X, z; nsteps = 100) elbos = [elbo(model, X, z) / length(z)] for step in 1:nsteps accstats = (X, z) |> model.β.stats update!(model.β.posterior, naturalparam(model.β.prior) + accstats) push!(elbos, elbo(model, X, z) / length(z)) end elbos end model = BinaryLogisticRegression( inputdim = size(X, 1), hasbias = true, pseudocounts = 1 ) model(X, z) # + elbos = fit!(model, X, z, nsteps = 20); ptraining = plot( 0:(length(elbos) - 1), elbos, size=(500, 500), xlabel="step", ylabel="ELBO", legend=false ) # + μ₀ = model.β.prior.μ Σ₀ = model.β.prior.Σ μᵦ = model.β.posterior.μ Σᵦ = model.β.posterior.Σ ppost = plot( #xlims = (-50, 50), #ylims = (-50, 50), aspect_ratio=:equal, xlabel = "β1", ylabel = "β2", ) plotnormal2d(ppost, μ₀[1:2], Σ₀[1:2, 1:2], ncontours = 3, color = :black, label="p(β)") plotnormal2d(ppost, μᵦ[1:2], Σᵦ[1:2, 1:2], ncontours = 3, color = :green, label="q(β)") # - # ### Visualisation # + function sampleβ(μ, Σ) L = cholesky(Σ).L μ .+ L * randn(length(μ)) end function plotdecisionline!(p, w, w₀; plotting_radius=10, args...) # Normalize vector of weights norm_w = w ./ sqrt(sum(w.^2)) # Angle of the weight vector in the range [0,2 π] θ = atan(w[2], w[1]) + π # Vector perpendicular to "w" with unit length. w⊥ = [cos(θ + π/2), sin(θ + π/2)] # Translation vector of the decision boundary induced # by the bias shift = -(w₀ / sqrt(sum(w.^2))) * norm_w # Angle of the decision line in the range [0, 2π] α = atan(w⊥[2], w⊥[1]) + π # To get the decision line we rotate and shift the # abscissa R = [cos(α) -sin(α); sin(α) cos(α)] decisionline = vcat(range(-plotting_radius, plotting_radius, length=1000)', zeros(1000)') decisionline = R * decisionline .+ shift plot!(p, decisionline[1, :], decisionline[2, :]; args...) end # + μᵦ = model.β.posterior.μ Σᵦ = model.β.posterior.Σ pdec = plot( size=(500, 500), aspect_ratio=:equal, xlims=(-4, 4), ylims=(-2, 6), xlabel="x1", ylabel="x2" ) scatter!(pdec, x1[1, :], x1[2, :], label="z = 0", marker = (:diamond, 5, 0.5, :red, 0.5, stroke(0)) ) scatter!(pdec, x2[1, :], x2[2, :], label="z = 1", marker = (:circle, 5, 0.5, :blue, 0.5, stroke(0)), ) for i in 1:20 sβ = sampleβ(μᵦ, Σᵦ) w = sβ[1:2] w₀ = model.hasbias ? sβ[end] : 0. label = i > 1 ? 
"" : "βᵀx = 0, β ∼ q(β)" plotdecisionline!(pdec, w, w₀, line = (:green, 0.3), label=label) end w = μᵦ[1:2] w₀ = model.hasbias ? μᵦ[end] : 0. plotdecisionline!(pdec, w, w₀, line = (:green), linewidth=2, label="μᵦᵀx = 0") pdec # + model res = 100 dim1 = range(-10, 10, length=res) dim2 = range(-10, 10, length=res) testX = vec([ [i, j] for i=dim1, j=dim2]) testX = hcat(testX...) testz1 = ones(size(testX, 2)) pz1 = predict(model, testX) pz1grid = reshape(pz1, res, res) ppredict = heatmap( dim1, dim2, pz1grid', xlims = (-10, 10), ylims = (-10, 10), c = :viridis ) w = μᵦ[1:2] w₀ = μᵦ[end] plotdecisionline!(ppredict, w, w₀, plotting_radius=20, line = (:green), linewidth=2, label="μᵦᵀx = 0") scatter!(ppredict, x1[1, :], x1[2, :], label="z = 0", marker = (:diamond, 5, 0.5, :red, 0.5, stroke(0)) ) scatter!(ppredict, x2[1, :], x2[2, :], label="z = 1", marker = (:circle, 5, 0.5, :blue, 0.5, stroke(0)), ) # + model res = 100 dim1 = range(-10, 10, length=res) dim2 = range(-10, 10, length=res) testX = vec([ [i, j] for i=dim1, j=dim2]) testX = hcat(testX...) testz1 = ones(size(testX, 2)) # Here we compute the prediction using the Maximum A Posteriori parameters pz1 = predict(model, testX, marginalize = false) pz1grid = reshape(pz1, res, res) ppredict = heatmap( dim1, dim2, pz1grid', xlims = (-10, 10), ylims = (-10, 10), c = :viridis ) w = μᵦ[1:2] w₀ = μᵦ[end] plotdecisionline!(ppredict, w, w₀, plotting_radius=20, line = (:green), linewidth=2, label="μᵦᵀx = 0") scatter!(ppredict, x1[1, :], x1[2, :], label="z = 0", marker = (:diamond, 5, 0.5, :red, 0.5, stroke(0)) ) scatter!(ppredict, x2[1, :], x2[2, :], label="z = 1", marker = (:circle, 5, 0.5, :blue, 0.5, stroke(0)), ) # -
examples/BinaryLogisticRegression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Example 2: A Counter with Ripple Carry Adder. # This next example shows how you make stateful things with registers # and more complex hardware structures with functions. We generate # a **3-bit ripple carry adder** building off of the 1-bit adder from # the prior example, and then hook it to a register to count up modulo 8. import pyrtl pyrtl.reset_working_block() # A **function in PyRTL** is nothing special -- it just so happens that the statements # it encapsulate tell PyRTL to build some hardware. def one_bit_add(a, b, carry_in): assert len(a) == len(b) == 1 # len returns the bitwidth sum = a ^ b ^ carry_in carry_out = a & b | a & carry_in | b & carry_in return sum, carry_out # If we call *one_bit_add* # above with the arguments *x*, *y*, and *z* it will make a **one-bit adder to add # those values together** and return the wires for sum and carry_out as applied to *x*, # *y*, and *z*. If I call it again on *i*, *j*, and *k* it will build a **new one-bit # adder** for those inputs and return the resulting sum and carry_out for that adder. # While PyRTL actually provides an "+" operator for wirevectors which generates # adders, a **ripple carry adder** is something people can understand easily but has # enough structure to be mildly interesting. Let's **define an adder of arbitrary # length** recursively and (hopefully) pythonically. More comments after the code. def ripple_add(a, b, carry_in=0): a, b = pyrtl.match_bitwidth(a, b) # this function is a function that allows us to match the bitwidth of multiple # different wires. By default, it zero extends the shorter bits if len(a) == 1: sumbits, carry_out = one_bit_add(a, b, carry_in) else: lsbit, ripplecarry = one_bit_add(a[0], b[0], carry_in) msbits, carry_out = ripple_add(a[1:], b[1:], ripplecarry) sumbits = pyrtl.concat(msbits, lsbit) return sumbits, carry_out # #### The above code breaks down into two cases: # * If the size of the inputs is one-bit just do one_bit_add. # * if they are more than one bit, do a one-bit add on the least significant bits, a ripple carry on the rest, and then stick the results back together into one WireVector. # #### A couple interesting features of PyRTL can be seen here: # * WireVectors can be indexed like lists, with [0] accessing the least significant bit and [1:] being an example of the use of Python slicing syntax. # * While you can add two lists together in python a WireVector + Wirevector means "make an adder" so to concatenate the bits of two vectors one need to use "concat". # * If we look at "cin" it seems to have a default value of the integer "0" but is a WireVector at other times.Python supports polymorphism throughout and PyRTL will cast integers and some other types to WireVectors when it can. # Now let's **build a 3-bit counter** from our N-bit ripple carry adder. counter = pyrtl.Register(bitwidth=3, name='counter') sum, carry_out = ripple_add(counter, pyrtl.Const("1'b1")) counter.next <<= sum # #### A couple new things in the above code: # * The two remaining types of basic WireVectors, Const and Register, both appear. Const, unsurprisingly, is just for holding constants (such as the 0 in ripple_add), but here we create one directly from a Verilog-like string which includes both the value and the bitwidth. 
# * Registers are just like wires, except their updates are delayed to the next clock cycle. This is made explicit in the syntax through the property '.next' which should always be set for registers. # * In this simple example, we take counter next cycle equal to counter this cycle plus one. # Now let's **run the bugger**. No need for inputs, it doesn't have any, but let's # **throw in an assert** to check that it really counts up modulo 8. Finally we'll # **print the trace** to the screen. sim_trace = pyrtl.SimulationTrace() sim = pyrtl.Simulation(tracer=sim_trace) for cycle in range(15): sim.step({}) assert sim.value[counter] == cycle % 8 sim_trace.render_trace()
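# As noted earlier, PyRTL's built-in "+" operator can generate the adder for us. Below is a sketch
# of the same counter written with that operator (not simulated here); the slice keeps the low
# 3 bits and drops the carry, and the integer 1 is cast to a constant WireVector automatically.

counter2 = pyrtl.Register(bitwidth=3, name='counter2')
counter2.next <<= (counter2 + 1)[0:3]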
ipynb-examples/example2-counter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from keras import backend as K from keras.models import Model, Sequential, load_model from keras.layers import Input, Dense, LSTM, Embedding, Dropout from keras.layers.merge import add from keras.utils import to_categorical from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.preprocessing.image import load_img, img_to_array from keras.applications.vgg19 import VGG19, preprocess_input from keras.callbacks import ModelCheckpoint import numpy as np import h5py import string import pickle # %matplotlib inline from matplotlib.pyplot import imshow from PIL import Image # - meta_info = { 'input_shape': { 'VGG16': 4096, 'VGG19': 4096, 'ResNet50': 4096, 'InceptionV3': 2048, 'InceptionResNetV2': 1536 }, 'n_embeddeing': 512, 'n_vocabs': 7277, 'M': 36, 'text_dir': 'Flickr8k_text/' } with open('tokenizer.pkl', 'rb') as f: tokenizer = pickle.load(f) print(len(tokenizer.word_index)) def model_select(model_name): if model_name == 'VGG16': from keras.applications.vgg16 import VGG16, preprocess_input # load the model model = VGG16() # 4096 elif model_name == 'VGG19': from keras.applications.vgg19 import VGG19, preprocess_input # load the model model = VGG19() # 4096 elif model_name == 'ResNet50': from keras.applications.resnet50 import ResNet50, preprocess_input # load the model model = ResNet50() # 4096 elif model_name == 'InceptionV3': from keras.applications.inception_v3 import InceptionV3, preprocess_input # load the model model = InceptionV3() # 2048, elif model_name == 'InceptionResNetV2': from keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input # load the model model = InceptionResNetV2() # 1536, return model # Generate caption from input feature def generate_caption(model, tokenizer, img): # start sign generated = ['CLS'] # Loop for max length or end sign('[SEP]') for i in range(meta_info['M']): sequence = tokenizer.texts_to_sequences([generated])[0] sequence = pad_sequences([sequence], maxlen=meta_info['M']) # Predict next word y_pred = model.predict([img, sequence], verbose=0) y_pred = np.argmax(y_pred) word_pred = '[SEP]' for word, i in tokenizer.word_index.items(): if i == y_pred: word_pred = word # Generate sentence generated.append(word_pred) # If end sign, break if word_pred == '[SEP]': break return generated def generate_example(cnn_model, final_model, img_path, tokenizer): img = load_img(img_path, target_size=(224, 224)) img = img_to_array(img) img = img.reshape((1, img.shape[0], img.shape[1], img.shape[2])) img = preprocess_input(img) # print(img.shape) feature = cnn_model.predict(img) model = load_model(final_model) caption = generate_caption(model, tokenizer, feature) caption.pop(0) caption.pop() return caption # Display in jupyter notebook cell # ref: https://stackoverflow.com/questions/39416004/matplotlib-not-displaying-image-on-jupyter-notebook def show_img(file): img = Image.open(file, 'r') imshow(np.asarray(img)) base_model = model_select('VGG19') cnn_model = Model(inputs=base_model.inputs, outputs=base_model.layers[-2].output) # cnn_model.summary() show_img('examples/example0.jpg') # + final_model_file = 'transfer.model.ep001.acc0.3872.h5' img_file = 'examples/example0.jpg' caption = generate_example(cnn_model, final_model_file, img_file, tokenizer) caption = ' 
'.join(caption) show_img(img_file) print(caption) # + img_file = 'examples/example1.jpg' caption = generate_example(cnn_model, final_model_file, img_file, tokenizer) caption = ' '.join(caption) show_img(img_file) print(caption) # + img_file = 'examples/example2.jpg' caption = generate_example(cnn_model, final_model_file, img_file, tokenizer) caption = ' '.join(caption) show_img(img_file) print(caption) # + img_file = 'examples/example3.jpg' caption = generate_example(cnn_model, final_model_file, img_file, tokenizer) caption = ' '.join(caption) show_img(img_file) print(caption) # -
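# Note (added sketch): `generate_caption` above scans `tokenizer.word_index` once per
# generated token to map a predicted id back to its word. Building the reverse lookup
# once avoids that linear search; this is only an illustration based on the code above.

# +
# Build the id -> word table a single time from the tokenizer loaded earlier
index_to_word = {idx: word for word, idx in tokenizer.word_index.items()}

def lookup_word(word_id, default='[SEP]'):
    """Map a predicted token id back to its word, falling back to the end token."""
    return index_to_word.get(word_id, default)

# Inside the decoding loop this single call replaces the for-loop over word_index:
# word_pred = lookup_word(int(y_pred))
# -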
demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline import pandas as pd from IPython.core.display import HTML css = open('style-table.css').read() + open('style-notebook.css').read() HTML('<style>{}</style>'.format(css)) titles = pd.DataFrame.from_csv('data/titles.csv', index_col=None) titles.head() cast = pd.DataFrame.from_csv('data/cast.csv', index_col=None) cast.head() # + # What are the ten most common movie names of all time? titles.title.value_counts().head(10) # + # Which three years of the 1930s saw the most films released? t = titles t = t[t.year // 10 == 193] t.year.value_counts().head(3) # + # Plot the number of films that have been released each decade # over the history of cinema. t = titles (t.year // 10 * 10).value_counts().sort_index().plot(kind='bar') # + # Plot the number of "Hamlet" films made each decade. t = titles t = t[t.title == 'Hamlet'] (t.year // 10 * 10).value_counts().sort_index().plot(kind='bar') # + # Plot the number of "Rustler" characters # in each decade of the history of film. c = cast c = c[c.character == 'Rustler'] (c.year // 10 * 10).value_counts().sort_index().plot(kind='bar') # + # Plot the number of "Hamlet" characters each decade. c = cast c = c[c.character == 'Hamlet'] (c.year // 10 * 10).value_counts().sort_index().plot(kind='bar') # + # What are the 11 most common character names in movie history? cast.character.value_counts().head(11) # + # Who are the 10 people most often credited as "Herself" in film history? c = cast c[c.character == 'Herself'].name.value_counts().head(10) # + # Who are the 10 people most often credited as "Himself" in film history? c = cast c[c.character == 'Himself'].name.value_counts().head(10) # + # Which actors or actresses appeared in the most movies in the year 1945? cast[cast.year == 1945].name.value_counts().head(10) # + # Which actors or actresses appeared in the most movies in the year 1985? cast[cast.year == 1985].name.value_counts().head(10) # + # Plot how many roles Mammootty has played in each year of his career. cast[cast.name == 'Mammootty'].year.value_counts().sort_index().plot() # + # What are the 10 most frequent roles that start with the phrase "Patron in"? c = cast c[c.character.str.startswith('Patron in ')].character.value_counts().head(10) # + # What are the 10 most frequent roles that start with the word "Science"? c = cast c[c.character.str.startswith('Science')].character.value_counts().head(10) # + # Plot the n-values of the roles that <NAME> has played over her career. c = cast c = c[c.name == '<NAME>'].sort('year') c = c[c.n.notnull()] c.plot(x='year', y='n', kind='scatter') # + # Plot the n-values of <NAME>'s roles through his career. c = cast c = c[c.name == '<NAME>'].sort('year') c = c[c.n.notnull()] c.plot(x='year', y='n', kind='scatter') # + # Plot the n-value of the roles that <NAME> has acted # over the years. c = cast c = c[c.name == '<NAME>'].sort('year') c = c[c.n.notnull()] c.plot(x='year', y='n', kind='scatter') # + # How many leading (n=1) roles were available to actors, # and how many to actresses, in the 1950s? c = cast c = c[c.year // 10 == 195] c = c[c.n == 1] c.type.value_counts() # + # How many supporting (n=2) roles were available to actors, # and how many to actresses, in the 1950s? c = cast c = c[c.year // 10 == 195] c = c[c.n == 2] c.type.value_counts() # -
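# +
# Note (added): `pd.DataFrame.from_csv` and `DataFrame.sort` used above were removed in
# later pandas releases (use `pd.read_csv` and `sort_values` instead). If the earlier
# cells fail on a recent pandas, these are the modern equivalents for the same files:
titles = pd.read_csv('data/titles.csv')
cast = pd.read_csv('data/cast.csv')

c = cast
c = c[c.name == '<NAME>'].sort_values('year')   # replaces .sort('year')
c = c[c.n.notnull()]
c.plot(x='year', y='n', kind='scatter')
# -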
.ipynb_checkpoints/Solutions-2-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Create a notebook to perform Runge-Kutta integration # %matplotlib inline import matplotlib.pyplot as plt import numpy as np # ### Define a function to integrate def dfdx(x,f): return x**2 + x # ### Define its integral def f_int(x,C): return (x**3)/3. + 0.5*x**2 + C # ### Define the 2nd order RK scheme def rk2_core(x_i,f_i,h,g): #advance f by step h #half step x_ipoh = x_i + 0.5*h f_ipoh = f_i + 0.5*h*g(x_i,f_i) #full step f_ipo = f_i + h*g(x_ipoh,f_ipoh) return f_ipo # ### Define a wrapper routine for RK2 def rk2(dfdx, a, b, f_a, N): #dfdx = derivative of x #a = lower bound #b = upper bound #f_a = boundary condition at a #N = number of steps #define our steps x = np.linspace(a,b,N) #a single step size h = x[1]-x[0] #an array tp hold f f = np.zeros(N,dtype=float) f[0] = f_a #value of f at a #evolve f along x for i in range(1,N): f[i] = rk2_core(x[i-1],f[i-1],h,dfdx) return x,f # ### Define the 4th order RK method def rk4_core(x_i,f_i,h,g): #advance f by step h #half step x_ipoh = x_i + 0.5*h #define x at 1 step x_ipo = x_i + h #advance f by a step h k_1 = h*g(x_i,f_i) k_2 = h*g(x_ipoh, f_i + 0.5*k_1) k_3 = h*g(x_ipoh, f_i + 0.5*k_2) k_4 = h*g(x_ipo, f_i + k_3) f_ipo = f_i + (k_1 + 2*k_2 + 2*k_3 + k_4)/6 return f_ipo # ### Define a wrapper for RK4 def rk4(dfdx, a, b, f_a, N): #dfdx = derivative of x #a = lower bound #b = upper bound #f_a = boundary condition at a #N = number of steps #define our steps x = np.linspace(a,b,N) #a single step size h = x[1]-x[0] #an array tp hold f f = np.zeros(N,dtype=float) f[0] = f_a #value of f at a #evolve f along x for i in range(1,N): f[i] = rk4_core(x[i-1],f[i-1],h,dfdx) return x,f # ### Perform the integration a = 0.0 b = 1.0 f_a = 0.0 N = 10 x_2, f_2 = rk2(dfdx,a,b,f_a,N) x_4, f_4 = rk4(dfdx,a,b,f_a,N) x = x_2.copy() plt.plot(x_2,f_2,label = 'RK2') plt.plot(x_4,f_4,label = 'RK4') plt.plot(x,f_int(x,f_a),'o',label='Analytic') plt.legend(frameon=False)
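# ### Check the convergence order (added sketch)

# A quick check, reusing the routines defined above: compare the error of RK2 and RK4 at
# x = b as the number of steps grows. The RK2 error should shrink roughly like h^2 and
# the RK4 error like h^4 (until round-off dominates).

# +
for N_test in (10, 20, 40, 80):
    x2, f2 = rk2(dfdx, a, b, f_a, N_test)
    x4, f4 = rk4(dfdx, a, b, f_a, N_test)
    exact = f_int(b, f_a)   # with f(a=0) = 0 the integration constant C equals f_a
    print(f"N = {N_test:3d}   RK2 error = {abs(f2[-1] - exact):.2e}   "
          f"RK4 error = {abs(f4[-1] - exact):.2e}")
# -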
runge-kutta.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import re from os import walk fs=[] for (dp,dn,fn) in walk("/home/judson/Desktop/sentenceSeg/clean_sent_train/"): fs.extend(fn) print(fs) import pickle import bootstrapping as bs import os def pickle_pat_tagger(fname): #tag="" count=0 fw=0 pl=pickle.load(open("/home/judson/Desktop/sentenceSeg/SentSegPickled/"+fname,"rb")) txt=open("/home/judson/Desktop/sentenceSeg/clean_sent_train/"+fname,"r").readlines() for i in range(len(pl)): pt=bs.base_pattern_matcher(pat,pl[i]) if pt==1: continue else: if txt[i].strip().split()[-1][0]=="<": continue else: txt[i]=txt[i].strip()+" "+tag+"\n" fw=1 count=count+1 if fw==1: os.remove("/home/judson/Desktop/sentenceSeg/clean_sent_train/"+fname) with open("/home/judson/Desktop/sentenceSeg/clean_sent_train/"+fname,"a") as f: for k in txt: f.write(k) return count tag="<DECISION>" c=0 import concurrent.futures s=concurrent.futures.ProcessPoolExecutor() for m in s.map(pickle_pat_tagger,fs): c=c+m c arg=pickle.load(open("decision.pickle","rb")) from copy import deepcopy #print(arg[0]) pat=deepcopy(arg[75]) pat[3:]="*" for i,j in tagpat.items(): if pat in j: print("pattern already exist in ",i) pat tagpat=pickle.load(open("tagged_patterns.pickle","rb")) exi=0 for i,j in tagpat.items(): if pat in j: print("pattern already exist in ",i) exi=1 break if exi==0: tagpat[tag].append(pat) pickle.dump(tagpat,open("tagged_patterns.pickle","wb")) print("pattern added") # + #tagpat=pickle.load(open("tagged_patterns.pickle","rb")) # - tagpat[tag]
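# A small convenience helper (added sketch): the two cells above repeat the same scan
# over `tagpat` to find out whether a pattern is already assigned to a tag. Factoring it
# out keeps that check in one place.
def find_pattern_tag(tagged_patterns, pattern):
    """Return the tag the pattern is already assigned to, or None if it is new."""
    for existing_tag, patterns in tagged_patterns.items():
        if pattern in patterns:
            return existing_tag
    return None

# Example usage with the objects loaded above:
# print(find_pattern_tag(tagpat, pat))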
Rough_implementation_ipynb/Pickle_tagger.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Gutenburg NLP Analysis using RAPIDS # # ### Blog Link: # https://medium.com/rapids-ai/show-me-the-word-count-3146e1173801 # # # ### Objective: Show case nlp capabilties of cudf # # ### Pre-Processing : # * filter punctuation # * to_lower # * remove stop words (from nltk corpus) # * remove multiple spaces with one # * remove leading and trailing spaces # # ### Word Count: # * Get Frequency count for the whole dataset # * Compare word count for two authors (<NAME> vs <NAME> ) # * Get Word counts for all the authors # # ### Encode the word-count for all authors into a count-vector # # We do this in two steps: # # 1. Encode the string Series using `top 20k` most used `words` in the Dataset which we calculated earlier. # * We encode anything not in the series to string_id = `20_000` (`threshold`) # # # 2. With the encoded count series for all authors, we create an aligned word-count vector for them, where: # * Where each column corresponds to a `word_id` from the the `top 20k words` # * Each row corresponds to the `count vector` for that author # # # ### Find the nearest authors using the count-vector: # * Fit a knn # * Find the authors nearest to each other in the count vector space # * Decrease dimunitonality using UMAP # * Find the authors nearest to each other in the latent space # ### Data Download Links: # # Download the data from: https://web.eecs.umich.edu/~lahiri/gutenberg_dataset.html # # You can also run below commands # !pip install gdown # !gdown https://drive.google.com/uc?id=0B2Mzhc7popBga2RkcWZNcjlRTGM # !apt update # !apt install unzip # !unzip Gutenberg.zip # ### Import libraries import cudf import os import numpy as np import cuml try: import nltk except ModuleNotFoundError: os.system('pip install nltk') import nltk from numba import cuda from dask.utils import parse_bytes # ### Setting Rmm Pool # RAPIDS Memory Manager allows sharing a memory pool between RAPIDS libraries and CuPy. # This allows us to use a single device memory pool on the entire GPU, providing significant performance gains by reducing the cost of dynamically allocating and freeing memory. 
cudf.set_allocator(pool=True, initial_pool_size=parse_bytes("8GB")) # ### Set Data Dir data_dir = 'Gutenberg/txt' # ## Read Text Frame # #### Read helper functions # + def get_non_empty_lines(lines): """ returns non empty lines from a list of lines """ clean_lines = [] for line in lines: str_line = line.strip() if str_line: clean_lines.append(str_line) return clean_lines def get_txt_lines(data_dir): """ Read text lines from gutenberg texts returns (text_ls,fname_ls) where text_ls= input_text_lines and fname_ls = list of fnames """ text_ls = [] fname_ls = [] for fn in os.listdir(data_dir): full_fn = os.path.join(data_dir,fn) with open(full_fn,encoding="utf-8",errors="ignore") as f: content = f.readlines() content = get_non_empty_lines(content) text_ls += content ### dont add .txt to the file fname_ls += [fn[:-4]]*len(content) return text_ls, fname_ls # - # ### Read text lines into a cudf dataframe # + print("File Read Time:") # %time txt_ls,fname_ls = get_txt_lines(data_dir) df = cudf.DataFrame() print("\nCUDF Creation Time:") # %time df['text'] = cudf.Series(txt_ls,dtype='str') df['label'] = cudf.Series(fname_ls,dtype='str') title_label_df = df['label'].str.split('___') df['author'] = title_label_df[0] df['title'] = title_label_df[1] df = df.drop(labels=['label']) print("Number of lines in the DF = {:,}".format(len(df))) df.head(5) # - # ## NLP Preprocessing # # In almost every workflow involving textual data, we'll want to do some kind of preprocessing before running our analysis. We might want to remove punctuation, standardize to all lowercase characters, and potentially dozens of other small tasks. RAPIDS makes developing GPU accelerated preprocessing pipelines smooth. # # Let's start by removing all the punctuation, since we don't want those characters to cloud our analysis. We could replace them one by one in many calls to replace. More realistically, we might generate a large regular expression pattern that looks for `!`, `,`, `%` and all of our other patterns and replaces them. It might look something like this: `(!)|(,)...|(%)`. # # A longer regex may or may not be less efficient necessarily on the GPU. If an instruction within the regex fails to match the current character being processed for the string, the rest of the expression does not need to be evaluated and we can move on to the next character. However, regexes with many alternation as in our case, may mean evaluating the same character over many more instructions before continuing. An alternation can be explicit like in `(\bone\b)|(\b1\b)` but also can be implicit like in `[aA]`. # # # This can be tedious, and isn't well suited to the GPU. # # Overall, avoiding regex can be more efficient since the algorithm is complex due to the richness of its features. # # For cases like removing multiple `characters` or `stop words`, a `general regex` can be overkill and `cudf.str` provides some alternative methods which make this computation much faster. # # In this workflow we use the following `cudf.Series.str` functions: # * `str_ser.str.translate`: (Allows passing dict to replace multiple punctuation characters with blank spaces. # * `str_ser.str.replace_tokens`: To replace the tokens with a empty space. # # Please checkout `https://docs.rapids.ai/api/cudf/nightly/`, we are adding more features everyday. # # #### Now back to our workflow: # ##### Removing Filters: # First, we need to define our list of filter characters. 
# remove the following punctuations/characters from cudf filters = [ '!', '"', '#', '$', '%', '&', '(', ')', '*', '+', '-', '.', '/', '\\', ':', ';', '<', '=', '>', '?', '@', '[', ']', '^', '_', '`', '{', '|', '}', '\t','\n',"'",",",'~' , '—'] # Next, we can simply pass `filters` to the string processing methods inside cuDF and apply it to our Series. We'll eventually make a helper function to let us execute this on every column in the DataFrame. But, let's just take a quick look now on a sample of our text data. text_col_sample = df.head(5) text_col_sample['text'] translation_table = {ord(char): ord(' ') for char in filters} text_col_sample['text_clean'] = text_col_sample['text'].str.translate(translation_table) text_col_sample['text_clean'].to_pandas() # With one method we removed all of the symbols in our `filters` list. Next, we'll want to convert to lowercase with `str.lower()` # ##### To Lower text_col_sample['text_clean'] = text_col_sample['text_clean'].str.lower() text_col_sample['text_clean'].to_pandas() # We can also remove stopwords with `replace_tokens`. We can pass the default list of English stopwords that ships with the `nltk` library. We'll replace each of our stopwords with a single space. # ##### Remove Stop Words nltk.download('stopwords') STOPWORDS = nltk.corpus.stopwords.words('english') STOPWORDS = cudf.Series(STOPWORDS) text_col_sample['text_clean'] = text_col_sample['text_clean'].str.replace_tokens(STOPWORDS, ' ') text_col_sample['text_clean'].to_pandas() # ##### Replacing Multiple White Spaces # # This looks great, but we'll probably want to replace multiple spaces in a row with a single space and strip leading and trailing spaces. We can do that easily, too. text_col_sample['text_clean'] = text_col_sample['text_clean'].str.normalize_spaces( ) text_col_sample['text_clean'] = text_col_sample['text_clean'].str.strip(' ') text_col_sample['text_clean'].to_pandas() # With that, we've finished our basic preprocessing steps on a tiny sample of our text column. We'll wrap this into a function for portability, and run it on the entire data. We'll rewrite our code to create our filter list and stopwords again for clarity. 
# #### Full Pre-processing Pipe-Line # # ##### CPU # - ```5 min 2s``` with pure ```Pandas``` # - ``` Dask CPU Time ``` = ```15.25 s ``` (on a dual 16-core CPU (64 virtual core)) # # ##### GPU (RAPIDS) # - ``` 2.94 s``` on a ```Tesla T4 GPU ``` # + STOPWORDS = nltk.corpus.stopwords.words('english') filters = [ '!', '"', '#', '$', '%', '&', '(', ')', '*', '+', '-', '.', '/', '\\', ':', ';', '<', '=', '>', '?', '@', '[', ']', '^', '_', '`', '{', '|', '}', '\t','\n',"'",",",'~' , '—'] def preprocess_text(input_strs , filters=None , stopwords=STOPWORDS): """ * filter punctuation * to_lower * remove stop words (from nltk corpus) * remove multiple spaces with one * remove leading spaces """ # filter punctuation and case conversion translation_table = {ord(char): ord(' ') for char in filters} input_strs = input_strs.str.translate(translation_table) input_strs = input_strs.str.lower() # remove stopwords stopwords_gpu = cudf.Series(stopwords) input_strs = input_strs.str.replace_tokens(STOPWORDS, ' ') # replace multiple spaces with single one and strip leading/trailing spaces input_strs = input_strs.str.normalize_spaces( ) input_strs = input_strs.str.strip(' ') return input_strs def preprocess_text_df(df, text_cols=['text'], **kwargs): for col in text_cols: df[col] = preprocess_text(df[col], **kwargs) return df # - # With our function defined, we can execute it to preprocess the entire dataset. # %time df = preprocess_text_df(df, filters=filters) df['text'].head(5) # ## Word Count # # Lets find the top words used in: # * Whole dataset # * by <NAME> # * by <NAME> # + ## Getting a frequency count for Strings def get_word_count(str_col): """ returns the count of input strings """ ## Tokenize: convert sentences into a long list of words ## Get counts: Groupby each token to get value counts df = cudf.DataFrame() # tokenize sentences into a string using nvtext.tokenize() # it into a single tall data-frame df['string'] = str_col.str.tokenize() # Using Group by to do a value count for string columns # This will be natively supported soon # See: issue https://github.com/rapidsai/cudf/issues/1951 df['counts'] = np.dtype('int32').type(0) res = df.groupby('string').count() res = res.reset_index(drop=False).sort_values(by='counts', ascending=False) return res.rename(columns={'index':'string'}) # - # ### Top Words Across the dataset # + # %%time count_df = get_word_count(df['text']) count_df.head(5).to_pandas() # - # ### Now lets compare <NAME> and <NAME> # #### <NAME> einstein_df = df[df['author'].str.contains('Einstein')] einstein_count_df = get_word_count(einstein_df['text']) einstein_count_df.head(5).to_pandas() # #### <NAME> charles_dickens_df = df[df['author'].str.contains('<NAME>')] charles_dickens_count_df = get_word_count(charles_dickens_df['text']) charles_dickens_count_df.head(5).to_pandas() # # So Einstein is talking about relativity, with words like `relativity`,`theory`,`body` , # while <NAME> is telling stories with `one`, `upon`, `time` , `old` # # Our Word Count seems to be working :-D # ### Word Counts for all the authors # #### Lets get the list of authors for our dataframe df['author'].unique().to_pandas().head(5) # #### Calculate the word count for all authors into a list # %%time author_wc_ls = [] author_name_ls = [] for author_name in df['author'].unique(): df_auth = df[df['author']==author_name] author_wc = get_word_count(df_auth['text']) author_wc_ls.append(author_wc) author_name_ls.append(author_name) # ## Encode the word-count `series` list for all authors into a count-vector # # We do 
this in two steps: # # 1. Encode the string Series using`top 20k` most used `words` in the Dataset which we calculated earlier. # * We encode anything not in the series to string_id = `20_000` (threshold) # # # 2. With the encoded count series for all authors, we create an aligned word-count vector for them, where: # * Where each column corresponds to a `word_id` from the the `top 20k words` # * Each row corresponds to the `count vector` for that author # #### Categorize the `string series` from the `word count series` into a `integer series` for all the authors def encode_count_df(auth_wc_df,keys,out_of_dict_id): """ Encode the count series for all authors by using the index provided in keys All strings not in keys are mapped to out_of_dict_id and their count is summed """ auth_wc_df['encoded_str_id'] = auth_wc_df['string'].astype('category') auth_wc_df['encoded_str_id'] = auth_wc_df['encoded_str_id'].cat.set_categories(keys)._column.codes auth_wc_df['encoded_str_id'] = auth_wc_df['encoded_str_id'].fillna(out_of_dict_id) # sub df which contains words that are in the dictionary in_dict_wc_df = auth_wc_df[auth_wc_df['encoded_str_id']!=out_of_dict_id] # sum of `count series` of words not in dictionary out_of_dict_wcount = auth_wc_df[auth_wc_df['encoded_str_id']==out_of_dict_id]['counts'].sum() # mapping out the count of words to -1 out_of_dict_df = cudf.DataFrame({'encoded_str_id':out_of_dict_id,'counts': out_of_dict_wcount,'string':'other'}) out_of_dict_df['encoded_str_id'] = out_of_dict_df['encoded_str_id'].astype(np.int32) out_of_dict_df['counts'] = out_of_dict_df['counts'].astype(np.int32) return cudf.concat([in_dict_wc_df,out_of_dict_df]) # + # %%time # keep only top 20k words in the dataset th = 20_000 keys = count_df['string'][:th] encoded_wc_ls = [] for auth_wc_df in author_wc_ls: encoded_count_df = encode_count_df(auth_wc_df,keys,th) encoded_wc_ls.append(encoded_count_df) # - # ##### Now lets check if the encoding worked ! # ##### <NAME> Counts author_id = author_name_ls.index('<NAME>') print(author_name_ls[author_id]) encoded_wc_ls[author_id].head(5).to_pandas() # ##### <NAME> Counts author_id = author_name_ls.index('<NAME>') print(author_name_ls[author_id]) encoded_wc_ls[author_id].head(5).to_pandas() # ##### We can see that the encoded_str_id for `said` is `0` for both `<NAME>` and `Agatha Christie`. Yaay! the encoding worked # ## Create a aligned word-count vector for each author: # # We create a dataframe, where a row represents a `author` and the columnss contain the count of the `words` respresented by that `column`. # #### Create a numba nd-array of shape (`num_authors`,`Vocablary Size+1`)) num_authors = len(encoded_wc_ls) count_ary = np.zeros(shape = (num_authors,th+1), dtype=np.int32) count_dary = cuda.to_device(count_ary) # Fill the count array using a numba function: # # Apply the numba function to fill the `author_count_array` with the count of words used by the `author` # `Numba Function`: See https://numba.pydata.org/numba-doc/0.13/CUDAJit.html for more `info` on how to write `cuda-jit` functions. 
# + # %%time @cuda.jit('void(int32[:], int32[:], int32[:])') def count_vec_func(author_token_id_array,author_token_count_array,author_count_array): pos = cuda.grid(1) if pos < author_token_id_array.size: token_id = author_token_id_array[pos] token_count = author_token_count_array[pos] author_count_array[token_id] = token_count for author_id,encoded_wc_df in enumerate(encoded_wc_ls): count_sr = encoded_wc_df['counts'] token_id_sr = encoded_wc_df['encoded_str_id'] count_ar = count_sr._column.data_array_view token_id_ar = token_id_sr._column.data_array_view author_ar = count_dary[author_id] # See https://numba.pydata.org/numba-doc/0.13/CUDAJit.html threadsperblock = 36 blockspergrid = (count_ar.size + (threadsperblock - 1)) // threadsperblock count_vec_func[blockspergrid, threadsperblock](token_id_ar,count_ar,author_ar) # - # #### Now, Lets check if creating the count vectors worked ! # + author_id = author_name_ls.index('<NAME>') print(author_name_ls[author_id]) top_word_ids = encoded_wc_ls[author_id]['encoded_str_id'].head(5).to_pandas() for word_id in top_word_ids: print("{} : {}".format(word_id,count_dary[author_id][word_id])) # - # ## Lets find the Nearest Authors # # Now your count df is ready for ML # # Let's train a KNN on the count-df and see if we can find any interesting patterns in count_df. Though `euclidian distance` is not the best measure for these higher dimensional spaces but it still works as a small toy example. # # #### Normalize Counts normalized_count_array = count_dary/np.sum(count_dary,axis=1)[:,None] # #### Train and find nearest_neighours on the non embedded space # %%time nn_model = cuml.neighbors.NearestNeighbors(n_neighbors = 5) nn_model.fit(normalized_count_array) ouput_mat,output_indices_count_sp = nn_model.kneighbors(X=normalized_count_array) # #### Nearest authors to <NAME> in the count vector space author_id = author_name_ls.index('<NAME>') for index in output_indices_count_sp[author_id]: print(author_name_ls[int(index)]) # #### Nearest authors to <NAME> in the count vector space author_id = author_name_ls.index('<NAME>') for index in output_indices_count_sp[author_id]: print(author_name_ls[int(index)]) # #### Encode the count vecotrs to a lower dimention using Umap embedding_ar_gpu = cuml.UMAP(n_neighbors=100,n_components=3).fit_transform(normalized_count_array) # #### KNN in the lower dimentional space # %%time nn_model = cuml.neighbors.NearestNeighbors(n_neighbors = 5) nn_model.fit(embedding_ar_gpu) ouput_mat,output_indices_umap = nn_model.kneighbors(X=embedding_ar_gpu) # #### Nearest authors to <NAME> in the emdedded space author_id = author_name_ls.index('<NAME>') for index in output_indices_umap[author_id]: print(author_name_ls[int(index)]) # #### Nearest authors to <NAME> in the emdedded space author_id = author_name_ls.index('<NAME>') for index in output_indices_umap[author_id]: print(author_name_ls[int(index)]) # Want to get started with RAPIDS? Check out [`cuDF`](https://github.com/rapidsai/cudf) on Github and let us know what you think! You can download pre-built Docker containers for our 0.8 and newer releases from [NGC](https://ngc.nvidia.com/catalog/landing) or [Dockerhub](https://hub.docker.com/r/rapidsai/rapidsai/) to get started, or install it yourself via Conda. Need something even easier? 
You can quickly get started with RAPIDS in [Google Colab](https://colab.research.google.com/drive/1XTKHiIcvyL5nuldx0HSL_dUa8yopzy_Y#forceEdit=true&offline=true&sandboxMode=true) and try out all the new things we've added with just a single push of a button. # # Don't want to wait for the next release to use upcoming features? You can download our nightly containers from [Dockerhub](https://hub.docker.com/r/rapidsai/rapidsai-nightly) or install via [Conda](https://anaconda.org/rapidsai-nightly) to stay at the tip of our development branch. # ### Other Examples of NLP workflows: # - [Q18](https://github.com/rapidsai/tpcx-bb/tree/master/tpcx_bb/queries/q18): Identify the stores with flat or declining sales in 4 consecutive months, check if there are any negative reviews regarding these stores available online. # # - [Q19](https://github.com/rapidsai/tpcx-bb/tree/master/tpcx_bb/queries/q19): Retrieve the items with the highest number of returns where the number of returns was approximately equivalent across all store and web channels (within a tolerance of +/ 10%), within the week ending given dates. Analyse the online reviews for these items to see if there are any negative reviews. # # - [Q27](https://github.com/rapidsai/tpcx-bb/tree/master/tpcx_bb/queries/q27): For a given product, find "competitor" company names in the product reviews. Display review id, product id, "competitor’s" company name and the related sentence from the online review # # - [Q28](https://github.com/rapidsai/tpcx-bb/tree/master/tpcx_bb/queries/q28): Build text classifier for online review sentiment classification (Positive, Negative, Neutral), using 90% of available reviews for training and the remaining 10% for testing. Display classifier accuracy on testing data # and classification result for the 10% testing data: `<reviewSK>`,`<originalRating>`,`<classificationResult>` # # - [cyBERT](https://medium.com/rapids-ai/cybert-28b35a4c81c4) Click-streams work for cyber log parsing # # ### Upcoming NLP work # # - [Count vectorizer in cuml](https://github.com/rapidsai/cuml/pull/2267) # - [GPU accelerated Bert tokenizer](https://github.com/rapidsai/cudf/issues/4981)
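# ### A cosine-similarity variant (added sketch)
#
# The nearest-neighbor searches above use Euclidean distance, which, as noted, is not
# ideal in this high-dimensional count space. As a rough comparison, here is a minimal
# sketch that ranks authors by cosine similarity on a host copy of the same count
# matrix. It reuses `count_dary` and `author_name_ls` from earlier cells and is an added
# illustration, not part of the original workflow.

# +
counts_host = count_dary.copy_to_host().astype(np.float64)
row_norms = np.linalg.norm(counts_host, axis=1, keepdims=True)
unit_counts = counts_host / np.clip(row_norms, 1e-12, None)   # guard against empty rows
cosine_sim = unit_counts @ unit_counts.T

author_id = author_name_ls.index('<NAME>')
# the top hit is the author itself, mirroring the n_neighbors=5 output above
for index in np.argsort(-cosine_sim[author_id])[:5]:
    print(author_name_ls[int(index)])
# -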
the_archive/archived_rapids_blog_notebooks/nlp/show_me_the_word_count_gutenberg/show_me_the_word_count_gutenberg.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Spotify (Python3) # language: python # name: spotify # --- # + import pandas as pd df = pd.read_csv("recipes_table_v2.csv") # - df.head() df["name"][0] test[1:-2].split("delimiter") import psycopg2 conn = psycopg2.connect(database ='postgres', user = 'postgres', password = '<PASSWORD>', host = 'mydishdb-dev.c3und8sjo4p2.us-east-2.rds.amazonaws.com', port = '5432') # + cursor = conn.cursor() cursor.execute("""SELECT table_name FROM information_schema.tables WHERE table_schema='public' ORDER BY table_schema, table_name; """) table_list = cursor.fetchall() cursor.close() table_list # + import sqlalchemy database ='postgres' dbname = "postgres" user = 'postgres' password = '<PASSWORD>' host = 'mydishdb-dev.c3und8sjo4p2.us-east-2.rds.amazonaws.com' port = '5432' engine = sqlalchemy.create_engine(f"{database}://{dbname}:{password}@{host}:{port}/{user}") con = engine.connect() # - print(engine.table_names()) table_name = 'recipes' df.to_sql(table_name, con) print(engine.table_names()) # + from PIL import Image im = Image.open("test.jpg") newsize = (64,64) resized = im.resize(newsize) resized.save("test2.jpg") # - resized.show() # + import base64 from io import BytesIO buffered = BytesIO() resized.save(buffered, format="JPEG") img_str = base64.b64encode(buffered.getvalue()) # - print(img_str) b'/<KEY>' # + test = {"image":"hello"} test["image"] # - test = (1,) test[0] import json from typing import List, Optional def stringify(num: int) -> str: return str(num) stringify(22) stringify("hello") import os import psycopg2 # + conn = psycopg2.connect(database ='postgres', user = 'postgres', password = '<PASSWORD>', host = 'mydishdb-dev.c3und8sjo4p2.us-east-2.rds.amazonaws.com', port = '5432') cursor = conn.cursor() cursor.execute("""SELECT version();""") table_list = cursor.fetchall() cursor.close() table_list # - addresses = """ http://101cookbooks.com/ http://allrecipes.com/ http://bbc.com/ http://bbc.co.uk/ http://bbcgoodfood.com/ http://bettycrocker.com/ http://bonappetit.com/ https://www.budgetbytes.com/ http://closetcooking.com/ https://cookpad.com/ http://cookstr.com/ http://copykat.com/ https://cybercook.com.br/ https://en.wikibooks.org/ http://delish.com/ http://epicurious.com/ http://finedininglovers.com/ https://food.com/ http://foodnetwork.com/ http://foodrepublic.com/ https://geniuskitchen.com/ https://greatbritishchefs.com/ http://giallozafferano.it/ http://gonnawantseconds.com/ https://www.gousto.co.uk/ https://healthyeating.nhlbi.nih.gov/ https://heinzbrasil.com.br/ https://www.hellofresh.com/ https://www.hellofresh.co.uk/ https://receitas.ig.com.br/ https://inspiralized.com/ http://jamieoliver.com/ https://justbento.com/ https://kennymcgovern.com/ https://www.thekitchn.com/ https://www.marmiton.org/ https://www.matprat.no/ http://www.mindmegette.hu/ https://www.misya.info/ http://www.motherthyme.com/ http://mybakingaddiction.com/ https://panelinha.com.br/ http://paninihappy.com/ http://przepisy.pl/ http://realsimple.com/ https://www.seriouseats.com/ http://simplyrecipes.com/ https://www.southernliving.com/ http://steamykitchen.com/ https://www.tastesoflizzyt.com http://tastykitchen.com/ http://thepioneerwoman.com/ https://www.thespruceeats.com/ http://thehappyfoodie.co.uk/ http://thevintagemixer.com/ http://tine.no/ https://tudogostoso.com.br/ http://twopeasandtheirpod.com/ http://vegolosi.it/ 
http://whatsgabycooking.com/ http://yummly.com/ """ adre = addresses.splitlines()[1:-1] adre[0] # + adre = [url.strip() for url in adre] aim = "https://www.101cookbooks.com/" # - adre[0][4:] test = [url[:4] + "s" + url[4:] for url in adre] test = [url[:8] + "www." + url[8:] for url in test] from recipe_scrapers import scrape_me scraper = scrape_me('https://www.allrecipes.com/recipe/16956/death-by-chocolate-mousse/') # + fractions = ['½','⅓','⅔','¼','¾','⅕','⅖','⅗','⅘','⅙','⅚', '⅐','⅛','⅜','⅝','⅞','⅑','⅒'] fractions_better = ['1/2','1/3','2/3','1/4','3/4','1/5','2/5','3/5','4/5', '1/6','5/6','1/7','1/8','3/8','5/8','7/8','1/9','1/10'] def improve_fractions(recipe): """ function to transform from - for example - '½' to '1/2'. Makes the recipe entries easier to edit for the user and enables parsing of fractions. """ for i in range(len(fractions)): recipe = recipe.replace(fractions[i], fractions_better[i]) return recipe def is_number(n): """ function to check if the first character of a string is a number. This function only checks the first character to cover cases like "3-4" or "3/4". """ if len(n) == 0: return False try: float(n[0]) except ValueError: return False return True # + from text_to_num import alpha2digit import re def parse_ingredients_mod(ingredients): """ takes in scraper.ingredients() """ ingredients = "\n".join(ingredients) print(ingredients) ingredients = improve_fractions(ingredients) # remove all special characters ingredients = re.sub('[^A-Za-z0-9 ,;.:-?!""]+', '', ingredients) # since all websites that can be accessed by the recipe_scraper library # are in english there is no need to detect the language ingredients = alpha2digit(ingredients, "en") # transform the string into an iterable format, line for line, word for word. ingredients = [line.split() for line in ingredients.splitlines()] # list to store ingredients all_ingredients = [] # parse each line of the recipe for unit, quantity and ingredient for line in ingredients: length = len(line) if length == 1: # either this line is part of the previous line or it is an ingredient # that needs no unit or qantity. This code will treat it as an ingredient. # Web will hopefully implement functionality that enables the user to # add this line to the previous ingredient if necessary. all_ingredients.append({"quantity":None, "unit":None, "ingredient":line[0]}) continue elif length == 2: # this line is most probably an ingredient and a quantity if (is_number(line[0][0]) and not is_number(line[1][0])): all_ingredients.append({"quantity":line[0], "unit":None, "ingredient":line[1]}) continue elif (not is_number(line[0][0]) and is_number(line[1][0])): all_ingredients.append({"quantity":line[1], "unit":None, "ingredient":line[0]}) continue else: # the last case covers both the possibility that both strings contain # a number or that both dont. For both cases the same treatment makes sense. # 1. If they both contain numbers something went wrong and saving this line # only under ingredient will make it easier for the user to modify it. # 2. If they both dont contain numbers they are probably part of the previous # line and probably part of the ingredient part of that line. Saving # this as one string under ingredient will make it easier for the user # to modify this part. 
all_ingredients.append({"quantity":None, "unit":None, "ingredient":" ".join(line)}) continue elif length > 2: number_map = [1 if is_number(word) else 0 for word in line] instances_number = sum(number_map) if instances_number == 0: # in the line there are no words that begin with a number all_ingredients.append({"quantity":None, "unit":None, "ingredient":" ".join(line)}) continue elif instances_number == 1: # in the line there is one word that begins with a number index = number_map.index(1) if index == 0: all_ingredients.append({"quantity":line[0], "unit":line[1], "ingredient":" ".join(line[2:])}) continue elif index == 1: all_ingredients.append({"quantity":line[1], "unit":line[0], "ingredient":" ".join(line[2:])}) continue elif index == (length - 2): all_ingredients.append({"quantity":line[-2], "unit":line[-1], "ingredient":" ".join(line[:-2])}) continue elif index == (length -1): all_ingredients.append({"quantity":line[-1], "unit":line[-2], "ingredient":" ".join(line[:-2])}) continue else: all_ingredients.append({"quantity":None, "unit":None, "ingredient":" ".join(line)}) continue else: # in the line there are two or more words that begin with a number all_ingredients.append({"quantity":None, "unit":None, "ingredient":" ".join(line)}) continue ingredients_dict = {"ingredients": all_ingredients} return ingredients_dict # - parse_ingredients_mod(scraper.ingredients()) ingredients = scraper.ingredients() ingredients = "\n".join(ingredients) ingredients = [line.split() for line in ingredients.splitlines()] for line in ingredients: print(line)
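# +
# A few quick, illustrative checks of the parsing helpers defined above (added sketch;
# assumes the cells defining improve_fractions / is_number / parse_ingredients_mod ran):
print(improve_fractions("½ cup sugar"))                       # -> '1/2 cup sugar'
print(is_number("3-4"), is_number("3/4"), is_number("cup"))   # -> True True False
parse_ingredients_mod(["2 cups flour", "salt"])
# -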
Dishify/app/.ipynb_checkpoints/Untitled-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np from pandas import Series,DataFrame obj=pd.Series([4,7,-5,3]) obj obj.values obj.index obj2=pd.Series([4,7,-5,3],index=['d','b','a','c']) obj2 obj2.index obj2['a'] obj2['b'] obj2[['c','a','d']] obj2[obj2>0] obj2*2 # + np.exp(obj2) # - 'b' in obj2 'e' in obj2 sdata={'Ohio':35000,'Texas':71000,'Oregon':16000,'Utah':5000} obj3=pd.Series(sdata) obj3 states=['California','Ohio','Oregon','Texas'] obj4=pd.Series(sdata,index=states) obj4 pd.isnull(obj4) pd.notnull(obj4) obj4.isnull obj3 obj4 obj3+obj4 obj4.name='population' obj4.index.name='state' obj4 obj obj.index=['Bob','Steve','Jeff','ryan'] obj data={'state':['Ohio','Ohio','Ohio','Nevada','Nevada','Nevada'], 'year':[2000,2001,2002,2001,2002,2003], 'pop':[1.5,1.7,3.6,2.4,2.9,3.2]} frame=pd.DataFrame(data) frame frame.head() pd.DataFrame(data,columns=['year','state','pop']) frame2=pd.DataFrame(data,columns=['year','state','pop','debt'], ....: index=['one','two','three','four','five','six']) frame2 frame2.columns frame2['state'] frame2.year frame2.loc['three'] frame2['debt']=16.5 frame2 frame2['debt']=np.arange(6.) frame2 val=pd.Series([-1.2,-1.5,-1.7], index=['two','four','five']) frame2['debt']=val frame2 frame2['eastern']=frame2.state=='Ohio' frame2 del frame2['eastern'] frame2.columns pop = {'Nevada':{2001:2.4,2002:2.9}, ....: 'Ohio': {2000:1.5,2001:1.7,2002:3.6}} frame3=pd.DataFrame(pop) frame3 frame3.T pd.DataFrame(pop,index=[2001,2002,2003]) pdata={'Ohio':frame3['Ohio'][:-1], ....: 'Nevada':frame3['Nevada'][:2]} pd.DataFrame(pdata) frame3.index.name='year';frame3.columns.name='state' frame3 frame2.values obj=pd.Series(range(3),index=['a','b','c']) index=obj.index index index[1:] labels=pd.Index(np.arange(3)) labels obj2=pd.Series([1.5,-2.5,0],index=labels) obj2 obj2.index is labels frame3 frame3.columns 'Ohio' in frame3.columns 2003 in frame3.index dup_labels=pd.Index(['foo','foo','bar','bar']) dup_labels obj=pd.Series([4.5,7.2,-5.3,3.6],index=['d','b','a','c']) obj obj2=obj.reindex(['a','b','c','d','e']) obj2 obj3=pd.Series(['blue','purple','yellow'],index=[0,2,4]) obj3 obj3.reindex(range(6),method='ffill') frame=pd.DataFrame(np.arange(9).reshape((3,3)), ....: index=['a','c','d'], ....: columns=['Ohio','Texas','California']) frame frame2=frame.reindex(['a','b','c','d']) frame2 states=['Texas','Utah','California'] frame.reindex(columns=states) obj=pd.Series(np.arange(5.),index=['a','b','c','d','e']) obj new_obj=obj.drop('c') new_obj obj.drop(['d','c']) data=pd.DataFrame(np.arange(16).reshape((4,4)), ....: index=['Ohio','Coloorado','Utah','New York'], ....: columns=['one','two','three','four']) data data.drop(['Colorado','Ohio']) data.drop('two',axis=1) data.drop(['two','four'],axis='columns') obj.drop('c',inplace=True) obj obj=pd.Series(np.arange(4.),index=['a','b','c','d']) obj obj['b'] obj[1] obj[2:4] obj[['b','a','d']] obj[[1,3]] obj[obj<2] obj['b':'c'] obj['b':'c']=5 obj data=pd.DataFrame(np.arange(16).reshape((4,4)), ....: index=['Ohio','Colorado','Utah','New York'], ....: columns=['ones','two','three','four']) data data['two'] data[['three','one']] data[:2] data[data['three']>5] data<5 data[data<5]=0 data data.loc['Colorado',['two','three']] data.iloc[2,[3,0,1]] data.iloc[2] data.iloc[[1,2],[3,0,1]] data.loc[:'Utah','two'] data.iloc[:,:3][data.three>5] # + active="" # # - 
ser=pd.Series(np.arange(3.)) ser ser[-1] ser ser2=pd.Series(np.arange(3.),index=['a','b','c']) ser2[-1] ser2[:1] ser.loc[:1] ser.iloc[:1] s1=pd.Series([7.3,-2.5,3.4,1.5],index=['a','c','d','e']) s2=pd.Series([-2.1,3.6,-1.5,4,3.1], ....: index=['a','c','e','f','g']) s1 s2 s1+s2 df1=pd.DataFrame(np.arange(9.).reshape((3,3)), columns=list('bcd'), index=['Ohio','Texas','Colorado']) df2=pd.DataFrame(np.arange(12.).reshape((4,3)), columns=list('bde'),index=['Utah','Ohio','Texas','Oregon']) df1 df2 df1+df2 pd.DataFrame({'A':[1,2]}) pd.DataFrame({'B':[3,4]}) df1 df2 df1-df2 df1=pd.DataFrame(np.arange(12.).reshape((3,4)), columns=list('abcd')) df2=pd.DataFrame(np.arange(20.).reshape((4,5)), columns=list('abcde')) df2.loc[1,'b']=np.nan df1 df1+df2 df1.add(df2,fill_value=0) 1/df1 df1.rdiv(1) df1.reindex(columns=df2.columns,fill_value=0) arr=np.arange(12.).reshape((3,4)) arr arr[0] arr-arr[0] frame=pd.DataFrame(np.arange(12.).reshape(4,3)), columns=list('bde') index=['Utah','Ohio','Texas','Oregon'] series=frame.iloc[0] frame series data=pd.DataFrame(np.arange(6).reshape((2,3)),index=pd.Index(['Ohio','Colorado'],name='state'),columns=pd.Index(['one','two','three'],name='number')) data result=data.stack() result result.unstack result.unstack(0) result.unstack('state') s1=pd.Series([0,1,2,3],index=['a','b','c','d']) s2=pd.Series([4,5,6],index=['c','d','e']) data2=pd.concat([s1,s2],keys=['one','two']) data2 data2.unstack() data2.unstack().stack() data2.unstack().stack(dropna=False) df=pd.DataFrame({'left':result,'right':result+5},columns=pd.Index(['left','right'],name='side')) df df.unstack('state') data=pd.read_csv('macrodata.csv') data.head() data.loc['Colorado',['two','three']] data.iloc[2,[3,0,1]] obj=pd.Series(range(4),index=['d','a','b','c']) obj.sort_index obj=pd.Series([4,7,-3,2]) obj=pd.Series(range(5),index=['a','a','b','b','c']) obj df=pd.DataFrame([[1.4,np.nan],[7.1,-4.5],[np.nan,np.nan],[0.75,-1.3]],index=['a','b','c','d'],columns=['one','two']) df df.sum() df.sum(axis=1)
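# Note (added): the broadcasting example above has a misplaced closing parenthesis, so
# `columns` and `index` never reach the DataFrame constructor and the following lines
# fail. A corrected sketch of what that cell intends (a few lines elsewhere also keep
# IPython continuation prompts `....:` from copy-paste and will not run as written):
frame = pd.DataFrame(np.arange(12.).reshape((4, 3)),
                     columns=list('bde'),
                     index=['Utah', 'Ohio', 'Texas', 'Oregon'])
series = frame.iloc[0]
frame - series   # subtracts the first row from every row (broadcasting over rows)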
pandasexercise.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="kF6XR0AMAuRW" # # Week 2 - Ungraded Lab: A journey through Data # # Welcome to the ungraded lab for week 2 of Machine Learning Engineering for Production. **The paradigm behind Deep Learning is now facing a shift from model-centric to data-centric.** In this lab you will see how data intricacies affect the outcome of your models. To show you how far it will take you to apply data changes without addressing the model, you will be using a single model throughout: a simple Convolutional Neural Network (CNN). While training this model the journey will take you to address common problems: class imbalance and overfitting. As you navigate these issues, the lab will walk you through useful diagnosis tools and methods to mitigate these common problems. # # ------- # ------- # + [markdown] id="lao0CVv7c3Rd" # ### **IMPORTANT NOTES BEFORE STARTING THE LAB** # # Once opened in Colab, click on the "Connect" button on the upper right side corner of the screen to connect to a runtime to run this lab. # # # **NOTE 1:** # # For this lab you get the option to either train the models yourself (this takes around 20 minutes with GPU enabled for each model) or to use pretrained versions which are already provided. There are a total of 3 CNNs that require training and although some parameters have been tuned to provide a faster training time (such as `steps_per_epoch` and `validation_steps` which have been heavily lowered) this may result in a long time spent running this lab rather than thinking about what you observe. # # To speed things up we have provided saved pre-trained versions of each model along with their respective training history. We recommend you use these pre-trained versions to save time. However we also consider that training a model is an important learning experience especially if you haven't done this before. **If you want to perform this training by yourself, the code for replicating the training is provided as well. In this case the GPU is absolutely necessary, so be sure that it is enabled.** # # To make sure your runtime is GPU you can go to Runtime -> Change runtime type -> Select GPU from the menu and then press SAVE # # - Note: Restarting the runtime may # be required. # # - Colab will tell you if restarting is necessary -- you can do this from Runtime -> Restart Runtime option in the dropdown. # # **If you decide to use the pretrained versions make sure you are not using a GPU as it is not required and may prevent other users from getting access to one.** To check this, go to Runtime -> Change runtime type -> Select None from the menu and then press SAVE. # # **NOTE 2:** # # Colab **does not** guarantee access to a GPU. This depends on the availability of these resources. However **it is not very common to be denied GPU access**. If this happens to you, you can still run this lab without training the models yourself. If you really want to do the training but are denied a GPU, try switching the runtime to a GPU after a couple of hours. # # To know more about Colab's policies check out this [FAQ](https://research.google.com/colaboratory/faq.html). # # ----------- # ----------- # # Let's get started! 
# + id="LttdbzB5XB0O" import os import shutil import random import zipfile import tarfile import numpy as np import pandas as pd import seaborn as sns import tensorflow as tf import matplotlib.pyplot as plt # To ignore some warnings about Image metadata that Pillow prints out import warnings warnings.filterwarnings("ignore") # + [markdown] id="v4Gq9Xffccwt" # Before you move on, download the two datasets used in the lab, as well as the pretrained models and histories: # + id="CkTzJYihXWu3" colab={"base_uri": "https://localhost:8080/"} outputId="49bd69ce-9926-4f26-d6ac-d87109b147f0" # Download datasets # Cats and dogs # !wget https://storage.googleapis.com/mlep-public/course_1/week2/kagglecatsanddogs_3367a.zip # Caltech birds # !wget https://storage.googleapis.com/mlep-public/course_1/week2/CUB_200_2011.tar # Download pretrained models and training histories # !wget -q -P /content/model-balanced/ https://storage.googleapis.com/mlep-public/course_1/week2/model-balanced/saved_model.pb # !wget -q -P /content/model-balanced/variables/ https://storage.googleapis.com/mlep-public/course_1/week2/model-balanced/variables/variables.data-00000-of-00001 # !wget -q -P /content/model-balanced/variables/ https://storage.googleapis.com/mlep-public/course_1/week2/model-balanced/variables/variables.index # !wget -q -P /content/history-balanced/ https://storage.googleapis.com/mlep-public/course_1/week2/history-balanced/history-balanced.csv # !wget -q -P /content/model-imbalanced/ https://storage.googleapis.com/mlep-public/course_1/week2/model-imbalanced/saved_model.pb # !wget -q -P /content/model-imbalanced/variables/ https://storage.googleapis.com/mlep-public/course_1/week2/model-imbalanced/variables/variables.data-00000-of-00001 # !wget -q -P /content/model-imbalanced/variables/ https://storage.googleapis.com/mlep-public/course_1/week2/model-imbalanced/variables/variables.index # !wget -q -P /content/history-imbalanced/ https://storage.googleapis.com/mlep-public/course_1/week2/history-imbalanced/history-imbalanced.csv # !wget -q -P /content/model-augmented/ https://storage.googleapis.com/mlep-public/course_1/week2/model-augmented/saved_model.pb # !wget -q -P /content/model-augmented/variables/ https://storage.googleapis.com/mlep-public/course_1/week2/model-augmented/variables/variables.data-00000-of-00001 # !wget -q -P /content/model-augmented/variables/ https://storage.googleapis.com/mlep-public/course_1/week2/model-augmented/variables/variables.index # !wget -q -P /content/history-augmented/ https://storage.googleapis.com/mlep-public/course_1/week2/history-augmented/history-augmented.csv # + [markdown] id="suKuIsOYdC9G" # ## A story of data # # To guide you through this lab we have prepared a narrative that simulates a real life scenario: # # Suppose you have been tasked to create a model that classifies images of cats, dogs and birds. For this you settle on a simple CNN architecture, since CNN's are known to perform well for image classification. You are probably familiar with two widely used datasets: `cats vs dogs`, and `caltech birds`. As a side note both datasets are available through `Tensforflow Datasets (TFDS)`. However, you decide NOT to use `TFDS` since the lab requires you to modify the data and combine the two datasets into one. 
# # ## Combining the datasets # # The raw images in these datasets can be found within the following paths: # # + id="-ja5V3AbYCp8" cats_and_dogs_zip = '/content/kagglecatsanddogs_3367a.zip' caltech_birds_tar = '/content/CUB_200_2011.tar' base_dir = '/tmp/data' # + [markdown] id="xRqfAVn6e8Lp" # The next step is extracting the data into a directory of choice, `base_dir` in this case. # # Note that the `cats vs dogs` images are in `zip` file format while the `caltech birds` images come in a `tar` file. # + id="aUl3_4nVXcsE" with zipfile.ZipFile(cats_and_dogs_zip, 'r') as my_zip: my_zip.extractall(base_dir) # + id="JQYh7tAyqOA7" with tarfile.open(caltech_birds_tar, 'r') as my_tar: my_tar.extractall(base_dir) # + [markdown] id="65E3t5Qlfwwn" # For the cats and dogs images no further preprocessing is needed as all exemplars of a single class are located in one directory: `PetImages\Cat` and `PetImages\Dog` respectively. Let's check how many images are available for each category: # + id="husRshAjYim9" colab={"base_uri": "https://localhost:8080/"} outputId="2030ff2b-8d0f-412b-f671-7c17e4c18240" base_dogs_dir = os.path.join(base_dir, 'PetImages/Dog') base_cats_dir = os.path.join(base_dir,'PetImages/Cat') print(f"There are {len(os.listdir(base_dogs_dir))} images of dogs") print(f"There are {len(os.listdir(base_cats_dir))} images of cats") # + [markdown] id="oqiG9G7-g2Z1" # The Bird images dataset organization is quite different. This dataset is commonly used to classify species of birds so there is a directory for each species. Let's treat all species of birds as a single class. This requires moving all bird images to a single directory (`PetImages/Bird` will be used for consistency). This can be done by running the next cell: # + id="ifcKshS6xmVj" colab={"base_uri": "https://localhost:8080/"} outputId="e3ce63b2-bd05-4f9c-88dc-45ec26ed391d" raw_birds_dir = '/tmp/data/CUB_200_2011/images' base_birds_dir = os.path.join(base_dir,'PetImages/Bird') os.mkdir(base_birds_dir) for subdir in os.listdir(raw_birds_dir): subdir_path = os.path.join(raw_birds_dir, subdir) for image in os.listdir(subdir_path): shutil.move(os.path.join(subdir_path, image), os.path.join(base_birds_dir)) print(f"There are {len(os.listdir(base_birds_dir))} images of birds") # + [markdown] id="9tteiK1fieHo" # It turns out that there is a similar number of images for each class you are trying to predict! Nice! # + [markdown] id="z3jHPdb7SE61" # Let's take a quick look at an image of each class you are trying to predict. # + id="lXE9RlF2ZFLL" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="bb609144-26f9-4dae-ab8a-86b8f4cff98b" from IPython.display import Image, display print("Sample cat image:") display(Image(filename=f"{os.path.join(base_cats_dir, os.listdir(base_cats_dir)[0])}")) print("\nSample dog image:") display(Image(filename=f"{os.path.join(base_dogs_dir, os.listdir(base_dogs_dir)[0])}")) print("\nSample bird image:") display(Image(filename=f"{os.path.join(base_birds_dir, os.listdir(base_birds_dir)[0])}")) # + [markdown] id="FduWstcripzJ" # ## Train / Evaluate Split # + [markdown] id="EiL9L8eSizCp" # Before training the model you need to split the data into `training` and `evaluating` sets. For training, we have chosen the [`Keras`](https://keras.io) application programming interface (API) which includes functionality to read images from various directories. The easier way to split the data is to create a different directory for each split of each class. 
# # Run the next cell to create the directories for training and evaluating sets. # + id="NdBnzB2Mvcs2" train_eval_dirs = ['train/cats', 'train/dogs', 'train/birds', 'eval/cats', 'eval/dogs', 'eval/birds'] for dir in train_eval_dirs: if not os.path.exists(os.path.join(base_dir, dir)): os.makedirs(os.path.join(base_dir, dir)) # + [markdown] id="x4XYN51Zj7-J" # Now, let's define a function that will move a percentage of images from an origin folder to a destination folder as desired to generate the training and evaluation splits: # + id="DRpbU9HAdn4n" def move_to_destination(origin, destination, percentage_split): num_images = int(len(os.listdir(origin))*percentage_split) for image_name, image_number in zip(sorted(os.listdir(origin)), range(num_images)): shutil.move(os.path.join(origin, image_name), destination) # + [markdown] id="DfssLKoathoG" # And now you are ready to call the previous function and split the data: # + id="VMKvQGH6fGdW" # Move 70% of the images to the train dir move_to_destination(base_cats_dir, os.path.join(base_dir, 'train/cats'), 0.7) move_to_destination(base_dogs_dir, os.path.join(base_dir, 'train/dogs'), 0.7) move_to_destination(base_birds_dir, os.path.join(base_dir, 'train/birds'), 0.7) # Move the remaining images to the eval dir move_to_destination(base_cats_dir, os.path.join(base_dir, 'eval/cats'), 1) move_to_destination(base_dogs_dir, os.path.join(base_dir, 'eval/dogs'), 1) move_to_destination(base_birds_dir, os.path.join(base_dir, 'eval/birds'), 1) # + [markdown] id="0eAD4J1ukGYC" # Something important to mention is that as it currently stands your dataset has some issues that will prevent model training and evaluation. Mainly: # # 1. Some images are corrupted and have zero bytes. # 2. Cats vs dogs zip file included a `.db` file for each class that needs to be deleted. # # If you didn't fix this before training you will get errors regarding these issues and training will fail. Zero-byte images are not valid images and Keras will let you know once these files are reached. In a similar way `.db` files are not valid images. **It is a good practice to always make sure that you are submitting files with the correct specifications to your training algorithm before start running it** as these issues might not be encountered right away and you will have to solve them and start training again. # # Running the following `bash` commands in the base directory will resolve these issues: # + id="3An_dEi0hwHj" # !find /tmp/data/ -size 0 -exec rm {} + # !find /tmp/data/ -type f ! -name "*.jpg" -exec rm {} + # + [markdown] id="oeqbprKcmr-0" # The first command removes all zero-byte files from the filesystem. The second one removes any file that does not have a `.jpg` extension. # # This also serves as a reminder of the power of bash. Although you could achieve the same result with Python code, bash allows you to do this much quicker. If you are not familiar with bash or some other shell-like language we encourage you to learn some of it as it is a very useful tool for data manipulation purposes. 
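# For reference, a pure-Python sketch of the same cleanup (an added illustration,
# assuming the `/tmp/data` layout used above):

# +
# Walk the extracted data, dropping zero-byte files and anything that is not a .jpg
for root, _, files in os.walk('/tmp/data'):
    for fname in files:
        fpath = os.path.join(root, fname)
        if os.path.getsize(fpath) == 0 or not fname.endswith('.jpg'):
            os.remove(fpath)
# -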
# # Let's check how many images you have available for each split and class after you remove the corrupted images: # + id="nZFk4f0jhEAk" colab={"base_uri": "https://localhost:8080/"} outputId="97113f51-59c6-4b16-aa65-c9c12ad405c3" print(f"There are {len(os.listdir(os.path.join(base_dir, 'train/cats')))} images of cats for training") print(f"There are {len(os.listdir(os.path.join(base_dir, 'train/dogs')))} images of dogs for training") print(f"There are {len(os.listdir(os.path.join(base_dir, 'train/birds')))} images of birds for training\n") print(f"There are {len(os.listdir(os.path.join(base_dir, 'eval/cats')))} images of cats for evaluation") print(f"There are {len(os.listdir(os.path.join(base_dir, 'eval/dogs')))} images of dogs for evaluation") print(f"There are {len(os.listdir(os.path.join(base_dir, 'eval/birds')))} images of birds for evaluation") # + [markdown] id="LSmRaN_Qm-s4" # It turns out that very few files presented the issues mentioned above. That's good news but it is also a reminder that small problems with the dataset might unexpectedly affect the training process. In this case, 4 non valid image files will have prevented you from training the model. # # In most cases training Deep Learning models is a time intensive task, so be sure to have everything in place before starting this process. # # # ## An unexpected issue! # # Let's face the first real life issue in this narrative! There was a power outage in your office and some hard drives were damaged and as a result of that, many of the images for `dogs` and `birds` have been erased. As a matter of fact, only 20% of the dog images and 10% of the bird images survived. # # To simulate this scenario, let's quickly create a new directory called `imbalanced` and copy only the proportions mentioned above for each class. 
# + id="wAG-rJRPZTQt" for dir in train_eval_dirs: if not os.path.exists(os.path.join(base_dir, 'imbalanced/'+dir)): os.makedirs(os.path.join(base_dir, 'imbalanced/'+dir)) # + id="GAGTj51qZT4e" colab={"base_uri": "https://localhost:8080/"} outputId="572c31c9-fb1b-4b55-80df-d53b8b69dcf4" # Very similar to the one used before but this one copies instead of moving def copy_with_limit(origin, destination, percentage_split): num_images = int(len(os.listdir(origin))*percentage_split) for image_name, image_number in zip(sorted(os.listdir(origin)), range(num_images)): shutil.copy(os.path.join(origin, image_name), destination) # Perform the copying copy_with_limit(os.path.join(base_dir, 'train/cats'), os.path.join(base_dir, 'imbalanced/train/cats'), 1) copy_with_limit(os.path.join(base_dir, 'train/dogs'), os.path.join(base_dir, 'imbalanced/train/dogs'), 0.2) copy_with_limit(os.path.join(base_dir, 'train/birds'), os.path.join(base_dir, 'imbalanced/train/birds'), 0.1) copy_with_limit(os.path.join(base_dir, 'eval/cats'), os.path.join(base_dir, 'imbalanced/eval/cats'), 1) copy_with_limit(os.path.join(base_dir, 'eval/dogs'), os.path.join(base_dir, 'imbalanced/eval/dogs'), 0.2) copy_with_limit(os.path.join(base_dir, 'eval/birds'), os.path.join(base_dir, 'imbalanced/eval/birds'), 0.1) # Print number of available images print(f"There are {len(os.listdir(os.path.join(base_dir, 'imbalanced/train/cats')))} images of cats for training") print(f"There are {len(os.listdir(os.path.join(base_dir, 'imbalanced/train/dogs')))} images of dogs for training") print(f"There are {len(os.listdir(os.path.join(base_dir, 'imbalanced/train/birds')))} images of birds for training\n") print(f"There are {len(os.listdir(os.path.join(base_dir, 'imbalanced/eval/cats')))} images of cats for evaluation") print(f"There are {len(os.listdir(os.path.join(base_dir, 'imbalanced/eval/dogs')))} images of dogs for evaluation") print(f"There are {len(os.listdir(os.path.join(base_dir, 'imbalanced/eval/birds')))} images of birds for evaluation") # + [markdown] id="2Qt_EGGJAaOR" # For now there is no quick or clear solution to the accidental file loss. So you decide to keep going and train the model with the remaining images. # + [markdown] id="qlDuR43ZAfwk" # ## Selecting the model # # Let's go ahead and create a model architecture and define a loss function, optimizer and performance metrics leveraging keras API: # + id="AiTGrTiHZ9fS" from tensorflow.keras import layers, models, optimizers def create_model(): # A simple CNN architecture based on the one found here: https://www.tensorflow.org/tutorials/images/classification model = models.Sequential([ layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)), layers.MaxPooling2D((2, 2)), layers.Conv2D(64, (3, 3), activation='relu'), layers.MaxPooling2D((2, 2)), layers.Conv2D(64, (3, 3), activation='relu'), layers.MaxPooling2D((2, 2)), layers.Conv2D(128, (3, 3), activation='relu'), layers.MaxPooling2D((2, 2)), layers.Flatten(), layers.Dense(512, activation='relu'), layers.Dense(3, activation='softmax') ]) # Compile the model model.compile( loss=tf.keras.losses.SparseCategoricalCrossentropy(), optimizer=optimizers.Adam(), metrics=[tf.keras.metrics.SparseCategoricalAccuracy()] ) return model # + [markdown] id="UVj-I-Ke03Au" # And let's print out a model summary as a quick check. 
# + id="elM3J9P8I_zu" colab={"base_uri": "https://localhost:8080/"} outputId="d791f734-163c-4410-fcf0-5af2c94d1572" # Create a model to use with the imbalanced dataset imbalanced_model = create_model() # Print the model's summary print(imbalanced_model.summary()) # + [markdown] id="9YjjV9iU78Ca" # For training the model you will be using Keras' ImageDataGenerator, which has built-in functionalities to easily feed your model with raw, rescaled or even augmented image data. # # Another cool functionality within ImageDataGenerator is the `flow_from_directory` method which allows to read images as needed from a root directory. This method needs the following arguments: # # - `directory`: Path to the root directory where the images are stored. # - `target_size`: The dimensions to which all images found will be resized. Since images come in all kinds of resolutions, you need to standardize their size. 150x150 is used but other values should work well too. # - `batch_size`: Number of images the generator yields everytime it is asked for a next batch. 32 is used here. # - `class_mode`: How the labels are represented. Here "binary" is used to indicate that labels will be 1D. This is done for compatibility with the loss and evaluation metrics used when compiling the model. # # If you want to learn more about using Keras' ImageDataGenerator, check this [tutorial](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image/ImageDataGenerator). # + id="4SyU0P66azNE" colab={"base_uri": "https://localhost:8080/"} outputId="5777257a-cfb7-41cd-c866-8e77536841fc" from tensorflow.keras.preprocessing.image import ImageDataGenerator # No data augmentation for now, only normalizing pixel values train_datagen = ImageDataGenerator(rescale=1./255) test_datagen = ImageDataGenerator(rescale=1./255) # Point to the imbalanced directory train_generator = train_datagen.flow_from_directory( '/tmp/data/imbalanced/train', target_size=(150, 150), batch_size=32, class_mode='binary') validation_generator = test_datagen.flow_from_directory( '/tmp/data/imbalanced/eval', target_size=(150, 150), batch_size=32, class_mode='binary') # + [markdown] id="NsowgcmDAOv-" # Let's do a quick sanity check to inspect that both generators (training and validation) use the same labels for each class: # + id="MlCgRwvWX8BO" colab={"base_uri": "https://localhost:8080/"} outputId="fbf48cec-1ece-4d59-9c90-2bcdc76953e4" print(f"labels for each class in the train generator are: {train_generator.class_indices}") print(f"labels for each class in the validation generator are: {validation_generator.class_indices}") # + [markdown] id="UqXdzv-soUzj" # # # Training a CNN with class imbalanced data # # + id="O1DI3mKCraJQ" colab={"base_uri": "https://localhost:8080/"} outputId="e9b97efb-5035-4e31-851e-204721adde6b" # Load pretrained model and history imbalanced_history = pd.read_csv('history-imbalanced/history-imbalanced.csv') imbalanced_model = tf.keras.models.load_model('model-imbalanced') # + id="UUhtEnsgxZ00" # Run only if you want to train the model yourself (this takes around 20 mins with GPU enabled) # imbalanced_history = imbalanced_model.fit( # train_generator, # steps_per_epoch=100, # epochs=50, # validation_data=validation_generator, # validation_steps=80) # + [markdown] id="9kHwAYLvEhiQ" # To analyze the model performance properly, it is important to track different metrics such as accuracy and loss function along the training process. 
Let's define a helper function to handle the metrics through the training history,depending on the method you previously selected: # + id="kmoJLjoTzb_L" def get_training_metrics(history): # This is needed depending on if you used the pretrained model or you trained it yourself if not isinstance(history, pd.core.frame.DataFrame): history = history.history acc = history['sparse_categorical_accuracy'] val_acc = history['val_sparse_categorical_accuracy'] loss = history['loss'] val_loss = history['val_loss'] return acc, val_acc, loss, val_loss # + [markdown] id="8OKRhD87E-V3" # Now, let's plot the metrics and losses for each training epoch as the training process progresses. # + id="RcYuJgrr11h4" colab={"base_uri": "https://localhost:8080/", "height": 590} outputId="bdc6e16b-f917-465a-8b7a-27ae7f517ae7" def plot_train_eval(history): acc, val_acc, loss, val_loss = get_training_metrics(history) acc_plot = pd.DataFrame({"training accuracy":acc, "evaluation accuracy":val_acc}) acc_plot = sns.lineplot(data=acc_plot) acc_plot.set_title('training vs evaluation accuracy') acc_plot.set_xlabel('epoch') acc_plot.set_ylabel('sparse_categorical_accuracy') plt.show() print("") loss_plot = pd.DataFrame({"training loss":loss, "evaluation loss":val_loss}) loss_plot = sns.lineplot(data=loss_plot) loss_plot.set_title('training vs evaluation loss') loss_plot.set_xlabel('epoch') loss_plot.set_ylabel('loss') plt.show() plot_train_eval(imbalanced_history) # + [markdown] id="4mF4fltDFM6o" # From these two plots is quite evident that the model is overfitting the training data. However, the evaluation accuracy is still pretty high. Maybe class imbalance is not such a big issue after all. Perhaps this is too good to be true. # # Let's dive a little deeper, and compute some additional metrics to explore if the class imbalance is hampering the model to perform well. In particular, let's compare: the accuracy score, the accuracy score balanced, and the confusion matrix. Information on the accuracy scores calculations is provided in the [sklearn](https://scikit-learn.org/stable/modules/model_evaluation.html#classification-metrics) documentation. To refresh ideas on what is a confusion matrix check [Wikipedia](https://en.wikipedia.org/wiki/Confusion_matrix). # + id="kB_8ipYTK6FF" from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, accuracy_score, balanced_accuracy_score # + id="QQRokFyn-KIN" colab={"base_uri": "https://localhost:8080/"} outputId="428fce4f-b7ff-49cf-f9fa-87f673c2aa53" # Use the validation generator without shuffle to easily compute additional metrics val_gen_no_shuffle = test_datagen.flow_from_directory( '/tmp/data/imbalanced/eval', target_size=(150, 150), batch_size=32, class_mode='binary', shuffle=False) # + id="yJEg83EIW_jm" colab={"base_uri": "https://localhost:8080/"} outputId="08584aeb-eb73-486d-c285-38d0ace53702" # Get the true labels from the generator y_true = val_gen_no_shuffle.classes # Use the model to predict (will take a couple of minutes) predictions_imbalanced = imbalanced_model.predict(val_gen_no_shuffle) # Get the argmax (since softmax is being used) y_pred_imbalanced = np.argmax(predictions_imbalanced, axis=1) # Print accuracy score print(f"Accuracy Score: {accuracy_score(y_true, y_pred_imbalanced)}") # Print balanced accuracy score print(f"Balanced Accuracy Score: {balanced_accuracy_score(y_true, y_pred_imbalanced)}") # + [markdown] id="cXQQR9D8HVUh" # Comparing the `accuracy` and `balanced accuracy` metrics, the class imbalance starts to become apparent. 
Now let's compute the `confusion matrix` of the predictions. Notice that the class imbalance is also present in the evaluation set so the confusion matrix will show an overwhelming majority for cats. # + id="zZqpe9uLN2k0" colab={"base_uri": "https://localhost:8080/", "height": 300} outputId="58b499fa-53aa-4b9f-a51d-47bc3ff0f1bf" imbalanced_cm = confusion_matrix(y_true, y_pred_imbalanced) ConfusionMatrixDisplay(imbalanced_cm, display_labels=['birds', 'cats', 'dogs']).plot(values_format="d") # + id="nu3xXDhYAnqL" colab={"base_uri": "https://localhost:8080/"} outputId="98e7f3e4-8aa9-48ff-f074-7b0945f909f3" misclassified_birds = (imbalanced_cm[1,0] + imbalanced_cm[2,0])/np.sum(imbalanced_cm, axis=0)[0] misclassified_cats = (imbalanced_cm[0,1] + imbalanced_cm[2,1])/np.sum(imbalanced_cm, axis=0)[1] misclassified_dogs = (imbalanced_cm[0,2] + imbalanced_cm[1,2])/np.sum(imbalanced_cm, axis=0)[2] print(f"Proportion of misclassified birds: {misclassified_birds*100:.2f}%") print(f"Proportion of misclassified cats: {misclassified_cats*100:.2f}%") print(f"Proportion of misclassified dogs: {misclassified_dogs*100:.2f}%") # + [markdown] id="e3tpDKCsT564" # Class imbalance is a real problem that if not detected early on, gives the wrong impression that your model is performing better than it actually is. For this reason, is important to rely on several metrics that do a better job at capturing these kinds of issues. **In this case the standard `accuracy` metric is misleading** and provides a false sense that the model is performing better than it actually is. # # To prove this point further consider a model that only predicts cats: # + id="Yv65fC5NK5sV" colab={"base_uri": "https://localhost:8080/"} outputId="3669e273-fa51-429d-9d80-efe5fb6806ec" # Predict cat for all images all_cats = np.ones(y_true.shape) # Print accuracy score print(f"Accuracy Score: {accuracy_score(y_true, all_cats)}") # Print balanced accuracy score print(f"Balanced Accuracy Score: {balanced_accuracy_score(y_true, all_cats)}") # + [markdown] id="g_Gp6mYcIQlW" # If you only look at the `accuracy` metric the model seems to be working fairly well, since the majority class is the same that the model always predicts. # # There are several techniques to deal with class imbalance. A very popular one is `SMOTE`, which oversamples the minority classes by creating syntethic data. However, these techniques are outside the scope of this lab. # # The previous metrics were computed with class imbalance both on the training and evaluation sets. 
If you are wondering how the model performed with class imbalance only on the training set run the following cell to see the confusion matrix with balanced classes in the evaluation set: # # + id="r6xecVSuqMLx" colab={"base_uri": "https://localhost:8080/", "height": 317} outputId="c668d88d-ace1-4577-df89-53ee8f16feff" # Use the validation generator without shuffle to easily compute additional metrics val_gen_no_shuffle = test_datagen.flow_from_directory( '/tmp/data/eval', target_size=(150, 150), batch_size=32, class_mode='binary', shuffle=False) # Get the true labels from the generator y_true = val_gen_no_shuffle.classes # Use the model to predict (will take a couple of minutes) predictions_imbalanced = imbalanced_model.predict(val_gen_no_shuffle) # Get the argmax (since softmax is being used) y_pred_imbalanced = np.argmax(predictions_imbalanced, axis=1) # Confusion matrix imbalanced_cm = confusion_matrix(y_true, y_pred_imbalanced) ConfusionMatrixDisplay(imbalanced_cm, display_labels=['birds', 'cats', 'dogs']).plot(values_format="d") # + [markdown] id="R5vJRVjlQvK-" # # Training with the complete dataset # # For the time being and following the narrative, assume that a colleague of yours was careful enough to save a backup of the complete dataset in her cloud storage. Now you can try training without the class imbalance issue, what a relief! # # Now that you have the complete dataset it is time to try again without suffering from class imbalance. **In general, collecting more data is beneficial for models!** # + id="w5VwUrpGPhH_" # Create a model to use with the balanced dataset balanced_model = create_model() # + id="FWFrVUmsmzzs" colab={"base_uri": "https://localhost:8080/"} outputId="a0c8e705-89d9-42eb-f78e-4334263cc7c1" # Still no data augmentation, only re-scaling train_datagen = ImageDataGenerator(rescale=1./255) test_datagen = ImageDataGenerator(rescale=1./255) # Generators now point to the complete dataset train_generator = train_datagen.flow_from_directory( '/tmp/data/train', target_size=(150, 150), batch_size=32, class_mode='binary') validation_generator = test_datagen.flow_from_directory( '/tmp/data/eval', target_size=(150, 150), batch_size=32, class_mode='binary') # + id="WC7-I1ylr-_n" colab={"base_uri": "https://localhost:8080/"} outputId="45302c2d-d8a1-4624-f07d-42660ffd1150" # Load pretrained model and history balanced_history = pd.read_csv('history-balanced/history-balanced.csv') balanced_model = tf.keras.models.load_model('model-balanced') # + id="NcOh1NVtm5Dg" # Run only if you want to train the model yourself (this takes around 20 mins with GPU enabled) # balanced_history = balanced_model.fit( # train_generator, # steps_per_epoch=100, # epochs=50, # validation_data=validation_generator, # validation_steps=80) # + [markdown] id="i7LZUa9RVvyX" # Let's check how the `accuracy` vs `balanced accuracy` comparison looks like now: # + id="EE3YiUW5WMOQ" colab={"base_uri": "https://localhost:8080/"} outputId="896b8569-c608-487f-d2db-715cef2d92dd" # Use the validation generator without shuffle to easily compute additional metrics val_gen_no_shuffle = test_datagen.flow_from_directory( '/tmp/data/eval', target_size=(150, 150), batch_size=32, class_mode='binary', shuffle=False) # + id="wfLgvWRfKuTQ" colab={"base_uri": "https://localhost:8080/"} outputId="8b03463c-afd3-4155-e24d-1f7052642859" # Get the true labels from the generator y_true = val_gen_no_shuffle.classes # Use the model to predict (will take a couple of minutes) predictions_balanced = 
balanced_model.predict(val_gen_no_shuffle) # Get the argmax (since softmax is being used) y_pred_balanced = np.argmax(predictions_balanced, axis=1) # Print accuracy score print(f"Accuracy Score: {accuracy_score(y_true, y_pred_balanced)}") # Print balanced accuracy score print(f"Balanced Accuracy Score: {balanced_accuracy_score(y_true, y_pred_balanced)}") # + id="7Mpnmv5YKyeD" colab={"base_uri": "https://localhost:8080/", "height": 296} outputId="2f2aef1b-4c8e-4cb0-9f0f-48ddad8aa9d6" balanced_cm = confusion_matrix(y_true, y_pred_balanced) ConfusionMatrixDisplay(balanced_cm, display_labels=['birds', 'cats', 'dogs']).plot(values_format="d") # + [markdown] id="Dp7QCgZ0Wuf3" # Both accuracy-based metrics are very similar now. The confusion matrix also looks way better than before. This suggests that class imbalance has been successfully mitigated by adding more data to the previously undersampled classes. # # Now that you now that you can trust the `accuracy` metric, let's plot the training history: # + id="6pr2VmKtJpet" colab={"base_uri": "https://localhost:8080/", "height": 590} outputId="9f9e0bcc-c39a-4314-a4bf-f4a2d546adc0" plot_train_eval(balanced_history) # + [markdown] id="YCH1hTj7JvHu" # This looks much better than for the imbalanced case! However, overfitting is still present. # # Can you think of ways to address this issue? If you are familiar with CNN's you might think of adding `dropout` layers. This intuition is correct but for the time being you decide to stick with the same model and only change the data to see if it is possible to mitigate overfitting in this manner. # # Another possible solution is to apply data augmentation techniques. Your whole team agrees this is the way to go so you decide to try this next! # + [markdown] id="VdlVWEZuX4ii" # # Training with Data Augmentation # # Augmenting images is a technique in which you create new versions of the images you have at hand, by applying geometric transformations. These transformations can vary from: zooming in and out, rotating, or even flipping the images. By doing this, you get a training dataset that exposes the model to a wider variety of images. This helps in further exploring the feature space and hence reducing the chances of overfitting. # # It is also a very natural idea since doing slight (or sometimes not so slight) changes to an image will result in an equally valid image. A cat sitting in an awkward position is still a cat, right? # + id="V1EUr1eTVXEz" # Create a model to use with the balanced and augmented dataset augmented_model = create_model() # + id="g7RAqkSRC98K" colab={"base_uri": "https://localhost:8080/"} outputId="b70d632c-6006-4bc6-fb3c-a92a48c86410" # Now applying image augmentation train_datagen = ImageDataGenerator( rescale=1./255, rotation_range=50, width_shift_range=0.15, height_shift_range=0.15, shear_range=0.2, zoom_range=0.2, horizontal_flip=True) test_datagen = ImageDataGenerator(rescale=1./255) # Still pointing to directory with full dataset train_generator = train_datagen.flow_from_directory( '/tmp/data/train', target_size=(150, 150), batch_size=32, class_mode='binary') validation_generator = test_datagen.flow_from_directory( '/tmp/data/eval', target_size=(150, 150), batch_size=32, class_mode='binary') # + [markdown] id="DnmSteAYA4s3" # Notice that the only difference with the previous training is that the `ImageDataGenerator` object now has some extra parameters. We encourage you to read more about this topic [here](https://keras.io/api/preprocessing/image/) if you haven't already. 
Also **this was only done to the training generator since this technique should only be applied to the training images.** # # # But what exactly are these extra parameters doing? # # Let's see these transformations in action. The following cell applies and displays different transformations for a single image: # + id="iiu_u0iRqgFM" colab={"base_uri": "https://localhost:8080/", "height": 821} outputId="1e33d66c-e333-4136-972a-0bb830277df0" from tensorflow.keras.preprocessing.image import img_to_array, array_to_img, load_img # Displays transformations on random images of birds in the training partition def display_transformations(gen): train_birds_dir = "/tmp/data/train/birds" random_index = random.randint(0, len(os.listdir(train_birds_dir))) sample_image = load_img(f"{os.path.join(train_birds_dir, os.listdir(train_birds_dir)[random_index])}", target_size=(150, 150)) sample_array = img_to_array(sample_image) sample_array = sample_array[None, :] for iteration, array in zip(range(4), gen.flow(sample_array, batch_size=1)): array = np.squeeze(array) img = array_to_img(array) print(f"\nTransformation number: {iteration}\n") display(img) # An example of an ImageDataGenerator sample_gen = ImageDataGenerator( rescale=1./255, rotation_range=50, width_shift_range=0.25, height_shift_range=0.25, shear_range=0.2, zoom_range=0.25, horizontal_flip=True) display_transformations(sample_gen) # + [markdown] id="OUNLR1NFBED3" # Let's look at another more extreme example: # + id="biDxKkdx09bg" colab={"base_uri": "https://localhost:8080/", "height": 821} outputId="a3af075f-2cbe-4041-9cb8-bca126c5947c" # An ImageDataGenerator with more extreme data augmentation sample_gen = ImageDataGenerator( rescale=1./255, rotation_range=90, width_shift_range=0.3, height_shift_range=0.3, shear_range=0.5, zoom_range=0.5, vertical_flip=True, horizontal_flip=True) display_transformations(sample_gen) # + [markdown] id="KaKZ624jBlt6" # Feel free to try your own custom ImageDataGenerators! The results can be very fun to watch. If you check the [docs](https://keras.io/api/preprocessing/image/) there are some other parameters you may want to toy with. # # Now that you know what data augmentation is doing to the training images let's move onto training: # + id="6vO9TP1dJ5My" colab={"base_uri": "https://localhost:8080/"} outputId="a1f6b385-9837-46ef-8981-690736b8765b" # Load pretrained model and history augmented_history = pd.read_csv('history-augmented/history-augmented.csv') augmented_model = tf.keras.models.load_model('model-augmented') # + id="7aSV4CyGHRz-" # Run only if you want to train the model yourself (this takes around 20 mins with GPU enabled) # augmented_history = augmented_model.fit( # train_generator, # steps_per_epoch=100, # epochs=80, # validation_data=validation_generator, # validation_steps=80) # + [markdown] id="d0hoorf7brwZ" # Since you know that class imbalance is no longer an issue there is no need to check for more in-depth metrics. # # Let's plot the training history right away: # + id="8EYc1oXmHjE2" colab={"base_uri": "https://localhost:8080/", "height": 590} outputId="b5da6616-3169-48da-d552-3513be5b333d" plot_train_eval(augmented_history) # + [markdown] id="nBy1VcxacPEx" # Now, the evaluation accuracy follows more closely the training one. This indicates that **the model is no longer overfitting**. Quite a remarkable finding, achieved by just augmenting the data set. Another option to handle overfitting is to include dropout layers in your model as mentioned earlier. 
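#
# As a quick illustration of that option, here is a hedged sketch of how `create_model()` could be extended with an extra `Dropout` layer before the output. This variant is not the model trained in this lab — it only shows where such a layer would go.

# +
# Hedged sketch: same architecture as create_model(), plus a Dropout layer after the
# dense block to regularize the classifier head. Illustration only.
import tensorflow as tf
from tensorflow.keras import layers, models, optimizers

def create_model_with_dropout(dropout_rate=0.5):
    model = models.Sequential([
        layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)),
        layers.MaxPooling2D((2, 2)),
        layers.Conv2D(64, (3, 3), activation='relu'),
        layers.MaxPooling2D((2, 2)),
        layers.Conv2D(64, (3, 3), activation='relu'),
        layers.MaxPooling2D((2, 2)),
        layers.Conv2D(128, (3, 3), activation='relu'),
        layers.MaxPooling2D((2, 2)),
        layers.Flatten(),
        layers.Dense(512, activation='relu'),
        layers.Dropout(dropout_rate),   # randomly zeroes units during training
        layers.Dense(3, activation='softmax')
    ])

    model.compile(
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        optimizer=optimizers.Adam(),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()]
    )
    return model
# -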
#
# Another point worth mentioning is that this model achieves a slightly lower evaluation accuracy than the model without data augmentation. The reason is that the augmented model needs more epochs to train: notice that the model without data augmentation reached a training accuracy of almost 100%, whereas the augmented one can still improve.

# + [markdown] id="dOA93ENHczla"
# ## Wrapping it up
#
# **Congratulations on finishing this ungraded lab!**
#
# It is quite amazing to see how data alone can impact Deep Learning models. Hopefully this lab helped you gain a better understanding of the importance of data.
#
# In particular, you figured out ways to diagnose the effects of class imbalance and looked at specific metrics to spot this problem. Adding more data is a simple way to overcome class imbalance; however, this is not always feasible in a real-life scenario (the short class-weight sketch after this wrap-up shows one alternative).
#
# In the final section, you applied multiple geometric transformations to the images in the training dataset to generate an augmented version. The goal was to use data augmentation to reduce overfitting. Changing the network architecture is an alternative way to reduce overfitting; in practice, it is a good idea to combine both techniques for better results.
#
#
# **Keep it up!**
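#
# As a short appendix (not part of the original lab): when collecting more data is not feasible, one common alternative is to re-weight the loss so that mistakes on under-represented classes cost more. The sketch below is a hedged illustration using Keras' `class_weight` argument; the weighting scheme is a simple inverse-frequency heuristic, not the lab's method.

# +
# Hedged sketch: derive per-class weights from a generator's label counts and pass them
# to model.fit via class_weight. Illustration only — not used anywhere in this lab.
import numpy as np

def compute_class_weights(generator):
    labels = generator.classes                 # one integer label per image
    counts = np.bincount(labels)
    total = labels.shape[0]
    # Inverse-frequency weighting, normalized by the number of classes
    return {cls: total / (len(counts) * count) for cls, count in enumerate(counts)}

# Example usage with an imbalanced training generator (commented out):
# class_weights = compute_class_weights(train_generator)
# model.fit(train_generator, epochs=50,
#           validation_data=validation_generator,
#           class_weight=class_weights)
# -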
C1W2_Ungraded_Lab_Birds_Cats_Dogs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 3. Markov Models Example Problems # We will now look at a model that examines our state of healthiness vs. being sick. Keep in mind that this is very much like something you could do in real life. If you wanted to model a certain situation or environment, we could take some data that we have gathered, build a maximum likelihood model on it, and do things like study the properties that emerge from the model, or make predictions from the model, or generate the next most likely state. # # Let's say we have 2 states: **sick** and **healthy**. We know that we spend most of our time in a healthy state, so the probability of transitioning from healthy to sick is very low: # # $$p(sick \; | \; healthy) = 0.005$$ # # Hence, the probability of going from healthy to healthy is: # # $$p(healthy \; | \; healthy) = 0.995$$ # # Now, on the other hand the probability of going from sick to sick is also very high. This is because if you just got sick yesterday then you are very likely to be sick tomorrow. # # $$p(sick \; | \; sick) = 0.8$$ # # However, the probability of transitioning from sick to healthy should be higher than the reverse, because you probably won't stay sick for as long as you would stay healthy: # # $$p(healthy \; | \; sick) = 0.02$$ # # We have now fully defined our state transition matrix, and we can now do some calculations. # # ## 1.1 Example Calculations # ### 1.1.1 # What is the probability of being healthy for 10 days in a row, given that we already start out as healthy? Well that is: # # $$p(healthy \; 10 \; days \; in \; a \; row \; | \; healthy \; at \; t=0) = 0.995^9 = 95.6 \%$$ # # How about the probability of being healthy for 100 days in a row? # # $$p(healthy \; 100 \; days \; in \; a \; row \; | \; healthy \; at \; t=0) = 0.995^{99} = 60.9 \%$$ # ## 2. Expected Number of Continuously Sick Days # We can now look at the expected number of days that you would remain in the same state (e.g. how many days would you expect to stay sick given the model?). This is a bit more difficult than the last problem, but completely doable, only involving the mathematics of <a href="https://en.wikipedia.org/wiki/Geometric_series">infinite sums</a>. # # First, we can look at the probability of being in state $i$, and going to state $i$ in the next state. That is just $A(i,i)$: # # $$p \big(s(t)=i \; | \; s(t-1)=i \big) = A(i, i)$$ # # Now, what is the probability distribution that we actually want to calculate? How about we calculate the probability that we stay in state $i$ for $n$ transitions, at which point we move to another state: # # $$p \big(s(t) \;!=i \; | \; s(t-1)=i \big) = 1 - A(i, i)$$ # # So, the joint probability that we are trying to model is: # # $$p\big(s(1)=i, s(2)=i,...,s(n)=i, s(n+1) \;!= i\big) = A(i,i)^{n-1}\big(1-A(i,i)\big)$$ # # In english this means that we are multiplying the transition probability of staying in the same state, $A(i,i)$, times the number of times we stayed in the same state, $n$, (note it is $n-1$ because we are given that we start in that state, hence there is no transition associated with it) times $1 - A(i,i)$, the probability of transitioning from that state. 
This leaves us with an expected value for $n$ of: # # $$E(n) = \sum np(n) = \sum_{n=1..\infty} nA(i,i)^{n-1}(1-A(i,i))$$ # # Note, in the above equation $p(n)$ is the probability that we will see state $i$ $n-1$ times after starting from $i$ and then see a state that is not $i$. Also, we know that the expected value of $n$ should be the sum of all possible values of $n$ times $p(n)$. # # # ### 2.1 Expected $n$ # So, we can now expand this function and calculate the two sums separately. # # $$E(n) = \sum_{n=1..\infty}nA(i,i)^{n-1}(1 - A(i,i)) = \sum nA(i, i)^{n-1} - \sum nA(i,i)^n$$ # # **First Sum**<br> # With our first sum, we can say that: # # $$S = \sum na(i, i)^{n-1}$$ # # $$S = 1 + 2a + 3a^2 + 4a^3+ ...$$ # # And we can then multiply that sum, $S$, by $a$, to get: # # $$aS = a + 2a^2 + 3a^3 + 4a^4+...$$ # # And then we can subtract $aS$ from $S$: # # $$S - aS = S'= 1 + a + a^2 + a^3+...$$ # # This $S'$ is another infinite sum, but it is one that is much easier to solve! # # $$S'= 1 + a + a^2 + a^3+...$$ # # And then $aS'$ is: # # $$aS' = a + a^2 + a^3+ + a^4 + ...$$ # # Which, when we then do $S' - aS'$, we end up with: # # $$S' - aS' = 1$$ # # $$S' = \frac{1}{1 - a}$$ # # And if we then substitute that value in for $S'$ above: # # $$S - aS = S'= 1 + a + a^2 + a^3+... = \frac{1}{1 - a}$$ # # $$S - aS = \frac{1}{1 - a}$$ # # $$S = \frac{1}{(1 - a)^2}$$ # # # **Second Sum**<br> # We can now look at our second sum: # # $$S = \sum na(i,i)^n$$ # # $$S = 1a + 2a^2 + 3a^3 +...$$ # # # $$Sa = 1a^2 + 2a^3 +...$$ # # $$S - aS = S' = a + a^2 + a^3 + ...$$ # # $$aS' = a^2 + a^3 + a^4 +...$$ # # $$S' - aS' = a$$ # # $$S' = \frac{a}{1 - a}$$ # # And we can plug back in $S'$ to get: # # $$S - aS = \frac{a}{1 - a}$$ # # $$S = \frac{a}{(1 - a)^2}$$ # # **Combine** <br> # We can now combine these two sums as follows: # # $$E(n) = \frac{1}{(1 - a)^2} - \frac{a}{(1-a)^2}$$ # # $$E(n) = \frac{1}{1-a}$$ # # **Calculate Number of Sick Days**<br> # So, how do we calculate the correct number of sick days? That is just: # # $$\frac{1}{1 - 0.8} = 5$$ # ## 3. SEO and Bounce Rate Optimization # We are now going to look at SEO and Bounch Rate Optimization. This is a problem that every developer and website owner can relate to. You have a website and obviously you would like to increase traffic, increase conversions, and avoid a high bounce rate (which could lead to google assigning your page a low ranking). What would a good way of modeling this data be? Without even looking at any code we can look at some examples of things that we want to know, and how they relate to markov models. # # ### 3.1 Arrival # First and foremost, how do people arrive on your page? Is it your home page? Your landing page? Well, this is just the very first page of what is hopefully a sequence of pages. So, the markov analogy here is that this is just the initial state distribution or $\pi$. So, once we have our markov model, the $\pi$ vector will tell us which of our pages a user is most likely to start on. # # ### 3.2 Sequences of Pages # What about sequences of pages? Well, if you think people are getting to your landing page, hitting the buy button, checking out, and then closing the browser window, you can test the validity of that assumption by calculating the probability of that sequence. Of course, the probability of any sequence is probability going to be much less than 1. This is because for a longer sequence, we have more multiplication, and hence smaller final numbers. 
We do have two alternatives however: # # > * 1) You can compare the probability of two different sequences. So, are people going through the entire checkout process? Or is it more probable that they are just bouncing? # * 2) Another option is to just find the transition probabilities themselves. These are conditional probabilities instead of joint probabilities. You want to know, once they have made it to the landing page, what is the probability of hitting buy. Then, once they have hit buy, what is the probability of them completing the checkout. # # ### 3.3 Bounce Rate # This is hard to measure, unless you are google and hence have analytics on nearly every page on the web. This is because once a user has left your site, you can no longer run code on their computer or track what they are doing. However, let's pretend that we can determine this information. Once we have done this, we can measure which page has the highest bounce rate. At this point we can manually analyze that page and ask our marketing people "what is different about this page that people don't find it useful/want to leave?" We can then address that problem, and the hopefully later analysis shows that the fixed page no longer has a high bounce right. In the markov model, we can just represents this as the null state. # # ### 3.4 Data # So, the data we are going to be working with has two columns: `last_page_id` and `next_page_id`. This can be interpreted as the current page and the next page. The site has 10 pages with the id's 0-9. We can represent start pages by making the current page -1, and the next page the actual page. We can represent the end of the page with two different codes, `B`(bounce) or `C` (close). In the case of bounce, the user saw the page and then immediately bounced. In the case of close, the user saw the page stayed and potentially saw some useful information, and then closed the window. So, you can imagine that our engineer may use time as a factor in determining if it is a bounce or a close. import numpy as np import pandas as pd # + """Goal here is to store start page and end page, and the count how many times that happens. After that we are going to turn it into a probability distribution. We can divide all transitions that start with specific start state, by row_sum""" transitions = {} # getting all specific transitions from start pg to end pg, tallying up # of times each occurs row_sums = {} # start date as key -> getting number of times each starting pg occurs # Collect our counts for line in open('../../../data/site/site_data.csv'): s, e = line.rstrip().split(',') # get start and end page transitions[(s, e)] = transitions.get((s, e), 0.) + 1 row_sums[s] = row_sums.get(s, 0.) + 1 # Normalize the counts so they become real probability distributions for k, v in transitions.items(): s, e = k transitions[k] = v / row_sums[s] # Calculate initial state distribution print('Initial state distribution') for k, v in transitions.items(): s, e = k if s == '-1': # this means it is the start of the sequence. print (e, v) # Which page has the highest bounce rate? for k, v in transitions.items(): s, e = k if e == 'B': print(f'Bounce rate for {s}: {v}') # - # We can see that page with `id` 9 has the highest value in the initial state distribution, so we are most likely to start on that page. We can then see that the page with highest bounce rate is also at page `id` 9. # ## 4. 
Build a 2nd-order language model and generate phrases # So, we are now going to work with non first order markov chains for a little bit. In this example we are going to try and create a language model. So we are going to first train a model on some data to determine the distribution of a word given the previous two words. We can then use this model to generate new phrases. Note that another step of this model would be to calculate the probability of a phrase. # # So the data that we are going to look at is just a collection of Robert Frost Poems. It is just a text file with all of the poems concatenated together. So, the first thing we are going to want to do is tokenize each sentence, and remove punctuation. It will look similar to this: # # ``` # def remove_punctuation(s): # return s.translate(None, string.punctuation) # # tokens = [t for t in remove_puncuation(line.rstrip().lower()).split()] # ``` # # Once we have tokenized each line, we want to perform various counts in addition to the second order model counts. We need to measure the initial distribution of words, or stated another way the distribution of the first word of a sentence. We also want to know the distribution of the second word of a sentence. Both of these do not have two previous words, so they are not second order. We could technically include them in the second order measurement by using `None` in place of the previous words, but we won't do that here. We also want to keep track of how to end the sentence (end of sentence distribution, will look similar to (w(t-2), w(t-1) -> END)), so we will include a special token for that too. # # When we do this counting, what we first want to do is create an array of all possibilities. So, for example if we had two sentences: # # ``` # I love dogs # I love cats # ``` # # Then we could have a dictionary where the key was `(I, love)` and the value was an array `[dogs, cats]`. If "I love" was also a stand alone sentence, then the value would be `[dogs, cats, END]`. The function below can help us with this, since we first need to check if there is any value for the key, create an array if not, otherwise just append to the array. # # ``` # def add2dict(d, k, v): # if k not in d: # d[k] = [] # else: # d[k].append(v) # ``` # # One we have collected all of these arrays of possible next words, we need to turn them into **probability distributions**. For example, the array `[cat, cat, dog]` would become the dictionary `{"cat": 2/3, "dog": 1/3}`. Here is a function that can do this: # # ``` # def list2pdict(ts): # d = {} # n = len(ts) # for t in ts: # d[t] = d.get(t, 0.) + 1 # for t, c in d.items(): # d[t] = c / n # return d # ``` # # Next, we will need a function that can sample from this dictionary. To do this we will need to generate a random number between 0 and 1, and then use the distribution of the words to sample a word given a random number. Here is a function that can do that: # # ``` # def sample_word(d): # p0 = np.random.random() # cumulative = 0 # for t, p in d.items(): # cumulative += p # if p0 < cumulative: # return t # assert(False) # should never get here # ``` # # Because all of our distributions are structured as dictionaries, we can use the same function for all of them. import numpy as np import string # + """3 dicts. 
1st store pdist for the start of a phrase, then a second word dict which stores the distributions for the 2nd word of a sentence, and then we are going to have a dict for all second order transitions""" initial = {} second_word = {} transitions = {} def remove_punctuation(s): return s.translate(str.maketrans('', '', string.punctuation)) def add2dict(d, k, v): """Parameters: Dictionary, Key, Value""" if k not in d: d[k] = [] d[k].append(v) # Loop through file of poems for line in open('../../../data/poems/robert_frost.txt'): tokens = remove_punctuation(line.rstrip().lower()).split() # Get all tokens for specific line we are looping over T = len(tokens) # Length of sequence for i in range(T): # Loop through every token in sequence t = tokens[i] if i == 0: # We are looking at first word initial[t] = initial.get(t, 0.) + 1 else: t_1 = tokens[i - 1] if i == T - 1: # Looking at last word add2dict(transitions, (t_1, t), 'END') if i == 1: # second word of sentence, hence only 1 previous word add2dict(second_word, t_1, t) else: t_2 = tokens[i - 2] # Get second previous word add2dict(transitions, (t_2, t_1), t) # add previous and 2nd previous word as key, and current word as val # Normalize the distributions initial_total = sum(initial.values()) for t, c in initial.items(): initial[t] = c / initial_total # Take our list and turn it into a dictionary of probabilities def list2pdict(ts): d = {} n = len(ts) # get total number of values for t in ts: # look at each token d[t] = d.get(t, 0.) + 1 for t, c in d.items(): # go through dictionary, divide frequency by sum d[t] = c / n return d for t_1, ts in second_word.items(): second_word[t_1] = list2pdict(ts) for k, ts in transitions.items(): transitions[k] = list2pdict(ts) def sample_word(d): p0 = np.random.random() # Generate random number from 0 to 1 cumulative = 0 # cumulative count for all probabilities seen so far for t, p in d.items(): cumulative += p if p0 < cumulative: return t assert(False) # should never hit this """Function to generate a poem""" def generate(): for i in range(4): sentence = [] # initial word w0 = sample_word(initial) sentence.append(w0) # sample second word w1 = sample_word(second_word[w0]) sentence.append(w1) # second-order transitions until END -> enter infinite loop while True: w2 = sample_word(transitions[(w0, w1)]) # sample next word given previous two words if w2 == 'END': break sentence.append(w2) w0 = w1 w1 = w2 print(' '.join(sentence)) generate() # - # ## 5. Google's PageRank Algorithm # Markov models were even used in Google's PageRank algorithm. The basic problem we face is: # > * We have $M$ webpages that link to eachother, and we would like to assign importance scores $x(1),...,x(M)$ # * All of these scores are greater than or equal to 0 # * So, we want to assign a page rank to all of these pages # # How can we go about doing this? Well, we can think of a webpage as a sequence, and the page you are on as the state. Where does the ranking come from? Well, the ranking actually comes from the limiting distribution. That is, in the long run, the proportion of visits that will be spent on this page. Now, if you think "great that is all I need to know", slow down. How can we actually do this in practice? How do we train the markov model, and what are the values we assign to the state transition matrix? And how can we ensure that the limiting distribution exists and is unique? The key insight was that **we can use the linked structure of the web to determine the ranking**. 
# # The main idea is that a *link to a page* is like a *vote for its importance*. So, as a first attempt we could just use a frequency count to measure the votes. Of course, that wouldn't be a valid probability distribution, so we could just divide each row by its sum to make it sum to 1. So we set: # # $$A(i, j) = \frac{1}{n(i)} \; if \; i \; links \; to \; j$$ # $$A(i, j) = 0 \; otherwise$$ # # Here $n(i)$ stands for the total number of links on a page, and you can confirm that the sum of a row is $\frac{n(i)}{n(i)} = 1$, so this is a valid markov matrix. Now, we still aren't sure if the limiting distribution is unique. # # ### 5.1 This is already a good start # Let's keep in mind that the above solution already solves a few problems. For instance, let's say you are a spammer and you want to sell 1000 links on your webpage. Well, because the transition matrix must remain a valid probability matrix, the rows must sum to 1, which means that each of your links now only has a strength of $\frac{1}{1000}$. For example the frequency matrix would look like: # # | |abc.com|amazon.com|facebook.com|github.com| # |--- |--- |--- | --- |--- | # |thespammer.com|1 |1 |1 |1 | # # And then if we transformed that into a probability matrix it would just be each value divided by the total number of links, 4: # # | |abc.com|amazon.com|facebook.com|github.com| # |--- |--- |--- | --- |--- | # |thespammer.com|0.25 |0.25 |0.25 |0.25 | # # You may then think, I will just create 1000 pages and each of them will only have 1 link. Unfortunately, since nobody knows about those 1000 pages you just created nobody is going to link to them, which means they are impossible to get to. So, in the limiting distribution, those states will have 0 probability because you can't even get to them, so there outgoing links are worthless. Remember, the markov chains limiting distribution will model the long running proportion of visits to a state. So, if you never visit that state, its probability will be 0. # # We still have not ensure that the limiting distribution exists and is unique. # # ### 5.2 Perron-Frobenius Theorem # How can we ensure that our model has a unique stationary distribution. In 1910, this was actually determined. It is known as the **Perron-Frobenius Theorem**, and it states that: # > *If our transition matrix is a markov matrix -meaning that all of the rows sum to 1, and all of the values are strictly positive, i.e. no values that are 0- then the stationary distribution exists and is unique*. # # In fact, we can start in any initial state and as time approaches infinity we will always end up with the same stationary distribution, therefore this is also the limiting distribution. # # So, how can we satisfy the PF criterion? Let's return to this idea of **smoothing**, which we first talked about when discussing how to train a markov model. The basic idea was that we can make things that were 0, non-zero, so there is still a small possibility that we can get to that state. This might be good news for the spammer. So, we can create a uniform probability distribution $U = \frac{1}{M}$, which is an $M x M$ matrix ($M$ is the number of states). PageRanks solution was to take the matrix we had before and multiply it by 0.85, and to take the uniform distribution and multiply it by 0.15, and add them together to get the final pagerank matrix. # # $$G = 0.85A + 0.15U$$ # # Now all of the elements are strictly positive, and we can convince ourselves that G is still a valid markov matrix.
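#
# To make the construction concrete, here is a hedged sketch on a made-up four-page web (the link structure is invented for illustration; it does not come from the text above). It builds the raw link matrix $A$, smooths it into $G = 0.85A + 0.15U$, and finds the limiting distribution by repeatedly applying $G$.

# +
# Hedged sketch of the PageRank construction described above, on an invented toy web.
import numpy as np

# links[i] = list of pages that page i links to (toy example)
links = {0: [1, 2], 1: [2], 2: [0], 3: [0, 2]}
M = 4

# Raw link matrix: A(i, j) = 1 / n(i) if i links to j, 0 otherwise
A = np.zeros((M, M))
for i, outgoing in links.items():
    for j in outgoing:
        A[i, j] = 1.0 / len(outgoing)

# Smooth with the uniform matrix so every entry is strictly positive
U = np.full((M, M), 1.0 / M)
G = 0.85 * A + 0.15 * U

# Power iteration: start from any distribution and keep applying G
pi = np.full(M, 1.0 / M)
for _ in range(100):
    pi = pi @ G

print("PageRank scores:", np.round(pi, 3))
# -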
Machine_Learning/05-Hidden_Markov_Models-03-Markov-Models-Example-Problems-and-Applications.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Preliminaries: imports, start H2O, load data import sklearn import pandas as pd import numpy as np import shap import h2o from h2o.automl import H2OAutoML df = pd.read_csv('C:/Users/Karti/NEU/data/insurance.csv') df.head() h2o.init() data_path = 'C:/Users/Karti/NEU/data/insurance.csv' h2o_df = h2o.import_file(data_path) splits = h2o_df.split_frame(ratios=[0.8],seed=1) train = splits[0] test = splits[1] y = "charges" x = h2o_df.columns x.remove(y) # # Run h2o AutoML aml = H2OAutoML(max_runtime_secs=180, seed=1) aml.train(x=x,y=y, training_frame=train) lb = aml.leaderboard lb.head() # Get model ids for all models in the AutoML Leaderboard model_ids = list(aml.leaderboard['model_id'].as_data_frame().iloc[:,0]) # Get the "All Models" Stacked Ensemble model se = h2o.get_model([mid for mid in model_ids if "StackedEnsemble_AllModels" in mid][0]) # Get the Stacked Ensemble metalearner model metalearner = h2o.get_model(se.metalearner()['name']) pred = aml.predict(test) pred.head() lb = h2o.automl.get_leaderboard(aml, extra_columns = 'ALL') lb # # View 'Black box' model #extract the best model in leaderboard list bst_model = aml.leader bst_model # # Use a decision tree surrogate to generate explanations of the "black box" model # #### First bind the "black box" model predictions onto the training frame preds = bst_model.predict(h2o_df) preds.columns = ['predicted_charges'] frame_yhat = h2o_df.cbind(preds) # #### Train decision tree surrogate model # + from h2o.estimators.gbm import H2OGradientBoostingEstimator yhat = 'predicted_charges' model_id = 'dt_surrogate_mojo' # train single tree surrogate model surrogate = H2OGradientBoostingEstimator(ntrees=1, sample_rate=1, col_sample_rate=1, max_depth=3, seed=12345, model_id=model_id) _ = surrogate.train(x=x, y=yhat, training_frame=frame_yhat) # persist MOJO (compiled, representation of trained model) # from which to generate plot of surrogate mojo_path = surrogate.download_mojo(path='.') print(surrogate) print('Generated MOJO path:\n', mojo_path) # - # # Generate GraphViz representation of MOJO # + from h2o.backend import H2OLocalServer # for calling external processes import os import re import subprocess from subprocess import CalledProcessError import time details = False # print more info on tree, details = True title = 'Medical Charges Tree Surrogate' hs = H2OLocalServer() h2o_jar_path = hs._find_jar() print('Discovered H2O jar path:\n', h2o_jar_path) gv_file_name = model_id + '.gv' gv_args = str('-cp ' + h2o_jar_path + ' hex.genmodel.tools.PrintMojo --tree 0 -i ' + mojo_path + ' -o').split() gv_args.insert(0, 'java') gv_args.append(gv_file_name) if details: gv_args.append('--detail') if title is not None: gv_args = gv_args + ['--title', title] print() print('Calling external process ...') print(' '.join(gv_args)) _ = subprocess.call(gv_args) # - # # Generate PNG from GraphViz representation # + png_file_name = model_id + '.png' png_args = str('dot -Tpng ' + gv_file_name + ' -o ' + png_file_name) png_args = png_args.split() print('Calling external process ...') print(' '.join(png_args)) _ = subprocess.call(png_args) # + # for in-notebook display from IPython.display import Image from IPython.display import display display(Image((png_file_name))) # - # shutdown h2o h2o.cluster().shutdown(prompt=True)
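# As a small side note (not part of the original notebook): before shutting the cluster down, the surrogate's global variable importance could be inspected as a sanity check that the tree plot and the importance ranking tell a consistent story. A hedged sketch, left commented out because the cell above has already shut the cluster down:

# +
# Hedged sketch — run this before h2o.cluster().shutdown() if you want to try it.
# surrogate_varimp = surrogate.varimp(use_pandas=True)   # global importance table
# print(surrogate_varimp)
# surrogate.varimp_plot()                                # bar chart of the same ranking
# -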
AutoML/AutoML-Decision Tree - Insurance.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Convolutional Neural Networks with Tensorflow # # "Deep Learning" is a general term that usually refers to the use of neural networks with multiple layers that synthesize the way the human brain learns and makes decisions. A convolutional neural network is a kind of neural network that extracts *features* from matrices of numeric values (often images) by convolving multiple filters over the matrix values to apply weights and identify patterns, such as edges, corners, and so on in an image. The numeric representations of these patterns are then passed to a fully-connected neural network layer to map the features to specific classes. # # ## Building a CNN # There are several commonly used frameworks for creating CNNs. In this notebook, we'll build a simple example CNN using Tensorflow. The example is a classification model that can classify an image as a circle, a triangle, or a square. # # ### Import framework # # First, let's import the Tensorflow libraries we'll need. # + tags=[] import tensorflow from tensorflow import keras print('TensorFlow version:',tensorflow.__version__) print('Keras version:',keras.__version__) # - # ### Preparing the Data # Before we can train the model, we need to prepare the data. We'll divide the feature values by 255 to normalize them as floating point values between 0 and 1, and we'll split the data so that we can use 70% of it to train the model, and hold back 30% to validate it. When loading the data, the data generator will assing "hot-encoded" numeric labels to indicate which class each image belongs to based on the subfolders in which the data is stored. In this case, there are three subfolders - *circle*, *square*, and *triangle*, so the labels will consist of three *0* or *1* values indicating which of these classes is associated with the image - for example the label [0 1 0] indicates that the image belongs to the second class (*square*). # + tags=[] from tensorflow.keras.preprocessing.image import ImageDataGenerator data_folder = 'data/shapes' img_size = (128, 128) batch_size = 30 print("Getting Data...") datagen = ImageDataGenerator(rescale=1./255, # normalize pixel values validation_split=0.3) # hold back 30% of the images for validation print("Preparing training dataset...") train_generator = datagen.flow_from_directory( data_folder, target_size=img_size, batch_size=batch_size, class_mode='categorical', subset='training') # set as training data print("Preparing validation dataset...") validation_generator = datagen.flow_from_directory( data_folder, target_size=img_size, batch_size=batch_size, class_mode='categorical', subset='validation') # set as validation data classnames = list(train_generator.class_indices.keys()) print("class names: ", classnames) # - # ### Defining the CNN # Now we're ready to create our model. This involves defining the layers for our CNN, and compiling them for multi-class classification. 
# + tags=[] # Define a CNN classifier network from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense # Define the model as a sequence of layers model = Sequential() # The input layer accepts an image and applies a convolution that uses 32 6x6 filters and a rectified linear unit activation function model.add(Conv2D(32, (6, 6), input_shape=train_generator.image_shape, activation='relu')) # Next we;ll add a max pooling layer with a 2x2 patch model.add(MaxPooling2D(pool_size=(2,2))) # We can add as many layers as we think necessary - here we'll add another convolution, max pooling, and dropout layer model.add(Conv2D(32, (6, 6), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) # And another set model.add(Conv2D(32, (6, 6), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) # A dropout layer randomly drops some nodes to reduce inter-dependencies (which can cause over-fitting) model.add(Dropout(0.2)) # Now we'll flatten the feature maps and generate an output layer with a predicted probability for each class model.add(Flatten()) model.add(Dense(train_generator.num_classes, activation='sigmoid')) # With the layers defined, we can now compile the model for categorical (multi-class) classification model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) print(model.summary()) # - # ### Training the Model # With the layers of the CNN defined, we're ready to train the model using our image data. In the example below, we use 5 iterations (*epochs*) to train the model in 30-image batches, holding back 30% of the data for validation. After each epoch, the loss function measures the error (*loss*) in the model and adjusts the weights (which were randomly generated for the first iteration) to try to improve accuracy. # # > **Note**: We're only using 5 epochs to minimze the training time for this simple example. A real-world CNN is usually trained over more epochs than this. CNN model training is processor-intensive, involving a lot of matrix and vector-based operations; so it's recommended to perform this on a system that can leverage GPUs, which are optimized for these kinds of calculation. This will take a while to complete on a CPU-based system - status will be displayed as the training progresses. # + tags=[] # Train the model over 5 epochs using 30-image batches and using the validation holdout dataset for validation num_epochs = 5 history = model.fit( train_generator, steps_per_epoch = train_generator.samples // batch_size, validation_data = validation_generator, validation_steps = validation_generator.samples // batch_size, epochs = num_epochs) # - # ### View the Loss History # We tracked average training and validation loss history for each epoch. We can plot these to verify that loss reduced as the model was trained, and to detect *overfitting* (which is indicated by a continued drop in training loss after validation loss has levelled out or started to increase). 
# + # %matplotlib inline from matplotlib import pyplot as plt epoch_nums = range(1,num_epochs+1) training_loss = history.history["loss"] validation_loss = history.history["val_loss"] plt.plot(epoch_nums, training_loss) plt.plot(epoch_nums, validation_loss) plt.xlabel('epoch') plt.ylabel('loss') plt.legend(['training', 'validation'], loc='upper right') plt.show() # - # ### Evaluate Model Performance # We can see the final accuracy based on the test data, but typically we'll want to explore performance metrics in a little more depth. Let's plot a confusion matrix to see how well the model is predicting each class. # + tags=[] # Tensorflow doesn't have a built-in confusion matrix metric, so we'll use SciKit-Learn import numpy as np from sklearn.metrics import confusion_matrix import matplotlib.pyplot as plt # %matplotlib inline print("Generating predictions from validation data...") # Get the image and label arrays for the first batch of validation data x_test = validation_generator[0][0] y_test = validation_generator[0][1] # Use the moedl to predict the class class_probabilities = model.predict(x_test) # The model returns a probability value for each class # The one with the highest probability is the predicted class predictions = np.argmax(class_probabilities, axis=1) # The actual labels are hot encoded (e.g. [0 1 0], so get the one with the value 1 true_labels = np.argmax(y_test, axis=1) # Plot the confusion matrix cm = confusion_matrix(true_labels, predictions) plt.imshow(cm, interpolation="nearest", cmap=plt.cm.Blues) plt.colorbar() tick_marks = np.arange(len(classnames)) plt.xticks(tick_marks, classnames, rotation=85) plt.yticks(tick_marks, classnames) plt.xlabel("Predicted Shape") plt.ylabel("True Shape") plt.show() # - # ### Using the Trained Model # Now that we've trained the model, we can use it to predict the class of a new image. # + tags=[] from tensorflow.keras import models from random import randint import os # %matplotlib inline # Function to create a random image (of a square, circle, or triangle) def create_image (size, shape): from random import randint import numpy as np from PIL import Image, ImageDraw xy1 = randint(10,40) xy2 = randint(60,100) col = (randint(0,200), randint(0,200), randint(0,200)) img = Image.new("RGB", size, (255, 255, 255)) draw = ImageDraw.Draw(img) if shape == 'circle': draw.ellipse([(xy1,xy1), (xy2,xy2)], fill=col) elif shape == 'triangle': draw.polygon([(xy1,xy1), (xy2,xy2), (xy2,xy1)], fill=col) else: # square draw.rectangle([(xy1,xy1), (xy2,xy2)], fill=col) del draw return np.array(img) # Save the trained model modelFileName = 'models/shape_classifier.h5' model.save(modelFileName) del model # deletes the existing model variable # Create a random test image classnames = os.listdir(os.path.join('data', 'shapes')) classnames.sort() img = create_image ((128,128), classnames[randint(0, len(classnames)-1)]) plt.axis('off') plt.imshow(img) # The model expects a batch of images as input, so we'll create an array of 1 image imgfeatures = img.reshape(1, img.shape[0], img.shape[1], img.shape[2]) # We need to format the input to match the training data # The generator loaded the values as floating point numbers # and normalized the pixel values, so... 
imgfeatures = imgfeatures.astype('float32')
imgfeatures /= 255

# Use the classifier to predict the class
model = models.load_model(modelFileName) # loads the saved model
class_probabilities = model.predict(imgfeatures)

# Find the class with the highest predicted probability
class_idx = np.argmax(class_probabilities, axis=1)
print(classnames[int(class_idx[0])])
# -

# In this notebook, you used TensorFlow to train an image classification model based on a convolutional neural network.
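# As an optional follow-up to the confusion matrix, a per-class precision/recall summary can make class-specific weaknesses easier to spot. This is a minimal sketch, assuming the `predictions`, `true_labels`, and `classnames` arrays computed in the evaluation cells above are still in scope; it only uses scikit-learn, which was already imported there.

# + tags=[]
# Summarize per-class precision, recall, and F1 for the shape classifier
from sklearn.metrics import classification_report

print(classification_report(true_labels, predictions, target_names=classnames))
# -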
05b - Convolutional Neural Networks (Tensorflow).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/QDaria/QDaria.github.io/blob/main/Copy_of_hello_many_worlds.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="xLOXFOT5Q40E" # ##### Copyright 2020 The TensorFlow Authors. # + cellView="form" id="iiQkM5ZgQ8r2" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="j6331ZSsQGY3" # # Hello, many worlds # + [markdown] id="i9Jcnb8bQQyd" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/quantum/tutorials/hello_many_worlds"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/quantum/blob/master/docs/tutorials/hello_many_worlds.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/quantum/blob/master/docs/tutorials/hello_many_worlds.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/quantum/docs/tutorials/hello_many_worlds.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # </table> # + [markdown] id="6tYn2HaAUgH0" # This tutorial shows how a classical neural network can learn to correct qubit calibration errors. It introduces <a target="_blank" href="https://github.com/quantumlib/Cirq" class="external">Cirq</a>, a Python framework to create, edit, and invoke Noisy Intermediate Scale Quantum (NISQ) circuits, and demonstrates how Cirq interfaces with TensorFlow Quantum. # + [markdown] id="sPZoNKvpUaqa" # ## Setup # + id="TorxE5tnkvb2" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="04733134-0571-484b-f309-3b4b5382a635" # !pip install tensorflow==2.3.1 # + [markdown] id="FxkQA6oblNqI" # Install TensorFlow Quantum: # + id="saFHsRDpkvkH" colab={"base_uri": "https://localhost:8080/"} outputId="a6d478e5-8e66-4248-ad49-891c3f149e81" # !pip install tensorflow-quantum # + [markdown] id="F1L8h1YKUvIO" # Now import TensorFlow and the module dependencies: # + id="enZ300Bflq80" import tensorflow as tf import tensorflow_quantum as tfq import cirq import sympy import numpy as np # visualization tools # %matplotlib inline import matplotlib.pyplot as plt from cirq.contrib.svg import SVGCircuit # + [markdown] id="b08Mmbs8lr81" # ## 1. 
The Basics # + [markdown] id="y31qSRCczI-L" # ### 1.1 Cirq and parameterized quantum circuits # # Before exploring TensorFlow Quantum (TFQ), let's look at some <a target="_blank" href="https://github.com/quantumlib/Cirq" class="external">Cirq</a> basics. Cirq is a Python library for quantum computing from Google. You use it to define circuits, including static and parameterized gates. # # Cirq uses <a target="_blank" href="https://www.sympy.org" class="external">SymPy</a> symbols to represent free parameters. # + id="2yQdmhQLCrzQ" a, b = sympy.symbols('a b') # + [markdown] id="itUlpbKmDYNW" # The following code creates a two-qubit circuit using your parameters: # + id="Ps-pd2mndXs7" colab={"base_uri": "https://localhost:8080/", "height": 138} outputId="c18bf1d0-0f49-4b83-bc79-e8be00a6c0e2" # Create two qubits q0, q1 = cirq.GridQubit.rect(1, 2) # Create a circuit on these qubits using the parameters you created above. circuit = cirq.Circuit( cirq.rx(a).on(q0), cirq.ry(b).on(q1), cirq.CNOT(control=q0, target=q1)) SVGCircuit(circuit) # + [markdown] id="zcCX109cJUaz" # To evaluate circuits, you can use the `cirq.Simulator` interface. You replace free parameters in a circuit with specific numbers by passing in a `cirq.ParamResolver` object. The following code calculates the raw state vector output of your parameterized circuit: # + id="VMq7EayNRyQb" colab={"base_uri": "https://localhost:8080/"} outputId="a1ab4819-3c1b-4db9-b679-d83efd9fdedf" # Calculate a state vector with a=0.5 and b=-0.5. resolver = cirq.ParamResolver({a: 0.5, b: -0.5}) output_state_vector = cirq.Simulator().simulate(circuit, resolver).final_state_vector output_state_vector # + [markdown] id="-SUlLpXBeicF" # State vectors are not directly accessible outside of simulation (notice the complex numbers in the output above). To be physically realistic, you must specify a measurement, which converts a state vector into a real number that classical computers can understand. Cirq specifies measurements using combinations of the <a target="_blank" href="https://en.wikipedia.org/wiki/Pauli_matrices" class="external">Pauli operators</a> $\hat{X}$, $\hat{Y}$, and $\hat{Z}$. As illustration, the following code measures $\hat{Z}_0$ and $\frac{1}{2}\hat{Z}_0 + \hat{X}_1$ on the state vector you just simulated: # + id="hrSnOCi3ehr_" colab={"base_uri": "https://localhost:8080/"} outputId="f876235b-9c7a-4ecf-cbf8-502e9b469558" z0 = cirq.Z(q0) qubit_map={q0: 0, q1: 1} z0.expectation_from_state_vector(output_state_vector, qubit_map).real # + id="OZ0lWFXv6pII" colab={"base_uri": "https://localhost:8080/"} outputId="53a714a8-7595-40dd-c139-24ee51656660" z0x1 = 0.5 * z0 + cirq.X(q1) z0x1.expectation_from_state_vector(output_state_vector, qubit_map).real # + [markdown] id="bkC-yjIolDNr" # ### 1.2 Quantum circuits as tensors # # TensorFlow Quantum (TFQ) provides `tfq.convert_to_tensor`, a function that converts Cirq objects into tensors. This allows you to send Cirq objects to our <a target="_blank" href="https://www.tensorflow.org/quantum/api_docs/python/tfq/layers">quantum layers</a> and <a target="_blank" href="https://www.tensorflow.org/quantum/api_docs/python/tfq/get_expectation_op">quantum ops</a>. The function can be called on lists or arrays of Cirq Circuits and Cirq Paulis: # + id="1gLQjA02mIyy" colab={"base_uri": "https://localhost:8080/"} outputId="fc6f16cb-bab8-4e1d-aee7-0f2fdb3ea89a" # Rank 1 tensor containing 1 circuit. 
circuit_tensor = tfq.convert_to_tensor([circuit]) print(circuit_tensor.shape) print(circuit_tensor.dtype) # + [markdown] id="SJy6AkbU6pIP" # This encodes the Cirq objects as `tf.string` tensors that `tfq` operations decode as needed. # + id="aX_vEmCKmpQS" colab={"base_uri": "https://localhost:8080/"} outputId="6362ba7c-bdfc-40e1-8f86-6f60ff26398b" # Rank 1 tensor containing 2 Pauli operators. pauli_tensor = tfq.convert_to_tensor([z0, z0x1]) pauli_tensor.shape # + [markdown] id="FI1JLWe6m8JF" # ### 1.3 Batching circuit simulation # # TFQ provides methods for computing expectation values, samples, and state vectors. For now, let's focus on *expectation values*. # # The highest-level interface for calculating expectation values is the `tfq.layers.Expectation` layer, which is a `tf.keras.Layer`. In its simplest form, this layer is equivalent to simulating a parameterized circuit over many `cirq.ParamResolvers`; however, TFQ allows batching following TensorFlow semantics, and circuits are simulated using efficient C++ code. # # Create a batch of values to substitute for our `a` and `b` parameters: # + id="1fsVZhF5lIXp" batch_vals = np.array(np.random.uniform(0, 2 * np.pi, (5, 2)), dtype=np.float32) # + [markdown] id="Ip7jlGXIf22u" # Batching circuit execution over parameter values in Cirq requires a loop: # + id="RsfF53UCJtr9" colab={"base_uri": "https://localhost:8080/"} outputId="557d75af-6889-4925-ac06-450bd53fd506" cirq_results = [] cirq_simulator = cirq.Simulator() for vals in batch_vals: resolver = cirq.ParamResolver({a: vals[0], b: vals[1]}) final_state_vector = cirq_simulator.simulate(circuit, resolver).final_state_vector cirq_results.append( [z0.expectation_from_state_vector(final_state_vector, { q0: 0, q1: 1 }).real]) print('cirq batch results: \n {}'.format(np.array(cirq_results))) # + [markdown] id="W0JlZEu-f9Ac" # The same operation is simplified in TFQ: # + id="kGZVdcZ6y9lC" colab={"base_uri": "https://localhost:8080/"} outputId="bbe5bf35-7198-436d-8706-0c857d870499" tfq.layers.Expectation()(circuit, symbol_names=[a, b], symbol_values=batch_vals, operators=z0) # + [markdown] id="wppQ3TJ23mWC" # ## 2. Hybrid quantum-classical optimization # # Now that you've seen the basics, let's use TensorFlow Quantum to construct a *hybrid quantum-classical neural net*. You will train a classical neural net to control a single qubit. The control will be optimized to correctly prepare the qubit in the `0` or `1` state, overcoming a simulated systematic calibration error. This figure shows the architecture: # # <img src="https://github.com/tensorflow/quantum/blob/master/docs/tutorials/images/nn_control1.png?raw=1" width="1000"> # # Even without a neural network this is a straightforward problem to solve, but the theme is similar to the real quantum control problems you might solve using TFQ. It demonstrates an end-to-end example of a quantum-classical computation using the `tfq.layers.ControlledPQC` (Parametrized Quantum Circuit) layer inside of a `tf.keras.Model`. # + [markdown] id="NlyxF3Q-6pIe" # For the implementation of this tutorial, this is architecture is split into 3 parts: # # - The *input circuit* or *datapoint circuit*: The first three $R$ gates. # - The *controlled circuit*: The other three $R$ gates. # - The *controller*: The classical neural-network setting the parameters of the controlled circuit. # + [markdown] id="VjDf-nTM6ZSs" # ### 2.1 The controlled circuit definition # # Define a learnable single bit rotation, as indicated in the figure above. 
This will correspond to our controlled circuit. # + id="N-j7SCl-51-q" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="20077cd6-6c96-44ed-cf78-9cabef9833b7" # Parameters that the classical NN will feed values into. control_params = sympy.symbols('theta_1 theta_2 theta_3') # Create the parameterized circuit. qubit = cirq.GridQubit(0, 0) model_circuit = cirq.Circuit( cirq.rz(control_params[0])(qubit), cirq.ry(control_params[1])(qubit), cirq.rx(control_params[2])(qubit)) SVGCircuit(model_circuit) # + [markdown] id="wfjSbsvb7g9f" # ### 2.2 The controller # # Now define controller network: # + id="1v4CK2jD6pIj" # The classical neural network layers. controller = tf.keras.Sequential([ tf.keras.layers.Dense(10, activation='elu'), tf.keras.layers.Dense(3) ]) # + [markdown] id="QNimbsAt6pIm" # Given a batch of commands, the controller outputs a batch of control signals for the controlled circuit. # # The controller is randomly initialized so these outputs are not useful, yet. # + id="kZbYRTe16pIm" colab={"base_uri": "https://localhost:8080/"} outputId="bcac6c56-abb2-4c7f-9a0c-93b27fb9ff55" controller(tf.constant([[0.0],[1.0]])).numpy() # + [markdown] id="XizLExg56pIp" # ### 2.3 Connect the controller to the circuit # + [markdown] id="I5Pmy5-V6pIq" # Use `tfq` to connect the controller to the controlled circuit, as a single `keras.Model`. # # See the [Keras Functional API guide](https://www.tensorflow.org/guide/keras/functional) for more about this style of model definition. # # First define the inputs to the model: # + id="UfHF8NNE6pIr" # This input is the simulated miscalibration that the model will learn to correct. circuits_input = tf.keras.Input(shape=(), # The circuit-tensor has dtype `tf.string` dtype=tf.string, name='circuits_input') # Commands will be either `0` or `1`, specifying the state to set the qubit to. commands_input = tf.keras.Input(shape=(1,), dtype=tf.dtypes.float32, name='commands_input') # + [markdown] id="y9xN2mNl6pIu" # Next apply operations to those inputs, to define the computation. # + id="Zvt2YGmZ6pIu" dense_2 = controller(commands_input) # TFQ layer for classically controlled circuits. expectation_layer = tfq.layers.ControlledPQC(model_circuit, # Observe Z operators = cirq.Z(qubit)) expectation = expectation_layer([circuits_input, dense_2]) # + [markdown] id="Ip2jNA9h6pIy" # Now package this computation as a `tf.keras.Model`: # + id="Xs6EMhah6pIz" # The full Keras model is built from our layers. model = tf.keras.Model(inputs=[circuits_input, commands_input], outputs=expectation) # + [markdown] id="w7kgqm3t6pI3" # The network architecture is indicated by the plot of the model below. # Compare this model plot to the architecture diagram to verify correctness. # # Note: May require a system install of the `graphviz` package. # + id="ERXNPe4F6pI4" colab={"base_uri": "https://localhost:8080/", "height": 232} outputId="bb60e4d8-48c0-4253-976d-8326075d6ae9" tf.keras.utils.plot_model(model, show_shapes=True, dpi=70) # + [markdown] id="-Pbemgww6pI7" # This model takes two inputs: The commands for the controller, and the input-circuit whose output the controller is attempting to correct. # + [markdown] id="hpnIBK916pI8" # ### 2.4 The dataset # + [markdown] id="yJSC9qH76pJA" # The model attempts to output the correct correct measurement value of $\hat{Z}$ for each command. The commands and correct values are defined below. # + id="ciMIJAuH6pJA" # The command input values to the classical NN. 
commands = np.array([[0], [1]], dtype=np.float32) # The desired Z expectation value at output of quantum circuit. expected_outputs = np.array([[1], [-1]], dtype=np.float32) # + [markdown] id="kV1LM_hZ6pJD" # This is not the entire training dataset for this task. # Each datapoint in the dataset also needs an input circuit. # + [markdown] id="bbiVHvSYVW4H" # ### 2.4 Input circuit definition # # The input-circuit below defines the random miscalibration the model will learn to correct. # + id="_VYfzHffWo7n" random_rotations = np.random.uniform(0, 2 * np.pi, 3) noisy_preparation = cirq.Circuit( cirq.rx(random_rotations[0])(qubit), cirq.ry(random_rotations[1])(qubit), cirq.rz(random_rotations[2])(qubit) ) datapoint_circuits = tfq.convert_to_tensor([ noisy_preparation ] * 2) # Make two copied of this circuit # + [markdown] id="FvOkMyKI6pJI" # There are two copies of the circuit, one for each datapoint. # + id="6nk2Yr3e6pJJ" colab={"base_uri": "https://localhost:8080/"} outputId="5ac61df7-5981-46b4-8493-e879556f833f" datapoint_circuits.shape # + [markdown] id="gB--UhZZYgVY" # ### 2.5 Training # + [markdown] id="jATjqUIv6pJM" # With the inputs defined you can test-run the `tfq` model. # + id="Lwphqvs96pJO" colab={"base_uri": "https://localhost:8080/"} outputId="66a9e6b4-f2af-477c-b99d-f575d3ed03f8" model([datapoint_circuits, commands]).numpy() # + [markdown] id="9gyg5qSL6pJR" # Now run a standard training process to adjust these values towards the `expected_outputs`. # + id="dtPYqbNi8zeZ" optimizer = tf.keras.optimizers.Adam(learning_rate=0.05) loss = tf.keras.losses.MeanSquaredError() model.compile(optimizer=optimizer, loss=loss) history = model.fit(x=[datapoint_circuits, commands], y=expected_outputs, epochs=30, verbose=0) # + id="azE-qV0OaC1o" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="e88e4462-eccc-4d2c-f61f-1001f788846c" plt.plot(history.history['loss']) plt.title("Learning to Control a Qubit") plt.xlabel("Iterations") plt.ylabel("Error in Control") plt.show() # + [markdown] id="GTd5DGcRmmOK" # From this plot you can see that the neural network has learned to overcome the systematic miscalibration. # + [markdown] id="C2RfWismj66S" # ### 2.6 Verify outputs # Now use the trained model, to correct the qubit calibration errors. With Cirq: # + id="RoIlb7r7j5SY" colab={"base_uri": "https://localhost:8080/"} outputId="7e75fe1a-dde6-4f68-c618-2158c5d28fe4" def check_error(command_values, desired_values): """Based on the value in `command_value` see how well you could prepare the full circuit to have `desired_value` when taking expectation w.r.t. Z.""" params_to_prepare_output = controller(command_values).numpy() full_circuit = noisy_preparation + model_circuit # Test how well you can prepare a state to get expectation the expectation # value in `desired_values` for index in [0, 1]: state = cirq_simulator.simulate( full_circuit, {s:v for (s,v) in zip(control_params, params_to_prepare_output[index])} ).final_state_vector expt = cirq.Z(qubit).expectation_from_state_vector(state, {qubit: 0}).real print(f'For a desired output (expectation) of {desired_values[index]} with' f' noisy preparation, the controller\nnetwork found the following ' f'values for theta: {params_to_prepare_output[index]}\nWhich gives an' f' actual expectation of: {expt}\n') check_error(commands, expected_outputs) # + [markdown] id="wvW_ZDwmsws6" # The value of the loss function during training provides a rough idea of how well the model is learning. 
The lower the loss, the closer the expectation values in the above cell is to `desired_values`. If you aren't as concerned with the parameter values, you can always check the outputs from above using `tfq`: # + id="aYskLTacs8Ku" colab={"base_uri": "https://localhost:8080/"} outputId="957f5b96-6693-4db0-fb8f-ab885d5e0d20" model([datapoint_circuits, commands]) # + [markdown] id="jNrW0NXR-lDC" # ## 3 Learning to prepare eigenstates of different operators # # The choice of the $\pm \hat{Z}$ eigenstates corresponding to 1 and 0 was arbitrary. You could have just as easily wanted 1 to correspond to the $+ \hat{Z}$ eigenstate and 0 to correspond to the $-\hat{X}$ eigenstate. One way to accomplish this is by specifying a different measurement operator for each command, as indicated in the figure below: # # <img src="https://github.com/tensorflow/quantum/blob/master/docs/tutorials/images/nn_control2.png?raw=1" width="1000"> # # This requires use of <code>tfq.layers.Expectation</code>. Now your input has grown to include three objects: circuit, command, and operator. The output is still the expectation value. # + [markdown] id="Ci3WMZ9CjEM1" # ### 3.1 New model definition # # Lets take a look at the model to accomplish this task: # + id="hta0G3Nc6pJY" # Define inputs. commands_input = tf.keras.layers.Input(shape=(1), dtype=tf.dtypes.float32, name='commands_input') circuits_input = tf.keras.Input(shape=(), # The circuit-tensor has dtype `tf.string` dtype=tf.dtypes.string, name='circuits_input') operators_input = tf.keras.Input(shape=(1,), dtype=tf.dtypes.string, name='operators_input') # + [markdown] id="dtdnkrZm6pJb" # Here is the controller network: # + id="n_aTG4g3-y0F" # Define classical NN. controller = tf.keras.Sequential([ tf.keras.layers.Dense(10, activation='elu'), tf.keras.layers.Dense(3) ]) # + [markdown] id="q9aN2ciy6pJf" # Combine the circuit and the controller into a single `keras.Model` using `tfq`: # + id="IMHjiKit6pJg" dense_2 = controller(commands_input) # Since you aren't using a PQC or ControlledPQC you must append # your model circuit onto the datapoint circuit tensor manually. full_circuit = tfq.layers.AddCircuit()(circuits_input, append=model_circuit) expectation_output = tfq.layers.Expectation()(full_circuit, symbol_names=control_params, symbol_values=dense_2, operators=operators_input) # Contruct your Keras model. two_axis_control_model = tf.keras.Model( inputs=[circuits_input, commands_input, operators_input], outputs=[expectation_output]) # + [markdown] id="VQTM6CCiD4gU" # ### 3.2 The dataset # # Now you will also include the operators you wish to measure for each datapoint you supply for `model_circuit`: # + id="4gw_L3JG0_G0" # The operators to measure, for each command. operator_data = tfq.convert_to_tensor([[cirq.X(qubit)], [cirq.Z(qubit)]]) # The command input values to the classical NN. commands = np.array([[0], [1]], dtype=np.float32) # The desired expectation value at output of quantum circuit. expected_outputs = np.array([[1], [-1]], dtype=np.float32) # + [markdown] id="ALCKSvwh0_G2" # ### 3.3 Training # # Now that you have your new inputs and outputs you can train once again using keras. 
# + id="nFuGA73MAA4p" colab={"base_uri": "https://localhost:8080/"} outputId="7de53d9b-130e-4dea-df57-59c1d4aba063" optimizer = tf.keras.optimizers.Adam(learning_rate=0.05) loss = tf.keras.losses.MeanSquaredError() two_axis_control_model.compile(optimizer=optimizer, loss=loss) history = two_axis_control_model.fit( x=[datapoint_circuits, commands, operator_data], y=expected_outputs, epochs=30, verbose=1) # + id="Cf_G-GdturLL" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="995578d5-2d90-40be-b2c8-d22886ea775e" plt.plot(history.history['loss']) plt.title("Learning to Control a Qubit") plt.xlabel("Iterations") plt.ylabel("Error in Control") plt.show() # + [markdown] id="sdCPDH9NlJBl" # The loss function has dropped to zero. # + [markdown] id="NzY8eSVm6pJs" # The `controller` is available as a stand-alone model. Call the controller, and check its response to each command signal. It would take some work to correctly compare these outputs to the contents of `random_rotations`. # + id="uXmH0TQ76pJt" colab={"base_uri": "https://localhost:8080/"} outputId="cb935dd1-8a59-4706-a4b6-441ca620948f" controller.predict(np.array([0,1])) # + [markdown] id="n2WtXnsxubD2" # Success: See if you can adapt the `check_error` function from your first model to work with this new model architecture.
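# As a hedged sketch of the closing exercise above (not part of the original tutorial), `check_error` can be adapted by measuring a per-command operator instead of always measuring $\hat{Z}$. The operator list below mirrors `operator_data` (X for command 0, Z for command 1); everything else reuses objects already defined in this notebook (`controller`, `noisy_preparation`, `model_circuit`, `control_params`, `qubit`, `cirq_simulator`).

# +
def check_two_axis_error(command_values, desired_values):
    """Check how closely the two-axis controller prepares the desired expectation values."""
    # One measurement operator per command, matching operator_data above.
    measurement_ops = [cirq.X(qubit), cirq.Z(qubit)]

    params_to_prepare_output = controller(command_values).numpy()
    full_circuit = noisy_preparation + model_circuit

    for index in [0, 1]:
        state = cirq_simulator.simulate(
            full_circuit,
            {s: v for (s, v) in zip(control_params, params_to_prepare_output[index])}
        ).final_state_vector
        expt = measurement_ops[index].expectation_from_state_vector(state, {qubit: 0}).real
        print(f'Command {command_values[index][0]}: desired expectation {desired_values[index][0]}, '
              f'actual expectation {expt:.3f}')


check_two_axis_error(commands, expected_outputs)
# -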
Copy_of_hello_many_worlds.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import torch import torch.nn as nn from torchvision import models from torchvision.models import resnet50 import torch.nn.functional as F from torchvision import transforms from PIL import Image class Flatten(nn.Module): def forward(self, x): return x.view(x.size(0), -1) class FaceNetModel(nn.Module): def __init__(self, pretrained=False): super(FaceNetModel, self).__init__() self.model = resnet50(pretrained) embedding_size = 128 num_classes = 500 self.cnn = nn.Sequential( self.model.conv1, self.model.bn1, self.model.relu, self.model.maxpool, self.model.layer1, self.model.layer2, self.model.layer3, self.model.layer4) # modify fc layer based on https://arxiv.org/abs/1703.07737 self.model.fc = nn.Sequential( Flatten(), # nn.Linear(100352, 1024), # nn.BatchNorm1d(1024), # nn.ReLU(), nn.Linear(100352, embedding_size)) self.model.classifier = nn.Linear(embedding_size, num_classes) def l2_norm(self, input): input_size = input.size() buffer = torch.pow(input, 2) normp = torch.sum(buffer, 1).add_(1e-10) norm = torch.sqrt(normp) _output = torch.div(input, norm.view(-1, 1).expand_as(input)) output = _output.view(input_size) return output def freeze_all(self): for param in self.model.parameters(): param.requires_grad = False def unfreeze_all(self): for param in self.model.parameters(): param.requires_grad = True def freeze_fc(self): for param in self.model.fc.parameters(): param.requires_grad = False def unfreeze_fc(self): for param in self.model.fc.parameters(): param.requires_grad = True def freeze_only(self, freeze): for name, child in self.model.named_children(): if name in freeze: for param in child.parameters(): param.requires_grad = False else: for param in child.parameters(): param.requires_grad = True def unfreeze_only(self, unfreeze): for name, child in self.model.named_children(): if name in unfreeze: for param in child.parameters(): param.requires_grad = True else: for param in child.parameters(): param.requires_grad = False # returns face embedding(embedding_size) def forward(self, x): x = self.cnn(x) x = self.model.fc(x) features = self.l2_norm(x) # Multiply by alpha = 10 as suggested in https://arxiv.org/pdf/1703.09507.pdf alpha = 10 features = features * alpha return features def forward_classifier(self, x): features = self.forward(x) res = self.model.classifier(features) return res model = FaceNetModel() path='../code/log/best_state_2.pth' device = torch.device("cuda:0") model = FaceNetModel() model.to(device) model.eval() state = torch.load(path, map_location=lambda storage, loc: storage) model.load_state_dict(state["state_dict"]) # + trfrm = transforms.Compose([ lambda x: x.convert('RGB'), transforms.Resize(224), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) topil = transforms.ToPILImage() totensor = transforms.Compose(trfrm.transforms[:-1]) def get_distance(img1, img2,model): model.eval() with torch.no_grad(): x1 = trfrm(img1).unsqueeze(0) x2 = trfrm(img2).unsqueeze(0) x1,x2 = x1.to('cuda:0'), x2.to('cuda:0') embed1 = model(x1) embed2 = model(x2) return F.pairwise_distance(embed1, embed2) # - img1 = 'C:/Users/Gemmechu/Documents/files/UMich/DogOwner/code/datasets/dogOwner/7/1.jpg' img2 = 'C:/Users/Gemmechu/Documents/files/UMich/DogOwner/code/datasets/dogOwner/7/2.jpg' img3 = 
'C:/Users/Gemmechu/Pictures/dog_and_owner/Newfolder/sarah.png'
img4 = 'C:/Users/Gemmechu/Pictures/dog_and_owner/Newfolder/sarahd.jpg'
imgA = Image.open(img3).convert('RGB')
imgB = Image.open(img4).convert('RGB')
dist = get_distance(imgA, imgB, model)
dist
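# The pairwise distance above is only meaningful relative to a decision threshold. The sketch below turns it into a match/no-match decision; the threshold of 1.0 is a hypothetical placeholder and would normally be tuned on a validation set of known matching and non-matching pairs.

# +
# Compare the embedding distance against a (hypothetical) decision threshold
MATCH_THRESHOLD = 1.0  # assumption: tune this value on labelled validation pairs

distance_value = dist.item()
if distance_value < MATCH_THRESHOLD:
    print(f"Distance {distance_value:.3f}: likely the same identity")
else:
    print(f"Distance {distance_value:.3f}: likely different identities")
# -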
dogOwner.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Text Summarization - Support Both English & Chinese Inputs
# ### www.KudosData.com
# #### By: <NAME>
# #### March, 2017
#
# <NAME>: TODO - the NLTK Stanford segmenter still needs to be installed

# +
# import nltk
# -

# http://www.nltk.org/data.html
from nltk.tokenize.stanford_segmenter import StanfordSegmenter

segmenter = StanfordSegmenter(path_to_jar='stanford-segmenter-3.4.1.jar',
                              path_to_sihan_corpora_dict='./data',
                              path_to_model='./data/pku.gz',
                              path_to_dict='./data/dict-chris6.ser.gz')

sentence = u'这是斯坦福中文分词器测试'

segmenter.segment(sentence)
# u'\u8fd9 \u662f \u65af\u5766\u798f \u4e2d\u6587 \u5206\u8bcd\u5668 \u6d4b\u8bd5\n'

segmenter.segment_file('input_stanford.txt')
# u'\u9762\u5bf9 \u65b0 \u4e16\u7eaa \uff0c \u4e16\u754c \u5404\u56fd .

# Write the segmented result as UTF-8 text (open the file in text mode with an
# explicit encoding instead of writing encoded bytes, which fails under Python 3)
outfile = open('output_stanford.txt', 'w', encoding='utf-8')
result = segmenter.segment(sentence)
outfile.write(result)
outfile.close()
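# The segmenter returns one space-delimited string, so downstream steps (for example, term counting for summarization) usually want a token list. This is a minimal sketch using only standard string handling on the `result` value produced above.

# +
# Turn the space-delimited segmenter output into a list of tokens
tokens = result.strip().split()
print(tokens)
print('Number of tokens:', len(tokens))
# -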
topic_summary/on-going-experiment/topic_summary_nltk_StanfordSegmenter_exp_v001.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.9 64-bit # language: python # name: python3 # --- # # World of Warcraft Log Toy # # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mjsmagalhaes/examples-datascience/blob/main/data_wow_log.ipynb) # # This notebook toys with data generated from World of Warcraft (WoW) logs. # # 4 helper classes were created (in the wow folder): # - **wow.log.Record** # - *Parses and stores an entry of the log* # - Each line may have differente structure so more classes deriving from this one may be necessary. # - Each record also represent an in game event. # - **wow.log.Encounter** # - *A set of records that represents a fight agains a boss in the game* # - Each starts with a ENCOUNTER_START event and go until an ENCOUNTER_END event is found. # - **wow.query.Query** # - *An object to help create a chain of iterators to build queries (using filters and map like functions)* # - **wow.query.Predicate** # - *A Collection of functions that will be applied to those iterators* # # There are also another 2 modules in addition to both modules containing classes above: # - **ui** # - contain ui elements in this notebook # # - **fights** # - contains scripts to analyze each fight (only nerzhul for now) # # + # %load_ext autoreload # %autoreload 2 # Colab # # !git clone https://github.com/mjsmagalhaes/examples-datascience.git repo # # %cd repo # # %pip install -r requirements.txt # + # Initialize import pandas as pd import wow.ui as ui import wow.helper as help # import wow.fights from wow.query import Predicate from wow.log import Log # (z, f) = ui.import_file() # - # # Create Data Structures # + # help.unzip('WoWCombatLog-012722_214646.zip', 'wow') # - # file = r'wow\WoWCombatLog-012722_214646.txt' # file = r'C:\Program Files (x86)\World of Warcraft\_retail_\Logs\RaiderIOLogsArchive\WoWCombatLog-012722_214646.txt' file = r'C:\Program Files (x86)\World of Warcraft\_retail_\Logs\WoWCombatLog-030322_214650.txt' log = Log.parse(file) # # Detailed Analysis # Define Encounter being Analysed encSelect = ui.pick_encounter(log.encounters) # + e = log.encounters[encSelect.value] r = e.getReport() # e.timestamp_begin.strftime('encounters_%Y_%m_%d') # log.save_encounters() # - # ## Who was in the fight? r.listPlayers(); # + (spDmg, mDmg) = r.getDamage() dmg = pd.merge( spDmg, mDmg.drop(['Name'], axis='columns'), how='outer', on='Unit ID' ) dmg['Player ID'] = dmg['Player ID'].fillna(dmg['Unit ID']) dmg = dmg.fillna(0) t = dmg.join(pd.DataFrame( dmg['Total (Spell)'] + dmg['Total (Melee)'], columns=['Total']) ).groupby( ['Player ID', 'Name'] ).sum() # - e.getReport().showMeleeDamage() e.q.filter( Predicate.all([ Predicate.any([Predicate.isPlayerAction(), Predicate.isPetAction()]), Predicate.isTargetHostile(), Predicate.isEventIn([ 'SPELL_DAMAGE', 'SPELL_PERIODIC_DAMAGE', 'RANGE_DAMAGE' ]), ]) ).map(( Predicate.getActorId(), Predicate.getActor(), lambda x: int(x[29]), # Predicate.getData(), )).list()
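# The query above returns raw per-event rows rather than a summary. Below is a minimal sketch of aggregating those rows with pandas; it assumes each row comes back as a (player id, player name, damage amount) tuple, which is what the `.map(...)` chain above appears to produce.

# +
# Re-run the same query and aggregate the per-event damage rows into per-player totals
damage_rows = e.q.filter(
    Predicate.all([
        Predicate.any([Predicate.isPlayerAction(), Predicate.isPetAction()]),
        Predicate.isTargetHostile(),
        Predicate.isEventIn([
            'SPELL_DAMAGE', 'SPELL_PERIODIC_DAMAGE', 'RANGE_DAMAGE'
        ]),
    ])
).map((
    Predicate.getActorId(),
    Predicate.getActor(),
    lambda x: int(x[29]),  # assumed to be the damage amount column, as in the cell above
)).list()

damage_df = pd.DataFrame(damage_rows, columns=['Player ID', 'Name', 'Damage'])
damage_df.groupby(['Player ID', 'Name'])['Damage'].sum().sort_values(ascending=False)
# -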
dsexamples/data_wow_log.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Define the Convolutional Neural Network # # After you've looked at the data you're working with and, in this case, know the shapes of the images and of the keypoints, you are ready to define a convolutional neural network that can *learn* from this data. # # In this notebook and in `models.py`, you will: # 1. Define a CNN with images as input and keypoints as output # 2. Construct the transformed FaceKeypointsDataset, just as before # 3. Train the CNN on the training data, tracking loss # 4. See how the trained model performs on test data # 5. If necessary, modify the CNN structure and model hyperparameters, so that it performs *well* **\*** # # **\*** What does *well* mean? # # "Well" means that the model's loss decreases during training **and**, when applied to test image data, the model produces keypoints that closely match the true keypoints of each face. And you'll see examples of this later in the notebook. # # --- # # ## CNN Architecture # # Recall that CNN's are defined by a few types of layers: # * Convolutional layers # * Maxpooling layers # * Fully-connected layers # # You are required to use the above layers and encouraged to add multiple convolutional layers and things like dropout layers that may prevent overfitting. You are also encouraged to look at literature on keypoint detection, such as [this paper](https://arxiv.org/pdf/1710.00977.pdf), to help you determine the structure of your network. # # # ### TODO: Define your model in the provided file `models.py` file # # This file is mostly empty but contains the expected name and some TODO's for creating your model. # # --- # ## PyTorch Neural Nets # # To define a neural network in PyTorch, you define the layers of a model in the function `__init__` and define the feedforward behavior of a network that employs those initialized layers in the function `forward`, which takes in an input image tensor, `x`. The structure of this Net class is shown below and left for you to fill in. # # Note: During training, PyTorch will be able to perform backpropagation by keeping track of the network's feedforward behavior and using autograd to calculate the update to the weights in the network. # # #### Define the Layers in ` __init__` # As a reminder, a conv/pool layer may be defined like this (in `__init__`): # ``` # # 1 input image channel (for grayscale images), 32 output channels/feature maps, 3x3 square convolution kernel # self.conv1 = nn.Conv2d(1, 32, 3) # # # maxpool that uses a square window of kernel_size=2, stride=2 # self.pool = nn.MaxPool2d(2, 2) # ``` # # #### Refer to Layers in `forward` # Then referred to in the `forward` function like this, in which the conv1 layer has a ReLu activation applied to it before maxpooling is applied: # ``` # x = self.pool(F.relu(self.conv1(x))) # ``` # # Best practice is to place any layers whose weights will change during the training process in `__init__` and refer to them in the `forward` function; any layers or functions that always behave in the same way, such as a pre-defined activation function, should appear *only* in the `forward` function. # #### Why models.py # # You are tasked with defining the network in the `models.py` file so that any models you define can be saved and loaded by name in different notebooks in this project directory. 
For example, by defining a CNN class called `Net` in `models.py`, you can then create that same architecture in this and other notebooks by simply importing the class and instantiating a model: # ``` # from models import Net # net = Net() # ``` # + # import the usual resources import matplotlib.pyplot as plt import numpy as np # watch for any changes in model.py, if it changes, re-load it automatically # %load_ext autoreload # %autoreload 2 # + ## TODO: Define the Net in models.py import torch import torch.nn as nn import torch.nn.functional as F ## TODO: Once you've define the network, you can instantiate it # one example conv layer has been provided for you from models import Net net = Net() print(net) # - # ## Transform the dataset # # To prepare for training, create a transformed dataset of images and keypoints. # # ### TODO: Define a data transform # # In PyTorch, a convolutional neural network expects a torch image of a consistent size as input. For efficient training, and so your model's loss does not blow up during training, it is also suggested that you normalize the input images and keypoints. The necessary transforms have been defined in `data_load.py` and you **do not** need to modify these; take a look at this file (you'll see the same transforms that were defined and applied in Notebook 1). # # To define the data transform below, use a [composition](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html#compose-transforms) of: # 1. Rescaling and/or cropping the data, such that you are left with a square image (the suggested size is 224x224px) # 2. Normalizing the images and keypoints; turning each RGB image into a grayscale image with a color range of [0, 1] and transforming the given keypoints into a range of [-1, 1] # 3. Turning these images and keypoints into Tensors # # These transformations have been defined in `data_load.py`, but it's up to you to call them and create a `data_transform` below. **This transform will be applied to the training data and, later, the test data**. It will change how you go about displaying these images and keypoints, but these steps are essential for efficient training. # # As a note, should you want to perform data augmentation (which is optional in this project), and randomly rotate or shift these images, a square image size will be useful; rotating a 224x224 image by 90 degrees will result in the same shape of output. # + from torch.utils.data import Dataset, DataLoader from torchvision import transforms, utils # the dataset we created in Notebook 1 is copied in the helper file `data_load.py` from data_load import FacialKeypointsDataset # the transforms we defined in Notebook 1 are in the helper file `data_load.py` from data_load import Rescale, RandomCrop, Normalize, ToTensor ## TODO: define the data_transform using transforms.Compose([all tx's, . , .]) # order matters! i.e. 
rescaling should come before a smaller crop data_transform = transforms.Compose([Rescale((224, 224)), Normalize(), ToTensor()]) # testing that you've defined a transform assert(data_transform is not None), 'Define a data_transform' # + # create the transformed dataset transformed_dataset = FacialKeypointsDataset(csv_file='data/training_frames_keypoints.csv', root_dir='data/training/', transform=data_transform) print('Number of images: ', len(transformed_dataset)) # iterate through the transformed dataset and print some stats about the first few samples for i in range(4): sample = transformed_dataset[i] print(i, sample['image'].size(), sample['keypoints'].size()) # - # ## Batching and loading data # # Next, having defined the transformed dataset, we can use PyTorch's DataLoader class to load the training data in batches of whatever size as well as to shuffle the data for training the model. You can read more about the parameters of the DataLoader, in [this documentation](http://pytorch.org/docs/master/data.html). # # #### Batch size # Decide on a good batch size for training your model. Try both small and large batch sizes and note how the loss decreases as the model trains. # # **Note for Windows users**: Please change the `num_workers` to 0 or you may face some issues with your DataLoader failing. # + # load training data in batches batch_size = 12 train_loader = DataLoader(transformed_dataset, batch_size=batch_size, shuffle=True, num_workers=4) # - # ## Before training # # Take a look at how this model performs before it trains. You should see that the keypoints it predicts start off in one spot and don't match the keypoints on a face at all! It's interesting to visualize this behavior so that you can compare it to the model after training and see how the model has improved. # # #### Load in the test dataset # # The test dataset is one that this model has *not* seen before, meaning it has not trained with these images. We'll load in this test data and before and after training, see how your model performs on this set! # # To visualize this test data, we have to go through some un-transformation steps to turn our images into python images from tensors and to turn our keypoints back into a recognizable range. # + # load in the test data, using the dataset class # AND apply the data_transform you defined above # create the test dataset test_dataset = FacialKeypointsDataset(csv_file='data/test_frames_keypoints.csv', root_dir='data/test/', transform=data_transform) print ("test dataset", len(test_dataset)) # + # load test data in batches batch_size = 10 test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True, num_workers=4) # - # ## Apply the model on a test sample # # To test the model on a test sample of data, you have to follow these steps: # 1. Extract the image and ground truth keypoints from a sample # 2. Make sure the image is a FloatTensor, which the model expects. # 3. Forward pass the image through the net to get the predicted, output keypoints. # # This function test how the network performs on the first batch of test data. It returns the images, the transformed images, the predicted keypoints (produced by the model), and the ground truth keypoints. 
# + # test the model on a batch of test images def net_sample_output(): # iterate through the test dataset for i, sample in enumerate(test_loader): # get sample data: images and ground truth keypoints images = sample['image'] key_pts = sample['keypoints'] # convert images to FloatTensors images = images.type(torch.FloatTensor) # forward pass to get net output output_pts = net(images) # reshape to batch_size x 68 x 2 pts output_pts = output_pts.view(output_pts.size()[0], 68, -1) # break after first image is tested if i == 0: return images, output_pts, key_pts # - # #### Debugging tips # # If you get a size or dimension error here, make sure that your network outputs the expected number of keypoints! Or if you get a Tensor type error, look into changing the above code that casts the data into float types: `images = images.type(torch.FloatTensor)`. # + # call the above function # returns: test images, test predicted keypoints, test ground truth keypoints test_images, test_outputs, gt_pts = net_sample_output() # print out the dimensions of the data to see if they make sense print(test_images.data.size()) print(test_outputs.data.size()) print(gt_pts.size()) # - # ## Visualize the predicted keypoints # # Once we've had the model produce some predicted output keypoints, we can visualize these points in a way that's similar to how we've displayed this data before, only this time, we have to "un-transform" the image/keypoint data to display it. # # Note that I've defined a *new* function, `show_all_keypoints` that displays a grayscale image, its predicted keypoints and its ground truth keypoints (if provided). def show_all_keypoints(image, predicted_key_pts, gt_pts=None): """Show image with predicted keypoints""" # image is grayscale plt.imshow(image, cmap='gray') plt.scatter(predicted_key_pts[:, 0], predicted_key_pts[:, 1], s=20, marker='.', c='m') # plot ground truth points as green pts if gt_pts is not None: plt.scatter(gt_pts[:, 0], gt_pts[:, 1], s=20, marker='.', c='g') # #### Un-transformation # # Next, you'll see a helper function. `visualize_output` that takes in a batch of images, predicted keypoints, and ground truth keypoints and displays a set of those images and their true/predicted keypoints. # # This function's main role is to take batches of image and keypoint data (the input and output of your CNN), and transform them into numpy images and un-normalized keypoints (x, y) for normal display. The un-transformation process turns keypoints and images into numpy arrays from Tensors *and* it undoes the keypoint normalization done in the Normalize() transform; it's assumed that you applied these transformations when you loaded your test data. 
# + # visualize the output # by default this shows a batch of 10 images def visualize_output(test_images, test_outputs, gt_pts=None, batch_size=10): for i in range(batch_size): plt.figure(figsize=(20,10)) ax = plt.subplot(1, batch_size, i+1) # un-transform the image data image = test_images[i].data # get the image from it's wrapper image = image.numpy() # convert to numpy array from a Tensor image = np.transpose(image, (1, 2, 0)) # transpose to go from torch to numpy image # un-transform the predicted key_pts data predicted_key_pts = test_outputs[i].data predicted_key_pts = predicted_key_pts.numpy() # undo normalization of keypoints predicted_key_pts = predicted_key_pts*50.0+100 # plot ground truth points for comparison, if they exist ground_truth_pts = None if gt_pts is not None: ground_truth_pts = gt_pts[i] ground_truth_pts = ground_truth_pts*50.0+100 # call show_all_keypoints show_all_keypoints(np.squeeze(image), predicted_key_pts, ground_truth_pts) plt.axis('off') plt.show() # call it visualize_output(test_images, test_outputs, gt_pts) # - # ## Training # # #### Loss function # Training a network to predict keypoints is different than training a network to predict a class; instead of outputting a distribution of classes and using cross entropy loss, you may want to choose a loss function that is suited for regression, which directly compares a predicted value and target value. Read about the various kinds of loss functions (like MSE or L1/SmoothL1 loss) in [this documentation](http://pytorch.org/docs/master/_modules/torch/nn/modules/loss.html). # # ### TODO: Define the loss and optimization # # Next, you'll define how the model will train by deciding on the loss function and optimizer. # # --- # + ## TODO: Define the loss and optimization import torch.optim as optim criterion = torch.nn.SmoothL1Loss() optimizer = optim.Adam(net.parameters(), lr = 0.001) # - # ## Training and Initial Observation # # Now, you'll train on your batched training data from `train_loader` for a number of epochs. # # To quickly observe how your model is training and decide on whether or not you should modify it's structure or hyperparameters, you're encouraged to start off with just one or two epochs at first. As you train, note how your the model's loss behaves over time: does it decrease quickly at first and then slow down? Does it take a while to decrease in the first place? What happens if you change the batch size of your training data or modify your loss function? etc. # # Use these initial observations to make changes to your model and decide on the best architecture before you train for many epochs and create a final model. 
def train_net(n_epochs): # prepare the net for training net.train() for epoch in range(n_epochs): # loop over the dataset multiple times running_loss = 0.0 # train on batches of data, assumes you already have train_loader for batch_i, data in enumerate(train_loader): # get the input images and their corresponding labels images = data['image'] key_pts = data['keypoints'] # flatten pts key_pts = key_pts.view(key_pts.size(0), -1) # convert variables to floats for regression loss key_pts = key_pts.type(torch.FloatTensor) images = images.type(torch.FloatTensor) # forward pass to get outputs output_pts = net(images) # calculate the loss between predicted and target keypoints loss = criterion(output_pts, key_pts) # zero the parameter (weight) gradients optimizer.zero_grad() # backward pass to calculate the weight gradients loss.backward() # update the weights optimizer.step() # print loss statistics # to convert loss into a scalar and add it to the running_loss, use .item() running_loss += loss.item() if batch_i % 10 == 9: # print every 10 batches print('Epoch: {}, Batch: {}, Avg. Loss: {}'.format(epoch + 1, batch_i+1, running_loss/10)) running_loss = 0.0 print('Finished Training') # + # train your network n_epochs = 2 # start small, and increase when you've decided on your model structure and hyperparams # sum6 = 3+3+4 + 2 train_net(n_epochs) # - # ## Test data # # See how your model performs on previously unseen, test data. We've already loaded and transformed this data, similar to the training data. Next, run your trained model on these images to see what kind of keypoints are produced. You should be able to see if your model is fitting each new face it sees, if the points are distributed randomly, or if the points have actually overfitted the training data and do not generalize. # + # get a sample of test data again test_images, test_outputs, gt_pts = net_sample_output() print(test_images.data.size()) print(test_outputs.data.size()) print(gt_pts.size()) # key_pts = key_pts.type(torch.FloatTensor) loss = criterion(test_outputs.type(torch.FloatTensor), gt_pts.type(torch.FloatTensor)) # backward pass to calculate the weight gradients loss.backward() print ("Loss ", loss.item() / gt_pts.size(0)) # + ## TODO: visualize your test output # you can use the same function as before, by un-commenting the line below: visualize_output(test_images, test_outputs, gt_pts) # - # Once you've found a good model (or two), save your model so you can load it and use it later! # + ## TODO: change the name to something uniqe for each new model model_dir = 'saved_models/' model_name = 'keypoints_model_6_1.pt' # after training, save your model parameters in the dir 'saved_models' torch.save(net.state_dict(), model_dir+model_name) # - # After you've trained a well-performing model, answer the following questions so that we have some insight into your training and architecture selection process. Answering all questions is required to pass this project. # ### Question 1: What optimization and loss functions did you choose and why? # # **Answer**: As an optimization fucntion I chose Adam because it usually converges faster and SmoothL1Loss as a loss function because it seemed to give slightly better results. # ### Question 2: What kind of network architecture did you start with and how did it change as you tried different architectures? Did you decide to add more convolutional layers or any layers to avoid overfitting the data? 
# **Answer**: I tried approximately 10 different model architectures starting from very simple where there are 1 conv layer, 1 max pooling and 1 fully connected layer and then added more layers, introduced batch norm, dropout and the final model contained 3 conv layers, 3 max poolings and 2 fully connected layers. # # To reduce overfitting I added dropout after each conv layer and batch norm between fc1 and fc2. I also altogether added 3 conv layers. # ### Question 3: How did you decide on the number of epochs and batch_size to train your model? # **Answer**: First, I chose 2 epochs and checked if the loss was decreasing. If it did in decrease, then performed 3 more epochs. # If the loss did not change during last 2 epochs, then decreased learning rate and tried 2 more epochs. If the loss didn't decrease, then stopped training. # I decided on batch_size=16 because I could not fit larger in memory. # ## Feature Visualization # # Sometimes, neural networks are thought of as a black box, given some input, they learn to produce some output. CNN's are actually learning to recognize a variety of spatial patterns and you can visualize what each convolutional layer has been trained to recognize by looking at the weights that make up each convolutional kernel and applying those one at a time to a sample image. This technique is called feature visualization and it's useful for understanding the inner workings of a CNN. # In the cell below, you can see how to extract a single filter (by index) from your first convolutional layer. The filter should appear as a grayscale grid. # + # Get the weights in the first conv layer, "conv1" # if necessary, change this to reflect the name of your first conv layer weights1 = net.conv6_3.weight.data w = weights1.numpy() filter_index = 0 print(w[filter_index][0]) print(w[filter_index][0].shape) # display the filter weights plt.imshow(w[filter_index][0], cmap='gray') # - # ## Feature maps # # Each CNN has at least one convolutional layer that is composed of stacked filters (also known as convolutional kernels). As a CNN trains, it learns what weights to include in it's convolutional kernels and when these kernels are applied to some input image, they produce a set of **feature maps**. So, feature maps are just sets of filtered images; they are the images produced by applying a convolutional kernel to an input image. These maps show us the features that the different layers of the neural network learn to extract. For example, you might imagine a convolutional kernel that detects the vertical edges of a face or another one that detects the corners of eyes. You can see what kind of features each of these kernels detects by applying them to an image. One such example is shown below; from the way it brings out the lines in an the image, you might characterize this as an edge detection filter. # # <img src='images/feature_map_ex.png' width=50% height=50%/> # # # Next, choose a test image and filter it with one of the convolutional kernels in your trained CNN; look at the filtered output to get an idea what that particular kernel detects. 
# # ### TODO: Filter an image to see the effect of a convolutional kernel # --- # + #TODO: load in and display any image from the transformed test dataset test_obj = next(iter(test_loader)) test_img = test_obj["image"].data.numpy()[0][0] fig=plt.figure(figsize=(5, 5)) plt.imshow(test_img, cmap='gray') ## TODO: Using cv's filter2D function, ## apply a specific set of filter weights (like the one displayed above) to the test image import cv2 weights = net.conv6_1.weight.data w = weights.numpy() c = cv2.filter2D(test_img, -1, w[0][0]) fig=plt.figure(figsize=(5, 5)) plt.imshow(c, cmap='gray') # - # ### Question 4: Choose one filter from your trained CNN and apply it to a test image; what purpose do you think it plays? What kind of feature do you think it detects? # # **Answer**: It seems it has learned to blur out the noise. # --- # ## Moving on! # # Now that you've defined and trained your model (and saved the best model), you are ready to move on to the last notebook, which combines a face detector with your saved model to create a facial keypoint detection system that can predict the keypoints on *any* face in an image!
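# As an optional sanity check before moving on, it is worth confirming that the saved checkpoint reloads cleanly. This is a minimal sketch using the `model_dir` and `model_name` values from the save cell above and standard PyTorch state-dict loading; it assumes `test_loader` is still available in this session.

# +
# Reload the saved weights into a fresh Net instance and switch to evaluation mode
loaded_net = Net()
loaded_net.load_state_dict(torch.load(model_dir + model_name))
loaded_net.eval()

# Run the reloaded model on one batch of test images to confirm it still produces 68 (x, y) keypoints
sample = next(iter(test_loader))
sample_images = sample['image'].type(torch.FloatTensor)
sample_output = loaded_net(sample_images)
print(sample_output.view(sample_output.size(0), 68, -1).shape)
# -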
2. Define the Network Architecture.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Guinea-Bissau # # * Homepage of project: https://oscovida.github.io # * Plots are explained at http://oscovida.github.io/plots.html # * [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Guinea-Bissau.ipynb) # + import datetime import time start = datetime.datetime.now() print(f"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}") # - # %config InlineBackend.figure_formats = ['svg'] from oscovida import * overview("Guinea-Bissau", weeks=5); overview("Guinea-Bissau"); compare_plot("Guinea-Bissau", normalise=True); # + # load the data cases, deaths = get_country_data("Guinea-Bissau") # get population of the region for future normalisation: inhabitants = population("Guinea-Bissau") print(f'Population of "Guinea-Bissau": {inhabitants} people') # compose into one table table = compose_dataframe_summary(cases, deaths) # show tables with up to 1000 rows pd.set_option("max_rows", 1000) # display the table table # - # # Explore the data in your web browser # # - If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Guinea-Bissau.ipynb) # - and wait (~1 to 2 minutes) # - Then press SHIFT+RETURN to advance code cell to code cell # - See http://jupyter.org for more details on how to use Jupyter Notebook # # Acknowledgements: # # - Johns Hopkins University provides data for countries # - <NAME> Institute provides data for within Germany # - Atlo Team for gathering and providing data from Hungary (https://atlo.team/koronamonitor/) # - Open source and scientific computing community for the data tools # - Github for hosting repository and html files # - Project Jupyter for the Notebook and binder service # - The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/)) # # -------------------- print(f"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and " f"deaths at {fetch_deaths_last_execution()}.") # + # to force a fresh download of data, run "clear_cache()" # - print(f"Notebook execution took: {datetime.datetime.now()-start}")
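# If you want to look at trends numerically rather than only through the plots above, the downloaded series can be differenced with plain pandas. This is a minimal sketch, assuming `cases` behaves like a cumulative pandas Series indexed by date.

# +
# Daily new cases and a 7-day rolling average derived from the cumulative series
daily_new_cases = cases.diff()
weekly_average = daily_new_cases.rolling(7).mean()
daily_new_cases.tail(14), weekly_average.tail(14)
# -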
ipynb/Guinea-Bissau.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 # language: python # name: python36 # --- # Copyright (c) Microsoft Corporation. All rights reserved. # # Licensed under the MIT License. # ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.png) # + [markdown] nbpresent={"id": "bf74d2e9-2708-49b1-934b-e0ede342f475"} # # Training, hyperparameter tune, and deploy with Keras # # ## Introduction # This tutorial shows how to train a simple deep neural network using the MNIST dataset and Keras on Azure Machine Learning. MNIST is a popular dataset consisting of 70,000 grayscale images. Each image is a handwritten digit of `28x28` pixels, representing number from 0 to 9. The goal is to create a multi-class classifier to identify the digit each image represents, and deploy it as a web service in Azure. # # For more information about the MNIST dataset, please visit [Yan LeCun's website](http://yann.lecun.com/exdb/mnist/). # # ## Prerequisite: # * Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning # * If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) to: # * install the AML SDK # * create a workspace and its configuration file (`config.json`) # * For local scoring test, you will also need to have `tensorflow` and `keras` installed in the current Jupyter kernel. # - # Let's get started. First let's import some Python libraries. # + nbpresent={"id": "c377ea0c-0cd9-4345-9be2-e20fb29c94c3"} # %matplotlib inline import numpy as np import os import matplotlib.pyplot as plt # + nbpresent={"id": "edaa7f2f-2439-4148-b57a-8c794c0945ec"} import azureml from azureml.core import Workspace # check core SDK version number print("Azure ML SDK Version: ", azureml.core.VERSION) # - # ## Initialize workspace # Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#workspace) object from the existing workspace you created in the Prerequisites step. `Workspace.from_config()` creates a workspace object from the details stored in `config.json`. ws = Workspace.from_config() print('Workspace name: ' + ws.name, 'Azure region: ' + ws.location, 'Subscription id: ' + ws.subscription_id, 'Resource group: ' + ws.resource_group, sep='\n') # + [markdown] nbpresent={"id": "59f52294-4a25-4c92-bab8-3b07f0f44d15"} # ## Create an Azure ML experiment # Let's create an experiment named "keras-mnist" and a folder to hold the training scripts. The script runs will be recorded under the experiment in Azure. # + nbpresent={"id": "bc70f780-c240-4779-96f3-bc5ef9a37d59"} from azureml.core import Experiment script_folder = './keras-mnist' os.makedirs(script_folder, exist_ok=True) exp = Experiment(workspace=ws, name='keras-mnist') # - # ## Explore data # # Before you train a model, you need to understand the data that you are using to train it. 
In this section you learn how to: # # * Download the MNIST dataset # * Display some sample images # # ### Download the MNIST dataset # # Download the MNIST dataset and save the files into a `data` directory locally. Images and labels for both training and testing are downloaded. # + import urllib.request data_folder = os.path.join(os.getcwd(), 'data') os.makedirs(data_folder, exist_ok=True) urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', filename=os.path.join(data_folder, 'train-images.gz')) urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz', filename=os.path.join(data_folder, 'train-labels.gz')) urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', filename=os.path.join(data_folder, 'test-images.gz')) urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', filename=os.path.join(data_folder, 'test-labels.gz')) # - # ### Display some sample images # # Load the compressed files into `numpy` arrays. Then use `matplotlib` to plot 30 random images from the dataset with their labels above them. Note this step requires a `load_data` function that's included in an `utils.py` file. This file is included in the sample folder. Please make sure it is placed in the same folder as this notebook. The `load_data` function simply parses the compressed files into numpy arrays. # + # make sure utils.py is in the same directory as this code from utils import load_data, one_hot_encode # note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the model converge faster. X_train = load_data(os.path.join(data_folder, 'train-images.gz'), False) / 255.0 X_test = load_data(os.path.join(data_folder, 'test-images.gz'), False) / 255.0 y_train = load_data(os.path.join(data_folder, 'train-labels.gz'), True).reshape(-1) y_test = load_data(os.path.join(data_folder, 'test-labels.gz'), True).reshape(-1) # now let's show some randomly chosen images from the training set. count = 0 sample_size = 30 plt.figure(figsize = (16, 6)) for i in np.random.permutation(X_train.shape[0])[:sample_size]: count = count + 1 plt.subplot(1, sample_size, count) plt.axhline('') plt.axvline('') plt.text(x=10, y=-10, s=y_train[i], fontsize=18) plt.imshow(X_train[i].reshape(28, 28), cmap=plt.cm.Greys) plt.show() # - # Now you have an idea of what these images look like and the expected prediction outcome. # + [markdown] nbpresent={"id": "defe921f-8097-44c3-8336-8af6700804a7"} # ## Create a FileDataset # A FileDataset references one or multiple files in your datastores or public urls. The files can be of any format. FileDataset provides you with the ability to download or mount the files to your compute. By creating a dataset, you create a reference to the data source location. If you applied any subsetting transformations to the dataset, they will be stored in the dataset as well. The data remains in its existing location, so no extra storage cost is incurred. 
[Learn More](https://aka.ms/azureml/howto/createdatasets) # + from azureml.core.dataset import Dataset web_paths = [ 'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', 'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz', 'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', 'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz' ] dataset = Dataset.File.from_files(path = web_paths) # - # Use the `register()` method to register datasets to your workspace so they can be shared with others, reused across various experiments, and referred to by name in your training script. dataset = dataset.register(workspace = ws, name = 'mnist dataset', description='training and test dataset', create_new_version=True) # ## Create or Attach existing AmlCompute # You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource. # If we could not find the cluster with the given name, then we will create a new cluster here. We will create an `AmlCompute` cluster of `STANDARD_NC6` GPU VMs. This process is broken down into 3 steps: # 1. create the configuration (this step is local and only takes a second) # 2. create the cluster (this step will take about **20 seconds**) # 3. provision the VMs to bring the cluster to the initial size (of 1 in this case). This step will take about **3-5 minutes** and is providing only sparse output in the process. Please make sure to wait until the call returns before moving to the next cell # + from azureml.core.compute import ComputeTarget, AmlCompute from azureml.core.compute_target import ComputeTargetException # choose a name for your cluster cluster_name = "gpu-cluster" try: compute_target = ComputeTarget(workspace=ws, name=cluster_name) print('Found existing compute target') except ComputeTargetException: print('Creating a new compute target...') compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6', max_nodes=4) # create the cluster compute_target = ComputeTarget.create(ws, cluster_name, compute_config) # can poll for a minimum number of nodes and for a specific timeout. # if no min node count is provided it uses the scale settings for the cluster compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20) # use get_status() to get a detailed status for the current cluster. print(compute_target.get_status().serialize()) # - # Now that you have created the compute target, let's see what the workspace's `compute_targets` property returns. You should now see one entry named "gpu-cluster" of type `AmlCompute`. compute_targets = ws.compute_targets for name, ct in compute_targets.items(): print(name, ct.type, ct.provisioning_state) # ## Copy the training files into the script folder # The Keras training script is already created for you. You can simply copy it into the script folder, together with the utility library used to load compressed data file into numpy array. # + import shutil # the training logic is in the keras_mnist.py file. shutil.copy('./keras_mnist.py', script_folder) # the utils.py just helps loading data from the downloaded MNIST dataset into numpy arrays. 
shutil.copy('./utils.py', script_folder) # + [markdown] nbpresent={"id": "2039d2d5-aca6-4f25-a12f-df9ae6529cae"} # ## Construct neural network in Keras # In the training script `keras_mnist.py`, it creates a very simple DNN (deep neural network), with just 2 hidden layers. The input layer has 28 * 28 = 784 neurons, each representing a pixel in an image. The first hidden layer has 300 neurons, and the second hidden layer has 100 neurons. The output layer has 10 neurons, each representing a targeted label from 0 to 9. # # ![DNN](nn.png) # - # ### Azure ML concepts # Please note the following three things in the code below: # 1. The script accepts arguments using the argparse package. In this case there is one argument `--data_folder` which specifies the FileDataset in which the script can find the MNIST data # ``` # parser = argparse.ArgumentParser() # parser.add_argument('--data_folder') # ``` # 2. The script is accessing the Azure ML `Run` object by executing `run = Run.get_context()`. Further down the script is using the `run` to report the loss and accuracy at the end of each epoch via callback. # ``` # run.log('Loss', log['loss']) # run.log('Accuracy', log['acc']) # ``` # 3. When running the script on Azure ML, you can write files out to a folder `./outputs` that is relative to the root directory. This folder is specially tracked by Azure ML in the sense that any files written to that folder during script execution on the remote target will be picked up by Run History; these files (known as artifacts) will be available as part of the run history record. # The next cell will print out the training code for you to inspect. with open(os.path.join(script_folder, './keras_mnist.py'), 'r') as f: print(f.read()) # ## Create TensorFlow estimator & add Keras # Next, we construct an `azureml.train.dnn.TensorFlow` estimator object, use the `gpu-cluster` as compute target, and pass the mount-point of the datastore to the training code as a parameter. # The TensorFlow estimator is providing a simple way of launching a TensorFlow training job on a compute target. It will automatically provide a docker image that has TensorFlow installed. In this case, we add `keras` package (for the Keras framework obviously), and `matplotlib` package for plotting a "Loss vs. Accuracy" chart and record it in run history. # + dataset = Dataset.get_by_name(ws, 'mnist dataset') # list the files referenced by mnist dataset dataset.to_path() # + from azureml.train.dnn import TensorFlow script_params = { '--data-folder': dataset.as_named_input('mnist').as_mount(), '--batch-size': 50, '--first-layer-neurons': 300, '--second-layer-neurons': 100, '--learning-rate': 0.001 } est = TensorFlow(source_directory=script_folder, script_params=script_params, compute_target=compute_target, entry_script='keras_mnist.py', pip_packages=['keras==2.2.5','azureml-dataprep[pandas,fuse]','matplotlib']) # - # ## Submit job to run # Submit the estimator to the Azure ML experiment to kick off the execution. run = exp.submit(est) # ### Monitor the Run # As the Run is executed, it will go through the following stages: # 1. Preparing: A docker image is created matching the Python environment specified by the TensorFlow estimator and it will be uploaded to the workspace's Azure Container Registry. This step will only happen once for each Python environment -- the container will then be cached for subsequent runs. Creating and uploading the image takes about **5 minutes**. 
While the job is preparing, logs are streamed to the run history and can be viewed to monitor the progress of the image creation. # # 2. Scaling: If the compute needs to be scaled up (i.e. the AmlCompute cluster requires more nodes to execute the run than currently available), the cluster will attempt to scale up in order to make the required amount of nodes available. Scaling typically takes about **5 minutes**. # # 3. Running: All scripts in the script folder are uploaded to the compute target, data stores are mounted/copied and the `entry_script` is executed. While the job is running, stdout and the `./logs` folder are streamed to the run history and can be viewed to monitor the progress of the run. # # 4. Post-Processing: The `./outputs` folder of the run is copied over to the run history # # There are multiple ways to check the progress of a running job. We can use a Jupyter notebook widget. # # **Note: The widget will automatically update ever 10-15 seconds, always showing you the most up-to-date information about the run** from azureml.widgets import RunDetails RunDetails(run).show() # We can also periodically check the status of the run object, and navigate to Azure portal to monitor the run. run run.wait_for_completion(show_output=True) # In the outputs of the training script, it prints out the Keras version number. Please make a note of it. # ### The Run object # The Run object provides the interface to the run history -- both to the job and to the control plane (this notebook), and both while the job is running and after it has completed. It provides a number of interesting features for instance: # * `run.get_details()`: Provides a rich set of properties of the run # * `run.get_metrics()`: Provides a dictionary with all the metrics that were reported for the Run # * `run.get_file_names()`: List all the files that were uploaded to the run history for this Run. This will include the `outputs` and `logs` folder, azureml-logs and other logs, as well as files that were explicitly uploaded to the run using `run.upload_file()` # # Below are some examples -- please run through them and inspect their output. run.get_details() run.get_metrics() run.get_file_names() # ## Download the saved model # In the training script, the Keras model is saved into two files, `model.json` and `model.h5`, in the `outputs/models` folder on the gpu-cluster AmlCompute node. Azure ML automatically uploaded anything written in the `./outputs` folder into run history file store. Subsequently, we can use the `run` object to download the model files. They are under the the `outputs/model` folder in the run history file store, and are downloaded into a local folder named `model`. # + # create a model folder in the current directory os.makedirs('./model', exist_ok=True) for f in run.get_file_names(): if f.startswith('outputs/model'): output_file_path = os.path.join('./model', f.split('/')[-1]) print('Downloading from {} to {} ...'.format(f, output_file_path)) run.download_file(name=f, output_file_path=output_file_path) # - # ## Predict on the test set # Let's check the version of the local Keras. Make sure it matches with the version number printed out in the training script. Otherwise you might not be able to load the model properly. # + import keras import tensorflow as tf print("Keras version:", keras.__version__) print("Tensorflow version:", tf.__version__) # - # Now let's load the downloaded model. 
# + from keras.models import model_from_json # load json and create model json_file = open('model/model.json', 'r') loaded_model_json = json_file.read() json_file.close() loaded_model = model_from_json(loaded_model_json) # load weights into new model loaded_model.load_weights("model/model.h5") print("Model loaded from disk.") # - # Feed test dataset to the persisted model to get predictions. # + # evaluate loaded model on test data loaded_model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy']) y_test_ohe = one_hot_encode(y_test, 10) y_hat = np.argmax(loaded_model.predict(X_test), axis=1) # print the first 30 labels and predictions print('labels: \t', y_test[:30]) print('predictions:\t', y_hat[:30]) # - # Calculate the overall accuracy by comparing the predicted value against the test set. print("Accuracy on the test set:", np.average(y_hat == y_test)) # ## Intelligent hyperparameter tuning # We have trained the model with one set of hyperparameters, now let's see how we can do hyperparameter tuning by launching multiple runs on the cluster. First let's define the parameter space using random sampling. # + from azureml.train.hyperdrive import RandomParameterSampling, BanditPolicy, HyperDriveConfig, PrimaryMetricGoal from azureml.train.hyperdrive import choice, loguniform ps = RandomParameterSampling( { '--batch-size': choice(25, 50, 100), '--first-layer-neurons': choice(10, 50, 200, 300, 500), '--second-layer-neurons': choice(10, 50, 200, 500), '--learning-rate': loguniform(-6, -1) } ) # - # Next, we will create a new estimator without the above parameters since they will be passed in later by Hyperdrive configuration. Note we still need to keep the `data-folder` parameter since that's not a hyperparameter we will sweep. est = TensorFlow(source_directory=script_folder, script_params={'--data-folder': dataset.as_named_input('mnist').as_mount()}, compute_target=compute_target, entry_script='keras_mnist.py', pip_packages=['keras==2.2.5','azureml-dataprep[pandas,fuse]','matplotlib']) # Now we will define an early termination policy. The `BanditPolicy` basically states to check the job every 2 iterations. If the primary metric (defined later) falls outside of the top 10% range, Azure ML will terminate the job. This saves us from continuing to explore hyperparameters that don't show promise of helping reach our target metric. policy = BanditPolicy(evaluation_interval=2, slack_factor=0.1) # Now we are ready to configure a run configuration object, and specify the primary metric `Accuracy` that's recorded in your training runs. If you go back to visit the training script, you will notice that this value is being logged after every epoch (a full batch set). We also want to tell the service that we are looking to maximize this value. We also set the number of samples to 20, and the maximum number of concurrent runs to 4, which is the same as the number of nodes in our compute cluster. hdc = HyperDriveConfig(estimator=est, hyperparameter_sampling=ps, policy=policy, primary_metric_name='Accuracy', primary_metric_goal=PrimaryMetricGoal.MAXIMIZE, max_total_runs=20, max_concurrent_runs=4) # Finally, let's launch the hyperparameter tuning job. hdr = exp.submit(config=hdc) # We can use a run history widget to show the progress. Be patient as this might take a while to complete.
RunDetails(hdr).show() hdr.wait_for_completion(show_output=True) # ### Warm start a Hyperparameter Tuning experiment and resuming child runs # Often times, finding the best hyperparameter values for your model can be an iterative process, needing multiple tuning runs that learn from previous hyperparameter tuning runs. Reusing knowledge from these previous runs will accelerate the hyperparameter tuning process, thereby reducing the cost of tuning the model and will potentially improve the primary metric of the resulting model. When warm starting a hyperparameter tuning experiment with Bayesian sampling, trials from the previous run will be used as prior knowledge to intelligently pick new samples, so as to improve the primary metric. Additionally, when using Random or Grid sampling, any early termination decisions will leverage metrics from the previous runs to determine poorly performing training runs. # # Azure Machine Learning allows you to warm start your hyperparameter tuning run by leveraging knowledge from up to 5 previously completed hyperparameter tuning parent runs. # # Additionally, there might be occasions when individual training runs of a hyperparameter tuning experiment are cancelled due to budget constraints or fail due to other reasons. It is now possible to resume such individual training runs from the last checkpoint (assuming your training script handles checkpoints). Resuming an individual training run will use the same hyperparameter configuration and mount the storage used for that run. The training script should accept the "--resume-from" argument, which contains the checkpoint or model files from which to resume the training run. You can also resume individual runs as part of an experiment that spends additional budget on hyperparameter tuning. Any additional budget, after resuming the specified training runs is used for exploring additional configurations. # # For more information on warm starting and resuming hyperparameter tuning runs, please refer to the [Hyperparameter Tuning for Azure Machine Learning documentation](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-tune-hyperparameters) # # ## Find and register best model # When all the jobs finish, we can find out the one that has the highest accuracy. best_run = hdr.get_best_run_by_primary_metric() print(best_run.get_details()['runDefinition']['arguments']) # Now let's list the model files uploaded during the run. print(best_run.get_file_names()) # We can then register the folder (and all files in it) as a model named `keras-dnn-mnist` under the workspace for deployment. model = best_run.register_model(model_name='keras-mlp-mnist', model_path='outputs/model') # ## Deploy the model in ACI # Now we are ready to deploy the model as a web service running in Azure Container Instance [ACI](https://azure.microsoft.com/en-us/services/container-instances/). Azure Machine Learning accomplishes this by constructing a Docker image with the scoring logic and model baked in. # ### Create score.py # First, we will create a scoring script that will be invoked by the web service call. # # * Note that the scoring script must have two required functions, `init()` and `run(input_data)`. # * In `init()` function, you typically load the model into a global object. This function is executed only once when the Docker container is started. # * In `run(input_data)` function, the model is used to predict a value based on the input data. 
The input and output to `run` typically use JSON as serialization and de-serialization format but you are not limited to that. # + # %%writefile score.py import json import numpy as np import os from keras.models import model_from_json from azureml.core.model import Model def init(): global model model_root = Model.get_model_path('keras-mlp-mnist') # load json and create model json_file = open(os.path.join(model_root, 'model.json'), 'r') model_json = json_file.read() json_file.close() model = model_from_json(model_json) # load weights into new model model.load_weights(os.path.join(model_root, "model.h5")) model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy']) def run(raw_data): data = np.array(json.loads(raw_data)['data']) # make prediction y_hat = np.argmax(model.predict(data), axis=1) return y_hat.tolist() # - # ### Create myenv.yml # We also need to create an environment file so that Azure Machine Learning can install the necessary packages in the Docker image which are required by your scoring script. In this case, we need to specify conda packages `tensorflow` and `keras`. # + from azureml.core.conda_dependencies import CondaDependencies cd = CondaDependencies.create() cd.add_tensorflow_conda_package() cd.add_conda_package('keras==2.2.5') cd.add_pip_package("azureml-defaults") cd.save_to_file(base_directory='./', conda_file_path='myenv.yml') print(cd.serialize_to_string()) # - # ### Deploy to ACI # We are almost ready to deploy. Create the inference configuration and deployment configuration and deploy to ACI. This cell will run for about 7-8 minutes. # + from azureml.core.webservice import AciWebservice from azureml.core.model import InferenceConfig from azureml.core.model import Model from azureml.core.environment import Environment myenv = Environment.from_conda_specification(name="myenv", file_path="myenv.yml") inference_config = InferenceConfig(entry_script="score.py", environment=myenv) aciconfig = AciWebservice.deploy_configuration(cpu_cores=1, auth_enabled=True, # this flag generates API keys to secure access memory_gb=1, tags={'name': 'mnist', 'framework': 'Keras'}, description='Keras MLP on MNIST') service = Model.deploy(workspace=ws, name='keras-mnist-svc', models=[model], inference_config=inference_config, deployment_config=aciconfig) service.wait_for_deployment(True) print(service.state) # - # **Tip: If something goes wrong with the deployment, the first thing to look at is the logs from the service by running the following command:** `print(service.get_logs())` # This is the scoring web service endpoint: print(service.scoring_uri) # ### Test the deployed model # Let's test the deployed model. Pick 30 random samples from the test set, and send it to the web service hosted in ACI. Note here we are using the `run` API in the SDK to invoke the service. You can also make raw HTTP calls using any HTTP tool such as curl. # # After the invocation, we print the returned predictions and plot them along with the input images. Use red font color and inversed image (white on black) to highlight the misclassified samples. Note since the model accuracy is pretty high, you might have to run the below cell a few times before you can see a misclassified sample. 
# + import json # find 30 random samples from test set n = 30 sample_indices = np.random.permutation(X_test.shape[0])[0:n] test_samples = json.dumps({"data": X_test[sample_indices].tolist()}) test_samples = bytes(test_samples, encoding='utf8') # predict using the deployed model result = service.run(input_data=test_samples) # compare actual value vs. the predicted values: i = 0 plt.figure(figsize = (20, 1)) for s in sample_indices: plt.subplot(1, n, i + 1) plt.axhline('') plt.axvline('') # use different color for misclassified sample font_color = 'red' if y_test[s] != result[i] else 'black' clr_map = plt.cm.gray if y_test[s] != result[i] else plt.cm.Greys plt.text(x=10, y=-10, s=y_test[s], fontsize=18, color=font_color) plt.imshow(X_test[s].reshape(28, 28), cmap=clr_map) i = i + 1 plt.show() # - # We can retrieve the API keys used for accessing the HTTP endpoint. # Retrieve the API keys. Two keys were generated. key1, Key2 = service.get_keys() print(key1) # We can now send construct raw HTTP request and send to the service. Don't forget to add key to the HTTP header. # + import requests # send a random row from the test set to score random_index = np.random.randint(0, len(X_test)-1) input_data = "{\"data\": [" + str(list(X_test[random_index])) + "]}" headers = {'Content-Type':'application/json', 'Authorization': 'Bearer ' + key1} resp = requests.post(service.scoring_uri, input_data, headers=headers) print("POST to url", service.scoring_uri) #print("input data:", input_data) print("label:", y_test[random_index]) print("prediction:", resp.text) # - # Let's look at the workspace after the web service was deployed. You should see # * a registered model named 'keras-mlp-mnist' and with the id 'model:1' # * a webservice called 'keras-mnist-svc' with some scoring URL # + models = ws.models for name, model in models.items(): print("Model: {}, ID: {}".format(name, model.id)) webservices = ws.webservices for name, webservice in webservices.items(): print("Webservice: {}, scoring URI: {}".format(name, webservice.scoring_uri)) # - # ## Clean up # You can delete the ACI deployment with a simple delete API call. service.delete()
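# If you are completely done experimenting, you may also want to remove the training cluster so it does not keep nodes around. This is a small optional sketch, not part of the original walkthrough; it assumes the `compute_target` object created earlier in this notebook and that no other experiments still need the cluster (deleting it is irreversible, and it would have to be re-created for future runs).

# +
# set this to True only if you also want to delete the AmlCompute cluster created above
delete_cluster = False
if delete_cluster:
    compute_target.delete()
# -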
how-to-use-azureml/training-with-deep-learning/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %%HTML <style> code {background-color : pink !important;} </style> # Camera Calibration with OpenCV # === # # ### Run the code in the cell below to extract object points and image points for camera calibration. # + import numpy as np import cv2 import glob import matplotlib.pyplot as plt # %matplotlib qt # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0) objp = np.zeros((6*8,3), np.float32) objp[:,:2] = np.mgrid[0:8, 0:6].T.reshape(-1,2) # Arrays to store object points and image points from all the images. objpoints = [] # 3d points in real world space imgpoints = [] # 2d points in image plane. # Make a list of calibration images images = glob.glob('calibration_wide/GO*.jpg') # Step through the list and search for chessboard corners for idx, fname in enumerate(images): img = cv2.imread(fname) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # Find the chessboard corners ret, corners = cv2.findChessboardCorners(gray, (8,6), None) # If found, add object points, image points if ret == True: objpoints.append(objp) imgpoints.append(corners) # Draw and display the corners cv2.drawChessboardCorners(img, (8,6), corners, ret) #write_name = 'corners_found'+str(idx)+'.jpg' #cv2.imwrite(write_name, img) cv2.imshow('img', img) cv2.waitKey(500) cv2.destroyAllWindows() # - # ### If the above cell ran sucessfully, you should now have `objpoints` and `imgpoints` needed for camera calibration. Run the cell below to calibrate, calculate distortion coefficients, and test undistortion on an image! # + import pickle # %matplotlib inline # Test undistortion on an image img = cv2.imread('calibration_wide/test_image.jpg') img_size = (img.shape[1], img.shape[0]) # Do camera calibration given object points and image points ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size,None,None) dst = cv2.undistort(img, mtx, dist, None, mtx) cv2.imwrite('calibration_wide/test_undist.jpg',dst) # Save the camera calibration result for later use (we won't worry about rvecs / tvecs) dist_pickle = {} dist_pickle["mtx"] = mtx dist_pickle["dist"] = dist pickle.dump( dist_pickle, open( "calibration_wide/wide_dist_pickle.p", "wb" ) ) #dst = cv2.cvtColor(dst, cv2.COLOR_BGR2RGB) # Visualize undistortion f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10)) ax1.imshow(img) ax1.set_title('Original Image', fontsize=30) ax2.imshow(dst) ax2.set_title('Undistorted Image', fontsize=30)
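# ### Re-using the saved calibration
#
# A small follow-up sketch (not part of the original exercise): once the calibration result is stored in the pickle file above, any later script can undistort images without re-running the chessboard detection. This assumes `calibration_wide/wide_dist_pickle.p` was written by the previous cell; the image path below simply reuses the test image from above.

# +
import pickle
import cv2

# load the camera matrix and distortion coefficients saved earlier
with open("calibration_wide/wide_dist_pickle.p", "rb") as f:
    calib = pickle.load(f)
mtx, dist = calib["mtx"], calib["dist"]

# undistort an image using only the stored calibration
img = cv2.imread('calibration_wide/test_image.jpg')
undistorted = cv2.undistort(img, mtx, dist, None, mtx)
# -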
CarND-Camera-Calibration/camera_calibration.ipynb
import skimage import skimage.io import skimage.transform import os import numpy as np import utils import matplotlib.pyplot as plt if __name__ == "__main__": # DO NOT CHANGE impath = os.path.join("images", "noisy_moon.png") im = utils.read_im(impath) ### START YOUR CODE HERE ### (You can change anything inside this block) im_filtered = im ### END YOUR CODE HERE ### utils.save_im("moon_filtered.png", utils.normalize(im_filtered))
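# One possible direction for the block above (a hedged sketch, not the official solution): the
# periodic noise in "noisy_moon.png" shows up as isolated bright peaks in the frequency domain,
# so one approach is to take an FFT of the image, suppress unusually strong off-centre
# frequencies, and transform back. This assumes `im` is a single-channel (2D) grayscale array;
# the `peak_factor` and `keep_radius` values are assumptions and will likely need tuning.
def remove_periodic_noise(image, peak_factor=50, keep_radius=10):
    # forward FFT with the zero frequency shifted to the centre
    spectrum = np.fft.fftshift(np.fft.fft2(image))
    magnitude = np.abs(spectrum)

    # protect a small neighbourhood around the centre, where the image content itself lives
    rows, cols = image.shape
    yy, xx = np.ogrid[:rows, :cols]
    centre_mask = (yy - rows // 2) ** 2 + (xx - cols // 2) ** 2 <= keep_radius ** 2

    # treat frequencies far stronger than the typical magnitude as noise spikes and zero them
    threshold = peak_factor * np.median(magnitude)
    peak_mask = (magnitude > threshold) & ~centre_mask
    spectrum[peak_mask] = 0

    # inverse transform back to the spatial domain
    return np.real(np.fft.ifft2(np.fft.ifftshift(spectrum)))

# hypothetical usage inside the block above:
# im_filtered = remove_periodic_noise(im)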
TDT4195/image_processing/A2/assignment2/task4c.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import sqlalchemy as sq import plotly_express as px # - engine = sq.create_engine('mysql+pymysql://saurabh:saurabh_sql@localhost:3306/uc_davis') athlete = pd.read_sql('SELECT * FROM athlete_events', con = engine) athlete.head(50) athlete.isnull().sum() # ### If I apply the fillna() method here, it will falsify my data for analysis # # Many athletes don't have Age, Height and Weight values, and the Medal column also has NaN values, since not every participant wins a medal. COUNTs = pd.read_sql('SELECT COUNT(*) AS `Total Data` FROM athlete_events', con = engine) COUNTs
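# ### How much data is actually missing?
#
# A small added check (a sketch, not part of the original analysis): rather than filling the gaps, we can quantify them. This assumes the `athlete` DataFrame loaded above is available; the exact percentages depend on your copy of the `athlete_events` table.

# +
# percentage of missing values per column, largest first
missing_pct = athlete.isnull().mean().mul(100).round(2).sort_values(ascending=False)
missing_pct

# Medal is NaN simply because most participants did not win, so count it explicitly instead of filling it
athlete['Medal'].value_counts(dropna=False)
# -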
Scripts/clean_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd data_url = "https://forge.scilab.org/index.php/p/rdataset/source/file/master/csv/datasets/cars.csv" df_python = pd.read_csv(data_url) df_python.drop(df_python.columns[[0]], axis=1, inplace=True) df_python import saspy session_sas = saspy.SASsession(cfgname='winlocal') session_sas df_sas = session_sas.df2sd(df_python) print(session_sas.saslog()) df_sas.describe() print(session_sas.saslog())
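# As a quick round-trip check (an added sketch, not part of the original issue report), the SAS data set handle returned by `df2sd()` can be pulled back into a pandas DataFrame with `to_df()`. This assumes the `df_sas` object created above and an active SAS session.
df_roundtrip = df_sas.to_df()
df_roundtrip.head()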
Issue_examples/Issue168.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib inline import numpy as np import scipy as sp from scipy import io import matplotlib.pyplot as plt import os import sys import h5py import json #sys.path.append('/Users/rgao/Documents/Code/research/neurodsp/') from neurodsp import spectral import neurodsp as ndsp # + # get all the subject file names in a list datafolder = '/Users/rgao/Documents/Data/EEGData_Tom/' subj_list = [i for i in range(1000,1015)] for i in range(2000,2014): subj_list.append(i) keys = ['oz_rest_data','oz_trial_data'] key=keys[0] # cycle through for subj in subj_list: data = io.loadmat(datafolder+ str(subj) + '.mat', squeeze_me=True) x = data[key] fs = 500. freq, psd = spectral.psd(x, fs, nperseg=int(fs)) plt.loglog(freq, psd) plt.xlim([1.,100.]) # + # cycle through keys = ['oz_rest_data','oz_trial_data'] key=keys[1] for subj in subj_list: data = io.loadmat(datafolder+ str(subj) + '.mat', squeeze_me=True) x = data[key] fs = 500. freq, scv = spectral.scv(x, fs, nperseg=int(fs), outlierpct=2.) plt.loglog(freq, scv, 'k', lw=0.5, alpha=0.5) plt.xlim([1.,100.]) plt.ylim([0.5,5]) plt.plot([1, 100], [1, 1], 'k--') # + subj = subj_list[11] data = io.loadmat(datafolder+ str(subj) + '.mat', squeeze_me=True) fs = 500. OL=2. plt.figure(figsize=(8,4)) plt.subplot(1,2,1) freq, psd = spectral.psd(data['oz_rest_data'], fs, nperseg=int(fs)) plt.loglog(freq, psd) freq, psd = spectral.psd(data['oz_trial_data'], fs, nperseg=int(fs)) plt.loglog(freq, psd) plt.xlim([1.,100.]) plt.legend(('Rest','Trial')) plt.subplot(1,2,2) freq, scv = spectral.scv(data['oz_rest_data'], fs, nperseg=int(fs), outlierpct=OL) plt.loglog(freq,scv); freq, scv = spectral.scv(data['oz_trial_data'], fs, nperseg=int(fs), outlierpct=OL) plt.loglog(freq,scv); plt.xlim([1.,100.]) plt.plot([1, 100], [1, 1], 'k--') plt.tight_layout() # - subj = subj_list[0] data = io.loadmat(datafolder+ str(subj) + '.mat', squeeze_me=True) x = data['oz_rest_data'] #x = data['oz_trial_data'] freq, scv = spectral.scv(x, fs, nperseg=int(fs), outlierpct=2.) freq, T, scv_rs = spectral.scv_rs(x, fs, nperseg=int(fs), method='bootstrap', rs_params=(10,100)) plt.loglog(freq,scv); plt.loglog(freq,np.mean(scv_rs,1), alpha=0.8); plt.legend(('Point Estimate','Resampled Mean')) plt.xlim([1.,100.]) plt.plot([1, 100], [1, 1], 'k--')
.ipynb_checkpoints/EEGbread-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # NumPy Array Basics - Vectorization import sys print(sys.version) import numpy as np print(np.__version__) npa = np.random.randint(0,50,20) # Now I’ve harped on about vectorization in the last couple of videos and I’ve told you that it’s great but I haven’t shown you how it’s so great. # # Here are the two powerful reasons # - Concise # - Efficient # # The fundamental idea behind array programming is that operations apply at once to an entire set of values. This makes it a high-level programming model as it allows the programmer to think and operate on whole aggregates of data, without having to resort to explicit loops of individual scalar operations. # # You can read more here: # https://en.wikipedia.org/wiki/Array_programming npa # With vectorization we can apply changes to the entire array extremely efficiently, no more for loops. If we want to double the array, we just multiply by 2; if we want to cube it, we just cube it. npa * 2 npa ** 3 [x * 2 for x in npa] # So who cares? Again it’s going to be an efficiency thing, just like boolean selection. Let’s try something a bit more complex. # Define a function named new_func that cubes the value if it is less than 10 and squares it if it is greater than or equal to 10. def new_func(numb): if numb < 10: return numb**3 else: return numb**2 new_func(npa) # However we can’t just pass in the whole vector because we’re going to get this array ambiguity. # ?np.vectorize # We need to vectorize this operation and we do that with np.vectorize # # # We can then apply that to our entire array and it takes care of the complexity for us. We can think in terms of the data without having to think about each individual element. vect_new_func = np.vectorize(new_func) type(vect_new_func) vect_new_func(npa) [new_func(x) for x in npa] # It's also much faster to vectorize operations, and while these are simple examples, the benefits will become apparent as we continue through this course. # # *This has changed since Python 3 and the list comprehension has gotten much faster. However, this doesn't mean that vectorization is slower, just that it's a bit heavier because it places a lot more tools at your disposal, like we'll see in the next video.* # %timeit [new_func(x) for x in npa] # %timeit vect_new_func(npa) npa2 = np.random.randint(0,100,20*1000) # Speed comparisons with size. # %timeit [new_func(x) for x in npa2] # %timeit vect_new_func(npa2)
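# One more way to express the same idea (an added sketch, not part of the original lesson): instead of wrapping a scalar function with `np.vectorize`, the cube-or-square rule can be written directly as whole-array operations with `np.where`, which selects elementwise between two precomputed arrays. This assumes the `npa` and `npa2` arrays defined above.

# +
def new_func_arrays(arr):
    # cube values below 10, square the rest, using only array operations
    return np.where(arr < 10, arr ** 3, arr ** 2)

new_func_arrays(npa)
# -

# A rough timing comparison against the np.vectorize version (np.vectorize still loops in Python under the hood):

# %timeit new_func_arrays(npa2)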
Data_Analysis_with_Pandas/01-Numpy Basics/1-3 NumPy Array Basics - Vectorization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + active="" # .. _tfn_userguide: # # TFN Strings # ============ # + active="" # Introduction # ------------ # # The function :func:`clean_au_tfn() <dataprep.clean.clean_au_tfn.clean_au_tfn>` cleans a column containing Australian Tax File Number (TFN) strings, and standardizes them in a given format. The function :func:`validate_au_tfn() <dataprep.clean.clean_au_tfn.validate_au_tfn>` validates either a single TFN string, a column of TFN strings or a DataFrame of TFN strings, returning `True` if the value is valid, and `False` otherwise. # - # TFN strings can be converted to the following formats via the `output_format` parameter: # # * `compact`: only number strings without any separators or whitespace, like "123456782" # * `standard`: TFN strings with proper whitespace in the proper places, like "123 456 782" # # Invalid parsing is handled with the `errors` parameter: # # * `coerce` (default): invalid parsing will be set to NaN # * `ignore`: invalid parsing will return the input # * `raise`: invalid parsing will raise an exception # # The following sections demonstrate the functionality of `clean_au_tfn()` and `validate_au_tfn()`. # ### An example dataset containing TFN strings import pandas as pd import numpy as np df = pd.DataFrame( { "tfn": [ "123 456 782", "999 999 999", "123456782", "51 824 753 556", "hello", np.nan, "NULL" ], "address": [ "123 Pine Ave.", "main st", "1234 west main heights 57033", "apt 1 789 s maple rd manhattan", "robie house, 789 north main street", "(staples center) 1111 S Figueroa St, Los Angeles", "hello", ] } ) df # ## 1. Default `clean_au_tfn` # # By default, `clean_au_tfn` will clean TFN strings and output them in the standard format with proper separators. from dataprep.clean import clean_au_tfn clean_au_tfn(df, column = "tfn") # ## 2. Output formats # This section demonstrates the `output_format` parameter. # ### `standard` (default) clean_au_tfn(df, column = "tfn", output_format="standard") # ### `compact` clean_au_tfn(df, column = "tfn", output_format="compact") # ## 3. `inplace` parameter # # This deletes the given column from the returned DataFrame. # A new column containing cleaned TFN strings is added with a title in the format `"{original title}_clean"`. clean_au_tfn(df, column="tfn", inplace=True) # ## 4. `errors` parameter # ### `coerce` (default) clean_au_tfn(df, "tfn", errors="coerce") # ### `ignore` clean_au_tfn(df, "tfn", errors="ignore") # ## 5. `validate_au_tfn()` # `validate_au_tfn()` returns `True` when the input is a valid TFN. Otherwise it returns `False`. # # The input of `validate_au_tfn()` can be a string, a Pandas Series, a Dask Series, a Pandas DataFrame or a Dask DataFrame. # # When the input is a string, a Pandas Series or a Dask Series, the user doesn't need to specify a column name to be validated. # # When the input is a Pandas DataFrame or a Dask DataFrame, the user can either specify a column name to be validated or not. If a column name is specified, `validate_au_tfn()` only returns the validation result for that column. If no column name is specified, `validate_au_tfn()` returns the validation result for the whole DataFrame.
from dataprep.clean import validate_au_tfn print(validate_au_tfn("123 456 782")) print(validate_au_tfn("99 999 999")) print(validate_au_tfn("123456782")) print(validate_au_tfn("51 824 753 556")) print(validate_au_tfn("hello")) print(validate_au_tfn(np.nan)) print(validate_au_tfn("NULL")) # ### Series validate_au_tfn(df["tfn"]) # ### DataFrame + Specify Column validate_au_tfn(df, column="tfn") # ### Only DataFrame validate_au_tfn(df)
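# ### Using the validation result as a filter
# A small added example (not part of the original guide): because `validate_au_tfn()` returns a boolean Series for Series input, it can be used directly as a mask to keep only the rows with valid TFNs. This assumes the `df` DataFrame defined above.

df_valid_tfn = df[validate_au_tfn(df["tfn"])]
df_valid_tfn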
docs/source/user_guide/clean/clean_au_tfn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Python basics: Expressions and strings # # By [<NAME>](http://www.decontextualize.com/) # # In this tutorial, I introduce the basics of how to use Python to process text, starting with the concept of expressions and evaluation. I go into particular detail on Python's string manipulation functions. # ### A note on Python versions # # There are two main "branches" of Python in current use: Python 2 and Python 3. Both of these branches have their own versions: the latest version of Python 2 (as of this writing) is Python 2.7.x, and the latest version of Python 3 is Python 3.7.x. The branches and versions all have slightly different capabilities and their syntax and structure are slightly different. Python 2.7.x still has a larger number of users overall, and many new projects continue to support it. But most data scientists and data journalists using Python today use the newer version, and following their lead, we'll be using Python 3.6 or later in this course (specifically, the version included with the latest version of [Anaconda](https://www.anaconda.com/download/)). # # (The main reason you need to know this information is that you should be careful when looking up Python information on the Internet---make sure whatever tutorial you're looking at is about Python 3, not Python 2.) # ## Expressions and evaluation # # Let's start with a very high-level description of how computer programming works. When you're writing a computer program, you're describing to the computer what you want, and then asking the computer to figure that thing out for you. Your description of what you want is called an *expression*. The process that the computer uses to turn your expression into whatever that expression means is called *evaluation.* # # Think of a science fiction movie where a character asks the computer, out loud, "What's the square root of nine billion?" or "How many people older than 50 live in Paris, France?" Those are examples of expressions. The process that the computer uses to transform those expressions into a response is evaluation. # # When the process of evaluation is complete, you're left with a single "value". Think of it schematically like so: # # ![Expression -> Evaluation -> Value](http://static.decontextualize.com/snaps/expressiondiagram.png) # # What makes computer programs powerful is that they make it possible to write very precise and sophisticated expressions. And importantly, you can embed the results of evaluating one expression inside of another expression, or save the results of evaluating an expression for later in your program. # # Unfortunately, computers can't understand and intuit your desires simply from a verbal description. That's why we need computer programming languages: to give us a way to write expressions in a way that the computer can understand. Because programming languages are designed to be precise, they can also be persnickety (and frustrating). And every programming language is different. It's tricky, but worth it. # ## Arithmetic expressions # # Let's start with simple arithmetic expressions. The way that you write arithmetic expressions in Python is very similar to the way that you write arithmetic expressions in, say, grade school arithmetic, or algebra. In the example below, `1 + 5` is the expression.
You can tell Python to evaluate the expression and display its value simply by typing in the expression in a new notebook cell and typing CTRL+ENTER. 1 + 5 # Arithmetic expressions in Python can be much more sophisticated than this, of course. We won't go over all of the details right now, but one thing you should know immediately is that Python arithmetic operations are evaluated using the typical order of operations, which you can override with parentheses: 4 + 5 * 6 (4 + 5) * 6 # You can write arithmetic expressions with or without spaces between the numbers and the operators (but usually it's considered better style to include spaces): 10+20+30 # Expressions in Python can also be very simple. In fact, a number on its own is its own expression, which Python evaluates to that number itself: 19 # If you write an expression that Python doesn't understand, then you'll get an error. Here's what that looks like: + 20 19 # ## Expressions of inequality # # You can also ask Python whether two expressions evaluate to the same value, or if one expression evaluates to a value greater than another expression, using a similar familiar syntax. When evaluating such expressions, Python will return one of two special values: either `True` or `False`. # # The `==` operator compares the expression on its left side to the expression on its right side. It evaluates to `True` if the values are equal, and `False` if they're not equal. 3 * 5 == 9 + 6 20 == 7 * 3 # The `<` operator compares the expression on its left side to the expression on its right side, evaluating to `True` if the left-side expression is less than the right-side expression, `False` otherwise. The `>` does the same thing, except checking to see if the left-side expression is greater than the right-side expression: 17 < 18 17 > 18 # The `>=` and `<=` operators translate to "greater than or equal" and "lesser than or equal," respectively: 22 >= 22 22 <= 22 # Make sure to get the order of the angle bracket and the equal sign right! 22 =< 22 # ## Variables # # You can save the result of evaluating an expression for later using the `=` operator (called the "assignment operator"). On the left-hand side of the `=`, write a word that you'd like to use to refer to the value of the expression, and on the right-hand side, write the expression itself. After you've assigned a value like this, whenever you include that word in your Python code, Python will evaluate the word and replace it with the value you assigned to it earlier. Like so: x = (4 + 5) * 6 x # (Notice that the line `x = (4 + 5) * 6` didn't cause Python to display anything. That's because an assignment in Python isn't an expression, it's a "statement"---we'll discuss the difference later.) # # Now, whenever you use the variable `x` in your program, it "stands in" for the result of the expression that you assigned to it. x / 6 # You can create as many variables as you want! another_variable = (x + 2) * 4 another_variable # Variable names can contain letters, numbers and underscores, but must begin with a letter or underscore. There are other, more technical constraints on variable names; you can review them [here](http://en.wikibooks.org/wiki/Think_Python/Variables,_expressions_and_statements#Variable_names_and_keywords). 
# # If you attempt to use a the name of a variable that you haven't defined in the notebook, Python will raise an error: voldemort # If you assign a value to a variable, and then assign a value to it again, the previous value of the variable will be overwritten: x = 15 x x = 42 x # The fact that variables can be overwritten with new values can be helpful in some contexts (e.g., if you're writing a program and you're using the variable to keep track of some value that changes over time). But it can also be annoying if you use the same variable name twice on accident and overwrite values in one part of your program that another part of your program is using the same variable name to keep track of! # ## Types # # Another important thing to know is that when Python evaluates an expression, it assigns the result to a "type." A type is a description of what kind of thing a value is, and Python uses that information to determine later what you can do with that value, and what kinds of expressions that value can be used in. You can ask Python what type it thinks a particular expression evaluates to, or what type a particular value is, using the `type()` function: type(100 + 1) # The word int stands for "integer." ("Integers" are numbers that don't have a fractional component, i.e., -2, -1, 0, 1, 2, etc.) Python has many, many other types, and lots of (sometimes arcane) rules for how those types interact with each other when used in the same expression. For example, you can create a floating point type (i.e., a number with a decimal point in it) by writing a number with a decimal point in it: type(3.14) # Interestingly, the result of adding a floating-point number and an integer number together is always a floating point number: type(3.14 + 17) # ... and the result of dividing one integer by another integer is a floating point number: type(4 / 3) # Throwing an expression into the `type()` function is a good way to know whether or not the value you're working with is the value you were expecting to work with. We'll use it for debugging some example code later. # ## Strings # # Another type of value in Python is called a "string." Strings are a way of representing in our computer programs stretches of text: one or more letters in sequential order. To make an expression that evaluates to a string in Python, simply enclose some text inside of quotes and put it into the interactive interpreter: "Suppose there is a pigeon, suppose there is." # Asking Python for the type of a string returns `str`: type("Suppose there is a pigeon, suppose there is.") # You can use single quotes or double quotes to enclose strings (I tend to use them interchangeably), as long as the opening quote matches the closing quote: 'Suppose there is a pigeon, suppose there is.' # (When you ask Python to evaluate a string expression, it will display it with single quotes surrounding it.) # # You can assign strings to variables, just like any other value: roastbeef = "Suppose there is a pigeon, suppose there is." roastbeef # In versions of Python previous to Python 3, it could be tedious to use any characters inside of strings that weren't ASCII characters (i.e., the letters, numbers and punctuation used most commonly when writing English). 
In Python 3, you can easily include whatever characters you want by typing them into the string directly: cat_message = "我爱猫!😻" cat_message # ### "Escaping" special characters in strings # # Normally, if there are any characters you want in your string, all you have to do to put them there is type the characters in on your keyboard, or paste in the text that you want from some other source. There are some characters, however, that require special treatment and can't be typed into a string directly. # # For example, say you have a double-quoted string. Now, the rules about quoting strings (as outlined above) is that the quoted string begins with a double-quote character and ends with a double-quote character. But what if you want to include a double-quote character INSIDE the string? You might think you could do this: # # "And then he said, "I think that's a cool idea," and vanished." # # But that won't work: "And then he said, "I think that's a cool idea," and vanished." # It doesn't work because Python interprets the first double-quote it sees after the beginning of the string as the double-quote that marks the end of the string. Then it sees all of the stuff after the string and says, "okay, the programmer must not be having a good day?" and displays a syntax error. Clearly, we need a way to tell Python "I want you to interpret this character not with the special meaning it has in Python, but LITERALLY as the thing that I typed." # # We can do this exact thing by putting a backslash in front of the characters that we want Python to interpret literally, like so: "And then he said, \"I think that's a cool idea,\" and vanished." # A character indicated in this way is called an "escape" character (because you've "escaped" from the typical meaning of the character). There are several other useful escape characters to know about: # # * I showed `\"` above, but you can also use `\'` in a single-quoted string. # * Use `\n` if you want to include a new line in your string. # * Use `\t` instead of hitting the tab key to put a tab in your string. # * Because `\` is itself the character used to escape other characters, you need to type `\\` if you actually want a backslash in your string. # # ### Printing vs. evaluating # # There are two ways to see the result of an expression in the interactive interpreter. You can either type the expression directly: 7 + 15 "\tA \"string\" with escape\ncharacters." # Or you can "print" the expression using the `print()` function by putting the expression inside the parentheses: print(7 + 15) print("\tA \"string\" with escape\ncharacters.") # As you can see, the `print()` function doesn't make a huge difference when displaying the result of an arithmetic expression. But it *does* make a difference when displaying a string. When you simply type an expression that evaluates to a string in order to display it, without the `print()` function, Python won't "interpolate" any special characters in the string. ("Interpolate" is a fancy computer programming term that means "replace symbols in something with whatever those symbols represent.") The `print()` function, on the other hand, *will* perform the interpolation. # # Typing the expression itself results in Python showing you *exactly* the code you'd need to copy and paste in order to replicate the vale. Typing the expression into `print()` tells Python to do its best to make the result of the expression look "nice." 
(The `print()` function also sends the result of the expression to standard output, which will be important to know when we're writing our own Python programs on the command line later on.) # ### Asking questions about strings # # Now that we can get some text into our program, let's talk about some of the ways Python allows us to do interesting things with that text. # # Let's talk about the `len()` function first. If you take an expression that evaluates to a string and put it inside the parentheses of `len()`, you get an integer value that indicates how long the string is. Like so: len("Suppose there is a pigeon, suppose there is.") # The value that `len()` evaluates to can itself be used in other expressions (just like any other value!): len("Camembert") + len("Cheddar") # Next up: the `in` operator, which lets us check to see if a particular string is found inside of another string. "foo" in "buffoon" "foo" in "reginald" # The `in` operator takes one expression evaluating to a string on the left and another on the right, and returns `True` if the string on the left occurs somewhere inside of the string on the right. # # We can check to see if a string begins with or ends with another string using that string's `.startswith()` and `.endswith()` methods, respectively: "foodie".startswith("foo") "foodie".endswith("foo") # The `.isdigit()` method returns `True` if Python thinks the string could represent an integer, and `False` otherwise: "foodie".isdigit() "4567".isdigit() # The `.isdigit()` method (along with many of the other methods discussed in this section) works not just for ASCII characters but generally across Unicode. For example, it returns `True` for a full-width digit: "7".isdigit() # And the `.islower()` and `.isupper()` methods return `True` if the string is in all lower case or all upper case, respectively (and `False` otherwise). "foodie".islower() "foodie".isupper() "YELLING ON THE INTERNET".islower() "YELLING ON THE INTERNET".isupper() # The `in` operator discussed above will tell us if a substring occurs in some other string. If we want to know *where* that substring occurs, we can use the `.find()` method. The `.find()` method takes a single parameter between its parentheses: an expression evaluating to a string, which will be searched for within the string whose `.find()` method was called. If the substring is found, the entire expression will evaluate to the index at which the substring is found. If the substring is not found, the expression evaluates to `-1`. To demonstrate: "Now is the winter of our discontent".find("win") "Now is the winter of our discontent".find("lose") # The `.count()` method will return the number of times a particular substring is found within the larger string: "I got rhythm, I got music, I got my man, who could ask for anything more".count("I got") # Finally, remember the `==` operator that we discussed earlier? You can use that in Python to check to see if two strings contain the same characters in the same order: "pants" == "pants" "pants" == "trousers" # ### Simple string transformations # # Python strings have a number of different methods which, when called on a string, return a copy of that string with a simple transformation applied to it. These are helpful for normalizing and cleaning up data, or preparing it to be displayed. # # Let's start with `.lower()`, which evaluates to a copy of the string in all lower case: "ARGUMENTATION! DISAGREEMENT! STRIFE!".lower() # The converse of `.lower()` is `.upper()`: "e.e. cummings is. not. 
happy about this.".upper() # The method `.title()` evaluates to a copy of the string it's called on, replacing every letter at the beginning of a word in the string with a capital letter: "dr. strangelove, or, how I learned to love the bomb".title() # The `.strip()` method removes any whitespace from the beginning or end of the string (but not between characters later in the string): " got some random whitespace in some places here ".strip() # Finally, the `.replace()` method takes two parameters: a string to find, and a string to replace that string with whenever it's found. You can use this to make sad stories. "I got rhythm, I got music, I got my man, who could ask for anything more".replace("I got", "I used to have") # The `.replace()` method works with non-ASCII characters as well, of course: "我爱猫!".replace("猫", "狗") # ### Reading in the contents of a file as a string # # So far we've just been typing our strings directly into the interactive interpreter by writing *string literals* (i.e., characters in between quotation marks). This is nice but for larger chunks of text it's desirable to be able to read files from your file system directly. Fortunately, Python makes it easy to do this! The code below will read the contents of the file `sea_rose.txt` into a variable called `text`: text = open("sea_rose.txt").read() # You can change the name of the variable to whatever you want, of course, and you can choose a different file name as well. Once the text is loaded, it's just a regular string, and you can do whatever you want with it! You could just print it out: print(text) # Or you can ask questions about it: text.count("you") # Or you can transform it: print(text.replace("a", "aaaa")) # Some caveats: # # * The file you specify must be located in the same directory as the interactive interpreter. # * The file needs to be in *plain text* format. [More information on plain text](http://air.decontextualize.com/plain-text/) # * The file needs to be in either ASCII or UTF-8 encoding. (We'll talk more about encodings later, but if the text you want to work with isn't in UTF-8 format, most text editors will allow you to modify the encoding of a file when you save it.) # ## Functions and methods # # Okay, we're getting somewhere together! But I've still been using a lot of jargon when explaning this stuff. One thing that might confuse you: what's a "function" and what's a "method"? # # We've talked about two "functions" so far: `len()` and `type()`. A function is a special word that you can use in Python expressions that runs some pre-defined code: you put your expression inside the parentheses, and Python sends the result of evaluating that expression to the code in the function. That code operates on the value that you gave it, and then itself evaluates to another value. Using a function in this way is usually called "calling" it or "invoking" it. The stuff that you put inside the parentheses is called a "parameter" or "argument"; the value that the function gives back is called its "return value." # # ![Function diagram](http://static.decontextualize.com/snaps/functiondiagram.png) # # The `len()` and `type()` functions are two of what are called "built-in functions," i.e. functions that come with Python and are available whenever you're writing Python code. In Python, built-in functions tend to be able to take many different types of value as parameters. ([There are a lot of other built-in functions](https://docs.python.org/2/library/functions.html), not just `len()` and `type()`! 
We'll discuss them as the need arises.) # # > NOTE: You can also write your own functions---we'll learn how to do this later in the class. Writing functions is a good way to avoid repetition in your code and to compartmentalize it.) # # "Methods" work a lot like functions, except in how it looks when you use them. Instead of putting the expression that you want to use them with inside the parentheses, you put the call to the method directly AFTER the expression that you want to call it on, following a period (`.`). Methods, unlike built-in functions, are usually only valid for one type of value; e.g., values of the string type have a `.strip()` method, but integer values don't. # # It's important to remember that methods can be called both on an expression that evaluates to a particular value AND on a variable that contains that value. So you can do this: "hello".find('e') # ...and this: s = "hello" s.find('e') # ## Getting help in the interactive interpreter # # The interactive interpreter has all kinds of nuggets to help you program in Python. The first one worth mentioning is the `help()` function. Pass any function or method as a parameter to `help()` and you'll get a handy description of the method or function and what it does: >>> help(len) # Remember above when we were talking about how certain types of value have certain "methods" that you can only use with that type of value? Sometimes it's helpful to be reminded of exactly which methods an object supports. You can find this out right in the interactive interpreter without having to look it up in the documentation using the `dir()` built-in function. Just pass the value that you want to know more about to `dir()`: >>> dir("hello") # This is a list of all of the methods that the string type supports. (Ignore anything that begins with two underscores (`__`) for now---those are special weird built-in methods that aren't very useful to call on their own.) If you want to know more about one method in particular, you can type this (note again that you need to NOT include the parentheses after the method): help("hello".swapcase) # Hey awesome! We've learned something about another string method. Let's try this method out: "New York University".swapcase() # > EXERCISE: Use `dir()` and `help()` to find and research a string method that isn't mentioned in the notes. Then write an expression using that method. # ## String indexing # # Python has some powerful language constructions that allow you to access parts of the string by their numerical position in the string. You can get an individual character of a string by putting square brackets (`[]`) right after an expression that evaluates to a string, and putting inside the square brackets the number that represents which character you want. Here's an example: "bungalow"[2] # You can also do this with variables that contain string values, of course: message = "bungalow" message[2] # If we were to say this expression out loud, it might read, "I have a string, consisting of the characters `b`, `u`, `n`, `g`, `a`, `l`, `o` and `w`, in that order. Give me back the second item in that string." Python evaluates that expression to `n`, which is indeed the second letter in the word "bungalow." # # ### The second letter? Am I seeing things. "u" is clearly the second letter. # # You're right---good catch. But for reasons too complicated to go into here, Python (along with many other programming languages!) starts counting at 0, instead of 1. 
So what looks like the third letter of the string to human eyes is actually the second letter to Python. The first letter of the string is accessed using index 0, like so: message[0] # The way I like to conceptualize this is to think of list indexes not as specifying the number of the item you want, but instead specifying how "far away" from the beginning of the list to look for that value. # # If you attempt to use a value for the index of a list that is beyond the end of the list (i.e., the value you use is higher than the last index in the list), Python gives you an error: message[17] # An individual character from a string still has the same type as the string it came from: type(message[3]) # And, of course, a string containing an individual character has a length of 1: len(message[3]) # ### Indexes can be expressions too # # The thing that goes inside of the index brackets doesn't have to be a number that you've just typed in there. Any Python expression that evaluates to an integer can go in there. message[2 * 3] x = 3 message[x] message[message.find("a")] # ### Negative indexes # # If you use `-1` as the value inside of the brackets, something interesting happens: message[-1] # The expression evaluates to the *last* character in the string. This is essentially the same thing as the following code: message[len(message) - 1] # ... except easier to write. In fact, you can use any negative integer in the brackets, and Python will count that many items from the end of the string, and the expression evaluates to that item. message[-3] # If the value in the brackets would "go past" the beginning of the list, Python will raise an error: message[-987] # ## String slices # # The index bracket syntax explained above allows you to write an expression that evaluates to a character in a string, based on its position in the string. Python also has a powerful way for you to write expressions that return a *section* of a string, starting from a particular index and ending with another index. In Python parlance we'll call this section a *slice*. # # Writing an expression to get a slice of a string looks a lot like writing an expression to get a single character. The difference is that instead of putting one number between square brackets, we put *two* numbers, separated by a colon. The first number tells Python where to begin the slice, and the second number tells Python where to end it. message[1:4] # Note that the value after the colon specifies at which index the slice should end, but the slice does *not* include the value at that index. I would translate the expression above as saying "give me characters one through four of the string in the "message" variable, NOT INCLUDING character four." # # The fact that slice indexes aren't inclusive means that you can tell how long the slice will be by subtracting the value before the colon from the value after it: message[1:4] len(message[1:4]) 4 - 1 # Also note that---as always!---any expression that evaluates to an integer can be used for either value in the brackets. For example: x = 3 message[x:x+2] # Finally, note that the type of a slice is still `str`: type(message[5:7]) # ### Omitting slice values # # Because it's so common to use the slice syntax to get a string that is either a slice starting at the beginning of the string or a slice ending at the end of the string, Python has a special shortcut. 
Instead of writing: message[0:3] # You can leave out the `0` and write this instead: message[:3] # Likewise, if you wanted a slice that starts at index 4 and goes to the end of the string, you might write: message[4:] # ### Negative index values in slices # # Now for some tricky stuff: You can use negative index values in slice brackets as well! For example, to get a slice of a string from the fourth-to-last element of the string up to (but not including) the second-to-last element of the string: message[-4:-2] # (Even with negative slice indexes, the numbers have the property that subtracting the first from the second yields the length of the slice, i.e. `-2 - (-4)` is `2`). # # To get the last three elements of the string: message[:-3] # > EXERCISE: Write an expression, or a series of expressions, that prints out "Sea Rose" from the first occurence of the string `sand` up until the end of the poem. (Hint: Use the `.find()` method, discussed above.) # ## Putting strings together # # Earlier, we discussed how the `+` operator can be used to create an expression that evaluates to the sum of two numbers. E.g.: 17 + 92 # The `+` operator can also be used to create a new string from two other strings. This is called "concatenation": "Spider" + "man" part1 = "Nickel, what is nickel, " part2 = "it is originally rid of a cover." part1 + part2 # You can combine as many strings as you want this way, using the `+` operator multiple times in the same expression: "bas" + "ket" + "ball" # > EXERCISE: Write an expression that evaluates to a string containing the first fifty characters of "Sea Rose" followed by the last fifty characters of "Sea Rose." # ### Strings and numbers # # It's important to remember that a string that contains what looks like a number does *not* behave like an actual integer or floating point number does. For example, attempting to subtract one string containing a number from another string containing a number will cause an error to be raised: "15" - "4" # The "unsupported operand type(s)" error means that you tried to use an operator (in this case `+`) with two types that the operator in question doesn't know how to work with. (Python is saying: "You asked me to subtract a string from another string. That doesn't make sense to me.") # # Attempting to add an integer or floating-point number to a string that has (what looks like) a number inside of it will raise a similar error: 16 + "8.9" # Fortunately, there are built-in functions whose purpose is to convert from one type to another; notably, you can put a string inside the parentheses of the `int()` and `float()` functions, and it will evaluate to (what Python interprets as) the integer and floating-point values (respectively) of the string: type("17") int("17") type(int("17")) type("3.14159") float("3.14159") type(float("3.14159")) # If you give a string to one of these functions that Python can't interpret as an integer or floating-point number, Python will raise an error: int("shumai") # ### Strings with multiple lines # # Sometimes we want to work with strings that have more than one "line" of text in them. 
The problem with this is that Python interprets your having pressed "Enter" with your having finished your input, so if you try to cut-and-paste in some text with new line characters, you'll get an error: poem = "Rose, harsh rose, marred and with stint of petals, meagre flower, thin, spare of leaf," # (`EOL while scanning string literal` is Python's way of saying "you hit enter too soon.") One way to work around this is to include `\n` (newline character) inside the string when we type it into our program: poem = "Rose, harsh rose,\nmarred and with stint of petals,\nmeagre flower, thin,\nspare of leaf," print(poem) # This works, but it's kind of inconvenient! A better solution is to use a different way of quoting strings in Python, the triple-quote. It looks like this: poem = """Rose, harsh rose, marred and with stint of petals, meagre flower, thin, spare of leaf,""" print(poem) # When you use three quotes instead of one, Python allows you to put new line characters directly into the string. Nice! We'll be using this for some of the examples below. # > Exercise: Create a variable called `poem` and assign the text of "Sea Rose" to that variable. Use the `len()` function to find out how many characters are in it. Then, use the `count()` method to find out how many times the string `rose` occurs within it. # ## Conclusion # # This section introduces many of the basic building blocks you'll need in order to use computer programs to write poems. We've talked about how to use the interactive interpreter, and about expressions and values, and about the distinction between functions and methods; and we've discussed the details of how strings work and how to manipulate them. # # Further reading: # # * From [Think Python](http://www.greenteapress.com/thinkpython/html/index.html): [Variables, expressions and statements](http://greenteapress.com/thinkpython2/html/thinkpython2003.html); [Strings](http://greenteapress.com/thinkpython2/html/thinkpython2009.html). #
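# One last worked example (added here as a recap, not one of the exercise solutions above): it chains together several of the operations covered in this section, namely finding a substring, slicing from its index, transforming the slice, and asking a couple of questions about the result. It uses a short in-line string rather than a file, so it runs even if `sea_rose.txt` is not in your working directory; the variable names (`recap`, `start`, `fragment`) are only illustrative.

recap = "Rose, harsh rose, marred and with stint of petals"
start = recap.find("marred")                # index of the first occurrence of "marred"
fragment = recap[start:]                    # slice from that index to the end of the string
print(fragment.upper())                     # transform the slice and print it
print(len(fragment), recap.count("rose"))   # ask a couple of questions about the strings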
expressions-and-strings.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="6nOTljC_mTMn" # # Unified Planning Basic Demo # # This python notebook shows the basic usage of the unified planning library. # + [markdown] id="t8dCcpf7mivV" # ## Setup the library and the planners # # We start by downloading (from github) the unified planning library and the two planners we currently have at our disposal, namely `pyperplan` and `tamer`. # + [markdown] id="CwlvEzKrm1jT" # First, we install unified_planning library and its dependencies from PyPi. Here, we use the `--pre` flag to use the latest development build. # + id="GPKRUQgNZBo8" # begin of installation # + id="BoqALxJWdfl8" # !pip install --pre unified-planning[pyperplan,tamer] # + [markdown] id="iNHFHxQKnKIp" # We are now ready to use the Unified-Planning library! # + id="uzkOkJbSdMTY" # end of installation # + [markdown] id="9dP5scv7nNJu" # ## Unified-Planning Demo # # ### Basic imports # The basic imports we need for this demo are abstracted in the `shortcuts` package. Moreover we import the PDDL input/output modules. # + id="06rETnGAfQHg" import unified_planning from unified_planning.shortcuts import * from unified_planning.io.pddl_writer import PDDLWriter from unified_planning.io.pddl_reader import PDDLReader # + [markdown] id="i8J7rP0cnvXq" # ### Problem definition via code # # In this example, we will model a very simple robot navigation problem. # # #### Types # # The first thing to do is to introduce a "UserType" to model the concept of a location. It is possible to introduce as many types as needed; then, for each type we will define a set of objects of that type. # # In addition to `UserType`s we have three built-in types: `Bool`, `Real` and `Integer`. # + id="huAy2IbVn0GZ" Location = UserType('Location') # + [markdown] id="fDukLfPPn20t" # #### Fluents and constants # # The basic variables of a planning problem are called "fluents" and are quantities that can change over time. Fluents can have differen types, in this first example we will stick to classical "predicates" that are fluents of boolean type. Moreover, fluents can have parameters: effectively describing multiple variables. # # For example, a booean fluent `connected` with two parameters of type `Location` (that can be interpreted as `from` and `to`) can be used to model a graph of locations: there exists an edge between two locations `a` and `b` if `connected(a, b)` is true. # # In this example, `connected` will be a constant (i.e. it will never change in any execution), but another fluent `robot_at` will be used to model where the robot is: the robot is in locatiopn `l` if and only if `robot_at(l)` is true (we will ensure that exactly one such `l` exists, so that the robot is always in one single location). # + id="LZUgad7ZoA2p" robot_at = unified_planning.model.Fluent('robot_at', BoolType(), l=Location) connected = unified_planning.model.Fluent('connected', BoolType(), l_from=Location, l_to=Location) # + [markdown] id="rVzqSj3XoDPa" # #### Actions # # Now we have the problem variables, but in order to describe the possible evolutions of a systems we need to describe how these variables can be changed and how they can evolve. We model this problem using classical, action-based planning, where a set of actions is used to characterize the possible transitions of the system from a state to another. 
# # An action is a transition that can be applied if a specified set of preconditions is satisfied and that prescribes a set of effects that change the value of some fluents. All the fluents that are subjected to the action effects are unchanged. # # We allow _lifted_ actions, that are action with parameters: the parameters can be used to specify preconditions or effects and the planner will select among the possible values of each parameters the ones to be used to characterize a specific action. # # In our example, we introduce an action called `move` that has two parameters of type `Location` indicating the current position of the robot `l_from` and the intended destination of the movement `l_to`. The `move(a, b)` action is applicable only when the robot is in position `a` (i.e. `robot_at(a)`) and if `a` and `b` are connected locations (i.e. `connected(a, b)`). As a result of applying the action `move(a, b)`, the robot is no longer in `a` and is instead in location `b`. # # In the unified_planning, we can create actions by instantiating the `unified_planning.InstantaneousAction` class; parameters are specified as keyword arguments to the constructor as shown below. Preconditions and effects are added by means of the `add_precondition` and `add_effect` methods. # + id="dRfrnEOfoHD8" move = unified_planning.model.InstantaneousAction('move', l_from=Location, l_to=Location) l_from = move.parameter('l_from') l_to = move.parameter('l_to') move.add_precondition(connected(l_from, l_to)) move.add_precondition(robot_at(l_from)) move.add_effect(robot_at(l_from), False) move.add_effect(robot_at(l_to), True) print(move) # + [markdown] id="iMuggWWioJ8K" # #### Creating the problem # # The class that represents a planning problem is `unified_planning.Problem`, it contains the set of fluents, the actions, the objects, an intial value for all the fluents and a goal to be reached by the planner. We start by adding the entities we created so far. Note that entities are not bound to one problem, we can create the actions and fluents one and create multiple problems with them. # + id="pgrJOj6ioMSC" problem = unified_planning.model.Problem('robot') problem.add_fluent(robot_at, default_initial_value=False) problem.add_fluent(connected, default_initial_value=False) problem.add_action(move) # + [markdown] id="35A3dp--oOOS" # The set of objects is a set of `unified_planning.Object` instances, each represnting an element of the domain. In this example, we create `NLOC` (set to 10) locations named `l0` to `l9`. We can create the set of objects and add it to the problem as follows. # + id="jbwJbJv8oQ9B" NLOC = 10 locations = [unified_planning.model.Object('l%s' % i, Location) for i in range(NLOC)] problem.add_objects(locations) # + [markdown] id="L-MnST4ioTKo" # Then, we need to specify the initial state. We used the `default_initial_value` specification when adding the fluents, so it suffices to indicate the fluents that are initially true (this is called "small-world assumption". Without this specification, we would need to initialize all the possible instantiation of all the fluents). # # In this example, we connect location `li` with location `li+1`, creating a simple "linear" graph lof locations and we set the initial position of the robot in location `l0`. # + id="t7jLGJ1xoVxq" problem.set_initial_value(robot_at(locations[0]), True) for i in range(NLOC - 1): problem.set_initial_value(connected(locations[i], locations[i+1]), True) # + [markdown] id="re1sYZHKoYx5" # Finally, we set the goal of the problem. 
In this example, we set ourselves to reach location `l9`. # + id="4zKqcGHlocdY" problem.add_goal(robot_at(locations[-1])) print(problem) # + [markdown] id="OTDDF5M1oezl" # ### Solving Planning Problems # # The most direct way to solve a planning problem is to select an available planning engine by name and use it to solve the problem. In the following we use `pyperplan` to solve the problem and print the plan. # + id="8FTO4AoTojko" with OneshotPlanner(name='pyperplan') as planner: result = planner.solve(problem) if result.status == up.engines.PlanGenerationResultStatus.SOLVED_SATISFICING: print("Pyperplan returned: %s" % result.plan) else: print("No plan found.") # + [markdown] id="Q-Pju4K2q_bM" # The unified_planning can also automatically select, among the available planners installed on the system, one that is expressive enough for the problem at hand. # + id="wuTcp_xTxvTj" with OneshotPlanner(problem_kind=problem.kind) as planner: result = planner.solve(problem) print("%s returned: %s" % (planner.name, result.plan)) # + [markdown] id="6KEe1f_Zx71o" # In this example, Pyperplan was selected. The `problem.kind` property, returns an object that describes the characteristics of the problem. # + id="Zmz6B_CcyABQ" print(problem.kind.features) # + [markdown] id="J3tblkI9yEnW" # #### Beyond plan generation # + [markdown] id="xbY7bAPByL35" # `OneshotPlanner` is not the only operation mode we can invoke from the unified_planning, it is just one way to interact with a planning engine. Another useful functionality is `PlanValidation` that checks if a plan is valid for a problem. # + id="p5s7ZwhzyPKG" plan = result.plan with PlanValidator(problem_kind=problem.kind, plan_kind=plan.kind) as validator: if validator.validate(problem, plan): print('The plan is valid') else: print('The plan is invalid') # + [markdown] id="FtY51vyASTcp" # It is also possible to use the `Grounding` operation mode to create an equivalent formulation of a problem that does not use parameters for the actions. This openarion mode is implemented by an internal python code, but also some engines offer advanced grounding techniques. # + id="2mTQ3DlrSoRk" with Compiler(problem_kind=problem.kind, compilation_kind=CompilationKind.GROUNDING) as grounder: grounding_result = grounder.compile(problem, CompilationKind.GROUNDING) ground_problem = grounding_result.problem print(ground_problem) # The grounding_result can be used to "lift" a ground plan back to the level of the original problem with OneshotPlanner(problem_kind=ground_problem.kind) as planner: ground_plan = planner.solve(ground_problem).plan print('Ground plan: %s' % ground_plan) # Replace the action instances of the grounded plan with their correspoding lifted version lifted_plan = ground_plan.replace_action_instances(grounding_result.map_back_action_instance) print('Lifted plan: %s' % lifted_plan) # Test the problem and plan validity with PlanValidator(problem_kind=problem.kind, plan_kind=ground_plan.kind) as validator: ground_validation = validator.validate(ground_problem, ground_plan) lift_validation = validator.validate(problem, lifted_plan) Valid = up.engines.ValidationResultStatus.VALID assert ground_validation.status == Valid assert lift_validation.status == Valid # + [markdown] id="bbVeET7FyVB3" # #### Parallel planning # + [markdown] id="16WuqVp3yX9j" # We can invoke different instances of a planner in parallel or different planners and return the first plan that is generated effortlessly. 
# + id="aeUm0TPZya7e" with OneshotPlanner(names=['tamer', 'tamer', 'pyperplan'], params=[{'heuristic': 'hadd'}, {'heuristic': 'hmax'}, {}]) as planner: plan = planner.solve(problem).plan print("%s returned: %s" % (planner.name, plan)) # + [markdown] id="qi-tOYPAyezo" # ### PDDL I/O # + [markdown] id="Bb70DVgayiiX" # The library allows to read and write PDDL problems effortlessly. # + id="L7ZibzXAyk4z" w = PDDLWriter(problem) print(w.get_domain()) print(w.get_problem()) # + id="JU2CJJgvjn6n" # !wget https://raw.githubusercontent.com/aiplan4eu/unified-planning/master/unified_planning/test/pddl/depot/domain.pddl -O /tmp/depot_domain.pddl # + id="lprAyCOgj9J3" # !wget https://raw.githubusercontent.com/aiplan4eu/unified-planning/master/unified_planning/test/pddl/depot/problem.pddl -O /tmp/depot_problem.pddl # + id="jsW0X8T9yqXI" reader = PDDLReader() pddl_problem = reader.parse_problem('/tmp/depot_domain.pddl', '/tmp/depot_problem.pddl') print(pddl_problem) # + [markdown] id="4x4o5D9dyoZe" # A parsed PDDL problem is just a normal problem that can be solved. # + id="P1dexnd1yvdi" print(pddl_problem.kind.features) with OneshotPlanner(name='pyperplan') as planner: result = planner.solve(pddl_problem) print("%s returned: %s" % (planner.name, result.plan))
notebooks/Unified_Planning_Basics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # **start() & end()** # These expressions return the indices of the start and end of the substring matched by the group. # # # >>> import re # >>> m = re.search(r'\d+','1234') # >>> m.end() # 4 # >>> m.start() # 0 # + import re string = input() substring = input() pattern = re.compile(substring) match = pattern.search(string) if not match: print('(-1, -1)') while match: print(f'({match.start()}, {match.end() - 1})') match = pattern.search(string, match.start() + 1) # -
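# An alternative way to produce the same overlapping matches, shown here as an optional sketch rather than part of the original solution: wrapping the substring in a zero-width lookahead lets `re.finditer` report every starting position without consuming any characters, so overlapping occurrences are found in a single pass. `re.escape` guards against regex metacharacters in the input substring.

# +
import re

string = input()
substring = input()

matches = list(re.finditer(f'(?={re.escape(substring)})', string))
if not matches:
    print('(-1, -1)')
for m in matches:
    # each zero-width match marks a start index; the end index is start + len(substring) - 1
    print(f'({m.start()}, {m.start() + len(substring) - 1})')
# -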
Python/13. regex and parsing/77. re start() & re end().ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <div class="alert alert-block alert-info" style="margin-top: 20px"> # <a href="https://cocl.us/topNotebooksPython101Coursera"> # <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/TopAd.png" width="750" align="center"> # </a> # </div> # <a href="https://cognitiveclass.ai/"> # <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/CCLog.png" width="200" align="center"> # </a> # <h1>String Operations</h1> # <p><strong>Welcome!</strong> This notebook will teach you about the string operations in the Python Programming Language. By the end of this notebook, you'll know the basics string operations in Python, including indexing, escape sequences and operations.</p> # <h2>Table of Contents</h2> # <div class="alert alert-block alert-info" style="margin-top: 20px"> # <ul> # <li> # <a href="#strings">What are Strings?</a> # </li> # <li> # <a href="#index">Indexing</a> # <ul> # <li><a href="neg">Negative Indexing</a></li> # <li><a href="slice">Slicing</a></li> # <li><a href="stride">Stride</a></li> # <li><a href="concat">Concatenate Strings</a></li> # </ul> # </li> # <li> # <a href="#escape">Escape Sequences</a> # </li> # <li> # <a href="#operations">String Operations</a> # </li> # <li> # <a href="#quiz">Quiz on Strings</a> # </li> # </ul> # <p> # Estimated time needed: <strong>15 min</strong> # </p> # </div> # # <hr> # <h2 id="strings">What are Strings?</h2> # The following example shows a string contained within 2 quotation marks: # + # Use quotation marks for defining string "<NAME>" # - # We can also use single quotation marks: # + # Use single quotation marks for defining string '<NAME>' # - # A string can be a combination of spaces and digits: # + # Digitals and spaces in string '1 2 3 4 5 6 ' # - # A string can also be a combination of special characters : # + # Special characters in string '@#2_#]&*^%$' # - # We can print our string using the print statement: # + # Print the string print("hello!") # - # We can bind or assign a string to another variable: # # + # Assign string to variable Name = "<NAME>" Name # - # <hr> # <h2 id="index">Indexing</h2> # It is helpful to think of a string as an ordered sequence. Each element in the sequence can be accessed using an index represented by the array of numbers: # <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%201/Images/StringsIndex.png" width="600" align="center" /> # The first index can be accessed as follows: # <hr/> # <div class="alert alert-success alertsuccess" style="margin-top: 20px"> # [Tip]: Because indexing starts at 0, it means the first index is on the index 0. 
# </div> # <hr/> # + # Print the first element in the string print(Name[0]) # - # We can access index 6: # + # Print the element on index 6 in the string print(Name[6]) # - # Moreover, we can access the 13th index: # + # Print the element on the 13th index in the string print(Name[13]) # - # <h3 id="neg">Negative Indexing</h3> # We can also use negative indexing with strings: # <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%201/Images/StringsNeg.png" width="600" align="center" /> # Negative index can help us to count the element from the end of the string. # The last element is given by the index -1: # + # Print the last element in the string print(Name[-1]) # - # The first element can be obtained by index -15: # + # Print the first element in the string print(Name[-15]) # - # We can find the number of characters in a string by using <code>len</code>, short for length: # + # Find the length of string len("<NAME>") # - # <h3 id="slice">Slicing</h3> # We can obtain multiple characters from a string using slicing, we can obtain the 0 to 4th and 8th to the 12th element: # <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%201/Images/StringsSlice.png" width="600" align="center" /> # <hr/> # <div class="alert alert-success alertsuccess" style="margin-top: 20px"> # [Tip]: When taking the slice, the first number means the index (start at 0), and the second number means the length from the index to the last element you want (start at 1) # </div> # <hr/> # + # Take the slice on variable Name with only index 0 to index 3 Name[0:4] # + # Take the slice on variable Name with only index 8 to index 11 Name[8:12] # - # <h3 id="stride">Stride</h3> # We can also input a stride value as follows, with the '2' indicating that we are selecting every second variable: # <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%201/Images/StringsStride.png" width="600" align="center" /> # + # Get every second element. The elments on index 1, 3, 5 ... Name[::2] # - # We can also incorporate slicing with the stride. In this case, we select the first five elements and then use the stride: # + # Get every second element in the range from index 0 to index 4 Name[0:5:2] # - # <h3 id="concat">Concatenate Strings</h3> # We can concatenate or combine strings by using the addition symbols, and the result is a new string that is a combination of both: # # + # Concatenate two strings Statement = Name + "is the best" Statement # - # To replicate values of a string we simply multiply the string by the number of times we would like to replicate it. In this case, the number is three. The result is a new string, and this new string consists of three copies of the original string: # + # Print the string for 3 times 3 * "<NAME>" # - # You can create a new string by setting it to the original variable. Concatenated with a new string, the result is a new string that changes from <NAME> to “<NAME> is the best". # # + # Concatenate strings Name = "<NAME>" Name = Name + " is the best" Name # - # <hr> # <h2 id="escape">Escape Sequences</h2> # Back slashes represent the beginning of escape sequences. Escape sequences represent strings that may be difficult to input. For example, back slash "n" represents a new line. 
The output is given by a new line after the back slash "n" is encountered: # + # New line escape sequence print(" <NAME> \n is the best" ) # - # Similarly, back slash "t" represents a tab: # + # Tab escape sequence print(" <NAME> \t is the best" ) # - # If you want to place a back slash in your string, use a double back slash: # + # Include back slash in string print(" <NAME> \\ is the best" ) # - # We can also place an "r" before the string to display the backslash: # + # r will tell python that string will be display as raw string print(r" <NAME> \ is the best" ) # - # <hr> # <h2 id="operations">String Operations</h2> # There are many string operation methods in Python that can be used to manipulate the data. We are going to use some basic string operations on the data. # Let's try with the method <code>upper</code>; this method converts lower case characters to upper case characters: # + # Convert all the characters in string to upper case A = "Thriller is the sixth studio album" print("before upper:", A) B = A.upper() print("After upper:", B) # - # The method <code>replace</code> replaces a segment of the string, i.e. a substring with a new string. We input the part of the string we would like to change. The second argument is what we would like to exchange the segment with, and the result is a new string with the segment changed: # # + # Replace the old substring with the new target substring is the segment has been found in the string A = "<NAME> is the best" B = A.replace('Michael', 'Janet') B # - # The method <code>find</code> finds a sub-string. The argument is the substring you would like to find, and the output is the first index of the sequence. We can find the sub-string <code>jack</code> or <code>el<code>. # <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%201/Images/StringsFind.png" width="600" align="center" /> # + # Find the substring in the string. Only the index of the first elment of substring in string will be the output Name = "<NAME>" Name.find('el') # + # Find the substring in the string. Name.find('Jack') # - # If the sub-string is not in the string then the output is a negative one. For example, the string 'Jasdfasdasdf' is not a substring: # + # If cannot find the substring in the string Name.find('Jasdfasdasdf') # - # <hr> # <h2 id="quiz">Quiz on Strings</h2> # What is the value of the variable <code>A</code> after the following code is executed? # + # Write your code below and press Shift+Enter to execute A = "1" # - # Double-click <b>here</b> for the solution. # # <!-- Your answer is below: # "1" # --> # What is the value of the variable <code>B</code> after the following code is executed? # + # Write your code below and press Shift+Enter to execute B = "2" # - # Double-click <b>here</b> for the solution. # # <!-- Your answer is below: # "2" # --> # What is the value of the variable <code>C</code> after the following code is executed? # + # Write your code below and press Shift+Enter to execute C = A + B # - # Double-click <b>here</b> for the solution. # # <!-- Your answer is below: # "12" # --> # <hr> # Consider the variable <code>D</code> use slicing to print out the first three elements: # + # Write your code below and press Shift+Enter to execute D = "ABCDEFG" # - # Double-click <b>here</b> for the solution. 
# # <!-- Your answer is below: # print(D[:3]) # # or # print(D[0:3]) # --> # <hr> # Use a stride value of 2 to print out every second character of the string <code>E</code>: # + # Write your code below and press Shift+Enter to execute E = 'clocrkr1e1c1t' # - # Double-click <b>here</b> for the solution. # # <!-- Your answer is below: # print(E[::2]) # --> # <hr> # Print out a backslash: # + # Write your code below and press Shift+Enter to execute # - # Double-click <b>here</b> for the solution. # <!-- Your answer is below: # print("\\") # or # print(r" \ ") # --> # <hr> # Convert the variable <code>F</code> to uppercase: # + # Write your code below and press Shift+Enter to execute F = "You are wrong" # - # Double-click <b>here</b> for the solution. # # <!-- Your answer is below: # F.upper() # --> # <hr> # Consider the variable <code>G</code>, and find the first index of the sub-string <code>snow</code>: # + # Write your code below and press Shift+Enter to execute G = "Mary had a little lamb Little lamb, little lamb Mary had a little lamb \ Its fleece was white as snow And everywhere that Mary went Mary went, Mary went \ Everywhere that Mary went The lamb was sure to go" # - # Double-click __here__ for the solution. # # <!-- Your answer is below: # G.find("snow") # --> # In the variable <code>G</code>, replace the sub-string <code>Mary</code> with <code>Bob</code>: # + # Write your code below and press Shift+Enter to execute # - # Double-click __here__ for the solution. # # <!-- Your answer is below: # G.replace("Mary", "Bob") # --> # <hr> # <h2>The last exercise!</h2> # <p>Congratulations, you have completed your first lesson and hands-on lab in Python. However, there is one more thing you need to do. The Data Science community encourages sharing work. The best way to share and showcase your work is to share it on GitHub. By sharing your notebook on GitHub you are not only building your reputation with fellow data scientists, but you can also show it off when applying for a job. Even though this was your first piece of work, it is never too early to start building good habits. So, please read and follow <a href="https://cognitiveclass.ai/blog/data-scientists-stand-out-by-sharing-your-notebooks/" target="_blank">this article</a> to learn how to share your work. # <hr> # <div class="alert alert-block alert-info" style="margin-top: 20px"> # <h2>Get IBM Watson Studio free of charge!</h2> # <p><a href="https://cocl.us/bottemNotebooksPython101Coursera"><img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/BottomAd.png" width="750" align="center"></a></p> # </div> # <h3>About the Authors:</h3> # <p><a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank"><NAME></a> is a Data Scientist at IBM, and holds a PhD in Electrical Engineering. His research focused on using Machine Learning, Signal Processing, and Computer Vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.</p> # Other contributors: <a href="www.linkedin.com/in/jiahui-mavis-zhou-a4537814a"><NAME></a> # <hr> # <p>Copyright &copy; 2018 IBM Developer Skills Network. This notebook and its source code are released under the terms of the <a href="https://cognitiveclass.ai/mit-license/">MIT License</a>.</p>
Applied-Data-Science-Specialization-IBM/IBM - Python for Data Science/week1- Python/PY0101EN-1-2-Strings.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import collections from collections import defaultdict import sys import json import random from jsmin import jsmin from io import StringIO import numpy as np import copy import importlib from functools import partial import math import os import compress_pickle # script_n = os.path.basename(__file__).split('.')[0] script_n = 'mf_bouton_210519_compare_dist' sys.path.insert(0, '/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc') import my_plot importlib.reload(my_plot) from my_plot import MyPlotData import tools_mf_graph importlib.reload(tools_mf_graph) # fname = ('/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc/'\ # 'mf_grc_model/input_graph_210407_all.gz') fname = ('/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc/' \ 'gen_db/mf_grc/input_graph_210520_all_100_2.gz') input_graph = compress_pickle.load(fname) z_min = 19800-10000 z_max = 29800+10000 # GrCs are fully reconstructed and proofread from 90k to 150k x_min = 320*1000 x_max = 560*1000 pair_reps = defaultdict(int) mfs_within_box = set() mf_locs_within_box = set() loc_to_mf = dict() for mf_id, mf in input_graph.mfs.items(): rosette_loc_size = {} mf.get_all_mf_locs_size(rosette_loc_size) for rosette_loc, size in rosette_loc_size.items(): x, y, z = rosette_loc if x < x_min or x > x_max: continue if z < z_min or z > z_max: continue mfs_within_box.add(mf_id) mf_locs_within_box.add(rosette_loc) loc_to_mf[rosette_loc] = mf_id # input_graph.randomize(preserve_mf_degree='soft') # input_graph.randomize(preserve_mf_degree=False) connected_grcs = set() mf_connections = defaultdict(set) mf_size = defaultdict(int) for grc_id in input_graph.grcs: grc = input_graph.grcs[grc_id] connected_edge_count = 0 for edge in grc.edges: mf_id, mf_loc = edge if mf_loc not in mf_locs_within_box: continue mf_connections[mf_id].add(grc_id) mf_size[mf_loc] += 1 connected_edge_count += 1 connected_grcs.add((grc_id, connected_edge_count)) mf_size_sorted = [] for mf_id in mf_locs_within_box: mf_size_sorted.append((mf_id, mf_size[mf_id])) mf_size_sorted.sort(key=lambda x: x[1]) mpd_count = MyPlotData() i = 0 for mf_id in mf_size_sorted: mf_id, size = mf_id mpd_count.add_data_point( # count=len(mf_connections[mf_id]), count=size, mf_id=i, ) i += 1 importlib.reload(my_plot); my_plot.my_relplot( mpd_count, x='mf_id', y='count', # kind='hist', context='paper', linewidth=2.5, ylim=[0, 50], # kde=True, # stat='density', height=4, aspect=2, y_axis_label='# of connected GrCs', x_axis_label='Sorted MFs', save_filename=f'{script_n}_line.svg', show=True, ) # - mf_size_sorted # + mf_size_map = {} for mf, size in mf_size_sorted: mf_size_map[mf] = size out_n = f'{script_n}_mf_size_map.gz' print(out_n) import compress_pickle compress_pickle.dump(mf_size_map, out_n) # - mf_size_sorted total = 0 top10_n = int(len(mf_size_sorted)/3) for n in mf_size_sorted: total += n[1] print(total) total_top = 0 for n in mf_size_sorted[-top10_n:]: total_top += n[1] print(total_top) total_bottom = 0 for n in mf_size_sorted[0:top10_n]: total_bottom += n[1] print(total_bottom) print(total_top/total_bottom) # + # importlib.reload(my_plot) # importlib.reload(my_plot_data) # mpd_hist_observed = MyPlotData() # for k, v in mpd_count.get_histogram(count_var='count').items(): # mpd_hist_observed.add( # num_grcs=k, 
# count=v, # model='Reconstructed') # mpd_count_observed.add_key_value('model', 'Reconstructed') # def my_to_histogram(self, count_var=None, count_name='count', new_count_var=None): # hist = self.get_histogram(count_var, count_name) # ret = MyPlotData() # if new_count_var is None: # new_count_var = count_var # for k, v in hist.items(): # args = { # f'{count_var}': k, # f'{count_name}': v, # } # ret.add(**args) # return ret # mpd_count.to_histogram = my_to_histogram # mpd_hist_observed = MyPlotData() # mpd_hist_observed = mpd_count.to_histogram('count', 'count', new_count_var='num_grcs') # for k, v in mpd_count.get_histogram(count_var='count').items(): # mpd_hist_observed.add( # num_grcs=k, # count=v, # model='Reconstructed') # mpd_count_observed.add_key_value('model', 'Reconstructed') import sys import importlib sys.path.insert(0, '/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc') import my_plot importlib.reload(my_plot) from my_plot import MyPlotData import compress_pickle import my_plot_data mpd_random = compress_pickle.load( 'mf_bouton_210519_random_circle_dist_X_420000_540000_Z_19800_29800.gz').add_key_value( 'model', 'Random') # mpd_random = mpd_random.to_histogram() mpd_random.to_histogram('count', hist_key='num_grcs', hist_val='count') mpd_random.add_cdf('count') mpd_all.append(mpd_random) # + mpd_count_observed = copy.copy(mpd_count) mpd_count_observed.add_key_value('model', 'Observed') mpd_all = MyPlotData() mpd_all.append(mpd_count_observed) mpd_random = compress_pickle.load( 'mf_bouton_210519_random_circle_dist_X_420000_540000_Z_19800_29800.gz').add_key_value( 'model', 'Random') mpd_all.append(mpd_random) def custom_legend_fn(plt): # plt.legend(bbox_to_anchor=(1.025, .8), loc='upper left', borderaxespad=0.) plt.legend(loc='lower right', frameon=False, fontsize=12) importlib.reload(my_plot); my_plot.my_relplot( mpd_all, x='mf_id', y='count', hue='model', # kind='hist', context='paper', linewidth=2.5, ylim=[0, 50], # kde=True, # stat='density', height=3, width=4, # aspect=2, y_axis_label='# of postsyn GrCs', x_axis_label='Sorted MF boutons', save_filename=f'{script_n}_observed_vs_random.svg', custom_legend_fn=custom_legend_fn, show=True, ) # + mpd_count_observed = copy.copy(mpd_count) mpd_count_observed.add_key_value('model', 'Observed') mpd_all = MyPlotData() mpd_all.append(mpd_count_observed) mpd_random = compress_pickle.load( 'mf_bouton_210519_random_circle_X_420000_540000_Z_19800_29800.gz').add_key_value( 'model', 'Circle-Average') mpd_all.append(mpd_random) mpd_random = compress_pickle.load( 'mf_bouton_210519_random_circle_dist_X_420000_540000_Z_19800_29800.gz').add_key_value( 'model', 'Circle-Distribution') mpd_all.append(mpd_random) mpd_random = compress_pickle.load( # 'mf_bouton_210519_random_test_X_420000_540000_Z_19800_29800.gz').add_key_value( # 'mf_bouton_210519_random_edge_X_420000_540000_Z_19800_29800.gz').add_key_value( # 'mf_bouton_210519_random_edge_10000_X_420000_540000_Z_19800_29800.gz').add_key_value( 'mf_bouton_210519_random_edge_global_15000_X_420000_540000_Z_19800_29800.gz').add_key_value( # 'mf_bouton_210519_random_edge_global_10000_X_420000_540000_Z_19800_29800.gz').add_key_value( # 'mf_bouton_210519_random_edge_global_20000_X_420000_540000_Z_19800_29800.gz').add_key_value( 'model', 'Vector-Shuffle') mpd_all.append(mpd_random) def custom_legend_fn(plt): # plt.legend(bbox_to_anchor=(1.025, .8), loc='upper left', borderaxespad=0.) 
plt.legend(loc='lower right', frameon=False, fontsize=12) importlib.reload(my_plot); my_plot.my_relplot( mpd_all, x='mf_id', y='count', hue='model', # kind='hist', context='paper', linewidth=2.5, ylim=[0, 50], # kde=True, # stat='density', height=4, width=6, # aspect=2, y_axis_label='# of postsyn GrCs', x_axis_label='Sorted MF boutons', save_filename=f'{script_n}_observed_vs_random_supp.svg', # custom_legend_fn=custom_legend_fn, show=True, ) # + mpd_all = MyPlotData() mpd_random = compress_pickle.load('mf_bouton_210519_random_circle_X_420000_540000_Z_19800_29800.gz') mpd_random = mpd_random.to_histogram('count', hist_key='num_grcs', hist_val='count') mpd_random.add_key_value('model', 'Radius-Average') mpd_random.add_cdf('count') mpd_all.append(mpd_random) mpd_random = compress_pickle.load( 'mf_bouton_210519_random_circle_dist_X_420000_540000_Z_19800_29800.gz') mpd_random = mpd_random.to_histogram('count', hist_key='num_grcs', hist_val='count') mpd_random.add_key_value('model', 'Radius-Distribution') mpd_random.add_cdf('count') mpd_all.append(mpd_random) mpd_random = compress_pickle.load( 'mf_bouton_210519_random_edge_global_15000_X_420000_540000_Z_19800_29800.gz') mpd_random = mpd_random.to_histogram('count', hist_key='num_grcs', hist_val='count') mpd_random.add_key_value('model', 'Vector-Shuffle') mpd_random.add_cdf('count') mpd_all.append(mpd_random) def custom_legend_fn(plt): # plt.legend(bbox_to_anchor=(1.025, .8), loc='upper left', borderaxespad=0.) plt.legend(loc='lower right', frameon=False, fontsize=12) mpd_count_observed = copy.copy(mpd_count) mpd_count_observed.add_key_value('model', 'Reconstructed') import matplotlib.pyplot as plt plt.subplots(figsize=(4, 3)) import seaborn as sns ax = sns.ecdfplot(data=mpd_count_observed.to_dataframe(), x="count", palette=palette, hue='model') importlib.reload(my_plot); my_plot.my_lineplot( mpd_all, x='num_grcs', y='count_cdf', hue='model', context='paper', # palette=palette, linewidth=1.25, ylim=[0, 1.02], height=3, width=4.5, # aspect=2, y_axis_label='Fraction of GrCs', x_axis_label='# of other GrCs sharing 2 MFs', save_filename=f'{script_n}_observed_vs_random_supp_cdf.svg', no_show=True, ) import matplotlib.patches as mpatches red_patch = mpatches.Patch(color='red', label='Reconstructed') patch0 = mpatches.Patch(color=sns.color_palette()[0], label='Radius-Average') patch1 = mpatches.Patch(color=sns.color_palette()[1], label='Radius-Distribution') patch2 = mpatches.Patch(color=sns.color_palette()[2], label='Vector-Shuffle') # plt.legend(handles=[red_patch, black_patch]) plt.legend(frameon=False, fontsize=12, handles=[red_patch, patch0, patch1, patch2], bbox_to_anchor=(1.025, .8),) # plt.legend(labels=['legendEntry1', 'legendEntry2']) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) plt.savefig(f'{script_n}_supp_cdf.svg', bbox_inches='tight', transparent=True) plt.show() plt.close() # + mpd_count_observed = copy.copy(mpd_count) mpd_count_observed.add_key_value('model', 'Observed') mpd_all = MyPlotData() mpd_all.append(mpd_count_observed) mpd_random = compress_pickle.load( 'mf_bouton_210519_random_circle_dist_X_420000_540000_Z_19800_29800.gz').add_key_value( 'model', 'Random') mpd_all.append(mpd_random) def custom_legend_fn(plt): # plt.legend(bbox_to_anchor=(1.025, .8), loc='upper left', borderaxespad=0.) 
plt.legend(loc='lower right', frameon=False, fontsize=12) importlib.reload(my_plot); my_plot.my_displot( mpd_all, x='count', hue='model', # kind='kde', kind='ecdf', context='paper', # linewidth=2.5, # ylim=[0, 50], height=4, width=4, # aspect=2, y_axis_label='MF boutons', x_axis_label='# of postsyn GrCs', save_filename=f'{script_n}_observed_vs_random_ecdf.svg', # custom_legend_fn=custom_legend_fn, show=True, ) # + mpd_count_observed = copy.copy(mpd_count) mpd_count_observed.add_key_value('model', 'Observed') import seaborn as sns sns.ecdfplot(data=mpd_count_observed.to_dataframe(), x="count") # + importlib.reload(my_plot); my_plot.my_displot( mpd_all, x='count', hue='model', kde=True, # kind='kde', # kind='ecdf', context='paper', # linewidth=2.5, # ylim=[0, 50], height=4, width=4, # aspect=2, y_axis_label='MF boutons', x_axis_label='# of postsyn GrCs', save_filename=f'{script_n}_observed_vs_random_pdf.svg', # custom_legend_fn=custom_legend_fn, show=True, ) # - mpd_random.data # + # mpd_count_observed = mpd_count.to_histogram('count', hist_key='num_grcs', hist_val='count') # mpd_count_observed.add_key_value('model', 'Reconstructed') # mpd_count_observed.add_cdf('count') mpd_all = MyPlotData() # mpd_all.append(mpd_count_observed) db = compress_pickle.load('mf_bouton_210519_random_gen_circle_100.gz') for trial in db: hist = defaultdict(int) for v in trial: hist[v] += 1 mpd_random = MyPlotData() for k, v in sorted(hist.items()): mpd_random.add( num_grcs=k, count=v, model='Random' ) mpd_random.add_cdf('count') mpd_all.append(mpd_random) def custom_legend_fn(plt): # plt.legend(bbox_to_anchor=(1.025, .8), loc='upper left', borderaxespad=0.) plt.legend(loc='lower right', frameon=False, fontsize=12) palette = { 'Reconstructed': 'red', 'Random': 'black', } mpd_count_observed = copy.copy(mpd_count) mpd_count_observed.add_key_value('model', 'Reconstructed') import matplotlib.pyplot as plt plt.subplots(figsize=(4, 3)) import seaborn as sns ax = sns.ecdfplot(data=mpd_count_observed.to_dataframe(), x="count", palette=palette, hue='model') importlib.reload(my_plot); my_plot.my_lineplot( mpd_all, ax=ax, x='num_grcs', y='count_cdf', hue='model', # kind='kde', # kind='line', context='paper', palette=palette, linewidth=.75, ylim=[0, 1.02], height=3, width=4.5, ci=99, # aspect=2, y_axis_label='Fraction of MF boutons', x_axis_label='# of postsyn GrCs', save_filename=f'{script_n}_observed_vs_random_cdf.svg', # custom_legend_fn=custom_legend_fn, # show=True, no_show=True, ) import matplotlib.patches as mpatches red_patch = mpatches.Patch(color='red', label='Reconstructed') black_patch = mpatches.Patch(color='black', label='Random') # plt.legend(handles=[red_patch, black_patch]) plt.legend(loc='top right', frameon=False, fontsize=12, handles=[red_patch, black_patch]) # plt.legend(labels=['legendEntry1', 'legendEntry2']) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) plt.savefig(f'{script_n}_observed_vs_random_cdf.svg', bbox_inches='tight', transparent=True) plt.show() plt.close() # - mpd_count_observed.data from scipy.stats import kurtosis observed = [] for i in mpd_count.data: observed.append(i['count']) print(f'Observed: {kurtosis(observed)}') random_vals = [] for i in mpd_random.data: random_vals.append(i['count']) print(f'Random: {kurtosis(random_vals)}') import scipy.stats scipy.stats.ranksums(observed, random_vals) from mlxtend.evaluate import permutation_test p_value = permutation_test(observed, random_vals, method='approximate', num_rounds=10000, seed=0) print(p_value) db = 
compress_pickle.load('mf_bouton_210519_random_gen_circle.gz') more_random_vals = [j for i in db for j in i] from mlxtend.evaluate import permutation_test p_value = permutation_test(observed, more_random_vals, method='approximate', num_rounds=100, seed=0) print(p_value) db = compress_pickle.load('mf_bouton_210519_random_gen_circle_100.gz') more_random_vals = [j for i in db for j in i] from mlxtend.evaluate import permutation_test p_value = permutation_test(observed, more_random_vals, method='approximate', num_rounds=100, seed=0) print(p_value) import scipy.stats scipy.stats.ranksums(observed, more_random_vals) # + # kurtosis test from scipy.stats import kurtosis db = compress_pickle.load('mf_bouton_210519_random_gen_circle.gz') observed = [] for i in mpd_count.data: observed.append(i['count']) observed_k = [kurtosis(observed)] random_k = [] for trial in db: random_k.append(kurtosis(trial)) # - import scipy.stats print(scipy.stats.ranksums(observed_k, random_k)) from mlxtend.evaluate import permutation_test p_value = permutation_test(observed_k, random_k, method='approximate', num_rounds=10000, seed=0) print(p_value) # + # kurtosis test from scipy.stats import kurtosis db = compress_pickle.load('mf_bouton_210519_random_gen_circle_1000.gz') observed = [] for i in mpd_count.data: observed.append(i['count']) observed_k = [kurtosis(observed)] random_k = [] for trial in db: random_k.append(kurtosis(trial)) # random_k *= 10 import scipy.stats print(scipy.stats.ranksums(observed_k, random_k)) from mlxtend.evaluate import permutation_test p_value = permutation_test(observed_k, random_k, method='approximate', num_rounds=10000, seed=0) print(p_value) # + # kurtosis test from scipy.stats import kurtosis db = compress_pickle.load('mf_bouton_210519_random_gen_circle_1000.gz') observed = [] for i in mpd_count.data: observed.append(i['count']) observed_k = [scipy.stats.variation(observed)] random_k = [] for trial in db: random_k.append(scipy.stats.variation(trial)) random_k *= 10 import scipy.stats print(scipy.stats.ranksums(observed_k, random_k)) from mlxtend.evaluate import permutation_test p_value = permutation_test(observed_k, random_k, method='approximate', num_rounds=10000, seed=0) print(p_value)
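# +
# Optional robustness check (an addition, not part of the original analysis):
# a simple bootstrap confidence interval for the kurtosis of the observed
# per-bouton GrC counts. It reuses `observed` and `kurtosis` from the cells
# above; the number of resamples (10000) and the seed are arbitrary choices.
import numpy as np
from scipy.stats import kurtosis

rng = np.random.default_rng(0)
observed_arr = np.asarray(observed)
boot_k = [kurtosis(rng.choice(observed_arr, size=len(observed_arr), replace=True))
          for _ in range(10000)]
ci_low, ci_high = np.percentile(boot_k, [2.5, 97.5])
print(f'observed kurtosis = {kurtosis(observed_arr):.3f}, '
      f'bootstrap 95% CI = [{ci_low:.3f}, {ci_high:.3f}]')
# -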
analysis/mf_grc_analysis/mf_combination_representation/mf_bouton_210519_compare_dist.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline #from matplotlib import pyplot as plt #import sncosmo from sntd import plotting, io, simulation, fitting # Test 1 : simulate a doubly-imaged Type Ib SN and fit for time delays modname = 'snana-2004gv' snType = 'Ib' lcs = simulation.createMultiplyImagedSN( modname, snType, .1, bands=['bessellb', 'bessellv', 'bessellr'], zp=27., cadence=5., epochs=15., mjdRange=[0,100.], time_delays=[0., 15.], magnifications=[1.,5.], objectName='Test', telescopename='HST', microlensing=False) ifig = 0 print("Simulated strongly lensed SN \n") lcs.plot_lightcurve(bands=['bessellb', 'bessellv', 'bessellr'], showmodel='sim', showfig=True) for k in lcs.images.keys(): print("image {} t0={}".format(k, lcs.images[k].simMeta['t0'])) lcs_tdfit=fitting.fit_data(lcs, snType='Ib', models=['snana-2004gv'], params=['amplitude','t0'], combined_or_separate='separate', method='minuit') # TODO: make this more general, i.e. use keywords list instead of s1 and s2. t0_s1 = lcs_tdfit.images['S1'].fits.model.get('t0') t0_s2 = lcs_tdfit.images['S2'].fits.model.get('t0') A_s1 = lcs_tdfit.images['S1'].fits.model.get('amplitude') A_s2 = lcs_tdfit.images['S2'].fits.model.get('amplitude') lcs_combined = lcs.combine_curves(tds={'S1':1,'S2':t0_s2-t0_s1}, mus={'S1':1,'S2':A_s2/A_s1}) lcs.plot_lightcurve(bands=['bessellb', 'bessellv', 'bessellr'], combined=True, showmodel='fit', showfig=True)
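# Quick numerical check of the recovered parameters (an illustrative addition, not part
# of the original example). It reuses t0_s1, t0_s2, A_s1, A_s2 and the 'S1'/'S2' image
# keys from above; the "true" delay comes from the simMeta values printed earlier, and
# the input magnification ratio was set to 5 in the createMultiplyImagedSN call.
true_dt = lcs.images['S2'].simMeta['t0'] - lcs.images['S1'].simMeta['t0']
fit_dt = t0_s2 - t0_s1
print("time delay: fitted = {:.2f}, simulated = {:.2f}".format(fit_dt, true_dt))
print("magnification ratio: fitted = {:.2f}, simulated input = 5.0".format(A_s2 / A_s1))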
sntd/examples/sim_and_fit.ipynb
#!/usr/bin/env python # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Principle of Maximum Likelihood # # # ## Description: # # Python script for illustrating the principle of maximum likelihood and a likelihood fit. # # __This is both an exercise, but also an attempt to illustrate four things:__ # 1. How to make a (binned and unbinned) Likelihood function/fit. # 2. The difference and a comparison between a Chi-square and a (binned) Likelihood. # 3. The difference and a comparison between a binned and unbinned Likelihood. # 4. What goes on behind the scenes in Minuit, when it is asked to fit something. # # In this respect, the exercise is more of an illustration rather than something to be used directly, which is why it is followed later by another exercise, where you can test if you have understood the differences, and how and when to apply which fit method. # # The example uses 50 exponentially distributed random times, with the goal of finding the best estimate of the lifetime (data is generated with lifetime, tau = 1). Three estimates are considered: # 1. Chi-square fit (chi2) # 2. Binned Likelihood fit (bllh) # 3. Unbinned Likelihood fit (ullh) # # The three methods are based on a scan of values for tau in the range [0.5, 2.0]. For each value of tau, the chi2, bllh, and ullh are calculated. In the two likelihood cases, it is actually -2*log(likelihood) which is calculated, which you should (by now) understand why. # # Note that the unbinned likelihood is in principle the "optimal" fit, but also the most difficult for several reasons (convergence, numerical problems, implementation, speed, etc.). However, all three methods/constructions essentially yield the same results, when there is enough statistics (i.e. errors are Gaussian), though the $\chi^2$ also gives a fit quality. # # The problem is explicitly chosen to have only one fit parameter, such that simple 1D graphs can show what goes on. In this case, the analytical solution (simple mean) is actually prefered (see Barlow). Real world problems will almost surely be more complex. # # Also, the exercise is mostly for illustration. In reality, one would hardly ever calculate and plot the Chi-square or Likelihood values, but rather do the minimization using an algorithm (Minuit) to do the hard work. # # ### Authors: # - <NAME> (<NAME> Institute, <EMAIL>) # - <NAME> (<EMAIL>) # # ### Date: # - 26-11-2021 (latest update) # # ### Reference: # - Barlow, chapter 5 (5.1-5.7) # - Cowan, chapter 6 # # *** import numpy as np # Matlab like syntax for linear algebra and functions import matplotlib.pyplot as plt # Plots and figures like you know them from Matlab import seaborn as sns # Make the plots nicer to look at from iminuit import Minuit # The actual fitting tool, better than scipy's import sys # Module to see files and folders in directories from scipy import stats # + sys.path.append('../../../External_Functions') from ExternalFunctions import Chi2Regression, BinnedLH, UnbinnedLH from ExternalFunctions import nice_string_output, add_text_to_ax # useful functions to print fit results on figure plt.rcParams['font.size'] = 16 # set some basic plotting parameters # - # ## Program settings: # + save_plots = False # Determining if plots are saved or not verbose = True # Should the program print or not? veryverbose = True # Should the program print a lot or not? 
ScanChi2 = True # In addition to fit for minimum, do a scan...

# Parameters of the problem:
Ntimes = 50 # Number of time measurements.
tau_truth = 1.0; # We choose (like Gods!) the lifetime.

# Binning:
Nbins = 50 # Number of bins in histogram
tmax = 10.0 # Maximum time in histogram
binwidth = tmax / Nbins # Size of bins (s)

# General settings:
r = np.random # Random numbers
r.seed(42) # We set the numbers to be random, but the same for each run
# -

#
# ## Generate data:
# Produce array of exponentially distributed times and put them in a histogram:

t = r.exponential(tau_truth, Ntimes) # Exponential with lifetime tau.
yExp, xExp_edges = np.histogram(t, bins=Nbins, range=(0, tmax))

# Is the data plotted like we would like it to be? Let's check...

# In case you want to check that the numbers really come out as you want to (very healthy to do at first):
if (veryverbose) :
    for index, time in enumerate(t) :
        print(f" {index:2d}: t = {time:5.3f}")
        if index > 10:
            break # let's restrain ourselves

# Looks like values are coming in, but are they actually giving an exponential? Remember the importance of __plotting your data beforehand__!

X_center = xExp_edges[:-1] + (xExp_edges[1]-xExp_edges[0])/2.0 # Get the value of the histogram bin centers
plt.plot(X_center,yExp,'o')
plt.show()

# Check that it looks like you are producing the data that you want. If this is the case, move on (and possibly comment out the plot!).

# ## Analyse data:
# The following is "a manual fit", i.e. scanning over possible values of the fitting parameter(s) - here luckily only one, tau - and seeing what value of chi2, bllh, and ullh it yields. When plotting these, one should find a <b>parabola</b>, the minimum value of which is the optimal fitting parameter of tau. The rate of increase around this minimum represents the uncertainty of the fitting parameter.

# Define the number of tau values and their range to test in Chi2 and LLH:
# As we know the "truth", namely tau = 1, the range [0.5, 1.5] seems fitting for the mean.
# The number of steps can be increased at will, but for now 50 seems fitting.
Ntau_steps = 50
min_tau = 0.5
max_tau = 1.5
delta_tau = (max_tau-min_tau) / Ntau_steps

# Loop over hypotheses for the value of tau and calculate Chi2 and (B)LLH:
chi2_minval = 999999.9 # Minimal Chi2 value found
chi2_minpos = 0.0 # Position (i.e. tau value) of minimal Chi2 value
bllh_minval = 999999.9
bllh_minpos = 0.0
ullh_minval = 999999.9
ullh_minpos = 0.0
tau = np.zeros(Ntau_steps+1)
chi2 = np.zeros(Ntau_steps+1)
bllh = np.zeros(Ntau_steps+1)
ullh = np.zeros(Ntau_steps+1)

# Now loop over POSSIBLE tau estimates:
for itau in range(Ntau_steps+1):
    tau_hypo = min_tau + itau*delta_tau # Scan in values of tau
    tau[itau] = tau_hypo

    # Calculate Chi2 and binned likelihood (from loop over bins in histogram):
    chi2[itau] = 0.0
    bllh[itau] = 0.0
    for ibin in range (Nbins) :
        # Note: The number of EXPECTED events is the integral over the bin!
        xlow_bin = xExp_edges[ibin]
        xhigh_bin = xExp_edges[ibin+1]
        # Given the start and end of the bin, we calculate the INTEGRAL over the bin,
        # to get the expected number of events in that bin:
        nexp = Ntimes * (np.exp(-xlow_bin/tau_hypo) - np.exp(-xhigh_bin/tau_hypo))
        # The observed number of events... that is just the data!
nobs = yExp[ibin] if (nobs > 0): # For ChiSquare but not LLH, we need to require Nobs > 0, as we divide by this: chi2[itau] += (nobs-nexp)**2 / nobs # Chi2 summation/function bllh[itau] += -2.0*np.log(stats.poisson.pmf(int(nobs), nexp)) # Binned LLH function if (veryverbose and itau == 0) : print(f" Nexp: {nexp:10.7f} Nobs: {nobs:3.0f} Chi2: {chi2[itau]:5.1f} BLLH: {bllh[itau]:5.1f}") # Calculate Unbinned likelihood (from loop over events): ullh[itau] = 0.0 for time in t : # i.e. for every data point generated... ullh[itau] += -2.0*np.log(1.0/tau_hypo*np.exp(-time/tau_hypo)) # Unbinned LLH function if (verbose) : print(f" {itau:3d}: tau = {tau_hypo:4.2f} chi2 = {chi2[itau]:6.2f} log(bllh) = {bllh[itau]:6.2f} log(ullh) = {ullh[itau]:6.2f}") # Search for minimum values of chi2, bllh, and ullh: if (chi2[itau] < chi2_minval) : chi2_minval = chi2[itau] chi2_minpos = tau_hypo if (bllh[itau] < bllh_minval) : bllh_minval = bllh[itau] bllh_minpos = tau_hypo if (ullh[itau] < ullh_minval) : ullh_minval = ullh[itau] ullh_minpos = tau_hypo print(f" Decay time of minimum found: chi2: {chi2_minpos:7.4f}s bllh: {bllh_minpos:7.4f}s ullh: {ullh_minpos:7.4f}s") print(f" Chi2 value at minimum: chi2 = {chi2_minval:.1f}") # ### Plot and fit results: # Define range around minimum to be fitted: min_fit = 0.15 max_fit = 0.20 # + fig, axes = plt.subplots(2, 2, figsize=(16, 12)) ax_chi2 = axes[0,0] ax_bllh = axes[1,0] ax_ullh = axes[0,1] # A fourth plot is available for plotting whatever you want :) # ChiSquare: # ---------- ax_chi2.plot(tau, chi2, 'k.', label='chi2') ax_chi2.set_xlim(chi2_minpos-2*min_fit, chi2_minpos+2*max_fit) ax_chi2.set_title("ChiSquare") ax_chi2.set_xlabel(r"Value of $\tau$") ax_chi2.set_ylabel("Value of ChiSquare") # Binned Likelihood: # ---------- ax_bllh.plot(tau, bllh,'bo') ax_bllh.set_xlim(bllh_minpos-2*min_fit, bllh_minpos+2*max_fit) ax_bllh.set_title("Binned Likelihood") ax_bllh.set_xlabel(r"Value of $\tau$") ax_bllh.set_ylabel(r"Value of $\ln{LLH}$") # Unbinned Likelihood: # ---------- ax_ullh.plot(tau, ullh, 'g.') ax_ullh.set_xlim(ullh_minpos-2*min_fit, ullh_minpos+2*max_fit) ax_ullh.set_title("Unbinned Likelihood") ax_ullh.set_xlabel(r"Value of $\tau$") ax_ullh.set_ylabel(r"Value of $\ln{LLH}$") fig; # - # --- # ## Parabola function # Note that the parabola is defined differently than normally. The parameters are: # * `minval`: Minimum value (i.e. constant) # * `minpos`: Minimum position (i.e. x of minimum) # * `quadratic`: Quadratic term. def func_para(x, minval, minpos, quadratic) : return minval + quadratic*(x-minpos)**2 func_para_vec = np.vectorize(func_para) # Note: This line makes it possible to send vectors through the function! # --- # ## Double parabola with different slopes on each side of the minimum: # In case the uncertainties are asymmetric, the parabola will also be so, and hence needs to be fitted with two separate parabolas meeting at the top point. Parameters are now as follows: # * `minval`: Minimum value (i.e. constant) # * `minpos`: Minimum position (i.e. x of minimum) # * `quadlow`: Quadratic term on lower side # * `quadhigh`: Quadratic term on higher side def func_asympara(x, minval, minpos, quadlow, quadhigh) : if (x < minpos) : return minval + quadlow*(x-minpos)**2 else : return minval + quadhigh*(x-minpos)**2 func_asympara_vec = np.vectorize(func_asympara) # Note: This line makes it possible to send vectors through the function! 
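# Before fitting, a small numerical illustration of how the parabola coefficients translate into
# uncertainties (all numbers below are made up for illustration, they are not results of this exercise):
# since a change of one unit in the Chi2 (or in -2*log(likelihood)) corresponds to one standard
# deviation, `quadratic*(x-minpos)**2 = 1` gives `sigma = 1/sqrt(quadratic)`, and the asymmetric
# parabola correspondingly gives two different uncertainties.

# +
x_demo = np.linspace(0.8, 1.2, 5)                            # hypothetical tau values around a minimum at 1.0
y_demo = func_asympara_vec(x_demo, 40.0, 1.0, 150.0, 250.0)  # minval=40, minpos=1.0, quadlow=150, quadhigh=250
print(y_demo)                                                # [46.  41.5 40.  42.5 50. ]
print("sigma_low  =", 1.0/np.sqrt(150.0))                    # ~0.082 (lower-side uncertainty)
print("sigma_high =", 1.0/np.sqrt(250.0))                    # ~0.063 (upper-side uncertainty)
# -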
# ## Perform both fits: # + # Fit chi2 values with our parabola: indexes = (tau>chi2_minpos-min_fit) & (tau<chi2_minpos+max_fit) # Fit with parabola: chi2_object_chi2 = Chi2Regression(func_para, tau[indexes], chi2[indexes]) minuit_chi2 = Minuit(chi2_object_chi2, minval=chi2_minval, minpos=chi2_minpos, quadratic=20.0) minuit_chi2.errordef = 1.0 minuit_chi2.migrad() # Fit with double parabola: chi2_object_chi2_doublep = Chi2Regression(func_asympara, tau[indexes], chi2[indexes]) minuit_chi2_doublep = Minuit(chi2_object_chi2_doublep, minval=chi2_minval, minpos=chi2_minpos, quadlow=20.0, quadhigh=20.0) minuit_chi2_doublep.errordef = 1.0 minuit_chi2_doublep.migrad(); # + # Plot (simple) fit: minval, minpos, quadratic = minuit_chi2.values # Note how one can "extract" the three values from the object. print(minval) minval_2p, minpos_2p, quadlow_2p, quadhigh_2p = minuit_chi2_doublep.values print(minval_2p) x_fit = np.linspace(chi2_minpos-min_fit, chi2_minpos+max_fit, 1000) y_fit_simple = func_para_vec(x_fit, minval, minpos, quadratic) ax_chi2.plot(x_fit, y_fit_simple, 'b-') d = {'Chi2 value': minval, 'Fitted tau (s)': minpos, 'quadratic': quadratic} text = nice_string_output(d, extra_spacing=3, decimals=3) add_text_to_ax(0.02, 0.95, text, ax_chi2, fontsize=14) fig.tight_layout() if save_plots: fig.savefig("FitMinimum.pdf", dpi=600) fig # + # Given the parabolic fit, we can now extract the uncertainty on tau (think about why the below formula works!): err = 1.0 / np.sqrt(quadratic) # For comparison, I give one extra decimal, than I would normally do: print(f" Chi2 fit gives: tau = {minpos:.3f} +- {err:.3f}") # For the asymmetric case, there are naturally two errors to calculate. #err_lower = 1.0 / np.sqrt(quadlow) #err_upper = 1.0 / np.sqrt(quadhigh) # - # Go through tau values to find minimum and +-1 sigma: # This assumes knowing the minimum value, and Chi2s above Chi2_min+1 if (ScanChi2) : if (((chi2[0] - chi2_minval) > 1.0) and ((chi2[Ntau_steps] - chi2_minval) > 1.0)) : found_lower = False found_upper = False for itau in range (Ntau_steps+1) : if ((not found_lower) and ((chi2[itau] - chi2_minval) < 1.0)) : tau_lower = tau[itau] found_lower = True if ((found_lower) and (not found_upper) and ((chi2[itau] - chi2_minval) > 1.0)) : tau_upper = tau[itau] found_upper = True print(f" Chi2 scan gives: tau = {chi2_minpos:6.4f} + {tau_upper-chi2_minpos:6.4f} - {chi2_minpos-tau_lower:6.4f}") else : print(f" Error: Chi2 values do not fulfill requirements for finding minimum and errors!") # ### Discussion: # One could here of course have chosen a finer binning, but that is still not very satisfactory, and in any case very slow. That is why we of course want to use e.g. iMinuit to perform the fit, and extract all the relevant fitting parameters in a nice, fast, numerically stable, etc. way. # --- # # # Fit the data using iminuit (both chi2 and binned likelihood fits) # # Now we want to see, what a "real" fit gives, in order to compare our result with the one provided by Minuit. # + # Define the function to fit with: def func_exp(x, N0, tau) : return N0 * binwidth / tau * np.exp(-x/tau) # Define the function to fit with: def func_exp2(x, tau) : return Ntimes * binwidth / tau * np.exp(-x/tau) # - # ### $\chi^2$ fit: # + # Prepare figure fig_fit, ax_fit = plt.subplots(figsize=(8, 6)) ax_fit.set_title("tau values directly fitted with iminuit") ax_fit.set_xlabel("Lifetimes [s]") ax_fit.set_ylabel("Frequency [ev/0.1s]") # Plot our tau values indexes = yExp>0 # only bins with values! 
xExp = (xExp_edges[1:] + xExp_edges[:-1])/2 # Move from bins edges to bin centers syExp = np.sqrt(yExp) # Uncertainties ax_fit.errorbar(xExp[indexes], yExp[indexes], syExp[indexes], fmt='k_', ecolor='k', elinewidth=1, capsize=2, capthick=1) # Chisquare-fit tau values with our function: chi2_object_fit = Chi2Regression(func_exp, xExp[indexes], yExp[indexes], syExp[indexes]) # NOTE: The constant for normalization is NOT left free in order to have only ONE parameter! minuit_fit_chi2 = Minuit(chi2_object_fit, N0=Ntimes, tau=tau_truth) minuit_fit_chi2.fixed["N0"] = True minuit_fit_chi2.errordef = 1.0 minuit_fit_chi2.migrad() # Plot fit x_fit = np.linspace(0, 10, 1000) y_fit_simple = func_exp(x_fit, *minuit_fit_chi2.values) ax_fit.plot(x_fit, y_fit_simple, 'b-', label="ChiSquare fit") # + # Print the obtained fit results: # print(minuit_fit_chi2.values["tau"], minuit_fit_chi2.errors["tau"]) tau_fit = minuit_fit_chi2.values["tau"] etau_fit = minuit_fit_chi2.errors["tau"] print(f" Decay time of minimum found: chi2: {tau_fit:.3f} +- {etau_fit:.3f}s") print(f" Chi2 value at minimum: chi2 = {minuit_fit_chi2.fval:.1f}") # - # Alternatively to the above, one can in iMinuit actually ask for the Chi2 curve to be plotted by one command: minuit_fit_chi2.draw_mnprofile('tau') # --- # # ### Binned likelihood fit: # # Below is an example of a binned likelihood fit. Try to write an unbinned likelihood fit yourself! # + # Binned likelihood-fit tau values with our function # extended=True because we have our own normalization in our fit function bllh_object_fit = BinnedLH(func_exp2, t, bins=Nbins, bound=(0, tmax), extended=True) minuit_fit_bllh = Minuit(bllh_object_fit, tau=tau_truth) minuit_fit_bllh.errordef = 0.5 # Value for likelihood fit minuit_fit_bllh.migrad() # Plot fit x_fit = np.linspace(0, 10, 1000) y_fit_simple = func_exp2(x_fit, *minuit_fit_bllh.values[:]) ax_fit.plot(x_fit, y_fit_simple, 'r-', label="Binned Likelihood fit") # Define the ranges: ax_fit.set_xlim(0, 5) ax_fit.set_ylim(bottom=0) # We don't want to see values below this! fig_fit.legend(loc=[0.45, 0.75]) fig_fit.tight_layout() fig_fit # - if (save_plots) : fig_fit.savefig("ExponentialDist_Fitted.pdf", dpi=600) # --- # # ## Summary: # # Make sure that you understand how the likelihood is different from the ChiSquare, # and how the binned likelihood is different from the unbinned. If you don't do it, # this exercise, and much of the course and statistics in general will be a bit lost # on you! :-) # # The binned likelihood resembels the ChiSquare a bit, only the evaluation in each bin # is different, especially if the number of events in the bin is low, as the PDF # considered (Poisson for the LLH, Gaussian for the ChiSquare) is then different. # At high statistics, they give the same result, but the ChiSquare fit quality can be evaluated. # # The unbinned likelihood uses each single event, and is thus different at its core. # This can make a difference, if there are only few events and/or if each event has # several attributes, which can't be summarized in a simple histogram with bins. # # ## Conclusion: # Fitting "manually" is damn hard, cumbersome, and not a thing that one wants to do. Always let a well tested program (e.g. iMinuit) do it, and instead take the inspired position of checking that the fitting program actually is doing what it is supposed to do, and that everything comes out reasonable. # # The art of fitting is multiple. **Very importantly, a fit requires good input parameters**, as it will otherwise not converge. 
Also, the Chi-square fit is more robust, so it is often a good idea to start with this, and if the fit converges, one can use the fitted parameters as input values for subsequent (likelihood) fits. Finally, one needs to consider the binning and fitting range carefully, and make good use of the p-value from the Chi-square.
#
#
#
#
# # Questions:
#
# 1) Consider the four plots (bottom right one empty) showing chi2, bllh, and ullh as a function of lifetime, tau. Do the four curves resemble each other in shape? Are they identical in shape? Do the three methods give similar results, or are they different? Do you see the relation between the curves and the fit result? This question requires that you also fit a parabola to the other two cases. Remember to consider both the central value and the uncertainty of tau.
#
# Example solution 1:
# The main thing to see is that the two likelihood curves (and especially the unbinned one) rise faster, and thus have a smaller uncertainty. Also, if repeating the experiment many times, it will be clear that the likelihood is a better estimate, while the Chi2 is biased towards smaller values (tau < 1), as the higher, mostly empty bins are disregarded (well, depending on how you define the denominator in the Pearson Chi2). Finally, the minimum likelihood values don't give any information in themselves, unlike the Chi2 value, which can be used to test the fit goodness.
#
# ---
#
# 2) Now consider the two (chi2 and bllh) fits by iMinuit. How similar are the results they obtain? Again, consider both the central values and the uncertainties.
#
# 3) Try to decrease the number of exponential numbers you consider to say 10, and see how things change. Does the difference between Chi2, bllh, and ullh get bigger or not?
#
# Example solution 2 and 3:
# In the limit of large statistics, the three converge, but at low statistics, the Chi2 can become a really poor estimate. The two likelihood methods do much better. Their difference is due to the binning, which if too coarse gives the unbinned likelihood fit an advantage. All of this is best investigated by running many experiments (a bit like god!) to see the outcome statistically.
#
# ---
#
# 4) Try to increase the number of exponential numbers you consider to say 10000, and see what happens to the difference between Chi2 and BLLH. Also, do the errors become more symmetric? Perhaps you will need to consider a shorter fit range around the minimal value, increase the number of points at which you calculate the chi2/bllh/ullh (or decrease the range you scan!), and possibly change the ranges of your plotting.
#
# Example solution 4:
# With more statistics, all methods converge, and also the asymmetry of the chi2/llh curve decreases. On a large scale, it may be (and still is) asymmetric, but locally around the minimum it becomes almost perfectly symmetric. In general, uncertainties are more or less symmetric, and become increasingly so with increasing statistics.
#
#
# ### Advanced Questions:
#
# 5) Make (perhaps in a new program) a loop over the production of random data,
# and try to see if you can print (or plot) the Chi2 and BLLH results for each
# turn. Can you spot any general trends? I.e. is the Chi2 uncertainty always
# lower or higher than the (B/U)LLH? And are any of the estimators biased?
#
# 6) Make a copy of the program and put in a different PDF (i.e. not the exponential).
# Run it, and see if the errors are still asymmetric. For the function, try either
# e.g. a Polynomial or a Gaussian.
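# ### Appendix: one possible unbinned likelihood fit
# As prompted in the binned-likelihood section above ("Try to write an unbinned likelihood fit
# yourself!"), here is one minimal sketch of how it can be done by handing iMinuit the negative
# log-likelihood of the exponential PDF directly. The names (`nll_exp`, `minuit_fit_ullh`) are ours,
# not part of the original exercise, and this is only one of several possible solutions.

# +
def nll_exp(tau) :
    # Negative log-likelihood of the unbinned exponential model, summed over all measured times:
    return -np.sum(np.log(1.0/tau * np.exp(-t/tau)))

minuit_fit_ullh = Minuit(nll_exp, tau=tau_truth)
minuit_fit_ullh.errordef = 0.5     # 0.5 for a (negative log) likelihood fit
minuit_fit_ullh.migrad()

print(f"  Unbinned LLH fit gives:  tau = {minuit_fit_ullh.values['tau']:.3f} +- {minuit_fit_ullh.errors['tau']:.3f}")
# -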
AppStat2022/Week2/ExampleSolutions/LikelihoodFit/LikelihoodFit_ExampleSolution.ipynb
# # Operators: Sym import mechkit import numpy as np np.set_printoptions( linewidth=140, precision=2, suppress=True, ) np.random.seed(1) converter = mechkit.notation.Converter() def print_in_mandel9(tensor): print(converter.to_mandel9(tensor)) # ## Split random tensor of fourth order into completely symmetric and skew parts tensor = np.random.rand(3, 3, 3, 3) sym_operator = mechkit.operators.Sym(axes=None) # by default all axes are symmetrized sym_part = sym_operator(tensor) skew_part = tensor - sym_part print("tensor=") print_in_mandel9(tensor) print("sym_part=") print_in_mandel9(sym_part) print("skew_part=") print_in_mandel9(skew_part) print("sym_part + skew_part") print_in_mandel9(sym_part + skew_part) assert np.allclose(tensor, sym_part + skew_part) # ## Split into part which has inner symmetry and the remaining part sym_inner_part = mechkit.operators.Sym_Fourth_Order_Special(label="inner")(tensor) remaining = tensor - sym_inner_part print("tensor=") print_in_mandel9(tensor) print("sym_inner_part=") print_in_mandel9(sym_inner_part) print("remaining=") print_in_mandel9(remaining)
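# Mirroring the check done for the first split, one can verify that the inner-symmetric part and the
# remaining part recombine to the original tensor (a trivial but reassuring consistency check; this
# extra line is an addition to the example, not part of mechkit itself).

assert np.allclose(tensor, sym_inner_part + remaining)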
docs/source/notebooks/05.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import altair as alt import numpy as np # # Initial Beta Only def run_sir_penn(beta, gamma, population, num_infected, N): i_n = num_infected s_n = population - i_n r_n = 0. inew_n = 1 s_a, i_a, r_a, inew_a = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) for index in range(N): s_a[index] = s_n i_a[index] = i_n r_a[index] = r_n inew_a[index] = inew_n s = s_n i = i_n r = r_n inew = inew_n s_n = (-beta * s * i) + s i_n = (beta * s * i - gamma * i) + i r_n = gamma * i + r inew_n = (beta * s * i) return s_a, i_a, r_a, inew_a # + infectious_days = 14. doubling_rate = 3.5 population = 1000000. num_infected = 100. N = 250 gamma = 1 / infectious_days growth_rate = 2 ** (1 / doubling_rate) - 1 beta = (growth_rate + gamma) / (population - 1) s, i, r, i_new = run_sir_penn(beta, gamma, population, num_infected, N) # + d = pd.DataFrame({ 'day': np.arange(len(s)), 'susceptable': s, 'proportion_susceptable': s / s.max(), "infected": i, "recovered": r, "new_infections": i_new, "infections_cumulative": np.cumsum(i_new), "penn_ever_infected": i + r, }) (alt .Chart(d) .transform_fold(['susceptable', 'infected', 'recovered', 'new_infections', 'infections_cumulative']) .encode(x='day:Q', y="value:Q", color='key:N') .mark_line() ) # - # ## Get $R_t$ for Each Day # ### Imports import pandas as pd import numpy as np import matplotlib.pyplot as plt from matplotlib.dates import date2num, num2date from matplotlib import dates as mdates from matplotlib import ticker from matplotlib.colors import ListedColormap import scipy.stats as sps from scipy.interpolate import interp1d # ### Constants # + # We create an array for every possible value of Rt R_T_MAX = 12 r_t_range = np.linspace(0, R_T_MAX, R_T_MAX*100+1) # Gamma is 1/serial interval # https://wwwnc.cdc.gov/eid/article/26/7/20-0282_article # https://www.nejm.org/doi/full/10.1056/NEJMoa2001316 GAMMA = 1/7 # %config InlineBackend.figure_format = 'retina' # - # ### Functions # + from IPython.core.debugger import set_trace def prepare_cases(cases, min_cases=25, win_size=7): new_cases = cases.diff() smoothed = new_cases.rolling( win_size, win_type='gaussian', min_periods=1, center=True).mean(std=2).round() idx_start = np.searchsorted(smoothed, min_cases) smoothed = smoothed.iloc[idx_start:] original = new_cases.loc[smoothed.index] return original, smoothed # - def get_posteriors(sr, sigma=0.15): # (1) Calculate Lambda lam = sr[:-1].values * np.exp(GAMMA * (r_t_range[:, None] - 1)) # (2) Calculate each day's likelihood likelihoods = pd.DataFrame( data = sps.poisson.pmf(sr[1:].values, lam), index = r_t_range, columns = sr.index[1:]) # (3) Create the Gaussian Matrix process_matrix = sps.norm(loc=r_t_range, scale=sigma ).pdf(r_t_range[:, None]) # (3a) Normalize all rows to sum to 1 process_matrix /= process_matrix.sum(axis=0) # (4) Calculate the initial prior prior0 = sps.gamma(a=4).pdf(r_t_range) prior0 /= prior0.sum() # Create a DataFrame that will hold our posteriors for each day # Insert our prior as the first posterior. posteriors = pd.DataFrame( index=r_t_range, columns=sr.index, data={sr.index[0]: prior0} ) # We said we'd keep track of the sum of the log of the probability # of the data for maximum likelihood calculation. 
log_likelihood = 0.0 # (5) Iteratively apply Bayes' rule for previous_day, current_day in zip(sr.index[:-1], sr.index[1:]): #(5a) Calculate the new prior current_prior = process_matrix @ posteriors[previous_day] #(5b) Calculate the numerator of Bayes' Rule: P(k|R_t)P(R_t) numerator = likelihoods[current_day] * current_prior #(5c) Calcluate the denominator of Bayes' Rule P(k) denominator = np.sum(numerator) # Execute full Bayes' Rule posteriors[current_day] = numerator/denominator # Add to the running sum of log likelihoods log_likelihood += np.log(denominator) return posteriors, log_likelihood # ### Get Confidence Intervals def highest_density_interval(pmf, p=.9): # If we pass a DataFrame, just call this recursively on the columns if(isinstance(pmf, pd.DataFrame)): return pd.DataFrame([highest_density_interval(pmf[col], p=p) for col in pmf], index=pmf.columns) cumsum = np.cumsum(pmf.values) # N x N matrix of total probability mass for each low, high total_p = cumsum - cumsum[:, None] # Return all indices with total_p > p lows, highs = (total_p > p).nonzero() # Find the smallest range (highest density) best = (highs - lows).argmin() low = pmf.index[lows[best]] high = pmf.index[highs[best]] return pd.Series([low, high], index=[f'Low_{p*100:.0f}', f'High_{p*100:.0f}']) def plot_rt(result, ax, state_name): ax.set_title(f"{state_name}") # Colors ABOVE = [1,0,0] MIDDLE = [1,1,1] BELOW = [0,0,0] cmap = ListedColormap(np.r_[ np.linspace(BELOW,MIDDLE,25), np.linspace(MIDDLE,ABOVE,25) ]) color_mapped = lambda y: np.clip(y, .5, 1.5)-.5 index = result['ML'].index.get_level_values('date') values = result['ML'].values # Plot dots and line ax.plot(index, values, c='k', zorder=1, alpha=.25) ax.scatter(index, values, s=40, lw=.5, c=cmap(color_mapped(values)), edgecolors='k', zorder=2) # Aesthetically, extrapolate credible interval by 1 day either side lowfn = interp1d(date2num(index), result['Low_75'].values, bounds_error=False, fill_value='extrapolate') highfn = interp1d(date2num(index), result['High_75'].values, bounds_error=False, fill_value='extrapolate') extended = pd.date_range(start=pd.Timestamp('2020-03-01'), end=index[-1]+pd.Timedelta(days=1)) ax.fill_between(extended, lowfn(date2num(extended)), highfn(date2num(extended)), color='k', alpha=.1, lw=0, zorder=3) ax.axhline(1.0, c='k', lw=1, alpha=.25); # Formatting ax.xaxis.set_major_locator(mdates.MonthLocator()) ax.xaxis.set_major_formatter(mdates.DateFormatter('%b')) ax.xaxis.set_minor_locator(mdates.DayLocator()) ax.yaxis.set_major_locator(ticker.MultipleLocator(1)) ax.yaxis.set_major_formatter(ticker.StrMethodFormatter("{x:.1f}")) ax.yaxis.tick_right() ax.spines['left'].set_visible(False) ax.spines['bottom'].set_visible(False) ax.spines['right'].set_visible(False) ax.margins(0) ax.grid(which='major', axis='y', c='k', alpha=.1, zorder=-2) ax.margins(0) ax.set_ylim(0.0, 5.0) ax.set_xlim(pd.Timestamp('2020-03-01'), result.index.get_level_values('date')[-1]+pd.Timedelta(days=1)) fig = plt.gcf() fig.set_facecolor('w') # ### Get $R_t$ for Each Day of Actuals # + def plot_rt_top(cases, axes, county_name="SIR Simulation"): cases_prepped, smoothed = prepare_cases(cases, min_cases=25) # Get cases plot ax = axes[0] ax.plot( cases_prepped.index, cases_prepped.values, c='k', linestyle=':', alpha=.5, label='Actual', ) ax.set_title(f"Cases per Day ({county_name})") ax.plot(smoothed.index, smoothed.values, label='Smoothed', ) ax.legend() # Get R_t plot posteriors, log_likelihood = get_posteriors(smoothed, sigma=.25) # Confidence Intervals # Note that this takes 
a while to execute - it's not the most efficient algorithm hdis = highest_density_interval(posteriors, p=.75) most_likely = posteriors.idxmax().rename('ML') # Look into why you shift -1 result = pd.concat([most_likely, hdis], axis=1) # Plot R_t with intervals ax = axes[1] plot_rt(result, ax, county_name) ax.set_title(f'Real-time $R_t$ for {county_name}') ax.xaxis.set_major_locator(mdates.WeekdayLocator()) ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d')) return result def display_counties(counties, convert_to_admits=False, **kwargs): if type(counties) is not list: counties = [counties] fig, axes = plt.subplots(nrows=len(counties), ncols=2, figsize=(1200/72,(400 * len(counties))/72)) fig.set_facecolor('w') for county_name, axes in zip(counties, axes): cases = get_county_cases(county_name) if convert_to_admits: cases = county_to_admissions(cases, **kwargs) plot_rt(cases, axes) # - d_trunc = d.iloc[:120].copy() cases = ( d_trunc [["infections_cumulative"]] .assign(date = pd.date_range("2020-03-20", periods=len(d_trunc), freq="D")) .set_index('date') .squeeze() ) fix, axes = plt.subplots(1, 2, figsize=(1200/72, 400/72)) result = plot_rt_top(cases, axes) axes[1].plot((result.ML / d.proportion_susceptable.iloc[:120].values), label="Adjusted by Proportion Susceptable") axes[1].legend() result ax = (result.ML / d.proportion_susceptable.iloc[:120].values).plot() result.ML
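# #### Quick sanity check of the Poisson rate
# Purely illustrative, with made-up numbers: inside `get_posteriors()` above, the expected number of
# new cases for a candidate $R_t$ is $\lambda = k_{t-1}\, e^{\gamma (R_t - 1)}$, and each day's
# likelihood is the Poisson probability of the observed count at that rate.

k_prev = 100                                       # hypothetical smoothed new-case count for "yesterday"
rt_demo = 1.5                                      # one candidate value of R_t
lam_demo = k_prev * np.exp(GAMMA * (rt_demo - 1))  # expected cases "today" under that R_t
print(lam_demo, sps.poisson.pmf(110, lam_demo))    # likelihood of then observing 110 cases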
modeling/RtExploration.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Install required packages
# Run the line below to ensure that all required packages are installed.

# !pip install -r requirements.txt

# ## Getting the Data
# In order to access the data that I use below you will need to join the [March Machine Learning Mania 2021 - NCAAM](https://www.kaggle.com/c/ncaam-march-mania-2021) Kaggle competition [here](https://www.kaggle.com/account/login?returnUrl=%2Fc%2Fncaam-march-mania-2021%2Frules).
# Then you will need to get your Kaggle API token by following the instructions [here](https://www.kaggle.com/docs/api).
#
# Once you have completed these steps, run the cell below to download and decompress the competition data.

# !kaggle competitions download -c ncaam-march-mania-2021 -p data
# !unzip -o data/ncaam-march-mania-2021.zip -d data

# # Imports
# Run the cell below to import all required modules.

import pandas as pd
from seaborn import heatmap
from matplotlib import pyplot as plt

file_path = 'data/MRegularSeasonCompactResults.csv'

# # Inspecting the Compact Regular Season results
# In this notebook we will inspect the compact regular season results provided by the `MRegularSeasonCompactResults.csv` file.

df = pd.read_csv(file_path)
df.head()

# ### Outcome Matrix
# Below we will create an all-time outcome matrix, which will count the total number of wins for every ordered tuple in the form `(WTeamID, LTeamID)`.

def compute_outcome_matrix(file_path, season=None):
    """Takes a file path to a compact results file and returns an outcome matrix
    for all Men's NCAA games. Given an optional season, outcomes are restricted
    to the season in question.
    """
    df = pd.read_csv(file_path)
    if season:
        df = df[df['Season'] == season]
    team_ids_by_game = df[['WTeamID', 'LTeamID']]
    win_counts = team_ids_by_game.value_counts().reset_index()
    col_names = win_counts.columns.to_list()
    col_names[2] = 'Wins'
    win_counts.columns = col_names
    outcome_matrix = win_counts.pivot_table(values= 'Wins', index='WTeamID', columns='LTeamID').fillna(0)
    return outcome_matrix

def show_outcome_matrix(file_path, season=None):
    """Takes the file path to a compact results file and plots a heatmap of outcomes
    for all Men's NCAA games. Outcomes can be restricted to a specific season by passing a season."""
    outcome_matrix = compute_outcome_matrix(file_path, season)
    fig, ax = plt.subplots(figsize=(20,15));
    if season:
        ax.set_title(f'Outcome matrix for {season} season.')
    else:
        ax.set_title('All-Time Outcome Matrix');
    heatmap(outcome_matrix, cmap='Blues', ax=ax);
    return fig

# #### All-time outcome heat map
# Below, we see that the all-time outcome matrix is fairly sparse, with the majority of pairings having never occurred.

fig = show_outcome_matrix(file_path)

# #### Outcome heat map for the 2019 season
# Below, we see that even seasonal data is sparse. It would be wise to group teams by conference to obtain a more informative visual (a sketch of one possible aggregation follows below).

fig = show_outcome_matrix(file_path, season=2019)
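# #### Sketch: conference-level outcome heat map
# One possible way to do the conference grouping mentioned above. This assumes the competition
# download also includes a team-to-conference mapping (assumed here to be `data/MTeamConferences.csv`
# with `Season`, `TeamID` and `ConfAbbrev` columns); adjust the file and column names to whatever the
# actual data provides.

def show_conference_matrix(file_path, conf_path='data/MTeamConferences.csv', season=2019):
    """Aggregates the team-level outcome matrix into conference-level win counts and plots it."""
    conf = pd.read_csv(conf_path)
    mapping = conf[conf['Season'] == season].set_index('TeamID')['ConfAbbrev']
    outcome = compute_outcome_matrix(file_path, season=season)
    by_conf = (outcome.rename(index=mapping, columns=mapping)
                      .groupby(level=0).sum()
                      .T.groupby(level=0).sum().T)
    fig, ax = plt.subplots(figsize=(12, 9))
    ax.set_title(f'Conference-level outcome matrix for {season} season.')
    heatmap(by_conf, cmap='Blues', ax=ax)
    return fig

fig = show_conference_matrix(file_path)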
HeatmapNCAA.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python (fastai_dev)
#     language: python
#     name: fastai_dev
# ---

# +
# default_exp fwie
# -

# # FWIe
# > Functions to calculate FWIe from FWI and CHI

#hide
from nbdev.export import notebook2script
from IPython.core.debugger import set_trace

#export
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
import scipy.io as sio
from tqdm import tqdm
import requests

# +
#export
def load_table(table_path):
    path = Path(table_path)
    table_file = path/'FWIeCalc_table.mat'
    if not table_file.is_file():
        print('Downloading FWIe table...')
        path.mkdir(exist_ok=True)
        url = 'https://github.com/mnpinto/FWIe/raw/master/data/FWIeCalc_table.mat'
        file = requests.get(url)
        open(str(table_file), 'wb').write(file.content)
    return sio.loadmat(str(table_file))

def FWIe_calc(FWI:np.ndarray, CHI:np.ndarray, table_path='data'):
    table = load_table(table_path)
    assert FWI.shape == CHI.shape
    rs, cs = FWI.shape
    FWIe = np.zeros_like(FWI)*np.nan
    for i in tqdm(range(rs)):
        for j in range(cs):
            fwi_argmin = np.argmin(np.abs(table['FWI'].reshape(-1) - FWI[i,j]))
            chi_argmin = np.argmin(np.abs(table['CHI'].reshape(-1) - CHI[i,j]))
            FWIe[i,j] = table['FWIe'][fwi_argmin, chi_argmin]
    return np.round(FWIe,1)

# +
assert FWIe_calc(np.array([[50]]), np.array([[12]]))[0,0] == 61.7

CHI = sio.loadmat('data/CHI_ERA5.mat')['CHI'][240]
FWI = sio.loadmat('data/FWI_ERA5.mat')['FWI'][240]
FWIe = FWIe_calc(FWI, CHI)
assert FWIe.shape == (35, 53)
# -

#hide
notebook2script()
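# As an aside (left untagged, so nbdev will not export it into the module): the per-pixel `argmin`
# lookups in `FWIe_calc` can also be vectorized with NumPy broadcasting, which is typically much
# faster on large grids at the cost of temporarily allocating an array of shape (rows, cols, table size).
# A rough sketch, assuming the table vectors are 1-D after `reshape(-1)`:

def FWIe_calc_vectorized(FWI:np.ndarray, CHI:np.ndarray, table_path='data'):
    table = load_table(table_path)
    fwi_idx = np.abs(table['FWI'].reshape(-1) - FWI[..., None]).argmin(axis=-1)
    chi_idx = np.abs(table['CHI'].reshape(-1) - CHI[..., None]).argmin(axis=-1)
    return np.round(table['FWIe'][fwi_idx, chi_idx], 1)

# Expected to agree with the loop version on the same inputs, e.g.:
# assert np.allclose(FWIe_calc_vectorized(FWI, CHI), FWIe_calc(FWI, CHI))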
fwie.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Comparative geometric mean returns # # We focus mathematically on geometric mean returns since they # optimally express mean-variance under logarithmic utility. # However, for visualization, we use boxplots # to see the persistence of returns. # # Accuracy has been improved by using our research on # Gaussian mixtures presented in https://git.io/gmix -- # specifically, our function gemrat() computes the geometric mean # rate by taking into account the fourth central moment, kurtosis. # This is crucial for *leptokurtotic* ("fat-tailed") assets. # # We examine some long-term economic and financial time series, # then use Holt-Winters to forecast one-year ahead # as a generic way to illustrate future expected returns. # Daily data for bonds, stocks, and gold is also analyzed. # # [We shall cover portfolio optimization in another notebook.] # *Dependencies:* # # - Repository: https://github.com/rsvp/fecon235 # # *CHANGE LOG* # # 2017-06-21 Replace groupgeoret() by groupgemrat(). # 2016-01-05 MAJOR REWRITE: use pattern from monthly and daily series # for new functions groupget, grouppc, groupgeoret. # Forecast print out replaced by preservable groupholtf. # Dictionary comprehension clarifies code. # 2016-01-03 Fix issue #2 with v4 and p6 upgrades. # 2015-05-26 Code revision using template v14.12.21. # 2014-10-11 Code review. Template 2014-09-28. # 2014-09-01 First version. from fecon235.fecon235 import * # + # PREAMBLE-p6.15.1223d :: Settings and system details from __future__ import absolute_import, print_function, division system.specs() pwd = system.getpwd() # present working directory as variable. print(" :: $pwd:", pwd) # If a module is modified, automatically reload it: # %load_ext autoreload # %autoreload 2 # Use 0 to disable this feature. # Notebook DISPLAY options: # Represent pandas DataFrames as text; not HTML representation: import pandas as pd pd.set_option( 'display.notebook_repr_html', False ) from IPython.display import HTML # useful for snippets # e.g. HTML('<iframe src=http://en.mobile.wikipedia.org/?useformat=mobile width=700 height=350></iframe>') from IPython.display import Image # e.g. Image(filename='holt-winters-equations.png', embed=True) # url= also works from IPython.display import YouTubeVideo # e.g. YouTubeVideo('1j_HxD4iLn8', start='43', width=600, height=400) from IPython.core import page get_ipython().set_hook('show_in_pager', page.as_hook(page.display_page), 0) # Or equivalently in config file: "InteractiveShell.display_page = True", # which will display results in secondary notebook pager frame in a cell. # Generate PLOTS inside notebook, "inline" generates static png: # %matplotlib inline # "notebook" argument allows interactive zoom and resize. # - # ## Download data and construct a dataframe # # We retrieve the following data of monthly frequency: **(aggregated) inflation, # bonds (zero coupon equivalent of 10-y Treasury), equities (S&P 500), and # gold (London PM fix)** -- all denominated in US dollars -- **then lastly, the # real trade-weighted USD index (Federal Reserve) and US home prices (per Case-Shiller).** # The details for each series is given in their respective notebooks. # If the available data has daily frequency, we use the pandas method called # "resampling" to induce monthly data (enter "monthly??" 
in an # input cell for more details). # # ATTENTION: *The inclusion of home prices, unfortunately, will create a 3-month lag, # due to their release cycle. Since this is a comparative study, # other data will appear out-of-date, but this # section is intended for long-term trends.* # Second half of this notebook will examine more responsive daily data. # + # Specify monthly series of interest as a dictionary: msdic = {'Infl' : m4infl, 'Zero10' : m4zero10, 'SPX' : m4spx, 'XAU' : m4xau, 'USD' : m4usdrtb, 'Homes' : m4homepx } # Download data into a dataframe: msdf = groupget( msdic ) # "groupget??" at input cell gives function details. # - # After downloading the level series, we compute the YoY percentage change # for each series. *This will be the a trailing 12-month statistic, # thus it is overlapping.* # Construct the mega YoY dataframe: mega = grouppc( msdf, freq=12 ) # ### Define start time and get stats # + # Define start time as t0 t0 = '1988' # We can easily rerun the rest of this notebook # by specifying another start time, then: Cell > Run All Below # - # Slice the data: stats( mega[t0:] ) # - There is not much correlation among our assets, # except a mild negative between gold XAU and USD. # (2015-05-26 at -0.51) # # - The difference between the maximum and minimum percentage change # for each item is remarkable. The index for maximum and minimum # quickly gives us the historical epoch. # # - The indexes of the minimums for Homes and SPX show the worst # from the Great Recession was over by 2009-Q1. # ## Boxplot of overlapping annual changes # # The boxplot gives us an idea of the range of annual returns, # and their ***persistence due to overlap***. # Thus trends can be discerned. # For example, if the blue box is located above zero, # then the bulk of returns is positive, and we can # expect the median, arithmetic, and geometric rates to be positive. # Returns outside the box, especially the notches, # perhaps could indicate unusual market conditions # or just plain data entry errors. # # It is also a visual aid for the geometric mean returns # which is most significant as an investment metric. # What appears as outliers may be due to leptokurtosis. # # As usual, the ***red line*** indicates the *median*, but # the **red dot** represents the *latest point*. # Overlapping YoY percentage change: boxplot(mega[t0:], 'Assets YoYm') # where the red dot represents the latest point. # *Red dot outside the mid-range box alerts us to unusual conditions.* # Attention should also be paid to the extreme value "slash" marks # (where outliers are also revealed). # # ## Geometric mean returns on non-overlapping periods # # <NAME>, famous for his proprietary hedge fund, remarked that # one of the most important equations in finance is the penalization # of arithmetic mean by one-half of variance: # # $ g = \mu - (\sigma^2 / 2) $ # # which turns out to be a second-order approximation of geometric mean return. # It is good enough to maximize, instead of considering # intricate mean-variance trade-offs. # We find it useful also as a metric for economic variables. # # Many assets have leptokurtotic returns ("fat-tails") and so a better # approximation for the geometric mean rate is needed # which takes into account the fourth central moment called *kurtosis*. 
# Details are given on Gaussian mixtures in our research # at https://git.io/gmix # # The source code shows us that gemrat() first gives us # the **geometric** mean return, followed by # the **arithmetic mean return, volatility, and Pearson kurtosis**, # then yearly frequency and sample size -- in list format. # + # How are we computing geometric mean returns? # Just add "?" or "??" to variables, procedures, etc. # to find out the details, e.g. # gemrat() is the user friendly version of gemrate() # which in turn relies on the following: # gemreturn_Jean?? # + # Geometric mean rates, non-overlapping, annualized: groupgemrat( msdf[t0:], yearly=12 ) # Note that we applied groupgemrat to msdf, not mega. # Generally gemrat requires price levels. # Note that groupgemrat is just gemrat() for group dataframes. # - # The first element in each list gives us the geometric mean rate. # They should be compared with the respective median rates previously computed. # # The fourth element in each list gives us the kurtosis statistic # where 3.0 is theoretically expected if a distribution is Gaussian. # # Even home prices have slightly fat tails. # The equities market shows the highest level of leptokurtosis. # # Kurtosis is generally observed to increase when the # frequency of data is increased. # #### Note: the geometric mean rates do not include interest and dividend payouts for bonds and equities. # # Some observations in chronological order: # # - 2014-10-12, georet since 1988 # - Inflation in the long-run about 3% annually. # - Bond price alone increases 2% annually (excludes interest income). # - Gold at 1.73% does not keep up with inflation. # - USD at break-even over the long-run. # - Home prices have georet of 3.6%. # # # - 2015-05-27, georet since 1988 # - Inflation in the long-run drops 70 bp to about 2.3% annually. # - Bond price continues its increase at 2% annually. # - Gold at 3.4% reacting more to stronger USD (cf. correlation). # - Home prices also have georet of 3.4% (but low 2.6% volatility). # # # - 2016-01-03, georet since 1988 # - Long-run inflation is 2.3% annually (Current Fed target: 2.0%). # - Bond price increases at 2% annually (but Fed has just hiked rates!). # - Equities at robust 7.6% annually (but ZIRP is finished). # - Gold moving along at 2.9% (reflecting horrible 2015 year). # - Nominal home prices at steady 3.4% per annum. # # # - 2017-06-18, gemrat since 1988 # - Long-run inflation is 2.3% annually (Current Fed target: 2.0%). # - Bond price increases at 1.6% annually (Fed Funds range: 100-125 bp). # - Equities at robust 6.97% annually. # - Gold stagnating at 1.6%. # - Nominal home prices at steady 3.5% per annum. # # Forecasts using Holt-Winters method # # We forecast one-year ahead using the monthly data # and Holt-Winters with default parameters # (which could be optimized for each individual time-series). # # Note that the most current inflation level is rebased to 1, # thus 1.02 would signify 2% increase. # + # These 12-periods ahead forecasts use default alpha and beta values # found empirically to be optimal for a fixed Kalman filter. groupholtf( msdf, h=12 ) # - # ### Forecast log for monthly data # # Changing Holt-Winters *alpha* from 0.20 to 0.10 varies the forecast only slightly. The important parameter is *beta* to capture trend effects. Currently we shall rely on default Holt-Winters settings for robustness. # # # - 2014-09-01, Twelve-month Forecasts given data through 2014-07-01: # - Inflation at 1.44%. 
# - 10-y Bonds price -6.7%, thus rate +75 bp given zero10dur. # - SPX +16.6% to 2280. # - Gold tanks from 1286 to 1067. # - USD +1.4% broadly. # # # - 2014-10-11, Twelve-month Forecasts given ten-year data, robust HW: # - Inflation at 1.9% # - Zero10 indicates slight downward pressure on interest rates. # - SPX to 2239, but market seems skeptical. # - Gold tanks to 1184 (region which we have seen just recently). # - USD definitely has an upward bias against all FX, even NZD and AUD. # - Home prices looking to increase from \$203K to \$220K # # # - 2015-05-28, Twelve-month Forecasts given data through 2015-03-01, robust HW: # - Inflation at 0.5% (which seems dramatic). # - Zero10 price increases by 8.92%, thus 10-year rate decreases by 100 bp. # - SPX to 2322, but no metric says it's fair valued. # - Gold to 1130, which would break support. # - USD very strong, up 12% globally (QE-EU started, and possible Grexit). # - Home prices looking to increase from \$214K to \$223K # # # - 2016-01-03, Twelve-month Forecasts given data through 2015-10-01, robust HW: # - Inflation at 1.1% (still below Fed target, see https://git.io/fed) # - Bonds and equities, forecasting Zero10 and SPX, will be flat. # - Gold to continue down trend, expected to fall to \$941 in a year. # - USD 10% higher in light of divergence between Fed hike(s) and ECB QE. # - Home prices expected to be flat (but watch mortgage spreads). # # # - 2017-06-18, Twelve-month Forecasts given data through 2017-03-01, robust HW: # - Inflation at 2.1% (to slightly exceed Fed target of 2.0%) # - Bonds: Zero10 price to decline -7.3%, so higher bond rates. # - SPX to climb higher by +13.3%. # - Gold expected to fall to \$1211 in a year. # - USD 5.2% higher in light of higher expected interest rates. # - Home prices expected higher by 5.7% (some regions are over-bid). # # DAILY DATA, including major FX # # We examine bonds (zero coupon equivalent of 10-y Treasury), # equities (SPX), gold (XAU), EURUSD, and USDJPY # at higher frequency (daily) for the most recent developments. # # We skip: *inflation and the real trade-weighted USD index # which have a slow monthly release schedule. # As for US home price data, there is a three month lag.* # Specify daily series of interest as a dictionary # where key is name, and value is its data code: dsdic = { 'Zero10' : d4zero10, 'SPX' : d4spx, 'XAU' : d4xau, 'EURUSD' : d4eurusd, 'USDJPY' : d4usdjpy } # Download data into a dataframe: dsdf = groupget( dsdic ) # Construct the dega YoY percent dataframe: dega = grouppc( dsdf, freq=256 ) # ^for daily data # Set the start date for daily series: u0 = '2010-01-01' # + # Plot overlapping percentage changes: boxplot( dega[u0:], 'Assets YoYd' ) # Note that the "last" timestamp will be more # recent than for the monthly series. # - # Although monthly data is more suitable for making long-term forecasts, # daily data is more sensitive to immediate market perturbations. # # 2016-01-03 Good example of the foregoing remark is the # reaction in the overall market due to the first Fed rate hike # in almost a decade on 2015-12-16. ZIRP, zero interest rate program, # has been terminated, along with US quantitative easing, # thus asset prices must adjust to financing constraints. # Note how equities and gold are now below their mid-range boxes. # # 2017-06-18 Only Zero10 is below its mid-range box, # due to recent Fed rate hikes. # The upper outliers for the SPX is quite remarkable. 
stats(dega[u0:])

# - 2015-05-29, Surprisingly, very little correlation between EURUSD and USDJPY: -6%. Gold appears more correlated with USDJPY at -87% than EURUSD at +6%.
#
# - 2016-01-05, Given the latest Fed hike, the correlation to watch is between equities and bonds (-0.69 SPX and Zero10).
#
# - 2017-06-18, Gold less correlated with USDJPY at -80%. Those expecting a sharp stock market decline are long bonds as a hedge, correlation at -61%.

# What are the latest daily prices?
tail( dsdf )

# Geometric mean rates, non-overlapping, annualized:
groupgemrat( dsdf[u0:], yearly=256 )

# ### Closing remarks on daily data
#
# - 2014-10-11, Really near-term picture is too bright for SPX while XAU looks dark. Sell stocks, and start to accumulate gold.
#
#
# - 2015-05-28, XAU georet changed from 2.6% to 1.6%. Zero10 monthly forecast is basically unchanged. Real rate is what matters for gold. USD stronger by 4.8% against both the EUR and JPY.
#
#
# - 2016-01-03, XAU georet changed from 1.6% to -0.41%, commodities including oil going through a bear market. Bonds have not sold off despite the 2015-12-16 Fed rate hike, probably due to world appetite for USD, which is stronger by about 4.3% against EUR and JPY. SPX looks vulnerable given the past maxims about rate hikes, but the Fed is actually still very accommodative.
#
#
# - 2017-06-18, XAU gemrat appears stagnant at 0.44%. SPX still looks vulnerable given recent rate hikes, but the Fed is still accommodative despite very low unemployment. Bonds are set to accept money flow when the stock market tanks.
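# A final numerical illustration of the variance penalty discussed earlier, g = mu - sigma^2/2,
# using purely hypothetical numbers (not any of the series above):

mu, sigma = 0.07, 0.16         # hypothetical arithmetic mean return and volatility
g_approx = mu - (sigma**2)/2   # second-order approximation of the geometric mean rate
print( g_approx )              # ~0.057, i.e. roughly 5.7% per annum, before any kurtosis correction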
nb/fred-georeturns.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:test] # language: python # name: conda-env-test-py # --- # ## Illustrative example for path finding # + import os import random import numpy as np import matplotlib.pyplot as plt from itertools import permutations from functions import * # + # setup seed def seed_everything(seed): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) SEED = 0 seed_everything(SEED) # - simulations_list = [] # #### Define random case # + nc=4 nr=3 X = np.array([10,9,3,0,3,10,10,7,0,1,1,10]).reshape(nr,nc) plt.imshow(X, cmap='Blues') plt.axis('off') for (j,i),label in np.ndenumerate(X): plt.text(i,j,label,ha='center',va='center', fontsize=15) plt.savefig('figs/ex_normal.png') plt.show() # - # ### Get list of unique routes # - Assume that we started from (0,0) to (2,3) # - Define 1 as rightward, -1 as downword unique_routes = get_unique_routes(X) unique_routes unique_paths = get_path_from_routes(X, unique_routes) unique_paths # ### Get the route info # - Calculate the weight sum of each path # - Find the path with the largest weight sum route_info_list = get_path_info_from_matrix(X) route_info_list # ### Bayseian Bootstrap # - Conduct baysian bootstrap with B=1000 # - Sample statistics find the largest weight sum route index # - Using Dirichlet prior and data $X$ # + boot_sample = bayesian_bootstrap(X, get_max_path_idx, 1000, unique_paths) boot_sample_out = [] for idx in range(0,len(unique_routes)): boot_sample_out.append([idx, sum(np.asarray(boot_sample)==idx)]) boot_sample_out = np.vstack(boot_sample_out) boot_sample_out # - # ### Determine baseline # - The baseline route is determined by the sampling frequency # - **baseline**: Best route determined by bayesian bootstrap # - **max_weight_route**: Largest weight route with the constraints baseline = unique_paths[np.argmax(boot_sample_out[:,1])] baseline max_weight_route = unique_paths[route_info_list[0]] max_weight_route # + simulations_list.append(boot_sample_out) plt.bar(boot_sample_out[:,0], boot_sample_out[:,1]) plt.title('Path sampling with Bayesian Bootstrap: normal') plt.xlabel('Path index') plt.ylabel('Counts') plt.xticks(range(len(unique_paths))) plt.savefig('figs/ex_normal_distribution.png') plt.show() # - # ## Robustness # #### Noisy case # + nc=4 nr=3 X = np.array([12,11,4,2,5,13,13,9,2,5,4,13]).reshape(nr,nc) plt.imshow(X, cmap='Blues') plt.axis('off') for (j,i),label in np.ndenumerate(X): plt.text(i,j,int(label),ha='center',va='center', fontsize=15) plt.savefig('figs/ex_noisy.png') plt.show() # + unique_routes = get_unique_routes(X) unique_paths = get_path_from_routes(X, unique_routes) #path_info_list = get_path_info_from_matrix(X) boot_sample = bayesian_bootstrap(X, get_max_path_idx, 1000, unique_paths) boot_sample_out = [] for idx in range(0,len(unique_routes)): boot_sample_out.append([idx, sum(np.asarray(boot_sample)==idx)]) boot_sample_out = np.vstack(boot_sample_out) best_route = unique_paths[np.argmax(boot_sample_out[:,1])] # + simulations_list.append(boot_sample_out) plt.bar(boot_sample_out[:,0], boot_sample_out[:,1]) plt.title('Path sampling with Bayesian Bootstrap: noisy') plt.xlabel('Path index') plt.ylabel('Counts') plt.xticks(range(len(unique_paths))) plt.savefig('figs/ex_noisy_distribution.png') plt.show() # - # ## Data missing case # + nc=4 nr=3 X = np.array([10,9,3,0,3,10,0,0,0,0,0,10]).reshape(nr,nc) plt.imshow(X, 
cmap='Blues')
plt.axis('off')
for (j,i),label in np.ndenumerate(X):
    plt.text(i,j,int(label),ha='center',va='center', fontsize=15)
plt.savefig('figs/ex_missing.png')
plt.show()

# +
unique_routes = get_unique_routes(X)
unique_paths = get_path_from_routes(X, unique_routes)
#path_info_list = get_path_info_from_matrix(X)

boot_sample = bayesian_bootstrap(X, get_max_path_idx, 1000, unique_paths)

boot_sample_out = []
for idx in range(0,len(unique_routes)):
    boot_sample_out.append([idx, sum(np.asarray(boot_sample)==idx)])
boot_sample_out = np.vstack(boot_sample_out)

best_route = unique_paths[np.argmax(boot_sample_out[:,1])]

# +
simulations_list.append(boot_sample_out)

plt.bar(boot_sample_out[:,0], boot_sample_out[:,1])
plt.title('Path sampling with Bayesian Bootstrap: missing')
plt.xlabel('Path index')
plt.ylabel('Counts')
plt.xticks(range(len(unique_paths)))
plt.savefig('figs/ex_missing_distribution.png')
plt.show()
# -

# ## Compare results

result_normal = simulations_list[0]
result_noisy = simulations_list[1]
result_missing = simulations_list[2]

# +
plt.bar(result_normal[:,0]-0.2, result_normal[:,1], width=0.2, align='center', label='normal')
plt.bar(result_noisy[:,0], result_noisy[:,1], width=0.2, align='center', label='noisy')
plt.bar(result_missing[:,0]+0.2, result_missing[:,1], width=0.2, align='center', label='missing')
plt.legend()
plt.title('Robust simulation for Path sampling with Bayesian Bootstrap')
plt.xlabel('Path index')
plt.ylabel('Counts')
plt.xticks(range(len(unique_paths)))
plt.savefig('figs/ex_simluation_comparison.png')
plt.show()
# -

# Convert the fraction to a percentage so the value matches the '%' label (path index 6 is hard-coded from the runs above):
print('best_route selection probability for normal: \t{:.2f}%'.format(100 * result_normal[6,1]/np.sum(result_normal[:,1])))
print('best_route selection probability for noisy: \t{:.2f}%'.format(100 * result_noisy[6,1]/np.sum(result_noisy[:,1])))
print('best_route selection probability for missing: \t{:.2f}%'.format(100 * result_missing[6,1]/np.sum(result_missing[:,1])))
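# As an aside on the Bayesian bootstrap used throughout: in the standard Rubin (1981) formulation,
# which the `functions.bayesian_bootstrap` helper presumably follows, each replicate reweights the
# data with a single Dirichlet(1,...,1) draw instead of resampling with replacement, and the statistic
# (here the index of the best path) is recomputed under those weights. A minimal illustration of one
# such weight draw over the 12 cells of `X` (illustrative only, not part of the original simulation):

w = np.random.dirichlet(np.ones(X.size)).reshape(X.shape)  # one set of random weights, summing to 1
print(np.round(w, 3))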
tutorial codes/Illustrative example simulation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # Cosmo MCMC is landing on biased HOD + Cosmology. I'm gonna first look at the code to make preds, and compare to the emulator. from pearce.emulator import OriginalRecipe, ExtraCrispy, SpicyBuffalo from pearce.mocks import cat_dict import numpy as np from os import path import h5py from ast import literal_eval import matplotlib #matplotlib.use('Agg') from matplotlib import pyplot as plt # %matplotlib inline import seaborn as sns sns.set() chain_fname = '/home/users/swmclau2/scratch/PearceMCMC/b0r0_xi_gm.hdf5' f = h5py.File(chain_fname, 'r') # + emu_type_dict = {'OriginalRecipe':OriginalRecipe, 'ExtraCrispy': ExtraCrispy, 'SpicyBuffalo': SpicyBuffalo} fixed_params = f.attrs['fixed_params'] fixed_params = {} if fixed_params is None else literal_eval(fixed_params) #metric = f.attrs['metric'] if 'metric' in f.attrs else {} emu_hps = f.attrs['emu_hps'] emu_hps = {} if emu_hps is None else literal_eval(emu_hps) seed = f.attrs['seed'] seed = int(time()) if seed is None else seed training_file = f.attrs['training_file'] emu_type = f.attrs['emu_type'] if type(training_file) is str: training_file = [training_file] if type(emu_type) is str: emu_type = [emu_type] emus = [] np.random.seed(seed) for et, tf in zip(emu_type, training_file): # TODO iterate over the others? emu = emu_type_dict[et](tf, fixed_params = fixed_params, **emu_hps) emus.append(emu) # TODO write hps to the file too # - params = {} sim_params = literal_eval(f.attrs['sim']) params.update(sim_params['hod_params']) params.update(sim_params['cosmo_params']) del params['logMmin'] r_bins = np.logspace(-1.1, 1.6, 19) rpoints = (r_bins[1:]+r_bins[:-1])/2.0 sim_params = literal_eval(f.attrs['sim']) cosmo_param_dict = sim_params['cosmo_params'] em_params = params y_emu = 10**emu.emulate_wrt_r(em_params)[0] boxno, realization = sim_params['sim_hps']['boxno'], sim_params['sim_hps']['realization'] y_calc_jk = f['data'] #y_calc_mean = np.loadtxt('/home/users/swmclau2/Git/pearce/bin/mcmc/xi_gm_true_mean_%d%d.npy'%(boxno, realization)) y_cov_jk = f['cov'] emu_cov = np.loadtxt(path.join("/home/users/swmclau2/Git/pearce/bin/mcmc/config/", literal_eval(f.attrs['cov'])['emu_cov_fname'])) im = plt.imshow(np.log10(emu_cov) ) plt.colorbar(im) plt.show() im = plt.imshow(np.log10(y_cov_jk) ) plt.colorbar(im) plt.show() im = plt.imshow(np.log10(y_cov_jk-emu_cov) ) plt.colorbar(im) plt.show() print np.diag(emu_cov) print np.diag(y_cov_jk-emu_cov) y_err = np.sqrt(np.diag(y_cov_jk)) n_walkers, n_burn = f.attrs['nwalkers'], 10000 chain = f['chain'].value[n_walkers*n_burn:] param_names = f.attrs['param_names'] MAP = chain.mean(axis = 0) print MAP em_params.update(dict(zip(param_names, MAP))) MAP_xi = 10**emu.emulate_wrt_r(em_params)[0] # + varied_param_name = 'H0' bounds = emu.get_param_bounds(varied_param_name) Nvp = 100 vp_vals = np.linspace(bounds[0], bounds[1], Nvp) varied_param_xis = [] for val in vp_vals: em_params[varied_param_name] = val varied_param_xis.append(10**emu.emulate_wrt_r(em_params)) # + chain_vals = [] rand_idxs = np.random.choice(chain.shape[0], size = 2000, replace = False) rand_chain = chain[rand_idxs] for row in rand_chain: cosmo_params = dict(zip(param_names, row)) #cosmo_params['Neff'] = cosmo_param_dict['Neff'] #cosmo_params['ln10As'] = cosmo_param_dict['ln10As'] em_params.update(cosmo_params) 
chain_vals.append(10**emu.emulate_wrt_r(em_params)) # - vp_palette = sns.cubehelix_palette(Nvp) # + fig = plt.figure(figsize = (10,6)) for val in chain_vals: plt.plot(rpoints, val[0], c= 'm', alpha = 0.1 ) #for i, (val, pval) in enumerate(zip(varied_param_xis, vp_vals)): # plt.plot(rpoints, val[0], color = vp_palette[i], alpha = 0.8) #plt.plot(rpoints, MAP_xi, label = 'MAP') plt.errorbar(rpoints, y_calc_jk, yerr= y_err, c = 'k', label = 'Truth') #plt.plot(rpoints, y_calc_mean , label = 'Mean') plt.errorbar(rpoints, y_emu, yerr = y_err ,c = 'g',lw =2, label = 'Emu at Truth') #plt.xscale('log') plt.loglog() plt.title('Varying %s'%varied_param_name) plt.legend(loc = 'best') plt.title('Emu from Chain vs. Truth') plt.xlabel('r [Mpc]') plt.ylabel(r'$\xi_{gm}(r)$') plt.show() # - print y_err # + fig = plt.figure(figsize = (10,6)) for val in chain_vals: plt.plot(rpoints, np.abs(val[0]-y_calc_jk)/y_calc_jk, c= 'm', alpha = 0.1 ) #for i, (val, pval) in enumerate(zip(varied_param_xis, vp_vals)): # plt.plot(rpoints, val[0], color = vp_palette[i], alpha = 0.8) #plt.plot(rpoints, np.abs(MAP_xi-y_calc_jk)/y_calc_jk, label = 'MAP') #plt.errorbar(rpoints, y_calc_jk.value-y_calc_jk.value, yerr= y_err, c = 'k', label = 'Truth') #plt.plot(rpoints, y_calc_mean , label = 'Mean') plt.errorbar(rpoints, np.abs(y_emu-y_calc_jk)/y_calc_jk, yerr = y_err ,c = 'g',lw =2, label = 'Emu at Truth') #plt.xscale('log') plt.loglog() plt.title('Varying %s'%varied_param_name) plt.legend(loc = 'best') plt.show() # + fig = plt.figure(figsize = (10,6)) #for val in chain_vals: # plt.plot(rpoints, val[0]-y_calc_jk, c= 'm', alpha = 0.1 ) for i, (val, pval) in enumerate(zip(varied_param_xis, vp_vals)): plt.plot(rpoints, val[0], color = vp_palette[i], alpha = 0.05) #plt.plot(rpoints, MAP_xi, label = 'MAP') plt.errorbar(rpoints, y_calc_jk, yerr= y_err, c = 'k', label = 'Truth') #plt.plot(rpoints, y_calc_mean , label = 'Mean') #plt.plot(rpoints, y_emu, c = 'g',lw =2, label = 'Emu at Truth') #plt.xscale('log') plt.loglog() plt.title('Varying %s'%varied_param_name) plt.legend(loc = 'best') plt.show() # + for val in chain_vals: plt.plot(rpoints, val[0], c= 'r', alpha = 0.1 ) #plt.errorbar(rpoints, y_calc, yerr= y_err, c = 'k', label = 'Truth') plt.errorbar(rpoints, y_calc_jk, yerr= y_err_jk, c = 'b', label = 'Truth Jk') plt.plot(rpoints, y_emu, c = 'g',lw =2, label = 'Emu at Truth') plt.xscale('log') #plt.loglog() plt.legend(loc = 'best') plt.show() # + fig = plt.figure(figsize = (12, 5)) plt.subplot(121) #plt.plot(rpoints, y_calc, label = 'Sim') plt.plot(rpoints, y_calc, label = 'Sim') plt.plot(rpoints, y_pred, label = 'Emu') plt.xscale('log') plt.legend(loc = 'best') plt.ylabel(r'$\xi_{gg}(r)$') plt.xlabel(r'$r$ [Mpc]') plt.subplot(122) #plt.plot(rpoints, y_calc/y_point, label = 'Sim') plt.plot(rpoints, y_pred/y_calc, label = 'Emu/Sim') plt.legend(loc = 'best') plt.xlabel(r'$r$ [Mpc]') plt.xscale('log') plt.show() # - print y_pred/y_calc # + active="" # #plt.plot(emu.scale_bin_centers, xi_vals.T, alpha = 0.4) # #plt.plot(emu.scale_bin_centers, y) # plt.plot(emu.scale_bin_centers, np.abs(y_pred/xi_vals).T) # # plt.xscale('log') # plt.ylim([0.0, 2.0]) # - fixed_params = {'z':z, 'cosmo': 3}#, 'r':0.53882047} train_x, train_y, _, info = emu.get_data(test_file, fixed_params, None)#, skip_nans = False) info cpv = cat._get_cosmo_param_names_vals() cosmo_params = dict(zip(cpv[0], cpv[1])) for idx in xrange(100): x_point = train_x[idx*emu.n_bins, :-1] y_point = train_y[idx*emu.n_bins:(idx+1)*emu.n_bins] pop_params = 
dict(zip(info['ordered_params'].keys(), x_point)) #add_logMmin(pop_params, cat) print pop_params #_xi_vals = [] #for i in xrange(10): # cat.populate(pop_params, min_ptcl=100) # _xi_vals.append(cat.calc_xi(r_bins)) #xi_vals = np.log10(np.array(_xi_vals)) #y_calc = xi_vals.mean(axis = 0) pop_params.update(cosmo_params) #del pop_params['logMmin'] y_pred = emu.emulate_wrt_r(pop_params)[0] fig = plt.figure(figsize = (14, 6)) plt.subplot(121) #plt.plot(rpoints, y_calc, label = 'Sim') plt.plot(rpoints, y_point, label = 'Data') plt.plot(rpoints, y_pred, label = 'Emu') plt.xscale('log') plt.legend(loc = 'best') plt.subplot(122) #plt.plot(rpoints, y_calc/y_point, label = 'Sim') plt.plot(rpoints, y_pred/y_point, label = 'Emu/True') plt.legend(loc = 'best') plt.xscale('log') plt.show()
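# Returning to the chain-vs-truth comparison above: a quick chi-squared summary of the
# emulator prediction at the true parameters against the jackknife measurement
# (a minimal sketch, assuming `y_emu`, `y_calc_jk`, `y_cov_jk` and `emu_cov` from the
# earlier cells are still in scope; the data and emulator covariances are simply added)

# +
resid = y_emu - np.asarray(y_calc_jk)
cov = np.asarray(y_cov_jk) + emu_cov
chi2 = np.dot(resid, np.linalg.solve(cov, resid))
print 'chi2 = %.2f over %d bins' % (chi2, len(resid))
# -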
notebooks/Emus Preds from Chains XiGM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6.9 64-bit # name: python3 # --- # # nnUNet Segmentation Service Client # # This Jupyter Notebook provides an example on how to interact with a running nnUNet Segmentation service # # ### Getting Started: # 1. Set the host name or IP address of the server running the service # 2. Set the appropriate port which the service is running on # 3. Create an API key on the server for this client and enter it below # 4. Leave the algorithm name, since this should always be running as 'nnUNet Segmentation' # 5. While experimenting, set the log_level to 'DEBUG'. This will generate a lot of output so set it to 'INFO' when running over a lot of data to reduce the amount of output # + import sys sys.path.append("../../..") import os from platipy.backend.client import PlatiPyClient from platipy.imaging.tests.data import get_lung_nifti from loguru import logger host = "127.0.0.1" # Set the host name or IP of the server running the service here host = "10.55.72.183" port = 8001 # Set the port the service was configured to run on here api_key = 'XXX' # Put API key here api_key = "fc1858e6-4432-47a4-b3b6-6df0ff652c38" algorithm_name = "nnUNet Segmentation" # The name of the algorithm, in this case it should be left as is log_level = "INFO" # Choose an appropriate level of logging output: "DEBUG" or "INFO" logger.remove() handler_id = logger.add(sys.stderr, level=log_level) # - # ### Fetch some data # # The next cell fetches some test data to work with from TCIA. We can use this as our atlas for this example as well as use one of the images to infer the auto-segmentation. lung_data = get_lung_nifti() # ### Create Client Instance # # The PlatiPyClient provides useful functions to interact with the running service # + tags=[] client = PlatiPyClient(host, port, api_key, algorithm_name) # - # ### Add a dataset # # Images can only be added to a dataset. In theory you could add multiple images to one dataset and the algorithm would run on all of them. But often better control can be gained by only adding one image per dataset and runnin the algorithm on each separately. # + tags=[] dataset = client.add_dataset() # - # ### Add an image to the dataset # # The following cell grabs the first test image file and adds it as a data object to the dataset created above. # # This is the image that will be inferred by the service. We will configure the path to the atlas below. pat_id = list(images.keys())[0] ct_file = os.path.join(images[pat_id], "CT.nii.gz") data_object = client.add_data_object(dataset, file_path=ct_file) # ### Refresh the dataset # # The next cell demonstrates how to refresh the dataset and see that the image has been added as a input data object client.get_dataset(dataset) # ### View and modify the algorithm configuration # # Here we can modify the default settings for the algorithm. There are a number of settings which can be modified. 
Here the path to the atlas images # + atlas_cases = list(images.keys())[1:] atlas_path = os.path.dirname(images[atlas_cases[0]]) settings = client.get_default_settings() # Atlas settings settings["atlasSettings"]["atlasPath"] = atlas_path settings["atlasSettings"]["atlasStructures"] = ["Heart","Lung_L","Lung_R"] settings["atlasSettings"]["atlasIdList"] = atlas_cases settings["atlasSettings"]["atlasImageFormat"] = '{0}/CT.nii.gz' settings["atlasSettings"]["atlasLabelFormat"] = '{0}/Struct_{1}.nii.gz' # Run the DIR a bit more than default settings['deformableSettings']['iterationStaging'] = [75,50,50] # Run the IAR using the heart settings["IARSettings"]["referenceStructure"] = 'Lung_L' # Set the threshold settings['labelFusionSettings']["optimalThreshold"] = {"Heart":0.5, "Lung_L": 0.5, "Lung_R": 0.5} # No vessels settings['vesselSpliningSettings']['vesselNameList'] = [] # - # ### Run the algorithm # # Now everything is ready to run the algorithm using the dataset and the modified settings we generated above # + tags=[] for status in client.run_algorithm(dataset, config=settings): print('.', end='') # - # ### Download the output # # Once the algorithm has finished running, we can download the output objects (here downloaded into the results directory)r.json() output_directory = os.path.join(".", "results", pat_id) client.download_output_objects(dataset, output_path=output_directory)
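# ### Inspect the downloaded objects
#
# A quick look at what came back (a minimal sketch, assuming the service wrote NIfTI structure files into the `output_directory` used above; the exact file names depend on the algorithm output). SimpleITK is assumed to be available, as platipy itself depends on it.

# +
import SimpleITK as sitk

for fname in sorted(os.listdir(output_directory)):
    fpath = os.path.join(output_directory, fname)
    if fname.endswith(".nii.gz"):
        # Read each NIfTI output and report its voxel dimensions
        print(fname, sitk.ReadImage(fpath).GetSize())
    else:
        print(fname)
# -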
examples/experimental/nnunet_service.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="ChjuaQjm_iBf" # ##### Copyright 2020 The TensorFlow Authors. # + colab={} colab_type="code" id="uWqCArLO_kez" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="ikhIvrku-i-L" # # Taking advantage of context features # # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/recommenders/examples/context_features"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/recommenders/blob/main/docs/examples/context_features.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/recommenders/blob/main/docs/examples/context_features.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/recommenders/docs/examples/context_features.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # </table> # + [markdown] colab_type="text" id="rrDVNe7Vdqhr" # In [the featurization tutorial](featurization) we incorporated multiple features beyond just user and movie identifiers into our models, but we haven't explored whether those features improve model accuracy. # # Many factors affect whether features beyond ids are useful in a recommender model: # # 1. __Importance of context__: if user preferences are relatively stable across contexts and time, context features may not provide much benefit. If, however, users preferences are highly contextual, adding context will improve the model significantly. For example, day of the week may be an important feature when deciding whether to recommend a short clip or a movie: users may only have time to watch short content during the week, but can relax and enjoy a full-length movie during the weekend. Similarly, query timestamps may play an imporatant role in modelling popularity dynamics: one movie may be highly popular around the time of its release, but decay quickly afterwards. Conversely, other movies may be evergreens that are happily watched time and time again. # 2. __Data sparsity__: using non-id features may be critical if data is sparse. With few observations available for a given user or item, the model may struggle with estimating a good per-user or per-item representation. To build an accurate model, other features such as item categories, descriptions, and images have to be used to help the model generalize beyond the training data. 
This is especially relevant in [cold-start](https://en.wikipedia.org/wiki/Cold_start_(recommender_systems))) situations, where relatively little data is available on some items or users. # # In this tutorial, we'll experiment with using features beyond movie titles and user ids to our MovieLens model. # + [markdown] colab_type="text" id="D7RYXwgbAcbU" # ## Preliminaries # # We first import the necessary packages. # + colab={} colab_type="code" id="2bK2g6_Mbn73" # !pip install -q tensorflow-recommenders # !pip install -q --upgrade tensorflow-datasets # + colab={} colab_type="code" id="XbwMjnLP5nZ_" import os import tempfile import numpy as np import tensorflow as tf import tensorflow_datasets as tfds import tensorflow_recommenders as tfrs # + [markdown] colab_type="text" id="tgKIjpQLAiax" # We follow [the featurization tutorial](featurization) and keep the user id, timestamp, and movie title features. # + colab={} colab_type="code" id="kc2REbOO52Fl" ratings = tfds.load("movie_lens/100k-ratings", split="train") movies = tfds.load("movie_lens/100k-movies", split="train") ratings = ratings.map(lambda x: { "movie_title": x["movie_title"], "user_id": x["user_id"], "timestamp": x["timestamp"], }) movies = movies.map(lambda x: x["movie_title"]) # + [markdown] colab_type="text" id="5YZ2q5RXYNI6" # We also do some housekeeping to prepare feature vocabularies. # + colab={} colab_type="code" id="G5CVveCS9Doq" timestamps = np.concatenate(list(ratings.map(lambda x: x["timestamp"]).batch(100))) max_timestamp = timestamps.max() min_timestamp = timestamps.min() timestamp_buckets = np.linspace( min_timestamp, max_timestamp, num=1000, ) unique_movie_titles = np.unique(np.concatenate(list(movies.batch(1000)))) unique_user_ids = np.unique(np.concatenate(list(ratings.batch(1_000).map( lambda x: x["user_id"])))) # + [markdown] colab_type="text" id="mFJcCVMUQou3" # ## Model definition # + [markdown] colab_type="text" id="PtS6a4sgmI-c" # ### Query model # # We start with the user model defined in [the featurization tutorial](featurization) as the first layer of our model, tasked with converting raw input examples into feature embeddings. However, we change it slightly to allow us to turn timestamp features on or off. This will allow us to more easily demonstrate the effect that timestamp features have on the model. In the code below, the `use_timestamps` parameter gives us control over whether we use timestamp features. 
# + colab={} colab_type="code" id="_ItzYwMW42cb" class UserModel(tf.keras.Model): def __init__(self, use_timestamps): super().__init__() self._use_timestamps = use_timestamps self.user_embedding = tf.keras.Sequential([ tf.keras.layers.experimental.preprocessing.StringLookup( vocabulary=unique_user_ids), tf.keras.layers.Embedding(len(unique_user_ids) + 2, 32), ]) if use_timestamps: self.timestamp_embedding = tf.keras.Sequential([ tf.keras.layers.experimental.preprocessing.Discretization(timestamp_buckets.tolist()), tf.keras.layers.Embedding(len(timestamp_buckets) + 2, 32), ]) self.normalized_timestamp = tf.keras.layers.experimental.preprocessing.Normalization() self.normalized_timestamp.adapt(timestamps) def call(self, inputs): if not self._use_timestamps: return self.user_embedding(inputs["user_id"]) return tf.concat([ self.user_embedding(inputs["user_id"]), self.timestamp_embedding(inputs["timestamp"]), self.normalized_timestamp(inputs["timestamp"]), ], axis=1) # + [markdown] colab_type="text" id="B9IqNTLmpJzs" # Note that our use of timestamp features in this tutorial interacts with our choice of training-test split in an undesirable way. Because we have split our data randomly rather than chronologically (to ensure that events that belong to the test dataset happen later than those in the training set), our model can effectively learn from the future. This unrealistic: after all, we cannot train a model today on data from tomorrow. # # This means that adding time features to the model lets it learn _future_ interaction patterns. We do this for illustration purposes only: the MovieLens dataset itself is very dense, and unlike many real-world datasets does not benefit greatly from features beyond user ids and movie titles. # # This caveat aside, real-world models may well benefit from other time-based features such as time of day or day of the week, especially if the data has strong seasonal patterns. # + [markdown] colab_type="text" id="XleMceZNHC__" # ### Candidate model # # For simplicity, we'll keep the candidate model fixed. Again, we copy it from the [featurization](featurization) tutorial: # + colab={} colab_type="code" id="oQZHX8bEHPOk" class MovieModel(tf.keras.Model): def __init__(self): super().__init__() max_tokens = 10_000 self.title_embedding = tf.keras.Sequential([ tf.keras.layers.experimental.preprocessing.StringLookup( vocabulary=unique_movie_titles), tf.keras.layers.Embedding(len(unique_movie_titles) + 2, 32) ]) self.title_vectorizer = tf.keras.layers.experimental.preprocessing.TextVectorization( max_tokens=max_tokens) self.title_text_embedding = tf.keras.Sequential([ self.title_vectorizer, tf.keras.layers.Embedding(max_tokens, 32, mask_zero=True), tf.keras.layers.GlobalAveragePooling1D(), ]) self.title_vectorizer.adapt(movies) def call(self, titles): return tf.concat([ self.title_embedding(titles), self.title_text_embedding(titles), ], axis=1) # + [markdown] colab_type="text" id="Cc4KbTNwHSvD" # ### Combined model # # With both `UserModel` and `QueryModel` defined, we can put together a combined model and implement our loss and metrics logic. # # Note that we also need to make sure that the user model and query model output embeddings of compatible size. 
Because we'll be varying their sizes by adding more features, the easiest way to accomplish this is to use a dense projection layer after each model: # # # + colab={} colab_type="code" id="26_hNJPKIh4-" class MovielensModel(tfrs.models.Model): def __init__(self, use_timestamps): super().__init__() self.query_model = tf.keras.Sequential([ UserModel(use_timestamps), tf.keras.layers.Dense(32) ]) self.candidate_model = tf.keras.Sequential([ MovieModel(), tf.keras.layers.Dense(32) ]) self.task = tfrs.tasks.Retrieval( metrics=tfrs.metrics.FactorizedTopK( candidates=movies.batch(128).map(self.candidate_model), ), ) def compute_loss(self, features, training=False): # We only pass the user id and timestamp features into the query model. This # is to ensure that the training inputs would have the same keys as the # query inputs. Otherwise the discrepancy in input structure would cause an # error when loading the query model after saving it. query_embeddings = self.query_model({ "user_id": features["user_id"], "timestamp": features["timestamp"], }) movie_embeddings = self.candidate_model(features["movie_title"]) return self.task(query_embeddings, movie_embeddings) # + [markdown] colab_type="text" id="8YXjsRsLTVzt" # ## Experiments # + [markdown] colab_type="text" id="QY7MTwMruoKh" # ### Prepare the data # # We first split the data into a training set and a testing set. # + colab={} colab_type="code" id="wMFUZ4dyTdYd" tf.random.set_seed(42) shuffled = ratings.shuffle(100_000, seed=42, reshuffle_each_iteration=False) train = shuffled.take(80_000) test = shuffled.skip(80_000).take(20_000) cached_train = train.shuffle(100_000).batch(2048) cached_test = test.batch(4096).cache() # + [markdown] colab_type="text" id="I2HEuTBzJ9w5" # ### Baseline: no timestamp features # # We're ready to try out our first model: let's start with not using timestamp features to establish our baseline. # + colab={} colab_type="code" id="NkoLkiQdK4Um" model = MovielensModel(use_timestamps=False) model.compile(optimizer=tf.keras.optimizers.Adagrad(0.1)) model.fit(cached_train, epochs=3) train_accuracy = model.evaluate( cached_train, return_dict=True)["factorized_top_k/top_100_categorical_accuracy"] test_accuracy = model.evaluate( cached_test, return_dict=True)["factorized_top_k/top_100_categorical_accuracy"] print(f"Top-100 accuracy (train): {train_accuracy:.2f}.") print(f"Top-100 accuracy (test): {test_accuracy:.2f}.") # + [markdown] colab_type="text" id="p90vFk8LvJXp" # This gives us a baseline top-100 accuracy of around 0.2. # # # + [markdown] colab_type="text" id="BjJ1anzuLXgN" # ### Capturing time dynamics with time features # # Do the result change if we add time features? # + colab={} colab_type="code" id="11qAr5gGMUxE" model = MovielensModel(use_timestamps=True) model.compile(optimizer=tf.keras.optimizers.Adagrad(0.1)) model.fit(cached_train, epochs=3) train_accuracy = model.evaluate( cached_train, return_dict=True)["factorized_top_k/top_100_categorical_accuracy"] test_accuracy = model.evaluate( cached_test, return_dict=True)["factorized_top_k/top_100_categorical_accuracy"] print(f"Top-100 accuracy (train): {train_accuracy:.2f}.") print(f"Top-100 accuracy (test): {test_accuracy:.2f}.") # + [markdown] colab_type="text" id="NHnzYfQrOj8I" # This is quite a bit better: not only is the training accuracy much higher, but the test accuracy is also substantially improved. 
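# + [markdown]
# The train-and-evaluate pattern above is repeated verbatim for both variants. A small helper makes such comparisons easier to extend (a minimal sketch reusing `MovielensModel`, `cached_train` and `cached_test` from above; `run_experiment` is a name introduced here purely for illustration):

# +
def run_experiment(use_timestamps, epochs=3):
  """Build, train and evaluate one model variant; return top-100 train/test accuracy."""
  model = MovielensModel(use_timestamps=use_timestamps)
  model.compile(optimizer=tf.keras.optimizers.Adagrad(0.1))
  model.fit(cached_train, epochs=epochs, verbose=0)
  metric = "factorized_top_k/top_100_categorical_accuracy"
  train_acc = model.evaluate(cached_train, return_dict=True, verbose=0)[metric]
  test_acc = model.evaluate(cached_test, return_dict=True, verbose=0)[metric]
  return train_acc, test_acc

# e.g. run_experiment(False) and run_experiment(True) reproduce the two runs above.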
# + [markdown] colab_type="text" id="dB09crfpgBx7"
# ## Next Steps
#
# This tutorial shows that even simple models can become more accurate when incorporating more features. However, to get the most out of your features it's often necessary to build larger, deeper models. Have a look at the [deep retrieval tutorial](deep_recommenders) to explore this in more detail.
docs/examples/context_features.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import requests from jupyter_widget_stixview.widget import StixviewGraph def create_new_cell(contents): from IPython.core.getipython import get_ipython shell = get_ipython() payload = dict( source='set_next_input', text=contents, replace=False, ) shell.payload_manager.write_payload(payload, single=False) # + repo_url = "https://api.github.com/repos/oasis-open/cti-stix-elevator/contents/idioms-json-2.1" r = requests.get(repo_url) r.raise_for_status() show_large = False large_bundle_filenames = ['fireeye-pivy-report-with-indicators.json'] file_urls = [f['download_url'] for f in r.json()] limit = 100 for url in file_urls[:limit]: if not show_large and any([y in url for y in large_bundle_filenames]): print("skipping " + url) continue # Uncomment following line and execute this cell to add new cells per source url # # create_new_cell("StixviewGraph(\nurl='{}',\nproperties={{'graphHeight':300}})".format(url)) # - StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/yara-test-mechanism.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/victim-targeting.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/victim-targeting-sector.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/threat-actor-leveraging-attack-patterns-and-malware.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/snort-test-mechanism.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/resolve_to.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/pattern_id_ref_issue.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/observable_composition.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/observable-with-networkconnection-pattern.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/observable-with-networkconnection-pattern-advanced.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/observable-with-networkconnection-instance.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/network-traffic-request-pattern.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/network-socket-pattern.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/network-socket-observable.json', 
properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/multiple-reports-in-package.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/malware-indicator-for-file-hash.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/malware-characterization-using-maec.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/malicious-email-indicator-with-attachment.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/malicious-email-indicator-with-addresses.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/kill-chain.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/issue62.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/issue-162.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/issue-148.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/indicator-w-kill-chain.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/indicator-for-malicious-url.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/indicator-for-c2-ip-address.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/indicator-and-observable.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/incident-malware.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/identifying-a-threat-actor-group.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/icmp_pattern.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/icmp_observable.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/fix-embedded-relationship-example.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/file-hash-reputation.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/file-and-directory.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/file-and-directory-pattern.json', 
properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/email-links-observable.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/cve-in-exploit-target.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/command-and-control-ip-list.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/campaign-v-actors.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/block-network-traffic.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/archive-file.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/archive-file-pattern.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/account_indicator.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/Mandiant_APT1_Report.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/Appendix_G_IOCs_Full.json', properties={'graphHeight':300}) StixviewGraph( url='https://raw.githubusercontent.com/oasis-open/cti-stix-elevator/master/idioms-json-2.1/141-TLP-marking-structures.json', properties={'graphHeight':300})
stixview-stix21-idioms.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda root] # language: python # name: conda-root-py # --- import numpy as np class Perceptron(object): """ Perceptron classifier. Parameters ----------- eta : float Learning Rate (between 0.0 and 1.0) n_iter : int Passes over the training set Attributes ----------- w_ : 1d array Weights after fitting. errors_ : list Number of misclassifications in every epoch. """ def __init__(self, eta=0.01, n_iter=10): self.eta = eta self.n_iter = n_iter def fit(self, X,y): """Fit training data. Parameters ----------- X : {array-like}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values Returns ----------- self : object """ self.w_ = np.zeros(1 + X.shape[1]) self.errors_ = [] for _ in range(self.n_iter): errors = 0 for xi, target in zip(X, y): update = self.eta * (target - self.predict(xi)) self.w_[1:] += update * xi self.w_[0] += update errors += int(update != 0.0) self.errors_.append(errors) return self def net_input(self, X): """Calculate net input""" return np.dot(X, self.w_[1:]) + self.w_[0] def predict(self, X): """Return class label after unit step""" return np.where(self.net_input(X) >= 0.0, 1, -1) import pandas as pd df = pd.read_csv('https://archive.ics.uci.edu/ml/' 'machine-learning-databases/iris/iris.data', header=None) df.tail() import matplotlib.pyplot as plt import numpy as np y = df.iloc[0:100, 4].values y = np.where(y == 'Iris-setosa', -1, 1) X = df.iloc[0:100, [0, 2]].values plt.scatter(X[:50, 0], X[:50, 1], color='red', marker = 'o', label = 'setosa') plt.scatter(X[50:100,0], X[50:100, 1], color = 'blue', marker = 'x', label = 'versicolor') plt.xlabel('petal length') plt.ylabel('sepal length') plt.legend(loc='upper left') plt.show() # ### Now we'll fit the perceptron # #### First initiialize it, then fit to data, then plot ppn = Perceptron(eta = 0.1, n_iter=10) ppn.fit(X,y) plt.plot(range(1, len(ppn.errors_) +1), ppn.errors_, marker = 'o') plt.xlabel('Epochs') plt.ylabel('number of misclassifications') plt.show() # #### Now writing a small function to more easily be able to visualize the decision boundaries from matplotlib.colors import ListedColormap def plot_decision_regions(X, y, classifier, resolution = 0.02): # setup marker generator and color map markers = ('s', 'x', 'o', '^', 'v') colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan') cmap = ListedColormap(colors[:len(np.unique(y))]) #plot the decision surface x1_min, x1_max = X[:,0].min() -1, X[:,0].max() +1 x2_min, x2_max = X[:, 1].min() - 1, X[:,1].max() + 1 xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution)) Z= classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T) Z= Z.reshape(xx1.shape) plt.contourf(xx1, xx2, Z, alpha = 0.4, cmap = cmap) plt.xlim(xx1.min(), xx1.max()) plt.ylim(xx2.min(), xx2.max()) for idx, cl in enumerate(np.unique(y)): plt.scatter(x=X[y==cl,0], y=X[y == cl, 1], alpha = 0.8, c = cmap(idx), marker = markers[idx], label = cl) plot_decision_regions(X,y, classifier=ppn, resolution = .02) plt.xlabel('sepal length [cm]') plt.ylabel('petal length [cm]') plt.legend(loc='upper left') plt.show()
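# #### As a quick sanity check (a short sketch reusing the fitted `ppn`, `X` and `y` from above), we can verify whether the converged perceptron classifies every training sample correctly, in line with the misclassification curve reaching zero

# Fraction of training samples the fitted perceptron labels correctly
train_accuracy = (ppn.predict(X) == y).mean()
print('training accuracy: %.2f' % train_accuracy)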
ch02_perceptrons/ch_02_Perceptron.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # ApplePy instructions!
#

print('Hello ApplePi user!')

# # Hello!
# This document will serve as an intro to the use of Python in the ApplePi ESN environment.\
# In this tutorial I'll walk you through the steps you will need to take to tailor the ApplePi environment to your individual needs as a user, no matter whether you're an agriculturalist or academic.\
# This is meant to be as simple as possible! (that's the intention, at least)
ApplePy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Dataset Exploration

import numpy as np
import pandas as pd

# Restrict the number of rows displayed, just for convenience
pd.set_option('max_rows', 8)

# ## Load data

films = pd.read_csv('data/movie.csv')
films.head()

# ## Descriptive attributes

# Data types of the columns
films.dtypes

# Number of dimensions - 1 for a Series and 2 for a DataFrame
films.ndim

# Shape of the dataframe - its height (number of rows) and width (number of columns)
films.shape

# Number of elements in the dataframe - the product of width and height
films.size

# Returns a Series with the number of non-missing values in each column
films.count()

# The `describe` method gives a quick look at the data. Its output varies with the column dtype.
# It has these parameters:
# * percentiles - list of percentiles to compute, [0.25, 0.50, 0.75] by default
# * include - dtype or list of dtypes to include in the analysis
# * exclude - dtype or list of dtypes to exclude from the analysis

# Descriptive statistics for numeric data include the results of
# min()
# max()
# mean()
# std()
# count() and 3 quantiles
films.describe()

# Note that the result above contains no columns of object dtype - they are dropped. This is the default behaviour when a dataframe with mixed categorical and numerical dtypes is passed.
# To circumvent this, pass `include='all'` as an argument or filter the dtypes before calling the method.

# Descriptive statistics for categorical data include the count, the number of unique values, the most frequent value and its frequency
films.describe(include='all')

# Select the object columns and describe them
films.select_dtypes(object).describe()

# Method for computing percentiles only; takes an iterable of percentiles or a single value and returns
# a dataframe or a series correspondingly
# Can operate on rows or on columns - the axis parameter
films.quantile((0.1, 0.9))

# In addition there is an `info()` method showing column names, the number of non-missing samples, column dtypes and memory usage
films.info()

# ### Additional note about describe

# Most statistics in `describe()` are computed with NA values skipped, so missing observations do not take part in the computations. This can be changed by calling the underlying methods directly.

# With `skipna=False`, a column yields no result (NA) if it contains any NA values.
films.min(skipna=False)
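# Since `count()` reports the number of non-missing values, the complementary count of
# missing values per column can be obtained directly (a short follow-up sketch on the same `films` frame)
films.isna().sum()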
pandas/2_df_exploration.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #hide # !nvidia-smi #hide import sys if 'google.colab' in sys.modules: # !pip install -Uqq fastai einops datasets axial_positional_embedding wandb # !pip install -qq git+git://github.com/arampacha/reformer_fastai.git #hide # %load_ext autoreload # %autoreload 2 # + #all_slow # - # # LSH attention # # > model performance as function of number of hashing rounds #hide from fastai.text.all import * from reformer_fastai.all import * # ## Experiment Tracking # Make sure you have wandb and are logged in: #hide # !wandb login # Load Experiment Tracking with Weights & Biases: # + import wandb WANDB_NAME = 'test_n_hashes_enwik8' GROUP = 'TEST' NOTES = 'LSHLM on enwik8 sl 4096' CONFIG = {} TAGS = ['lm','lsh','enwik8', 'test'] # - # ## Download and Unpack enwik8 Data # # Download and unzip enwik8 data path = untar_data('http://mattmahoney.net/dc/enwik8.zip', dest='/data') # ## Prepare Data df = pd.DataFrame({'text':read_lines(path)}) df.head() btt = ByteTextTokenizer(is_lm=True, add_bos=False, add_eos=False) # %%time df['toks'] = df['text'].apply(btt) df['lens'] = df['toks'].apply(len) df['lens_cum_sum'] = df.lens.cumsum() # + train_cutoff = df.lens.sum() - 10_000_000 # keep all but 10M characters for val and test train_idxs = df.loc[df['lens_cum_sum'] < train_cutoff].index.values train_idxs = list(range(0, max(train_idxs))) remaining_idxs = len(df) - max(train_idxs) validation_idxs = list(range(max(train_idxs), max(train_idxs) + int(remaining_idxs/2))) test_idxs = list(range(max(validation_idxs), len(df))) splits = [train_idxs, validation_idxs] # - tfms = [attrgetter("text"), btt] dsets = Datasets(df, [tfms], splits=splits, dl_type=LMDataLoader) # %%time bs, sl = 4, 4096 # pad_seq2seq = partial(pad_input, pad_idx=bte.pad_token_id, pad_fields=[0,1]) dl_kwargs = [{'lens':df['lens'].values[train_idxs]}, {'val_lens':df['lens'].values[validation_idxs]}] dls = dsets.dataloaders(bs=bs, seq_len=sl, dl_kwargs=dl_kwargs, shuffle_train=True, n_workers=2) #collapse_output dls.show_batch(max_n=2) vocab_sz = btt.vocab_size xb, yb = dls.one_batch() xb.shape, yb.shape #hide del xb, yb torch.cuda.empty_cache() # ## Training #hide_output wandb.init(reinit=True, project="reformer-fastai", entity="fastai_community", name=WANDB_NAME, group=GROUP, notes=NOTES, tags=TAGS, config=CONFIG) config = NHashesConfig(n_hashes=2) learn = Learner(dls, LSHLM.from_config(config), loss_func=CrossEntropyLossFlat(), opt_func=adafactor, cbs = [GradientAccumulation(n_acc=8), GradientClip(1.0), PadBatchCallback(bucket_size=config.bucket_size)], metrics=[accuracy, perplexity, bpc]).to_fp16() # + #hide # learn.lr_find() # - learn.fit(1, cbs=WandbCallback(log_model=False, log_preds=False)) learn.recorder.plot_loss()
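# The run above covers a single setting (`n_hashes=2`). A minimal sketch of how further
# values could be swept, reusing the data, loss and callbacks defined above (in practice
# each value would be tracked as its own wandb run):

# +
for n_hashes in [4, 8]:
    config = NHashesConfig(n_hashes=n_hashes)
    learn = Learner(dls, LSHLM.from_config(config), loss_func=CrossEntropyLossFlat(),
                    opt_func=adafactor,
                    cbs=[GradientAccumulation(n_acc=8), GradientClip(1.0),
                         PadBatchCallback(bucket_size=config.bucket_size)],
                    metrics=[accuracy, perplexity, bpc]).to_fp16()
    learn.fit(1)
# -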
nbs/13_experiment.enwik8-n_hashes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (SHARP) # language: python # name: sharp # --- import xarray as xr import numpy as np import glob, os from shutil import copyfile # # User-supplied elevinfile = '/glade/p/work/manab/ff/islandpark/input/us_09066300_elev_bands.nc' elevoutfile = '/glade/p/work/manab/ff/islandpark/input/ip_elev_bands.nc' forcdir = '/glade/p/work/manab/ff/islandpark/inputnew' # # Change Latitude, Longitude elev = xr.open_dataset(elevinfile) elev['latitude'] = elev['latitude']*0 + 44.5118087293063 elev['longitude'] = elev['longitude']*0 -111.434567218585 elev = elev.sel(elevation_band = slice(None, 4)) print(elev) # # Area/Precip Fractions and mean_elev # + lat = elev['latitude'].values long = elev['longitude'].values elevation_band = elev['elevation_band'].values mean_elev = np.array([2250, 2550, 2550, 3350]).reshape((4, 1, 1)) #The 1-X is to ensure that sum of fractions is equal to 1. frac = np.array([0.65418, 0.099, 0.233, (1-(0.65418 + 0.099 + 0.233))]).reshape((4, 1, 1)) me = xr.DataArray(mean_elev, coords={'latitude': lat, 'longitude': long, 'elevation_band': elevation_band}, dims=['elevation_band', 'latitude', 'longitude']) fr = xr.DataArray(frac, coords={'latitude': lat, 'longitude': long, 'elevation_band': elevation_band}, dims=['elevation_band', 'latitude', 'longitude']) elev['mean_elev'] = me elev['area_frac'] = fr elev['prec_frac'] = fr elev.to_netcdf(elevoutfile) # - # # Create copies of elevation bands for each ensemble # + forcfiles = glob.glob(forcdir + '/*nc') forcfiles = [x for x in forcfiles if "elev_bands" not in x] forcfiles = sorted(forcfiles) for count, value in enumerate(forcfiles): outfile = os.path.join(forcdir, os.path.basename(value).split('.')[0] + '_elev_bands.nc') copyfile(elevoutfile, outfile) # -
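# # Sanity check

# A quick check on the last copied file (a short sketch; `outfile` is left over from the
# final loop iteration above): the area and precipitation fractions across the elevation
# bands should each sum to one.

# +
check = xr.open_dataset(outfile)
print(check['area_frac'].sum(dim='elevation_band').values)
print(check['prec_frac'].sum(dim='elevation_band').values)
check.close()
# -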
convertelevband.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="oFQJsjKrwFsS" # ### Import Libraries # + id="3akodfuSvE3o" import numpy as np from sklearn.base import clone import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap from sklearn.datasets import make_circles from sklearn.datasets import make_classification from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import accuracy_score from sklearn.model_selection import cross_val_score from sklearn.ensemble import BaggingClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import StackingClassifier # + id="heWiRN0lxwms" def plotDataset(X, y): for label in np.unique(y): plt.scatter(X[y == label, 0], X[y == label, 1], label=label) plt.legend() plt.show() def plotEstimator(trX, trY, teX, teY, estimator, title=''): estimator = clone(estimator).fit(trX, trY) h = .02 x_min, x_max = teX[:, 0].min() - .5, teX[:, 0].max() + .5 y_min, y_max = teX[:, 1].min() - .5, teX[:, 1].max() + .5 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) cm = plt.cm.RdBu cm_bright = ListedColormap(['#FF0000', '#0000FF']) Z = estimator.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1] Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z, cmap=cm, alpha=0.8) plt.scatter(teX[:, 0], teX[:, 1], c=teY, cmap=cm_bright, edgecolors='k', alpha=0.6) #plt.legend() plt.title(title) plt.show() # + [markdown] id="2-M6Z6qEwcW7" # ### Data Sets # + [markdown] id="HBDPamsxOeIH" # #### Circle dataset # + id="QWwxt7ejwbqO" rs = 0 X, y = make_circles(300, noise=0.1, random_state=rs) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,random_state=rs) # + colab={"base_uri": "https://localhost:8080/", "height": 513} id="WcAnKLeex_QK" outputId="e3dcfa7f-434f-4dcf-da31-99e73507473f" plotDataset(X,y) # + [markdown] id="Ml0QIRB2Oj47" # #### Classification dataset # + id="4-naeMWtOo17" rs = 0 X2, y2 = make_classification(300, random_state=rs) X_train2, X_test2, y_train2, y_test2 = train_test_split(X2, y2, test_size=0.2,random_state=rs) # + colab={"base_uri": "https://localhost:8080/", "height": 513} id="7nRj1ioyO09X" outputId="46744ea2-926c-44d2-e949-46ca33174446" plotDataset(X2,y2) # + [markdown] id="GYNtYEJLwNDE" # ### Decision Tree # + [markdown] id="dxF4lLBSwSfQ" # #### **(4)** Use Circle Dataset. 
Apply decision tree on the Circle Dataset, set criterion as gini and entropy, get the accuracy of the testing results, plot the decision boundaries and explain the difference between these criterion # + [markdown] id="TQlZI67xLcth" # ##### (4.1) DT with gini Index # + colab={"base_uri": "https://localhost:8080/", "height": 298} id="YSK5BpHgwMmf" outputId="a17ef081-18dd-41bd-a7c0-9fe409f60fad" dtEstimator_gini = DecisionTreeClassifier(criterion="gini") dtEstimator_gini.fit(X_train, y_train) predY = dtEstimator_gini.predict(X_test) dtAccuracy = accuracy_score(y_test, predY) print("test accuracy is: ",round(dtAccuracy,3)) plotEstimator(X_train, y_train, X_test, y_test, dtEstimator_gini, 'Decision Tree with gini index') # + [markdown] id="qQ9C4gbUMFp4" # ##### (4.2) DT with entropy # + colab={"base_uri": "https://localhost:8080/", "height": 298} id="t70PWFs6wEqw" outputId="5c26bb8a-63be-4719-83c1-7989c774bc70" dtEstimator_entropy = DecisionTreeClassifier(criterion="entropy") dtEstimator_entropy.fit(X_train, y_train) predY = dtEstimator_entropy.predict(X_test) dtAccuracy = accuracy_score(y_test, predY) print("test accuracy is: ",round(dtAccuracy,3)) plotEstimator(X_train, y_train, X_test, y_test, dtEstimator_entropy, 'Decision Tree with entropy') # + [markdown] id="F0pRd7wbN-JL" # Gini measurement is the probability of a random sample being classified incorrectly if we randomly pick a label according to the distribution in a branch. # # Entropy is a measurement of information (or rather lack thereof). You calculate the information gain by making a split. Which is the difference in entripies. This measures how you reduce the uncertainty about the label. # + [markdown] id="gnfdNhuIPm6u" # #### **(5)** Use Classification Dataset. Use training set to obtain the importance of features. Plot Validation Accuracy (y-axis) vs Top K Important Feature (x-axis) curve; where 4-fold cross validation should be used, and also plot Test Accuracy vs Top K Important Feature curve # + id="UXLawkZS5Wah" def plot_importance_vs_accuracys(values, axis_values, title): plt.figure(figsize=(8,5)) if len(axis_values) == 4: axis_1 = plt.plot(values, axis_values[0], color='red', marker='*', linestyle='-', label = '1st fold') axis_2 = plt.plot(values, axis_values[1], color='green', marker='*', linestyle='-', label = '2nd fold') axis_3 = plt.plot(values, axis_values[2], color='blue', marker='*', linestyle='-', label = '3rd fold') axis_4 = plt.plot(values, axis_values[3], color='yellow', marker='*', linestyle='-', label = '4th fold') plt.title(title) plt.xlabel('Top K Important Features') plt.ylabel('Validation Accuracy') plt.xticks([x for x in range(len(values))]) y_ticks = [x for x in range(60,101,5)] plt.yticks(y_ticks) plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) 
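    # otherwise plot a single curve: test accuracy against the number of top features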
else: axis1 = plt.plot(values, axis_values, color='blue', marker='*', linestyle='-') plt.title(title) plt.xlabel('Top K Important Features') plt.ylabel('Test Accuracy') plt.xticks([x for x in range(len(values))]) y_ticks = [x for x in range(80,101,2)] plt.yticks(y_ticks) plt.show # + [markdown] id="_B-p-MPQUgW2" # ##### **(5.1)** get top K important features # + id="g8hdEf5eMal8" colab={"base_uri": "https://localhost:8080/"} outputId="7077dc98-1caf-4d22-c9ab-3ad3d473c115" tree_model = DecisionTreeClassifier(random_state=0) tree_model.fit(X_train2, y_train2) features_import = tree_model.feature_importances_ idx_sorted = np.argsort(-features_import)[0:7] idx_sorted # + [markdown] id="wkkCGONNUvrp" # ##### **(5.2)** fit DT model with top K features using 4-folds cross validation # + id="9KSWlr7nNHiO" test_accuracy = [] validation_accuracy = [] l1 = idx_sorted[0:1] l2 = idx_sorted[0:2] l3 = idx_sorted[0:3] l4 = idx_sorted[0:4] l5 = idx_sorted[0:5] l6 = idx_sorted[0:6] l7 = idx_sorted[0:7] feature_list = [l1,l2,l3,l4,l5,l6,l7] for features in feature_list: valid_acc = cross_val_score(tree_model, X_train2[:,features], y_train2, cv=4, scoring='accuracy') validation_accuracy.append(valid_acc * 100) tree_model.fit(X_train2[:,features], y_train2) y_pred = tree_model.predict(X_test2[:, features]) test_acc = accuracy_score(y_test2, y_pred) test_accuracy.append(test_acc * 100) valid_acc = list(map(list, zip(*validation_accuracy))) # + [markdown] id="6bMBqDugVFWd" # ##### **(5.3)** Plot Validation Accuracy (y-axis) vs Top K Important Feature (x-axis) curve with 4-folds # + colab={"base_uri": "https://localhost:8080/", "height": 350} id="HSVvDH2kP22T" outputId="68a1bec3-d257-4666-f3c7-f960b88c896b" values = [x for x in range(1,8)] plot_importance_vs_accuracys(values[:8], valid_acc, "Top K Features VS Fold accuracy") # + [markdown] id="r1v85hL9YCTq" # ##### **(5.4)** plot Test Accuracy vs Top K Important Feature curve # + colab={"base_uri": "https://localhost:8080/", "height": 350} id="rDvzhiivTmA8" outputId="66792b7b-dda9-45cf-e5e0-f37ea359922c" values = [x for x in range(1,8)] plot_importance_vs_accuracys(values[:8], test_accuracy, "Top K Features VS Test Accuracy"); # + [markdown] id="O5NGyHwPdoti" # ### Bagging # + [markdown] id="JGc9MchKdQeM" # #### **(6)** Use Circle Dataset. Set the number of estimators as 2, 5, 15, 20 respectively, and generate the results accordingly (i.e., accuracy and decision boundary) # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="KWlTJlJrYGBd" outputId="55694fde-baeb-4d4c-b48e-7bd1e09ebbde" for n_est in [2,5,15,20]: estimator = BaggingClassifier(n_estimators=n_est, random_state=0) score = estimator.fit(X_train, y_train).score(X_test, y_test) plotEstimator(X_train, y_train, X_test, y_test, estimator, f'Bagging with n_estimator = {n_est} has accuracy = {score}') # + [markdown] id="_sWzdw7gfrnb" # #### **(7)** Explain why bagging can reduce the variance and mitigate the overfitting problem # + [markdown] id="gtGuSVFEf315" # Bagging can create many predictors by bootstrapping the data randomly subsample the dataset many times, and train a model using each subsample. # We can then aggregate our models, e.g., averaging out the predictions of each model. and this can reduce variance and overfitting # + [markdown] id="HvqtR0SsgHcP" # ### Random Forest # + [markdown] id="lvpt3ncWgJ2M" # #### **(8)** Use Circle Dataset. 
Set the number of estimators as 2, 5, 15, 20 respectively, and generate the results accordingly (i.e., accuracy and decision boundary) # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="BjNz2bCbeKrD" outputId="ce579274-a6d5-4c3f-d0f6-eeb83aa6d009" for n_est in [2,5,15,20]: estimator = RandomForestClassifier(n_estimators=n_est, random_state=0) score = estimator.fit(X_train, y_train).score(X_test, y_test) plotEstimator(X_train, y_train, X_test, y_test, estimator, f'RF with n_estimator = {n_est} has accuracy = {score}') # + [markdown] id="RXEupQV9j454" # #### **(9)** Compare with bagging results and explain the difference between Bagging and Random Forest # + [markdown] id="Em1kiTGvkD24" # The fundamental difference is that in Random forests, only a subset of features are selected at random out of the total and the best split feature from the subset is used to split each node in a tree, unlike in bagging where all features are considered for splitting a node. # + [markdown] id="rs30FX87khco" # ### Boosting # + [markdown] id="reQ46BL5km34" # #### **(10)** Use Circle Dataset. There are 2 important hyperparameters in AdaBoost, i.e., the number of estimators (ne), and learning rate (lr). Please plot 12 subfigures as the following table's setup. Each figure should plot the decision boundary and each of their title should be the same format as {n_estimaotrs}, {learning_rate}, {accuracy} # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="iu1bTvuyjvss" outputId="ec515645-516e-4787-bdd2-7758d2a55ca5" n_estimator = [10,50,100,200] l_rate = [0.1,1,2] for l in l_rate: for n_est in n_estimator: estimator = AdaBoostClassifier(n_estimators= n_est, learning_rate= l) score = estimator.fit(X_train, y_train).score(X_test, y_test) plotEstimator(X_train, y_train, X_test, y_test, estimator, f' {n_est} , {l} , {score}') # + [markdown] id="t4Zq7pIlo5VR" # ### Stacking # + [markdown] id="98EOABtIpPf_" # #### **(11)** We have tuned the Decision Tree, Bagging, Random Forest, and AdaBoost in the previous section. 
Use these fine tuned model as base estimators and use Naive Bayes, Logistic Regression, and Decision Tree as aggregators to generate the results accordingly (i.e., accuracy and decision boundary) # + [markdown] id="rbow16utJ_x3" # ##### Base Estimaters # + id="gWyrM0lACLk-" base_estimaters = list() base_estimaters.append(('DT',DecisionTreeClassifier(criterion="entropy", random_state=0))) base_estimaters.append(('Bagging' ,BaggingClassifier(n_estimators=5, random_state=0))) base_estimaters.append(('RF', RandomForestClassifier(n_estimators=5, random_state=0))) base_estimaters.append(('Adaboost', AdaBoostClassifier(n_estimators=50, learning_rate= 1, random_state=0))) # + [markdown] id="3OarQ9-h0k_A" # #### (11.1) Naive Bayes as Aggregator # + colab={"base_uri": "https://localhost:8080/", "height": 281} id="2CGz_wOWnAUE" outputId="3dcfd925-601b-42f9-f04e-d22a7fc939c2" aggregator1 =GaussianNB() model1 = StackingClassifier(estimators=base_estimaters, final_estimator=aggregator1, cv=5) score = model1.fit(X_train, y_train).score(X_test, y_test) plotEstimator(X_train, y_train, X_test, y_test, model1, f'Accuracy of Gaussian as aggregator = {score}') # + [markdown] id="WllecKIsBJuv" # #### (11.2) Logistic Regression as Aggregator # + colab={"base_uri": "https://localhost:8080/", "height": 281} id="wN6rKEvQ2s0p" outputId="67455625-88e1-470e-d41e-35b4f116a008" aggregator2 =LogisticRegression() model2 = StackingClassifier(estimators=base_estimaters, final_estimator=aggregator2, cv=5) score = model2.fit(X_train, y_train).score(X_test, y_test) plotEstimator(X_train, y_train, X_test, y_test, model2, f'Accuracy of Logistic Regression as aggregator = {score}') # + [markdown] id="iblP6PqCDdck" # #### (11.3) Decision Tree as Aggregator # + colab={"base_uri": "https://localhost:8080/", "height": 281} id="YguXuL_CDA6u" outputId="711d12d5-5bd5-4c47-d699-d706be776e6f" aggregator3 =DecisionTreeClassifier() model3 = StackingClassifier(estimators=base_estimaters, final_estimator=aggregator3, cv=5) score = model3.fit(X_train, y_train).score(X_test, y_test) plotEstimator(X_train, y_train, X_test, y_test, model3, f'Accuracy of DT as aggregator = {score}')
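# + [markdown]
# #### (11.4) Comparing the aggregators
#
# A short recap (a minimal sketch reusing the three fitted stacking models from above) that collects the test accuracies in one place:

# +
for name, model in [('Naive Bayes', model1),
                    ('Logistic Regression', model2),
                    ('Decision Tree', model3)]:
    print(f'{name} aggregator test accuracy: {model.score(X_test, y_test):.3f}')
# -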
Assignment_4(Ensemble_Learning).ipynb