# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.6 64-bit # name: python3 # --- import twint import gc gc.collect() import twint import nest_asyncio nest_asyncio.apply() # Configure c = twint.Config() c.Username = "DiyanetTV" c.Limit = 300 c.Store_csv = True c.Output = "none.csv" twint.run.Search(c) import nest_asyncio nest_asyncio.apply() def get_followers_following(username): dic={} c = twint.Config() c.Username = username c.Hide_output = True c.Store_object = True sonuc= twint.run.Lookup(c) fol = twint.output.users_list[:] dic["followers"] = fol[0].followers dic["following"] = fol[0].following return dic tweets=[] c = twint.Config() c.Username = 'DiyanetTV' c.Since = "2019-3-3" c.Limit = 5000 c.Store_object = True c.Store_csv = True c.Output = "DiyanetTV.csv" twint.run.Search(c) tweets = twint.output.tweets_list print(len(tweets)) # + import io hashtags={} #print(type(tweets), len(tweets), type(hashtags)) # first iterate over the tweets for t in tweets: #print('t:', t, type(t), t.hashtags, type(t.hashtags)) # then iterate over the hashtags of that single tweet for h in t.hashtags: # increment the count if the hashtag already exists, otherwise initialize it to 1 #print('h:',h,type(h),t.username) if(t.username in hashtags): if(h in hashtags[t.username]): hashtags[t.username][h] += 1 else: hashtags[t.username][h]=1 else: hashtags[t.username]={h:1} # now save the data with io.open('hashtags.csv', 'w', encoding="utf-8") as output: output.write('username,hashtag,count\n') for user in hashtags: for h in hashtags[user]: output.write('{},{},{}\n'.format(user, h, hashtags[user][h])) # + from collections import Counter replies = twint.Config() replies.Since = "2018-03-01" replies.Pandas = True replies.To = "@DiyanetTV" twint.run.Search(replies) df = twint.storage.panda.Tweets_df #print(df) # - #print(df.head()) df.to_csv('Replies.csv', index=False) Replies = {x:y for x,y in zip(df['conversation_id'],df['nreplies'])} fetchedReplies =Counter(df['conversation_id']) #print(df[df['conversation_id']=='1243481878402723840']) for tweet in Replies: print(tweet, "\t{}\t{}\t".format(Replies[tweet],fetchedReplies[tweet])) # + mentions={} #print(tweets) for t in tweets: #print('t:', t, type(t), t.hashtags, type(t.hashtags)) # then iterate over the hashtags of that single tweet for m in t.mentions: #print(m, len(mentions)) # increment the count if the hashtag already exists, otherwise initialize it to 1 #print('h:',h,type(h),t.username) if(m['screen_name'] in mentions): mentions[m['screen_name']]+=1 else: mentions[m['screen_name']]=1 print(mentions) # - import pandas as pd mentions_df= pd.DataFrame(mentions.items(), columns=['screen_name', 'count']) mentions_df.to_csv('mentions.csv', index=False) import sys print(sys.executable) # + df_tweet=pd.read_csv('DiyanetTV.csv') print(df_tweet.columns) print(df_tweet.describe()) df_tweet['tweet_date']=pd.to_datetime(df_tweet['date'] + ' ' + df_tweet['time']) df_tweet = df_tweet.rename(columns={'tweet': 'Tweet', 'tweet_date': 'Timestamp', 'hashtags': 'Subject'}) print(df_tweet.head()) # - def twint2pd(columns): return twint.output.panda.Tweets_df[columns] tweet_df= df_tweet[['Timestamp','username','Tweet', 'Subject','mentions','replies_count','retweets_count','likes_count']] print(tweet_df.head()) tweet_list=tweet_df['Tweet'].tolist() from matplotlib import pyplot series = df_tweet[['date','likes_count']] print(series.head()) series.plot() 
pyplot.show() def count_plot_data(df, freq): plot_df = df.set_index('Timestamp').groupby('Subject').resample(freq).id.count().unstack(level=0, fill_value=0) plot_df.index.rename('Date', inplace=True) plot_df = plot_df.rename_axis(None, axis='columns') return plot_df
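# The `count_plot_data` helper above is never called in this notebook; below is a minimal
# usage sketch (an addition for illustration). It assumes the twint CSV export contains an
# `id` column and uses a daily resampling frequency ('D') purely as an example.

# +
daily_counts = count_plot_data(df_tweet, 'D')
print(daily_counts.head())

daily_counts.plot(figsize=(12, 5))
pyplot.show()
# -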
other_social/twitter_page.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import os.path import itertools import pandas as pd import scipy.stats import numpy as np import qiime2 import qiime2.plugins.feature_table # - md = qiime2.Metadata.load('../sample-metadata-temp.tsv') # + treatments = ['UDCA', 'placebo'] # treatmentgroup visits = ['pre', 'post'] # visit # number of samples a feature must be observed in to be included in these analyses min_samples_fraction = 0.33 cwd = os.getcwd() # - qiime2.Artifact.peek('../data/table.qza') ft = qiime2.Artifact.load('../data/table.qza') ft = qiime2.plugins.feature_table.actions.filter_samples(ft, metadata=md, where="IncludedIn2017Analysis='Yes'").filtered_table min_samples = int(ft.view(pd.DataFrame).shape[0] * min_samples_fraction) ft = qiime2.plugins.feature_table.actions.filter_features(ft, min_samples=min_samples).filtered_table ft_summary = qiime2.plugins.feature_table.actions.summarize(ft, sample_metadata=md).visualization ft_summary # for this analysis we need to keep all samples, so even sampling depth is set to the minimum sample frequency. # Samples with extremely low total frequencies have already been filtered from this table (see the # IncludedIn2017Analysis metadata category). even_sampling_depth = int(ft.view(pd.DataFrame).sum(axis=1).min()) # + # Since this step is non-deterministic, I comment it out so it can't accidentally be re-run. # ft_rare = qiime2.plugins.feature_table.actions.rarefy(ft, even_sampling_depth).rarefied_table # - print(ft_rare.view(pd.DataFrame).shape) # Compute correlations with Spearman and Pearson, and generate commands to compute SparCC correlations. SparCC is Python 2.6 software, so needs to run in its own environment. # + sparcc_command_template = ( "python /Users/gregcaporaso/code/crc-udca1/network-analysis/run-sparcc.py " " \"%s\" 1000 \"%s\"") sparcc_cmds = [] for t, v in itertools.product(treatments, visits): output_dir = os.path.join(cwd, '%s-%s' % (t, v)) # SparCC takes a long time to run, so this should fail if the # output directory already exists so those results aren't overwritten. 
os.makedirs(output_dir, exist_ok=False) temp_ft = qiime2.plugins.feature_table.actions.filter_samples(ft_rare, metadata=md, where="treatmentgroup='%s' AND visit='%s'" % (t, v)).filtered_table temp_ft.save(os.path.join(output_dir, 'table.qza')) df = temp_ft.view(pd.DataFrame) table_fn = "sparcc-table.tsv" table_fp = os.path.join(output_dir, table_fn) sparcc_output_dn = "sparcc" sparcc_output_dp = os.path.join(output_dir, sparcc_output_dn) df.T.to_csv(table_fp, sep='\t', index_label='OTU_ID') sparc_cmd = sparcc_command_template % (table_fp, sparcc_output_dp) sparcc_cmds.append(sparc_cmd) spearman_rho, spearman_p = scipy.stats.spearmanr(df) pd.DataFrame(spearman_rho, index=df.columns, columns=df.columns).to_csv( os.path.join(output_dir, "spearman_rho.tsv"), sep='\t', index_label='OTU_ID') pd.DataFrame(spearman_p, index=df.columns, columns=df.columns).to_csv( os.path.join(output_dir, "spearman_p.tsv"), sep='\t', index_label='OTU_ID') # scipy.stats.pearsonr has a different interface than scipy.stats.spearmanr :( pearson_r = [] pearson_p = [] for _, r1 in df.T.iterrows(): pearson_r_row = [] pearson_p_row = [] for _, r2 in df.T.iterrows(): r, p = scipy.stats.pearsonr(r1, r2) pearson_r_row.append(r) pearson_p_row.append(p) pearson_r.append(pearson_r_row) pearson_p.append(pearson_p_row) pd.DataFrame(pearson_r, index=df.columns, columns=df.columns).to_csv( os.path.join(output_dir, "pearson_r.tsv"), sep='\t', index_label='OTU_ID') pd.DataFrame(pearson_p, index=df.columns, columns=df.columns).to_csv( os.path.join(output_dir, "pearson_p.tsv"), sep='\t', index_label='OTU_ID') print(' && '.join(sparcc_cmds)) # + alphas = [0.001, 0.01, 0.05] summary = [] summary_columns = ['treatmentgroup', 'visit', 'alpha', 'Spearman significant', 'Pearson significant', 'SparCC significant', 'Ensemble significant', 'Same sign', 'Reported interactions'] for alpha in alphas: for t, v in itertools.product(treatments, visits): row_summary = [t, v, alpha] data_dir = os.path.join(cwd, '%s-%s' % (t, v)) spearman_rho_df = pd.read_csv(os.path.join(data_dir, "spearman_rho.tsv"), sep='\t').set_index('OTU_ID') spearman_p_df = pd.read_csv(os.path.join(data_dir, "spearman_p.tsv"), sep='\t').set_index('OTU_ID') row_summary.append(np.count_nonzero(spearman_p_df <= alpha)) pearson_r_df = pd.read_csv(os.path.join(data_dir, "pearson_r.tsv"), sep='\t').set_index('OTU_ID') pearson_p_df = pd.read_csv(os.path.join(data_dir, "pearson_p.tsv"), sep='\t').set_index('OTU_ID') row_summary.append(np.count_nonzero(pearson_p_df <= alpha)) sparcc_r_df = pd.read_csv(os.path.join(data_dir, 'sparcc', 'corr.out'), sep='\t').set_index('OTU_ID') sparcc_p_df = pd.read_csv(os.path.join(data_dir, 'sparcc', 'p-value.out'), sep='\t').set_index('OTU_ID') row_summary.append(np.count_nonzero(sparcc_p_df <= alpha)) significance_df = (pearson_p_df <= alpha) & (spearman_p_df <= alpha) & (sparcc_p_df <= alpha) same_sign_df = (np.sign(pearson_r_df) == np.sign(spearman_rho_df)) == np.sign(sparcc_r_df) report_interaction_df = significance_df & same_sign_df row_summary.append(np.count_nonzero(significance_df)) row_summary.append(np.count_nonzero(same_sign_df)) row_summary.append(np.count_nonzero(report_interaction_df)) significance_df.to_csv(os.path.join(data_dir, 'ensemble-significance-%f.tsv' % alpha), sep='\t', index_label='OTU_ID') same_sign_df.to_csv(os.path.join(data_dir, 'ensemble-same-sign.tsv'), sep='\t', index_label='OTU_ID') report_interaction_df.to_csv(os.path.join(data_dir, 'report-interaction-%f.tsv' % alpha), sep='\t', index_label='OTU_ID') 
summary.append(row_summary) summary_df = pd.DataFrame(summary, columns=summary_columns) # - summary_df
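# As a quick, self-contained illustration of the ensemble rule applied above (an interaction
# is reported when all three p-values are at or below alpha and all three correlation
# estimates agree in sign), here is a toy example on hypothetical 2x2 matrices; note that
# sign agreement across three matrices needs an explicit conjunction of pairwise comparisons.

# +
toy_idx = ['OTU_A', 'OTU_B']
toy_alpha = 0.05

toy_pearson_r = pd.DataFrame([[1.0, -0.8], [-0.8, 1.0]], index=toy_idx, columns=toy_idx)
toy_spearman_rho = pd.DataFrame([[1.0, -0.7], [-0.7, 1.0]], index=toy_idx, columns=toy_idx)
toy_sparcc_r = pd.DataFrame([[1.0, -0.6], [-0.6, 1.0]], index=toy_idx, columns=toy_idx)
toy_pearson_p = pd.DataFrame([[0.0, 0.01], [0.01, 0.0]], index=toy_idx, columns=toy_idx)
toy_spearman_p = pd.DataFrame([[0.0, 0.02], [0.02, 0.0]], index=toy_idx, columns=toy_idx)
toy_sparcc_p = pd.DataFrame([[0.0, 0.04], [0.04, 0.0]], index=toy_idx, columns=toy_idx)

toy_significant = (toy_pearson_p <= toy_alpha) & (toy_spearman_p <= toy_alpha) & (toy_sparcc_p <= toy_alpha)
# sign agreement: Pearson vs. Spearman AND Spearman vs. SparCC
toy_same_sign = ((np.sign(toy_pearson_r) == np.sign(toy_spearman_rho)) &
                 (np.sign(toy_spearman_rho) == np.sign(toy_sparcc_r)))
toy_report = toy_significant & toy_same_sign
toy_report
# -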
network-analysis/compute-correlations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Data Compression and Regression with Singular Value Decomposition # ## Line fitting revisit # We'll compare The simple Least Squares model and SVD's performance in regression Points = np.loadtxt('Points.csv', delimiter=',') # We'll define our least squares equation # + # %matplotlib inline import os import pandas as pd import matplotlib.pyplot as plt import numpy as np import statsmodels.api as sm import statsmodels.formula.api as smf from scipy.stats import t as tdist import scipy.stats as stats import pylab from statsmodels.stats.outliers_influence import outlier_test from statsmodels.sandbox.regression.predstd import wls_prediction_std as pi def least_squares(data): x = data[:,0] n = x.shape[0] ones = np.repeat(1,n) newx = np.column_stack((ones,x)) xtx = np.linalg.inv((np.dot(newx.T,newx))) y = data[:,1] newy = np.dot(newx.T,y) A = np.dot(xtx,newy) return A x = Points[:,0] y = Points[:,1] n = x.shape[0] ones = np.repeat(1,n) Amat = np.column_stack((ones,x)) A = least_squares(Points) print A print "a:" print A[0] print "b:" print A[1] # - # This is the predicted line plt.plot(x,A[0] + A[1]*x) plt.scatter(x,y,color='r') axes = plt.gca() axes.set_xlim([-3,3]) plt.show() # Now we'll try interpolating using SVD, approximating to one rank. First we must center the points about 0 # We subtract each point from the average to center about 0 xavg = np.mean(x) yavg = np.mean(y) avg = np.column_stack((xavg,yavg)) CP = Points - avg # We'll use np.linalg.svd to decompose the data matrix # + def svdtransform(data,rank): U, s, V = np.linalg.svd(data,full_matrices=True) Sred = np.diag(s)[:rank,:rank] newU = U[:,:rank] newV = V[:rank,:] end = np.dot(newU,np.dot(Sred,newV)) return [end,[newU,Sred,newV]] CP1 = svdtransform(CP,1)[0] print CP1 # - # CP1 has rank-1 and approximates CP. This means all rows of CP **approximately** lie on the line spanned by rows of CP1. If we shift this line back to the centroid of CP, that is the best fitting line obtained from SVD. # # We'll plot the least squares line in black and the SVD one in blue SVDnew = CP1 + avg newSVD = least_squares(SVDnew) print SVDnew.shape print newSVD print "slope:" print newSVD[1] plt.plot(x,A[0] + A[1]*x, color='black') plt.plot(x,newSVD[0] + newSVD[1]*x, color='blue') plt.scatter(x,y, color='r') axes = plt.gca() axes.set_xlim([-3,3]) plt.show() # ## Data compression # We want to feed data to a clustering algorithm that outputs a fixed number of cluster centers. Because d is large, however, the algorithm takes too long or is unstable, so we want to reduce the number of variables using SVD. # (6) Data.csv stores certain information of 1000 people. Each row is a person. First load this to a numpy array, call it Data df = pd.read_csv('Data.csv') Data = df.values # We center the data the generate the matrix E. c = np.zeros((1, Data.shape[1])) for i in range(Data.shape[1]): c[:,i] = np.mean(Data[:,i]) E = Data - c # (8) Compute SVD of E. Print out the singular values. r is the number of significant singular values SVDe = U, s, V = np.linalg.svd(E,full_matrices=True) print SVDe[1] #to pick r, I choose the numbers that are multiplied by e^2, bigger than 1.5 r = 4 print "r:" print r # We generate the new V matrix with only 4 columns. 
We then calculate the new Data matrix Y c = c print c.shape V = SVDe[2].T HatV = V[:,:4] Y = np.dot(E,HatV) print Y.shape # If everything goes smoothly, we can use the following command to Store $Y, \hat V, c$ to "compressedData.csv". import csv arr = [Y,HatV,c] b = open('compressedData.csv', 'w') a = csv.writer(b) a.writerows(arr) b.close() # Wou can see that this file size is smaller than "Data.csv". We'll print out the compression rate Datasize = os.path.getsize('Data.csv') CompressedSize = os.path.getsize('compressedData.csv') compression_rate = float(Datasize)/CompressedSize print "compression rate:" print compression_rate # We use $Y, \hat V, c$ to get the approximate Data matrix. Dapprox = np.dot(Y,HatV.T) + c print Dapprox[0,:] print Data[0,:] # **Finally, we probably will do more projects on SVD using this one.** # ## Just for fun we demonstrate that gradient ascent can be used to find where the maximum of a function is # You can use $f(x)=\frac{1}{1+X^2}$ on [-0.5,1] as an example # + import random def f(x): return 1/(1 + x**2) def deriv(x): return -(2*x/((x**2 + 1)**2)) def grad_ascent(func,deriv,ran,alpha): prevx = random.uniform(ran[0], ran[1]) x = random.uniform(ran[0], ran[1])/2 l = [] m = [] while abs(func(x) - func(prevx)) > 0.000000001: prevx = x x = x + alpha*deriv(x) l.append(func(x)) m.append(x) return [x,l,m,func(x)] nx = np.linspace(-0.5, 1, num=10) grad = grad_ascent(f,deriv,[-0.5,1],0.005) print "max value and corresponding x value:" print [grad[3],grad[0]] plt.plot(nx,f(nx), color='black') plt.scatter(grad[0],grad[3],color='r') plt.show()
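# The lists returned by `grad_ascent` also let us visualize the path the ascent took
# (a small addition for illustration, reusing `grad`, `f` and `nx` from the run above:
# `grad[2]` holds the visited x values and `grad[1]` the corresponding function values).

# +
steps = grad[2]    # x values visited during the ascent
values = grad[1]   # f(x) at each visited point
plt.plot(nx, f(nx), color='black')
plt.scatter(steps, values, color='b', s=10)
plt.scatter(grad[0], grad[3], color='r')
plt.show()
# -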
Applications/SVD-DataCompress-Regression/SVD Data Compression and Regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- library(data.table) library(dplyr) library(tidyr) library(ggplot2) library(cowplot) library(ggsci) d = fread("../data/benchmark/2comb/res2-2.txt") d <- d %>% mutate( sigtype = as.factor(sigtype), sig1 = as.factor(sig1), sig2 = as.factor(sig2), ratio = as.factor(ratio), noise = as.factor(noise), nmut = as.factor(nmut) ) head(d) # + # MSE for signatures: # value - orig for tested signatures # + d_mse <- d %>% spread(method, value) %>% gather(key = method, value = value, mle, mlez, ds) %>% mutate(e = (value - original)) %>% group_by(sigtype, sig1, sig2, ratio, noise, nmut, method) %>% summarise( n = n(), error = mean(e) ) %>% mutate(method = recode(method, ds='deconstructSigs', mle='MutaGene MLE', mlez='MutaGene MLE*' )) # head(d_mse) options(repr.plot.width=5, repr.plot.height=3) ggplot(d_mse %>% filter(nmut==500 & ratio==7 & noise==10)) + geom_boxplot(aes(x=sigtype, y=error)) + theme(axis.text.x = element_text(angle = 45, hjust = 1)) + facet_wrap(~method) ggplot(d_mse %>% filter(sigtype==30 & nmut==500 & ratio==7)) + geom_boxplot(aes(x=method, y=error)) + theme(axis.text.x = element_text(angle = 45, hjust = 1)) + facet_wrap(~noise) ggplot(d_mse %>% filter(sigtype==30 & nmut==500 & noise==10)) + geom_boxplot(aes(x=method, y=error)) + theme(axis.text.x = element_text(angle = 45, hjust = 1)) + facet_wrap(~ratio) ggplot(d_mse %>% filter(sigtype==30 & ratio==7 & noise==10)) + geom_boxplot(aes(x=method, y=error)) + theme(axis.text.x = element_text(angle = 45, hjust = 1)) + facet_wrap(~nmut) # + d_mse <- d %>% filter(sigtype == 10 & noise == 10 & nmut == 500) %>% spread(method, value) %>% gather(key = method, value = value, mle, mlez, ds) %>% mutate(e = (value - original)) %>% group_by(sigtype, sig1, sig2, ratio, noise, nmut, method) %>% summarise( n = n(), error = mean(e) ) %>% mutate(method = recode(method, ds='deconstructSigs', mle='MutaGene MLE', mlez='MutaGene MLE*' )) # head(d_mse) options(repr.plot.width=3.5, repr.plot.height=3) error_vs_method <- ggplot(d_mse) + geom_boxplot(aes(x=method, y=error)) + theme(axis.text.x = element_text(angle = 45, hjust = 1)) error_vs_method # + d_mse <- d %>% filter(sigtype == 5 & ratio == 7) %>% spread(method, value) %>% gather(key = method, value = value, mle, mlez, ds) %>% mutate( SE = (value - original)**2 ) %>% group_by(sig1, sig2, ratio, noise, nmut, method) %>% summarise( n = n(), MSE = mean(SE) ) %>% mutate(method = recode(method, ds='deconstructSigs', mle='MutaGene MLE', mlez='MutaGene MLE_corr' )) # head(d_mse) mse_vs_method <- ggplot(d_mse) + geom_boxplot(aes(x=method, y=MSE)) + theme(axis.text.x = element_text(angle = 45, hjust = 1)) options(repr.plot.width=2, repr.plot.height=3) mse_vs_method ggplot(d_mse) + geom_boxplot(aes(x=method, color=as.factor(noise), y=MSE)) + theme(axis.text.x = element_text(angle = 45, hjust = 1)) # - ggplot(d_mse) + geom_boxplot(aes(x=as.factor(method), y=MSE)) # + plot_grid( , labels=c('A')) # - options(repr.plot.width=8, repr.plot.height=2*30) d2 = d %>% filter(sigtype == 30 & ratio==5 & noise==10 & nmut==500) ggplot(d2) + geom_boxplot(aes(x=as.factor(sig2), y=value, fill=method), show.legend=T) + facet_wrap(~as.factor(sig1), nrow=30) options(repr.plot.width=8, repr.plot.height=2) d2 = d %>% filter(sigtype == 5 & ratio==7 & noise==20 & nmut==50) ggplot(d2) + geom_boxplot(aes(x=as.factor(sig2), y=value, color=method), 
show.legend=T) + facet_wrap(~as.factor(sig1), nrow=1) d %>% filter(sigtype == 5) %>% group_by(sig1, sig2, nmut, noise, method) %>% summarise( n = n(), mvalue = mean(value, na.rm = TRUE) ) options(repr.plot.width=8, repr.plot.height=7) d2 = d %>% filter(sigtype == 10 & ratio==5 & noise==10 & nmut==500) ggplot(d2) + geom_boxplot(aes(x=as.factor(sig2), y=value, color=method), show.legend=T) + facet_wrap(~as.factor(sig1), nrow=5) + scale_color_npg() options(repr.plot.width=8, repr.plot.height=7) d2 = d %>% filter(sigtype == 10 & ratio==5 & nmut==500 & method=="mlez") ggplot(d2) + geom_boxplot(aes(x=as.factor(sig2), y=value, color=as.factor(noise)), show.legend=T) + facet_wrap(~as.factor(sig1), nrow=5) + scale_color_npg() options(repr.plot.width=8, repr.plot.height=7) d2 = d %>% filter(sigtype == 10 & ratio==7 & noise==10 & method=="mlez") ggplot(d2) + geom_boxplot(aes(x=as.factor(sig2), y=value, color=as.factor(nmut)), show.legend=T) + facet_wrap(~as.factor(sig1), nrow=5) + scale_color_npg() options(repr.plot.width=8, repr.plot.height=7) d2 = d %>% filter(sigtype == 10 & ratio==7 & nmut==500 & method=="mlez") ggplot(d2) + geom_boxplot(aes(x=as.factor(sig2), y=value, color=as.factor(noise)), show.legend=T) + facet_wrap(~as.factor(sig1), nrow=5) + scale_color_npg() options(repr.plot.width=8, repr.plot.height=7) d2 = d %>% filter(sigtype == 10 & ratio==7 & nmut==500 & method=="mlez") ggplot(d2) + geom_boxplot(aes(x=as.factor(sig2), y=value, color=as.factor(noise)), show.legend=T) + facet_wrap(~as.factor(sig1), nrow=5) + scale_color_npg() # + s0 = d %>% filter(sigtype == 10 & ratio == 7) %>% group_by(sig1, sig2, nmut, noise, method) %>% summarise( n = n(), mvalue = mean(value, na.rm = TRUE) ) s1 = s0 %>% spread(method, mvalue) s2 = s0 %>% spread(nmut, mvalue) s3 = s0 %>% spread(noise, mvalue) # + s2 = s1 %>% mutate(ds = original - ds, mle=original-mle) %>% select(-original) options(repr.plot.width=4, repr.plot.height=2) plot_grid(ggplot(s2) + geom_histogram(aes(mle)) + theme(axis.text.x = element_text(angle = 90, hjust = 1)), ggplot(s2) + geom_histogram(aes(ds)) + theme(axis.text.x = element_text(angle = 90, hjust = 1)), labels = c("A", "B"), align = "h") # - options(repr.plot.width=2, repr.plot.height=2) ggplot(s0 %>% filter(method != "original")) + geom_boxplot(aes(y=mvalue, x=method, fill=method), show.legend=F) options(repr.plot.width=4, repr.plot.height=1.8) plot_grid(ggplot(s2) + geom_point(aes(ds, mle)), ggplot(s) + geom_point(aes(ds, mle)), labels = c("A", "B"), align = "h")
notebooks/Figure1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + [markdown] deletable=true editable=true # # Welcome to the SETI Institute Code Challenge! # # This first tutorial will explain a little bit on what the data is and where to get it. # # # Introduction # # For the Code Challenge, you will be using the **"primary" data set**, as we've called it. The primary data set is # # * labeled data set of 350,000 simulated signals # * 7 different labels, or "signal classifications" # * total of about 128 GB of data # # This data set should be used to train your models. # # **You do not need to use all the data to train your models** if you do not want to or need to consume the entire set. # # There are also a **`small` and a `medium` sized subset** of these primary data files. # # # ## Simple Data Format # # Each data file has a simple format: # # * file name = <UUID>.dat # * a JSON header in the first line that contains: # * UUID # * signal_classification (label) # * followed by stream complex-valued time-series data. # # The `ibmseti` Python package is available to assist in reading this data and performing some basic operations for you. # # ## Basic Warmup Data Set. # # There is also a second, simple and clean data set that you may use for warmup, which we call the **"basic" data set**. This basic set should be used as a sanity check and for very early-stage prototyping. We recommend that everybody starts with this. # # * Only 4 different signal classifications # * 1000 simulation files for each class: 4000 files total # * Available as single zip file # * ~1 GB in total. # # ### Basic Set versus Primary Set # # > The difference between the `basic` and `primary` data sets is that the signals simulated in the `basic` set have, on average, much higher signal to noise ratio (they are larger amplitude signals). They also have other characteristics that will make the different signal classes very distinguishable. **You should be able to get very high signal classification accuracy with the basic data set.** The primary data set has smaller amplitude signals and can look more similar to each other, making classification accuracy more difficult with this data set. There are also only 4 classes in the basic data set and 7 classes in the primary set. # # # ## Primary Data Sets # # ### Primary Small # # The `primary small` is a subset of the full primary data set. Use for early-stage prototyping. # # * All 7 signal classifications # * 1000 simulations / class (7 classes = 7000 files) # * Available as single zip file # * ~2 GB in total # # ### Primary Medium # # The `primary medium` is a subset of the full primary data set. Use for early-stage prototyping & model building. # # * All 7 signal classifications # * 10000 simulations / class (7 classes = 70000 files) # * Large enough for relatively robust model construction # * Available in 6 separate zip files # * ~20 GB in total # # ### Primary Full # # The `primary full` is the entire primary data set. Use only if you want an enourmous training data set. You will need a small data center to process these data in a reasonable amount of time. 
# # * All 7 signal classifications # * 50000 simulations / class (7 classes = 350000 files) # * Only available in 350k individual files # * one must read through the index file and download files individually, which will take some time from outside of IBM Cloud systems # * ~130 GB in total # # # ## Index Files # # For all data sets, there exists an **index** file. That file is a CSV file. Each row holds the UUID, signal_classification (label) for a simulation file in the data set. You can use these index files in a few different ways (from using to keep track of your downloads, to facilitate parallelization of your analysis on Spark). # # # # ## Direct Data URLs if you are working from outside of IBM Data Science Experience # # ### Basic4 # Data: https://dal.objectstorage.open.softlayer.com/v1/AUTH_cdbef52bdf7a449c96936e1071f0a46b/simsignals_basic_v2/basic4.zip # # Index: # https://dal.objectstorage.open.softlayer.com/v1/AUTH_cdbef52bdf7a449c96936e1071f0a46b/simsignals_files/public_list_basic_v2_26may_2017.csv # # # ### Primary Small # Data: https://dal.objectstorage.open.softlayer.com/v1/AUTH_cdbef52bdf7a449c96936e1071f0a46b/simsignals_v2_zipped/primary_small.zip # # Index: # https://dal.objectstorage.open.softlayer.com/v1/AUTH_cdbef52bdf7a449c96936e1071f0a46b/simsignals_files/public_list_primary_v2_small_1june_2017.csv # # ### Primary Medium # # Data: # 1. https://dal.objectstorage.open.softlayer.com/v1/AUTH_cdbef52bdf7a449c96936e1071f0a46b/simsignals_v2_zipped/primary_medium_1.zip # # 2. https://dal.objectstorage.open.softlayer.com/v1/AUTH_cdbef52bdf7a449c96936e1071f0a46b/simsignals_v2_zipped/primary_medium_2.zip # # 3. https://dal.objectstorage.open.softlayer.com/v1/AUTH_cdbef52bdf7a449c96936e1071f0a46b/simsignals_v2_zipped/primary_medium_3.zip # # 4. https://dal.objectstorage.open.softlayer.com/v1/AUTH_cdbef52bdf7a449c96936e1071f0a46b/simsignals_v2_zipped/primary_medium_4.zip # # 5. https://dal.objectstorage.open.softlayer.com/v1/AUTH_cdbef52bdf7a449c96936e1071f0a46b/simsignals_v2_zipped/primary_medium_5.zip # # 6. https://dal.objectstorage.open.softlayer.com/v1/AUTH_cdbef52bdf7a449c96936e1071f0a46b/simsignals_v2_zipped/primary_medium_6.zip # # Index: https://dal.objectstorage.open.softlayer.com/v1/AUTH_cdbef52bdf7a449c96936e1071f0a46b/simsignals_files/public_list_primary_v2_medium_1june_2017.csv # # ## Test Data Set # # There is one `primary_test` data set. Each data file is the same as the above training data except the JSON header does NOT contain the 'signal_classification' key. # # * All 7 classes # * Roughly 1000 simulations per class (+- 50) (7014 total files) # * JSON header with UUID only # * Available as single zip file # * ~2 GB in total # # ### Direct Download Link # # Data: https://dal.objectstorage.open.softlayer.com/v1/AUTH_cdbef52bdf7a449c96936e1071f0a46b/simsignals_v2_zipped/primary_testset.zip # # Index: https://dal.objectstorage.open.softlayer.com/v1/AUTH_cdbef52bdf7a449c96936e1071f0a46b/simsignals_files/public_list_primary_testset_1k_1june_2017.csv # # ### Submitting Classification Results # # See the [Judging Criteria](../Judging_Criteria.ipynb) notebook for information on submitting your test-set classifications. # + [markdown] deletable=true editable=true # # Programmatically Accessing the Data # # The data are stored in `containers` on IBM Object Storage. You can access these data with HTTP calls. Here we use system level `curl`, but you could easily use the Python `requests` package. 
# # The URL for all data files is composed of # # `base_url/container/objectname`. # # The `base_url` is: # + deletable=true editable=true #If you are running this in IBM Apache Spark (via Data Science Experience) base_url = 'https://dal05.objectstorage.service.networklayer.com/v1/AUTH_cdbef52bdf7a449c96936e1071f0a46b' #ELSE, if you are outside of IBM: #base_url = 'https://dal.objectstorage.open.softlayer.com/v1/AUTH_cdbef52bdf7a449c96936e1071f0a46b' #NOTE: if you are outside of IBM, pulling down data will be slower. :/ # + deletable=true editable=true #Defining a local data folder to dump data import os mydatafolder = os.path.join( os.environ['PWD'], 'my_data_folder' ) if os.path.exists(mydatafolder) is False: os.makedirs(mydatafolder) # + [markdown] deletable=true editable=true # ## Basic Data Set # # We'll start with the basic data set. Because the basic data set is small, we've created a `.zip` file of the full data set that you can download directly. # + deletable=true editable=true import os # + deletable=true editable=true basic_container = 'simsignals_basic_v2' basic4_zip_file = 'basic4.zip' # + deletable=true editable=true os.system('curl {}/{}/{} > {}'.format(base_url, basic_container, basic4_zip_file, mydatafolder + '/' + basic4_zip_file)) # + deletable=true editable=true # !ls -al my_data_folder/basic4.zip # + [markdown] deletable=true editable=true # ## Primary Data Set # # # ### Primary Small # # The `primary_small` subset can be found in a zip file in # * contianer = 'simignals_v2_zipped' # * objectname = 'primary_small.zip' # + deletable=true editable=true filename = 'primary_small.zip' primary_small_url = '{}/simsignals_v2_zipped/{}'.format(base_url, filename) os.system('curl {} > {}'.format(primary_small_url, mydatafolder +'/'+filename)) # + [markdown] deletable=true editable=true # ###### Primary Small Index File # # A CSV file containing the UUID, signal classifications for each file in the `primary_small` subset. # + deletable=true editable=true filename = 'public_list_primary_v2_small_1june_2017.csv' primary_small_csv_url = '{}/simsignals_files/{}'.format(base_url, filename) os.system('curl {} > {}'.format(primary_small_csv_url, mydatafolder +'/'+filename)) # + [markdown] deletable=true editable=true # ### Primary Medium # # Similarly, the `primary_medium` subset can be found in a handful of zip files # # * contianer = 'simignals_v2_zipped' # * objectname = 'primary_medium_N.zip' # * for N = 1, 2, 3, 4, 5, 6 # + deletable=true editable=true med_N = '{}/simsignals_v2_zipped/primary_medium_{}.zip' for i in range(1,7): med_url = med_N.format(base_url, i) output_file = mydatafolder + '/primary_medium_{}.zip'.format(i) print 'GETing', output_file os.system('curl {} > {}'.format(med_url, output_file )) # + [markdown] deletable=true editable=true # ###### Primary Medium Index File # # Here too, there is a CSV file containing the UUID, signal classifications for each file in the `primary_medium` subset. # + deletable=true editable=true filename = 'public_list_primary_v2_medium_1june_2017.csv' med_csv_url = '{}/simsignals_files/{}'.format(base_url, filename) os.system('curl {} > {}'.format(med_csv_url, mydatafolder +'/'+filename)) # + [markdown] deletable=true editable=true # ### Primary Full set # # Because the full set is so incredibly large, we only have these 350,000 files available individually on object storage. 
# # The `primary_full` list can be found here: # + deletable=true editable=true filename = 'public_list_primary_v2_full_1june_2017.csv' prim_full = '{}/simsignals_files/{}'.format(base_url, filename) os.system('curl {} > {}'.format(prim_full, mydatafolder +'/'+filename)) # + [markdown] deletable=true editable=true # One can download this list and begin to pull down files individually if desired. Warning, **however, this will take approximately a billion years if you are not running on IBM Apache Spark** -- IBM Apache Spark and Object Storage exist in the same data center and share a fast network connection. # # The data are found in # # `base_url/simsignals_v2/<uuid>.dat` # # For example: # # https://dal.objectstorage.open.softlayer.com/v1/AUTH_cdbef52bdf7a449c96936e1071f0a46b/simsignals_v2/aa7d082f-9263-4533-a9d4-5595c5cdde25.dat # # **We are working to make the primary full data set more easily, as this current setup is less than ideal. You will be notified if that becomes available. The data will be directly available for participants of the hackathon, however.** # # If you wish to programmatically begin to download the full data set you may use the following code. # + deletable=true editable=true import requests import copy # + deletable=true editable=true file_list_container = 'simsignals_files' file_list = 'public_list_primary_v2_full_1june_2017.csv' primary_data_container = 'simsignals_v2' # + deletable=true editable=true r = requests.get('{}/{}/{}'.format(base_url, file_list_container, file_list), timeout=(9.0, 21.0)) filecontents = copy.copy(r.content) # + deletable=true editable=true full_primary_files = [line.split(',') for line in filecontents.split('\n')] full_primary_files = full_primary_files[1:-1] #strip the header and empty last element full_primary_files = map(lambda x: x[0]+".dat", full_primary_files) #now list of file names (<uuid>.dat) # + deletable=true editable=true #save your data into a local subfolder save_to_folder = mydatafolder + '/primary_data_set' if os.path.exists(save_to_folder) is False: os.mkdir(save_to_folder) # + deletable=true editable=true count = 0 total = len(full_primary_files) for row in full_primary_files: r = requests.get('{}/{}/{}'.format(base_url, primary_data_container, row), timeout=(9.0, 21.0)) if count % 100 == 0: print 'done ', count, ' out of ', total count += 1 with open('{}/{}'.format(save_to_folder, row), 'w' ) as fout: fout.write(r.content) # + [markdown] deletable=true editable=true # ### This is really a lot of data # # This will be a difficult data set to consume and process if you are using free-tier levels of software from any Cloud provider. You will likely want to have a robust machine, or sets of machines, with many threads and GPUs if you want to train models with such a large dat set. # # For example, if you have access to an IBM Spark Enterprise cluster, because the network connection between IBM Spark and IBM Object Storage is so fast, we recommend that you **do NOT** download each file. Instead you could parallelize the index file and then retrieve and process each file on a worker node. 
# + deletable=true editable=true ## Using Spark -- can parallelize the job across your worker nodes import ibmseti def retrieve_and_process(row): try: r = requests.get('{}/{}/{}'.format(base_url, primary_data_container, row), timeout=(9.0, 21.0)) except Exception as e: return (row, 'failed', []) aca = ibmseti.compamp.SimCompamp(r.content) spectrogram = aca.get_spectrogram() # or do something else features = my_feature_extractor(spectrogram) #example external function for reducing the spectrogram into a handful of features, perhaps signal_class = aca.header()['signal_classifiation'] return (row, signal_class, features) npartitions = 60 rdd = sc.parallelize(full_primary_files, npartitions) #Now ask Spark to run the job process_results = rdd.map(retrieve_and_process).collect() # + [markdown] deletable=true editable=true # # Test Data Set # # Once you've trained your model, done all of your testing, and tweaks and are ready to submit an entry to the contest, you'll need to download the test data set and apply your model to that. # # The test data set is similar to the labeled data, except that the JSON header is missing the 'signal_classification' key, and just contains the 'uuid'. # # Like the other sets, this set is found in a `.zip` file in the `simsignals_v2_zipped` container; # + deletable=true editable=true filename = 'primary_testset.zip' test_set_url = '{}/simsignals_v2_zipped/{}'.format(base_url, filename) os.system('curl {} > {}'.format(test_set_url, mydatafolder +'/'+filename)) # + [markdown] deletable=true editable=true # There are approximately 1000 simulations of each of the 7 signal classes -- but not exactly 1000 (+- some largeish number) so you can't cheat :). # # See the [Judging Criteria document](https://github.com/setiQuest/ML4SETI/blob/master/JudgingCriteria.ipynb) for more details. # + deletable=true editable=true filename = 'public_list_primary_testset_1k_1june_2017.csv' test_set_csv_url = '{}/simsignals_files/{}'.format(base_url, filename) os.system('curl {} > {}'.format(test_set_csv_url, mydatafolder + '/' + filename))
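# + [markdown] deletable=true editable=true
# Once a `.dat` file from any of the sets above is on disk, it can be inspected locally in
# the same way as in the Spark example. The cell below is a small sketch, not part of the
# original tutorial; the file name is hypothetical and should be replaced with one of the
# files you actually downloaded.

# + deletable=true editable=true
import ibmseti
import matplotlib.pyplot as plt

# hypothetical local copy of one simulation file
example_file = os.path.join(save_to_folder, 'aa7d082f-9263-4533-a9d4-5595c5cdde25.dat')

with open(example_file, 'rb') as fin:
    aca = ibmseti.compamp.SimCompamp(fin.read())

print aca.header()   # JSON header: UUID (and signal_classification for training data)

spectrogram = aca.get_spectrogram()
plt.imshow(spectrogram, aspect='auto')
plt.show()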
tutorials/Step_1_Get_Data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + id="FZgP6jwmzFtZ" papermill={"duration": 0.024072, "end_time": "2022-02-01T21:25:10.441743", "exception": false, "start_time": "2022-02-01T21:25:10.417671", "status": "completed"} tags=[] import pandas as pd import matplotlib.pyplot as plt import time import os.path import warnings warnings.filterwarnings('ignore') # + id="TIiMsyouzFth" papermill={"duration": 18.188334, "end_time": "2022-02-01T21:25:28.639112", "exception": false, "start_time": "2022-02-01T21:25:10.450778", "status": "completed"} tags=[] # install DenMune clustering algorithm using pip command from the offecial Python repository, PyPi # from https://pypi.org/project/denmune/ # !pip install denmune # then import it from denmune import DenMune # + id="ezFJy4kJSCH-" outputId="5cdc60a7-ff89-4a50-a971-dbac7dfb4ce1" papermill={"duration": 7.543733, "end_time": "2022-02-01T21:25:36.201249", "exception": false, "start_time": "2022-02-01T21:25:28.657516", "status": "completed"} tags=[] # clone datasets from our repository datasets if not os.path.exists('datasets'): # !git clone https://github.com/egy1st/datasets # + id="CiMDjLQJzFtj" outputId="9ecf7d40-83a7-4b72-f25f-6d84badba29b" papermill={"duration": 1253.287405, "end_time": "2022-02-01T21:46:29.511451", "exception": false, "start_time": "2022-02-01T21:25:36.224046", "status": "completed"} tags=[] data_path = 'datasets/denmune/chameleon/' chameleon_dataset = "t7.10k" #["t4.8k", "t5.8k", "t7.10k", "t8.8k"] # train file detected_clusers = [] noise_type1 = [] noise_type2 = [] data_file = data_path + chameleon_dataset + '.csv' X_train = pd.read_csv(data_file, sep=',', header=None) from IPython.display import clear_output for knn in range (1, 100, 1): print ("knn", knn ) clear_output(wait=True) dm = DenMune(train_data=X_train, k_nearest=knn, rgn_tsne=False ) labels, validity = dm.fit_predict(show_analyzer=False) n_clusters = dm.analyzer['n_clusters']['detected'] pre_noise = dm.analyzer['n_points']['noise']['type-1'] post_noise = dm.analyzer['n_points']['noise']['type-2'] detected_clusers.append([knn, n_clusters ]) noise_type1.append([knn, pre_noise ]) noise_type2.append([knn, post_noise ]) print('knn:',knn , ' :: we detected', n_clusters, 'clusters:' , ' :: pre-noise:', pre_noise, 'post_noise', post_noise) time.sleep(0.2) # + id="h5E971-oTzag" outputId="1bc37681-b07c-482f-a081-e3cfbe5f96b9" papermill={"duration": 0.307764, "end_time": "2022-02-01T21:46:29.844219", "exception": false, "start_time": "2022-02-01T21:46:29.536455", "status": "completed"} tags=[] x, y = zip(*detected_clusers) f1 = plt.figure(1) # Creating figure and axis objects using subplots() fig, ax = plt.subplots(figsize=[20, 8]) ax.plot(x, y, marker='.', linewidth=2, label='evolution of detected clusters') plt.xticks(rotation=60) ax.set_xlabel('k-nearest neighbor') ax.set_ylabel('number of detected clusters') plt.legend() plt.show() # + id="Aj3EF_5RU-ku" outputId="c1b0c7c9-4412-416c-8148-91627884e5fb" papermill={"duration": 0.285614, "end_time": "2022-02-01T21:46:30.157377", "exception": false, "start_time": "2022-02-01T21:46:29.871763", "status": "completed"} tags=[] x, y = zip(*noise_type1) # Creating figure and axis objects using subplots() fig, ax = plt.subplots(figsize=[20, 8]) ax.plot(x, y, marker='.', linewidth=2, label='Noise detection') plt.xticks(rotation=60) ax.set_xlabel('k-nearest 
neighbor') ax.set_ylabel('pre-identified noise') plt.legend() plt.show() # + id="mRya0DSBax5n" outputId="43e99ffd-4cff-4bf7-d205-261d2673dbe3" papermill={"duration": 0.291319, "end_time": "2022-02-01T21:46:30.476934", "exception": false, "start_time": "2022-02-01T21:46:30.185615", "status": "completed"} tags=[] x, y = zip(*noise_type2) # Creating figure and axis objects using subplots() fig, ax = plt.subplots(figsize=[20, 8]) ax.plot(x, y, marker='.', linewidth=2, label='Noise detection') plt.xticks(rotation=60) ax.set_xlabel('k-nearest neighbor') ax.set_ylabel('post-identified noise') plt.legend() plt.show() # + id="mfgJkB-ocQI2" outputId="65212151-d3d3-4704-e22c-5418878f52e6" papermill={"duration": 0.271581, "end_time": "2022-02-01T21:46:30.779211", "exception": false, "start_time": "2022-02-01T21:46:30.507630", "status": "completed"} tags=[] # Creating figure and axis objects using subplots() fig, ax = plt.subplots(figsize=[20, 8]) x, y = zip(*detected_clusers) ax.plot(x, y, marker='.', linewidth=2, label='detected clusters') x, y = zip(*noise_type1) ax.plot(x, y, marker='.', linewidth=2, label='pre-identified noise') x, y = zip(*noise_type2) ax.plot(x, y, marker='.', linewidth=2, label='post-identified noise') plt.xticks(rotation=60) ax.set_xlabel('k-nearest neighbor') ax.set_ylabel('pre-identified noise') plt.legend() plt.show()
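# A single-run sketch (added for illustration): the same calls as in the loop above, but
# for one fixed k, which is handy when probing an individual setting. The value k=39 is an
# arbitrary example.

# +
k = 39
dm_single = DenMune(train_data=X_train, k_nearest=k, rgn_tsne=False)
labels, validity = dm_single.fit_predict(show_analyzer=False)

print('k:', k,
      '| clusters:', dm_single.analyzer['n_clusters']['detected'],
      '| pre-noise:', dm_single.analyzer['n_points']['noise']['type-1'],
      '| post-noise:', dm_single.analyzer['n_points']['noise']['type-2'])
# -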
kaggle/k-nearest-evolution.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Problem
# You are given an array of integers. Each integer represents a jump of its value in the array.
#
# For instance, the integer 2 represents two forward jumps in the array, while -3 represents 3 jumps backwards in the array.
#
# If a jump spills past the array bounds, it wraps over to the other side.
# For example, a jump of -1 from the first index brings you back to the last index of the array.
#
# Write a function that returns a boolean indicating whether the jumps in the array form a single cycle. A single cycle occurs if, starting from any index, the jumps visit each element exactly once before landing back on the starting index.

# +
# solution: O(n) time | O(1) space
def has_single_cycle(array):
    elements_visited = 0
    current_index = 0
    while elements_visited < len(array):
        if elements_visited > 0 and current_index == 0:
            return False
        elements_visited += 1
        current_index = get_next_index(current_index, array)
    return current_index == 0


def get_next_index(current_idx, array):
    jump = array[current_idx]
    next_index = (current_idx + jump) % len(array)
    if next_index >= 0:
        return next_index
    else:
        # change a negative index into the equivalent positive index on the array
        return next_index + len(array)
# -

has_single_cycle([2, -1, 1, -2])

# The time complexity is O(n) since we have a single while loop that iterates through the entire array of length n,
# while the space complexity is O(1) constant space, since we are not storing any auxiliary array, just a couple of variables.
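# A few extra sanity checks (added for illustration) against the implementation above.
assert has_single_cycle([2, 3, 1, -4, -4, 2])   # visits every index exactly once before returning to index 0
assert not has_single_cycle([2, 1, -1])         # ends on index 2 instead of landing back on the start
assert has_single_cycle([1])                    # a single element jumping onto itself forms a cycle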
graphs/single_cycle_check.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import utility import matplotlib.pyplot as plt import math import logging import data_loader from utility import Position from pathlib import Path from evaluator import GoMapEvaluator from IPython.display import display from multiprocessing import Pool, cpu_count from pandas import DataFrame from GoMapClustering import AngleBalancingDBSCAN, AngleMetricDBSCAN, DBSCANFCM4DD, DBSCANx2, DBSCAN from typing import List from tqdm.auto import trange, tqdm from sklearn import metrics # - # # Load Data # + logging.basicConfig(stream=sys.stdout, level=logging.INFO) # data_frame = data_loader.load_aal_detections() # truths = data_loader.load_aal_truths() data_frame = data_loader.load_gomap_test_detections() truths = data_loader.load_gomap_test_truths() # - results = DataFrame() def test(data: DataFrame, cluster_algo): approach_name = type(cluster_algo).__name__ print(f'Testing approach: {approach_name}') result = DataFrame() df = utility.cluster(data, cluster_algo) # Find the max trip count for a traffic sign max_trip_count = df.groupby(['cid', 'classifier']).agg(trip_count=('trip_id', 'nunique'))['trip_count'].max() df = df.sort_values('trip_start_time') for i in range(1, max_trip_count + 1): subset = DataFrame() print(f'\rAllowing {i} trip(s)', end='') for (cid, classifier), data in df.groupby(['cid', 'classifier']): trips = data['trip_id'].unique()[:i] # Only use data from i newest trips data = data[data['trip_id'].isin(trips)] subset = subset.append(data) predictions = utility.get_predictions(subset, cluster_algo, utility.compute_cluster_centroid) evaluation = GoMapEvaluator(predictions, truths).evaluate() metrics = { 'name': approach_name, 'max_trip_count': i, 'min_trip_count': predictions['trip_count'].min(), 'f1': evaluation.f1, 'rmse_location': evaluation.rmse_location, 'mae_location': evaluation.mae_location, 'rmse_angle': evaluation.rmse_direction_degrees, 'mae_angle': evaluation.mae_direction_degrees } result = result.append(metrics, ignore_index=True) print() return result for approach in utility.get_approaches(): result = test(data_frame, approach) results = results.append(result) # + figsize = (8, 5) results.pivot(index='max_trip_count', columns='name', values='f1')\ .plot(xlabel='Max. Trip Count', xlim=(0), ylabel='F1', figsize=figsize) results.pivot(index='max_trip_count', columns='name', values='f1')\ .plot(xlabel='Max. Trip Count', xlim=(1, 10), xticks=range(11), ylabel='F1', figsize=figsize) results.pivot(index='max_trip_count', columns='name', values='rmse_location')\ .plot(xlabel='Max. Trip Count', xlim=(0), ylabel='RMSE Location', figsize=figsize) results.pivot(index='max_trip_count', columns='name', values='mae_location')\ .plot(xlabel='Max. Trip Count', xlim=(0), ylabel='MAE Location', figsize=figsize) results.pivot(index='max_trip_count', columns='name', values='rmse_angle')\ .plot(xlabel='Max. Trip Count', xlim=(0), ylabel='RMSE Angle', figsize=figsize) results.pivot(index='max_trip_count', columns='name', values='mae_angle')\ .plot(xlabel='Max. 
Trip Count', xlim=(0), ylabel='MAE Angle', figsize=figsize) # + output = Path() / 'output' output.mkdir(parents=True, exist_ok=True) results.pivot(index='max_trip_count', columns='name', values='f1').to_csv(output.joinpath('f1_vs_max_trip.csv')) results.pivot(index='max_trip_count', columns='name', values='mae_location').to_csv(output.joinpath('mae_location_vs_max_trip.csv')) results.pivot(index='max_trip_count', columns='name', values='mae_angle').to_csv(output.joinpath('mae_angle_vs_max_trip.csv'))
src/notebooks/trips.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] nbsphinx="hidden" # # Quantization of Signals # # *This jupyter notebook is part of a [collection of notebooks](../index.ipynb) on various topics of Digital Signal Processing. Please direct questions and suggestions to [<EMAIL>](mailto:<EMAIL>).* # - # ## Spectral Shaping of the Quantization Noise # # The quantized signal $x_Q[k]$ can be expressed by the continuous amplitude signal $x[k]$ and the quantization error $e[k]$ as # # \begin{equation} # x_Q[k] = \mathcal{Q} \{ x[k] \} = x[k] + e[k] # \end{equation} # # According to the [introduced model](linear_uniform_quantization_error.ipynb#Model-for-the-Quantization-Error), the quantization noise can be modeled as uniformly distributed white noise. Hence, the noise is distributed over the entire frequency range. The basic concept of [noise shaping](https://en.wikipedia.org/wiki/Noise_shaping) is a feedback of the quantization error to the input of the quantizer. This way the spectral characteristics of the quantization noise can be modified, i.e. spectrally shaped. Introducing a generic filter $h[k]$ into the feedback loop yields the following structure # # ![Feedback structure for noise shaping](noise_shaping.png) # # The quantized signal can be deduced from the block diagram above as # # \begin{equation} # x_Q[k] = \mathcal{Q} \{ x[k] - e[k] * h[k] \} = x[k] + e[k] - e[k] * h[k] # \end{equation} # # where the additive noise model from above has been introduced and it has been assumed that the impulse response $h[k]$ is normalized such that the magnitude of $e[k] * h[k]$ is below the quantization step $Q$. The overall quantization error is then # # \begin{equation} # e_H[k] = x_Q[k] - x[k] = e[k] * (\delta[k] - h[k]) # \end{equation} # # The power spectral density (PSD) of the quantization error with noise shaping is calculated to # # \begin{equation} # \Phi_{e_H e_H}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) = \Phi_{ee}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) \cdot \left| 1 - H(\mathrm{e}^{\,\mathrm{j}\,\Omega}) \right|^2 # \end{equation} # # Hence the PSD $\Phi_{ee}(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ of the quantizer without noise shaping is weighted by $| 1 - H(\mathrm{e}^{\,\mathrm{j}\,\Omega}) |^2$. Noise shaping allows a spectral modification of the quantization error. The desired shaping depends on the application scenario. For some applications, high-frequency noise is less disturbing as low-frequency noise. # ### Example - First-Order Noise Shaping # # If the feedback of the error signal is delayed by one sample we get with $h[k] = \delta[k-1]$ # # \begin{equation} # \Phi_{e_H e_H}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) = \Phi_{ee}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) \cdot \left| 1 - \mathrm{e}^{\,-\mathrm{j}\,\Omega} \right|^2 # \end{equation} # # For linear uniform quantization $\Phi_{ee}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) = \sigma_e^2$ is constant. Hence, the spectral shaping constitutes a high-pass characteristic of first order. The following simulation evaluates the noise shaping quantizer of first order. 
# + # %matplotlib inline import numpy as np import matplotlib.pyplot as plt import scipy.signal as sig w = 8 # wordlength of the quantized signal xmin = -1 # minimum of input signal N = 32768 # number of samples def uniform_midtread_quantizer_w_ns(x, Q): # limiter x = np.copy(x) idx = np.where(x <= -1) x[idx] = -1 idx = np.where(x > 1 - Q) x[idx] = 1 - Q # linear uniform quantization with noise shaping xQ = Q * np.floor(x/Q + 1/2) e = xQ - x xQ = xQ - np.concatenate(([0], e[0:-1])) return xQ[1:] # quantization step Q = 1/(2**(w-1)) # compute input signal np.random.seed(5) x = np.random.uniform(size=N, low=xmin, high=(-xmin-Q)) # quantize signal xQ = uniform_midtread_quantizer_w_ns(x, Q) e = xQ - x[1:] # estimate PSD of error signal nf, Pee = sig.welch(e, nperseg=64) # estimate SNR SNR = 10*np.log10((np.var(x)/np.var(e))) print('SNR = {:2.1f} dB'.format(SNR)) plt.figure(figsize=(10,5)) Om = nf*2*np.pi plt.plot(Om, Pee*6/Q**2, label='estimated PSD') plt.plot(Om, np.abs(1 - np.exp(-1j*Om))**2, label='theoretic PSD') plt.plot(Om, np.ones(Om.shape), label='PSD w/o noise shaping') plt.title('PSD of quantization error') plt.xlabel(r'$\Omega$') plt.ylabel(r'$\hat{\Phi}_{e_H e_H}(e^{j \Omega}) / \sigma_e^2$') plt.axis([0, np.pi, 0, 4.5]); plt.legend(loc='upper left') plt.grid() # - # **Exercise** # # * The overall average SNR is lower than for the quantizer without noise shaping. Why? # # Solution: The average power per frequency is lower that without noise shaping for frequencies below $\Omega \approx \pi$. However, this comes at the cost of a larger average power per frequency for frequencies above $\Omega \approx \pi$. The average power of the quantization noise is given as the integral over the PSD of the quantization noise. It is larger for noise shaping and the resulting SNR is consequently lower. Noise shaping is nevertheless beneficial in applications where a lower quantization error in a limited frequency region is desired. # + [markdown] nbsphinx="hidden" # **Copyright** # # This notebook is provided as [Open Educational Resource](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *<NAME>, Digital Signal Processing - Lecture notes featuring computational examples, 2016-2018*.
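# To make the comparison in the exercise above concrete, the sketch below quantizes the
# same input with a plain uniform midtread quantizer (no noise shaping) and prints its
# SNR. This is an added illustration, reusing `x` and `Q` from above.

# +
def uniform_midtread_quantizer(x, Q):
    # limiter
    x = np.copy(x)
    x[x <= -1] = -1
    x[x > 1 - Q] = 1 - Q
    # linear uniform quantization without noise shaping
    return Q * np.floor(x/Q + 1/2)


xQ_plain = uniform_midtread_quantizer(x, Q)
e_plain = xQ_plain - x
SNR_plain = 10*np.log10(np.var(x)/np.var(e_plain))
print('SNR without noise shaping = {:2.1f} dB'.format(SNR_plain))
# -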
quantization/noise_shaping.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] button=false new_sheet=false run_control={"read_only": false} # # How Debuggers Work # # Interactive _debuggers_ are tools that allow you to selectively observe the program state during an execution. In this chapter, you will learn how such debuggers work – by building your own debugger. # - from bookutils import YouTubeVideo YouTubeVideo("4aZ0t7CWSjA") # + [markdown] button=false new_sheet=false run_control={"read_only": false} # **Prerequisites** # # * You should have read the [Chapter on Tracing Executions](Tracer.ipynb). # * Again, knowing a bit of _Python_ is helpful for understanding the code examples in the book. # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"} import bookutils # - import sys from Tracer import Tracer # + [markdown] slideshow={"slide_type": "skip"} # ## Synopsis # <!-- Automatically generated. Do not edit. --> # # To [use the code provided in this chapter](Importing.ipynb), write # # ```python # >>> from debuggingbook.Debugger import <identifier> # ``` # # and then make use of the following features. # # # This chapter provides an interactive debugger for Python functions. The debugger is invoked as # # ```python # with Debugger(): # function_to_be_observed() # ... # ``` # While running, you can enter _debugger commands_ at the `(debugger)` prompt. Here's an example session: # # ```python # >>> with Debugger(): # >>> ret = remove_html_markup('abc') # ``` # The `Debugger` class can be easily extended in subclasses. A new method `NAME_command(self, arg)` will be invoked whenever a command named `NAME` is entered, with `arg` holding given command arguments (empty string if none). # # # + [markdown] button=false new_sheet=true run_control={"read_only": false} # ## Debuggers # # _Interactive Debuggers_ (or short *debuggers*) are tools that allow you to observe program executions. A debugger typically offers the following features: # # * _Run_ the program # * Define _conditions_ under which the execution should _stop_ and hand over control to the debugger. Conditions include # * a particular location is reached # * a particular variable takes a particular value # * a particular variables is accessed # * or some other condition of choice. # * When the program stops, you can _observe_ the current state, including # * the current location # * variables and their values # * the current function and its callers # * When the program stops, you can _step_ through program execution, having it stop at the next instruction again. # * Finally, you can also _resume_ execution to the next stop. # + [markdown] button=false new_sheet=true run_control={"read_only": false} # This functionality often comes as a _command-line interface_, typing commands at a prompt; or as a _graphical user interface_, selecting commands from the screen. Debuggers can come as standalone tools, or be integrated into a programming environment of choice. # + [markdown] button=false new_sheet=true run_control={"read_only": false} # Debugger interaction typically follows a _loop_ pattern. First, you identify the location(s) you want to inspect, and tell the debugger to stop execution once one of these _breakpoints_ is reached. 
Here's a command that could instruct a command-line debugger to stop at Line 239: # # ``` # (debugger) break 239 # (debugger) _ # ``` # # Then you have the debugger resume or start execution. The debugger will stop at the given location. # # ``` # (debugger) continue # Line 239: s = x # (debugger) _ # ``` # # When it stops at the given location, you use debugger commands to inspect the state (and check whether things are as expected). # # ``` # (debugger) print s # s = 'abc' # (debugger) _ # ``` # # You can then step through the program, executing more lines. # # ``` # (debugger) step # Line 240: c = s[0] # (debugger) print c # c = 'a' # (debugger) _ # ``` # # You can also define new stop conditions, investigating other locations, variables, and conditions. # - # ## Debugger Interaction # # Let us now show how to build such a debugger. The key idea of an _interactive_ debugger is to set up the _tracing function_ such that it actually _asks_ what to do next, prompting you to enter a _command_. For the sake of simplicity, we collect such a command interactively from a command line, using the Python `input()` function. # Our debugger holds a number of variables to indicate its current status: # * `stepping` is True whenever the user wants to step into the next line. # * `breakpoints` is a set of breakpoints (line numbers) # * `interact` is True while the user stays at one position. # # We also store the current tracing information in three attributes `frame`, `event`, and `arg`. The variable `local_vars` holds local variables. from types import FrameType # ignore from typing import Any, Optional, Callable, Dict, List, Tuple, Set, TextIO class Debugger(Tracer): """Interactive Debugger""" def __init__(self, *, file: TextIO = sys.stdout) -> None: """Create a new interactive debugger.""" self.stepping: bool = True self.breakpoints: Set[int] = set() self.interact: bool = True self.frame: FrameType self.event: Optional[str] = None self.arg: Any = None self.local_vars: Dict[str, Any] = {} super().__init__(file=file) # The `traceit()` method is the main entry point for our debugger. If we should stop, we go into user interaction. class Debugger(Debugger): def traceit(self, frame: FrameType, event: str, arg: Any) -> None: """Tracing function; called at every line. To be overloaded in subclasses.""" self.frame = frame self.local_vars = frame.f_locals # Dereference exactly once self.event = event self.arg = arg if self.stop_here(): self.interaction_loop() # We stop whenever we are stepping through the program or reach a breakpoint: class Debugger(Debugger): def stop_here(self) -> bool: """Return True if we should stop""" return self.stepping or self.frame.f_lineno in self.breakpoints # Our interaction loop shows the current status, reads in commands, and executes them. class Debugger(Debugger): def interaction_loop(self) -> None: """Interact with the user""" self.print_debugger_status(self.frame, self.event, self.arg) # type: ignore self.interact = True while self.interact: command = input("(debugger) ") self.execute(command) # type: ignore # For a moment, let us implement two commands, `step` and `continue`. `step` steps through the program: class Debugger(Debugger): def step_command(self, arg: str = "") -> None: """Execute up to the next line""" self.stepping = True self.interact = False class Debugger(Debugger): def continue_command(self, arg: str = "") -> None: """Resume execution""" self.stepping = False self.interact = False # The `execute()` method dispatches between these two. 
class Debugger(Debugger): def execute(self, command: str) -> None: if command.startswith('s'): self.step_command() elif command.startswith('c'): self.continue_command() # Our debugger is now ready to run! Let us invoke it on the buggy `remove_html_markup()` variant from the [Introduction to Debugging](Intro_Debugging.ipynb): def remove_html_markup(s): # type: ignore tag = False quote = False out = "" for c in s: if c == '<' and not quote: tag = True elif c == '>' and not quote: tag = False elif c == '"' or c == "'" and tag: quote = not quote elif not tag: out = out + c return out # We invoke the debugger just like `Tracer`, using a `with` clause. The code # # ```python # with Debugger(): # remove_html_markup('abc') # ``` # gives us a debugger prompt # ``` # (debugger) _ # ``` # where we can enter one of our two commands. # Let us do two steps through the program and then resume execution: from bookutils import input, next_inputs # ignore next_inputs(["step", "step", "continue"]) with Debugger(): remove_html_markup('abc') # ignore assert not next_inputs() # Try this out for yourself by running the above invocation in the interactive notebook! If you are reading the Web version, the top menu entry `Resources` -> `Edit as Notebook` will do the trick. Navigate to the above invocation and press `Shift`+`Enter`. # ### A Command Dispatcher # Our `execute()` function is still a bit rudimentary. A true command-line tool should provide means to tell which commands are available (`help`), automatically split arguments, and not stand in line of extensibility. # We therefore implement a better `execute()` method which does all that. Our revised `execute()` method _inspects_ its class for methods that end in `_command()`, and automatically registers their names as commands. Hence, with the above, we already get `step` and `continue` as possible commands. # ### Excursion: Implementing execute() # Let us detail how we implement `execute()`. The `commands()` method returns a list of all commands (as strings) from the class. class Debugger(Debugger): def commands(self) -> List[str]: """Return a list of commands""" cmds = [method.replace('_command', '') for method in dir(self.__class__) if method.endswith('_command')] cmds.sort() return cmds d = Debugger() d.commands() # The `command_method()` method converts a given command (or its abbrevation) into a method to be called. class Debugger(Debugger): def help_command(self, command: str) -> None: ... def command_method(self, command: str) -> Optional[Callable[[str], None]]: """Convert `command` into the method to be called. If the method is not found, return `None` instead.""" if command.startswith('#'): return None # Comment possible_cmds = [possible_cmd for possible_cmd in self.commands() if possible_cmd.startswith(command)] if len(possible_cmds) != 1: self.help_command(command) return None cmd = possible_cmds[0] return getattr(self, cmd + '_command') d = Debugger() d.command_method("step") d = Debugger() d.command_method("s") # The revised `execute()` method now determines this method and executes it with the given argument. class Debugger(Debugger): def execute(self, command: str) -> None: """Execute `command`""" sep = command.find(' ') if sep > 0: cmd = command[:sep].strip() arg = command[sep + 1:].strip() else: cmd = command.strip() arg = "" method = self.command_method(cmd) if method: method(arg) # If `command_method()` cannot find the command, or finds more than one matching the prefix, it invokes the `help` command providing additional assistance. 
`help` draws extra info on each command from its documentation string. class Debugger(Debugger): def help_command(self, command: str = "") -> None: """Give help on given `command`. If no command is given, give help on all""" if command: possible_cmds = [possible_cmd for possible_cmd in self.commands() if possible_cmd.startswith(command)] if len(possible_cmds) == 0: self.log(f"Unknown command {repr(command)}. Possible commands are:") possible_cmds = self.commands() elif len(possible_cmds) > 1: self.log(f"Ambiguous command {repr(command)}. Possible expansions are:") else: possible_cmds = self.commands() for cmd in possible_cmds: method = self.command_method(cmd) self.log(f"{cmd:10} -- {method.__doc__}") d = Debugger() d.execute("help") d = Debugger() d.execute("foo") # ### End of Excursion # ## Printing Values # # With `execute()`, we can now easily extend our class – all it takes is for a new command `NAME` is a new `NAME_command()` method. Let us start by providing a `print` command to print all variables. We use similar code as for the `Tracer` class in the [chapter on tracing](Tracer.ipynb). class Debugger(Debugger): def print_command(self, arg: str = "") -> None: """Print an expression. If no expression is given, print all variables""" vars = self.local_vars self.log("\n".join([f"{var} = {repr(value)}" for var, value in vars.items()])) # ignore next_inputs(["step", "step", "step", "print", "continue"]); with Debugger(): remove_html_markup('abc') # ignore assert not next_inputs() # Let us extend `print` such that if an argument is given, it only evaluates and prints out this argument. class Debugger(Debugger): def print_command(self, arg: str = "") -> None: """Print an expression. If no expression is given, print all variables""" vars = self.local_vars if not arg: self.log("\n".join([f"{var} = {repr(value)}" for var, value in vars.items()])) else: try: self.log(f"{arg} = {repr(eval(arg, globals(), vars))}") except Exception as err: self.log(f"{err.__class__.__name__}: {err}") # ignore next_inputs(["p s", "c"]); with Debugger(): remove_html_markup('abc') # ignore assert not next_inputs() # Note how we would abbreviate commands to speed things up. The argument to `print` can be any Python expression: # ignore next_inputs(["print (s[0], 2 + 2)", "continue"]); with Debugger(): remove_html_markup('abc') # Our `help` command also properly lists `print` as a possible command: # ignore next_inputs(["help print", "continue"]); with Debugger(): remove_html_markup('abc') # ignore assert not next_inputs() # ## Listing Source Code # # We implement a `list` command that shows the source code of the current function. import inspect from bookutils import getsourcelines # like inspect.getsourcelines(), but in color class Debugger(Debugger): def list_command(self, arg: str = "") -> None: """Show current function.""" source_lines, line_number = getsourcelines(self.frame.f_code) for line in source_lines: self.log(f'{line_number:4} {line}', end='') line_number += 1 # ignore next_inputs(["list", "continue"]); with Debugger(): remove_html_markup('abc') # ignore assert not next_inputs() # ## Setting Breakpoints # # Stepping through the program line by line is a bit cumbersome. We therefore implement _breakpoints_ – a set of lines that cause the program to be interrupted as soon as this line is met. class Debugger(Debugger): def break_command(self, arg: str = "") -> None: """Set a breakoint in given line. 
If no line is given, list all breakpoints""" if arg: self.breakpoints.add(int(arg)) self.log("Breakpoints:", self.breakpoints) # Here's an example, setting a breakpoint at the end of the loop: # ignore _, remove_html_markup_starting_line_number = \ inspect.getsourcelines(remove_html_markup) next_inputs([f"break {remove_html_markup_starting_line_number + 13}", "continue", "print", "continue", "continue", "continue"]); with Debugger(): remove_html_markup('abc') # ignore assert not next_inputs() from bookutils import quiz quiz("What happens if we enter the command `break 2 + 3`?", [ "A breakpoint is set in Line 2.", "A breakpoint is set in Line 5.", "Two breakpoints are set in Lines 2 and 3.", "The debugger raises a `ValueError` exception." ], '12345 % 7') # Try it out yourself by executing the above code block! # ## Deleting Breakpoints # # To delete breakpoints, we introduce a `delete` command: class Debugger(Debugger): def delete_command(self, arg: str = "") -> None: """Delete breakoint in line given by `arg`. Without given line, clear all breakpoints""" if arg: try: self.breakpoints.remove(int(arg)) except KeyError: self.log(f"No such breakpoint: {arg}") else: self.breakpoints = set() self.log("Breakpoints:", self.breakpoints) # ignore next_inputs([f"break {remove_html_markup_starting_line_number + 15}", "continue", "print", f"delete {remove_html_markup_starting_line_number + 15}", "continue"]); with Debugger(): remove_html_markup('abc') # ignore assert not next_inputs() quiz("What does the command `delete` (without argument) do?", [ "It deletes all breakpoints", "It deletes the source code", "It lists all breakpoints", "It stops execution" ], '[n for n in range(2 // 2, 2 * 2) if n % 2 / 2]' ) # ## Listings with Benefits # # Let us extend `list` a bit such that # # 1. it can also list a given function, and # 2. it shows the current line (`>`) as well as breakpoints (`#`) class Debugger(Debugger): def list_command(self, arg: str = "") -> None: """Show current function. If `arg` is given, show its source code.""" try: if arg: obj = eval(arg) source_lines, line_number = inspect.getsourcelines(obj) current_line = -1 else: source_lines, line_number = \ getsourcelines(self.frame.f_code) current_line = self.frame.f_lineno except Exception as err: self.log(f"{err.__class__.__name__}: {err}") source_lines = [] line_number = 0 for line in source_lines: spacer = ' ' if line_number == current_line: spacer = '>' elif line_number in self.breakpoints: spacer = '#' self.log(f'{line_number:4}{spacer} {line}', end='') line_number += 1 # ignore _, remove_html_markup_starting_line_number = \ inspect.getsourcelines(remove_html_markup) next_inputs([f"break {remove_html_markup_starting_line_number + 13}", "list", "continue", "delete", "list", "continue"]); with Debugger(): remove_html_markup('abc') # ignore assert not next_inputs() # ### Quitting # # In the Python debugger interface, we can only observe, but not alter the control flow. To make sure we can always exit out of our debugging session, we introduce a `quit` command that deletes all breakpoints and resumes execution until the observed function finishes. class Debugger(Debugger): def quit_command(self, arg: str = "") -> None: """Finish execution""" self.breakpoints = set() self.stepping = False self.interact = False # With this, our command palette is pretty complete, and we can use our debugger to happily inspect Python executions. 
# ignore next_inputs(["help", "quit"]); with Debugger(): remove_html_markup('abc') # ignore assert not next_inputs() # ## Synopsis # This chapter provides an interactive debugger for Python functions. The debugger is invoked as # # ```python # with Debugger(): # function_to_be_observed() # ... # ``` # While running, you can enter _debugger commands_ at the `(debugger)` prompt. Here's an example session: # ignore _, remove_html_markup_starting_line_number = \ inspect.getsourcelines(remove_html_markup) next_inputs(["help", f"break {remove_html_markup_starting_line_number + 13}", "list", "continue", "step", "print out", "quit"]); with Debugger(): ret = remove_html_markup('abc') # ignore assert not next_inputs() # The `Debugger` class can be easily extended in subclasses. A new method `NAME_command(self, arg)` will be invoked whenever a command named `NAME` is entered, with `arg` holding given command arguments (empty string if none). # ignore from ClassDiagram import display_class_hierarchy # ignore display_class_hierarchy(Debugger, public_methods=[ Tracer.__init__, Tracer.__enter__, Tracer.__exit__, Tracer.traceit, Debugger.__init__, ], project='debuggingbook') # + [markdown] button=false new_sheet=true run_control={"read_only": false} # ## Lessons Learned # # * _Debugging hooks_ from interpreted languages allow for simple interactive debugging. # * A command-line debugging framework can be very easily extended with additional functionality. # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ## Next Steps # # In the next chapter, we will see how [assertions](Assertions.ipynb) check correctness at runtime. # - # ## Background # # The command-line interface in this chapter is modeled after [GDB, the GNU debugger](https://www.gnu.org/software/gdb/), whose interface in turn goes back to earlier command-line debuggers such as [dbx](https://en.wikipedia.org/wiki/Dbx_%28debugger%29). All modern debuggers build on the functionality and concepts realized in these debuggers, be it breakpoints, stepping through programs, or inspecting program state. # # The concept of time travel debugging (see the Exercises, below) has been invented (and reinvented) many times. One of the most impactful tools comes from King et al. \cite{King2005}, integrating _a time-traveling virtual machine_ (TTVM) for debugging operating systems, integrated into GDB. The recent [record+replay "rr" debugger](https://rr-project.org) also implements time travel debugging on top of the GDB command line debugger; it is applicable for general-purpose programs and available as open source. # + [markdown] button=false new_sheet=true run_control={"read_only": false} # ## Exercises # # + [markdown] button=false new_sheet=false run_control={"read_only": false} solution2="hidden" solution2_first=true # ### Exercise 1: Changing State # # Some Python implementations allow to alter the state, by assigning values to `frame.f_locals`. Implement a `assign VAR=VALUE` command that allows to change the value of (local) variable `VAR` to the new value `VALUE`. # # Note: As detailed in [this blog post](https://utcc.utoronto.ca/~cks/space/blog/python/FLocalsAndTraceFunctions), # `frame.f_locals` is re-populated with every access, so assign to our local alias `self.local_vars` instead. # + [markdown] slideshow={"slide_type": "skip"} solution2="hidden" # **Solution.** Here is an `assign` command that gets things right on CPython. 
# + slideshow={"slide_type": "skip"} solution2="hidden" class Debugger(Debugger): def assign_command(self, arg: str) -> None: """Use as 'assign VAR=VALUE'. Assign VALUE to local variable VAR.""" sep = arg.find('=') if sep > 0: var = arg[:sep].strip() expr = arg[sep + 1:].strip() else: self.help_command("assign") return vars = self.local_vars try: vars[var] = eval(expr, self.frame.f_globals, vars) except Exception as err: self.log(f"{err.__class__.__name__}: {err}") # + slideshow={"slide_type": "skip"} solution2="hidden" # ignore next_inputs(["assign s = 'xyz'", "print", "step", "print", "step", "assign tag = True", "assign s = 'abc'", "print", "step", "print", "continue"]); # + slideshow={"slide_type": "skip"} solution2="hidden" with Debugger(): remove_html_markup('abc') # + slideshow={"slide_type": "skip"} solution2="hidden" # ignore assert not next_inputs() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ### Exercise 2: More Commands # # Extending the `Debugger` class with extra features and commands is a breeze. The following commands are inspired from [the GNU command-line debugger (GDB)](https://www.gnu.org/software/gdb/): # + [markdown] button=false new_sheet=false run_control={"read_only": false} # #### Named breakpoints ("break") # # With `break FUNCTION` and `delete FUNCTION`, set and delete a breakpoint at `FUNCTION`. # + [markdown] button=false new_sheet=false run_control={"read_only": false} # #### Step over functions ("next") # # When stopped at a function call, the `next` command should execute the entire call, stopping when the function returns. (In contrast, `step` stops at the first line of the function called.) # + [markdown] button=false new_sheet=false run_control={"read_only": false} # #### Print call stack ("where") # # Implement a `where` command that shows the stack of calling functions. # + [markdown] button=false new_sheet=false run_control={"read_only": false} # #### Move up and down the call stack ("up" and "down") # # After entering the `up` command, explore the source and variables of the _calling_ function rather than the current function. Use `up` repeatedly to move further up the stack. `down` returns to the caller. # + [markdown] button=false new_sheet=false run_control={"read_only": false} # #### Execute until line ("until") # # With `until LINE`, resume execution until a line greater than `LINE` is reached. If `LINE` is not given, resume execution until a line greater than the current is reached. This is useful to avoid stepping through multiple loop iterations. # + [markdown] button=false new_sheet=false run_control={"read_only": false} # #### Execute until return ("finish") # # With `finish`, resume execution until the current function returns. # + [markdown] button=false new_sheet=false run_control={"read_only": false} # #### Watchpoints ("watch") # # With `watch CONDITION`, stop execution as soon as `CONDITION` changes its value. (Use the code from our `EventTracer` class in the [chapter on Tracing](Tracer.ipynb).) `delete CONDITION` removes the watchpoint. Keep in mind that some variable names may not exist at all times. # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ### Exercise 3: Time-Travel Debugging # # Rather than inspecting a function at the moment it executes, you can also _record_ the entire state (call stack, local variables, etc.) during execution, and then run an interactive session to step through the recorded execution. 
Your time travel debugger would be invoked as # # ```python # with TimeTravelDebugger(): # function_to_be_tracked() # ... # ``` # # The interaction then starts at the end of the `with` block. # + [markdown] button=false new_sheet=false run_control={"read_only": false} # #### Part 1: Recording Values # # Start with a subclass of `Tracer` from the [chapter on tracing](Tracer.ipynb) (say, `TimeTravelTracer`) to execute a program while recording all values. Keep in mind that recording even only local variables at each step quickly consumes large amounts of memory. As an alternative, consider recording only _changes_ to variables, with the option to restore an entire state from a baseline and later changes. # + [markdown] button=false new_sheet=false run_control={"read_only": false} # #### Part 2: Command Line Interface # # Create `TimeTravelDebugger` as subclass of both `TimeTravelTracer` and `Debugger` to provide a command line interface as with `Debugger`, including additional commands which get you back to earlier states: # # * `back` is like `step`, except that you go one line back # * `restart` gets you to the beginning of the execution # * `rewind` gets you to the beginning of the current function invocation # + [markdown] button=false new_sheet=false run_control={"read_only": false} # #### Part 3: Graphical User Interface # # Create `GUItimeTravelDebugger` to provide a _graphical user interface_ that allows you to explore a recorded execution, using HTML and JavaScript. # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Here's a simple example to get you started. Assume you have recorded the following line numbers and variable values: # - recording: List[Tuple[int, Dict[str, Any]]] = [ (10, {'x': 25}), (11, {'x': 25}), (12, {'x': 26, 'a': "abc"}), (13, {'x': 26, 'a': "abc"}), (10, {'x': 30}), (11, {'x': 30}), (12, {'x': 31, 'a': "def"}), (13, {'x': 31, 'a': "def"}), (10, {'x': 35}), (11, {'x': 35}), (12, {'x': 36, 'a': "ghi"}), (13, {'x': 36, 'a': "ghi"}), ] # Then, the following function will provide a _slider_ that will allow you to explore these values: from bookutils import HTML def slider(rec: List[Tuple[int, Dict[str, Any]]]) -> str: lines_over_time = [line for (line, var) in rec] vars_over_time = [] for (line, vars) in rec: vars_over_time.append(", ".join(f"{var} = {repr(value)}" for var, value in vars.items())) # print(lines_over_time) # print(vars_over_time) template = f''' <div class="time_travel_debugger"> <input type="range" min="0" max="{len(lines_over_time) - 1}" value="0" class="slider" id="time_slider"> Line <span id="line">{lines_over_time[0]}</span>: <span id="vars">{vars_over_time[0]}</span> </div> <script> var lines_over_time = {lines_over_time}; var vars_over_time = {vars_over_time}; var time_slider = document.getElementById("time_slider"); var line = document.getElementById("line"); var vars = document.getElementById("vars"); time_slider.oninput = function() {{ line.innerHTML = lines_over_time[this.value]; vars.innerHTML = vars_over_time[this.value]; }} </script> ''' # print(template) return HTML(template) slider(recording) # Explore the HTML and JavaScript details of how `slider()` works, and then expand it to a user interface where you can # # * see the current source code (together with the line being executed) # * search for specific events, such as a line being executed or a variable changing its value # # Just like `slider()`, your user interface should come in pure HTML and JavaScript such that it can run in a browser (or a 
Jupyter notebook) without interacting with a Python program.
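# Independently of the exercises above, the extension mechanism summarized in the Synopsis deserves one more illustration. The sketch below is one possible way to add a `where` command (it is an assumption of how such a command could look, not the book's reference solution); `FrameType` and `Optional` are already imported at the top of this chapter.

class Debugger(Debugger):
    def where_command(self, arg: str = "") -> None:
        """Print the stack of calling functions"""
        frame: Optional[FrameType] = self.frame
        while frame is not None:
            # walk the chain of callers via f_back
            self.log(f"  {frame.f_code.co_name}() at line {frame.f_lineno}")
            frame = frame.f_back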
notebooks/Debugger.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## 2.1: Creating Interactive Plots import numpy as np import matplotlib.pyplot as plt from matplotlib.animation import FuncAnimation from matplotlib.widgets import Slider # + # %matplotlib notebook TWOPI = 2*np.pi fig, ax = plt.subplots() t = np.arange(0.0, TWOPI, 0.001) initial_amp = .5 s = initial_amp*np.sin(t) l, = plt.plot(t, s, lw=2) ax = plt.axis([0,TWOPI,-1,1]) axamp = plt.axes([0.25, .03, 0.50, 0.02]) # Slider samp = Slider(axamp, 'Amp', 0, 1, valinit=initial_amp) def update(val): # amp is the current value of the slider amp = samp.val # update curve l.set_ydata(amp*np.sin(t)) # redraw canvas while idle fig.canvas.draw_idle() # call update function on slider value change samp.on_changed(update) plt.show() # - # ### 2.2 Event Handling with Plot Callbacks def data_gen(): while True: yield np.random.rand(10) # + # %matplotlib notebook fig, ax = plt.subplots() line, = ax.plot(np.random.rand(10)) ax.set_ylim(0, 1) def update(data): line.set_ydata(data) return line, ani = FuncAnimation(fig, update, data_gen, interval=100) plt.show() # - # ### 2.3: GUI Neutral Widgets # + from matplotlib.widgets import Button # %matplotlib notebook freqs = np.arange(2, 20, 3) fig, ax = plt.subplots() plt.subplots_adjust(bottom=0.2) t = np.arange(0.0, 1.0, 0.001) s = np.sin(2*np.pi*freqs[0]*t) l, = plt.plot(t, s, lw=2) class Index(object): ind = 0 def next(self, event): self.ind += 1 i = self.ind % len(freqs) ydata = np.sin(2*np.pi*freqs[i]*t) l.set_ydata(ydata) plt.draw() def prev(self, event): self.ind -= 1 i = self.ind % len(freqs) ydata = np.sin(2*np.pi*freqs[i]*t) l.set_ydata(ydata) plt.draw() callback = Index() axprev = plt.axes([0.7, 0.05, 0.1, 0.075]) axnext = plt.axes([0.81, 0.05, 0.1, 0.075]) bnext = Button(axnext, 'Next') bnext.on_clicked(callback.next) bprev = Button(axprev, 'Previous') bprev.on_clicked(callback.prev) plt.show() # - # ### 2.3: GUI Neutral Widgets # + # %matplotlib notebook fig, ax = plt.subplots(figsize=(5, 3)) ax.set(xlim=(-3, 3), ylim=(-1, 1)) x = np.linspace(-3, 3, 91) t = np.linspace(1, 25, 30) X2, T2 = np.meshgrid(x, t) sinT2 = np.sin(2*np.pi*T2/T2.max()) F = 0.9*sinT2*np.sinc(X2*(1 + sinT2)) line = ax.plot(x, F[0, :], color='k', lw=2)[0] def animate(i): line.set_ydata(F[i, :]) anim = FuncAnimation( fig, animate, interval=100, frames=len(t)-1) plt.draw() plt.show() # + import matplotlib.animation as animation # %matplotlib notebook n = 100 number_of_frames = 10 data = np.random.rand(n, number_of_frames) def update_hist(num, data): plt.cla() plt.hist(data[num]) fig = plt.figure() hist = plt.hist(data[0]) animation = animation.FuncAnimation(fig, update_hist, number_of_frames, fargs=(data, ) ) plt.show() # + # %matplotlib notebook def data_gen(): t = data_gen.t cnt = 0 while cnt < 1000: cnt+=1 t += 0.05 y1 = np.sin(2*np.pi*t) * np.exp(-t/10.) y2 = np.cos(2*np.pi*t) * np.exp(-t/10.) 
yield t, y1, y2 data_gen.t = 0 # two subplots fig, (ax1, ax2) = plt.subplots(2,1) # intialize two line objects (one in each axes) line1, = ax1.plot([], [], lw=2) line2, = ax2.plot([], [], lw=2, color='r') line = [line1, line2] # the same axes initalizations as before for ax in [ax1, ax2]: ax.set_ylim(-1.1, 1.1) ax.set_xlim(0, 5) ax.grid() # initialize the data xdata, y1data, y2data = [], [], [] def run(data): # update the data t, y1, y2 = data xdata.append(t) y1data.append(y1) y2data.append(y2) # axis limits checking for ax in [ax1, ax2]: xmin, xmax = ax.get_xlim() if t >= xmax: ax.set_xlim(xmin, 2*xmax) ax.figure.canvas.draw() # update the data of both line objects line[0].set_data(xdata, y1data) line[1].set_data(xdata, y2data) return line ani = FuncAnimation(fig, run, data_gen, blit=True, interval=10, repeat=False) plt.show() # - # ### 2.4: Converting interactive plots into videos # + import matplotlib.animation as manimation FFMpegWriter = manimation.writers['ffmpeg'] metadata = dict(title='Movie Test', artist='Matplotlib', comment='Movie support!') writer = FFMpegWriter(fps=15, metadata=metadata) fig = plt.figure() l, = plt.plot([], [], 'k-o') plt.xlim(-5, 5) plt.ylim(-5, 5) x0, y0 = 0, 0 with writer.saving(fig, "writer_test.mp4", 100): for i in range(100): x0 += 0.1 * np.random.randn() y0 += 0.1 * np.random.randn() l.set_data(x0, y0) writer.grab_frame() # -
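# An alternative worth knowing (a minimal sketch, assuming `ffmpeg` is installed and on the PATH): a `FuncAnimation` can be written to disk directly with its `save()` method, which avoids the manual `writer.saving()` / `grab_frame()` loop above.

# +
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation

fig, ax = plt.subplots()
l, = ax.plot([], [], 'k-o')
ax.set_xlim(-5, 5)
ax.set_ylim(-5, 5)

# pre-compute a random walk similar to the frame loop above
walk = 0.1 * np.random.randn(100, 2).cumsum(axis=0)

def update(i):
    l.set_data([walk[i, 0]], [walk[i, 1]])
    return l,

anim = FuncAnimation(fig, update, frames=len(walk), interval=100)
anim.save("writer_test_funcanimation.mp4", fps=15)
# -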
Section 2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # ---

# # User Case
# PROFAB is a benchmarking platform intended to fill the gap in protein-function datasets, offering a total of 7656 datasets. In addition to the datasets themselves, ProFAB provides the complete preprocessing-training-evaluation workflow, which speeds up the use of machine learning in biological studies. Since the workflow is dense, this notebook walks through an easy-to-follow user case, starting with how to import your own dataset.

# ## 1. Data Importing
# ProFAB lets users import datasets that are not already available in ProFAB. To import such data, the SelfGet() function is the tool to use:

from profab.import_dataset import SelfGet
data = SelfGet(delimiter = '\t', name = False, label = False).get_data(file_name = "sample.txt")

# An explanation of the parameters is available in the "import_dataset" section. With these functions, users can manage dataset
# construction themselves. If you already have the positive set of any term available in ProFAB, the matching negative set can be obtained by setting the
# parameter "label = 'negative'". For example, if a user has the positive set for EC number 1-2-7 and wants the
# negative set to use in prediction, the following lines can be executed:

from profab.import_dataset import SelfGet, ECNO
negative_set = ECNO(label = 'negative').get_data('ecNo_1-2-7')
positive_set = SelfGet().get_data('users_1-2-7_positive_set.txt')

# After loading the datasets, the preprocessing step comes next.

# ## 2. PreProcessing
# Preprocessing consists of three parts: featurization, splitting and scaling.

# ### a. Featurization
# Featurization converts a protein FASTA file into numerical feature data using a variety of protein descriptors. A detailed
# explanation can be found in "model_preprocess". This function is only applicable on Linux and macOS operating systems, and the input file format must be '.fasta'. The following lines can be run:

from profab.model_preprocess import extract_protein_feature
extract_protein_feature('edp', 1, 'directory_folder_input_file', 'sample')

# After running this function, a new file holding the numerical features of the proteins is created, and it can be imported via the SelfGet() function as shown in the previous section.

# ### b. Splitting
# Another preprocessing module is the splitting module, which prepares train, validation (if needed) and test sets
# for prediction. Detailed information is available in "model_preprocess", and reading it is highly recommended to see how the function works. If one has X (feature matrix) and y
# (label matrix), splitting can be done by defining the fraction of the test set:

from profab.model_preprocess import ttv_split
X_train,X_test,y_train,y_test = ttv_split(X,y,ratio)

# Rather than giving all the data at once, the user can choose to feed 'ttv_split' with separate positive and negative sets and obtain the split data that way.

from profab.model_preprocess import ttv_split
X_train,X_test,y_train,y_test = ttv_split(X_pos,X_neg,ratio)

# If the data is a regression task, then y (label matrix) must be given.

# ### c. Scaling
# Scaling is a function that rearranges the range of the input points; the reason for doing so is to prevent problems caused by imbalanced feature ranges. If the data
# is already stable, this function is unnecessary. Like the other preprocessing steps, its detailed introduction can be
# found in 'model_preprocess'.
# A use case:

from profab.model_preprocess import scale_methods
X_train,scaler = scale_methods(X_train,scale_type = 'standard')
X_test = scaler.transform(X_test)

# As the use case shows, the scaling function returns the fitted train data (X_train) and the fitted scaler object (scaler), which is then used to transform the other sets. The rest is exactly the same as 'test_file_1'.

# ## 3. Training
# PROFAB can train any type of data and provides both classification and regression training. Since our datasets are based on the classification of proteins, the classification method is shown as an example.
#
# After the training session, the outcome of training can be stored in 'model_path' ```if path is not None```. Because this process can take a long time, saving the outcome is a time-saver. The stored model can later be re-imported with 'pickle', a Python-based package.

# +
from profab.model_learn import classification_methods

# Let's define the model path where the trained model will be saved.
model_path = 'model_path.txt'

model = classification_methods(ml_type = 'logistic_reg',
                               X_train = X_train,
                               y_train = y_train,
                               path = model_path
                               )
# -

# ## 4. Evaluation
# After the training session is done, evaluation can be performed with the following lines of code; its output is shown below the code.

# ### a. Get Scores

# +
from profab.model_evaluate import evaluate_score

score_train,f_train = evaluate_score(model,X_train,y_train,preds = True)
score_test,f_test = evaluate_score(model,X_test,y_test,preds = True)
score_validation,f_validation = evaluate_score(model,X_validation,y_validation,preds = True)
# -

# The train and test scores shown here are for the 'ecNo_1-2-7' target data.

# ### b. Table Formatting
# To get the data in table format, a dictionary holding the scores of the different sets must be given. The following lines of code can be executed to tabularize the results:

# +
# If the user wants to see all results in a table, the following code can be run:
from profab.model_evaluate import form_table

score_path = 'score_path.csv'  # To save the results.

scores = {'train':score_train,'test':score_test,'validation':score_validation}
form_table(scores = scores, path = score_path)
# -

# The 'form_table' function writes the scores for a single dataset.

# ## 5. Working with Multiple Sets
# If the user wants to run predictions on multiple datasets and see the performance results, ProFAB can handle this with an ordinary 'for' loop. Say the user has negative sets for 3 GO terms; a use case would look like these lines:

# +
from profab.import_dataset import GOID, SelfGet
from profab.model_preprocess import ttv_split
from profab.model_learn import classification_methods
from profab.model_evaluate import evaluate_score, multiple_form_table

GO_list = ['GO_0000018','GO_0019935','GO_0021515']  # GO_list holds the GO terms

scores = {}
for go_term in GO_list:

    # Importing data
    negative_set = SelfGet().get_data(go_term + '_negative_data.txt')
    positive_set = GOID(label = 'positive').get_data(go_term)

    # Splitting
    X_train,X_test,X_validation,y_train,y_test,y_validation = ttv_split(X_pos = positive_set,
                                                                        X_neg = negative_set,
                                                                        ratio = [0.1,0.2])

    # Prediction
    model = classification_methods(ml_type = 'SVM',
                                   X_train = X_train,
                                   X_valid = X_validation,
                                   y_train = y_train,
                                   y_valid = y_validation)

    # Evaluation
    score_train = evaluate_score(model,X_train,y_train)
    score_test = evaluate_score(model,X_test,y_test)

    set_scores = {'train':score_train,'test': score_test}
    scores.update({go_term:set_scores})

# Tabularizing the scores
score_path = 'score_path.csv'
multiple_form_table(scores, score_path)
# -
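# To re-use the model that Section 3 stored via the `path` argument in a later session, it can be loaded back and evaluated as before. A minimal sketch (it assumes `model_path` points to the file written during training and that the file is a plain pickle, as the Training section indicates):

# +
import pickle
from profab.model_evaluate import evaluate_score

with open(model_path, 'rb') as f:
    reloaded_model = pickle.load(f)

score_test_reloaded = evaluate_score(reloaded_model, X_test, y_test)
# -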
test_file_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # ---

# ## PCA Projection with ECFP6 Fingerprint
# This notebook shows how to visualize the chemical space of `B3DB` by projecting ECFP6 fingerprints to 2D with PCA. The fingerprints are computed with `RDKit` and the PCA is performed with `sklearn`.

import numpy as np
import pandas as pd
from rdkit import Chem
from rdkit.Chem import AllChem
from sklearn.decomposition import PCA

# data file name for BBB dataset with categorical data
bbb_fpath = "../B3DB/B3DB_classification.tsv"

# load data
df = pd.read_csv(bbb_fpath, sep="\t")
df

# +
# compute ECFP6 fingerprints
fps = []
for idx, row in df.iterrows():
    mol = Chem.MolFromSmiles(row["SMILES"])
    mol = Chem.AddHs(mol)
    fp = AllChem.GetMorganFingerprintAsBitVect(mol=mol,
                                               radius=3,
                                               nBits=2048,
                                               useChirality=True,
                                               useFeatures=False)
    fps.append(fp.ToBitString())
# -

fps_arr = np.array([np.fromiter(fp, "u1") for fp in fps])
fps_arr

print("fingerprint array shape: ", fps_arr.shape)

# +
# visualize the chemical space using PCA
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib notebook

df_fps = pd.DataFrame(fps_arr, index=df.index)
df_new = pd.concat([df, df_fps], axis=1)

fig = plt.figure(figsize=(5, 4))

pca = PCA(n_components=2)
arr_fp_embedded = pca.fit_transform(fps_arr)
df_new["PC_1"] = arr_fp_embedded[:, 0]
df_new["PC_2"] = arr_fp_embedded[:, 1]

sns.scatterplot(data=df_new, x="PC_1", y="PC_2",
                hue="BBB+/BBB-",
                palette=sns.color_palette(["hotpink", "dodgerblue"]),
                linewidth=0.1,
                )
plt.xlabel("PC 1", fontsize=14)
plt.ylabel("PC 2", fontsize=14)
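# -

# A useful sanity check for this kind of 2D projection is how much of the fingerprint variance the two components actually capture (`pca` is the fitted object from the cell above):
print("Explained variance ratio:", pca.explained_variance_ratio_)
print("Total variance explained:", pca.explained_variance_ratio_.sum())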
notebooks/PCA_projection_fingerprint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: venv # language: python # name: venv # --- import json # will be needed for saving preprocessing details import numpy as np # for data manipulation import pandas as pd # for data manipulation from sklearn.model_selection import train_test_split # will be used for data split from sklearn.preprocessing import LabelEncoder # for preprocessing from sklearn.ensemble import RandomForestClassifier # for training the algorithm from sklearn.ensemble import ExtraTreesClassifier # for training the algorithm import joblib # for saving algorithm and preprocessing objects # load dataset df = pd.read_csv('https://raw.githubusercontent.com/pplonski/datasets-for-start/master/adult/data.csv', skipinitialspace=True) x_cols = [c for c in df.columns if c != 'income'] # set input matrix and target column X = df[x_cols] y = df['income'] # show first rows of data df.head() # data split train / test X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state=1234) train_mode = dict(X_train.mode().iloc[0]) X_train = X_train.fillna(train_mode) print(train_mode) # convert categoricals encoders = {} for column in ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex','native-country']: categorical_convert = LabelEncoder() X_train[column] = categorical_convert.fit_transform(X_train[column]) encoders[column] = categorical_convert # train the Random Forest algorithm rf = RandomForestClassifier(n_estimators = 100) rf = rf.fit(X_train, y_train) # train the Extra Trees algorithm et = ExtraTreesClassifier(n_estimators = 100) et = et.fit(X_train, y_train) # save preprocessing objects and RF algorithm joblib.dump(train_mode, "./train_mode.joblib", compress=True) joblib.dump(encoders, "./encoders.joblib", compress=True) joblib.dump(rf, "./random_forest.joblib", compress=True) joblib.dump(et, "./extra_trees.joblib", compress=True)
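# An evaluation step is not part of the original notebook; the sketch below scores both models on the held-out split by applying the same imputation and encoders. `LabelEncoder` cannot transform categories it never saw during training, so unseen values are mapped to -1 here, which is harmless for tree-based models.

# +
from sklearn.metrics import accuracy_score

X_test = X_test.copy().fillna(train_mode)
for column, encoder in encoders.items():
    # map known categories to their training codes, unseen ones to -1
    known = {cls: idx for idx, cls in enumerate(encoder.classes_)}
    X_test[column] = X_test[column].map(lambda v: known.get(v, -1))

print("Random Forest accuracy:", accuracy_score(y_test, rf.predict(X_test)))
print("Extra Trees accuracy:  ", accuracy_score(y_test, et.predict(X_test)))
# -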
research/loading_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Writing Testable Numerics Code # # Copyright (C) 2020 <NAME> # # <details> # <summary>MIT License</summary> # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # </details> # Here's the contents of a file containing numerics code: # !pygmentize norms.py # Note: # # - Docstring # - Defensive programming # !pygmentize test_norms.py # * Now use [pytest](https://pytest.org) to run the test. # !python -m pytest # A typical use for these tests would be to run them on every commit to a codebase. # # Example: https://github.com/inducer/boxtree (click the "Pipeline" button)
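# Since `norms.py` and `test_norms.py` are only rendered above via `pygmentize`, the cell below is purely illustrative of the pattern being discussed: a small numerics routine with a docstring and defensive checks, plus a pytest-style test. The names and tolerance are assumptions, not the contents of the actual files.

# +
import numpy as np

def one_norm(x):
    """Return the 1-norm (sum of absolute values) of the vector `x`."""
    x = np.asarray(x)
    if x.ndim != 1:
        raise ValueError("one_norm expects a 1D vector")
    return np.sum(np.abs(x))

def test_one_norm():
    # the 1-norm of a vector of ones equals its length
    assert abs(one_norm(np.ones(17)) - 17) < 1e-13
# -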
cleared-demos/error_and_fp/Writing Testable Numerics Code.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.metrics import jaccard_score import missingno as msno # - sns.set(style="whitegrid", palette="muted") data = pd.read_csv("./cervical-cancer.csv") data.head() data.describe() data.isna().sum().sort_values(ascending=False) # Two features are filled mostly with NAs: STDs: Time since first diagnosis and STDs: Time since last diagnosis.<br> # There are many observations with missing values for different kind of STDs, IUD and Contraceptives. <br> # Also information about smoking habits are missing for a few patients. <br> # For now we are removing two columns with most missing data, maybe later we will add indicator features for modelling. msno.matrix(data) # Two features: STDs:cervical condylomatosis and STDs:AIDS have only one unique value: 0. <br> # For now, removing this uninformative features <br> # Features with 2 unique values are converted to boolean data.drop(["STDs: Time since first diagnosis", "STDs: Time since last diagnosis"], axis=1, inplace=True) data.drop(["STDs:cervical condylomatosis", "STDs:AIDS"], axis=1, inplace=True) for col in data.columns: if data[col].nunique() == 2: data[col] = data[col].astype("bool") data.dtypes # Filling rest of missing data with mode data = data.fillna(data.mode().iloc[0]) # Significant correlation between number of STDs and number of diangonsis and two features regarding smoking habits, may remove later. # + plt.figure(figsize=(10, 10)) sns.heatmap(data.select_dtypes(exclude=['bool']).corr(), annot=True, cmap="coolwarm", square=True) # + data_bool = data.select_dtypes(include="bool") jaccard_matrix = pd.DataFrame(0, index=data_bool.columns, columns=data_bool.columns) for i in range(len(data_bool.columns)): for j in range(len(data_bool.columns)): jaccard_matrix.iloc[i, j] = jaccard_score(data_bool.iloc[:, i], data_bool.iloc[:, j]) # - # Since correlation between two binary features can be misleading, here we computed Jaccard score between them <br> # All STDs feature highly overlap, should remove most of them before modeling. # + plt.figure(figsize=(20, 20)) sns.heatmap(jaccard_matrix, annot=True, cmap="coolwarm", square=True) # - # There are 4 target variables: Biopsy, Hinselmann, Schiller and Citology, which represent different ways of diagnosing cervix cancer <br> # Deciding to drop all but Biopsy for further modelling. # # Significant class imbalance, will cause problems during classification # + categorical_feat = ['Biopsy', 'Hinselmann', 'Schiller', 'Citology'] fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12, 8)) axes = axes.flatten() for idx, ax in enumerate(axes): plt.sca(ax) ax = sns.countplot(x = categorical_feat[idx], data=data) sns.despine() plt.tight_layout() plt.show() # + categorical_feat = ['Biopsy', 'Hinselmann', 'Schiller', 'Citology'] fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12, 8)) axes = axes.flatten() for idx, ax in enumerate(axes): plt.sca(ax) ax = sns.swarmplot(x = "Biopsy", hue = categorical_feat[idx], y = "Age", data=data) sns.despine() plt.tight_layout() plt.show() # - data.drop(["Hinselmann", "Schiller", "Citology"], axis=1, inplace=True)
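# Because the remaining target (Biopsy) is heavily imbalanced, any later train/test split should preserve the class ratio. A minimal sketch (the split parameters are illustrative and not part of this milestone):

# +
from sklearn.model_selection import train_test_split

print(data["Biopsy"].value_counts(normalize=True))

X = data.drop(columns=["Biopsy"])
y = data["Biopsy"]
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, stratify=y, random_state=42)
# -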
Projekty/Projekt1/Grupa2/PrzybylinskiPingielski/Milestone_1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python # language: python # name: conda-env-python-py # --- # + [markdown] button=false new_sheet=false run_control={"read_only": false} # <center> # <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork/labs/Module%203/images/IDSNlogo.png" width="300" alt="cognitiveclass.ai logo" /> # </center> # # # Pie Charts, Box Plots, Scatter Plots, and Bubble Plots # # Estimated time needed: **30** minutes # # ## Objectives # # After completing this lab you will be able to: # # * Explore Matplotlib library further # * Create pie charts, box plots, scatter plots and bubble charts # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ## Table of Contents # # <div class="alert alert-block alert-info" style="margin-top: 20px"> # # 1. [Exploring Datasets with *p*andas](#0)<br> # 2. [Downloading and Prepping Data](#2)<br> # 3. [Visualizing Data using Matplotlib](#4) <br> # 4. [Pie Charts](#6) <br> # 5. [Box Plots](#8) <br> # 6. [Scatter Plots](#10) <br> # 7. [Bubble Plots](#12) <br> # # </div> # # + [markdown] button=false new_sheet=false run_control={"read_only": false} tags=[] # # Exploring Datasets with *pandas* and Matplotlib<a id="0"></a> # # Toolkits: The course heavily relies on [*pandas*](http://pandas.pydata.org/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01) and [**Numpy**](http://www.numpy.org/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01) for data wrangling, analysis, and visualization. The primary plotting library we will explore in the course is [Matplotlib](http://matplotlib.org/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01). # # Dataset: Immigration to Canada from 1980 to 2013 - [International migration flows to and from selected countries - The 2015 revision](http://www.un.org/en/development/desa/population/migration/data/empirical2/migrationflows.shtml?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01) from United Nation's website. # # The dataset contains annual data on the flows of international migrants as recorded by the countries of destination. The data presents both inflows and outflows according to the place of birth, citizenship or place of previous / next residence both for foreigners and nationals. In this lab, we will focus on the Canadian Immigration data. # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # # Downloading and Prepping Data <a id="2"></a> # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Import primary modules. 
# # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} import numpy as np # useful for many scientific computing in Python import pandas as pd # primary data structure library # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Let's download and import our primary Canadian Immigration dataset using *pandas*'s `read_excel()` method. Normally, before we can do that, we would need to download a module which *pandas* requires reading in Excel files. This module was **openpyxl** (formerlly **xlrd**). For your convenience, we have pre-installed this module, so you would not have to worry about that. Otherwise, you would need to run the following line of code to install the **openpyxl** module: # # ``` # # # ! pip3 install openpyxl # ``` # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Download the dataset and read it into a *pandas* dataframe. # # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} df_can = pd.read_excel( 'https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork/Data%20Files/Canada.xlsx', sheet_name='Canada by Citizenship', skiprows=range(20), skipfooter=2 ) print('Data downloaded and read into a dataframe!') # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Let's take a look at the first five items in our dataset. # # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} df_can.head() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Let's find out how many entries there are in our dataset. # # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} # print the dimensions of the dataframe print(df_can.shape) # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Clean up data. We will make some modifications to the original dataset to make it easier to create our visualizations. Refer to *Introduction to Matplotlib and Line Plots* and *Area Plots, Histograms, and Bar Plots* for a detailed description of this preprocessing. # # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} # clean up the dataset to remove unnecessary columns (eg. REG) df_can.drop(['AREA', 'REG', 'DEV', 'Type', 'Coverage'], axis=1, inplace=True) # let's rename the columns so that they make sense df_can.rename(columns={'OdName':'Country', 'AreaName':'Continent','RegName':'Region'}, inplace=True) # for sake of consistency, let's also make all column labels of type string df_can.columns = list(map(str, df_can.columns)) # set the country name as index - useful for quickly looking up countries using .loc method df_can.set_index('Country', inplace=True) # add total column df_can['Total'] = df_can.sum(axis=1) # years that we will be using in this lesson - useful for plotting later on years = list(map(str, range(1980, 2014))) print('data dimensions:', df_can.shape) # + [markdown] button=false new_sheet=false run_control={"read_only": false} # # Visualizing Data using Matplotlib<a id="4"></a> # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Import `Matplotlib`. 
# # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} # %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt mpl.style.use('ggplot') # optional: for ggplot-like style # check for latest version of Matplotlib print('Matplotlib version: ', mpl.__version__) # >= 2.0.0 # + [markdown] button=false new_sheet=false run_control={"read_only": false} # # Pie Charts <a id="6"></a> # # A `pie chart` is a circular graphic that displays numeric proportions by dividing a circle (or pie) into proportional slices. You are most likely already familiar with pie charts as it is widely used in business and media. We can create pie charts in Matplotlib by passing in the `kind=pie` keyword. # # Let's use a pie chart to explore the proportion (percentage) of new immigrants grouped by continents for the entire time period from 1980 to 2013. # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Step 1: Gather data. # # We will use *pandas* `groupby` method to summarize the immigration data by `Continent`. The general process of `groupby` involves the following steps: # # 1. **Split:** Splitting the data into groups based on some criteria. # 2. **Apply:** Applying a function to each group independently: # .sum() # .count() # .mean() # .std() # .aggregate() # .apply() # .etc.. # 3. **Combine:** Combining the results into a data structure. # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork/labs/Module%203/images/Mod3Fig4SplitApplyCombine.png" height=400 align="center"> # # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} # group countries by continents and apply sum() function df_continents = df_can.groupby('Continent', axis=0).sum() # note: the output of the groupby method is a `groupby' object. # we can not use it further until we apply a function (eg .sum()) print(type(df_can.groupby('Continent', axis=0))) df_continents.head() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Step 2: Plot the data. We will pass in `kind = 'pie'` keyword, along with the following additional parameters: # # * `autopct` - is a string or function used to label the wedges with their numeric value. The label will be placed inside the wedge. If it is a format string, the label will be `fmt%pct`. # * `startangle` - rotates the start of the pie chart by angle degrees counterclockwise from the x-axis. # * `shadow` - Draws a shadow beneath the pie (to give a 3D feel). # # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} # autopct create %, start angle represent starting point df_continents['Total'].plot(kind='pie', figsize=(5, 6), autopct='%1.2f%%', # add in percentages startangle=90, # start angle 90° (Africa) shadow=True, # add shadow ) plt.title('Immigration to Canada by Continent [1980 - 2013]') plt.axis('equal') # Sets the pie chart to look like a circle. plt.show() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # The above visual is not very clear, the numbers and text overlap in some instances. Let's make a few modifications to improve the visuals: # # * Remove the text labels on the pie chart by passing in `legend` and add it as a seperate legend using `plt.legend()`. 
# * Push out the percentages to sit just outside the pie chart by passing in `pctdistance` parameter. # * Pass in a custom set of colors for continents by passing in `colors` parameter. # * **Explode** the pie chart to emphasize the lowest three continents (Africa, North America, and Latin America and Caribbean) by passing in `explode` parameter. # # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} colors_list = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue', 'lightgreen', 'pink'] explode_list = [0.1, 0, 0, 0, 0.1, 0.1] # ratio for each continent with which to offset each wedge. df_continents['Total'].plot(kind='pie', figsize=(15, 6), autopct='%1.2f%%', startangle=55, shadow=True, labels=None, # turn off labels on pie chart pctdistance=1.12, # the ratio between the center of each pie slice and the start of the text generated by autopct colors=colors_list, # add custom colors explode=explode_list # 'explode' lowest 3 continents ) # scale the title up by 12% to match pctdistance plt.title('Immigration to Canada by Continent [1980 - 2013]', y=1.12) plt.axis('equal') # add legend plt.legend(labels=df_continents.index, loc='upper left') plt.show() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # **Question:** Using a pie chart, explore the proportion (percentage) of new immigrants grouped by continents in the year 2013. # # **Note**: You might need to play with the explore values in order to fix any overlapping slice values. # # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} explode_list = [0, 0, 0, 0.1, 0.1, 0.2] df_continents['2013'].plot(kind='pie', figsize=(15, 6), autopct='%1.1f%%', startangle=90, shadow=True, labels=None, pctdistance=1.12, explode=explode_list ) plt.title('Immigration to Canada by Continent in 2013', y=1.1) plt.axis('equal') plt.legend(labels=df_continents.index, loc='upper left') plt.show() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # <details><summary>Click here for a sample python solution</summary> # # ```python # #The correct answer is: # explode_list = [0.0, 0, 0, 0.1, 0.1, 0.2] # ratio for each continent with which to offset each wedge. # # df_continents['2013'].plot(kind='pie', # figsize=(15, 6), # autopct='%1.1f%%', # startangle=90, # shadow=True, # labels=None, # turn off labels on pie chart # pctdistance=1.12, # the ratio between the pie center and start of text label # explode=explode_list # 'explode' lowest 3 continents # ) # # # scale the title up by 12% to match pctdistance # plt.title('Immigration to Canada by Continent in 2013', y=1.12) # plt.axis('equal') # # # add legend # plt.legend(labels=df_continents.index, loc='upper left') # # # show plot # plt.show() # # ``` # # </details> # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # # Box Plots <a id="8"></a> # # A `box plot` is a way of statistically representing the *distribution* of the data through five main dimensions: # # * **Minimum:** The smallest number in the dataset excluding the outliers. # * **First quartile:** Middle number between the `minimum` and the `median`. # * **Second quartile (Median):** Middle number of the (sorted) dataset. # * **Third quartile:** Middle number between `median` and `maximum`. # * **Maximum:** The largest number in the dataset excluding the outliers. 
# # + [markdown] button=false new_sheet=false run_control={"read_only": false} # <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork/labs/Module%203/images/boxplot_complete.png" width=440, align="center"> # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # To make a `boxplot`, we can use `kind=box` in `plot` method invoked on a *pandas* series or dataframe. # # Let's plot the box plot for the Japanese immigrants between 1980 - 2013. # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Step 1: Get the subset of the dataset. Even though we are extracting the data for just one country, we will obtain it as a dataframe. This will help us with calling the `dataframe.describe()` method to view the percentiles. # # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} # to get a dataframe, place extra square brackets around 'Japan'. df_japan = df_can.loc[['Japan'], years].transpose() df_japan.head() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Step 2: Plot by passing in `kind='box'`. # # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} df_japan.plot(kind='box', figsize=(8, 6)) plt.title('Box plot of Japanese Immigrants from 1980 - 2013') plt.ylabel('Number of Immigrants') plt.show() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # We can immediately make a few key observations from the plot above: # # 1. The minimum number of immigrants is around 200 (min), maximum number is around 1300 (max), and median number of immigrants is around 900 (median). # 2. 25% of the years for period 1980 - 2013 had an annual immigrant count of \~500 or fewer (First quartile). # 3. 75% of the years for period 1980 - 2013 had an annual immigrant count of \~1100 or fewer (Third quartile). # # We can view the actual numbers by calling the `describe()` method on the dataframe. # # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} df_japan.describe() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # One of the key benefits of box plots is comparing the distribution of multiple datasets. In one of the previous labs, we observed that China and India had very similar immigration trends. Let's analyze these two countries further using box plots. # # **Question:** Compare the distribution of the number of new immigrants from India and China for the period 1980 - 2013. # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Step 1: Get the dataset for China and India and call the dataframe **df_CI**. # # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} df_CI = df_can.loc[['China', 'India'], years].transpose() df_CI.head() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # <details><summary>Click here for a sample python solution</summary> # # ```python # #The correct answer is: # df_CI= df_can.loc[['China', 'India'], years].transpose() # df_CI.head() # ``` # # </details> # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Let's view the percentiles associated with both countries using the `describe()` method. 
# # - df_CI.describe() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # <details><summary>Click here for a sample python solution</summary> # # ```python # #The correct answer is: # df_CI.describe() # ``` # # </details> # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Step 2: Plot data. # # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} df_CI.plot(kind='box', figsize=(10, 6)) plt.title('Box plots of Immigrants from China and India (1980 - 2013)') plt.ylabel('Number of Immigrants') plt.show() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # <details><summary>Click here for a sample python solution</summary> # # ```python # #The correct answer is: # df_CI.plot(kind='box', figsize=(10, 7)) # # plt.title('Box plots of Immigrants from China and India (1980 - 2013)') # plt.ylabel('Number of Immigrants') # # plt.show() # # ``` # # </details> # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # We can observe that, while both countries have around the same median immigrant population (\~20,000), China's immigrant population range is more spread out than India's. The maximum population from India for any year (36,210) is around 15% lower than the maximum population from China (42,584). # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # If you prefer to create horizontal box plots, you can pass the `vert` parameter in the **plot** function and assign it to *False*. You can also specify a different color in case you are not a big fan of the default red color. # # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} # horizontal box plots df_CI.plot(kind='box', figsize=(10, 7), color='blue', vert=False) plt.title('Box plots of Immigrants from China and India (1980 - 2013)') plt.xlabel('Number of Immigrants') plt.ylabel('Countries') plt.show() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # **Subplots** # # Often times we might want to plot multiple plots within the same figure. For example, we might want to perform a side by side comparison of the box plot with the line plot of China and India's immigration. # # To visualize multiple plots together, we can create a **`figure`** (overall canvas) and divide it into **`subplots`**, each containing a plot. With **subplots**, we usually work with the **artist layer** instead of the **scripting layer**. # # Typical syntax is : <br> # # ```python # fig = plt.figure() # create figure # ax = fig.add_subplot(nrows, ncols, plot_number) # create subplots # ``` # # Where # # * `nrows` and `ncols` are used to notionally split the figure into (`nrows` \* `ncols`) sub-axes, # * `plot_number` is used to identify the particular subplot that this function is to create within the notional grid. `plot_number` starts at 1, increments across rows first and has a maximum of `nrows` \* `ncols` as shown below. 
# # <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork/labs/Module%203/images/Mod3Fig5Subplots_V2.png" width=500 align="center"> # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # We can then specify which subplot to place each plot by passing in the `ax` paramemter in `plot()` method as follows: # # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} fig = plt.figure() # create figure ax0 = fig.add_subplot(1, 2, 1) # add subplot 1 (1 row, 2 columns, first plot) ax1 = fig.add_subplot(1, 2, 2) # add subplot 2 (1 row, 2 columns, second plot). See tip below** # Subplot 1: Box plot df_CI.plot(kind='box', color='blue', vert=False, figsize=(20, 6), ax=ax0) # add to subplot 1 ax0.set_title('Box Plots of Immigrants from China and India (1980 - 2013)') ax0.set_xlabel('Number of Immigrants') ax0.set_ylabel('Countries') # Subplot 2: Line plot df_CI.plot(kind='line', figsize=(20, 6), ax=ax1) # add to subplot 2 ax1.set_title ('Line Plots of Immigrants from China and India (1980 - 2013)') ax1.set_ylabel('Number of Immigrants') ax1.set_xlabel('Years') plt.show() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # **Tip regarding subplot convention** # # In the case when `nrows`, `ncols`, and `plot_number` are all less than 10, a convenience exists such that a 3-digit number can be given instead, where the hundreds represent `nrows`, the tens represent `ncols` and the units represent `plot_number`. For instance, # # ```python # subplot(211) == subplot(2, 1, 1) # ``` # # produces a subaxes in a figure which represents the top plot (i.e. the first) in a 2 rows by 1 column notional grid (no grid actually exists, but conceptually this is how the returned subplot has been positioned). # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Let's try something a little more advanced. # # Previously we identified the top 15 countries based on total immigration from 1980 - 2013. # # **Question:** Create a box plot to visualize the distribution of the top 15 countries (based on total immigration) grouped by the *decades* `1980s`, `1990s`, and `2000s`. # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Step 1: Get the dataset. Get the top 15 countries based on Total immigrant population. Name the dataframe **df_top15**. # # - df_top15 = df_can.sort_values(['Total'], ascending=False, axis=0).head(15) df_top15 # + [markdown] button=false new_sheet=false run_control={"read_only": false} # <details><summary>Click here for a sample python solution</summary> # # ```python # #The correct answer is: # df_top15 = df_can.sort_values(['Total'], ascending=False, axis=0).head(15) # df_top15 # # ``` # # </details> # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Step 2: Create a new dataframe which contains the aggregate for each decade. One way to do that: # # 1. Create a list of all years in decades 80's, 90's, and 00's. # 2. Slice the original dataframe df_can to create a series for each decade and sum across all years for each country. # 3. Merge the three series into a new data frame. Call your dataframe **new_df**. 
# # + # create a list of all years in decades 80's, 90's, and 00's years_80s = list(map(str, range(1980, 1990))) years_90s = list(map(str, range(1990, 2000))) years_00s = list(map(str, range(2000, 2010))) # slice the original dataframe df_can to create a series for each decade df_80s = df_top15.loc[:, years_80s].sum(axis=1) df_90s = df_top15.loc[:, years_90s].sum(axis=1) df_00s = df_top15.loc[:, years_00s].sum(axis=1) # merge the three series into a new data frame new_df = pd.DataFrame({'1980s': df_80s, '1990s': df_90s, '2000s':df_00s}) # display dataframe new_df.head() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # <details><summary>Click here for a sample python solution</summary> # # ```python # #The correct answer is: # # # create a list of all years in decades 80's, 90's, and 00's # years_80s = list(map(str, range(1980, 1990))) # years_90s = list(map(str, range(1990, 2000))) # years_00s = list(map(str, range(2000, 2010))) # # # slice the original dataframe df_can to create a series for each decade # df_80s = df_top15.loc[:, years_80s].sum(axis=1) # df_90s = df_top15.loc[:, years_90s].sum(axis=1) # df_00s = df_top15.loc[:, years_00s].sum(axis=1) # # # merge the three series into a new data frame # new_df = pd.DataFrame({'1980s': df_80s, '1990s': df_90s, '2000s':df_00s}) # # # display dataframe # new_df.head() # # # ``` # # </details> # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Let's learn more about the statistics associated with the dataframe using the `describe()` method. # # - new_df.describe() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # <details><summary>Click here for a sample python solution</summary> # # ```python # #The correct answer is: # new_df.describe() # ``` # # </details> # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Step 3: Plot the box plots. # # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} new_df.plot(kind='box', figsize=(10, 6)) plt.title('Immigration from top 15 countries for decased 80s, 90s, and 2000s') plt.show() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # <details><summary>Click here for a sample python solution</summary> # # ```python # #The correct answer is: # new_df.plot(kind='box', figsize=(10, 6)) # # plt.title('Immigration from top 15 countries for decades 80s, 90s and 2000s') # # plt.show() # # ``` # # </details> # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Note how the box plot differs from the summary table created. The box plot scans the data and identifies the outliers. In order to be an outlier, the data value must be:<br> # # * larger than Q3 by at least 1.5 times the interquartile range (IQR), or, # * smaller than Q1 by at least 1.5 times the IQR. # # Let's look at decade 2000s as an example: <br> # # * Q1 (25%) = 36,101.5 <br> # * Q3 (75%) = 105,505.5 <br> # * IQR = Q3 - Q1 = 69,404 <br> # # Using the definition of outlier, any value that is greater than Q3 by 1.5 times IQR will be flagged as outlier. 
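#
# As a hedged cross-check (not part of the original lab), the same fence can be computed directly from `new_df` with pandas quantiles, reproducing the manual calculation that follows:
#
# ```python
# q1 = new_df['2000s'].quantile(0.25)   # 36,101.5, as reported by describe()
# q3 = new_df['2000s'].quantile(0.75)   # 105,505.5
# iqr = q3 - q1                         # 69,404
# upper_fence = q3 + 1.5 * iqr          # 209,611.5
# print(upper_fence)
# ```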
# # Outlier > 105,505.5 + (1.5 \* 69,404) <br> # Outlier > 209,611.5 # # - new_df = new_df.reset_index() new_df[new_df['2000s'] > 209611.5] # <details><summary>Click here for a sample python solution</summary> # # ```python # #The correct answer is: # new_df=new_df.reset_index() # new_df[new_df['2000s']> 209611.5] # # ``` # # </details> # # <!-- The correct answer is: # new_df[new_df['2000s']> 209611.5] # --> # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # China and India are both considered as outliers since their population for the decade exceeds 209,611.5. # # The box plot is an advanced visualizaiton tool, and there are many options and customizations that exceed the scope of this lab. Please refer to [Matplotlib documentation](http://matplotlib.org/api/pyplot_api.html?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01#matplotlib.pyplot.boxplot) on box plots for more information. # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # # Scatter Plots <a id="10"></a> # # A `scatter plot` (2D) is a useful method of comparing variables against each other. `Scatter` plots look similar to `line plots` in that they both map independent and dependent variables on a 2D graph. While the data points are connected together by a line in a line plot, they are not connected in a scatter plot. The data in a scatter plot is considered to express a trend. With further analysis using tools like regression, we can mathematically calculate this relationship and use it to predict trends outside the dataset. # # Let's start by exploring the following: # # Using a `scatter plot`, let's visualize the trend of total immigrantion to Canada (all countries combined) for the years 1980 - 2013. # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Step 1: Get the dataset. Since we are expecting to use the relationship betewen `years` and `total population`, we will convert `years` to `int` type. # # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} # we can use the sum() method to get the total population per year df_tot = pd.DataFrame(df_can[years].sum(axis=0)) # change the years to type int (useful for regression later on) df_tot.index = map(int, df_tot.index) # reset the index to put in back in as a column in the df_tot dataframe df_tot.reset_index(inplace = True) # rename columns df_tot.columns = ['year', 'total'] # view the final dataframe df_tot.head() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Step 2: Plot the data. In `Matplotlib`, we can create a `scatter` plot set by passing in `kind='scatter'` as plot argument. We will also need to pass in `x` and `y` keywords to specify the columns that go on the x- and the y-axis. # # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} df_tot.plot(kind='scatter', x='year', y='total', figsize=(10, 6), color='darkblue') plt.title('Total Immigration to Canada from 1980 - 2013') plt.xlabel('Year') plt.ylabel('Number of Immigrants') plt.show() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Notice how the scatter plot does not connect the data points together. We can clearly observe an upward trend in the data: as the years go by, the total number of immigrants increases. 
We can mathematically analyze this upward trend using a regression line (line of best fit). # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # So let's try to plot a linear line of best fit, and use it to predict the number of immigrants in 2015. # # Step 1: Get the equation of line of best fit. We will use **Numpy**'s `polyfit()` method by passing in the following: # # * `x`: x-coordinates of the data. # * `y`: y-coordinates of the data. # * `deg`: Degree of fitting polynomial. 1 = linear, 2 = quadratic, and so on. # # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} x = df_tot['year'] # year on x-axis y = df_tot['total'] # total on y-axis fit = np.polyfit(x, y, deg=1) fit # + [markdown] button=false new_sheet=false run_control={"read_only": false} # The output is an array with the polynomial coefficients, highest powers first. Since we are plotting a linear regression `y= a * x + b`, our output has 2 elements `[5.56709228e+03, -1.09261952e+07]` with the the slope in position 0 and intercept in position 1. # # Step 2: Plot the regression line on the `scatter plot`. # # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} df_tot.plot(kind='scatter', x='year', y='total', figsize=(10, 6), color='darkblue') plt.title('Total Immigration to Canada from 1980 - 2013') plt.xlabel('Year') plt.ylabel('Number of Immigrants') # plot line of best fit plt.plot(x, fit[0] * x + fit[1], color='red') # recall that x is the Years plt.annotate('y={0:.0f} x + {1:.0f}'.format(fit[0], fit[1]), xy=(2000, 150000)) plt.show() # print out the line of best fit 'No. Immigrants = {0:.0f} * Year + {1:.0f}'.format(fit[0], fit[1]) # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Using the equation of line of best fit, we can estimate the number of immigrants in 2015: # # ```python # No. Immigrants = 5567 * Year - 10926195 # No. Immigrants = 5567 * 2015 - 10926195 # No. Immigrants = 291,310 # ``` # # When compared to the actual from Citizenship and Immigration Canada's (CIC) [2016 Annual Report](http://www.cic.gc.ca/english/resources/publications/annual-report-2016/index.asp?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01), we see that Canada accepted 271,845 immigrants in 2015. Our estimated value of 291,310 is within 7% of the actual number, which is pretty good considering our original data came from United Nations (and might differ slightly from CIC data). # # As a side note, we can observe that immigration took a dip around 1993 - 1997. Further analysis into the topic revealed that in 1993 Canada introcuded Bill C-86 which introduced revisions to the refugee determination system, mostly restrictive. Further amendments to the Immigration Regulations cancelled the sponsorship required for "assisted relatives" and reduced the points awarded to them, making it more difficult for family members (other than nuclear family) to immigrate to Canada. These restrictive measures had a direct impact on the immigration numbers for the next several years. # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # **Question**: Create a scatter plot of the total immigration from Denmark, Norway, and Sweden to Canada from 1980 to 2013? 
# # + [markdown] button=false new_sheet=false run_control={"read_only": false} # **Step 1**: Get the data: # # 1. Create a dataframe the consists of the numbers associated with Denmark, Norway, and Sweden only. Name it **df_countries**. # 2. Sum the immigration numbers across all three countries for each year and turn the result into a dataframe. Name this new dataframe **df_total**. # 3. Reset the index in place. # 4. Rename the columns to **year** and **total**. # 5. Display the resulting dataframe. # # + # create df_countries dataframe df_countries = df_can.loc[['Denmark', 'Norway', 'Sweden'], years].transpose() # create df_total by summing across three countries for each year df_total = pd.DataFrame(df_countries.sum(axis=1)) # reset index in place df_total.reset_index(inplace=True) # rename columns df_total.columns = ['year', 'total'] # change column year from string to int to create scatter plot df_total['year'] = df_total['year'].astype(int) # show resulting dataframe df_total.head() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # <details><summary>Click here for a sample python solution</summary> # # ```python # #The correct answer is: # # # create df_countries dataframe # df_countries = df_can.loc[['Denmark', 'Norway', 'Sweden'], years].transpose() # # # create df_total by summing across three countries for each year # df_total = pd.DataFrame(df_countries.sum(axis=1)) # # # reset index in place # df_total.reset_index(inplace=True) # # # rename columns # df_total.columns = ['year', 'total'] # # # change column year from string to int to create scatter plot # df_total['year'] = df_total['year'].astype(int) # # # show resulting dataframe # df_total.head() # # # ``` # # </details> # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # **Step 2**: Generate the scatter plot by plotting the total versus year in **df_total**. # # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} # generate scatter plot df_total.plot(kind='scatter', x='year', y='total', figsize=(10, 6), color='darkblue') # add title and label to axes plt.title('Immigration from Denmark, Norway, and Sweden to Canada from 1980 - 2013') plt.xlabel('Year') plt.ylabel('Number of Immigrants') # show plot plt.show() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # <details><summary>Click here for a sample python solution</summary> # # ```python # #The correct answer is: # # # generate scatter plot # df_total.plot(kind='scatter', x='year', y='total', figsize=(10, 6), color='darkblue') # # # add title and label to axes # plt.title('Immigration from Denmark, Norway, and Sweden to Canada from 1980 - 2013') # plt.xlabel('Year') # plt.ylabel('Number of Immigrants') # # # show plot # plt.show() # # # ``` # # </details> # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # # Bubble Plots <a id="12"></a> # # A `bubble plot` is a variation of the `scatter plot` that displays three dimensions of data (x, y, z). The data points are replaced with bubbles, and the size of the bubble is determined by the third variable `z`, also known as the weight. In `maplotlib`, we can pass in an array or scalar to the parameter `s` to `plot()`, that contains the weight of each point. # # **Let's start by analyzing the effect of Argentina's great depression**. 
# # Argentina suffered a great depression from 1998 to 2002, which caused widespread unemployment, riots, the fall of the government, and a default on the country's foreign debt. In terms of income, over 50% of Argentines were poor, and seven out of ten Argentine children were poor at the depth of the crisis in 2002. # # Let's analyze the effect of this crisis, and compare Argentina's immigration to that of it's neighbour Brazil. Let's do that using a `bubble plot` of immigration from Brazil and Argentina for the years 1980 - 2013. We will set the weights for the bubble as the *normalized* value of the population for each year. # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # **Step 1**: Get the data for Brazil and Argentina. Like in the previous example, we will convert the `Years` to type int and include it in the dataframe. # # + # transposed dataframe df_can_t = df_can[years].transpose() # cast the Years (the index) to type int df_can_t.index = map(int, df_can_t.index) # let's label the index. This will automatically be the column name when we reset the index df_can_t.index.name = 'Year' # reset index to bring the Year in as a column df_can_t.reset_index(inplace=True) # view the changes df_can_t.head() # + [markdown] button=false new_sheet=false run_control={"read_only": false} # **Step 2**: Create the normalized weights. # # There are several methods of normalizations in statistics, each with its own use. In this case, we will use [feature scaling](https://en.wikipedia.org/wiki/Feature_scaling?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01) to bring all values into the range \[0, 1]. The general formula is: # # <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork/labs/Module%203/images/Mod3Fig3FeatureScaling.png" align="center"> # # where $X$ is the original value, $X'$ is the corresponding normalized value. The formula sets the max value in the dataset to 1, and sets the min value to 0. The rest of the data points are scaled to a value between 0-1 accordingly. # # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} # normalize Brazil data norm_brazil = (df_can_t['Brazil'] - df_can_t['Brazil'].min()) / (df_can_t['Brazil'].max() - df_can_t['Brazil'].min()) # normalize Argentina data norm_argentina = (df_can_t['Argentina'] - df_can_t['Argentina'].min()) / (df_can_t['Argentina'].max() - df_can_t['Argentina'].min()) # + [markdown] button=false new_sheet=false run_control={"read_only": false} # **Step 3**: Plot the data. # # * To plot two different scatter plots in one plot, we can include the axes one plot into the other by passing it via the `ax` parameter. # * We will also pass in the weights using the `s` parameter. Given that the normalized weights are between 0-1, they won't be visible on the plot. Therefore, we will: # * multiply weights by 2000 to scale it up on the graph, and, # * add 10 to compensate for the min value (which has a 0 weight and therefore scale with $\times 2000$). 
# # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} # Brazil ax0 = df_can_t.plot(kind='scatter', x='Year', y='Brazil', figsize=(14, 8), alpha=0.5, # transparency color='green', s=norm_brazil * 2000 + 10, # pass in weights xlim=(1975, 2015) ) # Argentina ax1 = df_can_t.plot(kind='scatter', x='Year', y='Argentina', alpha=0.5, color="blue", s=norm_argentina * 2000 + 10, ax=ax0 ) ax0.set_ylabel('Number of Immigrants') ax0.set_title('Immigration from Brazil and Argentina from 1980 to 2013') ax0.legend(['Brazil', 'Argentina'], loc='upper left', fontsize='x-large') # + [markdown] button=false new_sheet=false run_control={"read_only": false} # The size of the bubble corresponds to the magnitude of immigrating population for that year, compared to the 1980 - 2013 data. The larger the bubble is, the more immigrants are in that year. # # From the plot above, we can see a corresponding increase in immigration from Argentina during the 1998 - 2002 great depression. We can also observe a similar spike around 1985 to 1993. In fact, Argentina had suffered a great depression from 1974 to 1990, just before the onset of 1998 - 2002 great depression. # # On a similar note, Brazil suffered the *Samba Effect* where the Brazilian real (currency) dropped nearly 35% in 1999. There was a fear of a South American financial crisis as many South American countries were heavily dependent on industrial exports from Brazil. The Brazilian government subsequently adopted an austerity program, and the economy slowly recovered over the years, culminating in a surge in 2010. The immigration data reflect these events. # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # **Question**: Previously in this lab, we created box plots to compare immigration from China and India to Canada. Create bubble plots of immigration from China and India to visualize any differences with time from 1980 to 2013. You can use **df_can_t** that we defined and used in the previous example. # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Step 1: Normalize the data pertaining to China and India. # # + button=false new_sheet=false run_control={"read_only": false} # normalized Chinese data norm_china = (df_can_t['China'] - df_can_t['China'].min()) / (df_can_t['China'].max() - df_can_t['China'].min()) # normalized Indian data norm_india = (df_can_t['India'] - df_can_t['India'].min()) / (df_can_t['India'].max() - df_can_t['India'].min()) # + [markdown] button=false new_sheet=false run_control={"read_only": false} # <details><summary>Click here for a sample python solution</summary> # # ```python # #The correct answer is: # # # normalized Chinese data # norm_china = (df_can_t['China'] - df_can_t['China'].min()) / (df_can_t['China'].max() - df_can_t['China'].min()) # # normalized Indian data # norm_india = (df_can_t['India'] - df_can_t['India'].min()) / (df_can_t['India'].max() - df_can_t['India'].min()) # # # ``` # # </details> # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Step 2: Generate the bubble plots. 
# # + button=false jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false} # China ax0 = df_can_t.plot(kind='scatter', x='Year', y='China', figsize=(14, 8), alpha=0.5, # transparency color='green', s=norm_china * 2000 + 10, # pass in weights xlim=(1975, 2015) ) # India ax1 = df_can_t.plot(kind='scatter', x='Year', y='India', alpha=0.5, color="blue", s=norm_india * 2000 + 10, ax = ax0 ) ax0.set_ylabel('Number of Immigrants') ax0.set_title('Immigration from China and India from 1980 - 2013') ax0.legend(['China', 'India'], loc='upper left', fontsize='x-large') # + [markdown] button=false new_sheet=false run_control={"read_only": false} # <details><summary>Click here for a sample python solution</summary> # # ```python # #The correct answer is: # # # China # ax0 = df_can_t.plot(kind='scatter', # x='Year', # y='China', # figsize=(14, 8), # alpha=0.5, # transparency # color='green', # s=norm_china * 2000 + 10, # pass in weights # xlim=(1975, 2015) # ) # # # India # ax1 = df_can_t.plot(kind='scatter', # x='Year', # y='India', # alpha=0.5, # color="blue", # s=norm_india * 2000 + 10, # ax = ax0 # ) # # ax0.set_ylabel('Number of Immigrants') # ax0.set_title('Immigration from China and India from 1980 - 2013') # ax0.legend(['China', 'India'], loc='upper left', fontsize='x-large') # # # ``` # # </details> # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ### Thank you for completing this lab! # # ## Author # # <a href="https://www.linkedin.com/in/aklson/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01" target="_blank"><NAME></a> # # ### Other Contributors # # [<NAME>](https://www.linkedin.com/in/jayrajasekharan?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01), # [<NAME>](https://www.linkedin.com/in/ehsanmkermani?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01), # [<NAME>](https://www.linkedin.com/in/slobodan-markovic?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01), # [<NAME>](https://www.linkedin.com/in/weiqing-wang-641640133?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01). # # ## Change Log # # | Date (YYYY-MM-DD) | Version | Changed By | Change Description | # | ----------------- | ------- | ------------ | ---------------------------------- | # | 2021-05-29 | 2.6 | <NAME> | Fixed typos and code smells. | # | 2021-01-20 | 2.5 | LakshmiHolla | Changed TOC markdown section | # | 2021-01-05 | 2.4 | LakshmiHolla | Changed markdown for outliers | # | 2020-11-12 | 2.3 | LakshmiHolla | Added example code for outliers | # | 2020-11-03 | 2.2 | LakshmiHolla | Changed URL of excel file | # | 2020-09-29 | 2.1 | LakshmiHolla | Made fix to a boxplot label | # | 2020-08-27 | 2.0 | Lavanya | Moved lab to course repo in GitLab | # # ## <h3 align="center"> © IBM Corporation 2020. 
All rights reserved. </h3> #
8_Data Visualization with Python/2-2-Pie-Charts-Box-Plots-Scatter-Plots-and-Bubble-Plots-py-v2.0.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import copy import h5py import itertools import numpy as np import os import scipy import scipy.interpolate import tqdm import matplotlib import matplotlib.pyplot as plt import palettable import yt import trident import unyt as u import kalepy as kale import verdict import one_zone # # Parameters # Analysis parameters seed = 15482 rng = np.random.default_rng( seed ) verbose = False # Data management parameters distribution_fp = './data/EAGLE/histogram_galaxies_logM200c-Msun-12.0-12.5_200_seed0_hneutralssh.hdf5' data_dir = './data/synthetic_data/sample1' observer_data_dir = './data/synthetic_data_samples/sample1' # Ray parameters redshift = 0.25 n_sightlines = 100 min_clouds_per_sightline = 1 max_clouds_per_sightline = 3 velocity_range = [ -150., 150. ] # In km/s finite_cloud_max_logT = 5 # We'll only allow one cloud per line of sight with temperatures greater than this # Spectra parameters ions = [ 'H I', 'O I', 'C II', 'C III', 'N II', 'N III', 'Si II', 'Si III', 'Si IV', # 'N V', 'O VI', 'Mg II' ] fields = [ 'H_p0_number_density', 'O_p0_number_density', 'C_p1_number_density', 'C_p2_number_density', 'N_p1_number_density', 'N_p2_number_density', 'Si_p1_number_density', 'Si_p2_number_density', 'Si_p3_number_density', # 'N_p4_number_density', 'O_p5_number_density', 'Mg_p1_number_density' ] snr = 30 # # Load Data sls = verdict.Dict.from_hdf5( './data/synthetic_data/sample1/sightlines.h5', jagged_flag='sl') clouds = {} for key, item in sls.items(): clouds[key] = np.concatenate( item ) # Objects for use ldb = trident.LineDatabase('lines.txt') sg_cos = trident.SpectrumGenerator('COS-G130M') # Spectrum Generator for Mg II from ground lambda_mg = ldb.select_lines( 'Mg', 'II' )[0].wavelength * ( 1. + redshift ) sg_mg = trident.SpectrumGenerator( lambda_min = lambda_mg - 15., lambda_max = lambda_mg + 15., dlambda = 0.01, lsf_kernel = os.path.join( trident.path, 'data', 'lsf_kernels', 'avg_COS.txt' ), ) # # Investigate Individual Sightlines for Consistency # # We'll take one individual sightline, regenerate it, make sure the regenerated spectra matches with what we gave modelers, and look at why the spectra looks as it does. i = 4 sg = sg_mg density = 10.**sls['Density'][i] * u.g * u.cm**-3 temperature = 10.**sls['Temperature'][i] * u.K metallicity = 10.**sls['Metallicity'][i] * u.Zsun / 0.014 HI_column = 10.**sls['HI Column'][i] * u.cm**-2 velocity = sls['LOS Velocity'][i] * u.km / u.s # + lengths_i = [] taus_i = [] for j, density_j in enumerate( density ): # First, let's create a one-zone dataset for our desired density, # temperature, metallicity, and redshift. We'll arbitrarily set it to # be 1 kpc in width. ds = trident.make_onezone_dataset( density = density[j], temperature = temperature[j], metallicity = metallicity[j], domain_width = 1.*u.kpc ) ds.current_redshift = redshift # Now let's add our desired ions to this dataset, using Trident's # lookup table based on the Haardt-Madau 2012 UV background. trident.add_ion_fields(ds, ions=ions) # Since we now know the HI number density for this dataset, and we # have a desired HI column density from the simulation distribution, we can divide # these two to get a desired length for the dataset. 
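    # Unit check: the HI column density (cm**-2) divided by the HI number density
    # (cm**-3) yields a pathlength with units of cm.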
length = HI_column[j] / ds.r[('gas', 'H_p0_number_density')][0] lengths_i.append( length ) if verbose: print("DEBUG: For a log HI column of %s, we require a Pathlength of %s" % (np.log10(HI_column[i]), length.to('kpc'))) if verbose: # Print out the dataset to STDOUT one_zone.print_dataset(ray, ions, fields) # Correct the redshift z_vel = np.sqrt( ( 1 + velocity[j] / u.c) / ( 1 - velocity[j] / u.c) ) - 1. z_cloud = ( 1. + redshift )*( 1. + z_vel ) - 1. # Now that we have a length for our dataset, which will produce our # desired HI column density, let's generate a one-zone # LightRay (skewer) using this length, density, temperature, & redshift. # And add the relevant ions to this dataset. ray = trident.make_onezone_ray( density = density[j], temperature = temperature[j], metallicity = metallicity[j], length = length, redshift = z_cloud, ) trident.add_ion_fields(ray, ions=ions) spectrum_fp = './debug/sl_{}_{}.h5'.format( i, j ) sg.make_spectrum( ray, lines=ions ) sg.save_spectrum( spectrum_fp ) sg.plot_spectrum( './debug/spectrum_{}_{}.png'.format( i, j )) d = verdict.Dict.from_hdf5( spectrum_fp ) wavelength = d['wavelength'] taus_i.append( d['tau'] ) lengths_i = np.array( lengths_i ) taus_i = np.array( taus_i ) # Combine into a new spectrum tau_i = np.sum( taus_i, axis=0 ) flux_i = np.exp( -tau_i ) sg.load_spectrum( wavelength * u.Angstrom, tau_i, flux_i ) # + # Noise sg.apply_lsf() sg.add_gaussian_noise( snr ) # Save sg.save_spectrum( './debug/spectrum_sl{:04d}.h5'.format( i ) ) sg.plot_spectrum( './debug/spectrum_sl{:04d}.png'.format( i ) ) # - import matplotlib.image as mpimg # Constituent spectra for j, density_j in enumerate( density ): plt.imshow( mpimg.imread( './debug/spectrum_{}_{}.png'.format( i, j ) ) ) plt.show() # Final spectra most recently produced plt.imshow( mpimg.imread( './debug/spectrum_sl{:04d}.png'.format( i ) ) ) # Spectra given to observers plt.imshow( mpimg.imread( './debug/spectrum_MgII_sl{:04d}.png'.format( i ) ) ) # Looks good (*IF* N V is included, which was contaminating)! Now why is it so intense? j = 0 ( density[j] / u.mp ).to( 'cm**-3' ), temperature[j], metallicity[j] # Woah! Look at that metallicity! Let's compare it to the simulation metallicity distribution. ii = copy.copy( i ) jj = copy.copy( j ) f = h5py.File( distribution_fp, 'r' ) param_labels = [ 'Temperature', 'Density', 'Metallicity', 'HI Column' ] # + # Get centers, reformat edges histogram_axes = [ 'Temperature', 'Density', 'Metallicity', 'hneutralssh' ] centers = [] bins = [] dxs = [] for i, key in enumerate( histogram_axes ): arr = copy.copy( f['histogram_axes'][key][...] ) dx = arr[2] - arr[1] # For convenience, let's not have a -inf edge lying around if key == 'Metallicity': arr[0] = arr[1] - dx centers_i = arr[:-1] + 0.5 * dx dxs.append( dx ) centers.append( centers_i ) bins.append( arr ) # + # Normalize into a pdf norm = f['histogram'][...].sum() * dxs[0] * dxs[1] * dxs[2] * dxs[3] pdf = f['histogram'][...] 
/ norm # - # Set up combinations to iterate through inds = range( len( histogram_axes ) ) combinations = [ _ for _ in itertools.combinations( inds, 2 ) ] phase_diagram = verdict.Dict.from_hdf5( './data/m12i_phase_diagram_z0.25.h5' ) labels = [ 'Temperature (K)', r'Density (g/cm$^{3}$)', 'Metallicity (mass fraction)', r'$N_{\rm HI}$ (cm$^{-2}$)' ] for k, (i, j) in enumerate( combinations ): # Show data fig = plt.figure( figsize=(8,8), facecolor='w' ) ax = plt.gca() sum_axes = copy.copy( list( inds ) ) sum_axes.remove( i ) sum_axes.remove( j ) projection = pdf.sum( axis=tuple( sum_axes ) ).transpose() x, y = np.meshgrid( centers[i], centers[j] ) ax.pcolormesh( centers[i], centers[j], np.log10( projection ), cmap = 'cubehelix_r', ) ax.scatter( sls[param_labels[i]][ii][jj], sls[param_labels[j]][ii][jj], color = 'blue', s = 100, ) ax.set_xlabel( labels[i], fontsize=22 ) ax.set_ylabel( labels[j], fontsize=22 ) ax.set_aspect( 'auto' ) plt.savefig( './figures/sample1/clouddist_{}_{}.png'.format( param_labels[i].replace( ' ', '' ), param_labels[j].replace( ' ', '' ) ), bbox_inches = 'tight', ) # 1D histograms for i, param_label in enumerate( param_labels ): fig = plt.figure( figsize=(10,4), facecolor='w' ) ax = plt.gca() sum_axes = copy.copy( list( inds ) ) sum_axes.remove( i ) projection = pdf.sum( axis=tuple( sum_axes ) ) projection /= projection.sum() * dxs[i] ax.step( centers[i], projection, where = 'mid', color = palettable.cartocolors.qualitative.Safe_10.mpl_colors[0], linewidth = 3, ) # For convenience, let's not have a -inf edge lying around used_bins = copy.copy( bins[i] ) if param_label == 'Metallicity': used_bins[-1] = 1.0001 hist, used_bins = np.histogram( clouds[param_labels[i]], bins = used_bins, ) hist = hist / ( hist.sum() * dxs[i] ) ax.step( used_bins[1:], hist, where = 'pre', color = 'k', linewidth = 3, ) ax.axvline( sls[param_label][ii][jj], color = 'k', linewidth = 3, linestyle = '--', ) ax.set_xlabel( labels[i], fontsize=20 ) # Consistent with the metallicity distribution. Asking Nastasha how such enriched gas is produced in the EAGLE sims. 
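# As a rough, hypothetical cross-check (not in the original analysis; `i_Z`, `other_axes`, `pdf_Z`, `above`, and `frac_above` are new names introduced here), we can marginalize the EAGLE pdf over the other axes and estimate how much of its weight lies at or above this cloud's metallicity.

# +
i_Z = param_labels.index( 'Metallicity' )
other_axes = tuple( ax for ax in inds if ax != i_Z )

# 1D marginal pdf in log metallicity
pdf_Z = pdf.sum( axis=other_axes )
pdf_Z /= pdf_Z.sum() * dxs[i_Z]

# fraction of the distribution at or above the flagged cloud's metallicity
above = centers[i_Z] >= sls['Metallicity'][ii][jj]
frac_above = ( pdf_Z[above] * dxs[i_Z] ).sum()
print( 'Fraction of the EAGLE cloud distribution at or above this metallicity: {:.3g}'.format( frac_above ) )
# -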
# ## Check Sameer and Jane's First Results indices = [ 5, 71, 76, ] den_comb = [] met_comb = [] temp_comb = [] for i in indices: print( 'Sightline {:03d}'.format( i ) ) density = 10.**sls['Density'][i] * u.g * u.cm**-3 / u.mp * 0.75 temperature = 10.**sls['Temperature'][i] * u.K metallicity = 10.**sls['Metallicity'][i] / Z_sun HI_column = 10.**sls['HI Column'][i] * u.cm**-2 velocity = sls['LOS Velocity'][i] * u.km / u.s lengths = sls['Lengths'][i] * u.cm for j, den in enumerate( density ): print( ' logZ = {:.3g}, logT = {:.3g}, logn = {:.3g}'.format( np.log10( metallicity[j] ), np.log10( temperature[j] ), np.log10( den ), ) ) if len( velocity ) == 2: print( ' delta_v = {:.3g}'.format( np.abs( velocity[1] - velocity[0] ) ) ) den = ( density * lengths ).sum() / lengths.sum() temp = ( temperature * density * lengths ).sum() / ( density * lengths ).sum() met = ( metallicity * Z_sun * density * lengths ).sum() / ( Z_sun * density * lengths ).sum() print( ' Combined, logZ = {:.3g}, logT = {:.3g}, logn = {:.3g}'.format( np.log10( met ), np.log10( temp ), np.log10( den ), ) ) den_comb.append( den ) met_comb.append( met ) temp_comb.append( temp ) sameer_charlton = { 5: { 'Density': [ ( -2.21, 0.49, 0.21 ), ( -4.83, 4.10, 1.13) ], 'Temperature': [ ( 4.76, 0.02, 0.02 ), ( 5.48, 0.22, 1.74 ) ], 'Metallicity': [ ( 1.14, 0.08, 0.08 ), ( 0.11, 1.25, 1.63 ) ], }, 71: { 'Density': [ ( -3.11, 0.16, 0.11 ), ], 'Temperature': [ ( 4.50, 0.01, 0.02 ), ], 'Metallicity': [ ( 0.06, 0.09, 0.09 ), ], }, 76: { 'Density': [ ( -3.55, 0.24, 0.21 ), ( -3.46, 0.44, 0.29), ( -5.73, 0.59, 0.26) ], 'Temperature': [ ( 4.88, 0.07, 0.10 ), ( 5.00, 0.04, -0.04 ), (4.42, 0.5, 0.79, ) ], 'Metallicity': [ ( 0.58, 0.78, 0.85 ), ( -1.40, 0.18, 0.23), ( -0.55, 1.77, 0.45 ) ], }, } # + fig = plt.figure( figsize=(8,20), facecolor='w' ) # ax_main = plt.gca() gs = matplotlib.gridspec.GridSpec( 3, 1 ) main_xs = np.arange( len( indices ) ) + 1 ### Metallicity ############################ ax = fig.add_subplot( gs[0,0] ) # Combined ax.scatter( main_xs, np.log10( met_comb ), color = 'none', edgecolor = 'k', s = 200, ) gs.update( hspace=0.0001) # Individual clouds for i, ind in enumerate( indices ): ys = np.log10( 10.**sls['Metallicity'][ind] / Z_sun ) val_sc = np.array( sameer_charlton[ind]['Metallicity'] ) ys_sc = val_sc[:,0] xs = np.full( ys.size, i+1 ) ax.scatter( xs, ys, color = 'k', s = 100, zorder = 0, ) xs = np.full( ys_sc.size, i+1 ) # Add some offset if xs.size > 1: xs = xs + np.arange( xs.size ) * 0.02 ax.scatter( xs, ys_sc, color = colormap[0], s = 50, zorder = 10, ) ax.errorbar( xs, ys_sc, yerr = val_sc[:,[1,2]].transpose()[::-1], color = colormap[0], zorder = 9, linewidth = 0, elinewidth = 2, ) ax.set_ylabel( r'$\log_{10} Z / Z_\odot$', fontsize=22 ) ax.tick_params( axis='x', bottom=False, labelbottom=False ) ### Temperature ############################ ax = fig.add_subplot( gs[1,0] ) # Combined ax.scatter( main_xs, np.log10( temp_comb ), color = 'none', edgecolor = 'k', s = 200, ) # Individual clouds for i, ind in enumerate( indices ): ys = sls['Temperature'][ind] val_sc = np.array( sameer_charlton[ind]['Temperature'] ) ys_sc = val_sc[:,0] xs = np.full( ys.size, i+1 ) ax.scatter( xs, ys, color = 'k', s = 100, zorder = 0, ) xs = np.full( ys_sc.size, i+1 ) # Add some offset if xs.size > 1: xs = xs + np.arange( xs.size ) * 0.02 ax.scatter( xs, ys_sc, color = colormap[0], s = 50, zorder = 10, ) ax.errorbar( xs, ys_sc, yerr = val_sc[:,[1,2]].transpose()[::-1], color = colormap[0], zorder = 9, linewidth = 0, elinewidth = 2, ) 
ax.set_ylabel( r'$\log_{10} T / K$', fontsize=22 ) ax.tick_params( axis='x', bottom=False, labelbottom=False ) ### Density ############################ ax = fig.add_subplot( gs[2,0] ) # Combined ax.scatter( main_xs, np.log10( den_comb ), color = 'none', edgecolor = 'k', s = 200, ) # Individual clouds for i, ind in enumerate( indices ): ys = np.log10( 10.**sls['Density'][ind] * u.g * u.cm**-3 / u.mp * 0.75 ) val_sc = np.array( sameer_charlton[ind]['Density'] ) ys_sc = val_sc[:,0] xs = np.full( ys.size, i+1 ) ax.scatter( xs, ys, color = 'k', s = 100, zorder = 0, ) xs = np.full( ys_sc.size, i+1 ) # Add some offset if xs.size > 1: xs = xs + np.arange( xs.size ) * 0.02 ax.scatter( xs, ys_sc, color = colormap[0], s = 50, zorder = 10, ) ax.errorbar( xs, ys_sc, yerr = val_sc[:,[1,2]].transpose()[::-1], color = colormap[0], zorder = 9, linewidth = 0, elinewidth = 2, ) ax.set_ylabel( r'$\log_{10} n_{\rm H} / {\rm cm}^{-3}$', fontsize=22 ) ax.tick_params( axis='x', bottom=False, labelbottom=False ) # - # # Some Listed Lines are Actually Outside Observable Range suspect_ions = [ 'Si III', 'Si IV', 'N V' ] def print_observable_redshifts( ion_list ): for ion in ion_list: print( ion ) lines = ldb.select_lines( *ion.split( ' ' ) ) for i, line in enumerate( lines ): lambda_rest = line.wavelength * u.angstrom min_z = sg_cos.lambda_min / lambda_rest - 1. max_z = sg_cos.lambda_max / lambda_rest - 1. print( ' {} observable redshifts = [{:.2g}, {:.2g}]'.format( lambda_rest, min_z, max_z ) ) print_observable_redshifts( suspect_ions ) print_observable_redshifts( ions )
sample1_modeler_questions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] papermill={"duration": 0.067335, "end_time": "2021-05-20T23:08:09.881496", "exception": false, "start_time": "2021-05-20T23:08:09.814161", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[] # # RadarCOVID-Report # + [markdown] papermill={"duration": 0.06511, "end_time": "2021-05-20T23:08:10.008188", "exception": false, "start_time": "2021-05-20T23:08:09.943078", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[] # ## Data Extraction # + papermill={"duration": 2.611602, "end_time": "2021-05-20T23:08:12.676773", "exception": false, "start_time": "2021-05-20T23:08:10.065171", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] import datetime import json import logging import os import shutil import tempfile import textwrap import uuid import matplotlib.pyplot as plt import matplotlib.ticker import numpy as np import pandas as pd import pycountry import retry import seaborn as sns # %matplotlib inline # + papermill={"duration": 0.073243, "end_time": "2021-05-20T23:08:12.810085", "exception": false, "start_time": "2021-05-20T23:08:12.736842", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] current_working_directory = os.environ.get("PWD") if current_working_directory: os.chdir(current_working_directory) sns.set() matplotlib.rcParams["figure.figsize"] = (15, 6) extraction_datetime = datetime.datetime.utcnow() extraction_date = extraction_datetime.strftime("%Y-%m-%d") extraction_previous_datetime = extraction_datetime - datetime.timedelta(days=1) extraction_previous_date = extraction_previous_datetime.strftime("%Y-%m-%d") extraction_date_with_hour = datetime.datetime.utcnow().strftime("%Y-%m-%d@%H") current_hour = datetime.datetime.utcnow().hour are_today_results_partial = current_hour != 23 # + [markdown] papermill={"duration": 0.057293, "end_time": "2021-05-20T23:08:12.921423", "exception": false, "start_time": "2021-05-20T23:08:12.864130", "status": "completed"} tags=[] # ### Constants # + papermill={"duration": 0.157713, "end_time": "2021-05-20T23:08:13.134502", "exception": false, "start_time": "2021-05-20T23:08:12.976789", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] from Modules.ExposureNotification import exposure_notification_io spain_region_country_code = "ES" germany_region_country_code = "DE" default_backend_identifier = spain_region_country_code backend_generation_days = 7 * 2 daily_summary_days = 7 * 4 * 3 daily_plot_days = 7 * 4 tek_dumps_load_limit = daily_summary_days + 1 # + [markdown] papermill={"duration": 0.052953, "end_time": "2021-05-20T23:08:13.244866", "exception": false, "start_time": "2021-05-20T23:08:13.191913", "status": "completed"} tags=[] # ### Parameters # + papermill={"duration": 0.065737, "end_time": "2021-05-20T23:08:13.368325", "exception": false, "start_time": "2021-05-20T23:08:13.302588", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] environment_backend_identifier = os.environ.get("RADARCOVID_REPORT__BACKEND_IDENTIFIER") if environment_backend_identifier: report_backend_identifier = environment_backend_identifier else: report_backend_identifier = default_backend_identifier report_backend_identifier # + papermill={"duration": 0.068599, "end_time": "2021-05-20T23:08:13.498233", "exception": false, "start_time": "2021-05-20T23:08:13.429634", "status": "completed"} 
pycharm={"name": "#%%\n"} tags=[] environment_enable_multi_backend_download = \ os.environ.get("RADARCOVID_REPORT__ENABLE_MULTI_BACKEND_DOWNLOAD") if environment_enable_multi_backend_download: report_backend_identifiers = None else: report_backend_identifiers = [report_backend_identifier] report_backend_identifiers # + papermill={"duration": 0.068887, "end_time": "2021-05-20T23:08:13.623015", "exception": false, "start_time": "2021-05-20T23:08:13.554128", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] environment_invalid_shared_diagnoses_dates = \ os.environ.get("RADARCOVID_REPORT__INVALID_SHARED_DIAGNOSES_DATES") if environment_invalid_shared_diagnoses_dates: invalid_shared_diagnoses_dates = environment_invalid_shared_diagnoses_dates.split(",") else: invalid_shared_diagnoses_dates = [] invalid_shared_diagnoses_dates # + [markdown] papermill={"duration": 0.056872, "end_time": "2021-05-20T23:08:13.739155", "exception": false, "start_time": "2021-05-20T23:08:13.682283", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[] # ### COVID-19 Cases # + papermill={"duration": 0.074536, "end_time": "2021-05-20T23:08:13.871992", "exception": false, "start_time": "2021-05-20T23:08:13.797456", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] report_backend_client = \ exposure_notification_io.get_backend_client_with_identifier( backend_identifier=report_backend_identifier) # + papermill={"duration": 0.862278, "end_time": "2021-05-20T23:08:14.789482", "exception": false, "start_time": "2021-05-20T23:08:13.927204", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] @retry.retry(tries=10, delay=10, backoff=1.1, jitter=(0, 10)) def download_cases_dataframe(): return pd.read_csv("https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv") confirmed_df_ = download_cases_dataframe() confirmed_df_.iloc[0] # + papermill={"duration": 0.993677, "end_time": "2021-05-20T23:08:15.843270", "exception": false, "start_time": "2021-05-20T23:08:14.849593", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] confirmed_df = confirmed_df_.copy() confirmed_df = confirmed_df[["date", "new_cases", "iso_code"]] confirmed_df.rename( columns={ "date": "sample_date", "iso_code": "country_code", }, inplace=True) def convert_iso_alpha_3_to_alpha_2(x): try: return pycountry.countries.get(alpha_3=x).alpha_2 except Exception as e: logging.info(f"Error converting country ISO Alpha 3 code '{x}': {repr(e)}") return None confirmed_df["country_code"] = confirmed_df.country_code.apply(convert_iso_alpha_3_to_alpha_2) confirmed_df.dropna(inplace=True) confirmed_df["sample_date"] = pd.to_datetime(confirmed_df.sample_date, dayfirst=True) confirmed_df["sample_date"] = confirmed_df.sample_date.dt.strftime("%Y-%m-%d") confirmed_df.sort_values("sample_date", inplace=True) confirmed_df.tail() # + papermill={"duration": 0.085447, "end_time": "2021-05-20T23:08:15.986761", "exception": false, "start_time": "2021-05-20T23:08:15.901314", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] confirmed_days = pd.date_range( start=confirmed_df.iloc[0].sample_date, end=extraction_datetime) confirmed_days_df = pd.DataFrame(data=confirmed_days, columns=["sample_date"]) confirmed_days_df["sample_date_string"] = \ confirmed_days_df.sample_date.dt.strftime("%Y-%m-%d") confirmed_days_df.tail() # + papermill={"duration": 0.067045, "end_time": "2021-05-20T23:08:16.113010", "exception": false, "start_time": "2021-05-20T23:08:16.045965", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] def 
sort_source_regions_for_display(source_regions: list) -> list: if report_backend_identifier in source_regions: source_regions = [report_backend_identifier] + \ list(sorted(set(source_regions).difference([report_backend_identifier]))) else: source_regions = list(sorted(source_regions)) return source_regions # + papermill={"duration": 0.0799, "end_time": "2021-05-20T23:08:16.254872", "exception": false, "start_time": "2021-05-20T23:08:16.174972", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] report_source_regions = report_backend_client.source_regions_for_date( date=extraction_datetime.date()) report_source_regions = sort_source_regions_for_display( source_regions=report_source_regions) report_source_regions # + papermill={"duration": 0.079703, "end_time": "2021-05-20T23:08:16.394730", "exception": false, "start_time": "2021-05-20T23:08:16.315027", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] def get_cases_dataframe(source_regions_for_date_function, columns_suffix=None): source_regions_at_date_df = confirmed_days_df.copy() source_regions_at_date_df["source_regions_at_date"] = \ source_regions_at_date_df.sample_date.apply( lambda x: source_regions_for_date_function(date=x)) source_regions_at_date_df.sort_values("sample_date", inplace=True) source_regions_at_date_df["_source_regions_group"] = source_regions_at_date_df. \ source_regions_at_date.apply(lambda x: ",".join(sort_source_regions_for_display(x))) source_regions_at_date_df.tail() #%% source_regions_for_summary_df_ = \ source_regions_at_date_df[["sample_date", "_source_regions_group"]].copy() source_regions_for_summary_df_.rename(columns={"_source_regions_group": "source_regions"}, inplace=True) source_regions_for_summary_df_.tail() #%% confirmed_output_columns = ["sample_date", "new_cases", "covid_cases"] confirmed_output_df = pd.DataFrame(columns=confirmed_output_columns) for source_regions_group, source_regions_group_series in \ source_regions_at_date_df.groupby("_source_regions_group"): source_regions_set = set(source_regions_group.split(",")) confirmed_source_regions_set_df = \ confirmed_df[confirmed_df.country_code.isin(source_regions_set)].copy() confirmed_source_regions_group_df = \ confirmed_source_regions_set_df.groupby("sample_date").new_cases.sum() \ .reset_index().sort_values("sample_date") confirmed_source_regions_group_df = \ confirmed_source_regions_group_df.merge( confirmed_days_df[["sample_date_string"]].rename( columns={"sample_date_string": "sample_date"}), how="right") confirmed_source_regions_group_df["new_cases"] = \ confirmed_source_regions_group_df["new_cases"].clip(lower=0) confirmed_source_regions_group_df["covid_cases"] = \ confirmed_source_regions_group_df.new_cases.rolling(7, min_periods=0).mean().round() confirmed_source_regions_group_df = \ confirmed_source_regions_group_df[confirmed_output_columns] confirmed_source_regions_group_df = confirmed_source_regions_group_df.replace(0, np.nan) confirmed_source_regions_group_df.fillna(method="ffill", inplace=True) confirmed_source_regions_group_df = \ confirmed_source_regions_group_df[ confirmed_source_regions_group_df.sample_date.isin( source_regions_group_series.sample_date_string)] confirmed_output_df = confirmed_output_df.append(confirmed_source_regions_group_df) result_df = confirmed_output_df.copy() result_df.tail() #%% result_df.rename(columns={"sample_date": "sample_date_string"}, inplace=True) result_df = confirmed_days_df[["sample_date_string"]].merge(result_df, how="left") result_df.sort_values("sample_date_string", inplace=True) 
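    # Forward-fill gaps so dates without reported cases carry the latest known values forward.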
result_df.fillna(method="ffill", inplace=True) result_df.tail() #%% result_df[["new_cases", "covid_cases"]].plot() if columns_suffix: result_df.rename( columns={ "new_cases": "new_cases_" + columns_suffix, "covid_cases": "covid_cases_" + columns_suffix}, inplace=True) return result_df, source_regions_for_summary_df_ # + papermill={"duration": 0.962574, "end_time": "2021-05-20T23:08:17.422703", "exception": false, "start_time": "2021-05-20T23:08:16.460129", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] confirmed_eu_df, source_regions_for_summary_df = get_cases_dataframe( report_backend_client.source_regions_for_date) confirmed_es_df, _ = get_cases_dataframe( lambda date: [spain_region_country_code], columns_suffix=spain_region_country_code.lower()) # + [markdown] papermill={"duration": 0.11519, "end_time": "2021-05-20T23:08:17.619747", "exception": false, "start_time": "2021-05-20T23:08:17.504557", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[] # ### Extract API TEKs # + papermill={"duration": 79.196061, "end_time": "2021-05-20T23:09:36.881138", "exception": false, "start_time": "2021-05-20T23:08:17.685077", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] raw_zip_path_prefix = "Data/TEKs/Raw/" base_backend_identifiers = [report_backend_identifier] multi_backend_exposure_keys_df = \ exposure_notification_io.download_exposure_keys_from_backends( backend_identifiers=report_backend_identifiers, generation_days=backend_generation_days, fail_on_error_backend_identifiers=base_backend_identifiers, save_raw_zip_path_prefix=raw_zip_path_prefix) multi_backend_exposure_keys_df["region"] = multi_backend_exposure_keys_df["backend_identifier"] multi_backend_exposure_keys_df.rename( columns={ "generation_datetime": "sample_datetime", "generation_date_string": "sample_date_string", }, inplace=True) multi_backend_exposure_keys_df.head() # + papermill={"duration": 0.44887, "end_time": "2021-05-20T23:09:37.396170", "exception": false, "start_time": "2021-05-20T23:09:36.947300", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] early_teks_df = multi_backend_exposure_keys_df[ multi_backend_exposure_keys_df.rolling_period < 144].copy() early_teks_df["rolling_period_in_hours"] = early_teks_df.rolling_period / 6 early_teks_df[early_teks_df.sample_date_string != extraction_date] \ .rolling_period_in_hours.hist(bins=list(range(24))) # + papermill={"duration": 0.383434, "end_time": "2021-05-20T23:09:37.861585", "exception": false, "start_time": "2021-05-20T23:09:37.478151", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] early_teks_df[early_teks_df.sample_date_string == extraction_date] \ .rolling_period_in_hours.hist(bins=list(range(24))) # + papermill={"duration": 0.114575, "end_time": "2021-05-20T23:09:38.048047", "exception": false, "start_time": "2021-05-20T23:09:37.933472", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] multi_backend_exposure_keys_df = multi_backend_exposure_keys_df[[ "sample_date_string", "region", "key_data"]] multi_backend_exposure_keys_df.head() # + papermill={"duration": 4.704526, "end_time": "2021-05-20T23:09:42.819403", "exception": false, "start_time": "2021-05-20T23:09:38.114877", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] active_regions = \ multi_backend_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist() active_regions # + papermill={"duration": 4.762259, "end_time": "2021-05-20T23:09:47.663700", "exception": false, "start_time": "2021-05-20T23:09:42.901441", "status": 
"completed"} pycharm={"name": "#%%\n"} tags=[] multi_backend_summary_df = multi_backend_exposure_keys_df.groupby( ["sample_date_string", "region"]).key_data.nunique().reset_index() \ .pivot(index="sample_date_string", columns="region") \ .sort_index(ascending=False) multi_backend_summary_df.rename( columns={"key_data": "shared_teks_by_generation_date"}, inplace=True) multi_backend_summary_df.rename_axis("sample_date", inplace=True) multi_backend_summary_df = multi_backend_summary_df.fillna(0).astype(int) multi_backend_summary_df = multi_backend_summary_df.head(backend_generation_days) multi_backend_summary_df.head() # + papermill={"duration": 5.666196, "end_time": "2021-05-20T23:09:53.402740", "exception": false, "start_time": "2021-05-20T23:09:47.736544", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] def compute_keys_cross_sharing(x): teks_x = x.key_data_x.item() common_teks = set(teks_x).intersection(x.key_data_y.item()) common_teks_fraction = len(common_teks) / len(teks_x) return pd.Series(dict( common_teks=common_teks, common_teks_fraction=common_teks_fraction, )) multi_backend_exposure_keys_by_region_df = \ multi_backend_exposure_keys_df.groupby("region").key_data.unique().reset_index() multi_backend_exposure_keys_by_region_df["_merge"] = True multi_backend_exposure_keys_by_region_combination_df = \ multi_backend_exposure_keys_by_region_df.merge( multi_backend_exposure_keys_by_region_df, on="_merge") multi_backend_exposure_keys_by_region_combination_df.drop( columns=["_merge"], inplace=True) if multi_backend_exposure_keys_by_region_combination_df.region_x.nunique() > 1: multi_backend_exposure_keys_by_region_combination_df = \ multi_backend_exposure_keys_by_region_combination_df[ multi_backend_exposure_keys_by_region_combination_df.region_x != multi_backend_exposure_keys_by_region_combination_df.region_y] multi_backend_exposure_keys_cross_sharing_df = \ multi_backend_exposure_keys_by_region_combination_df \ .groupby(["region_x", "region_y"]) \ .apply(compute_keys_cross_sharing) \ .reset_index() multi_backend_cross_sharing_summary_df = \ multi_backend_exposure_keys_cross_sharing_df.pivot_table( values=["common_teks_fraction"], columns="region_x", index="region_y", aggfunc=lambda x: x.item()) multi_backend_cross_sharing_summary_df # + papermill={"duration": 4.220993, "end_time": "2021-05-20T23:09:57.690030", "exception": false, "start_time": "2021-05-20T23:09:53.469037", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] multi_backend_without_active_region_exposure_keys_df = \ multi_backend_exposure_keys_df[multi_backend_exposure_keys_df.region != report_backend_identifier] multi_backend_without_active_region = \ multi_backend_without_active_region_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist() multi_backend_without_active_region # + papermill={"duration": 0.142111, "end_time": "2021-05-20T23:09:57.902209", "exception": false, "start_time": "2021-05-20T23:09:57.760098", "status": "completed"} tags=[] exposure_keys_summary_df = multi_backend_exposure_keys_df[ multi_backend_exposure_keys_df.region == report_backend_identifier] exposure_keys_summary_df.drop(columns=["region"], inplace=True) exposure_keys_summary_df = \ exposure_keys_summary_df.groupby(["sample_date_string"]).key_data.nunique().to_frame() exposure_keys_summary_df = \ exposure_keys_summary_df.reset_index().set_index("sample_date_string") exposure_keys_summary_df.sort_index(ascending=False, inplace=True) exposure_keys_summary_df.rename(columns={"key_data": 
"shared_teks_by_generation_date"}, inplace=True) exposure_keys_summary_df.head() # + [markdown] papermill={"duration": 0.122151, "end_time": "2021-05-20T23:09:58.106790", "exception": false, "start_time": "2021-05-20T23:09:57.984639", "status": "completed"} tags=[] # ### Dump API TEKs # + papermill={"duration": 1.806272, "end_time": "2021-05-20T23:10:00.003939", "exception": false, "start_time": "2021-05-20T23:09:58.197667", "status": "completed"} tags=[] tek_list_df = multi_backend_exposure_keys_df[ ["sample_date_string", "region", "key_data"]].copy() tek_list_df["key_data"] = tek_list_df["key_data"].apply(str) tek_list_df.rename(columns={ "sample_date_string": "sample_date", "key_data": "tek_list"}, inplace=True) tek_list_df = tek_list_df.groupby( ["sample_date", "region"]).tek_list.unique().reset_index() tek_list_df["extraction_date"] = extraction_date tek_list_df["extraction_date_with_hour"] = extraction_date_with_hour tek_list_path_prefix = "Data/TEKs/" tek_list_current_path = tek_list_path_prefix + f"/Current/RadarCOVID-TEKs.json" tek_list_daily_path = tek_list_path_prefix + f"Daily/RadarCOVID-TEKs-{extraction_date}.json" tek_list_hourly_path = tek_list_path_prefix + f"Hourly/RadarCOVID-TEKs-{extraction_date_with_hour}.json" for path in [tek_list_current_path, tek_list_daily_path, tek_list_hourly_path]: os.makedirs(os.path.dirname(path), exist_ok=True) tek_list_base_df = tek_list_df[tek_list_df.region == report_backend_identifier] tek_list_base_df.drop(columns=["extraction_date", "extraction_date_with_hour"]).to_json( tek_list_current_path, lines=True, orient="records") tek_list_base_df.drop(columns=["extraction_date_with_hour"]).to_json( tek_list_daily_path, lines=True, orient="records") tek_list_base_df.to_json( tek_list_hourly_path, lines=True, orient="records") tek_list_base_df.head() # + [markdown] papermill={"duration": 0.075671, "end_time": "2021-05-20T23:10:00.150518", "exception": false, "start_time": "2021-05-20T23:10:00.074847", "status": "completed"} tags=[] # ### Load TEK Dumps # + papermill={"duration": 0.087771, "end_time": "2021-05-20T23:10:00.313241", "exception": false, "start_time": "2021-05-20T23:10:00.225470", "status": "completed"} tags=[] import glob def load_extracted_teks(mode, region=None, limit=None) -> pd.DataFrame: extracted_teks_df = pd.DataFrame(columns=["region"]) file_paths = list(reversed(sorted(glob.glob(tek_list_path_prefix + mode + "/RadarCOVID-TEKs-*.json")))) if limit: file_paths = file_paths[:limit] for file_path in file_paths: logging.info(f"Loading TEKs from '{file_path}'...") iteration_extracted_teks_df = pd.read_json(file_path, lines=True) extracted_teks_df = extracted_teks_df.append( iteration_extracted_teks_df, sort=False) extracted_teks_df["region"] = \ extracted_teks_df.region.fillna(spain_region_country_code).copy() if region: extracted_teks_df = \ extracted_teks_df[extracted_teks_df.region == region] return extracted_teks_df # + papermill={"duration": 0.821221, "end_time": "2021-05-20T23:10:01.211490", "exception": false, "start_time": "2021-05-20T23:10:00.390269", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] daily_extracted_teks_df = load_extracted_teks( mode="Daily", region=report_backend_identifier, limit=tek_dumps_load_limit) daily_extracted_teks_df.head() # + papermill={"duration": 0.094343, "end_time": "2021-05-20T23:10:01.377534", "exception": false, "start_time": "2021-05-20T23:10:01.283191", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] exposure_keys_summary_df_ = daily_extracted_teks_df \ 
.sort_values("extraction_date", ascending=False) \ .groupby("sample_date").tek_list.first() \ .to_frame() exposure_keys_summary_df_.index.name = "sample_date_string" exposure_keys_summary_df_["tek_list"] = \ exposure_keys_summary_df_.tek_list.apply(len) exposure_keys_summary_df_ = exposure_keys_summary_df_ \ .rename(columns={"tek_list": "shared_teks_by_generation_date"}) \ .sort_index(ascending=False) exposure_keys_summary_df = exposure_keys_summary_df_ exposure_keys_summary_df.head() # + [markdown] papermill={"duration": 0.073987, "end_time": "2021-05-20T23:10:01.536073", "exception": false, "start_time": "2021-05-20T23:10:01.462086", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[] # ### Daily New TEKs # + papermill={"duration": 0.197462, "end_time": "2021-05-20T23:10:01.813627", "exception": false, "start_time": "2021-05-20T23:10:01.616165", "status": "completed"} tags=[] tek_list_df = daily_extracted_teks_df.groupby("extraction_date").tek_list.apply( lambda x: set(sum(x, []))).reset_index() tek_list_df = tek_list_df.set_index("extraction_date").sort_index(ascending=True) tek_list_df.head() # + papermill={"duration": 4.085636, "end_time": "2021-05-20T23:10:05.983430", "exception": false, "start_time": "2021-05-20T23:10:01.897794", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] def compute_teks_by_generation_and_upload_date(date): day_new_teks_set_df = tek_list_df.copy().diff() try: day_new_teks_set = day_new_teks_set_df[ day_new_teks_set_df.index == date].tek_list.item() except ValueError: day_new_teks_set = None if pd.isna(day_new_teks_set): day_new_teks_set = set() day_new_teks_df = daily_extracted_teks_df[ daily_extracted_teks_df.extraction_date == date].copy() day_new_teks_df["shared_teks"] = \ day_new_teks_df.tek_list.apply(lambda x: set(x).intersection(day_new_teks_set)) day_new_teks_df["shared_teks"] = \ day_new_teks_df.shared_teks.apply(len) day_new_teks_df["upload_date"] = date day_new_teks_df.rename(columns={"sample_date": "generation_date"}, inplace=True) day_new_teks_df = day_new_teks_df[ ["upload_date", "generation_date", "shared_teks"]] day_new_teks_df["generation_to_upload_days"] = \ (pd.to_datetime(day_new_teks_df.upload_date) - pd.to_datetime(day_new_teks_df.generation_date)).dt.days day_new_teks_df = day_new_teks_df[day_new_teks_df.shared_teks > 0] return day_new_teks_df shared_teks_generation_to_upload_df = pd.DataFrame() for upload_date in daily_extracted_teks_df.extraction_date.unique(): shared_teks_generation_to_upload_df = \ shared_teks_generation_to_upload_df.append( compute_teks_by_generation_and_upload_date(date=upload_date)) shared_teks_generation_to_upload_df \ .sort_values(["upload_date", "generation_date"], ascending=False, inplace=True) shared_teks_generation_to_upload_df.tail() # + papermill={"duration": 0.110766, "end_time": "2021-05-20T23:10:06.180503", "exception": false, "start_time": "2021-05-20T23:10:06.069737", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] today_new_teks_df = \ shared_teks_generation_to_upload_df[ shared_teks_generation_to_upload_df.upload_date == extraction_date].copy() today_new_teks_df.tail() # + papermill={"duration": 0.498493, "end_time": "2021-05-20T23:10:06.759794", "exception": false, "start_time": "2021-05-20T23:10:06.261301", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] if not today_new_teks_df.empty: today_new_teks_df.set_index("generation_to_upload_days") \ .sort_index().shared_teks.plot.bar() # + papermill={"duration": 0.121221, "end_time": "2021-05-20T23:10:06.965319", 
"exception": false, "start_time": "2021-05-20T23:10:06.844098", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] generation_to_upload_period_pivot_df = \ shared_teks_generation_to_upload_df[ ["upload_date", "generation_to_upload_days", "shared_teks"]] \ .pivot(index="upload_date", columns="generation_to_upload_days") \ .sort_index(ascending=False).fillna(0).astype(int) \ .droplevel(level=0, axis=1) generation_to_upload_period_pivot_df.head() # + papermill={"duration": 0.141413, "end_time": "2021-05-20T23:10:07.191481", "exception": false, "start_time": "2021-05-20T23:10:07.050068", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] new_tek_df = tek_list_df.diff().tek_list.apply( lambda x: len(x) if not pd.isna(x) else None).to_frame().reset_index() new_tek_df.rename(columns={ "tek_list": "shared_teks_by_upload_date", "extraction_date": "sample_date_string",}, inplace=True) new_tek_df.tail() # + papermill={"duration": 0.104592, "end_time": "2021-05-20T23:10:07.378564", "exception": false, "start_time": "2021-05-20T23:10:07.273972", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] shared_teks_uploaded_on_generation_date_df = shared_teks_generation_to_upload_df[ shared_teks_generation_to_upload_df.generation_to_upload_days == 0] \ [["upload_date", "shared_teks"]].rename( columns={ "upload_date": "sample_date_string", "shared_teks": "shared_teks_uploaded_on_generation_date", }) shared_teks_uploaded_on_generation_date_df.head() # + papermill={"duration": 0.12161, "end_time": "2021-05-20T23:10:07.611526", "exception": false, "start_time": "2021-05-20T23:10:07.489916", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] estimated_shared_diagnoses_df = shared_teks_generation_to_upload_df \ .groupby(["upload_date"]).shared_teks.max().reset_index() \ .sort_values(["upload_date"], ascending=False) \ .rename(columns={ "upload_date": "sample_date_string", "shared_teks": "shared_diagnoses", }) invalid_shared_diagnoses_dates_mask = \ estimated_shared_diagnoses_df.sample_date_string.isin(invalid_shared_diagnoses_dates) estimated_shared_diagnoses_df[invalid_shared_diagnoses_dates_mask] = 0 estimated_shared_diagnoses_df.head() # + [markdown] papermill={"duration": 0.096569, "end_time": "2021-05-20T23:10:07.795501", "exception": false, "start_time": "2021-05-20T23:10:07.698932", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[] # ### Hourly New TEKs # + papermill={"duration": 0.325354, "end_time": "2021-05-20T23:10:08.207907", "exception": false, "start_time": "2021-05-20T23:10:07.882553", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] hourly_extracted_teks_df = load_extracted_teks( mode="Hourly", region=report_backend_identifier, limit=25) hourly_extracted_teks_df.head() # + papermill={"duration": 0.139256, "end_time": "2021-05-20T23:10:08.426839", "exception": false, "start_time": "2021-05-20T23:10:08.287583", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] hourly_new_tek_count_df = hourly_extracted_teks_df \ .groupby("extraction_date_with_hour").tek_list. 
\ apply(lambda x: set(sum(x, []))).reset_index().copy() hourly_new_tek_count_df = hourly_new_tek_count_df.set_index("extraction_date_with_hour") \ .sort_index(ascending=True) hourly_new_tek_count_df["new_tek_list"] = hourly_new_tek_count_df.tek_list.diff() hourly_new_tek_count_df["new_tek_count"] = hourly_new_tek_count_df.new_tek_list.apply( lambda x: len(x) if not pd.isna(x) else 0) hourly_new_tek_count_df.rename(columns={ "new_tek_count": "shared_teks_by_upload_date"}, inplace=True) hourly_new_tek_count_df = hourly_new_tek_count_df.reset_index()[[ "extraction_date_with_hour", "shared_teks_by_upload_date"]] hourly_new_tek_count_df.head() # + papermill={"duration": 0.117075, "end_time": "2021-05-20T23:10:08.641190", "exception": false, "start_time": "2021-05-20T23:10:08.524115", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] hourly_summary_df = hourly_new_tek_count_df.copy() hourly_summary_df.set_index("extraction_date_with_hour", inplace=True) hourly_summary_df = hourly_summary_df.fillna(0).astype(int).reset_index() hourly_summary_df["datetime_utc"] = pd.to_datetime( hourly_summary_df.extraction_date_with_hour, format="%Y-%m-%d@%H") hourly_summary_df.set_index("datetime_utc", inplace=True) hourly_summary_df = hourly_summary_df.tail(-1) hourly_summary_df.head() # + [markdown] papermill={"duration": 0.08176, "end_time": "2021-05-20T23:10:08.805115", "exception": false, "start_time": "2021-05-20T23:10:08.723355", "status": "completed"} tags=[] # ### Official Statistics # + papermill={"duration": 0.20102, "end_time": "2021-05-20T23:10:09.096532", "exception": false, "start_time": "2021-05-20T23:10:08.895512", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] import requests import pandas.io.json official_stats_response = requests.get("https://radarcovid.covid19.gob.es/kpi/statistics/basics") official_stats_response.raise_for_status() official_stats_df_ = pandas.io.json.json_normalize(official_stats_response.json()) # + papermill={"duration": 0.124879, "end_time": "2021-05-20T23:10:09.315254", "exception": false, "start_time": "2021-05-20T23:10:09.190375", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] official_stats_df = official_stats_df_.copy() official_stats_df["date"] = pd.to_datetime(official_stats_df["date"], dayfirst=True) official_stats_df.head() # + papermill={"duration": 0.099108, "end_time": "2021-05-20T23:10:09.501908", "exception": false, "start_time": "2021-05-20T23:10:09.402800", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] official_stats_column_map = { "date": "sample_date", "applicationsDownloads.totalAcummulated": "app_downloads_es_accumulated", "communicatedContagions.totalAcummulated": "shared_diagnoses_es_accumulated", } accumulated_suffix = "_accumulated" accumulated_values_columns = \ list(filter(lambda x: x.endswith(accumulated_suffix), official_stats_column_map.values())) interpolated_values_columns = \ list(map(lambda x: x[:-len(accumulated_suffix)], accumulated_values_columns)) # + papermill={"duration": 0.130584, "end_time": "2021-05-20T23:10:09.721626", "exception": false, "start_time": "2021-05-20T23:10:09.591042", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] official_stats_df = \ official_stats_df[official_stats_column_map.keys()] \ .rename(columns=official_stats_column_map) official_stats_df["extraction_date"] = extraction_date official_stats_df.head() # + papermill={"duration": 0.118596, "end_time": "2021-05-20T23:10:09.933293", "exception": false, "start_time": "2021-05-20T23:10:09.814697", "status": 
"completed"} pycharm={"name": "#%%\n"} tags=[] official_stats_path = "Data/Statistics/Current/RadarCOVID-Statistics.json" previous_official_stats_df = pd.read_json(official_stats_path, orient="records", lines=True) previous_official_stats_df["sample_date"] = pd.to_datetime(previous_official_stats_df["sample_date"], dayfirst=True) official_stats_df = official_stats_df.append(previous_official_stats_df) official_stats_df.head() # + papermill={"duration": 0.107136, "end_time": "2021-05-20T23:10:10.125442", "exception": false, "start_time": "2021-05-20T23:10:10.018306", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] official_stats_df = official_stats_df[~(official_stats_df.shared_diagnoses_es_accumulated == 0)] official_stats_df.sort_values("extraction_date", ascending=False, inplace=True) official_stats_df.drop_duplicates(subset=["sample_date"], keep="first", inplace=True) official_stats_df.head() # + papermill={"duration": 0.130141, "end_time": "2021-05-20T23:10:10.340821", "exception": false, "start_time": "2021-05-20T23:10:10.210680", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] official_stats_stored_df = official_stats_df.copy() official_stats_stored_df["sample_date"] = official_stats_stored_df.sample_date.dt.strftime("%Y-%m-%d") official_stats_stored_df.to_json(official_stats_path, orient="records", lines=True) # + papermill={"duration": 0.125956, "end_time": "2021-05-20T23:10:10.555680", "exception": false, "start_time": "2021-05-20T23:10:10.429724", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] official_stats_df.drop(columns=["extraction_date"], inplace=True) official_stats_df = confirmed_days_df.merge(official_stats_df, how="left") official_stats_df.sort_values("sample_date", ascending=False, inplace=True) official_stats_df.head() # + papermill={"duration": 0.113393, "end_time": "2021-05-20T23:10:10.756062", "exception": false, "start_time": "2021-05-20T23:10:10.642669", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] official_stats_df[accumulated_values_columns] = \ official_stats_df[accumulated_values_columns] \ .astype(float).interpolate(limit_area="inside") official_stats_df[interpolated_values_columns] = \ official_stats_df[accumulated_values_columns].diff(periods=-1) official_stats_df.drop(columns="sample_date", inplace=True) official_stats_df.head() # + [markdown] papermill={"duration": 0.087432, "end_time": "2021-05-20T23:10:10.932438", "exception": false, "start_time": "2021-05-20T23:10:10.845006", "status": "completed"} tags=[] # ### Data Merge # + papermill={"duration": 0.113252, "end_time": "2021-05-20T23:10:11.131383", "exception": false, "start_time": "2021-05-20T23:10:11.018131", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] result_summary_df = exposure_keys_summary_df.merge( new_tek_df, on=["sample_date_string"], how="outer") result_summary_df.head() # + papermill={"duration": 0.124758, "end_time": "2021-05-20T23:10:11.344365", "exception": false, "start_time": "2021-05-20T23:10:11.219607", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] result_summary_df = result_summary_df.merge( shared_teks_uploaded_on_generation_date_df, on=["sample_date_string"], how="outer") result_summary_df.head() # + papermill={"duration": 0.121505, "end_time": "2021-05-20T23:10:11.551705", "exception": false, "start_time": "2021-05-20T23:10:11.430200", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] result_summary_df = result_summary_df.merge( estimated_shared_diagnoses_df, on=["sample_date_string"], how="outer") 
result_summary_df.head() # + papermill={"duration": 0.114049, "end_time": "2021-05-20T23:10:11.755534", "exception": false, "start_time": "2021-05-20T23:10:11.641485", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] result_summary_df = result_summary_df.merge( official_stats_df, on=["sample_date_string"], how="outer") result_summary_df.head() # + papermill={"duration": 0.116053, "end_time": "2021-05-20T23:10:11.957443", "exception": false, "start_time": "2021-05-20T23:10:11.841390", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] result_summary_df = confirmed_eu_df.tail(daily_summary_days).merge( result_summary_df, on=["sample_date_string"], how="left") result_summary_df.head() # + papermill={"duration": 0.140608, "end_time": "2021-05-20T23:10:12.185593", "exception": false, "start_time": "2021-05-20T23:10:12.044985", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] result_summary_df = confirmed_es_df.tail(daily_summary_days).merge( result_summary_df, on=["sample_date_string"], how="left") result_summary_df.head() # + papermill={"duration": 0.125437, "end_time": "2021-05-20T23:10:12.403310", "exception": false, "start_time": "2021-05-20T23:10:12.277873", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] result_summary_df["sample_date"] = pd.to_datetime(result_summary_df.sample_date_string) result_summary_df = result_summary_df.merge(source_regions_for_summary_df, how="left") result_summary_df.set_index(["sample_date", "source_regions"], inplace=True) result_summary_df.drop(columns=["sample_date_string"], inplace=True) result_summary_df.sort_index(ascending=False, inplace=True) result_summary_df.head() # + papermill={"duration": 0.14692, "end_time": "2021-05-20T23:10:12.639206", "exception": false, "start_time": "2021-05-20T23:10:12.492286", "status": "completed"} tags=[] with pd.option_context("mode.use_inf_as_na", True): result_summary_df = result_summary_df.fillna(0).astype(int) result_summary_df["teks_per_shared_diagnosis"] = \ (result_summary_df.shared_teks_by_upload_date / result_summary_df.shared_diagnoses).fillna(0) result_summary_df["shared_diagnoses_per_covid_case"] = \ (result_summary_df.shared_diagnoses / result_summary_df.covid_cases).fillna(0) result_summary_df["shared_diagnoses_per_covid_case_es"] = \ (result_summary_df.shared_diagnoses_es / result_summary_df.covid_cases_es).fillna(0) result_summary_df.head(daily_plot_days) # + papermill={"duration": 0.107274, "end_time": "2021-05-20T23:10:12.854087", "exception": false, "start_time": "2021-05-20T23:10:12.746813", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] def compute_aggregated_results_summary(days) -> pd.DataFrame: aggregated_result_summary_df = result_summary_df.copy() aggregated_result_summary_df["covid_cases_for_ratio"] = \ aggregated_result_summary_df.covid_cases.mask( aggregated_result_summary_df.shared_diagnoses == 0, 0) aggregated_result_summary_df["covid_cases_for_ratio_es"] = \ aggregated_result_summary_df.covid_cases_es.mask( aggregated_result_summary_df.shared_diagnoses_es == 0, 0) aggregated_result_summary_df = aggregated_result_summary_df \ .sort_index(ascending=True).fillna(0).rolling(days).agg({ "covid_cases": "sum", "covid_cases_es": "sum", "covid_cases_for_ratio": "sum", "covid_cases_for_ratio_es": "sum", "shared_teks_by_generation_date": "sum", "shared_teks_by_upload_date": "sum", "shared_diagnoses": "sum", "shared_diagnoses_es": "sum", }).sort_index(ascending=False) with pd.option_context("mode.use_inf_as_na", True): aggregated_result_summary_df = 
aggregated_result_summary_df.fillna(0).astype(int) aggregated_result_summary_df["teks_per_shared_diagnosis"] = \ (aggregated_result_summary_df.shared_teks_by_upload_date / aggregated_result_summary_df.covid_cases_for_ratio).fillna(0) aggregated_result_summary_df["shared_diagnoses_per_covid_case"] = \ (aggregated_result_summary_df.shared_diagnoses / aggregated_result_summary_df.covid_cases_for_ratio).fillna(0) aggregated_result_summary_df["shared_diagnoses_per_covid_case_es"] = \ (aggregated_result_summary_df.shared_diagnoses_es / aggregated_result_summary_df.covid_cases_for_ratio_es).fillna(0) return aggregated_result_summary_df # + papermill={"duration": 0.131828, "end_time": "2021-05-20T23:10:13.080187", "exception": false, "start_time": "2021-05-20T23:10:12.948359", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] aggregated_result_with_7_days_window_summary_df = compute_aggregated_results_summary(days=7) aggregated_result_with_7_days_window_summary_df.head() # + papermill={"duration": 0.101689, "end_time": "2021-05-20T23:10:13.271366", "exception": false, "start_time": "2021-05-20T23:10:13.169677", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] last_7_days_summary = aggregated_result_with_7_days_window_summary_df.to_dict(orient="records")[1] last_7_days_summary # + papermill={"duration": 0.122076, "end_time": "2021-05-20T23:10:13.483441", "exception": false, "start_time": "2021-05-20T23:10:13.361365", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] aggregated_result_with_14_days_window_summary_df = compute_aggregated_results_summary(days=13) last_14_days_summary = aggregated_result_with_14_days_window_summary_df.to_dict(orient="records")[1] last_14_days_summary # + [markdown] papermill={"duration": 0.093555, "end_time": "2021-05-20T23:10:13.672013", "exception": false, "start_time": "2021-05-20T23:10:13.578458", "status": "completed"} tags=[] # ## Report Results # + papermill={"duration": 0.098807, "end_time": "2021-05-20T23:10:13.872129", "exception": false, "start_time": "2021-05-20T23:10:13.773322", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] display_column_name_mapping = { "sample_date": "Sample\u00A0Date\u00A0(UTC)", "source_regions": "Source Countries", "datetime_utc": "Timestamp (UTC)", "upload_date": "Upload Date (UTC)", "generation_to_upload_days": "Generation to Upload Period in Days", "region": "Backend", "region_x": "Backend\u00A0(A)", "region_y": "Backend\u00A0(B)", "common_teks": "Common TEKs Shared Between Backends", "common_teks_fraction": "Fraction of TEKs in Backend (A) Available in Backend (B)", "covid_cases": "COVID-19 Cases (Source Countries)", "shared_teks_by_generation_date": "Shared TEKs by Generation Date (Source Countries)", "shared_teks_by_upload_date": "Shared TEKs by Upload Date (Source Countries)", "shared_teks_uploaded_on_generation_date": "Shared TEKs Uploaded on Generation Date (Source Countries)", "shared_diagnoses": "Shared Diagnoses (Source Countries – Estimation)", "teks_per_shared_diagnosis": "TEKs Uploaded per Shared Diagnosis (Source Countries)", "shared_diagnoses_per_covid_case": "Usage Ratio (Source Countries)", "covid_cases_es": "COVID-19 Cases (Spain)", "app_downloads_es": "App Downloads (Spain – Official)", "shared_diagnoses_es": "Shared Diagnoses (Spain – Official)", "shared_diagnoses_per_covid_case_es": "Usage Ratio (Spain)", } # + papermill={"duration": 0.0999, "end_time": "2021-05-20T23:10:14.064818", "exception": false, "start_time": "2021-05-20T23:10:13.964918", "status": "completed"} 
pycharm={"name": "#%%\n"} tags=[] summary_columns = [ "covid_cases", "shared_teks_by_generation_date", "shared_teks_by_upload_date", "shared_teks_uploaded_on_generation_date", "shared_diagnoses", "teks_per_shared_diagnosis", "shared_diagnoses_per_covid_case", "covid_cases_es", "app_downloads_es", "shared_diagnoses_es", "shared_diagnoses_per_covid_case_es", ] summary_percentage_columns= [ "shared_diagnoses_per_covid_case_es", "shared_diagnoses_per_covid_case", ] # + [markdown] papermill={"duration": 0.099777, "end_time": "2021-05-20T23:10:14.284426", "exception": false, "start_time": "2021-05-20T23:10:14.184649", "status": "completed"} tags=[] # ### Daily Summary Table # + papermill={"duration": 0.138499, "end_time": "2021-05-20T23:10:14.511904", "exception": false, "start_time": "2021-05-20T23:10:14.373405", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] result_summary_df_ = result_summary_df.copy() result_summary_df = result_summary_df[summary_columns] result_summary_with_display_names_df = result_summary_df \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) result_summary_with_display_names_df # + [markdown] papermill={"duration": 0.09304, "end_time": "2021-05-20T23:10:14.694054", "exception": false, "start_time": "2021-05-20T23:10:14.601014", "status": "completed"} tags=[] # ### Daily Summary Plots # + papermill={"duration": 4.447655, "end_time": "2021-05-20T23:10:19.254506", "exception": false, "start_time": "2021-05-20T23:10:14.806851", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] result_plot_summary_df = result_summary_df.head(daily_plot_days)[summary_columns] \ .droplevel(level=["source_regions"]) \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) summary_ax_list = result_plot_summary_df.sort_index(ascending=True).plot.bar( title=f"Daily Summary", rot=45, subplots=True, figsize=(15, 30), legend=False) ax_ = summary_ax_list[0] ax_.get_figure().tight_layout() ax_.get_figure().subplots_adjust(top=0.95) _ = ax_.set_xticklabels(sorted(result_plot_summary_df.index.strftime("%Y-%m-%d").tolist())) for percentage_column in summary_percentage_columns: percentage_column_index = summary_columns.index(percentage_column) summary_ax_list[percentage_column_index].yaxis \ .set_major_formatter(matplotlib.ticker.PercentFormatter(1.0)) # + [markdown] papermill={"duration": 0.101836, "end_time": "2021-05-20T23:10:19.459907", "exception": false, "start_time": "2021-05-20T23:10:19.358071", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[] # ### Daily Generation to Upload Period Table # + papermill={"duration": 0.1284, "end_time": "2021-05-20T23:10:19.701460", "exception": false, "start_time": "2021-05-20T23:10:19.573060", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] display_generation_to_upload_period_pivot_df = \ generation_to_upload_period_pivot_df \ .head(backend_generation_days) display_generation_to_upload_period_pivot_df \ .head(backend_generation_days) \ .rename_axis(columns=display_column_name_mapping) \ .rename_axis(index=display_column_name_mapping) # + papermill={"duration": 2.086899, "end_time": "2021-05-20T23:10:21.897537", "exception": false, "start_time": "2021-05-20T23:10:19.810638", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] fig, generation_to_upload_period_pivot_table_ax = plt.subplots( figsize=(12, 1 + 0.6 * len(display_generation_to_upload_period_pivot_df))) generation_to_upload_period_pivot_table_ax.set_title( "Shared TEKs Generation to 
Upload Period Table") sns.heatmap( data=display_generation_to_upload_period_pivot_df .rename_axis(columns=display_column_name_mapping) .rename_axis(index=display_column_name_mapping), fmt=".0f", annot=True, ax=generation_to_upload_period_pivot_table_ax) generation_to_upload_period_pivot_table_ax.get_figure().tight_layout() # + [markdown] papermill={"duration": 0.11922, "end_time": "2021-05-20T23:10:22.125934", "exception": false, "start_time": "2021-05-20T23:10:22.006714", "status": "completed"} tags=[] # ### Hourly Summary Plots # + papermill={"duration": 0.744778, "end_time": "2021-05-20T23:10:22.984651", "exception": false, "start_time": "2021-05-20T23:10:22.239873", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] hourly_summary_ax_list = hourly_summary_df \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .plot.bar( title=f"Last 24h Summary", rot=45, subplots=True, legend=False) ax_ = hourly_summary_ax_list[-1] ax_.get_figure().tight_layout() ax_.get_figure().subplots_adjust(top=0.9) _ = ax_.set_xticklabels(sorted(hourly_summary_df.index.strftime("%Y-%m-%d@%H").tolist())) # + [markdown] papermill={"duration": 0.121052, "end_time": "2021-05-20T23:10:23.228972", "exception": false, "start_time": "2021-05-20T23:10:23.107920", "status": "completed"} tags=[] # ### Publish Results # + papermill={"duration": 0.152711, "end_time": "2021-05-20T23:10:23.488233", "exception": false, "start_time": "2021-05-20T23:10:23.335522", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] github_repository = os.environ.get("GITHUB_REPOSITORY") if github_repository is None: github_repository = "pvieito/Radar-STATS" github_project_base_url = "https://github.com/" + github_repository display_formatters = { display_column_name_mapping["teks_per_shared_diagnosis"]: lambda x: f"{x:.2f}" if x != 0 else "", display_column_name_mapping["shared_diagnoses_per_covid_case"]: lambda x: f"{x:.2%}" if x != 0 else "", display_column_name_mapping["shared_diagnoses_per_covid_case_es"]: lambda x: f"{x:.2%}" if x != 0 else "", } general_columns = \ list(filter(lambda x: x not in display_formatters, display_column_name_mapping.values())) general_formatter = lambda x: f"{x}" if x != 0 else "" display_formatters.update(dict(map(lambda x: (x, general_formatter), general_columns))) daily_summary_table_html = result_summary_with_display_names_df \ .head(daily_plot_days) \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .to_html(formatters=display_formatters) multi_backend_summary_table_html = multi_backend_summary_df \ .head(daily_plot_days) \ .rename_axis(columns=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .rename_axis(index=display_column_name_mapping) \ .to_html(formatters=display_formatters) def format_multi_backend_cross_sharing_fraction(x): if pd.isna(x): return "-" elif round(x * 100, 1) == 0: return "" else: return f"{x:.1%}" multi_backend_cross_sharing_summary_table_html = multi_backend_cross_sharing_summary_df \ .rename_axis(columns=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .rename_axis(index=display_column_name_mapping) \ .to_html( classes="table-center", formatters=display_formatters, float_format=format_multi_backend_cross_sharing_fraction) multi_backend_cross_sharing_summary_table_html = \ multi_backend_cross_sharing_summary_table_html \ .replace("<tr>","<tr style=\"text-align: center;\">") extraction_date_result_summary_df = \ 
result_summary_df[result_summary_df.index.get_level_values("sample_date") == extraction_date] extraction_date_result_hourly_summary_df = \ hourly_summary_df[hourly_summary_df.extraction_date_with_hour == extraction_date_with_hour] covid_cases = \ extraction_date_result_summary_df.covid_cases.item() shared_teks_by_generation_date = \ extraction_date_result_summary_df.shared_teks_by_generation_date.item() shared_teks_by_upload_date = \ extraction_date_result_summary_df.shared_teks_by_upload_date.item() shared_diagnoses = \ extraction_date_result_summary_df.shared_diagnoses.item() teks_per_shared_diagnosis = \ extraction_date_result_summary_df.teks_per_shared_diagnosis.item() shared_diagnoses_per_covid_case = \ extraction_date_result_summary_df.shared_diagnoses_per_covid_case.item() shared_teks_by_upload_date_last_hour = \ extraction_date_result_hourly_summary_df.shared_teks_by_upload_date.sum().astype(int) display_source_regions = ", ".join(report_source_regions) if len(report_source_regions) == 1: display_brief_source_regions = report_source_regions[0] else: display_brief_source_regions = f"{len(report_source_regions)} 🇪🇺" # + papermill={"duration": 0.114538, "end_time": "2021-05-20T23:10:23.716372", "exception": false, "start_time": "2021-05-20T23:10:23.601834", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] def get_temporary_image_path() -> str: return os.path.join(tempfile.gettempdir(), str(uuid.uuid4()) + ".png") def save_temporary_plot_image(ax): if isinstance(ax, np.ndarray): ax = ax[0] media_path = get_temporary_image_path() ax.get_figure().savefig(media_path) return media_path def save_temporary_dataframe_image(df): import dataframe_image as dfi df = df.copy() df_styler = df.style.format(display_formatters) media_path = get_temporary_image_path() dfi.export(df_styler, media_path) return media_path # + papermill={"duration": 9.154534, "end_time": "2021-05-20T23:10:32.977481", "exception": false, "start_time": "2021-05-20T23:10:23.822947", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] summary_plots_image_path = save_temporary_plot_image( ax=summary_ax_list) summary_table_image_path = save_temporary_dataframe_image( df=result_summary_with_display_names_df) hourly_summary_plots_image_path = save_temporary_plot_image( ax=hourly_summary_ax_list) multi_backend_summary_table_image_path = save_temporary_dataframe_image( df=multi_backend_summary_df) generation_to_upload_period_pivot_table_image_path = save_temporary_plot_image( ax=generation_to_upload_period_pivot_table_ax) # + [markdown] papermill={"duration": 0.114683, "end_time": "2021-05-20T23:10:33.223680", "exception": false, "start_time": "2021-05-20T23:10:33.108997", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[] # ### Save Results # + papermill={"duration": 0.157877, "end_time": "2021-05-20T23:10:33.493472", "exception": false, "start_time": "2021-05-20T23:10:33.335595", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] report_resources_path_prefix = "Data/Resources/Current/RadarCOVID-Report-" result_summary_df.to_csv( report_resources_path_prefix + "Summary-Table.csv") result_summary_df.to_html( report_resources_path_prefix + "Summary-Table.html") hourly_summary_df.to_csv( report_resources_path_prefix + "Hourly-Summary-Table.csv") multi_backend_summary_df.to_csv( report_resources_path_prefix + "Multi-Backend-Summary-Table.csv") multi_backend_cross_sharing_summary_df.to_csv( report_resources_path_prefix + "Multi-Backend-Cross-Sharing-Summary-Table.csv") 
generation_to_upload_period_pivot_df.to_csv( report_resources_path_prefix + "Generation-Upload-Period-Table.csv") _ = shutil.copyfile( summary_plots_image_path, report_resources_path_prefix + "Summary-Plots.png") _ = shutil.copyfile( summary_table_image_path, report_resources_path_prefix + "Summary-Table.png") _ = shutil.copyfile( hourly_summary_plots_image_path, report_resources_path_prefix + "Hourly-Summary-Plots.png") _ = shutil.copyfile( multi_backend_summary_table_image_path, report_resources_path_prefix + "Multi-Backend-Summary-Table.png") _ = shutil.copyfile( generation_to_upload_period_pivot_table_image_path, report_resources_path_prefix + "Generation-Upload-Period-Table.png") # + [markdown] papermill={"duration": 0.124274, "end_time": "2021-05-20T23:10:33.736772", "exception": false, "start_time": "2021-05-20T23:10:33.612498", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[] # ### Publish Results as JSON # + papermill={"duration": 0.147043, "end_time": "2021-05-20T23:10:34.018475", "exception": false, "start_time": "2021-05-20T23:10:33.871432", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] def generate_summary_api_results(df: pd.DataFrame) -> list: api_df = df.reset_index().copy() api_df["sample_date_string"] = \ api_df["sample_date"].dt.strftime("%Y-%m-%d") api_df["source_regions"] = \ api_df["source_regions"].apply(lambda x: x.split(",")) return api_df.to_dict(orient="records") summary_api_results = \ generate_summary_api_results(df=result_summary_df) today_summary_api_results = \ generate_summary_api_results(df=extraction_date_result_summary_df)[0] summary_results = dict( backend_identifier=report_backend_identifier, source_regions=report_source_regions, extraction_datetime=extraction_datetime, extraction_date=extraction_date, extraction_date_with_hour=extraction_date_with_hour, last_hour=dict( shared_teks_by_upload_date=shared_teks_by_upload_date_last_hour, shared_diagnoses=0, ), today=today_summary_api_results, last_7_days=last_7_days_summary, last_14_days=last_14_days_summary, daily_results=summary_api_results) summary_results = \ json.loads(pd.Series([summary_results]).to_json(orient="records"))[0] with open(report_resources_path_prefix + "Summary-Results.json", "w") as f: json.dump(summary_results, f, indent=4) # + [markdown] papermill={"duration": 0.141583, "end_time": "2021-05-20T23:10:34.286697", "exception": false, "start_time": "2021-05-20T23:10:34.145114", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[] # ### Publish on README # + papermill={"duration": 0.121161, "end_time": "2021-05-20T23:10:34.516637", "exception": false, "start_time": "2021-05-20T23:10:34.395476", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] with open("Data/Templates/README.md", "r") as f: readme_contents = f.read() readme_contents = readme_contents.format( extraction_date_with_hour=extraction_date_with_hour, github_project_base_url=github_project_base_url, daily_summary_table_html=daily_summary_table_html, multi_backend_summary_table_html=multi_backend_summary_table_html, multi_backend_cross_sharing_summary_table_html=multi_backend_cross_sharing_summary_table_html, display_source_regions=display_source_regions) with open("README.md", "w") as f: f.write(readme_contents) # + [markdown] papermill={"duration": 0.125918, "end_time": "2021-05-20T23:10:34.760698", "exception": false, "start_time": "2021-05-20T23:10:34.634780", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[] # ### Publish on Twitter # + papermill={"duration": 6.180425, "end_time": 
"2021-05-20T23:10:41.071524", "exception": false, "start_time": "2021-05-20T23:10:34.891099", "status": "completed"} pycharm={"name": "#%%\n"} tags=[] enable_share_to_twitter = os.environ.get("RADARCOVID_REPORT__ENABLE_PUBLISH_ON_TWITTER") github_event_name = os.environ.get("GITHUB_EVENT_NAME") if enable_share_to_twitter and github_event_name == "schedule" and \ (shared_teks_by_upload_date_last_hour or not are_today_results_partial): import tweepy twitter_api_auth_keys = os.environ["RADARCOVID_REPORT__TWITTER_API_AUTH_KEYS"] twitter_api_auth_keys = twitter_api_auth_keys.split(":") auth = tweepy.OAuthHandler(twitter_api_auth_keys[0], twitter_api_auth_keys[1]) auth.set_access_token(twitter_api_auth_keys[2], twitter_api_auth_keys[3]) api = tweepy.API(auth) summary_plots_media = api.media_upload(summary_plots_image_path) summary_table_media = api.media_upload(summary_table_image_path) generation_to_upload_period_pivot_table_image_media = api.media_upload(generation_to_upload_period_pivot_table_image_path) media_ids = [ summary_plots_media.media_id, summary_table_media.media_id, generation_to_upload_period_pivot_table_image_media.media_id, ] if are_today_results_partial: today_addendum = " (Partial)" else: today_addendum = "" def format_shared_diagnoses_per_covid_case(value) -> str: if value == 0: return "–" return f"≤{value:.2%}" display_shared_diagnoses_per_covid_case = \ format_shared_diagnoses_per_covid_case(value=shared_diagnoses_per_covid_case) display_last_14_days_shared_diagnoses_per_covid_case = \ format_shared_diagnoses_per_covid_case(value=last_14_days_summary["shared_diagnoses_per_covid_case"]) display_last_14_days_shared_diagnoses_per_covid_case_es = \ format_shared_diagnoses_per_covid_case(value=last_14_days_summary["shared_diagnoses_per_covid_case_es"]) status = textwrap.dedent(f""" #RadarCOVID – {extraction_date_with_hour} Today{today_addendum}: - Uploaded TEKs: {shared_teks_by_upload_date:.0f} ({shared_teks_by_upload_date_last_hour:+d} last hour) - Shared Diagnoses: ≤{shared_diagnoses:.0f} - Usage Ratio: {display_shared_diagnoses_per_covid_case} Last 14 Days: - Usage Ratio (Estimation): {display_last_14_days_shared_diagnoses_per_covid_case} - Usage Ratio (Official): {display_last_14_days_shared_diagnoses_per_covid_case_es} Info: {github_project_base_url}#documentation """) status = status.encode(encoding="utf-8") api.update_status(status=status, media_ids=media_ids)
Notebooks/RadarCOVID-Report/Daily/RadarCOVID-Report-2021-05-20.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.9 64-bit (''algae'': conda)' # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/alexlib/algae_population_model/blob/master/notebooks/figure1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="LkzNaT7uMISw" # ## Grazing # # Plot 3 # # a) grazing - $\sigma$ constant Y as a function of grazing 0-30 # # b) mechanical destruction young 0 old 50%. # + id="NhlwmgZhxo9o" import matplotlib.pyplot as plt from scipy.integrate import solve_ivp import numpy as np from algae_population import * np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)}) # - import pickle data = pickle.load(open('figure3.p','rb')) sigmas = [0., 0.1, 0.2, 0.3, 0.4, 0.5] # <- copy from figure3.ipynb fig, ax = plt.subplots(len(sigmas),1,figsize=(4,20)) for j in range(len(sigmas)): for i in range(len(scenarios)): # print(j,i) # print(data[j][i]['s'][0]) ind = np.where(data[j][i].t <= 120)[0][-1] ax[j].plot(data[j][i].t[:ind], data[j][i].y[:-1, :ind].sum(axis=0).T, label=data[j][i]['s'][0]) ax[j].set_title(f'$\sigma$ = {sigmas[j]}') plt.show() data = pickle.load(open('figure3.p','rb')) import numpy as np # + # sigmas = [0., 0.1, 0.2, 0.3, 0.4, 0.5] settling_time = np.zeros((len(sigmas),len(scenarios))) fig, ax = plt.subplots(len(sigmas),1,figsize=(4,20)) for j in range(len(sigmas)): for i in range(len(scenarios)): # print(j,i) # print(data[j][i]['s'][0]) ind = np.where(data[j][i].t <= 120)[0][-1] biomass = data[j][i].y[:-1, :ind].sum(axis=0).T revenue = biomass - 0.2 settling = np.argmax(revenue >= 0.9*K) settling_time[j][i] = settling ax[j].plot(data[j][i].t[:ind], revenue, label=data[j][i]['s'][0]) ax[j].plot(data[j][i].t[settling], revenue[settling],'o') ax[j].set_title(f'$\gamma_I$ = {sigmas[j]}') plt.show() # - fig, ax = plt.subplots(figsize=(8,6)) settling_time[settling_time == 0] = np.nan hmap = ax.pcolor(settling_time, shading='auto') cbar = plt.colorbar(hmap) ax.set_ylim([0,4]) ax.set_xlim([0,10]) cbar.set_label('days',fontsize=16)#, rotation=270) plt.xticks(ticks=np.arange(11)+0.5,labels=scenarios.keys()) plt.yticks(ticks=np.arange(len(sigmas[:-1]))+0.5,labels=np.round(sigmas[:-1],2)) plt.xlabel(r'Initial age distribution',fontsize=16) plt.ylabel(r'$\sigma$',fontsize=16); # + import seaborn as sns grid_kws = {"height_ratios": (.9, .05), "hspace": .5} f, (ax, cbar_ax) = plt.subplots(2, gridspec_kw=grid_kws) # fig, ax = plt.subplots(figsize=(8,6)) settling_time[settling_time == 0] = np.nan # hmap = ax.pcolor(settling_time, shading='auto') hmap = sns.heatmap(settling_time,ax=ax, cbar_ax=cbar_ax, cbar_kws={"orientation": "horizontal"}, center=25, cmap="YlGnBu", annot=True, fmt=".1f") # cbar = plt.colorbar(hmap) ax.set_ylim([0,4]) ax.set_xlim([0,10]) # bar_ax.set_label('days',fontsize=16)#, rotation=270) ax.set_xticks(ticks=np.arange(11)+0.5,labels=scenarios.keys()) ax.set_yticks(ticks=np.arange(len(sigmas[:-1]))+0.5,labels=np.round(sigmas[:-1],2)) ax.set_xlabel(r'Initial age distribution',fontsize=14) ax.set_ylabel(r'$\sigma$',fontsize=14); # ax.grid('on',color='white',linestyle=':') plt.text(0.1,0.1,'a)',color='black',fontsize=12);
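# A side note on the settling-time computation above (a sketch added for clarity, not model
# output): `np.argmax` over a boolean array returns 0 both when the condition already holds at
# the first time step and when it never holds at all, which is why the zero entries are masked
# with NaN before the heatmaps are drawn. A tiny illustration, assuming numpy is imported as above:

print(np.argmax(np.array([False, False, True])))   # 2 -> index of the first True
print(np.argmax(np.array([False, False, False])))  # 0 -> condition never satisfied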
old_figure3_create_plot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="m1AebNH3puZx" import tensorflow as tf # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="zRI6Gaoipy8o" outputId="19650919-9d2d-42f4-df43-aecaec56780f" tf.__version__ # + colab={"base_uri": "https://localhost:8080/"} id="omwAP5DXrZL2" outputId="ee1fceff-2520-4c5f-b349-41e64b1b01cc" x_data = [[0,0],[1,0],[0,1],[1,1]] y_data = [[0],[1],[1],[1]] type(x_data),type(y_data) # + colab={"base_uri": "https://localhost:8080/"} id="Den4m1pSrkFa" outputId="a2718c36-2bf0-46de-9ab2-f80c7f418d7d" import numpy as np x_train = np.array(x_data) y_train = np.array(y_data) x_train.shape , y_train.shape # + colab={"base_uri": "https://localhost:8080/"} id="i06SsZSAwPsG" outputId="c8672ee7-d9c2-4526-98f0-9c25f0f5c93d" model = tf.keras.models.Sequential() model.add(tf.keras.Input(shape=(2,))) model.add(tf.keras.layers.Dense(1)) model.compile(optimizer='sgd', loss='mse') # + colab={"base_uri": "https://localhost:8080/", "height": 201} id="5nDeDi3REQF_" outputId="c5633036-a0d3-4b3b-cfbf-7ee400617604" tf.keras.utils.plot_model(model, show_shapes=True) # + id="wZ-yfxnzyZUB" model.fit(x_train, y_train, epochs=500) # + colab={"base_uri": "https://localhost:8080/"} id="1JS2M4h2GUAs" outputId="48579d8d-d3d9-46bd-ac91-5d5eea6ad287" model.predict([[0,1]]) # + colab={"base_uri": "https://localhost:8080/"} id="aLsiZQvJXt_G" outputId="e86317ff-af18-4d40-e6b7-9769a3d8cbe4" model.predict([[0,1]]) # + colab={"base_uri": "https://localhost:8080/"} id="Ka8yTFZmXvLH" outputId="a50f0e3d-c527-4158-8e7c-d9ac8f50f444" model.predict([[0,1]]) # + colab={"base_uri": "https://localhost:8080/"} id="kjHVrakADtRS" outputId="204ceaac-736f-4cc7-c480-546d353daf05" model.get_weights() # + id="m48dRbYtMo_2"
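# +
# Quick sanity check (a sketch, assuming the cells above have been run; `manual` is a new name
# introduced here): with a single Dense(1) unit and its default linear activation, the model's
# prediction is just the affine map x @ w + b, so the weights returned by model.get_weights()
# reproduce model.predict directly.
w, b = model.get_weights()        # w has shape (2, 1), b has shape (1,)
manual = x_train @ w + b          # one value per input row, shape (4, 1)
print(manual)
print(model.predict(x_train))     # should closely match the manual values
# -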
single_perceptron.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:lb_main_analysis] # language: python # name: conda-env-lb_main_analysis-py # --- # + # %matplotlib inline import matplotlib.pyplot as plt plt.style.use('ggplot') # Eventually, for Anaconda warnings. # Can be commented out. import warnings warnings.filterwarnings("ignore") # %matplotlib inline # %load_ext autoreload # %autoreload 2 # - # Load basic libraries import seaborn; seaborn.set() from itertools import chain from collections import OrderedDict import pickle, copy, random random.seed(100) import numpy as np import scipy.stats data = pickle.load(open("data.p","rb")) # check data print(len(data)) print(len([x for x,y in data.items() if y["y"]==0])) print(len([x for x,y in data.items() if y["y"]==1])) data['58e4ce01fe7683152b59e56e'] from supporting_functions import cleanup data_docs = [cleanup(x["surface"]) for x in data.values() if x["y"]==1] data_targets = [x["asve"] for x in data.values() if x["y"]==1] data_docs[11] data_targets[11] # cleanup targets # calculate a dict of occurrences # replace infrequent observations but if possibly consolidate to a higher level freq_dict = {x:len([z for z in data_targets if z==x]) for x in data_targets} freq_dict = OrderedDict(sorted(freq_dict.items(),key=lambda x:x[1])) threshold = 5 discard_list = list() consolidate_dict = {x:x for x in freq_dict.keys()} for x,y in freq_dict.items(): if y < threshold: if len(x.split(".")) == 1: discard_list.append(x) else: for n in range(1,len(x.split("."))): new_id = ".".join(x.split(".")[:-n]) if new_id in freq_dict.keys() and freq_dict[new_id] >= threshold: consolidate_dict[x] = new_id break # consolidate data_docs = [cleanup(x["surface"]) for x in data.values() if x["y"]==1 and not x["asve"] in discard_list] data_targets = [consolidate_dict[x["asve"]] for x in data.values() if x["y"]==1 and not x["asve"] in discard_list] # train test from sklearn.model_selection import train_test_split # TRAIN/TEST X_train, X_test, y_train, y_test = train_test_split(data_docs, data_targets, test_size=0.25) # baseline model from sklearn.feature_extraction.text import CountVectorizer,TfidfTransformer from sklearn.naive_bayes import MultinomialNB from sklearn.linear_model import SGDClassifier from sklearn.svm import LinearSVC from sklearn.ensemble import RandomForestClassifier from sklearn.pipeline import Pipeline #text_clf = Pipeline([('vect', CountVectorizer()), # ('tfidf', TfidfTransformer()), # ('clf', MultinomialNB())]) #text_clf = Pipeline([('vect', CountVectorizer()), # ('tfidf', TfidfTransformer()), # ('clf', SGDClassifier(loss='hinge', penalty='l2',alpha=1e-3, n_iter=5, random_state=42))]) #text_clf = Pipeline([('vect', CountVectorizer()), # ('tfidf', TfidfTransformer()), # ('clf', SGDClassifier(loss='hinge', penalty='l2',alpha=1e-3, n_iter=5, random_state=42))]) text_clf = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', RandomForestClassifier(n_estimators=100, criterion='gini', max_depth=None, n_jobs=1, random_state=100))]) text_clf = text_clf.fit(X_train, y_train) base_predicted = text_clf.predict(X_test) probs = text_clf.predict_proba(X_test) np.mean(base_predicted == y_test) # + # Grid search from sklearn.model_selection import GridSearchCV text_clf = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', LinearSVC())]) parameters = {'vect__ngram_range': [(1,1),(1,3),(1,5)], 
'tfidf__use_idf': (True, False), 'clf__dual': (True,False), 'clf__C': (1,0.1,0.01,0.001)} """ text_clf = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', RandomForestClassifier())]) parameters = {'vect__ngram_range': [(1,1),(1,2),(1, 3), (1, 4), (1, 5)], 'tfidf__use_idf': (True, False), 'clf__n_estimators': (10,50,100,500), 'clf__criterion': ('gini'), 'clf__max_depth': (None,5,10,50)} """ gs_clf = GridSearchCV(text_clf, parameters, n_jobs=2) gs_clf = gs_clf.fit(X_train,y_train) # - print(gs_clf.best_score_) for param_name in sorted(parameters.keys()): print("%s: %r" % (param_name, gs_clf.best_params_[param_name])) # final baseline model #text_clf = Pipeline([('vect', CountVectorizer(ngram_range=(1,3))), # ('tfidf', TfidfTransformer(use_idf=True)), # ('clf', MultinomialNB(alpha=0.01))]) text_clf = Pipeline([('vect', CountVectorizer(ngram_range=(1,3))), ('tfidf', TfidfTransformer(use_idf=True)), ('clf', LinearSVC(C=1,dual=True))]) text_clf = text_clf.fit(X_train, y_train) base_predicted = text_clf.predict(X_test) np.mean(base_predicted == y_test) from sklearn import metrics print(metrics.classification_report(y_test, base_predicted)) # train final clf_final = Pipeline([('vect', CountVectorizer(ngram_range=(1,3))), ('tfidf', TfidfTransformer(use_idf=True)), ('clf', LinearSVC(C=1,dual=True))]) clf_final = clf_final.fit(data_docs, data_targets) # persist model from sklearn.externals import joblib joblib.dump(clf_final, 'models/asve_ids.pkl') clf_final.predict(["ciao mamma"])[0]
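# A sketch of how the pipeline persisted above could be reused elsewhere, assuming the same
# scikit-learn version is available at load time (`loaded_clf` is a name introduced here):
loaded_clf = joblib.load('models/asve_ids.pkl')
loaded_clf.predict(["ciao mamma"])[0]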
disambiguation/primary/model_dev/ASVe_IDs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from matplotlib import cm #Data Loading data = np.genfromtxt('sgd_data.txt',delimiter = ',') x = np.zeros((40,1), dtype = np.float) y = np.zeros((40,1), dtype = np.float) for i in range(data.shape[0]): x[i] = data[i][0] for i in range(data.shape[0]): y[i] = data[i][1] print("Input data shape = {}".format(x.shape)) print("Output data shape = {}".format(y.shape)) #Helper Functions def f(x,w,b): '''Sigmoid Function''' f = 1/(1+np.exp(-(w*x+b))) return f def mse(x,y,w,b): '''Mean Squared Loss Function''' L = 0.0 for i in range(x.shape[0]): L += 0.5*(y[i]-f(x[i],w,b))**2 return L def cross_entropy(x,y,w,b): '''Cross Entropy Loss Function''' L = 0.0 for i in range(x.shape[0]): L += -(y[i]*np.log(f(x[i],w,b))) return L def grad_w_mse(x,y,w,b): fx = f(x,w,b) dw = (fx - y)*fx*(1-fx)*x return dw def grad_b_mse(x,y,w,b): fx = f(x,w,b) db = (fx - y)*fx*(1-fx) return db def grad_w_cross(x,y,w,b): fx = f(x,w,b) dw = (- y)*(1-fx)*x return dw def grad_b_cross(x,y,w,b): fx = f(x,w,b) db = (- y)*(1-fx) return db #Gradient Discent def Decay_GD(x,y,epochs,batch_size,loss,lr): w = np.random.randn() b = np.random.randn() count = 0 #A counter to see how many times the iteration ran l_list = [] w_list = [] b_list = [] w_cache,b_cache = w,b #Caches for current epoch w_prev,b_prev = 0,0 #Contain previous parameters points,epoch_val = 0,0 prev_loss,current_loss = 100,0 #High Value for first previous loss ep = [i for i in range(epochs+1)] dw,db = 0,0 while (epoch_val <= epochs): count += 1 dw,db = 0,0 w_prev, b_prev = w_cache,b_cache for j in range(x.shape[0]): if (loss == 'mse'): dw += grad_w_mse(x[j],y[j],w_cache,b_cache) db += grad_b_mse(x[j],y[j],w_cache,b_cache) elif (loss == 'cross_entropy'): dw += grad_w_cross(x[j],y[j],w_cache,b_cache) db += grad_b_cross(x[j],y[j],w_cache,b_cache) points += 1 if(points % batch_size == 0): w_cache = w_cache - lr*dw b_cache = b_cache - lr*db dw,db = 0,0 if (loss == 'mse'): current_loss = mse(x,y,w_cache,b_cache)[0] elif (loss == 'cross_entropy'): current_loss = cross_entropy(x,y,w_cache,b_cache)[0] #Successful Epoch if (current_loss < prev_loss): epoch_val += 1 prev_loss = current_loss #Load the new updates of parameters print('Loss after {}th epoch = {}\n'.format(epoch_val,current_loss)) l_list.append(current_loss) w_list.append(w_cache[0]) b_list.append(b_cache[0]) elif (current_loss >= prev_loss): lr = lr/2 w_cache,b_cache = w_prev,b_prev print('\n\nDecaying Learning rate Gradient ran for {} iterations for {} epochs\n\n'.format(count,epochs)) plt.xlabel('Epochs') plt.ylabel('Loss') plt.title('Loss vs Epoch Curve\nAlgotithm :Mini Batch Decaying Learning Rate Gradient Decent\nBatch Size = {}\nInitial Learning Rate = {}\nLoss Function = {}'.format(batch_size,lr,loss)) plt.plot(ep,l_list) plt.show() return w_list,b_list W,B = Decay_GD(x,y,500,10,'mse',0.01) print('Weight list = \n{}'.format(W)) print('\n\nBias list = \n{}'.format(B)) W,B = Decay_GD(x,y,500,10,'cross_entropy',0.01) print('Weight list = \n{}'.format(W)) print('\n\nBias list = \n{}'.format(B)) #Error Surface MSE w = np.linspace(-10,10,num = 1000,dtype = np.float) b = np.linspace(-10,10,num = 1000,dtype = np.float) w,b = np.meshgrid(w,b) mse_list = [] for i in range(w.shape[0]): Loss = mse(x,y,w[i],b[i]) 
mse_list.append(Loss) fig = plt.figure() ax = fig.gca(projection='3d') surf = ax.plot_surface(w, b, mse_list, cmap=cm.coolwarm,linewidth=0, antialiased=False) plt.title('MSE Error Surface') plt.show() #Error Surface Cross Entropy cross_list = [] for i in range(w.shape[0]): Loss = cross_entropy(x,y,w[i],b[i]) cross_list.append(Loss) fig = plt.figure() ax = fig.gca(projection='3d') surf = ax.plot_surface(w, b, cross_list, cmap=cm.coolwarm,linewidth=0, antialiased=False) plt.title('Cross Entropy Error Surface') plt.show()
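# A quick way to sanity-check the analytic gradients defined above is a central finite
# difference. The cell below is a small added sketch (the sample point and parameter values
# are made up for illustration only); it is self-contained so it can be run on its own.
# +
import numpy as np

def _sigmoid(x, w, b):
    return 1.0 / (1.0 + np.exp(-(w * x + b)))

def _mse_point(x, y, w, b):
    '''Per-sample loss, matching the 0.5*(y - f(x))**2 term used in mse() above.'''
    return 0.5 * (y - _sigmoid(x, w, b)) ** 2

def _grad_w_point(x, y, w, b):
    '''Analytic d(loss)/dw, matching grad_w_mse() above.'''
    fx = _sigmoid(x, w, b)
    return (fx - y) * fx * (1 - fx) * x

x0, y0, w0, b0, h = 2.5, 1.0, 0.3, -0.1, 1e-6
numeric = (_mse_point(x0, y0, w0 + h, b0) - _mse_point(x0, y0, w0 - h, b0)) / (2 * h)
analytic = _grad_w_point(x0, y0, w0, b0)
print('finite difference: {:.8f}   analytic: {:.8f}'.format(numeric, analytic))
# -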
Examples/Gradient Descent with Decaying Learning Rate.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.10.0 64-bit (''advent-env'': conda)' # language: python # name: python3 # --- # # Puzzle 1 # + import numpy as np import pandas as pd df = pd.read_csv("input.txt", header=None, sep=" ") df.columns = ["Direction", "Amount"] df["delta_x"] = ( df["Direction"].map({"forward": 1, "down": 0, "up": 0}) * df["Amount"] ) df["delta_y"] = ( df["Direction"].map({"forward": 0, "down": 1, "up": -1}) * df["Amount"] ) df["x"] = df.delta_x.cumsum() df["y"] = df.delta_y.cumsum() df.loc[-1] = {"x": 0, "y": 0} df = df.sort_index() df # - df.x[999] * df.y[999] df.x.iloc[-1] * df.y.iloc[-1] # # Puzzle 2 # + df = pd.read_csv("input.txt", header=None, sep=" ") df.columns = ["Direction", "Amount"] for col in ["delta_aim", "Aim", "delta_x", "delta_y", "x", "y"]: df[col] = np.NaN df["delta_aim"] = ( df["Direction"].map({"forward": 0, "down": 1, "up": -1}) * df["Amount"] ) df["Aim"] = df["delta_aim"].cumsum() df["delta_x"] = ( df["Direction"].map({"forward": 1, "down": 0, "up": 0}) * df["Amount"] ) df["delta_y"] = ( df["Direction"].map({"forward": 1, "down": 0, "up": 0}) * df["Amount"] * df["Aim"] ) df["x"] = df.delta_x.cumsum() df["y"] = df.delta_y.cumsum() df.loc[-1] = {"x": 0, "y": 0} df = df.sort_index() df # - df.x.iloc[-1] * df.y.iloc[-1] 0
2021/day2/day2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lab 2 # ## Introduction # This lab introduces slope fields and a numerical DE solver, an improved version of # Euler’s Method. Using these techniques involves a number of commands. # # ### Slope fields # Plot the slope field for the differential equation # \begin{align*} # \frac{\mathrm{d}y}{\mathrm{d}x} = x - y # \end{align*} # for $-1<x<5$ and $-2<y<4$. # # Start by importing the NumPy and Plotly methods we will need. This week, we will start to make more sophisticated plots using Plotly graph objects. We will also need a SciPy method. SciPy extends the capabilities of NumPy. # + from plotly.figure_factory import create_quiver from plotly import graph_objs as go from numpy import meshgrid, linspace, sqrt from numpy.testing import assert_almost_equal from scipy.integrate import odeint # - # Now plot the slope field. A slope field is a special type of _quiver_ plot. We create NumPy arrays that say where to plot the line segments (`x` and `y`) and arrays to point them in the right direction (`1/L` and `S/L`). The `layout.update` method is optional, but makes the plot prettier and easier to understand. x, y = meshgrid(linspace(-1, 5, 25), linspace(-2, 4, 25)) S = x - y L = sqrt(1 + S**2) fig = create_quiver(x, y, 1/L, S/L, scale=0.3, arrow_scale=1e-16) fig.layout.update(title_text="Slopefield for dy/dx = x - y", yaxis=dict(scaleanchor='x', scaleratio=1)) fig.show('png') # `1/L` and `S/L` in the `create_quiver` command set the $x$ and $y$ lengths (components) of the line segment at each point in the grid. # # Note that NumPy operates element-wise by default, so `x - y` creates an array of differences, and `S/L` creates an array of quotients. For `1/L`, NumPy does something special called _broadcasting_. It assumes that you meant "divide an array of ones by the elements of `L`". # # The slope of the line segment is then $(S/L)/(1/L) = S$, and the length is # \begin{align*} # \sqrt{\left(\frac{1}{L}\right)^2 + \left(\frac{S}{L}\right)^2} &= \sqrt{\frac{1+S^2}{L^2}}\\ # &= 1. # \end{align*} # ### Numerical/graphical solution of an initial-value problem # Plot the (approximate) solution to the initial-value problem # \begin{align*} # \frac{\mathrm{d}y}{\mathrm{d}x} = x - y\qquad y(-1)=0 # \end{align*} # for $-1 < x <5$. Find $y(5)$. # # Here we use a numerical DE solver `scipy.integrate.odeint` (which we imported as `odeint`), an improved version of Euler’s Method. To use `odeint`, we need to define the differential equation in a _function_ and then feed it to `odeint`. # # First define the function. Note that in Python, [white space is important](https://xkcd.com/353/). That is, you have to indent the contents of your function or Python will complain. Most of the time Jupyter will figure out your intentions and auto-indent. def diff_eq(y, x): return x - y # - The `def` keyword tells Python you would like to define a function. # - In this case the function is called `diff_eq` and takes arguments `y` and `x`. # - The `return` statement tells Python what you would like to return. # - When you stop indenting, the function is over. 
# # Note that `odeint` expects the function (`diff_eq` here) to take (at least) two arguments, where the first (`y` here) is the dependent variable and the second (`x` here) is the independent variable, even if these variables do not appear in the RHS of the DE. # # Now ask `odeint` to generate a solution to our DE. x = linspace(-1, 5, 61) y = odeint(diff_eq, 0, x).flatten() # - `linspace` creates an array of (`61`, in this case) equally-spaced elements. # - `odeint` calculates `y` for each value of `x`. # - In Python, functions are objects like any other. In this case we pass `diff_eq` as an argument to `odeint`. # - The second argument to `odeint` (`0` here) is the initial value of $y$. It must correspond to the first value of `x`. # - `odeint` returns a 2D array. We need a 1D array for plotting so we `flatten` it. # # The following will plot `x` and `y` in a line diagram. Notice the syntax is more complicated than last week. We create the figure, add the line to it, then show it. The advantage of this syntax is that it can be extended to show more than one line on a graph, as we will see below. fig = go.Figure() fig.add_trace(go.Scatter(x=x, y=y)) fig.show('png') # Finally, to calculate $y(5)$, we realise that the values calculated by `odeint` are stored in the array `y`. So display `y`. y # Here we just want the last value. We can grab the last element of the array with `y[-1]`. (`y[-2]` gives the second last element.) y[-1] # `x[-1]` is th elast element of `x`. Check it too. x[-1] # Now we will plot multiple (approximate) solutions on the same graph. We do that by just adding each line as we go. Does the resulting figure make sense? fig = go.Figure() x = linspace(-1, 5, 61) y = odeint(diff_eq, 0, x).flatten() fig.add_trace(go.Scatter(x=x, y=y)) y = odeint(diff_eq, 2, x).flatten() fig.add_trace(go.Scatter(x=x, y=y)) y = odeint(diff_eq, -2, x).flatten() fig.add_trace(go.Scatter(x=x, y=y)) fig.layout.update(showlegend=False) fig.show('png') # Now let’s put the slope field and the numerical solutions together. Copy and paste the code from above where we created the quiver plot into the cell below. Delete the `fig.show()` command and replace it with the code from the above cell. Delete the command where we create an empty figure (`fig = go.Figure()`), because we want to add our line plots to the slope field. # # If you have done it properly, the result should look like this: # # ![](images/week-2.png) # + [markdown] nbgrader={"grade": false, "grade_id": "cell-17e44717e17ca409", "locked": true, "schema_version": 1, "solution": false} # ## Exercises # # Start by importing the tools we will need. # + [markdown] nbgrader={"grade": false, "grade_id": "cell-def8f5ac90289a79", "locked": true, "schema_version": 1, "solution": false} # ### Slope field and DE solution plot # # Plot on the one figure the slopefield for the DE # \begin{align*} # \frac{\mathrm{d} y}{\mathrm{d} x} = 2.5y (1 − y)\qquad y(0) = 0.5, # \end{align*} # and the solutions $y(x)$ with $y(0) = 0.2$, $y(0) = 0.5$ and $y(0) = 0.8$. # # Start by writing down a new definition for `diff_eq` below. Do not change the function's name or inputs. # + nbgrader={"grade": false, "grade_id": "cell-05cc4f7824ab2d84", "locked": false, "schema_version": 1, "solution": true} def diff_eq(y, x): return 2.5*y*(1-y) # - # If you have implemented `diff_eq` correctly, the following should print "nice job". 
# + nbgrader={"grade": true, "grade_id": "cell-0a0fa9099e30078d", "locked": true, "points": 1, "schema_version": 1, "solution": false} assert_almost_equal(diff_eq(0.4, 0), 0.6) assert_almost_equal(diff_eq(0.4, 10), 0.6) assert_almost_equal(diff_eq(1, 0), 0) print("nice job") # + [markdown] nbgrader={"grade": false, "grade_id": "cell-4e81a7c558ed87e4", "locked": true, "schema_version": 1, "solution": false} # Now create your graph. Note that you will have to redefine `S` (from the lab). You can do that using your new definition for `diff_eq` or by writing out the RHS of the equation again. # # You will also have to change your definition of the meshgrid for the slopefield and the domain and initial values in the `odeint` commands. You want about 21 steps in the x and y ranges in meshgrid. If you change the scaling factor from 0.3 to 0.04 in `create_quiver`, you will get a better slope field. # # Create the plot for the region $0 < x < 1$ and $0 < y < 1$. # + nbgrader={"grade": true, "grade_id": "cell-8945b9507fff370f", "locked": false, "points": 2, "schema_version": 1, "solution": true} def diff_eq(y, x): return 2.5*y*(1-y) x, y = meshgrid(linspace(0, 1, 21), linspace(0, 1, 21)) S = 2.5*y*(1-y) L = sqrt(1 + S**2) fig = create_quiver(x, y, 1/L, S/L, scale=0.04, arrow_scale=1e-16) fig.layout.update(title_text="Slopefield for dy/dx = x - y", yaxis=dict(scaleanchor='x', scaleratio=1)) x = linspace(0, 1, 61) y = odeint(diff_eq, 0.5, x).flatten() fig.add_trace(go.Scatter(x=x, y=y)) y = odeint(diff_eq, 0.2, x).flatten() fig.add_trace(go.Scatter(x=x, y=y)) y = odeint(diff_eq, 0.8, x).flatten() fig.add_trace(go.Scatter(x=x, y=y)) fig.layout.update(showlegend=False) fig.show('png') # + [markdown] nbgrader={"grade": false, "grade_id": "cell-0050a7948893bc7b", "locked": true, "schema_version": 1, "solution": false} # ### Solution at a point # What is $y(1)$ if $y(0)=0.8$? # + nbgrader={"grade": false, "grade_id": "cell-962d55b6bbeb85ad", "locked": false, "schema_version": 1, "solution": true} odeint(diff_eq, 0.8, [0,1]) # -
notebooks/lab-02.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # European Centre for Disease Prevention and Control Dataset import pandas as pd import datetime import pycountry import re import os import numpy as np # + tags=["parameters"] # papermill parameters output_folder = "../output/" # - # ### Fetch data df = pd.read_csv("https://opendata.ecdc.europa.eu/covid19/casedistribution/csv") # ### Parse date df["dateRep"] = pd.to_datetime(df["dateRep"], format="%d/%m/%Y") # ### Add difference df['CASES_SINCE_PREV_DAY'] = df.groupby(['countriesAndTerritories','continentExp'])['cases'].diff().fillna(0).astype(int) df['DEATHS_SINCE_PREV_DAY'] = df.groupby(['countriesAndTerritories','continentExp'])['deaths'].diff().fillna(0).astype(int) # ### Drop cols df = df.drop(columns=["day", "month", "year", "countryterritoryCode"]) int_conveyance = df["geoId"].loc["JPG11668" == df["geoId"]].index df["geoId"].iloc[int_conveyance] = np.nan df["popData2019"].iloc[int_conveyance] = np.nan df["continentExp"].iloc[int_conveyance] = np.nan df["countriesAndTerritories"].iloc[int_conveyance] = "Cases on an international conveyance Japan" # ### Resolve Country/Region name country_codes = df["geoId"].unique() for code in country_codes: try: pyc = pycountry.countries.get(alpha_2=code) if pyc: df["countriesAndTerritories"].loc[code == df["geoId"]] = pyc.name except LookupError: df["countriesAndTerritories"].loc[code == df["geoId"]] = None # ### Set Last Update Date and Last Reported Flag df["LAST_UPDATE_DATE"] = datetime.datetime.utcnow() df["LAST_REPORTED_FLAG"] = df["dateRep"].max() == df["dateRep"] # ### Rename Cols df = df.rename(columns={ "dateRep": "DATE", "countriesAndTerritories": "COUNTRY_REGION", "geoId": "ISO3166_1", "popData2018": "POPULATION", }) # ### Save dataframe df.to_csv(output_folder + "ECDC_GLOBAL.csv", index=False, columns=[ "COUNTRY_REGION", "continentExp", "ISO3166_1", "cases", "deaths", "CASES_SINCE_PREV_DAY", "DEATHS_SINCE_PREV_DAY", "popData2019", "DATE", "LAST_UPDATE_DATE", "LAST_REPORTED_FLAG" ])
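# ### Sanity check for the day-over-day difference
# The groupby/diff/fillna pattern used above is easy to misread, so here is a tiny toy example
# (made-up numbers, not ECDC data) showing what it computes within each country.
# +
import pandas as pd  # already imported above; repeated so this cell runs on its own

toy = pd.DataFrame({
    "countriesAndTerritories": ["A", "A", "A", "B", "B"],
    "cases": [10, 15, 30, 1, 4],
})
# within each country: difference to the previous row; the first row of each group becomes 0
toy["CASES_SINCE_PREV_DAY"] = (
    toy.groupby("countriesAndTerritories")["cases"].diff().fillna(0).astype(int)
)
print(toy)
# -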
notebooks/ECDC_GLOBAL.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="ariHTDRGlolu" # # **Regression: Predict Fuel Efficiency `tf-1.x`** # # --- # + [markdown] id="RgvrLLBul9Dn" # [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/kyle-w-brown/tensorflow-1.x.git/HEAD) # + [markdown] id="lOJzsez8HfGU" # In a regression problem, we aim to predict the output of a continuous value, like a price or a probability. Contrast this with a classification problem, where we aim to select a class from a list of classes (for example, where a picture contains an apple or an orange, recognizing which fruit is in the picture). # # This notebook uses the classic Auto MPG Dataset and builds a model to predict the fuel efficiency of late-1970s and early 1980s automobiles. To do this, we'll provide the model with a description of many automobiles from that time period. This description includes attributes like: cylinders, displacement, horsepower, and weight. # # This example uses the tf.keras API, see this guide for details. # + id="QrJIW2LOt3Fz" colab={"base_uri": "https://localhost:8080/"} outputId="c93d5646-74cb-4f18-b909-bb49689e4224" from __future__ import absolute_import, division, print_function import pathlib import pandas as pd import seaborn as sns # %tensorflow_version 1.x import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers print(tf.__version__) # + [markdown] id="Dsz7QEvCHqIR" # ## The Auto MPG dataset # # # --- # # # The dataset is available from the UCI Machine Learning Repository. # # ### Get the data # # First download the dataset. # + id="-gMKdwSIt7Nl" colab={"base_uri": "https://localhost:8080/", "height": 90} outputId="52c701a6-6db4-4bdd-e317-3462b6a48459" dataset_path = keras.utils.get_file("auto-mpg.data", "https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data") dataset_path # + [markdown] id="_3aLDwljH3uX" # Import it using pandas # + id="466hIRIXt7U7" colab={"base_uri": "https://localhost:8080/", "height": 202} outputId="ee9e0a4f-eef2-41ad-e115-9a9f0d122a74" column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight', 'Acceleration', 'Model Year', 'Origin'] raw_dataset = pd.read_csv(dataset_path, names=column_names, na_values = "?", comment='\t', sep=" ", skipinitialspace=True) dataset = raw_dataset.copy() dataset.tail() # + [markdown] id="a1U6vRafH7fU" # ### Clean the data # # The dataset contains a few unknown values. # + id="4Jd0xCh-t7Xl" colab={"base_uri": "https://localhost:8080/"} outputId="e9181808-9d4a-4f0b-c25d-96851a5733d3" dataset.isna().sum() # + [markdown] id="dW5VHPcuH_8E" # To keep this initial tutorial simple drop those rows. # + id="558n8qZEt7aK" dataset = dataset.dropna() # + [markdown] id="jF8D2XtpIDBg" # The "Origin" column is really categorical, not numeric. So convert that to a one-hot: # + id="Hft7PAcEt7eA" origin = dataset.pop('Origin') # + id="l0ZuAUOst7gx" colab={"base_uri": "https://localhost:8080/", "height": 202} outputId="65f88e99-e1d8-47b2-a0e1-f3c1ba2e8792" dataset['USA'] = (origin == 1)*1.0 dataset['Europe'] = (origin == 2)*1.0 dataset['Japan'] = (origin == 3)*1.0 dataset.tail() # + [markdown] id="CMhgzHdwIIDC" # ### Split the data into train and test # # Now split the dataset into a training set and a test set. # # We will use the test set in the final evaluation of our model. 
# + id="vk_X7SNtt7jm" train_dataset = dataset.sample(frac=0.8,random_state=0) test_dataset = dataset.drop(train_dataset.index) # + [markdown] id="--9ZC05jIL8S" # ### Inspect the data # # Have a quick look at the joint distribution of a few pairs of columns from the training set. # + id="qEwO3z1ut7oT" colab={"base_uri": "https://localhost:8080/", "height": 743} outputId="f9c0248e-50db-4407-d9d5-363e42dc6e19" sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde") # + [markdown] id="8pqoSGkgIQNu" # Also look at the overall statistics: # + id="5orzGxi3t7tj" colab={"base_uri": "https://localhost:8080/", "height": 325} outputId="3dcb13e6-1839-4a31-97f6-70c5737f08f1" train_stats = train_dataset.describe() train_stats.pop("MPG") train_stats = train_stats.transpose() train_stats # + [markdown] id="_vlqd5wCInA9" # ### Split features from labels # # Separate the target value, or "label", from the features. This label is the value that you will train the model to predict. # + id="ugBQY-Lgt71a" train_labels = train_dataset.pop('MPG') test_labels = test_dataset.pop('MPG') # + [markdown] id="sF1-aJ6oIrTL" # ### Normalize the data # # Look again at the train_stats block above and note how different the ranges of each feature are. # # It is good practice to normalize features that use different scales and ranges. Although the model might converge without feature normalization, it makes training more difficult, and it makes the resulting model dependent on the choice of units used in the input. # # Note: Although we intentionally generate these statistics from only the training dataset, these statistics will also be used to normalize the test dataset. We need to do that to project the test dataset into the same distribution that the model has been trained on. # + id="TAj8Y84St76i" def norm(x): return (x - train_stats['mean']) / train_stats['std'] normed_train_data = norm(train_dataset) normed_test_data = norm(test_dataset) # + [markdown] id="xrIwTyWjI1iG" # This normalized data is what we will use to train the model. # # Caution: The statistics used to normalize the inputs here (mean and standard deviation) need to be applied to any other data that is fed to the model, along with the one-hot encoding that we did earlier. That includes the test set as well as live data when the model is used in production. # + [markdown] id="DAbIjE-iI6sN" # ## The model # # --- # # # # ### Build the model # # Let's build our model. Here, we'll use a Sequential model with two densely connected hidden layers, and an output layer that returns a single, continuous value. The model building steps are wrapped in a function, build_model, since we'll create a second model, later on. # + id="Eq0H47Dst8AS" def build_model(): model = keras.Sequential([ layers.Dense(64, activation=tf.nn.relu, input_shape=[len(train_dataset.keys())]), layers.Dense(64, activation=tf.nn.relu), layers.Dense(1) ]) optimizer = tf.keras.optimizers.RMSprop(0.001) model.compile(loss='mse', optimizer=optimizer, metrics=['mae', 'mse']) return model # + id="aYzrHn2Tt75B" model = build_model() # + [markdown] id="lqv4btjiJGN1" # ### Inspect the model # # Use the .summary method to print a simple description of the model # + id="LFL6NjkFt7z-" colab={"base_uri": "https://localhost:8080/"} outputId="224886fe-1440-4a9f-eb5a-0a709659d5ba" model.summary() # + [markdown] id="PvCoaLldJLou" # Now try out the model. Take a batch of 10 examples from the training data and call model.predict on it. 
# + id="tMIzSNKut7yp" colab={"base_uri": "https://localhost:8080/"} outputId="dc694f08-76b7-426b-8a14-8d2c31f014a7" example_batch = normed_train_data[:10] example_result = model.predict(example_batch) example_result # + [markdown] id="Aw42DESDJPM4" # It seems to be working, and it produces a result of the expected shape and type. # + [markdown] id="ID6018rOJQH3" # ### Train the model # # Train the model for 1000 epochs, and record the training and validation accuracy in the history object. # + id="DTWSDuwwt7sE" colab={"base_uri": "https://localhost:8080/"} outputId="617ddf0f-ce7f-4035-c384-d4567204f2a9" # Display training progress by printing a single dot for each completed epoch class PrintDot(keras.callbacks.Callback): def on_epoch_end(self, epoch, logs): if epoch % 100 == 0: print('') print('.', end='') EPOCHS = 1000 history = model.fit( normed_train_data, train_labels, epochs=EPOCHS, validation_split = 0.2, verbose=0, callbacks=[PrintDot()]) # + [markdown] id="IdntDqOKJWjW" # Visualize the model's training progress using the stats stored in the history object. # + id="d42h-N4nt7mg" colab={"base_uri": "https://localhost:8080/", "height": 222} outputId="0e02aab2-e0d2-457f-c4be-957b0542d084" hist = pd.DataFrame(history.history) hist['epoch'] = history.epoch hist.tail() # + id="-PBlFj24t7cv" colab={"base_uri": "https://localhost:8080/", "height": 549} outputId="a1dd94e5-6962-4568-ac82-42b9d066d218" import matplotlib.pyplot as plt def plot_history(history): hist = pd.DataFrame(history.history) hist['epoch'] = history.epoch plt.figure() plt.xlabel('Epoch') plt.ylabel('Mean Abs Error [MPG]') plt.plot(hist['epoch'], hist['mean_absolute_error'], label='Train Error') plt.plot(hist['epoch'], hist['val_mean_absolute_error'], label = 'Val Error') plt.legend() plt.ylim([0,5]) plt.figure() plt.xlabel('Epoch') plt.ylabel('Mean Square Error [$MPG^2$]') plt.plot(hist['epoch'], hist['mean_squared_error'], label='Train Error') plt.plot(hist['epoch'], hist['val_mean_squared_error'], label = 'Val Error') plt.legend() plt.ylim([0,20]) plot_history(history) # + [markdown] id="fQsXGNgDJdzM" # This graph shows little improvement, or even degradation in the validation error after about 100 epochs. Let's update the ***model.fit*** call to automatically stop training when the validation score doesn't improve. We'll use an EarlyStopping callback that tests a training condition for every epoch. If a set amount of epochs elapses without showing improvement, then automatically stop the training. # # You can learn more about this callback [here](https://https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/EarlyStopping). # + id="HDS6AwPjt7Tw" colab={"base_uri": "https://localhost:8080/", "height": 584} outputId="e7da5664-35f6-4d43-f900-fa69a78f9bc8" model = build_model() # The patience parameter is the amount of epochs to check for improvement early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10) history = model.fit(normed_train_data, train_labels, epochs=EPOCHS, validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()]) plot_history(history) # + [markdown] id="rHX-DR2KJs3h" # The graph shows that on the validation set, the average error is usually around +/- 2 MPG. Is this good? We'll leave that decision up to you. # # Let's see how well the model generalizes by using the **test** set, which we did not use when training the model. This tells us how well we can expect the model to predict when we use it in the real world. 
# + id="BxJPklD8vml3" colab={"base_uri": "https://localhost:8080/"} outputId="8cadfbf0-ac54-46a8-f7ad-ce7315c8b6a2" loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=0) print("Testing set Mean Abs Error: {:5.2f} MPG".format(mae)) # + [markdown] id="fXq1uuZYJ0DH" # ### Make predictions # # Finally, predict MPG values using data in the testing set: # + id="aq5cYnHPvmuo" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="b8e55213-1d12-4be2-e2f6-5b8738d34ece" test_predictions = model.predict(normed_test_data).flatten() plt.scatter(test_labels, test_predictions) plt.xlabel('True Values [MPG]') plt.ylabel('Predictions [MPG]') plt.axis('equal') plt.axis('square') plt.xlim([0,plt.xlim()[1]]) plt.ylim([0,plt.ylim()[1]]) _ = plt.plot([-100, 100], [-100, 100]) # + [markdown] id="I2oNdZnkJ32Z" # It looks like our model predicts reasonably well. Let's take a look at the error distribution. # + id="0fXbUCA6vms3" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="1db1621d-37c0-46f1-f901-0a9ecbf4ff91" error = test_predictions - test_labels plt.hist(error, bins = 25) plt.xlabel("Prediction Error [MPG]") _ = plt.ylabel("Count") # + [markdown] id="Njm4RbdEJ7JY" # It's not quite gaussian, but we might expect that because the number of samples is very small. # + [markdown] id="cC40jEBGKAD2" # ## Conclusion # # --- # # # # This notebook introduced a few techniques to handle a regression problem. # # * Mean Squared Error (MSE) is a common loss function used for regression problems (different loss functions are used for classification problems). # # * Similarly, evaluation metrics used for regression differ from classification. A common regression metric is Mean Absolute Error (MAE). # # * When numeric input data features have values with different ranges, each feature should be scaled independently to the same range. # # * If there is not much training data, one technique is to prefer a small network with few hidden layers to avoid overfitting. # # * Early stopping is a useful technique to prevent overfitting.
regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # \__init_subclass__() # # Override the \__init_subclass__ method and take a quick look at the effect. # + class Hook: def __init_subclass__(cls, **kwargs): print("__init_subclass__", cls, kwargs) class A(Hook, name="satori", age=16): pass # - # First we define a Hook class and have A inherit from it. **Notice that nothing has been instantiated yet; the output appears at class-creation time.** # # When a class is used as a base class, its internal \__init_subclass__ method is triggered. Here Hook is inherited by A, so Hook's \__init_subclass__ is triggered. Also note that cls is our A, i.e. the class that inherits from Hook, and **kwargs holds the extra keyword arguments we passed. # # Notice that the first parameter is cls rather than self, and this cls is not our Hook but the inheriting class. That is because the method is implicitly decorated with classmethod. # # # Sometimes we want to control how a class is created. That can obviously be done with a metaclass, but for simple scenarios a metaclass is unnecessary; \__init_subclass__ is enough. # + class Hook: def __init_subclass__(cls, **kwargs): for k, v in kwargs.items(): type.__setattr__(cls, k, v) class A(Hook, name="satori", age=16): pass a = A() a.name, a.age # - # As you can see, we customized the class-creation process via __init_subclass__ without using a metaclass. This example is simple, but more complex logic is possible, and in some scenarios it can replace a metaclass. # --- # # # Getting all subclasses # # New-style classes (i.e., classes that derive from object, which is the default in Python) have a \__subclasses__ method that returns their subclasses: # + class Foo(object): pass class Bar(Foo): pass class Baz(Foo): pass class Bing(Bar): pass class Cing(Bing): pass # - # These are the names of the subclasses print([cls.__name__ for cls in Foo.__subclasses__()]) # These are the subclass objects themselves print(Foo.__subclasses__()) # Confirm that the subclasses really list Foo as their base for cls in Foo.__subclasses__(): print(cls.__base__) # + # Note: to get all descendants, you must recurse def all_subclasses(cls): return set(cls.__subclasses__()).union( [s for c in cls.__subclasses__() for s in all_subclasses(c)]) print(all_subclasses(Foo)) # - # Note that if a subclass's class definition has not been executed yet (for example, because its module has not been imported), that subclass does not exist yet and \__subclasses__ cannot find it. # # --- # # # Looking up a class object from a string # # If you have a string containing a class name and want to find that class's subclasses, there are two steps: find the class with the given name, then use the \__subclasses__ method described above to find its subclasses. globals()['Foo'] locals()['Foo'] # If the class could live in any module, your name string should contain the fully qualified name - 'pkg.module.Foo' rather than just 'Foo'. Use importlib to load the class's module, then fetch the corresponding attribute # + import importlib name = 'lxml.etree.HTML' modname, _, clsname = name.rpartition('.') modname, clsname # - mod = importlib.import_module(modname) cls = getattr(mod, clsname) cls
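# Putting the two ideas together, a small illustrative sketch (not part of the original notes):
# resolve a dotted class path with importlib, then walk the subclass tree recursively.
# The path 'collections.abc.Sequence' is just a stand-in; any importable class path works.
# +
import importlib

def load_class(dotted_name):
    modname, _, clsname = dotted_name.rpartition('.')
    return getattr(importlib.import_module(modname), clsname)

def all_subclasses(cls):
    return set(cls.__subclasses__()).union(
        s for c in cls.__subclasses__() for s in all_subclasses(c))

base = load_class('collections.abc.Sequence')
print(sorted(c.__name__ for c in all_subclasses(base)))
# -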
基础/子类相关.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # https://github.com/nipy/nipype/blob/master/examples/fmri_ants_openfmri.py # # Step 9: Connecting to Databases # + from os.path import abspath as opap from nipype.interfaces.io import XNATSource from nipype.pipeline.engine import Node, Workflow from nipype.interfaces.fsl import BET subject_id = 'xnat_S00001' dg = Node(XNATSource(infields=['subject_id'], outfields=['struct'], config='/Users/satra/xnat_configs/nitrc_ir_config'), name='xnatsource') dg.inputs.query_template = ('/projects/fcon_1000/subjects/%s/experiments/xnat_E00001' '/scans/%s/resources/NIfTI/files') dg.inputs.query_template_args['struct'] = [['subject_id', 'anat_mprage_anonymized']] dg.inputs.subject_id = subject_id bet = Node(BET(), name='skull_stripper') wf = Workflow(name='testxnat') wf.base_dir = opap('xnattest') wf.connect(dg, 'struct', bet, 'in_file') # + from nipype.interfaces.io import XNATSink ds = Node(XNATSink(config='/Users/satra/xnat_configs/central_config'), name='xnatsink') ds.inputs.project_id = 'NPTEST' ds.inputs.subject_id = 'NPTEST_xnat_S00001' ds.inputs.experiment_id = 'test_xnat' ds.inputs.reconstruction_id = 'bet' ds.inputs.share = True wf.connect(bet, 'out_file', ds, 'brain')
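# The cells above only wire the workflow together; nothing has been executed yet. Running it
# would look like the sketch below. This is only a sketch: it needs the XNAT config files
# referenced above, valid credentials, and network access to the XNAT servers.
wf.run()  # run the tasks serially
# wf.run(plugin='MultiProc', plugin_args={'n_procs': 2})  # or spread them over local processes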
notebooks/z_advanced_databases.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib inline import matplotlib.pyplot as plt import numpy as np from sklearn import datasets from sklearn.metrics import mean_squared_error, r2_score from BA_SVR import svr_mj from sklearn.svm import SVR # + # Load the diabetes dataset diabetes = datasets.load_diabetes() X = diabetes.data y = diabetes.target X /= X.std(axis=0) print('X.shape: ',X.shape) # + # Split the data into training/testing sets diabetes_X_train = X[:-20] diabetes_X_test = X[-20:] # Split the targets into training/testing sets diabetes_y_train = y[:-20] diabetes_y_test = y[-20:] # - svr = svr_mj(loss='epsilon-insensitive',kernel='rbf',C=1.0,epsilon=0.001,gamma=10) svr.fit(diabetes_X_train,diabetes_y_train) pred = svr.predict(diabetes_X_test) mse = mean_squared_error(diabetes_y_test,pred) r2 = r2_score(diabetes_y_test,pred) print('mse: ',mse) print('r2: ',r2) # + svr_sklearn = SVR(kernel='rbf',C=1.0,epsilon=0.001,gamma=10) svr_sklearn.fit(diabetes_X_train,diabetes_y_train) pred_sklearn = svr_sklearn.predict(diabetes_X_test) mse_sklearn = mean_squared_error(diabetes_y_test,pred_sklearn) r2_sklearn = r2_score(diabetes_y_test,pred_sklearn) print('mse: ',mse_sklearn) print('r2: ',r2_sklearn)
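# A quick visual comparison of the two fits on the 20 held-out samples, a small added sketch
# that reuses the predictions computed above.
# +
plt.figure(figsize=(8, 4))
plt.plot(diabetes_y_test, 'ko-', label='ground truth')
plt.plot(pred, 'b.--', label='svr_mj (custom SVR)')
plt.plot(pred_sklearn, 'r.--', label='sklearn SVR')
plt.xlabel('test sample index')
plt.ylabel('disease progression target')
plt.legend()
plt.show()
# -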
02 Kernel-based Learning/Tutorial 05 - Support Vector Regression/main.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 1. Decision Tree import pandas as pd from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import train_test_split from sklearn import metrics # for accuracy calculations data = pd.read_csv('Lab 5 diabetes.csv') data.head() # ## split dataset into features and target variable feature_column = ['Pregnancies', 'Insulin', 'BMI', 'Age', 'Glucose', 'BloodPressure', 'DiabetesPedigreeFunction' ] X = data[feature_column] y = data.Outcome # target variable X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.15, random_state = 13) dtree = DecisionTreeClassifier() dtree = dtree.fit(X_train, y_train) y_pred = dtree.predict(X_test) print('Accuracy of dtree : ', metrics.accuracy_score(y_test,y_pred)) # # 2. Random Forest # + import cudf import numpy as np import pandas as pd from cuml.ensemble import RandomForestClassifier as curfc from cuml.metrics import accuracy_score from sklearn.ensemble import RandomForestClassifier as skrfc from sklearn.datasets import make_classification from sklearn.model_selection import train_test_split # Define Parameter n_samples = 2**18 n_features = 399 n_info = 300 data_type = np.float32 # + # %%time # Generate data X,y = make_classification(n_samples = n_samples, n_features = n_features, random_state = 123, n_classes = 2) X = pd.DataFrame(X.astype(data_type)) y = pd.Series(y) X_train,X_test,y_train,y_test=train_test_split(X,y, test_size=0.2, random_state=0) # - # %%time X_cudf_train = cudf.DataFrame.from_pandas(X_train) X_cudf_test = cudf.DataFrame.from_pandas(X_test) y_cudf_train = cudf.DataFrame.from_pandas(y_train.values) y_cudf_test = cudf.DataFrame.from_pandas(y_test.values) # %%time # Scikit learn model sk_model = skrfc(n_estimators=35, max_depth=15, max_features=1.0,random_state=23) sk_model.fit(X_train, y_train) # %%time # Evaluate sk_predict = sk_model.predict(X_test) sk_acc = accuracy_score(y_test, sk_predict) print('Accuracy is : ', sk_acc) # %%time cuml_model = curfc(n_estimators=35, max_depth=15, max_features=1.0,random_state=23) cuml_model.fit(X_cudf_train, y_cudf_train) # %%time pred_y = cuml_model.predict(X_cudf_test) cuml_accuracy = accuracy_score(y_cudf_test, pred_y) print('Accuracy is : ', cuml_accuracy) # ## Self Practice # + import numpy as np import pandas as pd from sklearn.ensemble import RandomForestClassifier as skrfc from sklearn.datasets import make_classification from sklearn.model_selection import train_test_split # Define Parameter n_samples = 2**18 n_features = 399 n_info = 300 data_type = np.float32 # + # %%time # Generate data X,y = make_classification(n_samples = n_samples, n_features = n_features, random_state = 123, n_classes = 2) X = pd.DataFrame(X.astype(data_type)) y = pd.Series(y) X_train,X_test,y_train,y_test=train_test_split(X,y, test_size=0.2, random_state=0) # - X.head() y.tail() # + # %%time # Generate data X,y = make_classification(n_samples = n_samples, n_features = n_features, random_state = 123, n_classes = 2) X = pd.DataFrame(X.astype(data_type)) y = pd.Series(y) X_train,X_test,y_train,y_test=train_test_split(X,y, test_size=0.2, random_state=0) # - # %%time # Scikit learn model sk_model = skrfc(n_estimators=35, max_depth=15, max_features=1.0,random_state=23) sk_model.fit(X_train, y_train) # %%time # Evaluate sk_predict = sk_model.predict(X_test) sk_acc = 
accuracy_score(y_test, sk_predict) print('Accuracy is : ', sk_acc)
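# ## Feature importance (added sketch)
# A short follow-up, not part of the original lab: inspect which features the fitted decision
# tree from section 1 relied on most, using the feature_column list defined there.
importances = pd.Series(dtree.feature_importances_, index=feature_column)
print(importances.sort_values(ascending=False))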
A_5 Random Forest and Decision Tree.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Plotting a single ping example # This script demonstrates ploting a single ping as power for every channel in a # specified raw file. # # NOTE: This example was written before the get_power method was added to the ProcessedData object, where you just call EK60.raw_data.get_power() to get the power data. So instead, this example reaches into the EK60RawData objects directly to pull out power. # ## Setup import numpy as np import matplotlib.pyplot as plt from matplotlib.pyplot import cm from echolab2.instruments import EK60 # ## Read in the data # + # Define the path to the data file. raw_filename = "./data/EK60/DY1706_EK60-D20170609-T005736.raw" # Create an instance of our EK60 object. ek60 = EK60.EK60() # Read in the .raw data file. print('Reading raw file %s' % (raw_filename)) ek60.read_raw(raw_filename) # Print some info about the state of our EK60 object. print(ek60) # - # ## Parse and plot the data # + # Set up the colormap for plotting. color = iter(cm.rainbow(np.linspace(0, 1, len(ek60.channel_ids)))) # Create a matplotlib figure to plot our echograms on. fig = plt.figure(figsize=(7, 7)) # Get references. # Specify the ping number to plot. ping_number = 100 # Plot power from the specified ping from all channels. for channel_id in ek60.channel_ids: # Get a reference to the RawData object for this channel. raw_data = ek60.get_raw_data(channel_id=channel_id) # Get a color for this channel. c = next(color) # Create a sample index so we can pass it as the Y axis in the figure so # we plot the data up and down. n_samples = len(raw_data.power[ping_number, :]) yaxis = np.arange(n_samples) # Plot power. plt.plot(raw_data.power[ping_number, :], yaxis, color=c, label=channel_id) # Label the figure and set other display properties. plt.gca().invert_yaxis() plt.ylabel('Sample') plt.xlabel('Power') title = 'Ping %i' % (ping_number) fig.suptitle(title, fontsize=14) plt.legend() # Display plot. plt.show() # -
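# As the note at the top of this notebook mentions, newer pyEcholab versions add a get_power()
# method, so the power data no longer has to be read from raw_data.power directly. A rough
# sketch of that route is below; treat the layout of the returned object as an assumption,
# since it is not shown in this example.
processed_power = raw_data.get_power()  # ProcessedData-style object, per the note above
print(processed_power)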
examples/jupyter notebooks/plot_single_ping_example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # ## Load and select data # # sf_use have 20000 samples import sframe import pandas as pd import numpy as np sf = sframe.load_sframe('./data_sf/text_receiver_sender_small') sf.show # + #sf_use = sf.sample(.1, seed=5) #sf = sf_use.save('./data_sf/text_receiver_sender_small') # - sf_use = sf.to_dataframe() sf_use.shape # + from sklearn.cross_validation import train_test_split sf_train, sf_test = train_test_split(sf_use, test_size = 0.2) print('Sample number of train and test are: %d, %d' % (len(sf_train), len(sf_test))) # - # ## Seperate test to known label and unknown label # + unique_train_senders = np.unique(sf_train['senders']) unique_train_receivers = np.unique(sf_train['receivers']) sf_test.loc[:, 'known_s'] = sf_test['senders'].apply(lambda x: 1 if x in unique_train_senders else 0) sf_test.loc[:, 'known_r'] = sf_test['receivers'].apply(lambda x: 1 if x in unique_train_receivers else 0) # - sf_test.head() # + print('Senders:') print('Number of known senders in train data: %d' % (np.unique(sf_train['senders']).shape[0])) print('Number of known senders in test data: %d' % (np.unique(sf_test['senders'])).shape[0]) print('Number of samples with known sender in test data: %d' % (sf_test['known_s'].sum())) print('Percentage of samples with known senders in test data: %.3f'% (float(sf_test['known_s'].sum()) / sf_test.shape[0])) print '' print('Receivers:') print('Number of known receivers in train data: %d' % (np.unique(sf_train['receivers']).shape[0])) print('Number of known receivers in test data: %d' % (np.unique(sf_test['receivers'])).shape[0]) print('Number of samples with known receivers in test data: %d' % (sf_test['known_r'].sum())) print('Percentage of samples with known receivers in test data: %.3f'% (float(sf_test['known_r'].sum()) / sf_test.shape[0])) # - (sf_test['known_s']==1).mean() # ## Only looking at the samples with known labels: sf_test_known_s = sf_test[sf_test['known_s']==1] sf_test_known_r = sf_test[sf_test['known_r']==1] # + #import cPickle as pickle from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.preprocessing import LabelEncoder from sklearn.naive_bayes import MultinomialNB from sklearn.metrics import f1_score, accuracy_score, precision_score import time import pylab as plt # %matplotlib inline sv = LabelEncoder() rv = LabelEncoder() dv = TfidfVectorizer(max_df=0.06, min_df=0.001) # - # ## label all known sender and receiver in sf_use sv.fit(sf_use['senders']) # label all known sender in sf_use rv.fit(sf_use['receivers']) # label all known receivers in sf_use sv.classes_.shape rv.classes_.shape train_X = dv.fit_transform(sf_train['text']) train_s = sv.transform(sf_train['senders']) train_r = rv.transform(sf_train['receivers']) # + val_X_s = dv.transform(sf_test_known_s['text']) val_s = sv.transform(sf_test_known_s['senders']) val_X_r = dv.transform(sf_test_known_r['text']) val_r = rv.transform(sf_test_known_r['receivers']) # - train_X.shape val_X_s.shape val_X_r.shape # ### double check if all the labels in val exist in train labels (should all included) l3 = np.array([x for x in val_s if x not in train_s]) 1 - float(l3.shape[0]) / val_s.shape[0] train_s.shape u, c = np.unique(train_s, return_counts=True) indices = np.argsort(c)[::-1] fig = 
plt.scatter(range(u.shape[0]), c[indices]) plt.yscale('log') u, c = np.unique(train_r, return_counts=True) indices = np.argsort(c)[::-1] fig = plt.scatter(range(u.shape[0]), c[indices]) plt.yscale('log') train_X.shape count_train_X = (train_X.toarray()).sum(axis = 0) # note that train_X is a sparse matrix, we change it to an array before plot type(train_X) fig = plt.hist(count_train_X, 50) plt.yscale('log') count_train_X[0:10] count_train_X.max() count_train_X.max()/13575 clf_s = MultinomialNB(alpha=0.0001, fit_prior=False) clf_s.fit(train_X, train_s) pred_s = clf_s.predict(val_X_s) print f1_score(val_s, pred_s, average='micro') print f1_score(val_s, pred_s, average='macro') print f1_score(val_s, pred_s, average='weighted') print precision_score(val_s, pred_s, average='weighted') pred_s val_s sv.inverse_transform(1360) clf_r = MultinomialNB(alpha=0.0001, fit_prior=False) clf_r.fit(train_X, train_r) pred_r = clf_r.predict(val_X_r) print f1_score(val_r, pred_r, average='micro') print f1_score(val_r, pred_r, average='macro') print f1_score(val_r, pred_r, average='weighted') print precision_score(val_r, pred_r, average='weighted') # # Test some other classifiers # + # Not working #from sklearn.linear_model import LogisticRegression #clf_s = LogisticRegression(n_jobs=-1) #clf_s.fit(train_X, train_s) #pred_s = clf_s.predict(val_X_s) #print f1_score(val_s, pred_s, average='micro') #print f1_score(val_s, pred_s, average='macro') #print f1_score(val_s, pred_s, average='weighted') #print precision_score(val_s, pred_s, average='weighted') # - # ## Check probability output # ### For sender: pred_prob_s = clf_s.predict_proba(val_X_s) pred_prob_s.shape pred_prob_s[0] pred_prob_s[0].max() clf_s.classes_ # #### Now, take a look for the unknown label test: sf_test_unknown_s = sf_test[sf_test['known_s']==0] val_X_unk_s = dv.transform(sf_test_unknown_s['text']) val_unk_s = sv.transform(sf_test_unknown_s['senders']) pred_unk_s = clf_s.predict(val_X_unk_s) print f1_score(val_unk_s, pred_unk_s, average='micro') print f1_score(val_unk_s, pred_unk_s, average='macro') print f1_score(val_unk_s, pred_unk_s, average='weighted') print precision_score(val_unk_s, pred_unk_s, average='weighted') # + clf_s = MultinomialNB(alpha=0.1, fit_prior=False) clf_s.fit(train_X, train_s) pred_s = clf_s.predict(val_X_s) print f1_score(val_s, pred_s, average='micro') print f1_score(val_s, pred_s, average='macro') print f1_score(val_s, pred_s, average='weighted') print precision_score(val_s, pred_s, average='weighted') pred_prob_s = clf_s.predict_proba(val_X_s) pred_prob_unk_s = clf_s.predict_proba(val_X_unk_s) plt.figure(figsize=(16,4)) plt.subplot(1, 2, 1) plt.hist(pred_prob_s.max(axis=1),20) plt.title('Histogram of predicted maximun probabilities for \n sender with known labels in training (4087 sampes) \n alpha=0.1, acc=0.94') #plt.yscale('log') plt.subplot(1, 2, 2) plt.hist(pred_prob_unk_s.max(axis=1),20) plt.title('Histogram of predicted maximun probabilities for \n sender with UNKNOWN labels in training (254 sampes) \n alpha=0.1, acc=0.0') #plt.yscale('log') # -
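# The two histograms above suggest a simple rejection rule: flag a prediction as "unknown
# sender" whenever the classifier's maximum class probability falls below a threshold. A small
# sketch using the arrays computed above; the 0.5 cut-off is only an illustration, not a tuned
# value.
# +
threshold = 0.5
known_kept = (pred_prob_s.max(axis=1) >= threshold).mean()
unknown_rejected = (pred_prob_unk_s.max(axis=1) < threshold).mean()
print 'fraction of known-sender mail kept: %.3f' % known_kept
print 'fraction of unknown-sender mail rejected: %.3f' % unknown_rejected
# -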
Step1-SimpleNBModel.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Net Surgery # # Caffe networks can be transformed to your particular needs by editing the model parameters. The data, diffs, and parameters of a net are all exposed in pycaffe. # # Roll up your sleeves for net surgery with pycaffe! # + import numpy as np import matplotlib.pyplot as plt # %matplotlib inline import Image # Make sure that caffe is on the python path: caffe_root = '../' # this file is expected to be in {caffe_root}/examples import sys sys.path.insert(0, caffe_root + 'python') import caffe # configure plotting plt.rcParams['figure.figsize'] = (10, 10) plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # - # ## Designer Filters # # To show how to load, manipulate, and save parameters we'll design our own filters into a simple network that's only a single convolution layer. This net has two blobs, `data` for the input and `conv` for the convolution output and one parameter `conv` for the convolution filter weights and biases. # + # Load the net, list its data and params, and filter an example image. caffe.set_mode_cpu() net = caffe.Net('net_surgery/conv.prototxt', caffe.TEST) print("blobs {}\nparams {}".format(net.blobs.keys(), net.params.keys())) # load image and prepare as a single input batch for Caffe im = np.array(Image.open('images/cat_gray.jpg')) plt.title("original image") plt.imshow(im) plt.axis('off') im_input = im[np.newaxis, np.newaxis, :, :] net.blobs['data'].reshape(*im_input.shape) net.blobs['data'].data[...] = im_input # - # The convolution weights are initialized from Gaussian noise while the biases are initialized to zero. These random filters give output somewhat like edge detections. # + # helper show filter outputs def show_filters(net): net.forward() plt.figure() filt_min, filt_max = net.blobs['conv'].data.min(), net.blobs['conv'].data.max() for i in range(3): plt.subplot(1,4,i+2) plt.title("filter #{} output".format(i)) plt.imshow(net.blobs['conv'].data[0, i], vmin=filt_min, vmax=filt_max) plt.tight_layout() plt.axis('off') # filter the image with initial show_filters(net) # - # Raising the bias of a filter will correspondingly raise its output: # pick first filter output conv0 = net.blobs['conv'].data[0, 0] print("pre-surgery output mean {:.2f}".format(conv0.mean())) # set first filter bias to 10 net.params['conv'][1].data[0] = 1. net.forward() print("post-surgery output mean {:.2f}".format(conv0.mean())) # Altering the filter weights is more exciting since we can assign any kernel like Gaussian blur, the Sobel operator for edges, and so on. The following surgery turns the 0th filter into a Gaussian blur and the 1st and 2nd filters into the horizontal and vertical gradient parts of the Sobel operator. # # See how the 0th output is blurred, the 1st picks up horizontal edges, and the 2nd picks up vertical edges. ksize = net.params['conv'][0].data.shape[2:] # make Gaussian blur sigma = 1. y, x = np.mgrid[-ksize[0]//2 + 1:ksize[0]//2 + 1, -ksize[1]//2 + 1:ksize[1]//2 + 1] g = np.exp(-((x**2 + y**2)/(2.0*sigma**2))) gaussian = (g / g.sum()).astype(np.float32) net.params['conv'][0].data[0] = gaussian # make Sobel operator for edge detection net.params['conv'][0].data[1:] = 0. 
sobel = np.array((-1, -2, -1, 0, 0, 0, 1, 2, 1), dtype=np.float32).reshape((3,3)) net.params['conv'][0].data[1, 0, 1:-1, 1:-1] = sobel # horizontal net.params['conv'][0].data[2, 0, 1:-1, 1:-1] = sobel.T # vertical show_filters(net) # With net surgery, parameters can be transplanted across nets, regularized by custom per-parameter operations, and transformed according to your schemes. # ## Casting a Classifier into a Fully Convolutional Network # # Let's take the standard Caffe Reference ImageNet model "CaffeNet" and transform it into a fully convolutional net for efficient, dense inference on large inputs. This model generates a classification map that covers a given input size instead of a single classification. In particular a 8 $\times$ 8 classification map on a 451 $\times$ 451 input gives 64x the output in only 3x the time. The computation exploits a natural efficiency of convolutional network (convnet) structure by amortizing the computation of overlapping receptive fields. # # To do so we translate the `InnerProduct` matrix multiplication layers of CaffeNet into `Convolutional` layers. This is the only change: the other layer types are agnostic to spatial size. Convolution is translation-invariant, activations are elementwise operations, and so on. The `fc6` inner product when carried out as convolution by `fc6-conv` turns into a 6 \times 6 filter with stride 1 on `pool5`. Back in image space this gives a classification for each 227 $\times$ 227 box with stride 32 in pixels. Remember the equation for output map / receptive field size, output = (input - kernel_size) / stride + 1, and work out the indexing details for a clear understanding. # !diff imagenet/bvlc_caffenet_full_conv.prototxt ../models/bvlc_reference_caffenet/deploy.prototxt # The only differences needed in the architecture are to change the fully connected classifier inner product layers into convolutional layers with the right filter size -- 6 x 6, since the reference model classifiers take the 36 elements of `pool5` as input -- and stride 1 for dense classification. Note that the layers are renamed so that Caffe does not try to blindly load the old parameters when it maps layer names to the pretrained model. # + # Make sure that caffe is on the python path: caffe_root = '../' # this file is expected to be in {caffe_root}/examples import sys sys.path.insert(0, caffe_root + 'python') import caffe # Load the original network and extract the fully connected layers' parameters. net = caffe.Net('../models/bvlc_reference_caffenet/deploy.prototxt', '../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel', caffe.TEST) params = ['fc6', 'fc7', 'fc8'] # fc_params = {name: (weights, biases)} fc_params = {pr: (net.params[pr][0].data, net.params[pr][1].data) for pr in params} for fc in params: print '{} weights are {} dimensional and biases are {} dimensional'.format(fc, fc_params[fc][0].shape, fc_params[fc][1].shape) # - # Consider the shapes of the inner product parameters. The weight dimensions are the output and input sizes while the bias dimension is the output size. # + # Load the fully convolutional network to transplant the parameters. 
net_full_conv = caffe.Net('net_surgery/bvlc_caffenet_full_conv.prototxt', '../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel', caffe.TEST) params_full_conv = ['fc6-conv', 'fc7-conv', 'fc8-conv'] # conv_params = {name: (weights, biases)} conv_params = {pr: (net_full_conv.params[pr][0].data, net_full_conv.params[pr][1].data) for pr in params_full_conv} for conv in params_full_conv: print '{} weights are {} dimensional and biases are {} dimensional'.format(conv, conv_params[conv][0].shape, conv_params[conv][1].shape) # - # The convolution weights are arranged in output $\times$ input $\times$ height $\times$ width dimensions. To map the inner product weights to convolution filters, we could roll the flat inner product vectors into channel $\times$ height $\times$ width filter matrices, but actually these are identical in memory (as row major arrays) so we can assign them directly. # # The biases are identical to those of the inner product. # # Let's transplant! for pr, pr_conv in zip(params, params_full_conv): conv_params[pr_conv][0].flat = fc_params[pr][0].flat # flat unrolls the arrays conv_params[pr_conv][1][...] = fc_params[pr][1] # Next, save the new model weights. net_full_conv.save('net_surgery/bvlc_caffenet_full_conv.caffemodel') # To conclude, let's make a classification map from the example cat image and visualize the confidence of "tiger cat" as a probability heatmap. This gives an 8-by-8 prediction on overlapping regions of the 451 $\times$ 451 input. # + import numpy as np import matplotlib.pyplot as plt # %matplotlib inline # load input and configure preprocessing im = caffe.io.load_image('images/cat.jpg') transformer = caffe.io.Transformer({'data': net_full_conv.blobs['data'].data.shape}) transformer.set_mean('data', np.load('../python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1)) transformer.set_transpose('data', (2,0,1)) transformer.set_channel_swap('data', (2,1,0)) transformer.set_raw_scale('data', 255.0) # make classification map by forward and print prediction indices at each location out = net_full_conv.forward_all(data=np.asarray([transformer.preprocess('data', im)])) print out['prob'][0].argmax(axis=0) # show net input and confidence map (probability of the top prediction at each location) plt.subplot(1, 2, 1) plt.imshow(transformer.deprocess('data', net_full_conv.blobs['data'].data[0])) plt.subplot(1, 2, 2) plt.imshow(out['prob'][0,281]) # - # The classifications include various cats -- 282 = tiger cat, 281 = tabby, 283 = persian -- and foxes and other mammals. # # In this way the fully connected layers can be extracted as dense features across an image (see `net_full_conv.blobs['fc6'].data` for instance), which is perhaps more useful than the classification map itself. # # Note that this model isn't totally appropriate for sliding-window detection since it was trained for whole-image classification. Nevertheless it can work just fine. Sliding-window training and finetuning can be done by defining a sliding-window ground truth and loss such that a loss map is made for every location and solving as usual. (This is an exercise for the reader.) # *A thank you to <NAME> for first suggesting this trick.*
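# A quick numeric check of the receptive-field formula quoted earlier,
# output = (input - kernel_size) / stride + 1, using the numbers from this example:
# 227 x 227 windows with an effective stride of 32 pixels over a 451 x 451 input.
input_size, window, stride = 451, 227, 32
print (input_size - window) / stride + 1  # -> 8, matching the 8 x 8 classification map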
examples/net_surgery.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Triggering a Cloud Composer Pipeline with a Google Cloud Function # # In this advanced lab you will learn how to create and run an [Apache Airflow](http://airflow.apache.org/) workflow in Cloud Composer that completes the following tasks: # - Watches for new CSV data to be uploaded to a [Cloud Storage](https://cloud.google.com/storage/docs/) bucket # - A [Cloud Function](https://cloud.google.com/composer/docs/how-to/using/triggering-with-gcf#getting_the_client_id) call triggers the [Cloud Composer Airflow DAG](https://cloud.google.com/composer/docs/how-to/using/writing-dags) to run when a new file is detected # - The workflow finds the input file that triggered the workflow and executes a [Cloud Dataflow](https://cloud.google.com/dataflow/) job to transform and output the data to BigQuery # - Moves the original input file to a different Cloud Storage bucket for storing processed files # ## Part One: Create Cloud Composer environment and workflow # First, create a Cloud Composer environment if you don't have one already by doing the following: # 1. In the Navigation menu under Big Data, select **Composer** # 2. Select **Create** # 3. Set the following parameters: # - Name: mlcomposer # - Location: us-central1 # - Other values at defaults # 4. Select **Create** # # The environment creation process is completed when the green checkmark displays to the left of the environment name on the Environments page in the GCP Console. # It can take up to 20 minutes for the environment to complete the setup process. Move on to the next section - Create Cloud Storage buckets and BigQuery dataset. # # ## Set environment variables # + import os PROJECT = 'your-project-id' # REPLACE WITH YOUR PROJECT ID REGION = 'us-central1' # REPLACE WITH YOUR REGION e.g. us-central1 # do not change these os.environ['PROJECT'] = PROJECT os.environ['REGION'] = REGION # - # ## Create Cloud Storage buckets # Create two Cloud Storage Multi-Regional buckets in your project. # - project-id_input # - project-id_output # # Run the below to automatically create the buckets and load some sample data: # + language="bash" # ## create GCS buckets # exists=$(gsutil ls -d | grep -w gs://${PROJECT}_input/) # if [ -n "$exists" ]; then # echo "Skipping the creation of input bucket." # else # echo "Creating input bucket." # gsutil mb -l ${REGION} gs://${PROJECT}_input # echo "Loading sample data for later" # gsutil cp resources/usa_names.csv gs://${PROJECT}_input # fi # # exists=$(gsutil ls -d | grep -w gs://${PROJECT}_output/) # if [ -n "$exists" ]; then # echo "Skipping the creation of output bucket." # else # echo "Creating output bucket." # gsutil mb -l ${REGION} gs://${PROJECT}_output # fi # - # ## Create BigQuery Destination Dataset and Table # Next, we'll create a data sink to store the ingested data from GCS<br><br> # # ### Create a new Dataset # 1. In the Navigation menu, select **BigQuery** # 2. Then click on your qwiklabs project ID # 3. Click **Create Dataset** # 4. Name your dataset **ml_pipeline** and leave other values at defaults # 5. Click **Create Dataset** # # # ### Create a new empty table # 1. Click on the newly created dataset # 2. Click **Create Table** # 3. For Destination Table name specify **ingest_table** # 4. 
For schema click **Edit as Text** and paste in the below schema # # state: STRING,<br> # gender: STRING,<br> # year: STRING,<br> # name: STRING,<br> # number: STRING,<br> # created_date: STRING,<br> # filename: STRING,<br> # load_dt: DATE<br><br> # # 5. Click **Create Table** # ## Review of Airflow concepts # While your Cloud Composer environment is building, let’s discuss the sample file you’ll be using in this lab. # <br><br> # [Airflow](https://airflow.apache.org/) is a platform to programmatically author, schedule and monitor workflows # <br><br> # Use airflow to author workflows as directed acyclic graphs (DAGs) of tasks. The airflow scheduler executes your tasks on an array of workers while following the specified dependencies. # <br><br> # ### Core concepts # - [DAG](https://airflow.apache.org/concepts.html#dags) - A Directed Acyclic Graph is a collection of tasks, organised to reflect their relationships and dependencies. # - [Operator](https://airflow.apache.org/concepts.html#operators) - The description of a single task, it is usually atomic. For example, the BashOperator is used to execute bash command. # - [Task](https://airflow.apache.org/concepts.html#tasks) - A parameterised instance of an Operator; a node in the DAG. # - [Task Instance](https://airflow.apache.org/concepts.html#task-instances) - A specific run of a task; characterised as: a DAG, a Task, and a point in time. It has an indicative state: *running, success, failed, skipped, …*<br><br> # The rest of the Airflow concepts can be found [here](https://airflow.apache.org/concepts.html#). # # # ## Complete the DAG file # Cloud Composer workflows are comprised of [DAGs (Directed Acyclic Graphs)](https://airflow.incubator.apache.org/concepts.html#dags). The code shown in simple_load_dag.py is the workflow code, also referred to as the DAG. # <br><br> # Open the file now to see how it is built. Next will be a detailed look at some of the key components of the file. # <br><br> # To orchestrate all the workflow tasks, the DAG imports the following operators: # - DataFlowPythonOperator # - PythonOperator # <br><br> # Action: <span style="color:blue">**Complete the # TODOs in the simple_load_dag.py DAG file below**</span> file while you wait for your Composer environment to be setup. # + # %%writefile simple_load_dag.py # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A simple Airflow DAG that is triggered externally by a Cloud Function when a file lands in a GCS bucket. Once triggered the DAG performs the following steps: 1. Triggers a Google Cloud Dataflow job with the input file information received from the Cloud Function trigger. 2. Upon completion of the Dataflow job, the input file is moved to a gs://<target-bucket>/<success|failure>/YYYY-MM-DD/ location based on the status of the previous step. 
""" import datetime import logging import os from airflow import configuration from airflow import models from airflow.contrib.hooks import gcs_hook from airflow.contrib.operators import dataflow_operator from airflow.operators import python_operator from airflow.utils.trigger_rule import TriggerRule # We set the start_date of the DAG to the previous date. This will # make the DAG immediately available for scheduling. YESTERDAY = datetime.datetime.combine( datetime.datetime.today() - datetime.timedelta(1), datetime.datetime.min.time()) # We define some variables that we will use in the DAG tasks. SUCCESS_TAG = 'success' FAILURE_TAG = 'failure' # An Airflow variable called gcp_completion_bucket is required. # This variable will contain the name of the bucket to move the processed # file to. # '_names' must appear in CSV filename to be ingested (adjust as needed) # we are only looking for files with the exact name usa_names.csv (you can specify wildcards if you like) INPUT_BUCKET_CSV = 'gs://'+models.Variable.get('gcp_input_location')+'/usa_names.csv' # TODO: Populate the models.Variable.get() with the actual variable name for your output bucket COMPLETION_BUCKET = 'gs://'+models.Variable.get('gcp_completion_bucket') DS_TAG = '{{ ds }}' DATAFLOW_FILE = os.path.join( configuration.get('core', 'dags_folder'), 'dataflow', 'process_delimited.py') # The following additional Airflow variables should be set: # gcp_project: Google Cloud Platform project id. # gcp_temp_location: Google Cloud Storage location to use for Dataflow temp location. DEFAULT_DAG_ARGS = { 'start_date': YESTERDAY, 'retries': 2, # TODO: Populate the models.Variable.get() with the variable name for your GCP Project 'project_id': models.Variable.get('gcp_project'), 'dataflow_default_options': { 'project': models.Variable.get('gcp_project'), # TODO: Populate the models.Variable.get() with the variable name for temp location 'temp_location': 'gs://'+models.Variable.get('gcp_temp_location'), 'runner': 'DataflowRunner' } } def move_to_completion_bucket(target_bucket, target_infix, **kwargs): """A utility method to move an object to a target location in GCS.""" # Here we establish a connection hook to GoogleCloudStorage. # Google Cloud Composer automatically provides a google_cloud_storage_default # connection id that is used by this hook. conn = gcs_hook.GoogleCloudStorageHook() # The external trigger (Google Cloud Function) that initiates this DAG # provides a dag_run.conf dictionary with event attributes that specify # the information about the GCS object that triggered this DAG. # We extract the bucket and object name from this dictionary. source_bucket = models.Variable.get('gcp_input_location') source_object = models.Variable.get('gcp_input_location')+'/usa_names.csv' completion_ds = kwargs['ds'] target_object = os.path.join(target_infix, completion_ds, source_object) logging.info('Copying %s to %s', os.path.join(source_bucket, source_object), os.path.join(target_bucket, target_object)) conn.copy(source_bucket, source_object, target_bucket, target_object) logging.info('Deleting %s', os.path.join(source_bucket, source_object)) conn.delete(source_bucket, source_object) # Setting schedule_interval to None as this DAG is externally trigger by a Cloud Function. # The following Airflow variables should be set for this DAG to function: # bq_output_table: BigQuery table that should be used as the target for # Dataflow in <dataset>.<tablename> format. # e.g. 
lake.usa_names # input_field_names: Comma separated field names for the delimited input file. # e.g. state,gender,year,name,number,created_date # TODO: Name the DAG id GcsToBigQueryTriggered with models.DAG(dag_id='GcsToBigQueryTriggered', description='A DAG triggered by an external Cloud Function', schedule_interval=None, default_args=DEFAULT_DAG_ARGS) as dag: # Args required for the Dataflow job. job_args = { 'input': INPUT_BUCKET_CSV, # TODO: Populate the models.Variable.get() with the variable name for BQ table 'output': models.Variable.get('bq_output_table'), # TODO: Populate the models.Variable.get() with the variable name for input field names 'fields': models.Variable.get('input_field_names'), 'load_dt': DS_TAG } # Main Dataflow task that will process and load the input delimited file. # TODO: Specify the type of operator we need to call to invoke DataFlow dataflow_task = dataflow_operator.DataFlowPythonOperator( task_id="process-delimited-and-push", py_file=DATAFLOW_FILE, options=job_args) # Here we create two conditional tasks, one of which will be executed # based on whether the dataflow_task was a success or a failure. success_move_task = python_operator.PythonOperator(task_id='success-move-to-completion', python_callable=move_to_completion_bucket, # A success_tag is used to move # the input file to a success # prefixed folder. op_args=[models.Variable.get('gcp_completion_bucket'), SUCCESS_TAG], provide_context=True, trigger_rule=TriggerRule.ALL_SUCCESS) failure_move_task = python_operator.PythonOperator(task_id='failure-move-to-completion', python_callable=move_to_completion_bucket, # A failure_tag is used to move # the input file to a failure # prefixed folder. op_args=[models.Variable.get('gcp_completion_bucket'), FAILURE_TAG], provide_context=True, trigger_rule=TriggerRule.ALL_FAILED) # The success_move_task and failure_move_task are both downstream from the # dataflow_task. dataflow_task >> success_move_task dataflow_task >> failure_move_task # - # ## Viewing environment information # Now that you have a completed DAG, it's time to copy it to your Cloud Composer environment and finish the setup of your workflow.<br><br> # 1. Go back to **Composer** to check on the status of your environment. # 2. Once your environment has been created, click the **name of the environment** to see its details. # <br><br> # The Environment details page provides information, such as the Airflow web UI URL, Google Kubernetes Engine cluster ID, name of the Cloud Storage bucket connected to the DAGs folder. # <br><br> # Cloud Composer uses Cloud Storage to store Apache Airflow DAGs, also known as workflows. Each environment has an associated Cloud Storage bucket. Cloud Composer schedules only the DAGs in the Cloud Storage bucket. # ## Setting Airflow variables # Our DAG relies on variables to pass in values like the GCP Project. We can set these in the Admin UI. # # Airflow variables are an Airflow-specific concept that is distinct from [environment variables](https://cloud.google.com/composer/docs/how-to/managing/environment-variables). In this step, you'll set the following six [Airflow variables](https://airflow.apache.org/concepts.html#variables) used by the DAG we will deploy. 
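# As listed in the next cell, there are six key/value pairs to set. As an optional shortcut, the hedged sketch below sets all six from this notebook by shelling out to the same `gcloud composer ... variables -- --set` command shown in Option 2 further down; it assumes the environment is named `mlcomposer` (as created at the start of the lab) and that `gcloud` is installed and authenticated where this notebook runs.

# +
import subprocess

airflow_variables = {
    'gcp_project': PROJECT,
    'gcp_input_location': PROJECT + '_input',
    'gcp_temp_location': PROJECT + '_output/tmp',
    'gcp_completion_bucket': PROJECT + '_output',
    'input_field_names': 'state,gender,year,name,number,created_date',
    'bq_output_table': 'ml_pipeline.ingest_table',
}

for key, value in airflow_variables.items():
    # One gcloud invocation per variable, mirroring the manual CLI option described below.
    subprocess.check_call(['gcloud', 'composer', 'environments', 'run', 'mlcomposer',
                           '--location', REGION, 'variables', '--', '--set', key, value])
# -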
## Run this to display which key/value pairs to input import pandas as pd pd.DataFrame([ ('gcp_project', PROJECT), ('gcp_input_location', PROJECT + '_input'), ('gcp_temp_location', PROJECT + '_output/tmp'), ('gcp_completion_bucket', PROJECT + '_output'), ('input_field_names', 'state,gender,year,name,number,created_date'), ('bq_output_table', 'ml_pipeline.ingest_table') ], columns = ['Key', 'Value']) # ### Option 1: Set the variables using the Airflow webserver UI # 1. In your Airflow environment, select **Admin** > **Variables** # 2. Populate each key and value using the table above # ### Option 2: Set the variables using the Airflow CLI # The next gcloud composer command executes the Airflow CLI sub-command [variables](https://airflow.apache.org/cli.html#variables). The sub-command passes the arguments to the gcloud command line tool.<br><br> # To set the six variables, run the gcloud composer command **once for each row** from the above table. Just as an example, to set the variable `gcp_project` you could do this: # + language="bash" # gcloud composer environments run ENVIRONMENT_NAME \ # --location ${REGION} variables -- \ # --set gcp_project ${PROJECT} # - # ### Copy your Airflow bucket name # 1. Navigate to your Cloud Composer [instance](https://console.cloud.google.com/composer/environments?project=)<br/><br/> # 2. Select __DAGs Folder__<br/><br/> # 3. You will be taken to the Google Cloud Storage bucket that Cloud Composer has created automatically for your Airflow instance<br/><br/> # 4. __Copy the bucket name__ into the variable below (example: us-central1-composer-08f6edeb-bucket) AIRFLOW_BUCKET = 'us-central1-composer-21587538-bucket' # REPLACE WITH AIRFLOW BUCKET NAME os.environ['AIRFLOW_BUCKET'] = AIRFLOW_BUCKET # ### Copy your Airflow files to your Airflow bucket # + language="bash" # gsutil cp simple_load_dag.py gs://${AIRFLOW_BUCKET}/dags # overwrite DAG file if it exists # gsutil cp -r dataflow/process_delimited.py gs://${AIRFLOW_BUCKET}/dags/dataflow/ # copy the Dataflow job to be run # - # *** # ## Navigating Using the Airflow UI # To access the Airflow web interface using the GCP Console: # 1. Go back to the **Composer Environments** page. # 2. In the **Airflow webserver** column for the environment, click the new window icon. # 3. The Airflow web UI opens in a new browser window. # ### Trigger DAG run manually # Running your DAG manually confirms that it operates successfully even in the absence of triggered events. # 1. Trigger the DAG manually by **clicking the play button** under Links # # *** # # Part Two: Trigger DAG run automatically from a file upload to GCS # Now that your manual workflow runs successfully, it is time to trigger it based on an external event. # ## Create a Cloud Function to trigger your workflow # We will be following this [reference guide](https://cloud.google.com/composer/docs/how-to/using/triggering-with-gcf) to set up our Cloud Function # 1. In the code block below, uncomment the project_id, location, and composer_environment and populate them # 2. Run the below code to get your **CLIENT_ID** (needed later) # + import google.auth import google.auth.transport.requests import requests import six.moves.urllib.parse # Authenticate with Google Cloud.
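# Note (added): the rest of this cell uses Application Default Credentials to call the Cloud
# Composer API, reads the Airflow web server URL (airflowUri) from the environment config, then
# follows the unauthenticated redirect from that URL and parses the IAP client_id query parameter
# out of the redirect location. That client_id is the CLIENT_ID the Cloud Function needs later.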
# See: https://cloud.google.com/docs/authentication/getting-started credentials, _ = google.auth.default( scopes=['https://www.googleapis.com/auth/cloud-platform']) authed_session = google.auth.transport.requests.AuthorizedSession( credentials) project_id = 'your-project-id' location = 'us-central1' composer_environment = 'composer' environment_url = ( 'https://composer.googleapis.com/v1beta1/projects/{}/locations/{}' '/environments/{}').format(project_id, location, composer_environment) composer_response = authed_session.request('GET', environment_url) environment_data = composer_response.json() airflow_uri = environment_data['config']['airflowUri'] # The Composer environment response does not include the IAP client ID. # Make a second, unauthenticated HTTP request to the web server to get the # redirect URI. redirect_response = requests.get(airflow_uri, allow_redirects=False) redirect_location = redirect_response.headers['location'] # Extract the client_id query parameter from the redirect. parsed = six.moves.urllib.parse.urlparse(redirect_location) query_string = six.moves.urllib.parse.parse_qs(parsed.query) print(query_string['client_id'][0]) # - # ### Grant Service Account Permissions # # To authenticate to Cloud IAP, grant the Appspot Service Account (used by Cloud Functions) the Service Account Token Creator role on itself. To do this, execute the following command in [Cloud Shell](https://console.cloud.google.com/?cloudshell=true). Be sure to replace 'your-project-id' #Execute the following in Cloud Shell, it will not work here gcloud iam service-accounts add-iam-policy-binding \ <EMAIL> \ --member=serviceAccount:<EMAIL> \ --role=roles/iam.serviceAccountTokenCreator # ## Create the Cloud Function # # 1. Navigate to Compute > **Cloud Functions** # 2. Select **Create function** # 3. For name specify **'gcs-dag-trigger-function'** # 4. For trigger type select **'Cloud Storage'** # 5. For event type select '**Finalize/Create'** # 6. For bucket, **specify the input bucket** you created earlier # # Important: be sure to select the input bucket and not the output bucket to avoid an endless triggering loop) # ### populate index.js # Complete the four required constants defined below in index.js code and **paste it into the Cloud Function editor** (the js code will not run in this notebook). The constants are: # - PROJECT_ID # - CLIENT_ID (from earlier) # - WEBSERVER_ID (part of Airflow webserver URL) # - DAG_NAME (GcsToBigQueryTriggered) # + 'use strict'; const fetch = require('node-fetch'); const FormData = require('form-data'); /** * Triggered from a message on a Cloud Storage bucket. * * IAP authorization based on: * https://stackoverflow.com/questions/45787676/how-to-authenticate-google-cloud-functions-for-access-to-secure-app-engine-endpo * and * https://cloud.google.com/iap/docs/authentication-howto * * @param {!Object} data The Cloud Functions event data. * @returns {Promise} */ exports.triggerDag = async data => { // Fill in your Composer environment information here. 
// The project that holds your function const PROJECT_ID = 'your-project-id'; // Navigate to your webserver's login page and get this from the URL const CLIENT_ID = 'your-iap-client-id'; // This should be part of your webserver's URL: // {tenant-project-id}.appspot.com const WEBSERVER_ID = 'your-tenant-project-id'; // The name of the DAG you wish to trigger const DAG_NAME = 'GcsToBigQueryTriggered'; // Other constants const WEBSERVER_URL = `https://${WEBSERVER_ID}.appspot.com/api/experimental/dags/${DAG_NAME}/dag_runs`; const USER_AGENT = 'gcf-event-trigger'; const BODY = {conf: JSON.stringify(data)}; // Make the request try { const iap = await authorizeIap(CLIENT_ID, PROJECT_ID, USER_AGENT); return makeIapPostRequest( WEBSERVER_URL, BODY, iap.idToken, USER_AGENT, iap.jwt ); } catch (err) { throw new Error(err); } }; /** * @param {string} clientId The client id associated with the Composer webserver application. * @param {string} projectId The id for the project containing the Cloud Function. * @param {string} userAgent The user agent string which will be provided with the webserver request. */ const authorizeIap = async (clientId, projectId, userAgent) => { const SERVICE_ACCOUNT = `${<EMAIL>`; const JWT_HEADER = Buffer.from( JSON.stringify({alg: 'RS256', typ: 'JWT'}) ).toString('base64'); let jwt = ''; let jwtClaimset = ''; // Obtain an Oauth2 access token for the appspot service account const res = await fetch( `http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/${SERVICE_ACCOUNT}/token`, { headers: {'User-Agent': userAgent, 'Metadata-Flavor': 'Google'}, } ); const tokenResponse = await res.json(); if (tokenResponse.error) { return Promise.reject(tokenResponse.error); } const accessToken = tokenResponse.access_token; const iat = Math.floor(new Date().getTime() / 1000); const claims = { iss: SERVICE_ACCOUNT, aud: 'https://www.googleapis.com/oauth2/v4/token', iat: iat, exp: iat + 60, target_audience: clientId, }; jwtClaimset = Buffer.from(JSON.stringify(claims)).toString('base64'); const toSign = [JWT_HEADER, jwtClaimset].join('.'); const blob = await fetch( `https://iam.googleapis.com/v1/projects/${projectId}/serviceAccounts/${SERVICE_ACCOUNT}:signBlob`, { method: 'POST', body: JSON.stringify({ bytesToSign: Buffer.from(toSign).toString('base64'), }), headers: { 'User-Agent': userAgent, Authorization: `Bearer ${accessToken}`, }, } ); const blobJson = await blob.json(); if (blobJson.error) { return Promise.reject(blobJson.error); } // Request service account signature on header and claimset const jwtSignature = blobJson.signature; jwt = [JWT_HEADER, jwtClaimset, jwtSignature].join('.'); const form = new FormData(); form.append('grant_type', 'urn:ietf:params:oauth:grant-type:jwt-bearer'); form.append('assertion', jwt); const token = await fetch('https://www.googleapis.com/oauth2/v4/token', { method: 'POST', body: form, }); const tokenJson = await token.json(); if (tokenJson.error) { return Promise.reject(tokenJson.error); } return { jwt: jwt, idToken: tokenJson.id_token, }; }; /** * @param {string} url The url that the post request targets. * @param {string} body The body of the post request. * @param {string} idToken Bearer token used to authorize the iap request. * @param {string} userAgent The user agent to identify the requester. 
*/ const makeIapPostRequest = async (url, body, idToken, userAgent) => { const res = await fetch(url, { method: 'POST', headers: { 'User-Agent': userAgent, Authorization: `Bearer ${idToken}`, }, body: JSON.stringify(body), }); if (!res.ok) { const err = await res.text(); throw new Error(err); } }; # - # ### populate package.json # Copy and paste the below into **package.json** { "name": "nodejs-docs-samples-functions-composer-storage-trigger", "version": "0.0.1", "dependencies": { "form-data": "^2.3.2", "node-fetch": "^2.2.0" }, "engines": { "node": ">=8.0.0" }, "private": true, "license": "Apache-2.0", "author": "Google Inc.", "repository": { "type": "git", "url": "https://github.com/GoogleCloudPlatform/nodejs-docs-samples.git" }, "devDependencies": { "@google-cloud/nodejs-repo-tools": "^3.3.0", "mocha": "^6.0.0", "proxyquire": "^2.1.0", "sinon": "^7.2.7" }, "scripts": { "test": "mocha test/*.test.js --timeout=20000" } } # 10. For **Function to execute**, specify **triggerDag** (note: case sensitive) # 11. Select **Create** # ## Upload CSVs and Monitor # 1. Practice uploading and editing CSVs named usa_names.csv into your input bucket (note: the DAG filters to only ingest CSVs with 'usa_names.csv' as the filepath. Adjust this as needed in the DAG code.) # 2. Troubleshoot Cloud Function call errors by monitoring the [logs](https://console.cloud.google.com/logs/viewer?). In the below screenshot we filter in Logging for our most recent Dataflow job and are scrolling through to ensure the job is processing and outputting records to BigQuery # # ![Dataflow logging](./img/dataflow_logging.jpg "Dataflow logging") # # 3. Troubleshoot Airflow workflow errors by monitoring the **Browse** > **DAG Runs** # ## Congratulations! # You’ve have completed this advanced lab on triggering a workflow with a Cloud Function.
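# ### Optional: retrigger the pipeline from this notebook
# A hedged convenience sketch, not part of the original lab: copy the bundled sample file into the
# input bucket again, which should fire the Cloud Function and start another GcsToBigQueryTriggered
# DAG run. It assumes `PROJECT` is still defined from the setup cells and that
# `resources/usa_names.csv` is present locally, as used when the buckets were created.

# +
import subprocess

subprocess.check_call(['gsutil', 'cp', 'resources/usa_names.csv',
                       'gs://' + PROJECT + '_input/usa_names.csv'])
# A new run should appear shortly under Browse > DAG Runs in the Airflow UI.
# -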
courses/data-engineering/demos/composer_gcf_trigger/composertriggered.ipynb
# + # Code is from <NAME> et al, # "Bayesian Modeling and Comptuation In Python" # https://github.com/aloctavodia/BMCP/blob/master/Code/chp_3_5/splines.py import numpy as np import matplotlib.pyplot as plt from scipy import stats try: from patsy import bs, dmatrix except ModuleNotFoundError: # %pip install -qq patsy from patsy import bs, dmatrix try: import probml_utils as pml except ModuleNotFoundError: # %pip install -qq git+https://github.com/probml/probml-utils.git import probml_utils as pml x = np.linspace(0.0, 1.0, 20) knots = [0.25, 0.5, 0.75] B0 = dmatrix("bs(x, knots=knots, degree=0, include_intercept=True) - 1", {"x": x, "knots": knots}) B1 = dmatrix("bs(x, knots=knots, degree=1, include_intercept=True) - 1", {"x": x, "knots": knots}) B3 = dmatrix("bs(x, knots=knots, degree=3, include_intercept=True) - 1", {"x": x, "knots": knots}) _, axes = plt.subplots(1, 3, sharey=True) for idx, (B, title, ax) in enumerate( zip((B0, B1, B3), ("Piecewise constant", "Piecewise linear", "Cubic spline"), axes) ): # ax.imshow(B, cmap="cet_gray_r", aspect="auto") ax.imshow(B, cmap="Greys", aspect="auto") ax.set_xticks(np.arange(B.shape[1])) ax.set_yticks(np.arange(B.shape[0])) ax.set_yticklabels([np.round(v, 1) for v in x]) ax.spines["left"].set_visible(False) ax.spines["bottom"].set_visible(False) ax.set_title(title) axes[1].set_xlabel("B-splines") axes[0].set_ylabel("x", rotation=0, labelpad=15) pml.savefig("splines_basis_heatmap.pdf", dpi=300) titles = ["Piecewise constant", "Piecewise linear", "Cubic spline"] Bs = [B0, B1, B3] for i in range(3): B = Bs[i] title = titles[i] fig, ax = plt.subplots() # ax.imshow(B, cmap="cet_gray_r", aspect="auto") ax.imshow(B, cmap="Greys", aspect="auto") ax.set_xticks(np.arange(B.shape[1])) ax.set_yticks(np.arange(B.shape[0])) ax.set_yticklabels([np.round(v, 1) for v in x]) ax.spines["left"].set_visible(False) ax.spines["bottom"].set_visible(False) ax.set_title(title) plt.tight_layout() pml.savefig(f"splines_basis_heatmap{i}.pdf", dpi=300) plt.show()
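# Hedged follow-up sketch (not part of the original BMCP script): the design matrices above are
# normally consumed by a linear model, so fit noisy observations of a smooth function with the
# cubic B-spline basis by ordinary least squares and plot the result. Reuses x, knots, dmatrix,
# np and plt from above.
np.random.seed(0)
y = np.cos(2 * np.pi * x) + np.random.normal(scale=0.2, size=len(x))

B_cubic = np.asarray(
    dmatrix("bs(x, knots=knots, degree=3, include_intercept=True) - 1", {"x": x, "knots": knots})
)
coef, _, _, _ = np.linalg.lstsq(B_cubic, y, rcond=None)  # least-squares spline coefficients

plt.figure()
plt.plot(x, y, "o", label="noisy data")
plt.plot(x, B_cubic @ coef, label="cubic B-spline fit")
plt.legend()
plt.show()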
notebooks/book1/11/splines_basis_heatmap.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # 1. Parameters # + tags=["parameters"] # Defaults simulation_dir = 'simulations/unset' reference_file = 'simulations/reference/reference.fa.gz' # + from pathlib import Path simulation_dir_path = Path(simulation_dir) case_name = str(simulation_dir_path.name) simulation_data_path = simulation_dir_path / 'simulated_data' simulated_variants_file = simulation_data_path / 'haplotypes.vcf.gz' case_name = str(simulation_dir_path.name) index_reads_path = simulation_dir_path / 'index-reads' index_assemblies_path = simulation_dir_path / 'index-assemblies' # - # # 2. Load simulated variants (VCF) # + import vcf import pandas as pd import time reader = vcf.Reader(filename=str(simulated_variants_file)) simulated_df = pd.DataFrame([vars(r) for r in reader]) simulated_df = simulated_df[['CHROM', 'POS', 'REF', 'samples']] simulated_df.head(5) # - # ## 2.1. Construct sample:variant identifiers # + before = time.time() # Explode dataframe so that we have one row per sample simulated_df_exploded = simulated_df.explode('samples') simulated_df_exploded['SAMPLE'] = simulated_df_exploded['samples'].apply(lambda x: x.sample) # Extrat ALT for each sample simulated_df_exploded['ALT'] = simulated_df_exploded['samples'].apply(lambda x: x.gt_bases) # Only keep mutations/those where REF and ALT are different simulated_df_exploded = simulated_df_exploded[simulated_df_exploded['REF'] != simulated_df_exploded['ALT']] # Create SPDI-like identifier with Sample name for comparison (SAMPLE:CHROM:POS:REF:ALT) simulated_df_exploded['ID'] = simulated_df_exploded.apply( lambda x: f"{x['SAMPLE']}:{x['CHROM']}:{x['POS']}:{x['REF']}:{x['ALT']}", axis='columns') simulated_variants = simulated_df_exploded['ID'].reset_index(drop=True) expected_sample_variants = set(simulated_variants) after = time.time() print(f'There are {len(expected_sample_variants)} expected sample/variant pairs like: ' f'{list(simulated_variants[0:3])}') print(f'Took {(after - before)/60:0.1f} minutes') # - simulated_df_exploded # ## 1.2. Load reference genome for use with positive/negative calculations # + import gzip from Bio import SeqIO with gzip.open(reference_file, mode='rt') as f: sequences = list(SeqIO.parse(f, 'fasta')) reference_length = len(sequences[0]) sample_names = set(simulated_df_exploded.groupby('SAMPLE').agg({'SAMPLE': 'count'}).index) sample_names = sample_names - {'reference'} number_samples = len(sample_names) print(f'Reference length: {reference_length}') print(f'Number samples: {number_samples}') # - # # 3. Load detected variants # # ## 3.1. 
Load from reads index # + from typing import Set import genomics_data_index.api as gdi def get_sample_variant_idenifiers(index_dir: Path) -> Set[str]: db = gdi.GenomicsDataIndex.connect(index_dir) q = db.samples_query() actual_sample_variants = set() for sample in q.tolist(): sample_features = q.isa(sample).features_summary().reset_index() sample_features_set = set(sample_features['Mutation'].apply(lambda x: f"{sample}:{x}")) actual_sample_variants.update(sample_features_set) return actual_sample_variants before = time.time() actual_sample_variants_reads = get_sample_variant_idenifiers(index_reads_path) after = time.time() print(f'There are {len(actual_sample_variants_reads)} actual sample/variant pairs (reads)') print(f'Took {(after - before)/60:0.1f} minutes') print(f'Actual variants look like: {list(actual_sample_variants_reads)[0:5]}') # - # ## 2.2. Load from assemblies index # + before = time.time() actual_sample_variants_assemblies = get_sample_variant_idenifiers(index_assemblies_path) after = time.time() print(f'There are {len(actual_sample_variants_assemblies)} actual sample/variant pairs (assemblies)') print(f'Took {(after - before)/60:0.1f} minutes') # - # # 3. Compare expected/actual variants # # ## 3.1. Compare with reads index # + def compare_expected_actual(name: str, actual_sample_variants: Set[str]) -> pd.DataFrame: number_expected = len(expected_sample_variants) number_actual = len(actual_sample_variants) true_positives = actual_sample_variants & expected_sample_variants false_negatives = expected_sample_variants - actual_sample_variants false_positives = actual_sample_variants - expected_sample_variants # I cannot get true negatives since I would need to know the total number of negatives (i.e., all possible # variants with respect to the reference genome that were not simulated). This would be a finite, but # very very large number (and I haven't worked out how to calculate it). # For example, one negative is Sample:1:A:T, another negative is Sample:1:AG:TT, and so on for the entire # length of the genome. # true_negatives = set() sensitivity = len(true_positives) / (len(true_positives) + len(false_negatives)) precision = len(true_positives) / (len(true_positives) + len(false_positives)) f1_score = 2 * len(true_positives) / (2 * len(true_positives) + len(false_positives) + len(false_negatives)) # Since true_negatives are a very large number, then for all intents and purposes # specificity will be very very close to 1. So instead of trying to calculate it # I just call it ~1, but it's also not very useful because of this. #specificity = len(true_negatives) / (len(true_negatives) + len(false_positives)) specificity = '~1 (not calculated)' print(f'Sensitivity: {sensitivity:0.4f}') print(f'Specificity: {specificity}') print(f'Precision: {precision:0.4f}') print(f'F1 Score: {f1_score:0.4f}') comparison_df = pd.DataFrame([{ 'Name': name, 'True Positives': len(true_positives), 'True Negatives': pd.NA, 'False Positives': len(false_positives), 'False Negatives': len(false_negatives), 'Sensitivity': sensitivity, 'Specificity': specificity, 'Precision': precision, 'F1 Score': f1_score, }]) data = { 'tp': true_positives, 'fp': false_positives, 'fn': false_negatives } return comparison_df, data comparison_reads_df, data_reads = compare_expected_actual(name=f'{case_name} reads', actual_sample_variants=actual_sample_variants_reads) comparison_reads_df # - # ## 3.2. 
Compare with assemblies index comparison_assemblies_df, data_assemblies = compare_expected_actual(name=f'{case_name} assemblies', actual_sample_variants=actual_sample_variants_assemblies) comparison_assemblies_df # ## 3.3. Combine results results_df = pd.concat([comparison_reads_df, comparison_assemblies_df]) results_df results_df_output = simulation_dir_path / 'variants-comparison.tsv' results_df.to_csv(results_df_output, sep='\t', index=False)
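# ## 3.4. Optional: inspect false negatives (added sketch)
# A hedged extra step that is not part of the original template: split the false-negative
# identifiers from the reads index back into their SAMPLE:CHROM:POS:REF:ALT parts so that missed
# calls can be counted per sample. Assumes `data_reads` from section 3.1 is still in scope.

# +
def identifiers_to_frame(identifiers):
    rows = [dict(zip(['SAMPLE', 'CHROM', 'POS', 'REF', 'ALT'], i.split(':', 4)))
            for i in identifiers]
    return pd.DataFrame(rows, columns=['SAMPLE', 'CHROM', 'POS', 'REF', 'ALT'])

false_negatives_reads_df = identifiers_to_frame(data_reads['fn'])
false_negatives_reads_df.groupby('SAMPLE').size().sort_values(ascending=False).head()
# -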
evaluations/simulation/template-4-variants-comparison.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Local Installation # # **Prerequisites** # # - None! # # # **Outcomes** # # - Install Python on your computer # - Know what the Jupyter notebook is # - Be able to start the Jupyter notebook on your computer # - Be able to open specific notebooks from your computer # ## Installation # # Visit [continuum.io](https://www.anaconda.com/download) and download the # Anaconda Python distribution for your operating system (Windows/Mac OS/Linux). # Be sure to download the Python 3.X (where X is some number greater than or # equal to 7) version, not the 2.7 version # # Now, buckle up. It’s time to begin. # ### Intro to Jupyter # # - Most of the material will be administered using # [Jupyter](http://jupyter.org/) notebooks # - It is a file format that allows us to mix text, code, and output # - Ideal for presentations/teaching and presenting results # - Has three components: # 1. **Kernel** that is responsible for executing code in a specific programming language (you can ignore this) # 2. **GUI** that runs in your web browser (what we will interact with) # 3. **Server** that manages communication between GUI and kernels (start this once per session) # ## Starting Jupyter # # Start the Jupyter notebook by following these steps # # 1. Open a terminal # - Windows: Open “Anaconda Command Prompt” (perhaps via search) # - Mac OS: Open terminal (command+space => type terminal => press enter) # - Linux: You probably know how, but, if not, then ask for help # 1. Type `jupyter lab` in the terminal and press enter # 1. Keep the terminal open the whole time you’re working in the notebook # 1. If your web browser doesn’t open, read what was written in terminal and look for something that looks like # + [markdown] hide-output=false # ```markdown # Copy/paste this URL into your browser when you connect for the first time, # to login with a token: # http://localhost:8888/?token=9a39d3741a4f0b200c6e4b07d8e5c04a089899cddc72e7f8 # ``` # # - # and copy/paste the line starting with `http://` into your web browser. # ## Opening this file # # - Once web browser is open, you should see something like this: # # # <img src="https://storage.googleapis.com/ds4e/_static/pyfun/jupyter_lab.png" alt="jupyter\_lab.png" style=""> # # # - The filenames will be different # - It *should* list the contents of your personal home directory (folder) # # **Download materials** # # - To downlaod all the materials for this class visit [http://localhost:8888/git-pull?repo=https%3A%2F%2Fgithub.com%2Fsglyon%2FUCF-MSDA-workshop&urlpath=lab%2Ftree%2FUCF-MSDA-workshop%2F](http://localhost:8888/git-pull?repo=https%3A%2F%2Fgithub.com%2Fsglyon%2FUCF-MSDA-workshop&urlpath=lab%2Ftree%2FUCF-MSDA-workshop%2F) (will post to slack channel also) # # # <blockquote> # # **Check for understanding** # # Open this file in Jupyter by navigating to the `2019-06-24__GettingStarted` and # clicking the `local_install.ipynb` file # # # </blockquote>
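# ## Optional: verify your installation
# A small added check, not part of the original instructions: once Jupyter is running, paste this
# into a notebook cell to confirm you are on the Anaconda Python 3 installation described above
# (3.7 or newer).
import sys

print(sys.version)
assert sys.version_info >= (3, 7), "Expected Python 3.7+; reinstall Anaconda with Python 3"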
Year19-20/2019-06-24__GettingStarted/03_local_install.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import logging logging.basicConfig(filename=u"main.log", format=u'%(filename)s[LINE:%(lineno)d]# %(levelname)-8s [%(asctime)s] %(message)s', level=logging.DEBUG) logging.debug( u'debug' ) logging.info( u'info' ) import warnings warnings.filterwarnings('ignore') # + # import re # import time # from bs4 import BeautifulSoup # import requests # import json # #import Levenshtein as lv # import numpy as np # import spacy # #for findDOI # import feedparser # import jellyfish # import copy # import os # - import pandas as pd from gensim import corpora, models, similarities import numpy as np import operator as op from rss import rss # список рассылки from findDOI import findDOI # функция поиска doi по названию from ALT import altmetrics # функция поиска альтметрик (нужно понять, какие альтметрики нам нужны) from stoplist import stoplist #стоп-лист from rssParser import Parser # класс, который парсит рсс, в датафрейм, - предлагаю его использовать для наполненния from findJournal import findJournal # функция поиска названия журнала по названию статьи # p = Parser(rss) # df = p.main() #df.to_csv('example_table.csv') df = pd.read_csv('example_table.csv') df.head() # example findJournal(df.loc[1,]["article_name"]) import spacy nlp = spacy.load('en', parser=False, ner=False) # ### Models (gensim) new_path = '/home/BIOCAD/chuvakin/serge/science_search/pubmed_model/' new_path_CUB = '/home/BIOCAD/chuvakin/serge/science_search/CUB_models/' # + # загружаем словарь, векторное пространство, модель Lsi (вектороное пространство) # общий корпус натренированный на pubmed dictionary = corpora.Dictionary.load(new_path+'pubmed5.dict') #Here and later - the first one is PubMed-based LSI-object, other - CUB-based LSI-objects corpus = corpora.MmCorpus(new_path+'pubmed5.mm') lsi = models.LsiModel.load(new_path+'pubmed5.lsi') # химики куб dictionary_CUB_chem = corpora.Dictionary.load(new_path_CUB+"chem_CUB.dict") lsi_CUB_chem = models.LsiModel.load(new_path_CUB+"chem_CUB.lsi") corpus_CUB_chem = corpora.MmCorpus(new_path_CUB+"chem_CUB.mm") # онко куб dictionary_CUB_onco = corpora.Dictionary.load(new_path_CUB+"cancer_CUB.dict") lsi_CUB_onco = models.LsiModel.load(new_path_CUB+"cancer_CUB.lsi") corpus_CUB_onco = corpora.MmCorpus(new_path_CUB+"cancer_CUB.mm") # аутоимунные куб dictionary_CUB_aiz = corpora.Dictionary.load(new_path_CUB+"aiz_CUB.dict") lsi_CUB_aiz = models.LsiModel.load(new_path_CUB+"aiz_CUB.lsi") corpus_CUB_aiz = corpora.MmCorpus(new_path_CUB+"aiz_CUB.mm") # инфекции куб dictionary_CUB_inf = corpora.Dictionary.load(new_path_CUB+"infect_CUB.dict") lsi_CUB_inf = models.LsiModel.load(new_path_CUB+"infect_CUB.lsi") corpus_CUB_inf = corpora.MmCorpus(new_path_CUB+"infect_CUB.mm") # офтальмология куб dictionary_CUB_eye = corpora.Dictionary.load(new_path_CUB+"eye_CUB.dict") lsi_CUB_eye = models.LsiModel.load(new_path_CUB+"eye_CUB.lsi") corpus_CUB_eye = corpora.MmCorpus(new_path_CUB+"eye_CUB.mm") # гететические куб dictionary_CUB_gene = corpora.Dictionary.load(new_path_CUB+"gene_CUB.dict") lsi_CUB_gene = models.LsiModel.load(new_path_CUB+"gene_CUB.lsi") corpus_CUB_gene = corpora.MmCorpus(new_path_CUB+"gene_CUB.mm") # - # ### indexes index = similarities.MatrixSimilarity(lsi[corpus]) #Indexes index_CUB_chem = similarities.MatrixSimilarity(lsi_CUB_chem[corpus_CUB_chem]) index_CUB_onco = 
similarities.MatrixSimilarity(lsi_CUB_onco[corpus_CUB_onco]) index_CUB_aiz = similarities.MatrixSimilarity(lsi_CUB_aiz[corpus_CUB_aiz]) index_CUB_inf = similarities.MatrixSimilarity(lsi_CUB_inf[corpus_CUB_inf]) index_CUB_eye = similarities.MatrixSimilarity(lsi_CUB_eye[corpus_CUB_eye]) index_CUB_gene = similarities.MatrixSimilarity(lsi_CUB_gene[corpus_CUB_gene]) # ----- # + [print(i) for i in lsi.print_topics()] print( ''' 0) глаза 1) онко 2) геннетические 3) аутоимунные 4) инфекции ''') # - from bs4 import BeautifulSoup import re soup = BeautifulSoup(df.iloc[1,]['summary'], 'html.parser') article = soup.select('div p')[0].text #Declare some functions def lemmatization(text, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']): """https://spacy.io/api/annotation""" sentence = [] doc_lem = nlp(text) for token in doc_lem: if token.pos_ in allowed_postags: sentence.append(token.lemma_) else: sentence.append(token) docs_out = " ".join(str(i).lower() for i in sentence) docs_out = re.sub(r' - ', '-', docs_out, flags=re.I) docs_out = re.sub(r'[^A-Za-z0-9\-]', ' ', docs_out) docs_out = re.sub(r' \d+ ', ' ', docs_out) docs_out = re.sub(r'\s+', ' ', docs_out) del doc_lem return docs_out # + from enhancment_words import cancerdict, aizdict, infectdict, eyedict, genedict, chemdict article = soup.select('div p')[0].text def scores_themes(txt, cancerdict=cancerdict, aizdict=aizdict, infectdict=infectdict, eyedict=eyedict, genedict=genedict, chemdict=chemdict, short=False): ''' Note that the order of multipliers should correspond the order d rule. Function returns following values: - theme: topic based on cosine similarity - chem_multiplier: chemical score, which is number of token occurences in chemical dict - score_pubmed: lsi score, based on svd decomposition - score_CUB: wierd number, seems to be constant all the time - theme_lsi: theme based in svd decomposition NB: lsi model returns empty list on too short texts, therefore don't forget to check short=True TODO: remove extra scores. Focus on just appropriate values. Right order of themes. 
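    Added parameter notes (not in the original docstring):
    - txt: abstract text of one article; it is lower-cased and whitespace-tokenised inside.
    - cancerdict/aizdict/infectdict/eyedict/genedict/chemdict: keyword lists per therapeutic area,
      used to build the "enhancement" multipliers.
    - short: pass True for texts that are too short for the LSI projection (see the note above).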
''' assert type(txt)==str, 'на вход подается не текст' txt = txt.lower() # lower register words = corpora.Dictionary([txt.split()]) # own dictionary ids = words.token2id # dictionary of tokens # lists of ids of enhancement words cancer_idlist = [ids[i] for i in list(set(ids.keys()) & set(cancerdict)) if i in ids] aiz_idlist = [ids[i] for i in list(set(ids.keys()) & set(aizdict)) if i in ids] infect_idlist = [ids[i] for i in list(set(ids.keys()) & set(infectdict)) if i in ids] eye_idlist = [ids[i] for i in list(set(ids.keys()) & set(eyedict)) if i in ids] gene_idlist = [ids[i] for i in list(set(ids.keys()) & set(genedict)) if i in ids] chem_idlist = [ids[i] for i in list(set(ids.keys()) & set(chemdict)) if i in ids] # vectorize article abstract words = words.doc2bow(txt.split()) # lists of multipliers cancer_multiplier = sum([e[1] for e in words if e[0] in cancer_idlist]) aiz_multiplier = sum([e[1] for e in words if e[0] in aiz_idlist]) infect_multiplier = sum([e[1] for e in words if e[0] in infect_idlist]) eye_multiplier = sum([e[1] for e in words if e[0] in eye_idlist]) gene_multiplier = sum([e[1] for e in words if e[0] in gene_idlist]) chem_multiplier = sum([e[1] for e in words if e[0] in chem_idlist]) # one multiplier multipliers = [eye_multiplier, cancer_multiplier , gene_multiplier, aiz_multiplier, infect_multiplier] multipliers = np.array([(1+0.08*y) for y in multipliers]) if short==False: # preproc txt = lemmatization(txt, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']) txt = [x for x in txt.split() if x not in stoplist] # preporation for model estimation vec_bow = dictionary.doc2bow(txt) vec_bow_chem = dictionary_CUB_chem.doc2bow(txt) # modeling vec_lsi = lsi[vec_bow] # convert the query to LSI space vec_lsi_chem = lsi_CUB_chem[vec_bow_chem] # find similarities sims = index[vec_lsi] * multipliers sims_chem = index_CUB_chem[vec_lsi_chem] # наверное это лишнее # темы d = dict(zip(['глаза', 'онко', 'геннетические', 'аутоимунные', 'инфекции'], sims)) # find most relevant topic, which is more than 0.65 score similarity try: theme, score_pubmed = max(list(filter(lambda x: x[1]>0.65, d.items())), key=op.itemgetter(1)) except: theme, score_pubmed = 'uncategorized', np.nan score_chem = sum(sims_chem) # зачем это?? # define theme by lsi_model theme_lsi, score_lsi = sorted([(x[0],x[1]*y) for x, y in zip(vec_lsi, multipliers)], key=op.itemgetter(1), reverse=True)[0] theme_lsi = dict(enumerate(d.keys()))[theme_lsi] # count similirities with cub (Всегда отдает одно число!) 
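        # Note (added, translating the comment above): "count similarities with CUB -- it always
        # returns a single number!". The branch below picks the theme-specific CUB dictionary,
        # LSI model and similarity index, projects the abstract into that space, and sums the
        # similarities into score_CUB.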
if theme == "онко": vec_bow_CUB = dictionary_CUB_onco.doc2bow(txt) vec_lsi_CUB = lsi_CUB_onco[vec_bow_CUB] sims_CUB = index_CUB_onco[vec_lsi_CUB] elif theme == "аутоимунные": vec_bow_CUB = dictionary_CUB_aiz.doc2bow(txt) vec_lsi_CUB = lsi_CUB_aiz[vec_bow_CUB] sims_CUB = index_CUB_aiz[vec_lsi_CUB] elif theme == "инфекции": vec_bow_CUB = dictionary_CUB_inf.doc2bow(txt) vec_lsi_CUB = lsi_CUB_inf[vec_bow_CUB] sims_CUB = index_CUB_inf[vec_lsi_CUB] elif theme == "глаза": vec_bow_CUB = dictionary_CUB_eye.doc2bow(txt) vec_lsi_CUB = lsi_CUB_eye[vec_bow_CUB] sims_CUB = index_CUB_eye[vec_lsi_CUB] elif theme == "геннетические": vec_bow_CUB = dictionary_CUB_gene.doc2bow(txt) vec_lsi_CUB = lsi_CUB_gene[vec_bow_CUB] sims_CUB = index_CUB_gene[vec_lsi_CUB] else: sims_CUB = [0,0,0] score_CUB = sum(sims_CUB) else: score_CUB = "too short for similarities" theme = max(zip(['глаза', 'онко', 'геннетические', 'аутоимунные', 'инфекции'], multipliers), key=op.itemgetter(1))[0] theme_lsi = 'undefined' score_pubmed = 'undefined' return theme, chem_multiplier, score_pubmed, score_CUB, theme_lsi # - from scores import * scores_themes(article)
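# ### Batch classification (added sketch)
# A hedged sketch of the natural next step, not present in the original notebook: run
# scores_themes over every RSS item in `df`, extracting the abstract from the HTML `summary`
# field the same way as above, and keep the predicted theme and chemistry score as new columns.
# Very short abstracts may need the `short=True` path described in the function's docstring.

# +
def classify_row(summary_html):
    soup_row = BeautifulSoup(summary_html, 'html.parser')
    paragraphs = soup_row.select('div p')
    if not paragraphs:
        return pd.Series({'theme': 'uncategorized', 'chem_score': 0})
    theme, chem_score, _, _, _ = scores_themes(paragraphs[0].text)
    return pd.Series({'theme': theme, 'chem_score': chem_score})

df[['theme', 'chem_score']] = df['summary'].apply(classify_row)
df[['article_name', 'theme', 'chem_score']].head()
# -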
main/main(1).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # MOAI 2020 Body Morphometry AI Segmentation Online Challenge # Install Segmentation Model [link](https://segmentation-models.readthedocs.io/en/latest/api.html#linknet) # !pip install git+https://github.com/qubvel/segmentation_models # Set up Image Augmentation # + import numpy as np import matplotlib.pyplot as plt from tensorflow.keras.preprocessing.image import ImageDataGenerator MASK_COLORS = [ "red", "green", "blue", "yellow", "magenta", "cyan" ] # Runtime data augmentation def get_augmented( X_train, Y_train, X_val=None, Y_val=None, batch_size=32, s=10, data_gen_args=dict( rotation_range=10.0, # width_shift_range=0.02, height_shift_range=0.02, shear_range=5, # zoom_range=0.3, horizontal_flip=True, vertical_flip=False, fill_mode="constant", ), ): """[summary] Args: X_train (numpy.ndarray): [description] Y_train (numpy.ndarray): [description] X_val (numpy.ndarray, optional): [description]. Defaults to None. Y_val (numpy.ndarray, optional): [description]. Defaults to None. batch_size (int, optional): [description]. Defaults to 32. seed (int, optional): [description]. Defaults to 0. data_gen_args ([type], optional): [description]. Defaults to dict(rotation_range=10.0,# width_shift_range=0.02,height_shift_range=0.02,shear_range=5,# zoom_range=0.3,horizontal_flip=True,vertical_flip=False,fill_mode="constant",). Returns: [type]: [description] """ # Train data, provide the same seed and keyword arguments to the fit and flow methods X_datagen = ImageDataGenerator(**data_gen_args) Y_datagen = ImageDataGenerator(**data_gen_args) X_datagen.fit(X_train, augment=True, seed=s) Y_datagen.fit(Y_train, augment=True, seed=s) X_train_augmented = X_datagen.flow( X_train, batch_size=batch_size, shuffle=True, seed=s ) Y_train_augmented = Y_datagen.flow( Y_train, batch_size=batch_size, shuffle=True, seed=s ) train_generator = zip(X_train_augmented, Y_train_augmented) if not (X_val is None) and not (Y_val is None): # Validation data, no data augmentation, but we create a generator anyway X_datagen_val = ImageDataGenerator(**data_gen_args) Y_datagen_val = ImageDataGenerator(**data_gen_args) X_datagen_val.fit(X_val, augment=False, seed=s) Y_datagen_val.fit(Y_val, augment=False, seed=s) X_val_augmented = X_datagen_val.flow( X_val, batch_size=batch_size, shuffle=False, seed=s ) Y_val_augmented = Y_datagen_val.flow( Y_val, batch_size=batch_size, shuffle=False, seed=s ) # combine generators into one which yields image and masks val_generator = zip(X_val_augmented, Y_val_augmented) return train_generator, val_generator else: return train_generator def plot_segm_history(history, metrics=["iou", "val_iou"], losses=["loss", "val_loss"]): """[summary] Args: history ([type]): [description] metrics (list, optional): [description]. Defaults to ["iou", "val_iou"]. losses (list, optional): [description]. Defaults to ["loss", "val_loss"]. 
""" # summarize history for iou plt.figure(figsize=(12, 6)) for metric in metrics: plt.plot(history.history[metric], linewidth=3) plt.suptitle("metrics over epochs", fontsize=20) plt.ylabel("metric", fontsize=20) plt.xlabel("epoch", fontsize=20) # plt.yticks(np.arange(0.3, 1, step=0.02), fontsize=35) # plt.xticks(fontsize=35) plt.legend(metrics, loc="center right", fontsize=15) plt.show() # summarize history for loss plt.figure(figsize=(12, 6)) for loss in losses: plt.plot(history.history[loss], linewidth=3) plt.suptitle("loss over epochs", fontsize=20) plt.ylabel("loss", fontsize=20) plt.xlabel("epoch", fontsize=20) # plt.yticks(np.arange(0, 0.2, step=0.005), fontsize=35) # plt.xticks(fontsize=35) plt.legend(losses, loc="center right", fontsize=15) plt.show() def mask_to_red(mask): """ Converts binary segmentation mask from white to red color. Also adds alpha channel to make black background transparent. Args: mask (numpy.ndarray): [description] Returns: numpy.ndarray: [description] """ img_size = mask.shape[0] c1 = mask.reshape(img_size, img_size) c2 = np.zeros((img_size, img_size)) c3 = np.zeros((img_size, img_size)) c4 = mask.reshape(img_size, img_size) return np.stack((c1, c2, c3, c4), axis=-1) def mask_to_rgba(mask, color="red"): """ Converts binary segmentation mask from white to red color. Also adds alpha channel to make black background transparent. Args: mask (numpy.ndarray): [description] color (str, optional): Check `MASK_COLORS` for available colors. Defaults to "red". Returns: numpy.ndarray: [description] """ assert(color in MASK_COLORS) assert(mask.ndim==3 or mask.ndim==2) h = mask.shape[0] w = mask.shape[1] zeros = np.zeros((h, w)) ones = mask.reshape(h, w) if color == "red": return np.stack((ones, zeros, zeros, ones), axis=-1) elif color == "green": return np.stack((zeros, ones, zeros, ones), axis=-1) elif color == "blue": return np.stack((zeros, zeros, ones, ones), axis=-1) elif color == "yellow": return np.stack((ones, ones, zeros, ones), axis=-1) elif color == "magenta": return np.stack((ones, zeros, ones, ones), axis=-1) elif color == "cyan": return np.stack((zeros, ones, ones, ones), axis=-1) def plot_imgs( org_imgs, mask_imgs, pred_imgs=None, nm_img_to_plot=10, figsize=4, alpha=0.5, color="red"): """ Image plotting for semantic segmentation data. Last column is always an overlay of ground truth or prediction depending on what was provided as arguments. Args: org_imgs (numpy.ndarray): Array of arrays representing a collection of original images. mask_imgs (numpy.ndarray): Array of arrays representing a collection of mask images (grayscale). pred_imgs (numpy.ndarray, optional): Array of arrays representing a collection of prediction masks images.. Defaults to None. nm_img_to_plot (int, optional): How many images to display. Takes first N images. Defaults to 10. figsize (int, optional): Matplotlib figsize. Defaults to 4. alpha (float, optional): Transparency for mask overlay on original image. Defaults to 0.5. color (str, optional): Color for mask overlay. Defaults to "red". 
""" # NOQA E501 assert(color in MASK_COLORS) if nm_img_to_plot > org_imgs.shape[0]: nm_img_to_plot = org_imgs.shape[0] im_id = 0 org_imgs_size = org_imgs.shape[1] org_imgs = reshape_arr(org_imgs) mask_imgs = reshape_arr(mask_imgs) if not (pred_imgs is None): cols = 4 pred_imgs = reshape_arr(pred_imgs) else: cols = 3 fig, axes = plt.subplots( nm_img_to_plot, cols, figsize=(cols * figsize, nm_img_to_plot * figsize), squeeze=False ) axes[0, 0].set_title("original", fontsize=15) axes[0, 1].set_title("ground truth", fontsize=15) if not (pred_imgs is None): axes[0, 2].set_title("prediction", fontsize=15) axes[0, 3].set_title("overlay", fontsize=15) else: axes[0, 2].set_title("overlay", fontsize=15) for m in range(0, nm_img_to_plot): axes[m, 0].imshow(org_imgs[im_id], cmap=get_cmap(org_imgs)) axes[m, 0].set_axis_off() axes[m, 1].imshow(mask_imgs[im_id], cmap=get_cmap(mask_imgs)) axes[m, 1].set_axis_off() if not (pred_imgs is None): axes[m, 2].imshow(pred_imgs[im_id], cmap=get_cmap(pred_imgs)) axes[m, 2].set_axis_off() axes[m, 3].imshow(org_imgs[im_id], cmap=get_cmap(org_imgs)) axes[m, 3].imshow( mask_to_rgba( zero_pad_mask(pred_imgs[im_id], desired_size=org_imgs_size), color=color, ), cmap=get_cmap(pred_imgs), alpha=alpha, ) axes[m, 3].set_axis_off() else: axes[m, 2].imshow(org_imgs[im_id], cmap=get_cmap(org_imgs)) axes[m, 2].imshow( mask_to_rgba( zero_pad_mask(mask_imgs[im_id], desired_size=org_imgs_size), color=color, ), cmap=get_cmap(mask_imgs), alpha=alpha, ) axes[m, 2].set_axis_off() im_id += 1 plt.show() def zero_pad_mask(mask, desired_size): """[summary] Args: mask (numpy.ndarray): [description] desired_size ([type]): [description] Returns: numpy.ndarray: [description] """ pad = (desired_size - mask.shape[0]) // 2 padded_mask = np.pad(mask, pad, mode="constant") return padded_mask def reshape_arr(arr): """[summary] Args: arr (numpy.ndarray): [description] Returns: numpy.ndarray: [description] """ if arr.ndim == 3: return arr elif arr.ndim == 4: if arr.shape[3] == 3: return arr elif arr.shape[3] == 1: return arr.reshape(arr.shape[0], arr.shape[1], arr.shape[2]) def get_cmap(arr): """[summary] Args: arr (numpy.ndarray): [description] Returns: string: [description] """ if arr.ndim == 3: return "gray" elif arr.ndim == 4: if arr.shape[3] == 3: return "jet" elif arr.shape[3] == 1: return "gray" def get_patches(img_arr, size=256, stride=256): """ Takes single image or array of images and returns crops using sliding window method. If stride < size it will do overlapping. Args: img_arr (numpy.ndarray): [description] size (int, optional): [description]. Defaults to 256. stride (int, optional): [description]. Defaults to 256. 
Raises: ValueError: [description] ValueError: [description] Returns: numpy.ndarray: [description] """ # check size and stride if size % stride != 0: raise ValueError("size % stride must be equal 0") patches_list = [] overlapping = 0 if stride != size: overlapping = (size // stride) - 1 if img_arr.ndim == 3: i_max = img_arr.shape[0] // stride - overlapping for i in range(i_max): for j in range(i_max): # print(i*stride, i*stride+size) # print(j*stride, j*stride+size) patches_list.append( img_arr[ i * stride : i * stride + size, j * stride : j * stride + size ] ) elif img_arr.ndim == 4: i_max = img_arr.shape[1] // stride - overlapping for im in img_arr: for i in range(i_max): for j in range(i_max): # print(i*stride, i*stride+size) # print(j*stride, j*stride+size) patches_list.append( im[ i * stride : i * stride + size, j * stride : j * stride + size, ] ) else: raise ValueError("img_arr.ndim must be equal 3 or 4") return np.stack(patches_list) def plot_patches(img_arr, org_img_size, stride=None, size=None): """ Plots all the patches for the first image in 'img_arr' trying to reconstruct the original image Args: img_arr (numpy.ndarray): [description] org_img_size (tuple): [description] stride ([type], optional): [description]. Defaults to None. size ([type], optional): [description]. Defaults to None. Raises: ValueError: [description] """ # check parameters if type(org_img_size) is not tuple: raise ValueError("org_image_size must be a tuple") if img_arr.ndim == 3: img_arr = np.expand_dims(img_arr, axis=0) if size is None: size = img_arr.shape[1] if stride is None: stride = size i_max = (org_img_size[0] // stride) + 1 - (size // stride) j_max = (org_img_size[1] // stride) + 1 - (size // stride) fig, axes = plt.subplots(i_max, j_max, figsize=(i_max * 2, j_max * 2)) fig.subplots_adjust(hspace=0.05, wspace=0.05) jj = 0 for i in range(i_max): for j in range(j_max): axes[i, j].imshow(img_arr[jj]) axes[i, j].set_axis_off() jj += 1 def reconstruct_from_patches(img_arr, org_img_size, stride=None, size=None): """[summary] Args: img_arr (numpy.ndarray): [description] org_img_size (tuple): [description] stride ([type], optional): [description]. Defaults to None. size ([type], optional): [description]. Defaults to None. 
Raises: ValueError: [description] Returns: numpy.ndarray: [description] """ # check parameters if type(org_img_size) is not tuple: raise ValueError("org_image_size must be a tuple") if img_arr.ndim == 3: img_arr = np.expand_dims(img_arr, axis=0) if size is None: size = img_arr.shape[1] if stride is None: stride = size nm_layers = img_arr.shape[3] i_max = (org_img_size[0] // stride) + 1 - (size // stride) j_max = (org_img_size[1] // stride) + 1 - (size // stride) total_nm_images = img_arr.shape[0] // (i_max ** 2) nm_images = img_arr.shape[0] averaging_value = size // stride images_list = [] kk = 0 for img_count in range(total_nm_images): img_bg = np.zeros( (org_img_size[0], org_img_size[1], nm_layers), dtype=img_arr[0].dtype ) for i in range(i_max): for j in range(j_max): for layer in range(nm_layers): img_bg[ i * stride : i * stride + size, j * stride : j * stride + size, layer, ] = img_arr[kk, :, :, layer] kk += 1 # TODO add averaging for masks - right now it's just overwritting # for layer in range(nm_layers): # # average some more because overlapping 4 patches # img_bg[stride:i_max*stride, stride:i_max*stride, layer] //= averaging_value # # corners: # img_bg[0:stride, 0:stride, layer] *= averaging_value # img_bg[i_max*stride:i_max*stride+stride, 0:stride, layer] *= averaging_value # img_bg[i_max*stride:i_max*stride+stride, i_max*stride:i_max*stride+stride, layer] *= averaging_value # img_bg[0:stride, i_max*stride:i_max*stride+stride, layer] *= averaging_value images_list.append(img_bg) return np.stack(images_list) # - # Lib to read dicom images # + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" from tensorflow.keras.preprocessing.image import load_img import numpy as np import pydicom def transform_to_hu(medical_image, image): hu_image = image * medical_image.RescaleSlope + medical_image.RescaleIntercept hu_image[hu_image < -1024] = -1024 return hu_image def window_image(image, window_center, window_width): window_image = image.copy() image_min = window_center - (window_width / 2) image_max = window_center + (window_width / 2) window_image[window_image < image_min] = image_min window_image[window_image > image_max] = image_max return window_image def resize_normalize(image): image = np.array(image, dtype=np.float64) image -= np.min(image) image /= np.max(image) return image def read_dicom(path, window_widht, window_level): image_medical = pydicom.dcmread(path) image_data = image_medical.pixel_array image_hu = transform_to_hu(image_medical, image_data) image_window = window_image(image_hu.copy(), window_level, window_widht) image_window_norm = resize_normalize(image_window) # image_window_norm = image_window image_window_norm = np.expand_dims(image_window_norm, axis=2) # (512, 512, 1) image_ths = np.concatenate([image_window_norm, image_window_norm, image_window_norm], axis=2) # (512, 512, 3) return image_ths # - # Show examples # + import matplotlib.pylab as plt # specify your image path image_path = "../input/body-morphometry-for-sarcopenia/train/DICOM/case072.dcm" im = read_dicom(image_path, 100, 50) # %pylab inline imgplot = plt.imshow(im) plt.show() # - plt.hist(im.flatten(), color='c') # Read mask values np.unique(im) import cv2 import pydicom import matplotlib.image as mpimg import matplotlib.pyplot as plt import numpy as np im = mpimg.imread("../input/body-morphometry-for-sarcopenia/train/Label/case001.png") im = np.around(im, 8) mask_1 = im <= 0.00784314 mask_2 = im >= 0.01176471 mask_3 = np.logical_and(mask_1, mask_2) mask_3 # 
im = np.expand_dims(im, axis=-1) # %pylab inline imgplot = plt.imshow(im) plt.show() mask_3_int = mask_2.astype(float) imgplot = plt.imshow(mask_3_int) plt.show() # + import os import sys import random import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt from tqdm import tqdm from itertools import chain from skimage.io import imread, imshow, imread_collection, concatenate_images from skimage.transform import resize from skimage.morphology import label from keras.models import Model, load_model from keras.layers import Input from keras.layers.core import Dropout, Lambda from keras.layers.convolutional import Conv2D, Conv2DTranspose from keras.layers.pooling import MaxPooling2D from keras.layers import concatenate from keras.callbacks import EarlyStopping, ModelCheckpoint from keras import backend as K import tensorflow as tf # Set some parameters IMG_WIDTH = 512 IMG_HEIGHT = 512 IMG_CHANNELS = 3 TRAIN_PATH_IMAGE = '../input/body-morphometry-for-sarcopenia/train/DICOM/' TRAIN_PATH_MASKS = '../input/body-morphometry-for-sarcopenia/train/Label/' TEST_PATH = '../input/body-morphometry-for-sarcopenia/test/DICOM/' warnings.filterwarnings('ignore', category=UserWarning, module='skimage') # - # Get train and test IDs train_ids = [f for f in sorted(os.listdir(TRAIN_PATH_IMAGE)) if os.path.isfile(os.path.join(TRAIN_PATH_IMAGE, f))] test_ids = [f for f in sorted(os.listdir(TEST_PATH)) if os.path.isfile(os.path.join(TEST_PATH, f))] # + def get_mask_innest(mask): mask_1 = mask < 0.00784314 mask_2 = mask >= 0.00392157 mask_3 = np.logical_and(mask_1, mask_2) mask_3_int = mask_3.astype(float) return mask_3_int def get_mask_middle(mask): mask_1 = mask < 0.01176471 mask_2 = mask >= 0.00784314 mask_3 = np.logical_and(mask_1, mask_2) mask_3_int = mask_3.astype(float) return mask_3_int def get_mask_outtest(mask): mask_1 = mask < 0.01176471 mask_2 = mask >= 0.01176471 mask_3 = np.logical_and(mask_1, mask_2) mask_3_int = mask_2.astype(float) return mask_3_int # - # Prepare DATA # + # Get and resize train images and masks for region inside X_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.float32) Y_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.float32) print('Getting and resizing train images and masks ... ') sys.stdout.flush() for n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)): if id_ == 'case001.dcm': print(n) path = TRAIN_PATH_IMAGE + id_ img = read_dicom(path, 100, 50) X_train[n] = img mask_path = TRAIN_PATH_MASKS + id_.replace('dcm', 'png') mask = mpimg.imread(mask_path) mask = np.around(mask, 8) m_in = get_mask_innest(mask) m_md = get_mask_middle(mask) m_ot = get_mask_outtest(mask) m_in = np.expand_dims(m_in, axis=-1) m_md = np.expand_dims(m_md, axis=-1) m_ot = np.expand_dims(m_ot, axis=-1) m_fn = np.concatenate([m_in, m_md, m_ot], axis=-1) Y_train[n] = m_fn # Get and resize test images X_test = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.float32) sizes_test = [] print('Getting and resizing test images ... 
') sys.stdout.flush() for n, id_ in tqdm(enumerate(test_ids), total=len(test_ids)): path = TEST_PATH + id_ img = read_dicom(path, 100, 50) X_test[n] = img print('Done!') # + from sklearn.model_selection import train_test_split x_train, x_val, y_train, y_val = train_test_split(X_train, Y_train, test_size=0.1, random_state=0) print("x_train: ", x_train.shape) print("y_train: ", y_train.shape) print("x_val: ", x_val.shape) print("y_val: ", y_val.shape) # - # Callbacks # + from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau model_filename = 'segm_model_eff.h5' reduce_lr = ReduceLROnPlateau(monitor='val_dice_coef', factor=0.5, mode='max', patience=20, min_lr=0.0001) callback_checkpoint = ModelCheckpoint( model_filename, verbose=1, monitor='val_iou', mode = 'max', save_best_only=True, ) early_stop = tf.keras.callbacks.EarlyStopping( monitor='val_iou', patience=50, mode='max' ) # - # Loss function # + from tensorflow.keras import backend as K from tensorflow.keras.losses import binary_crossentropy def dice_coef(y_true, y_pred): y_true_f = K.flatten(y_true) y_pred = K.cast(y_pred, 'float32') y_pred_f = K.cast(K.greater(K.flatten(y_pred), 0.5), 'float32') intersection = y_true_f * y_pred_f score = 2. * K.sum(intersection) / (K.sum(y_true_f) + K.sum(y_pred_f)) return score def dice_loss(y_true, y_pred): smooth = 1. y_true_f = K.flatten(y_true) y_pred_f = K.flatten(y_pred) intersection = y_true_f * y_pred_f score = (2. * K.sum(intersection) + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) return 1. - score def bce_dice_loss(y_true, y_pred): return binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred) def bce_logdice_loss(y_true, y_pred): return binary_crossentropy(y_true, y_pred) - K.log(1. - dice_loss(y_true, y_pred)) # - # Model # + # %env SM_FRAMEWORK=tf.keras from segmentation_models import Unet from segmentation_models import get_preprocessing from segmentation_models.losses import bce_jaccard_loss from segmentation_models.metrics import iou_score BACKBONE = 'efficientnetb7' preprocess_input = get_preprocessing(BACKBONE) model = Unet(BACKBONE, encoder_weights='imagenet', classes=3) model.summary() # + from tensorflow.keras.optimizers import Adam, SGD model.compile( optimizer=Adam(lr=0.0001), loss=bce_logdice_loss, metrics=[iou, iou_thresholded, dice_coef] ) # - train_gen = get_augmented( x_train, y_train, batch_size=2, data_gen_args = dict( rotation_range=15., width_shift_range=0.05, height_shift_range=0.05, shear_range=50, zoom_range=0.2, horizontal_flip=True, vertical_flip=True, fill_mode='constant' )) # Training history = model.fit( train_gen, steps_per_epoch=50, epochs=500, validation_data=(x_val, y_val), callbacks=[callback_checkpoint, reduce_lr, early_stop] ) # + # from tensorflow.keras_unet.utils import plot_segm_history plot_segm_history(history) # - model.load_weights(model_filename) preds_test = np.zeros_like(X_test) for i in range(50): inp = X_test[i,:,:,:] out = model.predict(np.expand_dims(inp, axis=0)) preds_test[i,:,:,:] = out preds_test_t = (preds_test > 0.5).astype(np.uint8) # Post Processing # + def postprocessing(mask): (thresh, blackAndWhiteImage) = cv2.threshold(np.uint8(mask*255), 127, 255, cv2.THRESH_BINARY) im_floodfill = blackAndWhiteImage.copy() h, w = blackAndWhiteImage.shape[:2] mask = np.zeros((h+2, w+2), np.uint8) cv2.floodFill(im_floodfill, mask, (0,0), 255); im_floodfill_inv = cv2.bitwise_not(im_floodfill) im_out = blackAndWhiteImage | im_floodfill_inv return np.float32(im_out/255.0) def remove_small_cts(img): img = 
np.uint8(img*255) # print(dtype(img)) cnts = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) cnts = cnts[0] if len(cnts) == 2 else cnts[1] for c in cnts: area = cv2.contourArea(c) if area < 5000: cv2.drawContours(img, [c], -1, (0,0,0), -1) return np.float32(img/255.0) # + ix = 43 path = TEST_PATH + test_ids[ix] img = read_dicom(path, 160, 50) img.shape out = model.predict(np.expand_dims(img, axis=0)) out[out > 0.5] = 1.0 out[out < 0.5] = 0.0 imshow(img) plt.show() imshow(out[0,:,:,2]) plt.show() # + ix = 43 path = TEST_PATH + test_ids[ix] img = read_dicom(path, 350, 50) out = model.predict(np.expand_dims(img, axis=0)) out[out > 0.5] = 1.0 out[out < 0.5] = 0.0 preds_test_t[ix,:,:,0] = out[0,:,:,0] # - i1 = preds_test_t[ix,:,:,0] i2 = preds_test_t[ix,:,:,1] i3 = preds_test_t[ix,:,:,2] i2 = remove_small_cts(i2) imshow(np.squeeze(i2)) plt.show() # + # imshow(np.squeeze(np.float32(preds_test_t[ix, :, :, 2]))) # plt.show() i1 = postprocessing(i1) i2 = postprocessing(i2) i3 = postprocessing(i3) i2[i1>0.5]=0 i3[i1>0.5]=0 i3[i2>0.5]=0 imshow(np.squeeze(i1)) plt.show() imshow(np.squeeze(i2)) plt.show() imshow(np.squeeze(i3)) plt.show() # - preds_test_t[ix,:,:,0] = i1 preds_test_t[ix,:,:,1] = i2 preds_test_t[ix,:,:,2] = i3 def remove_small_cts(img): img = np.uint8(img*255) # print(dtype(img)) cnts = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) cnts = cnts[0] if len(cnts) == 2 else cnts[1] for c in cnts: area = cv2.contourArea(c) if area < 3000: cv2.drawContours(img, [c], -1, (0,0,0), -1) return np.float32(img/255.0) # + # Run-length encoding stolen from https://www.kaggle.com/rakhlin/fast-run-length-encoding-python\ def rle_encode(mask_image): pixels = mask_image.flatten() # We avoid issues with '1' at the start or end (at the corners of # the original image) by setting those pixels to '0' explicitly. # We do not expect these to be non-zero for an accurate mask, # so this should not harm the score. pixels[0] = 0 pixels[-1] = 0 runs = np.where(pixels[1:] != pixels[:-1])[0] + 2 runs[1::2] = runs[1::2] - runs[:-1:2] return runs def rle_encode_(x): dots = np.where(x.T.flatten() == 1)[0] run_lengths = [] prev = -2 for b in dots: if (b>prev+1): run_lengths.extend((b + 1, 0)) run_lengths[-1] += 1 prev = b return run_lengths def my_encode(mask): x = np.zeros((mask.shape[0], mask.shape[1], 1)) x[mask[:,:,0] > 0.5] = 1 x[mask[:,:,1] > 0.5] = 2 x[mask[:,:,2] > 0.5] = 3 return x def prob_to_rles(x, cutoff=0.5): lab_img = label(x > cutoff) encoded = my_encode(lab_img) imshow(np.squeeze(np.float32(encoded))) plt.show() for i in range(1, 4): yield rle_encode(encoded == i) # - new_test_ids = [] rles = [] for n, id_ in enumerate(test_ids): rle = list(prob_to_rles(preds_test_t[n])) rles.extend(rle) for k in range(len(rle)): new_test_ids.extend([id_[:-4] + '_' + str(k+1)]) # Submission sub = pd.DataFrame() sub['ImageId'] = new_test_ids sub['EncodedPixels'] = pd.Series(rles).apply(lambda x: ' '.join(str(y) for y in x)) sub.to_csv('submission.csv', index=False)
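# A quick round-trip check is a cheap way to catch off-by-one errors in the run-length encoding
# before building the submission. The decoder below is a sketch that is not part of the original
# notebook: it assumes the same row-major flatten used by rle_encode above and masks whose first
# and last pixels are zero (rle_encode forces those pixels to zero anyway).
# +
def rle_decode(runs, shape):
    # runs alternates 1-indexed start positions and run lengths
    runs = np.asarray(runs)
    starts = runs[0::2] - 1          # back to 0-indexed starts
    lengths = runs[1::2]
    flat = np.zeros(np.prod(shape), dtype=np.uint8)
    for s, l in zip(starts, lengths):
        flat[s:s + l] = 1
    return flat.reshape(shape)

# toy mask with zero corners: encoding then decoding should reproduce it exactly
toy = np.zeros((8, 8), dtype=np.uint8)
toy[2:5, 3:6] = 1
assert np.array_equal(rle_decode(rle_encode(toy), toy.shape), toy)
# -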
Body Morphometry.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:mypython3] * # language: python # name: conda-env-mypython3-py # --- # ### scvi must be version 0.6.7 # %reload_ext autoreload # %autoreload 2 # + from scvi.models import VAE, SCANVI from scvi.inference import UnsupervisedTrainer, SemiSupervisedTrainer from sklearn.preprocessing import LabelEncoder from scvi.dataset import AnnDatasetFromAnnData import numpy as np import scvi import scanpy as sc sc.set_figure_params(figsize=(4, 4)) import sys #if True, will install via pypi, else will install from source stable = True IN_COLAB = "google.colab" in sys.modules if IN_COLAB and stable: # !pip install --quiet scvi-tools[tutorials] elif IN_COLAB and not stable: # !pip install --quiet --upgrade jsonschema # !pip install --quiet git+https://github.com/yoseflab/scvi-tools@master#egg=scvi-tools[tutorials] # to autoreload upon editing functions # %load_ext autoreload from matplotlib import rcParams import matplotlib.pyplot as plt # %matplotlib inline from scvi.models import VAE from scvi.inference import UnsupervisedTrainer from sklearn.preprocessing import LabelEncoder from scvi.dataset import AnnDatasetFromAnnData # - print('the latest used version before revision was 0.6.7') scvi.__version__ # + from utils import * # - adata = get_concatenated_dataset() print('datasets loaded...') # Check for counts data layer if 'counts' not in adata.layers: raise TypeError('Adata does not contain a `counts` layer in `adata.layers[`counts`]`') adata adata.obs['study'].value_counts() adata.obs[adata.obs['study'] == 'morse']['patient.id'].value_counts() # ### The batch correction will be attempted by patient adata.obs['study'].value_counts() batch = 'patient.id' labels = 'study' adata.obs['study'].value_counts() # ### Update 03.17.2021 # To verify whether the association between mdm and IPF is due to an oversampling of mdm from Adams, we are now sampling an equal proportion of IPF and control cells from that study. adata.obs # + # print('subsetting...') # ad = subset_anndata(adata, N=adata.shape[0]) # print('done.') # - adata.obs.groupby(['study', 'disease.status']).size() # + import random adams_copd = adata[(adata.obs['study'] == 'adams') & (adata.obs['disease.status'] == 'COPD'),:] adams_ipf = adata[(adata.obs['study'] == 'adams') & (adata.obs['disease.status'] == 'IPF'),:] adams_control = adata[(adata.obs['study'] == 'adams') & (adata.obs['disease.status'] == 'control'),:] # sample N_COPD cells from IPF and from control n_copd = adams_copd.shape[0] random.seed(500) adams_ipf = adams_ipf[adams_ipf.obs.index.isin(set(random.sample(list(adams_ipf.obs.index), n_copd))),:] adams_control = adams_control[adams_control.obs.index.isin(set(random.sample(list(adams_control.obs.index), n_copd))),:] adams_copd.shape, adams_ipf.shape, adams_control.shape adata = adata[adata.obs.study != 'adams',].concatenate(adams_copd).concatenate(adams_ipf).concatenate(adams_control) adata.obs.groupby(['study', 'disease.status']).size() # - ad = adata ad.shape marker_genes = get_marker_genes_ipf() ad.var['hvg.or.marker'] = ad.var['highly_variable'] | ad.var.index.isin(marker_genes['MP.all']) ad.var['hvg.or.marker'].value_counts() # ### Data preparation and iterative training. batch = 'patient.id' # batch = 'study' ad.obs[batch].value_counts() adata.n_obs adata.obs ad.shape ad.shape # + # Use non-normalized (count) data for scvi! 
# Expects data only on HVGs # Defaults from SCVI github tutorials scanpy_pbmc3k and harmonization n_epochs=np.min([round((20000/adata.n_obs)*400), 400]) n_epochs = 400 print('# epochs %i' % n_epochs) # - print('here...') net_adata = ad[:,ad.var['hvg.or.marker']].copy() print(net_adata.shape) net_adata.X = net_adata.layers['counts'] # [:,ad.var['hvg.or.marker']] del net_adata.layers['counts'] # Ensure that the raw counts are not accidentally used del net_adata.raw # Note that this only works from anndata 0.7 # + n_latent=30 n_hidden=128 n_layers=2 # Define batch indices le = LabelEncoder() net_adata.obs['batch_indices'] = le.fit_transform(net_adata.obs[batch].values) net_adata = AnnDatasetFromAnnData(net_adata) vae = VAE( net_adata.nb_genes, reconstruction_loss='nb', n_batch=net_adata.n_batches, n_layers=n_layers, n_latent=n_latent, n_hidden=n_hidden, ) trainer = UnsupervisedTrainer( vae, net_adata, train_size=1.0, use_cuda=True, ) trainer.train(n_epochs=n_epochs, lr=1e-3) full = trainer.create_posterior(trainer.model, net_adata, indices=np.arange(len(net_adata))) latent, _, _ = full.sequential().get_latent() ad.obsm['X_emb'] = latent # - latent import torch # + torch.save(trainer.model.state_dict(), 'mytraining_all_revision.pt') # - import matplotlib.pyplot as plt # %matplotlib inline # + from matplotlib import rcParams import matplotlib.pyplot as plt rcParams['figure.figsize'] = [6, 6] rcParams['figure.dpi'] = 110 from os.path import join, exists k = 'adams' output_path = join('data/figures/%s_revision.png' % k) if exists(output_path): print('skip %s' % k) if 'X_emb' in ad.obsm: print('neighbors... using %s' % 'X_emb' ) sc.pp.neighbors(ad, use_rep='X_emb') sc.tl.umap(ad) print('umap...') # - import anndata anndata.__version__ import scanpy as sc sc.__version__ import os; os.path.exists(outfilename) ad.write(outfilename, compression='lzf') print('done...') outfilename = '../data/integrated/morse_n_adams_n_bal_scvi_%s_%i_%i_revision.h5da' % (batch, ad.shape[0], n_epochs) outfilename print('done...') import scanpy as sc print('here...') print(outfilename) print('loading object...') ad = sc.read_h5ad(outfilename) print('done...') ad.shape ad.obs[ad.obs['study'] == 'adams'][['patient.id', 'disease.status']] ad.obs ad.obs[ad.obs['study'] == 'adams'][['patient.id', 'disease.status']].drop_duplicates('patient.id')['disease.status'].value_counts() ad.obs[ad.obs['study'] == 'budinger'][['patient.id', 'disease.status']].drop_duplicates('patient.id')['disease.status'].value_counts() ad.obs[ad.obs['study'] == 'BAL'][['patient.id', 'disease.status']].drop_duplicates('patient.id')['disease.status'].value_counts() ad.obs[ad.obs['study'] == 'morse'][['patient.id', 'disease.status']].drop_duplicates('patient.id')['disease.status'].value_counts() for k in ['MP.markers', 'MP.others']: print(k) next_marker_genes = marker_genes[k]; sc.tl.score_genes(ad, list(next_marker_genes), use_raw=False, score_name=k + ".score"); ad[ad.obs['study'] == 'morse',:] ad.obs['Subclass_Cell_Identity'].value_counts() adata.obs['study'].value_counts() ad.obs.groupby(['study', 'disease.status']).size().unstack(fill_value=0) ad.obs['disease.status'] = ad.obs['disease.status'].str.replace('Control', 'control') ad.obs['disease.status'] = np.where(ad.obs['disease.status'].str.contains('nan'), 'BAL', ad.obs['disease.status']) ad.obs['cell.type'] = np.where(ad.obs['cell.type'].str.contains('nan'), ad.obs['Subclass_Cell_Identity'], ad.obs['cell.type']) ad.obs['cell.type'] = np.where(ad.obs['cell.type'].str.contains('nan'), 
ad.obs['Celltype_2'], ad.obs['cell.type']) ad.shape # + print('plotting...') rcParams['figure.dpi'] = 110 fig, ax = plt.subplots(nrows = 2, ncols = 3, figsize=(18, 10)) ax = ax.flatten() sc.pl.umap(ad, color='study', title='study', ax=ax[0], show=False) ax[0].spines['right'].set_visible(False) ax[0].spines['top'].set_visible(False) sc.pl.umap(ad, color='patient.id', title='patient (n=%i)' % len(set(ad.obs['patient.id'])), ax=ax[1], show=False) ax[1].spines['right'].set_visible(False) ax[1].spines['top'].set_visible(False) ax[1].get_legend().remove() plt.xlabel('') plt.ylabel('') sc.pl.umap(ad, color='cell.type', title='cell type', ax=ax[2], show=False) ax[2].spines['right'].set_visible(False) ax[2].spines['top'].set_visible(False) plt.xlabel('') plt.ylabel('') sc.pl.umap(ad, color='MP.markers.score' if 'study' in adata.obs else batch, title='score - 20 marker genes', ax=ax[3], show=False) ax[3].spines['right'].set_visible(False) ax[3].spines['top'].set_visible(False) plt.xlabel('') plt.ylabel('') sc.pl.umap(ad, color='MP.others.score', title='score - 9 marker genes', ax=ax[4], show=False) ax[4].spines['right'].set_visible(False) ax[4].spines['top'].set_visible(False) plt.xlabel('') plt.ylabel('') sc.pl.umap(ad, color='disease.status', title='disease.status', ax=ax[5], show=False) ax[5].spines['right'].set_visible(False) ax[5].spines['top'].set_visible(False) plt.xlabel('') plt.ylabel('') plt.savefig('data/figures/umap_scvi_integrated_revision.pdf') plt.close() # - os.path.abspath('data/figures/umap_scvi_integrated_revision.pdf') print('done...') print('here...') # + from itertools import combinations ad_sel = subset_anndata(ad, ad.shape[0]) disease = ad_sel.obs['disease.status'] conn = ad_sel.uns['neighbors']['connectivities'] masks = [] obs = [] masks = [] positions = np.arange(ad_sel.shape[0]) # .reshape(w, h) # conn_array = conn.toarray() if (type(conn) != np.ndarray) else conn conn_array = conn # conn.toarray() if (type(conn) != np.ndarray) else conn pos_by_pair = {} for i, j in combinations(list(set(disease))[::-1], r=2): x = positions[disease == i] y = positions[disease == j] xy = cartesian([x, y]) x, y = xy[:, 0].flatten(), xy[:, 1].flatten() edges = conn_array[x, y] x_nodes = x[edges > 0] y_nodes = y[edges > 0] n_nodes_x, n_nodes_y = x_nodes.shape[0], y_nodes.shape[0] nx_uniq, ny_uniq = np.unique(x_nodes).shape[0], np.unique(y_nodes).shape[0] k = i + ':' + j if not k in pos_by_pair: pos_by_pair[i + ':' + j] = {} pos_by_pair[i + ':' + j][i] = np.unique(x_nodes) pos_by_pair[i + ':' + j][j] = np.unique(y_nodes) obs.append([i, j, nx_uniq, ny_uniq, nx_uniq + ny_uniq]) # - df = pd.DataFrame(obs, columns=['a', 'b', 'n.a', 'n.b', 'n.total']) df.sort_values('n.total', ascending=False) # + bal_ipf = ad_sel[pos_by_pair['BAL:IPF']['BAL'],:] bal_ipf.obs['selection'] = 'BAL:IPF' bal_ctrl = ad_sel[pos_by_pair['BAL:control']['BAL'],:] bal_ctrl.obs['selection'] = 'BAL:CTRL' print(bal_sel.obs['study'].value_counts()) print(bal_ctrl.obs['study'].value_counts()) bal_all = bal_ipf.concatenate(bal_ctrl) # - # ### Plot whether the expression of gene markers for IPF+BAL increase after subsetting for IPFs bal_all.obs
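# The cross-condition edge counting above depends on a `cartesian` helper that is not defined in
# this notebook (it behaves like sklearn.utils.extmath.cartesian). The snippet below is a
# self-contained sketch of the same idea on a toy connectivity matrix, using np.ix_ instead of an
# explicit cartesian product; the matrix, labels, and edge list here are illustrative only.
# +
import numpy as np
from itertools import combinations

# toy symmetric kNN connectivity matrix for 6 cells
conn_toy = np.zeros((6, 6))
for a, b in [(0, 3), (1, 4), (2, 4)]:
    conn_toy[a, b] = conn_toy[b, a] = 1.0

status = np.array(['IPF', 'IPF', 'IPF', 'control', 'control', 'BAL'])
positions = np.arange(conn_toy.shape[0])

for i, j in combinations(sorted(set(status)), r=2):
    x = positions[status == i]
    y = positions[status == j]
    sub = conn_toy[np.ix_(x, y)]        # all pairwise edge weights between the two groups
    x_nodes = x[sub.any(axis=1)]        # group-i cells with at least one cross-group edge
    y_nodes = y[sub.any(axis=0)]        # group-j cells with at least one cross-group edge
    print(i, j, len(x_nodes), len(y_nodes))
# -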
notebooks/02_integrate_scvi_revision.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas food_info = pandas.read_csv("food_info.csv") #print(type(food_info)) print (food_info.dtypes) first_rows = food_info.head() #first_rows #print(food_info.head(3)) #print (food_info.columns) #print (food_info.shape) # + #pandas uses zero-indexing #Series object representing the row at index 0. #print (food_info.loc[0]) # Series object representing the seventh row. #food_info.loc[6] # Will throw an error: "KeyError: 'the label [8620] is not in the [index]'" #food_info.loc[8620] #The object dtype is equivalent to a string in Python # + #object - For string values #int - For integer values #float - For float values #datetime - For time values #bool - For Boolean values #print(food_info.dtypes) # + # Returns a DataFrame containing the rows at indexes 3, 4, 5, and 6. #food_info.loc[3:6] # Returns a DataFrame containing the rows at indexes 2, 5, and 10. Either of the following approaches will work. # Method 1 #two_five_ten = [2,5,10] #food_info.loc[two_five_ten] # Method 2 #food_info.loc[[2,5,10]] # + # Series object representing the "NDB_No" column. #ndb_col = food_info["NDB_No"] #print ndb_col # Alternatively, you can access a column by passing in a string variable. #col_name = "NDB_No" #ndb_col = food_info[col_name] # + #columns = ["Zinc_(mg)", "Copper_(mg)"] #zinc_copper = food_info[columns] #print zinc_copper #print zinc_copper # Skipping the assignment. #zinc_copper = food_info[["Zinc_(mg)", "Copper_(mg)"]] # + #print(food_info.columns) #print(food_info.head(2)) col_names = food_info.columns.tolist() #print col_names gram_columns = [] for c in col_names: if c.endswith("(g)"): gram_columns.append(c) gram_df = food_info[gram_columns] print(gram_df.head(3)) # -
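# The suffix filter above can also be written more compactly; the lines below are an equivalent
# sketch assuming the same food_info frame, shown only as an alternative.
# +
# list comprehension instead of the explicit loop
gram_columns = [c for c in food_info.columns if c.endswith("(g)")]
gram_df = food_info[gram_columns]

# or let pandas do the suffix match with a regex
gram_df = food_info.filter(regex=r"\(g\)$")
print(gram_df.head(3))
# -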
pandans/pandas_1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Facco-Bruno/Alura-ML/blob/Master/Sistemas_de_recomendacao_de_filmes.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="K6AQ7Nj77ayH" # # Sistemas de Recomendação # + colab={"base_uri": "https://localhost:8080/", "height": 238} id="jzNTPt15y74E" outputId="7682b586-20ff-4903-ece3-9138fb343d3d" import pandas as pd filmes = pd.read_csv("movies.csv") filmes.columns = ["filmeId", "titulo", "generos"] filmes = filmes.set_index("filmeId") filmes.head() # + colab={"base_uri": "https://localhost:8080/", "height": 206} id="9Pso4bnK7lI_" outputId="bc453d15-66ed-4908-f449-527379da52ad" notas = pd.read_csv("ratings.csv") notas.columns = ["usuarioId", "filmeId", "nota", "momento"] notas.head() # + colab={"base_uri": "https://localhost:8080/", "height": 300} id="u4r39Pp673he" outputId="437861db-d51c-4749-d88e-6d237ad2be0f" notas.describe() # + [markdown] id="zrcGotFe7-a0" # ## Recomendação Heurística de total de votos # + colab={"base_uri": "https://localhost:8080/"} id="3W6GYMnx76uG" outputId="9e50496f-c99a-4691-8d8a-8127e9e520e8" total_de_votos = notas["filmeId"].value_counts() total_de_votos.head(10) # + colab={"base_uri": "https://localhost:8080/", "height": 238} id="UrhFO5wy8G2l" outputId="95bb95b0-af31-49a3-c3f1-c8035027c879" filmes['total_de_votos'] = total_de_votos filmes.head() # + colab={"base_uri": "https://localhost:8080/", "height": 394} id="nAwwgGAD8RGu" outputId="0823dbfa-db94-419d-9be6-6acd4e2d9456" filmes.sort_values("total_de_votos", ascending = False).head(10) # + colab={"base_uri": "https://localhost:8080/"} id="iDH4YiYq8XSd" outputId="080939b3-30c1-45de-e782-a097d8b2c890" notas_medias = notas.groupby("filmeId").mean()["nota"] notas_medias.head() # + colab={"base_uri": "https://localhost:8080/", "height": 394} id="3JKeE3p08ivN" outputId="dd99e999-1754-4b42-dbc4-19c382ef9a67" filmes["nota_media"] = notas_medias filmes.sort_values("total_de_votos", ascending = False).head(10) # + [markdown] id="-9fD25M99GDu" # ## Nota média e votos # + colab={"base_uri": "https://localhost:8080/", "height": 394} id="6Ozf6V6N8mD9" outputId="ff988584-9611-41bc-e20f-c2c3f7ec958e" filmes.sort_values("nota_media", ascending = False).head(10) # + colab={"base_uri": "https://localhost:8080/", "height": 394} id="_lyJxQIS9Z2m" outputId="27a58749-325e-4c65-8863-eb052839538d" filmes.query("total_de_votos >= 10").sort_values("nota_media", ascending = False).head(10) # + colab={"base_uri": "https://localhost:8080/", "height": 394} id="qHXxvrPq9jV9" outputId="5c09c50b-adf2-40ec-988d-52c6f74a24d8" filmes_com_mais_de_50_votos = filmes.query("total_de_votos >= 50") filmes_com_mais_de_50_votos.sort_values("nota_media", ascending = False).head(10) # + colab={"base_uri": "https://localhost:8080/", "height": 300} id="nA-UqsCo93Ht" outputId="0bf31869-06ac-4369-de85-2e52009300b4" eu_assisti = [1, 21, 19, 10, 11, 7, 2] filmes.loc[eu_assisti] # + colab={"base_uri": "https://localhost:8080/", "height": 394} id="lJdmzwQoCbkC" outputId="678278f5-d8d1-4bfd-9ed6-f69e4c0b9af8" aventura_infantil_e_fantasia = filmes_com_mais_de_50_votos.query("generos=='Adventure|Children|Fantasy'") aventura_infantil_e_fantasia.drop(eu_assisti, 
errors='ignore').sort_values("nota_media", ascending = False).head(10) # + [markdown] id="aksJNpZNxUta" # # Collaborative filtering x Content based filtering # + id="-aRrcfboCdo7" colab={"base_uri": "https://localhost:8080/", "height": 298} outputId="d2364326-f626-4f1e-ae13-6ba1b690dfc8" import matplotlib.pyplot as plt plt.plot(4, 4.5, "go") plt.plot(5, 5, "yo") plt.legend(["João", "Maria"]) plt.title("Calcular a distância entre dois usuários") plt.plot([4, 5], [4.5, 4.5], color="b", linestyle="-") plt.plot([4, 5], [4.5, 5], color="b", linestyle="-") plt.plot([5, 5], [4.5, 5], color="b", linestyle="-") # + colab={"base_uri": "https://localhost:8080/"} id="QDpITlFHxYxX" outputId="1cebc01e-e5e0-41ff-b167-2e4d9ea311af" import numpy as np joao = np.array([4, 4.5]) maria = np.array([5, 5]) joao - maria # + colab={"base_uri": "https://localhost:8080/"} id="kvs4Yku4xz-H" outputId="571e64a5-5c00-4a12-ca30-56700b1fbc03" from math import sqrt def pitagoras(a,b): (delta_x, delta_y) = a - b return sqrt(delta_x * delta_x + delta_y * delta_y) pitagoras(joao, maria) # + colab={"base_uri": "https://localhost:8080/"} id="xmdIiuVGx61P" outputId="bd917440-7d55-4ebe-aed3-01a94c78d0e0" def pitagoras(a,b): return np.linalg.norm(a - b) pitagoras(joao, maria) # + colab={"base_uri": "https://localhost:8080/", "height": 298} id="T_eKske1x8ie" outputId="2398cb44-f2d5-4381-9a83-0ef45d27c7a6" import matplotlib.pyplot as plt plt.plot(4, 4.5, "go") plt.plot(5, 5, "yo") plt.plot(3.5, 4.5, "bo") plt.legend(["João", "Maria", "Joaquina"]) plt.title("Calcular a distância entre usuários") # + colab={"base_uri": "https://localhost:8080/"} id="sKeetuLrx_En" outputId="a472d8e4-dd8d-43a5-bc83-dfa95261c546" joaquina = np.array([3.5, 4.5]) print(pitagoras(joao, maria)) print(pitagoras(joao, joaquina)) # + id="3Hd54yqDyBWg" def distancia_de_vetores(a,b): return np.linalg.norm(a - b) # + id="xVWnf-WiyDmf" def notas_do_usuario(usuario): notas_do_usuario = notas.query("usuarioId==%d" % usuario) notas_do_usuario = notas_do_usuario[["filmeId", "nota"]].set_index("filmeId") return notas_do_usuario # + id="OZD2NP-CyGt3" usuario1 = notas_do_usuario(1) usuario4 = notas_do_usuario(4) # + colab={"base_uri": "https://localhost:8080/", "height": 238} id="Z0h1uw3QyKEW" outputId="4b7a2dae-d5e0-45d2-acdd-dd344b3a4842" usuario1.head() # + colab={"base_uri": "https://localhost:8080/", "height": 238} id="kPtLAxW_yZX_" outputId="863d51e2-6167-4580-a18f-6b52f963a7b1" usuario4.head() # + colab={"base_uri": "https://localhost:8080/"} id="643RmlG7ypf-" outputId="fe855f66-6444-40c5-fdd3-236aad108509" diferencas = usuario1.join(usuario4, lsuffix="_esquerda", rsuffix="_direita").dropna() distancia_de_vetores(diferencas['nota_esquerda'], diferencas['nota_direita']) # + colab={"base_uri": "https://localhost:8080/", "height": 238} id="JPh2qtXEqzmh" outputId="4dd5a9c6-49fe-455c-c8be-bea6cf299f82" diferencas.head() # + id="47foCio3ytV-" def distancia_de_usuarios(usuario_id1, usuario_id2): notas1 = notas_do_usuario(usuario_id1) notas2 = notas_do_usuario(usuario_id2) diferencas = notas1.join(notas2, lsuffix="_esquerda", rsuffix="_direita").dropna() return distancia_de_vetores(diferencas['nota_esquerda'], diferencas['nota_direita']) # + colab={"base_uri": "https://localhost:8080/"} id="APbIl8riyxvG" outputId="8e52aac4-747e-4a22-d3de-889b3399d643" distancia_de_usuarios(1,4) # + id="BXfVxuz622Xk" def distancia_de_usuarios(usuario_id1, usuario_id2): notas1 = notas_do_usuario(usuario_id1) notas2 = notas_do_usuario(usuario_id2) diferencas = notas1.join(notas2, 
lsuffix="_esquerda", rsuffix="_direita").dropna() distancia = distancia_de_vetores(diferencas['nota_esquerda'], diferencas['nota_direita']) return [usuario_id1, usuario_id2, distancia] # + colab={"base_uri": "https://localhost:8080/"} id="jMfQVWT_38Cj" outputId="835e7ea6-f0e7-4161-c1de-43790ec0f446" distancia_de_usuarios(1,4) # + colab={"base_uri": "https://localhost:8080/"} id="dC-3ZPUS4NuL" outputId="5ba27b25-cb58-40e4-a926-a39a2afcc6dc" quantidade_de_usuarios = len(notas['usuarioId'].unique()) print("Temos %d usuarios" % quantidade_de_usuarios) # + colab={"base_uri": "https://localhost:8080/"} id="JnvCpKE_4Qhc" outputId="afa69978-ef0a-4ed2-fbd6-7597922a4121" def distancia_de_todos(voce_id): distancias = [] for usuario_id in notas['usuarioId'].unique(): informacoes = distancia_de_usuarios(voce_id, usuario_id) distancias.append(informacoes) return distancias distancia_de_todos(1)[:5] # + colab={"base_uri": "https://localhost:8080/", "height": 206} id="KA6Kpr-G4WFb" outputId="31d4d600-d50b-4979-a6d0-d20ac2c33969" def distancia_de_todos(voce_id): todos_os_usuarios = notas['usuarioId'].unique() distancias = [distancia_de_usuarios(voce_id, usuario_id) for usuario_id in todos_os_usuarios] distancias = pd.DataFrame(distancias, columns = ["voce", "outra_pessoa", "distancia"]) return distancias distancia_de_todos(1).head() # + id="dR8cUu4f4goj" def distancia_de_usuarios(usuario_id1, usuario_id2, minimo = 5): notas1 = notas_do_usuario(usuario_id1) notas2 = notas_do_usuario(usuario_id2) diferencas = notas1.join(notas2, lsuffix="_esquerda", rsuffix="_direita").dropna() if(len(diferencas) < minimo): return [usuario_id1, usuario_id2, 100000] distancia = distancia_de_vetores(diferencas['nota_esquerda'], diferencas['nota_direita']) return [usuario_id1, usuario_id2, distancia] # + colab={"base_uri": "https://localhost:8080/", "height": 206} id="uDd0KBZy4tXb" outputId="fe81d12f-baf4-4378-f863-958ac538898b" distancia_de_todos(1).head() # + id="0j0CO7NM4uNb" def mais_proximos_de(voce_id): distancias = distancia_de_todos(voce_id) distancias = distancias.sort_values("distancia") distancias = distancias.set_index("outra_pessoa").drop(voce_id) return distancias # + colab={"base_uri": "https://localhost:8080/", "height": 238} id="3-3S1Tq-5PHD" outputId="a7ae07c6-e18b-4460-e0a8-e9930c91b81c" mais_proximos_de(1).head() # + id="p5hRllut8nJw" def mais_proximos_de(voce_id, n = None): distancias = distancia_de_todos(voce_id, n = n) distancias = distancias.sort_values("distancia") distancias = distancias.set_index("outra_pessoa").drop(voce_id) return distancias # + id="1gF3sX3NHkNY" def distancia_de_todos(voce_id, n = None): todos_os_usuarios = notas['usuarioId'].unique() if n: todos_os_usuarios = todos_os_usuarios[:n] distancias = [distancia_de_usuarios(voce_id, usuario_id) for usuario_id in todos_os_usuarios] distancias = pd.DataFrame(distancias, columns = ["voce", "outra_pessoa", "distancia"]) return distancias # + id="yjZ7f_e8HnO_" #mais_proximos_de(1, n = 50) # + id="Kkv-RJBTHp94" def distancia_de_usuarios(usuario_id1, usuario_id2, minimo = 5): notas1 = notas_do_usuario(usuario_id1) notas2 = notas_do_usuario(usuario_id2) diferencas = notas1.join(notas2, lsuffix="_esquerda", rsuffix="_direita").dropna() if(len(diferencas) < minimo): return None distancia = distancia_de_vetores(diferencas['nota_esquerda'], diferencas['nota_direita']) return [usuario_id1, usuario_id2, distancia] # + id="ZbUewNLAHspY" def distancia_de_todos(voce_id, numero_de_usuarios_a_analisar = None): todos_os_usuarios = notas['usuarioId'].unique() 
if numero_de_usuarios_a_analisar: todos_os_usuarios = todos_os_usuarios[:numero_de_usuarios_a_analisar] distancias = [distancia_de_usuarios(voce_id, usuario_id) for usuario_id in todos_os_usuarios] distancias = list(filter(None, distancias)) distancias = pd.DataFrame(distancias, columns = ["voce", "outra_pessoa", "distancia"]) return distancias # + id="Rj2vUcnhHvmf" def mais_proximos_de(voce_id, numero_de_usuarios_a_analisar = None): distancias = distancia_de_todos(voce_id, numero_de_usuarios_a_analisar = numero_de_usuarios_a_analisar) distancias = distancias.sort_values("distancia") distancias = distancias.set_index("outra_pessoa").drop(voce_id) return distancias # + id="2SBAKnSgHxzX" #mais_proximos_de(1, numero_de_usuarios_a_analisar = 50) # + id="bc62e_EiHz6f" def sugere_para(voce, numero_de_usuarios_a_analisar = None): notas_de_voce = notas_do_usuario(voce) filmes_que_voce_ja_viu = notas_de_voce.index similares = mais_proximos_de(voce, numero_de_usuarios_a_analisar = numero_de_usuarios_a_analisar) similar = similares.iloc[0].name notas_do_similar = notas_do_usuario(similar) notas_do_similar = notas_do_similar.drop(filmes_que_voce_ja_viu, errors='ignore') recomendacoes = notas_do_similar.sort_values("nota", ascending=False) return recomendacoes.join(filmes) # + id="lo4vzt5gH1sH" #sugere_para(1, numero_de_usuarios_a_analisar=50).head() # + id="qDQ92u5KH4HR" def mais_proximos_de(voce_id, n_mais_proximos=10, numero_de_usuarios_a_analisar = None): distancias = distancia_de_todos(voce_id, numero_de_usuarios_a_analisar = numero_de_usuarios_a_analisar) distancias = distancias.sort_values("distancia") distancias = distancias.set_index("outra_pessoa").drop(voce_id) return distancias.head(n_mais_proximos) # + id="OG7O7MfrH6KY" #mais_proximos_de(1, n_mais_proximos = 2, numero_de_usuarios_a_analisar=300) # + id="v_jH8HIGH8Hh" def sugere_para(voce, n_mais_proximos = 10, numero_de_usuarios_a_analisar = None): notas_de_voce = notas_do_usuario(voce) filmes_que_voce_ja_viu = notas_de_voce.index similares = mais_proximos_de(voce, n_mais_proximos = n_mais_proximos, numero_de_usuarios_a_analisar = numero_de_usuarios_a_analisar) usuarios_similares = similares.index notas_dos_similares = notas.set_index("usuarioId").loc[usuarios_similares] recomendacoes = notas_dos_similares.groupby("filmeId").mean()[["nota"]] recomendacoes = recomendacoes.sort_values("nota", ascending=False) return recomendacoes.join(filmes) # + id="M834bw8qH9gn" #sugere_para(1, numero_de_usuarios_a_analisar = 50).head() # + id="KAp2Y8pHH_V_" #sugere_para(1, numero_de_usuarios_a_analisar = 300).head() # + id="T4nvMwmiIB1_" def knn(voce_id, k_mais_proximos=10, numero_de_usuarios_a_analisar = None): distancias = distancia_de_todos(voce_id, numero_de_usuarios_a_analisar = numero_de_usuarios_a_analisar) distancias = distancias.sort_values("distancia") distancias = distancias.set_index("outra_pessoa").drop(voce_id) return distancias.head(k_mais_proximos) # + id="YDA83PpbIEff" def sugere_para(voce, k_mais_proximos = 10, numero_de_usuarios_a_analisar = None): notas_de_voce = notas_do_usuario(voce) filmes_que_voce_ja_viu = notas_de_voce.index similares = knn(voce, k_mais_proximos = k_mais_proximos, numero_de_usuarios_a_analisar = numero_de_usuarios_a_analisar) usuarios_similares = similares.index notas_dos_similares = notas.set_index("usuarioId").loc[usuarios_similares] recomendacoes = notas_dos_similares.groupby("filmeId").mean()[["nota"]] recomendacoes = recomendacoes.sort_values("nota", ascending=False) return recomendacoes.join(filmes) # + 
id="rA7P5UY59txC" def knn(voce_id, k_mais_proximos=10, numero_de_usuarios_a_analisar = None): distancias = distancia_de_todos(voce_id, numero_de_usuarios_a_analisar = numero_de_usuarios_a_analisar) distancias = distancias.sort_values("distancia") distancias = distancias.set_index("outra_pessoa").drop(voce_id, errors='ignore') return distancias.head(k_mais_proximos) # + id="w6Pk_Kmj9ydd" sugere_para(1, numero_de_usuarios_a_analisar=500)
Sistemas_de_recomendacao_de_filmes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + """ There are people sitting in a circular fashion, print every third member while removing them, the next counter starts immediately after the member is removed. Print till all the members are exhausted. For example: Input: consider 123456789 members sitting in a circular fashion, Output: 369485271 """ import unittest def circular_counter(int_list, skip): skip = skip - 1 index = 0 len_list = (len(int_list)) while len_list > 0: index = (skip+index) % len_list yield int_list.pop(index) len_list -= 1 class TestCircularCounter(unittest.TestCase): def test_circular_counter(self): a = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0'] CircCounter_generator = circular_counter(a, 3) self.assertEqual(next(CircCounter_generator), '3') self.assertEqual(next(CircCounter_generator), '6') self.assertEqual(next(CircCounter_generator), '9') self.assertEqual(next(CircCounter_generator), '2') self.assertEqual(next(CircCounter_generator), '7') self.assertEqual(next(CircCounter_generator), '1') self.assertEqual(next(CircCounter_generator), '8') self.assertEqual(next(CircCounter_generator), '5') self.assertEqual(next(CircCounter_generator), '0') self.assertEqual(next(CircCounter_generator), '4') self.assertRaises(StopIteration, next, CircCounter_generator) if __name__ == "__main__": unittest.main(argv=['first-arg-is-ignored'], exit=False) # -
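# A quick usage example for the generator above, reproducing the docstring case. Note that
# circular_counter pops from the list it is given, so the input list is emptied in place.
# +
members = list("123456789")
print("".join(circular_counter(members, 3)))  # prints 369485271
# -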
Python/circular_counter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd # + # Read dataframe and show it df = pd.read_csv('input/190516_m2b_marine_biota.csv', sep=';', encoding = "ISO-8859-1", index_col='id') # - df.head() print('Columns in dataframe:') for idx, col in enumerate(df.columns): print(idx+1, col) print('Dataframe length:', len(df)) # + # Keep only the columns: ['lon', 'lat', 'depth_m', 'mea_ug_kg_orig'] cols = ['lon', 'lat', 'depth_m', 'mea_ug_kg_orig'] ndf = df[cols] # + # Limit data by latitude and longitude and depth # Latitude limits: 35.3N - 47.05N # Longitude limits: 6.37E - 18.31E # Depth limit: 0m - 20m lat_limit_bottom = ndf['lat'] > 35.3 lat_limit_top = ndf['lat'] < 47.05 lon_limit_bottom = ndf['lon'] > 6.37 lon_limit_top = ndf['lon'] < 18.31 depth_limit_top = ndf['depth_m'] >= 0 # note: 0 m is the surface, so the 'top' bound is the smaller value depth_limit_bottom = ndf['depth_m'] < 20 # - ndf = ndf[lat_limit_bottom & lat_limit_top & lon_limit_bottom & lon_limit_top & depth_limit_bottom & depth_limit_top] ndf.head() len(ndf) # + # Describe data ndf.describe() # + # Save file ndf.to_csv('prepared_data_mercury_concentrations_volumetric.csv', encoding='utf-8', sep=',') # -
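# The same bounding-box and depth filter can be expressed in one step with DataFrame.query;
# the sketch below assumes the df and cols defined above and just checks that both approaches
# select the same rows.
# +
ndf_q = df[cols].query(
    "lat > 35.3 and lat < 47.05 and lon > 6.37 and lon < 18.31 and depth_m >= 0 and depth_m < 20"
)
assert len(ndf_q) == len(ndf)  # should match the mask-based filter above
# -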
data analysis/notebooks/spatial_101/0103_spatial_analysis/idw-3d-and-mercury-concentrations-data-prep.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # imports import pandas as pd import numpy as np import matplotlib.pyplot as plt # constants pro_filename = r'https://dota-match-ids.s3.amazonaws.com/promatch_csvs/professional_promatch_data.csv' premier_filename = r'https://dota-match-ids.s3.amazonaws.com/promatch_csvs/premier_promatch_data.csv' intl_filename = r'https://dota-match-ids.s3.amazonaws.com/promatch_csvs/international_promatch_data.csv' # read the csv files df_pro = pd.read_csv(pro_filename) df_premier = pd.read_csv(premier_filename) df_intl = pd.read_csv(intl_filename) # display the data df_pro.head() df_premier.head() df_intl.head() # Drop columns that aren't relevant to Dire or Radiant team classification df_pro = df_pro.drop(columns=['Unnamed: 0', 'duration', 'start_time', 'leagueid', 'league_name', 'series_id','series_type'], axis=1) df_premier = df_premier.drop(columns=['Unnamed: 0', 'duration', 'start_time', 'leagueid', 'league_name', 'series_id','series_type'], axis=1) df_intl = df_intl.drop(columns=['Unnamed: 0', 'duration', 'start_time', 'leagueid', 'league_name', 'series_id','series_type'], axis=1) # Drop rows with missing values df_pro = df_pro.dropna(axis=0) df_premier = df_premier.dropna(axis=0) df_intl = df_intl.dropna(axis=0) # Display the cleaned data sets df_pro.head() df_premier.head() df_intl.head() # First we'll just examine the raw win percentages # Count the number of radiant wins and dire wins per league # + pro_radiant_wins = df_pro[df_pro.radiant_win == True].shape[0] pro_dire_wins = df_pro[df_pro.radiant_win == False].shape[0] premier_radiant_wins = df_premier[df_premier.radiant_win == True].shape[0] premier_dire_wins = df_premier[df_premier.radiant_win == False].shape[0] intl_radiant_wins = df_intl[df_intl.radiant_win == True].shape[0] intl_dire_wins = df_intl[df_intl.radiant_win == False].shape[0] # - # Plot the number of wins per team per league fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(16,5)) teams = ['Radiant', 'Dire'] wins = [pro_radiant_wins, pro_dire_wins, premier_radiant_wins, premier_dire_wins, intl_radiant_wins, intl_dire_wins] ax1.bar(teams[0], wins[0], color='b') ax1.bar(teams[1], wins[1], color='r') ax2.bar(teams[0], wins[2], color='b') ax2.bar(teams[1], wins[3], color='r') ax3.bar(teams[0], wins[4], color='b') ax3.bar(teams[1], wins[5], color='r') ax1.set_title("Professional League") ax2.set_title("Premier League") ax3.set_title("International") ax1.set_ylabel("Number of Wins") ax2.set_ylabel("Number of Wins") ax3.set_ylabel("Number of Wins") fig.suptitle("Radiant vs. Dire Wins") plt.show() # Calculate the numerical difference between the number of wins for each league pro_diff = pro_radiant_wins - pro_dire_wins premier_diff = premier_radiant_wins - premier_dire_wins intl_diff = intl_radiant_wins - intl_dire_wins # Calculate the ratio of radiant wins to dire winsfor each league pro_ratio = np.round(pro_radiant_wins / pro_dire_wins, decimals=3) premier_ratio = np.round(premier_radiant_wins / premier_dire_wins, decimals=3) intl_ratio = np.round(intl_radiant_wins / intl_dire_wins, decimals=3) # Display the difference print('Difference Between Radiant vs. 
Dire Wins') print('\tProfessional League') print('\t\tRadiant +{} wins over Dire'.format(pro_diff)) print('\t\tRatio: {}'.format(pro_ratio)) print('\tPremier League') print('\t\tRadiant +{} wins over Dire'.format(premier_diff)) print('\t\tRatio: {}'.format(premier_ratio)) print('\tInternational') print('\t\tRadiant +{} wins over Dire'.format(intl_diff)) print('\t\tRatio: {}'.format(intl_ratio)) # This is pretty interesting. While small, both the bar plots and the ratios of Radiant wins to Dire wins show that teams under the Radiant name win slightly more games than Dire teams. And the ratio of Radiant wins to Dire wins for each of the three levels of play are also similar. This may indicate that the hero-picking strategy may not be completely fair or there is another in-game factor leading to a small, but fairly consistent difference in the number of Radiant wins vs. Dire wins. # Now let's look at points scored per team per league # Calculate the mean score for each team for each league # + pro_radiant_mean = df_pro['radiant_score'].mean() pro_dire_mean = df_pro['dire_score'].mean() premier_radiant_mean = df_premier['radiant_score'].mean() premier_dire_mean = df_premier['dire_score'].mean() intl_radiant_mean = df_intl['radiant_score'].mean() intl_dire_mean = df_intl['dire_score'].mean() # - # Calculate the median of scores for each team for each league # + pro_radiant_median = df_pro['radiant_score'].median() pro_dire_median = df_pro['dire_score'].median() premier_radiant_median = df_premier['radiant_score'].median() premier_dire_median = df_premier['dire_score'].median() intl_radiant_median = df_intl['radiant_score'].median() intl_dire_median = df_intl['dire_score'].median() # - # Calculate the standard deviation of scores for each team for each league # + pro_radiant_std = df_pro['radiant_score'].std() pro_dire_std = df_pro['dire_score'].std() premier_radiant_std = df_premier['radiant_score'].std() premier_dire_std = df_premier['dire_score'].std() intl_radiant_std = df_intl['radiant_score'].std() intl_dire_std = df_intl['dire_score'].std() # - # Calculate the variance of scores for each team for each league # + pro_radiant_var = np.round(df_pro['radiant_score'].var(), decimals=3) pro_dire_var = np.round(df_pro['dire_score'].var(), decimals=3) premier_radiant_var = np.round(df_premier['radiant_score'].var(), decimals=3) premier_dire_var = np.round(df_premier['dire_score'].var(), decimals=3) intl_radiant_var = np.round(df_intl['radiant_score'].var(), decimals=3) intl_dire_var = np.round(df_intl['dire_score'].var(), decimals=3) # - # Calculate the max scores for each team for each league # + pro_radiant_max = df_pro['radiant_score'].max() pro_dire_max = df_pro['dire_score'].max() premier_radiant_max = df_premier['radiant_score'].max() premier_dire_max = df_premier['dire_score'].max() intl_radiant_max = df_intl['radiant_score'].max() intl_dire_max = df_intl['dire_score'].max() # - # Calculate the min scores for each team for each league # Plot the statistics regarding the scores # + fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(16,5)) x = range(10) ax1.plot(x, [pro_radiant_mean for _ in range(10)], color='b', linestyle='-', label='Radiant Mean') ax1.plot(x, [pro_dire_mean for _ in range(10)], color='r', linestyle='-', label='Dire Mean') ax1.plot(x, [pro_radiant_std for _ in range(10)], color='b', linestyle='--', label='Radiant Std') ax1.plot(x, [pro_dire_std for _ in range(10)], color='r', linestyle='--', label='Dire Std') ax1.plot(x, [pro_radiant_median for _ in range(10)], color='b', 
linestyle='-.', label='Radiant Median') ax1.plot(x, [pro_dire_median for _ in range(10)], color='r', linestyle='-.', label='Dire Median') ax2.plot(x, [premier_radiant_mean for _ in range(10)], color='b', linestyle='-', label='Radiant Mean') ax2.plot(x, [premier_dire_mean for _ in range(10)], color='r', linestyle='-', label='Dire Mean') ax2.plot(x, [premier_radiant_std for _ in range(10)], color='b', linestyle='--', label='Radiant Std') ax2.plot(x, [premier_dire_std for _ in range(10)], color='r', linestyle='--', label='Dire Std') ax2.plot(x, [premier_radiant_median for _ in range(10)], color='b', linestyle='-.', label='Radiant Median') ax2.plot(x, [premier_dire_median for _ in range(10)], color='r', linestyle='-.', label='Dire Median') ax3.plot(x, [intl_radiant_mean for _ in range(10)], color='b', linestyle='-', label='Radiant Mean') ax3.plot(x, [intl_dire_mean for _ in range(10)], color='r', linestyle='-', label='Dire Mean') ax3.plot(x, [intl_radiant_std for _ in range(10)], color='b', linestyle='--', label='Radiant Std') ax3.plot(x, [intl_dire_std for _ in range(10)], color='r', linestyle='--', label='Dire Std') ax3.plot(x, [intl_radiant_median for _ in range(10)], color='b', linestyle='-.', label='Radiant Median') ax3.plot(x, [intl_dire_median for _ in range(10)], color='r', linestyle='-.', label='Dire Median') ax1.set_title("Professional League") ax2.set_title("Premier League") ax3.set_title("International") ax1.set_ylabel("Score") ax2.set_ylabel("Score") ax3.set_ylabel("Score") ax1.set_ylim([7,17]) ax2.set_ylim([7,17]) ax3.set_ylim([7,17]) ax1.legend(loc="lower right") ax2.legend(loc="lower right") ax3.legend(loc="lower right") plt.setp(ax1.get_xticklabels(), visible=False) plt.setp(ax2.get_xticklabels(), visible=False) plt.setp(ax3.get_xticklabels(), visible=False) fig.suptitle("Radiant vs. Dire Score Statistics") plt.show() # - # Things to note from these plots: # - The average and median of Radiant teams in all 3 levels of play are higher than those of Dire teams # - The standard deviations of both teams are similar to each other in all three leagues # - In all three leagues, the median scores are lower than the mean scores # Print out the variance of the scores for each team for each league print('Variance of Radiant Scores and Dire Scores') print('\tProfessional League') print('\t\tRadiant Variance: {}'.format(pro_radiant_var)) print('\t\tDire Variance: {}'.format(pro_dire_var)) print('\tPremier League') print('\t\tRadiant Variance: {}'.format(premier_radiant_var)) print('\t\tDire Variance: {}'.format(premier_dire_var)) print('\tInternational') print('\t\tRadiant Variance: {}'.format(intl_radiant_var)) print('\t\tDire Variance: {}'.format(intl_dire_var)) # The variances of each of the teams in the three levels of play are all fairly high and are all of the same magnitude. The variance doesn't seem to tell us much about the scores. # Print out the max scores for each team for each league print('Maximum Score of Radiant and Dire Teams') print('\tProfessional League') print('\t\tRadiant Maximum Score: {}'.format(pro_radiant_max)) print('\t\tDire Maximum Score: {}'.format(pro_dire_max)) print('\tPremier League') print('\t\tRadiant Maximum Score: {}'.format(premier_radiant_max)) print('\t\tDire Maximum Score: {}'.format(premier_dire_max)) print('\tInternational') print('\t\tRadiant Maximum Score: {}'.format(intl_radiant_max)) print('\t\tDire Maximum Score: {}'.format(intl_dire_max)) # This is interesting as well. 
Radiant teams in all three leagues of play actually had a lower maximum score than Dire teams, even though it appears that Radiant teams win more games than Dire teams do from my previous analysis. Also of note is that the maximum score of both Radiant and Dire teams falls as the level of play increases; this is presumably because teams get better at playing defense as they get better and increase their level of play. # Conclusions # From my analysis, it does appear that Radiant teams do have a slight advantage over Dire teams as the data showed that Radiant teams win more games than Dire teams do at all 3 higher levels of play. This is also supported by the fact that the mean and median scores of Radiant teams are higher than those of Dire teams. Of interesting note, however; the maximum scores of Dire teams in all three levels of play were higher than the maximum scores of Radiant teams. # So Radiant teams may have an advantage, but where is it coming from? My first idea is that the picking strategy to set up the gameplay is slightly unfair. My second idea is that there is some other internal gameplay feature that makes it unfair. So I did some research on the internet. # The internet confirmed that there is, in fact, a win advantage for Radiant teams over Dire teams. One figure even said there was as high as a 9.6% advantage for Radiant teams over Dire teams. It also appears that there is a consensus on the cause of the advantage, and apparently it is perceived to be the way the game map is set up.
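# The win-count gap discussed above can be given a quick significance check. This is a sketch
# that was not part of the original analysis; it reuses the per-league win counts computed
# earlier in this notebook and assumes scipy >= 1.7 for scipy.stats.binomtest.
# +
from scipy.stats import binomtest

# two-sided test of H0: P(radiant win) = 0.5, per league
for name, r_wins, d_wins in [
    ("Professional", pro_radiant_wins, pro_dire_wins),
    ("Premier", premier_radiant_wins, premier_dire_wins),
    ("International", intl_radiant_wins, intl_dire_wins),
]:
    res = binomtest(r_wins, n=r_wins + d_wins, p=0.5)
    share = r_wins / (r_wins + d_wins)
    print(f"{name}: radiant share = {share:.3f}, p-value = {res.pvalue:.4f}")
# -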
notebooks/GeneralStats/radiant_dire_stats.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np results = pd.DataFrame(data={"Dask":[604.84,726.58,1081.46,1734.15,2381.18,4847.182541], "Myria":[383.047918, 591.677918, 986.376918, 1821.948918, 2825.507918, 5515.205029], "Spark":[377.560062, 650.516533, 1035.264338, 1917.513293, 2711.942099, 5541.589612], #"SciDB":[1246.487, 2520.439, 5028.889, 10076.557, 14947.759, 31251.326], #"Tensorflow":[4072.3, 7772.6, 14628.3, 28643.8, 43266.459, 91091], "Cloudknot":[np.nan, 1272, 1919, 3380, 3848, 6026], "Subjects":[1, 2, 4, 8, 12, 25]}) results = results.reset_index().melt(id_vars=['Subjects']).set_index(['Subjects', 'variable']) results = results.reset_index() results = results[results["variable"] !="index"] results = results.rename({"variable":"System", "value":"Time(sec)"}, axis=1) results.head() # + import altair as alt from vega_datasets import data c = alt.Chart(results).mark_line() c.encode( alt.X("Subjects:Q", scale=alt.Scale(base=2, type="log"), axis=alt.Axis(values=[0, 1, 2, 4, 8, 12, 25])), alt.Y('Time(sec):Q', scale=alt.Scale(base=10, type="log")), color=alt.Color('System', legend=alt.Legend(orient="bottom-right", title="System")) ).configure_legend(strokeColor='black', strokeWidth=2, cornerRadius=5, padding=5) # - s = results[results["Subjects"]==25]["Time(sec)"] s.loc[29] / s
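# The final ratio indexes by the positional label 29, which corresponds to the Cloudknot row at
# 25 subjects in the melted frame. Keying by system name makes that intent explicit; a small
# sketch assuming the results frame defined above.
# +
at_25 = results[results["Subjects"] == 25].set_index("System")["Time(sec)"]
print(at_25["Cloudknot"] / at_25)  # runtime of each system relative to Cloudknot at 25 subjects
# -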
examples/scipy-2018-paper-examples/compare_results_mri.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/ericaburdett/DS-Unit-2-Kaggle-Challenge/blob/master/module1-decision-trees/221_assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="DihjYP66WNHt" colab_type="text" # Lambda School Data Science # # *Unit 2, Sprint 2, Module 1* # # --- # + [markdown] colab_type="text" id="7IXUfiQ2UKj6" # # Decision Trees # # ## Assignment # - [ ] [Sign up for a Kaggle account](https://www.kaggle.com/), if you don’t already have one. Go to our Kaggle InClass competition website. You will be given the URL in Slack. Go to the Rules page. Accept the rules of the competition. Notice that the Rules page also has instructions for the Submission process. The Data page has feature definitions. # - [ ] Do train/validate/test split with the Tanzania Waterpumps data. # - [ ] Begin with baselines for classification. # - [ ] Select features. Use a scikit-learn pipeline to encode categoricals, impute missing values, and fit a decision tree classifier. # - [ ] Get your validation accuracy score. # - [ ] Get and plot your feature importances. # - [ ] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.) # - [ ] Commit your notebook to your fork of the GitHub repo. # # # ## Stretch Goals # # ### Reading # # - A Visual Introduction to Machine Learning # - [Part 1: A Decision Tree](http://www.r2d3.us/visual-intro-to-machine-learning-part-1/) # - [Part 2: Bias and Variance](http://www.r2d3.us/visual-intro-to-machine-learning-part-2/) # - [Decision Trees: Advantages & Disadvantages](https://christophm.github.io/interpretable-ml-book/tree.html#advantages-2) # - [How a Russian mathematician constructed a decision tree — by hand — to solve a medical problem](http://fastml.com/how-a-russian-mathematician-constructed-a-decision-tree-by-hand-to-solve-a-medical-problem/) # - [How decision trees work](https://brohrer.github.io/how_decision_trees_work.html) # - [Let’s Write a Decision Tree Classifier from Scratch](https://www.youtube.com/watch?v=LDRbO9a6XPU) — _Don’t worry about understanding the code, just get introduced to the concepts. This 10 minute video has excellent diagrams and explanations._ # - [Random Forests for Complete Beginners: The definitive guide to Random Forests and Decision Trees](https://victorzhou.com/blog/intro-to-random-forests/) # # # ### Doing # - [ ] Add your own stretch goal(s) ! # - [ ] Define a function to wrangle train, validate, and test sets in the same way. Clean outliers and engineer features. (For example, [what columns have zeros and shouldn't?](https://github.com/Quartz/bad-data-guide#zeros-replace-missing-values) What columns are duplicates, or nearly duplicates? Can you extract the year from date_recorded? Can you engineer new features, such as the number of years from waterpump construction to waterpump inspection?) # - [ ] Try other [scikit-learn imputers](https://scikit-learn.org/stable/modules/impute.html). # - [ ] Make exploratory visualizations and share on Slack. 
# # # #### Exploratory visualizations # # Visualize the relationships between feature(s) and target. I recommend you do this with your training set, after splitting your data. # # For this problem, you may want to create a new column to represent the target as a number, 0 or 1. For example: # # ```python # train['functional'] = (train['status_group']=='functional').astype(int) # ``` # # # # You can try [Seaborn "Categorical estimate" plots](https://seaborn.pydata.org/tutorial/categorical.html) for features with reasonably few unique values. (With too many unique values, the plot is unreadable.) # # - Categorical features. (If there are too many unique values, you can replace less frequent values with "OTHER.") # - Numeric features. (If there are too many unique values, you can [bin with pandas cut / qcut functions](https://pandas.pydata.org/pandas-docs/stable/getting_started/basics.html?highlight=qcut#discretization-and-quantiling).) # # You can try [Seaborn linear model plots](https://seaborn.pydata.org/tutorial/regression.html) with numeric features. For this classification problem, you may want to use the parameter `logistic=True`, but it can be slow. # # You do _not_ need to use Seaborn, but it's nice because it includes confidence intervals to visualize uncertainty. # # #### High-cardinality categoricals # # This code from a previous assignment demonstrates how to replace less frequent values with 'OTHER' # # ```python # # Reduce cardinality for NEIGHBORHOOD feature ... # # # Get a list of the top 10 neighborhoods # top10 = train['NEIGHBORHOOD'].value_counts()[:10].index # # # At locations where the neighborhood is NOT in the top 10, # # replace the neighborhood with 'OTHER' # train.loc[~train['NEIGHBORHOOD'].isin(top10), 'NEIGHBORHOOD'] = 'OTHER' # test.loc[~test['NEIGHBORHOOD'].isin(top10), 'NEIGHBORHOOD'] = 'OTHER' # ``` # # + colab_type="code" id="o9eSnDYhUGD7" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="df3a7de3-73f2-41b2-e018-fa460fdfbe9a" import sys # If you're on Colab: if 'google.colab' in sys.modules: DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/' # !pip install category_encoders==2.* # !pip install pandas-profiling==2.* # If you're working locally: else: DATA_PATH = '../data/' # + colab_type="code" id="QJBD4ruICm1m" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e082a46a-d677-41ad-91e5-6f04d2755039" import pandas as pd from sklearn.model_selection import train_test_split train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'), pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv')) test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv') sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv') train, val = train_test_split(train, train_size=0.80, test_size=0.20, stratify=train['status_group'], random_state=42) train.shape, val.shape, test.shape # + [markdown] id="b9fc85iaVEFv" colab_type="text" # ## I ran the pandas profile but didn't want it to run multiple times, so I saved it as html and removed all pertinent code # + id="088ZL7OOf5WX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 411} outputId="6e9eba27-35a1-41c0-e576-e292c426586c" train.head() # + id="jRk8E5p3u2Of" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="6ecfd7d3-bfdf-43bf-93f0-5322052382da" train.describe() # + id="sLUk0PZjf8kK" colab_type="code" colab={} import numpy as np def wrangle(X): """Wrangle 
train, validate, and test sets in the same way""" # Prevent SettingWithCopyWarning X = X.copy() # About 3% of the time, latitude has small values near zero, # outside Tanzania, so we'll treat these values like zero. X['latitude'] = X['latitude'].replace(-2e-08, 0) # When columns have zeros and shouldn't, they are like null values. # So we will replace the zeros with nulls, and impute missing values later. cols_with_zeros = ['longitude', 'latitude', 'construction_year', 'population', 'gps_height'] for col in cols_with_zeros: X[col] = X[col].replace(0, np.nan) # quantity & quantity_group are duplicates, so drop one X = X.drop(columns='quantity_group') # id column is 100% unique values so won't be helpful X = X.drop(columns='id') # recorded_by is the same for every row so won't be helpful X = X.drop(columns='recorded_by') # payment and payment_type are identical though worded differently X = X.drop(columns='payment_type') # return the wrangled dataframe return X train = wrangle(train) val = wrangle(val) test = wrangle(test) # + id="Tuf0xqIgi_FS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="2ff397a6-2b1e-4327-8060-ea8a132aaa3b" # Baselines for classification train['status_group'].value_counts(normalize=True) # + id="D1sg0ni1jSkd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="9ee64552-e0c1-4275-951e-12f1f9dcc0c5" val['status_group'].value_counts(normalize=True) # + id="yYiPesbxoaQ-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="e6fdb9bb-da47-4e3f-d06e-8c3eb7d01bc6" # Target is status_group target = 'status_group' # Get dataframe without target column train_features = train.drop(columns=[target]) # Get list of numeric features numeric_features = train_features.select_dtypes(include='number').columns.tolist() # Get cardinality of nonnumeric features cardinality = train_features.select_dtypes(exclude='number').nunique() # Get columns with cardinality under 50 categorical_features = cardinality[cardinality <= 50].index.tolist() # Combine features features = numeric_features + categorical_features print(len(features)) print(features) # + id="M6ZNHiRL1IOb" colab_type="code" colab={} X_train = train[features] y_train = train[target] X_val = val[features] y_val = val[target] X_test = test[features] # + id="4RL8um3piJha" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="b3337084-185f-43b1-e323-044d35f09093" # Imports to prepare for sklearn pipeline import category_encoders as ce from sklearn.impute import SimpleImputer from sklearn.tree import DecisionTreeClassifier from sklearn.pipeline import make_pipeline pipeline = make_pipeline( ce.OneHotEncoder(use_cat_names=True), SimpleImputer(strategy='median'), DecisionTreeClassifier(random_state = 42) ) # Fit on training set pipeline.fit(X_train, y_train) # Score on training and validation sets print('Training Accuracy:', pipeline.score(X_train, y_train)) print('Validation Accuracy:', pipeline.score(X_val, y_val)) # + [markdown] id="setqBz3NltOs" colab_type="text" # ### Validation accuracy is higher than baseline measures, so I think I'll stop there for now. 
# # + id="460nbmPpl31C" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="64789225-ebd2-446b-b111-c90892db4db8" # Get and plot feature importancees import matplotlib.pyplot as plt model = pipeline.named_steps['decisiontreeclassifier'] encoder = pipeline.named_steps['onehotencoder'] encoded_columns = encoder.transform(X_val).columns coefficients = pd.Series(model.feature_importances_, encoded_columns) # Plot plt.figure(figsize=(10, 30)) coefficients.sort_values().plot.barh(color='grey'); # + id="lo2MFJ6YgmDa" colab_type="code" colab={} # Get Kaggle submission y_pred = pipeline.predict(X_test) submission = sample_submission.copy() submission['status_group'] = y_pred submission.to_csv('submission_1', index=False)
module1-decision-trees/221_assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os os.environ['PYSPARK_PYTHON'] = '/opt/conda/bin/python' import datafaucet as dfc import datafaucet.logging as log dfc.project.load('prod') engine = dfc.project.engine() spark = engine.context() # + md = dfc.project.metadata() for path in md['resources'].keys(): md_src = dfc.project.resource(path, 'source') md_trg = dfc.project.resource(path, 'target') engine.copy(md_src,md_trg) # - engine.stop()
datafaucet/cli/templates/ingest/{{cookiecutter.project_name}}/src/daily.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # hw3
#
# For this homework, you are a data scientist working for Pronto (before the end of their contract with the City of Seattle). Your job is to assist in determining how to do end-of-day adjustments in the number of bikes at stations so that all stations will have enough bikes for the next day of operation (as estimated by the weekday average for the station for the year). Your assistance will help in constructing a plan for each day of the week that specifies how many bikes should be moved from each station and how many bikes must be delivered to each station. Use the 2015 trip data.
#
# Your assignment is to construct plots of the differences between 'from' and 'to' counts for each station by day of the week. Do this as a set of 7 subplots. You should use at least one function to construct your plots.
#
# ### Grading
#
# 2-pts: create a dataframe with station count averages by day-of-week
#
# 1-pt: structure the 7 day-of-week plots as subplots
#
# 1-pt: label the plots by day-of-week
#
# 1-pt: label the x-axis for plots in the last row and label the y-axis for plots in the left-most column
#
#
# ## Solution
#

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

data = pd.read_csv("2015_trip_data.csv")
data.head()

# ### Create a dataframe with station count averages by day-of-week

times = pd.DatetimeIndex(data['starttime'])
data['day_of_week'] = times.dayofweek

# calculate how many weeks the data covers (roughly 52, which is used as the divisor below)
df = pd.DataFrame({'start' : pd.to_datetime(data['starttime'][0]),
                   'end' : pd.to_datetime(data['starttime'][len(data) - 1])}, index = [0])
(df['end']-df['start']).apply(lambda x: x/np.timedelta64(1,'W'))

# **From_station average count**

# create from_station average counts dataframe by day of week
week_name = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']
ls_from = []
for i in range(0,7):
    subset = data[data.day_of_week == i]
    station_counts = pd.value_counts(subset['from_station_id'])
    station_mean = pd.DataFrame({week_name[i] : station_counts/52})
    station_mean = round(station_mean,1)
    ls_from.append(station_mean)
df_from = pd.concat(ls_from, axis=1)

# **To_station average count**

# create to_station average counts dataframe by day of week
ls_to = []
for i in range(0,7):
    subset = data[data.day_of_week == i]
    station_counts = pd.value_counts(subset['to_station_id'])
    station_mean = pd.DataFrame({week_name[i] : station_counts/52})
    station_mean = round(station_mean,1)
    ls_to.append(station_mean)
df_to = pd.concat(ls_to, axis=1)

# **Difference in averages**

df_diff = df_from - df_to
df_diff

# ### Create plots

def plot_diff(df_diff):
    n_groups = len(df_diff.index)
    index = np.arange(n_groups)  # The "raw" x-axis of the bar plot
    columns = df_diff.columns
    n_columns = len(columns)
    # Create the figure once so all 7 day-of-week plots end up as subplots of it
    fig = plt.figure(figsize=(12, 20))  # Controls global properties of the bar plot
    for i in range(0, n_columns):
        plt.subplot(n_columns, 1, i+1)
        rects = plt.bar(index, df_diff[columns[i]])
        plt.xticks(index, df_diff.index)
        _, labels = plt.xticks()  # Get the new labels of the plot
        plt.setp(labels, rotation=90)
        plt.xlabel('station')
        plt.ylabel('average count diff')
        plt.title(df_diff.columns[i])
    plt.show()

# call function
plot_diff(df_diff)
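
# As an alternative to the loop above, here is a hedged sketch that uses `plt.subplots`
# with a shared x-axis, so that only the bottom panel carries the station labels. This
# matches the grading note about labelling the x-axis only on the last row. It reuses
# `df_diff` from the cell above.

# +
def plot_diff_shared(df_diff):
    index = np.arange(len(df_diff.index))
    fig, axes = plt.subplots(len(df_diff.columns), 1, figsize=(12, 20), sharex=True)
    for ax, day in zip(axes, df_diff.columns):
        ax.bar(index, df_diff[day])
        ax.set_title(day)
        ax.set_ylabel('average count diff')   # every panel is in the left-most column here
    # only the last row gets the x-axis tick labels and label
    axes[-1].set_xticks(index)
    axes[-1].set_xticklabels(df_diff.index, rotation=90)
    axes[-1].set_xlabel('station')
    plt.show()

plot_diff_shared(df_diff)
# -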
HW3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- job_id = 'raw_openmiir_baseline_chmean' # + import os os.environ["THEANO_FLAGS"] = "floatX=float64,device=cpu" # usual # os.environ["THEANO_FLAGS"] = "floatX=float64,device=gpu" base_selectors = dict() SEED = 42 hdf5name = 'data/OpenMIIR-Perception-512Hz.hdf5' # defaults hyper_params = dict( only_1_inner_fold=False, # FIXME this is only for dev classification_target_source='targets', ch_mean=True ) # + import logging logging.basicConfig(level=logging.INFO) #logging.getLogger('deepthought.datasets').setLevel(logging.DEBUG) # debug dataset log = logging.getLogger('deepthought') import numpy as np np.set_printoptions(precision=4) # + from deepthought.experiments.encoding.datasets.openmiir import OpenMIIRNestedCVFoldGenerator from deepthought.experiments.encoding.experiment_templates.svc_baseline import SVCBaseline from deepthought.experiments.encoding.classifiers.linear_svc import UntunedLinearSVCClassifierFactory from deepthought.experiments.encoding.classifiers.simple_nn import SimpleNNClassifierFactory # + exp = SVCBaseline(job_id, hdf5name, OpenMIIRNestedCVFoldGenerator(), hyper_params=hyper_params, default_params=None, base_selectors=base_selectors) import time starttime=time.time() # exp.run(verbose=True) exp.run(verbose=False) endtime=time.time() print('Running time: %s Seconds'%(endtime-starttime)) # + # exp = SVCBaseline(job_id, # hdf5name, # OpenMIIRNestedCVFoldGenerator(), # hyper_params=hyper_params, # default_params=None, # base_selectors=base_selectors) # import time # starttime=time.time() # exp.run(classifiers=(('simple_nn', SimpleNNClassifierFactory()),)) # # # exp.run(verbose=True) # # exp.run(verbose=False) # endtime=time.time() # print('Running time: %s Seconds'%(endtime-starttime)) # TypeError: run() got an unexpected keyword argument 'classifiers' # + # print hdf5name # + # print OpenMIIRNestedCVFoldGenerator() # + # print base_selectors # + # print default_params # -
Train OpenMIIR baseline ch_mean RAW.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # linear regression
# find f(x) that best fits the sample data
# often, the mean squared error avg((y - f(x))^2) is minimized
# reference: https://qiita.com/m-hayashi/items/ee379c86e3e18f0ddc6d

import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
from sklearn import linear_model
# %matplotlib inline

# datasets
X = pd.DataFrame([0, 1, 2, 3, 4, 5])
Y = pd.DataFrame([3, 2, 5, 7, 6, 10])

# define model
model = linear_model.LinearRegression()
model.fit(X, Y)  # warning seems to be harmless

# view model
px = np.arange(int(X.min()), int(X.max()), 0.01)[:, np.newaxis]
py = model.predict(px)
plt.scatter(X, Y, color='red')
plt.plot(px, py, color='blue')

print(model.coef_)
print(model.intercept_)
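
# Since the fit minimizes the mean squared error, it can be useful to report that error
# (and R^2) explicitly. A minimal sketch using sklearn.metrics on the variables defined
# above:

# +
from sklearn.metrics import mean_squared_error, r2_score

Y_pred = model.predict(X)
print('MSE :', mean_squared_error(Y, Y_pred))
print('R^2 :', r2_score(Y, Y_pred))
# -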
regression/linear_regression/simple.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python3
# ---

# # twee2vec
# https://github.com/KavelCortex/twee2vec
#
# Uses tweepy to crawl a specific user's tweets, then word2vec to vectorize the user.

# ## load models
# It might take a few minutes to load a model. Usually, loading one model is enough, since loading all of them takes a significant amount of memory (26GB+ if fully loaded).

import utils.model as model

model_crawl=model.load_crawl()
#model_googlenews=model.load_googlenews()

model_crawl.similar_by_word('reddit')

# The GoogleNews model is not loaded above, so keep this call commented out
# unless model_googlenews has been loaded as well.
#model_googlenews.similar_by_word('reddit')

# ## Vectorize user

import importlib
import utils.vectorizer as vectorizer  # assumed module location, mirroring utils.model above
importlib.reload(vectorizer)

vector=vectorizer.vectorize_user(model=model_crawl,screen_name='alsareini',limit=200)
vector
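
# To sanity-check the user vector, one option is to look up the words whose embeddings
# are closest to it. This is a hedged sketch that assumes the loaded model behaves like
# a gensim KeyedVectors object (it already exposes similar_by_word above, so
# similar_by_vector should be available as well).

# +
import numpy as np

user_vec = np.asarray(vector, dtype=float)
print(model_crawl.similar_by_vector(user_vec, topn=10))
# -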
twee2vec_demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/nikitakaraevv/pointnet/blob/master/nbs/PointNetClass.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="NpvL68OfBEQC" colab_type="text" # # PointNet # + [markdown] id="rbPm1WS7UWe6" colab_type="text" # This is an implementation of [PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation](https://arxiv.org/abs/1612.00593) using PyTorch. # # + [markdown] id="-z7n_pw4SMWl" colab_type="text" # ## Getting started # + [markdown] id="ZGjwJhn0VTVu" colab_type="text" # Don't forget to turn on GPU if you want to start training directly. # # # **Runtime** -> **Change runtime type**-> **Hardware accelerator** # # # + id="TJ47VNF7fmTS" colab_type="code" colab={} import numpy as np import math import random import os import torch import scipy.spatial.distance from torch.utils.data import Dataset, DataLoader from torchvision import transforms, utils import plotly.graph_objects as go import plotly.express as px # + id="zncVRbuwtV2N" colab_type="code" outputId="e0ba582c-229f-444a-babc-2d398e1621ab" colab={"base_uri": "https://localhost:8080/", "height": 105} # !pip install path.py; from path import Path # + id="vpzTlKjmlr2q" colab_type="code" colab={} random.seed = 42 # + [markdown] id="vg6HhI7eU80o" colab_type="text" # Download the [dataset](http://3dvision.princeton.edu/projects/2014/3DShapeNets/) directly to the Google Colab Runtime. It comprises 10 categories, 3,991 models for training and 908 for testing. # + id="N7zSMatAwXAW" colab_type="code" outputId="5b8f6f86-5c71-4be1-8944-2acac2786d12" colab={"base_uri": "https://localhost:8080/", "height": 224} # !wget http://3dvision.princeton.edu/projects/2014/3DShapeNets/ModelNet10.zip # + id="k-JCNR1QyL07" colab_type="code" outputId="45166740-dedc-4ca8-c90e-632ad4c4f275" colab={"base_uri": "https://localhost:8080/", "height": 34} # !unzip -q ModelNet10.zip; # + id="Xyu78RWIQEQJ" colab_type="code" colab={} path = Path("ModelNet10") # + id="y2i_0ECIcR1X" colab_type="code" outputId="3fc754aa-4d96-4350-d7f5-c10ccbb62eea" colab={"base_uri": "https://localhost:8080/", "height": 187} folders = [dir for dir in sorted(os.listdir(path)) if os.path.isdir(path/dir)] classes = {folder: i for i, folder in enumerate(folders)}; classes # + [markdown] id="krbtoQtTXOBa" colab_type="text" # This dataset consists of **.off** files that contain meshes represented by *vertices* and *triangular faces*. 
# # We will need a function to read this type of files: # + id="TXEzgwr_Mfc0" colab_type="code" colab={} def read_off(file): if 'OFF' != file.readline().strip(): raise('Not a valid OFF header') n_verts, n_faces, __ = tuple([int(s) for s in file.readline().strip().split(' ')]) verts = [[float(s) for s in file.readline().strip().split(' ')] for i_vert in range(n_verts)] faces = [[int(s) for s in file.readline().strip().split(' ')][1:] for i_face in range(n_faces)] return verts, faces # + id="ddne6NHcPIHn" colab_type="code" colab={} with open(path/"bed/train/bed_0001.off", 'r') as f: verts, faces = read_off(f) # + id="CpH2NKXNlPl4" colab_type="code" colab={} i,j,k = np.array(faces).T x,y,z = np.array(verts).T # + id="Le4-KXs1j1E3" colab_type="code" outputId="50f1dec4-c58b-43f5-f62a-ae39e83af1b6" colab={"base_uri": "https://localhost:8080/", "height": 34} len(x) # + [markdown] id="H2XqwjkJXqLE" colab_type="text" # Don't be afraid of this huge function. It's just to display animated rotation of meshes and point clouds. # + id="2dbIQBLykGpX" colab_type="code" colab={} def visualize_rotate(data): x_eye, y_eye, z_eye = 1.25, 1.25, 0.8 frames=[] def rotate_z(x, y, z, theta): w = x+1j*y return np.real(np.exp(1j*theta)*w), np.imag(np.exp(1j*theta)*w), z for t in np.arange(0, 10.26, 0.1): xe, ye, ze = rotate_z(x_eye, y_eye, z_eye, -t) frames.append(dict(layout=dict(scene=dict(camera=dict(eye=dict(x=xe, y=ye, z=ze)))))) fig = go.Figure(data=data, layout=go.Layout( updatemenus=[dict(type='buttons', showactive=False, y=1, x=0.8, xanchor='left', yanchor='bottom', pad=dict(t=45, r=10), buttons=[dict(label='Play', method='animate', args=[None, dict(frame=dict(duration=50, redraw=True), transition=dict(duration=0), fromcurrent=True, mode='immediate' )] ) ] ) ] ), frames=frames ) return fig # + id="0huQ5maYxBa9" colab_type="code" outputId="9f0b8926-a38a-4bb3-877b-a1e726b7abf0" colab={"base_uri": "https://localhost:8080/", "height": 542} visualize_rotate([go.Mesh3d(x=x, y=y, z=z, color='lightpink', opacity=0.50, i=i,j=j,k=k)]).show() # + [markdown] id="1fpGrWndRVYw" colab_type="text" # This mesh definitely looks like a bed. # + id="y9hL_IOoMVzP" colab_type="code" outputId="6177fc83-9797-455e-dd8e-d200397c073b" colab={"base_uri": "https://localhost:8080/", "height": 542} visualize_rotate([go.Scatter3d(x=x, y=y, z=z, mode='markers')]).show() # + [markdown] id="ah0LVBEBRaGS" colab_type="text" # Unfortunately, that's not the case for its vertices. It would be difficult for PointNet to classify point clouds like this one. # + [markdown] id="jBNJ__37RBvi" colab_type="text" # First things first, let's write a function to accurately visualize point clouds so we could see vertices better. # + id="VovK365pQ12G" colab_type="code" colab={} def pcshow(xs,ys,zs): data=[go.Scatter3d(x=xs, y=ys, z=zs, mode='markers')] fig = visualize_rotate(data) fig.update_traces(marker=dict(size=2, line=dict(width=2, color='DarkSlateGrey')), selector=dict(mode='markers')) fig.show() # + id="h6CRZdE2Qw5J" colab_type="code" outputId="3b54278e-48ea-41b2-8125-9bbafc3fe63b" colab={"base_uri": "https://localhost:8080/", "height": 542} pcshow(x,y,z) # + [markdown] id="axdsyO0wWZEB" colab_type="text" # ## Transforms # + [markdown] id="7tJZHWppZ85P" colab_type="text" # As we want it to look more like a real bed, let's write a function to sample points on the surface uniformly. 
# + [markdown] id="Pee3OqfyhSdt" colab_type="text" # ### Sample points # + id="zCgPQhfvh7R3" colab_type="code" colab={} class PointSampler(object): def __init__(self, output_size): assert isinstance(output_size, int) self.output_size = output_size def triangle_area(self, pt1, pt2, pt3): side_a = np.linalg.norm(pt1 - pt2) side_b = np.linalg.norm(pt2 - pt3) side_c = np.linalg.norm(pt3 - pt1) s = 0.5 * ( side_a + side_b + side_c) return max(s * (s - side_a) * (s - side_b) * (s - side_c), 0)**0.5 def sample_point(self, pt1, pt2, pt3): # barycentric coordinates on a triangle # https://mathworld.wolfram.com/BarycentricCoordinates.html s, t = sorted([random.random(), random.random()]) f = lambda i: s * pt1[i] + (t-s)*pt2[i] + (1-t)*pt3[i] return (f(0), f(1), f(2)) def __call__(self, mesh): verts, faces = mesh verts = np.array(verts) areas = np.zeros((len(faces))) for i in range(len(areas)): areas[i] = (self.triangle_area(verts[faces[i][0]], verts[faces[i][1]], verts[faces[i][2]])) sampled_faces = (random.choices(faces, weights=areas, cum_weights=None, k=self.output_size)) sampled_points = np.zeros((self.output_size, 3)) for i in range(len(sampled_faces)): sampled_points[i] = (self.sample_point(verts[sampled_faces[i][0]], verts[sampled_faces[i][1]], verts[sampled_faces[i][2]])) return sampled_points # + id="Xwg7LG6mkzgN" colab_type="code" colab={} pointcloud = PointSampler(3000)((verts, faces)) # + id="m5sSdqp-iTuA" colab_type="code" outputId="f36bfad1-aa64-4f64-d152-3a564ac97482" colab={"base_uri": "https://localhost:8080/", "height": 542} pcshow(*pointcloud.T) # + [markdown] id="O5ZsXeLOrFTT" colab_type="text" # This pointcloud looks much more like a bed! # + [markdown] id="OXU9PdRqbbBx" colab_type="text" # ### Normalize # + [markdown] id="aCduIRX6uiDs" colab_type="text" # Unit sphere # + id="UR3r0WPdWbHN" colab_type="code" colab={} class Normalize(object): def __call__(self, pointcloud): assert len(pointcloud.shape)==2 norm_pointcloud = pointcloud - np.mean(pointcloud, axis=0) norm_pointcloud /= np.max(np.linalg.norm(norm_pointcloud, axis=1)) return norm_pointcloud # + id="rfMnH_o8aIWe" colab_type="code" colab={} norm_pointcloud = Normalize()(pointcloud) # + id="4fGlqqqjaQGF" colab_type="code" outputId="dca4cdf6-fffd-468a-9fe8-6623805fc0e3" colab={"base_uri": "https://localhost:8080/", "height": 542} pcshow(*norm_pointcloud.T) # + [markdown] id="eTz_SFrDhezz" colab_type="text" # Notice that axis limits have changed. # + [markdown] id="4LtFfliNuxw3" colab_type="text" # ### Augmentations # + [markdown] id="TbYrmnasZAUg" colab_type="text" # Let's add *random rotation* of the whole pointcloud and random noise to its points. # + id="FHAvoR7wuwS6" colab_type="code" colab={} class RandRotation_z(object): def __call__(self, pointcloud): assert len(pointcloud.shape)==2 theta = random.random() * 2. 
* math.pi rot_matrix = np.array([[ math.cos(theta), -math.sin(theta), 0], [ math.sin(theta), math.cos(theta), 0], [0, 0, 1]]) rot_pointcloud = rot_matrix.dot(pointcloud.T).T return rot_pointcloud class RandomNoise(object): def __call__(self, pointcloud): assert len(pointcloud.shape)==2 noise = np.random.normal(0, 0.02, (pointcloud.shape)) noisy_pointcloud = pointcloud + noise return noisy_pointcloud # + id="Aektc3DZwbc9" colab_type="code" colab={} rot_pointcloud = RandRotation_z()(norm_pointcloud) noisy_rot_pointcloud = RandomNoise()(rot_pointcloud) # + id="GcLIa7KmweAL" colab_type="code" outputId="bcc59024-9635-42a6-82a2-2422967321e9" colab={"base_uri": "https://localhost:8080/", "height": 542} pcshow(*noisy_rot_pointcloud.T) # + [markdown] id="AE6QmxhRbwsY" colab_type="text" # ### ToTensor # + id="ctHIvE-Kbr-m" colab_type="code" colab={} class ToTensor(object): def __call__(self, pointcloud): assert len(pointcloud.shape)==2 return torch.from_numpy(pointcloud) # + id="Z7FK8nVrel4z" colab_type="code" outputId="905e1f41-09a5-4df0-c689-1fa76c79f813" colab={"base_uri": "https://localhost:8080/", "height": 136} ToTensor()(noisy_rot_pointcloud) # + id="IdQhWT4Q1GbF" colab_type="code" colab={} def default_transforms(): return transforms.Compose([ PointSampler(1024), Normalize(), ToTensor() ]) # + [markdown] id="mMIT1MeNSSO8" colab_type="text" # ## Dataset # + [markdown] id="_Sl3iM3CZM5n" colab_type="text" # Now we can create a [custom PyTorch Dataset](https://pytorch.org/tutorials/beginner/data_loading_tutorial.html) # + id="i06OYFNR8fa_" colab_type="code" colab={} class PointCloudData(Dataset): def __init__(self, root_dir, valid=False, folder="train", transform=default_transforms()): self.root_dir = root_dir folders = [dir for dir in sorted(os.listdir(root_dir)) if os.path.isdir(root_dir/dir)] self.classes = {folder: i for i, folder in enumerate(folders)} self.transforms = transform if not valid else default_transforms() self.valid = valid self.files = [] for category in self.classes.keys(): new_dir = root_dir/Path(category)/folder for file in os.listdir(new_dir): if file.endswith('.off'): sample = {} sample['pcd_path'] = new_dir/file sample['category'] = category self.files.append(sample) def __len__(self): return len(self.files) def __preproc__(self, file): verts, faces = read_off(file) if self.transforms: pointcloud = self.transforms((verts, faces)) return pointcloud def __getitem__(self, idx): pcd_path = self.files[idx]['pcd_path'] category = self.files[idx]['category'] with open(pcd_path, 'r') as f: pointcloud = self.__preproc__(f) return {'pointcloud': pointcloud, 'category': self.classes[category]} # + [markdown] id="WOEaUDwzZY3v" colab_type="text" # Transforms for training. 1024 points per cloud as in the paper! 
# + id="4pOl95glmphX" colab_type="code" colab={} train_transforms = transforms.Compose([ PointSampler(1024), Normalize(), RandRotation_z(), RandomNoise(), ToTensor() ]) # + id="xpDsEx00mZrx" colab_type="code" colab={} train_ds = PointCloudData(path, transform=train_transforms) valid_ds = PointCloudData(path, valid=True, folder='test', transform=train_transforms) # + id="HbIZKqkIrdQE" colab_type="code" outputId="163d5067-4873-4054-ab0d-ef0407caa86e" colab={"base_uri": "https://localhost:8080/", "height": 187} inv_classes = {i: cat for cat, i in train_ds.classes.items()}; inv_classes # + id="arTK45IlBeiZ" colab_type="code" outputId="9fcc9a3f-13e1-425d-fd23-047321f8d63a" colab={"base_uri": "https://localhost:8080/", "height": 102} print('Train dataset size: ', len(train_ds)) print('Valid dataset size: ', len(valid_ds)) print('Number of classes: ', len(train_ds.classes)) print('Sample pointcloud shape: ', train_ds[0]['pointcloud'].size()) print('Class: ', inv_classes[train_ds[0]['category']]) # + id="cVGtKLa4PthS" colab_type="code" colab={} train_loader = DataLoader(dataset=train_ds, batch_size=32, shuffle=True) valid_loader = DataLoader(dataset=valid_ds, batch_size=64) # + [markdown] id="Isb_97zOA8Tl" colab_type="text" # ## Model # + id="ZV20opgrv23I" colab_type="code" colab={} import torch import torch.nn as nn import numpy as np import torch.nn.functional as F class Tnet(nn.Module): def __init__(self, k=3): super().__init__() self.k=k self.conv1 = nn.Conv1d(k,64,1) self.conv2 = nn.Conv1d(64,128,1) self.conv3 = nn.Conv1d(128,1024,1) self.fc1 = nn.Linear(1024,512) self.fc2 = nn.Linear(512,256) self.fc3 = nn.Linear(256,k*k) self.bn1 = nn.BatchNorm1d(64) self.bn2 = nn.BatchNorm1d(128) self.bn3 = nn.BatchNorm1d(1024) self.bn4 = nn.BatchNorm1d(512) self.bn5 = nn.BatchNorm1d(256) def forward(self, input): # input.shape == (bs,n,3) bs = input.size(0) xb = F.relu(self.bn1(self.conv1(input))) xb = F.relu(self.bn2(self.conv2(xb))) xb = F.relu(self.bn3(self.conv3(xb))) pool = nn.MaxPool1d(xb.size(-1))(xb) flat = nn.Flatten(1)(pool) xb = F.relu(self.bn4(self.fc1(flat))) xb = F.relu(self.bn5(self.fc2(xb))) #initialize as identity init = torch.eye(self.k, requires_grad=True).repeat(bs,1,1) if xb.is_cuda: init=init.cuda() matrix = self.fc3(xb).view(-1,self.k,self.k) + init return matrix class Transform(nn.Module): def __init__(self): super().__init__() self.input_transform = Tnet(k=3) self.feature_transform = Tnet(k=64) self.conv1 = nn.Conv1d(3,64,1) self.conv2 = nn.Conv1d(64,128,1) self.conv3 = nn.Conv1d(128,1024,1) self.bn1 = nn.BatchNorm1d(64) self.bn2 = nn.BatchNorm1d(128) self.bn3 = nn.BatchNorm1d(1024) def forward(self, input): matrix3x3 = self.input_transform(input) # batch matrix multiplication xb = torch.bmm(torch.transpose(input,1,2), matrix3x3).transpose(1,2) xb = F.relu(self.bn1(self.conv1(xb))) matrix64x64 = self.feature_transform(xb) xb = torch.bmm(torch.transpose(xb,1,2), matrix64x64).transpose(1,2) xb = F.relu(self.bn2(self.conv2(xb))) xb = self.bn3(self.conv3(xb)) xb = nn.MaxPool1d(xb.size(-1))(xb) output = nn.Flatten(1)(xb) return output, matrix3x3, matrix64x64 class PointNet(nn.Module): def __init__(self, classes = 10): super().__init__() self.transform = Transform() self.fc1 = nn.Linear(1024, 512) self.fc2 = nn.Linear(512, 256) self.fc3 = nn.Linear(256, classes) self.bn1 = nn.BatchNorm1d(512) self.bn2 = nn.BatchNorm1d(256) self.dropout = nn.Dropout(p=0.3) self.logsoftmax = nn.LogSoftmax(dim=1) def forward(self, input): xb, matrix3x3, matrix64x64 = self.transform(input) xb = 
F.relu(self.bn1(self.fc1(xb))) xb = F.relu(self.bn2(self.dropout(self.fc2(xb)))) output = self.fc3(xb) return self.logsoftmax(output), matrix3x3, matrix64x64 # + id="580NErhyP1zD" colab_type="code" colab={} def pointnetloss(outputs, labels, m3x3, m64x64, alpha = 0.0001): criterion = torch.nn.NLLLoss() bs=outputs.size(0) id3x3 = torch.eye(3, requires_grad=True).repeat(bs,1,1) id64x64 = torch.eye(64, requires_grad=True).repeat(bs,1,1) if outputs.is_cuda: id3x3=id3x3.cuda() id64x64=id64x64.cuda() diff3x3 = id3x3-torch.bmm(m3x3,m3x3.transpose(1,2)) diff64x64 = id64x64-torch.bmm(m64x64,m64x64.transpose(1,2)) return criterion(outputs, labels) + alpha * (torch.norm(diff3x3)+torch.norm(diff64x64)) / float(bs) # + [markdown] id="2mLBRcfwP2Sq" colab_type="text" # ## Training loop # + [markdown] id="nUJOEaWdmsRD" colab_type="text" # You can find a pretrained model [here](https://drive.google.com/open?id=1nDG0maaqoTkRkVsOLtUAR9X3kn__LMSL) # + id="nvmmwhcePvt2" colab_type="code" outputId="75c86650-507b-4477-c1d7-a2011d9ba5bf" colab={"base_uri": "https://localhost:8080/", "height": 34} device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(device) # + id="l_DXKkfMPxP0" colab_type="code" colab={} pointnet = PointNet() pointnet.to(device); # + id="4ST7F9E5P0BI" colab_type="code" colab={} optimizer = torch.optim.Adam(pointnet.parameters(), lr=0.001) # + id="Rg8obt6FP6Ff" colab_type="code" colab={} def train(model, train_loader, val_loader=None, epochs=15, save=True): for epoch in range(epochs): pointnet.train() running_loss = 0.0 for i, data in enumerate(train_loader, 0): inputs, labels = data['pointcloud'].to(device).float(), data['category'].to(device) optimizer.zero_grad() outputs, m3x3, m64x64 = pointnet(inputs.transpose(1,2)) loss = pointnetloss(outputs, labels, m3x3, m64x64) loss.backward() optimizer.step() # print statistics running_loss += loss.item() if i % 10 == 9: # print every 10 mini-batches print('[Epoch: %d, Batch: %4d / %4d], loss: %.3f' % (epoch + 1, i + 1, len(train_loader), running_loss / 10)) running_loss = 0.0 pointnet.eval() correct = total = 0 # validation if val_loader: with torch.no_grad(): for data in val_loader: inputs, labels = data['pointcloud'].to(device).float(), data['category'].to(device) outputs, __, __ = pointnet(inputs.transpose(1,2)) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() val_acc = 100. 
* correct / total
                print('Valid accuracy: %d %%' % val_acc)

        # save the model
        if save:
            torch.save(pointnet.state_dict(), "save_"+str(epoch)+".pth")

# + id="Lp3uFKomP8AU" colab_type="code" outputId="90103bea-994f-49c1-aa4e-fedec9ab5d6a" colab={"base_uri": "https://localhost:8080/", "height": 1000}
train(pointnet, train_loader, valid_loader, save=False)

# + [markdown] id="_8W4gOI_P9a9" colab_type="text"
# ## Test

# + id="_iDtAJoYH4hE" colab_type="code" colab={}
from sklearn.metrics import confusion_matrix

# + id="pU70YWA7P-I_" colab_type="code" colab={}
pointnet = PointNet()
pointnet.load_state_dict(torch.load('save.pth'))
pointnet.eval();

# + id="54EP7PAyC2iQ" colab_type="code" outputId="28667e52-75d5-4d79-8e01-aeb39e74cca3" colab={"base_uri": "https://localhost:8080/", "height": 272}
all_preds = []
all_labels = []
with torch.no_grad():
    for i, data in enumerate(valid_loader):
        print('Batch [%4d / %4d]' % (i+1, len(valid_loader)))
        inputs, labels = data['pointcloud'].float(), data['category']
        outputs, __, __ = pointnet(inputs.transpose(1,2))
        _, preds = torch.max(outputs.data, 1)
        all_preds += list(preds.numpy())
        all_labels += list(labels.numpy())

# + id="NWNts-GKELNk" colab_type="code" outputId="07e86307-aa15-4924-dd2d-70a4fdafc820" colab={"base_uri": "https://localhost:8080/", "height": 187}
cm = confusion_matrix(all_labels, all_preds); cm

# + id="VcS7dXw5Rkae" colab_type="code" colab={}
import itertools
import numpy as np
import matplotlib.pyplot as plt

# function from https://deeplizard.com/learn/video/0LhiS6yu2qQ
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')

# + id="Vg-hPQ8ERpj7" colab_type="code" outputId="a8c18903-b9ed-40ff-8e30-d56a32dfcc53" colab={"base_uri": "https://localhost:8080/", "height": 591}
plt.figure(figsize=(8,8))
plot_confusion_matrix(cm, list(classes.keys()), normalize=True)

# + id="BwRxAddUVxHT" colab_type="code" outputId="9e9ae444-0a08-48d8-a72d-7239ec26c01e" colab={"base_uri": "https://localhost:8080/", "height": 591}
plt.figure(figsize=(8,8))
plot_confusion_matrix(cm, list(classes.keys()), normalize=False)
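
# A quick follow-up on the confusion matrix above: the per-class validation accuracy is
# just the diagonal of `cm` divided by the row sums. A minimal sketch using the `cm` and
# `classes` objects already defined:

# +
per_class_acc = cm.diagonal() / cm.sum(axis=1)
for name, acc in zip(classes.keys(), per_class_acc):
    print('%-12s %.3f' % (name, acc))
# -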
nbs/PointNetClass.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib notebook import matplotlib.pyplot as plt import pandas as pd import numpy as np data = pd.read_csv('data/gapminder_gdp_oceania.csv') data.head() data.info() data.columns data.columns[2] data.columns[1:5] data.describe() # - Load European data # - look at first five rows # - last five # - find column names # - summary statistics # - `data.info` gives information about dataframe # - `data.columns` shows column names # - `data.head()` shows first rows # - `data.tail()` shows last rows # - `data.describe()` shows summary stats data.info data.info() europe = pd.read_csv('data/gapminder_gdp_europe.csv', index_col="country") europe.head() europe.iloc[0, 1] europe.loc["Albania", "gdpPercap_1952"] europe.loc["Albania", :] europe.loc[:, "gdpPercap_1952"] europe.loc[:, "gdpPercap_1952"].mean() europe.loc[:, "gdpPercap_1952"].describe() europe.loc['Italy':'Poland', 'gdpPercap_1962': 'gdpPercap_1972'] subset = europe.loc['Italy':'Poland', 'gdpPercap_1962': 'gdpPercap_1972'] print('The subset of data is\n', subset) print('The subset of data is\n', subset > 10000) mask = subset > 10000 print(subset[mask]) gapminder_all = pd.read_csv('data/gapminder_all.csv') gapminder_all.head() # ### Ask Three Questions about dataframe and answer with slice and dice
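
# As a hedged example of the kind of question this prompt is after: which European
# country had the highest GDP per capita in 1952, and what was the value? A one-line
# slice-and-dice answer using the `europe` dataframe loaded above:

question_col = 'gdpPercap_1952'
print(europe[question_col].idxmax(), europe[question_col].max())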
python/day_ii_session_I.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # things we need for NLP import nltk from nltk.stem.lancaster import LancasterStemmer stemmer = LancasterStemmer() # things we need for Tensorflow import numpy as np import tflearn import tensorflow as tf import random # - # # We’ll build a simple state-machine to handle responses, using our intents model (from the previous step) as our classifier. [That’s how chatbots work](https://medium.freecodecamp.com/how-chat-bots-work-dfff656a35e2). # # > A contextual chatbot framework is a classifier within a state-machine. # # - we’ll un-pickle our model and documents as well as reload our intents file # - Remember our chatbot framework is separate from our model build — you don’t need to rebuild your model unless the intent patterns change # - With several hundred intents and thousands of patterns the model could take several minutes to build # + # restore all of our data structures import pickle data = pickle.load(open("training_data", "rb")) words = data['words'] classes = data['classes'] train_x = data['train_x'] train_y = data['train_y'] # import our chat-bot intents file import json with open('Mop intents.json') as json_data: intents = json.load(json_data) # - """""input = keras.input(shape=[len(intent_train_x[0])]) intent_net = input intent_net = layers.Dense(8)(intent_net) intent_net = layers.Dense(8)(intent_net) intent_net = layers.Dense(len(intent_train_y[0], activation='softmax')(intent_net) intent_model = keras.Model(inputs=[input], outputs=[output]) intent_model.load('model1') """"" # + # Build neural network net = tflearn.input_data(shape=[None, len(train_x[0])]) net = tflearn.fully_connected(net, 8) net = tflearn.fully_connected(net, 8) net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax') net = tflearn.regression(net) # Define model and setup tensorboard model = tflearn.DNN(net, tensorboard_dir='tflearn_logs') # - # Util functions # + def clean_up_sentence(sentence): # tokenize the pattern sentence_words = nltk.word_tokenize(sentence) # stem each word sentence_words = [stemmer.stem(word.lower()) for word in sentence_words] return sentence_words # return bag of words array: 0 or 1 for each word in the bag that exists in the sentence def bow(sentence, words, show_details=False): # tokenize the pattern sentence_words = clean_up_sentence(sentence) # bag of words bag = [0]*len(words) for s in sentence_words: for i, w in enumerate(words): if w == s: bag[i] = 1 if show_details: print("found in bag: %s" % w) return(np.array(bag)) # - p1 = bow("what is mopedo?", words) print (p1) print (classes) # load our saved model model.load('./model.tflearn') # # # # # # # Checking that our model gives the same result as in the previous step # + def get_predicted_intent(predictions): return classes[np.argmax(predictions)] assert 'Mopedo' == get_predicted_intent(model.predict([p1])) # - # ## Response processor # + # data structure to hold user context context = {} ERROR_THRESHOLD = 0.25 def classify(sentence): # generate probabilities from the model results = model.predict([bow(sentence, words)])[0] # filter out predictions below a threshold results = [[i, r] for i, r in enumerate(results) if r > ERROR_THRESHOLD] # sort by strength of probability results.sort(key=lambda x: x[1], reverse=True) return_list = [] for r in results: return_list.append((classes[r[0]], 
r[1])) # return tuple of intent and probability return return_list def response(sentence, userID='123', show_details=False): results = classify(sentence) if results: for i in intents['intents']: if i['tag'] == results[0][0]: result = (random.choice(i['responses'])) return result else: return ("info@") # - classify("Mopedo") response('Mopedo') response('what services does mopedo provide?') classify("safety and maintenance") response('How safe is mopedo?') response('payments?') response('can I book ride for others?') response('Mopedo city boundaries?') response("how are they charged?") response("Any technical help") response(" help") response("..") response("ha") response("puja") response(".")
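
# To try the bot interactively, a small read-eval loop over the helpers above is enough.
# This is just a usage sketch; type 'quit' to stop.

# +
while True:
    user_input = input('> ')
    if user_input.lower() in ('quit', 'exit'):
        break
    print(response(user_input))
# -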
Chatbot2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import pandas as pd import matplotlib.pyplot as plt import seaborn as sns plt.style.use('fivethirtyeight') # + w19 = pd.read_csv('Data/Processed/weekend_geo_19.csv', dtype={'WEEK':'object','DAY_WEEK':'object','ZIP':'object'}) w20 = pd.read_csv('Data/Processed/weekend_geo_20.csv',dtype={'WEEK':'object','DAY_WEEK':'object','ZIP':'object'}) w21 = pd.read_csv('Data/Processed/weekend_geo_21.csv',dtype={'WEEK':'object','DAY_WEEK':'object','ZIP':'object'}) w19 = w19[w19.BOROUGH.isin(['Manhattan','Brooklyn'])] w20 = w20[w20.BOROUGH.isin(['Manhattan','Brooklyn'])] w21 = w21[w21.BOROUGH.isin(['Manhattan','Brooklyn'])] # - w19 = w19.groupby(['STATION','WEEK','DAY_WEEK','ZIP','BOROUGH']).ENTRIES.sum().reset_index() w20 = w20.groupby(['STATION','WEEK','DAY_WEEK','ZIP']).ENTRIES.sum().reset_index() w21 = w21.groupby(['STATION','WEEK','DAY_WEEK','ZIP']).ENTRIES.sum().reset_index() weekend = w19.merge(w20, on=['STATION','WEEK','DAY_WEEK','ZIP'], suffixes=(['_19','_20'])) weekend = weekend.merge(w21, on= ['STATION','WEEK','DAY_WEEK','ZIP']) weekend.head() #weekend['ENTRIES'] = weekend.ENTRIES.shift(1) boozy_zips = ['10019','10003','10036','11211','11201','11215'] # ### weekend to weekend weekly_sum = weekend.groupby(['WEEK']).sum().reset_index() daily_sum = weekend.groupby(['DAY_WEEK']).sum().reset_index() boozy_weekly_sum = weekend[weekend.ZIP.isin(boozy_zips)].groupby('WEEK').sum().reset_index() boozy_weekly_sum = (boozy_weekly_sum.rename(columns={'ENTRIES_19':'2019', 'ENTRIES_20':'2020', 'ENTRIES':'2021'})) weekly_melted = (boozy_weekly_sum.melt(id_vars=['WEEK'], var_name='Year' ,value_name='WEEKEND_ENTRIES')) plt.figure(figsize=(10,15)) sns.relplot(data=weekly_melted, x="WEEK", y="WEEKEND_ENTRIES", hue="Year",kind="line", palette='Set2', legend=True) plt.title('Weekend Late-night Traffic') plt.ylabel('Total Entries (10M)') # + weekly_sum = (weekly_sum.rename(columns={'ENTRIES_19':'2019', 'ENTRIES_20':'2020', 'ENTRIES':'2021'})) weekly_melted = (weekly_sum.melt(id_vars=['WEEK'], var_name='Year' ,value_name='WEEKEND_ENTRIES')) plt.figure(figsize=(10,15)) sns.relplot(data=weekly_melted, x="WEEK", y="WEEKEND_ENTRIES", hue="Year",kind="line", palette='Set2', legend=True) plt.title('Weekend Late-night Traffic in Nightlife ZIPs') plt.ylabel('Total Entries (M)') plt.axvspan(xmin='26', xmax='27', color='DeepSkyBlue', alpha=.2, label='4th July week') plt.ylim(bottom=0) plt.annotate('4th July Week', xy=('27',10000000), xytext=('29',8000000), arrowprops={}) plt.yticks([2000000,4000000,6000000,8000000,10000000,12000000],[2,4,6,8,10,12]) plt.savefig('Visualizations/comparison_years_boozy.png', bbox_inches='tight') #plt.gcf().axes[0].yaxis.get_major_formatter().set_scientific(False) # the dip in weeks 26 or 27 is 4th July weekend. 
# - weekend.head(2) # ### Top Stations manh = (weekend.loc[:,['STATION','ZIP','BOROUGH','ENTRIES']] [weekend.ZIP.isin(['10019','10003','10036'])]) brook = (weekend[weekend.ZIP.isin(['11211','11201','11215'])]) # + manh_top = (manh.groupby('STATION')['ENTRIES'].sum().reset_index()) manh_top = manh_top.sort_values('ENTRIES', ascending=False).reset_index(drop=True) brook_top = (brook.groupby('STATION')['ENTRIES'].sum().reset_index()) brook_top = brook_top.sort_values('ENTRIES', ascending=False).reset_index(drop=True) # + plt.figure(figsize=(8,6)) plt.bar(x=manh_top.STATION, height=manh_top.ENTRIES) plt.gcf().axes[0].yaxis.get_major_formatter().set_scientific(False) plt.suptitle("Night Traffic in Manhattan's Nighthlife ZIPs", size=20, c='steelblue') plt.title('(10019, 10003, 10036) 01/06/21 - 31/08/21', size=10) plt.xlabel('Station') plt.xticks(rotation=90) plt.ylabel('Total Late Night Traffic (Thousands)') plt.yticks(range(250000,1750001,250000),[250,500,750,1000,1250,1500,1750]) plt.savefig('Visualizations/manh_top.png',bbox_inches='tight') # - plt.figure(figsize=(10,6)) plt.bar(x=brook_top.STATION, height=brook_top.ENTRIES) #plt.gcf().axes[0].yaxis.get_major_formatter().set_scientific(False) plt.suptitle("Night Traffic in Brooklyn's Nighthlife ZIPs", size=20, c='steelblue') plt.title('(11211, 1121, 11215) 01/06/21 - 31/08/21', size=10) plt.xlabel('Station') plt.xticks(rotation=90) plt.ylabel('Total Late Night Traffic (Thousands)') plt.yticks(range(100000,700001,100000),[100,200,300,400,500,600,700]) plt.savefig('Visualizations/brook_top.png',bbox_inches='tight') # ### Top stations through the years boozy_total.bar() # + boozy_total_st = (weekend[weekend.ZIP.isin(boozy_zips)].groupby('ZIP').sum().reset_index() .sort_values('ENTRIES').reset_index(drop=True)) boozy_total_st = (boozy_total_st.rename(columns={'ENTRIES_19':'2019', 'ENTRIES_20':'2020', 'ENTRIES':'2021'})) boozy_total_st_melted = (boozy_total_st.melt(id_vars=['ZIP'], var_name='Year' ,value_name='WEEKEND_ENTRIES')) plt.figure(figsize=(9,5)) (sns.catplot(data=boozy_total_st_melted, x='ZIP', y='WEEKEND_ENTRIES', kind='bar', ci=None, hue='Year', palette='Set2', legend=True)) plt.title('Total Weekend Late-Night Entries by ZIP') plt.yticks(range(0,5000001,1000000),range(0,6)) plt.ylabel('Total Entries (M)') plt.savefig('Visualizations/total_entries_by_zip.png',bbox_inches='tight') # - boozy_total = weekend[weekend.ZIP.isin(boozy_zips)].sum().reset_index() boozy_total.sort_values('ENTRIES').reset_index(drop=True) boozy_total = (boozy_total.rename(columns={'ENTRIES_19':'2019', 'ENTRIES_20':'2020', 'ENTRIES':'2021'})) boozy_total_st_melted = (boozy_total_st.melt(id_vars=['ZIP'], var_name='Year' ,value_name='WEEKEND_ENTRIES')) #boozy_total_melted = (boozy_total.melt(id_vars=['ZIP'], var_name='Year' # ,value_name='WEEKEND_ENTRIES')) boozy_total = weekend[weekend.ZIP.isin(boozy_zips)] boozy_total # ### Day to day manh_top = (manh.groupby('STATION').sum().reset_index() .rename(columns={'ENTRIES_19':'2019','ENTRIES_20':'2020','ENTRIES':'2021'})) manh_melted = (manh_top.melt(id_vars=['STATION'], var_name='Year', value_name='TOTAL_ENTRIES')) sns.catplot(data=top_manh, x='STATION', y='ENTRIES_19', kind='bar') # + daily_sum = (daily_sum.rename(columns={'ENTRIES_19':'2019', 'ENTRIES_20':'2020', 'ENTRIES':'2021'})) daily_melted = (daily_sum.melt(id_vars=['DAY_WEEK'], var_name='Year' ,value_name='WEEKEND_ENTRIES')) sns.relplot(data=daily_melted, x="DAY_WEEK", y="WEEKEND_ENTRIES", hue="Year",kind="line", palette='Set2', legend=True) 
#sns.catplot(data=daily_sum, x='DAY_WEEK', y=['ENTRIES_19','ENTRIES_20','ENTRIES'], kind='bar', palette='Set2')
plt.title('Weekend Late-night Traffic')
plt.ylabel('Total Entries')
plt.ylim(bottom=0)
plt.show()
# the dip in weeks 26 or 27 is 4th July weekend.
# -

# ### Day of Week
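
# A hedged sketch for the "Day of Week" section: the same daily totals shown as grouped
# bars instead of lines, reusing `daily_melted` from the cells above.

# +
sns.catplot(data=daily_melted, x='DAY_WEEK', y='WEEKEND_ENTRIES',
            hue='Year', kind='bar', palette='Set2')
plt.title('Weekend Late-night Traffic by Day of Week')
plt.ylabel('Total Entries')
plt.show()
# -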
MTA Graphs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Reference # - [Deep Learning with Keras](https://github.com/PacktPublishing/Deep-Learning-with-Keras) # - [直感 Deep Learning](https://github.com/oreilly-japan/deep-learning-with-keras-ja) from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow.keras.datasets import mnist from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Activation, Dropout from tensorflow.keras.optimizers import Adam NB_EPOCH = 20 BATCH_SIZE = 128 VERBOSE = 1 NB_CLASSES = 10 OPTIMIZER = Adam() N_HIDDEN = 128 VALIDATION_SPLIT = 0.2 DROPOUT = 0.3 # data: (X_train, y_train), (X_test, y_test) = mnist.load_data() # 28 × 28 = 784 RESHAPED = 784 X_train = X_train.reshape(60000, RESHAPED) X_test = X_test.reshape(10000, RESHAPED) X_train = X_train.astype('float32') X_test = X_test.astype('float32') # normalize X_train /= 255 X_test /= 255 print(X_train.shape[0], 'train samples') print(X_test.shape[0], 'test samples') np.random.seed(1671) # for reproducibility # convert class vectors to binary class metrics Y_train = tf.keras.utils.to_categorical(y_train, NB_CLASSES) Y_test = tf.keras.utils.to_categorical(y_test, NB_CLASSES) # Add hidden layers model = Sequential() model.add(Dense(N_HIDDEN, input_shape=(RESHAPED,))) model.add(Activation('relu')) model.add(Dropout(DROPOUT)) model.add(Dense(N_HIDDEN)) model.add(Activation('relu')) model.add(Dropout(DROPOUT)) model.add(Dense(NB_CLASSES)) model.add(Activation('softmax')) model.summary() model.compile(loss='categorical_crossentropy', optimizer=OPTIMIZER, metrics=['accuracy']) model.fit(X_train, Y_train, batch_size=BATCH_SIZE, epochs=NB_EPOCH, verbose=VERBOSE, validation_split=VALIDATION_SPLIT) score = model.evaluate(X_test, Y_test, verbose=VERBOSE) print("\nTest score:", score[0]) print('Test accuracy:', score[1])
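
# Beyond the single accuracy number, it can help to see which digits get confused with
# which. A minimal sketch using the trained model and the test data prepared above; it
# assumes scikit-learn is available in the environment.

import numpy as np
from sklearn.metrics import confusion_matrix

y_pred = np.argmax(model.predict(X_test), axis=1)
print(confusion_matrix(y_test, y_pred))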
MLP/MINST_v5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="uFl5VYXPIY7D" # ## Breast Cancer Diagnosis via Linear Regression # + [markdown] colab_type="text" id="MkFIjttvIhDs" # We will use the widely-used breast cancer data set. This data set is described [here](https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin). # # # # Each sample is a collection of features that were manually recorded by a physician upon inspecting a sample of cells from fine needle aspiration. # # **The goal is to detect if the cells are benign or malignant.** # + [markdown] colab_type="text" id="9TNtqa-3I1K7" # #### Loading and Visualizing the Data # # We first load the packages as usual. # + colab={} colab_type="code" id="x2uPPzaUIP_b" import numpy as np import matplotlib import matplotlib.pyplot as plt import pandas as pd from sklearn import datasets, linear_model, preprocessing # %matplotlib inline # + [markdown] colab_type="text" id="VWEojdFDI8AK" # Next, we load the data. It is important to remove the missing values. # + colab={"base_uri": "https://localhost:8080/", "height": 238} colab_type="code" id="YCBZS-xbI9gt" outputId="f215f6c3-0272-4ec1-9415-0e5c9702c23d" names = ['id','thick','size','shape','marg','cell_size','bare', 'chrom','normal','mit','class'] df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/' + 'breast-cancer-wisconsin/breast-cancer-wisconsin.data', names=names,na_values='?',header=None) df = df.dropna() df.head(6) # + [markdown] colab_type="text" id="OYHiBO1UJEag" # After loading the data, we can create a scatter plot of the data labeling the class values with different colors. We will pick two of the features. # + colab={"base_uri": "https://localhost:8080/", "height": 305} colab_type="code" id="DnS-dnPIJGkc" outputId="14d1d1a5-d29b-4a21-e300-7cb710bdef8e" # Converting to a zero-one indicator. yraw = np.array(df['class']) BEN_VAL = 2 # value in the 'class' label for benign samples MAL_VAL = 4 # value in the 'class' label for malignant samples y = (yraw == MAL_VAL).astype(int) Iben = (y==0) Imal = (y==1) # Get two predictors xnames =['size','marg'] X = np.array(df[xnames]) # Create the scatter plot plt.plot(X[Imal,0],X[Imal,1],'r.') plt.plot(X[Iben,0],X[Iben,1],'g.') plt.xlabel(xnames[0], fontsize=16) plt.ylabel(xnames[1], fontsize=16) plt.ylim(0,14) plt.legend(['malign','benign'],loc='upper right') # + [markdown] colab_type="text" id="89PPJ0mNJIdg" # The above plot is not informative, since many of the points are on top of one another. Thus, we cannot see the relative frequency of points. # # One way to improve the plot is to draw circles on each point whose size is proportional to the count of samples at that point. We will re-use this code, so we define a function. 
# + colab={"base_uri": "https://localhost:8080/", "height": 305} colab_type="code" id="2D1BF4bRJKro" outputId="10d68b61-cb61-4cfa-e2a9-9752b71148dd" def plot_count(X,y): # Compute the bin edges for the 2d histogram x0val = np.array(list(set(X[:,0]))).astype(float) x1val = np.array(list(set(X[:,1]))).astype(float) x0, x1 = np.meshgrid(x0val,x1val) x0e= np.hstack((x0val,np.max(x0val)+1)) x1e= np.hstack((x1val,np.max(x1val)+1)) # Make a plot for each class yval = list(set(y)) color = ['g','r'] for i in range(len(yval)): I = np.where(y==yval[i])[0] count, x0e, x1e = np.histogram2d(X[I,0],X[I,1],[x0e,x1e]) x0, x1 = np.meshgrid(x0val,x1val) plt.scatter(x0.ravel(), x1.ravel(), s=2*count.ravel(),alpha=0.5, c=color[i],edgecolors='none') plt.ylim([0,14]) plt.legend(['benign','malign'], loc='upper right') plt.xlabel(xnames[0], fontsize=16) plt.ylabel(xnames[1], fontsize=16) return plt plot_count(X,y) # + [markdown] colab_type="text" id="aROvueqkJLdM" # # Exercise # # 1) Based on the above plot, what would you think a good "classifer" using the two features could be? That is, write a rule that can classify the benign region from the malignant region. # # 2) Define a metric function that computes TP, FP, TN, FN, the accuracy, the sensitivity and the precision. # # 3) Try to improve your classifier given those metrics # # 4) Use sklearn logistic regression and check each metric. # + colab={} colab_type="code" id="OhtSZi3LJNPk" def classifier(X): # To-do size = X[:, 0] marg = X[:, 1] y = (size + marg) >= 6 return y y_pred = classifier(X) # + def metrics(y, y_hat): # To-do TP = np.sum((y + y_hat) == 2) # y = 1, yhat = 1 TN = np.sum((y + y_hat) == 0) # y = 0, yhat = 0 FP = np.sum((y - y_hat) == -1) # y = 0, yhat = 1 FN = np.sum((y - y_hat) == 1) # y = 1, yhat = 0 return TP, TN, FP, FN TP, TN, FP, FN = metrics(y, y_pred) print("TP: {}, TN: {}, FP: {}, FN: {}".format(TP, TN, FP, FN)) print("Accuracy is {}, Sensitivity is {}, Precision is {}".format( (TP+TN)/(TP+TN+FP+FN), (TP)/(TP+FN), (TP)/(TP+FP) )) # - from sklearn.linear_model import LogisticRegression # To-do # + # Evaluate sklearn's model with your metric function # To-do
day04/Breast_Cancer_Demo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Boosting to Uniformity
#
# In physics applications we frequently need to achieve uniformity of predictions along some features.
# For instance, when testing for the existence of a new particle, we need the classifier to be uniform in background along the mass (otherwise one can get a false discovery due to a peaking background).
#
# This notebook contains a comparison of classifiers. The goal is to obtain a flat efficiency in __signal__ (without significantly losing classification quality) in the Dalitz features.
#
# The classifiers compared are
#
# * plain __GradientBoosting__
# * __uBoost__
# * gradient boosting with knn-Ada loss (__UGB+knnAda__)
# * gradient boosting with FlatnessLoss (__UGB+FlatnessLoss__)
#
# We use the dataset from the `uBoost` paper for demonstration purposes.
# We have plenty of data here, so the results are quite stable

# downloading data
# !wget -O ../data/dalitzdata.root -nc https://github.com/arogozhnikov/hep_ml/blob/data/data_to_download/dalitzdata.root?raw=true

# %pylab inline

# +
import pandas, numpy
from sklearn.cross_validation import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier
# this wrapper makes it possible to train on a subset of features
from rep.estimators import SklearnClassifier
from hep_ml.commonutils import train_test_split
from hep_ml import uboost, gradientboosting as ugb, losses
# -

# ### Loading data

import root_numpy
used_columns = ["Y1", "Y2", "Y3", "M2AB", "M2AC"]
data = pandas.DataFrame(root_numpy.root2array('../data/dalitzdata.root', treename='tree'))
labels = data['labels']
data = data.drop('labels', axis=1)

# ## Distribution of events in different files in the Dalitz features
# As we can see, the background is distributed mostly in the corners of the Dalitz plot, <br />
# and for traditional classifiers this results in poor efficiency of signal in the corners.
# + def plot_distribution(data_frame, var_name1='M2AB', var_name2='M2AC', bins=40): """The function to plot 2D distribution histograms""" pylab.hist2d(data_frame[var_name1], data_frame[var_name2], bins = 40, cmap=cm.Blues) pylab.xlabel(var_name1) pylab.ylabel(var_name2) pylab.colorbar() pylab.figure(figsize=(12, 6)) subplot(1, 2, 1), pylab.title("signal"), plot_distribution(data[labels==1]) subplot(1, 2, 2), pylab.title("background"), plot_distribution(data[labels==0]) pass # - # ## Preparation of train/test datasets trainX, testX, trainY, testY = train_test_split(data, labels, random_state=42) # ## Setting up classifiers, training uniform_features = ["M2AB", "M2AC"] train_features = ["Y1", "Y2", "Y3"] n_estimators = 150 base_estimator = DecisionTreeClassifier(max_depth=4) # __uBoost__ training takes much time, so we reduce number of efficiency_steps, use prediction smoothing and run uBoost in threads # + from rep.metaml import ClassifiersFactory classifiers = ClassifiersFactory() base_ada = GradientBoostingClassifier(max_depth=4, n_estimators=n_estimators, learning_rate=0.1) classifiers['AdaBoost'] = SklearnClassifier(base_ada, features=train_features) knnloss = ugb.KnnAdaLossFunction(uniform_features, knn=10, uniform_label=1) ugbKnn = ugb.UGradientBoostingClassifier(loss=knnloss, max_depth=4, n_estimators=n_estimators, learning_rate=0.4, train_features=train_features) classifiers['uGB+knnAda'] = SklearnClassifier(ugbKnn) uboost_clf = uboost.uBoostClassifier(uniform_features=uniform_features, uniform_label=1, base_estimator=base_estimator, n_estimators=n_estimators, train_features=train_features, efficiency_steps=12, n_threads=4) classifiers['uBoost'] = SklearnClassifier(uboost_clf) flatnessloss = ugb.KnnFlatnessLossFunction(uniform_features, fl_coefficient=3., power=1.3, uniform_label=1) ugbFL = ugb.UGradientBoostingClassifier(loss=flatnessloss, max_depth=4, n_estimators=n_estimators, learning_rate=0.1, train_features=train_features) classifiers['uGB+FL'] = SklearnClassifier(ugbFL) classifiers.fit(trainX, trainY, parallel_profile='threads-4') pass # - # ## Lets look at the results of training # dependence of quality on the number of trees built (ROC AUC - area under the ROC curve, the more the better) # + from rep.report.metrics import RocAuc report = classifiers.test_on(testX, testY) ylim(0.88, 0.94) report.learning_curve(RocAuc(), steps=1) # - # ## SDE (squared deviation of efficiency) learning curve # SDE vs the number of built trees. SDE is memtric of nonuniformity - less is better. from hep_ml.metrics import BinBasedSDE, KnnBasedCvM report.learning_curve(BinBasedSDE(uniform_features, uniform_label=1)) # ##CvM learning curve # CvM is metric of non-uniformity based on Cramer-von Mises distance. We are using Knn version now. report.learning_curve(KnnBasedCvM(uniform_features, uniform_label=1)) # # ROC curves after training report.roc().plot(new_plot=True, figsize=[10, 9]) # ## Signal efficiency # global cut corresponds to average signal efficiency=0.5. In ideal case the picture shall be white. report.efficiencies_2d(uniform_features, efficiency=0.5, signal_label=1, n_bins=15, labels_dict={1: 'signal'}) # the same for global efficiency = 0.7 report.efficiencies_2d(uniform_features, efficiency=0.7, signal_label=1, n_bins=15, labels_dict={1: 'signal'})
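
# To put a single number next to the ROC curves above, here is a hedged sketch that
# scores each trained classifier with sklearn's roc_auc_score. It assumes the REP
# wrappers stored in `classifiers` expose the usual sklearn-style predict_proba and
# handle the feature subsetting internally.

# +
from sklearn.metrics import roc_auc_score

for name, clf in classifiers.items():
    proba = clf.predict_proba(testX)[:, 1]
    print('%s ROC AUC: %.4f' % (name, roc_auc_score(testY, proba)))
# -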
notebooks/BoostingToUniformity.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Monte Carlo Methods # # In this notebook, you will write your own implementations of many Monte Carlo (MC) algorithms. # # While we have provided some starter code, you are welcome to erase these hints and write your code from scratch. # # ### Part 0: Explore BlackjackEnv # # We begin by importing the necessary packages. # + import sys import gym import numpy as np from collections import defaultdict from plot_utils import plot_blackjack_values, plot_policy # - # Use the code cell below to create an instance of the [Blackjack](https://github.com/openai/gym/blob/master/gym/envs/toy_text/blackjack.py) environment. env = gym.make('Blackjack-v0') # Each state is a 3-tuple of: # - the player's current sum $\in \{0, 1, \ldots, 31\}$, # - the dealer's face up card $\in \{1, \ldots, 10\}$, and # - whether or not the player has a usable ace (`no` $=0$, `yes` $=1$). # # The agent has two potential actions: # # ``` # STICK = 0 # HIT = 1 # ``` # Verify this by running the code cell below. print(env.observation_space) print(env.action_space) # Execute the code cell below to play Blackjack with a random policy. # # (_The code currently plays Blackjack three times - feel free to change this number, or to run the cell multiple times. The cell is designed for you to get some experience with the output that is returned as the agent interacts with the environment._) for i_episode in range(3): state = env.reset() while True: print(state) action = env.action_space.sample() state, reward, done, info = env.step(action) if done: print('End game! Reward: ', reward) print('You won :)\n') if reward > 0 else print('You lost :(\n') break # ### Part 1: MC Prediction # # In this section, you will write your own implementation of MC prediction (for estimating the action-value function). # # We will begin by investigating a policy where the player _almost_ always sticks if the sum of her cards exceeds 18. In particular, she selects action `STICK` with 80% probability if the sum is greater than 18; and, if the sum is 18 or below, she selects action `HIT` with 80% probability. The function `generate_episode_from_limit_stochastic` samples an episode using this policy. # # The function accepts as **input**: # - `bj_env`: This is an instance of OpenAI Gym's Blackjack environment. # # It returns as **output**: # - `episode`: This is a list of (state, action, reward) tuples (of tuples) and corresponds to $(S_0, A_0, R_1, \ldots, S_{T-1}, A_{T-1}, R_{T})$, where $T$ is the final time step. In particular, `episode[i]` returns $(S_i, A_i, R_{i+1})$, and `episode[i][0]`, `episode[i][1]`, and `episode[i][2]` return $S_i$, $A_i$, and $R_{i+1}$, respectively. def generate_episode_from_limit_stochastic(bj_env): episode = [] state = bj_env.reset() while True: probs = [0.8, 0.2] if state[0] > 18 else [0.2, 0.8] action = np.random.choice(np.arange(2), p=probs) next_state, reward, done, info = bj_env.step(action) episode.append((state, action, reward)) state = next_state if done: break return episode # Execute the code cell below to play Blackjack with the policy. # # (*The code currently plays Blackjack three times - feel free to change this number, or to run the cell multiple times. 
The cell is designed for you to gain some familiarity with the output of the `generate_episode_from_limit_stochastic` function.*) for i in range(3): print(generate_episode_from_limit_stochastic(env)) # Now, you are ready to write your own implementation of MC prediction. Feel free to implement either first-visit or every-visit MC prediction; in the case of the Blackjack environment, the techniques are equivalent. # # Your algorithm has three arguments: # - `env`: This is an instance of an OpenAI Gym environment. # - `num_episodes`: This is the number of episodes that are generated through agent-environment interaction. # - `generate_episode`: This is a function that returns an episode of interaction. # - `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`). # # The algorithm returns as output: # - `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`. def mc_prediction_q(env, num_episodes, generate_episode, gamma=1.0): # initialize empty dictionaries of arrays returns_sum = defaultdict(lambda: np.zeros(env.action_space.n)) N = defaultdict(lambda: np.zeros(env.action_space.n)) Q = defaultdict(lambda: np.zeros(env.action_space.n)) # loop over episodes for i_episode in range(1, num_episodes+1): # monitor progress if i_episode % 1000 == 0: print("\rEpisode {}/{}.".format(i_episode, num_episodes), end="") sys.stdout.flush() ## TODO: complete the function return Q # Use the cell below to obtain the action-value function estimate $Q$. We have also plotted the corresponding state-value function. # # To check the accuracy of your implementation, compare the plot below to the corresponding plot in the solutions notebook **Monte_Carlo_Solution.ipynb**. # + # obtain the action-value function Q = mc_prediction_q(env, 500000, generate_episode_from_limit_stochastic) # obtain the corresponding state-value function V_to_plot = dict((k,(k[0]>18)*(np.dot([0.8, 0.2],v)) + (k[0]<=18)*(np.dot([0.2, 0.8],v))) \ for k, v in Q.items()) # plot the state-value function plot_blackjack_values(V_to_plot) # - # ### Part 2: MC Control # # In this section, you will write your own implementation of constant-$\alpha$ MC control. # # Your algorithm has four arguments: # - `env`: This is an instance of an OpenAI Gym environment. # - `num_episodes`: This is the number of episodes that are generated through agent-environment interaction. # - `alpha`: This is the step-size parameter for the update step. # - `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`). # # The algorithm returns as output: # - `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`. # - `policy`: This is a dictionary where `policy[s]` returns the action that the agent chooses after observing state `s`. # # (_Feel free to define additional functions to help you to organize your code._) def mc_control(env, num_episodes, alpha, gamma=1.0): nA = env.action_space.n # initialize empty dictionary of arrays Q = defaultdict(lambda: np.zeros(nA)) # loop over episodes for i_episode in range(1, num_episodes+1): # monitor progress if i_episode % 1000 == 0: print("\rEpisode {}/{}.".format(i_episode, num_episodes), end="") sys.stdout.flush() ## TODO: complete the function return policy, Q # Use the cell below to obtain the estimated optimal policy and action-value function. 
Note that you should fill in your own values for the `num_episodes` and `alpha` parameters. # obtain the estimated optimal policy and action-value function policy, Q = mc_control(env, ?, ?) # Next, we plot the corresponding state-value function. # + # obtain the corresponding state-value function V = dict((k,np.max(v)) for k, v in Q.items()) # plot the state-value function plot_blackjack_values(V) # - # Finally, we visualize the policy that is estimated to be optimal. # plot the policy plot_policy(policy) # The **true** optimal policy $\pi_*$ can be found in Figure 5.2 of the [textbook](http://go.udacity.com/rl-textbook) (and appears below). Compare your final estimate to the optimal policy - how close are you able to get? If you are not happy with the performance of your algorithm, take the time to tweak the decay rate of $\epsilon$, change the value of $\alpha$, and/or run the algorithm for more episodes to attain better results. # # ![True Optimal Policy](images/optimal.png)
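# ### Appendix: one possible shape for the prediction update (added example)
# The `## TODO` inside `mc_prediction_q` asks for the update that turns sampled episodes into
# action-value estimates. The sketch below shows one way to write an every-visit update as a
# standalone function; it mirrors the argument list used above but is not the official solution
# from **Monte_Carlo_Solution.ipynb**.

# +
import numpy as np
from collections import defaultdict

def mc_prediction_q_sketch(env, num_episodes, generate_episode, gamma=1.0):
    returns_sum = defaultdict(lambda: np.zeros(env.action_space.n))
    N = defaultdict(lambda: np.zeros(env.action_space.n))
    Q = defaultdict(lambda: np.zeros(env.action_space.n))
    for i_episode in range(1, num_episodes + 1):
        episode = generate_episode(env)
        states, actions, rewards = zip(*episode)
        # discount factors 1, gamma, gamma^2, ... applied to the rewards that follow each visit
        discounts = np.array([gamma ** i for i in range(len(rewards) + 1)])
        for i, (state, action) in enumerate(zip(states, actions)):
            G = sum(rewards[i:] * discounts[:-(1 + i)])
            returns_sum[state][action] += G
            N[state][action] += 1.0
            Q[state][action] = returns_sum[state][action] / N[state][action]
    return Q
# -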
monte-carlo/Monte_Carlo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os import pandas as pd from IPython.display import display pd.set_option('display.max_rows', 100) data_path_dict = { 'optum': os.path.join( '/local-scratch/nigam/secure/optum/spfohl/zipcode_cvd/optum/dod', 'experiments', 'ascvd_10yr_optum_dod_selected', ) } db_key = 'optum' data_path = data_path_dict[db_key] result_files = list(filter(lambda x: 'result_df_ci_' in x, os.listdir(data_path))) result_files result_df_dict = { key: pd.read_csv(os.path.join(data_path, key)) for key in result_files } for key in result_df_dict.keys(): result_df_dict[key] = ( result_df_dict[key] .query('tag != "erm_baseline"') if 'erm_tuning' not in key else result_df_dict[key] ) result_df_dict[key] = ( result_df_dict[key] .query('metric.str.contains("net_benefit_rr")') if 'net_benefit_rr' in key else result_df_dict[key] ) result = pd.concat(result_df_dict).reset_index(drop=True) result_df_name = 'result_df_ci.csv' result.to_csv(os.path.join(data_path, result_df_name), index=False) result for eval_attribute, the_df in result.groupby('eval_attribute'): print(eval_attribute) display(the_df.query('metric == "auc"')) for eval_attribute, the_df in result.groupby('eval_attribute'): print(eval_attribute) display(the_df.query('metric == "ace_abs_logistic_logit"')) for eval_attribute, the_df in result.groupby('eval_attribute'): print(eval_attribute) display(the_df.query('metric == "loss_bce"')) for eval_attribute, the_df in result.groupby('eval_attribute'): print(eval_attribute) display(the_df.query('metric == "net_benefit_rr_0.075"')) for eval_attribute, the_df in result.groupby('eval_attribute'): print(eval_attribute) display(the_df.query('metric == "net_benefit_rr_recalib_0.075"'))
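# The five loops above differ only in the metric being displayed. A small helper keeps that repetition
# down; this is a sketch that relies only on the `eval_attribute` and `metric` columns already used above.

# +
from IPython.display import display

def display_metric(result_df, metric_name):
    """Display rows for one metric, grouped by evaluation attribute."""
    for eval_attribute, the_df in result_df.groupby('eval_attribute'):
        print(eval_attribute)
        display(the_df.query('metric == @metric_name'))

# example usage with one of the metrics shown above
display_metric(result, 'auc')
# -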
notebooks/combine_boostrap_results.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [Root] # language: python # name: Python [Root] # --- # + hidden="all" school_cell_uuid="b1517c11f0414d42aad69c058bbf0b64" # %config InlineBackend.figure_format = 'png' # + [markdown] school_cell_uuid="ee014ddb5c2c4270b561568bf8c93dab" # # 데이터 분석의 소개 # + [markdown] school_cell_uuid="4ce155fc1e9a4b419269ad3be6d996eb" # 데이터 분석이란 용어는 상당히 광범위한 용어이므로 여기에서는 통계적 분석과 머신 러닝이라는 두가지 세부 영역에 국한하여 데이터 분석을 설명하도록 한다. # + [markdown] school_cell_uuid="369a272eed284107a30d6c049011224d" # ## 데이터 분석이란 # + [markdown] school_cell_uuid="5ac737fb7d3e462981869a1941d6313b" # 데이터 분석이란 어떤 데이터가 주어졌을 때 # # * 데이터 간의 관계를 파악하거나 # * 파악된 관계를 사용하여 원하는 데이터를 만들어 내는 과정 # # 으로 볼 수 있다. # + [markdown] school_cell_uuid="1aa7236a7ac94fbeb93233533b25aab3" # ## 데이터 분석의 유형 # + [markdown] bootstrap={"panel": {"class": "panel-danger", "heading": ""}} school_cell_uuid="08d1bee952dd4ec988f8e5cc45e5c899" # * 예측(Prediction) # * 클러스터링(Clustering) # * 모사(Approximation) # + [markdown] school_cell_uuid="4e03feeb5d124e8faa0adc9342fb7cb8" # 데이터 분석의 유형은 다양하다. 그 중 널리 사용되는 전형적인 방법으로는 예측(prediction), 클러스터링(clustering), 모사(approximation) 등이 있다. # + [markdown] school_cell_uuid="0fe231bf47e542f0ae1e75714de13159" # 예측은 어떤 특정한 유형의 입력 데이터가 주어지면 데이터 분석의 결과로 다른 유형의 데이터가 출력될 수 있는 경우이다. 예를 들어 다음과 같은 작업은 예측이라고 할 수 있다. # # * 부동산의 위치, 주거환경, 건축연도 등이 주어지면 해당 부동산의 가치를 추정한다. # * 꽃잎의 길이와 너비 등 식물의 외형적 특징이 주어지면 해당하는 식물의 종을 알아낸다. # * 얼굴 사진이 주어지면 해당하는 사람의 이름을 출력한다. # * 현재 바둑돌의 위치들이 주어지면 다음 바둑돌의 위치를 지정한다. # # 데이터 분석에서 말하는 예측이라는 용어는 시간상으로 미래의 의미는 포함하지 않는다. 시계열 분석에서는 시간상으로 미래의 데이터를 예측하는 경우가 있는데 이 경우에는 forecasting 이라는 용어를 사용한다. # + [markdown] school_cell_uuid="bb677dc6a05b448b890f79ea0d5b9187" # 클러스터링은 동일한 유형의 데이터가 주어졌을 때 유사한 데이터끼리 몇개의 집합으로 묶는 작업을 말한다. 예를 들어 다음과 같은 작업은 클러스터링이다. # # * 지리적으로 근처에 있는 지점들을 찾아낸다. # * 유사한 단어를 포함하고 있는 문서의 집합을 만든다. # * 유사한 상품을 구해한 고객 리스트를 생성한다. # + [markdown] school_cell_uuid="2c5f7d817309408aa409cc059f3b34ba" # 모사는 대량의 데이터를 대표하는 소량의 데이터를 생성하는 작업이다. # # * 이미지나 음악 데이터를 압축한다. # * 주식 시장의 움직임을 대표하는 지수 정보를 생성한다. # + [markdown] school_cell_uuid="987118ef33a64e53973e35201be25f37" # ## 입력 데이터와 출력 데이터 # + [markdown] school_cell_uuid="601aef3db91643a99b1a261b28a2fd62" # 만약 예측을 하고자 한다면 데이터의 유형을 입력 데이터와 출력 데이터라는 두 가지 유형의 데이터로 분류할 수 있어야 한다. # + [markdown] bootstrap={"panel": {"class": "panel-danger", "heading": ""}} school_cell_uuid="28ef8af466e349698cd38943fa6541a3" # # * 입력 $X$ # * 분석의 기반이 되는 데이터 # * 독립변수 independent variable # * feature, covariates, regressor, explanatory, attributes, stimulus # # # * 출력 $Y$ # * 추정하거나 예측하고자 하는 데이터 # * 종속변수 dependent variable # * target, response, regressand, label, tag # # + [markdown] school_cell_uuid="10c076f8f07841eaa31bfa913f11d7c9" # 예측 작업에서 생성하고자 하는 데이터 유형을 출력 데이터라고 하고 이 출력 데이터를 생성하기 위해 사용되는 기반 데이터를 입력 데이터라고 한다. 회귀 분석에서는 독립 변수와 종속 변수라는 용어를 사용하며 머신 러닝에서는 일반적으로 feature와 target이라는 용어를 사용한다. # # 입력 데이터와 출력 데이터의 개념을 사용하여 예측 작업을 다시 설명하면 다음과 같다. # + [markdown] school_cell_uuid="a8e56e41aa504ed6939aa9618485106b" # * $X$와 $Y$의 관계 $f$를 파악 한다. # # $$Y = f(X)$$ # # # * 현실적으로는 정확한 $f$를 구할 수 없으므로 $f$와 가장 유사한, 재현 가능한 $\hat{f}$을 구한다. # # $$Y \approx \hat{f}(X)$$ # # # * $\hat{f}$를 가지고 있다면 $X$가 주어졌을 때 $Y$의 예측(추정) $\hat{Y} = \hat{f}(X)$를 구할 수 있다. 
# # # * 확률론적으로 $\hat{f}$는 # # $$ \hat{f}(X) = \arg\max_{Y} P(Y | X) $$ # # + [markdown] school_cell_uuid="a92871f46a354363a745cfc16a45928a" # 예측은 입력 데이터와 출력 데이터 사이의 관계를 분석하고 분석한 관계를 이용하여 출력 데이터가 아직 없거나 혹은 가지고 있는 출력 여러가지 이유로 부정확하다고 생각될 경우 보다 합리적인 출력값을 추정하는 것이다. 따라서 입력 데이터와 출력 데이터의 관계에 대한 분석이 완료된 이후에는 출력 데이터가 필요 없어도 일단 관계를 분석하기 위해서는 입력 데이터와 출력 데이터가 모두 존재해야 한다. # + [markdown] school_cell_uuid="3b24a47f4fc34b6cbd03895249ebb024" # ## 데이터의 유형 # + [markdown] school_cell_uuid="1434dc16622a4187a0c54b1193f183e4" # 예측 작업에서 생성하고자 하는 데이터 유형을 출력 데이터라고 하고 이 출력 데이터를 생성하기 위해 사용되는 기반 데이터를 입력 데이터라고 한다. # # 예측은 입력 데이터와 출력 데이터 사이의 관계를 분석하고 분석한 관계를 이용하여 출력 데이터가 아직 없거나 혹은 가지고 있는 출력 여러가지 이유로 부정확하다고 생각될 경우 보다 합리적인 출력값을 추정하는 것이다. 따라서 입력 데이터와 출력 데이터의 관계에 대한 분석이 완료된 이후에는 출력 데이터가 필요 없어도 일단 관계를 분석하기 위해서는 입력 데이터와 출력 데이터가 모두 존재해야 한다. # # 입력 데이터와 출력 데이터의 개념을 사용하여 예측 작업을 다시 설명하면 다음과 같다. # + [markdown] school_cell_uuid="957c9e761f5d4cfda5957670e93de44b" # 통계적 분석이나 머신 러닝 등의 데이터 분석에 사용되는 데이터의 유형은 다음 숫자 혹은 카테고리 값 중 하나이어야 한다. # + [markdown] bootstrap={"panel": {"class": "panel-danger", "heading": ""}} school_cell_uuid="8c6cca460c294150ac2c867b45ad950b" # * 숫자 (number) # * 크기/순서 비교 가능 # * 무한 집합 # # # * 카테고리값 (category) # * 크기/순서 비교 불가 # * 유한 집합 # * Class # * Binary Class # * Multi Class # + [markdown] school_cell_uuid="fea2684c87674930a72f9644a401ebbb" # 숫자와 카테고리 값의 차이점은 두 개의 데이터가 있을 때 이들의 크기나 혹은 순서를 비교할 수 있는가 없는가의 차이이다. 예를 들어 10kg과 23kg이라는 두 개의 무게는 23이 "크다"라고 크기를 비교하는 것이 가능하다. 그러나 "홍길동"과 "이순신"이라는 두 개의 카테고리 값은 크기를 비교할 수 없다. # # 일반적으로 카테고리 값은 가질 수 있는 경우의 수가 제한되어 있다. 이러한 경우의 수를 클래스(class)라고 부르는데 동전을 던진 결과와 같이 "앞면(head)" 혹은 "뒷면(tail)"처럼 두 가지 경우만 가능하면 이진 클래스(binary class)라고 한다. 주사위를 던져서 나온 숫자와 같이 세 개 이상의 경우가 가능하면 다중 클래스(multi class)라고 한다. # # 카테고리값처럼 비 연속적이지만 숫자처럼 비교 가능한 경우도 있을 수 있다. 예를 들어 학점이 "A", "B", "C", "D"와 같이 표시되는 경우는 비 연속적이고 기호로 표시되지만 크기 혹은 순서를 비교할 수 있다. 이러한 경우는 서수형(ordinal) 자료라고 하며 분석의 목표에 따라 숫자로 표기하기도 하고 일반적인 카테고리값으로 표기하기도 한다. # + [markdown] school_cell_uuid="caf4e2850afd411884e35fccd7279714" # ## 데이터의 변환 및 전처리 # + [markdown] school_cell_uuid="271cb6ac3075465a9813eda067cd7868" # 숫자가 아닌 이미지나 텍스트 정보는 분석에 목표에 따라 숫자나 카테고리 값으로 변환해야 한다. 이 때 해당하는 원본 정보를 손실 없이 그대로 숫자나 카테고리 값으로 바꿀 수도 있지만 대부분의 경우에는 분석에 필요한 핵심적인 정보만을 뽑아낸다. 이러한 과정은 데이터의 전처리(preprocessing)에 해당한다. # + school_cell_uuid="9f6ff108391642edbf8c82694013630e" from sklearn.datasets import load_digits digits = load_digits() plt.imshow(digits.images[0], interpolation='nearest'); plt.grid(False) # + school_cell_uuid="2058ce66412b49f882bd665bfc4a0181" digits.images[0] # + school_cell_uuid="d773d9b0111648b09fad94210ba22fa5" from sklearn.datasets import fetch_20newsgroups news = fetch_20newsgroups() print(news.data[0]) # + school_cell_uuid="ed82670bbc87401885b304c011f5c371" from sklearn.feature_extraction.text import TfidfVectorizer vec = TfidfVectorizer(stop_words="english").fit(news.data[:100]) data = vec.transform(news.data[:100]) data # + school_cell_uuid="c3051200071b4cbd8466a7523bc8c7bd" plt.imshow(data.toarray()[:,:200], interpolation='nearest'); # + [markdown] school_cell_uuid="7926945639c643d9ba1a3aa972a91d71" # 예측도 출력 데이터가 숫자인가 카테고리 값인가에 따라 회귀 분석(regression analysis)과 분류(classification)로 구분된다. 
# + [markdown] bootstrap={"panel": {"class": "panel-danger", "heading": ""}} school_cell_uuid="e3f950ef9c424c86a0eb12cf4239425a" # * 회귀분석(regression) # * 얻고자 하는 답 $Y$가 숫자 # * 분류 (classification) # * 얻고자 하는 답 $Y$가 카테고리 값 # # | | X=Real | X=Category | # | ------------- | --------------- | --------------- | # |**Y=Real** | Regression | ANOVA | # |**Y=Category** | Classification | Classification | # # + [markdown] school_cell_uuid="f72526f17bf74677b43d200517e986e8" # ## 회귀 분석 # + school_cell_uuid="707ebc4ea18644e98ceb1b5f1e4598e1" from sklearn.datasets import load_boston boston = load_boston() print(boston.DESCR) # + school_cell_uuid="01c04614abba43908c3ff2601669ee52" df = pd.DataFrame(boston.data, columns=boston.feature_names) df["MEDV"] = boston.target df.tail() # + school_cell_uuid="b164d01ef00146a1b537fb7656747001" sns.pairplot(df[["MEDV", "RM", "AGE", "DIS"]]); # + school_cell_uuid="bdea6a86ca734dd5bac5e3919e5785f4" from sklearn.linear_model import LinearRegression predicted = LinearRegression().fit(boston.data, boston.target).predict(boston.data) plt.scatter(boston.target, predicted, c='r', s=20); plt.xlabel("Target"); plt.ylabel("Predicted"); # + [markdown] school_cell_uuid="842d2505d7494132bd71fbdf2fe1627c" # ## 분류 # + [markdown] school_cell_uuid="fdd2b4e35be44e5f8f2a7dd588ed0bc9" # * Iris # # | setosa | versicolor | virginica | # |---|---|---| # |<img src="https://upload.wikimedia.org/wikipedia/commons/5/56/Kosaciec_szczecinkowaty_Iris_setosa.jpg" style="width: 10em; height: 10em" />|<img src="https://upload.wikimedia.org/wikipedia/commons/4/41/Iris_versicolor_3.jpg" style="width: 10em; height: 10em" />|<img src="https://upload.wikimedia.org/wikipedia/commons/9/9f/Iris_virginica.jpg" style="width: 10em; height: 10em" />| # # + school_cell_uuid="df5b61df4eb94249ad90c437a34496b3" from sklearn.datasets import load_iris iris = load_iris() df = pd.DataFrame(iris.data, columns=iris.feature_names) sy = pd.Series(iris.target, dtype="category") sy = sy.cat.rename_categories(iris.target_names) df['species'] = sy df.tail() # + school_cell_uuid="180c445231f8489e961116834f73e43f" sns.pairplot(df, hue="species"); # + school_cell_uuid="06db62d235744ab0af74efd3af937ac7" from sklearn.cross_validation import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC X = iris.data[:, [2,3]] y = iris.target X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) sc = StandardScaler() sc.fit(X_train) X_train_std = sc.transform(X_train) X_test_std = sc.transform(X_test) model = SVC(kernel="linear", C=1.0, random_state=0) model.fit(X_train_std, y_train) XX_min = X_train_std[:, 0].min() - 1; XX_max = X_train_std[:, 0].max() + 1; YY_min = X_train_std[:, 1].min() - 1; YY_max = X_train_std[:, 1].max() + 1; XX, YY = np.meshgrid(np.linspace(XX_min, XX_max, 1000), np.linspace(YY_min, YY_max, 1000)) ZZ = model.predict(np.c_[XX.ravel(), YY.ravel()]).reshape(XX.shape) cmap = mpl.colors.ListedColormap(sns.color_palette("Set2")) plt.contourf(XX, YY, ZZ, cmap=cmap) plt.scatter(X_train_std[y_train == 0, 0], X_train_std[y_train == 0, 1], c=cmap.colors[0], s=100) plt.scatter(X_train_std[y_train == 1, 0], X_train_std[y_train == 1, 1], c=cmap.colors[2], s=100) plt.scatter(X_train_std[y_train == 2, 0], X_train_std[y_train == 2, 1], c=cmap.colors[1], s=100) plt.xlim(XX_min, XX_max); plt.ylim(YY_min, YY_max); # + [markdown] school_cell_uuid="908dee2050c94e3ab48780eb28241cc1" # ## 클러스터링(Clustering) # + school_cell_uuid="7277b157f44e40a5800ec225b38063cd" from 
sklearn.cluster import DBSCAN from sklearn.datasets.samples_generator import make_blobs from sklearn.preprocessing import StandardScaler X, labels_true = make_blobs(n_samples=750, centers=[[1, 1], [-1, -1], [1, -1]], cluster_std=0.4, random_state=0) X = StandardScaler().fit_transform(X) db = DBSCAN(eps=0.3, min_samples=10).fit(X) n_clusters_ = len(set(db.labels_)) - (1 if -1 in db.labels_ else 0) unique_labels = set(db.labels_) f = plt.figure() f.add_subplot(1,2,1) plt.plot(X[:, 0], X[:, 1], 'o', markerfacecolor='k', markeredgecolor='k', markersize=10) plt.title('Raw Data') f.add_subplot(1,2,2) colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels))) for k, col in zip(unique_labels, colors): if k == -1: col = 'k' class_member_mask = (db.labels_ == k) xy = X[class_member_mask] plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor='k', markersize=10); plt.title('Estimated number of clusters: %d' % n_clusters_); # + [markdown] school_cell_uuid="0189dda57560417eab97d0655d942081" # ## 모사(Approximation) # + school_cell_uuid="bb07e117a2d042a6a949e042e8707826" from sklearn.cluster import KMeans from sklearn.metrics import pairwise_distances_argmin from sklearn.datasets import load_sample_image from sklearn.utils import shuffle n_colors = 64 china = load_sample_image("china.jpg") china = np.array(china, dtype=np.float64) / 255 w, h, d = original_shape = tuple(china.shape) assert d == 3 image_array = np.reshape(china, (w * h, d)) image_array_sample = shuffle(image_array, random_state=0)[:1000] kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample) labels = kmeans.predict(image_array) def recreate_image(codebook, labels, w, h): d = codebook.shape[1] image = np.zeros((w, h, d)) label_idx = 0 for i in range(w): for j in range(h): image[i][j] = codebook[labels[label_idx]] label_idx += 1 return image print("{0:,} bytes -> {1:,} bytes : {2:5.2f}%".format(image_array.nbytes, labels.nbytes, float(labels.nbytes) / image_array.nbytes * 100.0)) f = plt.figure() ax1 = f.add_subplot(1,2,1) plt.axis('off') plt.title('Original image (96,615 colors)') ax1.imshow(china); ax2 = f.add_subplot(1,2,2) plt.axis('off') plt.title('Quantized image (64 colors, K-Means)') ax2.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h)); # + [markdown] school_cell_uuid="5a98f6b93a9343b291b48e5cee846f05" # ## 데이터 분석에 대한 오해와 진실 # + [markdown] bootstrap={"panel": {"class": "panel-danger", "heading": ""}} school_cell_uuid="59e2541bf315462a976997f0e7991082" # * 분석의 최대 성능은 방법론이 아닌 **데이터 자체**에 의존한다. # * 사람이 이론적으로 분석할 수 없는 데이터는 **어떤 머신러닝 알고리즘으로도 분석할 수 없다**. # + [markdown] school_cell_uuid="9e266496f0c5460cb2a7ffb0d95e0186" # ## <NAME> # + [markdown] bootstrap={"panel": {"class": "panel-danger", "heading": ""}} school_cell_uuid="de33e56d5369445e971b4330fb0a73b1" # * 머신러닝이란 자동화된 데이터 분석 # * 머신러닝은 분석의 **경제성과 효율성을 증가** # * 사람이 시간적, 경제적으로 할 수 없는 **규모**의 분석을 할 수 있다.
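# The prediction workflow described earlier in this notebook (estimate $\hat{f}$ from known $(X, Y)$
# pairs, then apply it to new $X$) can be shown end to end in a few lines. This is a minimal added
# sketch; note that it uses the current `sklearn.model_selection` module, whereas the cells above use
# the older `sklearn.cross_validation` import path.

# +
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

iris = load_iris()
X_tr, X_te, y_tr, y_te = train_test_split(iris.data, iris.target, test_size=0.3, random_state=0)

f_hat = LogisticRegression(max_iter=1000).fit(X_tr, y_tr)   # learn an approximation of f
y_hat = f_hat.predict(X_te)                                  # predict Y for unseen X
print("test accuracy:", accuracy_score(y_te, y_hat))
# -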
04. 데이터 분석의 소개/01. 데이터 분석의 소개.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Python 3 # Anchor extraction from html document from bs4 import BeautifulSoup import urllib.request # - with urllib.request.urlopen('https://stackoverflow.com/feeds/') as response: markup = response.read() soup = BeautifulSoup(markup, "xml") with open("stackoverflow.com_feeds.xml") as fp: soup = BeautifulSoup(fp) with open("stackoverflow.com_feeds.xml") as response: markup = response.read() soup = BeautifulSoup(markup) url = 'https://stackoverflow.com/feeds/' import urllib from bs4 import BeautifulSoup with urllib.request.urlopen(url) as response: soup = BeautifulSoup(response.read(), "lxml-xml") for entry in soup.find_all('entry'): print(entry)
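# Printing whole `<entry>` elements is noisy; usually only a few fields are needed. The small sketch
# below pulls the title, link and update time from each entry of the Atom feed parsed above, using
# only standard BeautifulSoup calls.
for entry in soup.find_all('entry'):
    title = entry.find('title')
    link = entry.find('link')
    updated = entry.find('updated')
    print(title.get_text(strip=True) if title is not None else None, '|',
          link.get('href') if link is not None else None, '|',
          updated.get_text(strip=True) if updated is not None else None)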
notebooks/BeautifulSoup_XML.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Retention Time Prediction # # This notebook is prepared to be run in Google [Colaboratory](https://colab.research.google.com/). In order to train the model faster, please change the runtime of Colab to use Hardware Accelerator, either GPU or TPU. # # This is an extension of the original walkthrough example available [here](https://github.com/wilhelm-lab/dlomix-resources/blob/main/notebooks/Example_RTModel_Walkthrough_colab.ipynb). # ### Task 4: Data Split # Similar to the initial notebook, we will initialize our model and train it. The target here is to experiment with different data splits and observe the impact on performance and whether it reflects a realistic evaluation. # + # install the dlomix package in the current environment using pip # !python -m pip install -q git+https://github.com/wilhelm-lab/dlomix # - import numpy as np import pandas as pd import dlomix from dlomix.models import RetentionTimePredictor import tensorflow as tf from dlomix.eval import TimeDeltaMetric # The code below creates a dataset, creates the model, and trains it. You should try the two available data splits (`split_a` and `split_b`). Please refer to the initial notebook to analyze the results. # # Hint: Use the paths available below. The description of the splits is as follows: # - suffix `_DATAPATH`: the original split used in the initial walkthrough notebook. # - suffix `_A`: split A # - suffix `_B`: split B # + TRAIN_DATAPATH = 'https://raw.githubusercontent.com/wilhelm-lab/dlomix-resources/main/example_dataset/proteomTools_train_val.csv' TRAIN_DATAPATH_A = 'https://raw.githubusercontent.com/wilhelm-lab/dlomix-resources/main/example_dataset/split_a/proteomTools_train_val_a.csv' TRAIN_DATAPATH_B = 'https://raw.githubusercontent.com/wilhelm-lab/dlomix-resources/main/example_dataset/split_b/proteomTools_train_val_b.csv' TEST_DATAPATH = 'https://raw.githubusercontent.com/wilhelm-lab/dlomix-resources/main/example_dataset/proteomTools_test.csv' TEST_DATAPATH_A = 'https://raw.githubusercontent.com/wilhelm-lab/dlomix-resources/main/example_dataset/split_a/proteomTools_test_a.csv' TEST_DATAPATH_B = 'https://raw.githubusercontent.com/wilhelm-lab/dlomix-resources/main/example_dataset/split_b/proteomTools_test_b.csv' # + from dlomix.data import RetentionTimeDataset BATCH_SIZE = 64 rtdata = RetentionTimeDataset(data_source=TRAIN_DATAPATH, seq_length=30, batch_size=BATCH_SIZE, val_ratio=0.2) # this is the test dataset object, do not forget to change it to the respective suffix (A or B) # when you change the training dataset test_rtdata = RetentionTimeDataset(data_source=TEST_DATAPATH, seq_length=30, batch_size=BATCH_SIZE, test=True) # + # create model model = RetentionTimePredictor(seq_length=30) optimizer = tf.keras.optimizers.SGD(learning_rate=1e-4, decay=1e-7) # compile the model with the optimizer and the metrics we want to use; we can add our custom time-delta metric model.compile(optimizer=optimizer, loss='mse', metrics=['mean_absolute_error', TimeDeltaMetric()]) history = model.fit(rtdata.train_data, validation_data=rtdata.val_data, epochs=15) # - # ### Bonus: # After analyzing the results, can you figure out what is wrong with these splits and how different they are from each other?
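# To compare the two splits side by side, one option is to keep the Keras `History` object returned by
# `model.fit` for each run and overlay the validation error curves. The sketch below assumes you have
# rerun the training cell once per split and stored the results in the (hypothetical) variables
# `history_a` and `history_b`; only standard Keras/Matplotlib calls are used.

# +
import matplotlib.pyplot as plt

def plot_val_curves(histories, metric='val_mean_absolute_error'):
    """Overlay validation curves from several Keras History objects."""
    for name, history in histories.items():
        plt.plot(history.history[metric], label=name)
    plt.xlabel('epoch')
    plt.ylabel(metric)
    plt.legend()
    plt.show()

# example usage, once history_a and history_b exist:
# plot_val_curves({'split A': history_a, 'split B': history_b})
# -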
notebooks/tasks/4_DataSplit.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/", "height": 72} id="hiQ6zAoYhyaA" outputId="0acee878-1207-42c3-9bee-a594acd44365" import urllib from IPython.display import Markdown as md ### change to reflect your notebook _nb_loc = "07_training/07c_export.ipynb" _nb_title = "Saving model state" _icons=["https://raw.githubusercontent.com/GoogleCloudPlatform/practical-ml-vision-book/master/logo-cloud.png", "https://www.tensorflow.org/images/colab_logo_32px.png", "https://www.tensorflow.org/images/GitHub-Mark-32px.png", "https://www.tensorflow.org/images/download_logo_32px.png"] _links=["https://console.cloud.google.com/vertex-ai/workbench/deploy-notebook?" + urllib.parse.urlencode({"name": _nb_title, "download_url": "https://github.com/takumiohym/practical-ml-vision-book-ja/raw/master/"+_nb_loc}), "https://colab.research.google.com/github/takumiohym/practical-ml-vision-book-ja/blob/master/{0}".format(_nb_loc), "https://github.com/takumiohym/practical-ml-vision-book-ja/blob/master/{0}".format(_nb_loc), "https://raw.githubusercontent.com/takumiohym/practical-ml-vision-book-ja/master/{0}".format(_nb_loc)] md("""<table class="tfo-notebook-buttons" align="left"><td><a target="_blank" href="{0}"><img src="{4}"/>Run in Vertex AI Workbench</a></td><td><a target="_blank" href="{1}"><img src="{5}" />Run in Google Colab</a></td><td><a target="_blank" href="{2}"><img src="{6}" />View source on GitHub</a></td><td><a href="{3}"><img src="{7}" />Download notebook</a></td></table><br/><br/>""".format(_links[0], _links[1], _links[2], _links[3], _icons[0], _icons[1], _icons[2], _icons[3])) # + [markdown] id="a8HQYsAtC0Fv" # # モデルの状態を保存する # # このノートブックでは、モデルをチェックポイントしてエクスポートします。 # + [markdown] id="5UOm2etrwYCs" # ## GPUを有効にし、ヘルパー関数を設定します # # このノートブックと、このリポジトリ内の他のほとんどすべてのノートブック # GPUを使用している場合は、より高速に実行されます。 # Colabについて: # - [編集]→[ノートブック設定]に移動します # - [ハードウェアアクセラレータ]ドロップダウンから[GPU]を選択します # # クラウドAIプラットフォームノートブック: # - https://console.cloud.google.com/ai-platform/notebooksに移動します # - GPUを使用してインスタンスを作成するか、インスタンスを選択してGPUを追加します # # 次に、テンソルフローを使用してGPUに接続できることを確認します。 # + colab={"base_uri": "https://localhost:8080/"} id="ugGJcxKAwhc2" outputId="8e946159-46cf-4aba-f53e-622e9ea8adee" import tensorflow as tf print('TensorFlow version' + tf.version.VERSION) print('Built with GPU support? ' + ('Yes!' 
if tf.test.is_built_with_cuda() else 'Noooo!')) print('There are {} GPUs'.format(len(tf.config.experimental.list_physical_devices("GPU")))) device_name = tf.test.gpu_device_name() if device_name != '/device:GPU:0': raise SystemError('GPU device not found') print('Found GPU at: {}'.format(device_name)) # - # ## トレーニングコード # # これは、[../06_preprocessing/06e_colordensityion.ipynb](../06_preprocessing/06e_colorConstraintion.ipynb)の元のコードです。 # [./07a_ingest.ipynb](./07a_ingest.ipynb)で決定された最も効率的な取り込みを使用するように変更されました。 # + import matplotlib.pylab as plt import numpy as np import tensorflow as tf import tensorflow_hub as hub import os # Load compressed models from tensorflow_hub os.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED' from tensorflow.data.experimental import AUTOTUNE IMG_HEIGHT = 448 # note *twice* what we used to have IMG_WIDTH = 448 IMG_CHANNELS = 3 CLASS_NAMES = 'daisy dandelion roses sunflowers tulips'.split() def training_plot(metrics, history): f, ax = plt.subplots(1, len(metrics), figsize=(5*len(metrics), 5)) for idx, metric in enumerate(metrics): ax[idx].plot(history.history[metric], ls='dashed') ax[idx].set_xlabel("Epochs") ax[idx].set_ylabel(metric) ax[idx].plot(history.history['val_' + metric]); ax[idx].legend([metric, 'val_' + metric]) class _Preprocessor: def __init__(self): # nothing to initialize pass def read_from_tfr(self, proto): feature_description = { 'image': tf.io.VarLenFeature(tf.float32), 'shape': tf.io.VarLenFeature(tf.int64), 'label': tf.io.FixedLenFeature([], tf.string, default_value=''), 'label_int': tf.io.FixedLenFeature([], tf.int64, default_value=0), } rec = tf.io.parse_single_example( proto, feature_description ) shape = tf.sparse.to_dense(rec['shape']) img = tf.reshape(tf.sparse.to_dense(rec['image']), shape) label_int = rec['label_int'] return img, label_int def read_from_jpegfile(self, filename): # same code as in 05_create_dataset/jpeg_to_tfrecord.py img = tf.io.read_file(filename) img = tf.image.decode_jpeg(img, channels=IMG_CHANNELS) img = tf.image.convert_image_dtype(img, tf.float32) return img def preprocess(self, img): return tf.image.resize_with_pad(img, IMG_HEIGHT, IMG_WIDTH) # most efficient way to read the data # as determined in 07a_ingest.ipynb # splits the files into two halves and interleaves datasets def create_preproc_dataset(pattern): """ Does interleaving, parallel calls, prefetch, batching Caching is not a good idea on large datasets. 
""" preproc = _Preprocessor() files = [filename for filename in tf.io.gfile.glob(pattern)] if len(files) > 1: print("Interleaving the reading of {} files.".format(len(files))) def _create_half_ds(x): if x == 0: half = files[:(len(files)//2)] else: half = files[(len(files)//2):] return tf.data.TFRecordDataset(half, compression_type='GZIP') trainds = tf.data.Dataset.range(2).interleave( _create_half_ds, num_parallel_calls=AUTOTUNE) else: trainds = tf.data.TFRecordDataset(files, compression_type='GZIP') def _preproc_img_label(img, label): return (preproc.preprocess(img), label) trainds = (trainds .map(preproc.read_from_tfr, num_parallel_calls=AUTOTUNE) .map(_preproc_img_label, num_parallel_calls=AUTOTUNE) .shuffle(200) .prefetch(AUTOTUNE) ) return trainds def create_preproc_image(filename): preproc = _Preprocessor() img = preproc.read_from_jpegfile(filename) return preproc.preprocess(img) class RandomColorDistortion(tf.keras.layers.Layer): def __init__(self, contrast_range=[0.5, 1.5], brightness_delta=[-0.2, 0.2], **kwargs): super(RandomColorDistortion, self).__init__(**kwargs) self.contrast_range = contrast_range self.brightness_delta = brightness_delta def call(self, images, training=None): if not training: return images contrast = np.random.uniform( self.contrast_range[0], self.contrast_range[1]) brightness = np.random.uniform( self.brightness_delta[0], self.brightness_delta[1]) images = tf.image.adjust_contrast(images, contrast) images = tf.image.adjust_brightness(images, brightness) images = tf.clip_by_value(images, 0, 1) return images # - # ## トレーニング # # インターリーブ、並列呼び出し、プリフェッチ、バッチ処理。 # 大規模なデータセットでは、キャッシュは適切ではありません。 #PATTERN_SUFFIX, NUM_EPOCHS = '-0000[01]-*', 3 # small PATTERN_SUFFIX, NUM_EPOCHS = '-*', 20 # full # + import os, shutil shutil.rmtree('chkpts', ignore_errors=True) os.mkdir('chkpts') def train_and_evaluate(batch_size = 32, lrate = 0.001, l1 = 0., l2 = 0., num_hidden = 16): regularizer = tf.keras.regularizers.l1_l2(l1, l2) train_dataset = create_preproc_dataset( 'gs://practical-ml-vision-book/flowers_tfr/train' + PATTERN_SUFFIX ).batch(batch_size) eval_dataset = create_preproc_dataset( 'gs://practical-ml-vision-book/flowers_tfr/valid' + PATTERN_SUFFIX ).batch(batch_size) layers = [ tf.keras.layers.experimental.preprocessing.RandomCrop( height=IMG_HEIGHT//2, width=IMG_WIDTH//2, input_shape=(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), name='random/center_crop' ), tf.keras.layers.experimental.preprocessing.RandomFlip( mode='horizontal', name='random_lr_flip/none' ), RandomColorDistortion(name='random_contrast_brightness/none'), hub.KerasLayer( "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4", trainable=False, name='mobilenet_embedding'), tf.keras.layers.Dense(num_hidden, kernel_regularizer=regularizer, activation=tf.keras.activations.relu, name='dense_hidden'), tf.keras.layers.Dense(len(CLASS_NAMES), kernel_regularizer=regularizer, activation='softmax', name='flower_prob') ] # checkpoint and early stopping callbacks model_checkpoint_cb = tf.keras.callbacks.ModelCheckpoint( filepath='./chkpts', monitor='val_accuracy', mode='max', save_best_only=True) early_stopping_cb = tf.keras.callbacks.EarlyStopping( monitor='val_accuracy', mode='max', patience=2) # model training model = tf.keras.Sequential(layers, name='flower_classification') model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lrate), loss=tf.keras.losses.SparseCategoricalCrossentropy( from_logits=False), metrics=['accuracy'] ) print(model.summary()) history = model.fit(train_dataset, 
validation_data=eval_dataset, epochs=NUM_EPOCHS, callbacks=[model_checkpoint_cb, early_stopping_cb] ) training_plot(['loss', 'accuracy'], history) return model # + colab={"base_uri": "https://localhost:8080/", "height": 929} id="jlxxxpeaT6ea" outputId="ad4f09e8-bc33-4c92-dc47-5fc73ec12f9c" model = train_and_evaluate() # - # ## モデルを保存し、それをロードして予測を行います # # このように、モデルをメモリに保存する必要はありません import os, shutil shutil.rmtree('export', ignore_errors=True) os.mkdir('export') model.save('export/flowers_model') # !ls export/flowers_model # !saved_model_cli show --tag_set serve --signature_def serving_default --dir export/flowers_model ## for prediction, we won't have TensorFlow Records. ## this is how we'd predict for individual images filenames = [ 'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9818247_e2eac18894.jpg', 'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9853885425_4a82356f1d_m.jpg', 'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/daisy/9158041313_7a6a102f7a_n.jpg', 'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/daisy/9299302012_958c70564c_n.jpg', 'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/tulips/8733586143_3139db6e9e_n.jpg', 'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/tulips/8713397358_0505cc0176_n.jpg' ] serving_model = tf.keras.models.load_model('export/flowers_model') input_images = [create_preproc_image(f) for f in filenames] f, ax = plt.subplots(1, 6, figsize=(15,15)) for idx, img in enumerate(input_images): ax[idx].imshow((img.numpy())); batch_image = tf.reshape(img, [1, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS]) batch_pred = serving_model.predict(batch_image) pred = batch_pred[0] pred_label_index = tf.math.argmax(pred).numpy() pred_label = CLASS_NAMES[pred_label_index] prob = pred[pred_label_index] ax[idx].set_title('{} ({:.2f})'.format(pred_label, prob)) # ## 署名を提供 # # クライアントがこのすべての再形成を行うことを期待することは現実的ではありません。 # 2つのことをしましょう: # (1)単にファイルの名前をとるサービング署名を定義する # (2)ファイル名のバッチを受け入れるようにコードをベクトル化して、すべてを一度に実行できるようにします。 ## it's better to vectorize the prediction filenames = tf.convert_to_tensor([ 'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9818247_e2eac18894.jpg', 'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9853885425_4a82356f1d_m.jpg', 'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/daisy/9158041313_7a6a102f7a_n.jpg', 'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/daisy/9299302012_958c70564c_n.jpg', 'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/tulips/8733586143_3139db6e9e_n.jpg', 'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/tulips/8713397358_0505cc0176_n.jpg' ]) print(filenames) input_images = tf.map_fn(create_preproc_image, filenames, fn_output_signature=tf.float32) batch_pred = serving_model.predict(input_images) print('full probs:\n', batch_pred) top_prob = tf.math.reduce_max(batch_pred, axis=[1]) print('top prob:\n', top_prob) pred_label_index = tf.math.argmax(batch_pred, axis=1) print('top cls:\n', pred_label_index) pred_label = tf.gather(tf.convert_to_tensor(CLASS_NAMES), pred_label_index) print(pred_label) # + @tf.function(input_signature=[tf.TensorSpec([None,], dtype=tf.string)]) def predict_flower_type(filenames): input_images = tf.map_fn( create_preproc_image, filenames, fn_output_signature=tf.float32 ) batch_pred = model(input_images) # same as model.predict() top_prob = tf.math.reduce_max(batch_pred, axis=[1]) pred_label_index = tf.math.argmax(batch_pred, axis=1) pred_label = 
tf.gather(tf.convert_to_tensor(CLASS_NAMES), pred_label_index) return { 'probability': top_prob, 'flower_type_int': pred_label_index, 'flower_type_str': pred_label } shutil.rmtree('export', ignore_errors=True) os.mkdir('export') model.save('export/flowers_model', signatures={ 'serving_default': predict_flower_type }) # - serving_fn = tf.keras.models.load_model('export/flowers_model').signatures['serving_default'] filenames = [ 'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9818247_e2eac18894.jpg', 'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9853885425_4a82356f1d_m.jpg', 'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/daisy/9158041313_7a6a102f7a_n.jpg', 'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/daisy/9299302012_958c70564c_n.jpg', 'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/tulips/8733586143_3139db6e9e_n.jpg', 'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/tulips/8713397358_0505cc0176_n.jpg' ] pred = serving_fn(tf.convert_to_tensor(filenames)) print(pred) print('******') print(pred['flower_type_str'].numpy()) f, ax = plt.subplots(1, 6, figsize=(15,15)) for idx, (filename, prob, pred_label) in enumerate( zip(filenames, pred['probability'].numpy(), pred['flower_type_str'].numpy())): img = tf.io.read_file(filename) img = tf.image.decode_jpeg(img, channels=3) ax[idx].imshow((img.numpy())); ax[idx].set_title('{} ({:.2f})'.format(pred_label, prob)) # ## モデルをGoogleCloudStorageに保存します # # このNotebookインスタンスがシャットダウンされた後でも使用できるように、モデルを保存します。 # BUCKETを自分が所有するバケットに変更します。 # + language="bash" # PROJECT=$(gcloud config get-value project) # BUCKET=${PROJECT} # create a bucket with this name, or change to a bucket that you own. # # gsutil cp -r export/flowers_model gs://${BUCKET}/flowers_5_trained # + [markdown] id="Duu8mX3iXANE" # ## License # Copyright 2022 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # -
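# As a quick sanity check after exporting, the serving signature can also be inspected from Python
# instead of `saved_model_cli`. This is a small added sketch using standard TensorFlow APIs on the
# `export/flowers_model` directory written above.

# +
loaded = tf.saved_model.load('export/flowers_model')
exported_fn = loaded.signatures['serving_default']
# the input should be a batch of filename strings, the outputs the three named tensors
print('inputs :', exported_fn.structured_input_signature)
print('outputs:', exported_fn.structured_outputs)
# -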
07_training/07c_export.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd from nltk.classify import NaiveBayesClassifier from nltk.corpus import subjectivity from nltk.sentiment import SentimentAnalyzer from nltk.sentiment.util import * import matplotlib.pyplot as mlpt import tweepy import csv import pandas as pd import random import numpy as np import pandas as pd # + consumer_key = '0HwwLndFzphxpc0FtcbTL1FHB' consumer_secret = '<KEY>' access_token = '<KEY>' access_token_secret = '<KEY>' auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) api = tweepy.API(auth,wait_on_rate_limit=True) # - fetch_tweets=tweepy.Cursor(api.search, q="#sbin",count=100, lang ="en",since="2019-11-25", tweet_mode="extended").items() data=pd.DataFrame(data=[[tweet_info.created_at.date(),tweet_info.full_text]for tweet_info in fetch_tweets],columns=['Date','Tweets']) print(data) data.to_csv("Tweets.csv") cdata=pd.DataFrame(columns=['Date','Tweets']) total=100 index=0 for index,row in data.iterrows(): stre=row["Tweets"] my_new_string = re.sub('[^ a-zA-Z0-9]', '', stre) cdata.sort_index() cdata.at[index,'Date']=row["Date"] #cdata.set_value(index,'Date',row["Date"]) cdata.at[index,'Tweets']=my_new_string #cdata.set_value(index,'Tweets',my_new_string) index=index+1 print(cdata.dtypes) cdata ccdata=pd.DataFrame(columns=['Date','Tweets']) # + indx=0 get_tweet="" for i in range(0,len(cdata)-1): get_date=cdata.Date.iloc[i] next_date=cdata.Date.iloc[i+1] if(str(get_date)==str(next_date)): get_tweet=get_tweet+cdata.Tweets.iloc[i]+" " if(str(get_date)!=str(next_date)): ccdata.at[indx,'Date']=get_date #ccdata.set_value(indx,'Date',get_date) ccdata.at[indx,'Tweets']=get_tweet #ccdata.set_value(indx,'Tweets',get_tweet) indx=indx+1 get_tweet=" " # - ccdata read_stock_p=pd.read_csv('SBIN.csv') read_stock_p ccdata['Prices']="" indx=0 for i in range (0,len(ccdata)): for j in range (0,len(read_stock_p)): get_tweet_date=ccdata.Date.iloc[i] get_stock_date=read_stock_p.Date.iloc[j] if(str(get_stock_date)==str(get_tweet_date)): print(get_stock_date," ",get_tweet_date) ccdata.at[i,'Prices']=int(read_stock_p.Close[j]) #ccdata.set_value(i,'Prices',int(read_stock_p.Close[j])) break ccdata ccdata=ccdata[ccdata['Prices'].str.strip().astype(bool)] ccdata ccdata['Prices'] = ccdata['Prices'].apply(np.int64) ccdata["Comp"] = '' ccdata["Negative"] = '' ccdata["Neutral"] = '' ccdata["Positive"] = '' ccdata import nltk nltk.downloader.download('vader_lexicon') from nltk.sentiment.vader import SentimentIntensityAnalyzer from nltk.sentiment.vader import SentimentIntensityAnalyzer import unicodedata sentiment_i_a = SentimentIntensityAnalyzer() for indexx, row in ccdata.T.iteritems(): try: sentence_i = unicodedata.normalize('NFKD', ccdata.loc[indexx, 'Tweets']) sentence_sentiment = sentiment_i_a.polarity_scores(sentence_i) ccdata.at[indexx,'Comp']=sentence_sentiment['compound'] ccdata.at[indexx,'Negative']=sentence_sentiment['neg'] ccdata.at[indexx,'Neutral']=sentence_sentiment['neu'] ccdata.at[indexx,'Positive']=sentence_sentiment['pos'] #ccdata.set_value(indexx, 'Comp', sentence_sentiment['compound']) #ccdata.set_value(indexx, 'Negative', sentence_sentiment['neg']) #ccdata.set_value(indexx, 'Neutral', sentence_sentiment['neu']) #ccdata.set_value(indexx, 'Positive', sentence_sentiment['pos']) except TypeError: 
print (stocks_dataf.loc[indexx, 'Tweets']) print (indexx) ccdata posi=0 nega=0 xyz=ccdata['Comp'] print(xyz) xy=0 for i in ccdata.Comp: get_val=i if(float(get_val)<(0.2)): nega=nega+1 if(float(get_val>(0.7))): posi=posi+1 xy=xy+1 posper=(posi/(posi+nega))*100 negper=(nega/(posi+nega))*100 print("% of positive tweets= ",posper) print("% of negative tweets= ",negper) arr=np.asarray([posper,negper], dtype=int) mlpt.pie(arr,labels=['positive','negative']) mlpt.plot() df_=ccdata[['Date','Prices','Comp','Negative','Neutral','Positive']].copy() train_start_index = '0' train_end_index = '6' test_start_index = '7' test_end_index = '9' train = df_.loc[train_start_index : train_end_index] test = df_.loc[test_start_index:test_end_index] sentiment_score_list = [] for date, row in train.T.iteritems(): sentiment_score = np.asarray([df_.loc[date, 'Negative'],df_.loc[date, 'Positive']]) sentiment_score_list.append(sentiment_score) numpy_df_train = np.asarray(sentiment_score_list) print(numpy_df_train) sentiment_score_list = [] for date, row in test.T.iteritems(): sentiment_score = np.asarray([df_.loc[date, 'Negative'],df_.loc[date, 'Positive']]) sentiment_score_list.append(sentiment_score) numpy_df_test = np.asarray(sentiment_score_list) print(numpy_df_test) y_train = pd.DataFrame(train['Prices']) y_test = pd.DataFrame(test['Prices']) print(y_train) from treeinterpreter import treeinterpreter as ti from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import classification_report,confusion_matrix import matplotlib.pyplot as plt rf = RandomForestRegressor() rf.fit(numpy_df_train, y_train) prediction, bias, contributions = ti.predict(rf, numpy_df_test) print(prediction) print(prediction) idx=np.arange(int(test_start_index),int(test_end_index)) predictions_df_ = pd.DataFrame(data=prediction[:], index = idx, columns=['Prices']) predictions_df_ ax = predictions_df_.rename(columns={"Prices": "predicted_price"}).plot(title='Random Forest predicted prices')#predicted value ax.set_xlabel("Indexes") ax.set_ylabel("Stock Prices") fig = y_test.rename(columns={"Prices": "actual_price"}).plot(ax = ax).get_figure()#actual value fig.savefig("random forest.png") # + from treeinterpreter import treeinterpreter as ti from sklearn.tree import DecisionTreeRegressor from sklearn.linear_model import LinearRegression from sklearn.metrics import classification_report,confusion_matrix reg = LinearRegression() reg.fit(numpy_df_train, y_train) # - reg.predict(numpy_df_test) '''Since our dataset is very small and as you can see that fetching 600 tweets could only make data for just 10 days. Also the prediction is not very great in such small dataset. So we found this new dataset on internet which has the Text as "Tweets" and respective "close price" and "Adjusted close price". Adjusted Close Price: An adjusted closing price is a stock's closing price on any given day of trading that has been amended to include any distributions and corporate actions that occurred at any time before the next day's open.'''
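# The figure above compares predicted and actual prices visually; a numeric error metric makes the
# comparison easier to state. This is a small added sketch using scikit-learn metrics on the test
# prices and the random forest predictions computed above.

# +
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error

mae = mean_absolute_error(y_test, prediction)
rmse = np.sqrt(mean_squared_error(y_test, prediction))
print("Random forest MAE : {:.2f}".format(mae))
print("Random forest RMSE: {:.2f}".format(rmse))
# -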
stock.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # cMLP Lagged VAR Demo # # - In this notebook, we train a cMLP model on linear VAR data with lagged interactions. # - After examining the Granger causality discovery, we train a debiased model using only the discovered interactions. import torch import numpy as np import matplotlib.pyplot as plt from synthetic import simulate_lorenz_96, simulate_lorenz_96_nonstationary from models.cmlp import cMLP, cMLPSparse, train_model_ista, train_unregularized # For GPU acceleration device = torch.device('cuda') # Simulate data X_np, GC = simulate_lorenz_96_nonstationary(p=10, F=10, T=1000) X = torch.tensor(X_np[np.newaxis], dtype=torch.float32, device=device) # Plot data fig, axarr = plt.subplots(1, 2, figsize=(16, 5)) axarr[0].plot(X_np) axarr[0].set_xlabel('T') axarr[0].set_title('Entire time series') axarr[1].plot(X_np[:50]) axarr[1].set_xlabel('T') axarr[1].set_title('First 50 time points') plt.tight_layout() plt.show() # # Still need to tune $\lambda$ and perhaps lr # Set up model cmlp = cMLP(X.shape[-1], lag=1, hidden=[100]).cuda(device=device) # Train with ISTA train_loss_list = train_model_ista( cmlp, X, lam=25, lam_ridge=0.05, lr=5e-3, penalty='H', max_iter=50000, check_every=100) # Loss function plot plt.figure(figsize=(8, 5)) plt.plot(50 * np.arange(len(train_loss_list)), train_loss_list) plt.title('cMLP training') plt.ylabel('Loss') plt.xlabel('Training steps') plt.tight_layout() plt.show() # + # Verify learned Granger causality GC_est = cmlp.GC().cpu().data.numpy() print('True variable usage = %.2f%%' % (100 * np.mean(GC))) print('Estimated variable usage = %.2f%%' % (100 * np.mean(GC_est))) print('Accuracy = %.2f%%' % (100 * np.mean(GC == GC_est))) # Make figures fig, axarr = plt.subplots(1, 2, figsize=(16, 5)) axarr[0].imshow(GC, cmap='Blues') axarr[0].set_title('GC actual') axarr[0].set_ylabel('Affected series') axarr[0].set_xlabel('Causal series') axarr[0].set_xticks([]) axarr[0].set_yticks([]) axarr[1].imshow(GC_est, cmap='Blues', vmin=0, vmax=1, extent=(0, len(GC_est), len(GC_est), 0)) axarr[1].set_title('GC estimated') axarr[1].set_ylabel('Affected series') axarr[1].set_xlabel('Causal series') axarr[1].set_xticks([]) axarr[1].set_yticks([]) # Mark disagreements for i in range(len(GC_est)): for j in range(len(GC_est)): if GC[i, j] != GC_est[i, j]: rect = plt.Rectangle((j, i-0.05), 1, 1, facecolor='none', edgecolor='red', linewidth=1) axarr[1].add_patch(rect) plt.show() # - # Verify lag selection for i in range(len(GC_est)): # Get true GC GC_lag = np.zeros((1, len(GC_est))) GC_lag[:3, GC[i].astype(bool)] = 1.0 # Get estimated GC GC_est_lag = cmlp.GC(ignore_lag=False, threshold=False)[i].cpu().data.numpy().T[::-1] # Make figures fig, axarr = plt.subplots(1, 2, figsize=(16, 5)) axarr[0].imshow(GC_lag, cmap='Blues', extent=(0, len(GC_est), 1, 0)) axarr[0].set_title('Series %d true GC' % (i + 1)) axarr[0].set_ylabel('Lag') axarr[0].set_xlabel('Series') axarr[0].set_xticks(np.arange(len(GC_est)) + 0.5) axarr[0].set_xticklabels(range(len(GC_est))) axarr[0].set_yticks(np.arange(1) + 0.5) axarr[0].set_yticklabels(range(1, 1 + 1)) axarr[0].tick_params(axis='both', length=0) axarr[1].imshow(GC_est_lag, cmap='Blues', extent=(0, len(GC_est), 1, 0)) axarr[1].set_title('Series %d estimated GC' % (i + 1)) axarr[1].set_ylabel('Lag') axarr[1].set_xlabel('Series') 
axarr[1].set_xticks(np.arange(len(GC_est)) + 0.5) axarr[1].set_xticklabels(range(len(GC_est))) axarr[1].set_yticks(np.arange(1) + 0.5) axarr[1].set_yticklabels(range(1, 1 + 1)) axarr[1].tick_params(axis='both', length=0) # Mark nonzeros for i in range(len(GC_est)): for j in range(1): if GC_est_lag[j, i] > 0.0: rect = plt.Rectangle((i, j), 1, 1, facecolor='none', edgecolor='green', linewidth=1.0) axarr[1].add_patch(rect) plt.show() # # Train sparsified model # + # Create a debiased model sparsity = cmlp.GC().bool() cmlp_sparse = cMLPSparse(X.shape[-1], sparsity, lag=1, hidden=[100]).cuda(device=device) # Train train_loss_list = train_unregularized(cmlp_sparse, X, lr=1e-3, max_iter=10000, check_every=100, verbose=1) # - # Plot loss function plt.figure(figsize=(10, 5)) plt.title('Debiased model training') plt.ylabel('Loss') plt.xlabel('Training steps') plt.plot(100 * np.arange(len(train_loss_list)), train_loss_list) plt.show()
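# Beyond the plots, the agreement between the estimated and true Granger-causality patterns can be
# summarized numerically. This is a small added sketch that uses only the `GC` and `GC_est` arrays
# computed above (before the debiased retraining).

# +
tp = int(((GC_est == 1) & (GC == 1)).sum())  # true positives: correctly discovered interactions
fp = int(((GC_est == 1) & (GC == 0)).sum())  # false positives: spurious interactions
fn = int(((GC_est == 0) & (GC == 1)).sum())  # false negatives: missed interactions
tn = int(((GC_est == 0) & (GC == 0)).sum())  # true negatives

precision = tp / (tp + fp) if (tp + fp) else float('nan')
recall = tp / (tp + fn) if (tp + fn) else float('nan')
print('TP={}, FP={}, FN={}, TN={}'.format(tp, fp, fn, tn))
print('precision={:.2f}, recall={:.2f}'.format(precision, recall))
# -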
cmlp_lorenz_demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + hideCode=true hideOutput=true import os, sys try: from synapse.lib.jupyter import * except ImportError as e: # Insert the root path of the repository to sys.path. # This assumes the notebook is located three directories away # From the root synapse directory. It may need to be varied synroot = os.path.abspath('../../../') sys.path.insert(0, synroot) from synapse.lib.jupyter import * # + hideCode=true hideOutput=true # Create a cortex core = await getTempCoreCmdr() # + active="" # .. highlight:: none # # .. _storm-adv-vars: # # Storm Reference - Advanced - Variables # ====================================== # # Storm supports the use of **variables.** A :ref:`gloss-variable` is a value that can change depending on conditions or on information passed to the Storm query. (Contrast this with a :ref:`gloss-constant`, which is a value that is fixed and does not change.) # # Variables can be used in a variety of ways, from providing simpler or more efficient ways to reference node properties, to facilitating bulk operations, to performing complex tasks or writing extensions to Synapse in Storm. # # These documents approach variables and their use from a **user** standpoint and aim to provide sufficient background for users to understand and begin to use variables. They do not provide an in-depth discussion of variables and their use from a fully developer-oriented perspective. # # - `Storm Operating Concepts`_ # - `Variable Concepts`_ # # - `Variable Scope`_ # - `Call Frame`_ # - `Runtsafe vs. Non-Runtsafe`_ # # - `Types of Variables`_ # # - `Built-In Variables`_ # - `User-Defined Variables`_ # # .. _op-concepts: # # Storm Operating Concepts # ------------------------ # # When leveraging variables in Storm, it is important to keep in mind the high-level :ref:`storm-op-concepts`. Specifically: # # - Storm operations (e.g., lifts, filters, pivots, etc.) are performed on **nodes.** # - Operations can be **chained** and are executed in order from left to right. # - Storm acts as an **execution pipeline,** with each node passed individually and independently through the chain of Storm operations. # - Most Storm operations **consume** nodes — that is, a given operation (such as a filter or pivot) acts upon the inbound node in some way and returns only the node or set of nodes that result from that operation. # # These principles apply to variables that reference nodes (or node properties) in Storm just as they apply to nodes, and so affect the way variables behave within Storm queries. # # .. _var-concepts: # # Variable Concepts # ----------------- # # .. _var-scope: # # Variable Scope # ++++++++++++++ # # A variable’s **scope** is its lifetime and under what conditions it may be accessed. There are two dimensions that impact a variable’s scope: its **call frame** and its **runtime safety** ("runtsafety"). # # .. _var-call-frame: # # Call Frame # ++++++++++ # # A variable’s **call frame** is where the variable is used. The main Storm query starts with its own call frame, and each call to a "pure" Storm command, function, or subquery creates a new call frame. The new call frame gets a copy of all the variables from the calling call frame. Changes to existing variables or the creation of new variables within the new call frame do not impact the calling scope. # # Runtsafe vs. 
Non-Runtsafe # +++++++++++++++++++++++++ # # An important distinction to keep in mind when using variables in Storm is whether the variable is runtime-safe (":ref:`gloss-runtsafe`") or non-runtime safe (":ref:`gloss-non-runtsafe`"). # # A variable that is **runtsafe** has a value independent of any nodes passing through the Storm pipeline. For example, a variable whose value is explicitly set, such as ``$string = mystring`` or ``$ipv4 = 8.8.8.8`` is considered runtsafe because the value does not change / is not affected by the specific node passing through the Storm pipeline. # # A variable that is **non-runtsafe** has a value derived from a node passing through the Storm pipeline. For example, a variable whose value is set to a node property value may change based on the specific node passing through the Storm pipeline. In other words, if your Storm query is operating on a set of DNS A nodes (``inet:dns:a``) and you define the variable ``$fqdn = :fqdn`` (setting the variable to the value of the ``:fqdn`` secondary property), the value of the variable will change based on the specific value of that property for each ``inet:dns:a`` node in the pipeline. # # All non-runtsafe variables are **scoped** to an individual node as it passes through the Storm pipeline. This means that a variable’s value based on a given node is not available when processing a different node (at least not without using special commands, methods, or libraries). In other words, the path of a particular node as it passes through the Storm pipeline is its own scope. # # The "safe" in non-runtsafe should **not** be interpreted as meaning the use of non-runtsafe variables is somehow "risky" or involves insecure programming or processing of data. It simply means the value of the variable is not safe from changing (i.e., it may change) as the Storm pipeline progresses. # # .. _var-types: # # Types of Variables # ------------------ # # Storm supports two types of variables: # # - **Built-in variables.** Built-in variables facilitate many common Storm operations. They may vary in their scope and in the context in which they can be used. # - **User-defined variables** User-defined variables are named and defined by the user. They are most often limited in scope and facilitate operations within a specific Storm query. # # .. _vars-builtin: # # Built-In Variables # ++++++++++++++++++ # # Storm includes a set of built-in variables and associated variable methods (:ref:`storm-adv-methods`) and libraries (:ref:`storm-adv-libs`) that facilitate Cortex-wide, node-specific, and context-specific operations. # # Built-in variables differ from user-defined variables in that built-in variable names: # # - are initialized at Cortex start, # - are reserved, # - can be accessed automatically (i.e., without needing to define them) from within Storm, and # - persist across user sessions and Cortex reboots. # # .. _vars-global: # # Global Variables # ~~~~~~~~~~~~~~~~ # # Global variables operate independently of any node. That is, they can be invoked in a Storm query in the absence of any nodes in the Storm execution pipeline (though they can also be leveraged when performing operations on nodes). # # .. _vars-global-lib: # # $lib # #### # # The library variable ( ``$lib`` ) is a built-in variable that provides access to the global Storm library. In Storm, libraries are accessed using built-in variable names (e.g., ``$lib.print()``). # # See :ref:`storm-adv-libs` for descriptions of the libraries available within Storm. # # .. 
_vars-node: # # Node-Specific Variables # ~~~~~~~~~~~~~~~~~~~~~~~ # # Storm includes node-specific variables that are designed to operate on or in conjunction with nodes and require one or more nodes in the Storm pipeline. # # .. NOTE:: # # Node-specific variables are always non-runtsafe. # # .. _vars-node-node: # # $node # ##### # # The node variable (``$node``) is a built-in Storm variable that **references the current node in the Storm query.** Specifically, this variable contains the inbound node’s node object, and provides access to the node’s attributes, properties, and associated attribute and property values. # # Invoking this variable during a Storm query is useful when you want to: # # - access the raw and entire node object, # - store the value of the current node before pivoting to another node, or # - use an aspect of the current node in subsequent query operations. # # The ``$node`` variable supports a number of built-in methods that can be used to access specific data or properties associated with a node. See the :ref:`meth-node` section of the :ref:`storm-adv-methods` document for additional detail and examples. # # .. _vars-node-path: # # $path # ##### # # The path variable (``$path``) is a built-in Storm variable that **references the path of a node as it travels through the pipeline of a Storm query.** # # The ``$path`` variable is not used on its own, but in conjunction with its methods. See the :ref:`meth-path` section of the :ref:`storm-adv-methods` documents for additional detail and examples. # # .. _vars-trigger: # # Trigger-Specific Variables # ~~~~~~~~~~~~~~~~~~~~~~~~~~ # # A :ref:`gloss-trigger` is used to support automation within a Cortex. Triggers use events (such as the creation of a node, setting the value of a node’s property, or applying a tag to a node) to fire ("trigger") the execution of a predefined Storm query. Storm uses a built-in variable specifically within the context of trigger-initiated Storm queries. # # .. _vars-trigger-tag: # # $tag # #### # # Within the context of triggers that fire on ``tag:add`` events, the ``$tag`` variable represents the name of the tag that caused the trigger to fire. # # For example: # # You write a trigger to fire when any tag matching the expression ``#foo.bar.*`` is added to a ``file:bytes`` node. The trigger executes the following Storm command: # # .. parsed-literal:: # # -> hash:md5 [ +#$tag ] # # Because the trigger uses a wildcard expression, it will fire on any tag that matches that expression (e.g., ``#foo.bar.hurr``, ``#foo.bar.derp``, etc.). The Storm snippet above will take the inbound ``file:bytes`` node, pivot to the file’s associated MD5 node (``hash:md5``), and apply the same tag that fired the trigger to the MD5. # # See the :ref:`auto-triggers` section of the :ref:`storm-ref-automation` document and the Storm :ref:`storm-trigger` command for a more detailed discussion of triggers and associated Storm commands. # # .. _vars-csvtool: # # CSVTool-Specific Variables # ~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Synapse's **CSVTool** is used to ingest (import) data into or export data from a Cortex using comma-separated value (CSV) format. Storm includes a built-in variable to facilitate bulk data ingest using CSV. # # .. _vars-csvtool-rows: # # $rows # ##### # # The ``$rows`` variable refers to the set of rows in a CSV file. When ingesting data into a Cortex, CSVTool reads a CSV file and a file containing a Storm query that tells CSVTool how to process the CSV data. 
The Storm query is typically constructed to iterate over the set of rows (``$rows``) using a "for" loop that uses user-defined variables to reference each field (column) in the CSV data. # # For example: # # .. parsed-literal:: # # for ($var1, $var2, $var3, $var4) in $rows { <do stuff> } # # See :ref:`syn-tools-csvtool` for a more detailed discussion of CSVTool use and associated Storm syntax. # # .. _vars-user: # # User-Defined Variables # ++++++++++++++++++++++ # # User-defined variables can be defined in one of two ways: # # - At runtime (i.e., within the scope of a specific Storm query). This is the most common use for user-defined variables. # - Mapped via options passed to the Storm runtime (i.e., when using the ``--optifle`` option from Synapse cmdr or via Cortex API access). This method is less common. When defined in this manner, user-defined variables will behave as though they are built-in variables that are runtsafe. # # .. _vars-names: # # Variable Names # ~~~~~~~~~~~~~~ # # All variable names in Storm (including built-in variables) begin with a dollar sign ( ``$`` ). A variable name can be any alphanumeric string, **except for** the name of a built-in variable (see :ref:`vars-builtin`), as those names are reserved. Variable names are case-sensitive; the variable ``$MyVar`` is different from ``$myvar``. # # # .. NOTE:: # # Storm will not prevent you from using the name of a built-in variable to define a variable (such as ``$node = 7``). However, doing so may result in undesired effects or unexpected errors due to the variable name collision. # # .. _vars-define: # # Defining Variables # ~~~~~~~~~~~~~~~~~~ # # Within Storm, a user-defined variable is defined using the syntax: # # .. parsed-literal:: # # $<varname> = <value> # # The variable name must be specified first, followed by the equals sign and the value of the variable itself. # # ``<value>`` can be: # # - an explicit value / literal, # - a node secondary or universal property, # - a tag or tag property, # - a built-in variable or method, # - a library function, # - a mathematical expression / "dollar expression", or # - an embedded query. # # Examples # ~~~~~~~~ # # Two types of examples are used below: # # - **Demonstrative example:** the ``$lib.print()`` library function is used to display the value of the user-defined variable being set. This is done for illustrative purposes only; ``$lib.print()`` is not required in order to use variables or methods. # # Keep Storm's operation chaining, pipeline, and node consumption aspects in mind when reviewing the demonstrative examples below. When using ``$lib.print()`` to display the value of a variable, the queries below will: # # - Lift the specified node(s). # - Assign the variable. Note that assigning a variable has no impact on the nodes themselves. # - Print the variable's value. # - Return any nodes still in the pipeline. Because variable assignment doesn't impact the node(s), they are not consumed and so are returned (displayed) at the CLI. # # The effect of this process is that for each node in the Storm query pipeline, the output of ``$lib.print()`` is displayed, followed by the relevant node. # # - **Use-case example:** the user-defined variable is used in one or more sample queries to illustrate possible practical use cases. These represent exemplar Storm queries for how a variable or method might be used in practice. 
While we have attempted to use relatively simple examples for clarity, some examples may leverage additional Storm features such as subqueries, subquery filters, or flow control elements such as "for" loops or "switch" statements. # # *Assign a literal to a user-defined variable:* # # - Assign the value 5 to the variable ``$threshold``: # + hideCode=true # Define and print test query q = '$threshold=5 $lib.print($threshold)' q1 = '\n' print(q + q1) # Execute the query to test it and get the packed nodes (podes). podes = await core.eval(q, num=0, cmdr=True) # + active="" # - Tag any ``file:bytes`` nodes that have a number of AV signature hits higher than a given threshold for review: # + hideCode=true hideOutput=true # Make some nodes q = '[file:bytes=sha256:0000746c55336cd8d34885545f9347d96607d0391fbd3e76dae7f2b3447775b4 it:av:filehit=(sha256:0000746c55336cd8d34885545f9347d96607d0391fbd3e76dae7f2b3447775b4, (0bfef0179bf358f3fe7bad67fa529c77, trojan.gen.2)) it:av:filehit=(sha256:0000746c55336cd8d34885545f9347d96607d0391fbd3e76dae7f2b3447775b4, (325cd5a01724fa0c63907eac044f4961, trojan.agent/gen-onlinegames)) it:av:filehit=(sha256:0000746c55336cd8d34885545f9347d96607d0391fbd3e76dae7f2b3447775b4, (ac8d9645c6cdf123683a73a02e231052, w32/imestartup.a.gen!eldorado))]' q1 = '[file:bytes=sha256:00007694135237ec8dc5234007043814608f239befdfc8a61b992e4d09e0cf3f it:av:filehit=(sha256:00007694135237ec8dc5234007043814608f239befdfc8a61b992e4d09e0cf3f, (be9793d772d23269ab0c165af819e74a, troj_gen.r002c0gkj17)) it:av:filehit=(sha256:00007694135237ec8dc5234007043814608f239befdfc8a61b992e4d09e0cf3f, (eef2ccb70945fb28a45c7f14f2a0f11d, malicious.1b8fb7)) it:av:filehit=(sha256:00007694135237ec8dc5234007043814608f239befdfc8a61b992e4d09e0cf3f, (ce4e34d2f9207095aa7351986bbad357, trojan-ddos.win32.stormattack.c)) it:av:filehit=(sha256:00007694135237ec8dc5234007043814608f239befdfc8a61b992e4d09e0cf3f, (ed344310e3203ec4348c4ee549a3b188, "trojan ( 00073eb11 )")) it:av:filehit=(sha256:00007694135237ec8dc5234007043814608f239befdfc8a61b992e4d09e0cf3f, (f5b5daeda10e487fccc07463d9df6b47, tool.stormattack.win32.10)) it:av:filehit=(sha256:00007694135237ec8dc5234007043814608f239befdfc8a61b992e4d09e0cf3f, (a0f25a5ba637d5c8e7c42911c4336085, trojan/w32.agent.61440.eii))]' podes = await core.eval(q, num=4, cmdr=False) podes = await core.eval(q1, num=7, cmdr=False) # + hideCode=true # Define and print test query q = '$threshold=5 file:bytes +{ -> it:av:filehit } >= $threshold [ +#review ]' print(q) # Execute the query to test it and get the packed nodes (podes). podes = await core.eval(q, num=1, cmdr=False) # + active="" # *Assign a node secondary property to a user-defined variable:* # # - Assign the ``:user`` property from an Internet-based account (``inet:web:acct``) to the variable ``$user``: # + hideCode=true hideOutput=true # Make a node q = '[inet:web:acct=(twitter.com,bert) :email=<EMAIL>]' podes = await core.eval(q, num=1, cmdr=False) # + hideCode=true # Define and print test query q = 'inet:web:acct=(twitter.com,bert) $user=:user $lib.print($user)' q1 = '\n' print(q + q1) # Execute the query to test it and get the packed nodes (podes). 
podes = await core.eval(q, num=1, cmdr=True) # + active="" # - Find email addresses associated with a set of Internet accounts where the username of the email address is the same as the username of the Internet account: # + hideCode=true hideOutput=true # Make another node q = '[inet:web:acct=(twitter.com,ernie) :email=<EMAIL>]' podes = await core.eval(q, num=1, cmdr=False) # + hideCode=true # Define and print test query q = 'inet:web:acct $user=:user -> inet:email +:user=$user' print(q) # Execute the query to test it and get the packed nodes (podes). podes = await core.eval(q, num=1, cmdr=False) # + active="" # *Assign a node universal property to a user-defined variable:* # # - Assign the ``.seen`` universal property from a DNS A node to the variable ``$time``: # + hideCode=true hideOutput=true # Make a node q = '[inet:dns:a=(woot.com,172.16.31.10) .seen=("2018/11/27 03:28:14","2019/08/15 18:32:47")]' podes = await core.eval(q, num=1, cmdr=False) # + hideCode=true # Define and print test query q = 'inet:dns:a=(woot.com,172.16.31.10) $time=.seen $lib.print($time)' q1 = '\n' print(q + q1) # Execute the query to test it and get the packed nodes (podes). podes = await core.eval(q, num=1, cmdr=True) # + active="" # .. NOTE:: # # In the example above, the raw value of the ``.seen`` property is assigned to the ``$time`` variable. ``.seen`` is an interval (:ref:`type-ival`) type, consisting of a pair of minimum and maximum time values. These values are stored in Unix epoch millis, which are the values shown by the output of the ``$lib.print()`` function. # # + active="" # - Given a DNS A record, find other DNS A records that pointed to the same IP address in the same time window: # + hideCode=true hideOutput=true # Make some moar nodes q = '[ ( inet:dns:a=(hurr.net,172.16.31.10) .seen=("2018/12/09 06:02:53","2019/01/03 11:27:01") ) ( inet:dns:a=(derp.org,172.16.31.10) .seen=("2019/09/03 01:11:23","2019/12/14 14:22:00"))]' podes = await core.eval(q, num=2, cmdr=False) # + hideCode=true # Define and print test query q = 'inet:dns:a=(woot.com,172.16.31.10) $time=.seen -> inet:ipv4 -> inet:dns:a +.seen@=$time' print(q) # Execute the query to test it and get the packed nodes (podes). podes = await core.eval(q, num=2, cmdr=False) # + active="" # *Assign a tag to a user-defined variable:* # # - Assign the explicit tag value ``cno.infra.anon.tor`` to the variable ``$tortag``: # + hideCode=true # Define and print test query q = '$tortag=cno.infra.anon.tor $lib.print($tortag)' q1 = '\n' print(q + q1) # Execute the query to test it and get the packed nodes (podes). podes = await core.eval(q, num=0, cmdr=True) # + active="" # - Tag IP addresses that Shodan says are associated with Tor with the ``#cno.infra.anon.tor`` tag: # + hideCode=true hideOutput=true # Make some nodes q = '[ inet:ipv4=172.16.31.10 inet:ipv4=172.16.58.3 inet:ipv4=192.168.127.12 +#rep.shodan.tor ]' podes = await core.eval(q, num=3, cmdr=False) # + hideCode=true # Define and print test query q = '$tortag=cno.infra.anon.tor inet:ipv4#rep.shodan.tor [ +#$tortag ]' print(q) # Execute the query to test it and get the packed nodes (podes). 
podes = await core.eval(q, num=3, cmdr=False) # + active="" # *Assign a tag timestamp to a user-defined variable:* # # - Assign the times associated with Threat Group 20’s use of a malicious domain to the variable ``$time``: # + hideCode=true hideOutput=true # Make a node q = '[inet:fqdn=evildomain.com +#cno.threat.t20.tc=(2015/09/08,2017/09/08)]' podes = await core.eval(q, num=1, cmdr=False) # + hideCode=true # Define and print test query q = 'inet:fqdn=evildomain.com $time=#cno.threat.t20.tc $lib.print($time)' q1 = '\n' print(q + q1) # Execute the query to test it and get the packed nodes (podes). podes = await core.eval(q, num=1, cmdr=True) # + active="" # - Find DNS A records for any subdomain associated with a Threat Group 20 zone during the time they controlled the zone: # + hideCode=true hideOutput=true # Make some moar nodes q = '[ (inet:dns:a=(www.evildomain.com,172.16.31.10) .seen=(2016/07/12,2016/12/13)) (inet:dns:a=(smtp.evildomain.com,172.16.58.3) .seen=(2016/04/04,2016/08/02)) (inet:dns:a=(evildomain.com,172.16.17.32) .seen=(2017/12/22,2019/12/22))]' podes = await core.eval(q, num=3, cmdr=False) # + hideCode=true # Define and print test query q = 'inet:fqdn#cno.threat.t20.tc $time=#cno.threat.t20.tc -> inet:fqdn:zone -> inet:dns:a +.seen@=$time' print(q) # Execute the query to test it and get the packed nodes (podes). podes = await core.eval(q, num=2, cmdr=False) # + active="" # *Assign a tag property to a user-defined variable:* # # - Assign the risk value assigned by DomainTools to an FQDN to the variable ``$risk``: # + hideCode=true hideOutput=true # Create a custom tag property await core.core.addTagProp('risk', ('int', {'min': 0, 'max': 100}), {'doc': 'Risk score'}) # + hideCode=true hideOutput=true # Make a node q = '[inet:fqdn=badsite.org +#rep.domaintools:risk=85]' podes = await core.eval(q, num=1, cmdr=False) # + hideCode=true # Define and print test query q = 'inet:fqdn=badsite.org $risk=#rep.domaintools:risk $lib.print($risk)' q1 = '\n' print(q + q1) # Execute the query to test it and get the packed nodes (podes). podes = await core.eval(q, num=1, cmdr=True) # + active="" # - Given an FQDN with a risk score, find all FQDNs with an equal or higher risk score: # + hideCode=true hideOutput=true # Make some moar nodes: q = '[ (inet:fqdn=stillprettybad.com +#rep.domaintools:risk=92) (inet:fqdn=notsobad.net +#rep.domaintools:risk=67)]' podes = await core.eval(q, num=2, cmdr=False) # + hideCode=true # Define and print test query q = 'inet:fqdn=badsite.org $risk=#rep.domaintools:risk inet:fqdn#rep.domaintools:risk>=$risk' print(q) # Execute the query to test it and get the packed nodes (podes). podes = await core.eval(q, num=3, cmdr=False) # + active="" # *Assign a built-in variable to a user-defined variable:* # # - Assign a ``ps:person`` node to the variable ``$person``: # + hideCode=true hideOutput=true # Make a node q = '[ps:person="0040a7600a7a4b59297a287d11173d5c"]' podes = await core.eval(q, num=1, cmdr=False) # + hideCode=true # Define and print test query q = 'ps:person=0040a7600a7a4b59297a287d11173d5c $person=$node $lib.print($person)' q1 = '\n' print(q + q1) # Execute the query to test it and get the packed nodes (podes). 
podes = await core.eval(q, num=1, cmdr=True) # + active="" # - For a given person, find all objects the person "has" and all the news articles that reference that person (uses the Storm :ref:`storm-tee` command): # + hideCode=true hideOutput=true # Make some moar nodes: q = '[ (edge:has=((ps:person,0040a7600a7a4b59297a287d11173d5c),(inet:web:acct,(twitter.com,mytwitter)))) (edge:refs=((media:news,00076a3f20808a14cbaa01ad51111edc),(ps:person,0040a7600a7a4b59297a287d11173d5c)))]' podes = await core.eval(q, num=2, cmdr=False) # + hideCode=true # Define and print test query q = 'ps:person=0040a7600a7a4b59297a287d11173d5c $person = $node | tee { edge:has:n1=$person -> * } { edge:refs:n2=$person <- * +media:news }' print(q) # Execute the query to test it and get the packed nodes (podes). podes = await core.eval(q, num=2, cmdr=False) # + active="" # .. NOTE:: # # See the :ref:`meth-node` section of the :ref:`storm-adv-methods` document for additional detail and examples when using the ``$node`` built-in variable and related methods. # + active="" # *Assign a built-in variable method to a user-defined variable:* # # - Assign the value of a domain node to the variable ``$fqdn``: # + hideCode=true hideOutput=true # Make a node: q = '[ inet:fqdn=mail.mydomain.com ]' podes = await core.eval(q, num=1, cmdr=False) # + hideCode=true # Define and print test query q = 'inet:fqdn=mail.mydomain.com $fqdn=$node.value() $lib.print($fqdn)' q1 = '\n' print(q + q1) # Execute the query to test it and get the packed nodes (podes). podes = await core.eval(q, num=1, cmdr=True) # + active="" # - Find the DNS A records associated with a given domain where the PTR record for the IP matches the FQDN: # + hideCode=true hideOutput=true # Make some moar nodes: q = '[ inet:dns:a=(mail.mydomain.com,11.12.13.14) inet:dns:a=(mail.mydomain.com,192.168.3.11) ( inet:ipv4=192.168.3.11 :dns:rev=mail.mydomain.com ) ]' podes = await core.eval(q, num=3, cmdr=False) # + hideCode=true # Define and print test query q = 'inet:fqdn=mail.mydomain.com $fqdn=$node.value() -> inet:dns:a +{ -> inet:ipv4 +:dns:rev=$fqdn }' print(q) # Execute the query to test it and get the packed nodes (podes). podes = await core.eval(q, num=1, cmdr=False) # + active="" # *Assign a library function to a user-defined variable:* # # - Assign a value to the variable ``$mytag`` using a library function: # + hideCode=true # Define and print test query q = '$mytag = $lib.str.format("cno.mal.sofacy") $lib.print($mytag)' q1 = '\n' print(q + q1) # Execute the query to test it and get the packed nodes (podes). podes = await core.eval(q, num=0, cmdr=True) # + active="" # - Assign a value to the variable ``$mytag`` using a library function (example 2): # + hideCode=true hideOutput=true # Make a node: q = '[ file:bytes=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 +#code.fam.sofacy ]' podes = await core.eval(q, num=1, cmdr=False) # + hideCode=true # Define and print test query q = 'file:bytes=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 for $tag in $node.tags(code.fam.*) { $malfam=$tag.split(".").index(2) $mytag=$lib.str.format("cno.mal.{malfam}", malfam=$malfam) $lib.print($mytag) }' q1 = '\n' print(q + q1) # Execute the query to test it and get the packed nodes (podes). 
podes = await core.eval(q, num=1, cmdr=True) # + active="" # The above example leverages: # # - three variables (``$tag``, ``$malfam``, and ``$mytag``); # - the :ref:`meth-node-tags` method; # - the string ``split()`` and ``index()`` methods and the ``$lib.str.format()`` library function; as well as # - a "for" loop. # # # + active="" # - If a file is tagged as part of a malicious code (malware) family, then also tag the file to indicate it is part of that malware's ecosystem: # + hideCode=true # Define and print test query q = 'file:bytes=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 for $tag in $node.tags(code.fam.*) { $malfam=$tag.split(".").index(2) $mytag=$lib.str.format("cno.mal.{malfam}", malfam=$malfam) [ +#$mytag ] }' print(q) # Execute the query to test it and get the packed nodes (podes). podes = await core.eval(q, num=1, cmdr=False) # + active="" # .. NOTE:: # # The above query could be written as a **trigger** (:ref:`auto-triggers`) so that any time a ``#code.fam.<family>`` tag was applied to a file, the corresponding ``#cno.mal.<family>`` tag would be applied automatically. # + active="" # *Use a mathematical expression / "dollar expression" as a variable:* # # - Use a mathematical expression to increment the variable ``$x``: # + hideCode=true # Define and print test query q = '$x=5 $x=$($x + 1) $lib.print($x)' q1 = '\n' print(q + q1) # Execute the query to test it and get the packed nodes (podes). podes = await core.eval(q, num=0, cmdr=True) # + active="" # - For any domain with a "risk" score from Talos, tag those with a score greater than 75 as "high risk": # + hideCode=true hideOutput=true # Make some nodes: q = '[ ( inet:fqdn=woot.com +#rep.talos:risk=36 ) ( inet:fqdn=derp.net +#rep.talos:risk=78 ) ( inet:fqdn=hurr.org +#rep.talos:risk=92 ) ]' podes = await core.eval(q, num=3, cmdr=False) # + hideCode=true # Define and print test query q = 'inet:fqdn#rep.talos:risk $risk=#rep.talos:risk if $($risk > 75) { [ +#high.risk ] }' print(q) # Execute the query to test it and get the packed nodes (podes). podes = await core.eval(q, num=3, cmdr=False) # + active="" # .. NOTE:: # # In the examples above, the mathematical expressions ``$($x + 1)`` and ``$($risk > 75)`` are not themselves variables, despite starting with a dollar sign ( ``$`` ). The syntax convention of "dollar expression" ( ``$( <expression> )`` ) allows Storm to support the use of variables (like ``$x`` and ``$risk``) in mathematical and logical operations. # + active="" # *Assign an embedded query to a user-defined variable:* # # - TBD # + hideCode=true hideOutput=true # Close cortex because done _ = await core.fini()
docs/synapse/userguides/storm_adv_vars.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="mebohSU88Ssk" colab_type="text" # # Italy Crime Index web scraped in Python # Data scraped from [Il Sole 24 ORE](https://lab24.ilsole24ore.com/indice-della-criminalita/indexT.php) with `BeautifulSoup` library and visualized with `Matplotlib` library (inspired by [<NAME>](https://towardsdatascience.com/bar-chart-race-in-python-with-matplotlib-8e687a5c8a41)). # # ### Import the dependent libraries # + id="Qs1voSB68TYN" colab_type="code" colab={} import collections import matplotlib.pyplot as plt import matplotlib.ticker as ticker import numpy as np import pandas as pd import requests import seaborn as sns from bs4 import BeautifulSoup from sklearn.cluster import KMeans from sklearn.preprocessing import StandardScaler # + [markdown] id="sV_U_d6x8T3F" colab_type="text" # ## Data # # Scrape all the tables from [Il Sole 24 ORE](https://lab24.ilsole24ore.com/indice-della-criminalita/indexT.php) with `BeautifulSoup`, store them in a dictionary to create finally a `pandas` DataFrame with every city as *index* and different table names as *columns*. # + id="80ZscY-WAJrd" colab_type="code" outputId="dae617b7-77f9-4fc9-b998-2528eb5388c8" colab={"base_uri": "https://localhost:8080/", "height": 249} url = "https://lab24.ilsole24ore.com/indice-della-criminalita/indexT.php" # Make a GET request to fetch the raw HTML content html_content = requests.get(url).text # Parse the html content soup = BeautifulSoup(html_content, features="html.parser") # Find all the tables in the HTML tables = soup.find_all("div", attrs={"class": "grid-item"}) # Initialize a dictionary that will store all the tables dict_table = collections.defaultdict(dict) # Get all the headings of tables headings = [] for tab in tables: t_head = tab.find("h2").text.strip() headings.append(t_head) # Get all the rows of table rows = tab.tbody.find_all("tr") for row in rows: cols = row.find_all("td") cols = [x.text.strip() for x in cols][0:4] if len(cols) == 4: # because it is possible that cols is an empty list dict_table[t_head][cols[1]] = float( cols[3].replace(".", "").replace(",", ".") ) dict_table = dict(dict_table) df = pd.DataFrame.from_dict(dict_table) df.head(5) # + [markdown] id="gCAugt_UBdUF" colab_type="text" # Then create a new column `'Altro'` made by the difference from `'Classifica finale'` and all the other columns, and finally see the *mean* values for every columns in descending order to understand which features have the most impact in the overall ranking. # + id="LnepV_XqClwA" colab_type="code" outputId="ee926d90-39be-4e9b-fa52-1569723cdccf" colab={"base_uri": "https://localhost:8080/", "height": 370} df.insert(1, "Altro", df.loc[:, headings[0]] - df.loc[:, headings[1:]].sum(axis=1)) df.mean().round(1).sort_values(ascending=False) # + [markdown] id="wvM64nYrEIto" colab_type="text" # It will be interesting to see the final ranking with all the variables scaled to have the same weight and also scaled only the 18 columns that are the most important crimes. Let's do this by creating a new `pandas` DataFrame and scaling the values with `StandardScaler()` from `scikit-learn` library. 
# + id="lggB61jJFD56" colab_type="code" outputId="2533b26c-bd57-4e4f-ffa3-91484d4bd622" colab={"base_uri": "https://localhost:8080/", "height": 266} scaler = StandardScaler() df_scale = df.iloc[:, 1:].copy() # scale all the values df_scale[df_scale.columns] = scaler.fit_transform( df_scale[df_scale.columns] ) # insert the ranking calculated by this new values df_scale.insert(0, headings[0], df_scale.sum(axis=1)) df_scale = df_scale.rename( columns={"Classifica finale": "Classifica finale scaled"} ) # insert the ranking calculated only by the selected 18 columns df_scale.insert( 1, "Classifica finale scaled No Altro", df_scale.loc[:, headings[1:]].sum(axis=1), ) df_scale.head(5) # + [markdown] id="yTvSP0MGIIus" colab_type="text" # To better understand the location of every city we need to add a new label based on geographical area division [NUTS 1](https://it.m.wikipedia.org/wiki/Nomenclatura_delle_unit%C3%A0_territoriali_per_le_statistiche_dell%27Italia) created by [Eurostat](https://it.m.wikipedia.org/wiki/Eurostat). So we loaded a file `nuts_italy.csv` stored on [GitHub](https://gist.githubusercontent.com/giampaolocasolla/85a35332168c217d4068a6638bb9a7d9/raw/a2b0d1b5d3b528e0b8b7c89e886b17e6bca36983/nuts_italy.csv) created by [this site](https://wikitable2csv.ggor.de/) to get a dictionary with every cities linked to their NUTS 1. # # For this project we only need 2 columns to work with `'NUTS 1', 'NUTS 3'`. # + id="-HgSkg6cIK64" colab_type="code" colab={} url_nuts = 'https://gist.githubusercontent.com/giampaolocasolla/85a35332168c217d4068a6638bb9a7d9/raw/a2b0d1b5d3b528e0b8b7c89e886b17e6bca36983/nuts_italy.csv' dict_nuts = pd.read_csv(url_nuts).set_index("NUTS 3")["NUTS 1"].to_dict() # insert a new column to df_scale that will be necessary later df_scale.insert(2, "nuts", list(df_scale.index.map(dict_nuts))) # + [markdown] id="q8FRcFv7q-te" colab_type="text" # ## Ranking # # Now we create a function to generate a specific DataFrame for future plots. # # The DataFrame need all the necessary labels and only the *Top 10* and *Last 10* elements. # + id="8PRDWVUKC8mT" colab_type="code" colab={} def create_df_plot(df, col, dict_nuts, numb=10): # select the Top 10 elements and the Last 10 elements from the original DataFrame df_plot_1 = ( df.loc[:, col] .sort_values(ascending=False) .iloc[np.arange(numb).tolist() + np.arange(len(df) - numb, len(df)).tolist()] .copy() .to_frame() ) df_plot_1.reset_index(level=0, inplace=True) df_plot_1 = df_plot_1.rename(columns={"index": "name"}) # add a new column with NUTS values from the dictionary created early df_plot_1["nuts"] = df_plot_1["name"].map(dict_nuts) # flip values from top to bottom df_plot_1 = df_plot_1[::-1] # add a new column with labels df_plot_1["rank"] = [col] * (numb*2) df_plot_1 = df_plot_1.rename(columns={col: "value"}) return df_plot_1 # + [markdown] id="JR9MFj6k7Ln2" colab_type="text" # Generate a single DataFrame `df_plot` that is created from the function `create_df_plot()` above, cycled 3 times for every different ranking. 
# + id="hu9ABCtP7MKn" colab_type="code" outputId="2c12f5d3-d516-4463-9627-0dcc4d18f823" colab={"base_uri": "https://localhost:8080/", "height": 195} appended_data = [] for d, c in zip( [df, df_scale, df_scale], [ "Classifica finale", "Classifica finale scaled", "Classifica finale scaled No Altro", ], ): data = create_df_plot(d, c, dict_nuts) # store DataFrame in list appended_data.append(data) # merge all the stored DataFrames df_plot = pd.concat(appended_data) df_plot.head(5) # + [markdown] id="eN_CUTWG8xy2" colab_type="text" # We'll use `colors` to add color to the bars. # + id="o_7RurR886-R" colab_type="code" colab={} colors = dict( zip( ["Nord-Ovest", "Nord-Est", "Centro", "Sud", "Isole"], ["#adb0ff", "#ffb3ff", "#90d595", "#e48381", "#aafbff"], ) ) # + [markdown] id="Bw_CO4qT82ss" colab_type="text" # Create a function `draw_barchart()` to generate a single horizontal bar plot, styling the following items: # # - Text: font sizes, color, orientation, position based on values # - Format: comma separated values and axes tickers # - Axis: Move to top, color, add subtitle # - Grid: Add lines behind bars # - Remove box frame # - Add title, credit # + id="VSUAM1qh9ug3" colab_type="code" colab={} def draw_barchart( type_rank, group_lk, title_plot, subtitle_plot, scaled=False, dx=None ): dff = df_plot[df_plot["rank"] == type_rank].sort_values(by="value", ascending=True) ax.clear() ax.barh(dff["name"], dff["value"], color=[colors[group_lk[x]] for x in dff["name"]]) if dx is None: dx = dff["value"].max() / 200 for i, (value, name) in enumerate(zip(dff["value"], dff["name"])): # text inside bars if value > 0: ax.text(value - dx, i, name, size=14, weight=600, ha="right", va="bottom") ax.text( value - dx, i - 0.25, group_lk[name], size=10, color="#444444", ha="right", va="baseline", ) if not scaled: ax.text(value + dx, i, f"{value:,.0f}", size=14, ha="left", va="center") else: ax.text(value + dx, i, name, size=14, weight=600, ha="left", va="bottom") ax.text( value + dx, i - 0.25, group_lk[name], size=10, color="#444444", ha="left", va="baseline", ) if not scaled: ax.text( value - dx, i, f"{value:,.0f}", size=14, ha="right", va="center" ) # ... polished styles # Top 10 ax.text( 1, 0.7, "Top 10", transform=ax.transAxes, color="#777777", size=35, ha="right", weight=800, ) # Last 10 ax.text( 1, 0.3, "Last 10", transform=ax.transAxes, color="#777777", size=35, ha="right", weight=800, ) # Subtitle plot ax.text(0, 1.06, subtitle_plot, transform=ax.transAxes, size=12, color="#777777") ax.xaxis.set_major_formatter(ticker.StrMethodFormatter("{x:,.0f}")) ax.xaxis.set_ticks_position("top") ax.tick_params(axis="x", colors="#777777", labelsize=12) ax.set_yticks([]) ax.margins(0, 0.01) ax.grid(which="major", axis="x", linestyle="-") ax.set_axisbelow(True) # Title plot ax.text(0, 1.12, title_plot, transform=ax.transAxes, size=24, weight=600, ha="left") # Author, credit ax.text( 1, 0, "by @giampaolocasolla; credit @pratapvardhan", transform=ax.transAxes, ha="right", color="#777777", bbox=dict(facecolor="white", alpha=0.8, edgecolor="white"), ) # remove box frame plt.box(False) # + [markdown] id="1kcd6WK09vuQ" colab_type="text" # ### Figure 1 # # Use the function `draw_barchart()` to generate the first Figure, made by *Total number of crimes 'emerged' for every 100,000 inhabitants*. 
# + id="NwrsK2UQ9wFM" colab_type="code" outputId="584611ef-694b-43cf-a88c-7aa8dd53d972" colab={"base_uri": "https://localhost:8080/", "height": 843} fig, ax = plt.subplots(figsize=(18, 13)) draw_barchart( "Classifica finale", dict_nuts, title_plot="Italy Crime Index 2019", subtitle_plot="Total number of crimes 'emerged' for every 100,000 inhabitants", scaled=False, ) # + [markdown] id="xt_zLcZtAax_" colab_type="text" # ### Figure 2 # # Use the function `draw_barchart()` to generate the second Figure, made by *Total number of crimes 'emerged' for every 100,000 inhabitants scaled by every types of crime*. # + id="lXGZDRAeAbUb" colab_type="code" outputId="4001aa2a-278a-4288-9bec-21db6dd3e2ac" colab={"base_uri": "https://localhost:8080/", "height": 843} fig, ax = plt.subplots(figsize=(18, 13)) draw_barchart( "Classifica finale scaled", dict_nuts, title_plot="Italy Crime Index 2019", subtitle_plot="Total number of crimes 'emerged' for every 100,000 inhabitants scaled by every types of crime", scaled=True, ) # + [markdown] id="6D8K-H8xAcM2" colab_type="text" # ###Figure 3 # # Use the function `draw_barchart()` to generate the third Figure, made by *Total number of crimes 'emerged' for every 100,000 inhabitants scaled only by selected 18 types of crime*. # + id="Z8KUfgHRAccA" colab_type="code" outputId="2232f789-9519-4cd0-dcda-bbf6430bcac2" colab={"base_uri": "https://localhost:8080/", "height": 843} fig, ax = plt.subplots(figsize=(18, 13)) draw_barchart( "Classifica finale scaled No Altro", dict_nuts, title_plot="Italy Crime Index 2019", subtitle_plot="Total number of crimes 'emerged' for every 100,000 inhabitants scaled only by selected 18 types of crime", scaled=True, ) # + [markdown] id="LmWtCl8MRWdv" colab_type="text" # ## Unsupervised Learning # # Unsupervised learning is a type of self-organized Hebbian learning that helps find previously unknown patterns in data set without pre-existing labels. # + [markdown] id="GFfATEQISNc_" colab_type="text" # ### 2-D representation # # Let's start to visualize our `df_scale` DataFrame in 2 dimensions with `t-SNE` algorithm and `PCA` creating a function `plot_projections()`. # + id="H7tQ3fEGSnnz" colab_type="code" colab={} def plot_projections( df, # starting DataFrame which=None, # which algoritm to use groups_dict=None, # dictionary of mapping elements to groups groups_num=None, # how many groups are there colors=None, # dictionary of colors clusters=None, # list of clusters ): import warnings if which == "t-SNE": # t-SNE algorithm from sklearn.manifold import TSNE import time time_start = time.time() tsne = TSNE(n_components=2, perplexity=10, learning_rate=100, verbose=1) results = tsne.fit_transform(df)[:, 0:2] print("t-SNE done! 
Time elapsed: {} seconds".format(time.time() - time_start)) elif which == "PCA": # PCA algorithm from sklearn.decomposition import PCA pca = PCA(n_components=2) results = pca.fit_transform(df)[:, 0:2] print( "Explained variation per principal component: {}".format( pca.explained_variance_ratio_ ) ) else: warnings.warn("Select t-SNE or PCA") # create a DataFrame for the plot df_plt = pd.DataFrame(results, columns=["one", "two"]) if clusters is None: df_plt["label"] = list(df.index.map(groups_dict)) else: df_plt["label"] = clusters if groups_num is None: groups_num = len(set(df_plt["label"])) # count the number of different groups sns.set() sns.set_context("notebook", font_scale=1.3) fig, ax = plt.subplots(figsize=(13, 6)) sns.scatterplot( x="one", y="two", hue="label", style="label", palette=colors, data=df_plt, legend="full", alpha=1, s=70, ) # title ax.text(0, 1.05, which, transform=ax.transAxes, size=24, weight=600, ha="left") # author ax.text( 1, 0, "by @giampaolocasolla", transform=ax.transAxes, ha="right", color="#777777", bbox=dict(facecolor="white", alpha=0.8, edgecolor="white"), ) # + [markdown] id="AoFEU-s0VJ5d" colab_type="text" # #### t-SNE # # Use the function `plot_projections()` to generate a 2-D representation of the `df_scale` DataFrame with the `t-SNE` algorithm and colors varying by `NUTS 1`. # + id="ZfeZDPJtV37_" colab_type="code" outputId="92483ac6-9c3a-4349-989f-c37bc9902e22" colab={"base_uri": "https://localhost:8080/", "height": 570} plot_projections( df_scale[df_scale.columns[3:]], which="t-SNE", groups_dict=dict_nuts, colors=colors, ) # + [markdown] id="rH_xdtbqWA_N" colab_type="text" # #### PCA # # Use the function `plot_projections()` to generate a 2-D representation of the `df_scale` DataFrame with the `PCA` algorithm and colors varying by `NUTS 1`. # + id="M9BEpxbHWBnD" colab_type="code" outputId="b9538db0-8228-4129-c667-9bf1a145e671" colab={"base_uri": "https://localhost:8080/", "height": 452} plot_projections( df_scale[df_scale.columns[3:]], which="PCA", groups_dict=dict_nuts, colors=colors, ) # + [markdown] id="J1G3nqjCA1p4" colab_type="text" # ### Clustering # # In order to retrieve insights from our data, we need to group the cities with a clustering algorithm. # # Clustering algorithms seek to learn, from the properties of the data, an optimal division into groups of points (cities in our case). # # The first phase is to choose the appropriate number of groups, also known as the *k* value. # # For this task I switched to **R** to use a modified version of the `NbClust()` function, which cycles through various indices and clustering methods from *k*=2 to *k*=10 and selects the chosen *k* for each index. # # An *ensemble* approach through a *voting* technique has been used. # # The result is *k*=3, as can be seen in the following figure (a rough Python-side cross-check using a silhouette sweep is sketched at the end of this notebook). # # ![](https://drive.google.com/uc?id=15JmsSaIiY1WaGlhHg0c3v9HDMfAM3LpJ) # # + [markdown] id="fKbh33BmEb7c" colab_type="text" # #### *k*-Means # # The *k*-Means algorithm searches for a pre-determined number of clusters within an unlabeled multidimensional dataset. # # We use the function `KMeans()` from the `sklearn.cluster` library. # + id="lcqsssnpBQbd" colab_type="code" colab={} kmeans = KMeans(n_clusters=3, random_state=0).fit(df_scale.iloc[:, 3:]) # insert the results of the clustering in the df_scale DataFrame df_scale.insert(3, "cluster", kmeans.labels_) # + [markdown] id="RZLNPRUPhxAz" colab_type="text" # With the results of the *k*-Means algorithm, create a new DataFrame containing all the cities grouped by cluster. 
# + id="bcru9ctJhxcZ" colab_type="code" outputId="1c29bff1-c8fa-4dc0-d90f-30229c89ecd8" colab={"base_uri": "https://localhost:8080/", "height": 1000} # Initialize a dictionary that will store all the tables dict_clusters = collections.defaultdict(list) # loop through the df_scale DataFrame for index, row in df_scale.iterrows(): dict_clusters[row["cluster"]].append(index) dict_clusters = dict(dict_clusters) # generate a new DataFrame from the created dictionary df_list = pd.DataFrame.from_dict(dict_clusters, orient='index').transpose().fillna("") # print all the rows of the DataFrame with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also display(df_list[[0,1,2]]) # + [markdown] id="G_o88EdTZ1il" colab_type="text" # Let's see the clusters distribution based on `NUTS 1` from the results of *k*-Means algorithm by a *grouped bar plot*. # # From the following figure is interesting to see a big cluster with cities from all `NUTS 1`, but the other two clusters very well splitted among them. # + id="CcVqQ2yGZ11l" colab_type="code" outputId="f06b0e64-c19e-4ce3-bfdd-32ae61b9ca52" colab={"base_uri": "https://localhost:8080/", "height": 435} # create a new DataFrame for the plot with some data manipulations df_bar = df_scale.groupby("nuts").cluster.value_counts().unstack(0) df_bar.columns.name = "label" fig, ax = plt.subplots(figsize=(13, 6)) sns.set() sns.set_context("notebook", font_scale=1.3) df_bar.plot(kind="bar", color=[colors[x] for x in df_bar.columns], ax=ax) plt.xticks(rotation=0) ax.text(0, 1.05, "Clusters distribution", transform=ax.transAxes, size=24, weight=600, ha="left") ax.text( 0, 0.97, "by @giampaolocasolla", transform=ax.transAxes, ha="left", color="#777777", bbox=dict(facecolor="white", alpha=0.8, edgecolor="white"), ) plt.show() # + [markdown] id="HTwTHyu8Ys_i" colab_type="text" # #### PCA with clusters # # Use the function `plot_projections()` to generate 2-D representation of the `df_scale` DataFrame with `PCA` algorithm and colors varying by `cluster`. # + id="-NRhRfVFZWu7" colab_type="code" outputId="01db069d-4faa-45b4-eaf6-6037f82c8544" colab={"base_uri": "https://localhost:8080/", "height": 452} plot_projections( df_scale[df_scale.columns[4:]], which="PCA", groups_dict=dict_nuts, colors={0: "#808080", 1: "#134074", 2: "#542344"}, clusters=df_scale["cluster"].to_list(), ) # + [markdown] id="4Mh5wNNal-Th" colab_type="text" # #### Features analysis # # Rearrange `df_scale` DataFrame to create a *grouped horizontal box plot* so that the values of every column goes to a single column with `melt()` function. # + id="nD665zuul-jp" colab_type="code" colab={} df_analysis = pd.melt( df_scale, id_vars=["cluster"], value_vars=df_scale.columns[4:], var_name="feature" ) # + [markdown] id="OTXi2EqomJ17" colab_type="text" # Let's see a *box plot* representation for all the features varying on `cluster` from the results of *k*-Means algorithm. 
# + id="zmf9vP8DmKGv" colab_type="code" outputId="afc4b32d-d1de-4125-ac28-c182b5fc8be2" colab={"base_uri": "https://localhost:8080/", "height": 701} fig, ax = plt.subplots(figsize=(8, 11)) sns.set() sns.set_context("notebook", font_scale=1.3) sns.boxplot( x="value", y="feature", data=df_analysis, palette={0: "#808080", 1: "#134074", 2: "#542344"}, hue="cluster", orient="h", ) ax.text(0, 1.05, "Box plots", transform=ax.transAxes, size=24, weight=600, ha="left") ax.text( 1, 0, "by @giampaolocasolla", transform=ax.transAxes, ha="right", color="#777777", bbox=dict(facecolor="white", alpha=0.8, edgecolor="white"), ) plt.legend(title="cluster", loc="center right") plt.xlabel("") plt.ylabel("") plt.show() # + [markdown] id="zP0KGuTgmnR5" colab_type="text" # ## Results # # From the last figure it is possible to notice that: # # - the biggest cluster is made up of cities that have *mean* values in every type of crime (like a **baseline**). # # - the cluster made up of cities from `Nord-Ovest, Nord-Est, Centro` has: # # |high values|low values| # |-----|-------| # |Altro|| # |Furti con strappo|| # |Furti con destrezza|| # |Furti in abitazione|| # |Furti in esercizi commerciali|| # |Rapine|| # |Truffe e frodi informatiche|| # |Stupefacenti|| # |Violenze sessuali|| # # # - the cluster made up of cities from `Sud, Isole` has: # # |high values|low values| # |-----|-------| # |Omicidi volontari consumati|Furti in abitazione| # |Tentati omicidi|Furti in esercizi commerciali| # |Furti di autovetture|Violenze sessuali| # |Estorsioni|| # |Usura|| # |Associazione per delinquere|| # |Associazione di tipo mafioso|| # |Incendi|| #
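# + [markdown]
# As a rough cross-check of the *k*=3 chosen above with the `NbClust()` ensemble in **R**, a silhouette-score sweep in Python could look like the sketch below. This is only an illustrative sketch, not part of the original analysis: it assumes `df_scale` is the DataFrame built earlier, with the scaled crime features starting at column index 4 (after the two ranking columns, `nuts` and `cluster`).

# +
from sklearn.metrics import silhouette_score

# Candidate numbers of clusters, each scored by mean silhouette width
X_features = df_scale[df_scale.columns[4:]]
sil_scores = {}
for k in range(2, 11):
    labels = KMeans(n_clusters=k, random_state=0).fit_predict(X_features)
    sil_scores[k] = silhouette_score(X_features, labels)

# Higher silhouette means better-separated, more cohesive clusters
pd.Series(sil_scores, name="silhouette").sort_values(ascending=False)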
italy_crime_index.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python # language: python # name: conda-env-python-py # --- # <center> # <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/Logos/organization_logo/organization_logo.png" width="300" alt="cognitiveclass.ai logo" /> # </center> # # # Content Based Filtering # # Estimated time needed: **25** minutes # # ## Objectives # # After completing this lab you will be able to: # # - Create a recommendation system using collaborative filtering # # Recommendation systems are a collection of algorithms used to recommend items to users based on information taken from the user. These systems have become ubiquitous, and can be commonly seen in online stores, movies databases and job finders. In this notebook, we will explore Content-based recommendation systems and implement a simple version of one using Python and the Pandas library. # # ### Table of contents # # <div class="alert alert-block alert-info" style="margin-top: 20px"> # <ol> # <li><a href="#ref1">Acquiring the Data</a></li> # <li><a href="#ref2">Preprocessing</a></li> # <li><a href="#ref3">Content-Based Filtering</a></li> # </ol> # </div> # <br> # # <a id="ref1"></a> # # # Acquiring the Data # # To acquire and extract the data, simply run the following Bash scripts: # Dataset acquired from [GroupLens](http://grouplens.org/datasets/movielens?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork-20718538&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork-20718538&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork-20718538&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork-20718538&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ). Lets download the dataset. To download the data, we will use **`!wget`** to download it from IBM Object Storage. # **Did you know?** When it comes to Machine Learning, you will likely be working with large datasets. As a business, where can you host your data? IBM is offering a unique opportunity for businesses, with 10 Tb of IBM Cloud Object Storage: [Sign up now for free](http://cocl.us/ML0101EN-IBM-Offer-CC) # # !wget -O moviedataset.zip https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%205/data/moviedataset.zip print('unziping ...') # !unzip -o -j moviedataset.zip # Now you're ready to start working with the data! 
# # <a id="ref2"></a> # # # Preprocessing # # First, let's get all of the imports out of the way: # #Dataframe manipulation library import pandas as pd #Math functions, we'll only need the sqrt function so let's import only that from math import sqrt import numpy as np import matplotlib.pyplot as plt # %matplotlib inline # Now let's read each file into their Dataframes: # #Storing the movie information into a pandas dataframe movies_df = pd.read_csv('movies.csv') #Storing the user information into a pandas dataframe ratings_df = pd.read_csv('ratings.csv') #Head is a function that gets the first N rows of a dataframe. N's default is 5. movies_df.head() # Let's also remove the year from the **title** column by using pandas' replace function and store in a new **year** column. # #Using regular expressions to find a year stored between parentheses #We specify the parantheses so we don't conflict with movies that have years in their titles movies_df['year'] = movies_df.title.str.extract('(\(\d\d\d\d\))',expand=False) #Removing the parentheses movies_df['year'] = movies_df.year.str.extract('(\d\d\d\d)',expand=False) #Removing the years from the 'title' column movies_df['title'] = movies_df.title.str.replace('(\(\d\d\d\d\))', '') #Applying the strip function to get rid of any ending whitespace characters that may have appeared movies_df['title'] = movies_df['title'].apply(lambda x: x.strip()) movies_df.head() # With that, let's also split the values in the **Genres** column into a **list of Genres** to simplify future use. This can be achieved by applying Python's split string function on the correct column. # #Every genre is separated by a | so we simply have to call the split function on | movies_df['genres'] = movies_df.genres.str.split('|') movies_df.head() # Since keeping genres in a list format isn't optimal for the content-based recommendation system technique, we will use the One Hot Encoding technique to convert the list of genres to a vector where each column corresponds to one possible value of the feature. This encoding is needed for feeding categorical data. In this case, we store every different genre in columns that contain either 1 or 0. 1 shows that a movie has that genre and 0 shows that it doesn't. Let's also store this dataframe in another variable since genres won't be important for our first recommendation system. # # + #Copying the movie dataframe into a new one since we won't need to use the genre information in our first case. moviesWithGenres_df = movies_df.copy() #For every row in the dataframe, iterate through the list of genres and place a 1 into the corresponding column for index, row in movies_df.iterrows(): for genre in row['genres']: moviesWithGenres_df.at[index, genre] = 1 #Filling in the NaN values with 0 to show that a movie doesn't have that column's genre moviesWithGenres_df = moviesWithGenres_df.fillna(0) moviesWithGenres_df.head() # - # Next, let's look at the ratings dataframe. # ratings_df.head() # Every row in the ratings dataframe has a user id associated with at least one movie, a rating and a timestamp showing when they reviewed it. We won't be needing the timestamp column, so let's drop it to save on memory. # #Drop removes a specified row or column from a dataframe ratings_df = ratings_df.drop('timestamp', 1) ratings_df.head() # <a id="ref3"></a> # # # Content-Based recommendation system # # Now, let's take a look at how to implement **Content-Based** or **Item-Item recommendation systems**. 
This technique attempts to figure out what a user's favourite aspects of an item are, and then recommends items that present those aspects. In our case, we're going to try to figure out the input's favorite genres from the movies and ratings given. # # Let's begin by creating an input user to recommend movies to: # # Notice: To add more movies, simply increase the number of elements in the **userInput**. Feel free to add more in! Just be sure to write it with the correct capitalisation, and if a movie title starts with "The", like "The Matrix", then write it like this: 'Matrix, The' . # userInput = [ {'title':'Breakfast Club, The', 'rating':5}, {'title':'Toy Story', 'rating':3.5}, {'title':'Jumanji', 'rating':2}, {'title':"Pulp Fiction", 'rating':5}, {'title':'Akira', 'rating':4.5} ] inputMovies = pd.DataFrame(userInput) inputMovies # #### Add movieId to input user # # With the input complete, let's extract the input movies' IDs from the movies dataframe and add them to it. # # We can achieve this by first filtering out the rows that contain the input movies' titles and then merging this subset with the input dataframe. We also drop unnecessary columns for the input to save memory space. # #Filtering out the movies by title inputId = movies_df[movies_df['title'].isin(inputMovies['title'].tolist())] #Then merging it so we can get the movieId. It's implicitly merging it by title. inputMovies = pd.merge(inputId, inputMovies) #Dropping information we won't use from the input dataframe inputMovies = inputMovies.drop('genres', 1).drop('year', 1) #Final input dataframe #If a movie you added in above isn't here, then it might not be in the original #dataframe or it might be spelled differently, please check capitalisation. inputMovies # We're going to start by learning the input's preferences, so let's get the subset of movies that the input has watched from the Dataframe containing genres defined with binary values. # #Filtering out the movies from the input userMovies = moviesWithGenres_df[moviesWithGenres_df['movieId'].isin(inputMovies['movieId'].tolist())] userMovies # We'll only need the actual genre table, so let's clean this up a bit by resetting the index and dropping the movieId, title, genres and year columns. # #Resetting the index to avoid future issues userMovies = userMovies.reset_index(drop=True) #Dropping unnecessary columns to save memory and to avoid issues userGenreTable = userMovies.drop('movieId', 1).drop('title', 1).drop('genres', 1).drop('year', 1) userGenreTable # Now we're ready to start learning the input's preferences! # # To do this, we're going to turn each genre into weights. We can do this by using the input's reviews and multiplying them into the input's genre table and then summing up the resulting table by column. This operation is actually a dot product between a matrix and a vector, so we can simply accomplish it by calling Pandas's "dot" function. # inputMovies['rating'] #Dot product to get weights userProfile = userGenreTable.transpose().dot(inputMovies['rating']) #The user profile userProfile # Now we have the weights for each of the user's preferences. This is known as the User Profile. Using this, we can recommend movies that satisfy the user's preferences. 
# # Let's start by extracting the genre table from the original dataframe: # #Now let's get the genres of every movie in our original dataframe genreTable = moviesWithGenres_df.set_index(moviesWithGenres_df['movieId']) #And drop the unnecessary information genreTable = genreTable.drop('movieId', 1).drop('title', 1).drop('genres', 1).drop('year', 1) genreTable.head() genreTable.shape # With the input's profile and the complete list of movies and their genres in hand, we're going to take the weighted average of every movie based on the input profile and recommend the top twenty movies that most satisfy it. # #Multiply the genres by the weights and then take the weighted average recommendationTable_df = ((genreTable*userProfile).sum(axis=1))/(userProfile.sum()) recommendationTable_df.head() #Sort our recommendations in descending order recommendationTable_df = recommendationTable_df.sort_values(ascending=False) #Just a peek at the values recommendationTable_df.head() # Now here's the recommendation table! # #The final recommendation table movies_df.loc[movies_df['movieId'].isin(recommendationTable_df.head(20).keys())] # ### Advantages and Disadvantages of Content-Based Filtering # # ##### Advantages # # - Learns user's preferences # - Highly personalized for the user # # ##### Disadvantages # # - Doesn't take into account what others think of the item, so low quality item recommendations might happen # - Extracting data is not always intuitive # - Determining what characteristics of the item the user dislikes or likes is not always obvious # # <h2>Want to learn more?</h2> # # IBM SPSS Modeler is a comprehensive analytics platform that has many machine learning algorithms. It has been designed to bring predictive intelligence to decisions made by individuals, by groups, by systems – by your enterprise as a whole. A free trial is available through this course, available here: <a href="https://www.ibm.com/analytics/spss-statistics-software">SPSS Modeler</a> # # Also, you can use Watson Studio to run these notebooks faster with bigger datasets. Watson Studio is IBM's leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, Watson Studio enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of Watson Studio users today with a free account at <a href="https://www.ibm.com/cloud/watson-studio">Watson Studio</a> # # ### Thank you for completing this lab! # # ## Author # # <NAME> # # ### Other Contributors # # <a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank"><NAME></a> # # ## Change Log # # | Date (YYYY-MM-DD) | Version | Changed By | Change Description | # | ----------------- | ------- | ---------- | ---------------------------------- | # | 2020-11-03 | 2.1 | Lakshmi | Updated URL of csv | # | 2020-08-27 | 2.0 | Lavanya | Moved lab to course repo in GitLab | # | | | | | # | | | | | # # ## <h3 align="center"> © IBM Corporation 2020. All rights reserved. <h3/> #
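# A quick sanity check of the scoring formula used above, with toy numbers rather than the MovieLens data: for a user profile of genre weights and a movie's one-hot genre vector, the recommendation score is the weighted average sum(genres x weights) / sum(weights). The genres and weights below are made up purely to illustrate the arithmetic and are not part of the original lab.

# toy user profile (genre weights) and a toy one-hot encoded movie
toy_profile = pd.Series({'Adventure': 5.0, 'Comedy': 3.0, 'Horror': 0.0})
toy_movie = pd.Series({'Adventure': 1, 'Comedy': 0, 'Horror': 1})

# weighted average: (1*5.0 + 0*3.0 + 1*0.0) / (5.0 + 3.0 + 0.0) = 0.625
toy_score = (toy_movie * toy_profile).sum() / toy_profile.sum()
toy_score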
ML0101EN-RecSys-Content-Based-movies-py-v1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda root] # language: python # name: conda-root-py # --- # # Introduction to Kaggle # # Website: [https://www.kaggle.com/](https://www.kaggle.com/) # # Kaggle is a popular data science competition platform that has been acquired by Google; see "Industry | Google Cloud officially announces the acquisition of the data science community Kaggle". # # Kaggle was founded in Melbourne in 2010 by co-founder and CEO Anthony Goldbloom (<NAME>). It mainly provides developers and data scientists with a platform for hosting machine learning competitions, hosting datasets, and writing and sharing code. The platform has already attracted the attention of 800,000 data scientists, and this user base may well be the main factor that attracted Google. # # As a competition platform, Kaggle can be somewhat challenging for beginners. After all, some of its competitions have prize pools of up to one million US dollars and hundreds of participants, and the top teams have decades of accumulated experience on tasks such as improving airport security or analysing satellite data. To help beginners get started with Kaggle, EliteDataScience recently published an introductory article answering the questions beginners most often run into. 机器之心 (Synced) compiled and introduced that article, and also added some of its previously published articles as supplementary resources. # # # # Kaggle Competitions # In essence, Kaggle competitions with a prize pool must meet a few criteria: # - The problem must be difficult: a competition should not be a task that can be solved in an afternoon. To get the best return on investment, the host company will submit its biggest and hardest problems. # - The solution must be new: to win the latest competitions, you usually need to do extended research, customise algorithms, train state-of-the-art models, and so on. # - Performance must be comparable: a competition has to produce a winner, so your solution and those of your competitors must be scored. # # Kaggle competitions encourage you to do whatever it takes, whereas classic data science values efficiency and maximising business impact. # # # # ## Are Kaggle competitions worth entering? # # Despite the differences between Kaggle and classic data science, Kaggle is still a great tool for getting started. # # Each competition is self-contained. There is no need to scope a project and then collect data, which gives you time to focus on other skills. # # Practice means doing. The best way to learn data science is by doing, and as long as there is no pressure to win every competition, you can practise on all kinds of interesting problems. # # The discussions and winner interviews are instructive. Every competition has its own discussion board and winner write-ups, where you can get a glimpse into the thought processes of more experienced data scientists. # # # ## How do you get started with Kaggle? # # ### Step 1: Pick a programming language # # First, we recommend choosing one programming language and sticking with it. Python and R are both popular on Kaggle and in the wider data science community. # # If you are a complete beginner, we recommend Python, because it is a general-purpose programming language that you can use across the entire workflow. # # References: # # - R vs Python for data science: http://elitedatascience.com/r-vs-python-for-data-science # # - How to learn Python for data science: http://elitedatascience.com/learn-python-for-data-science # # - [R vs Python: is R now the best language for data science?](http://mp.weixin.qq.com/s?__biz=MzA3MzI4MjgzMw==&mid=2650722126&idx=1&sn=00b5c6ed7e4c576c8be3eae1dc348cfe&chksm=871b0b30b06c8226b163198b78ad40b52715509d3b36f0774670d6e0e284378cc3bdeca42d4a&scene=21#wechat_redirect) # # - [Beyond R: Python becomes the most popular machine learning language](http://mp.weixin.qq.com/s?__biz=MzA3MzI4MjgzMw==&mid=2650720220&idx=3&sn=c3b8b65ec118d6fdef3f55e5f29e89ff&chksm=871b03a2b06c8ab4b0a0c87a3b3647372578b129ad5f420fddc5a1a84138a7db51911e143980&scene=21#wechat_redirect) # # # # ### Step 2: Learn the basics of exploring data # # The ability to load, browse, and plot your data (i.e., exploratory analysis) is the first step in data science, because it informs the many decisions you will make during model training. # # If you have taken the Python route, we recommend the Seaborn library, which is designed specifically for this purpose. It offers high-level plotting functions for many of the most common and useful charts. # # References: # # - The Seaborn library: https://seaborn.pydata.org/ # # - Python Seaborn tutorial: http://elitedatascience.com/python-seaborn-tutorial # # - [Resources | The 15 most popular Python libraries for data science in 2017](http://mp.weixin.qq.com/s?__biz=MzA3MzI4MjgzMw==&mid=2650726730&idx=2&sn=89d176a782875afaafc69dd40cbee006&chksm=871b2534b06cac22a9235168a8d8867b97f795d2fba4435a2c7ec18b1039703903e4a79560bd&scene=21#wechat_redirect) # # # ### Step 3: Train your first machine learning model # # # Before jumping into Kaggle, we recommend training a model on a simpler, more manageable dataset. This lets you get familiar with the machine learning libraries and lays the groundwork for what comes later. # # The key is to build good habits, such as splitting your dataset into independent training and test sets, using cross-validation to avoid overfitting, and using appropriate performance metrics (a minimal sketch of these habits is included at the end of this notebook). # # For Python, the best general-purpose machine learning library is Scikit-Learn. # # References: # # - The Scikit-Learn library: http://scikit-learn.org/stable/ # # - Python Scikit-Learn tutorial: http://elitedatascience.com/python-machine-learning-tutorial-scikit-learn # # - 7-day applied machine learning crash course: http://elitedatascience.com/ # - [Just 14 steps: master Python machine learning from scratch (with resources)](http://mp.weixin.qq.com/s?__biz=MzA3MzI4MjgzMw==&mid=2650724242&idx=1&sn=703d242700e29813d6c482daf6b211c5&chksm=871b13ecb06c9afa28f8aad729496620078985e4eae8a1296fc407dbd70c1d70fabb3b2817fa&scene=21#wechat_redirect) # # - [Tutorial | Kaggle CTO <NAME>: the eight steps of machine learning](http://mp.weixin.qq.com/s?__biz=MzA3MzI4MjgzMw==&mid=2650725654&idx=4&sn=505ababb07a6bee5d42a6254a7493a7e&chksm=871b1968b06c907e9f84888ecb2ced8d3f52f0129720872e97f923b94deb34383eacefb4a5cf&scene=21#wechat_redirect) # # # # ### Step 4: Tackle a Getting Started competition # # 
# Now we are ready to try a Kaggle competition. These competitions fall into several categories. The most common ones are:
#
# - Featured: these are usually sponsored by companies, organisations or even governments, and have the largest prize pools.
#
# - Research: these are research-oriented competitions with little or no prize money. They also have non-traditional submission processes.
#
# - Recruitment: these are sponsored by companies looking to hire data scientists. They are still relatively rare.
#
# - Getting Started: these are structured like Featured competitions but have no prize money. They have simpler datasets, plenty of tutorials and rolling submission windows, so you can enter at any time.
#
# Getting Started competitions are ideal for beginners because they give you a low-risk learning environment and come with lots of community-created tutorials: https://www.kaggle.com/c/titanic#tutorials
#
#
# ### Step 5: compete to learn, not to make money
#
# With the foundations above in place, you can take part in Featured competitions. In general, getting a good ranking takes far more time and effort.
#
# We therefore suggest choosing your projects wisely. Entering competitions can help you go deep into technical areas you want to stay involved in over the long term.
#
# Although the prize money is tempting, the more valuable (and more reliable) reward is the skills you gain for your future career.
#
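# As a minimal, hypothetical sketch of the habits described in step three (this cell is not part of the original article; the dataset and model choices are purely illustrative), the code below splits scikit-learn's bundled iris data into training and test sets, cross-validates a model on the training portion, and then checks it once on the held-out test set.

# +
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score, train_test_split

# Load a small, well-behaved dataset and split off an untouched test set
X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

model = RandomForestClassifier(n_estimators=100, random_state=42)

# Estimate generalisation with 5-fold cross-validation on the training data only
print('CV accuracy:', cross_val_score(model, X_train, y_train, cv=5).mean())

# Fit on the full training set and evaluate once on the held-out test set
model.fit(X_train, y_train)
print('Test accuracy:', model.score(X_test, y_test))
# -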
Resources/Kaggle.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # TF-IDF in digitised journals # # This notebook calculates TF-IDF values for words in digitised journals harvested from Trove. See also the notebook on [word frequences in digitised journals](word_frequences_in_digitised_journals.ipynb). More documentation coming... import re import tarfile import zipfile from io import BytesIO import pandas as pd import requests from tqdm.auto import tqdm import altair as alt import os from pathlib import Path import ipywidgets as widgets from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer import numpy as np # + # Are you using Jupyter Lab? # If so either don't run this cell or comment out the line below alt.renderers.enable('notebook') # If you forget, run this cell, and then get strange warnings when you make a chart, # uncomment the following line and run this cell to reset the chart renderer # alt.renderers.enable('default') # - # ## Select a journal # # Create a dropdown widget to select a digitised journal. The cells below will use this widget to get the value of the currently selected journal. # Load details of digitised journals from CSV df_journals = pd.read_csv('digital-journals-with-text.csv').sort_values(by='title') journal_list = [(f"{j['title']} ({j['issues_with_text']} issues)", j['directory']) for j in df_journals[['title', 'directory', 'issues_with_text']].to_dict('records')] journals = widgets.Dropdown(options=journal_list, disabled=False) display(journals) # ## Download all the issues of the journal # # Download a zip file containing the OCRd text of all the selected journal's available issues from the repository on CloudStor. Then unzip! # + def get_docs_path(journal): path = os.path.join('downloads', journal, 'texts') docs_path = [p for p in sorted(Path(path).glob('*.txt'))] return docs_path def download_journal(journal): ''' Download the OCRd text of the selected journal from the respoitory on CloudStor. ''' # Create a directory to put the downloaded files path = os.path.join('downloads', journal) os.makedirs(path, exist_ok=True) # To get a sub-folder on Cloudstor you add a 'path' parameter params = { 'path': f'/{journal}/texts' } # Get the zipped texts folder from Cloudstor -- note the 'download' in the url to get the zipped folder response = requests.get('https://cloudstor.aarnet.edu.au/plus/s/QOmnqpGQCNCSC2h/download', params=params) # Unzip the zip! zipped = zipfile.ZipFile(BytesIO(response.content)) zipped.extractall(path) print(f'{len(get_docs_path(journal))} issues downloaded') # - # Get the OCRd text of the selected journal download_journal(journals.value) # ## Calculate the TF-IDF values # + def get_docs(journal): docs_path = get_docs_path(journal) for p in docs_path: yield p.read_text(encoding='utf-8').strip() def get_file_names(journal): return [p.stem for p in get_docs_path(journal)] def get_years(journal): ''' Get a list of years extracted from the filenames of the issues. ''' years = [] for doc in get_docs_path(journal): try: matches = re.findall(r'-((?:18|19|20)\d{2})-', doc.stem) years.append(int(matches[-1])) except IndexError: print(f'YEAR NOT FOUND: {doc}') return sorted(list(set(years))) def get_docs_year(journal): ''' Combine all the issues from a year into a single document ready to be fed into the pipeline. 
    '''
    docs_year = {}
    for doc in get_docs_path(journal):
        try:
            matches = re.findall(r'-((?:18|19|20)\d{2})-', doc.stem)
            year = int(matches[-1])
        except IndexError:
            print(f'YEAR NOT FOUND: {doc}')
        else:
            try:
                docs_year[year].append(doc)
            except KeyError:
                docs_year[year] = [doc]
    for y in sorted(docs_year.keys()):
        year_doc = ' '.join([p.read_text(encoding='utf-8').strip() for p in docs_year[y]])
        yield year_doc
# -

# Calculate the TF-IDF values for each year.

vectorizer = TfidfVectorizer(stop_words='english', max_features=10000, ngram_range=(1, 1), min_df=5, max_df=0.5)
# preprocessor = lambda x: re.sub(r'(\d[\d\.])+', 'NUM', x.lower())
X_freq = np.asarray(vectorizer.fit_transform(get_docs_year(journals.value)).todense())
df_tfidf_years = pd.DataFrame(X_freq, columns=vectorizer.get_feature_names(), index=get_years(journals.value))

# +
# Save as a CSV
#df_freq.to_csv(f'{journals.value}-word-frequencies.csv')
# -

# Display the results

df_tfidf_years.head()

# Let's display the words each year with the highest TF-IDF scores.

# Top words per year
df_years_top = pd.DataFrame({n: df_tfidf_years.T[col].nlargest(10).index.tolist() for n, col in enumerate(df_tfidf_years.T)}).T
df_years_top.index = get_years(journals.value)
df_years_top.head()

# And now we'll display the results in one huuuge chart.

compound_chart = alt.vconcat()
years = get_years(journals.value)
# Number of columns
cols = 4
start = 0
while start < len(years):
    row = alt.hconcat()
    for year in years[start:start+cols]:
        df_year_word_count = pd.DataFrame([{'word': w, 'count': df_tfidf_years.loc[year][w]} for w in df_years_top.loc[year].tolist()])
        chart = alt.Chart(df_year_word_count).mark_bar().encode(
            y='word:N',
            x='count:Q',
        ).properties(width=120, height=120, title=str(year), columns=4)
        row |= chart
    compound_chart &= row
    start += cols
compound_chart
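# As an optional extra (not part of the original notebook), the hypothetical cell below charts how the TF-IDF score of a single word changes across years, using the `df_tfidf_years` dataframe built above. The example word is just a placeholder and may not appear in the selected journal's vocabulary.

# +
word = 'war'  # hypothetical example word -- substitute any column of df_tfidf_years

if word in df_tfidf_years.columns:
    df_word = pd.DataFrame({'year': df_tfidf_years.index, 'tfidf': df_tfidf_years[word]})
    display(alt.Chart(df_word).mark_line(point=True).encode(
        x='year:O',
        y='tfidf:Q',
        tooltip=['year', 'tfidf']
    ).properties(width=600, title=f"TF-IDF score of '{word}' by year"))
else:
    print(f"'{word}' is not in the vocabulary for this journal")
# -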
umdigitalstudio2019/tfidf_in_digitised_journals.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/google/neural-tangents/blob/master/notebooks/function_space_linearization.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="9uPYkWOcghJm" pycharm={} # ##### Copyright 2019 Google LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # + [markdown] colab_type="text" id="YDnknGorgv2O" pycharm={} # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="8KPv0bOW6UCi" pycharm={} # #### Import & Utils # + [markdown] colab_type="text" id="cxFbqXZKhGW0" pycharm={} # Install JAX, Tensorflow Datasets, and Neural Tangents. # # The first line specifies the version of jaxlib that we would like to import. Note, that "cp36" species the version of python (version 3.6) used by JAX. Make sure your colab kernel matches this version. # + colab={} colab_type="code" id="g_gSbMyUhF92" pycharm={} # !pip install -q tensorflow-datasets # !pip install -q git+https://www.github.com/google/neural-tangents # + [markdown] colab_type="text" id="knIftr57X055" pycharm={} # Import libraries # + colab={} colab_type="code" id="8D0i89hRmNoC" pycharm={} from jax.api import jit from jax.api import grad from jax import random import jax.numpy as np from jax.experimental.stax import logsoftmax from jax.experimental import optimizers import tensorflow_datasets as tfds import neural_tangents as nt from neural_tangents import stax # + [markdown] colab_type="text" id="_bbZz-nWX4Hj" pycharm={} # Define helper functions for processing data and defining a vanilla momentum optimizer # + colab={} colab_type="code" id="-W1ws1B-6_vq" pycharm={} def process_data(data_chunk): """Flatten the images and one-hot encode the labels.""" image, label = data_chunk['image'], data_chunk['label'] samples = image.shape[0] image = np.array(np.reshape(image, (samples, -1)), dtype=np.float32) image = (image - np.mean(image)) / np.std(image) label = np.eye(10)[label] return {'image': image, 'label': label} # + colab={} colab_type="code" id="Ik27L4izDK9s" pycharm={} @optimizers.optimizer def momentum(learning_rate, momentum=0.9): """A standard momentum optimizer for testing. Different from `jax.experimental.optimizers.momentum` (Nesterov). 
""" learning_rate = optimizers.make_schedule(learning_rate) def init_fn(x0): v0 = np.zeros_like(x0) return x0, v0 def update_fn(i, g, state): x, velocity = state velocity = momentum * velocity + g x = x - learning_rate(i) * velocity return x, velocity def get_params(state): x, _ = state return x return init_fn, update_fn, get_params # + [markdown] colab_type="text" id="32Wvhil9X8IK" pycharm={} # # Function Space Linearization # + [markdown] colab_type="text" id="JJ_zDKsKcDB-" pycharm={} # Create MNIST data pipeline using TensorFlow Datasets. # + colab={} colab_type="code" id="5llaSqZW4Et3" pycharm={} dataset_size = 64 ds_train, ds_test = tfds.as_numpy( tfds.load('mnist:3.*.*', split=['train[:%d]' % dataset_size, 'test[:%d]' % dataset_size], batch_size=-1) ) train = process_data(ds_train) test = process_data(ds_test) # + [markdown] colab_type="text" id="Ajz_oTOw72v8" pycharm={} # Setup some experiment parameters. # + colab={} colab_type="code" id="UtjfeaYC72Gs" pycharm={} learning_rate = 1e0 training_steps = np.arange(1000) print_every = 100.0 # + [markdown] colab_type="text" id="1-nKR--j5p2C" pycharm={} # Create a Fully-Connected Network. # + colab={} colab_type="code" id="wIbfrdzq5pLZ" pycharm={} init_fn, f, _ = stax.serial( stax.Dense(512, 1., 0.05), stax.Erf(), stax.Dense(10, 1., 0.05)) key = random.PRNGKey(0) _, params = init_fn(key, (-1, 784)) # + [markdown] colab_type="text" id="c9zgKt9B8NBt" pycharm={} # Construct the NTK. # + colab={} colab_type="code" id="bU6ccJM_8LWt" pycharm={} ntk = nt.batch(nt.empirical_ntk_fn(f, vmap_axes=0), batch_size=16, device_count=0) g_dd = ntk(train['image'], None, params) g_td = ntk(test['image'], train['image'], params) # + [markdown] colab_type="text" id="jdR-lIW11Vbj" pycharm={} # Now that we have the NTK and a network we can compare against a number of different dynamics. Remember to reinitialize the network and NTK if you want to try a different dynamics. # + [markdown] colab_type="text" id="hVesciX61bGb" pycharm={} # ## Gradient Descent, MSE Loss # + [markdown] colab_type="text" id="Lrp9YNCt7nCj" pycharm={} # Create a optimizer and initialize it. # + colab={} colab_type="code" id="J-8i_4KD7o5s" pycharm={} opt_init, opt_apply, get_params = optimizers.sgd(learning_rate) state = opt_init(params) # + [markdown] colab_type="text" id="NspVdDOU8mhk" pycharm={} # Create an MSE loss and a gradient. # + colab={} colab_type="code" id="z6L-LzyF8qLW" pycharm={} loss = lambda fx, y_hat: 0.5 * np.mean((fx - y_hat) ** 2) grad_loss = jit(grad(lambda params, x, y: loss(f(params, x), y))) # + [markdown] colab_type="text" id="f57Teh1317hn" pycharm={} # Create an MSE predictor and compute the function space values of the network at initialization. # + colab={} colab_type="code" id="7UH_uOxz16w2" pycharm={} predictor = nt.predict.gradient_descent_mse(g_dd, train['label'], learning_rate=learning_rate) fx_train = f(params, train['image']) # + [markdown] colab_type="text" id="rWROOyCZ9u6N" pycharm={} # Train the network. 
# + colab={"height": 204} colab_type="code" executionInfo={"elapsed": 2319, "status": "ok", "timestamp": 1588652647308, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 420} id="WXeof-AB8BiV" outputId="668fec81-aa7c-4b57-96f3-28b766e2cf35" pycharm={} print ('Time\tLoss\tLinear Loss') X, Y = train['image'], train['label'] predictions = predictor(training_steps, fx_train) for i in training_steps: params = get_params(state) state = opt_apply(i, grad_loss(params, X, Y), state) if i % print_every == 0: exact_loss = loss(f(params, X), Y) linear_loss = loss(predictions[i], Y) print('{}\t{:.4f}\t{:.4f}'.format(i, exact_loss, linear_loss)) # + [markdown] colab_type="text" id="gx65YR3A8_yd" pycharm={} # ## Gradient Descent, Cross Entropy Loss # + [markdown] colab_type="text" id="8jEb5V9C8_yd" pycharm={} # Create a optimizer and initialize it. # + colab={} colab_type="code" id="VKfuj6O88_ye" pycharm={} opt_init, opt_apply, get_params = optimizers.sgd(learning_rate) state = opt_init(params) # + [markdown] colab_type="text" id="hpWaHdvH8_yg" pycharm={} # Create an Cross Entropy loss and a gradient. # + colab={} colab_type="code" id="zQ03wQ7O8_yh" pycharm={} loss = lambda fx, y_hat: -np.mean(logsoftmax(fx) * y_hat) grad_loss = jit(grad(lambda params, x, y: loss(f(params, x), y))) # + [markdown] colab_type="text" id="WgS4k3878_yi" pycharm={} # Create a Gradient Descent predictor and compute the function space values of the network at initialization. # + colab={} colab_type="code" id="h2uIi4mQ8_yi" pycharm={} predictor = nt.predict.gradient_descent(loss, g_dd, train['label'], learning_rate=learning_rate) fx_train = f(params, train['image']) # + [markdown] colab_type="text" id="tRh7Ur9Y8_yj" pycharm={} # Train the network. # + colab={"height": 204} colab_type="code" executionInfo={"elapsed": 4141, "status": "ok", "timestamp": 1588652652808, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 420} id="FnW6DNWf8_yj" outputId="93101ca1-bf59-4262-ae7c-a7fecfc1a7b2" pycharm={} print ('Time\tLoss\tLinear Loss') X, Y = train['image'], train['label'] predictions = predictor(training_steps, fx_train) for i in training_steps: params = get_params(state) state = opt_apply(i, grad_loss(params, X, Y), state) if i % print_every == 0: t = i * learning_rate exact_loss = loss(f(params, X), Y) linear_loss = loss(predictions[i], Y) print('{:.0f}\t{:.4f}\t{:.4f}'.format(i, exact_loss, linear_loss)) # + [markdown] colab_type="text" id="vc2FaYtEDBJ_" pycharm={} # ## Momentum, Cross Entropy Loss # + [markdown] colab_type="text" id="L4onegU1DBKA" pycharm={} # Create a optimizer and initialize it. # + colab={} colab_type="code" id="cxoiw-DADBKB" pycharm={} mass = 0.9 opt_init, opt_apply, get_params = momentum(learning_rate, mass) state = opt_init(params) # + [markdown] colab_type="text" id="63VJ8y9FDBKE" pycharm={} # Create a Cross Entropy loss and a gradient. # + colab={} colab_type="code" id="e8SxBiZXDBKE" pycharm={} loss = lambda fx, y_hat: -np.mean(logsoftmax(fx) * y_hat) grad_loss = jit(grad(lambda params, x, y: loss(f(params, x), y))) # + [markdown] colab_type="text" id="t7GiiW-LDBKI" pycharm={} # Create a momentum predictor and initialize it. # + colab={} colab_type="code" id="8fpKKqPaDBKJ" pycharm={} predictor = nt.predict.gradient_descent(loss, g_dd, train['label'], learning_rate=learning_rate, momentum=mass) fx_train = f(params, train['image']) # + [markdown] colab_type="text" id="jW9ws4fMDBKL" pycharm={} # Train the network. 
# + colab={"height": 204} colab_type="code" executionInfo={"elapsed": 5582, "status": "ok", "timestamp": 1588652659738, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 420} id="_pfseUitDBKM" outputId="e6c9403d-1152-4e84-8479-211a3089ddcb" pycharm={} print ('Time\tLoss\tLinear Loss') X, Y = train['image'], train['label'] predictions = predictor(training_steps, fx_train) for i in training_steps: params = get_params(state) state = opt_apply(i, grad_loss(params, X, Y), state) if i % print_every == 0: exact_loss = loss(f(params, X), Y) linear_loss = loss(predictions[i], Y) print('{:.0f}\t{:.4f}\t{:.4f}'.format(i, exact_loss, linear_loss))
notebooks/function_space_linearization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Introduction to Python Programming # === # ## Basic Syntax print('Hello World!') message = 'Hello World!' print(message) # ## User I/O print('What is your name?') name = input() print('Hello,', name) name = input('What is your name? ') print('Hello,', name) # ## Arithmetic Operators 1 + 1 2 - 1 3 * 3 10 / 5 # real division, will give you floating point 10 // 5 # floor division, will give you integer 11 // 2 2 ** 3 # exponent 5 % 4 # modulus # ## Logical Operators 1 or 1 1 or 0 1 and 1 1 and 0 not True not False # ## Relational Operators 1 == 1 # equal to 0 == 1 # equal to 1 != 1 # not equal to 0 != 1 # not equal to 1 > 0 # greater than 0 > 1 # greater than 1 < 0 # less than 0 < 1 # less than 0 >= 0 # greater than or equal to 1 >= 0 # greater than or equal to -1 >= 0 # greater than or equal to 0 <= 0 # less than or equal to 1 <= 0 # less than or equal to -1 <= 0 # less than or equal to # ## Assignment Operators a = 1 # assignment a += 1 # add and a -= 1 # subtract and a *= 1 # multiply and a /= 1 # divide and a %= 1 # modulus and a **= 1 # exponent and a //= 1 # floor division and # ## Data Structures: List subjects = ['mathematics', 'physics', 'computer science', 'chemistry'] # create list # ### Membership Operators 'mathematics' in subjects # checks if 'mathematics' is in subjects list 'mathematics' not in subjects # checks if 'mathematics' is not in subjects list # ### Accessing List Elements print('subjects[0]:', subjects[0]) # access first element print('subjects[-1]', subjects[-1]) # access last element # ### Updating List Element subjects[0] = 'Mathematics' # ### Length of List len(subjects) # ### Slicing subjects[1:] # access all elements starting from second element only subjects[1:3] # access second and third elements only # ### Appending subjects.append('statistics') # add a new element # ## Data Structures: Dictionary scientist = {'Name': '<NAME>', 'Field': 'Mathematics'} print('Name:', scientist['Name']) # accessing dictionary value scientist['Field'] = 'Mathematics, Computer Science, Biology' # updating dictionary value len(scientist) # length of dictionary scientist.keys() # accessing keys scientist.values() # accessing values # ## Condition Statements if 'Mathematics' in subjects: print('Mathematics is in subjects!') if 'mathematics' in subjects: print('mathematics is in subjects!') else: print('mathematics is not in subjects!') if 'mathematics' in subjects: print('mathematics is in subjects!') elif 'Mathematics' in subjects: print('Mathematics is in subjects!') else: print('Neither') # ## Loop Statements # + value = 0 while True: print('Hello!', end=' ') if value == 3: break else: value += 1 # - # ## Functions def pretty_print(statement): print('*======*') print(statement) print('*======*') pretty_print('Hello, World!!') def add(x, y): return x + y c = add(1, 2) print(c)
notebooks/0-introduction-to-python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib inline import matplotlib.pyplot as plt from matplotlib.patches import Arc, Rectangle from matplotlib.lines import Line2D import numpy as np # + def ls_circle(points): ''' Input: Nx2 points Output: cx, cy, r ''' xs = points[:,0] ys = points[:,1] us = xs - np.mean(xs) vs = ys - np.mean(ys) Suu = np.sum(us**2) Suv = np.sum(us*vs) Svv = np.sum(vs**2) Suuu = np.sum(us**3) Suvv = np.sum(us*vs*vs) Svvv = np.sum(vs**3) Svuu = np.sum(vs*us*us) A = np.array([ [Suu, Suv], [Suv, Svv] ]) b = np.array([1/2.*Suuu+1/2.*Suvv, 1/2.*Svvv+1/2.*Svuu]) cx, cy = np.linalg.solve(A, b) r = np.sqrt(cx*cx+cy*cy+(Suu+Svv)/len(xs)) cx += np.mean(xs) cy += np.mean(ys) return np.array([cx, cy]), r def project_point_to_circle(point, c, r): direction = point - c closest = c + (direction / np.linalg.norm(direction)) * r return closest def make_arc(c, r, start=0, end=2*np.pi): theta = np.linspace(start, end, 100) print(start, end) x1 = r * np.cos(theta) + c[0] x2 = r * np.sin(theta) + c[1] return np.stack([x1, x2], 1) def get_angle_plot(line1, line2, offset=1, color='k', origin=[0,0]): l1xy = line1.get_xydata() l2xy = line2.get_xydata() u = l1xy[1] - l1xy[0] v = l2xy[1] - l2xy[0] angle1 = abs(np.degrees(np.arctan2(u[1], u[0]))) angle2 = abs(np.degrees(np.arctan2(v[1], v[0]))) theta1 = min(angle1, angle2) theta2 = max(angle1, angle2) return Arc( origin, offset, offset, 0, theta1, theta2, color=color, linewidth=1.0, zorder=1.0) def arrow(ax, p1, p2, text='', color='k', head_width=0.02): line = Line2D([p1[0], p2[0]], [p1[1], p2[1]], color=color) ax.add_line(line) ax.arrow( p2[0], p2[1], (p2[0] - p1[0]) * 0.001, (p2[1] - p1[1]) * 0.001, head_width=head_width, color=color) if text: ax.text(p2[0], p2[1], text) # + colors = {'butter1': (252, 233, 79), 'butter2': (237, 212, 0), 'butter3': (196, 160, 0), 'orange1': (252, 175, 62), 'orange2': (245, 121, 0), 'orange3': (206, 92, 0), 'chocolate1': (233, 185, 110), 'chocolate2': (193, 125, 17), 'chocolate3': (143, 89, 2), 'chameleon1': (138, 226, 52), 'chameleon2': (115, 210, 22), 'chameleon3': ( 78, 154, 6), 'skyblue1': (114, 159, 207), 'skyblue2': ( 52, 101, 164), 'skyblue3': ( 32, 74, 135), 'plum1': (173, 127, 168), 'plum2': (117, 80, 123), 'plum3': ( 92, 53, 102), 'scarletred1': (239, 41, 41), 'scarletred2': (204, 0, 0), 'scarletred3': (164, 0, 0), 'aluminium1': (238, 238, 236), 'aluminium2': (211, 215, 207), 'aluminium3': (186, 189, 182), 'aluminium4': (136, 138, 133), 'aluminium5': ( 85, 87, 83), 'aluminium6': ( 46, 52, 54), 'indigo': (114, 33, 188), 'maroon': (103, 7, 72), 'turquoise': ( 64, 224, 208), 'green4': ( 0, 139, 0)} for k, (r, g, b) in colors.items(): colors[k] = (r / 255, g / 255, b / 255) # + points = [[0.20066186907158112, -0.032855948849496976], [0.3703214095102396, -0.1323611333831328], [0.48125568589863593, -0.29269832005189755], [0.5339926472234708, -0.48612209986502347], [0.5527041638633724, -0.6915413640867977]] points = [(0, 0)] + points points = np.stack(points, 0) points = points[:,::-1] target_index = 2 c, r = ls_circle(points) arc = make_arc(c, r, start=(np.pi + 1.0 * np.pi), end=2 * np.pi + 0.62 * np.pi) # Perturb after to emphasize projection. 
points[target_index] += [0, 0.1] target = points[target_index] closest = project_point_to_circle(target, c, r) plt.figure(figsize=(2.5, 2.5), dpi=300) ax = plt.gca() x = Line2D([0, 0], [0, 0.5], color='k') y = Line2D([0, 0.5], [0, 0], color='k') project_to_arc = Line2D([target[0], closest[0]], [target[1], closest[1]], linestyle='-', color=colors['scarletred1'], alpha=0.5) to_target = Line2D([0, closest[0]], [0, closest[1]], linestyle='--', color='k', zorder=6.0) up = Line2D([0, 0], [0, np.linalg.norm(closest)], linestyle='--', color='k', zorder=6.0, alpha=0.8) angle_arc = get_angle_plot(x, to_target, offset=0.5, color='k') ax.plot(arc[:,0], arc[:,1], alpha=0.3, linestyle='--', color=colors['skyblue1'], zorder=0.0) ax.add_line(to_target) ax.add_line(project_to_arc) ax.add_line(up) ax.add_patch(angle_arc) # Car W = 0.1 H = W * 1.9 car = Rectangle( (0 - W / 2, 0.0 - H * 0.8), W, H, fill=True, edgecolor='k', facecolor='w', zorder=5.0) ax.add_patch(car) ax.text(-0.09, 0.32, r'$s^*$', fontsize=12) #for i in range(1, len(points)): # ax.text(points[i,0] - 0.06, points[i,1], r'$p_%s$' % i, fontsize=8) ax.scatter(closest[0], closest[1], color=colors['skyblue1'], zorder=8, s=40, edgecolor='k') ax.scatter(points[1:,0], points[1:,1], color=colors['scarletred1'], zorder=8, s=40, edgecolor='k') #plt.plot(points[0,0], points[0,1], 'k.') BOUNDS = 0.7 ax.grid(False) ax.set_xticks([]) ax.set_yticks([]) ax.axis('off') ax.set_facecolor((1.0, 1.0, 1.0)) plt.tight_layout() plt.gca().axis('equal') plt.xlim(-0.79, BOUNDS / 4) plt.ylim(-0.02, 0.25) plt.savefig('controller.pdf', dpi=300) plt.show() # + points = [[0.20066186907158112, -0.032855948849496976], [0.3703214095102396, -0.1323611333831328], [0.48125568589863593, -0.29269832005189755], [0.5339926472234708, -0.48612209986502347], [0.5527041638633724, -0.6915413640867977]] points = [(0, 0)] + points points = np.stack(points, 0) points = points[:,::-1] target_index = 2 c, r = ls_circle(points) arc = make_arc(c, r, start=(np.pi + 1.0 * np.pi), end=2 * np.pi + 0.62 * np.pi) # Perturb after to emphasize projection. 
points[target_index] += [0, 0.1] target = points[target_index] closest = project_point_to_circle(target, c, r) plt.clf() plt.figure(figsize=(2.5, 2.5), dpi=300) ax = plt.gca() x = Line2D([0, 0], [0, 0.5], color='k') y = Line2D([0, 0.5], [0, 0], color='k') project_to_arc = Line2D([target[0], closest[0]], [target[1], closest[1]], linestyle='-', color=colors['scarletred1'], alpha=0.5) to_target = Line2D([0, closest[0]], [0, closest[1]], linestyle='--', color='k', zorder=6.0) up = Line2D([0, 0], [0, np.linalg.norm(closest)], linestyle='--', color='k', zorder=6.0, alpha=0.8) angle_arc = get_angle_plot(x, to_target, offset=0.5, color='k') # ax.plot(arc[:,0], arc[:,1], alpha=0.3, linestyle='--', color=colors['skyblue1'], zorder=0.0) # ax.add_line(to_target) # ax.add_line(project_to_arc) # ax.add_line(up) # ax.add_patch(angle_arc) # Car W = 0.1 H = W * 1.9 car = Rectangle( (0 - W / 2, 0.0 - H * 0.8), W, H, fill=True, edgecolor='k', facecolor='w', zorder=5.0) ax.add_patch(car) # ax.text(-0.09, 0.32, r'$s^*$', fontsize=12) #for i in range(1, len(points)): # ax.text(points[i,0] - 0.06, points[i,1], r'$p_%s$' % i, fontsize=8) # ax.scatter(closest[0], closest[1], color=colors['skyblue1'], zorder=8, s=40, edgecolor='k') ax.scatter(points[1:,0], points[1:,1], color=colors['scarletred1'], zorder=8, s=40, edgecolor='k') ax.plot(points[:,0], points[:,1], '--', color=colors['scarletred1'], alpha=0.5) for i in range(len(points) - 1): x = (points[i,0] + points[i+1,0])/2 y = (points[i,1] + points[i+1,1])/2 #ax.text(x, y, r'$s_%s$' % (i+1)) #plt.plot(points[0,0], points[0,1], 'k.') BOUNDS = 0.7 ax.grid(False) ax.set_xticks([]) ax.set_yticks([]) ax.axis('off') ax.set_facecolor((1.0, 1.0, 1.0)) plt.tight_layout() plt.gca().axis('equal') plt.xlim(-0.79, BOUNDS / 4) plt.ylim(-0.02, 0.25) plt.savefig('speed.pdf', dpi=300) plt.show() # -
misc/controller.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 # language: python # name: python36 # --- # Copyright (c) Microsoft Corporation. All rights reserved. # # Licensed under the MIT License. # ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/deployment/deploy-multi-model/multi-model-register-and-deploy.png) # # Deploy Multiple Models as Webservice # # This example shows how to deploy a Webservice with multiple models in step-by-step fashion: # # 1. Register Models # 2. Deploy Models as Webservice # ## Prerequisites # If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the [configuration](../../../configuration.ipynb) Notebook first if you haven't. # + # Check core SDK version number import azureml.core print("SDK version:", azureml.core.VERSION) # - # ## Initialize Workspace # # Initialize a workspace object from persisted configuration. # + tags=["create workspace"] from azureml.core import Workspace ws = Workspace.from_config() print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\n') # - # ## Register Models # In this example, we will be using and registering two models. # # You wil need to have a `first_model.pkl` file and `second_model.pkl` file in the current directory. The below call registers the files as Models with the names `my_first_model` and `my_second_model` in the workspace. # + tags=["register model from file"] from azureml.core.model import Model my_model_1 = Model.register(model_path="first_model.pkl", model_name="my_first_model", workspace=ws) my_model_2 = Model.register(model_path="second_model.pkl", model_name="my_second_model", workspace=ws) # - # ## Write the Entry Script # Write the script that will be used to predict on your models # ### Model.get_model_path() # # To get the paths of your models, use `Model.get_model_path(model_name, version=None, _workspace=None)` method. This method will find the path to a model using the name of the model registered under the workspace. # # In this example, we do not use the optional arguments `version` and `_workspace`. # # #### Using environment variable AZUREML_MODEL_DIR # # In other [examples](../deploy-to-cloud/score.py) with a single model deployment, we use the environment variable `AZUREML_MODEL_DIR` and model file name to get the model path. # # For single model deployments, this environment variable is the path to the model folder (`./azureml-models/$MODEL_NAME/$VERSION`). When we deploy multiple models, the environment variable is set to the folder containing all models (./azureml-models). # # If you're using multiple models and you know the versions of the models you deploy, you can use this method to get the model path: # # ```python # # Construct the model path using the registered model name, version, and model file name # model_1_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'my_first_model', '1', 'first_model.pkl') # ``` # + # %%writefile score.py import pickle import json import numpy as np from sklearn.externals import joblib from sklearn.linear_model import Ridge from azureml.core.model import Model def init(): global model_1, model_2 # note here "my_first_model" is the name of the model registered under the workspace # this call should return the path to the model.pkl file on the local disk. 
model_1_path = Model.get_model_path(model_name='my_first_model') model_2_path = Model.get_model_path(model_name='my_second_model') # deserialize the model files back into a sklearn model model_1 = joblib.load(model_1_path) model_2 = joblib.load(model_2_path) # note you can pass in multiple rows for scoring def run(raw_data): try: data = json.loads(raw_data)['data'] data = np.array(data) # Call predict() on each model result_1 = model_1.predict(data) result_2 = model_2.predict(data) # you can return any data type as long as it is JSON-serializable return {"prediction1": result_1.tolist(), "prediction2": result_2.tolist()} except Exception as e: result = str(e) return result # - # ## Create Environment # You can now create and/or use an Environment object when deploying a Webservice. The Environment can have been previously registered with your Workspace, or it will be registered with it as a part of the Webservice deployment. Please note that your environment must include azureml-defaults with verion >= 1.0.45 as a pip dependency, because it contains the functionality needed to host the model as a web service. # # More information can be found in our [using environments notebook](../training/using-environments/using-environments.ipynb). # + from azureml.core import Environment env = Environment.from_conda_specification(name='deploytocloudenv', file_path='myenv.yml') # This is optional at this point # env.register(workspace=ws) # - # ## Create Inference Configuration # # There is now support for a source directory, you can upload an entire folder from your local machine as dependencies for the Webservice. # Note: in that case, environments's entry_script and file_path are relative paths to the source_directory path; myenv.docker.base_dockerfile is a string containing extra docker steps or contents of the docker file. # # Sample code for using a source directory: # # ```python # from azureml.core.environment import Environment # from azureml.core.model import InferenceConfig # # myenv = Environment.from_conda_specification(name='myenv', file_path='env/myenv.yml') # # # explicitly set base_image to None when setting base_dockerfile # myenv.docker.base_image = None # # add extra docker commends to execute # myenv.docker.base_dockerfile = "FROM ubuntu\n RUN echo \"hello\"" # # inference_config = InferenceConfig(source_directory="C:/abc", # entry_script="x/y/score.py", # environment=myenv) # ``` # # - file_path: input parameter to Environment constructor. Manages conda and python package dependencies. # - env.docker.base_dockerfile: any extra steps you want to inject into docker file # - source_directory: holds source path as string, this entire folder gets added in image so its really easy to access any files within this folder or subfolder # - entry_script: contains logic specific to initializing your model and running predictions # + tags=["create image"] from azureml.core.model import InferenceConfig inference_config = InferenceConfig(entry_script="score.py", environment=env) # - # ### Deploy Model as Webservice on Azure Container Instance # # Note that the service creation can take few minutes. 
# + tags=["azuremlexception-remarks-sample"] from azureml.core.webservice import AciWebservice, Webservice from azureml.exceptions import WebserviceException deployment_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1) aci_service_name = 'aciservice-multimodel' try: # if you want to get existing service below is the command # since aci name needs to be unique in subscription deleting existing aci if any # we use aci_service_name to create azure aci service = Webservice(ws, name=aci_service_name) if service: service.delete() except WebserviceException as e: print() service = Model.deploy(ws, aci_service_name, [my_model_1, my_model_2], inference_config, deployment_config) service.wait_for_deployment(True) print(service.state) # - # #### Test web service # + import json test_sample = json.dumps({'data': [ [1,2,3,4,5,6,7,8,9,10], [10,9,8,7,6,5,4,3,2,1] ]}) test_sample_encoded = bytes(test_sample, encoding='utf8') prediction = service.run(input_data=test_sample_encoded) print(prediction) # - # #### Delete ACI to clean up # + tags=["deploy service", "aci"] service.delete()
how-to-use-azureml/deployment/deploy-multi-model/multi-model-register-and-deploy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Asset Pricing via Simulation # # #### <NAME> import numpy as np import matplotlib.pyplot as plt from numba import jit, prange from numpy.random import randn # We aim to compute the price dividend ratio # # $$ # V_t = \mathbb E_t # \left[ # M_{t+1} \frac{D_{t+1}}{D_t} # (1 + V_{t+1}) # \right] # $$ # # given the SDF and dividend process. # ### Model 1 # We consider the case of nonstationary dividends and consumption, where # # $$ \ln \frac{D_{t+1}}{D_t} = \mu_d + X_t + \sigma_d \eta_{d, t+1} $$ # # The shock sequence $\{ \eta_{d,t} \}$ is IID and standard normal. The state process $\{X_t\}$ obeys # # $$ X_{t+1} = \rho X_t + \sigma \xi_{t+1} $$ # # where $\{ \xi_t \}$ is IID and standard normal. # # Consumption is also nonstationary, obeying # # $$ \ln \frac{C_{t+1}}{C_t} = \mu_c + X_t + \sigma_c \eta_{c, t+1} $$ # # The sequence $\{ \eta_{c,t} \}$ is likewise IID and standard normal. # # We use the Lucas SDF # # $$ M_{t+1} = \beta \frac{u'(C_{t+1})}{u'(C_t)} $$ # # The utility function is # # $$ u(c) = \frac{c^{1-\gamma}}{1 - \gamma} $$ # # Hence # # $$ # M_{t+1} # = \beta \left( \frac{C_{t+1}}{C_t} \right)^{-\gamma} # = \beta \exp(-\gamma( \mu_c + X_t + \sigma_c \eta_{c, t+1} )) # $$ # ### Simulation Method # For $m$ in 1 to $M$, simulate the observation # # $$ \Lambda(x, N, m) := \sum_{n=1}^N \prod_{i=1}^n A_i^{(m)} $$ # # where $X_0 = x$ and $m$ indicates observation $m$. # Compute # # $$ \hat v(x, M, n) := \frac{1}{M} \sum_{m=1}^M \Lambda(x, N, m) $$ # # This is an estimate of $v(x)$ # @jit(nopython=True) def simulate_forward_sum_ave(x0, β=0.96, γ=2.0, ρ=0.9, σ=0.05, μ_d=0.01, σ_d=0.01, μ_c=0.05, σ_c=0.01, N=1000, M=20000): Λ_vals = np.empty(M) for m in range(M): # Generate the path correspoding to m np.random.seed(m) A_path = np.empty(N) x = x0 for t in range(N): η_c = randn() η_d = randn() t1 = -γ * μ_c + μ_d + (1 - γ) * x t2 = - γ * σ_c * η_c + σ_d * η_d A_path[t] = β * np.exp(t1 + t2) x = ρ * x + σ * randn() # Compute the forward value of that path A_prod = 1.0 Λ = 0.0 for n in range(N): A_prod *= A_path[n] Λ += A_prod # Record the result, adding it to the sum Λ_vals[m] = Λ return Λ_vals.mean() @jit(nopython=True, parallel=True) def compute_prices_across_states(x_vals): n = len(x_vals) v_vals = np.empty(n) for i in prange(n): v_vals[i] = simulate_forward_sum_ave(x_vals[i]) return v_vals x_vals = np.linspace(-0.3, 0.3, 20) v_vals = compute_prices_across_states(x_vals) fig, ax = plt.subplots() ax.plot(x_vals, v_vals) ax.set_xlabel("$x$") ax.set_ylabel("$v(x)$") plt.show() # ### Model 2 # # Following Song, Schorfheide and Yaron (2018, ECMA), let # # # \begin{align*} # \ln \frac{C_{t+1}}{ C_t} & # = \mu_c + z_t + \sigma_{c, t} \, \eta_{c, t+1}, # \\ # \ln \frac{D_{t+1}}{ D_t} & # = \mu_d + \alpha z_t + \delta \sigma_{c, t} # \, \eta_{c, t+1} + \sigma_{d, t} \, \eta_{d, t+1} # \end{align*} # # where # # \begin{align*} # z_{t+1} & = \rho \, z_t + (1 - \rho^2)^{1/2} \, # \sigma_{z, t} \, \upsilon_{t+1}, # \\ # \sigma_{i, t} & = \varphi_i \, \bar{\sigma} \exp(h_{i, t}), # \\ # h_{i, t+1} & = \rho_{h_i} h_i + \sigma_{h_i} \xi_{i, t+1}, # \quad i \in \{z, c, d\} # \end{align*} # # # As before, # # $$ # A_{t+1} # = M_{t+1} \frac{D_{t+1}}{D_t} # = \beta \left( \frac{C_{t+1}}{C_t} \right)^{-\gamma} # \frac{D_{t+1}}{D_t} # $$ # # This becomes # # $$ # A_{t+1} # = \beta \exp( 
# -\gamma \mu_c + \mu_d
# + (\alpha - \gamma) z_t
# + (\delta - \gamma) \sigma_{c, t} \eta_{c, t+1}
# + \sigma_{d, t} \eta_{d, t+1}
# )
# $$
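# The notebook stops after stating the Model 2 dynamics. The cell below is a hedged sketch of how the forward-sum estimator from Model 1 could be adapted to these dynamics, reusing the imports at the top of the notebook; the function name is ours, the $h_{i,0}$ states start at zero, and all parameter values are illustrative placeholders rather than the Song, Schorfheide and Yaron calibration.

@jit(nopython=True)
def simulate_forward_sum_ave_ssy(z0=0.0,
                                 β=0.96, γ=2.0,
                                 μ_c=0.05, μ_d=0.01,
                                 α=1.0, δ=1.0, ρ=0.9,
                                 bar_σ=0.01,
                                 φ_z=1.0, φ_c=1.0, φ_d=1.0,
                                 ρ_hz=0.9, ρ_hc=0.9, ρ_hd=0.9,
                                 σ_hz=0.1, σ_hc=0.1, σ_hd=0.1,
                                 N=1000, M=20000):
    Λ_vals = np.empty(M)
    for m in range(M):
        # Generate the path corresponding to m, starting all h_i at zero
        np.random.seed(m)
        z = z0
        h_z, h_c, h_d = 0.0, 0.0, 0.0
        A_prod = 1.0
        Λ = 0.0
        for t in range(N):
            # Volatilities at time t: σ_{i,t} = φ_i * bar_σ * exp(h_{i,t})
            σ_z = φ_z * bar_σ * np.exp(h_z)
            σ_c = φ_c * bar_σ * np.exp(h_c)
            σ_d = φ_d * bar_σ * np.exp(h_d)
            # A_{t+1} = β exp(-γ μ_c + μ_d + (α - γ) z_t
            #                 + (δ - γ) σ_{c,t} η_{c,t+1} + σ_{d,t} η_{d,t+1})
            t1 = -γ * μ_c + μ_d + (α - γ) * z
            t2 = (δ - γ) * σ_c * randn() + σ_d * randn()
            A_prod *= β * np.exp(t1 + t2)
            Λ += A_prod
            # Update the state variables
            z = ρ * z + np.sqrt(1 - ρ**2) * σ_z * randn()
            h_z = ρ_hz * h_z + σ_hz * randn()
            h_c = ρ_hc * h_c + σ_hc * randn()
            h_d = ρ_hd * h_d + σ_hd * randn()
        Λ_vals[m] = Λ
    return Λ_vals.mean()

# With parameters set to an actual calibration, `compute_prices_across_states` from Model 1 could be adapted in the same way to trace out the price-dividend ratio over a grid of states.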
day3/applications/asset_pricing/asset_pricing_simulation_with_ssy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
# ---

# ### ML
# 1. Data. Identify the type of problem:
#     - Classification: (Logistic Regression, SVC, KNN, Decision Tree Classifier, Random Forest Classifier, ...)
#     - Regression: (Linear regression, non-linear/polynomial, SVR, Decision Tree Regressor, Random Forest Regressor, ...)
#
# 2. EDA: data wrangling:
#     - !!!!!!! Build the baseline model first !!!!!!! -> if this model, built without doing anything, already gives better results than your tuned one, you need to know it
#     - Web scraping, APIs, JSON, CSV, databases (SQL/NoSQL), web pages
#     - Feature engineering (feature extraction):
#         - Outliers, regex, NaN, distributions, standardisation (rescale the data so the mean is 0 and the standard deviation is 1), normalisation, encoding (dummies), duplicates, dropping irrelevant columns.
#     - Visualise the data (correlations)
#     - Aggregated variables: we have not done exercises on this, but it can be used to find patterns or add new ones. Aggregate columns are added, for example the mean of other numeric columns.
#     - Column weighting (give one column more weight by multiplying it by 2, 3 or 7 to give it more value)
#     - Drop collinear columns (one column that is another multiplied by 2, or one column of heights in metres and another in cm)
# 3. Lay out the data so that train and test make sense.
#
# 4. Split the data into training and test sets. Choose a test percentage and a seed.
#
# 5. Cross validation -> the goal is to understand how my model behaves on my data. Grid search already does this, so it is better to do it through grid search. It depends on how many rows and how much data you have; if it is very large, don't train it fully, just report how the folds compare.
#
# 6. Grid search pipeline -> the pipeline could also sit in the EDA part, because that is where we already transform the data. (A minimal sketch of steps 4-8 appears at the end of this notebook.)
#
# 7. Keep the best model and check its score on the test set -> grid search only reports scores on the training folds, not on the test set, so we need both. Keep the model that generalises best; if none is acceptable, go back up and choose another model.
#
# 8. Save it for future tests and for peace of mind.
#
# 9. After trying many times with many seeds, once the train and test sets are fixed, fit on everything with .fit(X, y) and save the model trained on all the data under a different name.
#
#
""" to stop early if some validation fold scores very low
if score_val > np.mean(val_score) // 2:
    print("bad validation fold")"""

"""if i % 1000 == 0:
    pickle.dump(model, open(path + "model_saved.sav", "wb")) """
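# A minimal, hypothetical sketch of steps 4-8 above (train/test split, pipeline, grid search with cross-validation, a final check on the held-out test set, and saving the winner). The dataset, model and parameter grid are illustrative placeholders only.

# +
import pickle

from sklearn.datasets import load_wine
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

# 4. Split the data into train and test, choosing a test percentage and a seed
X, y = load_wine(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# 6. Pipeline (scaling lives inside it, so it is re-fit on every CV fold)
pipe = Pipeline([('scaler', StandardScaler()), ('clf', SVC())])
param_grid = {'clf__C': [0.1, 1, 10], 'clf__kernel': ['linear', 'rbf']}

# 5-6. Grid search with cross-validation on the training set only
grid = GridSearchCV(pipe, param_grid, cv=5)
grid.fit(X_train, y_train)
print(grid.best_params_, grid.best_score_)

# 7. Check how the best model generalises on the held-out test set
print(grid.score(X_test, y_test))

# 8. Save the best model for future tests
pickle.dump(grid.best_estimator_, open('model_saved.sav', 'wb'))
# -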
week10_ML_competition_pca_kmeans/day1_gridsearch_pipelines/gridsearch/esquema.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Copy items-Copy1.csv file into pandas dataframe and # # replace all fields that are empty with NaN # + import pandas as pd import numpy as np # read dataset into pandas dataframe df_items = pd.read_csv('items-Copy1.csv') # replace field that's entirely space (or empty) with NaN df_items = df_items.replace(np.nan, '', regex=True) data_text = df_items[['description']] data_text['index'] = data_text.index documents = data_text # - len(documents) documents[:5] # # Need to perform the following steps: # 1. Tokenization -- split text into sequences and sentences into words. # Lowercase all words and remove punctuation. # 2. Remove words with fewer than 3 characters # 3. Remove stopwords # 4. Lemmatize words -- words in third person are changed to first person # and verbs in past and future tenses are changed into present. # 5. Stem words -- words are reduced to root form. # # ## gensim and nltk libraries will be used to # ## implement this. import gensim from gensim.utils import simple_preprocess from gensim.parsing.preprocessing import STOPWORDS from nltk.stem import WordNetLemmatizer, SnowballStemmer from nltk.stem.porter import * import numpy as np np.random.seed(2018) import nltk nltk.download('wordnet') # # Lemmatize example print(WordNetLemmatizer().lemmatize('went', pos='v')) # # Stemmer Example stemmer = SnowballStemmer('english') original_words = ['caresses', 'flies', 'dies', 'mules', 'denied', 'died', 'agreed', 'owned', 'humbled', 'sized', 'meeting', 'stating', 'siezing', 'itemization', 'sensational', 'traditional', 'reference', 'colonizer', 'plotted'] singles = [stemmer.stem(plural) for plural in original_words] pd.DataFrame(data={'original word': original_words, 'stemmed': singles}) # # Function in order to lemmatize and stem # # preprocess on the dataset def lemmatize_stemming(text): return stemmer.stem(WordNetLemmatizer().lemmatize(text, pos='v')) def preprocess(text): result = [] for token in gensim.utils.simple_preprocess(text): if token not in gensim.parsing.preprocessing.STOPWORDS and len(token) > 3: result.append(lemmatize_stemming(token)) return result # # Initialize stemmer stemmer = SnowballStemmer('english') # # Display before and after of preprocessing on # # example document # + doc_sample = documents[documents['index'] == 3].values[0][0] print('original document: ') words = [] for word in doc_sample.split(' '): words.append(word) print(words) print('\n\n tokenized and lemmatized document: ') print(preprocess(doc_sample)) # - # # Preprocess all description field text # # Save result in 'processed_docs' processed_docs = documents['description'].map(preprocess) processed_docs[:10] # # Create Bag of Words with Data set # ## Start with creating dictionary with number of times a word appears in training set then filter out tokens that appear in: # # 1. less than 15 documents OR # 2. 
more than 0.5 documents # # ### * Keep only first 100000 most frequent tokens # + dictionary = gensim.corpora.Dictionary(processed_docs) dictionary.filter_extremes(no_below=15, no_above=0.5, keep_n=100000) # - # # Utilize doc2bow from gensim bow_corpus = [dictionary.doc2bow(doc) for doc in processed_docs] bow_corpus[3000] # # Preview BoW from sample preprocessed document bow_doc_3000 = bow_corpus[3000] for i in range(len(bow_doc_3000)): print("Word {} (\"{}\") appears {} time.".format(bow_doc_3000[i][0], dictionary[bow_doc_3000[i][0]], bow_doc_3000[i][1])) # # Create tf-idf model using models.tfidfmodel on # # bow_corpus from gensim import corpora, models tfidf = models.TfidfModel(bow_corpus) corpus_tfidf = tfidf[bow_corpus] # from pprint import pprint # for doc in corpus_tfidf: # pprint(doc) # # Train LDA model using gensim.models.LdaMulticore # # Save to 'lda_model' lda_model = gensim.models.LdaMulticore( bow_corpus, num_topics=10, id2word=dictionary, passes=2, workers=2) # # For each topic, explore the words occuring in that # # topic and the relative weight for idx, topic in lda_model.print_topics(-1): print('Topic: {} \nWords: {}'.format(idx, topic)) # # Attempt distinguishing topics using words in each topic and their corresponding weights Run LDA using tf-idf lda_model_tfidf = gensim.models.LdaMulticore( corpus_tfidf, num_topics=10, id2word=dictionary, passes=2, workers=4) for idx, topic in lda_model_tfidf.print_topics(-1): print('Topic: {} Word: {}'.format(idx, topic)) # # Evaluate performance by classifying sample # # document using LDA BoW model processed_docs[3000] for index, score in sorted(lda_model[bow_corpus[3000]], key=lambda tup: -1*tup[1]): print("\nScore: {}\t \nTopic: {}".format( score, lda_model.print_topic(index, 10))) # # Evaluate performance by classifying sample # # document using LDA tf-idf model for index, score in sorted(lda_model_tfidf[bow_corpus[3000]], key=lambda tup: -1*tup[1]): print("\nScore: {}\t \nTopic: {}".format( score, lda_model_tfidf.print_topic(index, 10))) # # Test model on unseen document unseen_document = 'Black Facebook employees complain racism, discrimination have gotten worse' bow_vector = dictionary.doc2bow(preprocess(unseen_document)) for index, score in sorted(lda_model[bow_vector], key=lambda tup: -1*tup[1]): print("Score: {}\t Topic: {}".format( score, lda_model.print_topic(index, 5))) # # Let's explore the words in the description field and form a word cloud to get a visual representation of the most common words # # Let's first remove punctuation and lowercase the words # Load the regular expression library import re # Remove punctuation documents['description_processed'] = documents['description'].map( lambda x: re.sub('[,\.!?]', '', x)) # Convert the titles to lowercase documents['description_processed'] = documents['description_processed'].map( lambda x: x.lower()) # Print out the first rows of papers documents['description_processed'].head() # + from wordcloud import WordCloud # Join the different processed descriptions together long_string = ','.join(list(documents['description_processed'].values)) # Create WordCloud object wordcloud = WordCloud(background_color='white', max_words=5000, contour_width=3, contour_color='steelblue') # Generate a word cloud wordcloud.generate(long_string) # Visualize the word cloude wordcloud.to_image() # - # # Plot ten most frequent words based on documents # + # Load the library with the CountVectorizer method from sklearn.feature_extraction.text import CountVectorizer import numpy as np import 
matplotlib.pyplot as plt import seaborn as sns sns.set_style('whitegrid') # %matplotlib inline # Helper function def plot_10_most_common_words(count_data, count_vectorizer): import matplotlib.pyplot as plt words = count_vectorizer.get_feature_names() total_counts = np.zeros(len(words)) for t in count_data: total_counts += t.toarray()[0] count_dict = (zip(words, total_counts)) count_dict = sorted(count_dict, key=lambda x: x[1], reverse=True)[0:10] words = [w[0] for w in count_dict] counts = [w[1] for w in count_dict] x_pos = np.arange(len(words)) plt.figure(2, figsize=(15, 15/1.6180)) plt.subplot(title='10 most common words') sns.set_context("notebook", font_scale=1.25, rc={"lines.linewidth": 2.5}) sns.barplot(x_pos, counts, palette='husl') plt.xticks(x_pos, words, rotation=90) plt.xlabel('words') plt.ylabel('counts') plt.show() # Initialise the count vectorizer with the English stop words count_vectorizer = CountVectorizer(stop_words='english') # Fit and transform the processed titles count_data = count_vectorizer.fit_transform(documents['description_processed']) # Visualise the 10 most common words plot_10_most_common_words(count_data, count_vectorizer) # - # # LDA model training and results visualization # + from sklearn.decomposition import LatentDirichletAllocation as LDA import warnings warnings.simplefilter("ignore", DeprecationWarning) # Load LDA model from sk-learn # Helper method def print_topics(model, count_vectorizer, n_top_words): words = count_vectorizer.get_feature_names() for topic_idx, topic in enumerate(model.components_): print("\nTopic #{}:".format(topic_idx)) print(" ".join([words[i] for i in topic.argsort()[:-n_top_words - 1:-1]])) # Tweak the two parameters below number_topics = 5 number_words = 10 # Create and fit the LDA model lda = LDA(n_components=number_topics, n_jobs=-1) lda.fit(count_data) # Print the topics found by the LDA model print("Topics found via LDA:") print_topics(lda, count_vectorizer, number_words) # - # # ----------------------------------------------------- # # Attempt another method import nltk nltk.download('stopwords') # # Import the packages required # + import warnings import logging import re import numpy as np import pandas as pd from pprint import pprint # Gensim import gensim import gensim.corpora as corpora from gensim.utils import simple_preprocess from gensim.models import CoherenceModel # spacy for lemmatization import spacy # Plotting tools import pyLDAvis import pyLDAvis.gensim import matplotlib.pyplot as plt # %matplotlib inline # Enable logging for gensim logging.basicConfig( format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR) warnings.filterwarnings("ignore", category=DeprecationWarning) # - # # Prepare the Stopwords # NLTK Stop words from nltk.corpus import stopwords stop_words = stopwords.words('english') stop_words.extend(['from', 'subject', 're', 'edu', 'use']) # # Import dataset # + import pandas as pd import numpy as np # read dataset into pandas dataframe df_items = pd.read_csv('items-Copy1.csv') # replace field that's entirely space (or empty) with NaN df_items = df_items.replace(np.nan, '', regex=True) data_text = df_items[['description']] data_text['index'] = data_text.index documents = data_text # - len(documents) documents[:5] # # Function in order to lemmatize and stem preprocess on the dataset from gensim.parsing.preprocessing import STOPWORDS from nltk.stem import WordNetLemmatizer, SnowballStemmer from nltk.stem.porter import * import numpy as np np.random.seed(2018) def 
lemmatize_stemming(text): # return stemmer.stem(WordNetLemmatizer().lemmatize(text, pos='v')) return simple_preprocess(text) def preprocess(text): result = [] for token in gensim.utils.simple_preprocess(text): if token not in gensim.parsing.preprocessing.STOPWORDS and len(token) > 3: result.append(lemmatize_stemming(token)[0]) return result # # Initialize stemmer stemmer = SnowballStemmer('english') # # Display before and after of preprocessing on example document # + doc_sample = documents[documents['index'] == 300].values[0][0] print('original document: ') words = [] for word in doc_sample.split(' '): words.append(word) print(words) print('\n\n tokenized and lemmatized document: ') print(preprocess(doc_sample)) # - # # Preprocess all description field text save result in 'processed_docs' # + # processed_docs = documents['description'].map(preprocess) # processed_docs[:10] def sent_to_words(sentences): for sent in sentences: sent = re.sub('\S*@\S*\s?', '', sent) # remove emails sent = re.sub('\s+', ' ', sent) # remove newline chars sent = re.sub("\'", "", sent) # remove single quotes sent = gensim.utils.simple_preprocess(str(sent), deacc=True) yield(sent) # # Convert to list # data = df.content.values.tolist() # data_words = list(sent_to_words(data)) # print(data_words[:1]) list(documents['description']) processed_docs = [preprocess(document) for document in list(documents['description'])] processed_docs[:10] # - # # Create Method for Generating N-Grams import nltk nltk.download('punkt') # + from nltk.util import ngrams # Function to generate n-grams from sentences. def extract_ngrams(data, num): n_grams = ngrams(nltk.word_tokenize(data), num) return [ ' '.join(grams) for grams in n_grams] # See trigram example print(processed_docs[0]) print("Trigram: ", extract_ngrams(' '.join(processed_docs[0]), 3)) # - # Remove instances of https in data words as this is not significant for idx, processed_doc in enumerate(processed_docs): if 'https' in processed_doc: processed_docs[idx].remove('https') if 'httpszllwww' in processed_doc: processed_docs[idx].remove('httpszllwww') if 'facebook' in processed_doc: processed_docs[idx].remove('facebook') if 'twitter' in processed_doc: processed_docs[idx].remove('twitter') if 'youtub' in processed_doc: processed_docs[idx].remove('youtub') if 'follow' in processed_doc: processed_docs[idx].remove('follow') if 'instagram' in processed_doc: processed_docs[idx].remove('instagram') if 'channel' in processed_doc: processed_docs[idx].remove('channel') if 'subscrib' in processed_doc: processed_docs[idx].remove('subscrib') if 'vraocdtg' in processed_doc: processed_docs[idx].remove('vraocdtg') if 'subscribe' in processed_doc: processed_docs[idx].remove('subscribe') if 'youtube' in processed_doc: processed_docs[idx].remove('youtube') # Double check for idx, processed_doc in enumerate(processed_docs): if 'https' in processed_doc or 'httpszllwww' in processed_doc or 'facebook' in processed_doc or 'twitter' in processed_doc or 'youtub' in processed_doc or 'follow' in processed_doc or 'instagram' in processed_doc or 'channel' in processed_doc or 'subscrib' in processed_doc or 'vraocdtg' in processed_doc or 'subscribe' in processed_doc or 'youtube' in processed_doc: print('here') data_words_quintgrams = [extract_ngrams(' '.join(processed_doc), 5) for processed_doc in processed_docs] print(processed_docs[100]) print(data_words_quintgrams[100]) # + #data_words_quintgrams # - # # Create the dictionary and Corpus for topic modeling # ### Creates unique id for each word in the 
document. For example: (0, 1) implies word id 0 occurs once in the first document and word id 1 occurs once and so on. import gensim.corpora as corpora # Create Dictionary id2word = corpora.Dictionary(data_words_quintgrams) # Create Corpus texts = data_words_quintgrams # Term Document Frequency corpus = [id2word.doc2bow(text) for text in texts] # View print(corpus[:1]) # # Can also see what word a given id corresponds to, pass the id as a key to the dictionary. id2word[0] # Human readable format of corpus for document 1 [[(id2word[id], freq) for id, freq in cp] for cp in corpus[:1]] # # Build the topic model # Build LDA model lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus, id2word=id2word, num_topics=10, random_state=100, update_every=1, chunksize=100, passes=10, alpha='auto', per_word_topics=True) # # View the topics in the LDA model # ### LDA model above is built with 26 topics. Each topic is a combination of keywords and each keyword contributes a certain weight to the topic. You can see the keywords for each topic and the importance (weight) of each keyword # ### ex. Topic 0 represented as: 0.091*"protest" + 0.076*"hate" + 0.074*"organ" + 0.070*"ralli" + ''0.054*"futur" + 0.053*"charlott" + 0.051*"march" + 0.046*"violenc" + ''0.045*"celebr" + 0.035*"host" # ### This means that the top 10 keywords that contribute to this topic are: 'protest', 'hate', 'organ' and so on so the weight of 'protest is 0.091. The weights reflect how important a keyword is to that topic. One can attempt to summarize the topics from these keywords from pprint import pprint # Print the Keyword in the 10 topics pprint(lda_model.print_topics()) doc_lda = lda_model[corpus] # # Compute model Perplexity and Coherence score # ## These metrics provide a convenient measure to judge how good a given topic model is. # 1. Perplexity -- A measure of how well a probability distribution or probability model predicts a sample. Low perplexity indicates that a probability distribution is good at predicting a sample. # 2. Coherence -- Measures a single topic by measuring the degree of semantic similiarity between high scoring words in the topic. This helps distinguish between topics that are semantically interpretable topics and topics that are artifacts of statistical inference. A set of statements or facts are said to be coherent if they support eachother. 
# + from gensim.models import CoherenceModel # Compute Perplexity print('\nPerplexity: ', lda_model.log_perplexity(corpus)) # Compute Coherence Score coherence_model_lda = CoherenceModel( model=lda_model, texts=data_words_quintgrams, dictionary=id2word, coherence='c_v') coherence_lda = coherence_model_lda.get_coherence() print('\nCoherence Score: ', coherence_lda) # - # # Create preprocessed_doc to index dict in order doc2idx = {idx: doc for idx, doc in enumerate(documents['description'])} proc2idx = {idx: doc for idx, doc in enumerate(processed_docs)} ngram2idx = {idx: doc for idx, doc in enumerate(data_words_quintgrams)} # # Find the dominant topic for each description # ### In order to find out what topic a document is about we can find the topic number that has the highest percentage contribution in that document # + def format_topics_sentences(ldamodel=None, corpus=corpus, texts=documents['description']): # Init output sent_topics_df = pd.DataFrame() # Get main topic in each document for i, row_list in enumerate(ldamodel[corpus]): row = row_list[0] if ldamodel.per_word_topics else row_list # print(row) row = sorted(row, key=lambda x: (x[1]), reverse=True) # Get the Dominant topic, Perc Contribution and Keywords for each document for j, (topic_num, prop_topic) in enumerate(row): if j == 0: # => dominant topic wp = ldamodel.show_topic(topic_num) topic_keywords = ", ".join([word for word, prop in wp]) sent_topics_df = sent_topics_df.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True) else: break sent_topics_df.columns = ['Dominant_Topic', 'Perc_Contribution', 'Topic_Keywords'] # Add original text to the end of the output contents = pd.Series(texts) sent_topics_df = pd.concat([sent_topics_df, contents], axis=1) return(sent_topics_df) df_topic_sents_keywords = format_topics_sentences(ldamodel=lda_model, corpus=corpus, texts=documents['description']) # Format df_dominant_topic = df_topic_sents_keywords.reset_index() df_dominant_topic.columns = [ 'Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text'] documents_idx = [("document #: {}".format(idx)) for idx, row in documents.iterrows()] df_dominant_topic['Text_ID'] = documents_idx df_dominant_topic['Dominant_Topic_ID'] = ["topic #: {}".format( row['Dominant_Topic']) for idx, row in df_dominant_topic.iterrows()] # Save to csv df_dominant_topic.to_csv('dominant_topic.csv') # Show df_dominant_topic # - df_dominant_topic.loc[[2857]] # # Find the most representative document for each topic # ### To help understand the topic, we can find documents a given topic has contributed to the most and infer the topic by reading the document. 
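# The grouped version below does this for every topic at once; as a small added sketch, the
# same lookup for a single topic can be written directly against `df_topic_sents_keywords`
# built above (here topic 0 is used purely as an example).

# +
topic0 = df_topic_sents_keywords[df_topic_sents_keywords['Dominant_Topic'] == 0]
best = topic0.sort_values('Perc_Contribution', ascending=False).head(1)
best
# -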
# + # Group top 5 sentences under each topic sent_topics_sorteddf_mallet = pd.DataFrame() sent_topics_outdf_grpd = df_topic_sents_keywords.groupby('Dominant_Topic') for i, grp in sent_topics_outdf_grpd: sent_topics_sorteddf_mallet = pd.concat([sent_topics_sorteddf_mallet, grp.sort_values(['Perc_Contribution'], ascending=[0]).head(1)], axis=0) # Reset Index sent_topics_sorteddf_mallet.reset_index(drop=True, inplace=True) # function to return key for any value def get_key(val, my_dict): for key, value in my_dict.items(): if val == value: return key # Format sent_topics_sorteddf_mallet.columns = [ 'Topic_Num', "Topic_Perc_Contrib", "Keywords", "Text"] sent_topics_sorteddf_mallet['Text_idx'] = [get_key( row['Text'], doc2idx) for idx, row in sent_topics_sorteddf_mallet.iterrows()] sent_topics_sorteddf_mallet['Text_Processed'] = [ proc2idx[row['Text_idx']] for idx, row in sent_topics_sorteddf_mallet.iterrows()] sent_topics_sorteddf_mallet['Text_NGrams'] = [ ngram2idx[row['Text_idx']] for idx, row in sent_topics_sorteddf_mallet.iterrows()] # Save to csv sent_topics_sorteddf_mallet.to_csv('sent_topics_sorteddf_mallet.csv') # Show sent_topics_sorteddf_mallet # + text_directory_df = pd.DataFrame(list(doc2idx.items())) text_directory_df.columns = ['ID', 'Text_Processed'] text_directory_df['Text_Raw'] = documents['description'] text_directory_df.to_csv('text_directory.csv') text_directory_df # + topic_directory_df = pd.DataFrame(list(lda_model.print_topics())) topic_directory_df.columns = ['ID', 'Topic'] topic_directory_df.to_csv('topic_directory_df.csv') topic_directory_df # + import networkx as nx from networkx.algorithms import bipartite sent_topics_sorteddf_mallet['topic_text'] = sent_topics_sorteddf_mallet[[ 'Topic_Num', 'Text_idx']].apply(tuple, axis=1) # print(list(sent_topics_sorteddf_mallet['topic_text'])) # print('\n') # print(list(sent_topics_sorteddf_mallet['Topic_Num'])) # print('\n') # print(list(sent_topics_sorteddf_mallet['Text_idx'])) # print('\n') # print(list(zip(list(sent_topics_sorteddf_mallet['Topic_Num']), list( # sent_topics_sorteddf_mallet['Text_idx'])))) def plot_bipartite_topic2text(data_frame, subdomian_col, domain_col): B = nx.Graph() B.add_nodes_from(data_frame[subdomian_col], bipartite=0) B.add_nodes_from(data_frame[domain_col], bipartite=1) B.add_weighted_edges_from([(row[domain_col], row[subdomian_col], 1) for idx, row in data_frame.iterrows()], weight='weight') # print(B.edges(data=True)) plt.figure(3, figsize=(25, 25)) pos = {node: [0, i] for i, node in enumerate(data_frame[domain_col])} pos.update({node: [1, i] for i, node in enumerate(data_frame[subdomian_col])}) nx.draw(B, pos, with_labels=False) for p in pos: # raise text positions pos[p][1] += 0.25 nx.draw_networkx_labels(B, pos, node_size=65, font_size=17) plt.savefig('bipartite_graph_doc2topic.png') plt.show() #plot_bipartite_topic2text(sent_topics_sorteddf_mallet, 'Topic_Num', 'Text_idx') plot_bipartite_topic2text(df_dominant_topic.head(20), 'Dominant_Topic_ID', 'Text_ID') # + import seaborn as sns df_dominant = df_dominant_topic[['Topic_Perc_Contrib', 'Dominant_Topic', 'Document_No']].copy() print(len(df_dominant[(df_dominant['Dominant_Topic'] != 5.0)])) print(df_dominant[(df_dominant['Dominant_Topic'] != 5.0)]) df_dominant = df_dominant[df_dominant.columns].astype(float) df_dominant = df_dominant.round(1) plt.figure(figsize=(20,15)) print(df_dominant) result = df_dominant.pivot(index='Document_No', columns='Dominant_Topic', values='Topic_Perc_Contrib') sns.heatmap(result, fmt="g", cmap='viridis') 
plt.savefig('Dominant_Topics.png') plt.show() # - # # Let's analyze the LDA model results through visualization, we will use pyLDAvis to interact with the results in order to: # 1. Understand the individual results better -- Manually select each topic to view it's top most frequent and/or "relevant terms using different values of λ. This can be of aid when assigning human interpretable name or "meaning" to each topic. # 2. Understand the relationships between the topics -- Explore Intertopic Distance Plot that can assist in learning how topics relate to each other to reveal potential higher-level structure between groups of topics. # # <ul> # <li>Each buble on the left-hand side represents a topic, the larger the topic, the more prevalent that topic is.</li> # # <li>A good topic model should have fairly large, non-overlapping bubbles scattered throughout the chart instead of being clustered in one quadrant.</li> # # <li>A model with too many topics will typically have many overlaps and small sized bubbles clustered in one region.</li> # # <li>If you hover over one of the bubbles, the words and bars on the right side will update, these words are the salient keywords that form the selected topic.</li> # # <li>The alpha parameter represents document-topic density -- a higher alpha means that documents are made up of more topics and result in more specific topic distributions per document.</li> # </ul> # + # %%time from pyLDAvis import sklearn as sklearn_lda import pickle import pyLDAvis # Visualize the topics pyLDAvis.enable_notebook() vis = pyLDAvis.gensim.prepare(lda_model, corpus, id2word) pyLDAvis.save_html(vis, 'lda.html')
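# Added note: `pyLDAvis.gensim` is a submodule, so the call above likely needs an explicit
# `import pyLDAvis.gensim` to work. Also, in pyLDAvis >= 3.x the helper was renamed, in
# which case the equivalent is expected to be:
#   import pyLDAvis.gensim_models as gensimvis
#   vis = gensimvis.prepare(lda_model, corpus, id2word)
# Both points depend on the installed pyLDAvis version.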
latent_dirichlet_allocation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Effects of GoBike Users Behaviour on Its Demand # ## by <NAME> # + [markdown] slideshow={"slide_type": "slide"} # ## Investigation Overview # # In this analysis, I investigated users' features and relate them to bike trips, which can be used to predict the demand for bikes. The main focus was on the members' age, gender, and user type, but the duration of trips, start time weekday of trips, and distance of trips were explored variables. # # ## Dataset Overview # # The dataset provides trip records for the month of February, 2019 only. After some wangling efforts, there were 175,194 bike trips and 22 features in the dataset. The main features of interest are Trip Duration (seconds), Start Station Name, End Station Name, # User Type (Subscriber or Customer – “Subscriber” = Member or “Customer” = Casual), Member GenderBike, Share For All Trip, Age Group, Trip Duration (minutes), Start Time (hours), Start Time Day of the WeekEnd Time (hours), End Time Day of the Week, and Distance (miles). # + slideshow={"slide_type": "skip"} # import all packages and set plots to be embedded inline import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sb # %matplotlib inline # suppress warnings from final output import warnings warnings.simplefilter("ignore") # + slideshow={"slide_type": "skip"} # load in the dataset into a pandas dataframe bike = pd.read_csv('fordgobike_tripdata_clean.csv') # + [markdown] slideshow={"slide_type": "slide"} # ## Distribution of Bike Trip Duration (seconds) # # Duration in seconds has a very large range of values. Plotted on a log-scale, the distribution takes on a unimodal shape and has a peak between 300 and 900. It is obvious that bike trips are usually within an hour mark (3600 seconds). # + slideshow={"slide_type": "subslide"} # univariate plot of bike trip duration in second log_binsize = 0.04 bins = 10 ** np.arange(1.7, np.log10(bike['duration_sec'].max())+log_binsize, log_binsize) plt.figure(figsize=[8, 5]) plt.hist(data = bike, x = 'duration_sec', bins = bins) plt.xscale('log') plt.xticks([100, 300, 900, 3000, 9000], ['100', '300', '900', '3000', '9000']) plt.xlabel('Duration in Seconds') plt.ylabel('Bike Trip') plt.title ('Bike Trip Duration in Seconds') plt.show() # + [markdown] slideshow={"slide_type": "slide"} # ## Distribution of Bike Trip Duration (minutes) # # The distribution has a long tailed end, where bike trips on average are between 4 and 10 minute range, with 75% of trips being under 14 minutes. This suggests that users are taking the bikes on short distances. # + slideshow={"slide_type": "subslide"} # plotting with a different bin size and tick labels bin_edges = np.arange(0, 45, 1) ticks = [0, 5, 10, 15, 20, 25, 30, 35, 40, 45] labels = ['{}'.format(val) for val in ticks] plt.figure(figsize=[8, 5]) plt.hist(data = bike, x = 'duration_min', bins = bin_edges, rwidth = 0.6); plt.title("Bike Trip Duaration in Minutes") plt.xlabel('Duration in Minutes') plt.ylabel('Bike Trip') plt.xticks(ticks, labels) plt.show() # + [markdown] slideshow={"slide_type": "slide"} # ## Distribution of Member Age # # 75% of users are male, 23% are female and 2% are other gender. 
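# + slideshow={"slide_type": "skip"}
# Added check (a small sketch): the gender shares quoted above can be read directly from
# the `bike` dataframe loaded earlier; `member_gender` is the same column plotted below.
print(bike['member_gender'].value_counts(normalize=True).mul(100).round(1))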
# + slideshow={"slide_type": "subslide"} # import necessary library from matplotlib.ticker import EngFormatter plt.figure(figsize = [8,6]) # create the plot base_color = sb.color_palette()[0] sb.countplot(data = bike, x = 'member_gender', color = base_color) plt.title("Bike Trips vs. Member's Gender", fontsize=14, y=1.01) plt.ylabel('Bike Trip') plt.xlabel(" Member Gender") # add annotations n_points = bike.shape[0] cat_counts = bike['member_gender'].value_counts() locs, labels = plt.xticks() # get the current tick locations and labels # loop through each pair of locations and labels for loc, label in zip(locs, labels): ## get the text property for the label to get the correct count count = cat_counts[label.get_text()] pct_string = '{:0.0f}%'.format(100*count/n_points) # print the annotation just below the top of the bar plt.text(loc, count-8, pct_string, ha = 'center', color = 'black') plt.gca().yaxis.set_major_formatter(EngFormatter()) # + [markdown] slideshow={"slide_type": "slide"} # ## Distribution of Trip Distance (miles) # # Trips are a mile on average, with users in the 3rd quartile ride about 1.38 miles. This reveals that on average, users go on a short trips, mostly under 2 miles. # + slideshow={"slide_type": "skip"} # trip distance summary bike.distance_miles.describe() # + slideshow={"slide_type": "subslide"} # now, let's look at the trip distance in miles bins = np.arange(0, 5+0.5, 0.5) plt.hist(data = bike, x = 'distance_miles', bins = bins); plt.title('Bike Trip Distance in Miles') plt.xlabel('Distance (miles)') plt.ylabel('Bike Trip') plt.show() # + [markdown] slideshow={"slide_type": "slide"} # ## Most Popular Start Stations # # Market St is the most popular start station followed by San Francisco Caltrain Station 2. # + slideshow={"slide_type": "skip"} # create top 5 start station count start_station_count = bike['start_station_name'].value_counts() start_station_count = start_station_count[:5,] start_station_count # + slideshow={"slide_type": "subslide"} start_station_count.plot('barh', figsize=(15,8), color = 'cornflowerblue', title='Most Popular Start Station').set_xlabel("Bike Trip"); # + [markdown] slideshow={"slide_type": "slide"} # ## Most Popular End Stations # # San Francisco Caltrain Station 2 is observed to overtake Market St as the most popular end station. # + slideshow={"slide_type": "skip"} # create top 5 end station end_station_count = bike['end_station_name'].value_counts() end_station_count = end_station_count[:5,] end_station_count # + slideshow={"slide_type": "subslide"} end_station_count.plot('barh', figsize=(15,8), color = 'cornflowerblue', title='Most Popular End Station').set_xlabel("Bike Trip"); # + [markdown] slideshow={"slide_type": "slide"} # ## Relationship in Users and Their Behviours I # # #### Weekday vs. User Type # The trip number of subscribers is much higher than customers. # # #### Weekday vs. Member Gender # Male complete 2 times or more trips as female. # # #### Weeday vs. Bike Share for All Trip # Bike share for all trip by users are very low compare to users that do not use bike share for all trip # # #### Weekday vs. 
Age Group # Users between the ages (25-40) called Millennials use bike share the most in all weekdays # + slideshow={"slide_type": "subslide"} # plot the relationship between categorical variables fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2, figsize = [15,10]) plt.suptitle('Relationships in Users and their Behavior I', fontsize=14, y=1.025) # subplot 1: weekday vs user type plt.subplot(2, 2, 1) sb.countplot(data = bike, x = 'start_time_weekday', hue = 'user_type', palette = 'Blues') plt.ylabel('Bike Trip') # subplot 2: weekday vs. member gender plt.subplot(2, 2, 2) sb.countplot(data = bike, x = 'start_time_weekday', hue = 'member_gender', palette = 'Blues') plt.ylabel('Bike Trip') # subplot 3: weekday vs. , bike share for all trip plt.subplot(2, 2, 3) sb.countplot(data = bike, x = 'start_time_weekday', hue = 'bike_share_for_all_trip', palette = 'Blues') plt.ylabel('Bike Trip') # subplot 3: weekday vs.age group plt.subplot(2, 2, 4) sb.countplot(data = bike, x = 'start_time_weekday', hue = 'age_group', palette = 'Greens_r') plt.ylabel('Bike Trip') plt.tight_layout() # + [markdown] slideshow={"slide_type": "slide"} # ## Relationship in Users and Their Behviours II # # #### Member Gender vs. Age Group # There are more males than females and other genders. Both male and female gender have more users in the age group called Millennials # # #### Age Group vs. Member Gender # Males dominate more in all the age groups # # #### User Type vs. Member Gender # Males are the highest gender of users both as customers and as subscribers # # #### Member Gender vs. User Type # Subcribers in male are more than in female, followed by other gender # + slideshow={"slide_type": "subslide"} # plot the relationship between categorical variables fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2,figsize = [14,8]) base_color = sb.color_palette()[0] plt.suptitle('Relationship in users and their Behavior II', fontsize=14, y=1.025) ax1 = plt.subplot(2,2,1) ax1=sb.countplot(data = bike, x = 'member_gender', hue = 'age_group', palette = 'Blues') ax1= plt.xlabel("Gender") ax1= plt.ylabel('Bike Trip') ax2 = plt.subplot(2,2,2) ax2=sb.countplot(data = bike, x = 'age_group', hue = 'member_gender', palette = 'Greens') ax2= plt.xlabel("Generaton") ax2= plt.ylabel('Bike Trip') ax3 = plt.subplot(2,2,3) ax3=sb.countplot(data = bike, x = 'user_type', hue = 'member_gender', palette = 'Blues') ax3= plt.xlabel('User Type') ax3= plt.ylabel('Bike Trip') ax4 = plt.subplot(2,2,4) ax4=sb.countplot(data = bike, x = 'member_gender' , hue = 'user_type', palette = 'Greens') ax4= plt.xlabel("Gender") ax4= plt.ylabel('Bike Trip') plt.tight_layout() # + [markdown] slideshow={"slide_type": "slide"} # ## Duration and Distance by User Type # # From the faceted heat maps below, It is observed that there is an effect of user type on the relationship between usage duration in seconds and distance in miles. # # For user type, customers tend to go on a long trip, that is, they use bike share for long durations and distances as compared to subscribers, whose trips are in short duration and distance. The more focused aspect in subscribers also revealed that subscribers are looking for a consistent experience, which is daily commute as compared to customers which use bike share for different purposes. 
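# + slideshow={"slide_type": "skip"}
# Added numeric check (a small sketch): median trip duration and distance per user type,
# which is the pattern the faceted heat maps below visualize.
print(bike.groupby('user_type')[['duration_sec', 'distance_miles']].median())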
# + slideshow={"slide_type": "skip"} # define heatmap function def hist2dgrid(x, y, **kwargs): """ Quick hack for creating heat maps with seaborn's PairGrid.""" palette = kwargs.pop('color') bin_size_x = 250 bin_size_y = 0.4 bins_x = np.arange(0, 5000+bin_size_x, bin_size_x) bins_y = np.arange(0, 7+bin_size_y, bin_size_y) plt.hist2d(x, y, bins = [bins_x, bins_y], cmap = palette, cmin = 0.5) # + slideshow={"slide_type": "subslide"} # create faceted heat maps on user_type g = sb.FacetGrid(data = bike, col = 'user_type', col_wrap = 3, size = 4) g.map(hist2dgrid, 'duration_sec', 'distance_miles', color = 'inferno_r') g.set_xlabels('Duration (sec)') g.set_ylabels('Distance (miles)') plt.show() # + [markdown] slideshow={"slide_type": "slide"} # ## Duration by User Type and Age # # Here, age does not have any effect in predicting demand using bike duration of trip. Instead, there is a significant difference in duration of trip between subscriber and customer. Subscriber generally uses bike for short distance trip and short duration trip unlike Customer that uses bike for long distance trip and duration trip. # + slideshow={"slide_type": "skip"} # select dataset for member aged between 25 to 40 df_flag = ((bike['member_age'] >= 25) & (bike['member_age'] <= 40)) df_plot = bike.loc[df_flag,:] # + slideshow={"slide_type": "subslide"} # setting the figure size fig = plt.figure(figsize = [8,6]) # point plot of age against duration with user_type ax = sb.pointplot(data = df_plot, x = 'member_age', y = 'duration_sec', hue = 'user_type', palette = 'Blues', linestyles = '', dodge = 0.4) plt.title('Duration of Trip across User Type and Age') plt.ylabel('Duration (sec)') plt.xlabel('Age') ax.set_yticklabels([],minor = True) plt.show(); # + slideshow={"slide_type": "subslide"} # set the figure size fig = plt.figure(figsize = [8,6]) # point plot of age against duration with user_type ax = sb.pointplot(data = df_plot, x = 'member_age', y = 'distance_miles', hue = 'user_type', palette = 'Blues', linestyles = '', dodge = 0.4) plt.title('Distance of Trip across User Type and Age') plt.ylabel('Distance (miles)') plt.xlabel('Age') ax.set_yticklabels([],minor = True) plt.show(); # + [markdown] slideshow={"slide_type": "slide"} # ## Duration by Weekday and User Type # # As observed in the previous section, Customers trip duration is relatively high compare to subcribers trip duration. But more interestingly, there is a steep jump or increase in both customers duration and subcribers duration on weekends (Saturday and Sunday). # + slideshow={"slide_type": "subslide"} # plot the interactions between categorical variables and numeric variable plt.figure(figsize = [16, 4]) plt.suptitle("Weekday vs. Duration and User Type", fontsize = 14) ax1=plt.subplot(1,2, 1) ax1=sb.pointplot(data = bike, x= 'start_time_weekday', y = 'duration_sec', hue = 'user_type', palette = 'Blues', linestyles = '') ax1=plt.ylabel('Duration (seconds)') ax1=plt.title('Weekday vs. Duration of Trip') ax1=plt.yticks([400, 600, 800, 1000, 1200, 1400, 1600, 1800]) # 30 minutes ax1=plt.legend(title='User Type') ax2=plt.subplot(1, 2, 2) ax2=sb.pointplot(data = bike, x= 'start_time_weekday', y = 'duration_sec', hue = 'user_type', palette = 'Blues', linestyles = '') ax2=plt.ylabel('Duration (seconds)') ax2=plt.title('Weekday vs. 
Duration of Trip') ax2=plt.yticks([700,1.3e3, 1.9e3, 2.5e3, 3.1e3, 3.6e3, 4e3]) ax2=plt.legend(title='User Type') plt.tight_layout() # + [markdown] slideshow={"slide_type": "slide"} # ## Duration by Weekday and Age Group # # There are more short time trips on working days (Mon-Fri). The duration of trip of age range Gen Z, Millennials and Gen X is more stable than the older range. These have higher variation. # + slideshow={"slide_type": "subslide"} # plot the interactions between categorical variables and numeric variable plt.figure(figsize = [16, 4]) plt.suptitle("Weekday vs. Duration and Member's age", fontsize = 14) ax1=plt.subplot(1,2, 1) ax1=sb.pointplot(data = bike, x= 'start_time_weekday', y = 'duration_sec', hue = 'age_group', palette = 'Blues', linestyles = '') ax1=plt.ylabel('Duration (seconds)') ax1=plt.title('Weekday vs. Duration of Trip') ax1=plt.yticks([400, 600, 800, 1000, 1200, 1400, 1600, 1800]) # 30 minutes ax1=plt.legend(title='Generation') ax2=plt.subplot(1, 2, 2) ax2=sb.pointplot(data = bike, x= 'start_time_weekday', y = 'duration_sec', hue = 'age_group', palette = 'Blues', linestyles = '') ax2=plt.ylabel('Duration (seconds)') ax2=plt.title('Weekday vs. Duration of Trip') ax2=plt.yticks([700,1.3e3, 1.9e3, 2.5e3, 3.1e3, 3.6e3, 4e3]) ax2=plt.legend(title='Generation') plt.tight_layout()
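# Added summary (a small sketch): the weekday pattern plotted above, as a table of mean
# trip durations (seconds) by weekday and age group.
print(bike.groupby(['start_time_weekday', 'age_group'])['duration_sec'].mean().unstack().round(0))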
slide_deck_james.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/krakowiakpawel9/neural-network-course/blob/master/02_basics/07_nn_implementation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="NmSZRhSCUOB-" colab_type="text" # * @author: <EMAIL> # * @site: e-smartdata.org # + [markdown] id="hsUWcfin6ZlT" colab_type="text" # ### Implementacja prostej sieci neuronowej # # ##### Kroki: # 1. Zainicjowanie parametrów sieci # 2. Propagacja wprzód # 3. Obliczenie błędu predykcji # 4. Propagacja wsteczna (uczenie modelu) # 5. Test działania modelu # # + id="XkDZb9Nt04rg" colab_type="code" colab={} import numpy as np import pandas as pd np.random.seed(42) X = np.array([1.4, 0.7]) y_true = np.array([1.8]) # + id="zc5rkn_m1IjB" colab_type="code" colab={} def initialize_parameters(n_x, n_h, n_y): W1 = np.random.rand(n_h, n_x) W2 = np.random.rand(n_h, n_y) return W1, W2 # + id="G43XdqDa1exJ" colab_type="code" colab={} def forward_propagation(X, W1, W2): H1 = np.dot(X, W1) y_pred = np.dot(H1, W2) return H1, y_pred # + id="XbaAd2jt1v8t" colab_type="code" colab={} def calculate_error(y_pred, y_true): return y_pred - y_true # + id="U3QFBEn_14Lj" colab_type="code" colab={} def predict(X, W1, W2): _, y_pred = forward_propagation(X, W1, W2) return y_pred[0] # + id="rClKyNy02CnX" colab_type="code" colab={} def backpropagation(X, W1, W2, learning_rate, iters=1000, precision=0.0000001): H1, y_pred = forward_propagation(X, W1, W2) train_loss = [] for i in range(iters): error = calculate_error(y_pred, y_true) W2 = W2 - learning_rate * error * H1.T W1 = W1 - learning_rate * error * np.dot(X.T, W2.T) y_pred = predict(X, W1, W2) print(f'Iter #{i}: y_pred {y_pred}: loss: {abs(calculate_error(y_pred, y_true[0]))}') train_loss.append(abs(calculate_error(y_pred, y_true[0]))) if abs(error) < precision: break return W1, W2, train_loss # + id="phVhArP520f3" colab_type="code" colab={} def build_model(): W1, W2 = initialize_parameters(2, 2, 1) W1, W2, train_loss = backpropagation(X, W1, W2, 0.01) model = {'W1': W1, 'W2': W2, 'train_loss': train_loss} return model # + id="Sg7M4xDv3JFT" colab_type="code" outputId="19f5d852-3b8d-4c06-b7a6-b4dc35c10e54" colab={"base_uri": "https://localhost:8080/", "height": 1000} model = build_model() # + id="mR5WBYto4NV2" colab_type="code" outputId="993c5bc2-eadb-410a-8a8f-c01ed2d4a13a" colab={"base_uri": "https://localhost:8080/", "height": 204} loss = pd.DataFrame({'train_loss': model['train_loss']}) loss = loss.reset_index().rename(columns={'index': 'iter'}) loss['iter'] += 1 loss.head() # + id="-UV2RRs_5H7v" colab_type="code" outputId="c0912c24-8673-44e4-860a-e26712bf5fe8" colab={"base_uri": "https://localhost:8080/", "height": 542} import plotly.graph_objects as go fig = go.Figure(data=go.Scatter(x=loss['iter'], y=loss['train_loss'], mode='markers+lines')) fig.show() # + id="2s66fHa43LDk" colab_type="code" colab={} predict(X, model['W1'], model['W2'])
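# Added check (a small sketch): compare the final prediction with the target value
# defined at the top of the notebook (y_true = [1.8]).
y_hat = predict(X, model['W1'], model['W2'])
print(f'y_pred = {y_hat:.4f}, y_true = {y_true[0]}, abs. error = {abs(y_hat - y_true[0]):.6f}')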
02_basics/07_nn_implementation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np

# Load the 512x512 density projection stored as raw float32 values
fname = "density_projection.dat"
d = np.fromfile(fname, dtype=np.float32)
d = np.reshape(d, (512, 512))

# Load the matching 512x512 energy projection
fname = "energy_projection.dat"
energy = np.fromfile(fname, dtype=np.float32)
energy = np.reshape(energy, (512, 512))

# Show both fields on a log scale
d_log = np.log10(d)
f = plt.figure(figsize=(7, 7))
plt.imshow(d_log)

e_log = np.log10(energy)
f = plt.figure(figsize=(7, 7))
plt.imshow(e_log)

# +
# Map log density to value (brightness) and its complement to saturation
d_min = d_log.min()
d_max = d_log.max()
v = (d_log - d_min) / (d_max - d_min)
s = 1.0 - v

# +
# Map log energy to hue, restricted to the 0.6-0.8 range
e_min = e_log.min()
e_max = e_log.max()
h = 0.8 - 0.2 * (e_log - e_min) / (e_max - e_min)

# +
# Assemble the HSV image and convert it to RGB
hsv_image = np.zeros((512, 512, 3))
hsv_image[:, :, 0] = h
hsv_image[:, :, 1] = s
hsv_image[:, :, 2] = v

rgb_image = colors.hsv_to_rgb(hsv_image)
# -

f = plt.figure(figsize=(7, 7))
plt.imshow(rgb_image)

plt.imsave("test.png", rgb_image)
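# +
# Added sketch: the projection size (512) is hard-coded above; it can instead be inferred
# from the file itself, assuming the dump is a square grid of float32 values.
d_check = np.fromfile("density_projection.dat", dtype=np.float32)
n = int(np.sqrt(d_check.size))
print("inferred grid size:", n, d_check.reshape((n, n)).shape)
# -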
simulation-visualization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Task -1 # + mutiline_text = """It is a truth universally acknowledged, that a single man in possession of a good fortune, must be in want of a wife. However little known the feelings or views of such a man may be on his first entering a neighbourhood, this truth is so well fixed in the minds of the surrounding families, that he is considered the rightful property of some one or other of their daughters. "My dear Mr. Bennet," said his lady to him one day, "have you heard that Netherfield Park is let at last?" Mr. Bennet replied that he had not. "But it is," returned she; "for Mrs. Long has just been here, and she told me all about it." Mr. Bennet made no answer. "Do you not want to know who has taken it?" cried his wife impatiently. "You want to tell me, and I have no objection to hearing it." This was invitation enough. "Why, my dear, you must know, Mrs. Long says that Netherfield is taken by a young man of large fortune from the north of England; that he came down on Monday in a chaise and four to see the place, and was so much delighted with it, that he agreed with Mr. Morris immediately; that he is to take possession before Michaelmas, and some of his servants are to be in the house by the end of next week." "What is his name?" "Bingley." "Is he married or single?" "Oh! Single, my dear, to be sure! A single man of large fortune; four or five thousand a year. What a fine thing for our girls!" "How so? How can it affect them?" "My dear <NAME>," replied his wife, "how can you be so tiresome! You must know that I am thinking of his marrying one of them." "Is that his design in settling here?" "Design! Nonsense, how can you talk so! But it is very likely that he may fall in love with one of them, and therefore you must visit him as soon as he comes." "I see no occasion for that. You and the girls may go, or you may send them by themselves, which perhaps will be still better, for as you are as handsome as any of them, <NAME> may like you the best of the party." "My dear, you flatter me. I certainly have had my share of beauty, but I do not pretend to be anything extraordinary now. When a woman has five grown-up daughters, she ought to give over thinking of her own beauty." "In such cases, a woman has not often much beauty to think of." "But, my dear, you must indeed go and see <NAME> when he comes into the neighbourhood." "It is more than I engage for, I assure you." "But consider your daughters. Only think what an establishment it would be for one of them. Sir William and <NAME> are determined to go, merely on that account, for in general, you know, they visit no newcomers. Indeed you must go, for it will be impossible for us to visit him if you do not." "You are over-scrupulous, surely. I dare say <NAME> will be very glad to see you; and I will send a few lines by you to assure him of my hearty consent to his marrying whichever he chooses of the girls; though I must throw in a good word for my little Lizzy." "I desire you will do no such thing. Lizzy is not a bit better than the others; and I am sure she is not half so handsome as Jane, nor half so good-humoured as Lydia. But you are always giving her the preference." 
"They have none of them much to recommend them," replied he; "they are all silly and ignorant like other girls; but Lizzy has something more of quickness than her sisters." "Mr. Bennet, how can you abuse your own children in such a way? You take delight in vexing me. You have no compassion for my poor nerves." "You mistake me, my dear. I have a high respect for your nerves. They are my old friends. I have heard you mention them with consideration these last twenty years at least." "Ah, you do not know what I suffer." "But I hope you will get over it, and live to see many young men of four thousand a year come into the neighbourhood." "It will be no use to us, if twenty such should come, since you will not visit them." "Depend upon it, my dear, that when there are twenty, I will visit them all." Mr. Bennet was so odd a mixture of quick parts, sarcastic humour, reserve, and caprice, that the experience of three-and-twenty years had been insufficient to make his wife understand his character. Her mind was less difficult to develop. She was a woman of mean understanding, little information, and uncertain temper. When she was discontented, she fancied herself nervous. The business of her life was to get her daughters married; its solace was visiting and news. """ # - type(mutiline_text) len(mutiline_text) # ### Task -2 mutiline_text = mutiline_text.replace('\n', "") mutiline_text # remove special chars, punctuation etc. cleaned_multiline_text = "" for char in mutiline_text: if char == " ": cleaned_multiline_text += char elif char.isalnum(): # using the isalnum() method of strings. cleaned_multiline_text += char else: cleaned_multiline_text += " " cleaned_multiline_text # + # Another way of doing this (Faster and less code) import re cleaned_multiline_text_2 = re.sub(r'[?|$|.|!|"|,|;|:]',r'',mutiline_text) cleaned_multiline_text_2 # - # The above uses Regular Expression. Which will be introduced at a later time # ### Task - 3 list_of_words = cleaned_multiline_text.split() list_of_words len(list_of_words) # ### Task - 4 # Use set to get unique words unique_words_as_list = list(set(list_of_words)) len(unique_words_as_list) # Use dict to do the same unique_words_as_dict = dict.fromkeys(list_of_words) len(list(unique_words_as_dict.keys())) # ### Task -5 for word in list_of_words: if unique_words_as_dict[word] is None: unique_words_as_dict[word] = 1 else: unique_words_as_dict[word] += 1 unique_words_as_dict
Lesson01/Activity01/.ipynb_checkpoints/Student Activity 01 - Solutions-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.11 64-bit (''cv'': conda)' # name: python3 # --- # - scikit-image https://scikit-image.org # - OpenCV https://opencv.org # - Python Image Library (PIL) http://www.pythonware.com/products/pil/ (deplicated) # - Pillow or (PIL) https://pillow.readthedocs.io/en/stable/ # + import matplotlib.pyplot as plt # %matplotlib inline plt.gray(); from matplotlib.pyplot import imshow import skimage from skimage.io import imread, imsave from skimage.color import rgb2gray from skimage.transform import resize from skimage.filters import threshold_otsu, threshold_local from skimage.exposure import histogram, adjust_gamma from skimage.morphology import square from skimage import measure, color, morphology import cv2 import numpy as np from ipywidgets import interact, interactive, fixed, RadioButtons import ipywidgets as widgets from IPython.display import display # - # # 画像ダウンロード # + import requests def download(url, filename): with open(filename, 'wb') as saveFile: saveFile.write(requests.get(url).content) # TheDefiniteArticle - Own work CC BY-SA 4.0 # https://commons.wikimedia.org/wiki/File:Woy_Woy_Channel_-_Vignetted.jpg download( 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/lake.jpg', # 'https://upload.wikimedia.org/wikipedia/commons/0/03/Woy_Woy_Channel_-_Vignetted.jpg', 'lake.jpg') # Pixabay License # https://pixabay.com/photos/girl-portrait-looking-young-female-919048/ download( 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/girl.jpg', # 'https://cdn.pixabay.com/photo/2015/09/02/13/24/girl-919048_1280.jpg', 'girl.jpg') # Baka888 - Own work CC BY-SA 4.0 # https://commons.wikimedia.org/wiki/File:Worldflags19.png download( 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/flag.png', # 'https://upload.wikimedia.org/wikipedia/commons/b/bb/Worldflags19.png', 'flag.png') # yellowcloud CC BY 2.0 # https://www.flickr.com/photos/yellowcloud/6375883291 download( 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/IR_cat.jpg', # 'https://live.staticflickr.com/6226/6375883291_972be61f52_w_d.jpg', 'IR_cat.jpg') # <NAME>~commonswiki CC BY-SA 3.0 # https://commons.wikimedia.org/w/index.php?curid=555450 download( 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/salt_and_pepper.png', # 'https://upload.wikimedia.org/wikipedia/commons/3/32/Dead_tree_salt_and_pepper.png', 'salt_and_pepper.png') # Pixabay License # https://pixabay.com/ja/illustrations/ハニカム-蜂の巣-自然-2891372/ download( 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/honeycomb.jpg', # 'https://cdn.pixabay.com/photo/2017/10/26/15/05/honeycomb-2891372_1280.jpg', 'honeycomb.jpg') # <NAME> - Own work CC BY-SA 3.0 # https://commons.wikimedia.org/wiki/File:Text.JPG download( 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/text.jpg', # 'https://upload.wikimedia.org/wikipedia/commons/thumb/3/3e/Text.JPG/640px-Text.JPG', 'text.jpg') # 由紀奈 / CC0 # https://commons.wikimedia.org/wiki/File:Start.wav download( 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/start.wav', # 'https://upload.wikimedia.org/wikipedia/commons/9/93/Start.wav', 'start.wav') # Shirt58 CC0 # 
https://en.wikipedia.org/wiki/File:Shirt58_handwriting.png download( 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/writing.png', # 'https://upload.wikimedia.org/wikipedia/en/thumb/5/5d/Shirt58_handwriting.png/800px-Shirt58_handwriting.png', 'writing.png') # Tkgd2007 - Own work CC BY-SA 3.0 # https://commons.wikimedia.org/wiki/File:Human_evolution.svg download( 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/evol.png', # 'https://upload.wikimedia.org/wikipedia/commons/thumb/6/69/Human_evolution.svg/600px-Human_evolution.svg.png', 'evol.png') # Designguy84 - Photo CC BY-SA 3.0 # https://commons.wikimedia.org/wiki/File:Early_Imperial_Australian_Coins.jpg download( 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/coins.jpg', # 'https://upload.wikimedia.org/wikipedia/commons/thumb/b/bd/Early_Imperial_Australian_Coins.jpg/320px-Early_Imperial_Australian_Coins.jpg', 'coins.jpg') # <NAME> CC BY-SA 4.0 # https://commons.wikimedia.org/wiki/File:Taxi_Noord_business_card,_Oude_Pekela_(2020)_02.jpg download( 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/card.jpg', # 'https://upload.wikimedia.org/wikipedia/commons/thumb/2/29/Taxi_Noord_business_card%2C_Oude_Pekela_%282020%29_02.jpg/640px-Taxi_Noord_business_card%2C_Oude_Pekela_%282020%29_02.jpg', 'card.jpg') # Diliff - Own work CC BY-SA 2.5 # http://en.wikipedia.org/wiki/File:Colosseum_in_Rome,_Italy_-_April_2007.jpg download( 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/Colosseum.jpg', # 'https://upload.wikimedia.org/wikipedia/commons/thumb/5/53/Colosseum_in_Rome%2C_Italy_-_April_2007.jpg/640px-Colosseum_in_Rome%2C_Italy_-_April_2007.jpg', 'Colosseum.jpg') download( 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/Colosseum_original.jpg', # 'https://upload.wikimedia.org/wikipedia/commons/5/53/Colosseum_in_Rome%2C_Italy_-_April_2007.jpg', 'Colosseum_original.jpg') # # Peter Haas CC BY-SA 3.0 # # https://commons.wikimedia.org/wiki/File:Notre-Dame_de_Paris_2013-07-24.jpg download( 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/Nortre-Dame-1.jpg', # 'https://upload.wikimedia.org/wikipedia/commons/thumb/a/af/Notre-Dame_de_Paris_2013-07-24.jpg/355px-Notre-Dame_de_Paris_2013-07-24.jpg', 'Nortre-Dame-1.jpg') # # <NAME> CC BY-SA 4.0 # # https://commons.wikimedia.org/wiki/File:Paris,_Notre_Dame_--_2014_--_1445.jpg download( 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/Nortre-Dame-2.jpg', # 'https://upload.wikimedia.org/wikipedia/commons/thumb/1/11/Paris%2C_Notre_Dame_--_2014_--_1445.jpg/301px-Paris%2C_Notre_Dame_--_2014_--_1445.jpg', 'Nortre-Dame-2.jpg') # CC0 1.0 # https://openclipart.org/detail/247372/woman-walking download( 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/sil.png', # 'https://openclipart.org/image/400px/svg_to_png/247372/WomanWalking.png', 'sil.png') # NASA Public domain # File:Moon right-view (Clementine dataset).png download( 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/moon.png', # 'https://upload.wikimedia.org/wikipedia/commons/thumb/4/41/Moon_right-view_%28Clementine_dataset%29.png/800px-Moon_right-view_%28Clementine_dataset%29.png', 'moon.png') # OpenCV BSD download( 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/aloeL.jpg', 
# 'https://raw.githubusercontent.com/opencv/opencv/master/samples/data/aloeL.jpg', 'aloeL.jpg') download( 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/aloeR.jpg', # 'https://raw.githubusercontent.com/opencv/opencv/master/samples/data/aloeR.jpg', 'aloeR.jpg') # + # <NAME> (Photographer) - <NAME>, WebHDR CC BY-SA 2.0 # https://commons.wikimedia.org/wiki/File:HDRI_Sample_Scene_Window_-_01.jpg # .. # https://commons.wikimedia.org/wiki/File:HDRI_Sample_Scene_Window_-_12.jpg urls = [ 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/hdr_00.jpg', 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/hdr_01.jpg', 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/hdr_02.jpg', 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/hdr_03.jpg', 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/hdr_04.jpg', 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/hdr_05.jpg', 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/hdr_06.jpg', 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/hdr_07.jpg', 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/hdr_08.jpg', 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/hdr_09.jpg', 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/hdr_10.jpg', 'https://raw.githubusercontent.com/tttamaki/lecture_code/main/ImageProcessing/images/hdr_11.jpg', ] # urls = \ # ['https://upload.wikimedia.org/wikipedia/commons/5/51/HDRI_Sample_Scene_Window_-_01.jpg', # 'https://upload.wikimedia.org/wikipedia/commons/c/c1/HDRI_Sample_Scene_Window_-_02.jpg', # 'https://upload.wikimedia.org/wikipedia/commons/5/5b/HDRI_Sample_Scene_Window_-_03.jpg', # 'https://upload.wikimedia.org/wikipedia/commons/6/6e/HDRI_Sample_Scene_Window_-_04.jpg', # 'https://upload.wikimedia.org/wikipedia/commons/d/d1/HDRI_Sample_Scene_Window_-_05.jpg', # 'https://upload.wikimedia.org/wikipedia/commons/5/51/HDRI_Sample_Scene_Window_-_06.jpg', # 'https://upload.wikimedia.org/wikipedia/commons/b/b6/HDRI_Sample_Scene_Window_-_07.jpg', # 'https://upload.wikimedia.org/wikipedia/commons/f/f8/HDRI_Sample_Scene_Window_-_08.jpg', # 'https://upload.wikimedia.org/wikipedia/commons/8/83/HDRI_Sample_Scene_Window_-_09.jpg', # 'https://upload.wikimedia.org/wikipedia/commons/c/c0/HDRI_Sample_Scene_Window_-_10.jpg', # 'https://upload.wikimedia.org/wikipedia/commons/6/6e/HDRI_Sample_Scene_Window_-_11.jpg', # 'https://upload.wikimedia.org/wikipedia/commons/8/8b/HDRI_Sample_Scene_Window_-_12.jpg'] for i, url in enumerate(urls): filename = 'hdr_{:02d}.jpg'.format(i) print(url, filename) download(url, filename) # - # # 配列へのアクセス:行,列 # # - 配列へのアクセスの順番 # - 行,列 # - 画素へのアクセスの順番 # - 縦,横 # - y, x # - ループを回すなら外側がy,内側がx # - 配列2つ目のインデックスのほうが連続したメモリ領域 # # 例: # + im = np.zeros((2, 3)) # 幅3✕高さ2の画像(配列) im[0, 1] = 255 # (x,y)=(1,0)の画素へアクセス print(im) imshow(im) plt.axis('off') plt.show() # - # C言語なら # <pre> # #include <stdio.h> # int main(void){ # # float im[2][3]; # im[0][1] = 255; # for(int y = 0; y < 2; y++) { # for(int x = 0; x < 3; x++) # printf("%.2f, ", im[y][x]); # printf("\n"); # } # return 0; # } # </pre> # # カラー画像とグレースケール画像 # + im = imread('girl.jpg') imshow(im) print('shape: ', im.shape) rows, columns, channels = im.shape print("row: ", 
rows) print("col: ", columns) print("chn: ", channels) # + im = rgb2gray(imread('girl.jpg')) imshow(im) print('shape: ', im.shape) rows, columns = im.shape print("row: ", rows) print("col: ", columns) print("chn: ", channels) # - # # RGBチャンネル # + # im = imread('lake.jpg') # im = imread('flag.png') im = imread('girl.jpg') imshow(im) plt.title("original RGB image") plt.show() r_channel = im[:, :, 0] g_channel = im[:, :, 1] b_channel = im[:, :, 2] fig = plt.figure(figsize=(15,3)) for i, c in zip(range(3), 'RGB'): ax = fig.add_subplot(1, 3, i + 1) imshow(im[:, :, i], vmin=0, vmax=255) plt.colorbar() plt.title('{} channel'.format(c)) plt.show(); # - # # 3次元配列でのカラー画像の表現 # # ## Packed format # - 画素へのアクセスの順番 # - 縦,横,チャンネル # - y, x, color # - 1画素の色情報のメモリ領域が連続している # - 多くの場合はこれ # + im = np.zeros((2, 4, 3), dtype=np.uint8) # 縦2,横4,3チャンネル分 im[0, 1, 0] = 255 print(im[:, :, 0]) # 赤:0番目のチャンネル print(im[:, :, 1]) # 緑:1番目のチャンネル print(im[:, :, 2]) # 青:2番目のチャンネル imshow(im) plt.axis('off') plt.show() # - # ## Planar format # - 画素へのアクセスの順番 # - チャンネル,縦,横 # - channel, y, x # - 「2x3の画像」のメモリ領域が連続している # - 特殊用途 # - 一部の動画フォーマット # - 深層学習では「バッチ」 im = np.zeros((3, 2, 4), dtype=np.uint8) # 画像を3枚,縦2,横4 im[0, 0, 1] = 255 print(im) imshow(im) # imshowはpacked formatを仮定しているので,このplanar formatを表示するとおかしなことになる plt.axis('off') plt.show() # # RGBとBGR # # ## RGB # - 多くの教科書の説明もこれ # - 多くの画像処理ライブラリはこれ # - pythonならskimage, matplotlib # + im = np.zeros((2, 4, 3), dtype=np.uint8) # 縦2,横4,3チャンネル分 im[0, 1, 0] = 255 print(im[:, :, 0]) # 赤:0番目のチャンネル print(im[:, :, 1]) # 緑:1番目のチャンネル print(im[:, :, 2]) # 青:2番目のチャンネル imshow(im) plt.axis('off') plt.show() # - # ## BGR # - こちらもよく使われる # - opencv(python, C/C++) # - WindowsのCOLORREF(16進で0x00bbggrr) # - ハードウェア # # - データの解釈の違いだけ # + im = np.zeros((2, 4, 3), dtype=np.uint8) im[0, 1, 0] = 255 print(im[:, :, 0]) # 青:0番目のチャンネル print(im[:, :, 1]) # 緑:1番目のチャンネル print(im[:, :, 2]) # 赤:2番目のチャンネル # - imshow(im) # このmatplotlibのimshowはRGBを仮定 plt.axis('off') plt.show() # opencvのimshowはBGRを仮定 cv2.imshow('opencv imshow window', cv2.resize(im, (400, 200), interpolation=cv2.INTER_NEAREST)) cv2.waitKey(3000) # 3000ms(3秒)待つ cv2.destroyWindow('opencv imshow window') # 消えないかもしれないけど無視 # ## BGRとRGBの変換 im_BGR = cv2.imread('girl.jpg') # OpenCV im_RGB = imread('girl.jpg') # scikit-image # + imshow(im_RGB) # matplotlibのimshowはRGBを仮定 plt.title('show RGB image as RGB image') plt.axis('off') plt.show() imshow(im_BGR) # matplotlibのimshowはRGBを仮定 plt.title('show BGR image as RGB image') plt.axis('off') plt.show() # - im_BGR_to_RGB = cv2.cvtColor(im_BGR, cv2.COLOR_BGR2RGB) imshow(im_BGR_to_RGB) plt.title('show RGB-converted BGR image as RGB image') plt.axis('off') plt.show() im_BGR_to_RGB = im_BGR[:, :, ::-1] imshow(im_BGR_to_RGB) plt.title('show RGB-converted BGR image as RGB image') plt.axis('off') plt.show() # + im_BGR_to_RGB = np.zeros_like(im_BGR) im_BGR_to_RGB[:, :, 0] = im_BGR[:, :, 2] im_BGR_to_RGB[:, :, 1] = im_BGR[:, :, 1] im_BGR_to_RGB[:, :, 2] = im_BGR[:, :, 0] imshow(im_BGR_to_RGB) plt.title('show RGB-converted BGR image as RGB image') plt.axis('off') plt.show() # - imsave('girl_rgb.jpg', im_RGB) # scikit-image: RGB im = imread('girl_rgb.jpg') # scikit-image: RGB imshow(im) # matplotlib: RGB plt.title('RGB image') plt.axis('off') plt.show() cv2.imwrite('girl_rgb.jpg', im_RGB) # Opencv: BGR im = imread('girl_rgb.jpg') # scikit-image: RGB imshow(im) # matplotlib: RGB plt.title('RGB image') plt.axis('off') plt.show() cv2.imwrite('girl_rgb.jpg', im_RGB) # OpenCV: BGR im = cv2.imread('girl_rgb.jpg') # OpenCV: BGR imshow(im) # 
matplotlib: RGB plt.title('RGB image') plt.axis('off') plt.show() # # グレースケールへの変換 # + # im = imread('lake.jpg') # im = imread('flag.png')[:,:,:3] # RGBAの場合はRGBだけ取り出す im = imread('girl.jpg') imshow(im) plt.title("original RGB image") plt.show() # 組み込みのrgb2gray関数を使う gray = 0.2125 R + 0.7154 G + 0.0721 B im_gray1 = rgb2gray(im) imshow(im_gray1, vmin=0, vmax=1) # 型はfloat,範囲は[0,1]になる plt.colorbar() plt.title("rgb2gray min {0} max {1}".format(im_gray1.min(), im_gray1.max() )) plt.show() # RGBの平均をグレースケール画像とする.最初にfloatに変換し(範囲は[0,255]になる),表示のときにuint8に変換する im_gray2 = (im[:,:,0].astype(float) + im[:,:,1].astype(float) + im[:,:,2].astype(float)) / 3 imshow(im_gray2, vmin=0, vmax=255) plt.colorbar() plt.title("(R+B+G)/3 min {0:.2f} max {1:.2f}".format(im_gray2.min(), im_gray2.max() )) plt.show() # RGBの重み付き平均をグレースケール画像とする. # https://en.wikipedia.org/wiki/Grayscale#Luma_coding_in_video_systems im_gray3 = (0.299 * im[:,:,0].astype(float) + 0.587 * im[:,:,1].astype(float) + 0.114 * im[:,:,2].astype(float)) imshow(im_gray3, vmin=0, vmax=255) plt.colorbar() plt.title("$\gamma'$ of PAL and NTSC min {0:.2f} max {1:.2f}".format(im_gray3.min(), im_gray3.max() )) plt.show() # RGBの重み付き平均をグレースケール画像とする.その2.規格によって重み係数は異なる # https://en.wikipedia.org/wiki/Grayscale#Luma_coding_in_video_systems # rgb2gray()はこれを使っている.http://scikit-image.org/docs/dev/api/skimage.color.html#skimage.color.rgb2gray im_gray4 = (0.2126 * im[:,:,0].astype(float) + 0.7152 * im[:,:,1].astype(float) + 0.0722 * im[:,:,2].astype(float)) imshow(im_gray4, vmin=0, vmax=255) plt.colorbar() plt.title("$\gamma'$ of HDTV min {0:.2f} max {1:.2f}".format(im_gray4.min(), im_gray4.max() )) plt.show() # - # # ヒストグラム # + im_files = ['lake.jpg', 'flag.png', 'girl.jpg', 'writing.png'] for file in im_files: im = imread(file)[:,:,:3] # RGBAの場合はRGBだけ取り出す fig = plt.figure(figsize=(20,3)) ax = fig.add_subplot(1, 3, 1) im = rgb2gray(im) # 範囲は[0,1] imshow(im) plt.title(file) plt.axis('off') bins = 256 ax = fig.add_subplot(1, 3, 2) freq, bins = histogram(im) plt.plot(bins, freq) plt.xlabel("intensity") plt.ylabel("frequency") plt.title('histogram (linear)') plt.xlim(0,1) ax = fig.add_subplot(1, 3, 3) freq, bins = histogram(im) plt.plot(bins, freq) plt.xlabel("intensity") plt.ylabel("log frequency") plt.yscale('log') plt.title('histogram (log)') plt.xlim(0,1) plt.show(); # - # # 統計量 # + im_files = ['lake.jpg', 'flag.png', 'girl.jpg', 'writing.png'] for file in im_files: im = imread(file)[:,:,:3] # RGBAの場合はRGBだけ取り出す imshow(im) plt.title(file) plt.show() im = rgb2gray(im) print('mean: ', im.mean()) print('std: ', im.std()) print('median: ', np.median(im)) print('max: ', im.max()) print('min: ', im.min()) print('contrast: ', (im.max() - im.min()) / (im.max() + im.min()) ) # Michelson contrast print('contrast: ', im.max() / im.min() if im.min() > 0 else np.nan ) # contrast ratio print('contrast: ', im.max() - im.min() ) # contrast difference print() # - # ## 平均と分散の計算方法 # ### 定義式そのまま # - 計算量大:2重ループ2回 # - 書いてはいけないコード # im = rgb2gray(imread('girl.jpg')) # + h, w = im.shape mean = 0 for y in range(h): for x in range(w): mean += im[y, x] mean /= h * w print('mean: ', mean) var = 0 for y in range(h): for x in range(w): var += (im[y, x] - mean)**2 var /= h * w print('variance: ', var) print('std: ', np.sqrt(var)) # - # ### 定義式を変形したもの # - 計算量は半分:2重ループ1回 # - 数式から導出できる # # $ # \mu = \frac{1}{N} \sum_i x_i \\ # \sigma^2 = \frac{1}{N} \sum_i (x_i - \mu)^2 # = (\frac{1}{N} \sum_i x_i^2) - \mu^2 # $ # + h, w = im.shape mean = 0 var = 0 for y in range(h): for x in range(w): mean += im[y, x] 
var += im[y, x]**2 mean /= h * w print('mean: ', mean) var /= h * w var -= mean**2 print('variance: ', var) print('std: ', np.sqrt(var)) # - # # タスク # # - カメラを手で抑えて真っ暗な画像を撮影する # - その画像を読み込み,ヒストグラムと統計量を計算する # # ガンマ変換 # + files = ['flag.png', 'lake.jpg', 'girl.jpg'] @interact(gamma=(0.1, 10, 0.1), file=files) def g(gamma=1.0, file=files[0]): im = rgb2gray(imread(file)[:,:,:3]) # RGBAの場合はRGBだけ取り出す fig = plt.figure(figsize=(20, 3)) ax = fig.add_subplot(1, 4, 1) imshow(im, vmin=0, vmax=1) plt.colorbar() plt.title('original image') ax = fig.add_subplot(1, 4, 2) im_gamma = adjust_gamma(im, gamma) imshow(im_gamma, vmin=0, vmax=1) plt.colorbar() plt.title('$\gamma$ = {}'.format(gamma)) ax = fig.add_subplot(1, 4, 3) x = np.arange(0, 1, 0.01) plt.plot(x, x ** gamma) ax.set_aspect('equal') ax.set_xlabel('input') ax.set_ylabel('output') plt.title('$\gamma$ = {}'.format(gamma)) ax = fig.add_subplot(1, 4, 4) freq, bins = histogram(im_gamma) plt.plot(bins, freq) plt.xlim(0, 1) plt.xlabel("intensity") plt.ylabel("log frequency") plt.yscale('log') plt.title('histogram (log)') plt.show(); # - # # チャンネル別のガンマ変換 # # + vals = (val_start, val_end, val_step) = 0.1, 10, 0.1 val_default = 1.0 files = ['flag.png', 'lake.jpg', 'girl.jpg'] @interact(r_gamma=vals, g_gamma=vals, b_gamma=vals, file=files) def g(r_gamma=val_default, g_gamma=val_default, b_gamma=val_default, file=files[0]): im = imread(file)[:,:,:3] # RGBAの場合はRGBだけ取り出す fig = plt.figure(figsize=(15, 3)) ax = fig.add_subplot(1, 3, 1) imshow(im, vmin=0, vmax=255) plt.title('original image') ax = fig.add_subplot(1, 3, 2) im[:, :, 0] = 255 * (im[:, :, 0]/255) ** r_gamma im[:, :, 1] = 255 * (im[:, :, 1]/255) ** g_gamma im[:, :, 2] = 255 * (im[:, :, 2]/255) ** b_gamma imshow(im, vmin=0, vmax=1) plt.title('$\gamma$ = ({0},{1},{2})'.format(r_gamma, g_gamma, b_gamma)) ax = fig.add_subplot(1, 3, 3) x = np.arange(0, 1, 0.01) plt.plot(x, x ** r_gamma, c='red', label='$\gamma$ for R') plt.plot(x, x ** g_gamma, c='green', label='$\gamma$ for G') plt.plot(x, x ** b_gamma, c='blue', label='$\gamma$ for B') plt.legend() ax.set_aspect('equal') ax.set_xlabel('input') ax.set_ylabel('output') plt.show(); # - # # 疑似カラー表示 # + im = rgb2gray(imread('IR_cat.jpg')) cmaps = ['gray', 'gray_r', 'jet', 'jet_r', 'rainbow', 'rainbow_r', 'hsv', 'hsv_r', 'terrain', 'terrain_r' ] @interact(cmap=RadioButtons(options=cmaps)) def g(cmap='gray'): imshow(im, cmap=cmap) plt.colorbar() plt.axis('off') plt.tight_layout() plt.show() # - # # 画像間の演算 # # ## 平均 # + im1 = imread('girl.jpg') im2 = imread('flag.png')[:,:,:3] # alphaチャンネルを除去 im1 = resize(im1, (480, 640, 3)) im2 = resize(im2, (480, 640, 3)) im_ave = (im1 + im2) / 2 imshow(im_ave) plt.axis('off') plt.title('average image') plt.show(); # - # ## 重み付き平均 # + vals = (val_start, val_end, val_step) = 0.0, 1, 0.1 val_default = 0.5 im1 = imread('girl.jpg') im2 = imread('flag.png')[:,:,:3] # alphaチャンネルを除去 im1 = resize(im1, (480, 640, 3)) im2 = resize(im2, (480, 640, 3)) @interact(alpha=vals) def g(alpha=val_default): fig = plt.figure(figsize=(15, 3)) ax = fig.add_subplot(1, 3, 1) im_weighted_ave = alpha * im1 + (1 - alpha) * im2 imshow(im_weighted_ave) ax = fig.add_subplot(1, 3, 2) imshow(alpha * im1) plt.title('weight: {:.2f}'.format(alpha)) ax = fig.add_subplot(1, 3, 3) imshow((1 - alpha) * im2) plt.title('weight: {:.2f}'.format(1 - alpha)) plt.show() # - # ## アルファブレンディング # + vals = (val_start, val_end, val_step) = 0, 640, 20 val_default = 320 im1 = imread('girl.jpg') im2 = imread('flag.png')[:, :, :3] # alphaチャンネルを除去 im1 = resize(im1, (480, 640, 3)) 
im2 = resize(im2, (480, 640, 3)) @interact(center=vals) def g(center=val_default): alpha_mask = np.zeros((480, 640, 3)) for y in range(480): for x in range(640): alpha_mask[y, x, :] = 1 / (1 + np.exp(-(x - center) * 0.015)) im1_a = im1 * alpha_mask im2_a = im2 * (1 - alpha_mask) fig = plt.figure(figsize=(15, 6)) ax = fig.add_subplot(2, 3, 1) im_weighted_ave = im1_a + im2_a imshow(im_weighted_ave) ax = fig.add_subplot(2, 3, 2) imshow(im1_a) ax = fig.add_subplot(2, 3, 3) imshow(im2_a) ax = fig.add_subplot(2, 3, 5) imshow(alpha_mask) ax = fig.add_subplot(2, 3, 6) imshow(1 - alpha_mask) plt.show() # - # # 二値化 # + im = rgb2gray(imread('text.jpg')) @interact(block_size=(1, 500, 10)) def g(block_size=51): fig = plt.figure(figsize=(15, 2.5)) ax = fig.add_subplot(1, 3, 1) imshow(im) plt.axis('off') plt.title('original image') ax = fig.add_subplot(1, 3, 2) global_th = threshold_otsu(im) binary_global = im > global_th imshow(binary_global) plt.axis('off') plt.title('global thresholding by Otsu th') ax = fig.add_subplot(1, 3, 3) adaptive_th = threshold_local(im, block_size) binary_adaptive = im > adaptive_th imshow(binary_adaptive) plt.title('adaptive thresholding with {0}x{0} block'.format(block_size)) plt.axis('off') plt.show() # - # # ラベリング img = rgb2gray(imread('coins.jpg')) imshow(img) # + block_size = 121 img2 = img > threshold_local(img, block_size) imshow(img2) plt.title('type {0}, values {1}'.format(img2.dtype, np.unique(img2))) plt.show() img2i = skimage.img_as_int(img2) imshow(img2i) plt.title('type {0}, values {1}'.format(img2i.dtype, np.unique(img2i))) plt.show() # + img2il = measure.label(img2i) # ラベリング imshow(img2il) # ラベリング結果の表示.輝度値はラベル番号 plt.title('labeling') plt.show() imshow(color.label2rgb(img2il, bg_label=0)) # ラベリング結果をカラーで表示. plt.title('labeling with random colors') plt.show() # - # # モルフォロジー処理 @interact(element=['disk', 'square'], size=(1, 10, 1) ) def g(element='disk', size=1): if element == 'square': disk = morphology.square(size) else: disk = morphology.disk(size) fig = plt.figure(figsize=(20, 6)) fig.add_subplot(2, 4, 1) imshow(img2il) # ラベリング結果の表示.輝度値はラベル番号 plt.title('labeling') plt.axis('off') img2im = img2il.copy() fig.add_subplot(2, 4, 2) img2im = morphology.binary_dilation(img2im, disk) img2im = morphology.binary_erosion(img2im, disk) imshow( measure.label(img2im) ) plt.title('filling holes') plt.axis('off') fig.add_subplot(2, 4, 3) img2im = morphology.binary_erosion(img2im, disk) img2im = morphology.binary_dilation(img2im, disk) imshow( measure.label(img2im) ) plt.title('remove small regions') plt.axis('off') fig.add_subplot(2, 4, 4) imshow(color.label2rgb(measure.label(img2im), bg_label=0)) plt.title('labeling after morphorogy operations') plt.axis('off') fig.add_subplot(2, 4, 8) # 各物体の凸包を求めると,もっときれいになる img2im = measure.label(morphology.convex_hull_object(img2im)) imshow(color.label2rgb(img2im, bg_label=0)) plt.title('convex hulls') plt.axis('off') plt.show() # # 細線化 img = (imread('evol.png')[:,:,3] > 1).astype(int) # 実はRGBA画像なので,まずRGB値を使ってboolへ変換 # + fig = plt.figure(figsize=(15, 3)) ax = fig.add_subplot(1, 3, 1) imshow(img) plt.title('original bianry image') plt.axis('off') ax = fig.add_subplot(1, 3, 2) thin_img = morphology.skeletonize(img) imshow(thin_img) plt.title('thinning') plt.axis('off') ax = fig.add_subplot(1, 3, 3) _, dist = morphology.medial_axis(img, return_distance=True) imshow(dist) # 距離変換画像.これを使って細線化を行っているはず plt.title('distance transform') plt.axis('off') plt.show() # -
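# +
# A small added sketch: the skeleton is a boolean image and the medial-axis distance is a
# float image, so simple summary statistics can be read off directly (`thin_img` and
# `dist` come from the cell above).
print('skeleton pixels:', int(thin_img.sum()))
print('max medial-axis distance (px):', float(dist.max()))
# -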
ImageProcessing/1-image-pixel-binary.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### You will learn to use Scikit-Learn, a machine learning library with Python, to predict whether a tweet is popular or not, from its image and text. # ### 1. A small dataset # This dataset consists of (image, text) pairs of tweets, organized by the unique tweet ids. Check folder `./dataset/`. Each tweet has an id and a label: popular (1) or not popular (0), the information is saved in `./meta.csv`. # # 1.1 load `meta.csv` using pandas to a dataframe, obtain a Nx2 numpy array M, first column is ID, second column is label # hint: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html # # 1.2 split the rows of M randomly to 70% train, 20% validation, and 10% test: `M_train, M_val, M_test` # hint: you can use this [function](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) twice. # # 1.3 Write a function `extract_feature_label(row)`, which takes a row of any meta matrix, read the image and text from its ID in `./dataset/`, and extract image features and text features. The function should return three things: label (0 or 1), image feature vector, text feature vector. # # To read an image from file, you can use `skimage`, particularly this [function](https://scikit-image.org/docs/dev/api/skimage.io.html#imread) # # You can use the provided functions `get_image_feature(image)`, `get_text_feature(text)` for help, but feel free to implement feature extraction on your own design. # + import numpy as np import torch from sklearn.feature_extraction.text import CountVectorizer from skimage.transform import resize # Load text corpus and prepare the Bigram. corpus = open('./corpus.txt','r').readlines() bigram_vectorizer = CountVectorizer(ngram_range=(1, 2), token_pattern=r'\b\w+\b', min_df=1) bigram_vectorizer.fit(corpus) def get_image_feature(image, image_size=(128,128)): # image is an HxWxC numpy array. image = resize(image, (image_size[0], image_size[1]), anti_aliasing=True) print(image.shape) return image.reshape((-1)) def get_text_feature(text): # text is a String feature = bigram_vectorizer.transform([text]).toarray() return feature.reshape((-1)) # - def extract_feature_label(row): ''' return label, image_feature, text_feature ''' return # ### 2. Prepare data in matrix form # Write a for loop to iterate over each of `M_train, M_val, M_test`. Use the above implemented function to aggregate the features and labels to numpy array forms. For example, for `M_train` with m entries, you should get `X_image_train (m x n), X_text_train (m x n), y_train (m x 1)`. Same applies for validation and test sets. # ### 3. Learn to use scikit-learn to do classification, [tutorial](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) # # In the above tutorial, pay attention to `examples` sample code. In your case, X should be a numpy array of m x n, where m is the number of tweets, n is the feature dimension, y should be a numpy array of m x 1, or a list, each element is either 1 or 0. Note the `score()` function gives the accuracy of prediction. # # After learning this, you can call the `fit()` function on the training data, and `predict()` function on the test data. 
You can use [this](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html) to compute the accuracy of your prediction on the test data. # ### 4. Now you are ready to do multimodal prediction! # # 4.1 Early fusion. # For each tweet, concatenate its image feature and text feature into a new vector. That is, you should concatenate the columns of `X_image_train` and `X_text_train`, for example. Call the concatenated matrix `X_train`; then you can use logistic regression! # # 4.2 Late fusion. # Use `X_image_train` and `X_text_train` separately to train two logistic regression classifiers `clf1, clf2`. # # Get two sets of prediction probabilities `y_image_prob_test` and `y_text_prob_test` by applying the two classifiers to the test set, using the `predict_proba()` function in sklearn. These should be two vectors of the same length. # # After getting the two probability vectors, compute their average, `y_avg_prob_test`. # # Use `y_avg_prob_test` to get a binary prediction vector `y_pred`. Use `y_pred` and `y_test` to compute accuracy. # ### 5. Report # # On the test set, report the following accuracies: # 1. using image features only # 2. using text features only # 3. using early fusion # 4. using late fusion # # Extensions: # 1. try other classifiers in sklearn, e.g. KNN or a decision tree # 2. in late fusion, instead of computing a plain average, you can compute a weighted average, e.g. 30% for image and 70% for text if you think text is more important for predicting popularity. Try different weights and see how the accuracy changes.
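# As a toy illustration of the late-fusion recipe above, here is a minimal sketch on synthetic
# data only -- the shapes and values are placeholders, not the assignment's real tweet features.

# +
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

rng = np.random.RandomState(0)
X_image_train, X_text_train = rng.rand(80, 5), rng.rand(80, 7)   # fake image/text features
X_image_test, X_text_test = rng.rand(20, 5), rng.rand(20, 7)
y_train, y_test = rng.randint(0, 2, 80), rng.randint(0, 2, 20)   # fake popularity labels

clf1 = LogisticRegression().fit(X_image_train, y_train)  # image-only classifier
clf2 = LogisticRegression().fit(X_text_train, y_train)   # text-only classifier

# probability of the positive class from each modality, averaged, then thresholded at 0.5
y_image_prob_test = clf1.predict_proba(X_image_test)[:, 1]
y_text_prob_test = clf2.predict_proba(X_text_test)[:, 1]
y_avg_prob_test = (y_image_prob_test + y_text_prob_test) / 2
y_pred = (y_avg_prob_test >= 0.5).astype(int)
print('late-fusion accuracy:', accuracy_score(y_test, y_pred))
# -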
hw1/hw1-multimodal-learning-scikit-learn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### SKBL - JSON data occupation # * GITHUB [salgo60/HISCOKoder](https://github.com/salgo60/HISCOKoder) # * this [Notebook](https://github.com/salgo60/HISCOKoder/blob/main/Jupyter/SKBL%20yrken%20JSON.ipynb) # # SKBL är en av de få svenska aktörer som har strukturerad data för yrken och ett API. Dock saknas externa kopplingar som Wikidata och HISCOkoder.... även tid anges som textsträngar i olika format, plats saknar även det "samma som" utan blir en textsträng med koordinat. # # Summa sumarum bra med API men datat är långt från optimalt för att bearbetas vidare. Jag var med på SKBL ledningsgruppsmöte och pratade om Linked data 2020 dec men dom har inte orkat ta steget ännu.... se [presentation](https://docs.google.com/presentation/d/1EvQHdzPsIA4hr2WuNXnbpsLHec2GS89b_pHvilO6xhQ/edit?usp=sharing) tror inte KARP plattformen är optimal för "samma som" plus att det behövs nya kompetenser som Linked data i projektet för att det skall ta fart.... # #### Other sources we sync # * [Arken](https://github.com/salgo60/open-data-examples/blob/master/Arken.ipynb) # * WD [Property:P8899](https://www.wikidata.org/wiki/Property:P8899) # * [Kulturpersoner Uppsalakyrkogård](https://github.com/salgo60/open-data-examples/blob/master/Check%20WD%20kulturpersoner%20uppsalakyrkogardar.ipynb) # * [Litteraturbanken](https://github.com/salgo60/open-data-examples/blob/master/Litteraturbanken%20Author.ipynb) # * WD property [P5101](https://www.wikidata.org/wiki/Property_talk:P5101) [P5123](https://www.wikidata.org/wiki/Property_talk:P5123) # * [Nobelprize.org](https://github.com/salgo60/open-data-examples/blob/master/Nobel%20API.ipynb) # * WD [property 8024](https://www.wikidata.org/wiki/Property:P8024) # * [SBL](https://github.com/salgo60/open-data-examples/blob/master/SBL.ipynb) # * WD [property 3217](https://www.wikidata.org/wiki/Property:P3217) # * [SKBL](https://github.com/salgo60/open-data-examples/blob/master/Svenskt%20Kvinnobiografiskt%20lexikon%20part%203.ipynb) # * WD [property 4963](https://www.wikidata.org/wiki/Property:P4963) # * [Svenska Akademien](https://github.com/salgo60/open-data-examples/blob/master/Svenska%20Akademien.ipynb) # * WD [property 5325](https://www.wikidata.org/wiki/Property:P5325) # from datetime import datetime start_time = datetime.now() print("Last run: ", start_time) # + import urllib3, json import pandas as pd http = urllib3.PoolManager() url = "https://ws.spraakbanken.gu.se/ws/karp/v5/query?mode=skbl&q=extended%7C%7Cand%7Cskblstatus%7Cequals%7Cok&size=10000" r = http.request('GET', url) data = json.loads(r.data.decode('utf-8')) listSKBLYrke = [] i = 0 for row in (data["hits"]["hits"]): # print(row) # print (row["_source"]["url"]) new_item = dict() new_item['skblid'] = row["_source"]["url"] try: #print (row["_source"]["occupation"]) for o in row["_source"]['occupation']: # print("\t",o['description']) # print("\t\t",o['detail']) # print("\t\t",o['place']) # print("\t\t",o['type']) # Sakkunnig # Sakkunnig, bl a 1924 års privatskoleutredning # {'pin': {'lat': 59.329323, 'lon': 18.068581}, 'place': 'Stockholm'} # Ideellt arbete # Chef # VD, <NAME> # {'pin': {'lat': 59.329323, 'lon': 18.068581}, 'place': 'Stockholm'} # Yrke try: description = o['description'] except: description = "" new_item['description'] = description try: detail = o['detail'] except: detail = "" 
new_item['detail'] = detail try: Ofrom = o['from'] except: Ofrom = "" new_item['from'] = Ofrom try: Oto = o['to'] except: Oto = "" new_item['to'] = Oto try: place = o['place'] except: place = "" new_item['place'] = place try: occupationtype = o['type'] except: occupationtype = "" new_item['occupationtype'] = occupationtype except Exception as e: print("Error occupation\t",row["_source"]["url"],e) listSKBLYrke.append(new_item) i = i + 1 #print (i) print (len(listSKBLYrke) ," antal poster") # - listSKBLYrke[1:10] import pandas as pd newListdf = pd.DataFrame(listSKBLYrke,columns=list (["skblid","description","detail","occupationtype","from","to","place"])) newListdf.info() newListdf.head(50) newListdf["skblid"].value_counts() Yrkendf = newListdf["description"].value_counts() Yrkendf newListdf["occupationtype"].value_counts() # + import matplotlib.pyplot as plt plt.rcParams["figure.figsize"] = (10,10) ax = Yrkendf.plot(kind='pie') ax.set_title("Vanligaste Yrken i SKBL " + start_time.strftime('%Y %m %d')) # - fig = ax.get_figure() plt.rcParams['savefig.facecolor']='white' fig.savefig('../img/SKBL_yrken_json.png') newListdf.to_csv("SKBLyrken_newListdf.csv") Yrkendf.to_csv("SKBLyrken_valuecount.csv")
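# The `from`/`to` fields above are free-text dates in mixed formats, so as a simple (assumed)
# normalization this sketch extracts a four-digit year where one is present.

# +
newListdf['from_year'] = newListdf['from'].astype(str).str.extract(r'(\d{4})', expand=False)
newListdf['to_year'] = newListdf['to'].astype(str).str.extract(r'(\d{4})', expand=False)
print(newListdf[['from', 'from_year', 'to', 'to_year']].head(10))
# -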
Jupyter/.ipynb_checkpoints/SKBL yrken JSON-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # <br> # @author: <NAME><br> # import random import math import time import threading import pygame import sys import os import Data_Manager # This module simulates the Dynamic Traffic Lights with PIR Sensors at the intersection. # # The fundamental assumption for the PIR Sensor techonology is to provide a method of detecting the presence of cars based on movement in each direction of the intersecion. In this simulation, the PIR sensors are represented by "rays" that detect the presence of vehicles when they "collide" with the rays within the sensors' operating range. # # The PIR sensor logic is implemented following a "master-slave" module where East is lane 0, West is lane 2, North is lane 1 and South is lane 3. With this assumption the sensors check every greenmax if vehicles are present in the opposite direction. # # At the end of the simulation the algorithm will print the number of cars served per techonology, the average waiting time in red at the intersection, and their efficiencies compared to each other in the form of graphs and tables; if the plotter file is run. # + # Initial Parameters greenMax = 180 greenMin = 60 # Default values of signal timers defaultRed = 150 defaultYellow = 5 defaultGreen = greenMax defaultMinimum = 10 defaultMaximum = 60 timeCheck = greenMax - greenMin signals = [] noOfSignals = 4 simTime = 3600 # change this to change time of simulation timeElapsed = 0 currentGreen = 0 # Indicates which signal is green nextGreen = (currentGreen + 1) % noOfSignals currentYellow = 0 # Indicates whether yellow signal is on or off # Average times for vehicles to pass the intersection carTime = 2 bikeTime = 1 busTime = 2.5 truckTime = 2.5 # Count of cars at a traffic signal noOfCars = 0 noOfBikes = 0 noOfBuses = 0 noOfTrucks = 0 noOfLanes = 2 # Red signal time at which cars will be detected at a signal detectionTime = 5 # Average speeds of vehicles in terms of pixels per second speeds = { "car": 2.25, "bus": 1.8, "truck": 1.8, "bike": 2.5, } # Coordinates of vehicles' start x = { "right": [0, 0, 0], "down": [775, 747, 717], "left": [1400, 1400, 1400], "up": [602, 627, 657], } y = { "right": [338, 360, 388], "down": [0, 0, 0], "left": [508, 476, 446], "up": [800, 800, 800], } # Dictionary of vehicles in the simulation with lanes per direction vehicles = { "right": { 0: [], 1: [], 2: [], "crossed": 0 }, "down": { 0: [], 1: [], 2: [], "crossed": 0 }, "left": { 0: [], 1: [], 2: [], "crossed": 0 }, "up": { 0: [], 1: [], 2: [], "crossed": 0 }, } vehicleTypes = {0: "car", 1: "bus", 2: "truck", 3: "bike"} directionNumbers = {0: "right", 1: "down", 2: "left", 3: "up"} # Coordinates of signal image, timer, and vehicle count signalCoods = [(493, 230), (875, 230), (875, 570), (493, 570)] signalTimerCoods = [(530, 210), (850, 210), (850, 550), (530, 550)] vehicleCountTexts = ["0", "0", "0", "0"] vehicleCountCoods = [(480, 210), (910, 210), (910, 550), (480, 550)] # Coordinates of stop lines stopLines = {"right": 391, "down": 200, "left": 1011, "up": 665} defaultStop = {"right": 381, "down": 190, "left": 1021, "up": 675} stops = { "right": [381, 381, 381], "down": [190, 190, 190], "left": [1021, 1021, 1021], "up": [675, 675, 675], } # Coordinates of the middle line of the intersection relative to the x axis mid = { "right": { "x": 700, "y": 461 }, "down": { 
"x": 700, "y": 461 }, "left": { "x": 700, "y": 461 }, "up": { "x": 700, "y": 461 }, } # Default rotation angle of the cars rotationAngle = 3 # Gap between vehicles gap = 15 # Stopping gap from vehicle to the stop line pixels per second gap2 = 15 # Moving gap between vehicles in pixels per second pygame.init() # Initializes Pygame # A container class to hold and manage multiple Sprite objects (Vehicle images) simulation = pygame.sprite.Group() # + """ Calculation of the Average Waiting Time for all lanes - STARTS """ # Time managers START leftWaitTime = 0 rightWaitTime = 0 topWaitTime = 0 bottomWaitTime = 0 # Calculates the average waiting time for the simulation def calculateAverageWaitTime(): global leftWaitTime, rightWaitTime, topWaitTime, bottomWaitTime return round( (((leftWaitTime + rightWaitTime + topWaitTime + bottomWaitTime) / 60) / 4), 3) # Tracks the waiting time for all lanes def trackWaitTimeForAllLanes(): global leftWaitTime, rightWaitTime, topWaitTime, bottomWaitTime, signals, currentGreen if signals[currentGreen] != 0: leftWaitTime += 1 if signals[currentGreen] != 0: rightWaitTime += 1 if signals[currentGreen] != 0: topWaitTime += 1 if signals[currentGreen] != 0: bottomWaitTime += 1 # Time managers END """ Calculation of the Average Waiting Time for all lanes - ENDS """ # - class TrafficSignal: def __init__(self, red, yellow, green, minimum, maximum): """ Initializes the traffic lights as objects. """ self.red = red self.yellow = yellow self.green = green self.minimum = minimum self.maximum = maximum self.signalText = "30" self.totalGreenTime = 0 class Vehicle(pygame.sprite.Sprite): """Initializes vehicles parameters and vehicles images as sprite objects.""" def __init__(self, lane, vehicleClass, direction_number, direction, will_turn): pygame.sprite.Sprite.__init__(self) self.lane = lane self.vehicleClass = vehicleClass self.speed = speeds[vehicleClass] self.direction_number = direction_number self.direction = direction self.x = x[direction][lane] self.y = y[direction][lane] self.crossed = 0 self.willTurn = will_turn self.turned = 0 self.rotateAngle = 0 vehicles[direction][lane].append(self) self.index = len(vehicles[direction][lane]) - 1 # Path to load vehicle images from folder based on # direction and vehicle class path = "images/" + direction + "/" + vehicleClass + ".png" self.originalImage = pygame.image.load(path) self.currentImage = pygame.image.load(path) # Get width and height of the current image self.width = self.currentImage.get_width() self.height = self.currentImage.get_height() self.image = self.originalImage # Return the rectangule of the vehicle images self.rect = self.image.get_rect() # Positions the rectangule of the vehicle # images in the same coordinates self.rect.x = self.x self.rect.y = self.y if direction == "right": # Checks if there is more than 1 vehicle in the lanes # before crossing the stop lines in the right direction if ( len(vehicles[direction][lane]) > 1 and vehicles[direction][lane][self.index - 1].crossed == 0 ): # Setting stop coordinate as: stop coordinate # of next vehicle - width of next vehicle - gap self.stop = ( vehicles[direction][lane][self.index - 1].stop - vehicles[direction][lane][self.index - 1].currentImage.get_rect().width - gap ) # setting stop coordinate as: # stop coordinate of next vehicle - width of next vehicle - gap else: self.stop = defaultStop[direction] # Set new starting and stopping coordinate temp = self.currentImage.get_rect().width + gap x[direction][lane] -= temp stops[direction][lane] -= temp elif 
direction == "left": # Checks if there is more than 1 vehicle in the lanes # before crossing the stop lines in the left direction if (len(vehicles[direction][lane]) > 1 and vehicles[direction][lane][self.index - 1].crossed == 0): self.stop = (vehicles[direction][lane][self.index - 1].stop + vehicles[direction][lane] [self.index - 1].currentImage.get_rect().width + gap) else: self.stop = defaultStop[direction] temp = self.currentImage.get_rect().width + gap x[direction][lane] += temp stops[direction][lane] += temp elif direction == "down": if (len(vehicles[direction][lane]) > 1 and vehicles[direction][lane][self.index - 1].crossed == 0): # Setting stop coordinate as: stop coordinate # of next vehicle - width of next vehicle - gap self.stop = (vehicles[direction][lane][self.index - 1].stop - vehicles[direction][lane] [self.index - 1].currentImage.get_rect().height - gap) else: self.stop = defaultStop[direction] # Set new starting and stopping coordinate temp = self.currentImage.get_rect().height + gap y[direction][lane] -= temp stops[direction][lane] -= temp elif direction == "up": # Checks if there is more than 1 vehicle in the lanes # before crossing the stop lines in the down direction if (len(vehicles[direction][lane]) > 1 and vehicles[direction][lane][self.index - 1].crossed == 0): self.stop = (vehicles[direction][lane][self.index - 1].stop + vehicles[direction][lane] [self.index - 1].currentImage.get_rect().height + gap) else: self.stop = defaultStop[direction] # Set new starting and stopping coordinate temp = self.currentImage.get_rect().height + gap y[direction][lane] += temp stops[direction][lane] += temp # Adds all parameteres to the simulation object simulation.add(self) def render(self, screen): """Renders the vehicle images on the screen.""" screen.blit(self.image, (self.x, self.y)) def update(self, screen): """Updates the vehicle images on the screen.""" self.image = self.currentImage self.rect.x = self.x self.rect.y = self.y def move(self): """Move the vehicles according to their direction after crossing the stop line.""" if self.direction == "right": # Checks If the vehicle image has crossed the stop line if (self.crossed == 0 and self.x + self.currentImage.get_rect().width > stopLines[self.direction] ): self.crossed = 1 # The vehicle has Crossed the stop line vehicles[self.direction]["crossed"] += 1 if self.willTurn == 1: # Checks if the vehicle that just crossed # the stop line will turn right if (self.crossed == 0 or self.x + self.currentImage.get_rect().width < mid[self.direction]["x"]): if (self.x + self.currentImage.get_rect().width <= self.stop or (currentGreen == 0 and currentYellow == 0) or self.crossed == 1) and ( self.index == 0 or self.x + self.currentImage.get_rect().width < (vehicles[self.direction][self.lane][self.index - 1].x - gap2) or vehicles[self.direction][self.lane][ self.index - 1].turned == 1): self.x += self.speed else: if self.turned == 0: # Checks if the vehicle that just # crossed didn't turn right # If it didn't turn right, then it keeps # the vehicle moving forward and keep them straight self.rotateAngle += rotationAngle self.currentImage = pygame.transform.rotate( self.originalImage, -self.rotateAngle) self.x += 3 self.y += 2.8 # If the vehicle turns right at the # last moment then its decision is registered if self.rotateAngle == 90: self.turned = 1 else: # Index represents the relative position # of the vehicle among # the vehicles moving in the same direction # and the same lane if (self.index == 0 or self.y + 
self.currentImage.get_rect().height < (vehicles[self.direction][self.lane][self.index - 1].y - gap2) or self.x + self.currentImage.get_rect().width < (vehicles[self.direction][self.lane][self.index - 1].x - gap2)): self.y += self.speed else: if (self.x + self.currentImage.get_rect().width <= self.stop or self.crossed == 1 or (currentGreen == 0 and currentYellow == 0)) and ( self.index == 0 or self.x + self.currentImage.get_rect().width < (vehicles[self.direction][self.lane][self.index - 1].x - gap2) or (vehicles[self.direction][self.lane][self.index - 1].turned == 1)): # (if the image has not reached its # stop coordinate or has crossed # stop line or has green signal) # and (it is either the first vehicle in that lane # or it is has enough gap to the # next vehicle in that lane) self.x += self.speed # move the vehicle elif self.direction == "down": # Checks If the vehicle image has crossed the stop line if (self.crossed == 0 and self.y + self.currentImage.get_rect().height > stopLines[self.direction]): self.crossed = 1 vehicles[self.direction]["crossed"] += 1 if self.willTurn == 1: # Checks if the vehicle that just crossed # the stop line will turn if (self.crossed == 0 or self.y + self.currentImage.get_rect().height < mid[self.direction]["y"]): if (self.y + self.currentImage.get_rect().height <= self.stop or (currentGreen == 1 and currentYellow == 0) or self.crossed == 1) and ( self.index == 0 or self.y + self.currentImage.get_rect().height < (vehicles[self.direction][self.lane][self.index - 1].y - gap2) or vehicles[self.direction][self.lane][ self.index - 1].turned == 1): self.y += self.speed else: if self.turned == 0: # Checks if the vehicle that just # crossed didn't turn # If it didn't turn right, then it # keeps the vehicle moving forward and keep them straight self.rotateAngle += rotationAngle self.currentImage = pygame.transform.rotate( self.originalImage, -self.rotateAngle) # If the vehicle turns right at the last # moment then its decision is registered self.x -= 2.5 self.y += 2 if self.rotateAngle == 90: self.turned = 1 else: # Index represents the relative position # of the vehicle among the vehicles # moving in the same direction and the same lane if (self.index == 0 or self.x > (vehicles[self.direction][self.lane][self.index - 1].x + vehicles[self.direction][self.lane] [self.index - 1].currentImage.get_rect().width + gap2) or self.y < (vehicles[self.direction][self.lane][self.index - 1].y - gap2)): self.x -= self.speed else: if (self.y + self.currentImage.get_rect().height <= self.stop or self.crossed == 1 or (currentGreen == 1 and currentYellow == 0)) and ( self.index == 0 or self.y + self.currentImage.get_rect().height < (vehicles[self.direction][self.lane][self.index - 1].y - gap2) or (vehicles[self.direction][self.lane][self.index - 1].turned == 1)): # (if the image has not reached its stop coordinate or has # crossed stop line or has green signal) and # (it is either the first vehicle in that lane or it is # has enough gap to the next vehicle in that lane) self.y += self.speed # move the vehicle elif self.direction == "left": # Checks If the vehicle image has crossed the stop line if self.crossed == 0 and self.x < stopLines[self.direction]: self.crossed = 1 vehicles[self.direction]["crossed"] += 1 if self.willTurn == 1: # Checks if the vehicle that just crossed # the stop line will turn if self.crossed == 0 or self.x > mid[self.direction]["x"]: if ( self.x >= self.stop or (currentGreen == 2 and currentYellow == 0) or self.crossed == 1 ) and ( self.index == 0 or self.x > 
(vehicles[self.direction][self.lane][self.index - 1].x + vehicles[self.direction][self.lane] [self.index - 1].currentImage.get_rect().width + gap2) or vehicles[self.direction][self.lane][self.index - 1].turned == 1): self.x -= self.speed else: if self.turned == 0: # Checks if the vehicle that just crossed didn't turn # If it didn't turn right, then it keeps the # vehicle moving forward and keep them straight self.rotateAngle += rotationAngle self.currentImage = pygame.transform.rotate( self.originalImage, -self.rotateAngle) self.x -= 1.8 self.y -= 2.5 # If the vehicle turns right at the last # moment then its decision is registered if self.rotateAngle == 90: self.turned = 1 else: # Index represents the relative position of the vehicle # among the vehicles moving in the same # direction and the same lane if (self.index == 0 or self.y > (vehicles[self.direction][self.lane][self.index - 1].y + vehicles[self.direction][self.lane] [self.index - 1].currentImage.get_rect().height + gap2) or self.x > (vehicles[self.direction][self.lane][self.index - 1].x + gap2)): self.y -= self.speed else: if (self.x >= self.stop or self.crossed == 1 or (currentGreen == 2 and currentYellow == 0)) and ( self.index == 0 or self.x > (vehicles[self.direction][self.lane][self.index - 1].x + vehicles[self.direction][self.lane][self.index - 1]. currentImage.get_rect().width + gap2) or (vehicles[self.direction][self.lane][self.index - 1].turned == 1)): # (if the image has not reached its stop # coordinate or has crossed # stop line or has green signal) and # (it is either the first vehicle # in that lane or it is has enough gap # to the next vehicle in that lane) self.x -= self.speed # move the vehicle elif self.direction == "up": # Checks If the vehicle image has crossed the stop line if self.crossed == 0 and self.y < stopLines[self.direction]: self.crossed = 1 vehicles[self.direction]["crossed"] += 1 if self.willTurn == 1: # Checks if the vehicle that just crossed # the stop line will turn if self.crossed == 0 or self.y > mid[self.direction]["y"]: if ( self.y >= self.stop or (currentGreen == 3 and currentYellow == 0) or self.crossed == 1 ) and ( self.index == 0 or self.y > (vehicles[self.direction][self.lane][self.index - 1].y + vehicles[self.direction][self.lane][self.index - 1]. currentImage.get_rect().height + gap2) or vehicles[self.direction][self.lane][self.index - 1].turned == 1): self.y -= self.speed else: if self.turned == 0: # Checks if the vehicle that just crossed didn't turn # If it didn't turn right, then it keeps # the vehicle moving forward and keep them straight self.rotateAngle += rotationAngle self.currentImage = pygame.transform.rotate( self.originalImage, -self.rotateAngle) self.x += 2 self.y -= 2 # If the vehicle turns right at the last # moment then its decision is registered if self.rotateAngle == 90: self.turned = 1 else: # Index represents the relative position # of the vehicle among the vehicles # moving in the same direction and the same lane if (self.index == 0 or self.x < (vehicles[self.direction][self.lane][self.index - 1].x - vehicles[self.direction][self.lane] [self.index - 1].currentImage.get_rect().width - gap2) or self.y > (vehicles[self.direction][self.lane][self.index - 1].y + gap2)): self.x += self.speed else: if (self.y >= self.stop or self.crossed == 1 or (currentGreen == 3 and currentYellow == 0)) and ( self.index == 0 or self.y > (vehicles[self.direction][self.lane][self.index - 1].y + vehicles[self.direction][self.lane][self.index - 1]. 
currentImage.get_rect().height + gap2) or (vehicles[self.direction][self.lane][self.index - 1].turned == 1)): # (if the image has not reached its stop # coordinate or has crossed # stop line or has green signal) # and (it is either the first vehicle in # that lane or it is has enough gap # to the next vehicle in that lane) self.y -= self.speed # move the vehicle # + # Initialization of signals with default values def initialize(): """ Initializes the traffic signals with default values. """ # TrafficSignal1 red: 0 yellow: defaultyellow green: defaultGreen ts1 = TrafficSignal(0, defaultYellow, defaultGreen, defaultMinimum, defaultMaximum) signals.append(ts1) # TrafficSignal2 red: (ts1.red+ts1.yellow+ts1.green) # yellow: defaultYellow, green: defaultGreen ts2 = TrafficSignal( ts1.red + ts1.yellow + ts1.green, defaultYellow, defaultGreen, defaultMinimum, defaultMaximum, ) signals.append(ts2) # TrafficSignal3 red: defaultRed # yellow: defaultyellow green: defaultGreen ts3 = TrafficSignal(defaultRed, defaultYellow, defaultGreen, defaultMinimum, defaultMaximum) signals.append(ts3) # TrafficSignal4 red: defaultRed # yellow: defaultyellow green: defaultGreen ts4 = TrafficSignal(defaultRed, defaultYellow, defaultGreen, defaultMinimum, defaultMaximum) signals.append(ts4) repeat() # - def setTime(): """ Sets time based on number of vehicles. """ global noOfCars, noOfBikes, noOfBuses, noOfTrucks, noOfLanes global carTime, busTime, truckTime, bikeTime noOfCars, noOfBuses, noOfTrucks, noOfBikes = 0, 0, 0, 0 # Counts vehicles in the next green direction for j in range(len(vehicles[directionNumbers[nextGreen]][0])): vehicle = vehicles[directionNumbers[nextGreen]][0][j] if vehicle.crossed == 0: vclass = vehicle.vehicleClass # print(vclass) noOfBikes += 1 # Counts the number of vehicles for each direction based on vehicle class for i in range(1, 3): for j in range(len(vehicles[directionNumbers[nextGreen]][i])): vehicle = vehicles[directionNumbers[nextGreen]][i][j] if vehicle.crossed == 0: vclass = vehicle.vehicleClass # print(vclass) if vclass == "car": noOfCars += 1 elif vclass == "bus": noOfBuses += 1 elif vclass == "truck": noOfTrucks += 1 # Calculate the green time of cars greenTime = math.ceil( ((noOfCars * carTime) + (noOfBuses * busTime) + (noOfTrucks * truckTime) + (noOfBikes * bikeTime)) / (noOfLanes + 1)) # Set default green time value if greenTime < defaultMinimum: greenTime = defaultMinimum elif greenTime > defaultMaximum: greenTime = defaultMaximum # Increase the green time of signals by one signals[(currentGreen + 1) % (noOfSignals)].green = greenTime def repeat(): """ Changes the color of the traffic lights based on simulation timing. 
""" global currentGreen, currentYellow, nextGreen # While the timer of current green signal is not zero while (signals[currentGreen].green > 0): updateValues() # Start a thread to set the detection time of next green signal if (signals[(currentGreen + 1) % (noOfSignals)].red == detectionTime ): # set time of next green signal thread = threading.Thread(name="detection", target=setTime, args=()) thread.daemon = True thread.start() time.sleep(1) currentYellow = 1 # set yellow signal on vehicleCountTexts[currentGreen] = "0" # reset stop coordinates of lanes and vehicles for i in range(0, 3): stops[directionNumbers[currentGreen]][i] = defaultStop[ directionNumbers[currentGreen]] for vehicle in vehicles[directionNumbers[currentGreen]][i]: vehicle.stop = defaultStop[directionNumbers[currentGreen]] while (signals[currentGreen].yellow > 0): # while the timer of current yellow signal is not zero updateValues() time.sleep(1) currentYellow = 0 # set yellow signal off # reset all signal times of current signal to default times signals[currentGreen].green = defaultGreen signals[currentGreen].yellow = defaultYellow signals[currentGreen].red = defaultRed currentGreen = nextGreen # set next signal as green signal nextGreen = 1 # set next green signal signals[nextGreen].red = ( signals[currentGreen].yellow + signals[currentGreen].green ) # Set the red time of next to next signal as (yellow time # + green time) of next signal repeat() def updateValues(): """ Updates values of the signal timers after every second. """ # Increase the green channel of all signals for i in range(0, noOfSignals): if i == currentGreen: if currentYellow == 0: signals[i].green -= 1 signals[i].totalGreenTime += 1 else: signals[i].yellow -= 1 else: signals[i].red -= 1 def generateVehicles(): """ Generates vehicles in the simulation """ while True: # Get a random vehicle type vehicle_type = random.randint(0, 3) if vehicle_type == 3: lane_number = 0 else: lane_number = random.randint(0, 1) + 1 will_turn = 0 if lane_number == 2: temp = random.randint(0, 3) if temp <= 2: will_turn = 1 elif temp > 2: will_turn = 0 # Set up a random direction number temp = random.randint(0, 999) direction_number = 0 # Distribution of vehicles across the four directions a = [400, 800, 900, 1000] # a = [100, 200, 225, 250] # Set the direction of the vehicle to temp if temp < a[0]: direction_number = 0 elif temp < a[1]: direction_number = 1 elif temp < a[2]: direction_number = 2 elif temp < a[3]: direction_number = 3 Vehicle( lane_number, vehicleTypes[vehicle_type], direction_number, directionNumbers[direction_number], will_turn, ) time.sleep(0.75) def simulationTime(): """ Main loop for simulation time. """ global timeElapsed, simTime while True: timeElapsed += 1 time.sleep(1) if timeElapsed == simTime: totalVehicles = 0 print("Lane-wise Vehicle Counts") for i in range(noOfSignals): print("Lane", i + 1, ":", vehicles[directionNumbers[i]]["crossed"]) totalVehicles += vehicles[directionNumbers[i]]["crossed"] print("Total vehicles passed: ", totalVehicles) print("Total time passed: ", timeElapsed) print( "No. of vehicles passed per unit time: ", (float(totalVehicles) / float(timeElapsed)), ) print("Average waiting Time: ", calculateAverageWaitTime()) Data_Manager.save_PIR(f"{totalVehicles}", simTime, calculateAverageWaitTime() ) # write data of the sim to the file os._exit(1) # + """ PIR Logic Parameters - STARTS HERE""" class laser(pygame.sprite.Sprite): """ This class makes the rectangles for vehicle detection for each lane. 
""" def __init__(self, width, height, x, y, colour): super().__init__() self.image = pygame.Surface([width, height]) self.image.fill(colour) self.rect = self.image.get_rect() self.rect.x = x self.rect.y = y # Creates and positions the Lasers in the different lanes myObj1 = laser(10, 100, 360, 330, (255, 138, 91)) # left laser myObj2 = laser(100, 10, 705, 170, (234, 82, 111)) # top laser myObj3 = laser(10, 105, 1030, 430, (255, 138, 91)) # right laser myObj4 = laser(100, 10, 595, 690, (234, 82, 111)) # bottom laser # Add laser group laser_group = pygame.sprite.Group() laser_group.add(myObj1) laser_group.add(myObj2) laser_group.add(myObj3) laser_group.add(myObj4) carDidComeOnLeft = False carDidComeOnRight = False carDidComeOnTop = False carDidComeOnBottom = False oppositeRoad = currentGreen + 2 # Road opp to the current one [slave] currentMaster = 0 # current master, which gets changed when its slave is done executing # These variables determine if lanes are serviced leftServiced = False rightServiced = False topServiced = False bottomServiced = False killLeft = False killRight = False killTop = False killBottom = False f1 = True f2 = False f3 = False f4 = False f5 = False f6 = False f7 = False f8 = False # Create the group sprite jdm2 = pygame.sprite.Group() cars = pygame.sprite.Group() cars2 = pygame.sprite.Group() # The font for the detection font on the sidewalk carsDetectedFont = pygame.font.SysFont( "arial", 22) """ PIR Logic Parameters - STARTS HERE # - def main(): """ This function runs the entire simulation. """ thread4 = threading.Thread(name="simulationTime", target=simulationTime, args=()) thread4.daemon = True thread4.start() thread2 = threading.Thread(name="initialization", target=initialize, args=()) # initialization thread2.daemon = True thread2.start() # Colors black = (0, 0, 0) white = (255, 255, 255) # Screensize screenWidth = 1400 screenHeight = 922 screenSize = (screenWidth, screenHeight) # Setting background image i.e. 
image of intersection background = pygame.image.load("images/intersection.png") # Set pygame screen screen = pygame.display.set_mode(screenSize) pygame.display.set_caption( "<NAME> Dynamic Traffic Lights Simulator \ - EE Capstone Project - Fall 2021" ) # Loading signal images and font redSignal = pygame.image.load("images/signals/red.png") yellowSignal = pygame.image.load("images/signals/yellow.png") greenSignal = pygame.image.load("images/signals/green.png") font = pygame.font.Font(None, 30) pirPhoto = pygame.image.load("images/pir.png") # PIR pic thread3 = threading.Thread(name="generateVehicles", target=generateVehicles, args=()) # Generating vehicles thread3.daemon = True thread3.start() run = True # Main loop to run the simulation while run: # Quit the pygame event loop for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit() # Need to add this to kill simulation sys.exit() trackWaitTimeForAllLanes() screen.blit(background, (0, 0)) # display background in simulation for i in range( 0, noOfSignals ): # display signal and set timer according to # current status: green, yello, or red if i == currentGreen: if currentYellow == 1: if signals[i].yellow == 0: signals[i].signalText = "STOP" else: signals[i].signalText = signals[i].yellow screen.blit(yellowSignal, signalCoods[i]) else: if signals[i].green == 0: signals[i].signalText = "SLOW" else: signals[i].signalText = signals[i].green j = signals[i].green screen.blit(greenSignal, signalCoods[i]) else: if signals[i].red <= 10: if signals[i].red == 0: signals[i].signalText = "GO" else: signals[i].signalText = signals[i].red else: signals[i].signalText = "---" screen.blit(redSignal, signalCoods[i]) signalTexts = ["", "", "", ""] # reset flags carDidComeOnLeft = False carDidComeOnRight = False carDidComeOnTop = False carDidComeOnBottom = False # Collision detection in each lane sideDetection = pygame.sprite.spritecollide(myObj3, simulation, False) laser_event_1 = pygame.sprite.spritecollide(myObj1, simulation, False) for i in laser_event_1: carDidComeOnLeft = True laser_event_2 = pygame.sprite.spritecollide(myObj2, simulation, False) for i in laser_event_2: carDidComeOnTop = True laser_event_3 = pygame.sprite.spritecollide(myObj3, simulation, False) for i in laser_event_3: carDidComeOnRight = True laser_event_4 = pygame.sprite.spritecollide(myObj4, simulation, False) for i in laser_event_4: carDidComeOnBottom = True screen.blit(pirPhoto, (346, 290)) # left lane screen.blit(pirPhoto, (833, 160)) # top lane screen.blit(pirPhoto, (1020, 550)) # right lane screen.blit(pirPhoto, (537, 680)) # bottom lane upHit = pygame.sprite.spritecollide(myObj2, simulation, False) for i in upHit: cars.add(i) # Car detection Status text surf1 = carsDetectedFont.render( f'Cars Present: {"Yes" if carDidComeOnLeft else "No"}', True, f'{"darkgreen" if carDidComeOnLeft else "black"}', ) # left road rect1 = surf1.get_rect(topleft=(150, 260)) screen.blit(surf1, rect1) surf2 = carsDetectedFont.render( f'Cars Present: {"Yes" if carDidComeOnRight else "No"}', True, f'{"darkgreen" if carDidComeOnRight else "black"}', ) # right road rect2 = surf2.get_rect(topleft=(screenWidth - 310, 570)) screen.blit(surf2, rect2) surf3 = carsDetectedFont.render( f'Cars Present: {"Yes" if carDidComeOnBottom else "No"}', True, f'{"darkgreen" if carDidComeOnBottom else "black"}', ) # bottom road rect3 = surf3.get_rect(topleft=(435, 750)) screen.blit(surf3, rect3) surf4 = carsDetectedFont.render( f'Cars Present: {"Yes" if carDidComeOnTop else "No"}', True, f'{"darkgreen" if 
carDidComeOnTop else "black"}', ) # left road rect4 = surf4.get_rect(topleft=(825, 120)) screen.blit(surf4, rect4) # display signal timer and vehicle count for i in range(0, noOfSignals): signalTexts[i] = font.render(str(signals[i].signalText), True, white, black) screen.blit(signalTexts[i], signalTimerCoods[i]) x = signals[i].maximum displayText = vehicles[directionNumbers[i]]["crossed"] vehicleCountTexts[i] = font.render(str(displayText), True, black, white) screen.blit(vehicleCountTexts[i], vehicleCountCoods[i]) timeElapsedText = font.render(("Simulation Time: " + str(timeElapsed)), True, black, white) screen.blit(timeElapsedText, (1100, 50)) laser_group.draw(screen) # comment this to hide the lasers laser_group.update() # comment this to hide the lasers simulation.draw(screen) simulation.update(screen) # Lane time switching Starts here: global nextGreen, currentMaster global leftServiced, rightServiced, topServiced, bottomServiced global oppositeRoad, timeCheck global killLeft, killRight, killTop, killBottom global f1, f2, f3, f4, f5, f6, f7, f8 # left block --------------------------------------------------------------------------------- if (currentGreen == 0 and signals[currentGreen].green <= timeCheck and carDidComeOnRight and f1): # m f1 = False f2 = True signals[currentGreen].green = 0 nextGreen = 2 if (currentGreen == 2 and signals[currentGreen].green <= timeCheck and carDidComeOnLeft and f2): # s f2 = False f3 = True leftServiced = True killLeft = True signals[currentGreen].green = 0 nextGreen = 1 # top block ---------------------------------------------------------------------------------- if (currentGreen == 1 and signals[currentGreen].green <= timeCheck and carDidComeOnBottom and f3): # m f3 = False f4 = True signals[currentGreen].green = 0 nextGreen = 3 if (currentGreen == 3 and signals[currentGreen].green <= timeCheck and carDidComeOnTop and f4): f4 = False f5 = True topServiced = True killTop = True signals[currentGreen].green = 0 nextGreen = 2 # right block --------------------------------------------------------------------------------- if (currentGreen == 2 and signals[currentGreen].green <= timeCheck and carDidComeOnLeft and f5): # m f5 = False f6 = True signals[currentGreen].green = 0 nextGreen = 0 # opposite if (currentGreen == 0 and signals[currentGreen].green <= timeCheck and carDidComeOnRight and f6): f6 = False f7 = True rightServiced = True killRight = True signals[currentGreen].green = 0 nextGreen = 3 # bottom block --------------------------------------------------------------------------------- if (currentGreen == 3 and signals[currentGreen].green <= timeCheck and carDidComeOnTop and f7): f7 = False f8 = True signals[currentGreen].green = 0 nextGreen = 1 if (currentGreen == 1 and signals[currentGreen].green <= timeCheck and carDidComeOnTop and f8): f8 = False bottomServiced = True signals[currentGreen].green = 0 nextGreen = 0 # reset block --- once our lanes are serviced we reset everything and start from the right lane once more and so on --- if leftServiced and topServiced and rightServiced and bottomServiced: leftServiced = False rightServiced = False topServiced = False bottomServiced = False f1 = True f2 = False f3 = False f4 = False f5 = False f6 = False f7 = False f8 = False pygame.display.update() for i in simulation: i.move() if __name__ == "__main__": main()
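# A standalone sketch of the detection mechanism used above, separate from the simulation
# itself: each "PIR ray" is just a sprite rectangle, and `pygame.sprite.spritecollide` reports
# which vehicle sprites currently overlap it. The coordinates below are illustrative only.

# +
import pygame

class Box(pygame.sprite.Sprite):
    """A bare rectangle sprite standing in for a sensor ray or a vehicle."""
    def __init__(self, x, y, w, h):
        super().__init__()
        self.rect = pygame.Rect(x, y, w, h)

ray = Box(360, 330, 10, 100)                         # shaped like the left-lane laser above
nearby = pygame.sprite.Group(Box(355, 350, 40, 20),  # overlaps the ray -> detected
                             Box(900, 350, 40, 20))  # far away -> not detected
hits = pygame.sprite.spritecollide(ray, nearby, False)
print("Cars Present:", "Yes" if hits else "No")      # -> Yes
# -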
Jupyter Notebooks/MF_Simulation_PIR.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="4oDa_k-bJ8OW" # # Building a QA System with BERT on Wikipedia # > A high-level code walk-through of an IR-based QA system with PyTorch and Hugging Face. # # - toc: true # - badges: true # - comments: true # - categories: [PyTorch, Hugging Face, Wikipedia, BERT, Transformers] # + [markdown] id="Wn_8e_uVJ8OY" # ![](my_icons/markus-spiske-C0koz3G1I4I-unsplash.jpg "Image by <NAME> at Unsplash.com") # # # So you've decided to build a QA system # # You want to start with something simple and general, so you plan to make it open domain, using Wikipedia as a corpus for answering questions. You want to use the best NLP that your compute resources allow (you're lucky enough to have access to a GPU), so you're going to focus on the big, flashy Transformer models that are all the rage these days. # # Sounds like you're building an IR-based QA system. In our previous post ([Intro to Automated Question Answering](https://qa.fastforwardlabs.com/methods/background/2020/04/28/Intro-to-QA.html)), we covered the general design of these systems, which typically require two main components: the document _retriever_ (a search engine) that selects the n most relevant documents from a large collection, and a document _reader_ that processes these candidate documents in search of an explicit answer span. # # ![](my_icons/QAworkflow.png "IR-based automated question answering workflow") # # # Now we're going to build it! # # This post is chock full of code that walks through our approach. We'll also highlight and clarify some powerful resources (including off-the-shelf models and libraries) that you can use to quickly get going on a QA system of your own. We'll cover all the necessary steps including: # * installing libraries and setting up an environment, # * training a Transformer style model on the SQuAD dataset, # * understanding Hugging Face's run_squad.py training script and output, # * and passing a full Wikipedia article as context for a question. # # # By the end of this post we'll have a working IR-based QA system, with BERT as the document reader and Wikipedia's search engine as the document retriever - a fun toy model that hints at potential real-world use cases. # # This article was originally developed in a Jupyter Notebook and, thanks to [fastpages](https://fastpages.fast.ai/), converted to a blog post. For an interactive environment, click the "Open in Colab" button above (though we note that, due to Colab's system constraints, some of the cells in this notebook might not be fully executable. We'll highlight when this is the case, but don't worry -- you'll still be able to play around with all the fun stuff.) # # Let's get started! # # # # Setting up your virtual environment # A virtual environment is always best practice and we're using `venv` on our workhorse machine. For this project, we'll be using PyTorch, which handles the heavy lifting of deep differentiable learning. If you have a GPU you'll want a PyTorch build that includes CUDA support, though most cells in this notebook will work fine without one. Check out [PyTorch's quick install guide](https://pytorch.org/) to determine the best build for your GPU and OS. 
We'll also be using the [Transformers](https://huggingface.co/transformers/index.html) library, which provides easy-to-use implementations of all the popular Transformer architectures, like BERT. Finally, we'll need the [wikipedia](https://pypi.org/project/wikipedia/) library for easy access and parsing of Wikipedia pages. # + [markdown] id="bIzrwiO-J8OY" # You can recreate our env (with CUDA 9.2 support -- but use the appropriate version for your machine) with the following commands in your command line: # # # ``` bash # $ python3 -m venv myenv # $ source myenv/bin/activate # $ pip install torch==1.5.0+cu92 torchvision==0.6.0+cu92 -f https://download.pytorch.org/whl/torch_stable.html # $ pip install transformers==2.5.1 # $ pip install wikipedia==1.4.0 # ``` # # + [markdown] id="lEZzdtFqJ8OZ" # Note: Our GPU machine sports an older version of CUDA (9.2 -- we're getting around to updating that), so we need to use an older version of PyTorch for the necessary CUDA support. The training script we'll be using requires some specific packages. More recent versions of PyTorch include these packages; however, older versions do not. If you have to work with an older version of PyTorch, you might need to install `TensorboardX` (see the hidden code cell below). # + id="I-9ucC94J8Oa" # collapse-hide # line 69 of `run_squad.py` script shows why you might need to install # tensorboardX if you have an older version of torch try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter # + id="QqjgkYveYTff" import json # !pip install tqdm from tqdm import tqdm json_open = open('/content/data/squad/dev-v2.0.json', 'r') json_load = json.load(json_open) # !pip install googletrans from googletrans import Translator translator = Translator() """ for line in lines: translated = translator.translate(line, dest="ja"); print(line) # English print(translated.text) # Japanese print() #保存 with open("/content/data/squad/train-v2.0_ja.json", mode="w") as json_load: d = json.dumps(d) f.write(d) """ # + [markdown] id="VSFwYaIpoMYs" # ## paragraph(問題文)を日本語化 # # + id="KIgNZDG1iVa_" outputId="7d739d6b-9bb3-453b-e185-04befaa38bcf" colab={"base_uri": "https://localhost:8080/"} for i in tqdm(range(len(json_load['data']))): for j in range(len(json_load['data'][i]["paragraphs"])): json_load['data'][i]["paragraphs"][j]["context"]=translator.translate(json_load['data'][i]["paragraphs"][j]["context"],dest="ja").text # + id="siEiYL3YB_bG" for i in range(len(json_load['data'])): for j in range(len(json_load['data'][i]["paragraphs"])): json_load['data'][i]["paragraphs"][j]["context"]=translator.translate(json_load['data'][i]["paragraphs"][j]["context"],dest="ja").text # + id="jpVn1gfwE3lV" # + [markdown] id="DNgJj0sfst01" # ## Questionを日本語化 # + id="8Vi3SA9OJ8Ol" outputId="6103dfe4-6d7a-403b-c482-c358e8dc761f" colab={"base_uri": "https://localhost:8080/"} # set path with magic # %env DATA_DIR=./data/squad # download the data def download_squad(version=1): if version == 1: # !wget -P $DATA_DIR https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json # !wget -P $DATA_DIR https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json else: # !wget -P $DATA_DIR https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json # !wget -P $DATA_DIR https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json download_squad(version=2) # + id="fjzddQrar0W8" outputId="8d3f292c-e0de-4c06-eaa6-d8ed250d8c1f" colab={"base_uri": "https://localhost:8080/", "height": 232} for i in 
range(len(json_load['data'])): for j in range(len(json_load['data'][i]["paragraphs"])): json_load['data'][i]["paragraphs"][j]["context"]=translator.translate(json_load['data'][i]["paragraphs"][j]["context"],dest="ja").text # + id="HDoFlxMdpi5G" outputId="12b11a42-2ba5-4b81-ef77-2ceb86235954" colab={"base_uri": "https://localhost:8080/"} json_load['data'][i]["paragraphs"][j]['qas'] # + id="_qaet6keslV2" # + id="IkkkXUOZln28" # + id="ByAWtYdJjxQP" """ for line in lines: translated = translator.translate(line, dest="ja"); print(line) # English print(translated.text) # Japanese print() #保存 with open("/content/data/squad/train-v2.0_ja.json", mode="w") as json_load: d = json.dumps(d) f.write(d) """ # + [markdown] id="Hedhc00oJ8Of" # Conversely, if you're working in Colab, you can run the cell below. # + id="AemXZWpEJ8Og" outputId="800b9987-6971-475b-a5d4-a7<PASSWORD>" colab={"base_uri": "https://localhost:8080/"} # !pip install torch torchvision -f https://download.pytorch.org/whl/torch_stable.html # !pip install transformers # !pip install wikipedia # + [markdown] id="B0C1ZkiuJ8Ok" # # Hugging Face Transformers # The [Hugging Face Transformers](https://huggingface.co/transformers/#) package provides state-of-the-art general-purpose architectures for natural language understanding and natural language generation. They host dozens of pre-trained models operating in over 100 languages that you can use right out of the box. All of these models come with deep interoperability between PyTorch and Tensorflow 2.0, which means you can move a model from TF2.0 to PyTorch and back again with just a line or two of code! # # # If you're new to Hugging Face, we strongly recommend working through the HF [Quickstart guide](https://huggingface.co/transformers/quickstart.html) as well as their excellent [Transformer Notebooks](https://huggingface.co/transformers/notebooks.html) (we did!), as we won't cover that material in this notebook. We'll be using [`AutoClasses`](https://huggingface.co/transformers/model_doc/auto.html), which serve as a wrapper around pretty much any of the base Transformer classes. # + [markdown] id="YV5bGuAqJ8Ok" # ## Fine-tuning a Transformer model for Question Answering # # To train a Transformer for QA with Hugging Face, we'll need # 1. to pick a specific model architecture, # 2. a QA dataset, and # 3. the training script. # # With these three things in hand we'll then walk through the fine-tuning process. # # ### 1. Pick a Model # Not every Transformer architecture lends itself naturally to the task of question answering. For example, GPT does not do QA; similarly BERT does not do machine translation. HF identifies the following model types for the QA task: # # - BERT # - distilBERT # - ALBERT # - RoBERTa # - XLNet # - XLM # - FlauBERT # # # We'll stick with the now-classic BERT model in this notebook, but feel free to try out some others (we will - and we'll let you know when we do). Next up: a training set. # # # ### 2. QA dataset: SQuAD # One of the most canonical datasets for QA is the Stanford Question Answering Dataset, or SQuAD, which comes in two flavors: SQuAD 1.1 and SQuAD 2.0. These reading comprehension datasets consist of questions posed on a set of Wikipedia articles, where the answer to every question is a segment (or span) of the corresponding passage. In SQuAD 1.1, all questions have an answer in the corresponding passage. SQuAD 2.0 steps up the difficulty by including questions that cannot be answered by the provided passage. 
# # The following code will download the specified version of SQuAD. # + [markdown] id="qHkYxVPUJ8Oq" # ### 3. Fine-tuning script # # We've chosen a model and we've got some data. Time to train! # # All the standard models that HF supports have been pre-trained, which means they've all been fed massive unsupervised training sets in order to learn basic language modeling. In order to perform well at specific tasks (like question answering), they must be trained further -- fine-tuned -- on specific datasets and tasks. # # # HF helpfully provides a script that fine-tunes a Transformer model on one of the SQuAD datasets, called `run_squad.py`. You can grab the script [here](https://github.com/huggingface/transformers/blob/master/examples/question-answering/run_squad.py) or run the cell below. # + id="kQLbf5ksJ8Oq" outputId="5a9f5db2-e27f-469b-f8a1-<PASSWORD>" colab={"base_uri": "https://localhost:8080/"} # download the run_squad.py training script # !curl -L -O https://raw.githubusercontent.com/huggingface/transformers/master/examples/question-answering/run_squad.py # + [markdown] id="_ITuEQz2J8Ot" # This script takes care of all the hard work that goes into fine-tuning a model and, as such, it's pretty complicated. It hosts no fewer than 45 arguments, providing an impressive amount of flexibility and utility for those who do a lot of training. We'll leave the details of this script for another day, and focus instead on the basic command to fine-tune BERT on SQuAD 1.1 or 2.0. # # Below are the most important arguments for the `run_squad.py` fine-tuning script. # + id="2aLgtAieJ8Ou" # fine-tuning your own model for QA using HF's `run_squad.py` # turn flags on and off according to the model you're training cmd = [ 'python', # '-m torch.distributed.launch --nproc_per_node 2', # use this to perform distributed training over multiple GPUs 'run_squad.py', '--model_type', 'bert', # model type (one of the list under "Pick a Model" above) '--model_name_or_path', 'bert-base-uncased', # specific model name of the given model type (shown, a list is here: https://huggingface.co/transformers/pretrained_models.html) # on first execution this initiates a download of pre-trained model weights; # can also be a local path to a directory with model weights '--output_dir', './models/bert/bbu_squad2', # directory for model checkpoints and predictions # '--overwrite_output_dir', # use when adding output to a directory that is non-empty -- # for instance, when training crashes midway through and you need to restart it '--do_train', # execute the training method '--train_file', '/content/data/squad/train-v2.0.json', # provide the training data '--version_2_with_negative', # ** MUST use this flag if training on SQuAD 2.0! 
DO NOT use if training on SQuAD 1.1 '--do_lower_case', # ** set this flag if using an uncased model; don't use for Cased Models '--do_eval', # execute the evaluation method on the dev set -- note: # if coupled with --do_train, evaluation runs after fine-tuning '--predict_file', '/content/data/squad/dev-v2.0.json', # provide evaluation data (dev set) '--eval_all_checkpoints', # evaluate the model on the dev set at each checkpoint '--per_gpu_eval_batch_size', '12', # evaluation batch size for each gpu '--per_gpu_train_batch_size', '12', # training batch size for each gpu '--save_steps', '5000', # how often checkpoints (complete model snapshot) are saved '--threads', '8', # num of CPU threads to use for converting SQuAD examples to model features # --- Model and Feature Hyperparameters --- '--num_train_epochs', '3', # number of training epochs - usually 2-3 for SQuAD '--learning_rate', '3e-5', # learning rate for the default optimizer (Adam in this case) '--max_seq_length', '384', # maximum length allowed for the full input sequence '--doc_stride', '128' # used for long documents that must be chunked into multiple features -- # this "sliding window" controls the amount of stride between chunks ] # + [markdown] id="_UUqH_Q8J8Ox" # Here's what to expect when executing `run_squad.py` for the first time: # # 1. Pre-trained model weights for the specified model type (i.e., `bert-base-uncased`) are downloaded. # 2. SQuAD training examples are converted into features (takes 15-30 minutes depending on dataset size and number of threads). # 3. Training features are saved to a cache file (so that you don't have to do this again *for this model type*). # 4. If `--do_train`, training commences for as many epochs as you specify, saving the model weights every `--save_steps` steps until training finishes. These checkpoints are saved in `[--output_dir]/checkpoint-[step number]` subdirectories. # 5. The final model weights and peripheral files are saved to `--output_dir`. # 6. If `--do_eval`, SQuAD dev examples are converted into features. # 7. Dev features are also saved to a cache file. # 8. Evaluation commences and outputs a dizzying assortment of performance scores. # # # ### Time to train! # But first, a note on compute requirements. We don't recommend fine-tuning a Transformer model unless you're rocking at least one GPU and a considerable amount of RAM. For context, our GPU is several years old (GeForce GTX TITAN X), and while it's not nearly as fast as the Tesla V100 (the current Cadillac of GPUs), it gets the job done. Fine-tuning `bert-base-uncased` takes about 1.75 hours _per epoch_. Additionally, our workhorse machine has 32GB CPU and 12GB GPU memory, which is sufficient for data processing and training most models on either of the SQuAD datasets. # # The following cells demonstrate two ways to fine-tune: on the command line and in a Colab notebook. # + [markdown] id="XnCfPqKCJ8Oz" # #### Training on the command line # We saved the following as a shell script (`run_squad.sh`) and ran on the command line (`$ source run_squad.sh`) of our workhorse GPU machine. Shell scripts help prevent numerous mistakes and mis-keys when typing args to a command line, especially for complex scripts like this. They also allow you to keep track of which arguments were used last (though, as we'll see below, the `run_squad.py` script has a solution for that). We actually kept two shell scripts -- one explicitly for training and another for evaluation. 
# + [markdown] id="YylyKTLdJ8Oz" # ```bash # # # #!/bin/sh # export DATA_DIR=./data/squad # export MODEL_DIR=./models # python run_squad.py \ # --model_type bert \ # --model_name_or_path bert-base-uncased \ # --output_dir models/bert/ \ # --data_dir data/squad \ # --overwrite_output_dir \ # --overwrite_cache \ # --do_train \ # --train_file train-v2.0.json \ # --version_2_with_negative \ # --do_lower_case \ # --do_eval \ # --predict_file dev-v2.0.json \ # --per_gpu_train_batch_size 2 \ # --learning_rate 3e-5 \ # --num_train_epochs 2.0 \ # --max_seq_length 384 \ # --doc_stride 128 \ # --threads 10 \ # --save_steps 5000 # ``` # + [markdown] id="WvnnJ28MJ8O0" # #### Training in Colab # Alternatively, you can execute training in the cell as shown below. We note that standard Colab environments only provide 12GB of RAM. Converting the SQuAD dataset to features is memory intensive and may cause the basic Colab environment to fail silently. If you have a Colab instance with additional memory capacity (16GB+), this cell should execute fully. # + id="Ap1o7wKDJ8O1" # !python run_squad.py \ # --model_type bert \ # --model_name_or_path bert-base-uncased \ # --output_dir models/bert/ \ # --data_dir data/squad \ # --overwrite_output_dir \ # --overwrite_cache \ # --do_train \ # --train_file train-v2.0.json \ # --version_2_with_negative \ # --do_lower_case \ # --do_eval \ # --predict_file dev-v2.0.json \ # --per_gpu_train_b-atch_size 2 \ # --learning_rate 3e-5 \ # --num_train_epochs 2.0 \ # --max_seq_length 384 \ # --doc_stride 128 \ # --threads 10 \ # --save_steps 5000 # + id="iRmd-dFjJ8O5" # hide # Execute the training from a standard Jupyter Notebook from subprocess import PIPE, STDOUT, Popen # Live output from run_squad.py is through stderr (rather than stdout). # The following command runs the process and ports stderr to stdout p = Popen(cmd, stdout=PIPE, stderr=STDOUT) # Default behavior when using bash cells in jupyter is that you won't see the live output in the cell # -- you can only see output once the entire process has finished and then you get it all at once. # This is less than ideal when training models that can take hours or days of compute time! # This command combined with the above allows you to see the live output feed in the notebook, # though it's a bit asynchronous. for line in iter(p.stdout.readline, b''): print(">>> " + line.decode().rstrip()) # + [markdown] id="WbYh24rCJ8O8" # ### Training Output # # Successful completion of the `run_squad.py` yields a slew of output, which can be found in the `--output_dir` directory specified above. There you'll find... # # Files for the model's tokenizer: # * `tokenizer_config.json` # * `vocab.txt` # * `special_tokens_map.json` # # Files for the model itself: # * `pytorch_model.bin`: these are the actual model weights (this file can be several GB for some models) # * `config.json`: details of the model architecture # # Binary representation of the command line arguments used to train this model (so you'll never forget which arguments you used!) # * `training_args.bin` # # And if you included `--do_eval`, you'll also see these files: # * `predictions_.json`: the official best answer for each example # * `nbest_predictions_.json`: the top n best answers for each example # # # Providing the path to this directory to `AutoModel` or `AutoModelForQuestionAnswering` will load your fine-tuned model for use. 
# + id="3z18ZUZ2J8O9" from transformers import AutoTokenizer, AutoModelForQuestionAnswering # Load the fine-tuned model tokenizer = AutoTokenizer.from_pretrained("./models/bert/bbu_squad2") model = AutoModelForQuestionAnswering.from_pretrained("./models/bert/bbu_squad2") # + [markdown] id="0udYlib9J8PA" # ## Using a pre-fine-tuned model from the Hugging Face repository # If you don't have access to GPUs or don't have the time to fiddle and train models, you're in luck! Hugging Face is more than a collection of slick Transformer classes -- it also hosts [a repository](https://huggingface.co/models) for pre-trained and fine-tuned models contributed from the wide community of NLP practitioners. Searching for "squad" brings up at least 55 models. # # ![](https://github.com/fastforwardlabs/ff14_blog/blob/master/_notebooks/my_icons/HF_repo.png?raw=1) # # # Each of these links provides explicit code for using the model, and, in some cases, information on how it was trained and what results were achieved. Let's load one of these pre-fine-tuned models. # + id="bAXu3eJRJ8PA" "import torch "from transformers import AutoTokenizer, AutoModelForQuestionAnswering # executing these commands for the first time initiates a download of the # model weights to ~/.cache/torch/transformers/ #tokenizer = AutoTokenizer.from_pretrained("deepset/bert-base-cased-squad2") #model = AutoModelForQuestionAnswering.from_pretrained("deepset/bert-base-cased-squad2") # + [markdown] id="bwFkh5OCJ8PD" # ## Let's try our model! # # Whether you fine-tuned your own or used a pre-fine-tuned model, it's time to play with it! There are three steps to QA: # 1. tokenize the input # 2. obtain model scores # 3. get the answer span # # These steps are discussed in detail in the HF [Transformer Notebooks](https://huggingface.co/transformers/notebooks.html). # + id="TQEzAri2J8PD" outputId="0f38c55d-79c5-440a-c7fa-7051064a6aec" question = "Who ruled Macedonia" context = """Macedonia was an ancient kingdom on the periphery of Archaic and Classical Greece, and later the dominant state of Hellenistic Greece. The kingdom was founded and initially ruled by the Argead dynasty, followed by the Antipatrid and Antigonid dynasties. Home to the ancient Macedonians, it originated on the northeastern part of the Greek peninsula. Before the 4th century BC, it was a small kingdom outside of the area dominated by the city-states of Athens, Sparta and Thebes, and briefly subordinate to Achaemenid Persia.""" # 1. TOKENIZE THE INPUT # note: if you don't include return_tensors='pt' you'll get a list of lists which is easier for # exploration but you cannot feed that into a model. inputs = tokenizer.encode_plus(question, context, return_tensors="pt") # 2. OBTAIN MODEL SCORES # the AutoModelForQuestionAnswering class includes a span predictor on top of the model. # the model returns answer start and end scores for each word in the text answer_start_scores, answer_end_scores = model(**inputs) answer_start = torch.argmax(answer_start_scores) # get the most likely beginning of answer with the argmax of the score answer_end = torch.argmax(answer_end_scores) + 1 # get the most likely end of answer with the argmax of the score # 3. GET THE ANSWER SPAN # once we have the most likely start and end tokens, we grab all the tokens between them # and convert tokens back to words! 
tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(inputs["input_ids"][0][answer_start:answer_end])) # + [markdown] id="V_lU8NaqJ8PG" # # QA on Wikipedia pages # We tried our model on a question paired with a short passage, but what if we want to retrieve an answer from a longer document? A typical Wikipedia page is much longer than the example above, and we need to do a bit of massaging before we can use our model on longer contexts. # # Let's start by pulling up a Wikipedia page. # + id="8NjxLa2QJ8PH" outputId="ecfd720a-5ac1-4cdc-bc75-d9189dea0af4" import wikipedia as wiki import pprint as pp question = 'What is the wingspan of an albatross?' results = wiki.search(question) print("Wikipedia search results for our question:\n") pp.pprint(results) page = wiki.page(results[0]) text = page.content print(f"\nThe {results[0]} Wikipedia article contains {len(text)} characters.") # + id="VBF-CtbvJ8PK" outputId="8ff9b7c1-4ed4-4603-a1ab-d607202b3587" inputs = tokenizer.encode_plus(question, text, return_tensors='pt') print(f"This translates into {len(inputs['input_ids'][0])} tokens.") # + [markdown] id="avRWFjyNJ8PN" # The tokenizer takes the input as text and returns tokens. In general, tokenizers convert words or pieces of words into a model-ingestible format. The specific tokens and format are dependent on the type of model. For example, BERT tokenizes words differently from RoBERTa, so be sure to always use the associated tokenizer appropriate for your model. # # In this case, the tokenizer converts our input text into 8824 tokens, but this far exceeds the maximum number of tokens that can be fed to the model at one time. Most BERT-esque models can only accept 512 tokens at once, thus the (somewhat confusing) warning above (how is 10 > 512?). This means we'll have to split our input into chunks and each chunk must not exceed 512 tokens in total. # # When working with Question Answering, it's crucial that each chunk follows this format: # # [CLS] question tokens [SEP] context tokens [SEP] # # This means that, for each segment of a Wikipedia article, we must prepend the original question, followed by the next "chunk" of article tokens. # # # + id="cVqg4VdwJ8PO" outputId="407a59f7-3393-4d86-a47f-54196b0b2ad8" # time to chunk! 
from collections import OrderedDict # identify question tokens (token_type_ids = 0) qmask = inputs['token_type_ids'].lt(1) qt = torch.masked_select(inputs['input_ids'], qmask) print(f"The question consists of {qt.size()[0]} tokens.") chunk_size = model.config.max_position_embeddings - qt.size()[0] - 1 # the "-1" accounts for # having to add a [SEP] token to the end of each chunk print(f"Each chunk will contain {chunk_size - 2} tokens of the Wikipedia article.") # create a dict of dicts; each sub-dict mimics the structure of pre-chunked model input chunked_input = OrderedDict() for k,v in inputs.items(): q = torch.masked_select(v, qmask) c = torch.masked_select(v, ~qmask) chunks = torch.split(c, chunk_size) for i, chunk in enumerate(chunks): if i not in chunked_input: chunked_input[i] = {} thing = torch.cat((q, chunk)) if i != len(chunks)-1: if k == 'input_ids': thing = torch.cat((thing, torch.tensor([102]))) else: thing = torch.cat((thing, torch.tensor([1]))) chunked_input[i][k] = torch.unsqueeze(thing, dim=0) # + id="HhNG48feJ8PQ" outputId="a59f150f-462c-4c7f-ac82-e5dc3557c08d" for i in range(len(chunked_input.keys())): print(f"Number of tokens in chunk {i}: {len(chunked_input[i]['input_ids'].tolist()[0])}") # + [markdown] id="_16eZT2iJ8PU" # Each of these chunks (except for the last one) has the following structure: # # [CLS], 12 question tokens, [SEP], 497 tokens of the Wikipedia article, [SEP] token = 512 tokens # # Each of these chunks can now be fed to the model without causing indexing errors. We'll get an "answer" for each chunk; however, not all answers are useful, since not every segment of a Wikipedia article is informative for our question. The model will return the [CLS] token when it determines that the context does not contain an answer to the question. # + id="uxsDpsbfJ8PV" outputId="a103e6ba-3928-444e-efea-1ac022bdaaa5" def convert_ids_to_string(tokenizer, input_ids): return tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(input_ids)) answer = '' # now we iterate over our chunks, looking for the best answer from each chunk for _, chunk in chunked_input.items(): answer_start_scores, answer_end_scores = model(**chunk) answer_start = torch.argmax(answer_start_scores) answer_end = torch.argmax(answer_end_scores) + 1 ans = convert_ids_to_string(tokenizer, chunk['input_ids'][0][answer_start:answer_end]) # if the ans == [CLS] then the model did not find a real answer in this chunk if ans != '[CLS]': answer += ans + " / " print(answer) # + [markdown] id="2Gujs0-DJ8Pc" # # Putting it all together # # Let's recap. We've essentially built a simple IR-based QA system! We're using `wikipedia`'s search engine to return a list of candidate documents that we then feed into our document reader (in this case, BERT fine-tuned on SQuAD 2.0). Let's make our code easier to read and more self-contained by packaging the document reader into a class. 
# + id="VgXGzWLTJ8Pd" from transformers import AutoTokenizer, AutoModelForQuestionAnswering class DocumentReader: def __init__(self, pretrained_model_name_or_path='bert-large-uncased'): self.READER_PATH = pretrained_model_name_or_path self.tokenizer = AutoTokenizer.from_pretrained(self.READER_PATH) self.model = AutoModelForQuestionAnswering.from_pretrained(self.READER_PATH) self.max_len = self.model.config.max_position_embeddings self.chunked = False def tokenize(self, question, text): self.inputs = self.tokenizer.encode_plus(question, text, add_special_tokens=True, return_tensors="pt") self.input_ids = self.inputs["input_ids"].tolist()[0] if len(self.input_ids) > self.max_len: self.inputs = self.chunkify() self.chunked = True def chunkify(self): """ Break up a long article into chunks that fit within the max token requirement for that Transformer model. Calls to BERT / RoBERTa / ALBERT require the following format: [CLS] question tokens [SEP] context tokens [SEP]. """ # create question mask based on token_type_ids # value is 0 for question tokens, 1 for context tokens qmask = self.inputs['token_type_ids'].lt(1) qt = torch.masked_select(self.inputs['input_ids'], qmask) chunk_size = self.max_len - qt.size()[0] - 1 # the "-1" accounts for # having to add an ending [SEP] token to the end # create a dict of dicts; each sub-dict mimics the structure of pre-chunked model input chunked_input = OrderedDict() for k,v in self.inputs.items(): q = torch.masked_select(v, qmask) c = torch.masked_select(v, ~qmask) chunks = torch.split(c, chunk_size) for i, chunk in enumerate(chunks): if i not in chunked_input: chunked_input[i] = {} thing = torch.cat((q, chunk)) if i != len(chunks)-1: if k == 'input_ids': thing = torch.cat((thing, torch.tensor([102]))) else: thing = torch.cat((thing, torch.tensor([1]))) chunked_input[i][k] = torch.unsqueeze(thing, dim=0) return chunked_input def get_answer(self): if self.chunked: answer = '' for k, chunk in self.inputs.items(): answer_start_scores, answer_end_scores = self.model(**chunk) answer_start = torch.argmax(answer_start_scores) answer_end = torch.argmax(answer_end_scores) + 1 ans = self.convert_ids_to_string(chunk['input_ids'][0][answer_start:answer_end]) if ans != '[CLS]': answer += ans + " / " return answer else: answer_start_scores, answer_end_scores = self.model(**self.inputs) answer_start = torch.argmax(answer_start_scores) # get the most likely beginning of answer with the argmax of the score answer_end = torch.argmax(answer_end_scores) + 1 # get the most likely end of answer with the argmax of the score return self.convert_ids_to_string(self.inputs['input_ids'][0][ answer_start:answer_end]) def convert_ids_to_string(self, input_ids): return self.tokenizer.convert_tokens_to_string(self.tokenizer.convert_ids_to_tokens(input_ids)) # + [markdown] id="LJNk2rZVJ8Pf" # Below is our clean, fully working QA system! Feel free to add your own questions. # + id="H8U1p9NRJ8Pg" # collapse-hide # to make the following output more readable I'll turn off the token sequence length warning import logging logging.getLogger("transformers.tokenization_utils").setLevel(logging.ERROR) # + id="jj4ZOgEoJ8Pi" outputId="18b7d51e-ea8e-4787-a3d1-f31bbc26990a" questions = [ 'When was <NAME> born?', 'Why is the sky blue?', 'How many sides does a pentagon have?' 
] reader = DocumentReader("deepset/bert-base-cased-squad2") # if you trained your own model using the training cell earlier, you can access it with this: #reader = DocumentReader("./models/bert/bbu_squad2") for question in questions: print(f"Question: {question}") results = wiki.search(question) page = wiki.page(results[0]) print(f"Top wiki result: {page}") text = page.content reader.tokenize(question, text) print(f"Answer: {reader.get_answer()}") print() # + [markdown] id="s0_Kb0b-J8Pk" # It got 2 out of 3 questions right! # # Notice that, at least for the current questions we've chosen, the QA system fails because of Wikipedia's default search engine, not because of BERT! It pulls up the wrong page for two of our questions: a page about Bar<NAME>ama Sr. instead of the former US President, and an article about the US's Department of Defense building "The Pentagon" instead of a page about geometry. In the latter case, we ended up with the correct answer by coincidence! This illustrates that any successful IR-based QA system requires a search engine (document retriever) as good as the document reader. # # + [markdown] id="_gVjinwNJ8Pl" # # Wrapping Up # # There we have it! A working QA system on Wikipedia articles. This is great, but it's admittedly not very sophisticated. Furthermore, we've left a lot of questions unanswered: # # 1. Why fine-tune on the SQuAD dataset and not something else? What other options are there? # 2. How good is BERT at answering questions? And how do we define "good"? # 3. Why BERT and not another Transformer model? # 4. Currently, our QA system can return an answer for each chunk of a Wiki article, but not all of those answers are correct -- How can we improve our `get_answer` method? # 5. Additionally, we're chunking a wiki article in such a way that we could be ending a chunk in the middle of a sentence -- Can we improve our `chunkify` method? # # # Over the course of this project, we'll tackle these questions and more. By the end of this series we hope to demonstrate a snazzier QA model that incorporates everything we learn along the way. Stay tuned! # + id="l1v4ntQjJ8Pl"
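# One possible direction for question 4 in the wrap-up (a sketch, not the post's method;
# the helper name `get_best_answer` is ours): rather than concatenating every non-[CLS]
# answer across chunks, keep only the span with the highest combined start/end score.
import torch
def get_best_answer(reader):
    chunks = reader.inputs if reader.chunked else {0: reader.inputs}
    best_score, best_answer = float("-inf"), ""
    for _, chunk in chunks.items():
        start_scores, end_scores = reader.model(**chunk)
        score = (torch.max(start_scores) + torch.max(end_scores)).item()
        start = torch.argmax(start_scores)
        end = torch.argmax(end_scores) + 1
        ans = reader.convert_ids_to_string(chunk["input_ids"][0][start:end])
        if ans != "[CLS]" and score > best_score:
            best_score, best_answer = score, ans
    return best_answer
# example usage (after reader.tokenize(question, text)): print(get_best_answer(reader))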
chatbot/qa.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # A short example of ctaplot functions # + pycharm={"is_executing": false} import ctaplot import numpy as np import matplotlib.pyplot as plt import astropy.units as u # + pycharm={"is_executing": false} ctaplot.set_style('slides') # - # ## Generate some dummy data # + pycharm={"is_executing": false} size = 1000 simu_energy = 10**np.random.uniform(-2, 2, size) * u.TeV reco_energy = simu_energy.value**0.9 * simu_energy.unit source_alt = 3. * u.rad source_az = 1.5 * u.rad simu_alt = source_alt * np.ones(size) simu_az = source_az * np.ones(size) reco_alt = np.random.normal(loc=source_alt.to_value(u.rad), scale=2e-3, size=size) * u.rad reco_az = np.random.normal(loc=source_az.to_value(u.rad)-0.005, scale=2e-3, size=size) * u.rad # - # ## Position reconstruction # + pycharm={"is_executing": false} # ctaplot.plot_field_of_view_map(reco_alt, reco_az, source_alt, source_az); # + pycharm={"is_executing": false} fig, axes = plt.subplots(1, 2, figsize=(20,5)) ctaplot.plot_theta2(reco_alt, reco_az, simu_alt, simu_az, bins=40, ax=axes[0]) ctaplot.plot_angular_resolution_per_energy(reco_alt, reco_az, simu_alt, simu_az, simu_energy, ax=axes[1]) ctaplot.plot_angular_resolution_cta_requirement('south', ax=axes[1], color='black') axes[1].legend(); plt.show() # - # Ok, the position is really not well reconstructed. # But this is actually because of a bias in the reconstruction. We can ask for an automatic correction of this bias. # + pycharm={"is_executing": false} fig, axes = plt.subplots(1, 2, figsize=(20,5)) ctaplot.plot_theta2(reco_alt, reco_az, simu_alt, simu_az, bins=40, bias_correction=True, ax=axes[0]) ctaplot.plot_angular_resolution_per_energy(reco_alt, reco_az, simu_alt, simu_az, simu_energy, bias_correction=True, ax=axes[1]) ctaplot.plot_angular_resolution_cta_requirement('south', ax=axes[1], color='black') axes[1].legend() plt.show() # - # Now the angular resolution looks better, in agreement with the input scale of the Gaussian distribution. # ## Energy reconstruction # + pycharm={"is_executing": false} plt.figure(figsize=(12,7)) ax = ctaplot.plot_energy_resolution(simu_energy, reco_energy) ctaplot.plot_energy_resolution_cta_requirement('north', ax=ax) ax.legend() plt.show() # - # **But you might want to study the energy resolution as a function of another variable... # or to compute the resolution of other stuff** # + pycharm={"is_executing": false} new_variable = simu_energy * 2 bins, res = ctaplot.resolution_per_bin(new_variable, simu_energy, reco_energy, bins=np.logspace(-2,2,10)*u.TeV, relative_scaling_method='s1') # + pycharm={"is_executing": false} ax = ctaplot.plot_energy_resolution(simu_energy, reco_energy) ctaplot.plot_resolution(bins, res, label='new resolution', ax=ax, log=True) ax.legend() plt.show() # + pycharm={"is_executing": false}
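# The same helpers also work for any other binning variable (a quick sketch that only
# reuses the calls shown above): here the resolution is binned in the reconstructed
# energy instead of `new_variable`.
bins_reco, res_reco = ctaplot.resolution_per_bin(reco_energy, simu_energy, reco_energy,
                                                 bins=np.logspace(-2, 2, 10)*u.TeV,
                                                 relative_scaling_method='s1')
ax = ctaplot.plot_energy_resolution(simu_energy, reco_energy)
ctaplot.plot_resolution(bins_reco, res_reco, label='binned in reco energy', ax=ax, log=True)
ax.legend()
plt.show()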
examples/notebooks/resolution_examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from sklearn import preprocessing import numpy as np X = np.array([ [0., 0., 5., 13., 9., 1.], [0., 0., 13., 15., 10., 15.], [0., 3., 15., 2., 0., 11.] ]) print(preprocessing.scale(X))
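# A quick check (not part of the original snippet): after scaling, each column of the
# result should have mean ~0 and standard deviation ~1.
X_scaled = preprocessing.scale(X)
print(X_scaled.mean(axis=0))
print(X_scaled.std(axis=0))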
chapter04/e2-ch4-s2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import argparse import numpy as np import os import tabulate import torch import torch.nn.functional as F import torch.nn import data import models import curves import utils import pickle # + import numpy as np import matplotlib.mlab as mlab import matplotlib.pyplot as plt from scipy.stats import norm import utils import time from torch import nn import seaborn as sns from sklearn.manifold import TSNE # - # VGG16 # # Computing mean distance. # # Initialization for the layer in NN: # # $W_i \sim U(-\sqrt{k}, \sqrt{k})$ # where $k = \frac 1 {N_{in}}$ # # $N_{in}$ - nubmer of input parameters for the weights. # # $N_{w}$ - numver of weights in the layer. # # mean square dist between two layers in Linear layer: # # $E [dist^2(L^1, L^2)] = E\sum_i (L_1^i-L_i^2)^2 = N_{w} (2 E (L_i^1)^2) = 2 N_{w} \frac{ \left (2 \sqrt k \right )^2} {12} = \frac {2 N_{w}} {3 N_{in}}$ # # for convolution layer: # # $E [dist^2(L^1, L^2)] = E\sum_i (L_1^i-L_i^2)^2 = N_{w} (2 E (L_i^1)^2) = 2 N_{w} \left (\sqrt \frac 2 {n} \right )$ # # where n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels # + architecture = getattr(models, "LogRegression") model = architecture.base(num_classes=10, **architecture.kwargs) # - loaders, num_classes = data.loaders( "MNIST", "data", 128, 1, "VGG", True) # + # distance for VGG16 network: DIST = 0 i=1 for m in model.modules(): if isinstance(m, torch.nn.Conv2d): print(i, m) i+=1 N_in = m.in_channels*np.prod(m.kernel_size) n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels N_w = np.prod(m.weight.shape) # +np.prod(m.bias.shape) print('N_in', N_in) print('N_w', N_w) DIST+=2*N_w*2/n if isinstance(m, torch.nn.Linear): print(i, m) i+=1 N_in = m.in_features N_w = np.prod(m.weight.shape)+np.prod(m.bias.shape) print('N_in', N_in) print('N_w', N_w) DIST+=2/3*float(N_w)/float(N_in) print() print('Distance', (np.sqrt(DIST))) # - def distance(model1, model2): par1 = np.concatenate([p.data.cpu().numpy().ravel() for p in model1.parameters()]) par2 = np.concatenate([p.data.cpu().numpy().ravel() for p in model2.parameters()]) u = par2 - par1 dx = np.linalg.norm(u) return dx # + architecture = getattr(models, "LogRegression") model1 = architecture.base(num_classes=10, **architecture.kwargs) model2 = architecture.base(num_classes=10, **architecture.kwargs) model3 = architecture.base(num_classes=10, **architecture.kwargs) model4 = architecture.base(num_classes=10, **architecture.kwargs) model1.load_state_dict(torch.load('curves_mnist/LogRegression/curve1/checkpoint-0.pt')['model_state']) model2.load_state_dict(torch.load('curves_mnist/LogRegression/curve3/checkpoint-0.pt')['model_state']) # - def samples(model): p1 = list(model.parameters())[0].data.numpy() p2 = list(model.parameters())[1].data.numpy() samples = np.hstack([p1, p2[:, None]]) return samples list(model1.parameters())[0].shape # + # class LogRegression(nn.Module): # def __init__(self, num_classes, in_dim): # super(LogRegression, self).__init__() # self.fc = nn.Linear(in_dim, num_classes) # def forward(self, x): # x = x.view(x.size(0), -1) # x = self.fc(x) # return x # model2 = LogRegression(10, 784) # model1 = LogRegression(10, 784) # - distance(model1, model2) 2.57/(28*np.sqrt(10)) S_initial1 = samples(model1) S_initial2 = samples(model2) (S_initial1*S_initial2).sum() S_initial1.shape criterion = 
F.cross_entropy regularizer = None model1.load_state_dict(torch.load('curves_mnist/LogRegression/curve1/checkpoint-20.pt')['model_state']) model2.load_state_dict(torch.load('curves_mnist/LogRegression/curve2/checkpoint-20.pt')['model_state']) model1.cpu(); S1 = samples(model1) model2.cpu(); S2 = samples(model2) distance(model1, model2) # + # lr = 0.001 # model1.cuda() # optimizer = torch.optim.SGD( # filter(lambda param: param.requires_grad, model1.parameters()), # lr=lr, # weight_decay=0. # ) # columns = ['ep', 'lr', 'tr_loss', 'tr_acc', 'te_nll', 'te_acc', 'time'] # for epoch in range(0, 10 + 1): # time_ep = time.time() # train_res = utils.train(loaders['train'], model1, optimizer, criterion, regularizer, cuda=True) # test_res = utils.test(loaders['test'], model1, criterion, regularizer, cuda=True) # time_ep = time.time() - time_ep # values = [epoch, lr, train_res['loss'], train_res['accuracy'], test_res['nll'], # test_res['accuracy'], time_ep] # table = tabulate.tabulate([values], columns, tablefmt='simple', floatfmt='9.4f') # print(table) # - sns.kdeplot(S_initial1[:, 1], S_initial1[:, 2], shade=True, cbar=True) sns.kdeplot(S1[:, 1], S1[:, 2], shade=True, cbar=True) sns.kdeplot(S_initial2[:, 1], S_initial2[:, 2], shade=True, cbar=True) sns.kdeplot(S2[:, 1], S2[:, 2], shade=True, cbar=True) def get_importante_weights(S, koef=0.01): mask = np.zeros(785) mask[S[:, :].var(axis=0)>koef]=1 mask = mask[:784].reshape((28, 28)) return mask S1.shape mask1 = get_importante_weights(S1, koef=0.001) mask2 = get_importante_weights(S2, koef=0.001) np.abs(mask1-mask2).sum() def find_masked_distance(mask1, mask2, S_initial1, S_initial2): mask = mask1*mask2 mask = mask.reshape(-1).astype(int) S_initial1, S_initial2 = S_initial1[:, :784], S_initial2[:, :784] S_initial1, S_initial2 = S_initial1[: :]*mask, S_initial2[:, :]*mask dx = S_initial1 - S_initial2 distance = np.sqrt((dx*dx).sum()) return distance a = find_masked_distance(mask1, mask2, S1, S2) a plt.imshow(mask1) plt.show() plt.imshow(mask2) # оюученные распределения в 500 пикселе 1 и 2 модель sns.kdeplot(S1[:, 500], shade=True, cbar=True) sns.kdeplot(S2[:, 500], shade=True, cbar=True) # оюученные распределения в 1 пикселе 1 и 2 модель sns.kdeplot(S1[:, 1], shade=True, cbar=True) sns.kdeplot(S2[:, 1], shade=True, cbar=True) # оюученное и изначальные распределения в 500 пикселе 1 модель sns.kdeplot(S_initial1[:, 500], shade=True, cbar=True) sns.kdeplot(S1[:, 500], shade=True, cbar=True) # оюученное и изначальные распределения в 500 пикселе 2 модель sns.kdeplot(S_initial2[:, 500], shade=True, cbar=True) sns.kdeplot(S2[:, 500], shade=True, cbar=True) # оюученное и изначальные распределения в 1 пикселе 1 модель sns.kdeplot(S1[:, 1], shade=True, cbar=True) sns.kdeplot(S_initial1[:, 1], shade=True, cbar=True) # оюученное и изначальные распределения в 1 пикселе 2 модель sns.kdeplot(S2[:, 1], shade=True, cbar=True) sns.kdeplot(S_initial2[:, 1], shade=True, cbar=True) # изначальные распределения в 1 пикселе sns.kdeplot(S_initial1[:, 1], shade=True, cbar=True) sns.kdeplot(S_initial2[:, 1], shade=True, cbar=True) # совместное распределение 1 модель sns.kdeplot(S1[:, 37], S1[:, 500], shade=True, cbar=True) # совместное распределение 2 модель sns.kdeplot(S2[:, 37], S2[:, 500], shade=True, cbar=True) # ## HeatMap # + from matplotlib import pyplot as plt import torchvision # %matplotlib inline def show_images(model, scale=15, line_width=10): x = list(model.parameters())[0].cpu().data plt.figure(figsize=(scale, scale / line_width * (x.shape[0] // line_width + 1))) 
x = x.view(10, 1, 28, 28) # x = x-x.min() # print(x.max()) # x = x/x.max() # print(x) mtx = torchvision.utils.make_grid(x, nrow=line_width, pad_value=1) plt.imshow(mtx.permute([1, 2, 0]).numpy(), ) plt.axis('off') show_images(model) # - from pylab import rcParams rcParams['figure.figsize'] = 12, 10 rcParams['figure.dpi'] = 100 # + def heat_image(model, scale=15, line_width=10): plt.figure(figsize=(scale, scale / line_width * (10 // line_width + 1))) fig, ax = plt.subplots(1,10) ims = list(model.parameters())[0].cpu().data.numpy() for ind, im in enumerate(ims): im = np.abs(im.reshape((28, 28))) ax[ind].imshow(im) ax[ind].set_title(ind) # plt.colorbar() # plt.axis('off') plt.show() # - heat_image(model1) heat_image(model2) rcParams['figure.figsize'] = 12, 10 rcParams['figure.dpi'] = 50 (pr_im2*pr_im1).sum()/(np.sqrt((pr_im2*pr_im2).sum())*np.sqrt((pr_im1*pr_im1).sum())) # + pr_im1 = list(model1.parameters())[0].cpu().data.numpy()[0] pr_im2 = list(model2.parameters())[0].cpu().data.numpy()[0] # im1 = np.abs(im1.reshape((28, 28))) # im2 = np.abs(im2.reshape((28, 28))) im1 = pr_im1.reshape((28, 28)) im2 = pr_im2.reshape((28, 28)) plt.imshow(im1) plt.colorbar() plt.show() plt.imshow(im2) plt.colorbar() plt.show() # - i, j = 20, 10 im1[i][j], im2[i][j] dx = im1-im2 distance = np.sqrt((dx*dx).sum())/28 distance 0.73/28 import numpy as np np.sqrt(0.73**2*10)
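# A quick empirical check (sketch) of the expected-distance formula derived at the top of
# this notebook. It assumes PyTorch's default nn.Linear initialization, which is uniform on
# (-sqrt(1/N_in), sqrt(1/N_in)) for both weights and biases.
import numpy as np
from torch import nn
l1, l2 = nn.Linear(784, 10), nn.Linear(784, 10)
w1 = np.concatenate([p.data.numpy().ravel() for p in l1.parameters()])
w2 = np.concatenate([p.data.numpy().ravel() for p in l2.parameters()])
N_in = 784
N_w = 784*10 + 10  # weights plus biases
print('empirical distance:', np.linalg.norm(w1 - w2))
print('expected distance: ', np.sqrt(2*N_w/(3*N_in)))  # ~2.58, close to the 2.57 used above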
experiments/Study_LogRegression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + # Using a helper function to do Cross Validation automatically. # Similar to how other sklearn objects are wrapped in helpers like # pipelines. # - from sklearn import ensemble rf = ensemble.RandomForestRegressor(max_features='auto') from sklearn import datasets X, y = datasets.make_regression(10000, 10) from sklearn import cross_validation scores = cross_validation.cross_val_score(rf, X, y) print scores scores = cross_validation.cross_val_score(rf, X, y, verbose=3, cv=4)
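# A small follow-up (sketch): summarize the per-fold scores into a single
# cross-validated estimate and its spread.
print(scores.mean())
print(scores.std())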
Chapter 5/5.0.2 Automatic Cross Validation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 파일 읽고 쓰기 # # 파일 생성하기 f = open("새파일.txt",'w') f.close() # 파일을 생성하기 위해 우리는 파이썬 내장 함수 open을 사용했다. open 함수는 다음과 같이 "파일 이름"과 "파일 열기 모드"를 입력값으로 받고 결괏값으로 파일 객체를 돌려준다. # - 사용법 # 파일 객체 = open(파일 이름, 파일 열기 모드) # # 파일 열기 모드 | 설명 # r | 읽기모드 - 파일을 읽기만 할 때 사용 # w | 쓰기모드 - 파일에 내용을 쓸 때 사용 # a | 추가모드 - 파일의 마지막에 새로운 내용을 추가 할 때 사용 # 파일을 쓰기 모드로 열면 해당 파일이 이미 존재할 경우 원래 있던 내용이 모두 사라지고, 해당 파일이 존재하지 않으면 새로운 파일이 생성된다. # 위 예에서는 디렉터리에 파일이 없는 상태에서 새파일.txt를 쓰기 모드인 'w'로 열었기 때문에 새파일.txt라는 이름의 새로운 파일이 현재 디렉터리에 생성되는 것이다. # - 만약 새파일.txt 파일을 C:/doit 디렉터리에 생성하고 싶다면 다음과 같이 작성해야 한다. f = open("C:/Users/ldm81/Desktop/새파일.txt", 'w') f.close() # 위 예에서 f.close()는 열려 있는 파일 객체를 닫아 주는 역할을 한다. 사실 이 문장은 생략해도 된다. 프로그램을 종료할 때 파이썬 프로그램이 열려 있는 파일의 객체를 자동으로 닫아주기 때문이다. 하지만 close()를 사용해서 열려 있는 파일을 직접 닫아 주는 것이 좋다. 쓰기모드로 열었던 파일을 닫지 않고 다시 사용하려고 하면 오류가 발생하기 때문이다. # ## 파일을 쓰기모드로 열어 출력값 적기 # - 이번에는 프로그램의 출력값을 파일에 직접 써보자. f = open("C:/Users/ldm81/Desktop/새파일.txt", 'w') for i in range(1,11): data="%d번째 줄입니다. \n" %i f.write(data) f.close() # ![image.png](attachment:image.png) # ## 프로그램의 외부에 저장된 파일을 읽는 여러가지 방법 # ## readline() 함수 f = open("C:/Users/ldm81/Desktop/새파일.txt", 'r') line= f.readline() print(line) # - 여러 줄 읽는 법 f = open("C:/Users/ldm81/Desktop/새파일.txt", 'r') while True: line = f.readline() if not line: break print(line) f.close() # 즉 while True: 무한 루프 안에서 f.readline()을 사용해 파일을 계속해서 한 줄씩 읽어 들인다. 만약 더 이상 읽을 줄이 없으면 break를 수행한다(readline()은 더 이상 읽을 줄이 없을 경우 None을 출력한다). # - readlines 함수 f = open("C:/Users/ldm81/Desktop/새파일.txt", 'r') lines = f.readlines() for line in lines: print(line) f.close() # readlines 함수는 파일의 모든 줄을 읽어서 각각의 줄을 요소로 갖는 리스트로 돌려준다. # 따라서 위 예에서 lines는 리스트 ["1 번째 줄입니다.", "2 번째 줄입니다.", ..., "10 번째 줄입니다."]가 된다. # f.readlines()에서 f.readline()과는 달리 s가 하나 더 붙어 있음에 유의하자. # - read 함수 f = open("C:/Users/ldm81/Desktop/새파일.txt", 'r') data=f.read() print(data) f.close() # f.read()는 파일의 내용 전체를 문자열로 돌려준다. 따라서 위 예의 data는 파일의 전체 내용이다. # ## 파일에 새로운 내용 추가하기 # # 쓰기 모드('w')로 파일을 열 때 이미 존재하는 파일을 열면 그 파일의 내용이 모두 사라지게 된다. # 하지만 원래 있던 값을 유지하면서 단지 새로운 값만 추가해야 할 경우도 있다. # 이런 경우에는 파일을 추가 모드('a')로 열면 된다. f = open("C:/Users/ldm81/Desktop/새파일.txt", 'a') for i in range(11,20): data="%d번째 줄입니다. \n"%i f.write(data) f.close() # 새파일.txt 파일을 추가 모드('a')로 열고 write를 사용해서 결괏값을 기존 파일에 추가해 적는 예이다. # 여기에서 추가 모드로 파일을 열었기 때문에 새파일.txt 파일이 원래 가지고 있던 내용 바로 다음부터 결괏값을 적기 시작한다. # # ![image.png](attachment:image.png) # ## with문과 함께 사용하기 # # 지금까지 살펴본 예제를 보면 항상 다음과 같은 방식으로 파일을 열고 닫아 왔다. # # f = open("foo.txt", 'w') # f.write("Life is too short, you need python") # f.close() # # # 파일을 열면 위와 같이 항상 close해 주는 것이 좋다. # 하지만 이렇게 파일을 열고 닫는 것을 자동으로 처리할 수 있다면 편리하지 않을까? # 파이썬의 with문이 바로 이런 역할을 해준다. with open("C:/Users/ldm81/Desktop/새파일.txt", "w") as f: f.write("Life is too short, you need python") # 위와 같이 with문을 사용하면 with 블록을 벗어나는 순간 열린 파일 객체 f가 자동으로 close되어 편리하다. # ## sys모듈로 매개변수 추가 # 명령 프롬프트(DOS)를 사용해 본 독자라면 다음과 같은 명령어를 사용해 봤을 것이다. # # C:\> type a.txt # 위 type 명령어는 바로 뒤에 적힌 파일 이름을 인수로 받아 그 내용을 출력해 주는 명령 프롬프트 명령어이다. 대부분의 명령 프롬프트 명령어는 다음과 같이 명령행(명령 프롬프트 창)에서 매개변수를 직접 주어 프로그램을 실행하는 방식을 따른다. 이러한 기능을 파이썬 프로그램에도 적용할 수가 있다. # # - 사용법 # 명령 프롬프트 명령어 [인수1 인수2 ...] # # 파이썬에서는 sys 모듈을 사용하여 매개변수를 직접 줄 수있다. sys 모듈을 사용하려면 아래 예의 import sys처럼 import 명령어를 사용해야 한다. # # ※ 모듈을 사용하고 만드는 방법에 대해서는 05장에서 자세히 다룰 것이다. 
# #sys1.py # import sys # args = sys.argv[1:] # for i in args: # print(i) # 위 예는 입력받은 인수를 for문을 사용해 차례대로 하나씩 출력하는 예이다. # sys 모듈의 argv는 명령 창에서 입력한 인수를 의미한다. 즉 다음과 같이 입력했다면 # argv[0]은 파일 이름 sys1.py가 되고 argv[1]부터는 뒤에 따라오는 인수가 차례로 argv의 요소가 된다 # # ![image.png](attachment:image.png) # ![image.png](attachment:image.png)
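# The commented script above can be created with the file-writing mode covered in this
# chapter (a small sketch; the file name sys1.py and the sample arguments are only examples).
# After writing it, run `python sys1.py aaa bbb ccc` from a command prompt to see each
# argument printed on its own line.
script = (
    "import sys\n"
    "args = sys.argv[1:]\n"
    "for i in args:\n"
    "    print(i)\n"
)
with open("sys1.py", "w") as f:
    f.write(script)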
JumptoPython/Part_4_Program_IO/04-3_File_Read_and_Write.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Prediction using keras # Let us import pandas to read our csv file. import pandas as pd df=pd.read_csv('Dataset/train_clean_data.csv') df.head() # Let us assign price as output and rest all of the columns as input. X=df.drop(['Price'],axis=1) y=df[['Price']] X=X.iloc[:,:].values y=y.iloc[:,:].values X X.shape y y.shape # Let us normalize our input and output. # + # data normalization with sklearn from sklearn.preprocessing import MinMaxScaler,StandardScaler scaler = StandardScaler() X_scaled = scaler.fit_transform(X) y_scaled = scaler.fit_transform(y) # - X_scaled y_scaled # Let us import Sequential for declaring our model and Dense to specify number of layers. from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense import sys # !{sys.executable} -m pip install tensorflowjs import tensorflowjs as tfjs # Let us define keras model with 18 layers with input layer having 256 nodes, optimizer adam and loss mean_squared_error for regression. def regression_model(): # create model model = Sequential() model.add(Dense(256, activation='relu', input_shape=(28,))) model.add(Dense(64, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(1)) # compile model model.compile(optimizer='adam', loss='mean_squared_error') return model model = regression_model() # Let us fit our training data. model.fit(X_scaled, y_scaled, validation_split=0.3, epochs=100, verbose=2) tfjs.converters.save_keras_model(model,'models') # Let us import testing data and normalize it. df=pd.read_csv('Dataset/test_clean_data.csv') df.head() X_test=df.iloc[:,:].values X_test X_test.shape # Let us normalize our testing data. X_scaled_test = scaler.fit_transform(X_test) X_scaled_test # Let us print predicted values. pred=model.predict(X_scaled_test) pred # Let us print our loss. score = model.evaluate(X_scaled_test,pred,verbose=0) print('Loss:',score) # Our loss is 0. # It's highly accurate. pred.shape model.save('Keras_prediction.h5')#To save model
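# Because the target column was standardized, the network's predictions are in scaled
# units. A sketch of mapping them back to prices, assuming a dedicated scaler is kept for
# the target (above, a single scaler object is reused and later refit on the test features,
# so its stored statistics no longer describe Price).
from sklearn.preprocessing import StandardScaler
y_scaler = StandardScaler()
y_scaler.fit(y)                                 # learn the price mean/std
pred_prices = y_scaler.inverse_transform(pred)  # predictions back on the original price scale
print(pred_prices[:5])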
prediction_using_keras.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import matplotlib.pyplot as plt from matplotlib import style import matplotlib.ticker as ticker import seaborn as sns from sklearn.datasets import load_boston from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from sklearn.metrics import plot_confusion_matrix from sklearn.metrics import classification_report from sklearn.metrics import f1_score from sklearn.compose import ColumnTransformer from sklearn.preprocessing import OneHotEncoder from sklearn.model_selection import cross_val_score from sklearn.model_selection import train_test_split from sklearn.model_selection import RepeatedKFold from sklearn.model_selection import GridSearchCV from sklearn.model_selection import ParameterGrid from sklearn.inspection import permutation_importance import multiprocessing labels = pd.read_csv('../csv/train_labels.csv') labels.head() values = pd.read_csv('../csv/train_values.csv') values.head(10).T to_be_categorized = ["land_surface_condition", "foundation_type", "roof_type",\ "position", "ground_floor_type", "other_floor_type",\ "plan_configuration", "legal_ownership_status"] for row in to_be_categorized: values[row] = values[row].astype("category") values.info() datatypes = dict(values.dtypes) for row in values.columns: if datatypes[row] != "int64" and datatypes[row] != "int32" and \ datatypes[row] != "int16" and datatypes[row] != "int8": continue if values[row].nlargest(1).item() > 32767 and values[row].nlargest(1).item() < 2**31: values[row] = values[row].astype(np.int32) elif values[row].nlargest(1).item() > 127: values[row] = values[row].astype(np.int16) else: values[row] = values[row].astype(np.int8) values.info() labels["building_id"] = labels["building_id"].astype(np.int32) labels["damage_grade"] = labels["damage_grade"].astype(np.int8) labels.info() importantes = values\ .merge(labels, on="building_id") importantes.drop(columns=["building_id"], inplace = True) importantes["geo_level_1_id"] = importantes["geo_level_1_id"].astype("category") X_train, X_test, y_train, y_test = train_test_split(importantes.drop(columns = 'damage_grade'), importantes['damage_grade'],\ test_size = 0.2, random_state = 123) def encode_and_bind(original_dataframe, feature_to_encode): dummies = pd.get_dummies(original_dataframe[[feature_to_encode]]) res = pd.concat([original_dataframe, dummies], axis=1) res = res.drop([feature_to_encode], axis=1) return(res) features = ["geo_level_1_id", "land_surface_condition",\ "foundation_type", "roof_type",\ "position", "ground_floor_type", "other_floor_type",\ "plan_configuration", "legal_ownership_status"] for feature in features: X_train = encode_and_bind(X_train, feature) modelito = RandomForestClassifier(n_estimators = 270, max_depth = 25, criterion = "gini", verbose=True, n_jobs=-1) modelito.fit(X_train, y_train) modelito.score(X_train, y_train) modelito.feature_importances_ importantes = values\ .merge(labels, on="building_id") importantes.drop(columns=["building_id"], inplace = True) importantes["geo_level_1_id"] = importantes["geo_level_1_id"].astype("category") importantes X_train, X_test, y_train, y_test = train_test_split(importantes.drop(columns = 'damage_grade'), importantes['damage_grade'],\ test_size = 0.2, random_state = 
123) def encode_and_bind(original_dataframe, feature_to_encode): dummies = pd.get_dummies(original_dataframe[[feature_to_encode]]) res = pd.concat([original_dataframe, dummies], axis=1) res = res.drop([feature_to_encode], axis=1) return(res) features = ["geo_level_1_id", "land_surface_condition",\ "foundation_type", "roof_type",\ "position", "ground_floor_type", "other_floor_type",\ "plan_configuration", "legal_ownership_status"] for feature in features: X_train = encode_and_bind(X_train, feature) X_test = encode_and_bind(X_test, feature) modelito = RandomForestClassifier(n_estimators = 270, max_depth = 25, max_features = 45, criterion = "gini", verbose=True, n_jobs=-1) modelito.fit(X_train, y_train) modelito.score(X_train, y_train) y_preds = modelito.predict(X_test) f1_score(y_test, y_preds, average='micro') modelito.feature_importances_ test_values = pd.read_csv('../csv/test_values.csv', index_col = "building_id") test_values subset_de_testvalues = test_values subset_de_testvalues["geo_level_1_id"] = subset_de_testvalues["geo_level_1_id"].astype("category") subset_de_testvalues def encode_and_bind(original_dataframe, feature_to_encode): dummies = pd.get_dummies(original_dataframe[[feature_to_encode]]) res = pd.concat([original_dataframe, dummies], axis=1) res = res.drop([feature_to_encode], axis=1) return(res) features = ["geo_level_1_id", "land_surface_condition",\ "foundation_type", "roof_type",\ "position", "ground_floor_type", "other_floor_type",\ "plan_configuration", "legal_ownership_status"] for feature in features: subset_de_testvalues = encode_and_bind(subset_de_testvalues, feature) subset_de_testvalues preds = modelito.predict(subset_de_testvalues) submission_format = pd.read_csv('../csv/submission_format.csv', index_col = "building_id") my_submission = pd.DataFrame(data=preds, columns=submission_format.columns, index=submission_format.index) my_submission.head() my_submission.to_csv('../csv/predictions/laucha1.csv')
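# A small readability sketch: pair the raw importance scores with the (one-hot encoded)
# column names so the most informative features can be inspected directly.
importances = pd.Series(modelito.feature_importances_, index=X_train.columns)
print(importances.sort_values(ascending=False).head(15))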
src/.ipynb_checkpoints/lf-atr-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Линейная регрессия и стохастический градиентный спуск # Задание основано на материалах лекций по линейной регрессии и градиентному спуску. Вы будете прогнозировать выручку компании в зависимости от уровня ее инвестиций в рекламу по TV, в газетах и по радио. # ## Вы научитесь: # - решать задачу восстановления линейной регрессии # - реализовывать стохастический градиентный спуск для ее настройки # - решать задачу линейной регрессии аналитически # ## Введение # Линейная регрессия - один из наиболее хорошо изученных методов машинного обучения, позволяющий прогнозировать значения количественного признака в виде линейной комбинации прочих признаков с параметрами - весами модели. Оптимальные (в смысле минимальности некоторого функционала ошибки) параметры линейной регрессии можно найти аналитически с помощью нормального уравнения или численно с помощью методов оптимизации. # Линейная регрессия использует простой функционал качества - среднеквадратичную ошибку. Мы будем работать с выборкой, содержащей 3 признака. Для настройки параметров (весов) модели решается следующая задача: # $$\Large \frac{1}{\ell}\sum_{i=1}^\ell{{((w_0 + w_1x_{i1} + w_2x_{i2} + w_3x_{i3}) - y_i)}^2} \rightarrow \min_{w_0, w_1, w_2, w_3},$$ # где $x_{i1}, x_{i2}, x_{i3}$ - значения признаков $i$-го объекта, $y_i$ - значение целевого признака $i$-го объекта, $\ell$ - число объектов в обучающей выборке. # ## Градиентный спуск # Параметры $w_0, w_1, w_2, w_3$, по которым минимизируется среднеквадратичная ошибка, можно находить численно с помощью градиентного спуска. # Градиентный шаг для весов будет выглядеть следующим образом: # $$\Large w_0 \leftarrow w_0 - \frac{2\eta}{\ell} \sum_{i=1}^\ell{{((w_0 + w_1x_{i1} + w_2x_{i2} + w_3x_{i3}) - y_i)}}$$ # $$\Large w_j \leftarrow w_j - \frac{2\eta}{\ell} \sum_{i=1}^\ell{{x_{ij}((w_0 + w_1x_{i1} + w_2x_{i2} + w_3x_{i3}) - y_i)}},\ j \in \{1,2,3\}$$ # Здесь $\eta$ - параметр, шаг градиентного спуска. # ## Стохастический градиентный спуск # Проблема градиентного спуска, описанного выше, в том, что на больших выборках считать на каждом шаге градиент по всем имеющимся данным может быть очень вычислительно сложно. # В стохастическом варианте градиентного спуска поправки для весов вычисляются только с учетом одного случайно взятого объекта обучающей выборки: # $$\Large w_0 \leftarrow w_0 - \frac{2\eta}{\ell} {((w_0 + w_1x_{k1} + w_2x_{k2} + w_3x_{k3}) - y_k)}$$ # $$\Large w_j \leftarrow w_j - \frac{2\eta}{\ell} {x_{kj}((w_0 + w_1x_{k1} + w_2x_{k2} + w_3x_{k3}) - y_k)},\ j \in \{1,2,3\},$$ # где $k$ - случайный индекс, $k \in \{1, \ldots, \ell\}$. # ## Нормальное уравнение # Нахождение вектора оптимальных весов $w$ может быть сделано и аналитически. # Мы хотим найти такой вектор весов $w$, чтобы вектор $y$, приближающий целевой признак, получался умножением матрицы $X$ (состоящей из всех признаков объектов обучающей выборки, кроме целевого) на вектор весов $w$. То есть, чтобы выполнялось матричное уравнение: # $$\Large y = Xw$$ # Домножением слева на $X^T$ получаем: # $$\Large X^Ty = X^TXw$$ # Это хорошо, поскольку теперь матрица $X^TX$ - квадратная, и можно найти решение (вектор $w$) в виде: # $$\Large w = {(X^TX)}^{-1}X^Ty$$ # Матрица ${(X^TX)}^{-1}X^T$ - [*псевдообратная*](https://ru.wikipedia.org/wiki/Псевдообратная_матрица) для матрицы $X$. 
В NumPy такую матрицу можно вычислить с помощью функции [numpy.linalg.pinv](http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.linalg.pinv.html). # # Однако, нахождение псевдообратной матрицы - операция вычислительно сложная и нестабильная в случае малого определителя матрицы $X$ (проблема мультиколлинеарности). # На практике лучше находить вектор весов $w$ решением матричного уравнения # $$\Large X^TXw = X^Ty$$Это может быть сделано с помощью функции [numpy.linalg.solve](http://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.linalg.solve.html). # # Но все же на практике для больших матриц $X$ быстрее работает градиентный спуск, особенно его стохастическая версия. # ## Инструкции по выполнению # В начале напишем простую функцию для записи ответов в текстовый файл. Ответами будут числа, полученные в ходе решения этого задания, округленные до 3 знаков после запятой. Полученные файлы после выполнения задания надо отправить в форму на странице задания на Coursera.org. def write_answer_to_file(answer, filename): with open(filename, 'w') as f_out: f_out.write(str(round(answer, 3))) # **1. Загрузите данные из файла *advertising.csv* в объект pandas DataFrame. [Источник данных](http://www-bcf.usc.edu/~gareth/ISL/data.html).** import pandas as pd adver_data = pd.read_csv('advertising.csv') # **Посмотрите на первые 5 записей и на статистику признаков в этом наборе данных.** adver_data.head(5) adver_data.describe() # **Создайте массивы NumPy *X* из столбцов TV, Radio и Newspaper и *y* - из столбца Sales. Используйте атрибут *values* объекта pandas DataFrame.** import numpy as np data = adver_data X = np.array(data[['TV', 'Radio', 'Newspaper']]) y = np.array(data['Sales']) X # **Отмасштабируйте столбцы матрицы *X*, вычтя из каждого значения среднее по соответствующему столбцу и поделив результат на стандартное отклонение. Для определенности, используйте методы mean и std векторов NumPy (реализация std в Pandas может отличаться). Обратите внимание, что в numpy вызов функции .mean() без параметров возвращает среднее по всем элементам массива, а не по столбцам, как в pandas. Чтобы произвести вычисление по столбцам, необходимо указать параметр axis.** means, stds = np.mean(X, axis=0), np.std(X, axis=0) X = (X - means) / stds X # **Добавьте к матрице *X* столбец из единиц, используя методы *hstack*, *ones* и *reshape* библиотеки NumPy. Вектор из единиц нужен для того, чтобы не обрабатывать отдельно коэффициент $w_0$ линейной регрессии.** X = np.hstack((X, np.ones(len(X)).reshape(len(X), 1))) X # **2. Реализуйте функцию *mserror* - среднеквадратичную ошибку прогноза. Она принимает два аргумента - объекты Series *y* (значения целевого признака) и *y\_pred* (предсказанные значения). Не используйте в этой функции циклы - тогда она будет вычислительно неэффективной.** def mserror(y, y_pred): return np.sum(np.square(y - y_pred)) / len(y) # **Какова среднеквадратичная ошибка прогноза значений Sales, если всегда предсказывать медианное значение Sales по исходной выборке? Запишите ответ в файл '1.txt'.** answer1 = mserror(y, np.median(y)) print(answer1) write_answer_to_file(answer1, '1.txt') # **3. 
Реализуйте функцию *normal_equation*, которая по заданным матрицам (массивам NumPy) *X* и *y* вычисляет вектор весов $w$ согласно нормальному уравнению линейной регрессии.** def normal_equation(X, y): return np.dot(np.linalg.pinv(X), y) norm_eq_weights = normal_equation(X, y) print(norm_eq_weights) # **Какие продажи предсказываются линейной моделью с весами, найденными с помощью нормального уравнения, в случае средних инвестиций в рекламу по ТВ, радио и в газетах? (то есть при нулевых значениях масштабированных признаков TV, Radio и Newspaper). Запишите ответ в файл '2.txt'.** answer2 = np.sum(np.array([0, 0, 0, 1]) * norm_eq_weights) print(answer2) write_answer_to_file(answer2, '2.txt') # **4. Напишите функцию *linear_prediction*, которая принимает на вход матрицу *X* и вектор весов линейной модели *w*, а возвращает вектор прогнозов в виде линейной комбинации столбцов матрицы *X* с весами *w*.** def linear_prediction(X, w): return np.dot(X, w) # **Какова среднеквадратичная ошибка прогноза значений Sales в виде линейной модели с весами, найденными с помощью нормального уравнения? Запишите ответ в файл '3.txt'.** answer3 = mserror(y, linear_prediction(X, norm_eq_weights)) print(answer3) write_answer_to_file(answer3, '3.txt') # **5. Напишите функцию *stochastic_gradient_step*, реализующую шаг стохастического градиентного спуска для линейной регрессии. Функция должна принимать матрицу *X*, вектора *y* и *w*, число *train_ind* - индекс объекта обучающей выборки (строки матрицы *X*), по которому считается изменение весов, а также число *$\eta$* (eta) - шаг градиентного спуска (по умолчанию *eta*=0.01). Результатом будет вектор обновленных весов. Наша реализация функции будет явно написана для данных с 3 признаками, но несложно модифицировать для любого числа признаков, можете это сделать.** def stochastic_gradient_step(X, y, w, train_ind, eta=0.01): x = X[train_ind] * (np.sum(X[train_ind] * w) - y[train_ind]) * (2 / X.shape[0]) grad0 = x[0] grad1 = x[1] grad2 = x[2] grad3 = x[3] return w - eta * np.array([grad0, grad1, grad2, grad3]) # **6. Напишите функцию *stochastic_gradient_descent*, реализующую стохастический градиентный спуск для линейной регрессии. Функция принимает на вход следующие аргументы:** # - X - матрица, соответствующая обучающей выборке # - y - вектор значений целевого признака # - w_init - вектор начальных весов модели # - eta - шаг градиентного спуска (по умолчанию 0.01) # - max_iter - максимальное число итераций градиентного спуска (по умолчанию 10000) # - max_weight_dist - максимальное евклидово расстояние между векторами весов на соседних итерациях градиентного спуска, # при котором алгоритм прекращает работу (по умолчанию 1e-8) # - seed - число, используемое для воспроизводимости сгенерированных псевдослучайных чисел (по умолчанию 42) # - verbose - флаг печати информации (например, для отладки, по умолчанию False) # # **На каждой итерации в вектор (список) должно записываться текущее значение среднеквадратичной ошибки. Функция должна возвращать вектор весов $w$, а также вектор (список) ошибок.** def stochastic_gradient_descent(X, y, w_init, eta=1e-2, max_iter=1e4, max_weight_dist=1e-8, seed=42, verbose=False): # Инициализируем расстояние между векторами весов на соседних # итерациях большим числом. 
weight_dist = np.inf # Инициализируем вектор весов w = w_init # Сюда будем записывать ошибки на каждой итерации errors = [mserror(y, linear_prediction(X, w))] # Счетчик итераций iter_num = 0 # Будем порождать псевдослучайные числа # (номер объекта, который будет менять веса), а для воспроизводимости # этой последовательности псевдослучайных чисел используем seed. np.random.seed(seed) # Основной цикл while weight_dist > max_weight_dist and iter_num < max_iter: # порождаем псевдослучайный # индекс объекта обучающей выборки random_ind = np.random.randint(X.shape[0]) # Ваш код здесь new_w = stochastic_gradient_step(X, y, w, random_ind, eta) errors.append(mserror(y, linear_prediction(X, new_w))) if verbose: print (errors[-1]) weight_dist = np.sqrt(np.sum(np.square(new_w - w))) w = new_w iter_num += 1 return w, errors # **Запустите $10^5$ итераций стохастического градиентного спуска. Укажите вектор начальных весов *w_init*, состоящий из нулей. Оставьте параметры *eta* и *seed* равными их значениям по умолчанию (*eta*=0.01, *seed*=42 - это важно для проверки ответов).** # %%time stoch_grad_desc_weights, stoch_errors_by_iter = stochastic_gradient_descent(X, y, np.zeros(4), max_iter = 1e5, verbose = False) # **Посмотрим, чему равна ошибка на первых 50 итерациях стохастического градиентного спуска. Видим, что ошибка не обязательно уменьшается на каждой итерации.** # %pylab inline plot(range(50), stoch_errors_by_iter[:50]) xlabel('Iteration number') ylabel('MSE') # **Теперь посмотрим на зависимость ошибки от номера итерации для $10^5$ итераций стохастического градиентного спуска. Видим, что алгоритм сходится.** # %pylab inline plot(range(len(stoch_errors_by_iter)), stoch_errors_by_iter) xlabel('Iteration number') ylabel('MSE') # **Посмотрим на вектор весов, к которому сошелся метод.** stoch_grad_desc_weights # **Посмотрим на среднеквадратичную ошибку на последней итерации.** stoch_errors_by_iter[-20:] # **Какова среднеквадратичная ошибка прогноза значений Sales в виде линейной модели с весами, найденными с помощью градиентного спуска? Запишите ответ в файл '4.txt'.** answer4 = mserror(y, linear_prediction(X, stoch_grad_desc_weights)) print(answer4) write_answer_to_file(answer4, '4.txt') # **Ответами к заданию будут текстовые файлы, полученные в ходе этого решения. Обратите внимание, что отправленные файлы не должны содержать пустую строку в конце. Данный нюанс является ограничением платформы Coursera. Мы работаем над исправлением этого ограничения.**
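# A short consistency check (sketch): the weights found by stochastic gradient descent
# should be close to the analytical solution from the normal equation.
print(np.linalg.norm(stoch_grad_desc_weights - norm_eq_weights))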
Coursera/Machine-learning-data-analysis/Course 2/Week_01/PA_linreg_stochastic_grad_descent.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# import libraries
import pandas as pd
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np

# load training data
data = pd.read_csv('lr1.csv')
data.head()

X = data.iloc[:, [0, 1]].values
y = data.iloc[:, 2].values

# +
# standardize each feature column (mean 0, std 1)
mu = X.mean(axis=0)
sigma = X.std(axis=0)

X_std = (X-mu)/sigma
# -

t = np.linspace(-2, 2, 100)

# plot the two classes: rows with y==1 vs. y==0; column 0 is the first feature (X1)
plt.plot(X_std[y==1, 0], X_std[y==1, 1], 'x')
plt.plot(X_std[y==0, 0], X_std[y==0, 1], 'o')

from sklearn.linear_model import LogisticRegression

# +
lr = LogisticRegression(C=100.0, random_state=1)
lr.fit(X_std, y)

print(lr.coef_)
print(lr.intercept_)

# +
# 100 evenly spaced values from -2 to 2
t = np.linspace(-2, 2, 100)

plt.plot(X_std[y==1, 0], X_std[y==1, 1], 'x')
plt.plot(X_std[y==0, 0], X_std[y==0, 1], 'o')

# decision boundary: w0 + w1*x1 + w2*x2 = 0
plt.plot(t, -(float(lr.intercept_)+lr.coef_[0][0]*t)/lr.coef_[0][1])

# +
# two new points to classify: (200, 100) and (100, 300)
X_test = [[200, 100], [100, 300]]

# standardize with the training set's mean and standard deviation (not the test set's)
from sklearn.preprocessing import StandardScaler
stdsc = StandardScaler()
stdsc.fit(X)
X_test_std = stdsc.transform(X_test)
# -

lr.predict_proba(X_test_std)

# then map these test points back onto the plot
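# +
# Sketch of that final step: overlay the two standardized test points on the scatter plot
# together with the fitted decision boundary.
plt.plot(X_std[y==1, 0], X_std[y==1, 1], 'x')
plt.plot(X_std[y==0, 0], X_std[y==0, 1], 'o')
plt.plot(t, -(float(lr.intercept_)+lr.coef_[0][0]*t)/lr.coef_[0][1])
plt.scatter(X_test_std[:, 0], X_test_std[:, 1], c='red', marker='s', label='test points')
plt.legend()
plt.show()
# -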
Logistic Regression - using sklearn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- df = parse('pokemon_jay.csv') species_count(df) max_level(df) filter_range(df, 60, 70) highest_stage_per_type(df) count_types(df) def max_level(df): x = df.loc[(df['level'] == max(df['level']))][] return x[x.index] df[df['level'] == max(df['level'])].index[0] vv = df[df['level'] == max(df['level'])].iloc[0,:] vv['name'] x = max_level(df) type(x) x.index x[] x.loc[x.index,:] # + import pandas as pd # Write your functions here! def parse(file_name): return pd.read_csv(file_name, header=0) def species_count(df): return len(pd.unique(df.loc[:,'name'])) def max_level(df): x = df.loc[df['level'] == max(df['level'])] return (x['name'], x['level']) def filter_range(df, min, max): return df[(df['level'] >= min) & (df['level'] < max)]['name'].tolist() def mean_attack_for_type(df, type): atk = df[df['type'] == type]['atk'] return atk.mean() def count_types(df): type_df = df.groupby(by = 'type').apply(len) result = {} for type, num in type_df.items(): result[type] = num return result def highest_stage_per_type(df): stage_df = df.groupby('type')['stage'].apply(max) result = {} for type, stage in stage_df.items(): result[type] = stage return result def mean(values): return sum(values)/ len(values) def mean_attack_per_type(df): df = df.groupby('type')['atk'].apply(mean) result = {} for key, value in df.items(): result[key] = round(value, 2) return result # - def count_types(df): type_df = df.groupby(by='type').apply(len) result = {} for type, num in type_df.items(): result[type] = num return result count_types(df) def count_types(data): result = {} for i in enumerate(data): print(i) # p_type = i['type'] # if p_type in result.keys(): # result[p_type] += 1 # else: # result[p_type] = 1 return result df.loc[0,:] count_types(df) mean_attack_per_type(df) highest_stage_per_type(df) type_df = df.groupby(by = 'type').apply(len) type(type_df) count_types(df) mean_attack_for_type(df,'fire') v = max_level(df) v[1] df = parse('pokemon_box.csv') df.head()
hw2/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %load_ext autoreload # %autoreload 2 # + import os import sys import numpy as np import pandas as pd import plotly as pl # + sys.path.insert(0, "..") import ccal np.random.random(20121020) pl.offline.init_notebook_mode(connected=True) # + n_row = 32 n_column = 64 df = pd.DataFrame( np.random.random_sample(size=(n_row, n_column)), index=pd.Index(("Index {}".format(i) for i in range(n_row)), name="DF Row"), columns=pd.Index( ("Column {}".format(i) for i in range(n_column)), name="DF Column" ), ) df # + ks = (2, 4, 8) k = "K{}".format(ks[0]) # - n_job = 3 # + ccal.establish_path(ccal.hierarchical_consensus_cluster.__name__, "directory") column_cluster, column_cluster__ccc = ccal.hierarchical_consensus_cluster( df, ks[0], directory_path=ccal.hierarchical_consensus_cluster.__name__ ) # - column_cluster column_cluster__ccc # + ccal.establish_path(ccal.hierarchical_consensus_cluster_with_ks.__name__, "directory") k_return = ccal.hierarchical_consensus_cluster_with_ks( df, ks, n_job=n_job, directory_path=ccal.hierarchical_consensus_cluster_with_ks.__name__, ) k_return[k].keys() # - k_return[k]["column_cluster"] k_return[k]["column_cluster.ccc"] # + ccal.establish_path(ccal.nmf_consensus_cluster.__name__, "directory") w, h, e, w_element_cluster, w_element_cluster__ccc, h_element_cluster, h_element_cluster__ccc = ccal.nmf_consensus_cluster( df, ks[0], directory_path=ccal.nmf_consensus_cluster.__name__ ) # - w h e w_element_cluster w_element_cluster__ccc h_element_cluster h_element_cluster__ccc # + ccal.establish_path(ccal.nmf_consensus_cluster_with_ks.__name__, "directory") k_return = ccal.nmf_consensus_cluster_with_ks( df, ks, n_job=n_job, directory_path=ccal.nmf_consensus_cluster_with_ks.__name__ ) k_return[k].keys() # - k_return[k]["w"] k_return[k]["h"] k_return[k]["e"] k_return[k]["w_element_cluster"] k_return[k]["w_element_cluster.ccc"] k_return[k]["h_element_cluster"] k_return[k]["h_element_cluster.ccc"] # + import shutil shutil.rmtree(ccal.hierarchical_consensus_cluster.__name__) shutil.rmtree(ccal.hierarchical_consensus_cluster_with_ks.__name__) shutil.rmtree(ccal.nmf_consensus_cluster.__name__) shutil.rmtree(ccal.nmf_consensus_cluster_with_ks.__name__)
notebook/clustering.ipynb