markdown: string (lengths 0 – 1.02M)
code: string (lengths 0 – 832k)
output: string (lengths 0 – 1.02M)
license: string (lengths 3 – 36)
path: string (lengths 6 – 265)
repo_name: string (lengths 6 – 127)
Check Accuracy
from sklearn import svm clf = svm.SVC(kernel='rbf') clf.fit(X_train_SVM, y_train_SVM) yhat = clf.predict(X_test_SVM) yhat [0:5] from sklearn.metrics import f1_score, jaccard_similarity_score f1_acc = f1_score(y_test_SVM, yhat, average='weighted') jaccard_acc = jaccard_similarity_score(y_test_SVM, yhat) f1_acc, jaccard_acc
_____no_output_____
MIT
Coursera/IBM Python 01/Course02/ML Python Sharing.ipynb
brianshen1990/KeepLearning
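In scikit-learn 0.23 and later, `jaccard_similarity_score` was removed in favour of `jaccard_score`, so the cell above fails on current versions. A minimal sketch of the same evaluation with the newer API, assuming the `y_test_SVM` and `yhat` arrays from that cell:

```python
from sklearn.metrics import f1_score, jaccard_score

# jaccard_score replaces the removed jaccard_similarity_score;
# average='weighted' mirrors the weighted F1 used above.
f1_acc = f1_score(y_test_SVM, yhat, average='weighted')
jaccard_acc = jaccard_score(y_test_SVM, yhat, average='weighted')
print(f1_acc, jaccard_acc)
```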
Logistic Regression Dataset train and test. Just use the same split as K-Nearest.
from sklearn.linear_model import LogisticRegression from sklearn.metrics import confusion_matrix LR = LogisticRegression(C=0.01, solver='liblinear').fit(X_train,y_train) yhat = LR.predict(X_test) yhat_prob = LR.predict_proba(X_test) yhat, yhat_prob
_____no_output_____
MIT
Coursera/IBM Python 01/Course02/ML Python Sharing.ipynb
brianshen1990/KeepLearning
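The columns of `predict_proba` follow the order of `LR.classes_`, which is easy to lose track of; a short sketch, assuming the fitted `LR` and `X_test` from the cell above, that labels the probabilities explicitly:

```python
import pandas as pd

# Each probability column corresponds to one entry of LR.classes_
# (the loan-status labels), so naming the columns avoids mix-ups.
proba_df = pd.DataFrame(LR.predict_proba(X_test), columns=LR.classes_)
print(proba_df.head())
```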
Accuracy
from sklearn.metrics import jaccard_similarity_score jaccard_similarity_score(y_test, yhat) from sklearn.metrics import log_loss log_loss(y_test, yhat_prob)
_____no_output_____
MIT
Coursera/IBM Python 01/Course02/ML Python Sharing.ipynb
brianshen1990/KeepLearning
Model Evaluation using Test set
from sklearn.metrics import jaccard_similarity_score from sklearn.metrics import f1_score from sklearn.metrics import log_loss
_____no_output_____
MIT
Coursera/IBM Python 01/Course02/ML Python Sharing.ipynb
brianshen1990/KeepLearning
First, download and load the test set:
!wget -O loan_test.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/loan_test.csv
--2020-05-22 15:47:37-- https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/loan_test.csv Resolving s3-api.us-geo.objectstorage.softlayer.net (s3-api.us-geo.objectstorage.softlayer.net)... 67.228.254.196 Connecting to s3-api.us-geo.objectstorage.softlayer.net (s3-api.us-geo.objectstorage.softlayer.net)|67.228.254.196|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 3642 (3.6K) [text/csv] Saving to: ‘loan_test.csv’ 100%[======================================>] 3,642 --.-K/s in 0s 2020-05-22 15:47:37 (242 MB/s) - ‘loan_test.csv’ saved [3642/3642]
MIT
Coursera/IBM Python 01/Course02/ML Python Sharing.ipynb
brianshen1990/KeepLearning
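As an alternative to shelling out to `wget`, pandas can read a CSV directly from a URL; a sketch assuming the same public object is still reachable:

```python
import pandas as pd

url = ("https://s3-api.us-geo.objectstorage.softlayer.net/"
       "cf-courses-data/CognitiveClass/ML0101ENv3/labs/loan_test.csv")
# read_csv accepts a URL, so the file never needs to be saved to disk first.
test_df = pd.read_csv(url)
print(test_df.shape)
```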
Load Test set for evaluation
test_df = pd.read_csv('loan_test.csv') test_df.head()
_____no_output_____
MIT
Coursera/IBM Python 01/Course02/ML Python Sharing.ipynb
brianshen1990/KeepLearning
Prepare data
test_df['due_date'] = pd.to_datetime(test_df['due_date']) test_df['effective_date'] = pd.to_datetime(test_df['effective_date']) test_df['dayofweek'] = test_df['effective_date'].dt.dayofweek test_df['weekend'] = test_df['dayofweek'].apply(lambda x: 1 if (x>3) else 0) test_df.head() test_df['Gender'].replace(to_replace=['male','female'], value=[0,1],inplace=True) test_df.head() test_Feature = test_df[['Principal','terms','age','Gender','weekend']] test_Feature = pd.concat([test_Feature,pd.get_dummies(test_df['education'])], axis=1) test_Feature.drop(['Master or Above'], axis = 1,inplace=True) test_Feature.head() feature_df = test_Feature[['Principal', 'terms', 'age', 'Gender', 'weekend', 'Bechalor', 'High School or Below', 'college']] test_X_SVM = np.asarray(feature_df) test_X_SVM[0:5] test_y = test_df['loan_status'].values test_y[0:5] test_Y_Feature = [ 1 if i == "PAIDOFF" else 0 for i in test_df['loan_status'].values] test_y_SVM = np.asarray(test_Y_Feature) test_y_SVM [0:5] test_y_SVM[0:5]
_____no_output_____
MIT
Coursera/IBM Python 01/Course02/ML Python Sharing.ipynb
brianshen1990/KeepLearning
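The cell above repeats, by hand, the exact feature engineering applied to the training data earlier in the notebook. Wrapping it in one function keeps the train and test pipelines from drifting apart; a minimal sketch assuming the same column names:

```python
import pandas as pd

def prepare_features(df):
    """Reproduce the notebook's loan feature engineering on any dataframe."""
    df = df.copy()
    df['due_date'] = pd.to_datetime(df['due_date'])
    df['effective_date'] = pd.to_datetime(df['effective_date'])
    df['dayofweek'] = df['effective_date'].dt.dayofweek
    df['weekend'] = df['dayofweek'].apply(lambda x: 1 if x > 3 else 0)
    df['Gender'] = df['Gender'].replace(['male', 'female'], [0, 1])
    feats = df[['Principal', 'terms', 'age', 'Gender', 'weekend']]
    feats = pd.concat([feats, pd.get_dummies(df['education'])], axis=1)
    feats = feats.drop(['Master or Above'], axis=1)
    return feats

test_Feature = prepare_features(test_df)
```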
K-Nearest
neigh = KNeighborsClassifier(n_neighbors = 7).fit(X_train,y_train) yhat=neigh.predict(test_Feature) K_f1_acc = f1_score(test_y, yhat, average='weighted') k_jaccard_acc = jaccard_similarity_score(test_y, yhat) K_f1_acc, k_jaccard_acc
/opt/conda/envs/Python36/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples. 'precision', 'predicted', average, warn_for)
MIT
Coursera/IBM Python 01/Course02/ML Python Sharing.ipynb
brianshen1990/KeepLearning
Decision tree
drugTree = DecisionTreeClassifier(criterion="entropy", max_depth = 6) drugTree.fit(X_train,y_train) yhat = drugTree.predict(test_Feature) DT_f1_acc = f1_score(test_y, yhat, average='weighted') DT_jaccard_acc = jaccard_similarity_score(test_y, yhat) DT_f1_acc, DT_jaccard_acc
_____no_output_____
MIT
Coursera/IBM Python 01/Course02/ML Python Sharing.ipynb
brianshen1990/KeepLearning
SVM
clf = svm.SVC(kernel='rbf') clf.fit(X_train_SVM, y_train_SVM) yhat = clf.predict(test_X_SVM) SVM_f1_acc = f1_score(test_y_SVM, yhat, average='weighted') SVM_jaccard_acc = jaccard_similarity_score(test_y_SVM, yhat) SVM_f1_acc, SVM_jaccard_acc
/opt/conda/envs/Python36/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning. "avoid this warning.", FutureWarning)
MIT
Coursera/IBM Python 01/Course02/ML Python Sharing.ipynb
brianshen1990/KeepLearning
Logistic Regression
LR = LogisticRegression(C=0.01, solver='liblinear').fit(X_train,y_train) yhat = LR.predict(test_Feature) LR_f1_acc = f1_score(test_y, yhat, average='weighted') LR_jaccard_acc = jaccard_similarity_score(test_y, yhat) from sklearn.metrics import log_loss yhat_prob = LR.predict_proba(test_Feature) LR_log_loss = log_loss(test_y, yhat_prob) yhat, LR_f1_acc, LR_jaccard_acc, LR_log_loss
/opt/conda/envs/Python36/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples. 'precision', 'predicted', average, warn_for)
MIT
Coursera/IBM Python 01/Course02/ML Python Sharing.ipynb
brianshen1990/KeepLearning
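With all four models scored on the same hold-out set, the individual metric variables can be collected into one report table; a sketch assuming the `*_f1_acc`, `*_jaccard_acc` and `LR_log_loss` values computed in the cells above:

```python
import numpy as np
import pandas as pd

# One row per algorithm; only Logistic Regression produces probabilities,
# so LogLoss is left as NaN for the other models.
report = pd.DataFrame({
    'Algorithm': ['KNN', 'Decision Tree', 'SVM', 'Logistic Regression'],
    'Jaccard':   [k_jaccard_acc, DT_jaccard_acc, SVM_jaccard_acc, LR_jaccard_acc],
    'F1-score':  [K_f1_acc, DT_f1_acc, SVM_f1_acc, LR_f1_acc],
    'LogLoss':   [np.nan, np.nan, np.nan, LR_log_loss],
})
print(report)
```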
2 - Exploratory Analysis of Time Series - Total Revenue. Project for the **Statistics** course (Module 4) of the Data Science Degree (July 2020 cohort). Team: * Felipe Lima de Oliveira * Mário Henrique Romagna Cesa * Tsuyioshi Valentim Fukuda * Fernando Raineri Monari. Link to the [project on Github](https://github.com/flimao/case-previsao-faturamento). Introduction: This notebook is a continuation of the initial exploratory analysis. Here we move on to the exploratory analysis of the time series.
# importação de bibliotecas import datetime as dt import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import json # importação de bibliotecas de análise from statsmodels.tsa.seasonal import seasonal_decompose from statsmodels.graphics.tsaplots import plot_acf, plot_pacf from statsmodels.tsa.stattools import acf, pacf from statsmodels.tsa.arima_process import ArmaProcess from pmdarima.arima import auto_arima from pmdarima.arima.arima import ARIMA # teste para verificar estacionariedade (Dickey-Fuller: https://en.wikipedia.org/wiki/Dickey%E2%80%93Fuller_test) from statsmodels.tsa.stattools import adfuller # metricas from sklearn.metrics import mean_absolute_percentage_error as smape, mean_squared_error as smse, mean_absolute_error as smae # pacote com funções para análise desse projeto import os cwd = os.getcwd() os.chdir("../") import py_scripts.plots, py_scripts.transform, py_scripts.metrics os.chdir(cwd) import matplotlib as mpl mpl.rcParams['figure.dpi'] = 120 mpl.rcParams['figure.figsize'] = (10, 4)
_____no_output_____
MIT
notebooks_exploration/2-faturamento_total.ipynb
flimao/case-previsao-faturamento
Data import
ts_raw = pd.read_csv(r'../data/sim_ts_limpo.csv') tsd, tswide = py_scripts.transform.pipeline(ts_raw) fat_total = tswide.sum(axis = 'columns').dropna() fat_total fat_total.plot(linestyle = '', marker = 'o') plt.title('Faturamento (R$ bi)') plt.show()
_____no_output_____
MIT
notebooks_exploration/2-faturamento_total.ipynb
flimao/case-previsao-faturamento
Exploratory Analysis. Let us first look at the total revenue contained in the historical series:
fat_total = tsd['total'] fat_total.describe() sns.scatterplot(data = fat_total) plt.title('Série histórica (últimos 4 anos)') plt.show()
_____no_output_____
MIT
notebooks_exploration/2-faturamento_total.ipynb
flimao/case-previsao-faturamento
There seems to be a jump in total revenue between 2014 and 2015. This jump is due to the launch of another product, `transporte`. The revenue of this new product is an order of magnitude smaller than the revenue of the `alimenticio` product (as we saw briefly in the missing-data chart and will see in detail later), but it is enough to be noticeable in the total revenue.
fig = plt.figure(figsize = (6, 8)) sns.boxplot(y = fat_total) plt.ylabel('Faturamento total (R$ bi)') plt.title('Boxplot - Série Histórica completa') plt.show() sns.histplot(fat_total) plt.xlabel('Faturamento total (R$ bi)') plt.title('Histograma - Série Histórica completa') plt.show()
_____no_output_____
MIT
notebooks_exploration/2-faturamento_total.ipynb
flimao/case-previsao-faturamento
However, descriptive measures of time series should be taken with respect to time. Let us break these measures down year by year:
n_anos = 4 anos_recentes = fat_total[fat_total.index >= dt.datetime.now() - dt.timedelta(days = n_anos * 365) + pd.tseries.offsets.YearBegin()] anos_recentes.describe() sns.scatterplot(data = anos_recentes) plt.title(f'Série histórica (últimos {n_anos} anos)') plt.show() sns.boxplot(y = fat_total, x = fat_total.index.year) plt.ylabel('Faturamento total (R$ bi)') plt.title('Boxplot - Série Histórica completa') plt.show() sns.boxplot(y = anos_recentes, x = anos_recentes.index.year) plt.ylabel('Faturamento total (R$ bi)') plt.title(f'Boxplot - Série Histórica (últimos {n_anos} anos)') plt.show()
_____no_output_____
MIT
notebooks_exploration/2-faturamento_total.ipynb
flimao/case-previsao-faturamento
There appear to be some *outliers* in 2021. However, the 2021 series is incomplete (it only runs through October). Historically, there is a jump in revenue in August, which may be distorting the descriptive measures. Excluding the year 2021...
anos_recentes_exc2021 = fat_total[(fat_total.index >= '2016') & (fat_total.index < '2021')] sns.boxplot(y = anos_recentes_exc2021, x = anos_recentes_exc2021.index.year) plt.ylabel('Faturamento total (R$ bi)') plt.title(f'Boxplot - Série Histórica (2016-2020)') plt.show()
_____no_output_____
MIT
notebooks_exploration/2-faturamento_total.ipynb
flimao/case-previsao-faturamento
The pandemic shows up in the data only as a slight increase of the median relative to the distance between Q1 and Q3.
sns.histplot(x = anos_recentes, hue = anos_recentes.index.year, multiple = 'dodge', shrink = .8, common_norm = False, palette = sns.color_palette()[:4]) plt.xlabel('Faturamento total (R$ bi)') plt.title(f'Histograma - Série Histórica (últimos {n_anos} anos)') plt.show()
_____no_output_____
MIT
notebooks_exploration/2-faturamento_total.ipynb
flimao/case-previsao-faturamento
The year-by-year histograms are better behaved than the histogram of the full historical series. Doing a month-by-month analysis for each year...
hue = fat_total.index.year palette = [] for i, year in enumerate(hue.unique()): if year not in [2014, 2015]: palette += ['lightgray'] else: palette += [sns.color_palette()[i]] ax = sns.lineplot( y = fat_total, x = fat_total.index.month, hue = fat_total.index.year, palette = palette ) ax.set_xlabel('Mês') ax.set_ylabel('Faturamento total (R$ bi)') ax.set_title(f"Receita por mes do ano") plt.tight_layout() plt.show()
_____no_output_____
MIT
notebooks_exploration/2-faturamento_total.ipynb
flimao/case-previsao-faturamento
... the jump from 2014 to 2015 with the arrival of the new product is clearly visible. Stationarity: for the series of total monthly revenues to be decomposed, it needs to be stationary. It does not look stationary, but let us check with the Dickey-Fuller statistical test. The null hypothesis of the Dickey-Fuller test is that the series is a random walk:
testedf = adfuller(fat_total) pvalor = testedf[1] alpha = 0.05 print(f'Valor-p: {pvalor:.3%}', end = '') if pvalor < alpha: print(f' < {alpha:.0%}') print(' Série de faturamentos mensais é estacionária. Rejeita-se a hipótese de a série ser um passeio aleatório.') else: print(f' > {alpha:.0%}') print(' Série de faturamentos mensais é um passeio aleatório. Não podemos rejeitar a hipótese nula.')
Valor-p: 98.342% > 5% Série de faturamentos mensais é um passeio aleatório. Não podemos rejeitar a hipótese nula.
MIT
notebooks_exploration/2-faturamento_total.ipynb
flimao/case-previsao-faturamento
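Since the test cannot reject the random-walk hypothesis, a natural follow-up is to check whether the first difference of the series is stationary; a short sketch assuming the `fat_total` series and the `adfuller` import above:

```python
# Difference the series once and re-run the Dickey-Fuller test.
fat_diff = fat_total.diff().dropna()
pvalor_diff = adfuller(fat_diff)[1]
print(f'p-value after one difference: {pvalor_diff:.3%}')
# A small p-value here would suggest that d = 1 is enough for an ARIMA model.
```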
This is made evident by the decomposition of the time series. Decomposition into Fourier series
decomp_total = seasonal_decompose(fat_total) # plot fig, axs = plt.subplots(nrows = 4, figsize = (10, 8), sharex = True) sns.lineplot(data = fat_total, ax = axs[0]) axs[0].set_title('Faturamento total') sns.lineplot(data = decomp_total.trend, ax = axs[1]) axs[1].set_ylabel('Tendência') sns.lineplot(data = decomp_total.seasonal, ax = axs[2]) axs[2].set_ylabel('Sazonalidade') resid = (decomp_total.resid - decomp_total.resid.mean())/decomp_total.resid.std() sns.scatterplot(data = resid, ax = axs[3]) axs[3].set_ylabel('Residual') fig.suptitle(f"Decomposição temporal: faturamento total") plt.show()
_____no_output_____
MIT
notebooks_exploration/2-faturamento_total.ipynb
flimao/case-previsao-faturamento
As shown earlier, this time series is not stationary, which we can see in the standardized residuals of the last panel (where there is a clear oscillatory pattern). Autoregressive model - Total revenue. To analyze and forecast this time series, a more complete model is needed. We will use a seasonal autoregressive integrated moving average model - **SARIMA**. NOTE: the full model is called SARIMAX; the extra `X` allows modeling exogenous variables, but we will not use exogenous variables in this case.
# excluindo o período pré-2015 test_begin = '2020-01-01' fat_modelo = fat_total['2015-01-01':] total_train = fat_modelo[:test_begin].iloc[:-1] total_test = fat_modelo[test_begin:] train_test_split_idx = int(fat_modelo.shape[0] * 0.8 + 1) total_train = fat_modelo[:train_test_split_idx] total_test = fat_modelo[train_test_split_idx:] total_train.plot(label = 'Treino') total_test.plot(label = 'Teste') plt.title('Train test split - Faturamento total') plt.ylabel('Faturamento total (R$ bi)') plt.legend() plt.show()
_____no_output_____
MIT
notebooks_exploration/2-faturamento_total.ipynb
flimao/case-previsao-faturamento
The SARIMA model has a few parameters: `S(P, D, Q, S)`, `AR(p)`, `I(d)` and `MA(q)`. To determine the parameter `d`, a good indication is the autocorrelation plot:
fig = plt.figure() ax = fig.gca() plot_pacf(fat_modelo, lags = 20, method = 'ywm', ax = ax) ax.set_xlabel('Lags') ax.set_title('Autocorrelação parcial - Série de faturamento total') plt.show()
_____no_output_____
MIT
notebooks_exploration/2-faturamento_total.ipynb
flimao/case-previsao-faturamento
In this case, a good estimate for the parameter `d` is the number of *lags* at which the correlation is statistically significant, minus 1. Here, $d \sim 1$.
arimas = {} arimas['total'] = auto_arima( y = total_train, start_p = 1, max_p = 3, d = 2, max_d = 4, start_q = 1, max_q = 3, start_P = 1, max_P = 3, D = None, max_D = 4, start_Q = 1, max_Q = 3, #max_order = 6, m = 12, seasonal = True, alpha = 0.05, stepwise = True, trace = True, n_fits = 500, ) modelo_corrente = ARIMA(order = (0, 2, 0), seasonal_order = (1, 1, 1, 12), with_intercept = True).fit(y = total_train) modelo_funcional = [ ARIMA(order = (0, 1, 0), seasonal_order = (0, 1, 0, 12), with_intercept = False).fit(y = total_train), ARIMA(order = (0, 2, 3), seasonal_order = (2, 1, 1, 12), with_intercept = False).fit(y = total_train), ARIMA(order = (0, 2, 0), seasonal_order = (1, 1, 1, 12), with_intercept = False).fit(y = total_train), ARIMA(order = (0, 2, 0), seasonal_order = (1, 1, 1, 12), with_intercept = True).fit(y = total_train), ] arimas['total'] = ARIMA(order = (0, 2, 0), seasonal_order = (1, 1, 1, 12), with_intercept = True).fit(y = total_train) arimas['total'].summary()
_____no_output_____
MIT
notebooks_exploration/2-faturamento_total.ipynb
flimao/case-previsao-faturamento
Metrics for the SARIMAX autoregressive model. First, we can evaluate the fit visually:
n_test_periods = total_test.shape[0] arr_preds = arimas['total'].predict(n_test_periods) idx = pd.date_range(freq = 'MS', start = total_test.index[0], periods = n_test_periods) preds = pd.Series(arr_preds, index = idx) preds.name = 'yearly_preds' preds.plot(label = 'Predição') total_test.plot(label = 'Conjunto de teste') plt.legend() plt.ylabel('Faturamento total (R$ bi)') plt.title('Predição contra conjunto de teste') plt.show()
_____no_output_____
MIT
notebooks_exploration/2-faturamento_total.ipynb
flimao/case-previsao-faturamento
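pmdarima's `predict` can also return confidence intervals, which makes the comparison with the test set easier to read; a sketch assuming the fitted `arimas['total']` model and `total_test` from the cells above:

```python
import pandas as pd
import matplotlib.pyplot as plt

n_test_periods = total_test.shape[0]
# return_conf_int=True also yields the (lower, upper) bounds per step.
arr_preds, conf_int = arimas['total'].predict(
    n_periods=n_test_periods, return_conf_int=True, alpha=0.05
)
idx = pd.date_range(freq='MS', start=total_test.index[0], periods=n_test_periods)
preds = pd.Series(arr_preds, index=idx)

preds.plot(label='Prediction')
total_test.plot(label='Test set')
plt.fill_between(idx, conf_int[:, 0], conf_int[:, 1], alpha=0.2, label='95% CI')
plt.ylabel('Total revenue (R$ bn)')
plt.legend()
plt.show()
```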
Let us apply some quantitative metrics to the model:
kwargs_total = dict( y_true = total_test, y_pred = preds, n = total_train.shape[0], dof = arimas['total'].df_model() ) py_scripts.metrics.mostrar_metricas(**kwargs_total) arimas['total'].arima_res_.data.endog fat_total['2019-06':'2020-06']
_____no_output_____
MIT
notebooks_exploration/2-faturamento_total.ipynb
flimao/case-previsao-faturamento
Task of muscle Modeling
import numpy as np import math import matplotlib.pyplot as plt %matplotlib notebook #inline
_____no_output_____
MIT
courses/modsim2018/tasks/Tasks_ForLectures10and11/.ipynb_checkpoints/Tasks_During_Lecture10-checkpoint.ipynb
raissabthibes/bmc
New Proposal: normalizing the force by the maximum force. Vastus medialis properties: Umax = 0.04, Lslack = 0.223, Lce = 0.087, Lceopt = 0.093, width = 0.63 * Lceopt, Fmax = 7400, a = 0.25 * Fmax, b = 0.25 * 10 * Lceopt. Initial conditions: phi = np.pi/2, phid = 0, Lce = 0.31 - Lslack, t0 = 0, tend = 2.99, h = 0.001; t = np.arange(t0, tend, h) and the arrays Lce_2, F and Fkpe start at zero. I have to take out Fmax. The cell then drafts the Euler integration loop (the muscle-tendon length Lm ramps down after t = 1 s; the normalized tendon force FTendonNorm and the passive force FkpeNorm follow quadratic spring laws; F0 is the force-length curve; eccentric contractions raise an error; the Euler step is Lce = Lce + h * Lcedt) and plots the force against time, first normalized and then multiplied by Fmax. The same loop appears, cleaned up, in the code cell below. New Proposal 3: normalizing the force by the maximum force and by the optimal length of the contractile element (Lceopt). I will divide everything that has Fmax by Fmax and everything that has Lceopt by Lceopt.
# Propriedades do vasto medial Umax = 0.04 Lslack = 0.223 Lceopt = 0.093 LceNorm = 0.087 / Lceopt width = 0.63 Fmax = 7400; a = 0.25 # * Fmax b = 0.25*10 # Condições Iniciais phi = np.pi/2 phid = 0 #Lce = 0.31 - Lslack t0 = 0 tend = 2.99 h = 0.001 # Inicializar t = np.arange(t0,tend,h) Lce_2 = np.empty_like(t); Lce_2[0] = 0 F = np.empty_like(t); F[0] = 0 Fkpe = np.empty_like(t); Fkpe[0] = 0 fiberLength = np.empty_like(t); fiberLength[0] = 0 tendonLength = np.empty_like(t); tendonLength[0] = 0 # Integração por Euler for i in range (1,len(t)): if t[i]<=1: Lm = 0.31 if t[i]>1 and t[i]<2: Lm = 0.31 - 0.04*(t[i]-1) LseeNorm = Lm/Lceopt - LceNorm if (LseeNorm < Lslack/Lceopt): FTendonNorm = 0; else: FTendonNorm = ((LseeNorm-Lslack/Lceopt)/(Umax*Lslack/Lceopt))**2; if (LceNorm < 1): FkpeNorm = 0; else: FkpeNorm = ((LceNorm-1)/(Umax))**2; F0 = max([0, (1-((LceNorm-1)/width)**2)]) if FTendonNorm > F0: pass #print('Error: can not do excentric contractions') LceNormdt = -b*(F0-(FTendonNorm-FkpeNorm)) / ((FTendonNorm-FkpeNorm)+a) # Euler intergration Step LceNorm = LceNorm + h * LceNormdt F[i] = FTendonNorm #* Fmax fiberLength[i] = LceNorm * Lceopt tendonLength[i] = LseeNorm * Lceopt FiberTendon = fiberLength + tendonLength # Plot plt.plot (t,F) plt.ylabel('Force [N]') plt.xlabel('time [s]') plt.show() fig, ax = plt.subplots(1,3,figsize=(6,6), sharex=True) ax[0].plot(t, fiberLength, label = 'Fiber') ax[0].plot(t, tendonLength, label = 'Tendon') ax[0].grid() plt.legend(loc = 'best') plt.xlabel('Time [s]') plt.ylabel('Length [mm]') def comuteTendonForce(LseeNorm, Lslack, Lceopt): ''' Compute Tendon Force Inputs: LseeNorm - Normalized Tendon Length Lslack - slack length of the tendon (non-normalized) Lceopt - Optimal length of the fiber Outputs: FTendonNorm - Normalized force of Tendon ''' Umax=0.04 if (LseeNorm < Lslack/Lceopt): FTendonNorm = 0; else: FTendonNorm = ((LseeNorm-Lslack/Lceopt)/(Umax*Lslack/Lceopt))**2; return def comuteParallelElementForce(LceNorm): ''' Compute Parallel Element Force Input: LceNorm - Normalized contratile element Length Output: FTendonNorm - Normalized force of Tendon ''' Umax=1 if LceNorm < 1: FkpeNorm = 0; else: FkpeNorm = ((LceNorm-1)/(Umax))**2; return FkpeNorm def computeForceLengthCurve(LceNorm): ''' Compute Force Length Curve Input: LceNorm - Normalized contratile element Length Output: F0 - Normalized force of Tendon ''' width = 0. 
F0 = max([0, (1-((LceNorm-1)/width)**2)]) return F0 def computeContractileElementDerivative(F0, FCE): ''' Compute Force Length Curve Input: LceNorm - Normalized contratile element Length Output: F0 - Normalized force of Tendon ''' a = 0.25 b = 0.25 * 10 if FCE > F0: print('Error: can not do excentric contractions') LceNormdt = -b*(F0-FCE) / (FCE+a) return LceNormdt # Propriedades do vasto medial Umax = 0.04 Lslack = 0.223 Lceopt = 0.093 LceNorm = 0.087 / Lceopt width = 0.63 Fmax = 7400; a = 0.25 # * Fmax b = 0.25*10 # Condições Iniciais phi = np.pi/2 phid = 0 #Lce = 0.31 - Lslack t0 = 0 tend = 2.99 h = 0.001 # Inicializar t = np.arange(t0,tend,h) Lce_2 = np.empty_like(t); Lce_2[0] = 0 F = np.empty_like(t); F[0] = 0 Fkpe = np.empty_like(t); Fkpe[0] = 0 fiberLength = np.empty_like(t); fiberLength[0] = 0 tendonLength = np.empty_like(t); tendonLength[0] = 0 # Integração por Euler for i in range (1,len(t)): if t[i]<=1: Lm = 0.31 if t[i]>1 and t[i]<2: Lm = 0.31 - 0.04*(t[i]-1) LseeNorm = Lm/Lceopt - LceNorm if (LseeNorm < Lslack/Lceopt): FTendonNorm = 0; else: FTendonNorm = ((LseeNorm-Lslack/Lceopt)/(Umax*Lslack/Lceopt))**2; if (LceNorm < 1): FkpeNorm = 0; else: FkpeNorm = ((LceNorm-1)/(Umax))**2; F0 = max([0, (1-((LceNorm-1)/width)**2)]) if FTendonNorm > F0: pass #print('Error: can not do excentric contractions') LceNormdt = -b*(F0-(FTendonNorm-FkpeNorm)) / ((FTendonNorm-FkpeNorm)+a) # Euler intergration Step LceNorm = LceNorm + h * LceNormdt F[i] = FTendonNorm #* Fmax fiberLength[i] = LceNorm * Lceopt tendonLength[i] = LseeNorm * Lceopt FiberTendon = fiberLength + tendonLength # Plot plt.plot (t,F) plt.ylabel('Force [N]') plt.xlabel('time [s]') plt.show()
_____no_output_____
MIT
courses/modsim2018/tasks/Tasks_ForLectures10and11/.ipynb_checkpoints/Tasks_During_Lecture10-checkpoint.ipynb
raissabthibes/bmc
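The helper functions drafted in the cell above are left unfinished: the tendon-force helper ends in a bare `return` and `width` is truncated to `0.`. A minimal corrected sketch, assuming `width = 0.63` and the same normalization used in the rest of the cell:

```python
def computeTendonForce(LseeNorm, Lslack, Lceopt, Umax=0.04):
    """Normalized force of the series elastic element (tendon)."""
    if LseeNorm < Lslack / Lceopt:
        return 0.0
    return ((LseeNorm - Lslack / Lceopt) / (Umax * Lslack / Lceopt)) ** 2

def computeParallelElementForce(LceNorm, Umax=1.0):
    """Normalized force of the parallel elastic element."""
    if LceNorm < 1:
        return 0.0
    return ((LceNorm - 1) / Umax) ** 2

def computeForceLengthCurve(LceNorm, width=0.63):
    """Normalized force-length relation of the contractile element."""
    return max(0.0, 1 - ((LceNorm - 1) / width) ** 2)

def computeContractileElementDerivative(F0, FCE, a=0.25, b=2.5):
    """Time derivative of the normalized contractile element length."""
    if FCE > F0:
        print('Error: cannot do eccentric contractions')
    return -b * (F0 - FCE) / (FCE + a)
```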
Python in Action Part 1: Python Fundamentals 22 — Variable scope rules in Python > scope and lifecycle of variables in Python. When you declare a variable outside of any function in Python, the variable will be visible to any code after the declaration. That is called a global variable. Note that you will be able to see the value of the variable without requiring any additional keyword. See below for the use of the `global` keyword when you need to modify the value of a global variable:
name = 'Jason' def say_hello(): print(f'Hello, {name}') say_hello() print('Your name is ' + name)
Hello, Jason Your name is Jason
MIT
01-python-basics/22-variable-scope-python.ipynb
sergiofgonzalez/python-in-action
When you define a variable inside a function, that variable will only be visible within that function. That is called a local variable:
name = 'Jason' def say_hello(): name = 'Idris' print(f'Hello, {name}!') say_hello() print(name)
Hello, Idris! Jason
MIT
01-python-basics/22-variable-scope-python.ipynb
sergiofgonzalez/python-in-action
The `global` keyword. There might be situations in which you would like to modify the value of a global variable within the scope of a function. When that happens, you will be required to use the `global` keyword:
name = 'Jason' def say_hello(): print(f'Hi, {name}!') def holler_hello(): global name name = name.upper() print(f'HI, {name}!') say_hello() holler_hello() print(name)
Hi, Jason! HI, JASON! JASON
MIT
01-python-basics/22-variable-scope-python.ipynb
sergiofgonzalez/python-in-action
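A closely related keyword is `nonlocal`, which does for variables of an enclosing function what `global` does for module-level variables; a short sketch:

```python
def make_counter():
    count = 0

    def increment():
        # Without 'nonlocal', assigning to count would create a new local variable.
        nonlocal count
        count += 1
        return count

    return increment

counter = make_counter()
print(counter())  # 1
print(counter())  # 2
```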
open the FITS file
#read image into a 2-D numpy array fname = "image.fits" hdu_list = fits.open(fname) hdu_list.info() #access image by indexing hdu_list image_data = hdu_list[0].data #data is stored as a 2D numpy array. Show the shape of the array print(type(image_data)) print(image_data.shape)
_____no_output_____
MIT
final_project_1.ipynb
ashleyparrilla/astro-detection
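The same file access can be written with a context manager so the HDU list is closed automatically; a sketch assuming the same `image.fits` file:

```python
from astropy.io import fits

fname = "image.fits"
# The 'with' block closes the file handle once the data have been read.
with fits.open(fname) as hdu_list:
    hdu_list.info()
    image_data = hdu_list[0].data.copy()  # copy so the array outlives the file handle

print(type(image_data), image_data.shape)
```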
show data
#show the image m,s = np.mean(image_data), np.std(image_data) plt.imshow(image_data, interpolation='nearest', cmap='gray', vmin=m-s, vmax=m+s, origin='lower',) plt.colorbar(); plt.savefig('image_1.png')
_____no_output_____
MIT
final_project_1.ipynb
ashleyparrilla/astro-detection
background subtraction
#measure spatially varying background on image bkg = sep.Background(image_data) bkg = sep.Background(image_data, bw=64, bh=64, fw=3, fh=3) #get global mean and noise of image's background print(bkg.globalback) print(bkg.globalrms) #evaluate background as 2-D array but same size as original image bkg_image = bkg.back() #bkg_image = np.array(bkg) #show background plt.imshow(bkg_image,interpolation='nearest',cmap='gray',origin='lower') plt.colorbar(); plt.savefig('image_2.png') #evaluate background noise as 2-D array, same size as original image bkg_rms = bkg.rms() #show background noise plt.imshow(bkg_rms,interpolation='nearest',cmap='gray',origin='lower') plt.colorbar(); plt.savefig('image_3.pdf') #subtract background image_data_sub = image_data - bkg
_____no_output_____
MIT
final_project_1.ipynb
ashleyparrilla/astro-detection
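sep expects C-contiguous arrays in native byte order, while FITS images are often stored big-endian, so a conversion step is sometimes needed before calling `sep.Background`; a hedged sketch:

```python
import numpy as np
import sep

# Cast to native-endian float64; FITS data are frequently big-endian,
# which sep rejects.
data = image_data.astype(np.float64)
bkg = sep.Background(data, bw=64, bh=64, fw=3, fh=3)
data_sub = data - bkg  # Background objects support direct subtraction
print(bkg.globalback, bkg.globalrms)
```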
object detection
#set detection threshold to be a constant value of 1.5*sigma #sigma=global background rms objects = sep.extract(image_data_sub, 1.5, err=bkg.globalrms) #number of objects detected len(objects) #over-plot the object coordinates with some parameters on the image #this will check where the detected objects are from matplotlib.patches import Ellipse #plot background-subtracted image fig, ax = plt.subplots() m,s = np.mean(image_data_sub), np.std(image_data_sub) im = ax.imshow(image_data_sub, interpolation='nearest', cmap='gray', vmin=m-s,vmax=m+s,origin='lower') #plot an ellipse for each object for i in range(len(objects)): e = Ellipse(xy=(objects['x'][i],objects['y'][i]), width=6*objects['a'][i], height=6*objects['b'][i], angle=objects['theta'][i]*180./np.pi) e.set_facecolor('none') e.set_edgecolor('red') ax.add_artist(e) plt.savefig('image_4.png') #see available fields objects.dtype.names
_____no_output_____
MIT
final_project_1.ipynb
ashleyparrilla/astro-detection
aperture photometry
#perform circular aperture photometry #with a 3 pixel radius at locations of the objects flux, fluxerr, flag = sep.sum_circle(image_data_sub,objects['x'], objects['y'], 3.0, err=bkg.globalrms, gain=1.0) #show the first 10 objects results: for i in range(10): print("object {:d}: flux = {:f} +/- {:f}".format(i, flux[i], fluxerr[i]))
_____no_output_____
MIT
final_project_1.ipynb
ashleyparrilla/astro-detection
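The summed fluxes can be turned into instrumental magnitudes for a quick look at the brightness distribution; a sketch assuming the `flux` array above and a purely illustrative (hypothetical) zero point:

```python
import numpy as np
import matplotlib.pyplot as plt

zero_point = 25.0  # hypothetical, uncalibrated zero point
positive = flux > 0  # log10 is undefined for non-positive fluxes
mags = zero_point - 2.5 * np.log10(flux[positive])

plt.hist(mags, bins=30)
plt.xlabel('instrumental magnitude')
plt.ylabel('number of objects')
plt.savefig('image_5.png')
```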
How to use Amazon Forecast. Helps advanced users start with Amazon Forecast quickly. The demo notebook runs through a typical end-to-end use case for a simple time-series forecasting scenario. Prerequisites: [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/installing.html) . For more information about the APIs, please check the [documentation](https://docs.aws.amazon.com/forecast/latest/dg/what-is-forecast.html) Table Of Contents* [Setting up](setup)* [Test Setup - Running first API](hello)* [Forecasting Example with Amazon Forecast](forecastingExample)**Read Every Cell FULLY before executing it** Setup
import sys import os import time import boto3 # importing forecast notebook utility from notebooks/common directory sys.path.insert( 0, os.path.abspath("../../common") ) import util
_____no_output_____
MIT-0
notebooks/advanced/Getting_started_with_AutoML/Getting_started_with_AutoML.ipynb
yinti/forecast-sagemaker
Configure the S3 bucket name and region name for this lesson.- If you don't have an S3 bucket, create it first on S3.- Although we have set the region to us-west-2 as a default value below, you can choose any of the regions that the service is available in.
text_widget_bucket = util.create_text_widget( "bucketName", "input your S3 bucket name" ) text_widget_region = util.create_text_widget( "region", "input region name.", default_value="us-west-2" ) bucketName = text_widget_bucket.value assert bucketName, "bucket_name not set." region = text_widget_region.value assert region, "region not set." session = boto3.Session(region_name=region) forecast = session.client(service_name='forecast') forecastquery = session.client(service_name='forecastquery')
_____no_output_____
MIT-0
notebooks/advanced/Getting_started_with_AutoML/Getting_started_with_AutoML.ipynb
yinti/forecast-sagemaker
Forecasting with Amazon Forecast. Preparing your Data. In Amazon Forecast, a dataset is a collection of file(s) which contain data that is relevant for a forecasting task. A dataset must conform to a schema provided by Amazon Forecast. For this exercise, we use the individual household electric power consumption dataset. (Dua, D. and Karra Taniskidou, E. (2017). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science.) We aggregate the usage data hourly. Data Type. Amazon Forecast can import data from Amazon S3. We first explore the data locally to see the fields
import pandas as pd df = pd.read_csv("../../common/data/item-demand-time.csv", dtype = object) df.head(3)
_____no_output_____
MIT-0
notebooks/advanced/Getting_started_with_AutoML/Getting_started_with_AutoML.ipynb
yinti/forecast-sagemaker
Now upload the data to S3. But before doing that, go into your AWS Console, select S3 for the service and create a new bucket inside the `Oregon` or `us-west-2` region. Use the bucket naming convention `amazon-forecast-unique-value-data`. The name must be unique; if you get an error, just adjust it until the name works, then update the `bucketName` cell below.
s3 = session.client('s3') key="elec_data/item-demand-time.csv" s3.upload_file(Filename="../../common/data/item-demand-time.csv", Bucket=bucketName, Key=key) # Create the role to provide to Amazon Forecast. role_name = "ForecastNotebookRole-AutoML" role_arn = util.get_or_create_iam_role( role_name = role_name )
_____no_output_____
MIT-0
notebooks/advanced/Getting_started_with_AutoML/Getting_started_with_AutoML.ipynb
yinti/forecast-sagemaker
CreateDataset. More details about `Domain` and dataset types can be found in the [documentation](https://docs.aws.amazon.com/forecast/latest/dg/howitworks-domains-ds-types.html) . For this example, we are using the [CUSTOM](https://docs.aws.amazon.com/forecast/latest/dg/custom-domain.html) domain with 3 required attributes: `timestamp`, `target_value` and `item_id`. Also, update the project name to reflect your name in lowercase format.
DATASET_FREQUENCY = "H" TIMESTAMP_FORMAT = "yyyy-MM-dd hh:mm:ss" project = 'workshop_forecastdemo_1' # Replace this with a unique name here, make sure the entire name is < 30 characters. datasetName= project+'_ds' datasetGroupName= project +'_gp' s3DataPath = "s3://"+bucketName+"/"+key datasetName
_____no_output_____
MIT-0
notebooks/advanced/Getting_started_with_AutoML/Getting_started_with_AutoML.ipynb
yinti/forecast-sagemaker
Schema Definition We are defining the attributes for the model
# Specify the schema of your dataset here. Make sure the order of columns matches the raw data files. schema ={ "Attributes":[ { "AttributeName":"timestamp", "AttributeType":"timestamp" }, { "AttributeName":"target_value", "AttributeType":"float" }, { "AttributeName":"item_id", "AttributeType":"string" } ] } response=forecast.create_dataset( Domain="CUSTOM", DatasetType='TARGET_TIME_SERIES', DatasetName=datasetName, DataFrequency=DATASET_FREQUENCY, Schema = schema ) datasetArn = response['DatasetArn'] create_dataset_group_response = forecast.create_dataset_group(DatasetGroupName=datasetGroupName, Domain="CUSTOM", DatasetArns= [datasetArn] ) datasetGroupArn = create_dataset_group_response['DatasetGroupArn']
_____no_output_____
MIT-0
notebooks/advanced/Getting_started_with_AutoML/Getting_started_with_AutoML.ipynb
yinti/forecast-sagemaker
If you have an existing dataset group, you can update it using **update_dataset_group**.
forecast.describe_dataset_group(DatasetGroupArn=datasetGroupArn)
_____no_output_____
MIT-0
notebooks/advanced/Getting_started_with_AutoML/Getting_started_with_AutoML.ipynb
yinti/forecast-sagemaker
Create Data Import Job. Brings the raw data into the Amazon Forecast system, ready for forecasting.
datasetImportJobName = 'EP_AML_DSIMPORT_JOB_TARGET' ds_import_job_response=forecast.create_dataset_import_job(DatasetImportJobName=datasetImportJobName, DatasetArn=datasetArn, DataSource= { "S3Config" : { "Path":s3DataPath, "RoleArn": role_arn } }, TimestampFormat=TIMESTAMP_FORMAT ) ds_import_job_arn=ds_import_job_response['DatasetImportJobArn'] print(ds_import_job_arn)
_____no_output_____
MIT-0
notebooks/advanced/Getting_started_with_AutoML/Getting_started_with_AutoML.ipynb
yinti/forecast-sagemaker
Check the status of the dataset import job. When the status changes from **CREATE_IN_PROGRESS** to **ACTIVE**, we can continue to the next steps. Depending on the data size, this process can take 5 to 10 minutes to become **ACTIVE**.
status_indicator = util.StatusIndicator() while True: status = forecast.describe_dataset_import_job(DatasetImportJobArn=ds_import_job_arn)['Status'] status_indicator.update(status) if status in ('ACTIVE', 'CREATE_FAILED'): break time.sleep(10) status_indicator.end() forecast.describe_dataset_import_job(DatasetImportJobArn=ds_import_job_arn)
_____no_output_____
MIT-0
notebooks/advanced/Getting_started_with_AutoML/Getting_started_with_AutoML.ipynb
yinti/forecast-sagemaker
Create Predictor with custom forecast horizon. The forecast horizon is the number of time points to be predicted in the future. For weekly data, a value of 12 means 12 weeks. Our example uses hourly data and we try to forecast the next day, so we can set it to 24. If we are not sure which recipe will perform best, we can use the AutoML option that the SDK offers.
predictorName = project+'_autoML' forecastHorizon = 24 algorithmArn = 'arn:aws:forecast:::algorithm/ETS' create_predictor_response=forecast.create_predictor(PredictorName=predictorName, ForecastHorizon=forecastHorizon, PerformAutoML=True, PerformHPO=False, EvaluationParameters= {"NumberOfBacktestWindows": 1, "BackTestWindowOffset": 24}, InputDataConfig= {"DatasetGroupArn": datasetGroupArn}, FeaturizationConfig= {"ForecastFrequency": "H", "Featurizations": [ {"AttributeName": "target_value", "FeaturizationPipeline": [ {"FeaturizationMethodName": "filling", "FeaturizationMethodParameters": {"frontfill": "none", "middlefill": "zero", "backfill": "zero"} } ] } ] } ) predictorArn=create_predictor_response['PredictorArn']
_____no_output_____
MIT-0
notebooks/advanced/Getting_started_with_AutoML/Getting_started_with_AutoML.ipynb
yinti/forecast-sagemaker
Check the status of the predictor. When the status changes from **CREATE_IN_PROGRESS** to **ACTIVE**, we can continue to the next steps. Depending on data size, model selection and hyperparameters, it can take from 10 minutes to more than one hour to become **ACTIVE**.
status_indicator = util.StatusIndicator() while True: status = forecast.describe_predictor(PredictorArn=predictorArn)['Status'] status_indicator.update(status) if status in ('ACTIVE', 'CREATE_FAILED'): break time.sleep(10) status_indicator.end()
_____no_output_____
MIT-0
notebooks/advanced/Getting_started_with_AutoML/Getting_started_with_AutoML.ipynb
yinti/forecast-sagemaker
Get Error Metrics. Let's get the accuracy metrics of the predictor we just created using AutoML. The response will be a dictionary with all available recipes. AutoML works out the best one for our predictor.
forecast.get_accuracy_metrics(PredictorArn=predictorArn)
_____no_output_____
MIT-0
notebooks/advanced/Getting_started_with_AutoML/Getting_started_with_AutoML.ipynb
yinti/forecast-sagemaker
Create Forecast Now create a forecast using the model that was trained.
forecastName= project+'_aml_forecast' create_forecast_response=forecast.create_forecast(ForecastName=forecastName, PredictorArn=predictorArn) forecastArn = create_forecast_response['ForecastArn']
_____no_output_____
MIT-0
notebooks/advanced/Getting_started_with_AutoML/Getting_started_with_AutoML.ipynb
yinti/forecast-sagemaker
Check the status of the forecast process. When the status changes from **CREATE_IN_PROGRESS** to **ACTIVE**, we can continue to the next steps. Depending on data size, model selection and hyperparameters, it can take from 10 minutes to more than one hour to become **ACTIVE**. There's no output here, but that is fine as long as the * is there.
status_indicator = util.StatusIndicator() while True: status = forecast.describe_forecast(ForecastArn=forecastArn)['Status'] status_indicator.update(status) if status in ('ACTIVE', 'CREATE_FAILED'): break time.sleep(10) status_indicator.end()
_____no_output_____
MIT-0
notebooks/advanced/Getting_started_with_AutoML/Getting_started_with_AutoML.ipynb
yinti/forecast-sagemaker
Get Forecast. Once created, the forecast results are ready and you can view them.
forecastResponse = forecastquery.query_forecast( ForecastArn=forecastArn, Filters={"item_id":"client_12"} ) print(forecastResponse)
_____no_output_____
MIT-0
notebooks/advanced/Getting_started_with_AutoML/Getting_started_with_AutoML.ipynb
yinti/forecast-sagemaker
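The JSON returned by `query_forecast` nests each quantile series under `Forecast -> Predictions`; a sketch (hedged, since the exact response layout may vary by service version) that flattens the p50 series into a DataFrame:

```python
import pandas as pd

# Assumed layout: {'Forecast': {'Predictions': {'p10': [...], 'p50': [...], 'p90': [...]}}}
# where each list holds {'Timestamp': ..., 'Value': ...} records.
predictions = forecastResponse['Forecast']['Predictions']
p50 = pd.DataFrame(predictions['p50'])
p50['Timestamp'] = pd.to_datetime(p50['Timestamp'])
print(p50.head())
```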
Export Forecast. You can export the forecast to an S3 bucket. To do so, a role with S3 put access is needed, but this has already been created.
forecastExportName= project+'_aml_forecast_export' outputPath="s3://"+bucketName+"/output" forecast_export_response = forecast.create_forecast_export_job( ForecastExportJobName = forecastExportName, ForecastArn=forecastArn, Destination = { "S3Config" : { "Path":outputPath, "RoleArn": role_arn } } ) forecastExportJobArn = forecast_export_response['ForecastExportJobArn'] status_indicator = util.StatusIndicator() while True: status = forecast.describe_forecast_export_job(ForecastExportJobArn=forecastExportJobArn)['Status'] status_indicator.update(status) if status in ('ACTIVE', 'CREATE_FAILED'): break time.sleep(10) status_indicator.end()
_____no_output_____
MIT-0
notebooks/advanced/Getting_started_with_AutoML/Getting_started_with_AutoML.ipynb
yinti/forecast-sagemaker
Check s3 bucket for results
s3.list_objects(Bucket=bucketName,Prefix="output")
_____no_output_____
MIT-0
notebooks/advanced/Getting_started_with_AutoML/Getting_started_with_AutoML.ipynb
yinti/forecast-sagemaker
Cleanup. Once we have completed the above steps, we can start to clean up the resources we created. All delete jobs, except for `delete_dataset_group`, are asynchronous, so we have added the helpful `wait_till_delete` function. Resource limits are documented here.
# Delete forecast export for both algorithms util.wait_till_delete(lambda: forecast.delete_forecast_export_job(ForecastExportJobArn = forecastExportJobArn)) # Delete forecast util.wait_till_delete(lambda: forecast.delete_forecast(ForecastArn = forecastArn)) # Delete predictor util.wait_till_delete(lambda: forecast.delete_predictor(PredictorArn = predictorArn)) # Delete Import util.wait_till_delete(lambda: forecast.delete_dataset_import_job(DatasetImportJobArn=ds_import_job_arn)) # Delete the dataset util.wait_till_delete(lambda: forecast.delete_dataset(DatasetArn=datasetArn)) # Delete Dataset Group util.wait_till_delete(lambda: forecast.delete_dataset_group(DatasetGroupArn=datasetGroupArn)) # Delete IAM role util.delete_iam_role( role_name )
_____no_output_____
MIT-0
notebooks/advanced/Getting_started_with_AutoML/Getting_started_with_AutoML.ipynb
yinti/forecast-sagemaker
CATEGORIZATION MODELS 1. Introduction
import IPython.display as ipd import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import sklearn as skl import sklearn.utils, sklearn.preprocessing, sklearn.decomposition, sklearn.svm data = pd.read_pickle("clean_data/track.pkl") unpickled_df_features = pd.read_pickle("clean_data/features.pkl")
_____no_output_____
MIT
Modelizacion Coti.ipynb
constanzasilvestre/digital-house-challenge-3
(Copied from Data Analysis) Feature Analysis. The features were generated with the librosa library on mp3 excerpts of each song. (Of the data, this is the first grouping.) The generated features are:
- mfcc: Mel-frequency cepstral coefficients (MFCCs). The MFCCs of a signal are a small set of features (usually about 10–20) which concisely describe the overall shape of a spectral envelope. It models the characteristics of the human voice.
- chroma_cens: Computes the chroma variant "Chroma Energy Normalized" (CENS). CENS features are robust to dynamics, timbre and articulation, thus they are commonly used in audio matching and retrieval applications. Chroma features are an interesting and powerful representation for music audio in which the entire spectrum is projected onto 12 bins representing the 12 distinct semitones (or chroma) of the musical octave.
- tonnetz: Tonal centroid features (tonnetz). This representation projects chroma features onto a 6-dimensional basis representing the perfect fifth, minor third, and major third each as two-dimensional coordinates.
- spectral_contrast: Each frame of a spectrogram S is divided into sub-bands. For each sub-band, the energy contrast is estimated by comparing the mean energy in the top quantile (peak energy) to that of the bottom quantile (valley energy). High contrast values generally correspond to clear, narrow-band signals, while low contrast values correspond to broad-band noise.
- spectral_centroid: Each frame of a magnitude spectrogram is normalized and treated as a distribution over frequency bins, from which the mean (centroid) is extracted per frame. It indicates where the "centre of mass" of a sound is located and is calculated as the weighted mean of the frequencies present in the sound. Consider two songs, one from the blues genre and the other belonging to metal. Compared to the blues song, which is similar throughout its length, the metal song has more frequencies towards the end. So the spectral centroid of a blues song will lie somewhere near the middle of its spectrum, while that of a metal song will be towards its end.
- spectral_bandwidth: Computes the p'th-order spectral bandwidth.
- spectral_rolloff: The roll-off frequency is defined for each frame as the center frequency for a spectrogram bin such that at least roll_percent (0.85 by default) of the energy of the spectrum in this frame is contained in this bin and the bins below. This can be used to, e.g., approximate the maximum (or minimum) frequency by setting roll_percent to a value close to 1 (or 0).
- rmse: Computes the root-mean-square (RMS) value for each frame, either from the audio samples y or from a spectrogram S.
- zcr: Zero-crossing rate of an audio time series -> the rate of sign-changes along a signal, i.e., the rate at which the signal changes from positive to negative or back. This feature has been used heavily in both speech recognition and music information retrieval. It usually has higher values for highly percussive sounds like those in metal and rock.
For more information about each feature: [Librosa features](https://librosa.org/doc/main/feature.html)
Spectrogram: A spectrogram is a visual representation of the spectrum of frequencies of sound or other signals as they vary with time. Spectrograms are sometimes called sonographs, voiceprints, or voicegrams. When the data is represented in a 3D plot, they may be called waterfalls. In 2-dimensional arrays, the first axis is frequency while the second axis is time.
For each feature we compute: kurtosis, max, mean, median, min, skew, std.
Conclusion:
- zcr: I want to keep this since it characterizes metal and rock.
- spectral_centroid: the center of mass of the sound; for metal it lies towards the end and for blues in the middle.
- spectral_rolloff: a measure of the signal representing the frequency below which a given share of the total spectral energy lies.
- mfcc: a model of the human voice.
- chroma: a powerful representation of the audio.
We will choose 5 features, i.e. Mel-Frequency Cepstral Coefficients, Spectral Centroid, Zero Crossing Rate, Chroma Frequencies, Spectral Roll-off. (Reference column list: filename chroma_stft rmse spectral_centroid spectral_bandwidth rolloff zero_crossing_rate mfcc1 mfcc2 mfcc3 ... mfcc12 mfcc13 mfcc14 mfcc15 mfcc16 mfcc17 mfcc18 mfcc19 mfcc20 label) Feature extraction
unpickled_df_features.head(5).style.format('{:.2f}') features_columns = ['mfcc', 'chroma_cens', 'spectral_centroid',"spectral_bandwidth", 'spectral_rolloff', "zcr"] clean_features = unpickled_df_features[features_columns] clean_features.shape clean_features_spectral_centroid= unpickled_df_features["spectral_centroid"]["mean"] print(clean_features_spectral_centroid.head()) clean_features_spectral_centroid = clean_features_spectral_centroid.rename(columns={"01":"spectral_centroid"}) clean_features_spectral_bandwidth = unpickled_df_features["spectral_bandwidth"]["mean"] print(clean_features_spectral_bandwidth.head()) clean_features_spectral_bandwidth = clean_features_spectral_bandwidth.rename(columns={"01":"spectral_bandwidth"}) clean_features_spectral_rolloff = unpickled_df_features["spectral_rolloff"]["mean"] print(clean_features_spectral_rolloff.head()) clean_features_spectral_rolloff = clean_features_spectral_rolloff.rename(columns={"01":"spectral_rolloff"}) clean_features_zcr = unpickled_df_features["zcr"]["mean"] print(clean_features_zcr.head()) clean_features_zcr = clean_features_zcr.rename(columns={"01":"zcr"}) clean_features_mfcc = unpickled_df_features["mfcc"]["mean"] clean_features_mfcc_mean =clean_features_mfcc.mean(axis=1) print(clean_features_mfcc_mean.head()) #clean_features_spectral_rolloff = clean_features_spectral_rolloff.rename(columns={"0":"mfcc"}) clean_features_chroma_cens= unpickled_df_features["chroma_cens"]["mean"] clean_features_chroma_cens_mean=clean_features_chroma_cens.mean(axis=1) print(clean_features_chroma_cens_mean.head()) #clean_features_chroma_cens_mean = clean_features_chroma_cens_mean.rename(columns={"0":"chroma"}) clean_features = pd.concat([clean_features_spectral_rolloff, clean_features_spectral_bandwidth, clean_features_spectral_centroid, clean_features_zcr, clean_features_mfcc_mean, clean_features_chroma_cens_mean ], axis=1, join='inner') features = clean_features.rename(columns={0: "mfcc",1:"chroma" }) print(features.columns) data_full = pd.concat([data, features], axis=1, join='inner') data_full.shape data=data_full
_____no_output_____
MIT
Modelizacion Coti.ipynb
constanzasilvestre/digital-house-challenge-3
We prepare the model data
from matplotlib import offsetbox import joblib #from PIL import Image from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA from sklearn.model_selection import train_test_split from sklearn import linear_model from sklearn import metrics import matplotlib.pyplot as plt
_____no_output_____
MIT
Modelizacion Coti.ipynb
constanzasilvestre/digital-house-challenge-3
We look at the mean and variance of the variables:
#Obtenemos las variables numericas del data: print (data.columns) print(data.info()) #dropeo location porque tiene muchos nulos data_complete = data_full.drop(labels="location",axis=1) print("Media de las variables: ") print(data_complete.mean(axis=0)) print('\n') print("Varianza de las variables: ") print(data_complete.var(axis=0)) print (data_complete.columns) #Armo una lista de numericas para depsues hacer el fit transform en estas lista_numero=["duration", "acousticness","album_tracks", "danceability","energy","instrumentalness", "liveness", "speechiness","tempo","valence",'spectral_rolloff', 'spectral_bandwidth', 'spectral_centroid', 'zcr', 'mfcc', 'chroma'] data_complete=data_complete[lista_numero] # El argumento stratify nos permite generar una división que respeta la misma proporción entre clases en ambos sets X = data_complete Y = data['genre_top'] X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state = 1237, stratify= Y) #tengo qeu hacer un stratidfy aca para qeu en la division de train test tenga las mismo porcentaje de varialbes) display(Y_train.value_counts(normalize=True).round(2)) display(Y_test.value_counts(normalize=True).round(2)) #Es necesario llevar a la misma escala, porque sino la que tiene mayor varianza va a pesar mas en PCA. Por eso normalizamos los datos #en un modelo no hacemos en el test el fit_transform, solo hacemos transform. Porque ya tenemos la informacion de la media y varianza en el fit del train. std_sclr = StandardScaler() std_sclr_trained = std_sclr.fit(X_train) X_train_numerical = std_sclr_trained.transform(X_train) X_train_numerical_scaled = pd.DataFrame(X_train_numerical, columns = lista_numero) X_train_numerical_scaled.head() X_test_numerical = std_sclr_trained.transform(X_test) X_test_numerical_scaled = pd.DataFrame(X_test_numerical, columns = lista_numero) X_test_numerical_scaled.head() print("Media de las variables: ") print(X_train_numerical_scaled.mean(axis=0)) print('\n') # Observamos nuevamente la varianza de las variables: como normalizamos la varianza es 1 print("Varianza de las variables: ") print(X_train_numerical_scaled.var(axis=0))
Media de las variables: duration 2.054668e-17 acousticness 8.022849e-17 album_tracks 5.641946e-16 danceability 2.689316e-16 energy 6.707861e-17 instrumentalness -2.376314e-16 liveness 1.595708e-16 speechiness -1.208808e-16 tempo 2.483364e-16 valence 2.045747e-16 spectral_rolloff -1.985932e-16 spectral_bandwidth 2.344191e-16 spectral_centroid 4.954861e-16 zcr -4.218721e-17 mfcc -2.156437e-17 chroma 1.941324e-16 dtype: float64 Varianza de las variables: duration 1.000143 acousticness 1.000143 album_tracks 1.000143 danceability 1.000143 energy 1.000143 instrumentalness 1.000143 liveness 1.000143 speechiness 1.000143 tempo 1.000143 valence 1.000143 spectral_rolloff 1.000143 spectral_bandwidth 1.000143 spectral_centroid 1.000143 zcr 1.000143 mfcc 1.000143 chroma 1.000143 dtype: float64
MIT
Modelizacion Coti.ipynb
constanzasilvestre/digital-house-challenge-3
Model features: X_test_numerical_scaled / X_train_numerical_scaled / Y_train, Y_test. 1. Dimensionality reduction -> PCA
model_pca = PCA().fit(X_train_numerical_scaled) X_train_PCA = model_pca.transform(X_train_numerical_scaled) X_test_PCA = model_pca.transform(X_test_numerical_scaled) componentes=model_pca.n_components_ print("Componentes del modelo", model_pca.n_components_) def plot_explained_variance(components_count, X): model_pca = PCA(components_count).fit(X) explained_variance = model_pca.explained_variance_ratio_ #print(explained_variance) cumulative_explained_variance = np.cumsum(explained_variance) #print(cumulative_explained_variance) plt.plot(cumulative_explained_variance) plt.xlabel('número de componentes') plt.ylabel('% de varianza explicada'); plot_explained_variance(components_count = componentes, X = X_train_numerical_scaled)
_____no_output_____
MIT
Modelizacion Coti.ipynb
constanzasilvestre/digital-house-challenge-3
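Instead of eyeballing the elbow, scikit-learn's `PCA` also accepts a fractional `n_components`, keeping just enough components to reach that share of explained variance; a sketch assuming the scaled training matrix above:

```python
from sklearn.decomposition import PCA

# n_components=0.95 keeps the smallest number of components explaining >= 95% of the variance.
pca_95 = PCA(n_components=0.95).fit(X_train_numerical_scaled)
print('components kept:', pca_95.n_components_)
print('explained variance:', pca_95.explained_variance_ratio_.sum().round(3))
```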
PCA for the audio features
lista_features=['spectral_rolloff', 'spectral_bandwidth', 'spectral_centroid', 'zcr', 'mfcc', 'chroma' ] std_sclr = StandardScaler() std_sclr_trained = std_sclr.fit(X_train[lista_features]) X_train_numerical = std_sclr_trained.transform(X_train[lista_features]) X_train_numerical_scaled = pd.DataFrame(X_train_numerical, columns = lista_features) X_train_numerical_scaled.head() X_test_numerical = std_sclr_trained.transform(X_test[lista_features]) X_test_numerical_scaled = pd.DataFrame(X_test_numerical, columns = lista_features) X_test_numerical_scaled.head() model_pca = PCA().fit(X_train_numerical_scaled) X_train_PCA = model_pca.transform(X_train_numerical_scaled) X_test_PCA = model_pca.transform(X_test_numerical_scaled) componentes=model_pca.n_components_ print("Componentes del modelo", model_pca.n_components_) plot_explained_variance(components_count = componentes, X = X_train_numerical_scaled)
_____no_output_____
MIT
Modelizacion Coti.ipynb
constanzasilvestre/digital-house-challenge-3
With the elbow rule we see that 3 PCA components explain more than 95% of the variance and 2 explain about 95%. Graphical representation with 2 components
pca_digits_vis = PCA(n_components=2) data_numero = pca_digits_vis.fit_transform(data_complete[lista_features]) print(data_complete[lista_features].shape) print(data_numero.shape) def plot_digits_pca(projection, generos): colors = ["#476A2A", "#7851B8", "#BD3430", "#4A2D4E", "#875525", "#A83683", "#4E655E", "#853541", "#3A3120", "#535D8E"] plt.figure(figsize=(10,10)) plt.xlim(projection[:,0].min(), projection[:,0].max()) plt.ylim(projection[:,1].min(), projection[:,1].max()) for i in range(len(projection)): plt.xlabel('Primer Componente Principal') plt.ylabel('Segundo Componente Principal') plt.scatter(projection[i,0], projection[i,1], s=10) #color=color[genero[i]] #No pude conectar un color con un genero distinto plot_digits_pca(data_numero, data.genre_top)
_____no_output_____
MIT
Modelizacion Coti.ipynb
constanzasilvestre/digital-house-challenge-3
2. NAIVE BAYES
from sklearn.naive_bayes import GaussianNB from sklearn.metrics import accuracy_score from sklearn.metrics import recall_score from sklearn.metrics import precision_score from sklearn.metrics import confusion_matrix import seaborn as sns gnb = GaussianNB() gnb.fit(X_train_numerical_scaled, Y_train) Y_pred = gnb.predict(X_test_numerical_scaled) Y_pred round(accuracy_score(Y_test, Y_pred), 2) print('Accuracy=', accuracy_score(Y_test, Y_pred)) #print('Recall=', recall_score(Y_test, Y_pred)) #print('Precision=', precision_score(Y_test, Y_pred)) sns.heatmap(confusion_matrix(Y_test, Y_pred), annot=True, fmt='.0f') plt.ylabel('Etiquetas reales') plt.xlabel('Etiquetas predichas');
_____no_output_____
MIT
Modelizacion Coti.ipynb
constanzasilvestre/digital-house-challenge-3
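The commented-out `recall_score`/`precision_score` calls fail for multi-class targets unless an `average` argument is given; `classification_report` gives per-genre precision and recall in one call. A sketch assuming `Y_test` and `Y_pred` from the cell above:

```python
from sklearn.metrics import classification_report, precision_score, recall_score

# Per-class precision/recall/F1 plus weighted averages for the genre labels.
print(classification_report(Y_test, Y_pred))
print('Recall (weighted):', recall_score(Y_test, Y_pred, average='weighted').round(2))
print('Precision (weighted):', precision_score(Y_test, Y_pred, average='weighted').round(2))
```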
3. KNN
# Import the KNeighborsClassifier class from the neighbors module
from sklearn.neighbors import KNeighborsClassifier

knn = KNeighborsClassifier()
knn.fit(X_train, Y_train)
y_pred = knn.predict(X_test)

from sklearn.metrics import accuracy_score
accuracy_score(Y_test, y_pred).round(2)

from sklearn.model_selection import cross_val_score, KFold
kf = KFold(n_splits=12, shuffle=True, random_state=12)

scores_para_df = []
for i in range(1, 21):
    # On each iteration, instantiate the model with a different hyperparameter
    model = KNeighborsClassifier(n_neighbors=i)
    # cross_val_score returns an array of results,
    # one for each fold that CV built automatically
    cv_scores = cross_val_score(model, X_train, Y_train, cv=kf)
    # For each value of n_neighbors, create a dictionary with the value
    # of n_neighbors and the mean and standard deviation of the scores
    dict_row_score = {'score_medio': np.mean(cv_scores), 'score_std': np.std(cv_scores), 'n_neighbors': i}
    # Store each one in the list of dictionaries
    scores_para_df.append(dict_row_score)

df_scores = pd.DataFrame(scores_para_df)
df_scores.head()

df_scores['limite_inferior'] = df_scores['score_medio'] - df_scores['score_std']
df_scores['limite_superior'] = df_scores['score_medio'] + df_scores['score_std']
df_scores.head()

# Plot the results
plt.plot(df_scores['n_neighbors'], df_scores['limite_inferior'], color='r')
plt.plot(df_scores['n_neighbors'], df_scores['score_medio'], color='b')
plt.plot(df_scores['n_neighbors'], df_scores['limite_superior'], color='r');

# Identify the maximum score
df_scores.loc[df_scores.score_medio == df_scores.score_medio.max()]

# Use sklearn to standardize the feature matrix
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)

# Check that the variables now have mean 0 and standard deviation 1
print('Means:', np.mean(X_train, axis=0).round(2))
print('Std:', np.std(X_train, axis=0).round(2))

# Compute the cross-validation scores again,
# but this time on the standardized features:
scores_para_df_standard = []
for i in range(1, 21):
    model = KNeighborsClassifier(n_neighbors=i)
    cv_scores = cross_val_score(model, X_train, Y_train, cv=kf)
    dict_row_score = {'score_medio': np.mean(cv_scores), 'score_std': np.std(cv_scores), 'n_neighbors': i}
    scores_para_df_standard.append(dict_row_score)

# Build the DataFrame from the list of dictionaries
df_scores_standard = pd.DataFrame(scores_para_df_standard)
df_scores_standard.head()

df_scores_standard['limite_superior'] = df_scores_standard['score_medio'] + df_scores_standard['score_std']
df_scores_standard['limite_inferior'] = df_scores_standard['score_medio'] - df_scores_standard['score_std']
df_scores_standard.head()

# Plot the results
plt.plot(df_scores_standard['n_neighbors'], df_scores_standard['limite_inferior'], color='r')
plt.plot(df_scores_standard['n_neighbors'], df_scores_standard['score_medio'], color='b')
plt.plot(df_scores_standard['n_neighbors'], df_scores_standard['limite_superior'], color='r');

# Identify the maximum score
df_scores_standard.loc[df_scores_standard.score_medio == df_scores_standard.score_medio.max()]

# Store the optimal k value in a variable
best_k = df_scores_standard.loc[df_scores_standard.score_medio == df_scores_standard.score_medio.max(), 'n_neighbors'].values[0]
best_k

# Choose the optimal model according to the cross-validation results
model = KNeighborsClassifier(n_neighbors=best_k)

# Fit it on the training data
model.fit(X_train, Y_train)

# Evaluate the accuracy we get on train
accuracy_score(Y_train, model.predict(X_train)).round(2)

# Use it to predict on test
X_test = scaler.transform(X_test)  # Crucial: standardize the test data as well, using the means and deviations learned on train!
y_pred = model.predict(X_test)

# Evaluate the model's accuracy on test
accuracy_score(Y_test, y_pred).round(2)
_____no_output_____
MIT
Modelizacion Coti.ipynb
constanzasilvestre/digital-house-challenge-3
KNN with 11 neighbors -> the model is underfitting - accuracy on train -> 0.72 - accuracy on test -> 0.67
# Get the confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(Y_test, y_pred)
cm

from sklearn.metrics import confusion_matrix
from sklearn.metrics import plot_confusion_matrix
sns.set(rc={'figure.figsize': (30, 30)})

# Plot the confusion matrix
print(confusion_matrix(Y_test, y_pred))
plot_confusion_matrix(model, X_test, Y_test)

sns.heatmap(confusion_matrix(Y_test, y_pred), annot=True, fmt='.0f')
plt.ylabel('True labels')
plt.xlabel('Predicted labels');
_____no_output_____
MIT
Modelizacion Coti.ipynb
constanzasilvestre/digital-house-challenge-3
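To make the under/over-fitting diagnosis above easier to read, train and test accuracy can be compared side by side across values of k; a minimal sketch reusing the standardized X_train/X_test from the cells above (this comparison is an addition, not part of the original notebook):
train_acc, test_acc = [], []
ks = range(1, 21)
for k in ks:
    m = KNeighborsClassifier(n_neighbors=k).fit(X_train, Y_train)
    train_acc.append(accuracy_score(Y_train, m.predict(X_train)))
    test_acc.append(accuracy_score(Y_test, m.predict(X_test)))
plt.plot(ks, train_acc, label='train')
plt.plot(ks, test_acc, label='test')
plt.xlabel('n_neighbors')
plt.ylabel('accuracy')
plt.legend();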
GridSearch & Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
from sklearn.pipeline import Pipeline

folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
pasos = [('scaler', StandardScaler()), ('knn', KNeighborsClassifier())]
pipe_grid = Pipeline(pasos)
param_grid = {'knn__n_neighbors': range(2, 20, 2), 'knn__weights': ['uniform', 'distance']}

grid = GridSearchCV(pipe_grid, param_grid, cv=folds)
grid.fit(X_train_numerical_scaled, Y_train)

grid.best_score_
grid.best_estimator_
accuracy_score(grid.best_estimator_.predict(X_test_numerical_scaled), Y_test)
_____no_output_____
MIT
Modelizacion Coti.ipynb
constanzasilvestre/digital-house-challenge-3
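To compare all hyperparameter combinations rather than just the best one, the full grid results can be inspected; a minimal sketch using the fitted `grid` object above (not part of the original notebook):
results = pd.DataFrame(grid.cv_results_)
cols = ['param_knn__n_neighbors', 'param_knn__weights', 'mean_test_score', 'std_test_score']
results[cols].sort_values('mean_test_score', ascending=False).head()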
Validating the 10m Sahel Africa Cropland Mask DescriptionPreviously, in the `6_Accuracy_assessment_20m.ipynb` notebook, we were doing preliminary validations on 20m resolution testing crop-masks. The crop-mask was stored on disk as a geotiff. The final cropland extent mask, produced at 10m resolution, is stored in the datacube and requires a different method for validating.> NOTE: A very big sandbox is required (256GiB RAM) to run this script. This notebook will output a `confusion error matrix` containing Overall, Producer's, and User's accuracy, along with the F1 score for each class. *** Getting startedTo run this analysis, run all the cells in the notebook, starting with the "Load packages" cell. Load Packages
import os
import sys
import glob
import rasterio
import datacube
import pandas as pd
import numpy as np
import seaborn as sn
import matplotlib.pyplot as plt
import geopandas as gpd
from sklearn.metrics import f1_score
from rasterstats import zonal_stats
_____no_output_____
Apache-2.0
testing/sahel_cropmask/6_Accuracy_assessment_10m.ipynb
digitalearthafrica/crop-mask
Analysis Parameters
* `product` : name of the crop-mask we're validating
* `band` : the band of the crop-mask we want to load and validate. Can be either `'mask'` or `'filtered'`
* `grd_truth` : a shapefile containing crop/no-crop points to serve as the "ground-truth" dataset
product = "crop_mask_sahel"
band = 'mask'
grd_truth = 'data/validation_samples.shp'
_____no_output_____
Apache-2.0
testing/sahel_cropmask/6_Accuracy_assessment_10m.ipynb
digitalearthafrica/crop-mask
Load the datasets`the cropland extent mask`
# connect to the datacube
dc = datacube.Datacube(app='feature_layers')

# load 10m cropmask
ds = dc.load(product=product, measurements=[band], resolution=(-10, 10)).squeeze()
print(ds)
<xarray.Dataset> Dimensions: (y: 364800, x: 672000) Coordinates: time datetime64[ns] 2019-07-02T11:59:59.999999 * y (y) float64 3.36e+06 3.36e+06 3.36e+06 ... -2.88e+05 -2.88e+05 * x (x) float64 -1.728e+06 -1.728e+06 ... 4.992e+06 4.992e+06 spatial_ref int32 6933 Data variables: mask (y, x) uint8 0 0 0 0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0 0 0 0 Attributes: crs: EPSG:6933 grid_mapping: spatial_ref
Apache-2.0
testing/sahel_cropmask/6_Accuracy_assessment_10m.ipynb
digitalearthafrica/crop-mask
`Ground truth points`
# ground truth shapefile
ground_truth = gpd.read_file(grd_truth).to_crs('EPSG:6933')

# rename the class column to 'Actual'
ground_truth = ground_truth.rename(columns={'Class': 'Actual'})

# reclassify the labels into integers
ground_truth['Actual'] = np.where(ground_truth['Actual'] == 'non-crop', 0, ground_truth['Actual'])
ground_truth['Actual'] = np.where(ground_truth['Actual'] == 'crop', 1, ground_truth['Actual'])
ground_truth.head()
_____no_output_____
Apache-2.0
testing/sahel_cropmask/6_Accuracy_assessment_10m.ipynb
digitalearthafrica/crop-mask
Convert points into polygonsWhen the validation data was collected, 40x40m polygons were evaluated as either crop/non-crop rather than points, so we want to sample the raster using the same small polygons. We'll find the majority or 'mode' statistic within the polygon and use that to compare with the validation dataset.
# set radius (in metres) around points
radius = 20

# create circle buffer around points, then find envelope
ground_truth['geometry'] = ground_truth['geometry'].buffer(radius).envelope
_____no_output_____
Apache-2.0
testing/sahel_cropmask/6_Accuracy_assessment_10m.ipynb
digitalearthafrica/crop-mask
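As a quick sanity check (an addition, not in the original notebook), the envelope widths can be inspected to confirm the buffered polygons are roughly 40 m x 40 m in the projected EPSG:6933 CRS set above:
bounds = ground_truth.geometry.bounds
print('polygon widths (m): ', (bounds.maxx - bounds.minx).describe())
print('polygon heights (m):', (bounds.maxy - bounds.miny).describe())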
Calculate zonal statisticsWe want to know what the majority pixel value is inside each validation polygon.
def custom_majority(x):
    # x is a masked array of the raster values inside one polygon;
    # return 1 if more than half of the valid pixels are crop, otherwise 0
    a = np.ma.MaskedArray.count(x)
    b = np.sum(x)
    c = b / a
    if c > 0.5:
        return 1
    if c <= 0.5:
        return 0

# calculate stats
stats = zonal_stats(ground_truth.geometry,
                    ds[band].values,
                    affine=ds.geobox.affine,
                    add_stats={'majority': custom_majority},
                    nodata=255)

# append stats to the ground truth dataframe
ground_truth['Prediction'] = [i['majority'] for i in stats]
ground_truth.head()
_____no_output_____
Apache-2.0
testing/sahel_cropmask/6_Accuracy_assessment_10m.ipynb
digitalearthafrica/crop-mask
*** Create a confusion matrix
confusion_matrix = pd.crosstab(ground_truth['Actual'],
                               ground_truth['Prediction'],
                               rownames=['Actual'],
                               colnames=['Prediction'],
                               margins=True)

confusion_matrix
_____no_output_____
Apache-2.0
testing/sahel_cropmask/6_Accuracy_assessment_10m.ipynb
digitalearthafrica/crop-mask
Calculate User's and Producer's Accuracy `Producer's Accuracy`
confusion_matrix["Producer's"] = [confusion_matrix.loc[0, 0] / confusion_matrix.loc[0, 'All'] * 100, confusion_matrix.loc[1, 1] / confusion_matrix.loc[1, 'All'] * 100, np.nan]
_____no_output_____
Apache-2.0
testing/sahel_cropmask/6_Accuracy_assessment_10m.ipynb
digitalearthafrica/crop-mask
`User's Accuracy`
users_accuracy = pd.Series([confusion_matrix[0][0] / confusion_matrix[0]['All'] * 100,
                            confusion_matrix[1][1] / confusion_matrix[1]['All'] * 100]
                           ).rename("User's")

confusion_matrix = confusion_matrix.append(users_accuracy)
_____no_output_____
Apache-2.0
testing/sahel_cropmask/6_Accuracy_assessment_10m.ipynb
digitalearthafrica/crop-mask
`Overall Accuracy`
confusion_matrix.loc["User's","Producer's"] = (confusion_matrix.loc[0, 0] + confusion_matrix.loc[1, 1]) / confusion_matrix.loc['All', 'All'] * 100
_____no_output_____
Apache-2.0
testing/sahel_cropmask/6_Accuracy_assessment_10m.ipynb
digitalearthafrica/crop-mask
`F1 Score`The F1 score is the harmonic mean of the precision and recall, where an F1 score reaches its best value at 1 (perfect precision and recall), and is calculated as:$$\begin{aligned}\text{Fscore} = 2 \times \frac{\text{UA} \times \text{PA}}{\text{UA} + \text{PA}}.\end{aligned}$$Where UA = Users Accuracy, and PA = Producer's Accuracy
fscore = pd.Series([(2 * (confusion_matrix.loc["User's", 0] * confusion_matrix.loc[0, "Producer's"]) / (confusion_matrix.loc["User's", 0] + confusion_matrix.loc[0, "Producer's"])) / 100,
                    f1_score(ground_truth['Actual'].astype(np.int8), ground_truth['Prediction'].astype(np.int8), average='binary')]
                   ).rename("F-score")

confusion_matrix = confusion_matrix.append(fscore)
_____no_output_____
Apache-2.0
testing/sahel_cropmask/6_Accuracy_assessment_10m.ipynb
digitalearthafrica/crop-mask
Tidy Confusion Matrix
* Limit decimal places
* Add readable class names
* Remove nonsensical values
# round numbers
confusion_matrix = confusion_matrix.round(decimals=2)

# rename booleans to class names
confusion_matrix = confusion_matrix.rename(columns={0: 'Non-crop', 1: 'Crop', 'All': 'Total'},
                                           index={0: 'Non-crop', 1: 'Crop', 'All': 'Total'})

# remove the nonsensical values in the table
confusion_matrix.loc["User's", 'Total'] = '--'
confusion_matrix.loc['Total', "Producer's"] = '--'
confusion_matrix.loc["F-score", 'Total'] = '--'
confusion_matrix.loc["F-score", "Producer's"] = '--'

confusion_matrix
_____no_output_____
Apache-2.0
testing/sahel_cropmask/6_Accuracy_assessment_10m.ipynb
digitalearthafrica/crop-mask
Export csv
confusion_matrix.to_csv('results/Sahel_10m_accuracy_assessment_confusion_matrix.csv')
_____no_output_____
Apache-2.0
testing/sahel_cropmask/6_Accuracy_assessment_10m.ipynb
digitalearthafrica/crop-mask
Recommendations with IBMIn this notebook, you will be putting your recommendation skills to use on real data from the IBM Watson Studio platform. You may either submit your notebook through the workspace here, or you may work from your local machine and submit through the next page. Either way assure that your code passes the project [RUBRIC](https://review.udacity.com/!/rubrics/2322/view). **Please save regularly.**By following the table of contents, you will build out a number of different methods for making recommendations that can be used for different situations. Table of ContentsI. [Exploratory Data Analysis](Exploratory-Data-Analysis)II. [Rank Based Recommendations](Rank)III. [User-User Based Collaborative Filtering](User-User)IV. [Content Based Recommendations (EXTRA - NOT REQUIRED)](Content-Recs)V. [Matrix Factorization](Matrix-Fact)VI. [Extras & Concluding](conclusions)At the end of the notebook, you will find directions for how to submit your work. Let's get started by importing the necessary libraries and reading in the data.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import project_tests as t
import pickle

%matplotlib inline

df = pd.read_csv('data/user-item-interactions.csv')       # Import the user item interactions dataframe
df_content = pd.read_csv('data/articles_community.csv')   # Import the articles database dataframe
del df['Unnamed: 0']
del df_content['Unnamed: 0']

# Show df to get an idea of the data
df.head()

# Show df_content to get an idea of the data
df_content.head()
_____no_output_____
MIT
Recommendations_with_IBM.ipynb
Anirudh-Kulkarni/IBM_article_recommendations
Part I : Exploratory Data AnalysisUse the dictionary and cells below to provide some insight into the descriptive statistics of the data.`1.` What is the distribution of how many articles a user interacts with in the dataset? Provide a visual and descriptive statistics to assist with giving a look at the number of times each user interacts with an article.
df_email = df.set_index('email')  # Set index to the email

df_email_count = df_email.groupby('email')['article_id'].count()
# Group articles by email and extract article_id's count: this gives
# the number of articles each user interacted with

df_email_countunique = df_email.groupby('email')['article_id'].unique()
# Here, we define a dataframe as above BUT we include an article in
# the count only once even if a user has interacted multiple times with it

df_email_count.describe()

df_email_countunique_len = df_email_countunique.apply(lambda x: len(x))
# We extract the number of articles each user interacted with and
# show the statistics below
df_email_countunique_len.describe()

# Fill in the median and maximum number of user_article interactions below
median_val = 3           # 50% of individuals interact with ____ number of articles or fewer.
max_views_by_user = 364  # The maximum number of user-article interactions by any 1 user with ... articles.
_____no_output_____
MIT
Recommendations_with_IBM.ipynb
Anirudh-Kulkarni/IBM_article_recommendations
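The question above also asks for a visual of the interaction distribution; a minimal sketch (not part of the original notebook) using the `df_email_count` series built above:
df_email_count.plot(kind='hist', bins=100)
plt.xlabel('Number of interactions per user')
plt.ylabel('Number of users')
plt.title('Distribution of user-article interactions');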
`2.` Explore and remove duplicate articles from the **df_content** dataframe.
# Find and explore duplicate articles
df_content[df_content['article_id'].duplicated() == True]  # The above shows the duplicate entries

# Remove any rows that have the same article_id - only keep the first
df_content1 = df_content.drop_duplicates(subset=['article_id'])
df_content1.head()
_____no_output_____
MIT
Recommendations_with_IBM.ipynb
Anirudh-Kulkarni/IBM_article_recommendations
`3.` Use the cells below to find:**a.** The number of unique articles that have an interaction with a user. **b.** The number of unique articles in the dataset (whether they have any interactions or not).**c.** The number of unique users in the dataset. (excluding null values) **d.** The number of user-article interactions in the dataset.
df4 = df.set_index('article_id')
df5 = df4.groupby('article_id')['title']
df4.describe()

unique_articles = 714              # The number of unique articles that have at least one interaction
total_articles = 1051              # The number of unique articles on the IBM platform
unique_users = 5148                # The number of unique users
user_article_interactions = 45993  # The number of user-article interactions
_____no_output_____
MIT
Recommendations_with_IBM.ipynb
Anirudh-Kulkarni/IBM_article_recommendations
`4.` Use the cells below to find the most viewed **article_id**, as well as how often it was viewed. After talking to the company leaders, the `email_mapper` function was deemed a reasonable way to map users to ids. There were a small number of null values, and it was found that all of these null values likely belonged to a single user (which is how they are stored using the function below).
df_id_art = df4.groupby(['article_id'])
# Create a dataframe grouped by article_id and having article_id as the index

value_counts = df['article_id'].value_counts(dropna=True, sort=True)
# Create a value_count series with the number of times each article is
# interacted with, then sort it with the highest count values on top.
value_counts.head()

most_viewed_article_id = '1429.0'  # The most viewed article in the dataset as a string with one value following the decimal
max_views = 937                    # The most viewed article in the dataset was viewed how many times?

## No need to change the code here - this will be helpful for later parts of the notebook
# Run this cell to map the user email to a user_id column and remove the email column
def email_mapper():
    coded_dict = dict()
    cter = 1
    email_encoded = []

    for val in df['email']:
        if val not in coded_dict:
            coded_dict[val] = cter
            cter += 1
        email_encoded.append(coded_dict[val])
    return email_encoded

email_encoded = email_mapper()
del df['email']
df['user_id'] = email_encoded

# show header
df.head()

## If you stored all your results in the variable names above,
## you shouldn't need to change anything in this cell
sol_1_dict = {
    '`50% of individuals have _____ or fewer interactions.`': median_val,
    '`The total number of user-article interactions in the dataset is ______.`': user_article_interactions,
    '`The maximum number of user-article interactions by any 1 user is ______.`': max_views_by_user,
    '`The most viewed article in the dataset was viewed _____ times.`': max_views,
    '`The article_id of the most viewed article is ______.`': most_viewed_article_id,
    '`The number of unique articles that have at least 1 rating ______.`': unique_articles,
    '`The number of unique users in the dataset is ______`': unique_users,
    '`The number of unique articles on the IBM platform`': total_articles
}

# Test your dictionary against the solution
t.sol_1_test(sol_1_dict)
It looks like you have everything right here! Nice job!
MIT
Recommendations_with_IBM.ipynb
Anirudh-Kulkarni/IBM_article_recommendations
Part II: Rank-Based RecommendationsUnlike in the earlier lessons, we don't actually have ratings for whether a user liked an article or not. We only know that a user has interacted with an article. In these cases, the popularity of an article can really only be based on how often an article was interacted with.`1.` Fill in the function below to return the **n** top articles ordered with most interactions as the top. Test your function using the tests below.
def get_top_articles(n, df=df):
    '''
    INPUT:
    n - (int) the number of top articles to return
    df - (pandas dataframe) df as defined at the top of the notebook

    OUTPUT:
    top_articles - (list) A list of the top 'n' article titles
    '''
    value_counts = df['article_id'].value_counts(dropna=True, sort=True)
    top_articles_id = list(value_counts.index[0:n])
    # Return articles with highest value counts i.e. interacted with the most
    top_articles = [df[df['article_id'] == art_id].title.iloc[0] for art_id in top_articles_id]

    return top_articles  # Return the top article titles from df (not df_content)

def get_top_article_ids(n, df=df):
    '''
    INPUT:
    n - (int) the number of top articles to return
    df - (pandas dataframe) df as defined at the top of the notebook

    OUTPUT:
    top_articles - (list) A list of the top 'n' article ids
    '''
    # Return article ids with highest value counts i.e. interacted with the most
    value_counts = df['article_id'].value_counts(dropna=True, sort=True)
    top_articles = list(value_counts.index[0:n])

    return top_articles  # Return the top article ids

print(get_top_articles(10))
print(get_top_article_ids(10))

# Test your function by returning the top 5, 10, and 20 articles
top_5 = get_top_articles(5)
top_10 = get_top_articles(10)
top_20 = get_top_articles(20)

# Test each of your three lists from above
t.sol_2_test(get_top_articles)
Your top_5 looks like the solution list! Nice job. Your top_10 looks like the solution list! Nice job. Your top_20 looks like the solution list! Nice job.
MIT
Recommendations_with_IBM.ipynb
Anirudh-Kulkarni/IBM_article_recommendations
Part III: User-User Based Collaborative Filtering`1.` Use the function below to reformat the **df** dataframe to be shaped with users as the rows and articles as the columns. * Each **user** should only appear in each **row** once.* Each **article** should only show up in one **column**. * **If a user has interacted with an article, then place a 1 where the user-row meets for that article-column**. It does not matter how many times a user has interacted with the article, all entries where a user has interacted with an article should be a 1. * **If a user has not interacted with an item, then place a zero where the user-row meets for that article-column**. Use the tests to make sure the basic structure of your matrix matches what is expected by the solution.
# create the user-article matrix with 1's and 0's

def create_user_item_matrix(df):
    '''
    INPUT:
    df - pandas dataframe with article_id, title, user_id columns

    OUTPUT:
    user_item - user item matrix

    Description:
    Return a matrix with user ids as rows and article ids on the columns with 1 values where a user
    interacted with an article and a 0 otherwise
    '''
    # Extract only the user_id and article_id columns
    df6 = df[['user_id', 'article_id']]

    # Extract dummies of the article_id variable and concatenate with user_id variable
    df7 = pd.concat([df6.user_id, pd.get_dummies(df6.article_id)], axis=1)

    # If an article is interacted with more than or equal to once by a user, set it to 1!
    user_item = (df7.groupby('user_id').sum() > 0).astype(int)

    return user_item  # return the user_item matrix

user_item = create_user_item_matrix(df)

## Tests: You should just need to run this cell. Don't change the code.
assert user_item.shape[0] == 5149, "Oops! The number of users in the user-article matrix doesn't look right."
assert user_item.shape[1] == 714, "Oops! The number of articles in the user-article matrix doesn't look right."
assert user_item.sum(axis=1)[1] == 36, "Oops! The number of articles seen by user 1 doesn't look right."
print("You have passed our quick tests! Please proceed!")
You have passed our quick tests! Please proceed!
MIT
Recommendations_with_IBM.ipynb
Anirudh-Kulkarni/IBM_article_recommendations
`2.` Complete the function below which should take a user_id and provide an ordered list of the most similar users to that user (from most similar to least similar). The returned result should not contain the provided user_id, as we know that each user is similar to him/herself. Because the results for each user here are binary, it (perhaps) makes sense to compute similarity as the dot product of two users. Use the tests to test your function.
def find_similar_users(user_id, user_item=user_item):
    '''
    INPUT:
    user_id - (int) a user_id
    user_item - (pandas dataframe) matrix of users by articles:
                1's when a user has interacted with an article, 0 otherwise

    OUTPUT:
    similar_users - (list) an ordered list where the closest users (largest dot product users)
                    are listed first

    Description:
    Computes the similarity of every pair of users based on the dot product
    Returns an ordered list
    '''
    # compute similarity of each user to the provided user
    sim = user_item.dot(user_item.iloc[user_id - 1, :])

    # sort by similarity
    sim2 = sim.sort_values(ascending=False)

    # create list of just the ids
    most_similar_users = list(sim2.index)

    # remove the own user's id
    most_similar_users.remove(user_id)

    return most_similar_users  # return a list of the users in order from most to least similar

# Do a spot check of your function
print("The 10 most similar users to user 1 are: {}".format(find_similar_users(1)[:10]))
print("The 5 most similar users to user 3933 are: {}".format(find_similar_users(3933)[:5]))
print("The 3 most similar users to user 46 are: {}".format(find_similar_users(46)[:3]))
The 10 most similar users to user 1 are: [3933, 23, 3782, 203, 4459, 131, 3870, 46, 4201, 5041] The 5 most similar users to user 3933 are: [1, 23, 3782, 4459, 203] The 3 most similar users to user 46 are: [4201, 23, 3782]
MIT
Recommendations_with_IBM.ipynb
Anirudh-Kulkarni/IBM_article_recommendations
`3.` Now that you have a function that provides the most similar users to each user, you will want to use these users to find articles you can recommend. Complete the functions below to return the articles you would recommend to each user.
def get_article_names(article_ids, df=df):
    '''
    INPUT:
    article_ids - (list) a list of article ids
    df - (pandas dataframe) df as defined at the top of the notebook

    OUTPUT:
    article_names - (list) a list of article names associated with the list of article ids
                    (this is identified by the title column)
    '''
    # Extract titles given the article_ids
    article_names = [df.title[df.article_id == float(a)].iloc[0] for a in article_ids]

    return article_names  # Return the article names associated with list of article ids

def get_user_articles(user_id, user_item=user_item):
    '''
    INPUT:
    user_id - (int) a user id
    user_item - (pandas dataframe) matrix of users by articles:
                1's when a user has interacted with an article, 0 otherwise

    OUTPUT:
    article_ids - (list) a list of the article ids seen by the user
    article_names - (list) a list of article names associated with the list of article ids
                    (this is identified by the doc_full_name column in df_content)

    Description:
    Provides a list of the article_ids and article titles that have been seen by a user
    '''
    # Extract the articles seen by a user
    article_ids = list(str(x) for x in set(df[df.user_id == user_id].article_id))
    article_names = list(str(x) for x in set(df[df.user_id == user_id].title))

    return article_ids, article_names  # return the ids and names

def user_user_recs(user_id, m=10):
    '''
    INPUT:
    user_id - (int) a user id
    m - (int) the number of recommendations you want for the user

    OUTPUT:
    recs - (list) a list of recommendations for the user

    Description:
    Loops through the users based on closeness to the input user_id
    For each user - finds articles the user hasn't seen before and provides them as recs
    Does this until m recommendations are found

    Notes:
    Users who are the same closeness are chosen arbitrarily as the 'next' user
    For the user where the number of recommended articles starts below m
    and ends exceeding m, the last items are chosen arbitrarily
    '''
    # Find similar users (by dot product)
    similar_users = find_similar_users(user_id)

    # Find articles already seen by the user
    art_ids1, art_nms1 = get_user_articles(user_id)

    # Find other articles based on similar users that our user has not already seen
    rec_list = []
    for user in similar_users:
        art_ids, art_nms = get_user_articles(user)
        rec_list.append(list(set(art_ids) - set(art_ids1)))

    recs2 = [item for sublist in rec_list for item in sublist]
    recs = recs2[:m]

    return recs  # return your recommendations for this user_id

# Check Results
#get_article_names(user_user_recs(1, 10)) # Return 10 recommendations for user 1
list(str(x) for x in set(df[df.user_id == 20].article_id))

# Test your functions here - No need to change this code - just run this cell
assert set(get_article_names(['1024.0', '1176.0', '1305.0', '1314.0', '1422.0', '1427.0'])) == set(['using deep learning to reconstruct high-resolution audio', 'build a python app on the streaming analytics service', 'gosales transactions for naive bayes model', 'healthcare python streaming application demo', 'use r dataframes & ibm watson natural language understanding', 'use xgboost, scikit-learn & ibm watson machine learning apis']), "Oops! Your the get_article_names function doesn't work quite how we expect."
assert set(get_article_names(['1320.0', '232.0', '844.0'])) == set(['housing (2015): united states demographic measures', 'self-service data preparation with ibm data refinery', 'use the cloudant-spark connector in python notebook']), "Oops! Your the get_article_names function doesn't work quite how we expect."
assert set(get_user_articles(20)[0]) == set(['1320.0', '232.0', '844.0'])
assert set(get_user_articles(20)[1]) == set(['housing (2015): united states demographic measures', 'self-service data preparation with ibm data refinery', 'use the cloudant-spark connector in python notebook'])
assert set(get_user_articles(2)[0]) == set(['1024.0', '1176.0', '1305.0', '1314.0', '1422.0', '1427.0'])
assert set(get_user_articles(2)[1]) == set(['using deep learning to reconstruct high-resolution audio', 'build a python app on the streaming analytics service', 'gosales transactions for naive bayes model', 'healthcare python streaming application demo', 'use r dataframes & ibm watson natural language understanding', 'use xgboost, scikit-learn & ibm watson machine learning apis'])
print("If this is all you see, you passed all of our tests! Nice job!")
If this is all you see, you passed all of our tests! Nice job!
MIT
Recommendations_with_IBM.ipynb
Anirudh-Kulkarni/IBM_article_recommendations
`4.` Now we are going to improve the consistency of the **user_user_recs** function from above.
* Instead of arbitrarily choosing when we obtain users who are all the same closeness to a given user - choose the users that have the most total article interactions before choosing those with fewer article interactions.
* Instead of arbitrarily choosing articles from the user where the number of recommended articles starts below m and ends exceeding m, choose the articles with the most total interactions before choosing those with fewer total interactions. This ranking should be what would be obtained from the **top_articles** function you wrote earlier.
def get_top_sorted_users(user_id, df=df, user_item=user_item):
    '''
    INPUT:
    user_id - (int)
    df - (pandas dataframe) df as defined at the top of the notebook
    user_item - (pandas dataframe) matrix of users by articles:
                1's when a user has interacted with an article, 0 otherwise

    OUTPUT:
    neighbors_df - (pandas dataframe) a dataframe with:
                    neighbor_id - is a neighbor user_id
                    similarity - measure of the similarity of each user to the provided user_id
                    num_interactions - the number of articles viewed by the user - if a u

    Other Details - sort the neighbors_df by the similarity and then by number of interactions where
                    highest of each is higher in the dataframe
    '''
    # To compute number of interactions:
    # Extract dummies of the article_id variable and concatenate with the user_id variable
    df_new = pd.concat([df.user_id, pd.get_dummies(df.article_id)], axis=1)

    # Sum the number of interactions of a user
    user_item2 = (df_new.groupby('user_id').sum()).sum(axis=1)

    # Find the users with most similarity and create a new data frame
    neighbors_df = pd.DataFrame(find_similar_users(user_id), columns=['neighbor_id'])

    # Add columns with the similarities and their number of interactions
    neighbors_df['similarity'] = list(user_item.loc[neighbors_df.neighbor_id].dot(user_item.loc[user_id, :]))
    neighbors_df['num_interactions'] = list(user_item2.loc[neighbors_df.neighbor_id])

    # Sort by similarity and then by number of interactions (the original line discarded the
    # result of sort_values, so the sort never took effect - assign it back)
    neighbors_df = neighbors_df.sort_values(by=['similarity', 'num_interactions'], ascending=False)

    return neighbors_df  # Return the dataframe specified in the doc_string

def user_user_recs_part2(user_id, m=10):
    '''
    INPUT:
    user_id - (int) a user id
    m - (int) the number of recommendations you want for the user

    OUTPUT:
    recs - (list) a list of recommendations for the user by article id
    rec_names - (list) a list of recommendations for the user by article title

    Description:
    Loops through the users based on closeness to the input user_id
    For each user - finds articles the user hasn't seen before and provides them as recs
    Does this until m recommendations are found

    Notes:
    * Choose the users that have the most total article interactions
      before choosing those with fewer article interactions.
    * Choose the articles with the most total interactions
      before choosing those with fewer total interactions.
    '''
    # Get the neighbours
    neighbors_df = get_top_sorted_users(user_id)

    # And the articles that our user has already seen
    art_ids1, art_nms1 = get_user_articles(user_id)

    # Get recommendations from neighbours that our user hasn't already seen
    rec_list = []
    for user in neighbors_df.neighbor_id:
        art_ids, art_nms = get_user_articles(user)
        rec_list.append(list(set(art_ids) - set(art_ids1)))

    recs2 = [item for sublist in rec_list for item in sublist]
    recs = recs2[:m]
    rec_names = get_article_names(recs)

    return recs, rec_names

# Quick spot check - don't change this code - just use it to test your functions
rec_ids, rec_names = user_user_recs_part2(20, 10)
print("The top 10 recommendations for user 20 are the following article ids:")
print(rec_ids)
print()
print("The top 10 recommendations for user 20 are the following article names:")
print(rec_names)

neighbors_df = get_top_sorted_users(131)
neighbors_df.iloc[0:12]
_____no_output_____
MIT
Recommendations_with_IBM.ipynb
Anirudh-Kulkarni/IBM_article_recommendations
`5.` Use your functions from above to correctly fill in the solutions to the dictionary below. Then test your dictionary against the solution. Provide the code you need to answer each following the comments below.
### Tests with a dictionary of results

user1_most_sim = neighbors_df.iloc[0].neighbor_id     # Find the user that is most similar to user 1
user131_10th_sim = neighbors_df.iloc[9].neighbor_id   # Find the 10th most similar user to user 131

## Dictionary Test Here
sol_5_dict = {
    'The user that is most similar to user 1.': user1_most_sim,
    'The user that is the 10th most similar to user 131': user131_10th_sim,
}

t.sol_5_test(sol_5_dict)
This all looks good! Nice job!
MIT
Recommendations_with_IBM.ipynb
Anirudh-Kulkarni/IBM_article_recommendations
`6.` If we were given a new user, which of the above functions would you be able to use to make recommendations? Explain. Can you think of a better way we might make recommendations? Use the cell below to explain a better method for new users. The above method only works by finding other similar users, so user-based collaborative filtering will not work. We could use rank-based recommendations, i.e. the get_top_articles function. For better ways to make recommendations, we could potentially add filters that the user could use to select articles. `7.` Using your existing functions, provide the top 10 recommended articles you would provide for a new user below. You can test your function against our thoughts to make sure we are all on the same page with how we might make a recommendation.
new_user = '0.0'

# What would your recommendations be for this new user '0.0'?  As a new user, they have no observed articles.
# Provide a list of the top 10 article ids you would give to
new_user_recs = list(str(x) for x in get_top_article_ids(10))  # Your recommendations here
new_user_recs

assert set(new_user_recs) == set(['1314.0','1429.0','1293.0','1427.0','1162.0','1364.0','1304.0','1170.0','1431.0','1330.0']), "Oops! It makes sense that in this case we would want to recommend the most popular articles, because we don't know anything about these users."
print("That's right! Nice job!")
That's right! Nice job!
MIT
Recommendations_with_IBM.ipynb
Anirudh-Kulkarni/IBM_article_recommendations
Part IV: Content Based Recommendations (EXTRA - NOT REQUIRED)Another method we might use to make recommendations is to perform a ranking of the highest ranked articles associated with some term. You might consider content to be the **doc_body**, **doc_description**, or **doc_full_name**. There isn't one way to create a content based recommendation, especially considering that each of these columns hold content related information. `1.` Use the function body below to create a content based recommender. Since there isn't one right answer for this recommendation tactic, no test functions are provided. Feel free to change the function inputs if you decide you want to try a method that requires more input values. The input values are currently set with one idea in mind that you may use to make content based recommendations. One additional idea is that you might want to choose the most popular recommendations that meet your 'content criteria', but again, there is a lot of flexibility in how you might make these recommendations. This part is NOT REQUIRED to pass this project. However, you may choose to take this on as an extra way to show off your skills.
def make_content_recs(article_id, m):
    '''
    INPUT:
    article_id - (str with a number) one article id that the user has interacted with
    m - (int) the number of recommendations you want for the user

    OUTPUT:
    recs - (list) a list of recommendations for the user by article id
    rec_names - (list) a list of recommendations for the user by article title

    Description:
    For the given article_id, find the users who have interacted with this article.
    Find the other articles that most of these users have interacted with.
    '''
    article_id_float = float(article_id)

    # Find all users who have interacted with this article
    users_df = user_item[user_item.columns[user_item.columns == article_id_float]]
    users_to_use = list(users_df.index[users_df[article_id_float] == 1])

    # Find the other articles that they have most interacted with as a group
    # (users_to_use holds user_id index labels, so select rows by label rather than position)
    articles_rec = user_item.loc[users_to_use, :].sum().sort_values(ascending=False)

    # Drop the input article itself (the original call discarded the result of .drop,
    # so the seed article could still appear in the recommendations)
    articles_rec = articles_rec.drop(labels=article_id_float)

    recs = list(str(x) for x in articles_rec[0:m].index)
    rec_names = get_article_names(recs)

    return recs, rec_names
_____no_output_____
MIT
Recommendations_with_IBM.ipynb
Anirudh-Kulkarni/IBM_article_recommendations
`2.` Now that you have put together your content-based recommendation system, use the cell below to write a summary explaining how your content based recommender works. Do you see any possible improvements that could be made to your function? Is there anything novel about your content based recommender? This part is NOT REQUIRED to pass this project. However, you may choose to take this on as an extra way to show off your skills. **Write an explanation of your content based recommendation system here.** `3.` Use your content-recommendation system to make recommendations for the below scenarios based on the comments. Again no tests are provided here, because there isn't one right answer that could be used to find these content based recommendations. This part is NOT REQUIRED to pass this project. However, you may choose to take this on as an extra way to show off your skills.
# make recommendations for a brand new user
new_user_recs = list(str(x) for x in get_top_article_ids(10))  # Your recommendations here
new_user_recs

# make recommendations for a user who has only interacted with article id '1427.0'
user_rec_ids, user_rec_titles = make_content_recs('1427.0', 5)
user_rec_ids
_____no_output_____
MIT
Recommendations_with_IBM.ipynb
Anirudh-Kulkarni/IBM_article_recommendations
Part V: Matrix FactorizationIn this part of the notebook, you will build use matrix factorization to make article recommendations to the users on the IBM Watson Studio platform.`1.` You should have already created a **user_item** matrix above in **question 1** of **Part III** above. This first question here will just require that you run the cells to get things set up for the rest of **Part V** of the notebook.
# Load the matrix here
user_item_matrix = pd.read_pickle('user_item_matrix.p')

# quick look at the matrix
user_item_matrix.head()
_____no_output_____
MIT
Recommendations_with_IBM.ipynb
Anirudh-Kulkarni/IBM_article_recommendations
`2.` In this situation, you can use Singular Value Decomposition from [numpy](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.svd.html) on the user-item matrix. Use the cell to perform SVD, and explain why this is different than in the lesson.
# Perform SVD on the User-Item Matrix Here
u, s, vt = np.linalg.svd(user_item_matrix)  # use the built in to get the three matrices
_____no_output_____
MIT
Recommendations_with_IBM.ipynb
Anirudh-Kulkarni/IBM_article_recommendations
This matrix contains only binary values, so in that sense it differs from the ratings matrix used in the lesson. More importantly, every cell has a value (there are no missing entries), so we do not need FunkSVD here and can apply classical SVD directly. `3.` Now for the tricky part, how do we choose the number of latent features to use? Running the below cell, you can see that as the number of latent features increases, we obtain a lower error rate on making predictions for the 1 and 0 values in the user-item matrix. Run the cell below to get an idea of how the accuracy improves as we increase the number of latent features.
num_latent_feats = np.arange(10, 700 + 10, 20)
sum_errs = []

for k in num_latent_feats:
    # restructure with k latent features
    s_new, u_new, vt_new = np.diag(s[:k]), u[:, :k], vt[:k, :]

    # take dot product
    user_item_est = np.around(np.dot(np.dot(u_new, s_new), vt_new))

    # compute error for each prediction to actual value
    diffs = np.subtract(user_item_matrix, user_item_est)

    # total errors and keep track of them
    err = np.sum(np.sum(np.abs(diffs)))
    sum_errs.append(err)

plt.plot(num_latent_feats, 1 - np.array(sum_errs)/df.shape[0]);
plt.xlabel('Number of Latent Features');
plt.ylabel('Accuracy');
plt.title('Accuracy vs. Number of Latent Features');
_____no_output_____
MIT
Recommendations_with_IBM.ipynb
Anirudh-Kulkarni/IBM_article_recommendations
`4.` From the above, we can't really be sure how many features to use, because simply having a better way to predict the 1's and 0's of the matrix doesn't exactly give us an indication of if we are able to make good recommendations. Instead, we might split our dataset into a training and test set of data, as shown in the cell below. Use the code from question 3 to understand the impact on accuracy of the training and test sets of data with different numbers of latent features. Using the split below:
* How many users can we make predictions for in the test set?
* How many users are we not able to make predictions for because of the cold start problem?
* How many articles can we make predictions for in the test set?
* How many articles are we not able to make predictions for because of the cold start problem?
df_train = df.head(40000)
df_test = df.tail(5993)

# Create matrices for training and testing separately
user_item_train = create_user_item_matrix(df_train)
user_item_test = create_user_item_matrix(df_test)

# Find users in test that are not in train and articles in test that are not in train
len(set(user_item_test.index) - set(user_item_train.index))
len(set(user_item_test.columns) - set(user_item_train.columns))

# Visualize the test matrix
user_item_test.head()

df_train = df.head(40000)
df_test = df.tail(5993)

def create_test_and_train_user_item(df_train, df_test):
    '''
    INPUT:
    df_train - training dataframe
    df_test - test dataframe

    OUTPUT:
    user_item_train - a user-item matrix of the training dataframe
                      (unique users for each row and unique articles for each column)
    user_item_test - a user-item matrix of the testing dataframe
                     (unique users for each row and unique articles for each column)
    test_idx - all of the test user ids
    test_arts - all of the test article ids
    '''
    # Create train and test dataframes
    user_item_train = create_user_item_matrix(df_train)
    user_item_test = create_user_item_matrix(df_test)

    test_idx = list(user_item_test.index)
    test_arts = list(user_item_test.columns)

    return user_item_train, user_item_test, test_idx, test_arts

user_item_train, user_item_test, test_idx, test_arts = create_test_and_train_user_item(df_train, df_test)

# Replace the values in the dictionary below
a = 662
b = 574
c = 20
d = 0

sol_4_dict = {
    'How many users can we make predictions for in the test set?': c,
    'How many users in the test set are we not able to make predictions for because of the cold start problem?': a,
    'How many articles can we make predictions for in the test set?': b,
    'How many articles in the test set are we not able to make predictions for because of the cold start problem?': d
}

t.sol_4_test(sol_4_dict)
# There seems to be some issue with the solution dictionary.
_____no_output_____
MIT
Recommendations_with_IBM.ipynb
Anirudh-Kulkarni/IBM_article_recommendations
`5.` Now use the **user_item_train** dataset from above to find U, S, and V transpose using SVD. Then find the subset of rows in the **user_item_test** dataset that you can predict using this matrix decomposition with different numbers of latent features to see how many features makes sense to keep based on the accuracy on the test data. This will require combining what was done in questions `2` - `4`.Use the cells below to explore how well SVD works towards making predictions for recommendations on the test data.
# fit SVD on the user_item_train matrix
u_train, s_train, vt_train = np.linalg.svd(user_item_train)  # fit svd similar to above then use the cells below

# Print all the shapes for understanding what the matrices represent
print(np.shape(u_train))
print(np.shape(s_train))
print(np.shape(vt_train))
print(np.shape(user_item_train))

# Find users that are common to the train and test dataframes
rows_to_remove = list(set(user_item_test.index) - set(user_item_train.index))
rows_to_keep = list(set(user_item_test.index) - set(rows_to_remove))
rows_to_keep

# Find article_ids that are common to the train and test dataframes
columns_to_remove = list(set(user_item_test.columns) - set(user_item_train.columns))
columns_to_keep = list(set(user_item_test.columns) - set(columns_to_remove))

# Find row indices corresponding to common users
# and column indices corresponding to common article_ids
users_to_keep = [row - 1 for row in rows_to_keep]
article_indices = list(user_item_train.columns)
articles_to_keep = [article_indices.index(i) for i in columns_to_keep]

# Find the u_train, vt_train and s_train corresponding to only the common users
# and articles
u_train2 = u_train[users_to_keep, :]
u_train3 = u_train2[:, users_to_keep]
vt_train2 = vt_train[articles_to_keep, :]
vt_train3 = vt_train2[:, articles_to_keep]
s_train2 = s_train[articles_to_keep]

np.shape(np.around(np.dot(np.dot(u_new, s_new), vt_new)))

# Keep only the common users in the train and test dataframes;
# we keep all the articles as they are all present in the train dataframe
user_item_test2 = user_item_test.loc[user_item_test.index.intersection(rows_to_keep)]

# Use the reduced u, v, s to make predictions about the test dataframe
# with different numbers of latent features and compare the results
num_latent_feats = np.arange(1, 20, 1)
sum_errs = []

for k in num_latent_feats:
    # restructure with k latent features
    s_new, u_new, vt_new = np.diag(s_train2[:k]), u_train3[:, :k], vt_train3[:k, :]

    # take dot product
    user_item_est = np.around(np.dot(np.dot(u_new, s_new), vt_new))

    # compute error for each prediction to actual value
    diffs = np.subtract(user_item_test2, user_item_est)

    # total errors and keep track of them
    err = np.sum(np.sum(np.abs(diffs)))
    sum_errs.append(err)

plt.plot(num_latent_feats, 1 - np.array(sum_errs)/df.shape[0]);
plt.xlabel('Number of Latent Features');
plt.ylabel('Accuracy');
plt.title('Accuracy vs. Number of Latent Features');
_____no_output_____
MIT
Recommendations_with_IBM.ipynb
Anirudh-Kulkarni/IBM_article_recommendations
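One hedged aside before the discussion below: because the matrix is mostly zeros, raw accuracy can look high even when few 1's are recovered, so precision/recall on the reconstructed test matrix can be more informative. A minimal sketch, assuming `user_item_test2` and `user_item_est` from the last loop iteration above are positionally aligned (this cell is not part of the original notebook):
from sklearn.metrics import precision_score, recall_score

y_true = np.array(user_item_test2).flatten()
y_pred_flat = np.clip(user_item_est, 0, 1).astype(int).flatten()
print('Precision:', precision_score(y_true, y_pred_flat))
print('Recall:', recall_score(y_true, y_pred_flat))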
`6.` Use the cell below to comment on the results you found in the previous question. Given the circumstances of your results, discuss what you might do to determine if the recommendations you make with any of the above recommendation systems are an improvement to how users currently find articles? It seems that the accuracy on the test set is high but that it reduced with the number of latent features. This could be because as the number of latent features increases, the model overfits on the training data to reproduce the training data matrix. To determine if any of the above recommendation systems are an improvement, we could perhaps perform cross validation on the dataset but splitting it into train-test groups multiple times and then averaging over the prediction accuracies.To evaluate the performance of the recommendation system, we could do an A/B testing type experiment where we recommend articles to the users based on our predictions and see if they're morely like to follow up on these articles compared to articles that were not predicted. ExtrasUsing your workbook, you could now save your recommendations for each user, develop a class to make new predictions and update your results, and make a flask app to deploy your results. These tasks are beyond what is required for this project. However, from what you learned in the lessons, you certainly capable of taking these tasks on to improve upon your work here! Conclusion> Congratulations! You have reached the end of the Recommendations with IBM project! > **Tip**: Once you are satisfied with your work here, check over your report to make sure that it is satisfies all the areas of the [rubric](https://review.udacity.com/!/rubrics/2322/view). You should also probably remove all of the "Tips" like this one so that the presentation is as polished as possible. Directions to Submit> Before you submit your project, you need to create a .html or .pdf version of this notebook in the workspace here. To do that, run the code cell below. If it worked correctly, you should get a return code of 0, and you should see the generated .html file in the workspace directory (click on the orange Jupyter icon in the upper left).> Alternatively, you can download this report as .html via the **File** > **Download as** submenu, and then manually upload it into the workspace directory by clicking on the orange Jupyter icon in the upper left, then using the Upload button.> Once you've done this, you can submit your project by clicking on the "Submit Project" button in the lower right here. This will create and submit a zip file with this .ipynb doc and the .html or .pdf version you created. Congratulations!
from subprocess import call
call(['python', '-m', 'nbconvert', 'Recommendations_with_IBM.ipynb'])
_____no_output_____
MIT
Recommendations_with_IBM.ipynb
Anirudh-Kulkarni/IBM_article_recommendations
Residual NetworksWelcome to the second assignment of this week! You will learn how to build very deep convolutional networks, using Residual Networks (ResNets). In theory, very deep networks can represent very complex functions; but in practice, they are hard to train. Residual Networks, introduced by [He et al.](https://arxiv.org/pdf/1512.03385.pdf), allow you to train much deeper networks than were previously practically feasible.**In this assignment, you will:**- Implement the basic building blocks of ResNets. - Put together these building blocks to implement and train a state-of-the-art neural network for image classification. Updates If you were working on the notebook before this update...* The current notebook is version "2a".* You can find your original work saved in the notebook with the previous version name ("v2") * To view the file directory, go to the menu "File->Open", and this will open a new tab that shows the file directory. List of updates* For testing on an image, replaced `preprocess_input(x)` with `x=x/255.0` to normalize the input image in the same way that the model's training data was normalized.* Refers to "shallower" layers as those layers closer to the input, and "deeper" layers as those closer to the output (Using "shallower" layers instead of "lower" or "earlier").* Added/updated instructions. This assignment will be done in Keras. Before jumping into the problem, let's run the cell below to load the required packages.
import numpy as np
import tensorflow as tf  # needed for tf.reset_default_graph() and tf.Session() in the test cells below
from keras import layers
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from keras.models import Model, load_model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from resnets_utils import *
from keras.initializers import glorot_uniform
import scipy.misc
from matplotlib.pyplot import imshow
%matplotlib inline

import keras.backend as K
K.set_image_data_format('channels_last')
K.set_learning_phase(1)
Using TensorFlow backend.
MIT
Convolutional Neural Networks/Residual_Networks_v2a.ipynb
joyfinder/Deep_Learning_Specialisation
1 - The problem of very deep neural networksLast week, you built your first convolutional neural network. In recent years, neural networks have become deeper, with state-of-the-art networks going from just a few layers (e.g., AlexNet) to over a hundred layers.* The main benefit of a very deep network is that it can represent very complex functions. It can also learn features at many different levels of abstraction, from edges (at the shallower layers, closer to the input) to very complex features (at the deeper layers, closer to the output). * However, using a deeper network doesn't always help. A huge barrier to training them is vanishing gradients: very deep networks often have a gradient signal that goes to zero quickly, thus making gradient descent prohibitively slow. * More specifically, during gradient descent, as you backprop from the final layer back to the first layer, you are multiplying by the weight matrix on each step, and thus the gradient can decrease exponentially quickly to zero (or, in rare cases, grow exponentially quickly and "explode" to take very large values). * During training, you might therefore see the magnitude (or norm) of the gradient for the shallower layers decrease to zero very rapidly as training proceeds: **Figure 1** : **Vanishing gradient** The speed of learning decreases very rapidly for the shallower layers as the network trains You are now going to solve this problem by building a Residual Network! 2 - Building a Residual NetworkIn ResNets, a "shortcut" or a "skip connection" allows the model to skip layers: **Figure 2** : A ResNet block showing a **skip-connection** The image on the left shows the "main path" through the network. The image on the right adds a shortcut to the main path. By stacking these ResNet blocks on top of each other, you can form a very deep network. We also saw in lecture that having ResNet blocks with the shortcut also makes it very easy for one of the blocks to learn an identity function. This means that you can stack on additional ResNet blocks with little risk of harming training set performance. (There is also some evidence that the ease of learning an identity function accounts for ResNets' remarkable performance even more so than skip connections helping with vanishing gradients).Two main types of blocks are used in a ResNet, depending mainly on whether the input/output dimensions are same or different. You are going to implement both of them: the "identity block" and the "convolutional block." 2.1 - The identity blockThe identity block is the standard block used in ResNets, and corresponds to the case where the input activation (say $a^{[l]}$) has the same dimension as the output activation (say $a^{[l+2]}$). To flesh out the different steps of what happens in a ResNet's identity block, here is an alternative diagram showing the individual steps: **Figure 3** : **Identity block.** Skip connection "skips over" 2 layers. The upper path is the "shortcut path." The lower path is the "main path." In this diagram, we have also made explicit the CONV2D and ReLU steps in each layer. To speed up training we have also added a BatchNorm step. Don't worry about this being complicated to implement--you'll see that BatchNorm is just one line of code in Keras! In this exercise, you'll actually implement a slightly more powerful version of this identity block, in which the skip connection "skips over" 3 hidden layers rather than 2 layers. It looks like this: **Figure 4** : **Identity block.** Skip connection "skips over" 3 layers. 
Here are the individual steps.First component of main path: - The first CONV2D has $F_1$ filters of shape (1,1) and a stride of (1,1). Its padding is "valid" and its name should be `conv_name_base + '2a'`. Use 0 as the seed for the random initialization. - The first BatchNorm is normalizing the 'channels' axis. Its name should be `bn_name_base + '2a'`.- Then apply the ReLU activation function. This has no name and no hyperparameters. Second component of main path:- The second CONV2D has $F_2$ filters of shape $(f,f)$ and a stride of (1,1). Its padding is "same" and its name should be `conv_name_base + '2b'`. Use 0 as the seed for the random initialization. - The second BatchNorm is normalizing the 'channels' axis. Its name should be `bn_name_base + '2b'`.- Then apply the ReLU activation function. This has no name and no hyperparameters. Third component of main path:- The third CONV2D has $F_3$ filters of shape (1,1) and a stride of (1,1). Its padding is "valid" and its name should be `conv_name_base + '2c'`. Use 0 as the seed for the random initialization. - The third BatchNorm is normalizing the 'channels' axis. Its name should be `bn_name_base + '2c'`. - Note that there is **no** ReLU activation function in this component. Final step: - The `X_shortcut` and the output from the 3rd layer `X` are added together.- **Hint**: The syntax will look something like `Add()([var1,var2])`- Then apply the ReLU activation function. This has no name and no hyperparameters. **Exercise**: Implement the ResNet identity block. We have implemented the first component of the main path. Please read this carefully to make sure you understand what it is doing. You should implement the rest. - To implement the Conv2D step: [Conv2D](https://keras.io/layers/convolutional/conv2d)- To implement BatchNorm: [BatchNormalization](https://faroit.github.io/keras-docs/1.2.2/layers/normalization/) (axis: Integer, the axis that should be normalized (typically the 'channels' axis))- For the activation, use: `Activation('relu')(X)`- To add the value passed forward by the shortcut: [Add](https://keras.io/layers/merge/add)
# GRADED FUNCTION: identity_block

def identity_block(X, f, filters, stage, block):
    """
    Implementation of the identity block as defined in Figure 4

    Arguments:
    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    f -- integer, specifying the shape of the middle CONV's window for the main path
    filters -- python list of integers, defining the number of filters in the CONV layers of the main path
    stage -- integer, used to name the layers, depending on their position in the network
    block -- string/character, used to name the layers, depending on their position in the network

    Returns:
    X -- output of the identity block, tensor of shape (n_H, n_W, n_C)
    """
    # defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Retrieve Filters
    F1, F2, F3 = filters

    # Save the input value. You'll need this later to add back to the main path.
    X_shortcut = X

    # First component of main path
    X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    ### START CODE HERE ###

    # Second component of main path (≈3 lines)
    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    # Third component of main path (≈2 lines)
    X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)

    # Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines)
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    ### END CODE HERE ###

    return X

tf.reset_default_graph()

with tf.Session() as test:
    np.random.seed(1)
    A_prev = tf.placeholder("float", [3, 4, 4, 6])
    X = np.random.randn(3, 4, 4, 6)
    A = identity_block(A_prev, f=2, filters=[2, 4, 6], stage=1, block='a')
    test.run(tf.global_variables_initializer())
    out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0})
    print("out = " + str(out[0][1][1][0]))
out = [ 0.94822985 0. 1.16101444 2.747859 0. 1.36677003]
MIT
Convolutional Neural Networks/Residual_Networks_v2a.ipynb
joyfinder/Deep_Learning_Specialisation