Dataset columns: path (string, 7–265 characters) and concatenated_notebook (string, 46–17M characters).
notebooks/generalPredictions.ipynb
###Markdown First attempt to downscale data in a single notebook. There are fewer functions and some extraneous code compared to later notebooks.Model is trained on 2007 HRDPS/CANRCM data, used to downscale 2008 CANRCM. Importing Data ###Code ##HRDPS import files = glob.glob('/results/forcing/atmospheric/GEM2.5/gemlam/gemlam_y2007m??d??.nc') ##list of data files, febrary 2007 - each file covers one day, with hourly data ##the grid is 266 by 256 points, with resolution 2.5km ##relevant data in this file is u_wind and v_wind data files.sort() print(len(files)) ## 3-hour averaged matrix hr07_u = np.empty( (8*len(files), 266, 256)) hr07_v = np.empty( (8*len(files), 266, 256)) for i in range(len(files)): dayX = xr.open_dataset(files[i]) avg_u = np.array( dayX['u_wind'] ).reshape(8, 3, 266, 256).mean(axis = 1) avg_v = np.array( dayX['v_wind'] ).reshape(8, 3, 266, 256).mean(axis = 1) hr07_u[8*i:8*i + 8, : , : ] = avg_u ##adding 3-hour average to new data array hr07_v[8*i:8*i + 8, : , : ] = avg_v del avg_u del avg_v del dayX print(hr07_u.shape) ##CANRCM 2007 import p1 = '/home/arandhawa/canrcm_uas_2007.nc' p2 = '/home/arandhawa/canrcm_vas_2007.nc' d1 = xr.open_dataset(p1) d2 = xr.open_dataset(p2) can07_u = d1['uas'][16:,140:165,60:85] can07_v = d2['vas'][16:,140:165,60:85] print(can07_u.shape) ###Output (2904, 25, 25) ###Markdown PCA Functions ###Code ##transforms and concatenates two data sets def transform2(data1, data2): A_mat = transform(data1) B_mat = transform(data2) return np.concatenate((A_mat, B_mat), axis=0) def reverse2(matrix, orig_shape): split4 = int( matrix.shape[0]/2 ) u_data = reverse(matrix[:split4,:], orig_shape) ##reconstructing u_winds from n PCs v_data = reverse(matrix[split4:,:], orig_shape) ##reconstructing v_winds from n PCs return (u_data, v_data) ##performs PCA analysis using sklearn.pca def doPCA(comp, matrix): pca = PCA(n_components = comp) ##adjust the number of principle conponents to be calculated PCs = pca.fit_transform(matrix) eigvecs = pca.components_ mean = pca.mean_ return (PCs, eigvecs, mean) ##data must be converted into a 2D matrix for pca analysis ##transform takes a 3D data array (time, a, b) -> (a*b, time) ##(the data grid is flattened a column using numpy.flatten) def transform(xarr): arr = np.array(xarr) ##converting to numpy array arr = arr.reshape(arr.shape[0], arr.shape[1]*arr.shape[2]) ##reshaping from size (a, b, c) to (a, b*c) arr = arr.transpose() return arr def reverse(mat, orig_shape): arr = np.copy(mat) arr = arr.transpose() arr = arr.reshape(-1, orig_shape[1], orig_shape[2]) ##reshaping back to original array shape return arr ##graphing percentage of original data represented by the first n principle conponents def graph_variance(matrix, n): pcaG = PCA(n_components = n) ##Number of principle conponents to show PCsG = pcaG.fit_transform(matrix) plt.plot(np.cumsum(pcaG.explained_variance_ratio_)) plt.xlabel('number of components') plt.ylabel('cumulative explained variance'); plt.show() del pcaG del PCsG def get_mode(PCs, n, orig_shape): ##converts PCs (column vectors) to 2d conpoents for u and v wind split = int(PCs.shape[0]/2) mode_u = PCs[:split, n].reshape(orig_shape[1], orig_shape[2]) mode_v = PCs[split:, n].reshape(orig_shape[1], orig_shape[2]) return (mode_u, mode_v) def graph_nPCs(PCs, eigvecs, n, orig_shape): fig, ax = plt.subplots(n, 3, figsize=(10, 3*n)) ax[0, 0].set_title("u-conponent") ax[0, 1].set_title("v-component") ax[0, 2].set_title("time-loadings") for i in range(n): mode_u, mode_v = get_mode(PCs, i, orig_shape) colors = ax[i, 
0].pcolormesh(mode_u, cmap = 'bwr') fig.colorbar(colors, ax = ax[i,0]) colors = ax[i, 1].pcolormesh(mode_v, cmap = 'bwr') fig.colorbar(colors, ax = ax[i,1]) ax[i, 2].plot(eigvecs[i]) plt.tight_layout() plt.show() ###Output _____no_output_____ ###Markdown PCAs on Training Data ###Code can07_mat = transform2(can07_u, can07_v) can07_PCs, can07_eigs, can07_mean = doPCA(150, can07_mat) hr07_mat = transform2(hr07_u, hr07_v) hr07_PCs, hr07_eigs, hr07_mean = doPCA(150, hr07_mat) can07_eigs.shape del hr07_u del hr07_v del can07_u del can07_v ###Output _____no_output_____ ###Markdown Multiple Linear Regression ###Code def fit_modes(vectors, num_vec, data, result_size, type = 'LS'): X = vectors[0:num_vec,:].T result = np.zeros((result_size, X.shape[0])) scores = np.zeros(result_size) if type == 'LS': for i in range(result_size): y = data[i,:] reg = LinearRegression().fit(X, y) result[i] = reg.predict(X) scores[i] = reg.score(X, y) elif type == 'MAE': for i in range(result_size): y = data[i,:] reg = QuantileRegressor(quantile = 0.5, alpha = 0, solver = 'highs').fit(X, y) result[i] = reg.predict(X) scores[i] = reg.score(X, y) return (result, scores) def getCoefs(vectors, num_vec, data, num_modes, type = 'LS'): X = vectors[0:num_vec,:].T coefs = np.zeros((num_modes, X.shape[1])) intercept = np.zeros(num_modes) if type == 'LS': for i in range(num_modes): y = data[i,:] reg = LinearRegression().fit(X, y) coefs[i] = reg.coef_[0:num_vec] intercept[i] = reg.intercept_ elif type == 'MAE': for i in range(num_modes): y = data[i,:] reg = QuantileRegressor(quantile = 0.5, alpha = 0, solver = 'highs').fit(X, y) coefs[i] = reg.coef_[0:num_vec] intercept[i] = reg.intercept_ return (coefs, intercept) def getEnergyCoefs(eigs, old_eigs): coefs = np.sqrt( (old_eigs[0:eigs.shape[0]]**2).mean(axis = 1)/(eigs**2).mean(axis = 1)) return coefs coefs = getCoefs(can07_eigs, 50, hr07_eigs, 50, type = 'LS') print(coefs[0].shape) new_eigs = fit_modes(can07_eigs, 75, hr07_eigs, 75, type = 'LS')[0] energyCoefs = getEnergyCoefs(new_eigs, hr07_eigs) del new_eigs plt.plot(energyCoefs) plt.xlabel('Eigenvector Number') plt.ylabel('Energy Coefficient') y = (3.03030752, 3.03069005, 3.02725621, 3.02896918, 3.03485817) x = (85, 75, 65, 55, 45) plt.plot(x, y) plt.xlabel('Number of HRDPS modes') plt.ylabel('Average RMSE') ###Output _____no_output_____ ###Markdown PCA on 2008 data ###Code ##supposed to be 2008 canrcm data p1 = '/home/arandhawa/canrcm_2008_uas.nc' p2 = '/home/arandhawa/canrcm_2008_vas.nc' d1 = xr.open_dataset(p1) d2 = xr.open_dataset(p2) ##duplicating feb 28 twice to account for leap year (feb 29th is missing) can08_u = np.concatenate((d1['uas'][:472,140:165,60:85], d1['uas'][464:472,140:165,60:85], d1['uas'][472:,140:165,60:85] )) can08_v = np.concatenate((d2['vas'][:472,140:165,60:85], d2['vas'][464:472,140:165,60:85], d2['vas'][472:,140:165,60:85])) ##not duplicating feb 28 twice ##can08_u = d1['uas'][16:,140:165,60:85] ##can08_v = d2['vas'][16:,140:165,60:85] can08_mat = transform2(can08_u, can08_v) ##conducting PCA analysis on combined data matrix PCs, eigvecs, mean_2008 = doPCA(30, can08_mat) ## graph_nPCs(PCs, eigvecs, 3, hr08_u.shape) ###Output _____no_output_____ ###Markdown Predictions ###Code def project(u, v): ##scalar projection of u onto v - with extra 1/norm factor (for math reasons) v_norm = np.sqrt(np.sum(v**2)) return np.dot(u, v)/v_norm**2 def projectData(data_mat, new_PCs, n): time = data_mat.shape[1] proj = np.empty((n, time)) for j in range(n): for i in range(time): proj[j, i] = project(data_mat[:,i], 
new_PCs[:,j]) return proj proj = projectData(can08_mat, can07_PCs, 50) fig, ax = plt.subplots(1, 5, figsize=(15, 3)) for i in range(5): ax[i].set_title("Eigenvector {}".format(i)) ax[i].plot(proj[i]) plt.tight_layout() pred_eigs = np.matmul(coefs[0], proj) + coefs[1].reshape(-1, 1) ##multiple linear regression output pred_eigs = pred_eigs*energyCoefs[0:50].reshape(-1, 1) ##energy balancing recon = np.matmul(hr07_PCs[:,0:50], pred_eigs[0:50]) + mean_2008 u_data_rec, v_data_rec = reverse2(recon, (-1, 266, 256)) del recon print(u_data_rec.shape) ###Output (2928, 266, 256) ###Markdown Overall Function ###Code def reconstruct(downscale_mat, mean, can_PCs, can_eigs, hr_PCs, hr_eigs, n, r, method = 'LS', EB = True): coefs = getCoefs(can_eigs, n, hr_eigs, r, type = method) proj = projectData(downscale_mat, can_PCs, n) pred_eigs = np.matmul(coefs[0], proj) + coefs[1].reshape(-1, 1) ##multiple linear regression output if (EB == True): energyCoefs = getEnergyCoefs( fit_modes(can_eigs, n, hr_eigs, r, type = method)[0] , hr07_eigs) pred_eigs = pred_eigs*energyCoefs.reshape(-1, 1) ##energy balancing recon = np.matmul(hr_PCs[:,0:r], pred_eigs[0:r]) + mean u_data_rec, v_data_rec = reverse2(recon, (-1, 266, 256)) return (u_data_rec, v_data_rec) ###Output _____no_output_____ ###Markdown Analysis ###Code ##HRDPS import files = glob.glob('/results/forcing/atmospheric/GEM2.5/gemlam/gemlam_y2008m??d??.nc') ##list of data files, febrary 2007 - each file covers one day, with hourly data ##the grid is 266 by 256 points, with resolution 2.5km ##relevant data in this file is u_wind and v_wind data files.sort() print(len(files)) ## 3-hour averaged matrix hr08_u = np.empty( (8*len(files), 266, 256)) hr08_v = np.empty( (8*len(files), 266, 256)) for i in range(len(files)): dayX = xr.open_dataset(files[i]) avg_u = np.array( dayX['u_wind'] ).reshape(8, 3, 266, 256).mean(axis = 1) avg_v = np.array( dayX['v_wind'] ).reshape(8, 3, 266, 256).mean(axis = 1) hr08_u[8*i:8*i + 8, : , : ] = avg_u ##adding 3-hour average to new data array hr08_v[8*i:8*i + 8, : , : ] = avg_v del avg_u del avg_v del dayX HB_u = hr08_u[:,142,139] SS_u = hr08_u[:,174,107] SH_u = hr08_u[:,129,149] HB_v = hr08_v[:,142,139] SS_v = hr08_v[:,174,107] SH_v = hr08_v[:,129,149] del hr08_u del hr08_v print(HB_u.shape) u_data_rec, v_data_rec = reconstruct(can08_mat, mean_2008, can07_PCs, can07_eigs, hr07_PCs, hr07_eigs, 75, 75, method = 'LS', EB = False) ## Plotting data at three locations. 
fig, ax = plt.subplots(3, 3, figsize=(15, 10)) alpha_val = 0.7 ax[0, 0].plot(HB_u, label = "HRDPS", alpha = alpha_val) ax[0, 0].plot(u_data_rec[:,142,139], label = "Reconstructed", alpha = alpha_val) ax[0, 0].set_title("Halibut Bank, u-winds") ax[0, 0].set_xlabel("Time (hours)") ax[0, 0].set_ylabel("Wind Speed (m/s)") ax[0, 0].legend() ax[0, 1].plot(SS_u, label = "HRDPS", alpha = alpha_val) ax[0, 1].plot(u_data_rec[:,174,107], label = "Reconstructed", alpha = alpha_val) ax[0, 1].set_title("Sentry Shoal, u-winds") ax[0, 1].set_xlabel("Time (hours)") ax[0, 1].set_ylabel("Wind Speed (m/s)") ax[0, 1].legend() ax[0, 2].plot(SH_u, label = "HRDPS", alpha = alpha_val) ax[0, 2].plot(u_data_rec[:,129, 149], label = "Reconstructed", alpha = alpha_val) ax[0, 2].set_title("Sand Heads, u-winds") ax[0, 2].set_xlabel("Time (hours)") ax[0, 2].set_ylabel("Wind Speed (m/s)") ax[0, 2].legend() ax[1, 0].plot(HB_v, label = "HRDPS", alpha = alpha_val) ax[1, 0].plot(v_data_rec[:,142,139], label = "Reconstructed", alpha = alpha_val) ax[1, 0].set_title("Halibut Bank, v-winds") ax[1, 0].set_xlabel("Time (hours)") ax[1, 0].set_ylabel("Wind Speed (m/s)") ax[1, 0].legend() ax[1, 1].plot(SS_v, label = "HRDPS", alpha = alpha_val) ax[1, 1].plot(v_data_rec[:,174,107], label = "Reconstructed", alpha = alpha_val) ax[1, 1].set_title("Sentry Shoal, v-winds") ax[1, 1].set_xlabel("Time (hours)") ax[1, 1].set_ylabel("Wind Speed (m/s)") ax[1, 1].legend() ax[1, 2].plot(SH_v, label = "HRDPS", alpha = alpha_val) ax[1, 2].plot(v_data_rec[:,129, 149], label = "Reconstructed", alpha = alpha_val) ax[1, 2].set_title("Sand Heads, v-winds") ax[1, 2].set_xlabel("Time (hours)") ax[1, 2].set_ylabel("Wind Speed (m/s)") ax[1, 2].legend() ax[2,0].plot(np.sqrt(HB_u**2 + HB_v**2), label = "HRDPS", alpha = alpha_val) ax[2,0].plot(np.sqrt(u_data_rec[:,142,139]**2 + v_data_rec[:,142,139]**2), label = "Reconstructed", alpha = alpha_val) ax[2,0].set_xlabel("Time (hours)") ax[2,0].set_title("Halibut Bank, wind speed") ax[2,0].set_ylabel("Wind Speed (m/s)") ax[2,0].legend() ax[2,1].plot(np.sqrt(SS_u**2 + SS_v**2), label = "HRDPS", alpha = alpha_val) ax[2,1].plot(np.sqrt(u_data_rec[:,174,107]**2 + v_data_rec[:,174,107]**2), label = "Reconstructed", alpha = alpha_val) ax[2,1].set_xlabel("Time (hours)") ax[2,1].set_title("Sentry Shoal, wind speed") ax[2,1].set_ylabel("Wind Speed (m/s)") ax[2,1].legend() ax[2,2].plot(np.sqrt(SH_u**2 + SH_v**2) , label = "HRDPS", alpha = alpha_val) ax[2,2].plot(np.sqrt(u_data_rec[:,129,149]**2 + v_data_rec[:,129,149]**2) , label = "Reconstructed", alpha = alpha_val) ax[2,2].set_xlabel("Time (hours)") ax[2,2].set_title("Sand Heads, wind speed") ax[2,2].set_ylabel("Wind Speed (m/s)") ax[2,2].legend() plt.tight_layout() nums = np.arange(0, 37, 2) fig, ax = plt.subplots(1, 3, figsize=(10, 5)) ax[0].set_title("Halibut Bank") ax[0].hist(np.sqrt(HB_u**2 + HB_v**2), bins = nums, label = "HRDPS", alpha = 0.7) ax[0].hist(np.sqrt(u_data_rec[:,142,139]**2 + v_data_rec[:,142,139]**2), bins = nums, label = "Reconstructed", alpha = 0.7) ax[0].set_xlabel("Wind Speed (m/s)") ax[0].set_ylabel("Number of data points") ax[0].legend() ax[1].set_title("Sentry Shoal") ax[1].hist(np.sqrt(SS_u**2 + SS_v**2), bins = nums, label = "HRDPS", alpha = 0.7) ax[1].hist(np.sqrt(u_data_rec[:,174,107]**2 + v_data_rec[:,174,107]**2), bins = nums, label = "Reconstructed", alpha = 0.7) ax[1].set_xlabel("Wind Speed (m/s)") ax[1].set_ylabel("Number of data points") ax[1].legend() ax[2].set_title("Sand Heads") ax[2].hist(np.sqrt(SH_u**2 + SH_v**2) , bins = nums, label 
= "HRDPS", alpha = 0.7) ax[2].hist(np.sqrt(u_data_rec[:,129,149]**2 + v_data_rec[:,129,149]**2) , bins = nums, label = "Reconstructed", alpha = 0.7) ax[2].set_xlabel("Wind Speed (m/s)") ax[2].set_ylabel("Number of data points") ax[2].legend() plt.tight_layout() a1 = np.histogram(np.sqrt(HB_u**2 + HB_v**2), bins = nums) b1 = np.histogram(np.sqrt(u_data_rec[:,142,139]**2 + v_data_rec[:,142,139]**2), bins = nums) a2 = np.histogram(np.sqrt(SS_u**2 + SS_v**2), bins = nums) b2 = np.histogram(np.sqrt(u_data_rec[:,174,107]**2 + v_data_rec[:,174,107]**2), bins = nums) a3 = np.histogram(np.sqrt(SH_u**2 + SH_v**2), bins = nums) b3 = np.histogram(np.sqrt(u_data_rec[:,129,149]**2 + v_data_rec[:,129,149]**2) , bins = nums) # calc middle of bin and width of bin axes = [(a1[1][x]+a1[1][x+1])/2 for x in range(len(a1[1])-1)] w = a1[1][1] ##plot differences fig, ax = plt.subplots(1, 3, figsize = (10, 3)) ax[0].bar(axes, b1[0] - a1[0], width=w, label="Reconstruct - HRDPS 2008") ax[0].set_title("Halibut Bank") ax[1].bar(axes, b2[0] - a2[0], width=w, label="Reconstruct - HRDPS 2008") ax[1].set_title("Sentry Shoal") ax[2].bar(axes, b3[0] - a3[0], width=w, label="Reconstruct - HRDPS 2008") ax[2].set_title("Sand Heads") for i in range(3): ax[i].set_xlabel("Wind speed (m/s)") ax[i].set_ylabel("Data Points") plt.tight_layout() metrics = np.zeros((3, 3, 3)) def helper(x, y, ax, i, j): if i == 2: myBool = False else: myBool = True reg = LinearRegression(fit_intercept = myBool).fit(x.reshape(-1, 1), y) a = reg.coef_[0] b = reg.intercept_ y_pred = a*x + b bias = np.mean( y - x ) r_squared = reg.score(x.reshape(-1, 1), y) rmse = np.sqrt( mean_squared_error(y, y_pred) ) global metrics metrics[i, j, 0] = r_squared metrics[i, j, 1] = bias metrics[i, j, 2] = rmse im = ax[i,j].hist2d(x, y, bins = (17, 17), range = [[-14, 20], [-14, 20]], cmap=plt.cm.Reds) ax[i,j].plot(x, a*x+b, color = 'black') ax[i,j].plot(x, x, color = 'blue') ax[i,j].annotate("r-squared = {:.3f}".format(r_squared), (-12, 17)) ax[i,j].annotate("bias = {:.3f}".format(bias), (-12, 15)) ax[i,j].annotate("RMS error = {:.3f}".format(rmse), (-12, 13)) ax[i,j].annotate("y = {:.3f}x + {:.3f}".format(a, b), (-12, 11)) ax[i,j].set_ylabel("HRDPS data") ax[i,j].set_xlabel("reconstructed data") ax[i,j].set_ylim(-14, 20) ax[i,j].set_xlim(-14, 20) ## plt.colorbar(im,ax=ax[i,j]) ##https://stackoverflow.com/questions/23876588/matplotlib-colorbar-in-each-subplot return (a, b) a = np.empty((3, 3)) b = np.empty((3, 3)) fig, ax = plt.subplots(3, 3, figsize = (14, 10)) ##Habibut bank y = HB_u x = u_data_rec[:, 142, 139] ax[0, 0].set_title("Halibut Bank, u-wind") a[0,0], b[0,0] = helper(x, y, ax, 0, 0) y = HB_v x = v_data_rec[:, 142, 139] ax[1, 0].set_title("Halibut Bank, v-wind") a[1,0], b[1,0] = helper(x, y, ax, 1, 0) y = np.sqrt(HB_u**2 + HB_v**2) x = np.sqrt(u_data_rec[:, 142, 139]**2 + v_data_rec[:, 142, 139]**2) ax[2, 0].set_title("Halibut Bank, wind speed") a[2,0], b[2,0] = helper(x, y, ax, 2, 0) ##Sentry Shoal y = SS_u x = u_data_rec[:,174, 107] ax[0, 1].set_title("Sentry Shoa, u-wind") a[0,1], b[0,1] = helper(x, y, ax, 0, 1) y = SS_v x = v_data_rec[:,174, 107] ax[1, 1].set_title("Sentry Shoal, v-wind") a[1,1], b[1,1] = helper(x, y, ax, 1, 1) y = np.sqrt(SS_u**2 + SS_v**2) x = np.sqrt(u_data_rec[:,174, 107]**2 + v_data_rec[:,174, 107]**2) ax[2, 1].set_title("Sentry Shoal, wind speed") a[2,1], b[2,1] = helper(x, y, ax, 2, 1) ##Sand Heads y = SH_u x = u_data_rec[:,129,149] ax[0, 2].set_title("Sand Heads, u-wind") a[0,2], b[0,2] = helper(x, y, ax, 0, 2) y = SH_v x = 
v_data_rec[:,129,149] ax[1, 2].set_title("Sand Heads, v-wind") a[1,2], b[1,2] = helper(x, y, ax, 1, 2) y = np.sqrt(SH_u**2 + SH_v**2) x = np.sqrt(u_data_rec[:,129,149]**2 + v_data_rec[:,129,149]**2) ax[2, 2].set_title("Sand Heads, wind speed") a[2,2], b[2,2] = helper(x, y, ax, 2, 2) plt.tight_layout() ##metrics used to rate fit ex = np.zeros(b1[0].shape) rmse = np.sqrt( mean_squared_error(b1[0] - a1[0], ex) ) rmse += np.sqrt( mean_squared_error(b2[0] - a2[0], ex) ) rmse += np.sqrt( mean_squared_error(b3[0] - a3[0], ex) ) rmse_hist = rmse/3 print("histogram rmse:", rmse_hist) print(" R^2, bias, RMSE") avg = np.mean(np.mean(metrics[0:2], axis = 1), axis = 0) print(avg) ###Output histogram rmse: 82.63206401000994 R^2, bias, RMSE [ 0.44719818 -0.5218725 3.08363886]
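###Markdown The cells above build the downscaling workflow piece by piece, which can make the overall flow hard to follow. The sketch below is a consolidated, hedged summary of that flow using only functions defined in this notebook; it assumes the 2007 training arrays and the 2008 CANRCM arrays are loaded as above. Note that the energy-balancing coefficients from `getEnergyCoefs` simply rescale the predicted time loadings so that their mean-square magnitude matches that of the HRDPS loadings. ###Code
# Consolidated sketch of the pipeline defined in this notebook (not a new implementation)
# Train-year PCAs
can07_mat = transform2(can07_u, can07_v)                 # low-resolution predictors (2007)
hr07_mat = transform2(hr07_u, hr07_v)                    # high-resolution targets (2007)
can07_PCs, can07_eigs, can07_mean = doPCA(150, can07_mat)
hr07_PCs, hr07_eigs, hr07_mean = doPCA(150, hr07_mat)

# Year to downscale
can08_mat = transform2(can08_u, can08_v)
_, _, mean_2008 = doPCA(30, can08_mat)

# Project, regress, (optionally) energy-balance, and reconstruct the u/v wind fields
u_rec, v_rec = reconstruct(can08_mat, mean_2008,
                           can07_PCs, can07_eigs,
                           hr07_PCs, hr07_eigs,
                           n=75, r=75, method='LS', EB=False)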
image_captioning/4_Zip Your Project Files and Submit.ipynb
###Markdown Project SubmissionOnce you have checked the [rubric](https://review.udacity.com/!/rubrics/1427/view) and made sure that you have completed all tasks and answered all questions, you are ready to compress your files and submit your solution!The following steps assume:1. All cells have been *run* in Notebooks 2 and 3 (and that progress has been saved).2. All questions in those notebooks have been answered.3. Your architecture in `model.py` is your best tested architecture.Please make sure all your work is saved before moving on. You do not need to change any code in these cells; this code is only here to help you submit your project.---The first thing we'll do is convert your notebooks into `.html` files; these files will save the output of each cell and any code/text that you have modified and saved in those notebooks. Note that the first notebooks are not included because their contents will not affect your project review. ###Code !jupyter nbconvert "2_Training.ipynb" !jupyter nbconvert "3_Inference.ipynb" ###Output [NbConvertApp] Converting notebook 2_Training.ipynb to html [NbConvertApp] Writing 339835 bytes to 2_Training.html [NbConvertApp] Converting notebook 3_Inference.ipynb to html [NbConvertApp] Writing 1378535 bytes to 3_Inference.html ###Markdown Zip the project filesNext, we'll zip all these notebook files and your `model.py` file into one compressed archive named `project2.zip`.After completing this step you should see the zip file appear in your home directory, where you can download it by selecting it from the list and clicking **Download**. This step may take a minute or two to complete. ###Code !!apt-get -y update && apt-get install -y zip !zip project2.zip -r . [email protected] ###Output adding: 2_Training.html (deflated 83%) adding: 3_Inference.html (deflated 37%) adding: model.py (deflated 64%)
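###Markdown Before uploading, it can help to double-check that the archive actually contains the notebook HTML files and `model.py`. A small optional check, assuming `project2.zip` was created in the current working directory as above: ###Code
import zipfile

# List the contents of the submission archive
with zipfile.ZipFile('project2.zip') as archive:
    for name in archive.namelist():
        print(name)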
examples/add_property.ipynb
###Markdown Table of Contents1&nbsp;&nbsp;Adding a Property to wikirepo1.1&nbsp;&nbsp;Adding single column property1.2&nbsp;&nbsp;Adding a single column property that spans time1.3&nbsp;&nbsp;Adding a multi-column property2&nbsp;&nbsp;Adding a Property to Wikidata **Adding Properties**In this example we'll show how to add properties to wikirepo. See [examples/add_data](http://localhost:8888/notebooks/programming/wikirepo/examples/add_data.ipynb) for (eventually) how to leverage wikirepo to add data to Wikidata.Adding properties to wikirepo can be as simple as finding a wikirepo data module that queries a similar data structure, copying this module to the appropriate data directory for the new property (see next note), renaming the module to what the user should enter to query it, and assigning appropriate values to the variables that make up the module header: `pid`, `sub_pid`, `col_name`, `col_prefix`, `ignore_char` and `span`. To fully detail this, we're going to pretend that the following properties can't already be accessed by wikirepo:- ['P1082' (population)](https://www.wikidata.org/wiki/Property:P1082)- ['P6' (head of government)](https://www.wikidata.org/wiki/Property:P6)- ['P172' (ethnicity)](https://www.wikidata.org/wiki/Property:P172)The final modules for each of these can be found in [data/demographic/population](https://github.com/andrewtavis/wikirepo/blob/main/wikirepo/data/demographic/population.py), [data/political/executive](https://github.com/andrewtavis/wikirepo/blob/main/wikirepo/data/political/executive.py), and [data/demographic/ethnic_div](https://github.com/andrewtavis/wikirepo/blob/main/wikirepo/data/demographic/ethnic_div.py) respectively. The focus will be how to add a property that already exists on [Wikidata](https://www.wikidata.org/wiki/Wikidata:Main_Page) to wikirepo, with later versions covering the process of adding a property to Wikidata as well.**Note:** by "the appropriate data directory for the new property" we mean that a new module should go into the [wikirepo/data](https://github.com/andrewtavis/wikirepo/tree/main/wikirepo/data) directory that matches a Wikidata sub-page. Sometimes data isn't on the location's page itself, but rather on a sub-page. An example is that certain economic properties for [Germany](https://www.wikidata.org/wiki/Q183) are found on the page [economy of Germany](https://www.wikidata.org/wiki/Q8046). wikirepo checks for a property on the main page of a location first, and if the property is not found then the package checks the sub-page associated with the module's directory (the user is notified that the property does not exist for the given location if it is found in neither page). Properties are often moved from main pages to sub-pages, so even current main page property modules need to be organized based on where they could be re-indexed. Worst comes to worst, put the module in [data/misc](https://github.com/andrewtavis/wikirepo/tree/main/wikirepo/data/misc). ###Code from wikirepo.data import time_utils, wd_utils from IPython.core.display import display, HTML display(HTML("<style>.container { width:99% !important; }</style>")) ###Output _____no_output_____ ###Markdown We'll use ['Q183' (Germany)](https://www.wikidata.org/wiki/Q183) for this example. 
First we'll initialize an `EntitiesDict` and the QID, and then we'll load in the entity: ###Code ents_dict = wd_utils.EntitiesDict() qid = 'Q183' ent = wd_utils.load_ent(ents_dict=ents_dict, pq_id=qid) ents_dict.key_lbls() ###Output _____no_output_____ ###Markdown Adding a Property to wikirepo Adding single column property ['P1082' (population)](https://www.wikidata.org/wiki/Property:P1082) is an example of a property that goes in a single column, which also only occurs once at any given time.Let's start by defining our property and checking an element of the population data for Germany: ###Code pop_pid = 'P1082' pop_0_entry = wd_utils.get_prop(ents_dict=ents_dict, qid=qid, pid=pop_pid)[0] pop_0_entry ###Output _____no_output_____ ###Markdown The big thing to notice in that is that the qualifier ['P585' (point in time)](https://www.wikidata.org/wiki/Property:P585) is present. That and that `prop_0_entry['mainsnak']['datavalue']['value']['amount']` is a single value tells us that this property should go into a single column. Let's check this value, as well as get its date: ###Code pop_0_val = pop_0_entry['mainsnak']['datavalue']['value']['amount'] pop_0_val pop_0_t = pop_0_entry['qualifiers']['P585'][0]['datavalue']['value']['time'] pop_0_t ###Output _____no_output_____ ###Markdown From that we see that we could have a character that needs to be ignored - specifically the `+`. We actually don't though, as wikirepo will convert this value to an integer, and `int('+string_number')` gets rid of the `+` for us.**Note:** wikirepo will also take care of the date for us. The package will first format the date, and then it will use a provided `time_lvl` variable's value to truncate this formatted `datetime.date` object to an appropriate level. Here's a quick demo of this assuming that the `time_lvl` of our query is `yearly`: ###Code pop_0_t_formatted = wd_utils.format_t(pop_0_t) pop_0_t_formatted time_utils.truncate_date(d=pop_0_t_formatted, time_lvl='yearly') ###Output _____no_output_____ ###Markdown The value itself will be included if the above year is included in the `timespan` value passed. If no `time_lvl` variable is passed, then the full date will be maintained, and its value will be queried if it's the most recent, with the date then being appended as a string for documentation of when the value comes from.Final notes on the property module: the value in question can be accessed directly instead of through another property, so this tells us that we have no need for the `sub_pid` variable (more on this later); as the value goes into one column, we use the `col_name` variable instead of `col_prefix` (more on this later as well); and the value occurs at only one time, so we keep the `span` variable as `False` (more on this later too).We now have all the information needed to make the **population** module's header: ###Code pid = 'P1082' sub_pid = None col_name = 'population' col_prefix = None ignore_char = '' span = False ###Output _____no_output_____ ###Markdown The final module can again be found in [data/demographic/population](https://github.com/andrewtavis/wikirepo/blob/main/wikirepo/data/demographic/population.py). 
Adding a single column property that spans time An executive via ['P6' (head of government)](https://www.wikidata.org/wiki/Property:P6) is an example of a property that goes in a single column that further occurs over a span of time.Let's start again by defining the pid and loading in an entry: ###Code exec_pid = 'P6' exec_0_entry = wd_utils.get_prop(ents_dict=ents_dict, qid=qid, pid=exec_pid)[0] exec_0_entry ###Output _____no_output_____ ###Markdown Firstly we can see that the value in question cannot be directly subscripted for, as it is a QID entity itself. wikirepo will access the variable for us and derive its label, but let's find out who it is: ###Code exec_0_qid = exec_0_entry['mainsnak']['datavalue']['value']['id'] exec_0_qid wd_utils.get_lbl(ents_dict=ents_dict, pq_id=exec_0_qid) ###Output _____no_output_____ ###Markdown That this entity is a span can be seen by the fact that it does not have ['P585' (point in time)](https://www.wikidata.org/wiki/Property:P585), but rather ['P580' (start time)](https://www.wikidata.org/wiki/Property:P580). Values in this property can also have the property ['P582' (end time)](https://www.wikidata.org/wiki/Property:P582). **Note:** wikirepo assumes that an entity that has a start time and lacks an end time is the current subject for the property, so the latest date in the `timespan` argument for query functions will be used. The opposite is true for if an end time is present without a start time - the first date in the `timespan` will be used based on the assumption that this is the first subject of the property.Having values or subjects with start and end times implies that the `span` variable for the module header should in this case be `True`. We still are putting our results in a single column, so we use `col_name` instead of `col_prefix` (this is covered in the next section), and we can again the ignore `sub_pid` variable (also covered in the next section).From this we have all the information we need for the **executive** module's header: ###Code pid = 'P6' sub_pid = None col_name = 'executive' col_prefix = None ignore_char = '' span = True ###Output _____no_output_____ ###Markdown The resulting module can again be found in [data/political/executive](https://github.com/andrewtavis/wikirepo/blob/main/wikirepo/data/political/executive.py). Adding a multi-column property Ethnic diversity via ['P172' (ethnic group)](https://www.wikidata.org/wiki/Property:P172) is an example of a property that should be split over multiple columns. Rather than put all the information into a single column for the user to then split, wikirepo instead prefixes each potential element and creates columns for them for their respective data.Let's look at the first element of German ethnicity: ###Code ethnic_div_pid = 'P172' ethnic_div_0_entry = wd_utils.get_prop(ents_dict=ents_dict, qid=qid, pid=ethnic_div_pid)[0] ethnic_div_0_entry ###Output _____no_output_____ ###Markdown Each of the values for this property is an entity, and the values are stored within sub PIDs. 
As before, let's check some QIDs of this value: ###Code ethnic_div_0_qid = ethnic_div_0_entry['mainsnak']['datavalue']['value']['id'] ethnic_div_0_qid ethnic_div_0_lbl = wd_utils.get_lbl(ents_dict=ents_dict, pq_id=ethnic_div_0_qid) ethnic_div_0_lbl ethnic_div_1_entry = wd_utils.get_prop(ents_dict=ents_dict, qid=qid, pid=ethnic_div_pid)[1] ethnic_div_1_qid = ethnic_div_1_entry['mainsnak']['datavalue']['value']['id'] ethnic_div_1_lbl = wd_utils.get_lbl(ents_dict=ents_dict, pq_id=ethnic_div_1_qid) ethnic_div_1_lbl ###Output _____no_output_____ ###Markdown The value itself needs to be subsetted for using ['P1107' (proportion)](https://www.wikidata.org/wiki/Property:P1107). wikirepo will do this for us, but let's subset for the first value anyway: ###Code ethnic_div_0_val = ethnic_div_0_entry['qualifiers']['P1107'][0]['datavalue']['value']['amount'] ethnic_div_0_val ###Output _____no_output_____ ###Markdown For this property we thus need to use a `sub_pid` variable that tells wikirepo where to look for the value. **None:** another use of `sub_pid` is to set its value to `bool`. This tells wikirepo to assign `True` if the property is present. An example of this is [data/institutional/org_membership](https://github.com/andrewtavis/wikirepo/blob/main/wikirepo/data/institutional/org_membership.py) where a boolean value is assigned to columns based on if a location is a member of an organization at a given time. Values of `False` need to be filled afterwards, and some values are replaced for organizations that are widely known. This is thus an example of a property that requires a bit more work than simply setting the module header.Continuing, as we want the values to be put into separate columns where the QIDs labels for the entries get prefixed, we need to use the `col_prefix` variable and set the `col_name` variable to `None`. Let's choose `eth` for `col_prefix`, meaning that columns produced will be `eth_germans`, `eth_turks`, etc (an underscore is added automatically). To complete the needed information, the values themselves are only present at individual times, so in this case we can set `span` to `False`.From here we have the full information for the header of the **ethnic_div** module: ###Code pid = 'P172' sub_pid = 'P1107' col_name = None col_prefix = 'eth' ignore_char = '' span = False ###Output _____no_output_____
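###Markdown To summarize the pattern from the three examples above, a new wikirepo property module is mostly defined by its header variables. The template below is a hypothetical sketch (the PID, column name, and directory are placeholders, not an actual module); the meanings follow the explanations given earlier in this notebook: ###Code
# Hypothetical header template for a new module, e.g. wikirepo/data/<directory>/<property_name>.py
pid = 'P____'        # Wikidata property to query
sub_pid = None       # qualifier PID that holds the value (e.g. 'P1107'), or bool for membership-style properties
col_name = 'name'    # single output column; set to None when using col_prefix instead
col_prefix = None    # prefix for multi-column properties, e.g. 'eth' -> eth_germans, eth_turks, ...
ignore_char = ''     # characters to strip from raw values
span = False         # True when values carry start/end times rather than a single point in time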
Course_1-PreLaunch_Preparatory_Content/Module_2-Python_Libraries/2-Pandas/Session1_Basics_of_Pandas.ipynb
###Markdown Data Frame ###Code # All imports import numpy as np import pandas as pd ###Output _____no_output_____ ###Markdown Example - 1 Create a Data Frame cars using raw data stored in a dictionary ###Code cars_per_cap = [809, 731, 588, 18, 200, 70, 45] country = ['United States', 'Australia', 'Japan', 'India', 'Russia', 'Morocco', 'Egypt'] drives_right = [True, False, False, False, True, True, True] data = {"cars_per_cap": cars_per_cap, "country": country, "drives_right": drives_right} data cars = pd.DataFrame(data) cars type(cars) ###Output _____no_output_____ ###Markdown Example - 2 (Reading data from a file) Create a Data Frame by importing cars data from cars.csv ###Code # Read a file using pandas cars_df = pd.read_csv('cars.csv') cars_df ###Output _____no_output_____ ###Markdown Example - 3 (Column headers) Read file - skip header ###Code cars_df = pd.read_csv('cars.csv', header=None) cars_df ###Output _____no_output_____ ###Markdown Assign Headers ###Code # Returns an array of headers cars_df.columns # Rename Headers cars_df.columns = ['country code', 'region', 'country', 'cars_per_cap', 'drive_right'] cars_df ###Output _____no_output_____ ###Markdown Example - 4 (Row index/names) Read file - skip header and assign first column as index. ###Code # Index is returned by cars_df.index # Read file and set 1st column as index cars_df = pd.read_csv("cars.csv", header= None, index_col=0) # set the column names cars_df.columns = ['region', 'country', 'cars_per_cap', 'drive_right'] cars_df # Print the new index cars_df.index ###Output _____no_output_____ ###Markdown Rename the Index Name ###Code cars_df.index.name = 'country_code' cars_df ###Output _____no_output_____ ###Markdown Delete the index name ###Code cars_df.index.name = None cars_df ###Output _____no_output_____ ###Markdown Set Hierarchical index ###Code # Read file and set 1st column as index cars_df = pd.read_csv("cars.csv", header= None) # set the column names cars_df.columns = ['country_code','region','country','cars_per_cap','drives_right'] cars_df.set_index(['region', 'country_code'], inplace=True) cars_df ###Output _____no_output_____ ###Markdown Example - 5 (Write Data Frame to file) Write cars_df to cars_to_csv.csv ###Code cars_df.to_csv('cars_to_csv.csv') df = pd.read_csv('marks.csv', sep='|', header=None, index_col=0) df.head() df.columns = ['Name', 'Subject', 'Maximum Marks', 'Marks Obtained', 'Percentage'] df.index.name = "S.No." df.head() ###Output _____no_output_____
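###Markdown Once the hierarchical (region, country_code) index has been set as shown earlier, rows can be selected by either index level. The sketch below is illustrative only; the region and country-code labels are assumptions, so substitute values that actually appear in `cars.csv`: ###Code
# Select every row for one region (outer index level); 'Asia' is a placeholder label
cars_df.loc['Asia']

# Select a single row by both levels; ('Asia', 'IN') is a placeholder pair
cars_df.loc[('Asia', 'IN')]

# Select by the inner level only
cars_df.xs('IN', level='country_code')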
deploy-sm-notebook.ipynb
###Markdown To define MXNetModel ###Code from sagemaker.mxnet.model import MXNetModel mxnet_model = MXNetModel(model_data=model_data, role=role, entry_point=entry_point, py_version='py3', framework_version='1.6.0', image='<image uri of the container image>', model_server_workers=2 ) ###Output _____no_output_____ ###Markdown Deploy the model endpoint ###Code predictor = mxnet_model.deploy(instance_type='ml.c5.large', initial_instance_count=1) print(predictor.endpoint) ###Output _____no_output_____ ###Markdown Run a simple performance test ###Code import sagemaker from sagemaker.mxnet.model import MXNetPredictor sagemaker_session = sagemaker.Session() endpoint_name = '<ENDPOINT name>' predictor = MXNetPredictor(endpoint_name, sagemaker_session) input_sentence = '아기 공룡 둘리는 희동이와' pred_latency_sum = 0 pred_count_sum = 0 pred_cnt = 0 for i in range(20): try: pred_out = predictor.predict(input_sentence) if i == 0: continue predicted_sentence = pred_out[0] predict_count = pred_out[1] predict_latency = pred_out[2] pred_latency_sum += predict_latency pred_count_sum += predict_count pred_cnt += 1 except Exception: print('Error; ignoring it.') avg_count = pred_count_sum / pred_cnt avg_latency = pred_latency_sum / pred_cnt avg_latency_per_inf = pred_latency_sum / pred_count_sum print('Input sentence: {}'.format(input_sentence)) print('Predicted sentence: {}'.format(predicted_sentence)) print('Average number of inferred tokens: {:.2f}'.format(avg_count)) print('Average inference latency for a sentence completion: {:.2f}'.format(avg_latency)) print('Average inference latency per token: {:.2f}\n'.format(avg_latency_per_inf)) ###Output _____no_output_____ ###Markdown Clean Up ###Code predictor.delete_endpoint() predictor.delete_model() ###Output _____no_output_____
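###Markdown The first cell above references `model_data`, `role`, and `entry_point`, which are not defined in this excerpt (the notebook appears truncated). The sketch below shows, with placeholder values, how they might be set up; the entry-point script itself would implement the serving hooks (such as `model_fn` and `transform_fn`) expected by the SageMaker MXNet inference container: ###Code
import sagemaker
from sagemaker import get_execution_role

# Placeholder setup for the variables used when constructing MXNetModel above
role = get_execution_role()                          # IAM role available to this notebook environment
model_data = 's3://<bucket>/<prefix>/model.tar.gz'   # S3 location of the packaged model artifact (placeholder)
entry_point = 'inference.py'                         # local script implementing model_fn / transform_fn (placeholder)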
xgboost/xgboost-tutorial.ipynb
###Markdown https://machinelearningmastery.com/xgboost-python-mini-course/ ###Code !wget https://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data ls from numpy import loadtxt from xgboost import XGBClassifier, plot_importance from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score import matplotlib.pyplot as plt %matplotlib inline # load data dataset = loadtxt('pima-indians-diabetes.data', delimiter=',') X = dataset[:, 0:8] Y = dataset[:, 8] X.shape, Y.shape seed = 7 test_size = 0.33 X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=seed) model = XGBClassifier() model model.fit(X_train, y_train) y_pred = model.predict(X_test) predictions = [round(value) for value in y_pred] accuracy = accuracy_score(y_test, predictions) print('Accuracy: %.2f%%' % (accuracy * 100.0)) print(model.feature_importances_) plot_importance(model) model.learning_rate dir(model) model.n_estimators model.n_classes_ model.subsample model.max_depth from sklearn.model_selection import GridSearchCV from sklearn.model_selection import StratifiedKFold n_estimators = [50, 100, 150, 200] max_depth = [2, 4, 6, 8] param_grid = dict(max_depth=max_depth, n_estimators=n_estimators) param_grid X = dataset[:, 0:8] Y = dataset[:, 8] model = XGBClassifier() kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=7) grid_search = GridSearchCV(model, param_grid, scoring='neg_log_loss', n_jobs=1, cv=kfold, verbose=1) result = grid_search.fit(X, Y) result.best_score_ result.best_params_ means = result.cv_results_['mean_test_score'] means std = result.cv_results_['std_test_score'] std params = result.cv_results_['params'] params for mean, stdev, param in zip(means, std, params): print('%f (%f) with: %r' % (mean, stdev, param)) ###Output -0.478966 (0.037297) with: {'max_depth': 2, 'n_estimators': 50} -0.474370 (0.040899) with: {'max_depth': 2, 'n_estimators': 100} -0.478332 (0.046846) with: {'max_depth': 2, 'n_estimators': 150} -0.487738 (0.051882) with: {'max_depth': 2, 'n_estimators': 200} -0.491832 (0.054754) with: {'max_depth': 4, 'n_estimators': 50} -0.508782 (0.060357) with: {'max_depth': 4, 'n_estimators': 100} -0.534646 (0.071571) with: {'max_depth': 4, 'n_estimators': 150} -0.565249 (0.079672) with: {'max_depth': 4, 'n_estimators': 200} -0.514191 (0.063924) with: {'max_depth': 6, 'n_estimators': 50} -0.552013 (0.074915) with: {'max_depth': 6, 'n_estimators': 100} -0.594136 (0.078587) with: {'max_depth': 6, 'n_estimators': 150} -0.640523 (0.088810) with: {'max_depth': 6, 'n_estimators': 200} -0.540215 (0.079779) with: {'max_depth': 8, 'n_estimators': 50} -0.599218 (0.091481) with: {'max_depth': 8, 'n_estimators': 100} -0.647917 (0.103900) with: {'max_depth': 8, 'n_estimators': 150} -0.686291 (0.115291) with: {'max_depth': 8, 'n_estimators': 200}
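###Markdown After the grid search, a natural next step is to refit a classifier with the best parameters it found and confirm accuracy on the held-out split created earlier. A short sketch using only objects already defined in this notebook: ###Code
# Refit with the best max_depth / n_estimators found by the grid search
best_model = XGBClassifier(**result.best_params_)
best_model.fit(X_train, y_train)

# Evaluate on the held-out test split from earlier
best_pred = best_model.predict(X_test)
print('Tuned accuracy: %.2f%%' % (accuracy_score(y_test, best_pred) * 100.0))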
docs/notebooks/03 Running Robot.ipynb
###Markdown Running Robot This notebook is associated with the Robot Framework kernel. Therefore this notebook runs Robot Framework suites with tests or tasks. **Every Robot notebook cell must begin with** one of the available Robot table headings: **Settings**, **Variables**, **Keywords**, **Test Cases** or **Tasks**. ###Code *** Settings *** Library String ###Output _____no_output_____ ###Markdown The same Robot Framework table heading may be used in multiple code cells, and a single cell may contain Robot Framework syntax for multiple tables. ###Code *** Settings *** Test teardown Log variables *** Variable *** ${fullname} Jane Doe ###Output _____no_output_____ ###Markdown While every cell must be executed in order, similarly to a Python notebook, the resulting Robot Framework suite is executed only when the latest cell includes either tests or tasks. ###Code *** Test Cases *** Name should contain exactly two words ${parts}= Split string ${fullname} Length should be ${parts} 2 ###Output _____no_output_____ ###Markdown After executing a cell with tests or tasks, result links (`Log | Report`) are inserted below the executed cell. These links open the resulting Robot Framework log or report on a new browser tab, from which the log or report can also be downloaded as a file.All resulting logs and reports are saved inline with the notebook file. In the case of a test failure, the Robot Framework console output with the error is displayed just below the log and report links. ###Code *** Test Cases *** Name should contain exactly three words ${parts}= Split string ${fullname} Length should be ${parts} 3 ###Output _____no_output_____ ###Markdown Finally, if the last run keyword on a test or task returns a value (including a possible test teardown), that value may be rendered below the cell. ###Code *** Test Cases *** Name should still contain exactly two words [Teardown] Set variable ${parts} ${parts}= Split string ${fullname} Length should be ${parts} 2 ###Output _____no_output_____
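###Markdown The **Keywords** table is listed among the available headings above but is not demonstrated. A minimal sketch of defining a user keyword and calling it from a test, reusing the `String` library import and the `${fullname}` variable from the earlier cells: ###Code
*** Keywords ***
Name should have this many words
    [Arguments]    ${name}    ${expected}
    ${parts}=    Split string    ${name}
    Length should be    ${parts}    ${expected}

*** Test Cases ***
Full name should contain exactly two words
    Name should have this many words    ${fullname}    2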
python/dqnlab.ipynb
###Markdown Deep Q Networks (DQN) Lab NotebookIn this notebook, you will learn the basics applying the DQN algorithm to simple environments in OpenAI Gym. By the end of the notebook, you will have a working DQN agent that can balance a CartPole as well as solve other simple control problems in alternate environments. Sections1. Getting Started with Pixels to Actions- Setup- DQN Algorithm- Q-Network model- Training Requirements* OpenAI Gym[classic_control] 0.7.4 or higher * Python 3.5 or higher* pyTorch 0.4Portions of code and explanations in this notebook have been borrowed from the [PyTorch DQN Tutorial by Adam Paszke](https://github.com/apaszke). 1. Getting Started with Pixels to ActionsIn the Q-Learning Lab notebook for this course, you were able to train an agent to balance the OpenAI Gym CartPole using a provided Q-Learning algorithm. The Q-Learning algorithm read in the environment observations and reward provided by the `gym` CartPole-v0 environment api. The Q-Learning agent used a table to store all the updated values and simply looked up the best learned value to make decisions. This works great when we have an environment that can tell us exactly what is going on. The CartPole-v0 environment provides the exact position, velocity, and angles of the pole! But what if all we had was a picture of what was happening in the game? So instead of an observation like this (position, velocity, angle, tip velocity):```observation: [ 0.00326999 -0.17222302 0.01642742 0.30067511]```we have an observation like this (the actual image):As we've seen in other deep learning problems, a neural network can be trained to "learn" distinguishing features from an image. With a Deep Q-Network (DQN), we can learn to solve the CartPole problem by looking at the scene. Because of this, our results aren't directly comparable to results that use the simpler observation scheme - this is a much harder task. Training is slower, because all frames have to be rendered.The algorithm will focus on a patch of the image where the cart exists, and determine the state based on the difference between the current and previous patches. This will allow the agent to take the velocity of the pole into account from one image. 2. Setup packages, gpu, utilities ###Code # install packages in Udacity Workspaces # DO NOT EXECUTE OFFLINE !python -m pip install pyvirtualdisplay from pyvirtualdisplay import Display display = Display(visible=0, size=(1400, 900)) display.start() %matplotlib inline import gym import math import random import numpy as np import matplotlib import matplotlib.pyplot as plt from collections import namedtuple from itertools import count from PIL import Image import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import torchvision.transforms as T env = gym.make('CartPole-v0').unwrapped # set up matplotlib is_ipython = 'inline' in matplotlib.get_backend() if is_ipython: from IPython import display plt.ion() # if gpu is to be used device = torch.device("cuda" if torch.cuda.is_available() else "cpu") ###Output _____no_output_____ ###Markdown *from the PyTorch tutorial:* 3. DQN algorithmOur environment is deterministic, so all equations presented here arealso formulated deterministically for the sake of simplicity. 
In thereinforcement learning literature, they would also contain expectationsover stochastic transitions in the environment.Our aim will be to train a policy that tries to maximize the discounted,cumulative reward$$R_{t_0} = \sum_{t=t_0}^{\infty} \gamma^{t - t_0} r_t$$where $R_{t_0}$ is also known as the *return*. The discount,$\gamma$, should be a constant between $0$ and $1$that ensures the sum converges. It makes rewards from the uncertain farfuture less important for our agent than the ones in the near futurethat it can be fairly confident about.The main idea behind Q-learning is that if we had a function$Q^*: State \times Action \rightarrow \mathbb{R}$, that could tellus what our return would be, if we were to take an action in a givenstate, then we could easily construct a policy that maximizes ourrewards:$$ \pi^*(s) = \arg\!\max_a \ Q^*(s, a)$$However, we don't know everything about the world, so we don't haveaccess to $Q^*$. But, since neural networks are universal functionapproximators, we can simply create one and train it to resemble$Q^*$.For our training update rule, we'll use a fact that every $Q$function for some policy obeys the Bellman equation:$$Q^{\pi}(s, a) = r + \gamma Q^{\pi}(s', \pi(s'))$$The difference between the two sides of the equality is known as thetemporal difference error, $\delta$:$$ \delta = Q(s, a) - (r + \gamma \max_a Q(s', a))$$To minimise this error, we will use the [Huberloss](https://en.wikipedia.org/wiki/Huber_loss). The Huber loss actslike the mean squared error when the error is small, but like the meanabsolute error when the error is large - this makes it more robust tooutliers when the estimates of $Q$ are very noisy. We calculatethis over a batch of transitions, $B$, sampled from the replaymemory:$$ \mathcal{L} = \frac{1}{|B|}\sum_{(s, a, s', r) \ \in \ B} \mathcal{L}(\delta)$$$$ \text{where} \quad \mathcal{L}(\delta) = \begin{cases} \frac{1}{2}{\delta^2} & \text{for } |\delta| \le 1, \\ |\delta| - \frac{1}{2} & \text{otherwise.} \end{cases}$$ 4. Q-network modelOur model will be a convolutional neural network that takes in thedifference between the current and previous screen patches. It has twooutputs, representing $Q(s, \mathrm{left})$ and$Q(s, \mathrm{right})$ (where $s$ is the input to thenetwork). In effect, the network is trying to predict the *quality* oftaking each action given the current input. Class: DQN network model ###Code class DQN(nn.Module): def __init__(self): super(DQN, self).__init__() self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2) self.bn1 = nn.BatchNorm2d(16) self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2) self.bn2 = nn.BatchNorm2d(32) self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2) self.bn3 = nn.BatchNorm2d(32) self.head = nn.Linear(448, 2) def forward(self, x): x = F.relu(self.bn1(self.conv1(x))) x = F.relu(self.bn2(self.conv2(x))) x = F.relu(self.bn3(self.conv3(x))) return self.head(x.view(x.size(0), -1)) ###Output _____no_output_____ ###Markdown Class: Replay MemoryWe'll be using experience replay memory for training our DQN. It storesthe transitions that the agent observes, allowing us to reuse this datalater. By sampling from it randomly, the transitions that build up abatch are decorrelated. 
It has been shown that this greatly stabilizesand improves the DQN training procedure.For this, we're going to need two classses:- ``Transition`` - a named tuple representing a single transition in our environment- ``ReplayMemory`` - a cyclic buffer of bounded size that holds the transitions observed recently. It also implements a ``.sample()`` method for selecting a random batch of transitions for training. ###Code Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward')) class ReplayMemory(object): def __init__(self, capacity): self.capacity = capacity self.memory = [] self.position = 0 def push(self, *args): """Saves a transition.""" if len(self.memory) < self.capacity: self.memory.append(None) self.memory[self.position] = Transition(*args) self.position = (self.position + 1) % self.capacity def sample(self, batch_size): return random.sample(self.memory, batch_size) def __len__(self): return len(self.memory) ###Output _____no_output_____ ###Markdown Definitions: Input extractionThe code below are utilities for extracting and processing renderedimages from the environment. It uses the ``torchvision`` package, whichmakes it easy to compose image transforms. Once you run the cell it willdisplay an example patch that it extracted. ###Code resize = T.Compose([T.ToPILImage(), T.Resize(40, interpolation=Image.CUBIC), T.ToTensor()]) # This is based on the code from gym. screen_width = 600 def get_cart_location(): world_width = env.x_threshold * 2 scale = screen_width / world_width return int(env.state[0] * scale + screen_width / 2.0) # MIDDLE OF CART def get_screen(): screen = env.render(mode='rgb_array').transpose( (2, 0, 1)) # transpose into torch order (CHW) # Strip off the top and bottom of the screen screen = screen[:, 160:320] view_width = 320 cart_location = get_cart_location() if cart_location < view_width // 2: slice_range = slice(view_width) elif cart_location > (screen_width - view_width // 2): slice_range = slice(-view_width, None) else: slice_range = slice(cart_location - view_width // 2, cart_location + view_width // 2) # Strip off the edges, so that we have a square image centered on a cart screen = screen[:, :, slice_range] # Convert to float, rescare, convert to torch tensor # (this doesn't require a copy) screen = np.ascontiguousarray(screen, dtype=np.float32) / 255 screen = torch.from_numpy(screen) # Resize, and add a batch dimension (BCHW) return resize(screen).unsqueeze(0).to(device) env.reset() plt.figure() plt.imshow(get_screen().cpu().squeeze(0).permute(1, 2, 0).numpy(), interpolation='none') plt.title('Example extracted screen') plt.show() env.render() env.close() ###Output _____no_output_____ ###Markdown 5. Training Hyperparameters and utilitiesThe next two cells set the hyperparameters, instantiate our model and its optimizer, and define twoutilities:- ``select_action`` - will select an action accordingly to an epsilon greedy policy. Simply put, we'll sometimes use our model for choosing the action, and sometimes we'll just sample one uniformly. The probability of choosing a random action will start at ``EPS_START`` and will decay exponentially towards ``EPS_END``. ``EPS_DECAY`` controls the rate of the decay.- ``plot_durations`` - a helper for plotting the durations of episodes, along with an average over the last 100 episodes (the measure used in the official evaluations). The plot will be underneath the cell containing the main training loop, and will update after every episode. 
Preparation: Instantiate model and optimizer ###Code BATCH_SIZE = 128 GAMMA = 0.999 EPS_START = 0.9 EPS_END = 0.05 EPS_DECAY = 200 TARGET_UPDATE = 10 policy_net = DQN().to(device) target_net = DQN().to(device) target_net.load_state_dict(policy_net.state_dict()) target_net.eval() optimizer = optim.RMSprop(policy_net.parameters()) memory = ReplayMemory(10000) steps_done = 0 episode_durations = [] ###Output _____no_output_____ ###Markdown Definition: action selection and plot durations utility ###Code def select_action(state): global steps_done sample = random.random() eps_threshold = EPS_END + (EPS_START - EPS_END) * \ math.exp(-1. * steps_done / EPS_DECAY) steps_done += 1 if sample > eps_threshold: with torch.no_grad(): return policy_net(state).max(1)[1].view(1, 1) else: return torch.tensor([[random.randrange(2)]], device=device, dtype=torch.long) def plot_durations(): plt.figure(2) plt.clf() durations_t = torch.tensor(episode_durations, dtype=torch.float) plt.title('Training...') plt.xlabel('Episode') plt.ylabel('Duration') plt.plot(durations_t.numpy()) # Take 100 episode averages and plot them too if len(durations_t) >= 100: means = durations_t.unfold(0, 100, 1).mean(1).view(-1) means = torch.cat((torch.zeros(99), means)) plt.plot(means.numpy()) plt.pause(0.001) # pause a bit so that plots are updated if is_ipython: display.clear_output(wait=True) display.display(plt.gcf()) ###Output _____no_output_____ ###Markdown Definition: single step optimizationFinally, the code for training our model.Here, you can find an ``optimize_model`` function that performs asingle step of the optimization. It first samples a batch, concatenatesall the tensors into a single one, computes $Q(s_t, a_t)$ and$V(s_{t+1}) = \max_a Q(s_{t+1}, a)$, and combines them into ourloss. By defition we set $V(s) = 0$ if $s$ is a terminalstate.We also use a target network to compute $V(s_{t+1}) for added stability. The target network has its weights kept frozen most of the time, but is updated with the policy network’s weights every so often. This is usually a set number of steps but we shall use episodes for simplicity. ###Code def optimize_model(): if len(memory) < BATCH_SIZE: return transitions = memory.sample(BATCH_SIZE) # Transpose the batch (see http://stackoverflow.com/a/19343/3343043 for # detailed explanation). batch = Transition(*zip(*transitions)) # Compute a mask of non-final states and concatenate the batch elements non_final_mask = torch.tensor(tuple(map(lambda s: s is not None, batch.next_state)), device=device, dtype=torch.uint8) non_final_next_states = torch.cat([s for s in batch.next_state if s is not None]) state_batch = torch.cat(batch.state) action_batch = torch.cat(batch.action) reward_batch = torch.cat(batch.reward) # Compute Q(s_t, a) - the model computes Q(s_t), then we select the # columns of actions taken state_action_values = policy_net(state_batch).gather(1, action_batch) # Compute V(s_{t+1}) for all next states. 
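###Markdown To make the exploration schedule concrete, the threshold in `select_action` follows EPS_END + (EPS_START - EPS_END) * exp(-steps_done / EPS_DECAY). A quick check of a few approximate values with the hyperparameters defined above: ###Code
import math

# Approximate epsilon after 0, 200, and 1000 steps: about 0.90, 0.36, and 0.06
for steps in (0, 200, 1000):
    eps = EPS_END + (EPS_START - EPS_END) * math.exp(-steps / EPS_DECAY)
    print(steps, round(eps, 3))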
next_state_values = torch.zeros(BATCH_SIZE, device=device) next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0].detach() # Compute the expected Q values expected_state_action_values = (next_state_values * GAMMA) + reward_batch # Compute Huber loss loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1)) # Optimize the model optimizer.zero_grad() loss.backward() for param in policy_net.parameters(): param.grad.data.clamp_(-1, 1) optimizer.step() ###Output _____no_output_____ ###Markdown Definition: main training loopBelow, you can find the main training loop wrapped in the `dqn_training` definition. At the beginning we resetthe environment and initialize the ``state`` Tensor. Then, we samplean action, execute it, observe the next screen and the reward (always1), and optimize our model once. When the episode ends (our modelfails), we restart the loop. ###Code # wrap the main training loop in a definition def dqn_training(num_episodes, visualize_plt=False, max_steps=500): """ num_episodes: int number of episodes visualize_plt: bool if true, display the cartpole action in the notebook if false (default), display the episodes x durations graph """ for i_episode in range(num_episodes): # Initialize the environment and state env.reset() if visualize_plt: img = plt.imshow(env.render(mode='rgb_array')) # only call this once, only for jupyter last_screen = get_screen() current_screen = get_screen() state = current_screen - last_screen for t in count(): # Select and perform an action action = select_action(state) _, reward, done, _ = env.step(action.item()) reward = torch.tensor([reward], device=device) # Observe new state last_screen = current_screen current_screen = get_screen() if visualize_plt: img.set_data(env.render(mode='rgb_array')) # just update the data plt.axis('off') display.display(plt.gcf()) display.clear_output(wait=True) if not done: next_state = current_screen - last_screen else: next_state = None # Store the transition in memory memory.push(state, action, next_state, reward) # Move to the next state state = next_state # Perform one step of the optimization (on the target network) optimize_model() if done or t>max_steps: episode_durations.append(t + 1) if visualize_plt: print("Duration = {}".format(t)) else: plot_durations() break # Update the target network if i_episode % TARGET_UPDATE == 0: target_net.load_state_dict(policy_net.state_dict()) print('Complete') env.render(close=True) env.close() plt.ioff() plt.show() ###Output _____no_output_____ ###Markdown Execution: Train the modelBelow, `num_episodes` is set small. You can run this cell over an over to increase training. The durations graph shows the duration of the cartpole balance for each episode. ###Code # note that the duplicate %matplotlib backend call is needed to avoid duplicate graphs %matplotlib %matplotlib num_episodes=5 dqn_training(num_episodes) ###Output _____no_output_____ ###Markdown Execution: CartPole visualizationExecute the following cell at any point to watch the trained DQN agent control the Cartpole. ###Code %matplotlib inline num_episodes=1 dqn_training(num_episodes, visualize_plt=True) ###Output _____no_output_____
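###Markdown The notebook trains the policy network but never saves it or evaluates it without exploration. The sketch below is a hedged example of persisting the weights and running one purely greedy episode with the trained network; the filename is a placeholder: ###Code
# Save the learned weights (the path is a placeholder)
torch.save(policy_net.state_dict(), 'cartpole_dqn.pt')

# One greedy evaluation episode, reusing the screen-difference state construction from training
env.reset()
last_screen = get_screen()
current_screen = get_screen()
state = current_screen - last_screen
steps = 0
done = False
while not done and steps < 500:
    with torch.no_grad():
        action = policy_net(state).max(1)[1].view(1, 1)   # always take the argmax action
    _, _, done, _ = env.step(action.item())
    last_screen = current_screen
    current_screen = get_screen()
    state = current_screen - last_screen
    steps += 1
env.close()
print('Greedy episode lasted {} steps'.format(steps))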
80-reporting.ipynb
###Markdown 80-reporting> Reporting and evaluating model performance In this notebook series, we investigate and convey the results of the data analysis. Common helpful packages ###Code #Data analysis and processing import pandas as pd import numpy as np #plotting import matplotlib.pyplot as plt import seaborn as sns #Constants/globals cleaned_data_filename = '' ###Output _____no_output_____
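###Markdown As a hedged sketch of how this reporting notebook might continue once ``cleaned_data_filename`` is filled in, the cell below loads the cleaned dataset and produces a quick tabular and graphical summary. The CSV path used here is a hypothetical placeholder, and ``describe`` plus histograms is only one reasonable starting point for a report. ###Code
import pandas as pd
import matplotlib.pyplot as plt

# Hypothetical placeholder; in practice, set cleaned_data_filename above.
cleaned_data_filename = 'cleaned_data.csv'

df = pd.read_csv(cleaned_data_filename)

# Tabular summary of every column for the report.
print(df.describe(include='all'))

# One overview figure: distributions of the numeric columns.
df.select_dtypes('number').hist(figsize=(10, 8), grid=False)
plt.tight_layout()
plt.show()
###Output _____no_output_____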
_posts/scikit/isolation-forest/IsolationForest example.ipynb
###Markdown An example using IsolationForest for anomaly detection. The IsolationForest ‘isolates’ observations by randomly selecting a feature and then randomly selecting a split value between the maximum and minimum values of the selected feature. Since recursive partitioning can be represented by a tree structure, the number of splittings required to isolate a sample is equivalent to the path length from the root node to the terminating node. This path length, averaged over a forest of such random trees, is a measure of normality and our decision function. Random partitioning produces noticeably shorter paths for anomalies. Hence, when a forest of random trees collectively produces shorter path lengths for particular samples, they are highly likely to be anomalies. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. “Isolation forest.” Data Mining, 2008. ICDM‘08. Eighth IEEE International Conference on. New to Plotly? Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/). You can set up Plotly to work in [online](https://plot.ly/python/getting-started/initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/start-plotting-online). We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started! Version ###Code import sklearn sklearn.__version__ ###Output _____no_output_____ ###Markdown Imports This tutorial imports [IsolationForest](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.IsolationForest.html#sklearn.ensemble.IsolationForest). 
###Code print(__doc__) import plotly.plotly as py import plotly.graph_objs as go import numpy as np import matplotlib.pyplot as plt from sklearn.ensemble import IsolationForest ###Output Automatically created module for IPython interactive environment ###Markdown Calculations ###Code rng = np.random.RandomState(42) # Generate train data X = 0.3 * rng.randn(100, 2) X_train = np.r_[X + 2, X - 2] # Generate some regular novel observations X = 0.3 * rng.randn(20, 2) X_test = np.r_[X + 2, X - 2] # Generate some abnormal novel observations X_outliers = rng.uniform(low=-4, high=4, size=(20, 2)) # fit the model clf = IsolationForest(max_samples=100, random_state=rng) clf.fit(X_train) y_pred_train = clf.predict(X_train) y_pred_test = clf.predict(X_test) y_pred_outliers = clf.predict(X_outliers) # plot the line, the samples, and the nearest vectors to the plane xx = yy = np.linspace(-5, 5, 50) Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) ###Output _____no_output_____ ###Markdown Plot Results ###Code def matplotlib_to_plotly(cmap, pl_entries): h = 1.0/(pl_entries-1) pl_colorscale = [] for k in range(pl_entries): C = map(np.uint8, np.array(cmap(k*h)[:3])*255) pl_colorscale.append([k*h, 'rgb'+str((C[0], C[1], C[2]))]) return pl_colorscale back = go.Contour(x=xx, y=yy, z=Z, colorscale=matplotlib_to_plotly(plt.cm.Blues_r, len(Z)), showscale=False, line=dict(width=0) ) b1 = go.Scatter(x=X_train[:, 0], y=X_train[:, 1], name="training observations", mode='markers', marker=dict(color='white', size=7, line=dict(color='black', width=1)) ) b2 = go.Scatter(x=X_test[:, 0], y=X_test[:, 1], name="new regular observations", mode='markers', marker=dict(color='green', size=6, line=dict(color='black', width=1)) ) c = go.Scatter(x=X_outliers[:, 0], y=X_outliers[:, 1], name="new abnormal observations", mode='markers', marker=dict(color='red', size=6, line=dict(color='black', width=1)) ) layout = go.Layout(title="IsolationForest", hovermode='closest') data = [back, b1, b2, c] fig = go.Figure(data=data, layout=layout) py.iplot(fig) from IPython.display import display, HTML display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />')) display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">')) ! pip install git+https://github.com/plotly/publisher.git --upgrade import publisher publisher.publish( 'IsolationForest example.ipynb', 'scikit-learn/plot-isolation-forest/', 'IsolationForest | plotly', ' ', title = 'IsolationForest | plotly', name = 'IsolationForest', has_thumbnail='true', thumbnail='thumbnail/isolation.jpg', language='scikit-learn', page_type='example_index', display_as='ensemble_methods', order=3, ipynb= '~Diksha_Gabha/2996') ###Output _____no_output_____
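###Markdown To complement the contour plot above, the following standalone sketch (an addition, not part of the original example) shows the same idea numerically: ``decision_function`` returns higher scores for regular observations and lower scores for anomalies, mirroring the shorter average path lengths described at the top of this example. ###Code
import numpy as np
from sklearn.ensemble import IsolationForest

rng = np.random.RandomState(42)

# Two tight clusters of "normal" points plus scattered abnormal points.
X_train = np.r_[0.3 * rng.randn(100, 2) + 2, 0.3 * rng.randn(100, 2) - 2]
X_regular = np.r_[0.3 * rng.randn(10, 2) + 2, 0.3 * rng.randn(10, 2) - 2]
X_outliers = rng.uniform(low=-4, high=4, size=(10, 2))

clf = IsolationForest(max_samples=100, random_state=rng)
clf.fit(X_train)

# Higher (less negative) scores mean more normal; lower scores mean more anomalous.
print("mean score, regular observations: ", clf.decision_function(X_regular).mean())
print("mean score, abnormal observations:", clf.decision_function(X_outliers).mean())
###Output _____no_output_____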
notebooks/labs37_notebooks/FrankenBERT_Labs37.ipynb
###Markdown Imports & Installations ###Code !pip install pyforest !pip install plotnine !pip install transformers !pip install psycopg2-binary !pip uninstall -y tensorflow-datasets !pip install lit_nlp tfds-nightly transformers==4.1.1 # Automatic library importer (doesn't quite import everything yet) from pyforest import * # Expands Dataframe to view entire pandas dataframe pd.options.display.max_colwidth = 750 # For tracking the duration of executed code cells from time import time # To connect to Blue Witness Labeler's DB import psycopg2 # For visualizations from plotnine import * from plotnine.data import mpg import plotly.graph_objects as go from plotly.subplots import make_subplots # For BERT model import torch from torch.utils.data import TensorDataset, DataLoader, RandomSampler from transformers import BertTokenizer, BertForSequenceClassification, AdamW from transformers import get_linear_schedule_with_warmup from tensorflow.keras.preprocessing.sequence import pad_sequences ###Output _____no_output_____ ###Markdown Reading in our Tweets ###Code def get_df(db_url) -> pd.DataFrame: ''' Connects to our Blue Witness Data Labeler and retrieves manually labelled text before converting them all into a pandas dataframe. Parameters ---------- db_url: psycopg2 database Returns ------- df: pandas datafarme Contains thousands of text with appropriate police (non-)violence labels ''' conn = psycopg2.connect(db_url) curs = conn.cursor() curs.execute("SELECT * FROM training;") cols = [k[0] for k in curs.description] rows = curs.fetchall() df = pd.DataFrame(rows, columns=cols) curs.close() conn.close() return df # ALWAYS REMEMBER TO REMOVE THE PostgreSQL URL ASSIGNED TO THIS VARIABLE WHEN COMITTING TO OUR REPO db_url = "" data_labeler_df = get_df(db_url) data_labeler_df def rank_wrangle(): ''' Loads in both synthetic tweets generated from GPT-2 and authentic tweets scraped and manually labelled from Twitter. Combines both sets of tweets together into a single dataframe. Drops any null values and duplicates. 
rank2_syn.txt, rank3_syn.txt, and rank4_syn.txt can be found in notebooks/labs37_notebooks/synthetic_tweets Parameters ---------- None Returns ------- df: pandas dataframe Contains fully concatenated dataframe ''' # Supplying our dataframes with proper labels column_headers = ['tweets', 'labels'] # Reading in our three police force rank datasets synthetic_tweets_rank2 = pd.read_csv("/content/rank2_syn.txt", sep = '/', names=column_headers) synthetic_tweets_rank3 = pd.read_csv("/content/rank3_syn.txt", sep = '/', names=column_headers) synthetic_tweets_rank4 = pd.read_csv("/content/rank4_syn.txt", sep = '/', names=column_headers) # Concatenating all of our datasets into one compiled = pd.concat([data_labeler_df, synthetic_tweets_rank2, synthetic_tweets_rank3, synthetic_tweets_rank4]) # Dropping unnecessary column compiled.drop('id', axis=1, inplace=True) # Discarding generated duplicates from GPT-2 while keeping the original Tweets compiled.drop_duplicates(subset='tweets', keep='first', inplace=True) # Dropping any possible NaNs if compiled.isnull().values.any(): compiled.dropna(how='any', inplace=True) return compiled # Applying our function above to view the contents of our dataframe force_ranks = rank_wrangle() force_ranks ###Output _____no_output_____ ###Markdown Visualizations ###Code %matplotlib inline (ggplot(force_ranks) # defining what dataframe to use + aes(x='labels') # defining what variable/column to use + geom_bar(size=20) # defining the type of plot to use and its size + labs(title='Number of Tweets Reporting Police Violence per Force Rank', x='Force Rank', y='Number of Tweets') ) # Creating custom donut chart with Plotly labels = ['0 - No Police Presence', '5 - Lethal Force (Guns & Explosives)', '1 - Non-violent Police Presence', '3 - Blunt Force Trauma (Batons & Shields)', '4 - Chemical & Electric Weapons (Tasers & Pepper Spray)', '2 - Open Handed (Arm Holds & Pushing)'] values = force_ranks.labels.value_counts() bw_colors = ['rgb(138, 138, 144)', 'rgb(34, 53, 101)', 'rgb(37, 212, 247)', 'rgb(59, 88, 181)', 'rgb(56, 75, 126)', 'rgb(99, 133, 242)'] # Using 'pull' on Rank 5 to accentuate the frequency of the most excessive use of force by police # 'hole' determines the size of the donut chart fig = go.Figure(data=[go.Pie(labels=labels, values=values, pull=[0, 0.2, 0, 0, 0, 0], hole=.3, name='Blue Witness', marker_colors=bw_colors)]) # Displaying our donut chart fig.update(layout_title_text='Percentage of Tweets Reporting Police Violence per Force Rank') fig = go.Figure(fig) fig.show() ###Output _____no_output_____ ###Markdown Preparing Data for BERTSplitting dataframe into training and testing sets before converting to parquet for later reference/resource. ###Code def parquet_and_split(): ''' Splits our data into a format amicable to NLP modeling. Saves our original dataframe as well as the two split dataframes into parquet files for later reference/use. 
----- Parameters ------ None Returns ------- df: pandas dataframes Contains two split dataframes ready to be fit to and tested against a model ''' # Splitting dataframe into training and testing sets for modeling # 20% of our data will be reserved for testing training, testing = train_test_split(force_ranks, test_size=0.2) # Sanity Check if force_ranks.shape[0] == training.shape[0] + testing.shape[0]: print("Sanity Check - Succesful!") else: print("Sanity Check - Unsuccessful!") # Converting dataframes to parquet format for later reference # Using parquet as our new dataset storage format as they cannot be edited like CSVs can. They are immutable. # For viewing in vscode, install the parquet-viewer extension: https://marketplace.visualstudio.com/items?itemName=dvirtz.parquet-viewer training.to_parquet('synthetic_training.parquet') testing.to_parquet('synthetic_testing.parquet') force_ranks.to_parquet('synthetic_complete.parquet') return training, testing training, testing = parquet_and_split() ###Output _____no_output_____ ###Markdown BERT Training our NLP Multi-Class Classification Model ###Code def bert_trainer(df, output_dir: str, epochs: int): start = time() max_len = 280 if torch.cuda.is_available(): print("CUDA Active") device = torch.device("cuda") else: print("CPU Active") device = torch.device("cpu") sentences = df["tweets"].values labels = df["labels"].values tokenizer = BertTokenizer.from_pretrained( 'bert-base-uncased', do_lower_case=True, ) inputs = [ tokenizer.encode(sent, add_special_tokens=True) for sent in sentences ] inputs_ids = pad_sequences( inputs, maxlen=max_len, dtype="long", value=0, truncating="post", padding="post", ) attention_masks = [ [int(token_id != 0) for token_id in sent] for sent in inputs_ids ] train_inputs = torch.tensor(inputs_ids) train_labels = torch.tensor(labels) train_masks = torch.tensor(attention_masks) batch_size = 32 train_data = TensorDataset(train_inputs, train_masks, train_labels) train_sampler = RandomSampler(train_data) train_dataloader = DataLoader( train_data, sampler=train_sampler, batch_size=batch_size, ) model = BertForSequenceClassification.from_pretrained( 'bert-base-uncased', num_labels=6, output_attentions=False, output_hidden_states=False, ) if torch.cuda.is_available(): model.cuda() optimizer = AdamW(model.parameters(), lr=2e-5, eps=1e-8) total_steps = len(train_dataloader) * epochs scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=0, num_training_steps=total_steps, ) loss_values = [] print('\nTraining...') for epoch_i in range(1, epochs + 1): print(f"\nEpoch: {epoch_i}") total_loss = 0 model.train() for step, batch in enumerate(train_dataloader): b_input_ids = batch[0].to(device) b_input_mask = batch[1].to(device) b_labels = batch[2].to(device) model.zero_grad() outputs = model( b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels, ) loss = outputs[0] total_loss += loss.item() loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0) optimizer.step() scheduler.step() avg_train_loss = total_loss / len(train_dataloader) loss_values.append(avg_train_loss) print(f"Average Loss: {avg_train_loss}") if not os.path.exists(output_dir): os.makedirs(output_dir) print(f"\nSaving model to {output_dir}") model_to_save = model.module if hasattr(model, 'module') else model model_to_save.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) end = time() total_run_time_in_hours = (((end - start)/60)/60) rounded_total_run_time_in_hours = np.round(total_run_time_in_hours, 
decimals=2) print(f"Finished training in {rounded_total_run_time_in_hours} hours!") !nvidia-smi # If running on Colab, the best GPU to have in use is the NVIDIA Tesla P100 # Colab notebook may crash the first time this code cell is run. # Running this cell again after runtime restart shouldn't produce any more issues. bert_trainer(training, 'saved_model', epochs=50) ###Output CUDA Active ###Markdown Making Predictions ###Code class FrankenBert: """ Implements BertForSequenceClassification and BertTokenizer for binary classification from a saved model """ def __init__(self, path: str): """ If there's a GPU available, tell PyTorch to use the GPU. Loads model and tokenizer from saved model directory (path) """ if torch.cuda.is_available(): self.device = torch.device('cuda') else: self.device = torch.device('cpu') self.model = BertForSequenceClassification.from_pretrained(path) self.tokenizer = BertTokenizer.from_pretrained(path) self.model.to(self.device) def predict(self, text: str): """ Makes a binary classification prediction based on saved model """ inputs = self.tokenizer( text, padding=True, truncation=True, max_length=280, return_tensors='pt', ).to(self.device) output = self.model(**inputs) prediction = output[0].softmax(1) tensors = prediction.detach().cpu().numpy() result = np.argmax(tensors) confidence = tensors[0][result] return f"Rank: {result}, {100 * confidence:.2f}%" model = FrankenBert('saved_model') model.predict("Mickey Mouse is in the house") model.predict("Cops gave me a speeding ticket for walking too fast") model.predict("The cops showed up but didn't do anything") model.predict("Cops held that guy with a neck hold") model.predict("Cops punched me and pushed me to the ground") model.predict("Cops hit her with a baton") model.predict("Cops sprayed my mom with pepper spray") model.predict("Cops shot rubber bullets at the crowd") model.predict("Police used tear gas on a pedestrian for no reason") model.predict("Cops killed that woman") model.predict("Yesterday I saw a policeman hit a poor person behind my house. I wonder whats going on") model.predict("Man ran up to me and pepper sprayed me. I've called the cops, but they have not gotten themselves involved yet.") model.predict("People gathered to protest. Cops show up and are using batons to disperse the gathering.") model.predict("People gathered to protest. Cops show up and are using rubber bullets to disperse the gathering.") ###Output _____no_output_____ ###Markdown Saving Trained Model ###Code from google.colab import drive drive.mount('/content/gdrive') #path that contains folder you want to copy %cd /content/gdrive/MyDrive/ColabNotebooks/Labs/saved_model # copy local folder to folder on Google Drive %cp -av /content/saved_model saved_model ###Output /content/gdrive/MyDrive/ColabNotebooks/Labs/saved_model '/content/saved_model' -> 'saved_model' '/content/saved_model/config.json' -> 'saved_model/config.json' '/content/saved_model/pytorch_model.bin' -> 'saved_model/pytorch_model.bin' '/content/saved_model/tokenizer_config.json' -> 'saved_model/tokenizer_config.json' '/content/saved_model/special_tokens_map.json' -> 'saved_model/special_tokens_map.json' '/content/saved_model/vocab.txt' -> 'saved_model/vocab.txt'
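###Markdown ``FrankenBert`` above scores one tweet at a time; a natural follow-up is to score the held-out split written earlier in this notebook. The sketch below is an outline under stated assumptions (it presumes the ``saved_model`` directory and ``synthetic_testing.parquet`` produced above are available in the working directory) and computes simple accuracy over the test tweets. ###Code
import pandas as pd
import torch
from transformers import BertForSequenceClassification, BertTokenizer

MODEL_DIR = 'saved_model'                   # written by bert_trainer above
TEST_PARQUET = 'synthetic_testing.parquet'  # written by parquet_and_split above

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = BertForSequenceClassification.from_pretrained(MODEL_DIR).to(device)
tokenizer = BertTokenizer.from_pretrained(MODEL_DIR)
model.eval()

testing = pd.read_parquet(TEST_PARQUET)

correct = 0
with torch.no_grad():
    for tweet, label in zip(testing['tweets'], testing['labels']):
        inputs = tokenizer(tweet, padding=True, truncation=True,
                           max_length=280, return_tensors='pt').to(device)
        pred = int(model(**inputs).logits.argmax(dim=1).item())
        correct += int(pred == int(label))

print(f"Held-out accuracy: {correct / len(testing):.3f}")
###Output _____no_output_____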
ai_etc/III_MachineLearningClassification.ipynb
###Markdown III. Machine Learning ClassificationNow that we have a sense of what our data loooks like, we're ready to build our classifier. We'll do this in three steps:* 1. **Data Cleanup**: We'll take a few extra steps to clean up our data and prepare it for our classifiers* 2. **Training**: We'll train several machine learning models on our input data* 3. **Evaluation/Analysis**: We'll look at the results of our classifiers and analyze them ###Code df = pd.read_csv('diabetes.csv') df.head() def prepare_dataset(df): """ Separates the dataset into X and y """ X = df.loc[:, df.columns != 'Outcome'] y = df.Outcome return X, y X, y = prepare_dataset(df) ###Output _____no_output_____ ###Markdown Data CleanupNo dataset is perfect. When you look at some of our features, you might notice that not everything makes sense. For example, if you look at the minimum of some of these columns, you notice that some patients have a BMI and blood pressure of 0. Does that sound right?Chances are these are **missing values**: those patients don't really have a BMI of 0, but maybe the researchers didn't collect those patient's BMI and so just put 0 in as a subsitute. You might also see these as "NaN", meaning "not a number", but in this case they were assigned a value of 0. Questions to discuss- Why might these values be missing?- Does every column with a "0" mean that that's a missing value?- What are some potential problems of building a classifier with missing values?- What should we do about them? ###Code X.describe() ###Output _____no_output_____ ###Markdown There are multiple ways to handle missing values. To keep things simple, we'll simply replace those "0" values with the average of that column, which is a rough estimate of what we might expect that patient to have. This is called [data imputation](https://www.theanalysisfactor.com/seven-ways-to-make-up-data-common-methods-to-imputing-missing-data/).Not every column has missing values - for example, 0 pregnancies makes sense, and the minimum age is 21. We'll only impute the values in the columns for which a value of "0" doesn't make sense. ###Code columns_to_fill = ["Glucose", "BloodPressure", "SkinThickness", "Insulin", "BMI"] X[columns_to_fill] = X[columns_to_fill].replace(0, X[columns_to_fill].mean()) X.describe() ###Output _____no_output_____ ###Markdown The next step we'll take is **scaling** the data. Each of the columns of our table has very different ranges. Some algorithms expect each variable to be scaled within a normal range. https://scikit-learn.org/stable/modules/preprocessing.htmlpreprocessing-scalerhttps://en.wikipedia.org/wiki/Feature_scaling ###Code columns = X.columns from sklearn.preprocessing import StandardScaler X = StandardScaler().fit_transform(X) X = pd.DataFrame(data=X, columns=columns) X.describe() _ = X.hist(figsize=(10,8), grid=False) ###Output _____no_output_____ ###Markdown TrainingWe're now finally ready to train our models! Test-Train SplitWe'll also split up our dataset into a *train* and *test* set. Our ultimate goal is to be able to predict whether a set of brand-new patients has diabetes. These new patients have never been seen before by our classifier. 
A common practice in machine learning development is to take a portion of our data and leave those rows out during training, then we'll see how our classifiers perform on these rows.https://en.wikipedia.org/wiki/Training,_validation,_and_test_sets Questions to discuss- Why is it important to evaluate on testing data that is separate from our training data?- How should you select the data that you'll leave out for testing?- What are the costs of taking out data for testing? ie., making a **blind testset**- What proportions should be used for testing and training? Is there a guiding principle? ###Code from sklearn.metrics import accuracy_score from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix # Split up data from sklearn.model_selection import train_test_split X_train,X_test,y_train,y_test = train_test_split(X,y, test_size=0.2) X_train.shape X_test.shape ###Output _____no_output_____ ###Markdown Choosing an algorithmThere are many different types of algorithms that can be used for machine learning classification. Each one works a little differently and some work better for a specific problem. To find the best one, we'll pick a few different models and train each of them, then analyze and compare the results.https://en.wikipedia.org/wiki/Outline_of_machine_learningTypes_of_machine_learning_algorithmsWe won't go into the details about each classifier, but we'll try out each of these 6 classifiers. Feel free to look them up and see how each performs. ###Code from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC models = {"LR": LogisticRegression(), "NB": GaussianNB(), "KNN": KNeighborsClassifier(), "DT": DecisionTreeClassifier(), "RFC": RandomForestClassifier(), "SVM": SVC() } model_scores = [] model_names_scores = [] for name, model in models.items(): # Train the model print("Training {}".format(name)) model.fit(X_train, y_train) # Predict on the test set pred = model.predict(X_test) # Measure the accuracy accuracy = accuracy_score(y_test, pred) print("Accuracy: {}".format(accuracy)) print() # Append to this list to analyze later model_names_scores.append((name, accuracy)) ###Output _____no_output_____ ###Markdown Evaluation and AnalysisLet's see how our classifiers did on our test set. Let's start by sorting the scores by accuracy and plotting their performance. ###Code sorted_name_scores = sorted(model_names_scores, key=lambda x:x[1], reverse=True) sorted_names, sorted_scores = zip(*sorted_name_scores) x_plot = range(len(sorted_names)) fig, ax = plt.subplots() ax.bar(x_plot, sorted_scores) ax.set_ylim(0.5, 1.0) ax.set_xticks(x_plot) _ = ax.set_xticklabels(sorted_names) ###Output _____no_output_____ ###Markdown Closer analysisNow, let's take the best-performing model and look at more details. While training, we only looked at the **accuracy** of the classifier. But an accuracy score is sometimes insufficient. **Question**: Can you think of some reasons why?To supplement this, we'll look at three more metrics: **precision**, **recall**, and **f1-score**. 
Here is a blog post that explains the difference between these metrics: https://towardsdatascience.com/accuracy-precision-recall-or-f1-331fb37c5cb9In summary, here's what each metric tells us:- **Precision**: If our classifier says a patient has diabetes, how likely is it that our classifier is correct?- **Recall**: If our classifier is given a patient with diabetes, how likely is it that our classifier will correctly predict that?- **F1**: This is a balanced average of the two Now, analyze your resultsLook at the graph and take name of the highest-performing classifier (the first label on the x-axis). Replace the value of `best_clf_name` in the cell below. We'll then look at more detailed metrics for this model. ###Code # Look at the best score best_clf_name = sorted_names[0] best_clf = models[best_clf_name] print('Best Model: {0}'.format(best_clf)) best_clf.fit(X_train, y_train) # Retrain pred = best_clf.predict(X_test) print(classification_report(y_test, pred)) ###Output _____no_output_____ ###Markdown Questions to discuss- Compare your results to the results of others in your group. Did you get the same results, or are they different?- What kind of factors can cause you to get different results from someone else?- If you want, run this part of the notebook, starting at the cell that says "Split up Data". Try changing the parameter `test_size` (currently 0.2). Does doing this change your results? Confusion MatrixLet's now plot a **confusion matrix** to see what kinds of mistakes our classifier is making. A confusion matrix plots the **truth** on the y axis (whether a patient had diabetes or didn't) against the **predictions** on the x axis (whether or not our classifier predicted diabetes). ###Code conf = confusion_matrix(y_test,pred) label = ["No Diabetes","Diabetes"] ax = sns.heatmap(conf, annot=True, xticklabels=label, yticklabels=label) ax.set_ylabel('Truth', fontdict={'size': 20}) _ = ax.set_xlabel('Prediction', fontdict={'size': 20}) ###Output _____no_output_____
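###Markdown To tie the confusion matrix back to the precision and recall scores discussed above, the small standalone sketch below computes both by hand from the four counts and checks them against scikit-learn. The labels here are made up purely for illustration rather than taken from the diabetes results. ###Code
import numpy as np
from sklearn.metrics import confusion_matrix, precision_score, recall_score

# Illustrative labels: 1 = diabetes, 0 = no diabetes.
y_true = np.array([1, 1, 1, 1, 0, 0, 0, 0, 0, 0])
y_pred = np.array([1, 1, 0, 0, 0, 0, 0, 0, 1, 0])

tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()

precision = tp / (tp + fp)  # of the predicted positives, how many were correct?
recall = tp / (tp + fn)     # of the actual positives, how many did we find?

print(f"by hand:      precision={precision:.2f}, recall={recall:.2f}")
print(f"scikit-learn: precision={precision_score(y_true, y_pred):.2f}, "
      f"recall={recall_score(y_true, y_pred):.2f}")
###Output _____no_output_____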
projects/.ipynb_checkpoints/SampleMLTASKQUESTION-checkpoint.ipynb
###Markdown Procedures for the below work1. Create the file for Train and Test Data from the data given2. Insert Sales list using for loop for each record according to the conditons if stock = demand ....sales = damand if stock > demand ....sales = demand if demand > stock ....sales = stock3. Calculate profit using for loop according to profit = sales - stock....we assume remaining stock went bad 4. Using scipy model ...get the model y = ax + c for our data of profit and demand5. Draw a regression plot for the data6. Using linear_model to test again our regression if it matches7. prepare data for demand to be used to predict profit8. Predict the profit values and apppend them to the data9. Join the train and test data sets while fully complted9. Create a sqlite table and dump the data ###Code dir(sqlalchemy) #create a dictionary with our train data train = {"period_in_weeks":[1 ,2,3,4, 5, 6,7] , "Demand":[i*750 for i in [1500 , 2000 ,3500 ,5000 , 6000 , 6550 , 8000]]\ , "Stocks":[i*600 for i in [1500 , 2500 , 3000 , 4500 , 6500 , 7000 , 7550]]} train #convert dictionary to a dataframe train = pd.DataFrame(train) #check the size of records size = train.shape[0] train #convert the stocks and demand data into list #we assume the demanded unit is according to the current market price of 750 #and stock is amount at 650 per unit Demand =list(train["Demand"]) Stocks =list(train["Stocks"]) #create a sales list Sales = [] #function to update sales def addSales(data): for i in range(data.shape[0]): if Stocks[i]/600 == Demand[i]/750: Sales.append(Stocks[i]) elif Stocks[i]/600 < Demand[i]/750: Sales.append(Stocks[i]) elif Stocks[i]/600 > Demand[i]/750: Sales.append(Demand[i]) # print("yess") addSales(train) #create a column Sales and add the items in sales list train["Sales"] = Sales train #calculating profit # profit = (sales -stocks) #we assume that leftover goes bad #profit list Profit = [] #function to get profits def getProfit(data): for i in range(data.shape[0]): profit =(Sales[i]) - (Stocks[i]) Profit.append(profit) getProfit(train) #create the column Profit and populate it with data train["Profit"] = Profit # train["Stocks"] = list((train["Stocks"])) train test = {"period_in_weeks":[8,9,10,11] , "Demand":[i*750 for i in[8750 , 9000 , 9500 , 10000]]} test = pd.DataFrame(test) test # lets join the two tables df = pd.concat([train ,test] , axis = 0 , sort = False) df #checking the correlation bettween thedata posted df.corr() #from here we are able to see that #the data has all high correration that is positive except for profit which #shows verry little relationship with the rest #checking data description df.describe() train = df[:size] test = df[size:] from scipy import stats #lets plot regression plot # here a means coefficient of dependent variable # b means intercept # c means r # d means p values # e means standard error a , b, c , d , e = stats.linregress(train["Profit"] , train["Demand"]) print("y = {}x + {}".format(a , b)) print("\n\n Error is : {}".format(e)) sns.regplot(x=train["Profit"] , y=train["Demand"] , data = df) plt.show() #start making linear models from sklearn.linear_model import LinearRegression #prepare the datas to be used fro training X_train = train["Profit"].values[: , np.newaxis] Y_train = train["Demand"].values #create an instance for the model lm = LinearRegression() #fit your data and train the model lm.fit(X_train , Y_train) print("Our model is {}x + {}".format(lm.coef_ , lm.intercept_)) #prepare testing data Y_test = test["Demand"].values[: , np.newaxis] 
#predict the profit data prediction = lm.predict(Y_test) print(prediction) test["Profit"] = prediction test #lets make assumption that assume that they sales were same as Demand #since demand is directly related to sales test["Sales"] = test["Demand"] # sales + profit = stock test["Stocks"] = test["Sales"] + test["Profit"] test #join the two tables data = pd.concat([train ,test] , axis = 0 , sort = False ) data from sqlalchemy import create_engine engine = create_engine('sqlite:///:memory:') data.to_sql("Dsc_Table" , engine) #check database content read = pd.read_sql_query('SELECT * FROM Dsc_Table' , engine) print(read) ###Output index period_in_weeks Demand Stocks Sales Profit 0 0 1 1125000 9.000000e+05 900000.0 0.000000e+00 1 1 2 1500000 1.500000e+06 1500000.0 0.000000e+00 2 2 3 2625000 1.800000e+06 1800000.0 0.000000e+00 3 3 4 3750000 2.700000e+06 2700000.0 0.000000e+00 4 4 5 4500000 3.900000e+06 4500000.0 6.000000e+05 5 5 6 4912500 4.200000e+06 4912500.0 7.125000e+05 6 6 7 6000000 4.530000e+06 4530000.0 0.000000e+00 7 0 8 6562500 2.669423e+07 6562500.0 2.013173e+07 8 1 9 6750000 2.737127e+07 6750000.0 2.062127e+07 9 2 10 7125000 2.872534e+07 7125000.0 2.160034e+07 10 3 11 7500000 3.007941e+07 7500000.0 2.257941e+07
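###Markdown An alternative way to read steps 7 and 8 of the procedure is to regress Profit directly on Demand, so that the known test-period demand can be fed straight into the fitted model. The sketch below shows that variant; it assumes the ``train`` and ``test`` dataframes built above are still in memory and is offered as an illustration alongside the approach used above, not as a replacement for the stored results. ###Code
import numpy as np
from sklearn.linear_model import LinearRegression

# Predictor: demand; target: profit (both from the training rows above).
X_train = train['Demand'].values[:, np.newaxis]
y_train = train['Profit'].values

profit_model = LinearRegression()
profit_model.fit(X_train, y_train)

# Predict profit for the test-period demand directly.
X_test = test['Demand'].values[:, np.newaxis]
predicted_profit = profit_model.predict(X_test)

print("Profit = {:.4f} * Demand + {:.2f}".format(profit_model.coef_[0],
                                                 profit_model.intercept_))
print(predicted_profit)
###Output _____no_output_____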
_posts/scikit/Iso-probability-lines-for-gpc/Iso-Probability Iines for Gaussian Processes Classification .ipynb
###Markdown A two-dimensional classification example showing iso-probability lines for the predicted probabilities. New to Plotly?Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/).You can set up Plotly to work in [online](https://plot.ly/python/getting-started/initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/start-plotting-online).We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started! Version ###Code import sklearn sklearn.__version__ ###Output _____no_output_____ ###Markdown Imports This tutorial imports [GaussianProcessClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.GaussianProcessClassifier.htmlsklearn.gaussian_process.GaussianProcessClassifier) and [DotProduct](http://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.kernels.DotProduct.htmlsklearn.gaussian_process.kernels.DotProduct). ###Code import plotly.plotly as py import plotly.graph_objs as go import numpy as np from matplotlib import pyplot as plt from matplotlib import cm from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.gaussian_process.kernels import DotProduct, ConstantKernel as C ###Output _____no_output_____ ###Markdown Calculations ###Code # A few constants lim = 8 def g(x): """The function to predict (classification will then consist in predicting whether g(x) <= 0 or not)""" return 5. - x[:, 1] - .5 * x[:, 0] ** 2. # Design of experiments X = np.array([[-4.61611719, -6.00099547], [4.10469096, 5.32782448], [0.00000000, -0.50000000], [-6.17289014, -4.6984743], [1.3109306, -6.93271427], [-5.03823144, 3.10584743], [-2.87600388, 6.74310541], [5.21301203, 4.26386883]]) # Observations y = np.array(g(X) > 0, dtype=int) # Instanciate and fit Gaussian Process Model kernel = C(0.1, (1e-5, np.inf)) * DotProduct(sigma_0=0.1) ** 2 gp = GaussianProcessClassifier(kernel=kernel) gp.fit(X, y) print("Learned kernel: %s " % gp.kernel_) # Evaluate real function and the predicted probability res = 50 x_ = np.linspace(- lim, lim, res) y_ = np.linspace(- lim, lim, res) x1, x2 = np.meshgrid(x_, y_) xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T y_true = g(xx) y_prob = gp.predict_proba(xx)[:, 1] y_true = y_true.reshape((res, res)) y_prob = y_prob.reshape((res, res)) ###Output Learned kernel: 0.0256**2 * DotProduct(sigma_0=5.72) ** 2 ###Markdown Plot the probabilistic classification iso-values ###Code def matplotlib_to_plotly(cmap, pl_entries): h = 1.0/(pl_entries-1) pl_colorscale = [] for k in range(pl_entries): C = map(np.uint8, np.array(cmap(k*h)[:3])*255) pl_colorscale.append([k*h, 'rgb'+str((C[0], C[1], C[2]))]) return pl_colorscale cmap = matplotlib_to_plotly(cm.gray_r, 20) cax = go.Heatmap(x=x_, y=y_, z=y_prob, colorscale=cmap, ) trace1 = go.Scatter(x=X[y <= 0, 0], y=X[y <= 0, 1], mode='markers', marker=dict(color='red', size=10), showlegend=False) trace2 = go.Scatter(x=X[y > 0, 0], y=X[y > 0, 1], mode='markers', marker=dict(color='blue', size=10), showlegend=False) cs1 = go.Contour(x=x_, y=y_[:: -1], z=y_true, ncontours=2, contours=dict(coloring='lines', ), line=dict(width=1, dash='dashdot'), colorscale=[[0, 'black'], [1, 'white']], showscale=False) cs2 = 
go.Contour(x=x_, y=y_[:: -1], z=y_prob, ncontours=2, contours=dict(coloring='lines', end=0.667, start=0.666, size=0.01), line=dict(width=1), colorscale=[[0, 'blue'], [1, 'white']], showscale=False) cs3 = go.Contour(x=x_, y=y_, z=y_prob[:: -1], contours=dict(coloring='lines', end=0.51, start=0.5, size=0.1), line=dict(width=1, dash='dash'), colorscale=[[0, 'black'], [1, 'white']], showscale=False) cs4 = go.Contour(x=x_, y=y_, z=y_prob[:: -1], contours=dict(coloring='lines', end=0.335, start=0.334, size=0.1), line=dict(width=1), colorscale=[[0, 'red'], [1, 'white']], showscale=False) layout = go.Layout(yaxis=dict(autorange='reversed', title='x<sub>2</sub>'), xaxis=dict(title='x<sub>1</sub>'), hovermode='closest', annotations=[dict( x=2, y=4.5, xref='x', yref='y', text='0.666', showarrow=False, font=dict( family='Courier New, monospace', size=12, color='blue')), dict( x=2, y=0.9, xref='x', yref='y', text='0.5', showarrow=False, font=dict( family='Courier New, monospace', size=12, color='black')), dict( x=2, y=-2.2, xref='x', yref='y', text='0.334', showarrow=False, font=dict( family='Courier New, monospace', size=12, color='red')), ]) fig = go.Figure(data=[cax, cs1, cs2, cs3, cs4, trace1, trace2], layout=layout) py.iplot(fig) ###Output _____no_output_____ ###Markdown License Author: Vincent Dubourg Adapted to GaussianProcessClassifier: Jan Hendrik Metzen License: BSD 3 clause ###Code from IPython.display import display, HTML display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />')) display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">')) ! pip install git+https://github.com/plotly/publisher.git --upgrade import publisher publisher.publish( 'Iso-Probability Iines for Gaussian Processes Classification .ipynb', 'scikit-learn/plot-gpc-isoprobability/', 'Iso-Probability Iines for Gaussian Processes Classification | plotly', ' ', title = 'Iso-Probability Iines for Gaussian Processes Classification | plotly', name = 'Iso-Probability Iines for Gaussian Processes Classification ', has_thumbnail='true', thumbnail='thumbnail/isoprobability.jpg', language='scikit-learn', page_type='example_index', display_as='gaussian-process', order=6, ipynb= '~Diksha_Gabha/3132') ###Output _____no_output_____
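###Markdown As a smaller companion to the example above, the standalone sketch below (an addition, with its own toy data) fits a GaussianProcessClassifier with the same kind of kernel and prints class probabilities for two query points, which is exactly the quantity the iso-probability lines summarise. ###Code
import numpy as np
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import DotProduct, ConstantKernel as C

rng = np.random.RandomState(0)

# Toy two-dimensional binary problem: class 1 lies above the line x2 = x1.
X = rng.uniform(-5, 5, size=(40, 2))
y = (X[:, 1] > X[:, 0]).astype(int)

kernel = C(0.1, (1e-5, np.inf)) * DotProduct(sigma_0=0.1) ** 2
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)

queries = np.array([[3.0, -3.0], [-3.0, 3.0]])
for q, proba in zip(queries, gpc.predict_proba(queries)):
    print(f"x = {q}, P(class = 1) = {proba[1]:.3f}")
###Output _____no_output_____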
notebooks/13_range_data_model_causal_vs_non_causal.ipynb
###Markdown Comparison of causal and non-causal versions of model 1 on every second cell datasetThis notebook is used to investigate a causal and non-causal version of model 1 with the every second range cell input.As there are only two experiments, the number of epochs have also been increased. Notebook setup ###Code # Plot graphs inline %matplotlib inline ###Output _____no_output_____ ###Markdown The following cell is needed for compatibility when using both CoLab and Local Jupyter notebook. It sets the appropriate file path for the data. ###Code import os path = os.getcwd() if path == '/content': from google.colab import drive drive.mount('/content/gdrive') BASE_PATH = '/content/gdrive/My Drive/Level-4-Project/' os.chdir('gdrive/My Drive/Level-4-Project/') elif path == 'D:\\Google Drive\\Level-4-Project\\notebooks': BASE_PATH = "D:/Google Drive/Level-4-Project/" elif path == "/export/home/2192793m": BASE_PATH = "/export/home/2192793m/Level-4-Project/" DATA_PATH_MTI = BASE_PATH + 'data/processed/range_FFT/3/MTI_applied/' # not used DATA_PATH_NO_MTI = BASE_PATH + 'data/processed/range_FFT/3/MTI_not_applied/' RESULTS_PATH = BASE_PATH + 'results/range_data_model_causal_vs_non_causal/' if not os.path.exists(RESULTS_PATH): os.makedirs(RESULTS_PATH) from keras import backend as K import pandas as pd import numpy as np import matplotlib.pyplot as plt import pickle from keras.callbacks import CSVLogger from keras.utils import Sequence, to_categorical from keras.layers import Input, Conv1D, Multiply, Add, Activation, AveragePooling1D, Flatten, Dense from keras.models import load_model, Model from keras.callbacks import ModelCheckpoint import sys OVERWRITE_RESULTS = False OVERWRITE_GRAPHS = False ###Output _____no_output_____ ###Markdown Data Setup ###Code # Load in data dictionary. 
# This does not load in any actual data, # just the dictionary with the names of the files and their associated labels with open(DATA_PATH_NO_MTI + "index.pkl", "rb") as file: data = pickle.load(file) # Remove user C as this user is reserved for the test set try: del data["C"] except KeyError: print ("Key 'C' not found") def convert_label_to_int(label): """ Convert each label to an integer :param label: action label to convert :return: integer representation of the action """ if label == "walking": return 0 if label == "pushing": return 1 if label == "sitting": return 2 if label == "pulling": return 3 if label == "circling": return 4 if label == "clapping": return 5 if label == "bending": return 6 labels = {} partition = {'train': [], 'validation': []} # contains list of training and validation ID's validation_user = "B" # use user B for validation for user_letter, actions in data.items(): for action, results in actions.items(): for result in results: for row in result: if user_letter == validation_user: partition["validation"].append(row) labels[row] = convert_label_to_int(action) else: partition["train"].append(row) labels[row] = convert_label_to_int(action) target_names = ["walking", "pushing", "sitting", "pulling", "circling", "clapping", "bending"] nb_classes = len(target_names) ###Output _____no_output_____ ###Markdown DataGenerator ###Code class DataGenerator(Sequence): """ Based on code from https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly Keras data generator """ def __init__(self, list_IDs, labels, batch_size=32, dim=(3000), n_classes=7, shuffle=False, data_directory='data/', bin_range=(0,60), take_average=False, every_second_cell=False): """ Initialization :param list_IDs: IDs of files to train with :param labels: index to get associated label from file id :param batch_size: batch size :param dim: dimension of the input data :param n_classes: number of classes :param shuffle: shuffle data after each epoch toggle :param data_directory: path to the data :param bin_range: which range bins to use :param take_average: use the average of all cells toggle :param every_second_cell: use every second cell toggle """ self.dim = dim self.batch_size = batch_size self.labels = labels self.list_IDs = list_IDs self.n_classes = n_classes self.shuffle = shuffle self.data_directory = data_directory self.bin_range=bin_range self.take_average = take_average self.every_second_cell = every_second_cell self.indexes = None self.on_epoch_end() def __len__(self): """Denotes the number of batches per epoch""" return int(np.floor(len(self.list_IDs) / self.batch_size)) def __getitem__(self, index): """Generate one batch of data""" # Generate indexes of the batch indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size] # Find list of IDs list_IDs_temp = [self.list_IDs[k] for k in indexes] # Generate data X, y = self.__data_generation(list_IDs_temp) return X, y def on_epoch_end(self): """Updates indexes after each epoch""" self.indexes = np.arange(len(self.list_IDs)) if self.shuffle: np.random.shuffle(self.indexes) def __data_generation(self, list_IDs_temp): """Generates data containing batch_size samples""" # Initialization X = np.empty((self.batch_size, *self.dim)) y = np.empty((self.batch_size), dtype=int) # Generate data for i, ID in enumerate(list_IDs_temp): # Store sample if self.take_average: X[i,] = abs(np.average(np.load(self.data_directory + ID), axis=1)[:,np.newaxis]) elif self.every_second_cell: X[i,] = abs(np.load(self.data_directory + ID))[:,::2] else: 
X[i,] = abs(np.load(self.data_directory + ID))[:,self.bin_range[0]:self.bin_range[1]] # Store class y[i] = self.labels[ID] return X, to_categorical(y, num_classes=self.n_classes) ###Output _____no_output_____ ###Markdown Function to aid the exploration of models. ###Code def test_model(model, training_generator, validation_generator, epochs, csvlog_path, verbose=True, load_weights=False, weights_path=''): """ Train and test the model :param model: model to train and test :param training_generator: data generator for training the model :param validation_generator: data generator for validating the model :param epochs: number of epochs to train for :param csvlog_path: path to save results to :param verbose: verbose :param load_weights: load weights for the model from a checkpoint :param weights_path: path to trained model weights to load """ weights_path = MODEL_PATH + weights_path start_epoch = 0 if load_weights: model = load_model(weights_path) last_epoch = weights_path.split("-")[-3] start_epoch = int(last_epoch) model_path = csvlog_path.split(".")[-2] # remove the .csv if not load_weights: model.compile('adam', loss='categorical_crossentropy', metrics=['accuracy']) checkpoint = ModelCheckpoint(MODEL_PATH + model_path + "_epoch-{epoch:02d}-{val_acc:.2f}.hdf5", monitor='val_acc', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1) csv_logger = CSVLogger(RESULTS_PATH + csvlog_path, append=True) callbacks_list = [checkpoint, csv_logger] # Train model on dataset model.fit_generator(generator=training_generator, validation_data=validation_generator, use_multiprocessing=False, workers=1, epochs=epochs, callbacks=callbacks_list, initial_epoch=start_epoch, verbose=verbose) ###Output _____no_output_____ ###Markdown Visualize Results ###Code def visualize_results(csvlog_path, title, save=False, save_file_name=""): """ plot graph of training and validation results :param csvlog_path: path where results file is located :param title: title for the graph :param save: save the graph toggle :param save_file_name: name of file to save """ df = pd.read_csv(RESULTS_PATH + csvlog_path) epoch = df['epoch'] + 1 train = df["acc"] * 100 # convert to % val = df['val_acc'] * 100 # convert to % plt.figure() plt.plot(epoch, train, 'b--', marker='x', label='Training (Subjects A, D, E and F)') plt.plot(epoch, val, 'r-', marker='x', label='Validation (Subject B)') plt.xticks(epoch) plt.legend(loc='best') plt.xlabel('Epoch') plt.ylabel('Classification Accuracy (%)') plt.grid() plt.title(title) if save: plt.savefig(RESULTS_PATH + save_file_name, format='pdf') plt.show() def plot_loss(csvlog_path, title, save=False, save_file_name=""): """ plot graph of training and validation loss results :param csvlog_path: path where results file is located :param title: title for the graph :param save: save the graph toggle :param save_file_name: name of file to save """ df = pd.read_csv(RESULTS_PATH + csvlog_path) epoch = df['epoch'] +1 train = df["loss"] val = df['val_loss'] plt.figure() plt.plot(epoch, train, 'b--', marker='x', label='Training (Subjects A, D, E and F)') plt.plot(epoch, val, 'r-', marker='x', label='Validation (Subject B)') plt.xticks(epoch) plt.legend(loc='best') plt.xlabel('Epoch') plt.ylabel('Loss') plt.grid() plt.title(title) if save: plt.savefig(RESULTS_PATH + save_file_name, format='pdf') plt.show() ###Output _____no_output_____ ###Markdown Model 1: Wavenet model classification adaption from original paperKeras implementation of the WaveNet model based on implementations 
byhttps://github.com/basveeling/wavenet and https://github.com/mjpyeon/wavenet-classifierThis model has then been adapted to the classification task based on theinstructions from the paper "WAVENET: A GENERATIVE MODEL FOR RAW AUDIO" (https://arxiv.org/pdf/1609.03499.pdf)Specifically:"For this task we added a mean-pooling layer after the dilated convolutions that aggregated the activations to coarser frames spanning 10 milliseconds (160× downsampling). The pooling layer was followed by a few non-causal convolutions." ###Code class WaveNetClassifier: """ Keras implementation of the WaveNet model based on implementations by https://github.com/basveeling/wavenet and https://github.com/mjpyeon/wavenet-classifier This model has then been adapted to the classification task based on the instructions from the paper "WAVENET: A GENERATIVE MODEL FOR RAW AUDIO" (https://arxiv.org/pdf/1609.03499.pdf) """ def __init__(self, input_shape, output_shape, kernel_size=2, dilation_depth=9, nb_stacks=1, nb_filters=40, pool_size_1=80, pool_size_2=100, use_skip_connections=True, causal=True): """ Initialization :param input_shape: input shape of the data :param output_shape: number of classes :param kernel_size: kernel size for conv layers in stacks :param dilation_depth: number of dilated CNN layers per stack :param nb_stacks: number of stacks of dilated blocks :param nb_filters: number of filters for each conv layer :param pool_size_1: kernel size for first two conv layers after stacks :param pool_size_2: kernel size for third and fourth conv layers after stacks :param use_skip_connections: use skip connections toggle :param causal: use causal variant toggle """ self.activation = 'softmax' self.pool_size_1 = pool_size_1 self.pool_size_2 = pool_size_2 self.nb_stacks = nb_stacks self.kernel_size = kernel_size self.dilation_depth = dilation_depth self.nb_filters = nb_filters self.use_skip_connections = use_skip_connections self.input_shape = input_shape self.output_shape = output_shape if causal: self.padding = 'causal' else: self.padding = 'same' self.model = self.build_model() def residual_block(self, x, i, stack_nb): """ add a residual block :param x: current model :param i: dilation rate modifier :param stack_nb: stack number :return: model with residual block added, model for skip connection link """ original_x = x tanh_out = Conv1D(self.nb_filters, self.kernel_size, dilation_rate=2 ** i, padding=self.padding, name='dilated_conv_%d_tanh_s%d' % (2 ** i, stack_nb), activation='tanh')(x) sigm_out = Conv1D(self.nb_filters, self.kernel_size, dilation_rate=2 ** i, padding=self.padding, name='dilated_conv_%d_sigm_s%d' % (2 ** i, stack_nb), activation='sigmoid')(x) x = Multiply(name='gated_activation_%d_s%d' % (i, stack_nb))([tanh_out, sigm_out]) res_x = Conv1D(self.nb_filters, 1, padding='same')(x) skip_x = Conv1D(self.nb_filters, 1, padding='same')(x) res_x = Add()([original_x, res_x]) return res_x, skip_x def build_model(self): """ Build the model :return: keras model """ input_layer = Input(shape=self.input_shape, name='input_part') out = input_layer skip_connections = [] out = Conv1D(self.nb_filters, self.kernel_size, dilation_rate=1, padding=self.padding, name='initial_causal_conv' )(out) for stack_nb in range(self.nb_stacks): for i in range(0, self.dilation_depth + 1): out, skip_out = self.residual_block(out, i, stack_nb) skip_connections.append(skip_out) if self.use_skip_connections: out = Add()(skip_connections) out = Activation('relu')(out) # added a mean-pooling layer after the dilated convolutions that 
aggregated the activations to coarser frames # spanning 10 milliseconds (160× downsampling) # mean pooling layer adjust pool_size_1 to change downsampling out = AveragePooling1D(self.pool_size_1, padding='same', name='mean_pooling_layer_downsampling')(out) # few non-causal convolutions ''' *Update 15/02/2019* I have now realized that self.pool_size_1 and self.pool_size_2 actually represent kernel size here. Due to the length of time this notebook takes to run and as the values set worked this will not be rerun but has been updated in notebook 13 conducting the regularization investigation. ''' # out = Conv1D(self.nb_filters, self.pool_size_1, strides=2, padding='same', activation='relu')(out) out = Conv1D(self.nb_filters, self.pool_size_2, strides=2, padding='same', activation='relu')(out) out = Conv1D(self.output_shape, self.pool_size_2, strides=2, padding='same', activation='relu')(out) out = Conv1D(self.output_shape, self.pool_size_2, strides=2, padding='same', activation='relu')(out) out = Flatten()(out) out = Dense(512, activation='relu')(out) out = Dense(self.output_shape, activation='softmax')(out) return Model(input_layer, out) def get_model(self): return self.model def get_summary(self): self.model.summary() def get_receptive_field(self): """" Compute the receptive field of the model :return: receptive field """ k = self.kernel_size n = self.dilation_depth s = self.nb_stacks r_s = k + (2*(k-1)*((2**(n-1))-1)) # receptive field for one stack return (s*r_s) - (s-1) # total receptive field for 's' number of stacks ###Output _____no_output_____ ###Markdown Fixed Parameters ###Code # Try all bins to start with bin_range = (0, 63) data_shape = (3000, 32) n_filters = 64 activation = 'softmax' kernel_size = 2 pool_size_1 = 4 pool_size_2 = 8 batch_size = 16 epochs = 10 dilation_depth = 8 nb_stacks = 3 # Parameters for data generators params = {'dim': data_shape, 'batch_size': batch_size, 'n_classes': nb_classes, 'data_directory': DATA_PATH_NO_MTI, 'bin_range': bin_range, 'every_second_cell': True} ###Output _____no_output_____ ###Markdown Causal ###Code wnc = WaveNetClassifier((data_shape), (nb_classes), kernel_size=kernel_size, dilation_depth=dilation_depth, nb_stacks=nb_stacks, nb_filters=n_filters, pool_size_1=pool_size_1, pool_size_2=pool_size_2) wnc.build_model() model = wnc.get_model() training_generator = DataGenerator(partition['train'], labels, **params, shuffle=True) validation_generator = DataGenerator(partition['validation'], labels, **params, shuffle=False) csvlog_path = "causal.csv" if OVERWRITE_RESULTS: test_model(model, training_generator, validation_generator, epochs, csvlog_path, verbose=True) save_graph_path = csvlog_path.split('.')[-2] + ".pdf" visualize_results(csvlog_path, "Causal Model", save=OVERWRITE_GRAPHS, save_file_name=save_graph_path) plot_loss(csvlog_path, "Causal Model Loss") K.clear_session() ###Output _____no_output_____ ###Markdown Non-Causal ###Code wnc = WaveNetClassifier((data_shape), (nb_classes), kernel_size=kernel_size, dilation_depth=dilation_depth, nb_stacks=nb_stacks, nb_filters=n_filters, pool_size_1=pool_size_1, pool_size_2=pool_size_2, causal=False) wnc.build_model() model = wnc.get_model() training_generator = DataGenerator(partition['train'], labels, **params, shuffle=True) validation_generator = DataGenerator(partition['validation'], labels, **params, shuffle=False) csvlog_path = "non_causal.csv" if OVERWRITE_RESULTS: test_model(model, training_generator, validation_generator, epochs, csvlog_path, verbose=True) save_graph_path = 
csvlog_path.split('.')[-2] + ".pdf" visualize_results(csvlog_path, "Non-Causal Model", save=OVERWRITE_GRAPHS, save_file_name=save_graph_path) plot_loss(csvlog_path, "Non-Causal Model Loss") K.clear_session() ###Output _____no_output_____ ###Markdown From the two graphs it is clear that the causal model outperforms the non-causal. It also shows that overfitting is a clear issue. Smaller Dilation Comparison ###Code class WaveNetClassifier: """ Keras implementation of the WaveNet model based on implementations by https://github.com/basveeling/wavenet and https://github.com/mjpyeon/wavenet-classifier This model has then been adapted to the classification task based on the instructions from the paper "WAVENET: A GENERATIVE MODEL FOR RAW AUDIO" (https://arxiv.org/pdf/1609.03499.pdf) """ def __init__(self, input_shape, output_shape, kernel_size=2, dilation_depth=9, nb_stacks=1, nb_filters=40, pool_size=80, kernel_size_2=4, num_dense_nodes=512, use_skip_connections=True, causal=True): """ Initialization :param input_shape: input shape of the data :param output_shape: number of classes :param kernel_size: kernel size for conv layers in stacks :param dilation_depth: number of dilated CNN layers per stack :param nb_stacks: number of stacks of dilated blocks :param nb_filters: number of filters for each conv layer :param pool_size: pooling size for average pooling layer :param kernel_size_2: kernel size for conv layers after stacks :param use_skip_connections: use skip connections toggle :param causal: use causal variant of model toggle """ self.activation = 'softmax' self.pool_size = pool_size self.nb_stacks = nb_stacks self.kernel_size = kernel_size # for dilated layers self.kernel_size_2 = kernel_size_2 # for normal conv layers at end self.dilation_depth = dilation_depth self.nb_filters = nb_filters self.num_dense_nodes = num_dense_nodes self.use_skip_connections = use_skip_connections self.input_shape = input_shape self.output_shape = output_shape if causal: self.padding = 'causal' else: self.padding = 'same' self.model = self.build_model() def residual_block(self, x, i, stack_nb): """ add a residual block :param x: current model :param i: dilation rate modifier :param stack_nb: stack number :return: model with residual block added, model for skip connection link """ original_x = x tanh_out = Conv1D(self.nb_filters, 2, dilation_rate=2 ** i, padding=self.padding, name='dilated_conv_%d_tanh_s%d' % (2 ** i, stack_nb), activation='tanh')(x) sigm_out = Conv1D(self.nb_filters, 2, dilation_rate=2 ** i, padding=self.padding, name='dilated_conv_%d_sigm_s%d' % (2 ** i, stack_nb), activation='sigmoid')(x) x = Multiply(name='gated_activation_%d_s%d' % (i, stack_nb))([tanh_out, sigm_out]) res_x = Conv1D(self.nb_filters, 1, padding='same')(x) skip_x = Conv1D(self.nb_filters, 1, padding='same')(x) res_x = Add()([original_x, res_x]) return res_x, skip_x def build_model(self): """ Build the model :return: keras model """ input_layer = Input(shape=self.input_shape, name='input_part') out = input_layer skip_connections = [] out = Conv1D(self.nb_filters, 2, dilation_rate=1, padding=self.padding, name='initial_causal_conv' )(out) for stack_nb in range(self.nb_stacks): for i in range(0, self.dilation_depth + 1): out, skip_out = self.residual_block(out, i, stack_nb) skip_connections.append(skip_out) if self.use_skip_connections: out = Add()(skip_connections) out = Activation('relu')(out) # added a mean-pooling layer after the dilated convolutions that aggregated the activations to coarser frames # spanning 10 
milliseconds (160× downsampling) # mean pooling layer adjust pool_size to change downsampling out = AveragePooling1D(self.pool_size, padding='same', name='mean_pooling_layer_downsampling')(out) # few non-causal convolutions # In notebooks 11, 12 and 13 self.kernel_size_2 was incorrectly represented as pooling sizes. out = Conv1D(self.nb_filters, self.kernel_size_2, strides=2, padding='same', activation='relu')(out) out = Conv1D(self.nb_filters, self.kernel_size_2, strides=2, padding='same', activation='relu')(out) out = Conv1D(self.output_shape, self.kernel_size_2, strides=2, padding='same', activation='relu')(out) out = Conv1D(self.output_shape, self.kernel_size_2, strides=2, padding='same', activation='relu')(out) out = Flatten()(out) out = Dense(self.num_dense_nodes, activation='relu')(out) out = Dense(self.output_shape, activation='softmax')(out) return Model(input_layer, out) def get_model(self): return self.model def get_summary(self): self.model.summary() def get_receptive_field(self): """ Compute the receptive field of the model :return: receptive field """ k = self.kernel_size n = self.dilation_depth s = self.nb_stacks r_s = k + (2*(k-1)*((2**(n-1))-1)) # receptive field for one stack return (s*r_s) - (s-1) # total receptive field for 's' number of stacks dilation_depth = 8 nb_stacks = 1 pool_size = 4 kernel_size_2 = 8 num_dense_nodes = 512 # same as before ###Output _____no_output_____ ###Markdown Causal v2 ###Code wnc = WaveNetClassifier((data_shape), (nb_classes), kernel_size=kernel_size, dilation_depth=dilation_depth, nb_stacks=nb_stacks, nb_filters=n_filters, pool_size=pool_size, kernel_size_2=kernel_size_2, num_dense_nodes=num_dense_nodes) wnc.build_model() model = wnc.get_model() training_generator = DataGenerator(partition['train'], labels, **params, shuffle=True) validation_generator = DataGenerator(partition['validation'], labels, **params, shuffle=False) csvlog_path = "causal_v2.csv" if OVERWRITE_RESULTS: test_model(model, training_generator, validation_generator, epochs, csvlog_path, verbose=True) save_graph_path = csvlog_path.split('.')[-2] + ".pdf" visualize_results(csvlog_path, "Causal Model", save=OVERWRITE_GRAPHS, save_file_name=save_graph_path) K.clear_session() ###Output _____no_output_____ ###Markdown Non-Causal v2 ###Code wnc = WaveNetClassifier((data_shape), (nb_classes), kernel_size=kernel_size, dilation_depth=dilation_depth, nb_stacks=nb_stacks, nb_filters=n_filters, pool_size=pool_size, kernel_size_2=kernel_size_2, num_dense_nodes=num_dense_nodes, causal=False) wnc.build_model() model = wnc.get_model() training_generator = DataGenerator(partition['train'], labels, **params, shuffle=True) validation_generator = DataGenerator(partition['validation'], labels, **params, shuffle=False) csvlog_path = "non_causal_v2.csv" if OVERWRITE_RESULTS: test_model(model, training_generator, validation_generator, epochs, csvlog_path, verbose=True) save_graph_path = csvlog_path.split('.')[-2] + ".pdf" visualize_results(csvlog_path, "Non-Causal Model", save=OVERWRITE_GRAPHS, save_file_name=save_graph_path) ###Output _____no_output_____ ###Markdown As shown, the causal model appears to still outperform the non-causal. Both versions appear to suffer signifcantly from overfitting for later epochs. This is addressed by regularization techniques investigated in notebook 14. 
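###Markdown One more way to compare the two architectures discussed above is through their receptive fields. The sketch below simply re-evaluates the formula implemented in ``get_receptive_field`` for the configurations used in this notebook (kernel size 2, dilation depth 8, with one or three stacks); it is a convenience addition rather than part of the original experiments. ###Code
def receptive_field(kernel_size, dilation_depth, nb_stacks):
    """Same formula as WaveNetClassifier.get_receptive_field above."""
    k, n, s = kernel_size, dilation_depth, nb_stacks
    r_s = k + (2 * (k - 1) * ((2 ** (n - 1)) - 1))  # receptive field for one stack
    return (s * r_s) - (s - 1)                      # total for s stacks

for stacks in (1, 3):
    rf = receptive_field(kernel_size=2, dilation_depth=8, nb_stacks=stacks)
    print(f"stacks={stacks}: receptive field = {rf} samples")
###Output _____no_output_____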
Combined Graphs ###Code causal = pd.read_csv(RESULTS_PATH + "causal.csv") non_causal = pd.read_csv(RESULTS_PATH + "non_causal.csv") causal_v2 = pd.read_csv(RESULTS_PATH + "causal_v2.csv") non_causal_v2 = pd.read_csv(RESULTS_PATH + "non_causal_v2.csv") epochs = causal['epoch'] + 1 plt.plot(epochs, causal_v2['acc'] * 100, 'r', label="Causal: stacks 1, blocks 8", marker='x') plt.plot(epochs, causal['acc'] * 100, 'c', label="Causal: stacks 3, blocks 8", marker='x') plt.plot(epochs, non_causal_v2['acc'] * 100, 'g--', label="Non-Causal: stacks 1, blocks 8", marker='x') plt.plot(epochs, non_causal['acc'] * 100, 'b--', label="Non-Causal: stacks 3, blocks 8", marker='x') plt.xticks(epochs) plt.ylim((0, 100)) plt.xlabel("Epoch") plt.ylabel("Classification Accuracy (%)") plt.title("Training Accuracy Comparison") plt.grid(True) plt.legend(loc=4) plt.tight_layout() if OVERWRITE_GRAPHS: plt.savefig(RESULTS_PATH + 'train_acc.pdf', format='pdf') epochs = causal['epoch'] + 1 plt.plot(epochs, causal_v2['val_acc'] * 100, 'r', label="Causal: stacks 1, blocks 8", marker='x') plt.plot(epochs, causal['val_acc'] * 100, 'c', label="Causal: stacks 3, blocks 8", marker='x') plt.plot(epochs, non_causal_v2['val_acc'] * 100, 'g--', label="Non-Causal: stacks 1, blocks 8", marker='x') plt.plot(epochs, non_causal['val_acc'] * 100, 'b--', label="Non-Causal: stacks 3, blocks 8", marker='x') plt.xticks(epochs) plt.ylim((0, 100)) plt.xlabel("Epoch") plt.ylabel("Classification Accuracy (%)") plt.title("Validation Accuracy Comparison") plt.grid(True) plt.legend() plt.tight_layout() if OVERWRITE_GRAPHS: plt.savefig(RESULTS_PATH + 'val_acc.pdf', format='pdf') ###Output _____no_output_____
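###Markdown The two accuracy plots above differ only in which column is drawn ('acc' versus 'val_acc'). As a sketch (assuming the four result DataFrames loaded above are still in scope), the duplicated plotting code can be folded into one small helper: ###Code
import matplotlib.pyplot as plt

def plot_accuracy(column, title, out_name=None):
    """Plot one accuracy column ('acc' or 'val_acc') for all four logged runs."""
    epochs = causal['epoch'] + 1
    plt.plot(epochs, causal_v2[column] * 100, 'r', label="Causal: stacks 1, blocks 8", marker='x')
    plt.plot(epochs, causal[column] * 100, 'c', label="Causal: stacks 3, blocks 8", marker='x')
    plt.plot(epochs, non_causal_v2[column] * 100, 'g--', label="Non-Causal: stacks 1, blocks 8", marker='x')
    plt.plot(epochs, non_causal[column] * 100, 'b--', label="Non-Causal: stacks 3, blocks 8", marker='x')
    plt.xticks(epochs)
    plt.ylim((0, 100))
    plt.xlabel("Epoch")
    plt.ylabel("Classification Accuracy (%)")
    plt.title(title)
    plt.grid(True)
    plt.legend()
    plt.tight_layout()
    if OVERWRITE_GRAPHS and out_name is not None:
        plt.savefig(RESULTS_PATH + out_name, format='pdf')
    plt.show()

plot_accuracy('acc', "Training Accuracy Comparison", 'train_acc.pdf')
plot_accuracy('val_acc', "Validation Accuracy Comparison", 'val_acc.pdf')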
3. 深度学习基础 - 练习.ipynb
###Markdown Exercise 1: Implement a linear regression model with PyTorch ###Code import torch import torch.nn as nn import torch.optim as optim from torch.nn import init from torch.utils.data import Dataset, DataLoader, TensorDataset import numpy as np true_w = 10 true_b = 4 features = torch.randn(1000, 1) labels = true_w * features + true_b labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size())) ###Output _____no_output_____ ###Markdown **Dataset, TensorDataset and DataLoader in PyTorch** 1. Dataset: a wrapper class used to wrap data as a Dataset object, which is then passed to a DataLoader so the data can be handled more conveniently. 2. DataLoader: commonly used options are batch_size (size of each batch), shuffle (whether to shuffle the data) and num_workers (number of subprocesses used when loading the data). 3. TensorDataset: wraps tensors into a Dataset via TensorDataset(data_tensor, target_tensor). ###Code data_set = TensorDataset(features, labels) data_iter = DataLoader(data_set, batch_size = 128, shuffle = True) class MYLinearNet(nn.Module): def __init__(self, n_feature): super(MYLinearNet, self).__init__() self.linear = nn.Linear(n_feature, 1) def forward(self, x): y = self.linear(x) return y n_feature = 1 net = MYLinearNet(n_feature) print(net) init.normal_(net.linear.weight, mean=0, std=0.01) init.constant_(net.linear.bias, val=0) loss = nn.MSELoss() optimizer = optim.SGD(net.parameters(), lr=0.03) for epoch in range(10): for X, y in data_iter: output = net(X) l = loss(output, y) optimizer.zero_grad() l.backward() optimizer.step() print("epoch is %s, loss is %s" %(epoch, l.item())) print("weight true value is %s, train value is %s" % (true_w, net.linear.weight.item())) print("bias true value is %s, train value is %s" % (true_b, net.linear.bias.item())) ###Output weight true value is 10, train value is 9.949129104614258 bias true value is 4, train value is 3.9797616004943848
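###Markdown As a sanity check (a sketch that assumes the `features` and `labels` tensors from the cells above are still in memory), the same weight and bias can also be recovered in closed form with the normal equations, without any gradient descent: ###Code
# Ordinary least squares in closed form: theta = (X^T X)^{-1} X^T y
X_design = torch.cat([features, torch.ones_like(features)], dim=1)  # append a column of ones for the bias term
theta = torch.inverse(X_design.t() @ X_design) @ X_design.t() @ labels
print("closed-form weight is %s, closed-form bias is %s" % (theta[0].item(), theta[1].item()))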
networking/ipaddress.ipynb
###Markdown The ipaddress module includes classes for working with IPv4 and IPv6 network addresses. The classes support validation, finding addresses and hosts on a network, and other common operations. Addresses The most basic object represents the network address itself. Pass a string, integer, or byte sequence to ip_address() to construct an address. The return value will be a IPv4Address or IPv6Address instance, depending on the type of address being used. ###Code import binascii import ipaddress ADDRESSES = [ '10.9.0.6', 'fdfd:87b5:b475:5e3e:b1bc:e121:a8eb:14aa', ] for ip in ADDRESSES: addr = ipaddress.ip_address(ip) print('{!r}'.format(addr)) print(' IP version:', addr.version) print(' is private:', addr.is_private) print(' packed form:', binascii.hexlify(addr.packed)) print(' integer:', int(addr)) print() ###Output IPv4Address('10.9.0.6') IP version: 4 is private: True packed form: b'0a090006' integer: 168361990 IPv6Address('fdfd:87b5:b475:5e3e:b1bc:e121:a8eb:14aa') IP version: 6 is private: True packed form: b'fdfd87b5b4755e3eb1bce121a8eb14aa' integer: 337611086560236126439725644408160982186 ###Markdown Both classes can provide various representations of the address for different purposes, as well as answer basic assertions such as whether the address is reserved for multicast communication or if it is on a private network. Networks A network is defined by a range of addresses. It is usually expressed with a base address and a mask indicating which portions of the address represent the network, and which portions are remaining to represent addresses on that network. The mask can be expressed explicitly, or using a prefix length value as in the example below. ###Code import ipaddress NETWORKS = [ '10.9.0.0/24', 'fdfd:87b5:b475:5e3e::/64', ] for n in NETWORKS: net = ipaddress.ip_network(n) print('{!r}'.format(net)) print(' is private:', net.is_private) print(' broadcast:', net.broadcast_address) print(' compressed:', net.compressed) print(' with netmask:', net.with_netmask) print(' with hostmask:', net.with_hostmask) print(' num addresses:', net.num_addresses) print() ###Output IPv4Network('10.9.0.0/24') is private: True broadcast: 10.9.0.255 compressed: 10.9.0.0/24 with netmask: 10.9.0.0/255.255.255.0 with hostmask: 10.9.0.0/0.0.0.255 num addresses: 256 IPv6Network('fdfd:87b5:b475:5e3e::/64') is private: True broadcast: fdfd:87b5:b475:5e3e:ffff:ffff:ffff:ffff compressed: fdfd:87b5:b475:5e3e::/64 with netmask: fdfd:87b5:b475:5e3e::/ffff:ffff:ffff:ffff:: with hostmask: fdfd:87b5:b475:5e3e::/::ffff:ffff:ffff:ffff num addresses: 18446744073709551616 ###Markdown As with addresses, there are two network classes for IPv4 and IPv6 networks. Each class provides properties or methods for accessing values associated with the network such as the broadcast address and the addresses on the network available for hosts to use. A network instance is iterable, and yields the addresses on the network. ###Code import ipaddress NETWORKS = [ '10.9.0.0/24', 'fdfd:87b5:b475:5e3e::/64', ] for n in NETWORKS: net = ipaddress.ip_network(n) print('{!r}'.format(net)) for i, ip in zip(range(3), net): print(ip) print() ###Output IPv4Network('10.9.0.0/24') 10.9.0.0 10.9.0.1 10.9.0.2 IPv6Network('fdfd:87b5:b475:5e3e::/64') fdfd:87b5:b475:5e3e:: fdfd:87b5:b475:5e3e::1 fdfd:87b5:b475:5e3e::2 ###Markdown Iterating over the network yields addresses, but not all of them are valid for hosts. For example, the base address of the network and the broadcast address are both included. 
To find the addresses that can be used by regular hosts on the network, use the hosts() method, which produces a generator. ###Code import ipaddress NETWORKS = [ '10.9.0.0/24', 'fdfd:87b5:b475:5e3e::/64', ] for n in NETWORKS: net = ipaddress.ip_network(n) print('{!r}'.format(net)) for i, ip in zip(range(3), net.hosts()): print(ip) print() ###Output IPv4Network('10.9.0.0/24') 10.9.0.1 10.9.0.2 10.9.0.3 IPv6Network('fdfd:87b5:b475:5e3e::/64') fdfd:87b5:b475:5e3e::1 fdfd:87b5:b475:5e3e::2 fdfd:87b5:b475:5e3e::3 ###Markdown In addition to the iterator protocol, networks support the in operator to determine if an address is part of a network. ###Code import ipaddress NETWORKS = [ ipaddress.ip_network('10.9.0.0/24'), ipaddress.ip_network('fdfd:87b5:b475:5e3e::/64'), ] ADDRESSES = [ ipaddress.ip_address('10.9.0.6'), ipaddress.ip_address('10.7.0.31'), ipaddress.ip_address( 'fdfd:87b5:b475:5e3e:b1bc:e121:a8eb:14aa' ), ipaddress.ip_address('fe80::3840:c439:b25e:63b0'), ] for ip in ADDRESSES: for net in NETWORKS: if ip in net: print('{}\nis on {}'.format(ip, net)) break else: print('{}\nis not on a known network'.format(ip)) print() ###Output 10.9.0.6 is on 10.9.0.0/24 10.7.0.31 is not on a known network fdfd:87b5:b475:5e3e:b1bc:e121:a8eb:14aa is on fdfd:87b5:b475:5e3e::/64 fe80::3840:c439:b25e:63b0 is not on a known network ###Markdown Interfaces A network interface represents a specific address on a network and can be represented by a host address and a network prefix or netmask. ###Code import ipaddress ADDRESSES = [ '10.9.0.6/24', 'fdfd:87b5:b475:5e3e:b1bc:e121:a8eb:14aa/64', ] for ip in ADDRESSES: iface = ipaddress.ip_interface(ip) print('{!r}'.format(iface)) print('network:\n ', iface.network) print('ip:\n ', iface.ip) print('IP with prefixlen:\n ', iface.with_prefixlen) print('netmask:\n ', iface.with_netmask) print('hostmask:\n ', iface.with_hostmask) print() ###Output IPv4Interface('10.9.0.6/24') network: 10.9.0.0/24 ip: 10.9.0.6 IP with prefixlen: 10.9.0.6/24 netmask: 10.9.0.6/255.255.255.0 hostmask: 10.9.0.6/0.0.0.255 IPv6Interface('fdfd:87b5:b475:5e3e:b1bc:e121:a8eb:14aa/64') network: fdfd:87b5:b475:5e3e::/64 ip: fdfd:87b5:b475:5e3e:b1bc:e121:a8eb:14aa IP with prefixlen: fdfd:87b5:b475:5e3e:b1bc:e121:a8eb:14aa/64 netmask: fdfd:87b5:b475:5e3e:b1bc:e121:a8eb:14aa/ffff:ffff:ffff:ffff:: hostmask: fdfd:87b5:b475:5e3e:b1bc:e121:a8eb:14aa/::ffff:ffff:ffff:ffff
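###Markdown Because an interface object carries both the host address and its network, the two parts can be pulled apart and combined with the membership test from the earlier section. A short sketch tying the two together: ###Code
import ipaddress

iface = ipaddress.ip_interface('10.9.0.6/24')

# iface.ip is an IPv4Address and iface.network an IPv4Network,
# so the 'in' operator from the networks section applies directly.
print(iface.ip in iface.network)
print(ipaddress.ip_address('10.7.0.31') in iface.network)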
Sampling and inferential statistics.ipynb
###Markdown **Sampling and inferential statistics** ###Code import numpy as np import pandas as pd import matplotlib.pyplot as plt import random %matplotlib inline pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', None) ###Output _____no_output_____ ###Markdown **What are population and sampling** ###Code import pandas as pd df = pd.read_excel("PopulationEstimates.xls",skiprows=2) dfTX = df[df["State"]=="TX"].tail(-1) dfTX.head() dfTX.tail() ###Output _____no_output_____ ###Markdown **The safer sampling Ways** Simple random sampling Total number of counties ###Code plt.figure(figsize=(10,6)) plt.rcParams.update({'font.size': 22}) plt.hist(dfTX["POP_ESTIMATE_2018"],bins=100) plt.title("Total number of counties: {}".format(len(dfTX["POP_ESTIMATE_2018"]))) plt.axvline(np.mean(dfTX["POP_ESTIMATE_2018"]),c="r",linestyle="--") plt.xlabel("Population") plt.ylabel("Count"); ###Output _____no_output_____ ###Markdown The average population of a county ###Code np.mean(dfTX["POP_ESTIMATE_2018"]) random.seed(2020) plt.figure(figsize=(10,6)) sample = random.sample(dfTX["POP_ESTIMATE_2018"].to_list(),25) plt.hist(sample,bins=100) plt.axvline(np.mean(sample),c="r") plt.title("Mean of sample population: {}".format(np.mean(sample))) plt.xlabel("Population") plt.ylabel("Count"); ###Output _____no_output_____ ###Markdown Distribution of sample mean ###Code numSample = 25 trials = 100 random.seed(2020) sampleMeans = [] for i in range(trials): sample = random.sample(dfTX["POP_ESTIMATE_2018"].to_list(),numSample) sampleMeans.append(np.mean(sample)) plt.figure(figsize=(10,8)) plt.hist(sampleMeans,bins=25) plt.title("Distribution of the {} sample means for sample size of {}".format(trials, numSample)) plt.gca().xaxis.set_tick_params(rotation=45) plt.xlabel("Sample Mean") plt.ylabel("Count"); ###Output _____no_output_____ ###Markdown Change of sample size ###Code numSamples = [25,100] colors = ["r","b"] trials = 1000 random.seed(2020) plt.figure(figsize=(10,8)) sampleMeans = [] for j in range(len(numSamples)): for i in range(trials): sample = random.sample(dfTX["POP_ESTIMATE_2018"].to_list(),numSamples[j]) sampleMeans.append(np.mean(sample)) plt.hist(sampleMeans,color=colors[j],alpha=0.5,bins=25,label="sample size: {}".format(numSamples[j]),density=True) plt.legend() plt.gca().xaxis.set_tick_params(rotation=45) plt.title("Distribution density of means of 1000 SRS, \nwith respect to sample sizes") plt.xlabel("Sample Mean") plt.ylabel("Density"); ###Output _____no_output_____ ###Markdown Change of trials ###Code numSample = 100 colors = ["r","b"] trials = [1000,5000] random.seed(2020) plt.figure(figsize=(10,8)) sampleMeans = [] for j in range(len(trials)): for i in range(trials[j]): sample = random.sample(dfTX["POP_ESTIMATE_2018"].to_list(),numSample) sampleMeans.append(np.mean(sample)) plt.hist(sampleMeans,color=colors[j],alpha=0.5,bins=25,label="trials: {}".format(trials[j]),density=True) plt.legend(); plt.title("Distribution density of means of 1000 SRS and 5000 SRS") plt.xlabel("Sample Mean") plt.ylabel("Density"); ###Output _____no_output_____ ###Markdown **Stratified random sampling** ###Code from collections import Counter Counter(dfTX["Rural-urban_Continuum Code_2013"]) random.seed(2020) sampleMeans = [] trial = 1 samples = [] for i in range(trial): for level in sorted(np.unique(dfTX["Rural-urban_Continuum Code_2013"])): samples += random.sample(dfTX[dfTX["Rural-urban_Continuum Code_2013"]==level]["POP_ESTIMATE_2018"].to_list(),4) sampleMeans.append(np.mean(samples)) sampleMeans 
random.seed(2020) sampleMeans = [] trial = 1000 samples = [] for i in range(trial): for level in sorted(np.unique(dfTX["Rural-urban_Continuum Code_2013"])): samples += random.sample(dfTX[dfTX["Rural-urban_Continuum Code_2013"]==level]["POP_ESTIMATE_2018"].to_list(),4) sampleMeans.append(np.mean(samples)) plt.figure(figsize=(10,8)) plt.hist(sampleMeans,bins=25); plt.title("Sample mean distribution, with stratified random sampling ") plt.gca().xaxis.set_tick_params(rotation=45) plt.xlabel("Sample Mean") plt.ylabel("Count"); ###Output _____no_output_____ ###Markdown check the mean of each group ###Code plt.figure(figsize=(10,8)) levels = [] codeMeans = [] for level in sorted(np.unique(dfTX["Rural-urban_Continuum Code_2013"])): codeMean = np.mean(dfTX[dfTX["Rural-urban_Continuum Code_2013"]==level]["POP_ESTIMATE_2018"]) levels.append(level) codeMeans.append(codeMean) plt.plot(levels,codeMeans,marker=10,markersize=20) plt.title("Urbanization level code versus mean population") plt.xlabel("Urbanization level code (2013)") plt.ylabel("Population mean"); ###Output _____no_output_____ ###Markdown Systematic random sampling ###Code random.seed(2020) idx = random.randint(0,10) populations = dfTX["POP_ESTIMATE_2018"].to_list() samples = [] samples.append(populations[idx]) while idx + 10 < len(populations): idx += 10 samples.append(populations[idx]) print(np.mean(samples)) ###Output 158799.64 ###Markdown **Sampling distribution of statistics** Theoretical mean and standard deviation ###Code print(np.mean([i for i in range(1,11)])) print(np.sqrt(np.mean([(i-5.5)**2 for i in range(1,11)]))) ###Output 5.5 2.8722813232690143 ###Markdown **Sampling distribution of the sample mean** ###Code trials = 100 sampleSize = 4 random.seed(2020) sampleMeans = [] candidates = [i for i in range(1,11)] plt.rcParams.update({'font.size': 18}) for i in range(trials): sampleMean = np.mean([random.choice(candidates) for _ in range(sampleSize)]) sampleMeans.append(sampleMean) plt.figure(figsize=(10,6)) plt.hist(sampleMeans, bins=25); plt.axvline(5.5,c="r", linestyle="--") plt.title("Sample mean distribution, trial: {}, sample size: {}".format(trials, sampleSize)) plt.xlabel("Sample mean") plt.ylabel("Count"); np.mean(sampleMeans) ###Output _____no_output_____ ###Markdown **Increase the number of samples** ###Code def obtainSampleMeans(trials = 100, sampleSize = 4): sampleMeans = [] candidates = [i for i in range(1,11)] for i in range(trials): sampleMean = np.mean([random.choice(candidates) for _ in range(sampleSize)]) sampleMeans.append(sampleMean) return sampleMeans random.seed(2020) figure, axes = plt.subplots(4,1,figsize=(8,16)) figure.tight_layout() times = [4,16,64,100] for i in range(len(times)): sampleMeans = obtainSampleMeans(100*times[i],4) axes[i].hist(sampleMeans,bins=40,density= True); axes[i].axvline(5.5,c="r") axes[i].set_title("Sample mean distribution, trial: {}, sample size: {}".format(100*times[i], 4)); print("trials: {}, mean: {}, std: {}".format(times[i]*100, np.mean(sampleMeans),np.std(sampleMeans))) ###Output trials: 400, mean: 5.64, std: 1.4078218992472025 trials: 1600, mean: 5.53390625, std: 1.4563112832464553 trials: 6400, mean: 5.4877734375, std: 1.4309896472527093 trials: 10000, mean: 5.51135, std: 1.4457899838842432 ###Markdown **Increase sample sizes** ###Code random.seed(2020) sizes = [2**k for k in range(1,9)] figure, axes = plt.subplots(8,1,figsize=(8,4*8)) figure.tight_layout() for i in range(len(sizes)): sampleMeans = obtainSampleMeans(6400,sizes[i]) 
axes[i].hist(sampleMeans,bins=np.linspace(np.min(sampleMeans),np.max(sampleMeans),40),density= True); axes[i].axvline(5.5,c="r", linestyle="--") axes[i].set_title("Sample mean distribution, trial: {}, sample size: {}".format(6400, sizes[i])); axes[i].set_xlim(0,10) print("mean: {}, std: {}".format(np.mean(sampleMeans),np.std(sampleMeans))) ###Output mean: 5.521953125, std: 2.008562358455105 mean: 5.498046875, std: 1.4431059486582176 mean: 5.5065625, std: 1.026083602146409 mean: 5.4973046875, std: 0.722252039615 mean: 5.49095703125, std: 0.5078149154840389 mean: 5.4991259765625, std: 0.3672720550902535 mean: 5.503431396484375, std: 0.25349930497350254 mean: 5.501925659179688, std: 0.1785229416136657 ###Markdown **Standard error of the sample mean** ###Code random.seed(2020) sizes = [2**k for k in range(1,9)] ses = [] for i in range(len(sizes)): sampleMeans = obtainSampleMeans(6400,sizes[i]) ses.append(np.std(sampleMeans)) plt.figure(figsize=(8,6)) plt.plot(sizes,ses) plt.title("Standard Error of Sample Mean Versus Sample Size") plt.xlabel("Sample Size") plt.ylabel("Standard Error of Sample Mean"); ###Output _____no_output_____ ###Markdown **Transform the Scales** ###Code plt.figure(figsize=(8,6)) plt.plot(sizes,[1/ele**2 for ele in ses]) plt.title("Inverse of the Square of Standard Error \nversus Sample Size") plt.xlabel("Sample Size") plt.ylabel("Transformed Standard Error of Sample Mean"); ###Output _____no_output_____
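###Markdown The linear trend in the transformed plot follows from the standard-error formula SE = σ/√n. As a quick check (a sketch assuming the `sizes` and `ses` lists from the previous cells are still defined), the simulated standard errors can be compared against the theoretical values computed from the population standard deviation σ ≈ 2.872 found earlier: ###Code
sigma = np.sqrt(np.mean([(i - 5.5) ** 2 for i in range(1, 11)]))  # population std of the uniform 1..10 draw

for n, se in zip(sizes, ses):
    print("sample size: {:3d}, simulated SE: {:.4f}, theoretical sigma/sqrt(n): {:.4f}".format(n, se, sigma / np.sqrt(n)))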
emotion-classification/notebooks/VentGoEmotionsInstanceCountAnalysis.ipynb
###Markdown GoEmotions Instance Count AnalysisCount the avg. instances per class in GoEmotions to build a comparable dataset ###Code import pandas as pd vent = pd.read_parquet('../preprocessed/vent-robust.parquet') goemotions = pd.read_parquet('../preprocessed/GoEmotions.parquet') vents_per_emotion = vent.groupby('emotions_label').size().mean() vent_emotions = len(vent.emotions_label.unique()) exploded = goemotions.emotions.explode().to_frame() comments_per_emotion = exploded.groupby('emotions').size().mean() goem_emotions = len(exploded.emotions.unique()) equivalent_vents = comments_per_emotion * vent_emotions vent_sample_size = equivalent_vents / len(vent) print(f'There are {goem_emotions} emotions in GoEmotions with {comments_per_emotion} comments on average.') print(f'The equivalent dataset needs {equivalent_vents} vents, or {100 * vent_sample_size:.2f}% of Vent.') import sys sys.path.append('../src') from utils.split import sorted_splits splits = sorted_splits(vent.sample(frac=vent_sample_size), 'created_at', [0.8, 0.1, 0.1]) for df, split_name in zip(splits, ['train', 'valid', 'test']): df['split'] = split_name print(splits[0].created_at.min(), '-', splits[0].created_at.max(), '\n', splits[1].created_at.min(), '-', splits[1].created_at.max(), '\n', splits[2].created_at.min(), '-', splits[2].created_at.max()) full_robust_sample = pd.concat(splits, ignore_index=True) full_robust_sample.to_parquet('../preprocessed/vent-robust-equivalent-sample.parquet') full_splits = sorted_splits(vent.sample(frac=1.0), 'created_at', [0.8, 0.1, 0.1]) for df, split_name in zip(full_splits, ['train', 'valid', 'test']): df['split'] = split_name full_robust_sample = pd.concat(full_splits, ignore_index=True) full_robust_sample.to_parquet('../preprocessed/vent-robust-splits.parquet') ###Output _____no_output_____
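###Markdown Because GoEmotions comments can carry several emotion labels, the per-class average above counts a comment once for every label it has. The toy sketch below (made-up rows, purely illustrative) shows what `explode` followed by `groupby(...).size()` does to such a multi-label column: ###Code
import pandas as pd

toy = pd.DataFrame({'text': ['a', 'b', 'c'],
                    'emotions': [['joy'], ['joy', 'anger'], ['anger']]})
counts = toy.emotions.explode().to_frame().groupby('emotions').size()
print(counts)         # anger: 2, joy: 2 -> comment 'b' is counted under both of its labels
print(counts.mean())  # 2.0 comments per emotion on average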
NeuralMatrixFactorization.ipynb
###Markdown Neural Matrix FactorizationOn this notebook we will implement NeuMF Architecture [Described Here](https://arxiv.org/pdf/1708.05031.pdf). This architectures features a generalization of Matrix Factorization called Generalized Matrix Factorization, and an MLP for Neural Collaborative Filtering. Both are connected by an parallel architecture mechanism.For train the model, we will use Movie Lens 100k Dataset [available here](http://files.grouplens.org/datasets/movielens/ml-latest-small.zip).The final output of this architecture predicts the probability of the supplied item to be relevant to supplied user, this means the problems is structured as a Binary Classification Problem.. ###Code from IPython.display import clear_output !pip install --upgrade tensorflow-gpu clear_output() #Downloads and extract Dataset to local, wait for download, i dont want to put a progress bar here sorry #You can run this on google colab for get faster downloads speeds import os import zipfile import requests if(not os.path.exists("./Datasets/MoviLens.zip")): resp = requests.get("http://files.grouplens.org/datasets/movielens/ml-latest-small.zip") os.mkdir("./Datasets") with open("./Datasets/MoviLens.zip", "wb") as f: f.write(resp.content) with zipfile.ZipFile("./Datasets/MoviLens.zip", "r") as zip_ref: zip_ref.extractall("./Datasets") #Imports import tensorflow as tf import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import os clear_output() ###Output _____no_output_____ ###Markdown Exploratory Data AnalysisSimple exploration without too much fancy graphs ###Code #Loads Dataset, we only need ratings.csv and movies.csv files, we can drop timestamp and genres for now ratings_df = pd.read_csv("./Datasets/ml-latest-small/ratings.csv").drop(["timestamp"], axis=1) movies_df = pd.read_csv("./Datasets/ml-latest-small/movies.csv").drop(["genres"], axis=1) ml_df = ratings_df.merge(movies_df, on="movieId") ml_df = ml_df.reindex(columns=["userId", "movieId", "title", "rating"]) ml_df.head() #Check info about the Dataset ml_df.info() #Check for NaNs ml_df.isna().sum() #List unique values of each column n_users = ml_df["userId"].max() n_movies = ml_df["movieId"].nunique() print("Unique Users: " + str(n_users)) print("Unique Movies: " + str(n_movies)) #Top movies with more rating count (dont confuse with more views or more rating score, but are correlated) count = ml_df["title"].value_counts() count[:15] ###Output _____no_output_____ ###Markdown Preprocessing ###Code #Encode ratings, now will encode the probability of relevance of the item for the user #The negative sampling ratio under this setup is almost 2.5, but can be better have a setup of 5.0 ratio ml_df["relevance"] = (ml_df["rating"] >= 5.0).astype(dtype=float) #Set Ids as categorical data ml_df["userId"] = ml_df["userId"].astype("category").cat.codes.values ml_df["movieId"] = ml_df["movieId"].astype("category").cat.codes.values ml_df.head() #Shuffle Dataframe ml_df = ml_df.sample(frac=1.0).reset_index(drop=True) ml_df.head() #Test for negative sampling ratio, for this architecture a negative sampling ratio around 5.0 is very good #based on the cited paper experiments https://arxiv.org/pdf/1708.05031.pdf rel = (ml_df["relevance"] == 1.0).sum() non_rel = (ml_df["relevance"] != 1.0).sum() sns.countplot(x="relevance", data=ml_df) plt.show() print("Negative Sampling Ratio: " + str(non_rel / rel)) #Split dataset between train, eval and test full_df = ml_df eval_df = full_df.sample(frac=0.05) full_df = 
full_df.drop(eval_df.index) test_df = full_df.sample(frac=0.05) train_df = full_df.drop(test_df.index) #Store number of datapoints in each dataset train_count = train_df.shape[0] eval_count = eval_df.shape[0] test_count = test_df.shape[0] print("Train samples: " + str(train_count)) print("Evaluation samples: " + str(eval_count)) print("Test samples: " + str(test_count)) #Verify the distribution of the three splits plt.figure(figsize=(15, 3)) #Verify users distributions plt.subplot(1, 3, 1) sns.distplot(a=train_df["userId"], bins=100).set_title("Users Distribution") sns.distplot(a=eval_df["userId"], bins=100) sns.distplot(a=test_df["userId"], bins=100) #Verify movies distributions plt.subplot(1, 3, 2) sns.distplot(a=train_df["movieId"], bins=100).set_title("Movies Distribution") sns.distplot(a=eval_df["movieId"], bins=100) sns.distplot(a=test_df["movieId"], bins=100) #Verify Negative Sampling Ration Between the splits plt.subplot(1, 3, 3) train_nsr = (train_df["relevance"] == 0.0).sum() / (train_df["relevance"] == 1.0).sum() eval_nsr = (eval_df["relevance"] == 0.0).sum() / (eval_df["relevance"] == 1.0).sum() test_nsr = (test_df["relevance"] == 0.0).sum() / (test_df["relevance"] == 1.0).sum() sns.barplot(x=["train", "eval", "test"], y=[train_nsr, eval_nsr, test_nsr]).set_title("Negative Sampling Ratio") plt.show() ###Output _____no_output_____ ###Markdown Now that all the splits are balanced, let create the Tensorflow Datasets used for the training input pipeline ###Code #Create Datasets for train, evaluation and testing train_ds = tf.data.Dataset.from_tensor_slices(({"userId":train_df["userId"], "movieId":train_df["movieId"]}, train_df["relevance"].values.reshape([-1, 1]))).shuffle(4196) eval_ds = tf.data.Dataset.from_tensor_slices(({"userId":eval_df["userId"], "movieId":eval_df["movieId"]}, eval_df["relevance"].values.reshape([-1, 1]))).batch(eval_count) test_ds = tf.data.Dataset.from_tensor_slices(({"userId":test_df["userId"], "movieId":test_df["movieId"]}, test_df["relevance"].values.reshape([-1, 1]))).batch(test_count) ###Output _____no_output_____ ###Markdown Generalized Matrix FactorizationOn this section we will implement the GMF described [on this paper](https://arxiv.org/pdf/1708.05031.pdf). This generalization introduces non-linearities and change dot product by element-wise product. Check the paper on GMF section for more details. I will write this model with reusability on mind, so you will be able to pick this and use it on your implementations easily. I will create a repo for this implementation later. 
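To see in what sense GMF generalizes plain matrix factorization: the classic MF score is the dot product of the user and item embeddings, while GMF passes their element-wise product through a learned dense layer, so with unit weights, identity activation and no bias the dense layer collapses back to the dot product. A tiny NumPy sketch with illustrative values: ###Code
import numpy as np

u = np.array([0.2, -1.0, 0.5])   # user embedding (illustrative values)
v = np.array([1.5, 0.3, -0.4])   # item embedding (illustrative values)

dot_score = np.dot(u, v)         # classic matrix factorization score
h = np.ones_like(u)              # GMF output weights fixed to 1, identity activation, no bias
gmf_score = h @ (u * v)          # GMF reduces to the dot product in this special case
print(dot_score, gmf_score)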
###Code class DenseBlock(tf.keras.layers.Layer): """ Dense Block Layer Features Batch Normalization, Dropout and Dense layers, in that order Created for convenient building of Deep Feedforward Networks Args: units (int): Number of units on Dense Layer dropout (float): % of inputs to drop from Batch Normalization Layer l2 (float): Strenght of L2 regularization on Dense Layer """ def __init__(self, units, activation="relu", dropout=0.1, l2=0.001): super(DenseBlock, self).__init__() self.bn = tf.keras.layers.BatchNormalization() self.drop = tf.keras.layers.Dropout(dropout) self.dense = tf.keras.layers.Dense(units, activation, kernel_regularizer=tf.keras.regularizers.L1L2(l2=l2), kernel_constraint=tf.keras.constraints.UnitNorm()) def call(self, inputs): X = self.bn(inputs) X = self.drop(X) X = self.dense(X) return X class GeneralizedMatrixFactorizer(tf.keras.Model): """ Generalized Matrix Factorization Model Element-wise Product of Embeddings instead of Dot Product Non-Linear Activation Capability Args: alpha_dim (int): Number of rows of alpha Embedding Matrix, on movie recommend, can be the max number of users beta_dim (int): Number of rows of beta Embedding Matrix, on movie recommend, can be the max number of movies latent_dim (int): Dimension of the latent space representation of both Embedding Layers output_dim (int): Dimension of the model output output_activation (str): Activation function to be used on the last layer of the model, default="sigmoid" use_bias (bool): If allow the model to use bias for embeddings, default=False dropout (float): % of inputs to drop before output dense layer l2 (float): Strenght of L2 regularization on embeddings and output dense layer alpha_key (str): Key name in the call() input dictionary assigned to alpha, default="alpha" beta_key (str): Key name in the call() input dictionary assigned to beta, default="beta" """ def __init__(self, alpha_dim, beta_dim, latent_dim, output_dim, output_activation="sigmoid", use_bias=False, dropout=0.1, l2=1e-5, alpha_key="alpha", beta_key="beta"): super(GeneralizedMatrixFactorizer, self).__init__() self.use_bias = use_bias self.alpha_key = alpha_key self.beta_key = beta_key self.alpha_emb = tf.keras.layers.Embedding(alpha_dim, latent_dim, embeddings_regularizer=tf.keras.regularizers.L1L2(l2=l2)) self.beta_emb = tf.keras.layers.Embedding(beta_dim, latent_dim, embeddings_regularizer=tf.keras.regularizers.L1L2(l2=l2)) if(use_bias): self.alpha_bias = tf.keras.layers.Embedding(alpha_dim, 1) self.beta_bias = tf.keras.layers.Embedding(beta_dim, 1) self.flat = tf.keras.layers.Flatten() self.mul = tf.keras.layers.Multiply() self.out = DenseBlock(output_dim, output_activation, dropout, l2) def call(self, inputs): """ Model Call Args: inputs (dict): Python dictionary with two keys, one for alpha and one for beta Return: Model output using current weights """ alpha_emb = self.alpha_emb(inputs[self.alpha_key]) alpha_emb = self.flat(alpha_emb) beta_emb = self.beta_emb(inputs[self.beta_key]) beta_emb = self.flat(beta_emb) X = self.mul([alpha_emb, beta_emb]) if(self.use_bias): alpha_bias = self.alpha_bias(inputs[self.alpha_key]) alpha_bias = self.flat(alpha_bias) beta_bias = self.beta_bias(inputs[self.beta_key]) beta_bias = self.flat(beta_bias) bias = tf.add(alpha_bias, beta_bias) X = tf.add(X, bias) X = self.out(X) return X #Hyper-parameters gmf_params = {"alpha_dim":n_users, "beta_dim":n_movies, "latent_dim":8, "output_dim":1, "output_activation":"sigmoid", "use_bias":False, "dropout":0.5, "l2":1e-4, "learning_rate":1e-3, 
"batch_size":256, "epochs":50} #Instantiation gmf = GeneralizedMatrixFactorizer(alpha_dim=gmf_params["alpha_dim"], beta_dim=gmf_params["beta_dim"], latent_dim = gmf_params["latent_dim"], output_dim=gmf_params["output_dim"], output_activation=gmf_params["output_activation"], use_bias=gmf_params["use_bias"], dropout=gmf_params["dropout"], l2=gmf_params["l2"], alpha_key="userId", beta_key="movieId") gmf.compile(tf.keras.optimizers.Adam(gmf_params["learning_rate"]), tf.keras.losses.BinaryCrossentropy(), metrics=[tf.keras.metrics.BinaryAccuracy()]) #Fitting gmf.fit(train_ds.batch(gmf_params["batch_size"]), epochs=gmf_params["epochs"], validation_data=eval_ds, callbacks=[tf.keras.callbacks.EarlyStopping(patience=3)]) gmf.evaluate(test_ds) ###Output 1/1 [==============================] - 0s 16ms/step - loss: 0.3494 - binary_accuracy: 0.8714 ###Markdown Neural Collaborative FilteringNow we will implement the Multi Layer Perceptron Portion of the NeuMF Architecture. This is very straighforward, visit [the paper](https://arxiv.org/pdf/1708.05031.pdf) for details ###Code class DeepFeedForward(tf.keras.Model): """ Plain Multi-Layer Perceptron Args: layers (int): Rank 1 array-like object describing the hiden units of each layer of the Model, the number of elements in this array will be the number of layers in the model dropout (float): Dropout rate of the layers l2 (float): L2 regularization strenght of all the layers """ def __init__(self, layers, dropout=0.1, l2=0.001): super(DeepFeedForward, self).__init__() self.block_list = list() for layer in layers: self.block_list.append(DenseBlock(layer, "relu", dropout, l2)) def call(self, inputs): X = inputs for layer in self.block_list: X = layer(X) return X class NCFNetwork(tf.keras.Model): """ Neural Collaborative Filtering Args: alpha_dim (int): Number of rows of alpha Embedding Matrix, on movie recommend, can be the max number of users beta_dim (int): Number of rows of beta Embedding Matrix, on movie recommend, can be the max number of movies latent_dim (int): Dimension of the latent space representation of both Embedding Layers layers (int): Rank 1 array-like object describing the hiden units in each layer of the MLP output_activation (str): Activation function to be used on the last layer of the model, default="sigmoid" use_bias (bool): If allow the model to use bias, default=False dropout (float): % of inputs to drop from Batch Normalization Layer l2 (float): Strenght of L2 regularization on Dense Layer alpha_key (str): Key name in the call() input dictionary assigned to alpha, default="alpha" beta_key (str): Key name in the call() input dictionary assigned to beta, default="beta" """ def __init__(self, alpha_dim, beta_dim, latent_dim, layers, output_activation="sigmoid", use_bias=False, dropout=0.1, l2=0.001, alpha_key="alpha", beta_key="beta"): super(NCFNetwork, self).__init__() self.alpha_key = alpha_key self.beta_key = beta_key self.use_bias = use_bias self.alpha_emb = tf.keras.layers.Embedding(alpha_dim, latent_dim, embeddings_regularizer=tf.keras.regularizers.L1L2(l2=l2)) self.beta_emb = tf.keras.layers.Embedding(beta_dim, latent_dim, embeddings_regularizer=tf.keras.regularizers.L1L2(l2=l2)) if(use_bias): self.alpha_bias = tf.keras.layers.Embedding(alpha_dim, 1) self.beta_bias = tf.keras.layers.Embedding(beta_dim, 1) self.flat = tf.keras.layers.Flatten() self.feedforward = DeepFeedForward(layers[:-1], dropout, l2) self.dense_out = DenseBlock(layers[-1], output_activation, dropout, l2) def call(self, inputs): alpha_emb = 
self.flat(self.alpha_emb(inputs[self.alpha_key])) beta_emb = self.flat(self.beta_emb(inputs[self.beta_key])) X = tf.concat([alpha_emb, beta_emb], axis=-1) if(self.use_bias): alpha_bias = self.flat(self.alpha_bias(inputs[self.alpha_key])) beta_bias = self.flat(self.beta_bias(inputs[self.beta_key])) bias = tf.add(alpha_bias, beta_bias) X = tf.add(X, bias) X = self.feedforward(X) X = self.dense_out(X) return X #Hyper-params ncf_params = {"alpha_dim":n_users, "beta_dim":n_movies, "latent_dim":8, "layers":[4, 2, 1], "output_activation":"sigmoid", "use_bias":False, "dropout":0.4, "l2":1e-5, "learning_rate":1e-3, "batch_size":256, "epochs":50} #Instantiation ncf = NCFNetwork(alpha_dim=ncf_params["alpha_dim"], beta_dim=ncf_params["beta_dim"], latent_dim=ncf_params["latent_dim"], layers=ncf_params["layers"], output_activation=ncf_params["output_activation"], use_bias=ncf_params["use_bias"], dropout=ncf_params["dropout"], l2=ncf_params["l2"], alpha_key="userId", beta_key="movieId") ncf.compile(tf.keras.optimizers.Adam(ncf_params["learning_rate"]), tf.keras.losses.BinaryCrossentropy(), metrics=[tf.keras.metrics.BinaryAccuracy()]) #Fitting ncf.fit(train_ds.batch(ncf_params["batch_size"]), epochs=ncf_params["epochs"], validation_data=eval_ds, callbacks=[tf.keras.callbacks.EarlyStopping(patience=3)]) ncf.evaluate(test_ds) ###Output 1/1 [==============================] - 0s 18ms/step - loss: 0.3261 - binary_accuracy: 0.8647 ###Markdown Naural Matrix FactorizationLets implement this hybrid architecture, described [here](https://arxiv.org/pdf/1708.05031.pdf). It features a parallel architecture between a generalization of classic Matrix Factorization called Generalized Matrix Factorization, and Deep Feedforward Network, both of them feeded by its own Embeddings pair for users and items. [Check the paper](https://arxiv.org/pdf/1708.05031.pdf) for more details. 
###Code class NeuMF(tf.keras.Model): """ Neural Matrix Factorizer Args: alpha_dim (int): Number of rows of alpha Embedding Matrix, on movie recommend, can be the max number of users beta_dim (int): Number of rows of beta Embedding Matrix, on movie recommend, can be the max number of movies NCF_latent_dim (int): Dimension of the latent space representation of both Embedding Layers on NCFNetwork GMF_latent_dim (int): Dimension of the latent space representation of both Embedding Layers on GMF NCF_layers (int): Rank 1 array-like object describing the hiden units in each layer of the NCF's MLP GMF_output_dim (int): Dimension of the GMF output NeuMF_output_dim (int): Dimension of the NeuMF output NCF_output_activation (str): Activation function to be used on the last layer of NCF, default="relu" GMF_output_activation (str): Activation function to be used on the last layer of GMF, default="relu" NeuMF_output_activation (str): Activation function to be used on the last layer of NeuMF, default="sigmoid" output_weight (float): Weight the balance between NCF and GMF outputs, default=0.5 use_bias (bool): If allow the model to use bias, default=False dropout (float): % of inputs to drop from DenseBloscks l2 (float): Strenght of L2 regularization alpha_key (str): Key name in the call() input dictionary assigned to alpha, default="alpha" beta_key (str): Key name in the call() input dictionary assigned to beta, default="beta" """ def __init__(self, alpha_dim, beta_dim, NCF_latent_dim, GMF_latent_dim, NCF_layers, GMF_output_dim, NeuMF_output_dim, NCF_output_activation="relu", GMF_output_activation="relu", NeuMF_output_activation="sigmoid", output_weight=0.5, use_bias=False, dropout=0.1, l2=0.001, alpha_key="alpha", beta_key="beta"): super(NeuMF, self).__init__() self.output_weight = output_weight self.NCF = NCFNetwork(alpha_dim, beta_dim, NCF_latent_dim, NCF_layers, NCF_output_activation, use_bias, dropout, l2, alpha_key, beta_key) self.GMF = GeneralizedMatrixFactorizer(alpha_dim, beta_dim, GMF_latent_dim, GMF_output_dim, GMF_output_activation, use_bias, dropout, l2, alpha_key, beta_key) self.dense_out = DenseBlock(NeuMF_output_dim, NeuMF_output_activation, dropout, l2) def call(self, inputs): ncf = self.NCF(inputs) * (1.0 - self.output_weight) gmf = self.GMF(inputs) * self.output_weight X = tf.concat([ncf, gmf], axis=-1) X = self.dense_out(X) return X nmf_params = {"alpha_dim":n_users, "beta_dim":n_movies, "NCF_latent_dim":8, "GMF_latent_dim":8, "NCF_layers":[4, 2, 1], "GMF_output_dim":1, "NeuMF_output_dim":1, "output_weight":0.5, "use_bias":False, "dropout":0.4, "l2":1e-6, "learning_rate":1e-4, "batch_size":256, "epochs":50} #Instantiation nmf = NeuMF(alpha_dim=nmf_params["alpha_dim"], beta_dim=nmf_params["beta_dim"], NCF_latent_dim=nmf_params["NCF_latent_dim"], GMF_latent_dim=nmf_params["GMF_latent_dim"], NCF_layers=nmf_params["NCF_layers"], GMF_output_dim=nmf_params["GMF_output_dim"], NeuMF_output_dim=nmf_params["NeuMF_output_dim"], output_weight=nmf_params["output_weight"], use_bias=nmf_params["use_bias"], dropout=nmf_params["dropout"], l2=nmf_params["l2"], alpha_key="userId", beta_key="movieId") nmf.compile(tf.keras.optimizers.Adam(nmf_params["learning_rate"]), tf.keras.losses.BinaryCrossentropy(), metrics=[tf.keras.metrics.BinaryAccuracy()]) nmf.train_on_batch(train_ds.batch(1).take(1)) load_weights = True if(load_weights): #load NCF weights run_path = "./Checkpoints/NCF/elated-eon-328" nmf.NCF.load_weights(run_path + "/checkpoint") #load GMF weights run_path = "./Checkpoints/GMF/still-sea-324" 
nmf.GMF.load_weights(run_path + "/checkpoint") #Fitting nmf.fit(train_ds.batch(nmf_params["batch_size"]), epochs=nmf_params["epochs"], validation_data=eval_ds, callbacks=[tf.keras.callbacks.EarlyStopping(patience=3)]) nmf.evaluate(test_ds) ###Output 1/1 [==============================] - 0s 21ms/step - loss: 0.3790 - binary_accuracy: 0.8645
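###Markdown With the model trained, producing recommendations amounts to scoring one user against many candidate movies and ranking by the predicted relevance probability. The sketch below is illustrative only and assumes `nmf`, `n_movies` and `ml_df` from the cells above are available: ###Code
import numpy as np

user_id = 0                              # any valid user index (illustrative choice)
candidates = np.arange(n_movies)         # score every movie for this user
scores = nmf.predict({"userId": np.full(n_movies, user_id),
                      "movieId": candidates}, batch_size=1024).squeeze()

top10 = np.argsort(scores)[::-1][:10]    # highest predicted relevance first
titles = ml_df.drop_duplicates("movieId").set_index("movieId")["title"]
for movie_id in top10:
    print("{:<60} relevance={:.3f}".format(titles.loc[movie_id], scores[movie_id]))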
paper/Advection_diffusion/AD_artificial/.ipynb_checkpoints/Loop_noise_0_14_noise80-checkpoint.ipynb
###Markdown 2D Advection-Diffusion equation in this notebook we provide a simple example of the DeepMoD algorithm and apply it on the 2D advection-diffusion equation. ###Code # General imports import numpy as np import torch # DeepMoD functions from deepymod import DeepMoD from deepymod.model.func_approx import NN from deepymod.model.library import Library2D_third from deepymod.model.constraint import LeastSquares from deepymod.model.sparse_estimators import Threshold,PDEFIND from deepymod.training import train from deepymod.training.sparsity_scheduler import TrainTestPeriodic from scipy.io import loadmat # Settings for reproducibility np.random.seed(1) torch.manual_seed(1) if torch.cuda.is_available(): device = 'cuda' else: device = 'cpu' ###Output _____no_output_____ ###Markdown Prepare the data Next, we prepare the dataset. ###Code data = loadmat('Diffusion_2D_space41.mat') data = np.real(data['Expression1']).reshape((41,41,41,4))[:,:,:,3] x_dim, y_dim, t_dim = data.shape time_range = [1,2,4,6,8,10,12,14] for i in time_range: # Downsample data and prepare data without noise: down_data= np.take(np.take(np.take(data,np.arange(0,x_dim,3),axis=0),np.arange(0,y_dim,3),axis=1),np.arange(0,t_dim,i),axis=2) print("Dowmsampled shape:",down_data.shape, "Total number of data points:", np.product(down_data.shape)) index = len(np.arange(0,t_dim,i)) width, width_2, steps = down_data.shape x_arr, y_arr, t_arr = np.linspace(0,1,width), np.linspace(0,1,width_2), np.linspace(0,1,steps) x_grid, y_grid, t_grid = np.meshgrid(x_arr, y_arr, t_arr, indexing='ij') X, y = np.transpose((t_grid.flatten(), x_grid.flatten(), y_grid.flatten())), np.float32(down_data.reshape((down_data.size, 1))) # Add noise noise_level = 0.80 y_noisy = y + noise_level * np.std(y) * np.random.randn(y.size, 1) # Randomize data idx = np.random.permutation(y.shape[0]) X_train = torch.tensor(X[idx, :], dtype=torch.float32, requires_grad=True).to(device) y_train = torch.tensor(y_noisy[idx, :], dtype=torch.float32).to(device) # Configure DeepMoD network = NN(3, [40, 40, 40, 40], 1) library = Library2D_third(poly_order=0) estimator = Threshold(0.05) sparsity_scheduler = TrainTestPeriodic(periodicity=50, patience=200, delta=1e-5) constraint = LeastSquares() model = DeepMoD(network, library, estimator, constraint).to(device) optimizer = torch.optim.Adam(model.parameters(), betas=(0.99, 0.99), amsgrad=True, lr=2e-3) logdir='final_runs/80_noise_x14/'+str(index)+'/' train(model, X_train, y_train, optimizer,sparsity_scheduler, log_dir=logdir, split=0.8, max_iterations=50000, delta=1e-6, patience=200) ###Output Dowmsampled shape: (14, 14, 41) Total number of data points: 8036 49975 MSE: 7.58e-04 Reg: 9.02e-06 L1: 1.50e+00 Algorithm converged. Writing model to disk. Dowmsampled shape: (14, 14, 21) Total number of data points: 4116 49975 MSE: 7.96e-04 Reg: 1.62e-05 L1: 1.59e+00 Algorithm converged. Writing model to disk. Dowmsampled shape: (14, 14, 11) Total number of data points: 2156 49975 MSE: 8.93e-04 Reg: 5.37e-06 L1: 1.44e+00 Algorithm converged. Writing model to disk. Dowmsampled shape: (14, 14, 7) Total number of data points: 1372 49975 MSE: 9.58e-04 Reg: 2.63e-05 L1: 1.71e+00 Algorithm converged. Writing model to disk. Dowmsampled shape: (14, 14, 6) Total number of data points: 1176 49975 MSE: 1.28e-03 Reg: 5.92e-05 L1: 1.54e+00 Algorithm converged. Writing model to disk. Dowmsampled shape: (14, 14, 5) Total number of data points: 980 7300 MSE: 2.68e-03 Reg: 4.06e-06 L1: 1.47e+01
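###Markdown The `noise_level = 0.80` setting adds zero-mean Gaussian noise whose standard deviation is 80% of the standard deviation of the clean field, regardless of the temporal downsampling stride. A one-line check (illustrative only) makes the implied signal-to-noise ratio explicit: ###Code
import numpy as np

noise_level = 0.80
snr = 1.0 / noise_level ** 2  # var(signal) / var(noise) when sigma_noise = 0.8 * sigma_signal
print("SNR = {:.2f} ({:.2f} dB)".format(snr, 10 * np.log10(snr)))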
koncept512_train_test_py3_with_kuti.ipynb
###Markdown Install and load libraries ###Code %%capture # Setup paths drive_mount = '/content/drive/' drive_root = drive_mount + 'My Drive/research/data/' # persistent storage for dataset images data_root = '/content/koniq/' # library install path !pip install kuti !git clone https://github.com/subpic/koniq.git !pip install munch from kuti import model_helper as mh from kuti import applications as apps from kuti import tensor_ops as ops from kuti import generic as gen from kuti import image_utils as iu import pandas as pd, numpy as np, os from matplotlib import pyplot as plt from munch import Munch from google.colab import drive drive.mount(drive_mount) ###Output Loaded Kuti Mounted at /content/drive/ ###Markdown Setup KonIQ-10k dataset imagesDownload images resized to 512x384 pixels (originals are 1024x768) ###Code %%capture gen.make_dirs(drive_root+'koniq/') drive_image_path = drive_root+'koniq/koniq10k_512x384.zip' if not os.path.exists(drive_image_path): !wget -O "$drive_image_path" "http://datasets.vqa.mmsp-kn.de/archives/koniq10k_512x384.zip" gen.make_dirs(data_root+'images/') data_images_path = data_root+'images/' !cp "$drive_image_path" "$data_images_path" %cd $data_images_path !unzip -o koniq10k_512x384.zip %cd $data_root ###Output _____no_output_____ ###Markdown Define the KonCept512 model ###Code from tensorflow.keras.models import Model ids = pd.read_csv(data_root + 'metadata/koniq10k_distributions_sets.csv') # Build scoring model base_model, preprocess_fn = apps.get_model_imagenet(apps.InceptionResNetV2) head = apps.fc_layers(base_model.output, name='fc', fc_sizes = [2048, 1024, 256, 1], dropout_rates = [0.25, 0.25, 0.5, 0], batch_norm = 2) model = Model(inputs = base_model.input, outputs = head) # Parameters of the generator pre = lambda im: preprocess_fn( iu.ImageAugmenter(im, remap=False).fliplr().result) gen_params = dict(batch_size = 16, data_path = data_root+'images/512x384/', process_fn = pre, input_shape = (384,512,3), inputs = ['image_name'], outputs = ['MOS']) # Wrapper for the model, helps with training and testing helper = mh.ModelHelper(model, 'KonCept512', ids, loss='MSE', metrics=["MAE", ops.plcc_tf], monitor_metric = 'val_loss', monitor_mode = 'min', multiproc = True, workers = 5, logs_root = drive_root + 'logs/koniq', models_root = drive_root + 'models/koniq', gen_params = gen_params) ###Output Loading model InceptionResNetV2 Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/inception_resnet_v2/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5 219062272/219055592 [==============================] - 2s 0us/step 219070464/219055592 [==============================] - 2s 0us/step ###Markdown Train it ###Code # do validation in memory valid_gen = helper.make_generator(ids[ids.set=='validation'], batch_size = 16) helper.train(valid_gen=valid_gen, lr=1e-4, epochs=40) helper.load_model() helper.train(valid_gen=valid_gen, lr=1e-4/5, epochs=20) ###Output Training model: KonCept512/bsz:16 i:1[384,512,3] l:MSE o:1[1] ###Markdown Load trained model ###Code %%capture # download the pre-trained KonCept512 model model_root = data_root + 'models/' gen.make_dirs(model_root) !wget -O "{model_root}koncep512-model.h5" "http://datasets.vqa.mmsp-kn.de/archives/koncep512-trained-model.h5" helper.model.load_weights(model_root + 'koncep512-model.h5') ###Output _____no_output_____ ###Markdown Test model on the default test set ###Code y_pred = helper.predict() y_true = ids[ids.set=='test'].MOS.values apps.rating_metrics(y_true, y_pred); 
###Output SRCC: 0.919 | PLCC: 0.932 | MAE: 9.132 | RMSE: 10.233 ###Markdown Predict on a custom image (example) ###Code # Load an image image_path = data_root + 'images/512x384/' + ids.image_name.values[0] im = preprocess_fn(iu.read_image(image_path)) # Create a batch of 1 image batch = np.expand_dims(im, 0) # Predict quality score y_pred = helper.model.predict(batch).squeeze() print(f'Predicted score: {y_pred:.{2}f}, ground-truth score: {ids.MOS.values[0]:.{2}f}') ###Output Predicted score: 68.33, ground-truth score: 77.38
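###Markdown The same forward pass scales to several images at once: read and preprocess each file, stack the results into one batch, and call `predict` once. A sketch below, assuming the `ids`, `data_root`, `preprocess_fn` and `helper` objects defined above are still available (the file names are simply the first rows of the metadata table): ###Code
import numpy as np
from kuti import image_utils as iu

image_names = ids.image_name.values[:3]   # a few images from the metadata table
batch = np.stack([preprocess_fn(iu.read_image(data_root + 'images/512x384/' + name))
                  for name in image_names])
scores = helper.model.predict(batch).squeeze()
for name, score in zip(image_names, scores):
    print('{}: predicted MOS {:.2f}'.format(name, score))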
chapter_3/TorchText_Classification.ipynb
###Markdown Args ###Code args = Namespace( review_csv = "data/yelp/reviews_with_splits_lite.csv", model_state_file='model.pth', save_dir='model_storage/', label_field_name = "rating", data_field_name = "review", frequency_cutoff=25, device = torch.device('cuda' if torch.cuda.is_available() else 'cpu'), batch_size=128, early_stopping_criteria=5, learning_rate=0.001, num_epochs=100, seed=1337, # Runtime options catch_keyboard_interrupt=True, expand_filepaths_to_save_dir=True, reload_from_files=False, cuda=False ) ###Output _____no_output_____ ###Markdown Global Configurations ###Code def set_seed_everywhere(seed, cuda): np.random.seed(seed) torch.manual_seed(seed) if cuda: torch.cuda.manual_seed_all(seed) def handle_dirs(dirpath): if not os.path.exists(dirpath): os.makedirs(dirpath) if args.expand_filepaths_to_save_dir: args.model_state_file = os.path.join(args.save_dir, args.model_state_file) print("Expanded filepaths: ") print("\t{}".format(args.model_state_file)) # Check CUDA if not torch.cuda.is_available(): args.cuda = False print("Using CUDA: {}".format(args.cuda)) args.device = torch.device("cuda" if args.cuda else "cpu") # Set seed for reproducibility set_seed_everywhere(args.seed, args.cuda) # handle dirs handle_dirs(args.save_dir) ###Output Expanded filepaths: model_storage/model.pth Using CUDA: False ###Markdown Load Data ###Code data = pd.read_csv(args.review_csv) data.head() ###Output _____no_output_____ ###Markdown Define Fields ###Code import string def tokenizer(review): """Simple tokenizer""" return [word for word in review.split(" ") if word not in string.punctuation] REVIEW = Field(tokenize=tokenizer, sequential=True, lower=True) RATING = LabelField(dtype=torch.float32) ###Output _____no_output_____ ###Markdown Create Datasets TorchText ###Code class ReviewDataset(Dataset): def __init__(self, review_df, fields): examples = [] for i, row in tqdm_notebook(review_df.iterrows(), total=review_df.shape[0]): rating = row.rating review = row.review examples.append(Example.fromlist([review, rating], fields)) super().__init__(examples, fields) @staticmethod def sort_key(ex): return len(ex.review) @classmethod def splits(cls, fields, review_df): train_data = cls(review_df[review_df.split == "train"], fields) val_data = cls(review_df[review_df.split == "val"], fields) test_data = cls(review_df[review_df.split == "test"], fields) return train_data, val_data, test_data fields = [('review', REVIEW), ('rating', RATING)] train_data, val_data, test_data = ReviewDataset.splits(fields, review_df=data) print("REVIEW\n",vars(train_data[0])["review"]) print("RATING\n",vars(train_data[0])["rating"]) ###Output REVIEW ['on', 'a', 'recent', 'visit', 'to', 'las', 'vegas', 'my', 'friends', 'and', 'i', 'decided', 'to', 'stay', 'at', 'the', 'monte', 'carlo', 'because', 'it', 'had', 'been', 'recommended', 'to', 'us', 'and', 'we', 'like', 'the', 'location', 'i', 'would', 'say', 'overall', 'that', 'we', 'had', 'a', 'nice', 'vacation', 'but', 'we', 'experienced', 'a', 'problem', 'at', 'the', 'end', 'of', 'our', 'stay', 'n', 'nafter', 'we', 'had', 'packed', 'our', 'bags', 'and', 'just', 'before', 'checking', 'out', 'i', 'called', 'for', 'a', 'bellman', 'to', 'take', 'our', 'bags', 'down', 'for', 'us', 'after', 'checking', 'out', 'and', 'driving', 'home', 'we', 'discovered', 'that', 'a', 'bottle', 'of', 'perfume', 'had', 'been', 'broken', 'in', 'one', 'of', 'our', 'bags', 'as', 'all', 'we', 'did', 'was', 'go', 'straight', 'from', 'the', 'hotel', 'back', 'to', 'our', 'house', 'we', 'concluded', 'the', 'bag', 
'had', 'most', 'likely', 'been', 'mishandled', 'by', 'the', 'bellman', 'n', 'nwe', 'immediately', 'called', 'the', 'hotel', 'and', 'a', 'very', 'sympathetic', 'person', 'took', 'our', 'information', 'and', 'told', 'us', 'he', 'would', 'investigate', 'what', 'happened', 'as', 'well', 'as', 'what', 'they', 'could', 'do', 'fo', 'us', 'and', 'get', 'back', 'to', 'us', 'within', 'hours', 'after', 'waiting', 'for', 'days', 'without', 'a', 'response', 'i', 'called', 'back', 'to', 'follow', 'up', 'i', 'was', 'unable', 'to', 'speak', 'to', 'the', 'person', 'who', 'originally', 'helped', 'me', 'and', 'found', 'that', 'the', 'person', 'with', 'whom', 'i', 'was', 'now', 'speaking', 'had', 'a', 'very', 'unpleasant', 'almost', 'accusing', 'tone', 'in', 'her', 'voice', 'n', 'nshe', 'informed', 'me', 'that', 'there', 'was', 'nothing', 'that', 'they', 'would', 'be', 'able', 'to', 'do', 'insisting', 'that', 'the', 'negligence', 'lay', 'with', 'us', 'we', 'were', 'also', 'advised', 'that', 'the', 'first', 'person', 'with', 'whom', 'we', 'spoke', 'should', 'not', 'have', 'committed', 'to', 'returning', 'our', 'call', 'within', 'hours', 'as', 'this', 'was', 'not', 'their', 'policy', 'n', 'noverall', 'i', 'would', 'say', 'that', 'i', 'was', 'disappointed', 'by', 'the', 'fact', 'that', 'they', 'are', 'not', 'willing', 'to', 'do', 'anything', 'about', 'the', 'broken', 'bottle', 'of', 'perfume', 'which', 'spilled', 'all', 'over', 'an', 'expensive', 'piece', 'of', 'luggage', 'but', 'even', 'more', 'unsatisfied', 'by', 'the', 'disingenuous', 'manner', 'in', 'which', 'we', 'were', 'treated', 'it', 'was', 'an', 'unpleasant', 'end', 'to', 'our', 'vacation', 'and', 'i', 'certainly', 'have', 'no', 'plans', 'to', 'revisit', 'the', 'hotel', 'nor', 'will', 'i', 'recommend', 'it', 'to', 'anyone', 'i', 'know'] RATING negative ###Markdown Build Vocab ###Code REVIEW.build_vocab(train_data, min_freq=args.frequency_cutoff) RATING.build_vocab(train_data) vars(RATING.vocab) print("TOTAL_WORDS = ", len(REVIEW.vocab.itos)) print(REVIEW.vocab.itos[:2]) args.total_words = len(REVIEW.vocab.itos) ###Output _____no_output_____ ###Markdown Create One-Hot-Vectorizer ###Code class Vectorizer(object): def __init__(self, review_field): self._review_field = review_field self._slicing = torch.tensor([i for i in # Exclude <pad> token range(len(self._review_field.vocab.itos)) if i !=1]) self._dimension = len(self._slicing) def vectorize(self, batch_matrix): batch_size = batch_matrix.shape[1] one_hot = torch.zeros((batch_size, len(self._review_field.vocab.itos)), dtype=torch.long) indices = batch_matrix.T source = torch.ones_like(indices) # source = (indices != 1).long() # Exclude <pad> token one_hot.scatter_(1, indices, source) return one_hot[:, self._slicing] oh_vectorize = Vectorizer(REVIEW) ###Output _____no_output_____ ###Markdown Define Iterators ###Code train_iterator, val_iterator = BucketIterator.splits( (train_data, val_data), batch_size = args.batch_size, sort_within_batch = True, device = args.device, ) test_iterator = Iterator(test_data, batch_size=args.batch_size, device=args.device, sort=False, sort_within_batch=False) for batch in train_iterator: v_code = oh_vectorize.vectorize(batch.review) print(v_code) first = batch.review[:,0].sort().values decode = ' '.join([REVIEW.vocab.itos[idx] for idx in first if idx != 1]) print(decode) first = v_code[0] decode = ' '.join([REVIEW.vocab.itos[idx] for idx, val in enumerate(first, 1) if val!= 0]) print("\n") print(decode) break ###Output tensor([[1, 1, 1, ..., 0, 0, 0], [0, 1, 1, ..., 0, 0, 
0], [1, 1, 1, ..., 0, 0, 0], ..., [1, 1, 1, ..., 0, 0, 0], [1, 1, 1, ..., 0, 0, 0], [1, 1, 1, ..., 0, 0, 0]]) <unk> <unk> <unk> <unk> the the the the the the and and and and i i to to a a a a a a a was was was it it of of for for in in n n is that that this with t have have s had at at at at at were so there out if if get would back their an what only us been because ni ni also well over better best come chicken minutes room think bad table table took wasn long hour why nwe least breakfast waiting waiting waiting tasted served shrimp sat may rather sitting under rest eggs eggs spend simple mediocre hope happened nservice cocktail heat smile nfood shit ham dude nwell golden gate pig explanation lamp blackjack laying <pad> the and i to a was it of for in n is that this with t have s had at were so there out if get would back their an what only us been because ni also well over better best come chicken minutes room think bad table took wasn long hour why nwe least breakfast waiting tasted served shrimp sat may rather sitting under rest eggs spend simple mediocre hope happened nservice cocktail heat smile nfood shit ham dude nwell golden gate pig explanation lamp blackjack laying ###Markdown Model ###Code class ReviewClassifier(nn.Module): """ a simple perceptron based classifier """ def __init__(self, num_features): """ Args: num_features (int): the size of the input feature vector """ super(ReviewClassifier, self).__init__() self.fc1 = nn.Linear(in_features=num_features, out_features=1) def forward(self, x_in, apply_sigmoid=False): """The forward pass of the classifier Args: x_in (torch.Tensor): an input data tensor. x_in.shape should be (batch, num_features) apply_sigmoid (bool): a flag for the sigmoid activation should be false if used with the Cross Entropy losses Returns: the resulting tensor. tensor.shape should be (batch,) """ y_out = self.fc1(x_in.squeeze()).squeeze() if apply_sigmoid: y_out = torch.sigmoid(y_out) return y_out ###Output _____no_output_____ ###Markdown Training Loop Utilities ###Code def make_train_state(args): return {'stop_early': False, 'early_stopping_step': 0, 'early_stopping_best_val': 1e8, 'learning_rate': args.learning_rate, 'epoch_index': 0, 'train_loss': [], 'train_acc': [], 'val_loss': [], 'val_acc': [], 'test_loss': -1, 'test_acc': -1, 'model_filename': args.model_state_file} def update_train_state(args, model, train_state): """Handle the training state updates. Components: - Early Stopping: Prevent overfitting. - Model Checkpoint: Model is saved if the model is better :param args: main arguments :param model: model to train :param train_state: a dictionary representing the training state values :returns: a new train_state """ # Save one model at least if train_state['epoch_index'] == 0: torch.save(model.state_dict(), train_state['model_filename'].format(train_state['epoch_index'])) train_state['stop_early'] = False # Save model if performance improved elif train_state['epoch_index'] >= 1: loss_tm1, loss_t = train_state['val_loss'][-2:] # If loss worsened if loss_t >= train_state['early_stopping_best_val']: # Update step train_state['early_stopping_step'] += 1 # Loss decreased else: # Save the best model if loss_t < train_state['early_stopping_best_val']: torch.save(model.state_dict(), train_state['model_filename']) # Reset early stopping step train_state['early_stopping_step'] = 0 # Stop early ? 
train_state['stop_early'] = \ train_state['early_stopping_step'] >= args.early_stopping_criteria return train_state def compute_accuracy(y_pred, y_target): y_target = y_target.cpu() y_pred_indices = (torch.sigmoid(y_pred)>0.5).cpu().long()#.max(dim=1)[1] n_correct = torch.eq(y_pred_indices, y_target).sum().item() return n_correct / len(y_pred_indices) * 100 ###Output _____no_output_____ ###Markdown Initilization ###Code from torch.utils.tensorboard import SummaryWriter writer = SummaryWriter() classifier = ReviewClassifier(num_features=oh_vectorize._dimension) classifier def train(data_loaders, classifier, optimizer, loss_func, train_bar): running_loss = 0.0 running_acc = 0.0 classifier.train() for batch_index, batch_dict in enumerate(data_loaders["train"]): # the training routine is these 5 steps: # -------------------------------------- # step 1. zero the gradients optimizer.zero_grad() # step 2. compute the output x_in = oh_vectorize.vectorize(batch_dict.review) y_pred = classifier(x_in=x_in.float()) # step 3. compute the loss loss = loss_func(y_pred, batch_dict.rating) loss_t = loss.item() running_loss += (loss_t - running_loss) / (batch_index + 1) # step 4. use loss to produce gradients loss.backward() # step 5. use optimizer to take gradient step optimizer.step() # ----------------------------------------- # compute the accuracy acc_t = compute_accuracy(y_pred, batch_dict.rating) running_acc += (acc_t - running_acc) / (batch_index + 1) # update bar train_bar.set_postfix(loss=running_loss, acc=running_acc, epoch=epoch_index) train_bar.update() return running_loss, running_acc def val(data_loaders, classifier, loss_func, val_bar): running_loss = 0. running_acc = 0. classifier.eval() with torch.no_grad(): for batch_index, batch_dict in enumerate(data_loaders["val"]): # compute the output x_in = oh_vectorize.vectorize(batch_dict.review) y_pred = classifier(x_in=x_in.float()) # step 3. 
compute the loss loss = loss_func(y_pred, batch_dict.rating) loss_t = loss.item() running_loss += (loss_t - running_loss) / (batch_index + 1) # compute the accuracy acc_t = compute_accuracy(y_pred, batch_dict.rating) running_acc += (acc_t - running_acc) / (batch_index + 1) val_bar.set_postfix(loss=running_loss, acc=running_acc, epoch=epoch_index) val_bar.update() return running_loss, running_acc data_loaders = {"train": train_iterator, "val": val_iterator, "test": test_iterator} datasets = {"train": train_data, "val": val_data, "test": test_data} classifier = classifier.to(args.device) loss_func = nn.BCEWithLogitsLoss() optimizer = optim.Adam(classifier.parameters(), lr=args.learning_rate) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, mode='min', factor=0.5, patience=1) train_state = make_train_state(args) epoch_bar = tqdm_notebook(desc='training routine', total=args.num_epochs, position=0) train_bar = tqdm_notebook(desc='split=train', total=len(data_loaders["train"]), position=1, leave=True) val_bar = tqdm_notebook(desc='split=val', total=len(data_loaders["val"]), position=1, leave=True) try: for epoch_index in range(args.num_epochs): train_state['epoch_index'] = epoch_index # Iterate over training dataset running_loss, running_acc = train(data_loaders, classifier, optimizer, loss_func, train_bar) writer.add_scalar('Loss/train', running_loss, epoch_index) writer.add_scalar('Accuracy/train', running_acc, epoch_index) train_state['train_loss'].append(running_loss) train_state['train_acc'].append(running_acc) # Iterate over val dataset running_loss, running_acc = val(data_loaders, classifier, loss_func, val_bar) writer.add_scalar('Loss/val', running_loss, epoch_index) writer.add_scalar('Accuracy/val', running_acc, epoch_index) train_state['val_loss'].append(running_loss) train_state['val_acc'].append(running_acc) train_state = update_train_state(args=args, model=classifier, train_state=train_state) scheduler.step(train_state['val_loss'][-1]) train_bar.n = 0 val_bar.n = 0 epoch_bar.update() if train_state['stop_early']: break except KeyboardInterrupt: print("Exiting loop") %tensorboard --logdir runs # compute the loss & accuracy on the test set using the best available model classifier.load_state_dict(torch.load(train_state['model_filename'])) classifier = classifier.to(args.device) running_loss = 0. running_acc = 0. 
classifier.eval() with torch.no_grad(): for batch_index, batch_dict in tqdm_notebook(enumerate(data_loaders["test"]), total = len(data_loaders["test"])): # compute the output x_in = oh_vectorize.vectorize(batch_dict.review) y_pred = classifier(x_in=x_in.float()) # compute the loss loss = loss_func(y_pred, batch_dict.rating.float()) loss_t = loss.item() running_loss += (loss_t - running_loss) / (batch_index + 1) # compute the accuracy acc_t = compute_accuracy(y_pred, batch_dict.rating) running_acc += (acc_t - running_acc) / (batch_index + 1) train_state['test_loss'] = running_loss train_state['test_acc'] = running_acc print("Test loss: {:.3f}".format(train_state['test_loss'])) print("Test Accuracy: {:.2f}".format(train_state['test_acc'])) ###Output Test loss: 0.207 Test Accuracy: 92.03 ###Markdown Inference ###Code import re def preprocess_text(text): text = text.lower() text = re.sub(r"([.,!?])", r" \1 ", text) text = re.sub(r"[^a-zA-Z.,!?]+", r" ", text) return text def predict_rating(review, classifier, review_field, rating_field, oh_vectorizer, decision_threshold=0.5): """Predict the rating of a review Args: review (str): the text of the review classifier (ReviewClassifier): the trained model vectorizer (ReviewVectorizer): the corresponding vectorizer decision_threshold (float): The numerical boundary which separates the rating classes """ review = preprocess_text(review) review = review_field.tokenize(review) review = review_field.numericalize([review]) vectorized_review = oh_vectorizer.vectorize(review) result = classifier(vectorized_review.float()) probability_value = torch.sigmoid(result).item() index = 1 if probability_value < decision_threshold: index = 0 return rating_field.itos[index] test_review = "This book is meh" classifier = classifier.cpu() prediction = predict_rating(test_review, classifier, REVIEW, RATING.vocab, oh_vectorize, decision_threshold=0.5) print("{} -> {}".format(test_review, prediction)) ###Output This book is meh -> negative ###Markdown Interpretability ###Code classifier.fc1.weight.shape lookup_index = ['<unk>'] + REVIEW.vocab.itos[2:] # Sort weights fc1_weights = classifier.fc1.weight.detach()[0] _, indices = torch.sort(fc1_weights, dim=0, descending=True) indices = indices.numpy().tolist() # Top 20 words print("Influential words in Positive Reviews:") print("--------------------------------------") for i in range(20): print(lookup_index[indices[i]]) print("====\n\n\n") # Top 20 negative words print("Influential words in Negative Reviews:") print("--------------------------------------") indices.reverse() for i in range(20): print(lookup_index[indices[i]]) ###Output Influential words in Positive Reviews: -------------------------------------- excellent delicious amazing disappoint outstanding perfect awesome perfection yum incredible fantastic ngreat great downside superb heaven love hooked wonderful perfectly ==== Influential words in Negative Reviews: -------------------------------------- worst horrible terrible bland awful meh mediocre tasteless poisoning disgusting poor eh overpriced disappointment inedible rude disappointing lacked sucks overrated
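###Markdown The weight inspection above can be folded into a small reusable helper, so the top-k lists for both polarities come from one call. This is a minimal sketch that assumes the `classifier` and `lookup_index` objects defined in the cells above; the function name `top_weighted_words` is ours and is not part of the original notebook. ###Code
import torch

def top_weighted_words(classifier, lookup_index, k=20):
    """Return the k most positive and k most negative vocabulary items
    according to the weights of the single linear layer."""
    weights = classifier.fc1.weight.detach()[0]
    _, order = torch.sort(weights, dim=0, descending=True)
    order = order.tolist()
    positive = [lookup_index[i] for i in order[:k]]
    negative = [lookup_index[i] for i in reversed(order[-k:])]
    return positive, negative

pos_words, neg_words = top_weighted_words(classifier, lookup_index, k=10)
print("Influential positive words:", pos_words)
print("Influential negative words:", neg_words)
###Output _____no_output_____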
communication.ipynb
###Markdown SetupMake sure you have already tried `basics.ipynb`, then follow the cells and instructions below. ###Code from pybricksdev.connections import BLEPUPConnection, EV3Connection from asyncio import gather, sleep car_hub = BLEPUPConnection() await car_hub.connect('00:16:53:AD:A6:A9') remote_hub = BLEPUPConnection() await remote_hub.connect('90:84:2B:4A:8F:7D') ###Output _____no_output_____ ###Markdown This script will run on the remote hub with a motor that acts as a dialFor now we just save it. We will run it later. ###Code %%file build/remote.py from pybricks.pupdevices import Motor from pybricks.parameters import Port from pybricks.tools import wait, StopWatch dial = Motor(Port.A) while True: # In this demo, limit angle to 0--255 for simplicity, so it fits in one byte. angle = max(0, min(dial.angle(), 255)) # Print the angle to the PC. print(angle) wait(10) ###Output _____no_output_____ ###Markdown This script will run on the vehicle robotFor now we just save it. We will run it later. ###Code %%file build/car.py from pybricks.pupdevices import Motor from pybricks.parameters import Port, Direction from pybricks.tools import wait, StopWatch from pybricks.experimental import getchar car_motor = Motor(Port.A) while True: # The car listens to characters from the PC. char = getchar() # If we have something, use it to set the duty cycle # of the motor, after scaling 255 to 100 if char is not None: car_motor.dc(char * 100 // 255) wait(5) ###Output _____no_output_____ ###Markdown This will run on the PCFor now we just save the function. We will run it later. ###Code # We can do anything we want with the data, like displaying it. # In this example, we just send the received number from the # remote to the car as a single byte async def forwarder(car_hub, remote_hub): # Give the hubs some time to start await sleep(2) while car_hub.state == car_hub.RUNNING or remote_hub.state == remote_hub.RUNNING: # Check if the remote has printed anything if len(remote_hub.output) > 0: # If so, let's see what the most recent value is line = remote_hub.output[-1] # In this demo, we are only interested in the most recent value, # so we can clear everything else. remote_hub.output = [] # Try to convert it to a number and send it as one byte to the receiving hub try: angle = int(line.decode()) await car_hub.write(bytes([angle])) except: pass await sleep(0.05) ###Output _____no_output_____ ###Markdown Run everythingThis runs both robot scripts and the PC script at the same time.It keeps running until you end both hub scripts using the button. ###Code await gather( remote_hub.run('build/remote.py', print_output=False), car_hub.run('build/car.py', print_output=False), forwarder(car_hub, remote_hub) ) # Disconnect (optional) await car_hub.disconnect() await remote_hub.disconnect() ###Output _____no_output_____
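###Markdown A small robustness note: if the dial ever reports a value outside 0-255 (for example while the remote program is still starting up), `bytes([angle])` would raise a `ValueError`. The sketch below clamps the value first. It is plain Python and makes no assumptions about the pybricksdev API beyond the `car_hub.write` call already used above; the helper name `to_byte` is ours. ###Code
def to_byte(value, low=0, high=255):
    """Clamp an integer into the low-high range and pack it as a single byte."""
    return bytes([max(low, min(high, int(value)))])

# Inside forwarder(), the write call could then become:
#     await car_hub.write(to_byte(angle))
print(to_byte(300), to_byte(-5), to_byte(128))
###Output _____no_output_____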
models/3_multiple_linear_regression.ipynb
###Markdown multiple regression y = X Bwhere: y = output (dependent) X = input (independent) B = beeta values X' * y = X'XB(X'X)^-1 X' y = (X'X)^-1 X'X B(X'X)^-1 X'y = B ###Code X = np.ones((diabetes_X_train.shape[0], diabetes_X_train.shape[1]+1)) X[:,1:] = diabetes_X_train X.shape X_transpose = np.matrix.transpose(X) B = np.matmul(np.matmul(np.linalg.inv(np.matmul(X_transpose, X)), X_transpose), diabetes_y_train) X_test = np.ones((diabetes_X_test.shape[0], diabetes_X_test.shape[1]+1)) X_test[:,1:] = diabetes_X_test y_pred = np.matmul(X_test, B) # The coefficients print('Coefficients:', B[1:]) # The intercept print('Intercept:', B[0]) # The mean squared error print("Mean squared error: %.4f" % mean_squared_error(diabetes_y_test, y_pred)) # Explained variance score: 1 is perfect prediction print('Variance score: %.4f' % r2_score(diabetes_y_test, y_pred)) ###Output Coefficients: [ -4.06035767 -266.98169237 547.20535959 279.52632656 -394.10840711 115.92175016 -28.24786192 182.52226293 627.99560396 106.18549789] Intercept: 152.6883674111716 Mean squared error: 2742.7850 Variance score: 0.5171 ###Markdown Validate our model with the sklearn package. ###Code # Create linear regression object regr = linear_model.LinearRegression() # Train the model using the training sets regr.fit(diabetes_X_train, diabetes_y_train) # Make predictions using the testing set diabetes_y_pred = regr.predict(diabetes_X_test) # The coefficients print('Coefficients:', regr.coef_) # The intercept print('Intercept:', regr.intercept_) # The mean squared error print("Mean squared error: %.4f" % mean_squared_error(diabetes_y_test, diabetes_y_pred)) # Explained variance score: 1 is perfect prediction print('Variance score: %.4f' % r2_score(diabetes_y_test, diabetes_y_pred)) ###Output Coefficients: [ -4.06035767 -266.98169237 547.20535959 279.52632656 -394.10840711 115.92175016 -28.24786192 182.52226293 627.99560396 106.18549789] Intercept: 152.6883674111717 Mean squared error: 2742.7850 Variance score: 0.5171
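###Markdown Forming the inverse of X'X explicitly works here, but it can be numerically fragile when the columns of X are nearly collinear. As a hedged alternative (not part of the original comparison), the same fit can be obtained with `numpy.linalg.lstsq`, which solves the least-squares problem without building an explicit inverse. The sketch assumes the `X`, `X_test`, `diabetes_y_train`, and `diabetes_y_test` arrays constructed above. ###Code
import numpy as np
from sklearn.metrics import mean_squared_error, r2_score

# Solve min ||X B - y||^2 directly instead of computing (X'X)^-1 X'y
B_lstsq, residuals, rank, singular_values = np.linalg.lstsq(X, diabetes_y_train, rcond=None)

y_pred_lstsq = X_test @ B_lstsq
print('Coefficients:', B_lstsq[1:])
print('Intercept:', B_lstsq[0])
print("Mean squared error: %.4f" % mean_squared_error(diabetes_y_test, y_pred_lstsq))
print('Variance score: %.4f' % r2_score(diabetes_y_test, y_pred_lstsq))
###Output _____no_output_____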
21 - Customer Analytics in Python/9_Modeling Brand Choice/4_Own Price Brand Choice Elasticity (5:31)/Purchase Analytics Predictive Analysis 10.4.ipynb
###Markdown Libraries ###Code import numpy as np import pandas as pd from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA from sklearn.cluster import KMeans import pickle from sklearn.linear_model import LogisticRegression import matplotlib.pyplot as plt import matplotlib.axes as axs import seaborn as sns sns.set() ###Output _____no_output_____ ###Markdown Data Preparation ###Code df_purchase = pd.read_csv('purchase data.csv') scaler = pickle.load(open('scaler.pickle', 'rb')) pca = pickle.load(open('pca.pickle', 'rb')) kmeans_pca = pickle.load(open('kmeans_pca.pickle', 'rb')) features = df_purchase[['Sex', 'Marital status', 'Age', 'Education', 'Income', 'Occupation', 'Settlement size']] df_purchase_segm_std = scaler.transform(features) df_purchase_segm_pca = pca.transform(df_purchase_segm_std) purchase_segm_kmeans_pca = kmeans_pca.predict(df_purchase_segm_pca) df_purchase_predictors = df_purchase.copy() df_purchase_predictors['Segment'] = purchase_segm_kmeans_pca segment_dummies = pd.get_dummies(purchase_segm_kmeans_pca, prefix = 'Segment', prefix_sep = '_') df_purchase_predictors = pd.concat([df_purchase_predictors, segment_dummies], axis = 1) df_pa = df_purchase_predictors ###Output _____no_output_____ ###Markdown Purchase Probability Model ###Code Y = df_pa['Incidence'] X = pd.DataFrame() X['Mean_Price'] = (df_pa['Price_1'] + df_pa['Price_2'] + df_pa['Price_3'] + df_pa['Price_4'] + df_pa['Price_5'] ) / 5 model_purchase = LogisticRegression(solver = 'sag') model_purchase.fit(X, Y) model_purchase.coef_ ###Output _____no_output_____ ###Markdown Price Elasticity of Purchase Probability ###Code df_pa[['Price_1', 'Price_2', 'Price_3', 'Price_4', 'Price_5']].describe() price_range = np.arange(0.5, 3.5, 0.01) price_range df_price_range = pd.DataFrame(price_range) Y_pr = model_purchase.predict_proba(df_price_range) purchase_pr = Y_pr[:][:, 1] pe = model_purchase.coef_[:, 0] * price_range * (1 - purchase_pr) df_price_elasticities = pd.DataFrame(price_range) df_price_elasticities = df_price_elasticities.rename(columns = {0: "Price_Point"}) df_price_elasticities['Mean_PE'] = pe df_price_elasticities pd.options.display.max_rows = None df_price_elasticities plt.figure(figsize = (9, 6)) plt.plot(price_range, pe, color = 'grey') plt.xlabel('Price') plt.ylabel('Elasticity') plt.title('Price Elasticity of Purchase Probability') ###Output _____no_output_____ ###Markdown Purchase Probability by Segments $\color{green}{\text{Segment 1 - Career-Focused}}$ ###Code df_pa_segment_1 = df_pa[df_pa['Segment'] == 1] Y = df_pa_segment_1['Incidence'] X = pd.DataFrame() X['Mean_Price'] = (df_pa_segment_1['Price_1'] + df_pa_segment_1['Price_2'] + df_pa_segment_1['Price_3'] + df_pa_segment_1['Price_4'] + df_pa_segment_1['Price_5']) / 5 model_incidence_segment_1 = LogisticRegression(solver = 'sag') model_incidence_segment_1.fit(X, Y) model_incidence_segment_1.coef_ Y_segment_1 = model_incidence_segment_1.predict_proba(df_price_range) purchase_pr_segment_1 = Y_segment_1[:][:, 1] pe_segment_1 = model_incidence_segment_1.coef_[:, 0] * price_range * (1 - purchase_pr_segment_1) ###Output _____no_output_____ ###Markdown Results ###Code df_price_elasticities['PE_Segment_1'] = pe_segment_1 plt.figure(figsize = (9, 6)) plt.plot(price_range, pe, color = 'grey') plt.plot(price_range, pe_segment_1, color = 'green') plt.xlabel('Price') plt.ylabel('Elasticity') plt.title('Price Elasticity of Purchase Probability') ###Output _____no_output_____ ###Markdown $\color{red}{\text{Segment 2 - 
Fewer-Opportunities}}$ ###Code df_pa_segment_2 = df_pa[df_pa['Segment'] == 2] Y = df_pa_segment_2['Incidence'] X = pd.DataFrame() X['Mean_Price'] = (df_pa_segment_2['Price_1'] + df_pa_segment_2['Price_2'] + df_pa_segment_2['Price_3'] + df_pa_segment_2['Price_4'] + df_pa_segment_2['Price_5']) / 5 model_incidence_segment2 = LogisticRegression(solver = 'sag') model_incidence_segment2.fit(X, Y) model_incidence_segment2.coef_ Y_segment_2 = model_incidence_segment2.predict_proba(df_price_range) purchase_pr_segment2 = Y_segment_2[:][: , 1] pe_segment2 = model_incidence_segment2.coef_[:,0] * price_range * ( 1- purchase_pr_segment2) ###Output _____no_output_____ ###Markdown Results ###Code df_price_elasticities['PE_Segment_2'] = pe_segment2 plt.figure(figsize = (9, 6)) plt.plot(price_range, pe, color = 'grey') plt.plot(price_range, pe_segment_1, color = 'green') plt.plot(price_range, pe_segment2, color = 'r') plt.xlabel('Price') plt.ylabel('Elasticity') plt.title('Price Elasticity of Purchase Probability') ###Output _____no_output_____ ###Markdown ${\textbf{Homework}}$ $\color{blue}{\text{Segment 0 - Standard}}$ ###Code df_pa_segment_0 = df_pa[df_pa['Segment'] == 0] Y = df_pa_segment_0['Incidence'] X = pd.DataFrame() X['Mean_Price'] = (df_pa_segment_0['Price_1'] + df_pa_segment_0['Price_2'] + df_pa_segment_0['Price_3'] + df_pa_segment_0['Price_4'] + df_pa_segment_0['Price_5']) / 5 model_incidence_segment0 = LogisticRegression(solver = 'sag') model_incidence_segment0.fit(X, Y) model_incidence_segment0.coef_ Y_segment_0 = model_incidence_segment0.predict_proba(df_price_range) purchase_pr_segment0 = Y_segment_0[:][: , 1] pe_segment0 = model_incidence_segment0.coef_[:,0] * price_range *( 1- purchase_pr_segment0) df_price_elasticities.insert(2, column = 'PE_Segment_0', value = pe_segment0) ###Output _____no_output_____ ###Markdown $\color{orange}{\text{Segment 3 - Well-Off}}$ ###Code df_pa_segment_3 = df_pa[df_pa['Segment'] == 3] Y = df_pa_segment_3['Incidence'] X = pd.DataFrame() X['Mean_Price'] = (df_pa_segment_3['Price_1'] + df_pa_segment_3['Price_2'] + df_pa_segment_3['Price_3'] + df_pa_segment_3['Price_4'] + df_pa_segment_3['Price_5']) / 5 model_incidence_segment3 = LogisticRegression(solver = 'sag') model_incidence_segment3.fit(X, Y) model_incidence_segment3.coef_ Y_segment_3 = model_incidence_segment2.predict_proba(df_price_range) purchase_pr_segment3 = Y_segment_3[:][: , 1] pe_segment3 = model_incidence_segment3.coef_[:,0] * price_range *( 1- purchase_pr_segment3) df_price_elasticities['PE_Segment_3'] = pe_segment3 df_price_elasticities ###Output _____no_output_____ ###Markdown ${\textbf{Results}}$ ###Code plt.figure(figsize = (9, 6)) plt.plot(price_range, pe, color = 'grey') plt.plot(price_range, pe_segment0, color = 'b') plt.plot(price_range, pe_segment_1, color = 'green') plt.plot(price_range, pe_segment2, color = 'r') plt.plot(price_range, pe_segment3, color = 'orange') plt.xlabel('Price') plt.ylabel('Elasticity') plt.title('Price Elasticity of Purchase Probability') ###Output _____no_output_____ ###Markdown Purchase Probability with Promotion Feature Data Preparation ###Code Y = df_pa['Incidence'] X = pd.DataFrame() X['Mean_Price'] = (df_pa['Price_1'] + df_pa['Price_2'] + df_pa['Price_3'] + df_pa['Price_4'] + df_pa['Price_5']) / 5 X['Mean_Promotion'] = (df_pa['Promotion_1'] + df_pa['Promotion_2'] + df_pa['Promotion_3'] + df_pa['Promotion_4'] + df_pa['Promotion_5'] ) / 5 X.head() ###Output _____no_output_____ ###Markdown Model Estimation ###Code model_incidence_promotion = 
LogisticRegression(solver = 'sag') model_incidence_promotion.fit(X, Y) model_incidence_promotion.coef_ ###Output _____no_output_____ ###Markdown Price Elasticity with Promotion ###Code df_price_elasticity_promotion = pd.DataFrame(price_range) df_price_elasticity_promotion = df_price_elasticity_promotion.rename(columns = {0: "Price_Range"}) df_price_elasticity_promotion['Promotion'] = 1 Y_promotion = model_incidence_promotion.predict_proba(df_price_elasticity_promotion) promo = Y_promotion[:, 1] price_elasticity_promo = (model_incidence_promotion.coef_[:, 0] * price_range) * (1 - promo) df_price_elasticities['Elasticity_Promotion_1'] = price_elasticity_promo df_price_elasticities ###Output _____no_output_____ ###Markdown Price Elasticity without Promotion ###Code df_price_elasticity_promotion_no = pd.DataFrame(price_range) df_price_elasticity_promotion_no = df_price_elasticity_promotion_no.rename(columns = {0: "Price_Range"}) df_price_elasticity_promotion_no['Promotion'] = 0 Y_no_promo = model_incidence_promotion.predict_proba(df_price_elasticity_promotion_no) no_promo = Y_no_promo[: , 1] price_elasticity_no_promo = model_incidence_promotion.coef_[:, 0] * price_range *(1- no_promo) df_price_elasticities['Elasticity_Promotion_0'] = price_elasticity_no_promo plt.figure(figsize = (9, 6)) plt.plot(price_range, price_elasticity_no_promo) plt.plot(price_range, price_elasticity_promo) plt.xlabel('Price') plt.ylabel('Elasticity') plt.title('Price Elasticity of Purchase Probability with and without Promotion') ###Output _____no_output_____ ###Markdown ${\textbf{Brand Choice}}$ Data Preparation ###Code brand_choice = df_pa[df_pa['Incidence'] == 1] pd.options.display.max_rows = 100 brand_choice Y = brand_choice['Brand'] brand_choice.columns.values features = ['Price_1', 'Price_2', 'Price_3', 'Price_4', 'Price_5'] X = brand_choice[features] model_brand_choice = LogisticRegression(solver = 'sag', multi_class = 'multinomial') model_brand_choice.fit(X, Y) model_brand_choice.coef_ bc_coef = pd.DataFrame(model_brand_choice.coef_) bc_coef bc_coef = pd.DataFrame(np.transpose(model_brand_choice.coef_)) coefficients = ['Coef_Brand_1', 'Coef_Brand_2', 'Coef_Brand_3', 'Coef_Brand_4', 'Coef_Brand_5'] bc_coef.columns = [coefficients] prices = ['Price_1', 'Price_2', 'Price_3', 'Price_4', 'Price_5'] bc_coef.index = [prices] bc_coef = bc_coef.round(2) bc_coef ###Output _____no_output_____ ###Markdown Own Price Elasticity Brand 5 ###Code df_own_brand_5 = pd.DataFrame(index = np.arange(price_range.size)) df_own_brand_5['Price_1'] = brand_choice['Price_1'].mean() df_own_brand_5['Price_2'] = brand_choice['Price_2'].mean() df_own_brand_5['Price_3'] = brand_choice['Price_3'].mean() df_own_brand_5['Price_4'] = brand_choice['Price_4'].mean() df_own_brand_5['Price_5'] = price_range df_own_brand_5 predict_brand_5 = model_brand_choice.predict_proba(df_own_brand_5) pr_own_brand_5 = predict_brand_5[: ][:, 4] beta5 = bc_coef.iloc[4, 4] beta5 own_price_elasticity_brand_5 = beta5 * price_range * (1 - pr_own_brand_5) df_price_elasticities['Brand_5'] = own_price_elasticity_brand_5 pd.options.display.max_rows = None df_price_elasticities plt.figure(figsize = (9, 6)) plt.plot(price_range, own_price_elasticity_brand_5, color = 'grey') plt.xlabel('Price 5') plt.ylabel('Elasticity') plt.title('Own Price Elasticity of Purchase Probability for Brand 5') ###Output _____no_output_____
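###Markdown The same three-step pattern is repeated throughout this notebook: fit a logistic model, predict probabilities over the price grid, then apply coefficient * price * (1 - probability). A small helper that packages that pattern is sketched below; the function name `price_elasticity` is ours, and the example call assumes the `model_purchase` and `price_range` objects defined earlier. ###Code
import pandas as pd

def price_elasticity(model, X_grid, price_range, coef_index=0, prob_column=1):
    """Elasticity = beta * price * (1 - P(outcome)) along a price grid."""
    prob = model.predict_proba(X_grid)[:, prob_column]
    return model.coef_[:, coef_index] * price_range * (1 - prob)

# Reproduces the aggregate purchase-probability elasticity computed above
pe_check = price_elasticity(model_purchase, pd.DataFrame(price_range), price_range)
print(pe_check[:5])
###Output _____no_output_____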
code/Agglomerative-Clustering.ipynb
###Markdown Primary Experiment: Number of Clusters ###Code MAX_CLUSTERS = X_principal.shape[0] experiment_results = pd.DataFrame() for num_cluster in range(2, MAX_CLUSTERS): ac = AgglomerativeClustering(n_clusters = num_cluster).fit(X_principal) acc, ss, db, ch, hs, cs, vs, ar, am = get_scores(X_principal, ac.labels_, num_cluster) result = {'Num. Clusters': num_cluster, 'Accuracy': acc, 'Silhouette Score': ss, 'Davies-Bouldin Score': db, 'Calinski-Harabasz Score': ch, 'Homogeneity Score': hs, 'Completeness Score': cs, 'V Measure Score': vs, 'Adjusted Rand Score': ar, 'Adjusted Mutual Info Score': am} experiment_results = experiment_results.append(result, ignore_index = True) if(DATASET == 'iris'): x_axis = 5 elif(DATASET == 'spiral'): x_axis = 8 x = experiment_results['Num. Clusters'].iloc[0:x_axis] accuracy = experiment_results['Accuracy'].iloc[0:x_axis] ss = experiment_results['Silhouette Score'].iloc[0:x_axis] bs = experiment_results['Davies-Bouldin Score'].iloc[0:x_axis] hs = experiment_results['Homogeneity Score'].iloc[0:x_axis] cs = experiment_results['Completeness Score'].iloc[0:x_axis] vs = experiment_results['V Measure Score'].iloc[0:x_axis] ar = experiment_results['Adjusted Rand Score'].iloc[0:x_axis] am = experiment_results['Adjusted Mutual Info Score'].iloc[0:x_axis] ch = experiment_results['Calinski-Harabasz Score'].iloc[0:x_axis] # Plot Configuration fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16,5)) fig.suptitle('Agglomerative Clustering') # ax1.plot(x, accuracy, 'gx-', label="Accuracy") ax1.plot(x, ss, 'bx-', label="Silhouette Score") ax1.plot(x, bs, 'rx-', label="Davies-Bouldin Score") ax1.plot(x, hs, 'cx-', label="Homogeneity Score") # ax1.plot(x, cs, 'mx-', label="Completeness Score") # ax1.plot(x, vs, 'yx-', label="V Measure Score") # ax1.plot(x, ar, 'kx-', label="Adjusted Rand Score") # ax1.plot(x, am, 'gx-', label="Adjusted Mutual Info Score") ax1.set_xlabel('Num. Clusters') ax1.set_ylabel('Scores') ax1.legend() ax1.set_xticks(np.arange(min(x), max(x)+1, 1)) ax2.plot(x, ch, 'bx-', label = "Calinski-Harabasz Score") ax2.set_xlabel('Num. 
Clusters') ax2.set_ylabel('Scores') ax2.legend() ax2.set_xticks(np.arange(min(x), max(x)+1, 1)) plt.show() fig.savefig(f'{DATASET}_clustering.png', bbox_inches='tight') ###Output _____no_output_____ ###Markdown Secondary Experiment: Linkage Styles ###Code if(DATASET == 'iris'): NUM_CLUSTERS = 3 elif(DATASET == 'spiral'): NUM_CLUSTERS = 5 linkage_types = ['ward', 'complete', 'average', 'single'] results = pd.DataFrame() for linkage_type in linkage_types: ac = AgglomerativeClustering(n_clusters = NUM_CLUSTERS, linkage = linkage_type).fit(X_principal) result = get_secondary_exp_result(X_principal, ac.labels_, NUM_CLUSTERS, 'Linkage Type', linkage_type) results = results.append(result, ignore_index = True) results.T ###Output _____no_output_____ ###Markdown Secondary Experiment: Affinity Styles ###Code affinity_types = ['euclidean', 'l1', 'l2', 'manhattan', 'cosine'] results = pd.DataFrame() for affinity_type in affinity_types: ac = AgglomerativeClustering(n_clusters = NUM_CLUSTERS, linkage = 'complete', affinity = affinity_type).fit(X_principal) result = get_secondary_exp_result(X_principal, ac.labels_, NUM_CLUSTERS, 'Affinity Type', affinity_type) results = results.append(result, ignore_index = True) results.T ###Output _____no_output_____ ###Markdown Secondary Experiment: Full Tree Computation ###Code compute_full_tree_types = ['auto', True, False] results = pd.DataFrame() for compute_full_tree_type in compute_full_tree_types: ac = AgglomerativeClustering(n_clusters = NUM_CLUSTERS, compute_full_tree = compute_full_tree_type).fit(X_principal) result = get_secondary_exp_result(X_principal, ac.labels_, NUM_CLUSTERS, 'Compute Full Tree', compute_full_tree_type) results = results.append(result, ignore_index = True) results.T ###Output _____no_output_____
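###Markdown The three secondary experiments above all have the same shape: vary one `AgglomerativeClustering` keyword, fit, and collect scores. A generic loop over a parameter grid is sketched below. It assumes `X_principal`, `NUM_CLUSTERS`, and the notebook's `get_secondary_exp_result` helper; the `param_grid` dictionary is ours, and it keeps the same `affinity` keyword and `DataFrame.append` call used above (both are deprecated in newer library versions). ###Code
import pandas as pd
from sklearn.cluster import AgglomerativeClustering

param_grid = {
    'linkage': ['ward', 'complete', 'average', 'single'],
    'affinity': ['euclidean', 'l1', 'l2', 'manhattan', 'cosine'],
    'compute_full_tree': ['auto', True, False],
}

all_results = {}
for param, values in param_grid.items():
    rows = pd.DataFrame()
    for value in values:
        kwargs = {param: value}
        if param == 'affinity':
            kwargs['linkage'] = 'complete'   # ward linkage only supports euclidean distances
        ac = AgglomerativeClustering(n_clusters = NUM_CLUSTERS, **kwargs).fit(X_principal)
        row = get_secondary_exp_result(X_principal, ac.labels_, NUM_CLUSTERS, param, value)
        rows = rows.append(row, ignore_index = True)
    all_results[param] = rows

all_results['linkage'].T
###Output _____no_output_____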
Woche 1/1_4_2_Grundlagen_Python.ipynb
###Markdown ###Code
def addiere_zwei_zahlen(erste_zahl, zweite_zahl):
    print("Ich addiere " + str(erste_zahl) + " und " + str(zweite_zahl))
    summe = erste_zahl + zweite_zahl
    return summe
###Output _____no_output_____
###Markdown 1.4 Python Basics By www.python.org - www.python.org, GPL, Link
With this knowledge we can build our first interactive Python calculator. For that we need roughly:
- a Python function to ask the user for input (Python provides a built-in function for this)
- a Python function to add numbers
- a Python function to report the result
- a Python function that combines these steps
A note up front: many roads lead to Rome, and there is more than one way to develop a program. You may well think of another way to build the calculator straight away - and that is great! So if you are wondering whether our solution could be built "more simply" or "more efficiently": certainly.
Asking the user for input: with the function `input`, Python offers a way to request data from the user. So we can begin:
###Code
erste_zahl = input("Was soll die erste Zahl sein? ")
zweite_zahl = input("Was soll die zweite Zahl sein? ")
###Output _____no_output_____
###Markdown Now we have stored our first numbers. We can reuse our function `addiere_zwei_zahlen`:
###Code
summe = addiere_zwei_zahlen(erste_zahl, zweite_zahl)
print(summe)
###Output _____no_output_____
###Markdown Debugging: that does not look like what we wanted. Why is that? A frequent part of development is so-called **debugging**, where we try to understand why our code does not work the way we want it to. Let us look at the data type of the variables that `input` produced:
###Code
print(type(erste_zahl))
print(type(zweite_zahl))
###Output _____no_output_____
###Markdown We can see that `input` stores its input as a string, so we have to convert the values. Since we also want to be able to add decimal numbers, we convert the input to `float`:
###Code
erste_zahl = float(erste_zahl)
zweite_zahl = float(zweite_zahl)
print(type(erste_zahl))
print(type(zweite_zahl))
###Output _____no_output_____
###Markdown Does the output look right now?
###Code
summe = addiere_zwei_zahlen(erste_zahl, zweite_zahl)
print(summe)
###Output _____no_output_____
###Markdown Now we want to report the result back to our user. For this we use `print` again and add a short message:
###Code
print("Das Ergebnis ist:", summe)
###Output _____no_output_____
###Markdown Finally, we want to put everything into a reusable function:
###Code
def interaktives_addieren():
    erste_zahl = float(input("Was soll die erste Zahl sein? "))
    zweite_zahl = float(input("Was soll die zweite Zahl sein? "))
    summe = addiere_zwei_zahlen(erste_zahl, zweite_zahl)
    print("Das Ergebnis ist:", summe)
###Output _____no_output_____
###Markdown Now we can call our function:
###Code
interaktives_addieren()
###Output _____no_output_____
###Markdown If-then rules: our calculator can now add - but not yet subtract. We want to add that. A simple but enormously powerful concept in programming is `if-then-else`. The easiest way to see it is with an example:
###Code
def addiere_oder_subtrahiere(erste_zahl, zweite_zahl, ist_addition):
    if ist_addition == True:
        print("Ich addiere " + str(erste_zahl) + " mit " + str(zweite_zahl))
        ergebnis = erste_zahl + zweite_zahl
    else:
        print("Ich subtrahiere " + str(zweite_zahl) + " von " + str(erste_zahl))
        ergebnis = erste_zahl - zweite_zahl
    # we can simply overwrite variables
    ergebnis = "Das Ergebnis ist: " + str(ergebnis)
    return ergebnis

print(addiere_oder_subtrahiere(20, 10, True))
print(addiere_oder_subtrahiere(20, 10, False))
###Output _____no_output_____
###Markdown Lists: so far we have always looked at single values. Python offers the list construct, which lets us store several values in one variable:
###Code
eine_liste = ["Christian", "Johannes", "Henrik", "Simon"]
###Output _____no_output_____
###Markdown Loops: lists become interesting when we combine them with loops. For example, we can iterate over every name in the list and run some logic for each one:
###Code
for name in eine_liste:
    print("Hallo, ich heiße", name)
###Output _____no_output_____
###Markdown Key-value structures: just as important as lists are structures that let us store arbitrary values under a key. These structures are called dictionaries. They are useful, for example, for counting occurrences:
###Code
farben = ["rot", "grün", "blau", "blau", "gelb", "rot", "rot", "blau", "gelb", "orange", "blau"]
haufigkeiten = {
    "rot": 0,
    "grün": 0,
    "blau": 0,
    "gelb": 0,
    "orange": 0
}
for farbe in farben:
    haufigkeiten[farbe] += 1
print(haufigkeiten)
###Output _____no_output_____
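###Markdown The counting pattern above is so common that the standard library ships a ready-made version. As a small aside (not part of the original lesson), `collections.Counter` builds the same frequency table without pre-initialising the keys: ###Code
from collections import Counter

farben = ["rot", "grün", "blau", "blau", "gelb", "rot", "rot", "blau", "gelb", "orange", "blau"]
haufigkeiten = Counter(farben)
print(haufigkeiten)
###Output _____no_output_____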
Iris dataset/algoritmo-de-hunt.ipynb
###Markdown Vamos analisar os dados que temos para tentar encontrar padrões entre as espécies:Podemos ver que a penúltima variável, PetalWidth, apresenta notáveis padrões para as espécies:* Para um PetalWidth menor que 1, a flor será Iris-setosa* Para um PetalWidth maior que 0.9 e menor que 1.8, a flor será Iris-versicolor* Para um PetalWidth maior que 1.8, a flor será Iris-virginicaNo entanto, observa-se também que existem pontos "fora da curva" entre Iris-versicolor e Iris-virginica (existem Iris-virginica com um PetalWidth menor que 1.8). Para solucionar isso podemos observar outra variável,PetalLengthCm.Temos que:* Para um PetalLengthCm menor que 2, a flor será Iris-setosa* Para um PetalLengthCm maior que 2 e menor que 5, a flor será Iris-versicolor* Para um PetalLengthCm maior que 5, a flor será Iris-virginicaPodemos combinar essas duas "regras" que encontramos para analisarmos os tipos de flor. Ademais, agora que consideramos também o PetalLengthCm, podemos diminuir o valor de PetalWidth que assumirá que a flor é uma Iris-versicolor, de 1.8 para 1.5. ###Code #fazendo o algoritmo de Hunt conterros = 0 for i in range(0,len(teste)): print(teste[i]) #verificando primeiro a variável PetalWidth if(teste[i][4]<1): print("Tipo: Iris-setosa") #verificando se a classificação foi correta, caso não, avisa if(teste[i][5]!="Iris-setosa"): print("erro :(") conterros = conterros + 1 elif(teste[i][4]<1.8): #provavelmete Iris-versicolor, mas é necessário verificar! if(teste[i][3]<5): #2<=PetalLength<5 => Iris-versicolor print("Tipo: Iris-versicolor") if(teste[i][5]!="Iris-versicolor"): print("erro :(") print("Tipo correto: ",teste[i][5]) conterros = conterros + 1 else: #PetalLength>=5 => Iris-versicolor print("Tipo: Iris-virginica") if(teste[i][5]!="Iris-virginica"): print("erro :(") print("Tipo correto: ",teste[i][5]) conterros = conterros + 1 else: #caso não se encaixe nos testes anteriores, obrigatoriamente será uma Iris-virginica print("Tipo: Iris-virginica") if(teste[i][5]!="Iris-virginica"): print("erro :(") print("Tipo correto: ",teste[i][5]) conterros = conterros + 1 print("---"); print("Número de casos falhos: ",conterros) print("Taxa de acertos: ",100-(conterros*100/tamanho),"%") ###Output [1.0, 5.1, 3.5, 1.4, 0.2, 'Iris-setosa'] Tipo: Iris-setosa --- [2.0, 4.9, 3.0, 1.4, 0.2, 'Iris-setosa'] Tipo: Iris-setosa --- [3.0, 4.7, 3.2, 1.3, 0.2, 'Iris-setosa'] Tipo: Iris-setosa --- [4.0, 4.6, 3.1, 1.5, 0.2, 'Iris-setosa'] Tipo: Iris-setosa --- [6.0, 5.4, 3.9, 1.7, 0.4, 'Iris-setosa'] Tipo: Iris-setosa --- [10.0, 4.9, 3.1, 1.5, 0.1, 'Iris-setosa'] Tipo: Iris-setosa --- [11.0, 5.4, 3.7, 1.5, 0.2, 'Iris-setosa'] Tipo: Iris-setosa --- [12.0, 4.8, 3.4, 1.6, 0.2, 'Iris-setosa'] Tipo: Iris-setosa --- [13.0, 4.8, 3.0, 1.4, 0.1, 'Iris-setosa'] Tipo: Iris-setosa --- [14.0, 4.3, 3.0, 1.1, 0.1, 'Iris-setosa'] Tipo: Iris-setosa --- [16.0, 5.7, 4.4, 1.5, 0.4, 'Iris-setosa'] Tipo: Iris-setosa --- [17.0, 5.4, 3.9, 1.3, 0.4, 'Iris-setosa'] Tipo: Iris-setosa --- [19.0, 5.7, 3.8, 1.7, 0.3, 'Iris-setosa'] Tipo: Iris-setosa --- [20.0, 5.1, 3.8, 1.5, 0.3, 'Iris-setosa'] Tipo: Iris-setosa --- [21.0, 5.4, 3.4, 1.7, 0.2, 'Iris-setosa'] Tipo: Iris-setosa --- [25.0, 4.8, 3.4, 1.9, 0.2, 'Iris-setosa'] Tipo: Iris-setosa --- [26.0, 5.0, 3.0, 1.6, 0.2, 'Iris-setosa'] Tipo: Iris-setosa --- [29.0, 5.2, 3.4, 1.4, 0.2, 'Iris-setosa'] Tipo: Iris-setosa --- [34.0, 5.5, 4.2, 1.4, 0.2, 'Iris-setosa'] Tipo: Iris-setosa --- [35.0, 4.9, 3.1, 1.5, 0.1, 'Iris-setosa'] Tipo: Iris-setosa --- [37.0, 5.5, 3.5, 1.3, 0.2, 'Iris-setosa'] Tipo: 
Iris-setosa --- [38.0, 4.9, 3.1, 1.5, 0.1, 'Iris-setosa'] Tipo: Iris-setosa --- [39.0, 4.4, 3.0, 1.3, 0.2, 'Iris-setosa'] Tipo: Iris-setosa --- [41.0, 5.0, 3.5, 1.3, 0.3, 'Iris-setosa'] Tipo: Iris-setosa --- [43.0, 4.4, 3.2, 1.3, 0.2, 'Iris-setosa'] Tipo: Iris-setosa --- [45.0, 5.1, 3.8, 1.9, 0.4, 'Iris-setosa'] Tipo: Iris-setosa --- [46.0, 4.8, 3.0, 1.4, 0.3, 'Iris-setosa'] Tipo: Iris-setosa --- [47.0, 5.1, 3.8, 1.6, 0.2, 'Iris-setosa'] Tipo: Iris-setosa --- [48.0, 4.6, 3.2, 1.4, 0.2, 'Iris-setosa'] Tipo: Iris-setosa --- [49.0, 5.3, 3.7, 1.5, 0.2, 'Iris-setosa'] Tipo: Iris-setosa --- [50.0, 5.0, 3.3, 1.4, 0.2, 'Iris-setosa'] Tipo: Iris-setosa --- [52.0, 6.4, 3.2, 4.5, 1.5, 'Iris-versicolor'] Tipo: Iris-versicolor --- [53.0, 6.9, 3.1, 4.9, 1.5, 'Iris-versicolor'] Tipo: Iris-versicolor --- [54.0, 5.5, 2.3, 4.0, 1.3, 'Iris-versicolor'] Tipo: Iris-versicolor --- [58.0, 4.9, 2.4, 3.3, 1.0, 'Iris-versicolor'] Tipo: Iris-versicolor --- [59.0, 6.6, 2.9, 4.6, 1.3, 'Iris-versicolor'] Tipo: Iris-versicolor --- [60.0, 5.2, 2.7, 3.9, 1.4, 'Iris-versicolor'] Tipo: Iris-versicolor --- [63.0, 6.0, 2.2, 4.0, 1.0, 'Iris-versicolor'] Tipo: Iris-versicolor --- [64.0, 6.1, 2.9, 4.7, 1.4, 'Iris-versicolor'] Tipo: Iris-versicolor --- [65.0, 5.6, 2.9, 3.6, 1.3, 'Iris-versicolor'] Tipo: Iris-versicolor --- [66.0, 6.7, 3.1, 4.4, 1.4, 'Iris-versicolor'] Tipo: Iris-versicolor --- [67.0, 5.6, 3.0, 4.5, 1.5, 'Iris-versicolor'] Tipo: Iris-versicolor --- [71.0, 5.9, 3.2, 4.8, 1.8, 'Iris-versicolor'] Tipo: Iris-virginica erro :( Tipo correto: Iris-versicolor --- [75.0, 6.4, 2.9, 4.3, 1.3, 'Iris-versicolor'] Tipo: Iris-versicolor --- [77.0, 6.8, 2.8, 4.8, 1.4, 'Iris-versicolor'] Tipo: Iris-versicolor --- [78.0, 6.7, 3.0, 5.0, 1.7, 'Iris-versicolor'] Tipo: Iris-virginica erro :( Tipo correto: Iris-versicolor --- [81.0, 5.5, 2.4, 3.8, 1.1, 'Iris-versicolor'] Tipo: Iris-versicolor --- [82.0, 5.5, 2.4, 3.7, 1.0, 'Iris-versicolor'] Tipo: Iris-versicolor --- [85.0, 5.4, 3.0, 4.5, 1.5, 'Iris-versicolor'] Tipo: Iris-versicolor --- [90.0, 5.5, 2.5, 4.0, 1.3, 'Iris-versicolor'] Tipo: Iris-versicolor --- [91.0, 5.5, 2.6, 4.4, 1.2, 'Iris-versicolor'] Tipo: Iris-versicolor --- [94.0, 5.0, 2.3, 3.3, 1.0, 'Iris-versicolor'] Tipo: Iris-versicolor --- [99.0, 5.1, 2.5, 3.0, 1.1, 'Iris-versicolor'] Tipo: Iris-versicolor --- [100.0, 5.7, 2.8, 4.1, 1.3, 'Iris-versicolor'] Tipo: Iris-versicolor --- [101.0, 6.3, 3.3, 6.0, 2.5, 'Iris-virginica'] Tipo: Iris-virginica --- [102.0, 5.8, 2.7, 5.1, 1.9, 'Iris-virginica'] Tipo: Iris-virginica --- [104.0, 6.3, 2.9, 5.6, 1.8, 'Iris-virginica'] Tipo: Iris-virginica --- [105.0, 6.5, 3.0, 5.8, 2.2, 'Iris-virginica'] Tipo: Iris-virginica --- [106.0, 7.6, 3.0, 6.6, 2.1, 'Iris-virginica'] Tipo: Iris-virginica --- [107.0, 4.9, 2.5, 4.5, 1.7, 'Iris-virginica'] Tipo: Iris-versicolor erro :( Tipo correto: Iris-virginica --- [108.0, 7.3, 2.9, 6.3, 1.8, 'Iris-virginica'] Tipo: Iris-virginica --- [112.0, 6.4, 2.7, 5.3, 1.9, 'Iris-virginica'] Tipo: Iris-virginica --- [113.0, 6.8, 3.0, 5.5, 2.1, 'Iris-virginica'] Tipo: Iris-virginica --- [119.0, 7.7, 2.6, 6.9, 2.3, 'Iris-virginica'] Tipo: Iris-virginica --- [121.0, 6.9, 3.2, 5.7, 2.3, 'Iris-virginica'] Tipo: Iris-virginica --- [122.0, 5.6, 2.8, 4.9, 2.0, 'Iris-virginica'] Tipo: Iris-virginica --- [123.0, 7.7, 2.8, 6.7, 2.0, 'Iris-virginica'] Tipo: Iris-virginica --- [124.0, 6.3, 2.7, 4.9, 1.8, 'Iris-virginica'] Tipo: Iris-virginica --- [127.0, 6.2, 2.8, 4.8, 1.8, 'Iris-virginica'] Tipo: Iris-virginica --- [130.0, 7.2, 3.0, 5.8, 1.6, 'Iris-virginica'] Tipo: 
Iris-virginica --- [132.0, 7.9, 3.8, 6.4, 2.0, 'Iris-virginica'] Tipo: Iris-virginica --- [133.0, 6.4, 2.8, 5.6, 2.2, 'Iris-virginica'] Tipo: Iris-virginica --- [134.0, 6.3, 2.8, 5.1, 1.5, 'Iris-virginica'] Tipo: Iris-virginica --- [135.0, 6.1, 2.6, 5.6, 1.4, 'Iris-virginica'] Tipo: Iris-virginica --- [139.0, 6.0, 3.0, 4.8, 1.8, 'Iris-virginica'] Tipo: Iris-virginica --- [140.0, 6.9, 3.1, 5.4, 2.1, 'Iris-virginica'] Tipo: Iris-virginica --- [143.0, 5.8, 2.7, 5.1, 1.9, 'Iris-virginica'] Tipo: Iris-virginica --- [144.0, 6.8, 3.2, 5.9, 2.3, 'Iris-virginica'] Tipo: Iris-virginica --- [146.0, 6.7, 3.0, 5.2, 2.3, 'Iris-virginica'] Tipo: Iris-virginica --- [148.0, 6.5, 3.0, 5.2, 2.0, 'Iris-virginica'] Tipo: Iris-virginica --- [149.0, 6.2, 3.4, 5.4, 2.3, 'Iris-virginica'] Tipo: Iris-virginica --- Número de casos falhos: 3 Taxa de acertos: 97.11538461538461 %
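###Markdown The decision rules above can also be written as a single classification function, which makes the accuracy calculation a one-liner instead of a manual error counter. This is a minimal sketch assuming the `teste` list of rows used above (index 3 = PetalLengthCm, index 4 = PetalWidthCm, index 5 = species); the function name `classify` is ours. ###Code
def classify(row):
    """Hunt-style decision rules: check PetalWidth first, then PetalLength."""
    petal_length, petal_width = row[3], row[4]
    if petal_width < 1:
        return "Iris-setosa"
    if petal_width < 1.8:
        return "Iris-versicolor" if petal_length < 5 else "Iris-virginica"
    return "Iris-virginica"

errors = sum(1 for row in teste if classify(row) != row[5])
print("Number of failed cases:", errors)
print("Accuracy:", 100 - errors * 100 / len(teste), "%")
###Output _____no_output_____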
ProgDA_ProjectDec2019.ipynb
###Markdown Project 2019 Programming for Data Analysis* Choose a real-world phenomenon that can be measured and for which you could collect at least one-hundred data points across at least four different variables.* Investigate the types of variables involved, their likely distributions, and their relationships with each other* Synthesise/simulate a data set as closely matching their properties as possible.* Detail your research and implement the simulation in a Jupyter notebook – the data set itself can simply be displayed in an output cell within the notebook. Section 1 Choose a real-world phenomenon that can be measured and for which you could collect at least one-hundred data points across at least four different variables. For this project, I have selected a dataset that is available from the Irish Government's open data project to research, investigate then simulate some of the variables.The Open Data project is an initiative by the government of Ireland that makes data held by public bodies available and easily accessible online for reuse and redistribution to create interest and encourage engagement with open data.I have chosen the [Office of Public Works Heritage Site Details](https://www.opw.ie/en/media/opw-heritage-site-details.csv) open dataset which contains one hundred data points across twenty-four variables and was collected in 2015.The Office of Public Works (OPW) is a Government department with responsibility for the day-to-day running of all National Monuments in State care and National Historic Properties. The real-world phenomenon that is presented is a collection of information relating to the Heritage Sites that are open to the public. I chose this dataset because it is of interest to me in my professional life. In the next section of the project I will explore the kinds of variables that appear in a dataset relating to Heritage Sites, the relationships (if any) between variables and the distributions that are apparent. *** Section 2 Investigate the types of variables involved, their likely distributions, and their relationships with each other **Investigation of the original OPW dataset**In order to simulate a dataset on the subject of Heritage States owned by the State/ citizens of Ireland I must investigate a pre-existing one.[Office of Public Works Heritage Site Details](https://www.opw.ie/en/media/opw-heritage-site-details.csv)Considering that the dataset was made available as part of a government initiative to create interest and encourage engagement with open data - it was of poor quality.* The original dataset is relatively small - 100 records and 24 variables - however, the csv file was un-necessarily large when loading due to the inclusion of digits on line 2424 of the original spreadsheet.* The financial data - in this case cost of entry for different demographics was put together in one column along with other visitor information. This created difficulties for my investigation.I have adjusted it separately, re-save this truncated version in this github repository and continued the project with the updated format.**Observations On The Types of Variables**The dataset contains information about 100 unique, named Heritage Sites managed by The Office of Public Works collected in 2015.There are 24 different variables in the original dataset, most of which relate to visitor information e.g. 
GPS co-ordinates and contact details for the site.The following points are relevant to this exercise and the objective of synthesising data set in a methodical way which can match the contents. Heritage Site Name* Every Heritage Site name is a unique object Pricing structures in Euro, Datatype: Integer* Adult entrance price - an integer between 0 and 12 * Senior / Group entrance price - an integer between 0 and 9 * Child entrance price - an integer between 0 and 7 * Student entrance price - an integer between 0 and 8 * Family entrance price - an integer between 0 and 32 * 51% of the sites have free admission, 35% have an adult entrance fee of €5.* When an entrance fee is paid, there is a price point for all types of visitors.* An individual adult is the most expensive ticket with all others reducing by 1 or 2 euro from that point* A family ticket is approximately the same price as the sum of two adult plus one child tickets Visitor Numbers, Datatype : Integer* 2015 Visitor Numbers contain integers that range from 0 - 553,348. As previously state there is a strong relationship between the Region and Visitor Numbers.* 31 of the entries for 2015 Visitor Numbers contain a null value.* The remaining 69 datapoints show that visitor numbers range from 1750 to 553348* The total number of visitors is 5.1 million people Geographical Location, Datatype : Object* The county where the Heritage Site is located affects the Regional classification, if this information were to be shuffled, the county/region need to be linked.* There is no relationship between the number of Heritage Sites in a county and the visitor numbers.* There is a strong relationship between the Region and the Visitor Numbers.* Instead of joining the county and region, I will therefore omit the county variable entirely my reason for this is because the county variable has no strong relationship with any distribution - the regionality is a stronger one.Heritage Sites are in 7 different geographical regions, * Dublin* Midlands & East Coast * North-West* Shannon* South-East* South-West* WestThe majority of sites are located in Dublin, South - East and South-West. Cafe Facilities, Datatype : IntegerIn the original dataset, 9 out of 100 Heritage Sites have a Cafe on site Opening Dates, Datatype : Integer42 of the sites are open all year round, the remainder have seasonal opening times. **Likely Distributions in the OPW Heritage Sites Dataset used to inform a synthesised Dataset** What is the type of distribution that appears and that can be used to inform a synthesised Dataset?The normal distribution is a very common one and can be considered the standard distribution, therefore I will use it where there is an option in randomly generating variables but most of the simple random data functions in the random library use the uniform distribution.On two occasions, the binomial distribution is used, further information about this decision appears below.In addition, the central limit theorem can be used to support my decision to use normal distribution. This theorem states that the mean of any sample of variables (with finite mean and variance) with any distribution will approximate the normal distribution. 
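###Markdown The central limit theorem argument above is easy to demonstrate with the same numpy tooling used later in this project: averaging many uniform samples produces an approximately normal histogram. The sketch below is purely illustrative and is not part of the original OPW analysis. ###Code
import numpy as np
import matplotlib.pyplot as plt

np.random.seed(19)
# Means of 1000 samples, each of 50 uniform draws, cluster into a roughly normal shape
sample_means = np.random.uniform(0, 1, size=(1000, 50)).mean(axis=1)

plt.hist(sample_means, bins=30)
plt.xlabel('Sample mean')
plt.ylabel('Frequency')
plt.title('Central limit theorem: means of uniform samples')
plt.show()
###Output _____no_output_____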
**Relationships in the OPW Heritage Sites Dataset used to inform a synthesised Dataset** There is a strong relationship between the Region and the Visitor Numbers in the original Dataset.To me, this is the the most interesting variable in the original dataset.An assumption coming to the dataset would be that the busiest sites are in the most populous region of the country.This assumption is born out by Failte Ireland's (the National Tourism Development Authority) 2018 figures where 4 of the top 10 paying visitor attrations are in Dublin with a further two in the South-East. However, other sites on the list are in underpopulated areas e.g. the Cliffs of Moher that are world renowned for their remoteness and unspolit beauty.I do not expect to be able to recreate this subtle relationship with synthesised data, however the relationships that are produced will be explored in due course. Section 3 Synthesise/simulate a data set as closely matching the properties of the original as is possible. ###Code #Import modules required for the Assignment #NumPy package import numpy as np #Pandas library import pandas as pd #Seaborn package import seaborn as sns #Matplotlib library import matplotlib.pyplot as plt ###Output _____no_output_____ ###Markdown Following on from the findings in Section 2, I will simulate a data set as closely matching their properties as possible using the numpy random package thus building on my [previous work](http://localhost:8888/?token=98bc2512905f44f91efe55dc0b350cacc78b93d3f4e55086) carried out during this course where I explored the numpy random package.Unless otherwise stated, the scripts come from this project.I will :- Permute the 100 Heritage Sites names from the original OPW Datset using random.permutation- Synthesise 100 variables from the choice of seven Region names using random.choice- Synthesise random data for the number of visitors from integers that range from 0 - 553,348 using random.randint- Synthesise random data for the Adult price point variable and ensure that 51 out of the 100 Sites have free entry / zero value using random.randint and permutation- Synthesise random data for the number of cafes available at Heritage Sites using random.binomial- Synthesise random data for the opening hours at Heritage sites using random.binomialThen merge these dataframes into one large dataset that mirrors the original. Setting the seed The term "seed" refers to the starting position of generating pseudo-random numbers.If the seed is known/set it is possible to predict the pattern because the algorithm will do the same equation and return the same numbers at each iteration. The seed will be set in each script to ensure uniformity in the output and so that any any comments or observation on the output will make sense to you, the reader. Permute the Heritage Site names and create a new dataframeIt is not possible to randomly generate this text, therefore I will use the selection the provided in the original Dataset that informs this project. ###Code df=pd.read_csv("https://raw.githubusercontent.com/ClodaghMurphy/ProgDA_ProjectDec2019/master/opw-heritage-site-detailsNEW.csv", encoding="ISO-8859–1",nrows=100) #code adapted from https://stackoverflow.com/questions/49545599/how-to-turn-a-pandas-column-into-array-and-transpose-it New_Names = df[['Name']] #Set seed at 19 to ensure identical results at every iteration np.random.seed(19) #Permute the synthesised dataframe. 
Permute is a function from the random package that rearranges an array #this code ensures the output will be in column format df1 = pd.DataFrame((np.random.permutation(New_Names)), columns = ['New_Names']) #df1 ###Output _____no_output_____ ###Markdown Use .random.choice to produce a 100 row dataframe using the given 7 OPW regions ###Code #The .random.choice function randomly chooses a sample from an array #Code adapted from https://pynative.com/python-random-sample/ #Provide array of 7 OPW regions as they appear in the original dataset Regions = (["Dublin", "Midlands & East Coast", "North-West", "Shannon", "South-East", "South-West", "West"]) #Set seed at 19 to ensure identical results at every iteration np.random.seed(19) #When 100 is entered into the argument, 100 selections are output #Calling pd.Dataframe ensures the output is in a datafame format df2 = pd.DataFrame((np.random.choice(Regions, 100)), columns = ['Region']) #A uniform distribution is assumed in this function #df2 ###Output _____no_output_____ ###Markdown Synthesise random data for the number of visitors from integers that range from 0 - 553348 ###Code #Set seed at 19 to ensure identical results at every iteration np.random.seed(19) #Code adapted from library documentation https://docs.scipy.org/doc/numpy-1.15.0/reference/routines.random.html #As per the numpy documentation, this command returns random integers from the “discrete uniform” distribution df3 = pd.DataFrame((np.random.randint(0, high=553348, size=100, dtype='l')), columns = ['New_Vis_Numbers']) ###Output _____no_output_____ ###Markdown Instead of forcing a nil amount of Visitors for 31 Heritage Site, I will allow the numpy library to generate data.My reason for this choice, is that the number of visitors in those sites was not zero - it was simply not collected for various business reasons e.g. the site is a main thoroughfare in the case of St. Stephen's Green. It will be more interesting dataset if these statistics are contained in it. ###Code #Code adapted from https://realpython.com/python-histograms/ plt.hist(df3) plt.xlabel('Value - Visitor Numbers') plt.ylabel('Frequency') plt.title('Histogram showing random distribution of Visitor Numbers') plt.show() ###Output _____no_output_____ ###Markdown Synthesise random data for the Adult price point variable and ensure that 51 out of the 100 Sites have free entry / zero value There are five different categories of visitor in the original datasetAdult, Senior / Group, Child, Student and Family.I have focussed on the Adult price because it is the most expensive individual entry price and the other amounts are based on it. ###Code #Set seed at 19 to ensure identical results at every iteration np.random.seed(19) #Adult entrance price - 100 integers between 1 and 12 #0-12 is not used because this would result in some random values of 0 #Code adapted from library documentation https://docs.scipy.org/doc/numpy-1.15.0/reference/routines.random.html Adult = np.random.randint(1, high=12, size=100, dtype='l') Adult = pd.DataFrame(Adult) #Code adapted from https://stats.stackexchange.com/questions/283572/using-iloc-to-set-values/283575 #Replace 51 values with free entry/zero Adult.loc[0:50,0] = 0 #Permute the synthesised dataframe. Permute is used because shuffle creates a "key error" when used with a dataframe. 
df4 = pd.DataFrame(np.random.permutation(Adult), columns = ['Adult']) #df4 ###Output _____no_output_____ ###Markdown Synthesise random data for the number of cafes and opening hours at Heritage Sites In the original dataset, there is lots of visitor information jumbled together in once cell covering facts such as whether there are toilets, parking, wheelchair access and cafes on site or nearby.From the dataset I extracted that 100 Heritage Sites have a Cafe on site.Similarly, through scanning through the original data in excel format which goes into great detail about the individual local opening hours I can put the information in a much simpler way - 42 of the sites are open all year round, the remainder have seasonal opening times.(These investigations are not shown as part of this assignment - only the results.)One of the learning Outcomes for this module is that I will be able to model real-world problems as computing problems.I can display this ability through turning this data-intensive information into a Boolean format, i.e.,The Heritage Site has a Cafe - True/FalseThe Heritage Site is open all year - True/FalseIn order to synthesise data to meet the requirements I will use the binomial distribution from the numpy library.In the assignment that I completed earlier this year, I wrote about the binomial distribution which can be used in any instance repeated multiple times where there are deemed to be two possible outcomes - success or failure.The "probability of success" input is taken from the findings in the original dataset 9/100 and 42/100 respectively. ###Code #Set seed at 19 to ensure identical results at every iteration np.random.seed(19) #Code adapted from https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.random.binomial.html n, p = 1, .09 # number of trials, probability of each trial is 9/100 df5 = pd.DataFrame((np.random.binomial(n, p, 100)), columns = ['Cafe']) #df5 #Set seed at 19 to ensure identical results at every iteration np.random.seed(19) #Code adapted from https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.random.binomial.html n, p = 1, .42 # number of trials, probability of each trial is 42/100 df6 = pd.DataFrame((np.random.binomial(n, p, 100)), columns = ['Year Round Opening']) #df6 #Code adapted from https://stackoverflow.com/questions/28135436/concatenate-rows-of-two-dataframes-in-pandas New_Dataset = pd.concat([df1,df2, df3, df4, df5, df6,], axis=1) #The following line can be uncommented (remove the # symbol)in order to print the New Dataset. #New_Dataset ###Output _____no_output_____ ###Markdown Summary Data of the New Dataset ###Code New_Dataset.info() ###Output <class 'pandas.core.frame.DataFrame'> RangeIndex: 100 entries, 0 to 99 Data columns (total 6 columns): New_Names 100 non-null object Region 100 non-null object New_Vis_Numbers 100 non-null int32 Adult 100 non-null int32 Cafe 100 non-null int32 Year Round Opening 100 non-null int32 dtypes: int32(4), object(2) memory usage: 3.2+ KB ###Markdown .info is used to provide a concise summary of the information contained in the New_Dataset DataFrame.The output above tells me that there are six columns, there are 100 rows of information in each, the datatypes are as expected and the DataFrame uses 3.2KB memory. ###Code #Print a description of the output print("Description of the OPW Dataset") New_Dataset.describe() #if parentheses ()are not used, all columns will display but no useful summary statistics! 
###Output _____no_output_____ ###Markdown According to pandas 0.25.1 documentation:> For numeric data, the result’s index will include count, mean, std, min, max as well as lower, 50 and upper percentiles. . By default the lower percentile is 25 and the upper percentile is 75. The 50 percentile is the same as the median.The output confirms that there are 100 data pointsA large amount of free sites produces unusual results in many of the columns e.g. the 25% and 50% quartiles are zero across the range.The standard deviations listed give an indication of the distance between the mean and all values, this figure is affected by the free entrance to over half of the sites in the dataset where tickets prices are concerned. Data Visualisation: Region and Visitor Numbers ###Code #Print a description of the output print("Countplot of the Random Choice function applied to 7 OPW Regions") #Code amended from https://stackoverflow.com/questions/42528921/how-to-prevent-overlapping-x-axis-labels-in-sns-countplot ax = sns.countplot(x="Region", data=New_Dataset) ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right") plt.tight_layout() plt.show() #code adapted from https://seaborn.pydata.org/generated/seaborn.boxplot.html ax = sns.boxplot(x="Region", y="New_Vis_Numbers", data=New_Dataset) ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right") #plt.tight_layout() #plt.show() ###Output _____no_output_____ ###Markdown Observations on the BoxPlot * The minimum is represented by the lowermost line (a "whisker")* The maximum value is the uppermost line (a "whisker")* The lowermost end of each box is quartile 1, the uppermost represents quartile 3* The second quartile (the median) is the line inside the box.* The circles that appear on the plot indicate outliers* A compact box indicates less variation in the valuesThe Dataset upon which this project is based indicated a strong relationship between the Region and the Visitor Numbers.The synthesised data was unable to replicate this nuance and the above boxplot diplays an expected uniform distribution. Data Visualisation: Histograms of all Numerical Data ###Code #code adapted from https://towardsdatascience.com/how-to-perform-exploratory-data-analysis-with-seaborn-97e3413e841d New_Dataset.hist(bins=15, figsize=(15, 6), layout=(2, 2)); print ("Data Visualisation - Histograms setting out all numerical data") ###Output Data Visualisation - Histograms setting out all numerical data ###Markdown Observations on the Histograms Adults Entrance Fee / Visitor NumbersThe fact that 51% of Heritage Sites have free entry, skews the distribution somewhat. Both the Adult Entrance Fee and Visitor Numbers plots can be seen to have discrete uniform distribution which corresponds with the documentation for that library.Cafe/Year Round OpeningIt is understood that the zero value indicates a false statement and one means the statement is true.When writing the script to randomly generate numbers regarding the availability of a Cafe on site or Year Round Opening a degree of control was exerted in that the distribution is a fixed binomial distribution. However, the boolean value will be applied to the unique Heritage Site in a random way.The data visualised in the histograms met those precise requirements. 
Relationships between Variables - correlation In Section 2, while still dealing with the original OPW Dataset, I observed a strong relationship between the Region and the Visitor Numbers, i.e., that Regions along the eastern seaboard had more visitors, and wrote that I did not expect to easily recreate it with synthesised data. In Section 3, the synthesised data produced a uniform distribution without any strong relationships between location and visitor numbers.Correlation is a statistical measure that can be used to describe one variable in terms of its relationship with another.Covariance is a measure of the association/relationship between two variables X and Y.The synthesised data does not meet the assumptions that must be met (e.g. normally distributed data) in order to apply Pearson's Correlation (the one that is called by .corr()); however, I will run the script as an example of the type of output that can be produced. It is straightforward to call the correlation function (see below), but what does this output mean?* values around 1 indicate a strong positive relationship* values around 0 indicate the variables are not linearly correlated* values around -1 indicate a strong negative relationshipThe results below, therefore, indicate either no linear correlation or only weak negative relationships between the variables.The value of 1 is output when the same two variables are compared to one another, e.g. Cafe and Cafe. ###Code New_Dataset.corr() ###Output _____no_output_____
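###Markdown Since the Pearson assumptions are not met here, a rank-based alternative can be produced with the same pandas API; Spearman's rank correlation does not assume normally distributed data. The cell below is only an illustrative sketch on the same New_Dataset DataFrame (the covariance mentioned above can be obtained in the same way with .cov()). ###Code
# Spearman's rank correlation makes no normality assumption
New_Dataset.corr(method='spearman')
# the covariance matrix could be inspected with:
# New_Dataset.cov()
###Output _____no_output_____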
astr-119-hw-3 (1).ipynb
###Markdown Define a function that checks whether or not the initial values are valid ###Code def Check_initial_values(f, x_min, x_max, tol): #function, minimum x, maximum x, tolerance #we need to check our initial guesses y_min = f(x_min) y_max = f(x_max) #check that there is a zero crossing between x_min and x_max #product of functional values should be less than 0 if(y_min*y_max>=0.0): print('There is no zero crossing in this range = ', x_min, x_max) #all the %f are replaced with the values after the last % #strings like this are used when you don't know the values of variables yet s = "f(%f) = %f, f(%f) = %f" % (x_min, y_min, x_max, y_max) print(s) return 0 #if the absolute value of a functional value is less than the tolerance (a really small number) #then a root is found if(np.fabs(y_min)<tol): return 1 if(np.fabs(y_max)<tol): return 2 #if the function runs until this point without printing anything, the bracket is valid. #no zeros have been found but we can proceed with the bisection search return 3 def bisection_root_finding(f, x_min_start, x_max_start, tol): #this function uses bisection search to find a root x_min = x_min_start #this is the minimum x in the bracket x_max = x_max_start #this is the maximum x in the bracket x_mid = 0.0 #the function changes sign between the bracket endpoints; initialise the midpoint at 0.0 y_min = f(x_min) #value of the function at x_min y_max = f(x_max) #value of the function at x_max y_mid = f(0.0) #value of the function at 0.0 imax = 10000 #set the maximum number of iterations i = 0 #starts counting from 0 #check the initial values flag = Check_initial_values(f, x_min, x_max, tol) #this uses the returned values of the Check_initial_values function and returns actual values if(flag==0): print('Error in bisection_root_finding().') raise ValueError('Initial values invalid', x_min, x_max) elif(flag==1): return x_min elif(flag==2): return x_max #this is the code for the search iterations flag = 1 #use a while loop to begin the iteration while(flag): x_mid = 0.5*(x_min + x_max) #this is the midpoint of the bracket y_mid = f(x_mid) #this is the y value at the midpoint #if the absolute value of y_mid is less than the tolerance, then it is a root if(np.fabs(y_mid)<tol): flag = 0 else: #if the product of the midpoint value and one of the endpoints is >0 #replace that end point with the midpoint if(f(x_min)*f(x_mid)>0): x_min = x_mid else: x_max = x_mid print(x_min, f(x_min), x_max, f(x_max)) #add 1 to the counter to represent the next iteration i += 1 #exit if the max number of iterations is reached if(i>=imax): print('Max number of iterations exceeded = ', i) s = "Min bracket f(%f) = %f" % (x_min, f(x_min)) print(s) s = 'Max bracket f(%f) = %f' % (x_max, f(x_max)) print(s) s = 'Mid bracket f(%f) = %f' % (x_mid, f(x_mid)) print(s) raise StopIteration ('Stop iterations after ', i) return x_mid x_min = 0.0 x_max = 1.5 tolerance = 1.0e-6 print(x_min, Function_for_roots(x_min)) print(x_max, Function_for_roots(x_max)) x_root = bisection_root_finding(Function_for_roots,x_min,x_max,tolerance) y_root = Function_for_roots(x_root) s = 'Root found with y(%f) = %f' % (x_root, y_root) print(s) x = np.linspace(0,3,1000) a = 1.01*x**2 - 3.04*x + 2.07 z = 0 plt.plot(x, a) plt.hlines(z, 0,3) plt.ylim(-.5,2.1) #I'm not too sure how to plot points ###Output _____no_output_____
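###Markdown To mark the points on the plot above, the bracket endpoints and the root returned by the bisection search can simply be added to the same axes. This is a small sketch reusing the x, a, z, x_root and y_root values defined in the previous cell; the marker styles are just one possible choice. ###Code
plt.plot(x, a)
plt.hlines(z, 0, 3)
# mark the initial bracket endpoints and the root found by bisection
plt.plot(x_min, Function_for_roots(x_min), 'o', label='bracket start')
plt.plot(x_max, Function_for_roots(x_max), 'o', label='bracket end')
plt.plot(x_root, y_root, 's', label='root')
plt.ylim(-.5, 2.1)
plt.legend()
plt.show()
###Output _____no_output_____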
Python_IE534/hw4/submitted/report.ipynb
###Markdown Build Resnet from Scratch Model Configuration**Net Architecture:** The same as decribed in the homework. - dropout layer: p=0.2 - max pooling layer: stride = 2, kenerl_size=2**Batch Size:** 500**Optimizer:** - sgd(lr=0.05, momentum=0.9, weight_decay=5e-4) - for every 30 epoches, `lr /= 2`**Data Augmentation:** - RandomCrop - RandomHorizontalFlip Experiment Results**For 50 epochs, the highest testing accuracy I obtained during the training is 66.06% at the 32-th epoch.** After that the testing accuracy drops a bit and oscillate around 64%.Part of the log is provided below. **The final test accuracy is 64.48%**.```2018-10-05 18:53:04,475 - [INFO] - => Epoch: [32/50] | Training Loss:[0.47846364974975586] | Training Accuracy: [0.8605199999999997]2018-10-05 18:53:11,658 - [INFO] - => Epoch: [32/50] | Testing Loss:[1.299492597579956] | Testing Accuracy: [0.6606000000000001]2018-10-05 18:53:11,869 - [INFO] - => Best parameters are updated2018-10-05 18:54:26,938 - [INFO] - => Epoch: [33/50] | Training Loss:[0.45494773983955383] | Training Accuracy: [0.8647199999999999]2018-10-05 18:55:49,112 - [INFO] - => Epoch: [34/50] | Training Loss:[0.4044838547706604] | Training Accuracy: [0.8736999999999999]2018-10-05 18:55:56,082 - [INFO] - => Epoch: [34/50] | Testing Loss:[1.3297039270401] | Testing Accuracy: [0.6481999999999999]2018-10-05 18:57:11,305 - [INFO] - => Epoch: [35/50] | Training Loss:[0.510037899017334] | Training Accuracy: [0.88004]2018-10-05 18:58:33,666 - [INFO] - => Epoch: [36/50] | Training Loss:[0.3825763761997223] | Training Accuracy: [0.88046]2018-10-05 18:58:40,782 - [INFO] - => Epoch: [36/50] | Testing Loss:[1.3726511001586914] | Testing Accuracy: [0.6528]2018-10-05 18:59:55,949 - [INFO] - => Epoch: [37/50] | Training Loss:[0.4261672794818878] | Training Accuracy: [0.88594]2018-10-05 19:00:03,426 - [INFO] - => Best parameters are updated2018-10-05 19:01:18,523 - [INFO] - => Epoch: [38/50] | Training Loss:[0.3609049618244171] | Training Accuracy: [0.8888199999999999]2018-10-05 19:01:25,734 - [INFO] - => Epoch: [38/50] | Testing Loss:[1.576693058013916] | Testing Accuracy: [0.6409999999999999]2018-10-05 19:02:40,913 - [INFO] - => Epoch: [39/50] | Training Loss:[0.4049341082572937] | Training Accuracy: [0.8927200000000001]2018-10-05 19:04:03,075 - [INFO] - => Epoch: [40/50] | Training Loss:[0.3678820729255676] | Training Accuracy: [0.8916000000000001]2018-10-05 19:04:10,092 - [INFO] - => Epoch: [40/50] | Testing Loss:[1.48447585105896] | Testing Accuracy: [0.6491]2018-10-05 19:05:25,303 - [INFO] - => Epoch: [41/50] | Training Loss:[0.3971269726753235] | Training Accuracy: [0.8979600000000001]2018-10-05 19:06:47,696 - [INFO] - => Epoch: [42/50] | Training Loss:[0.3653419613838196] | Training Accuracy: [0.9029600000000002]2018-10-05 19:06:54,902 - [INFO] - => Epoch: [42/50] | Testing Loss:[1.3690619468688965] | Testing Accuracy: [0.6395999999999998]2018-10-05 19:08:10,098 - [INFO] - => Epoch: [43/50] | Training Loss:[0.3626648187637329] | Training Accuracy: [0.9033599999999999]2018-10-05 19:09:32,318 - [INFO] - => Epoch: [44/50] | Training Loss:[0.33587783575057983] | Training Accuracy: [0.9051200000000001]2018-10-05 19:09:39,526 - [INFO] - => Epoch: [44/50] | Testing Loss:[1.5433708429336548] | Testing Accuracy: [0.6497999999999999]2018-10-05 19:10:54,710 - [INFO] - => Epoch: [45/50] | Training Loss:[0.25377246737480164] | Training Accuracy: [0.9077000000000001]2018-10-05 19:12:17,097 - [INFO] - => Epoch: [46/50] | Training Loss:[0.333034873008728] | Training Accuracy: 
[0.9115400000000001]2018-10-05 19:12:24,290 - [INFO] - => Epoch: [46/50] | Testing Loss:[1.3026373386383057] | Testing Accuracy: [0.6446000000000001]2018-10-05 19:13:39,492 - [INFO] - => Epoch: [47/50] | Training Loss:[0.30075380206108093] | Training Accuracy: [0.9142800000000001]2018-10-05 19:15:01,872 - [INFO] - => Epoch: [48/50] | Training Loss:[0.2824198007583618] | Training Accuracy: [0.9175399999999999]2018-10-05 19:15:08,851 - [INFO] - => Epoch: [48/50] | Testing Loss:[1.137478232383728] | Testing Accuracy: [0.6436]2018-10-05 19:16:24,017 - [INFO] - => Epoch: [49/50] | Training Loss:[0.36052873730659485] | Training Accuracy: [0.91734]2018-10-05 19:17:46,417 - [INFO] - => Epoch: [50/50] | Training Loss:[0.2664807438850403] | Training Accuracy: [0.9181600000000003]2018-10-05 19:17:53,402 - [INFO] - => Epoch: [50/50] | Testing Loss:[1.2844302654266357] | Testing Accuracy: [0.6448]2018-10-05 19:17:53,529 - [INFO] - Trained on [50] epoch, with test accuracy [0.6448]. => During the training stages, historical best test accuracy is [0.6618]```Some simple visualization on- training accuracy v.s. testing accuracy- training loss v.s. testing lossare provided below. ###Code import torch import matplotlib.pyplot as plt checkpoint = torch.load('../myresnet_checkpoint.pth.tar',map_location='cpu') testing_accuracy_seq = checkpoint['testing_accuracy_seq'] training_accuracy_seq = checkpoint['training_accuracy_seq'] plt.plot( training_accuracy_seq, '-', color='blue', linewidth=2, label="train acc") plt.plot( testing_accuracy_seq, '-*', color='red', linewidth=2, label="test acc") plt.legend() plt.show() testing_loss_seq = checkpoint['testing_loss_seq'] training_loss_seq = checkpoint['training_loss_seq'] plt.plot( training_loss_seq, '-', color='blue', linewidth=2, label="train loss") plt.plot( testing_loss_seq, '-*', color='red', linewidth=2, label="test loss") plt.legend() plt.show() ###Output _____no_output_____ ###Markdown Build Resnet from Pretrained Resnet18 Model Configuration**Net Architecture:** Replace the last fully connected layer with one that has `out_features=100`. - dropout layer: p=0.2 - max pooling layer: stride = 2, kernel_size=2**Batch Size:** 100 (to avoid the memory deficiency issue)**Optimizer:** - sgd(lr=0.05, momentum=0.9, weight_decay=5e-4) - for every 30 epochs, `lr /= 2`**Data Augmentation:** - Resize the image to 224 * 224 - RandomCrop - RandomHorizontalFlip Experiment ResultsTraining Strategy: Instead of only training the last fully-connected layer, I trained the whole network to get higher testing accuracy, which is of course much more computationally intensive. I set the wall time to 2 hours, but each epoch takes around 9 minutes to train, so I could not finish the planned 30 epochs. Within the 2-hour limit, I managed to finish 13 epochs of training with around **73% testing accuracy.** The highest testing accuracy I obtained during the training is **76.18% at the 10th epoch**. After that the testing accuracy drops a bit and **oscillates around 73%**.The log is provided below. 
**The final test accuracy is 73.97%**.```2018-10-05 22:05:34,010 - [INFO] - torch version: 0.3.02018-10-05 22:05:34,011 - [INFO] - Data Preparation...2018-10-05 22:05:39,298 - [INFO] - Loading Data...2018-10-05 22:05:39,298 - [INFO] - Model setting...2018-10-05 22:05:41,102 - [INFO] - Resume from the checkpoint...2018-10-05 22:05:41,103 - [INFO] - => no checkpoint found at './tf_checkpoint.pth.tar'2018-10-05 22:05:41,103 - [INFO] - => Training based on the resnet-18 from scratch...2018-10-05 22:05:41,103 - [INFO] - Model Training...2018-10-05 22:14:06,773 - [INFO] - => Epoch: [1/30] | Training Loss:[1.6555067300796509] | Training Accuracy: [0.45396000000000003]2018-10-05 22:14:52,561 - [INFO] - => Best parameters are updated2018-10-05 22:23:18,275 - [INFO] - => Epoch: [2/30] | Training Loss:[1.0985631942749023] | Training Accuracy: [0.63412]2018-10-05 22:24:00,918 - [INFO] - => Epoch: [2/30] | Testing Loss:[1.2507389783859253] | Testing Accuracy: [0.5905]2018-10-05 22:24:01,436 - [INFO] - => Best parameters are updated2018-10-05 22:32:23,823 - [INFO] - => Epoch: [3/30] | Training Loss:[0.8547025918960571] | Training Accuracy: [0.6952200000000001]2018-10-05 22:33:06,877 - [INFO] - => Best parameters are updated2018-10-05 22:41:31,053 - [INFO] - => Epoch: [4/30] | Training Loss:[0.8175657391548157] | Training Accuracy: [0.7298199999999999]2018-10-05 22:42:13,646 - [INFO] - => Epoch: [4/30] | Testing Loss:[1.4175124168395996] | Testing Accuracy: [0.6254000000000001]2018-10-05 22:42:14,149 - [INFO] - => Best parameters are updated2018-10-05 22:50:41,597 - [INFO] - => Epoch: [5/30] | Training Loss:[0.7954866886138916] | Training Accuracy: [0.75752]2018-10-05 22:51:25,901 - [INFO] - => Best parameters are updated2018-10-05 22:59:51,816 - [INFO] - => Epoch: [6/30] | Training Loss:[0.9352169632911682] | Training Accuracy: [0.7698400000000001]2018-10-05 23:00:34,434 - [INFO] - => Epoch: [6/30] | Testing Loss:[1.1475493907928467] | Testing Accuracy: [0.6557]2018-10-05 23:09:02,586 - [INFO] - => Epoch: [7/30] | Training Loss:[1.0427197217941284] | Training Accuracy: [0.7843600000000001]2018-10-05 23:18:08,401 - [INFO] - => Epoch: [8/30] | Training Loss:[0.8770634531974792] | Training Accuracy: [0.795]2018-10-05 23:18:50,968 - [INFO] - => Epoch: [8/30] | Testing Loss:[1.046979546546936] | Testing Accuracy: [0.6552999999999999]2018-10-05 23:27:19,654 - [INFO] - => Epoch: [9/30] | Training Loss:[0.7507113814353943] | Training Accuracy: [0.8043800000000001]2018-10-05 23:28:02,417 - [INFO] - => Learning rate is updated!2018-10-05 23:36:26,854 - [INFO] - => Epoch: [10/30] | Training Loss:[0.24965983629226685] | Training Accuracy: [0.9040199999999999]2018-10-05 23:37:09,390 - [INFO] - => Epoch: [10/30] | Testing Loss:[0.8782387375831604] | Testing Accuracy: [0.7618]2018-10-05 23:37:09,905 - [INFO] - => Best parameters are updated2018-10-05 23:45:37,206 - [INFO] - => Epoch: [11/30] | Training Loss:[0.23527362942695618] | Training Accuracy: [0.94082]2018-10-05 23:54:39,736 - [INFO] - => Epoch: [12/30] | Training Loss:[0.31548258662223816] | Training Accuracy: [0.9526399999999999]2018-10-05 23:55:22,430 - [INFO] - => Epoch: [12/30] | Testing Loss:[0.9379260540008545] | Testing Accuracy: [0.7397]```Some simple visualization on- training accuracy v.s. testing accuracy- training loss v.s. testing lossare provided below. 
###Code checkpoint = torch.load('../tf_checkpoint.pth.tar',map_location='cpu') testing_accuracy_seq = checkpoint['testing_accuracy_seq'] training_accuracy_seq = checkpoint['training_accuracy_seq'] plt.plot( training_accuracy_seq, '-', color='blue', linewidth=2, label="train acc") plt.plot( testing_accuracy_seq, '-*', color='red', linewidth=2, label="test acc") plt.legend() plt.show() testing_loss_seq = checkpoint['testing_loss_seq'] training_loss_seq = checkpoint['training_loss_seq'] plt.plot( training_loss_seq, '-', color='blue', linewidth=2, label="train loss") plt.plot( testing_loss_seq, '-*', color='red', linewidth=2, label="test loss") plt.legend() plt.show() ###Output _____no_output_____
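###Markdown For reference, a minimal sketch of the fine-tuning configuration described above: a pretrained ResNet-18 with the final fully-connected layer replaced for 100 classes, SGD with lr=0.05, momentum 0.9 and weight decay 5e-4, the learning rate halved every 30 epochs, and the stated data augmentation. This is only an illustration assuming standard torchvision/PyTorch APIs (the extra dropout/pooling details and the exact crop parameters are not spelled out in the report), not the actual training script behind the logs above. ###Code
import torch.nn as nn
import torch.optim as optim
import torchvision
from torchvision import transforms

# pretrained ResNet-18 with the classification head replaced for 100 classes
model = torchvision.models.resnet18(pretrained=True)
model.fc = nn.Linear(model.fc.in_features, 100)

# SGD settings from the report; StepLR halves the learning rate every 30 epochs
optimizer = optim.SGD(model.parameters(), lr=0.05, momentum=0.9, weight_decay=5e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.5)

# augmentation as described: resize to 224, random crop, random horizontal flip
# (crop size/padding are placeholders; the report does not give exact values)
train_transform = transforms.Compose([
    transforms.Resize(224),
    transforms.RandomCrop(224, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
])
###Output _____no_output_____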
time-intro.ipynb
###Markdown (time-intro)= Introduction to TimeIn this section, we'll introduce the tools you need to manipulate time... well, in Python at least. In this chapter, we'll cover times, dates, datetimes, time zones, and differences in datetimes.One code task related to time that we *won't* cover here is how to run scripts or functions at a given frequency, i.e. how to schedule jobs.This chapter has benefitted from the [Python Data Science Handbook](https://jakevdp.github.io/PythonDataScienceHandbook/) by Jake VanderPlas, and [strftime.org](https://strftime.org/). Python's built-in datetimeThe datetime object is the fundamental time object in, for want of a better description, 'base' Python. It's useful to know about these before moving on to datetime operations using **pandas** (which you're far more likely to use in practice). It combines information on date *and* time, capturing as it does the year, month, day, hour, minute, second, and microsecond. Let's import the class that deals with datetimes (whose objects are of type `datetime.datetime`) and take a look at it. ###Code from datetime import datetime now = datetime.now() print(now) ###Output _____no_output_____ ###Markdown Most people will be more used to working with day-month-year, while some people even have month-day-year, which clearly makes no sense at all! But note datetime follows [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601), the international standard for datetimes that has year-month-day-hrs:mins:seconds, with hours in the 24 hour clock format. This is the format you should use when coding too.As ever, the excellent [**rich**](https://github.com/willmcgugan/rich) library can give us a good idea of what properties and methods are available for objects of type `datetime.datetime` via its `inspect` method: ###Code from rich import inspect inspect(now) ###Output _____no_output_____ ###Markdown We can see that the variable we created has methods such as `year`, `month`, `day`, and so on, down to `microsecond`. When calling these methods on the `now` object we created, they will return the relevant detail. ```{admonition} ExerciseTry calling the year, month, and day functions on an instance of `datetime.now()`.```Note that, once created, `now` does not refresh itself: it's frozen at the time that it was made.To create a datetime using given information the command is: ###Code specific_datetime = datetime(2019, 11, 28) print(specific_datetime) ###Output _____no_output_____ ###Markdown To make clearer and more readable code, you can also call this using keyword arguments: `datetime(year=2019, month=11, day=28)`. Many of the operations you'd expect to just work with datetimes do. For example: ###Code now > specific_datetime ###Output _____no_output_____ ###Markdown Datetimes and stringsOne of the most common transformations you're likely to need to do when it comes to times is the one from a string, like "4 July 2002", to a datetime. You can do this using `datetime.strptime`. Here's an example: ###Code date_string = "16 February in 2002" datetime.strptime(date_string, "%d %B in %Y") ###Output _____no_output_____ ###Markdown What's going on? The pattern of the datestring is "day month 'in' year". Python's `strptime` function has codes for the different parts of a datetime (and the different ways they can be expressed). 
For example, if you had the short version of month instead of the long it would be: ###Code date_string = "16 Feb in 2002" datetime.strptime(date_string, "%d %b in %Y") ###Output _____no_output_____ ###Markdown What about turning a datetime into a string? We can do that too, courtesy of the same codes. ###Code now.strftime("%A, %m, %Y") ###Output _____no_output_____ ###Markdown Of course, you don't always want to have to worry about the ins and outs of what you're passing in, and the built-in `dateutil` is here for flexible parsing of formats should you need that (explicit is better than implicit though!): ###Code from dateutil.parser import parse date_string = "03 Feb 02" print(parse(date_string)) date_string = "3rd February 2002" print(parse(date_string)) ###Output _____no_output_____ ###Markdown You can find a close-to-comprehensive list of `strftime` codes at [https://strftime.org/](https://strftime.org/), but they're reproduced in the table below for convenience. | Code | Meaning | Example ||-|-|-|| %a | Weekday as locale’s abbreviated name. | Mon || %A | Weekday as locale’s full name. | Monday || %w | Weekday as a decimal number, where 0 is Sunday and 6 is Saturday. | 1 || %d | Day of the month as a zero-padded decimal number. | 30 || %-d | Day of the month as a decimal number. (Platform specific) | 30 || %b | Month as locale’s abbreviated name. | Sep || %B | Month as locale’s full name. | September || %m | Month as a zero-padded decimal number. | 09 || %-m | Month as a decimal number. (Platform specific) | 9 || %y | Year without century as a zero-padded decimal number. | 13 || %Y | Year with century as a decimal number. | 2013 || %H | Hour (24-hour clock) as a zero-padded decimal number. | 07 || %-H | Hour (24-hour clock) as a decimal number. (Platform specific) | 7 || %I | Hour (12-hour clock) as a zero-padded decimal number. | 07 || %-I | Hour (12-hour clock) as a decimal number. (Platform specific) | 7 || %p | Locale’s equivalent of either AM or PM. | AM || %M | Minute as a zero-padded decimal number. | 06 || %-M | Minute as a decimal number. (Platform specific) | 6 || %S | Second as a zero-padded decimal number. | 05 || %-S | Second as a decimal number. (Platform specific) | 5 || %f | Microsecond as a decimal number, zero-padded on the left. | 000000 || %z | UTC offset in the form +HHMM or -HHMM (empty string if the the object is naive). | || %Z | Time zone name (empty string if the object is naive). | || %j | Day of the year as a zero-padded decimal number. | 273 || %-j | Day of the year as a decimal number. (Platform specific) | 273 || %U | Week number of the year (Sunday as the first day of the week) as a zero padded decimal number. | 39 || %W | Week number of the year (Monday as the first day of the week) as a decimal number. | 39 || %c | Locale’s appropriate date and time representation. | Mon Sep 30 07:06:05 2013 || %x | Locale’s appropriate date representation. | 09/30/13 || %X | Locale’s appropriate time representation. | 07:06:05 || %% | A literal '%' character. | % | From time to timeAs well as recording a *single* datetime, there are plenty of occasions when we'll be interested in *differences* in datetimes. Let's create one and then check its type. ###Code time_diff = now - datetime(year=2020, month=1, day=1) print(time_diff) ###Output _____no_output_____ ###Markdown This is in the format of days, hours, minutes, seconds, and microseconds. 
Let's check the type, and more, with `inspect`: ###Code inspect(time_diff) ###Output _____no_output_____ ###Markdown This is of type `datetime.timedelta`. In the zoneDate and time objects may be categorized as aware or naive depending on whether or not they include timezone information; an aware object can locate itself relative to other aware objects, but a naive object does not contain enough information to unambiguously locate itself relative to other date/time objects. So far we've been working with naive datetime objects.The **pytz** package can help us work with time zones. It has two main use cases: i) localise timezone-naive datetimes so that they become aware, i.e. have a timezone, and ii) convert a datetime in one timezone to another timezone.The default timezone for coding is UTC. ‘UTC’ is Coordinated Universal Time. It is a successor to, but distinct from, Greenwich Mean Time (GMT) and the various definitions of Universal Time. UTC is now the worldwide standard for regulating clocks and time measurement.All other timezones are defined relative to UTC, and include offsets like UTC+0800 - hours to add or subtract from UTC to derive the local time. No daylight saving time occurs in UTC, making it a useful timezone to perform date arithmetic without worrying about the confusion and ambiguities caused by daylight saving time transitions, your country changing its timezone, or mobile computers that roam through multiple timezones.Let's create a couple of time zone aware datetimes and look at their difference. ###Code import pytz from pytz import timezone aware = datetime(tzinfo=pytz.UTC, year=2020, month=1, day=1) unaware = datetime(year=2020, month=1, day=1) us_tz = timezone("US/Eastern") us_aware = us_tz.localize(unaware) print(us_aware - aware) ###Output _____no_output_____ ###Markdown So we find that there's a five hour difference between UTC and the time on the East Coast of the USA. In the above, we used the `localize` method to convert a naive datetime into an aware one, and we also initialised an aware datetime directly.For data where time really matters, such as some types of financial data, using timezone aware datetimes could prevent some nasty (and expensive) mistakes. ```{admonition} ExerciseUsing `datetime.now()` and `localize`, what is the time in the 'Australia/Melbourne' time zone?``` A More User-Friendly Approach to Datetimes: **arrow**While Python's standard library has near-complete date, time and timezone functionality, it's not the most user-friendly. The [**arrow**](https://arrow.readthedocs.io/en/latest/) package attempts to offer a sensible and human-friendly approach to creating, manipulating, formatting and converting dates, times and timestamps. Let's take a quick look at some of the functionality of **arrow**.Import arrow, create a datetime, and find the current datetime. ###Code import arrow dt = arrow.get("2013-05-11T21:23:00") print(dt) dt2 = arrow.now() dt2 ###Output _____no_output_____ ###Markdown Use **arrow** to shift a datetime back by an hour and a day. ###Code dt.shift(hours=-1, days=-1) ###Output _____no_output_____ ###Markdown Convert to a different timezone: ###Code dt2.to("US/Pacific") ###Output _____no_output_____ ###Markdown Give simpler, human readable datetimes: ###Code dt2.shift(hours=-1).humanize() ###Output _____no_output_____ ###Markdown Vectorised Datetimes Now we come to vectorised operations on datetimes using the powerful **numpy** package (and this is what is used by **pandas**). 
**numpy** has its own version of datetime, called `np.datetime64`, and it's very efficient at scale. Let's see it in action: ###Code import numpy as np date = np.array("2020-01-01", dtype=np.datetime64) date ###Output _____no_output_____ ###Markdown The 'D' tells us that the smallest unit here is days. We can easily create a vector of dates from this object: ###Code date + range(32) ###Output _____no_output_____ ###Markdown Note how the last day rolls over into the next month.If you are creating a datetime with more precision than day, **numpy** will figure it out from the input; for example, this gives resolution down to minutes. ###Code np.datetime64("2020-01-01 09:00") ###Output _____no_output_____
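###Markdown Differences work in a vectorised way too: subtracting one `datetime64` value or array from another gives `timedelta64` values, which makes it easy to compute durations across a whole array at once. Here's a quick sketch using `np.diff`: ###Code
# vector of dates and the gaps between consecutive entries
dates = np.array("2020-01-01", dtype=np.datetime64) + range(5)
deltas = np.diff(dates)  # timedelta64 values, in days here
print(deltas)
# the differences can be cast to another unit, e.g. hours
print(deltas.astype("timedelta64[h]"))
###Output _____no_output_____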
adv3_evaluation.ipynb
###Markdown Rotation Robustness ###Code # get the rotation angles angles = get_rotation_angles(MAX_ROT_ANGLE, ROT_LOG_BASE, ROT_STEPS) rot_hamming = get_augmented_hashes_and_hamming_dist( [os.path.join(HASH_DIR, 'rotation', f'{DATASET}_rotation_{angle}.csv') for angle in angles], angles, bin_hashes_orig, num_processes=15 ) df = pd.DataFrame(rot_hamming) df = df.melt() df = df.rename(columns={'variable': 'Angle', 'value': 'Normalized Hamming Distance'}) sns.set(rc={'figure.figsize':(8,5)}) sns.set_style("whitegrid") fig = sns.lineplot(data=df, marker='o', markersize=8, x="Angle", y="Normalized Hamming Distance", ci='sd') fig.set_xlabel("Angle", fontsize = 21, fontweight='bold') fig.set_ylabel("Norm. Hamming Distance", fontsize = 21, fontweight='bold') plt.xticks([-64, -32, -16,-4, 4, 16, 32, 64]) plt.yticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5]) fig.set(ylim=(-0.025, 0.55)) fig.set_yticklabels(fig.get_yticks(), size = 18, fontweight='bold') fig.set_xticklabels(fig.get_xticks(), size = 18, fontweight='bold') plt.tight_layout() plt.savefig(f'{PLOT_DIR}/{DATASET}_rotation_robustness.pdf') check_for_non_altered_hashes(rot_hamming, angles[6]) check_for_non_altered_hashes(rot_hamming, angles[8]) check_for_non_altered_hashes(rot_hamming, angles[4]) check_for_non_altered_hashes(rot_hamming, angles[10]) check_for_non_altered_hashes(rot_hamming, angles[0]) # plot the single image that does not change idx = check_for_non_altered_hashes(rot_hamming, angles[14], return_indices=True)[0] print(f'{dataset.classes[dataset[idx][1]][0]}:') plt.imshow(dataset[idx][0].permute(1,2,0).numpy()) plt.show() print_mean_and_std_for_keys(rot_hamming) plot_example_img_with_transformation(dataset, EXAMPLE_IMG_IDX, img_class=EXAMPLE_IMG_CLASS, transformation=Rotate(angles[-1]), file_path=os.path.join(EXAMPLE_IMG_DIR, 'rotation.png')) ###Output _____no_output_____ ###Markdown Translation Robustness ###Code translations = get_translation_tuples(MAX_TRANS, TRANS_LOG_BASE, TRANS_STEPS) trans_hamming = get_augmented_hashes_and_hamming_dist( [os.path.join(HASH_DIR, 'translation', f'{DATASET}_translation_{trans[0]}_{trans[1]}.csv') for trans in translations], translations, bin_hashes_orig, num_processes=24 ) # add the hamming distance without any translation trans_hamming[(0, 0)] = np.zeros_like(trans_hamming[(1, 1)]) df = pd.DataFrame(trans_hamming) df = pd.DataFrame(df.mean()).transpose() df = df.melt() sns.set_style("ticks", {'axes.grid': False}) x_values = df['variable_0'].to_numpy() y_values = df['variable_1'].to_numpy() z_values = df['value'].to_numpy() xi = np.linspace(0, x_values.max(), 67, endpoint=True)[None, :] yi = np.linspace(0, y_values.max(), 67, endpoint=True)[:, None] scipy_linear = griddata((x_values, y_values), z_values, (xi, yi), rescale=True) sns.set(rc={'figure.figsize':(8,6)}) cmap = sns.cubehelix_palette(as_cmap=True) labels = [i if i in [0, 4, 8, 16, 32, 64] else None for i in range(66)] ax = sns.heatmap(scipy_linear, cmap=cmap, vmax=0.25, vmin=0, xticklabels=labels, yticklabels=labels, rasterized=True) ax.tick_params(left=False, bottom=False) cbar = ax.collections[0].colorbar cbar.set_label(label='Norm. 
Hamming Distance', weight='bold') cbar.set_ticks([0, 0.05, 0.1, 0.15, 0.2, 0.25]) for label in cbar.ax.get_yticklabels(): label.set_fontsize(18) label.set_fontweight('bold') ax.scatter(x_values + 0.5, y_values + 0.5, s=15, c='None', edgecolors='yellow') ax.invert_yaxis() ax.figure.axes[-1].yaxis.label.set_size(18) ax.figure.axes[-1].tick_params(labelsize=18) ax.set_xticklabels(ax.get_xticklabels(), size=18, fontweight='bold') ax.set_yticklabels(ax.get_yticklabels(), size=18, fontweight='bold', rotation=0) plt.xlabel('Horizontal Translation', fontsize=21, fontweight='bold') plt.ylabel('Vertical Translation', fontsize=21, fontweight='bold') plt.tight_layout() plt.savefig(f'{PLOT_DIR}/{DATASET}_translation_robustness.pdf') print_mean_and_std_for_keys(trans_hamming) # plot two images that do not change idx1, idx2 = check_for_non_altered_hashes(trans_hamming, translations[-1], return_indices=True) print(f'{dataset.classes[dataset[idx1][1]][0]}:') plt.imshow(dataset[idx1][0].permute(1,2,0)) plt.show() print(f'{dataset.classes[dataset[idx2][1]][0]}:') plt.imshow(dataset[idx2][0].permute(1,2,0)) plt.show() plot_example_img_with_transformation(dataset, EXAMPLE_IMG_IDX, img_class=EXAMPLE_IMG_CLASS, transformation=Translate(translations[-1]), file_path=os.path.join(EXAMPLE_IMG_DIR, 'translation.png')) ###Output _____no_output_____ ###Markdown Hue Robustness ###Code hues = list(range(-180, 180, 30)) hue_hamming = get_augmented_hashes_and_hamming_dist( [os.path.join(HASH_DIR, 'hue', f'{DATASET}_hue_{hue}.csv') for hue in hues], hues, bin_hashes_orig, num_processes=12 ) hue_hamming[180] = hue_hamming[-180] df = pd.DataFrame(hue_hamming) df = df.melt() df = df.rename(columns={'variable': 'HSV Angle', 'value': 'Normalized Hamming Distance'}) sns.set(rc={'figure.figsize':(8,5)}) sns.set_style("whitegrid") fig = sns.lineplot(data=df, marker='o', markersize=8, x="HSV Angle", y="Normalized Hamming Distance", ci='sd') plt.xticks([-180, -120, -60, 0, 60, 120, 180]) plt.yticks([0.0, 0.05, 0.1, 0.15, 0.2, 0.25]) fig.set_xlabel("Hue Angle", fontsize = 21, fontweight='bold') fig.set_ylabel("Norm. Hamming Distance", fontsize = 21, fontweight='bold') fig.set_yticklabels(fig.get_yticks(), size = 18, fontweight='bold') fig.set_xticklabels(fig.get_xticks(), size = 18, fontweight='bold') yticklabels = [] for item in fig.get_yticklabels(): fmt = '{:0.2f}' item.set_text(fmt.format(float(item.get_text()))) yticklabels += [item] fig.set_yticklabels(yticklabels) plt.tight_layout() plt.savefig(f'{PLOT_DIR}/{DATASET}_hue_robustness.pdf') print_mean_and_std_for_keys(hue_hamming) check_for_non_altered_hashes(hue_hamming, hues[0]) plot_example_img_with_transformation(dataset, EXAMPLE_IMG_IDX, img_class=EXAMPLE_IMG_CLASS, transformation=ChangeHue(hues[0]), file_path=os.path.join(EXAMPLE_IMG_DIR, 'hue.png')) ###Output _____no_output_____ ###Markdown Brightness Robustness ###Code brightness_values = list(np.linspace(0, 2, 9, endpoint=True)) brightness_hamming = get_augmented_hashes_and_hamming_dist( [os.path.join(HASH_DIR, 'brightness', f'{DATASET}_brightness_{brightness}.csv') for brightness in brightness_values], brightness_values, bin_hashes_orig, num_processes=9 ) df = pd.DataFrame(brightness_hamming) df = df.melt() df = df.rename(columns={'variable': 'Brightness Factor', 'value': 'Norm. Hamming Distance'}) sns.set(rc={'figure.figsize':(8,5)}) sns.set_style("whitegrid") fig = sns.lineplot(data=df, marker='o', markersize=8, x="Brightness Factor", y="Norm. 
Hamming Distance", ci='sd') plt.yticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5]) fig.set(ylabel=None) fig.set(ylim=(-0.025, 0.55)) fig.set_xlabel(fig.get_xlabel(), fontsize = 21, fontweight='bold') #fig.set_ylabel(fig.get_ylabel(), fontsize = 21, fontweight='bold') fig.set_yticklabels([]) fig.set_xticklabels(fig.get_xticks(), size = 18, fontweight='bold') plt.tight_layout() plt.savefig(f'{PLOT_DIR}/{DATASET}_brightness_robustness.pdf') print_mean_and_std_for_keys(brightness_hamming) check_for_non_altered_hashes(brightness_hamming, brightness_values[-1]) plot_example_img_with_transformation(dataset, EXAMPLE_IMG_IDX, img_class=EXAMPLE_IMG_CLASS, transformation=ChangeBrightness(brightness_values[-1]), file_path=os.path.join(EXAMPLE_IMG_DIR, 'brightness.png')) # plot two images that do not change idx1, idx2 = check_for_non_altered_hashes(brightness_hamming, brightness_values[-1], return_indices=True)[:2] print(f'{dataset.classes[dataset[idx1][1]][0]}:') plt.imshow(dataset[idx1][0].permute(1,2,0)) plt.show() print(f'{dataset.classes[dataset[idx2][1]][0]}:') plt.imshow(dataset[idx2][0].permute(1,2,0)) plt.show() ###Output _____no_output_____ ###Markdown Contrast Robustness ###Code contrast_values = list(np.linspace(0, 2, 9, endpoint=True)) contrast_hamming = get_augmented_hashes_and_hamming_dist( [os.path.join(HASH_DIR, 'contrast', f'{DATASET}_contrast_{contrast}.csv') for contrast in contrast_values], contrast_values, bin_hashes_orig, num_processes=9 ) df = pd.DataFrame(contrast_hamming) df = df.melt() df = df.rename(columns={'variable': 'Contrast Factor', 'value': 'Norm. Hamming Distance'}) sns.set(rc={'figure.figsize':(8,5)}) sns.set_style("whitegrid") fig = sns.lineplot(data=df, marker='o', markersize=8, x="Contrast Factor", y="Norm. Hamming Distance", ci='sd') plt.yticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5]) fig.set(ylabel=None) fig.set(ylim=(-0.025, 0.55)) fig.set_xlabel(fig.get_xlabel(), fontsize = 21, fontweight='bold') fig.set_ylabel(fig.get_ylabel(), fontsize = 21, fontweight='bold') fig.set_yticklabels([]) fig.set_xticklabels(fig.get_xticks(), size = 18, fontweight='bold') plt.tight_layout() plt.savefig(f'{PLOT_DIR}/{DATASET}_contrast_robustness.pdf') check_for_non_altered_hashes(contrast_hamming, contrast_values[-1]) # plot two images that do not change idx1, idx2 = check_for_non_altered_hashes(contrast_hamming, contrast_values[-1], return_indices=True)[:2] print(f'{dataset.classes[dataset[idx1][1]][0]}:') plt.imshow(dataset[idx1][0].permute(1,2,0)) plt.show() print(f'{dataset.classes[dataset[idx2][1]][0]}:') plt.imshow(dataset[idx2][0].permute(1,2,0)) plt.show() print_mean_and_std_for_keys(contrast_hamming) plot_example_img_with_transformation(dataset, EXAMPLE_IMG_IDX, img_class=EXAMPLE_IMG_CLASS, transformation=ChangeContrast(contrast_values[-1]), file_path=os.path.join(EXAMPLE_IMG_DIR, 'contrast.png')) ###Output _____no_output_____ ###Markdown Saturation Robustness ###Code saturation_values = list(np.linspace(0, 2, 9, endpoint=True)) saturation_hamming = get_augmented_hashes_and_hamming_dist( [os.path.join(HASH_DIR, 'saturation', f'{DATASET}_saturation_{saturation}.csv') for saturation in saturation_values], saturation_values, bin_hashes_orig, num_processes=9 ) df = pd.DataFrame(saturation_hamming) df = df.melt() df = df.rename(columns={'variable': 'Saturation Factor', 'value': 'Norm. Hamming Distance'}) sns.set(rc={'figure.figsize':(8,5)}) sns.set_style("whitegrid") fig = sns.lineplot(data=df, marker='o', markersize=8, x="Saturation Factor", y="Norm. 
Hamming Distance", ci='sd') plt.yticks([0.00, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06]) fig.set_xlabel(fig.get_xlabel(), fontsize = 21, fontweight='bold') fig.set_ylabel(fig.get_ylabel(), fontsize = 21, fontweight='bold') fig.set_yticklabels(fig.get_yticks(), size = 18, fontweight='bold') fig.set_xticklabels(fig.get_xticks(), size = 18, fontweight='bold') yticklabels = [] for item in fig.get_yticklabels(): fmt = '{:0.2f}' item.set_text(fmt.format(float(item.get_text()))) yticklabels += [item] fig.set_yticklabels(yticklabels) plt.tight_layout() plt.savefig(f'{PLOT_DIR}/{DATASET}_saturation_robustness.pdf') print_mean_and_std_for_keys(saturation_hamming) check_for_non_altered_hashes(saturation_hamming, saturation_values[-1]) # plot two images that do not change idx1, idx2 = check_for_non_altered_hashes(saturation_hamming, saturation_values[-1], return_indices=True)[:2] print(f'{dataset.classes[dataset[idx1][1]][0]}:') plt.imshow(dataset[idx1][0].permute(1,2,0)) plt.show() print(f'{dataset.classes[dataset[idx2][1]][0]}:') plt.imshow(dataset[idx2][0].permute(1,2,0)) plt.show() plot_example_img_with_transformation(dataset, EXAMPLE_IMG_IDX, img_class=EXAMPLE_IMG_CLASS, transformation=ChangeSaturation(saturation_values[-1]), file_path=os.path.join(EXAMPLE_IMG_DIR, 'saturation.png')) ###Output _____no_output_____ ###Markdown Compression Robustness ###Code compression_values = [100] + list( (100 - np.ceil(np.logspace(0, np.log(100) / np.log(1.5), 10, endpoint=True, base=1.5))).clip(0, 100) ) compression_hamming = get_augmented_hashes_and_hamming_dist( [os.path.join(HASH_DIR, 'compression', f'{DATASET}_compression_{compression}.csv') for compression in compression_values], compression_values, bin_hashes_orig, num_processes=11 ) df = pd.DataFrame(compression_hamming) df = df.melt() df = df.rename(columns={'variable': 'Compression Value', 'value': 'Norm. Hamming Distance'}) sns.set(rc={'figure.figsize':(8,5)}) sns.set_style("whitegrid") fig = sns.lineplot(data=df, marker='o', markersize=8, x="Compression Value", y="Norm. 
Hamming Distance", ci='sd') plt.xticks([0, 40, 64, 78, 87, 92, 100]) plt.yticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5]) fig.set(ylabel=None) fig.set(ylim=(-0.025, 0.55)) fig.set_xlabel(fig.get_xlabel(), fontsize = 21, fontweight='bold') fig.set_ylabel(None) fig.set_yticklabels([]) fig.set_xticklabels(fig.get_xticks(), size = 18, fontweight='bold') plt.tight_layout() plt.savefig(f'{PLOT_DIR}/{DATASET}_compression_robustness.pdf') print_mean_and_std_for_keys(compression_hamming) check_for_non_altered_hashes(compression_hamming, compression_values[5]) # plot two images that do not change idx1, idx2 = check_for_non_altered_hashes(compression_hamming, compression_values[5], return_indices=True)[-2:] print(f'{dataset.classes[dataset[idx1][1]][0]}:') plt.imshow(dataset[idx1][0].permute(1,2,0)) plt.show() print(f'{dataset.classes[dataset[idx2][1]][0]}:') plt.imshow(dataset[idx2][0].permute(1,2,0)) plt.show() plot_example_img_with_transformation(dataset, EXAMPLE_IMG_IDX, img_class=EXAMPLE_IMG_CLASS, transformation=JpegCompression(compression_values[5]), file_path=os.path.join(EXAMPLE_IMG_DIR, 'compression.png')) ###Output _____no_output_____ ###Markdown Crop Robustness ###Code crop_values = list( filter( lambda x: x != 359, [360] + list(360 - np.append(np.logspace(0, 7, 8, base=2, endpoint=True, dtype=int), [180])) ) ) crop_hamming = get_augmented_hashes_and_hamming_dist( [os.path.join(HASH_DIR, 'crop', f'{DATASET}_crop_{crop}.csv') for crop in crop_values], crop_values, bin_hashes_orig, num_processes=10 ) df = pd.DataFrame(crop_hamming) df = df.melt() df = df.rename(columns={'variable': 'Center Crop Size', 'value': 'Norm. Hamming Distance'}) sns.set(rc={'figure.figsize':(8,5)}) sns.set_style("whitegrid") fig = sns.lineplot(data=df, marker='o', markersize=8, x="Center Crop Size", y="Norm. 
Hamming Distance", ci='sd') plt.xticks([180, 232, 286, 328, 360]) plt.yticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5]) fig.set(ylim=(-0.025, 0.55)) fig.invert_xaxis() fig.set_xlabel(fig.get_xlabel(), fontsize = 21, fontweight='bold') fig.set_ylabel(fig.get_ylabel(), fontsize = 21, fontweight='bold') fig.set_yticklabels(fig.get_yticks(), size = 18, fontweight='bold') fig.set_xticklabels(fig.get_xticks(), size = 18, fontweight='bold') plt.tight_layout() plt.savefig(f'{PLOT_DIR}/{DATASET}_crop_robustness.pdf') print_mean_and_std_for_keys(crop_hamming) check_for_non_altered_hashes(crop_hamming, crop_values[-2]) # plot two images that do not change idx1, idx2 = check_for_non_altered_hashes(crop_hamming, crop_values[-2], return_indices=True)[:2] print(f'{dataset.classes[dataset[idx1][1]][0]}:') plt.imshow(dataset[idx1][0].permute(1,2,0)) plt.show() print(f'{dataset.classes[dataset[idx2][1]][0]}:') plt.imshow(dataset[idx2][0].permute(1,2,0)) plt.show() plot_example_img_with_transformation(dataset, EXAMPLE_IMG_IDX, img_class=EXAMPLE_IMG_CLASS, transformation=CenterCrop(crop_values[-2]), file_path=os.path.join(EXAMPLE_IMG_DIR, 'crop.png')) ###Output _____no_output_____ ###Markdown Horizontal Flipping Robustness ###Code hflip_hamming = get_augmented_hashes_and_hamming_dist( [os.path.join(HASH_DIR, 'hflip', f'{DATASET}_hflip.csv')], [0], bin_hashes_orig, num_processes=1 ) print(f'Average Hamming Distance: {hflip_hamming[0].mean()}') print(f'Standard Deviation Hamming Distance: {hflip_hamming[0].std()}') check_for_non_altered_hashes(hflip_hamming, 0) # plot two images that do not change idx1, idx2 = check_for_non_altered_hashes(hflip_hamming, 0, return_indices=True)[:2] print(f'{dataset.classes[dataset[idx1][1]][0]}:') plt.imshow(dataset[idx1][0].permute(1,2,0)) plt.show() print(f'{dataset.classes[dataset[idx2][1]][0]}:') plt.imshow(dataset[idx2][0].permute(1,2,0)) plt.show() plot_example_img_with_transformation(dataset, EXAMPLE_IMG_IDX, img_class=EXAMPLE_IMG_CLASS, transformation=HorizontalFlipping(), file_path=os.path.join(EXAMPLE_IMG_DIR, 'hflip.png')) ###Output _____no_output_____ ###Markdown Vertical Flipping Robustness ###Code vflip_hamming = get_augmented_hashes_and_hamming_dist( [os.path.join(HASH_DIR, 'vflip', f'{DATASET}_vflip.csv')], [0], bin_hashes_orig, num_processes=1 ) print(f'Average Hamming Distance: {vflip_hamming[0].mean()}') print(f'Standard Deviation Hamming Distance: {vflip_hamming[0].std()}') check_for_non_altered_hashes(vflip_hamming, 0) # plot two images that do not change idx1, idx2 = check_for_non_altered_hashes(vflip_hamming, 0, return_indices=True)[:2] print(f'{dataset.classes[dataset[idx1][1]][0]}:') plt.imshow(dataset[idx1][0].permute(1,2,0)) plt.show() print(f'{dataset.classes[dataset[idx2][1]][0]}:') plt.imshow(dataset[idx2][0].permute(1,2,0)) plt.show() plot_example_img_with_transformation(dataset, EXAMPLE_IMG_IDX, img_class=EXAMPLE_IMG_CLASS, transformation=VerticalFlipping(), file_path=os.path.join(EXAMPLE_IMG_DIR, 'vflip.png')) ###Output _____no_output_____ ###Markdown Downsizing Robustness ###Code downsizing_values = list( filter( lambda x: x != 359, [360] + list(360 - np.append(np.logspace(0, 7, 8, base=2, endpoint=True, dtype=int), [180])) ) ) downsizing_hamming = get_augmented_hashes_and_hamming_dist( [os.path.join(HASH_DIR, 'downsizing', f'{DATASET}_downsizing_{size}.csv') for size in downsizing_values], downsizing_values, bin_hashes_orig, num_processes=9 ) df = pd.DataFrame(downsizing_hamming) df = df.melt() df = df.rename(columns={'variable': 'Image Size', 'value': 
'Norm. Hamming Distance'}) sns.set(rc={'figure.figsize':(8,5)}) sns.set_style("whitegrid") fig = sns.lineplot(data=df, marker='o', markersize=8, x="Image Size", y="Norm. Hamming Distance", ci='sd') fig.invert_xaxis() plt.xticks([360, 328, 296, 232, 180]) fig.set(ylabel=None) fig.set(ylim=(-0.025, 0.55)) fig.set_xlabel(fig.get_xlabel(), fontsize = 21, fontweight='bold') fig.set_ylabel(None) fig.set_yticklabels([]) fig.set_xticklabels(fig.get_xticks(), size = 18, fontweight='bold') plt.tight_layout() plt.savefig(f'{PLOT_DIR}/{DATASET}_downsizing_robustness.pdf') print_mean_and_std_for_keys(downsizing_hamming) check_for_non_altered_hashes(downsizing_hamming, downsizing_values[1]) # plot two images that do not change idx1, idx2 = check_for_non_altered_hashes(downsizing_hamming, downsizing_values[1], return_indices=True)[:2] print(f'{dataset.classes[dataset[idx1][1]][0]}:') plt.imshow(dataset[idx1][0].permute(1,2,0)) plt.show() print(f'{dataset.classes[dataset[idx2][1]][0]}:') plt.imshow(dataset[idx2][0].permute(1,2,0)) plt.show() plot_example_img_with_transformation(dataset, EXAMPLE_IMG_IDX, img_class=EXAMPLE_IMG_CLASS, transformation=BlackBorder(downsizing_values[-1]), file_path=os.path.join(EXAMPLE_IMG_DIR, 'downsizing.png')) ###Output _____no_output_____
learnPandas.ipynb
###Markdown Pandas Quick Tutorial *** Pandas is a great fit when you want to deal with data from Databases, Excell, CSV, JSON, and other formats. It allows you to load the data, clean the data, process and visualize the results. You must install pandas before you can run this code: `pip install pandas`. Pandas comes installed with Anaconda, so you don't need to install it if you will use Anaconda. Pandas has two main data types: DataFrame (for 2D data/tables) and Series (for 1D series). Both types include row index which can be number or string index. To create a Series use .Series() passing a list of values. Pandas will create integer index. ###Code import pandas as pd import numpy as np s = pd.Series([1, 3, 2, 99.5, 0.53]) # Pandas will create default integer index print("s:") print(s) s1 = pd.Series([1, 3, 2, 99.5, 0.53], index=["a", "b", "c", "s", "w"]) # Pandas will create default integer index print("s1 with pre-defined index:") s1 ###Output s: 0 1.00 1 3.00 2 2.00 3 99.50 4 0.53 dtype: float64 s1 with pre-defined index: ###Markdown To create a DataFrame use .DataFrame() passing a dict of objects that can be converted to a table. ###Code df1 = pd.DataFrame({ "id": [1, 3, 5], "name":["Mark", "Aame", "Ali"], "age":[33, 25.5, 17] }) print("df1:") print(df1) df11 = pd.DataFrame(df1.set_index("id")) print("df11 with index set to the id:") print(df11) df2 = pd.DataFrame( { "a": 1.4, "dsds": pd.Timestamp("20210613"), "ss": pd.Series(1, index=list(range(4)), dtype="int32"), "ww": np.array([2] * 4, dtype="int32"), "w3": pd.Categorical(["value1", "value2", "value0", "value0"]), "T": "Same text", } ) print("====================") print("df2:") print(df2) ###Output df1: id name age 0 1 Mark 33.0 1 3 Aame 25.5 2 5 Ali 17.0 df11 with index set to the id: name age id 1 Mark 33.0 3 Aame 25.5 5 Ali 17.0 ==================== df2: a dsds ss ww w3 T 0 1.4 2021-06-13 1 2 value1 Same text 1 1.4 2021-06-13 1 2 value2 Same text 2 1.4 2021-06-13 1 2 value0 Same text 3 1.4 2021-06-13 1 2 value0 Same text ###Markdown Viewing data Use head to view the top rows, and tail to view the last rows. ###Code print("First 2 rows of df11") print(df11.head(2)) print("----------------------------") print("\nLast 2 rows of df11") print(df11.tail(2)) print("----------------------------") print("\nThe index of df11", df11.index) print("\nThe labels of df11", df11.columns) print("----------------------------") print("\nView only the name column", df11["name"]) print("----------------------------") print("\nView second and third rows\n ", df11[1:3]) print("----------------------------") print("\nConvert the age column to NumPy array", df11["age"].to_numpy()) ###Output First 2 rows of df11 name age id 1 Mark 33.0 3 Aame 25.5 ---------------------------- Last 2 rows of df11 name age id 3 Aame 25.5 5 Ali 17.0 ---------------------------- The index of df11 Int64Index([1, 3, 5], dtype='int64', name='id') The labels of df11 Index(['name', 'age'], dtype='object') ---------------------------- View only the name column id 1 Mark 3 Aame 5 Ali Name: name, dtype: object ---------------------------- View second and third rows name age id 3 Aame 25.5 5 Ali 17.0 ---------------------------- Convert the age column to NumPy array [33. 25.5 17. 
] ###Markdown Data manipulation ###Code print("Sorting by Name column") print(df11.sort_values("name")) print("----------------------------") print("Sorting by Age column descending") print(df11.sort_values("age", ascending=False)) print("----------------------------") df11.loc[5, "age"] = 18 # loc takes the index ans the column name. It also can work on ranges print("Changing the age for Ali to 18\n", df11) df11.loc[:, "age"] += 1 # loc takes the index ans the column name. It also can work on ranges print("Increase all ages by 1 year\n", df11) ###Output Sorting by Name column name age id 3 Aame 25.5 5 Ali 17.0 1 Mark 33.0 ---------------------------- Sorting by Age column descending name age id 1 Mark 33.0 3 Aame 25.5 5 Ali 17.0 ---------------------------- Changing the age for Ali to 18 name age id 1 Mark 33.0 3 Aame 25.5 5 Ali 18.0 Increase all ages by 1 year name age id 1 Mark 34.0 3 Aame 26.5 5 Ali 19.0 ###Markdown Data indexing/filtering ###Code print("df11 with Use Boolean Filter [True, False, True]\n", df11[[True, False, True]]) print("----------------------------") print("df11 with Use Condition Filter age>20\n", df11[df11["age"]>20]) print("----------------------------") df12 = df11.copy() df12["gender"] = ["M", np.nan, "M"] # Add new "gender" column with missing data print("df12 with gender column:\n", df12) print("----------------------------") print("df12 with no missing gender values:\n", df12[df12["gender"].notnull()]) print("----------------------------") # drops all rows with missing data print("df12 with no missing values in all columns:\n", df12.dropna(how="any")) ## Concatination tmp = pd.DataFrame([ { "name":"Mary", "age": 23, "gender": "F" },{ "name":"Saly", "age": 25, "gender": "F" },{ "name":"Sam", "age": 45, "gender": "M" }], index=[8, 12, 20]) df13 = pd.concat([df12, tmp]) print("Added 3 new reconrds") df13 ###Output Added 3 new reconrds ###Markdown Joining data from two tables based on index Works same as JOIN in SQL ###Code df131 = pd.DataFrame([ ["Y", True], ["N", False], ["Y", True], ["N", True], ["Y", False], ["Y", True] ], columns=["married", "working"], index=[1,3,5,8,12,20] ) df14 = pd.merge(df13, df131, left_index=True, right_index=True) df14 ###Output _____no_output_____ ###Markdown Joining data from two tables based on a key Works same as JOIN in SQL ###Code df132 = pd.DataFrame([ [5, "Y", True], [3, "N", False], [1, "Y", True], [8, "N", True], [20, "Y", False], [12, "Y", True] ], columns=["personid", "married", "working"] ) # adding id column to the dataframe, which equals the index value df133 = df13.assign(personid= lambda x: x.index ) print("Added id column\n", df133) df141 = pd.merge(df133, df132, on="personid") # notice, the index got reset # To restore the idex we can use df141.set_index(df13.index) df141 ###Output Added id column name age gender personid 1 Mark 34.0 M 1 3 Aame 26.5 NaN 3 5 Ali 19.0 M 5 8 Mary 23.0 F 8 12 Saly 25.0 F 12 20 Sam 45.0 M 20 ###Markdown Grouping ###Code print("Group the data by gender and get the size") df13.groupby(["gender"]).size() ###Output Group the data by gender and get the size ###Markdown Importing data from JSON file ###Code data = pd.read_json("bookingdetails.json") data # data.describe() ###Output _____no_output_____ ###Markdown Read Data from Database Use the attached ReadMongoData.py to ready tables from hosted TravelExperts Database. You will need to install two libraries to use this file. Make sure the DB is hosted and accessible. If not, then you might have to host it first. 
See the file for more details. Check `readDB/ReadMongoData.py` to learn what tables you have access to. ###Code # pip install pymongo dnspython from readDB import ReadMongoData as db bookingdetails = db.getBookingDetails() bookingdetails ###Output _____no_output_____ ###Markdown Read Bookings table ###Code bookings = db.getBookings() bookings from readDB import ReadMongoData as db customers = db.getCustomers() customers ###Output _____no_output_____ ###Markdown Join the two tables on BookingId ###Code bookings_with_details = pd.merge(bookings, bookingdetails, on="BookingId") bookings_with_details bookings_with_customers = pd.merge(bookings, bookingdetails, on="BookingId") bookings_with_customers ###Output _____no_output_____ ###Markdown Grouping booking details by destination, for example ###Code bookings_with_details[["BasePrice", "AgencyCommission"]] = bookings_with_details[[ "BasePrice", "AgencyCommission"]].astype(str).astype(float) bookings_with_details['Year'] = bookings_with_details['TripStart'].dt.year df1 = bookings_with_details df1 # bookings_with_details.groupby(["TripStart"]).sum()[['BasePrice', 'AgencyCommission', 'Year']] df1.groupby(["Year"]).sum()[['BasePrice', 'AgencyCommission']] ###Output _____no_output_____ ###Markdown Grouping by year and summing up base price and agency commission ###Code bookings_with_details.groupby(["Year"]).sum()[['BasePrice', 'AgencyCommission']] # bookings_with_details['Month'] = bookings_with_details['TripStart'].date[0:6] bookings_with_details['Month'] = bookings_with_details['TripStart'].dt.month bookings_with_details.head() bookings_with_details[bookings_with_details['BasePrice'].notnull()] bookingdetails.groupby(["Destination"]).sum() print(bookingdetails.info()) ###Output <class 'pandas.core.frame.DataFrame'> Int64Index: 144 entries, 11 to 972 Data columns (total 13 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 BookingDetailId 144 non-null int64 1 ItineraryNo 144 non-null float64 2 TripStart 144 non-null datetime64[ns] 3 TripEnd 144 non-null datetime64[ns] 4 Description 144 non-null object 5 Destination 144 non-null object 6 BasePrice 144 non-null object 7 AgencyCommission 144 non-null object 8 BookingId 144 non-null int64 9 RegionId 144 non-null object 10 ClassId 144 non-null object 11 FeeId 144 non-null object 12 ProductSupplierId 144 non-null int64 dtypes: datetime64[ns](2), float64(1), int64(3), object(7) memory usage: 15.8+ KB None
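###Markdown With the Year and Month columns in place, the same grouping pattern extends to more than one key. This is just one possible way of summarising the data, reusing the columns created in the cells above: ###Code
# total price and commission per Year/Month combination
monthly_totals = bookings_with_details.groupby(["Year", "Month"])[["BasePrice", "AgencyCommission"]].sum()
monthly_totals
###Output _____no_output_____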
docs/notebook/atmo/von_karman_psd_examples.ipynb
###Markdown Von Karman turbulence spectrum Total phase variance for Kolmogorov spectrumWe compute the Kolmogorov spectrum total variance over a 10m telescope and compare it with the Noll ('76) formula ($\Delta_1 = 1.029 \left(\frac{D}{r_0}\right)^{5/3}$). ###Code import numpy as np from scipy.special import jv from arte.atmo.von_karman_psd import VonKarmanPsd 'Compute the total variance of Kolmogorov over a 10m telescope:' R = 5 r0 = 0.1 L0 = np.inf freqs = np.logspace(-8, 4, 1000) bess = jv(1, 2*np.pi*R*freqs) psd = VonKarmanPsd(r0, L0) psd_piston_removed = psd.spatial_psd(freqs) * (1 - (bess/(np.pi*R*freqs))**2) var_in_square_rad = np.trapz(psd_piston_removed*2*np.pi*freqs, freqs) noll_var_in_square_rad = 1.029*(2*R/r0)**(5./3) print("Variance computed using the VonKarmanPsd class: %d" %(var_in_square_rad)) print("Variance from Noll's formula: %d" %(noll_var_in_square_rad)) ###Output Variance computed using the VonKarmanPsd class: 2213 Variance from Noll's formula: 2216 ###Markdown Kolmogorov and Von Karman spectraPlot spectra for different outer scale values ###Code import matplotlib.pyplot as plt r0 = np.array([0.1, 0.5]) L0 = [25, 25] psd_kolm = VonKarmanPsd(0.1, np.inf) psd_vk = VonKarmanPsd(r0, L0) spatial_freqs = np.logspace(-4, 4, 1000) psd_kolm.plot_von_karman_psd_vs_frequency(spatial_freqs) psd_vk.plot_von_karman_psd_vs_frequency(spatial_freqs, idx=0) psd_vk.plot_von_karman_psd_vs_frequency(spatial_freqs, idx=1) plt.plot(spatial_freqs, 10*spatial_freqs**(-11/3), '--') plt.legend(['kolm r0=0.1m', 'von karman r0=0.1 L0=25', 'von karman r0=0.5 L0=25', 'k^-11/3']) ###Output _____no_output_____
02_supervised_learning/15_w3a_preprocessing_LR.ipynb
###Markdown Data preprocessing and logistic regression for binary classification Programming assignment In this assignment you will get acquainted with the main data preprocessing techniques and apply them to train a logistic regression model. The answers have to be uploaded to the corresponding form as 6 text files.The assignment requires Python 2.7 as well as up-to-date versions of the libraries:- NumPy: 1.10.4 or higher- Pandas: 0.17.1 or higher- Scikit-learn: 0.17 or higher ###Code import pandas as pd import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import scipy.stats as sts import seaborn as sns from contextlib import contextmanager sns.set() sns.set_style("whitegrid") color_palette = sns.color_palette('deep') + sns.color_palette('husl', 6) + sns.color_palette('bright') + sns.color_palette('pastel') %matplotlib inline sns.palplot(color_palette) # will be in numpy >= 1.15 @contextmanager def printoptions(*args, **kwargs): original = np.get_printoptions() np.set_printoptions(*args, **kwargs) try: yield finally: np.set_printoptions(**original) def ndprint(a, precision=3): with printoptions(precision=precision, suppress=True): print(a) from sklearn import datasets, linear_model, metrics, model_selection as mdsel, pipeline, preprocessing ###Output _____no_output_____ ###Markdown Dataset description The task: using 38 features related to a grant application (the researchers' field of study, information about their academic background, the size of the grant, the area in which it is awarded), predict whether the application will be accepted. The dataset contains information on 6000 grant applications submitted to the University of Melbourne between 2004 and 2008.The full version of the data, with a larger number of features, can be found at https://www.kaggle.com/c/unimelb. ###Code data = pd.read_csv('data/15_data.csv') data.shape ###Output _____no_output_____ ###Markdown Let's extract the target variable Grant.Status from the dataset and denote it by y. From now on X denotes the training sample and y the answers on it. ###Code X = data.drop('Grant.Status', 1) y = data['Grant.Status'] ###Output _____no_output_____ ###Markdown Theory of logistic regression Once it is clear exactly which problem has to be solved on these data, the next step in a real analysis would be choosing a suitable method. In this assignment the choice of method has been made for you: it is logistic regression. Let us briefly recall the model being used.Logistic regression predicts the probabilities that an object belongs to each class. The sum of the logistic regression outputs on a single object over all classes equals one.$$ \sum_{k=1}^K \pi_{ik} = 1, \quad \pi_{ik} \equiv P\,(y_i = k \mid x_i, \theta), $$where:- $\pi_{ik}$ is the probability that object $x_i$ from the sample $X$ belongs to class $k$- $\theta$ are the internal parameters of the algorithm, tuned during training; for logistic regression these are $w, b$It follows from this property of the model that in the binary classification case we only need to compute the probability of an object belonging to one of the classes (the other is obtained from the normalisation condition on the probabilities). 
Эта вероятность вычисляется, используя логистическую функцию:$$ P\,(y_i = 1 \mid x_i, \theta) = \frac{1}{1 + \exp(-w^T x_i-b)} $$Параметры $w$ и $b$ находятся, как решения следующей задачи оптимизации (указаны функционалы с L1 и L2 регуляризацией, с которыми вы познакомились в предыдущих заданиях):L2-regularization:$$ Q(X, y, \theta) = \frac{1}{2} w^T w + C \sum_{i=1}^l \log ( 1 + \exp(-y_i (w^T x_i + b ) ) ) \longrightarrow \min\limits_{w,b} $$L1-regularization:$$ Q(X, y, \theta) = \sum_{d=1}^D |w_d| + C \sum_{i=1}^l \log ( 1 + \exp(-y_i (w^T x_i + b ) ) ) \longrightarrow \min\limits_{w,b} $$$C$ - это стандартный гиперпараметр модели, который регулирует то, насколько сильно мы позволяем модели подстраиваться под данные. Предобработка данных Из свойств данной модели следует, что:- все $X$ должны быть числовыми данными (в случае наличия среди них категорий, их требуется некоторым способом преобразовать в вещественные числа)- среди $X$ не должно быть пропущенных значений (т.е. все пропущенные значения перед применением модели следует каким-то образом заполнить)Поэтому базовым этапом в предобработке любого датасета для логистической регрессии будет кодирование категориальных признаков, а так же удаление или интерпретация пропущенных значений (при наличии того или другого). ###Code data.head() ###Output _____no_output_____ ###Markdown Видно, что в датасете есть как числовые, так и категориальные признаки. Получим списки их названий: ###Code numeric_cols = ['RFCD.Percentage.1', 'RFCD.Percentage.2', 'RFCD.Percentage.3', 'RFCD.Percentage.4', 'RFCD.Percentage.5', 'SEO.Percentage.1', 'SEO.Percentage.2', 'SEO.Percentage.3', 'SEO.Percentage.4', 'SEO.Percentage.5', 'Year.of.Birth.1', 'Number.of.Successful.Grant.1', 'Number.of.Unsuccessful.Grant.1'] categorical_cols = list(set(X.columns.values.tolist()) - set(numeric_cols)) ###Output _____no_output_____ ###Markdown Также в нём присутствуют пропущенные значения. Очевидны решением будет исключение всех данных, у которых пропущено хотя бы одно значение. Сделаем это: ###Code data.dropna().shape ###Output _____no_output_____ ###Markdown Видно, что тогда мы выбросим почти все данные, и такой метод решения в данном случае не сработает.Пропущенные значения можно так же интерпретировать, для этого существует несколько способов, они различаются для категориальных и вещественных признаков.Для вещественных признаков:- заменить на 0 (данный признак давать вклад в предсказание для данного объекта не будет)- заменить на среднее (каждый пропущенный признак будет давать такой же вклад, как и среднее значение признака на датасете)Для категориальных:- интерпретировать пропущенное значение, как ещё одну категорию (данный способ является самым естественным, так как в случае категорий у нас есть уникальная возможность не потерять информацию о наличии пропущенных значений; обратите внимание, что в случае вещественных признаков данная информация неизбежно теряется) Задание 0. Обработка пропущенных значений.1. Заполните пропущенные вещественные значения в X нулями и средними по столбцам, назовите полученные датафреймы X_real_zeros и X_real_mean соответственно. Для подсчёта средних используйте описанную ниже функцию calculate_means, которой требуется передать на вход вешественные признаки из исходного датафрейма.2. 
Все категориальные признаки в X преобразуйте в строки, пропущенные значения требуется также преобразовать в какие-либо строки, которые не являются категориями (например, 'NA'), полученный датафрейм назовите X_cat.Для объединения выборок здесь и далее в задании рекомендуется использовать функции np.hstack(...) np.vstack(...) ###Code def calculate_means(numeric_data): means = [np.nanmean(serie) for _, serie in numeric_data.items()] return pd.Series(means, numeric_data.keys()) %%time num_data = data.loc[:, numeric_cols] means = num_data.mean(skipna=True) X_real_zeros = num_data.fillna(0.) X_real_mean = num_data.fillna(means) %%time X_cat = data \ .loc[:, categorical_cols] \ .astype(str) \ .fillna('NA') def get_uniques(data): return {key: len(serie.unique()) for key, serie in data.items()} print(pd.Series(get_uniques(X_cat))) ###Output RFCD.Code.2 584 With.PHD.1 2 RFCD.Code.3 501 Home.Language.1 3 RFCD.Code.4 100 Country.of.Birth.1 11 SEO.Code.3 277 Faculty.No..1 19 RFCD.Code.1 607 SEO.Code.2 350 A..1 58 Role.1 8 B.1 47 No..of.Years.in.Uni.at.Time.of.Grant.1 6 Person.ID.1 2002 SEO.Code.5 40 A.1 57 SEO.Code.4 85 RFCD.Code.5 29 Dept.No..1 103 Sponsor.Code 256 Grant.Category.Code 14 C.1 39 Contract.Value.Band...see.note.A 17 SEO.Code.1 378 dtype: int64 ###Markdown Преобразование категориальных признаков. В предыдущей ячейке мы разделили наш датасет ещё на две части: в одной присутствуют только вещественные признаки, в другой только категориальные. Это понадобится нам для раздельной последующей обработке этих данных, а так же для сравнения качества работы тех или иных методов.Для использования модели регрессии требуется преобразовать категориальные признаки в вещественные. Рассмотрим основной способ преоборазования категориальных признаков в вещественные: one-hot encoding. Его идея заключается в том, что мы преобразуем категориальный признак при помощи бинарного кода: каждой категории ставим в соответствие набор из нулей и единиц.Посмотрим, как данный метод работает на простом наборе данных. 
###Code from sklearn.linear_model import LogisticRegression as LR from sklearn.feature_extraction import DictVectorizer as DV %%time categorial_data = pd.DataFrame({'sex': ['male', 'female', 'male', 'female'], 'nationality': ['American', 'European', 'Asian', 'European']}) print('Исходные данные:\n') print(categorial_data) encoder = DV(sparse = False) encoded_data = encoder.fit_transform(categorial_data.T.to_dict().values()) print('\nЗакодированные данные:\n') print(encoded_data) %%time print(categorial_data.sex.name) print(categorial_data.sex == 'male') a_ = np.array([1, 2, 3]) b_ = np.array([4, 6]) c_ = np.array([5, 7]) np.hstack([a_, b_, c_]) %%time def transform_serie(s, unique_values): val_to_ind = {val: i for i, val in enumerate(unique_values)} new_col_names = [f'{s.name}={val}' for val in unique_values] new_cols = np.zeros(shape=(len(s), len(unique_values))) for i, val in enumerate(s): new_cols[i, val_to_ind[val]] = 1 return new_cols, new_col_names def vectorize_categories_naive(data): transformed_data = [transform_serie(serie, serie.unique()) for key, serie in data.items()] new_cols = np.hstack([cols for cols, names in transformed_data]) new_col_names = np.hstack([names for cols, names in transformed_data]) return new_cols, new_col_names vectorized_data = vectorize_categories_naive(categorial_data) print(vectorized_data) from scipy.sparse import coo_matrix def generate_col_names(base_col_name, unique_values): return [f'{base_col_name}={val}' for val in unique_values] def get_col_val2ind_map(unique_values, base_ind): return {val: base_ind + i for i, val in enumerate(unique_values)} def generate_j_array(s, val_to_ind): return [val_to_ind[val] for val in s] def vectorize_categories_sparse(data): base_ind = 0 j = [None] * data.shape[1] col_names = [None] * data.shape[1] for ind, (key, serie) in enumerate(data.items()): unique_values = serie.unique() col_val2ind_map = get_col_val2ind_map(unique_values, base_ind) col_names[ind] = generate_col_names(serie.name, unique_values) j[ind] = generate_j_array(serie, col_val2ind_map) base_ind += len(unique_values) col_names = np.hstack(col_names) data_ = np.ones(data.shape[0] * data.shape[1]) i = np.tile(np.arange(data.shape[0]), data.shape[1]) j = np.hstack(j) result = coo_matrix((data_, (i, j))) return result, col_names vectorized_data = vectorize_categories_sparse(categorial_data) # print(vectorized_data[1]) ###Output _____no_output_____
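###Markdown As a quick sanity check, the dense and the sparse hand-rolled encoders should produce the same indicator matrix on the toy `categorial_data` frame. A minimal sketch, assuming the `vectorize_categories_*` functions defined above:
###Code
import numpy as np

dense_cols, dense_names = vectorize_categories_naive(categorial_data)
sparse_mat, sparse_names = vectorize_categories_sparse(categorial_data)

# Align the sparse columns to the dense column names, then compare element-wise
order = [list(sparse_names).index(name) for name in dense_names]
print(np.allclose(dense_cols, sparse_mat.toarray()[:, order]))
###Output
_____no_output_____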
В частности, если вы отдельно закодируете обучающую и тестовую выборку, то получите вообще говоря разные коды для одних и тех же признаков, и ваше решение работать не будет.Также параметры многих преобразований (например, рассмотренное ниже масштабирование) нельзя вычислять одновременно на данных из обучения и теста, потому что иначе подсчитанные на тесте метрики качества будут давать смещённые оценки на качество работы алгоритма. Кодирование категориальных признаков не считает на обучающей выборке никаких параметров, поэтому его можно применять сразу к всему датасету. ###Code %%time encoder = DV(sparse = False) X_cat_oh = encoder.fit_transform(X_cat.T.to_dict().values()) %%time vectorized_data = vectorize_categories_naive(X_cat) %%time vectorized_data = vectorize_categories_sparse(X_cat) # print(encoder.feature_names_) # print(vectorized_data[1]) X_cat_oh = vectorized_data[0] print(vectorized_data[0].shape) print(X_cat_oh.shape) print(type(X_cat_oh)) X_cat_oh.shape[0] * X_cat_oh.shape[1] ###Output _____no_output_____ ###Markdown Для построения метрики качества по результату обучения требуется разделить исходный датасет на обучающую и тестовую выборки.Обращаем внимание на заданный параметр для генератора случайных чисел: random_state. Так как результаты на обучении и тесте будут зависеть от того, как именно вы разделите объекты, то предлагается использовать заранее определённое значение для получение результатов, согласованных с ответами в системе проверки заданий. ###Code X_train_real_zeros, X_test_real_zeros, y_train, y_test = mdsel.train_test_split( X_real_zeros, y, test_size=0.3, random_state=0 ) X_train_real_mean, X_test_real_mean = mdsel.train_test_split( X_real_mean, test_size=0.3, random_state=0 ) X_train_cat_oh, X_test_cat_oh = mdsel.train_test_split( X_cat_oh, test_size=0.3, random_state=0 ) from scipy.sparse import hstack as sparse_hstack print(type(X_train_real_zeros), X_train_real_zeros.shape) print(type(X_train_cat_oh), X_train_cat_oh.shape) ###Output <class 'pandas.core.frame.DataFrame'> (4200, 13) <class 'scipy.sparse.csr.csr_matrix'> (4200, 5593) ###Markdown Описание классов Итак, мы получили первые наборы данных, для которых выполнены оба ограничения логистической регрессии на входные данные. Обучим на них регрессию, используя имеющийся в библиотеке sklearn функционал по подбору гиперпараметров модели optimizer = GridSearchCV(estimator, param_grid)где:- estimator - обучающий алгоритм, для которого будет производиться подбор параметров- param_grid - словарь параметров, ключами которого являются строки-названия, которые передаются алгоритму estimator, а значения - набор параметров для перебораДанный класс выполняет кросс-валидацию обучающей выборки для каждого набора параметров и находит те, на которых алгоритм работает лучше всего. Этот метод позволяет настраивать гиперпараметры по обучающей выборке, избегая переобучения. 
Некоторые опциональные параметры вызова данного класса, которые нам понадобятся:- scoring - функционал качества, максимум которого ищется кросс валидацией, по умолчанию используется функция score() класса esimator- n_jobs - позволяет ускорить кросс-валидацию, выполняя её параллельно, число определяет количество одновременно запущенных задач- cv - количество фолдов, на которые разбивается выборка при кросс-валидацииПосле инициализации класса GridSearchCV, процесс подбора параметров запускается следующим методом: optimizer.fit(X, y) На выходе для получения предсказаний можно пользоваться функцией optimizer.predict(X) для меток или optimizer.predict_proba(X) для вероятностей (в случае использования логистической регрессии). Также можно напрямую получить оптимальный класс estimator и оптимальные параметры, так как они является атрибутами класса GridSearchCV:- best\_estimator\_ - лучший алгоритм- best\_params\_ - лучший набор параметровКласс логистической регрессии выглядит следующим образом: estimator = LogisticRegression(penalty) где penalty принимает либо значение 'l2', либо 'l1'. По умолчанию устанавливается значение 'l2', и везде в задании, если об этом не оговорено особо, предполагается использование логистической регрессии с L2-регуляризацией. Задание 1. Сравнение способов заполнения вещественных пропущенных значений. ###Code from sklearn.linear_model import LogisticRegression from sklearn.grid_search import GridSearchCV from sklearn.metrics import roc_auc_score def plot_scores(optimizer): scores = [[item[0]['C'], item[1], (np.sum((item[2]-item[1])**2)/(item[2].size-1))**0.5] for item in optimizer.grid_scores_] scores = np.array(scores) plt.semilogx(scores[:,0], scores[:,1]) plt.fill_between(scores[:,0], scores[:,1]-scores[:,2], scores[:,1]+scores[:,2], alpha=0.3) plt.show() def write_answer_1(auc_1, auc_2): auc = (auc_1 + auc_2)/2 with open("out/15_preprocessing_lr_answer1.txt", "w") as fout: fout.write(str(auc)) ###Output _____no_output_____ ###Markdown 1. Составьте две обучающие выборки из вещественных и категориальных признаков: в одной вещественные признаки, где пропущенные значения заполнены нулями, в другой - средними. Рекомендуется записывать в выборки сначала вещественные, а потом категориальные признаки. ###Code # X_train_zeros = np.hstack([X_train_real_zeros, X_train_cat_oh]) # X_train_means = np.hstack([X_train_real_mean, X_train_cat_oh]) X_train_zeros = sparse_hstack([X_train_real_zeros, X_train_cat_oh]) X_train_means = sparse_hstack([X_train_real_mean, X_train_cat_oh]) X_test_zeros = sparse_hstack([X_test_real_zeros, X_test_cat_oh]) X_test_means = sparse_hstack([X_test_real_mean, X_test_cat_oh]) ###Output _____no_output_____ ###Markdown 2. Обучите на них логистическую регрессию, подбирая параметры из заданной сетки param_grid по методу кросс-валидации с числом фолдов cv=3. В качестве оптимизируемой функции используйте заданную по умолчанию. ###Code %%time param_grid = {'C': [0.01, 0.05, 0.1, 0.5, 1, 5, 10]} cv = 3 estimator = LogisticRegression() optimizer_zeros = GridSearchCV(estimator, param_grid, cv=cv) optimizer_means = GridSearchCV(estimator, param_grid, cv=cv) optimizer_zeros.fit(X_train_zeros, y_train) optimizer_means.fit(X_train_means, y_train) ###Output Wall time: 11.5 s ###Markdown 3. Постройте два графика оценок точности +- их стандратного отклонения в зависимости от гиперпараметра и убедитесь, что вы действительно нашли её максимум. Также обратите внимание на большую дисперсию получаемых оценок (уменьшить её можно увеличением числа фолдов cv). 
###Code plot_scores(optimizer_zeros) plot_scores(optimizer_means) ###Output _____no_output_____ ###Markdown 4. Получите две метрики качества AUC ROC на тестовой выборке и сравните их между собой. Какой способ заполнения пропущенных вещественных значений работает лучше? В дальнейшем для выполнения задания в качестве вещественных признаков используйте ту выборку, которая даёт лучшее качество на тесте. ###Code from sklearn.metrics import roc_curve, roc_auc_score predicted_zeros = optimizer_zeros.predict_proba(X_test_zeros)[:, 1] predicted_means = optimizer_means.predict_proba(X_test_means)[:, 1] fpr, tpr, thr = roc_curve(y_test, predicted_zeros) fpr2, tpr2, thr2 = roc_curve(y_test, predicted_means) roc_auc_zeros = roc_auc_score(y_test, predicted_zeros) roc_auc_means = roc_auc_score(y_test, predicted_means) plt.figure(figsize=(12, 12)) plt.plot(fpr, tpr, label='zeros: {:.7f}'.format(roc_auc_zeros)) plt.plot(fpr2, tpr2, label='means: {:.7f}'.format(roc_auc_means)) plt.xlabel("FPR") plt.ylabel("TPR") plt.legend(loc=4) plt.axis([-0.1, 1.1, -0.1, 1.1]) ###Output _____no_output_____ ###Markdown 5. Передайте два значения AUC ROC (сначала для выборки, заполненной средними, потом для выборки, заполненной нулями) в функцию write_answer_1 и запустите её. Полученный файл является ответом на 1 задание. ###Code write_answer_1(roc_auc_means, roc_auc_zeros) ###Output _____no_output_____ ###Markdown Информация для интересующихся: вообще говоря, не вполне логично оптимизировать на кросс-валидации заданный по умолчанию в классе логистической регрессии функционал accuracy, а измерять на тесте AUC ROC, но это, как и ограничение размера выборки, сделано для ускорения работы процесса кросс-валидации. Масштабирование вещественных признаков. Попробуем как-то улучшить качество классификации. Для этого посмотрим на сами данные: ###Code list(X_train_real_zeros.columns) == numeric_cols from pandas.plotting import scatter_matrix data_numeric = X_train_real_zeros list_cols = ['Number.of.Successful.Grant.1', 'SEO.Percentage.2', 'Year.of.Birth.1'] scatter_matrix(data_numeric[list_cols], alpha=0.5, figsize=(10, 10)) plt.show() ###Output _____no_output_____ ###Markdown Как видно из графиков, разные признаки очень сильно отличаются друг от друга по модулю значений (обратите внимание на диапазоны значений осей x и y). В случае обычной регрессии это никак не влияет на качество обучаемой модели, т.к. у меньших по модулю признаков будут большие веса, но при использовании регуляризации, которая штрафует модель за большие веса, регрессия, как правило, начинает работать хуже.В таких случаях всегда рекомендуется делать стандартизацию (масштабирование) признаков, для того чтобы они меньше отличались друг друга по модулю, но при этом не нарушались никакие другие свойства признакового пространства. При этом даже если итоговое качество модели на тесте уменьшается, это повышает её интерпретабельность, потому что новые веса имеют смысл "значимости" данного признака для итоговой классификации.Стандартизация осуществляется посредством вычета из каждого признака среднего значения и нормировки на выборочное стандартное отклонение:$$ x^{scaled}_{id} = \dfrac{x_{id} - \mu_d}{\sigma_d}, \quad \mu_d = \frac{1}{N} \sum_{i=1}^l x_{id}, \quad \sigma_d = \sqrt{\frac{1}{N-1} \sum_{i=1}^l (x_{id} - \mu_d)^2} $$ ###Code data_numeric.hist(figsize=(12, 12)) plt.show() ###Output _____no_output_____ ###Markdown Задание 1.5. Масштабирование вещественных признаков.1. 
По аналогии с вызовом one-hot encoder примените масштабирование вещественных признаков для обучающих и тестовых выборок X_train_real_zeros и X_test_real_zeros, используя класс StandardScaler и методы StandardScaler.fit_transform(...) StandardScaler.transform(...)2. Сохраните ответ в переменные X_train_real_scaled и X_test_real_scaled соответственно ###Code from sklearn.preprocessing import StandardScaler scaler = StandardScaler() X_train_real_scaled = scaler.fit_transform(X_train_real_zeros) X_test_real_scaled = scaler.transform(X_test_real_zeros) X_train_real_scaled = pd.DataFrame(X_train_real_scaled, columns=X_train_real_zeros.columns) X_train_real_scaled.corrwith(y_train) ###Output _____no_output_____ ###Markdown Сравнение признаковых пространств. Построим такие же графики для преобразованных данных: ###Code list_cols = ['Number.of.Successful.Grant.1', 'SEO.Percentage.2', 'Year.of.Birth.1'] scatter_matrix(X_train_real_scaled[list_cols], alpha=0.5, figsize=(10, 10)) plt.show() ###Output _____no_output_____ ###Markdown Как видно из графиков, мы не поменяли свойства признакового пространства: гистограммы распределений значений признаков, как и их scatter-plots, выглядят так же, как и до нормировки, но при этом все значения теперь находятся примерно в одном диапазоне, тем самым повышая интерпретабельность результатов, а также лучше сочетаясь с идеологией регуляризации. Задание 2. Сравнение качества классификации до и после масштабирования вещественных признаков.1. Обучите ещё раз регрессию и гиперпараметры на новых признаках, объединив их с закодированными категориальными.2. Проверьте, был ли найден оптимум accuracy по гиперпараметрам во время кроссвалидации.3. Получите значение ROC AUC на тестовой выборке, сравните с лучшим результатом, полученными ранее.4. Запишите полученный ответ в файл при помощи функции write_answer_2. ###Code def write_answer_2(auc): with open("out/15_preprocessing_lr_answer2.txt", "w") as fout: fout.write(str(auc)) %%time param_grid = {'C': [0.01, 0.05, 0.1, 0.5, 1, 5, 10]} cv = 3 X_train_scaled = sparse_hstack([X_train_real_scaled, X_train_cat_oh]).tocsr() X_test_scaled = sparse_hstack([X_test_real_scaled, X_test_cat_oh]).tocsr() optimizer_scaled = GridSearchCV(estimator, param_grid, cv=cv) optimizer_scaled.fit(X_train_scaled, y_train) plot_scores(optimizer_scaled) predicted_scaled = optimizer_scaled.predict_proba(X_test_scaled)[:, 1] fpr, tpr, thr = roc_curve(y_test, predicted_zeros) fpr2, tpr2, thr2 = roc_curve(y_test, predicted_scaled) roc_auc_zeros = roc_auc_score(y_test, predicted_zeros) roc_auc_scaled = roc_auc_score(y_test, predicted_scaled) plt.figure(figsize=(12, 12)) plt.plot(fpr, tpr, label='zeros: {:.7f}'.format(roc_auc_zeros)) plt.plot(fpr2, tpr2, label='scaled: {:.7f}'.format(roc_auc_scaled)) plt.xlabel("FPR") plt.ylabel("TPR") plt.legend(loc=4) plt.axis([-0.1, 1.1, -0.1, 1.1]) write_answer_2(roc_auc_scaled) ###Output _____no_output_____ ###Markdown Балансировка классов. Алгоритмы классификации могут быть очень чувствительны к несбалансированным классам. Рассмотрим пример с выборками, сэмплированными из двух гауссиан. Их мат. ожидания и матрицы ковариации заданы так, что истинная разделяющая поверхность должна проходить параллельно оси x. Поместим в обучающую выборку 20 объектов, сэмплированных из 1-й гауссианы, и 10 объектов из 2-й. После этого обучим на них линейную регрессию, и построим на графиках объекты и области классификации. 
###Code np.random.seed(0) """Сэмплируем данные из первой гауссианы""" data_0 = np.random.multivariate_normal([0,0], [[0.5,0],[0,0.5]], size=80) """И из второй""" data_1 = np.random.multivariate_normal([0,1], [[0.5,0],[0,0.5]], size=80) """На обучение берём 20 объектов из первого класса и 10 из второго""" example_data_train = np.vstack([data_0[:40,:], data_1[:10,:]]) example_labels_train = np.concatenate([np.zeros((40)), np.ones((10))]) """На тест - 20 из первого и 30 из второго""" example_data_test = np.vstack([data_0[40:,:], data_1[10:,:]]) example_labels_test = np.concatenate([np.zeros((40)), np.ones((70))]) """Задаём координатную сетку, на которой будем вычислять область классификации""" xx, yy = np.meshgrid(np.arange(-3, 3, 0.02), np.arange(-3, 3, 0.02)) """Обучаем регрессию без балансировки по классам""" optimizer = GridSearchCV(LogisticRegression(), param_grid, cv=cv, n_jobs=-1) optimizer.fit(example_data_train, example_labels_train) """Строим предсказания регрессии для сетки""" Z = optimizer.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape) plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Pastel2) plt.scatter(data_0[:,0], data_0[:,1], color='red') plt.scatter(data_1[:,0], data_1[:,1], color='blue') """Считаем AUC""" auc_wo_class_weights = roc_auc_score(example_labels_test, optimizer.predict_proba(example_data_test)[:,1]) plt.title('Without class weights') plt.show() print('AUC: %f'%auc_wo_class_weights) """Для второй регрессии в LogisticRegression передаём параметр class_weight='balanced'""" optimizer = GridSearchCV(LogisticRegression(class_weight='balanced'), param_grid, cv=cv, n_jobs=-1) optimizer.fit(example_data_train, example_labels_train) Z = optimizer.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape) plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Pastel2) plt.scatter(data_0[:,0], data_0[:,1], color='red') plt.scatter(data_1[:,0], data_1[:,1], color='blue') auc_w_class_weights = roc_auc_score(example_labels_test, optimizer.predict_proba(example_data_test)[:,1]) plt.title('With class weights') plt.show() print('AUC: %f'%auc_w_class_weights) ###Output _____no_output_____ ###Markdown Как видно, во втором случае классификатор находит разделяющую поверхность, которая ближе к истинной, т.е. меньше переобучается. Поэтому на сбалансированность классов в обучающей выборке всегда следует обращать внимание.Посмотрим, сбалансированны ли классы в нашей обучающей выборке: ###Code print(np.sum(y_train==0)) print(np.sum(y_train==1)) ###Output 2316 1884 ###Markdown Видно, что нет.Исправить ситуацию можно разными способами, мы рассмотрим два:- давать объектам миноритарного класса больший вес при обучении классификатора (рассмотрен в примере выше)- досэмплировать объекты миноритарного класса, пока число объектов в обоих классах не сравняется Задание 3. Балансировка классов.1. Обучите логистическую регрессию и гиперпараметры с балансировкой классов, используя веса (параметр class_weight='balanced' регрессии) на отмасштабированных выборках, полученных в предыдущем задании. Убедитесь, что вы нашли максимум accuracy по гиперпараметрам.2. Получите метрику ROC AUC на тестовой выборке.3. Сбалансируйте выборку, досэмплировав в неё объекты из меньшего класса. Для получения индексов объектов, которые требуется добавить в обучающую выборку, используйте следующую комбинацию вызовов функций: np.random.seed(0) indices_to_add = np.random.randint(...) X_train_to_add = X_train[y_train.as_matrix() == 1,:][indices_to_add,:] После этого добавьте эти объекты в начало или конец обучающей выборки. 
Дополните соответствующим образом вектор ответов.4. Получите метрику ROC AUC на тестовой выборке, сравните с предыдущим результатом.5. Внесите ответы в выходной файл при помощи функции write_answer_3, передав в неё сначала ROC AUC для балансировки весами, а потом балансировки выборки вручную. ###Code def write_answer_3(auc_1, auc_2): auc = (auc_1 + auc_2) / 2 with open("out/15_preprocessing_lr_answer3.txt", "w") as fout: fout.write(str(auc)) %%time param_grid = {'C': [0.01, 0.05, 0.1, 0.5, 1, 5, 10]} cv = 3 estimator = LogisticRegression(class_weight='balanced') optimizer_scaled = GridSearchCV(estimator, param_grid, cv=cv) optimizer_scaled.fit(X_train_scaled, y_train) plot_scores(optimizer_scaled) predicted_scaled = optimizer_scaled.predict_proba(X_test_scaled)[:, 1] fpr, tpr, thr = roc_curve(y_test, predicted_zeros) fpr2, tpr2, thr2 = roc_curve(y_test, predicted_scaled) roc_auc_zeros = roc_auc_score(y_test, predicted_zeros) roc_auc_scaled = roc_auc_score(y_test, predicted_scaled) plt.figure(figsize=(12, 12)) plt.plot(fpr, tpr, label='zeros: {:.7f}'.format(roc_auc_zeros)) plt.plot(fpr2, tpr2, label='scaled: {:.7f}'.format(roc_auc_scaled)) plt.xlabel("FPR") plt.ylabel("TPR") plt.legend(loc=4) plt.axis([-0.1, 1.1, -0.1, 1.1]) np.random.seed(0) class_diff = np.sum(y_train==0) - np.sum(y_train==1) indices = np.where(y_train==1)[0] indices_to_add = np.random.randint(0, len(indices) - 1, size=(class_diff,)) mapped_indices = [indices[i] for i in indices_to_add] print(y_train.shape, X_train_scaled.shape) print(class_diff) X_train_to_add = X_train_scaled[mapped_indices, :] from scipy.sparse import vstack as sparse_vstack X_train_balanced = sparse_vstack([X_train_scaled, X_train_to_add]) y_train_balanced = np.hstack([y_train, np.ones(class_diff)]) estimator = LogisticRegression() optimizer_balanced = GridSearchCV(estimator, param_grid, cv=cv) optimizer_balanced.fit(X_train_balanced, y_train_balanced) predicted_balanced = optimizer_balanced.predict_proba(X_test_scaled)[:, 1] fpr, tpr, thr = roc_curve(y_test, predicted_balanced) fpr2, tpr2, thr2 = roc_curve(y_test, predicted_scaled) roc_auc_balanced = roc_auc_score(y_test, predicted_balanced) roc_auc_scaled = roc_auc_score(y_test, predicted_scaled) plt.figure(figsize=(12, 12)) plt.plot(fpr, tpr, label='balanced: {:.7f}'.format(roc_auc_balanced)) plt.plot(fpr2, tpr2, label='scaled: {:.7f}'.format(roc_auc_scaled)) plt.xlabel("FPR") plt.ylabel("TPR") plt.legend(loc=4) plt.axis([-0.1, 1.1, -0.1, 1.1]) write_answer_3(roc_auc_scaled, roc_auc_balanced) ###Output _____no_output_____
Переформируем выборки и подсчитаем новые ошибки: ###Code """Разделим данные по классам поровну между обучающей и тестовой выборками""" example_data_train = np.vstack([data_0[:40,:], data_1[:40,:]]) example_labels_train = np.concatenate([np.zeros((40)), np.ones((40))]) example_data_test = np.vstack([data_0[40:,:], data_1[-40:,:]]) example_labels_test = np.concatenate([np.zeros((40)), np.ones((40))]) """Обучим классификатор""" optimizer = GridSearchCV(LogisticRegression(class_weight='balanced'), param_grid, cv=cv, n_jobs=-1) optimizer.fit(example_data_train, example_labels_train) Z = optimizer.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape) plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Pastel2) plt.scatter(data_0[:,0], data_0[:,1], color='red') plt.scatter(data_1[:,0], data_1[:,1], color='blue') auc_stratified = roc_auc_score(example_labels_test, optimizer.predict_proba(example_data_test)[:,1]) plt.title('With class weights') plt.show() print('AUC ROC for stratified samples: ', auc_stratified) ###Output _____no_output_____ ###Markdown Как видно, после данной процедуры ответ классификатора изменился незначительно, а вот качество увеличилось. При этом, в зависимости от того, как вы разбили изначально данные на обучение и тест, после сбалансированного разделения выборок итоговая метрика на тесте может как увеличиться, так и уменьшиться, но доверять ей можно значительно больше, т.к. она построена с учётом специфики работы классификатора. Данный подход является частным случаем т.н. метода стратификации. Задание 4. Стратификация выборки.1. По аналогии с тем, как это было сделано в начале задания, разбейте выборки X_real_zeros и X_cat_oh на обучение и тест, передавая в функцию train_test_split(...) дополнительно параметр stratify=y Также обязательно передайте в функцию переменную random_state=0.2. Выполните масштабирование новых вещественных выборок, обучите классификатор и его гиперпараметры при помощи метода кросс-валидации, делая поправку на несбалансированные классы при помощи весов. Убедитесь в том, что нашли оптимум accuracy по гиперпараметрам.3. Оцените качество классификатора метрике AUC ROC на тестовой выборке.4. 
Полученный ответ передайте функции write_answer_4 ###Code def write_answer_4(auc): with open("out/15_preprocessing_lr_answer4.txt", "w") as fout: fout.write(str(auc)) X_train_real_stratified, X_test_real_stratified, y_train_stratified, y_test_stratified = mdsel.train_test_split( X_real_zeros, y, test_size=0.3, random_state=0, stratify=y ) X_train_cat_oh_stratified, X_test_cat_oh_stratified = mdsel.train_test_split( X_cat_oh, test_size=0.3, random_state=0, stratify=y ) scaler = StandardScaler() X_train_real_stratified = scaler.fit_transform(X_train_real_stratified) X_test_real_stratified = scaler.transform(X_test_real_stratified) X_train_stratified = sparse_hstack([X_train_real_stratified, X_train_cat_oh_stratified]) X_test_stratified = sparse_hstack([X_test_real_stratified, X_test_cat_oh_stratified]) X_train_stratified = X_train_stratified.tocsr() estimator = LogisticRegression(class_weight='balanced') optimizer_stratified = GridSearchCV(estimator, param_grid, cv=cv) optimizer_stratified.fit(X_train_stratified, y_train_stratified) predicted_stratified = optimizer_stratified.predict_proba(X_test_stratified)[:, 1] fpr, tpr, thr = roc_curve(y_test_stratified, predicted_stratified) roc_auc_stratified = roc_auc_score(y_test_stratified, predicted_stratified) plt.figure(figsize=(12, 12)) plt.plot(fpr, tpr, label='stratified: {:.7f}'.format(roc_auc_stratified)) plt.xlabel("FPR") plt.ylabel("TPR") plt.legend(loc=4) plt.axis([-0.1, 1.1, -0.1, 1.1]) write_answer_4(roc_auc_stratified) ###Output _____no_output_____ ###Markdown Теперь вы разобрались с основными этапами предобработки данных для линейных классификаторов.Напомним основные этапы:- обработка пропущенных значений- обработка категориальных признаков- стратификация- балансировка классов- масштабированиеДанные действия с данными рекомендуется проводить всякий раз, когда вы планируете использовать линейные методы. Рекомендация по выполнению многих из этих пунктов справедлива и для других методов машинного обучения. Трансформация признаков.Теперь рассмотрим способы преобразования признаков. Существует достаточно много различных способов трансформации признаков, которые позволяют при помощи линейных методов получать более сложные разделяющие поверхности. Самым базовым является полиномиальное преобразование признаков. Его идея заключается в том, что помимо самих признаков вы дополнительно включаете набор все полиномы степени $p$, которые можно из них построить. 
Для случая $p=2$ преобразование выглядит следующим образом:$$ \phi(x_i) = [x_{i,1}^2, ..., x_{i,D}^2, x_{i,1}x_{i,2}, ..., x_{i,D} x_{i,D-1}, x_{i,1}, ..., x_{i,D}, 1] $$Рассмотрим принцип работы данных признаков на данных, сэмплированных их гауссиан: ###Code from sklearn.preprocessing import PolynomialFeatures """Инициализируем класс, который выполняет преобразование""" transform = PolynomialFeatures(2) """Обучаем преобразование на обучающей выборке, применяем его к тестовой""" example_data_train_poly = transform.fit_transform(example_data_train) example_data_test_poly = transform.transform(example_data_test) """Обращаем внимание на параметр fit_intercept=False""" optimizer = GridSearchCV(LogisticRegression(class_weight='balanced', fit_intercept=False), param_grid, cv=cv, n_jobs=-1) optimizer.fit(example_data_train_poly, example_labels_train) Z = optimizer.predict(transform.transform(np.c_[xx.ravel(), yy.ravel()])).reshape(xx.shape) plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Pastel2) plt.scatter(data_0[:,0], data_0[:,1], color='red') plt.scatter(data_1[:,0], data_1[:,1], color='blue') plt.title('With class weights') plt.show() ###Output _____no_output_____ ###Markdown Видно, что данный метод преобразования данных уже позволяет строить нелинейные разделяющие поверхности, которые могут более тонко подстраиваться под данные и находить более сложные зависимости. Число признаков в новой модели: ###Code print(example_data_train_poly.shape) ###Output (80, 6) ###Markdown Но при этом одновременно данный метод способствует более сильной способности модели к переобучению из-за быстрого роста числа признаком с увеличением степени $p$. Рассмотрим пример с $p=11$: ###Code transform = PolynomialFeatures(11) example_data_train_poly = transform.fit_transform(example_data_train) example_data_test_poly = transform.transform(example_data_test) optimizer = GridSearchCV(LogisticRegression(class_weight='balanced', fit_intercept=False), param_grid, cv=cv, n_jobs=-1) optimizer.fit(example_data_train_poly, example_labels_train) Z = optimizer.predict(transform.transform(np.c_[xx.ravel(), yy.ravel()])).reshape(xx.shape) plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Pastel2) plt.scatter(data_0[:,0], data_0[:,1], color='red') plt.scatter(data_1[:,0], data_1[:,1], color='blue') plt.title('Corrected class weights') plt.show() ###Output _____no_output_____ ###Markdown Количество признаков в данной модели: ###Code print(example_data_train_poly.shape) ###Output (80, 78) ###Markdown Задание 5. Трансформация вещественных признаков.1. Реализуйте по аналогии с примером преобразование вещественных признаков модели при помощи полиномиальных признаков степени 22. Постройте логистическую регрессию на новых данных, одновременно подобрав оптимальные гиперпараметры. Обращаем внимание, что в преобразованных признаках уже присутствует столбец, все значения которого равны 1, поэтому обучать дополнительно значение $b$ не нужно, его функцию выполняет один из весов $w$. В связи с этим во избежание линейной зависимости в датасете, в вызов класса логистической регрессии требуется передавать параметр fit_intercept=False. Для обучения используйте стратифицированные выборки с балансировкой классов при помощи весов, преобразованные признаки требуется заново отмасштабировать.3. Получите AUC ROC на тесте и сравните данный результат с использованием обычных признаков.4. Передайте полученный ответ в функцию write_answer_5. 
###Code def write_answer_5(auc): with open("out/15_preprocessing_lr_answer5.txt", "w") as fout: fout.write(str(auc)) X_train_real_stratified, X_test_real_stratified, y_train_stratified, y_test_stratified = mdsel.train_test_split( X_real_zeros, y, test_size=0.3, random_state=0, stratify=y ) X_train_cat_oh_stratified, X_test_cat_oh_stratified = mdsel.train_test_split( X_cat_oh, test_size=0.3, random_state=0, stratify=y ) poly_transform = PolynomialFeatures(2) X_train_real_stratified = poly_transform.fit_transform(X_train_real_stratified) X_test_real_stratified = poly_transform.transform(X_test_real_stratified) scaler = StandardScaler() X_train_real_stratified = scaler.fit_transform(X_train_real_stratified) X_test_real_stratified = scaler.transform(X_test_real_stratified) X_train_stratified = sparse_hstack([X_train_real_stratified, X_train_cat_oh_stratified]) X_test_stratified = sparse_hstack([X_test_real_stratified, X_test_cat_oh_stratified]) X_train_stratified = X_train_stratified.tocsr() estimator = LogisticRegression(fit_intercept=False, class_weight='balanced') optimizer_stratified = GridSearchCV(estimator, param_grid, cv=cv) optimizer_stratified.fit(X_train_stratified, y_train_stratified) predicted_stratified = optimizer_stratified.predict_proba(X_test_stratified)[:, 1] fpr, tpr, thr = roc_curve(y_test_stratified, predicted_stratified) roc_auc_stratified = roc_auc_score(y_test_stratified, predicted_stratified) plt.figure(figsize=(12, 12)) plt.plot(fpr, tpr, label='stratified: {:.7f}'.format(roc_auc_stratified)) plt.xlabel("FPR") plt.ylabel("TPR") plt.legend(loc=4) plt.axis([-0.1, 1.1, -0.1, 1.1]) write_answer_5(roc_auc_stratified) ###Output _____no_output_____ ###Markdown Регрессия Lasso.К логистической регрессии также можно применить L1-регуляризацию (Lasso), вместо регуляризации L2, которая будет приводить к отбору признаков. Вам предлагается применить L1-регуляцию к исходным признакам и проинтерпретировать полученные результаты (применение отбора признаков к полиномиальным так же можно успешно применять, но в нём уже будет отсутствовать компонента интерпретации, т.к. смысловое значение оригинальных признаков известно, а полиномиальных - уже может быть достаточно нетривиально). Для вызова логистической регрессии с L1-регуляризацией достаточно передать параметр penalty='l1' в инициализацию класса. Задание 6. Отбор признаков при помощи регрессии Lasso.1. Обучите регрессию Lasso на стратифицированных отмасштабированных выборках, используя балансировку классов при помощи весов.2. Получите ROC AUC регрессии, сравните его с предыдущими результатами.3. Найдите номера вещественных признаков, которые имеют нулевые веса в итоговой модели.4. Передайте их список функции write_answer_6. 
###Code def write_answer_6(features): with open("out/15_preprocessing_lr_answer6.txt", "w") as fout: fout.write(" ".join([str(num) for num in features])) X_train_real_stratified, X_test_real_stratified, y_train_stratified, y_test_stratified = mdsel.train_test_split( X_real_zeros, y, test_size=0.3, random_state=0, stratify=y ) X_train_cat_oh_stratified, X_test_cat_oh_stratified = mdsel.train_test_split( X_cat_oh, test_size=0.3, random_state=0, stratify=y ) scaler = StandardScaler() X_train_real_stratified = scaler.fit_transform(X_train_real_stratified) X_test_real_stratified = scaler.transform(X_test_real_stratified) X_train_stratified = sparse_hstack([X_train_real_stratified, X_train_cat_oh_stratified]) X_test_stratified = sparse_hstack([X_test_real_stratified, X_test_cat_oh_stratified]) X_train_stratified = X_train_stratified.tocsr() estimator = LogisticRegression(penalty='l1', class_weight='balanced') optimizer = GridSearchCV(estimator, param_grid=param_grid, cv=cv) optimizer.fit(X_train_stratified, y_train_stratified) predicted_stratified = optimizer.predict_proba(X_test_stratified)[:, 1] fpr, tpr, thr = roc_curve(y_test_stratified, predicted_stratified) roc_auc_stratified = roc_auc_score(y_test_stratified, predicted_stratified) plt.figure(figsize=(12, 12)) plt.plot(fpr, tpr, label='stratified: {:.7f}'.format(roc_auc_stratified)) plt.xlabel("FPR") plt.ylabel("TPR") plt.legend(loc=4) plt.axis([-0.1, 1.1, -0.1, 1.1]) coefs = optimizer.best_estimator_.coef_[0] coefs.shape zero_indices = np.where(np.abs(coefs) < 1e-3)[0] cols = X_train_real_zeros.columns num_zero_indices = zero_indices[zero_indices < len(cols)] write_answer_6(num_zero_indices) list(cols[num_zero_indices]) ###Output _____no_output_____
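###Markdown As a closing aside, the separate steps used above (scaling plus a class-weighted logistic regression with a grid search over C) can be chained into a single scikit-learn `Pipeline`, so that the scaler is re-fitted only on the training folds during cross-validation. A minimal sketch on the dense zero-filled real features; depending on the scikit-learn version, `GridSearchCV` lives in `sklearn.model_selection` (newer) or `sklearn.grid_search` (older):
###Code
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV

pipe = Pipeline([
    ('scale', StandardScaler()),                          # fitted inside each CV fold
    ('clf', LogisticRegression(class_weight='balanced'))
])
param_grid = {'clf__C': [0.01, 0.05, 0.1, 0.5, 1, 5, 10]}
grid = GridSearchCV(pipe, param_grid, cv=3)
grid.fit(X_train_real_zeros, y_train)                     # real-valued features from the cells above
print(grid.best_params_)
###Output
_____no_output_____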
zynq-old/PYNQ_Workshop/Session_1/2_getting_started_with_IPython.ipynb
###Markdown Getting started with IPython----* [Goal](Goal)* [Executing OS shell commands](Executing-OS-shell-commands)* [Getting help with IPython](Getting-help-with-IPython)* [If you remember nothing else ... ?](If-you-remember-nothing-else-...-?)* [%quickref](%quickref)* [The IPython magic commands](The-IPython-magic-commands)* [Software introspection with IPython](Software-introspection-with-IPython)---- GoalThe aim of this notebook is to become familiar with some of the most important capabilities of the IPython (Interactive Python) REPL. Note that term `REPL` is synonymous with `shell`:>A **read–eval–print loop** (REPL), also termed an **interactive toplevel** or **language shell**, is a simple interactive computer programming environment that takes single user inputs, executes them, and returns the result to the user; a program written in a REPL environment is executed piecewise.[wikipedia](https://en.wikipedia.org/wiki/Read–eval–print_loop) Executing OS shell commands Probably the most common use of the IPython REPL is to execute OS shell commands directly from within a `code` cell in any notebook. Within any `code` cell, a command that starts with the '!' (exclamation mark also known as the "bang" character) is redirected to the operating system shell. Since PYNQ runs Linux, we default to the `bash` shell.Here are some simple examples: ###Code !pwd !who !find -name "*PYNQ*" ###Output _____no_output_____ ###Markdown Getting help with IPython IPython has many useful features. However it is not necessary to know or remember all its features. There is a very comprehensive help system available at all times. If you remember nothing else ... `?` The following commands will help you find the answers to most questions that you will encounter while learning IPython. They are also useful long after you are well acquainted with it. Executing `?` in a code cell will pop up a window describing IPython as shown in the excerpt below:```IPython -- An enhanced Interactive Python=========================================IPython offers a fully compatible replacement for the standard Pythoninterpreter, with convenient shell features, special commands, commandhistory mechanism and output results caching.At your system command line, type 'ipython -h' to see the command lineoptions available. This document only describes interactive features.MAIN FEATURES-------------* Access to the standard Python help with object docstrings and the Python manuals. Simply type 'help' (no quotes) to invoke it.* Magic commands: type %magic for information on the magic subsystem.* System command aliases, via the %alias command or the configuration file(s)....``` Try it for yourself here: ###Code ? ###Output _____no_output_____ ###Markdown `%quickref` The `%quickref` command is one example of an IPython _magic function_. IPython _magics_ are special functions that are a key part of the IPython package. Much of the power of the IPython interpreter is accessed by invoking the appropriate magics. We will see many such examples as we progress Executing `%quickref` in a code cell, will bring up a window summarizing all the available IPython commands. Here is a quick excerpt:```IPython -- An enhanced Interactive Python - Quick Reference Card================================================================obj?, obj?? 
: Get help, or more help for object (also works as ?obj, ??obj).?foo.*abc* : List names in 'foo' containing 'abc' in them.%magic : Information about IPython's 'magic' % functions.Magic functions are prefixed by % or %%, and typically take their argumentswithout parentheses, quotes or even commas for convenience. Line magics take asingle % and cell magics are prefixed with two %%.Example magic function calls:%alias d ls -F : 'd' is now an alias for 'ls -F'alias d ls -F : Works if 'alias' not a python namealist = %alias : Get list of aliases to 'alist'cd /usr/share : Obvious. cd - to choose from visited dirs.%cd?? : See help AND source for magic %cd%timeit x=10 : time the 'x=10' statement with high precision....``` Try it for yourself here: ###Code %quickref ###Output _____no_output_____ ###Markdown The IPython _magic_ commands To learn more about the IPython magics, simply type `%magic` ... Here is an excerpt from the summary of all the available IPython magic functions:```IPython's 'magic' functions===========================The magic function system provides a series of functions which allow you tocontrol the behavior of IPython itself, plus a lot of system-typefeatures. There are two kinds of magics, line-oriented and cell-oriented.Line magics are prefixed with the % character and work much like OScommand-line calls: they get as an argument the rest of the line, wherearguments are passed without parentheses or quotes. For example, this willtime the given statement:: %timeit range(1000)``` Try it for yourself here: ###Code %magic ###Output _____no_output_____ ###Markdown To get just a list of the available magics, execute `%lsmagic` ###Code %lsmagic ###Output _____no_output_____ ###Markdown To get more detailed information on any individual magic, simply add a '?' to either the start or the end of the magic and execute it in a code cell. For example, type either `?%pdoc` or `%pdoc?` to see the following:```Docstring:Print the docstring for an object.If the given object is a class, it will print both the class and theconstructor docstrings.File: /usr/lib/python3/dist-packages/IPython/core/magics/namespace.py``` Try it for yourself here: ###Code %pdoc? ###Output _____no_output_____ ###Markdown Software introspection with IPython IPython gives us more powerful help tools for any active Python object. We can activate _software introspection_ on any object to learn more about its properties and capabilities. We demonstrate this below with the help of a short Python script that checks palindromes, strings that read the same when scanned from left-to-right as when scanned from right-to-left. ###Code """A case-sensitive and white-space sensitive palindrome checker""" from math import floor pattern = input('Type a pattern that you want to verify is a palindrome: ') if pattern == (pattern[::-1]): print ('\nYes ... "{}" is a palindrome'.format(pattern)) else: print ('\nNo ... "{}" is not a valid palindrome'.format(pattern)) length = len(pattern) mid_pos = floor(length/2) for i, char in enumerate(pattern): i_dual = length - 1 - i if i < mid_pos: if pattern[i] != pattern[i_dual]: print('The character \'{}\' in position {} does not match the character \'{}\' in position {}'. format(pattern[i], i+1, pattern[i_dual], length-i)) else: break # All inconsistent chars on LHS and RHS identified ###Output _____no_output_____ ###Markdown IPython's _software introspection_ capabilities allow us to find out more about library objects and also local objects by executing the object name followed by a `?` in a code cell. 
For example, run the palindrome script with the test string 'able was i ere i saw elba' (attributed to Napoleon, who was exiled to the Island of Elba in 1814 after his first abdication) and inspect these objects as follows:`pattern?``mid_pos?``floor?` Try it for yourself here: ###Code pattern? mid_pos? floor? ###Output _____no_output_____
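###Markdown Introspection is just as useful on your own objects once they carry docstrings. As a small aside, wrapping the checker above into a function lets `?` display its documentation; the function name and docstring here are illustrative only.
###Code
def is_palindrome(pattern):
    """Return True if `pattern` reads the same forwards and backwards (case- and space-sensitive)."""
    return pattern == pattern[::-1]

print(is_palindrome('able was i ere i saw elba'))
is_palindrome?
###Output
_____no_output_____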
Chapter3_Exercise2.ipynb
###Markdown Chapter 3 - Exercise 2: Đọc và chuyển dữ liệu, sau đó tính BMI theo điều kiện, truy xuất dữ liệu Dữ liệu được trích xuất từ http://wiki.stat.ucla.edu/socr/index.php/SOCR_Data_MLB_HeightsWeights> *Ghi chú:*>Major League Baseball (MLB) là giải đấu bóng chày chuyên nghiệp. Major League Baseball có tổng cộng 30 đội bóng đến từ nhiều bang khác nhau của Mỹ và Canada (29 đội từ Mỹ và 1 đội từ Canada). MLB luôn được sự quan tâm lớn của hầu hết fan bóng chày trên toàn thế giới, và cũng được xem là giải đấu nổi tiếng và uy tín nhất, tập hợp những cầu thủ có trình độ cao nhất trong bộ môn này. Dữ liệu **heights** (tính theo inches) và **weights** (tính theo pounds) là chiều cao và cân nặng của các cầu thủ có tham gia 1 số giải của MLB. Cho tập tin dữ liệu heights_1.txt, weights_1.txt Các kiến thức sử dụng trong bài tập:1. Sử dụng các phép toán số học trên mảng2. Truy xuất phần tử của mảng thông qua chỉ số Thực hiện các yêu cầu sau, và đối chiếu với kết quả cho trước: ###Code import numpy as np # Chép dữ liệu từ tập tin heights_1.txt vào list height height = [74, 74, 72, 72, 73, 69, 69, 71, 76, 71, 73, 73, 74, 74, 69, 70, 73, 75, 78, 79, 76, 74, 76, 72, 71, 75, 77, 74, 73, 74, 78, 73, 75, 73, 75, 75, 74, 69, 71, 74, 73, 73, 76, 74, 74, 70, 72, 77, 74, 70, 73, 75, 76, 76, 78, 74, 74, 76, 77, 81, 78, 75, 77, 75, 76, 74, 72, 72, 75, 73, 73, 73, 70, 70, 70, 76, 68, 71, 72, 75, 75, 75, 75, 68, 74, 78, 71, 73, 76, 74, 74, 79, 75, 73, 76, 74, 74, 73, 72, 74, 73, 74, 72, 73, 69, 72, 73, 75, 75, 73, 72, 72, 76, 74, 72, 77, 74, 77, 75, 76, 80, 74, 74, 75, 78, 73, 73, 74, 75, 76, 71, 73, 74, 76, 76, 74, 73, 74, 70, 72, 73, 73, 73, 73, 71, 74, 74, 72, 74, 71, 74, 73, 75, 75, 79, 73, 75, 76, 74, 76, 78, 74, 76, 72, 74, 76, 74, 75, 78, 75, 72, 74, 72, 74, 70, 71, 70, 75, 71, 71, 73, 72, 71, 73, 72, 75, 74, 74, 75, 73, 77, 73, 76, 75, 74, 76, 75, 73, 71, 76, 75, 72, 71, 77, 73, 74, 71, 72, 74, 75, 73, 72, 75, 75, 74, 72, 74, 71, 70, 74, 77, 77, 75, 75, 78, 75, 76, 73, 75, 75, 79, 77, 76, 71, 75, 74, 69, 71, 76, 72, 72, 70, 72, 73, 71, 72, 71, 73, 72, 73, 74, 74, 72, 75, 74, 74, 77, 75, 73, 72, 71, 74, 77, 75, 75, 75, 78, 78, 74, 76, 78, 76, 70, 72, 80, 74, 74, 71, 70, 72, 71, 74, 71, 72, 71, 74, 69, 76, 75, 75, 76, 73, 76, 73, 77, 73, 72, 72, 77, 77, 71, 74, 74, 73, 78, 75, 73, 70, 74, 72, 73, 73, 75, 75, 74, 76, 73, 74, 75, 75, 72, 73, 73, 72, 74, 78, 76, 73, 74, 75, 70, 75, 71, 72, 78, 75, 73, 73, 71, 75, 77, 72, 69, 73, 74, 72, 70, 75, 70, 72, 72, 74, 73, 74, 76, 75, 80, 72, 75, 73, 74, 74, 73, 75, 75, 71, 73, 75, 74, 74, 72, 74, 74, 74, 73, 76, 75, 72, 73, 73, 73, 72, 72, 72, 72, 71, 75, 75, 74, 73, 75, 79, 74, 76, 73, 74, 74, 72, 74, 74, 75, 78, 74, 74, 74, 77, 70, 73, 74, 73, 71, 75, 71, 72, 77, 74, 70, 77, 73, 72, 76, 71, 76, 78, 75, 73, 78, 74, 79, 75, 76, 72, 75, 75, 70, 72, 70, 74, 71, 76, 73, 76, 71, 69, 72, 72, 69, 73, 69, 73, 74, 74, 72, 71, 72, 72, 76, 76, 76, 74, 76, 75, 71, 72, 71, 73, 75, 76, 75, 71, 75, 74, 72, 73, 73, 73, 73, 76, 72, 76, 73, 73, 73, 75, 75, 77, 73, 72, 75, 70, 74, 72, 80, 71, 71, 74, 74, 73, 75, 76, 73, 77, 72, 73, 77, 76, 71, 75, 73, 74, 77, 71, 72, 73, 69, 73, 70, 74, 76, 73, 73, 75, 73, 79, 74, 73, 74, 77, 75, 74, 73, 77, 73, 77, 74, 74, 73, 77, 74, 77, 75, 77, 75, 71, 74, 70, 79, 72, 72, 70, 74, 74, 72, 73, 72, 74, 74, 76, 82, 74, 74, 70, 73, 73, 74, 77, 72, 76, 73, 73, 72, 74, 74, 71, 72, 75, 74, 74, 77, 70, 71, 73, 76, 71, 75, 74, 72, 76, 79, 76, 73, 76, 78, 75, 76, 72, 72, 73, 73, 75, 71, 76, 70, 75, 74, 75, 73, 71, 71, 72, 73, 73, 72, 69, 73, 78, 71, 73, 75, 
76, 70, 74, 77, 75, 79, 72, 77, 73, 75, 75, 75, 73, 73, 76, 77, 75, 70, 71, 71, 75, 74, 69, 70, 75, 72, 75, 73, 72, 72, 72, 76, 75, 74, 69, 73, 72, 72, 75, 77, 76, 80, 77, 76, 79, 71, 75, 73, 76, 77, 73, 76, 70, 75, 73, 75, 70, 69, 71, 72, 72, 73, 70, 70, 73, 76, 75, 72, 73, 79, 71, 72, 74, 74, 74, 72, 76, 76, 72, 72, 71, 72, 72, 70, 77, 74, 72, 76, 71, 76, 71, 73, 70, 73, 73, 72, 71, 71, 71, 72, 72, 74, 74, 74, 71, 72, 75, 72, 71, 72, 72, 72, 72, 74, 74, 77, 75, 73, 75, 73, 76, 72, 77, 75, 72, 71, 71, 75, 72, 73, 73, 71, 70, 75, 71, 76, 73, 68, 71, 72, 74, 77, 72, 76, 78, 81, 72, 73, 76, 72, 72, 74, 76, 73, 76, 75, 70, 71, 74, 72, 73, 76, 76, 73, 71, 68, 71, 71, 74, 77, 69, 72, 76, 75, 76, 75, 76, 72, 74, 76, 74, 72, 75, 78, 77, 70, 72, 79, 74, 71, 68, 77, 75, 71, 72, 70, 72, 72, 73, 72, 74, 72, 72, 75, 72, 73, 74, 72, 78, 75, 72, 74, 75, 75, 76, 74, 74, 73, 74, 71, 74, 75, 76, 74, 76, 76, 73, 75, 75, 74, 68, 72, 75, 71, 70, 72, 73, 72, 75, 74, 70, 76, 71, 82, 72, 73, 74, 71, 75, 77, 72, 74, 72, 73, 78, 77, 73, 73, 73, 73, 73, 76, 75, 70, 73, 72, 73, 75, 74, 73, 73, 76, 73, 75, 70, 77, 72, 77, 74, 75, 75, 75, 75, 72, 74, 71, 76, 71, 75, 76, 83, 75, 74, 76, 72, 72, 75, 75, 72, 77, 73, 72, 70, 74, 72, 74, 72, 71, 70, 71, 76, 74, 76, 74, 74, 74, 75, 75, 71, 71, 74, 77, 71, 74, 75, 77, 76, 74, 76, 72, 71, 72, 75, 73, 68, 72, 69, 73, 73, 75, 70, 70, 74, 75, 74, 74, 73, 74, 75, 77, 73, 74, 76, 74, 75, 73, 76, 78, 75, 73, 77, 74, 72, 74, 72, 71, 73, 75, 73, 67, 67, 76, 74, 73, 70, 75, 70, 72, 77, 79, 78, 74, 75, 75, 78, 76, 75, 69, 75, 72, 75, 73, 74, 75, 75, 73] # Chép dữ liệu từ tập tin weights_1.txt vào list weight weight = [180, 215, 210, 210, 188, 176, 209, 200, 231, 180, 188, 180, 185, 160, 180, 185, 189, 185, 219, 230, 205, 230, 195, 180, 192, 225, 203, 195, 182, 188, 200, 180, 200, 200, 245, 240, 215, 185, 175, 199, 200, 215, 200, 205, 206, 186, 188, 220, 210, 195, 200, 200, 212, 224, 210, 205, 220, 195, 200, 260, 228, 270, 200, 210, 190, 220, 180, 205, 210, 220, 211, 200, 180, 190, 170, 230, 155, 185, 185, 200, 225, 225, 220, 160, 205, 235, 250, 210, 190, 160, 200, 205, 222, 195, 205, 220, 220, 170, 185, 195, 220, 230, 180, 220, 180, 180, 170, 210, 215, 200, 213, 180, 192, 235, 185, 235, 210, 222, 210, 230, 220, 180, 190, 200, 210, 194, 180, 190, 240, 200, 198, 200, 195, 210, 220, 190, 210, 225, 180, 185, 170, 185, 185, 180, 178, 175, 200, 204, 211, 190, 210, 190, 190, 185, 290, 175, 185, 200, 220, 170, 220, 190, 220, 205, 200, 250, 225, 215, 210, 215, 195, 200, 194, 220, 180, 180, 170, 195, 180, 170, 206, 205, 200, 225, 201, 225, 233, 180, 225, 180, 220, 180, 237, 215, 190, 235, 190, 180, 165, 195, 200, 190, 190, 185, 185, 205, 190, 205, 206, 220, 208, 170, 195, 210, 190, 211, 230, 170, 185, 185, 241, 225, 210, 175, 230, 200, 215, 198, 226, 278, 215, 230, 240, 184, 219, 170, 218, 190, 225, 220, 176, 190, 197, 204, 167, 180, 195, 220, 215, 185, 190, 205, 205, 200, 210, 215, 200, 205, 211, 190, 208, 200, 210, 232, 230, 210, 220, 210, 202, 212, 225, 170, 190, 200, 237, 220, 170, 193, 190, 150, 220, 200, 190, 185, 185, 200, 172, 220, 225, 190, 195, 219, 190, 197, 200, 195, 210, 177, 220, 235, 180, 195, 195, 190, 230, 190, 200, 190, 190, 200, 200, 184, 200, 180, 219, 187, 200, 220, 205, 190, 170, 160, 215, 175, 205, 200, 214, 200, 190, 180, 205, 220, 190, 215, 235, 191, 200, 181, 200, 210, 240, 185, 165, 190, 185, 175, 155, 210, 170, 175, 220, 210, 205, 200, 205, 195, 240, 150, 200, 215, 202, 200, 190, 205, 190, 160, 215, 185, 200, 190, 210, 185, 220, 190, 202, 205, 220, 175, 160, 190, 200, 
229, 206, 220, 180, 195, 175, 188, 230, 190, 200, 190, 219, 235, 180, 180, 180, 200, 234, 185, 220, 223, 200, 210, 200, 210, 190, 177, 227, 180, 195, 199, 175, 185, 240, 210, 180, 194, 225, 180, 205, 193, 230, 230, 220, 200, 249, 190, 208, 245, 250, 160, 192, 220, 170, 197, 155, 190, 200, 220, 210, 228, 190, 160, 184, 180, 180, 200, 176, 160, 222, 211, 195, 200, 175, 206, 240, 185, 260, 185, 221, 205, 200, 170, 201, 205, 185, 205, 245, 220, 210, 220, 185, 175, 170, 180, 200, 210, 175, 220, 206, 180, 210, 195, 200, 200, 164, 180, 220, 195, 205, 170, 240, 210, 195, 200, 205, 192, 190, 170, 240, 200, 205, 175, 250, 220, 224, 210, 195, 180, 245, 175, 180, 215, 175, 180, 195, 230, 230, 205, 215, 195, 180, 205, 180, 190, 180, 190, 190, 220, 210, 255, 190, 230, 200, 205, 210, 225, 215, 220, 205, 200, 220, 197, 225, 187, 245, 185, 185, 175, 200, 180, 188, 225, 200, 210, 245, 213, 231, 165, 228, 210, 250, 191, 190, 200, 215, 254, 232, 180, 215, 220, 180, 200, 170, 195, 210, 200, 220, 165, 180, 200, 200, 170, 224, 220, 180, 198, 240, 239, 185, 210, 220, 200, 195, 220, 230, 170, 220, 230, 165, 205, 192, 210, 205, 200, 210, 185, 195, 202, 205, 195, 180, 200, 185, 240, 185, 220, 205, 205, 180, 201, 190, 208, 240, 180, 230, 195, 215, 190, 195, 215, 215, 220, 220, 230, 195, 190, 195, 209, 204, 170, 185, 205, 175, 210, 190, 180, 180, 160, 235, 200, 210, 180, 190, 197, 203, 205, 170, 200, 250, 200, 220, 200, 190, 170, 190, 220, 215, 206, 215, 185, 235, 188, 230, 195, 168, 190, 160, 200, 200, 189, 180, 190, 200, 220, 187, 240, 190, 180, 185, 210, 220, 219, 190, 193, 175, 180, 215, 210, 200, 190, 185, 220, 170, 195, 205, 195, 210, 190, 190, 180, 220, 190, 186, 185, 190, 180, 190, 170, 210, 240, 220, 180, 210, 210, 195, 160, 180, 205, 200, 185, 245, 190, 210, 200, 200, 222, 215, 240, 170, 220, 156, 190, 202, 221, 200, 190, 210, 190, 200, 165, 190, 185, 230, 208, 209, 175, 180, 200, 205, 200, 250, 210, 230, 244, 202, 240, 200, 215, 177, 210, 170, 215, 217, 198, 200, 220, 170, 200, 230, 231, 183, 192, 167, 190, 180, 180, 215, 160, 205, 223, 175, 170, 190, 240, 175, 230, 223, 196, 167, 195, 190, 250, 190, 190, 190, 170, 160, 150, 225, 220, 209, 210, 176, 260, 195, 190, 184, 180, 195, 195, 219, 225, 212, 202, 185, 200, 209, 200, 195, 228, 210, 190, 212, 190, 218, 220, 190, 235, 210, 200, 188, 210, 235, 188, 215, 216, 220, 180, 185, 200, 210, 220, 185, 231, 210, 195, 200, 205, 200, 190, 250, 185, 180, 170, 180, 208, 235, 215, 244, 220, 185, 230, 190, 200, 180, 190, 196, 180, 230, 224, 160, 178, 205, 185, 210, 180, 190, 200, 257, 190, 220, 165, 205, 200, 208, 185, 215, 170, 235, 210, 170, 180, 170, 190, 150, 230, 203, 260, 246, 186, 210, 198, 210, 215, 180, 200, 245, 200, 192, 192, 200, 192, 205, 190, 186, 170, 197, 219, 200, 220, 207, 225, 207, 212, 225, 170, 190, 210, 230, 210, 200, 238, 234, 222, 200, 190, 170, 220, 223, 210, 215, 196, 175, 175, 189, 205, 210, 180, 180, 197, 220, 228, 190, 204, 165, 216, 220, 208, 210, 215, 195, 200, 215, 229, 240, 207, 205, 208, 185, 190, 170, 208, 225, 190, 225, 185, 180, 165, 240, 220, 212, 163, 215, 175, 205, 210, 205, 208, 215, 180, 200, 230, 211, 230, 190, 220, 180, 205, 190, 180, 205, 190, 195] print(len(height)) print(len(weight)) # Câu 1: Tạo numpy array arr_height từ list height arr_height = np.array([74, 74, 72, 72, 73, 69, 69, 71, 76, 71, 73, 73, 74, 74, 69, 70, 73, 75, 78, 79, 76, 74, 76, 72, 71, 75, 77, 74, 73, 74, 78, 73, 75, 73, 75, 75, 74, 69, 71, 74, 73, 73, 76, 74, 74, 70, 72, 77, 74, 70, 73, 75, 76, 76, 78, 74, 74, 76, 77, 81, 78, 75, 77, 75, 76, 74, 72, 72, 
75, 73, 73, 73, 70, 70, 70, 76, 68, 71, 72, 75, 75, 75, 75, 68, 74, 78, 71, 73, 76, 74, 74, 79, 75, 73, 76, 74, 74, 73, 72, 74, 73, 74, 72, 73, 69, 72, 73, 75, 75, 73, 72, 72, 76, 74, 72, 77, 74, 77, 75, 76, 80, 74, 74, 75, 78, 73, 73, 74, 75, 76, 71, 73, 74, 76, 76, 74, 73, 74, 70, 72, 73, 73, 73, 73, 71, 74, 74, 72, 74, 71, 74, 73, 75, 75, 79, 73, 75, 76, 74, 76, 78, 74, 76, 72, 74, 76, 74, 75, 78, 75, 72, 74, 72, 74, 70, 71, 70, 75, 71, 71, 73, 72, 71, 73, 72, 75, 74, 74, 75, 73, 77, 73, 76, 75, 74, 76, 75, 73, 71, 76, 75, 72, 71, 77, 73, 74, 71, 72, 74, 75, 73, 72, 75, 75, 74, 72, 74, 71, 70, 74, 77, 77, 75, 75, 78, 75, 76, 73, 75, 75, 79, 77, 76, 71, 75, 74, 69, 71, 76, 72, 72, 70, 72, 73, 71, 72, 71, 73, 72, 73, 74, 74, 72, 75, 74, 74, 77, 75, 73, 72, 71, 74, 77, 75, 75, 75, 78, 78, 74, 76, 78, 76, 70, 72, 80, 74, 74, 71, 70, 72, 71, 74, 71, 72, 71, 74, 69, 76, 75, 75, 76, 73, 76, 73, 77, 73, 72, 72, 77, 77, 71, 74, 74, 73, 78, 75, 73, 70, 74, 72, 73, 73, 75, 75, 74, 76, 73, 74, 75, 75, 72, 73, 73, 72, 74, 78, 76, 73, 74, 75, 70, 75, 71, 72, 78, 75, 73, 73, 71, 75, 77, 72, 69, 73, 74, 72, 70, 75, 70, 72, 72, 74, 73, 74, 76, 75, 80, 72, 75, 73, 74, 74, 73, 75, 75, 71, 73, 75, 74, 74, 72, 74, 74, 74, 73, 76, 75, 72, 73, 73, 73, 72, 72, 72, 72, 71, 75, 75, 74, 73, 75, 79, 74, 76, 73, 74, 74, 72, 74, 74, 75, 78, 74, 74, 74, 77, 70, 73, 74, 73, 71, 75, 71, 72, 77, 74, 70, 77, 73, 72, 76, 71, 76, 78, 75, 73, 78, 74, 79, 75, 76, 72, 75, 75, 70, 72, 70, 74, 71, 76, 73, 76, 71, 69, 72, 72, 69, 73, 69, 73, 74, 74, 72, 71, 72, 72, 76, 76, 76, 74, 76, 75, 71, 72, 71, 73, 75, 76, 75, 71, 75, 74, 72, 73, 73, 73, 73, 76, 72, 76, 73, 73, 73, 75, 75, 77, 73, 72, 75, 70, 74, 72, 80, 71, 71, 74, 74, 73, 75, 76, 73, 77, 72, 73, 77, 76, 71, 75, 73, 74, 77, 71, 72, 73, 69, 73, 70, 74, 76, 73, 73, 75, 73, 79, 74, 73, 74, 77, 75, 74, 73, 77, 73, 77, 74, 74, 73, 77, 74, 77, 75, 77, 75, 71, 74, 70, 79, 72, 72, 70, 74, 74, 72, 73, 72, 74, 74, 76, 82, 74, 74, 70, 73, 73, 74, 77, 72, 76, 73, 73, 72, 74, 74, 71, 72, 75, 74, 74, 77, 70, 71, 73, 76, 71, 75, 74, 72, 76, 79, 76, 73, 76, 78, 75, 76, 72, 72, 73, 73, 75, 71, 76, 70, 75, 74, 75, 73, 71, 71, 72, 73, 73, 72, 69, 73, 78, 71, 73, 75, 76, 70, 74, 77, 75, 79, 72, 77, 73, 75, 75, 75, 73, 73, 76, 77, 75, 70, 71, 71, 75, 74, 69, 70, 75, 72, 75, 73, 72, 72, 72, 76, 75, 74, 69, 73, 72, 72, 75, 77, 76, 80, 77, 76, 79, 71, 75, 73, 76, 77, 73, 76, 70, 75, 73, 75, 70, 69, 71, 72, 72, 73, 70, 70, 73, 76, 75, 72, 73, 79, 71, 72, 74, 74, 74, 72, 76, 76, 72, 72, 71, 72, 72, 70, 77, 74, 72, 76, 71, 76, 71, 73, 70, 73, 73, 72, 71, 71, 71, 72, 72, 74, 74, 74, 71, 72, 75, 72, 71, 72, 72, 72, 72, 74, 74, 77, 75, 73, 75, 73, 76, 72, 77, 75, 72, 71, 71, 75, 72, 73, 73, 71, 70, 75, 71, 76, 73, 68, 71, 72, 74, 77, 72, 76, 78, 81, 72, 73, 76, 72, 72, 74, 76, 73, 76, 75, 70, 71, 74, 72, 73, 76, 76, 73, 71, 68, 71, 71, 74, 77, 69, 72, 76, 75, 76, 75, 76, 72, 74, 76, 74, 72, 75, 78, 77, 70, 72, 79, 74, 71, 68, 77, 75, 71, 72, 70, 72, 72, 73, 72, 74, 72, 72, 75, 72, 73, 74, 72, 78, 75, 72, 74, 75, 75, 76, 74, 74, 73, 74, 71, 74, 75, 76, 74, 76, 76, 73, 75, 75, 74, 68, 72, 75, 71, 70, 72, 73, 72, 75, 74, 70, 76, 71, 82, 72, 73, 74, 71, 75, 77, 72, 74, 72, 73, 78, 77, 73, 73, 73, 73, 73, 76, 75, 70, 73, 72, 73, 75, 74, 73, 73, 76, 73, 75, 70, 77, 72, 77, 74, 75, 75, 75, 75, 72, 74, 71, 76, 71, 75, 76, 83, 75, 74, 76, 72, 72, 75, 75, 72, 77, 73, 72, 70, 74, 72, 74, 72, 71, 70, 71, 76, 74, 76, 74, 74, 74, 75, 75, 71, 71, 74, 77, 71, 74, 75, 77, 76, 74, 76, 72, 71, 72, 75, 73, 68, 72, 69, 
73, 73, 75, 70, 70, 74, 75, 74, 74, 73, 74, 75, 77, 73, 74, 76, 74, 75, 73, 76, 78, 75, 73, 77, 74, 72, 74, 72, 71, 73, 75, 73, 67, 67, 76, 74, 73, 70, 75, 70, 72, 77, 79, 78, 74, 75, 75, 78, 76, 75, 69, 75, 72, 75, 73, 74, 75, 75, 73]) # In danh sách các phần tử của arr_height print(arr_height) # Cho biết kích thước (shape) của arr_height print(arr_height.shape) ###Output [74 74 72 ... 75 75 73] (1015,) ###Markdown Nhấn vào đây để xem kết quả ! [74 74 72 ... 75 75 73](1015,) ###Code # Câu 2: Tạo numpy array arr_weight từ list weight arr_weight = np.array([180, 215, 210, 210, 188, 176, 209, 200, 231, 180, 188, 180, 185, 160, 180, 185, 189, 185, 219, 230, 205, 230, 195, 180, 192, 225, 203, 195, 182, 188, 200, 180, 200, 200, 245, 240, 215, 185, 175, 199, 200, 215, 200, 205, 206, 186, 188, 220, 210, 195, 200, 200, 212, 224, 210, 205, 220, 195, 200, 260, 228, 270, 200, 210, 190, 220, 180, 205, 210, 220, 211, 200, 180, 190, 170, 230, 155, 185, 185, 200, 225, 225, 220, 160, 205, 235, 250, 210, 190, 160, 200, 205, 222, 195, 205, 220, 220, 170, 185, 195, 220, 230, 180, 220, 180, 180, 170, 210, 215, 200, 213, 180, 192, 235, 185, 235, 210, 222, 210, 230, 220, 180, 190, 200, 210, 194, 180, 190, 240, 200, 198, 200, 195, 210, 220, 190, 210, 225, 180, 185, 170, 185, 185, 180, 178, 175, 200, 204, 211, 190, 210, 190, 190, 185, 290, 175, 185, 200, 220, 170, 220, 190, 220, 205, 200, 250, 225, 215, 210, 215, 195, 200, 194, 220, 180, 180, 170, 195, 180, 170, 206, 205, 200, 225, 201, 225, 233, 180, 225, 180, 220, 180, 237, 215, 190, 235, 190, 180, 165, 195, 200, 190, 190, 185, 185, 205, 190, 205, 206, 220, 208, 170, 195, 210, 190, 211, 230, 170, 185, 185, 241, 225, 210, 175, 230, 200, 215, 198, 226, 278, 215, 230, 240, 184, 219, 170, 218, 190, 225, 220, 176, 190, 197, 204, 167, 180, 195, 220, 215, 185, 190, 205, 205, 200, 210, 215, 200, 205, 211, 190, 208, 200, 210, 232, 230, 210, 220, 210, 202, 212, 225, 170, 190, 200, 237, 220, 170, 193, 190, 150, 220, 200, 190, 185, 185, 200, 172, 220, 225, 190, 195, 219, 190, 197, 200, 195, 210, 177, 220, 235, 180, 195, 195, 190, 230, 190, 200, 190, 190, 200, 200, 184, 200, 180, 219, 187, 200, 220, 205, 190, 170, 160, 215, 175, 205, 200, 214, 200, 190, 180, 205, 220, 190, 215, 235, 191, 200, 181, 200, 210, 240, 185, 165, 190, 185, 175, 155, 210, 170, 175, 220, 210, 205, 200, 205, 195, 240, 150, 200, 215, 202, 200, 190, 205, 190, 160, 215, 185, 200, 190, 210, 185, 220, 190, 202, 205, 220, 175, 160, 190, 200, 229, 206, 220, 180, 195, 175, 188, 230, 190, 200, 190, 219, 235, 180, 180, 180, 200, 234, 185, 220, 223, 200, 210, 200, 210, 190, 177, 227, 180, 195, 199, 175, 185, 240, 210, 180, 194, 225, 180, 205, 193, 230, 230, 220, 200, 249, 190, 208, 245, 250, 160, 192, 220, 170, 197, 155, 190, 200, 220, 210, 228, 190, 160, 184, 180, 180, 200, 176, 160, 222, 211, 195, 200, 175, 206, 240, 185, 260, 185, 221, 205, 200, 170, 201, 205, 185, 205, 245, 220, 210, 220, 185, 175, 170, 180, 200, 210, 175, 220, 206, 180, 210, 195, 200, 200, 164, 180, 220, 195, 205, 170, 240, 210, 195, 200, 205, 192, 190, 170, 240, 200, 205, 175, 250, 220, 224, 210, 195, 180, 245, 175, 180, 215, 175, 180, 195, 230, 230, 205, 215, 195, 180, 205, 180, 190, 180, 190, 190, 220, 210, 255, 190, 230, 200, 205, 210, 225, 215, 220, 205, 200, 220, 197, 225, 187, 245, 185, 185, 175, 200, 180, 188, 225, 200, 210, 245, 213, 231, 165, 228, 210, 250, 191, 190, 200, 215, 254, 232, 180, 215, 220, 180, 200, 170, 195, 210, 200, 220, 165, 180, 200, 200, 170, 224, 220, 180, 198, 240, 239, 185, 210, 220, 200, 195, 220, 230, 170, 
220, 230, 165, 205, 192, 210, 205, 200, 210, 185, 195, 202, 205, 195, 180, 200, 185, 240, 185, 220, 205, 205, 180, 201, 190, 208, 240, 180, 230, 195, 215, 190, 195, 215, 215, 220, 220, 230, 195, 190, 195, 209, 204, 170, 185, 205, 175, 210, 190, 180, 180, 160, 235, 200, 210, 180, 190, 197, 203, 205, 170, 200, 250, 200, 220, 200, 190, 170, 190, 220, 215, 206, 215, 185, 235, 188, 230, 195, 168, 190, 160, 200, 200, 189, 180, 190, 200, 220, 187, 240, 190, 180, 185, 210, 220, 219, 190, 193, 175, 180, 215, 210, 200, 190, 185, 220, 170, 195, 205, 195, 210, 190, 190, 180, 220, 190, 186, 185, 190, 180, 190, 170, 210, 240, 220, 180, 210, 210, 195, 160, 180, 205, 200, 185, 245, 190, 210, 200, 200, 222, 215, 240, 170, 220, 156, 190, 202, 221, 200, 190, 210, 190, 200, 165, 190, 185, 230, 208, 209, 175, 180, 200, 205, 200, 250, 210, 230, 244, 202, 240, 200, 215, 177, 210, 170, 215, 217, 198, 200, 220, 170, 200, 230, 231, 183, 192, 167, 190, 180, 180, 215, 160, 205, 223, 175, 170, 190, 240, 175, 230, 223, 196, 167, 195, 190, 250, 190, 190, 190, 170, 160, 150, 225, 220, 209, 210, 176, 260, 195, 190, 184, 180, 195, 195, 219, 225, 212, 202, 185, 200, 209, 200, 195, 228, 210, 190, 212, 190, 218, 220, 190, 235, 210, 200, 188, 210, 235, 188, 215, 216, 220, 180, 185, 200, 210, 220, 185, 231, 210, 195, 200, 205, 200, 190, 250, 185, 180, 170, 180, 208, 235, 215, 244, 220, 185, 230, 190, 200, 180, 190, 196, 180, 230, 224, 160, 178, 205, 185, 210, 180, 190, 200, 257, 190, 220, 165, 205, 200, 208, 185, 215, 170, 235, 210, 170, 180, 170, 190, 150, 230, 203, 260, 246, 186, 210, 198, 210, 215, 180, 200, 245, 200, 192, 192, 200, 192, 205, 190, 186, 170, 197, 219, 200, 220, 207, 225, 207, 212, 225, 170, 190, 210, 230, 210, 200, 238, 234, 222, 200, 190, 170, 220, 223, 210, 215, 196, 175, 175, 189, 205, 210, 180, 180, 197, 220, 228, 190, 204, 165, 216, 220, 208, 210, 215, 195, 200, 215, 229, 240, 207, 205, 208, 185, 190, 170, 208, 225, 190, 225, 185, 180, 165, 240, 220, 212, 163, 215, 175, 205, 210, 205, 208, 215, 180, 200, 230, 211, 230, 190, 220, 180, 205, 190, 180, 205, 190, 195]) # In danh sách các phần tử của arr_weight print(arr_weight) # Cho biết kích thước (shape) của arr_weight print(arr_weight.shape) ###Output [180 215 210 ... 205 190 195] (1015,) ###Markdown Nhấn vào đây để xem kết quả ! [180 215 210 ... 205 190 195](1015,) ###Code # Câu 3: Cho hệ số quy đổi từ inch sang m là 0.0254 # Tạo array arr_height_m dựa trên công thức: arr_height * hệ số quy đổi arr_height_m = arr_height * 0.0254 # In danh sách các phần tử của arr_height_m print(arr_height_m) ###Output [1.8796 1.8796 1.8288 ... 1.905 1.905 1.8542] ###Markdown Nhấn vào đây để xem kết quả ! [1.8796 1.8796 1.8288 ... 1.905 1.905 1.8542] ###Code # Câu 4: Cho hệ số quy đổi từ pound sang kg là 0.453592 # Tạo array arr_weight_kg dựa trên công thức: arr_weight * hệ số quy đổi arr_weight_kg = arr_weight * 0.453592 # In danh sách các phần tử của arr_weight_kg print(arr_weight_kg) ###Output [81.64656 97.52228 95.25432 ... 92.98636 86.18248 88.45044] ###Markdown Nhấn vào đây để xem kết quả ! [81.64656 97.52228 95.25432 ... 92.98636 86.18248 88.45044] ###Code # Câu 5: Tính giá trị BMI (Body Mass Index) của arr_height_m và arr_weight_kg và lưu vào arr_bmi # Gợi ý: Tính theo công thức BMI = Cân nặng / (Chiều cao * Chiều cao) arr_bmi = arr_weight_kg / ( arr_height_m * arr_height_m ) # In ra danh sách các phần tử của arr_bmi print(arr_bmi) ###Output [23.11037639 27.60406069 28.48080465 ... 25.62295933 23.74810865 25.72686361] ###Markdown Nhấn vào đây để xem kết quả ! 
[23.11037639 27.60406069 28.48080465 ... 25.62295933 23.74810865 25.72686361] ###Code # Question 6: Show the weight value at index = 50 in arr_weight_kg print(arr_weight_kg[50]) ###Output 90.7184 ###Markdown Click here to see the result! 90.7184 ###Code # Question 7: Create the array arr_height_m_100 containing the elements of arr_height_m at indices 100 through 110 (including index 110) arr_height_m_100 = arr_height_m[100:111] print(arr_height_m_100) print(arr_height_m_100.size) ###Output [1.8542 1.8796 1.8288 1.8542 1.7526 1.8288 1.8542 1.905 1.905 1.8542 1.8288] 11 ###Markdown Click here to see the result! [1.8542 1.8796 1.8288 1.8542 1.7526 1.8288 1.8542 1.905 1.905 1.8542 1.8288] ###Code # Question 8: Create and print the boolean condition used to select the baseball players with bmi < 21 print(arr_bmi < 21) # Apply that condition to print the players with bmi < 21 from arr_bmi print(arr_bmi[arr_bmi < 21]) ###Output [False False False ... False False False] [20.54255679 20.54255679 20.69282047 20.69282047 20.34343189 20.34343189 20.69282047 20.15883472 19.4984471 20.69282047 20.9205219 ] ###Markdown Click here to see the result! [False False False ... False False False] [20.54255679 20.54255679 20.69282047 20.69282047 20.34343189 20.34343189 20.69282047 20.15883472 19.4984471 20.69282047 20.9205219 ] ###Code # Question 9: Show the average height and average weight of the players print('Mean height', arr_height_m.mean()) print('Mean weight', arr_weight_kg.mean()) # Question 10: Show the maximum height and weight of the players print('Max height', arr_height_m.max()) print('Max weight', arr_weight_kg.max()) # Question 11: Show the minimum height and weight of the players print('Min height', arr_height_m.min()) print('Min weight', arr_weight_kg.min()) ###Output Min height 1.7018 Min weight 68.0388
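###Markdown A compact way to cross-check Questions 9–11 is to compute all three summary statistics for both arrays in one pass. This is only an illustrative sketch: it assumes `arr_height_m` and `arr_weight_kg` from the cells above are already defined, and the helper name `summarize` is not part of the original exercise.

```python
def summarize(name, arr):
    # Mean, max and min of one array (Questions 9-11 in a single line each).
    print(f"{name}: mean={arr.mean():.4f}, max={arr.max():.4f}, min={arr.min():.4f}")

summarize("Height (m)", arr_height_m)
summarize("Weight (kg)", arr_weight_kg)
```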
autoencoder/Keras_autoencoder_example.ipynb
###Markdown [Keras autoencoder example](https://blog.keras.io/building-autoencoders-in-keras.html) 1. simplest auto encoder model ###Code from keras.layers import Input, Dense from keras.models import Model # this is the size of our encoded representations encoding_dim = 32 # 32 floats -> compression of factor 24.5, assuming the input is 784 floats # this is our input placeholder input_img = Input(shape=(784,)) # "encoded" is the encoded representation of the input encoded = Dense(encoding_dim, activation='relu')(input_img) # "decoded" is the lossy reconstruction of the input decoded = Dense(784, activation='sigmoid')(encoded) # this model maps an input to its reconstruction autoencoder = Model(input_img, decoded) encoder = Model(input_img, encoded) # create a placeholder for an encoded (32-dimensional) input encoded_input = Input(shape=(encoding_dim,)) # retrieve the last layer of the autoencoder model decoder_layer = autoencoder.layers[-1] # create the decoder model decoder = Model(encoded_input, decoder_layer(encoded_input)) from keras.datasets import mnist import numpy as np (x_train, _), (x_test, _) = mnist.load_data() x_train = x_train.astype('float32') / 255. x_test = x_test.astype('float32') / 255. x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:]))) x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:]))) print (x_train.shape) print (x_test.shape) autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy') autoencoder.fit(x_train, x_train, epochs=100, batch_size=256, shuffle=True, validation_data=(x_test, x_test)) # encode and decode some digits # note that we take them from the *test* set encoded_imgs = encoder.predict(x_test) decoded_imgs = decoder.predict(encoded_imgs) print(encoded_imgs.mean()) import matplotlib.pyplot as plt %matplotlib inline n = 10 # how many digits we will display plt.figure(figsize=(20, 4)) for i in range(n): # display original ax = plt.subplot(2, n, i + 1) plt.imshow(x_test[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # display reconstruction ax = plt.subplot(2, n, i + 1 + n) plt.imshow(decoded_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() ###Output _____no_output_____ ###Markdown 2. 
Sparsity Constraint on encoded representations ###Code from keras import regularizers # this is the size of our encoded representations encoding_dim = 32 input_img = Input(shape=(784,)) # add a Dense layer with a L1 activity regularizer #encoded = Dense(encoding_dim, activation='relu')(input_img) encoded = Dense(encoding_dim, activation='relu', activity_regularizer=regularizers.l1(10e-5))(input_img) decoded = Dense(784, activation='sigmoid')(encoded) autoencoder = Model(input_img, decoded) autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy') autoencoder.fit(x_train, x_train, epochs=10, batch_size=256, shuffle=True, validation_data=(x_test, x_test)) encoder = Model(input_img, encoded) # create a placeholder for an encoded (32-dimensional) input encoded_input = Input(shape=(encoding_dim,)) # retrieve the last layer of the autoencoder model decoder_layer = autoencoder.layers[-1] # create the decoder model decoder = Model(encoded_input, decoder_layer(encoded_input)) # encode and decode some digits # note that we take them from the *test* set encoded_imgs = encoder.predict(x_test) decoded_imgs = decoder.predict(encoded_imgs) ###Output _____no_output_____ ###Markdown avarage of encoded_imgs ###Code print(encoded_imgs.mean()) n = 10 # how many digits we will display plt.figure(figsize=(20, 4)) for i in range(n): # display original ax = plt.subplot(2, n, i + 1) plt.imshow(x_test[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # display reconstruction ax = plt.subplot(2, n, i + 1 + n) plt.imshow(decoded_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() ###Output _____no_output_____ ###Markdown Looks not good as expected~ 3. Deep autoencoder ###Code input_img = Input(shape=(784,)) encoded = Dense(128, activation='relu')(input_img) encoded = Dense(64, activation='relu')(encoded) encoded = Dense(32, activation='relu')(encoded) decoded = Dense(64, activation='relu')(encoded) decoded = Dense(128, activation='relu')(decoded) decoded = Dense(784, activation='sigmoid')(decoded) autoencoder = Model(input_img, decoded) autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy') autoencoder.fit(x_train, x_train, epochs=100, batch_size=256, shuffle=True, validation_data=(x_test, x_test)) encoder = Model(input_img, encoded) # create a placeholder for an encoded (32-dimensional) input encoded_input = Input(shape=(encoding_dim,)) # retrieve the last layer of the autoencoder model decoder_layer3 = autoencoder.layers[-3] decoder_layer2 = autoencoder.layers[-2] decoder_layer1 = autoencoder.layers[-1] # create the decoder model decoder = Model(encoded_input, decoder_layer1(decoder_layer2(decoder_layer3(encoded_input)))) # encode and decode some digits # note that we take them from the *test* set encoded_imgs = encoder.predict(x_test) decoded_imgs = decoder.predict(encoded_imgs) n = 10 # how many digits we will display plt.figure(figsize=(20, 4)) for i in range(n): # display original ax = plt.subplot(2, n, i + 1) plt.imshow(x_test[i].reshape(28, 28)) # note that we take them from the *test* set plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # display reconstruction ax = plt.subplot(2, n, i + 1 + n) plt.imshow(decoded_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() ###Output _____no_output_____ ###Markdown 4. 
Convolutional autoencoder ###Code from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D from keras.models import Model from keras import backend as K input_img = Input(shape=(28, 28, 1)) # adapt this if using `channels_first` image data format x = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img) x = MaxPooling2D((2, 2), padding='same')(x) x = Conv2D(8, (3, 3), activation='relu', padding='same')(x) x = MaxPooling2D((2, 2), padding='same')(x) x = Conv2D(8, (3, 3), activation='relu', padding='same')(x) encoded = MaxPooling2D((2, 2), padding='same')(x) # at this point the representation is (4, 4, 8) i.e. 128-dimensional x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded) x = UpSampling2D((2, 2))(x) x = Conv2D(8, (3, 3), activation='relu', padding='same')(x) x = UpSampling2D((2, 2))(x) x = Conv2D(16, (3, 3), activation='relu')(x) x = UpSampling2D((2, 2))(x) decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x) autoencoder = Model(input_img, decoded) autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy') from keras.datasets import mnist import numpy as np (x_train, _), (x_test, _) = mnist.load_data() x_train = x_train.astype('float32') / 255. x_test = x_test.astype('float32') / 255. x_train = np.reshape(x_train, (len(x_train), 28, 28, 1)) # adapt this if using `channels_first` image data format x_test = np.reshape(x_test, (len(x_test), 28, 28, 1)) from keras.callbacks import TensorBoard autoencoder.fit(x_train, x_train, epochs=50, batch_size=128, shuffle=True, validation_data=(x_test, x_test), callbacks=[TensorBoard(log_dir='/tmp/autoencoder')]) ###Output Train on 60000 samples, validate on 10000 samples Epoch 1/50 60000/60000 [==============================] - 98s 2ms/step - loss: 0.1378 - val_loss: 0.1340 Epoch 2/50 60000/60000 [==============================] - 99s 2ms/step - loss: 0.1324 - val_loss: 0.1313 Epoch 3/50 60000/60000 [==============================] - 81s 1ms/step - loss: 0.1283 - val_loss: 0.1237 Epoch 4/50 60000/60000 [==============================] - 79s 1ms/step - loss: 0.1251 - val_loss: 0.1219 Epoch 5/50 60000/60000 [==============================] - 84s 1ms/step - loss: 0.1226 - val_loss: 0.1204 Epoch 6/50 60000/60000 [==============================] - 88s 1ms/step - loss: 0.1204 - val_loss: 0.1161 Epoch 7/50 60000/60000 [==============================] - 85s 1ms/step - loss: 0.1184 - val_loss: 0.1169 Epoch 8/50 60000/60000 [==============================] - 85s 1ms/step - loss: 0.1172 - val_loss: 0.1135 Epoch 9/50 60000/60000 [==============================] - 87s 1ms/step - loss: 0.1156 - val_loss: 0.1152 Epoch 10/50 60000/60000 [==============================] - 82s 1ms/step - loss: 0.1145 - val_loss: 0.1134 Epoch 11/50 60000/60000 [==============================] - 82s 1ms/step - loss: 0.1136 - val_loss: 0.1130 Epoch 12/50 60000/60000 [==============================] - 87s 1ms/step - loss: 0.1126 - val_loss: 0.1118 Epoch 13/50 60000/60000 [==============================] - 86s 1ms/step - loss: 0.1121 - val_loss: 0.1106 Epoch 14/50 60000/60000 [==============================] - 98s 2ms/step - loss: 0.1112 - val_loss: 0.1091 Epoch 15/50 60000/60000 [==============================] - 101s 2ms/step - loss: 0.1104 - val_loss: 0.1079 Epoch 16/50 60000/60000 [==============================] - 84s 1ms/step - loss: 0.1097 - val_loss: 0.1080 Epoch 17/50 60000/60000 [==============================] - 85s 1ms/step - loss: 0.1093 - val_loss: 0.1080 Epoch 18/50 60000/60000 
[==============================] - 87s 1ms/step - loss: 0.1090 - val_loss: 0.1085 Epoch 19/50 60000/60000 [==============================] - 82s 1ms/step - loss: 0.1087 - val_loss: 0.1080 Epoch 20/50 60000/60000 [==============================] - 87s 1ms/step - loss: 0.1081 - val_loss: 0.1075 Epoch 21/50 60000/60000 [==============================] - 86s 1ms/step - loss: 0.1074 - val_loss: 0.1045 Epoch 22/50 60000/60000 [==============================] - 93s 2ms/step - loss: 0.1068 - val_loss: 0.1057 Epoch 23/50 60000/60000 [==============================] - 102s 2ms/step - loss: 0.1066 - val_loss: 0.1055 Epoch 24/50 60000/60000 [==============================] - 86s 1ms/step - loss: 0.1062 - val_loss: 0.1051 Epoch 25/50 60000/60000 [==============================] - 81s 1ms/step - loss: 0.1060 - val_loss: 0.1039 Epoch 26/50 60000/60000 [==============================] - 84s 1ms/step - loss: 0.1056 - val_loss: 0.1036 Epoch 27/50 60000/60000 [==============================] - 92s 2ms/step - loss: 0.1053 - val_loss: 0.1044 Epoch 28/50 60000/60000 [==============================] - 96s 2ms/step - loss: 0.1050 - val_loss: 0.1019 Epoch 29/50 60000/60000 [==============================] - 88s 1ms/step - loss: 0.1043 - val_loss: 0.1038 Epoch 30/50 60000/60000 [==============================] - 97s 2ms/step - loss: 0.1042 - val_loss: 0.1023 Epoch 31/50 60000/60000 [==============================] - 98s 2ms/step - loss: 0.1041 - val_loss: 0.1046 Epoch 32/50 60000/60000 [==============================] - 86s 1ms/step - loss: 0.1036 - val_loss: 0.1013 Epoch 33/50 60000/60000 [==============================] - 88s 1ms/step - loss: 0.1028 - val_loss: 0.1016 Epoch 34/50 60000/60000 [==============================] - 86s 1ms/step - loss: 0.1028 - val_loss: 0.1013 Epoch 35/50 60000/60000 [==============================] - 90s 1ms/step - loss: 0.1029 - val_loss: 0.1028 Epoch 36/50 60000/60000 [==============================] - 85s 1ms/step - loss: 0.1027 - val_loss: 0.1016 Epoch 37/50 60000/60000 [==============================] - 89s 1ms/step - loss: 0.1024 - val_loss: 0.1006 Epoch 38/50 60000/60000 [==============================] - 85s 1ms/step - loss: 0.1022 - val_loss: 0.1007 Epoch 39/50 60000/60000 [==============================] - 83s 1ms/step - loss: 0.1019 - val_loss: 0.0994 Epoch 40/50 60000/60000 [==============================] - 98s 2ms/step - loss: 0.1018 - val_loss: 0.1021 Epoch 41/50 60000/60000 [==============================] - 86s 1ms/step - loss: 0.1014 - val_loss: 0.1021 Epoch 42/50 60000/60000 [==============================] - 80s 1ms/step - loss: 0.1013 - val_loss: 0.0992 Epoch 43/50 60000/60000 [==============================] - 95s 2ms/step - loss: 0.1012 - val_loss: 0.0994 Epoch 44/50 60000/60000 [==============================] - 99s 2ms/step - loss: 0.1008 - val_loss: 0.0979 Epoch 45/50 60000/60000 [==============================] - 92s 2ms/step - loss: 0.1003 - val_loss: 0.0999 Epoch 46/50 60000/60000 [==============================] - 83s 1ms/step - loss: 0.1002 - val_loss: 0.1001 Epoch 47/50 60000/60000 [==============================] - 93s 2ms/step - loss: 0.1000 - val_loss: 0.0982 Epoch 48/50 60000/60000 [==============================] - 94s 2ms/step - loss: 0.1000 - val_loss: 0.0987 Epoch 49/50 60000/60000 [==============================] - 86s 1ms/step - loss: 0.0997 - val_loss: 0.0982 Epoch 50/50 60000/60000 [==============================] - 86s 1ms/step - loss: 0.0995 - val_loss: 0.0986 ###Markdown also get the encoded images and decoded imagesdecoded_imgs are 
computed using autoencoder itself ###Code decoded_imgs = autoencoder.predict(x_test) encoder = Model(input_img, encoded) encoded_imgs = encoder.predict(x_test) encoded_imgs.shape n = 10 plt.figure(figsize=(20, 4)) for i in range(n): # display original ax = plt.subplot(2, n, i + 1) plt.imshow(x_test[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # display reconstruction ax = plt.subplot(2, n, i + 1 + n) plt.imshow(decoded_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() ###Output _____no_output_____ ###Markdown let's check the encoded image ###Code n = 10 plt.figure(figsize=(20, 8)) for i in range(n): ax = plt.subplot(1, n, i + 1) plt.imshow(encoded_imgs[i].reshape(4, 4* 8).T) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() ###Output _____no_output_____ ###Markdown 5. image denoise ###Code from keras.datasets import mnist import numpy as np (x_train, _), (x_test, _) = mnist.load_data() x_train = x_train.astype('float32') / 255. x_test = x_test.astype('float32') / 255. x_train = np.reshape(x_train, (len(x_train), 28, 28, 1)) # adapt this if using `channels_first` image data format x_test = np.reshape(x_test, (len(x_test), 28, 28, 1)) # adapt this if using `channels_first` image data format noise_factor = 0.5 x_train_noisy = x_train + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_train.shape) x_test_noisy = x_test + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_test.shape) x_train_noisy = np.clip(x_train_noisy, 0., 1.) x_test_noisy = np.clip(x_test_noisy, 0., 1.) n = 10 plt.figure(figsize=(20, 2)) for i in range(n): ax = plt.subplot(1, n, i + 1) plt.imshow(x_test_noisy[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() input_img = Input(shape=(28, 28, 1)) # adapt this if using `channels_first` image data format x = Conv2D(32, (3, 3), activation='relu', padding='same')(input_img) x = MaxPooling2D((2, 2), padding='same')(x) x = Conv2D(32, (3, 3), activation='relu', padding='same')(x) encoded = MaxPooling2D((2, 2), padding='same')(x) # at this point the representation is (7, 7, 32) x = Conv2D(32, (3, 3), activation='relu', padding='same')(encoded) x = UpSampling2D((2, 2))(x) x = Conv2D(32, (3, 3), activation='relu', padding='same')(x) x = UpSampling2D((2, 2))(x) decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x) autoencoder = Model(input_img, decoded) autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy') autoencoder.fit(x_train_noisy, x_train, epochs=10, batch_size=128, shuffle=True, validation_data=(x_test_noisy, x_test), callbacks=[TensorBoard(log_dir='/tmp/tb', histogram_freq=0, write_graph=False)]) decoded_imgs = autoencoder.predict(x_test_noisy) encoder = Model(input_img, encoded) encoded_imgs = encoder.predict(x_test) n = 10 plt.figure(figsize=(20, 4)) for i in range(n): # display original ax = plt.subplot(2, n, i + 1) plt.imshow(x_test_noisy[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # display reconstruction ax = plt.subplot(2, n, i + 1 + n) plt.imshow(decoded_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() ###Output _____no_output_____
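###Markdown The comparisons above are purely visual. A quick quantitative check is the mean squared reconstruction error on the test set. This is a minimal sketch, assuming `x_test`, `x_test_noisy` and the fitted denoising `autoencoder` from the cells above are still in memory; it is not part of the original blog post.

```python
import numpy as np

# Reconstruction error of the denoising autoencoder on clean vs. noisy inputs.
decoded_clean = autoencoder.predict(x_test)        # clean test images in
decoded_noisy = autoencoder.predict(x_test_noisy)  # noisy test images in

mse_clean = np.mean((decoded_clean - x_test) ** 2)
mse_noisy = np.mean((decoded_noisy - x_test) ** 2)
print("MSE (clean input): %.5f" % mse_clean)
print("MSE (noisy input): %.5f" % mse_noisy)
```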
gnn-tracking/notebooks/GNN_Evaluation/CTD2019_KaggleData.ipynb
###Markdown Tensor Flow with High density, trained with weighted Loss function ###Code import networkx as nx import numpy as np import matplotlib.pyplot as plt from graph_nets import utils_np from trackml.dataset import load_event from heptrkx.nx_graph.utils_plot import plot_networkx from heptrkx.nx_graph.utils_data import correct_networkx from heptrkx.nx_graph.utils_data import merge_truth_info_to_hits from heptrkx.postprocess.evaluate_tf import create_evaluator from heptrkx import load_yaml import os import glob config_file = '../configs/train_edge_classifier_kaggle.yaml' input_ckpt = '/global/project/projectdirs/atlas/xju/heptrkx/models/trackml/nxgraph_big_007/bak' iteration = 89908 model = create_evaluator(config_file, iteration, input_ckpt) config = load_yaml(config_file) file_dir = config['make_graph']['out_graph'] hits_graph_dir = config['data']['input_hitsgraph_dir'] trk_dir = config['track_ml']['dir'] base_dir = os.path.join(file_dir, "event{:09d}_g{:09d}_INPUT.npz") true_features = ['pt', 'particle_id', 'nhits'] print(file_dir) evtid = 1391 isec = -1 batch_size = 4 file_names = [] if isec < 0: section_patten = base_dir.format(evtid, 0).replace('_g{:09}'.format(0), '*') n_sections = int(len(glob.glob(section_patten))) file_names = [(base_dir.format(evtid, ii), ii) for ii in range(n_sections)] else: file_names = [(base_dir.format(evtid, isec), isec)] n_batches = len(file_names)//batch_size if len(file_names)%batch_size==0 else len(file_names)//batch_size + 1 split_inputs = np.array_split(file_names, n_batches) dd = os.path.join(trk_dir, 'event{:09d}') hits, particles, truth = load_event(dd.format(evtid), parts=['hits', 'particles', 'truth']) hits = merge_truth_info_to_hits(hits, particles, truth) true_features = ['pt', 'particle_id', 'nhits'] %%time all_graphs = [] is_digraph = True is_bidirection = False # evaluate each graph for ibatch in range(n_batches): ## pad batch_size current_files = list(split_inputs[ibatch]) if len(current_files) < batch_size: last_file = current_files[-1] current_files += [last_file] *(batch_size-len(current_files)) # print(current_files) input_graphs = [] target_graphs = [] for items in current_files: file_name = items[0] with np.load(file_name) as f: input_graphs.append(dict(f.items())) with np.load(file_name.replace("INPUT", "TARGET")) as f: target_graphs.append(dict(f.items())) graphs = model(utils_np.data_dicts_to_graphs_tuple(input_graphs), utils_np.data_dicts_to_graphs_tuple(target_graphs), use_digraph=is_digraph, bidirection=is_bidirection ) if len(graphs) != batch_size: raise ValueError("graph size not the same as batch-size") # decorate the graph with truth info for ii in range(batch_size): idx = int(current_files[ii][1]) id_name = os.path.join(hits_graph_dir, "event{:09d}_g{:03d}_ID.npz".format(evtid, idx)) with np.load(id_name) as f: hit_ids = f['ID'] for node in graphs[ii].nodes(): hit_id = hit_ids[node] graphs[ii].node[node]['hit_id'] = hit_id graphs[ii].node[node]['info'] = hits[hits['hit_id'] == hit_id][true_features].values graphs[ii].graph['info'] = [idx] ## section ID all_graphs += graphs ###Output total_graphs 4 total_graphs 4 total_graphs 4 total_graphs 4 CPU times: user 7min 14s, sys: 52.7 s, total: 8min 7s Wall time: 4min 20s ###Markdown Analyze these graphs. 
Edge features include 'predict' for predictions and 'solution' for truth ###Code from heptrkx.nx_graph import utils_plot import sklearn.metrics fontsize=16 minor_size=14 def plot_metrics2(odd, tdd, odd_th=0.5, tdd_th=0.5, outname='roc_graph_nets.eps'): y_pred, y_true = (odd > odd_th), (tdd > tdd_th) accuracy = sklearn.metrics.accuracy_score(y_true, y_pred) precision = sklearn.metrics.precision_score(y_true, y_pred) recall = sklearn.metrics.recall_score(y_true, y_pred) print('Accuracy: %.4f' % accuracy) print('Precision (purity): %.4f' % precision) print('Recall (efficiency): %.4f' % recall) fpr, tpr, _ = sklearn.metrics.roc_curve(y_true, odd) fig, ax0 = plt.subplots(figsize=(6, 6), constrained_layout=True) # Plot the model outputs # binning=dict(bins=50, range=(0,1), histtype='step', log=True) binning=dict(bins=50, histtype='step', log=True) ax0.hist(odd[y_true==False], lw=2, label='fake', **binning) ax0.hist(odd[y_true], lw=2, label='true', **binning) ax0.set_xlabel('Model output', fontsize=fontsize) ax0.tick_params(width=2, grid_alpha=0.5, labelsize=minor_size) ax0.set_xlim(0, 1) ax0.legend(loc=0, fontsize=fontsize) plt.savefig(outname) weights = [] truths = [] for G in all_graphs: weights += [G.edges[edge]['predict'][0] for edge in G.edges()] truths += [G.edges[edge]['solution'][0] for edge in G.edges()] weights = np.array(weights) truths = np.array(truths) utils_plot.plot_metrics(weights, truths, odd_th=0.5) # plot_metrics2(weights, truths, odd_th=0.5) from heptrkx.postprocess import wrangler, analysis all_true_tracks = wrangler.get_tracks(G, feature_name='solution') all_predict_tracks = wrangler.get_tracks(G, feature_name='predict') true_df = analysis.graphs_to_df(all_true_tracks) pred_df = analysis.graphs_to_df(all_predict_tracks) total_particles = np.unique(true_df.merge(truth, on='hit_id', how='left')['particle_id']) print(len(total_particles)) th = 0. good_pids, bad_pids = analysis.label_particles(pred_df, truth, th, ignore_noise=True) good_trks = hits[hits['particle_id'].isin(good_pids)] def print_info(res_pred): print(res_pred['n_correct'], res_pred['n_wrong']) res_pred = analysis.summary_on_prediction(G, good_trks, pred_df) print("Prediction Info") print_info(res_pred) print("True Info") res_truth = analysis.summary_on_prediction(G, good_trks, true_df) print_info(res_truth) %%time eff, purity, true_ones, fake_ones = analysis.trk_eff_purity(all_true_tracks, all_predict_tracks) print("efficiency:", eff) print("purity:", purity) ###Output efficiency: 0.8704784130688448 purity: 0.9221260815822002 CPU times: user 3min 52s, sys: 1.65 s, total: 3min 53s Wall time: 3min 53s
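###Markdown The cut above fixes the edge-score threshold at 0.5. A simple follow-up is to sweep that threshold and watch how purity (precision) trades off against efficiency (recall). This is only a sketch that reuses the `weights` and `truths` arrays built above; the threshold grid is arbitrary.

```python
import numpy as np
import sklearn.metrics

y_true = truths > 0.5
for th in np.arange(0.1, 1.0, 0.1):
    y_pred = weights > th
    purity = sklearn.metrics.precision_score(y_true, y_pred)
    efficiency = sklearn.metrics.recall_score(y_true, y_pred)
    print("threshold %.1f: purity %.3f, efficiency %.3f" % (th, purity, efficiency))

# Single-number summary of the edge classifier, independent of any threshold.
print("ROC AUC:", sklearn.metrics.roc_auc_score(y_true, weights))
```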
PredictWaterSplit/package use guide.ipynb
###Markdown Input a new molecule: ###Code # Input a new molecule: anion='O2N' A_ion='Ca' B_ion='Nb' volume=1.1 mass=32 your_data, comment = fs.input_screening(A_ion, B_ion, anion, mass, volume) # Check whether the molecule is in our database and get its features, then look at its water-splitting ability. your_data ###Output _____no_output_____ ###Markdown Show the result of its water-splitting ability. ###Code comment ###Output _____no_output_____ ###Markdown If it is not in our database, use the neural network to predict its band gap and heat of formation, and then determine its water-splitting ability. ###Code prediction_watersplitting.model() if comment == 'The molecule is not in our database, so we need to predict': prediction_watersplitting.model() heat_of_formation = prediction_watersplitting.prediction_hof(data_total, your_data) VB_dir = prediction_watersplitting.prediction_on_dir_VB(data_total, your_data) CB_dir = prediction_watersplitting.prediction_on_dir_CB(data_total, your_data) VB_ind = prediction_watersplitting.prediction_on_indir_VB(data_total, your_data) CB_ind = prediction_watersplitting.prediction_on_indir_CB(data_total, your_data) gllbsc_dir_gap = CB_dir - VB_dir gllbsc_ind_gap = CB_ind - VB_ind E0 = -4.5 # Water-splitting screen: stable compound, band gap between 1.4 and 3.1 eV, band edges straddling the water redox levels. if ((heat_of_formation <= 0.21) & (gllbsc_ind_gap >= 1.4) & (gllbsc_ind_gap <= 3.1) & (CB_ind <= 0 - E0) & (VB_ind >= 1.23 - E0)) | ((gllbsc_dir_gap >= 1.4) & (gllbsc_dir_gap <= 3.1) & (CB_dir <= 0 - E0) & (VB_dir >= 1.23 - E0)): comment2 = 'Yes, it can do water-splitting' else: comment2 = 'No, it can\'t do water-splitting.' comment2 ###Output _____no_output_____
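###Markdown The screening condition above is easier to reuse when wrapped in a small helper. The sketch below copies the same thresholds; note that it is an assumption on my part that the heat-of-formation cut should apply to both the direct and the indirect branch (as written above, operator precedence ties it to the indirect branch only), and the function name `can_split_water` is not part of the package.

```python
def can_split_water(heat_of_formation, VB, CB, E0=-4.5):
    # Same screen as the cell above: heat of formation <= 0.21, band gap in [1.4, 3.1] eV,
    # CB <= 0 - E0 and VB >= 1.23 - E0 (with E0 = -4.5 as above).
    gap = CB - VB
    return (heat_of_formation <= 0.21
            and 1.4 <= gap <= 3.1
            and CB <= 0 - E0
            and VB >= 1.23 - E0)

# Hypothetical usage with the values predicted above:
# ok = can_split_water(heat_of_formation, VB_ind, CB_ind) or can_split_water(heat_of_formation, VB_dir, CB_dir)
```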
Samples/design-of-experiments-master/DesignOfExperiments_2020_04_05.ipynb
###Markdown A short tutorial for analyzing a [Design of Experiments](https://en.wikipedia.org/wiki/Design_of_experiments) using the [statmodels](https://www.statsmodels.org/stable/index.html) python library.For this example, I'm going to use the dataset: [CAT/TEMP/PRES/CONC EFFECT ON CHEMICAL PROCESS CONVERSION YIELDBOX, HUNTER & HUNTER (1978)](https://www.itl.nist.gov/div898/education/dex/boxchem.dat), which is available on the [NIST](https://www.itl.nist.gov/div898/education/datasets.htm) website. ###Code import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import statsmodels.api as sm %matplotlib inline PlotWidth = 6 import warnings warnings.filterwarnings('ignore') ###Output _____no_output_____ ###Markdown Unfortunately the data aren't in a _super_ convenient format for importing or data analysis, so we need to do some work to get the data into a pandas dataframe. ###Code columns = ['%Conversion', 'Catalyst Charge', 'Temperature', 'Pressure', 'Concentration', 'Run Order'] myDoE = pd.read_csv('https://www.itl.nist.gov/div898/education/dex/boxchem.dat', skiprows=25, sep='\s+', names=columns, index_col=False) display(myDoE) ###Output _____no_output_____ ###Markdown This is a perfectly valid format to work up the DoE, but it's also nice to be able to assign numeric values to the factors, rather than the -1/+1 notation. ###Code myDoE['Catalyst Charge'] = myDoE['Catalyst Charge'].replace([-1,1], [10,15]) myDoE['Temperature'] = myDoE['Temperature'].replace([-1,1], [220,240]) myDoE['Pressure'] = myDoE['Pressure'].replace([-1,1], [50,80]) myDoE['Concentration'] = myDoE['Concentration'].replace([-1,1], [10,12]) display(myDoE) ###Output _____no_output_____ ###Markdown Before getting too much into modeling, it's good to visualize the data to see what's going on. We'll start with 3 plots for the response:* Histogram* Box Plot* Line Plot vs. Run Order ###Code plt.figure(figsize=(PlotWidth, PlotWidth)) sns.boxplot(data=myDoE['%Conversion']) plt.title('Box Plot of %Conversion') plt.ylabel('%Conversion') plt.show() plt.figure(figsize=(PlotWidth, PlotWidth)) myDoE['%Conversion'].hist() plt.title('%Conversion') plt.xlabel('%Conversion)') plt.show() plt.figure(figsize=(PlotWidth*2, PlotWidth)) plt.scatter(myDoE['Run Order'], myDoE['%Conversion']) plt.title('%Conversion vs. Run Order') plt.xlabel('Run Order') plt.ylabel('%Conversion') plt.show() ###Output _____no_output_____ ###Markdown So it's looks like all the conversion data are of the same order of magnitude and is not autocorrelated to the order in which it was run.Using Statmodels, let's look at just the main effects. ###Code y = myDoE['%Conversion'] X = myDoE[['Catalyst Charge', 'Temperature', 'Pressure','Concentration']] ## An intercept is not added by default, so we need to add that here X = sm.add_constant(X) results = sm.OLS(y, X).fit() results.summary() print(results.summary()) ###Output OLS Regression Results ============================================================================== Dep. Variable: %Conversion R-squared: 0.964 Model: OLS Adj. R-squared: 0.951 Method: Least Squares F-statistic: 74.47 Date: Fri, 28 May 2021 Prob (F-statistic): 6.81e-08 Time: 15:37:29 Log-Likelihood: -37.344 No. 
Observations: 16 AIC: 84.69 Df Residuals: 11 BIC: 88.55 Df Model: 4 Covariance Type: nonrobust =================================================================================== coef std err t P>|t| [0.025 0.975] ----------------------------------------------------------------------------------- const -148.6250 19.844 -7.490 0.000 -192.301 -104.949 Catalyst Charge -1.6000 0.301 -5.313 0.000 -2.263 -0.937 Temperature 1.2000 0.075 15.940 0.000 1.034 1.366 Pressure -0.0750 0.050 -1.494 0.163 -0.185 0.035 Concentration -2.7500 0.753 -3.653 0.004 -4.407 -1.093 ============================================================================== Omnibus: 1.312 Durbin-Watson: 1.696 Prob(Omnibus): 0.519 Jarque-Bera (JB): 0.806 Skew: 0.033 Prob(JB): 0.668 Kurtosis: 1.902 Cond. No. 6.32e+03 ============================================================================== Notes: [1] Standard Errors assume that the covariance matrix of the errors is correctly specified. [2] The condition number is large, 6.32e+03. This might indicate that there are strong multicollinearity or other numerical problems. ###Markdown The catalyst charge, pressure, and concentration are all signficant terms, but the pressure is not.Again, it's a good idea to plot the data, here is the actual %Conversion vs. the model. ###Code plt.figure(figsize=(PlotWidth, PlotWidth)) sns.regplot(x=results.predict(X), y=y) plt.xlabel('Predicted %Conversion') plt.ylabel('Actual %Conversion') plt.title('Actual vs. Predicted %Conversion') plt.show() ###Output _____no_output_____ ###Markdown So that's pretty good, but we seem to be missing interaction terms. In order to obtain those, we will want to add columns to our dataframe for each interaction. Statmodels technically has some built-in capability to do this, but I'm not convinced it properly centers the data. ###Code Factors = ['Catalyst Charge', 'Temperature', 'Pressure', 'Concentration'] Interactions = [[p,q] for p in Factors for q in Factors] InteractionsToUse = Interactions[1:4] + Interactions [6:8] + Interactions [11:12] # newDoE = myDoE.copy(deep=True) InteractionTerms = [] for i in np.arange(len(InteractionsToUse)): InteractionTerms += [InteractionsToUse[i][0] + ' * ' + InteractionsToUse[i][1]] myDoE[InteractionsToUse[i][0] + ' * ' + InteractionsToUse[i][1]] = (myDoE[InteractionsToUse[i][0]]-myDoE[InteractionsToUse[i][0]].mean()) * (myDoE[InteractionsToUse[i][1]]-myDoE[InteractionsToUse[i][1]].mean()) display(myDoE[Factors + InteractionTerms]) y = myDoE['%Conversion'] X = myDoE[Factors + InteractionTerms] ## An intercept is not added by default, so we need to add that here X = sm.add_constant(X) results = sm.OLS(y, X).fit() results.summary() print(results.summary()) plt.figure(figsize=(PlotWidth, PlotWidth)) sns.regplot(x=results.predict(X), y=y) plt.xlabel('Predicted %Conversion') plt.ylabel('Actual %Conversion') plt.title('Actual vs. Predicted %Conversion') plt.show() ###Output OLS Regression Results ============================================================================== Dep. Variable: %Conversion R-squared: 0.998 Model: OLS Adj. R-squared: 0.994 Method: Least Squares F-statistic: 232.9 Date: Fri, 28 May 2021 Prob (F-statistic): 4.95e-06 Time: 15:37:29 Log-Likelihood: -14.856 No. 
Observations: 16 AIC: 51.71 Df Residuals: 5 BIC: 60.21 Df Model: 10 Covariance Type: nonrobust =================================================================================================== coef std err t P>|t| [0.025 0.975] --------------------------------------------------------------------------------------------------- const -148.6250 7.219 -20.589 0.000 -167.181 -130.069 Catalyst Charge -1.6000 0.110 -14.606 0.000 -1.882 -1.318 Temperature 1.2000 0.027 43.818 0.000 1.130 1.270 Pressure -0.0750 0.018 -4.108 0.009 -0.122 -0.028 Concentration -2.7500 0.274 -10.042 0.000 -3.454 -2.046 Catalyst Charge * Temperature 0.0200 0.011 1.826 0.127 -0.008 0.048 Catalyst Charge * Pressure 0.0100 0.007 1.369 0.229 -0.009 0.029 Catalyst Charge * Concentration -8.882e-16 0.110 -8.11e-15 1.000 -0.282 0.282 Temperature * Pressure -0.0042 0.002 -2.282 0.071 -0.009 0.001 Temperature * Concentration 0.2250 0.027 8.216 0.000 0.155 0.295 Pressure * Concentration -0.0083 0.018 -0.456 0.667 -0.055 0.039 ============================================================================== Omnibus: 0.035 Durbin-Watson: 1.635 Prob(Omnibus): 0.983 Jarque-Bera (JB): 0.260 Skew: 0.000 Prob(JB): 0.878 Kurtosis: 2.375 Cond. No. 6.32e+03 ============================================================================== Notes: [1] Standard Errors assume that the covariance matrix of the errors is correctly specified. [2] The condition number is large, 6.32e+03. This might indicate that there are strong multicollinearity or other numerical problems. ###Markdown This time our model is significantly _overfit_, and has too many terms! We need to [remove terms](https://en.wikipedia.org/wiki/Stepwise_regression), one at a time. We could write an algorithm to do it based on p-value thresholds, Log-Liklihood, AIC, or BIC (as far as I know, one isn't built into statsmodels). I'm not going to demonstrate that here, but jump right to the final model (optimized for BIC). ###Code y = myDoE['%Conversion'] X = myDoE[['Catalyst Charge', 'Temperature', 'Pressure', 'Concentration', 'Catalyst Charge * Temperature', 'Catalyst Charge * Pressure', 'Temperature * Pressure', 'Temperature * Concentration']] ## An intercept is not added by default, so we need to add that here X = sm.add_constant(X) results = sm.OLS(y, X).fit() results.summary() print(results.summary()) plt.figure(figsize=(PlotWidth, PlotWidth)) sns.regplot(x=results.predict(X), y=y) plt.xlabel('Predicted %Conversion') plt.ylabel('Actual %Conversion') plt.title('Actual vs. Predicted %Conversion') plt.show() ###Output OLS Regression Results ============================================================================== Dep. Variable: %Conversion R-squared: 0.998 Model: OLS Adj. R-squared: 0.995 Method: Least Squares F-statistic: 391.3 Date: Fri, 28 May 2021 Prob (F-statistic): 1.40e-08 Time: 15:37:29 Log-Likelihood: -15.183 No. 
Observations: 16 AIC: 48.37 Df Residuals: 7 BIC: 55.32 Df Model: 8 Covariance Type: nonrobust ================================================================================================= coef std err t P>|t| [0.025 0.975] ------------------------------------------------------------------------------------------------- const -148.6250 6.227 -23.869 0.000 -163.349 -133.901 Catalyst Charge -1.6000 0.094 -16.933 0.000 -1.823 -1.377 Temperature 1.2000 0.024 50.798 0.000 1.144 1.256 Pressure -0.0750 0.016 -4.762 0.002 -0.112 -0.038 Concentration -2.7500 0.236 -11.641 0.000 -3.309 -2.191 Catalyst Charge * Temperature 0.0200 0.009 2.117 0.072 -0.002 0.042 Catalyst Charge * Pressure 0.0100 0.006 1.587 0.156 -0.005 0.025 Temperature * Pressure -0.0042 0.002 -2.646 0.033 -0.008 -0.000 Temperature * Concentration 0.2250 0.024 9.525 0.000 0.169 0.281 ============================================================================== Omnibus: 0.358 Durbin-Watson: 1.630 Prob(Omnibus): 0.836 Jarque-Bera (JB): 0.365 Skew: 0.288 Prob(JB): 0.833 Kurtosis: 2.536 Cond. No. 6.32e+03 ============================================================================== Notes: [1] Standard Errors assume that the covariance matrix of the errors is correctly specified. [2] The condition number is large, 6.32e+03. This might indicate that there are strong multicollinearity or other numerical problems.
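###Markdown The stepwise removal of terms mentioned above (optimizing BIC) is easy to sketch, even though the notebook jumps straight to the final model. This is a rough illustration only: it assumes `myDoE`, `Factors` and `InteractionTerms` from the earlier cells, and it is not the exact procedure used to select the model above.

```python
import statsmodels.api as sm

def backward_eliminate_bic(y, X):
    # Greedy backward elimination: repeatedly drop the term whose removal lowers BIC the most.
    cols = list(X.columns)
    best_bic = sm.OLS(y, sm.add_constant(X[cols])).fit().bic
    while len(cols) > 1:
        trials = {c: sm.OLS(y, sm.add_constant(X[[k for k in cols if k != c]])).fit().bic
                  for c in cols}
        drop = min(trials, key=trials.get)
        if trials[drop] >= best_bic:
            break  # no single removal improves BIC any further
        best_bic, cols = trials[drop], [c for c in cols if c != drop]
    return cols, best_bic

kept, bic = backward_eliminate_bic(myDoE['%Conversion'], myDoE[Factors + InteractionTerms])
print('Kept terms:', kept, ' BIC: %.2f' % bic)
```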
jupyter/hold4others/croppedImageSender.ipynb
###Markdown croppedImageSender - docs and installInteractive cropping tool to define region of interest on a video frame and send the video frames to the Streams application.This is the cropping tool...- https://openbits.app/posts/python-interactive-cropping/You need to install it:```pip install interactivecrop``` ###Code # The orignal test code # crop(sample_images, sample_names, optimize=True, continuous_update=True) ###Output _____no_output_____ ###Markdown Specify video that will be cropped and analyized. **StaticVideo** should point to the video that is to analyized. ###Code StaticVideo = '/Users/siegenth/Data/airportGate.mp4' ###Output _____no_output_____ ###Markdown import all the support components ###Code from interactivecrop.interactivecrop import main as crop from interactivecrop.samples import sample_images, sample_names """" Send video, frame-by-frame to Kafka interface - Frame is encoded into ascii so no one gets upset with the data. - Frame will be decomposed into chunks of 'CHUNK_SIZE'. When debugging found Kafka would not send message if it went over threshold. - Receiving test notebook VideoRcvKafka - The Steams application VideoRcvKafka recieves the encode image and scores it with Model. """ import kafka import os import sys import json import base64 import ssl import time import datetime import io from PIL import Image import logging import cv2 import matplotlib.pyplot as plt import numpy as np if '../juypter' not in sys.path: sys.path.insert(0, '../juypter') import credential import streams_aid as aid logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO")) # img_encoded = str(base64.b64encode(response.content).decode("utf-8")) # img_encoded = str(base64.b64encode(img).decode('utf-8')) def bts_to_img(bts): buff = np.fromstring(bts, np.uint8) buff = buff.reshape(1, -1) img = cv2.imdecode(buff, cv2.IMREAD_COLOR) return img def convertToRGB(image): return cv2.cvtColor(image, cv2.COLOR_BGR2RGB) def encode_img(img): """must be easier way""" with io.BytesIO() as output: img.save(output, format="JPEG") contents = output.getvalue() return base64.b64encode(contents).decode('ascii') def decode_img(bin64): """must be easier way""" img = Image.open(io.BytesIO(base64.b64decode(bin64))) return img ###Output _____no_output_____ ###Markdown Get and image from video and set region of interest. collect one frame from the video ###Code def collect_frames(video_url, frame_count=1, frame_modulo=24, debug=False): """collect a set of frames from the video to work out the cropping region. Notes: - pull out the frames based upon the modulo and frame_count - the correct way, find frames that hav signficant difference between each - now """ frames = [] """get the crop region for a video. [] pull up some frames... [x] - send frames to cropper [x] - get cropper regionquick :param kafka_prod: the handle to sent out messages on kafka :param frame_modulo: send every x frames :param send_wait: after sending a frame wait time :param debug: decode image and write out to verify :return: None """ frame_num = 0 cap = cv2.VideoCapture(video_url) while(cap.isOpened()): ret, frame = cap.read() if ret is False: break frame_num += 1 if not(frame_num % frame_modulo): if debug: image_encoded =encode_img(Image.fromarray(frame, 'RGB')) # debugging - render what we will send. 
img_raw = decode_img(image_encoded) plt.imshow(img_raw) plt.show() # break down frame into chunks frames.append(frame) if frame_count <= len(frames): break if cv2.waitKey(1) & 0xFF == ord('q'): break cap.release() cv2.destroyAllWindows() return frames secs = 30 frames = collect_frames(video_url=StaticVideo,frame_modulo=30*secs, frame_count=1, debug=False) print("Collected {} frames at the {} second mark.".format(len(frames), secs)) ###Output Collected 1 frames at the 30 second mark. ###Markdown Use the collected frame to define a crop region. ###Code SHAPE = None def grabCropShape(image_name, shape): global SHAPE SHAPE = shape print("set SHAPE ", image_name, shape, flush=True) ###Output _____no_output_____ ###Markdown Press the 'Save Crop Size' button to capture the crop region. ###Code crop(frames, callback=grabCropShape) ###Output _____no_output_____ ###Markdown Verify that the captured region is what you expected- verify what we collected- **RegionOfInterest** is the cropping specification that will be applied to frame sent ###Code image_encoded =encode_img(Image.fromarray(frames[0], 'RGB')) img_raw = decode_img(image_encoded) print("Image size : {} crop region : {} ".format(img_raw.size, SHAPE.size)) RegionOfInterest = (SHAPE.size[0], SHAPE.size[1], SHAPE.size[0]+SHAPE.size[2], SHAPE.size[1]+SHAPE.size[3]) print("regionOfInterest:",RegionOfInterest) cropped = img_raw.crop(RegionOfInterest) plt.imshow(cropped) plt.show() ###Output _____no_output_____ ###Markdown Send Cropped Region.... ###Code def kafka_producer(credentials): """ Open the connection to the kafka producer :param credentials: :return: kafka producer Request is responsilbe for closing producer. """ prod = None while prod is None: try: prod = kafka.KafkaProducer(bootstrap_servers=credentials["kafka_brokers_sasl"], security_protocol="SASL_SSL", sasl_mechanism="PLAIN", sasl_plain_username=credentials["user"], sasl_plain_password=credentials["api_key"], ssl_cafile=ssl.get_default_verify_paths().cafile) except kafka.errors.NoBrokersAvailable: logging.warning("No Brokers Available. Retrying ...") time.sleep(1) prod = None return prod CHUNK_SIZE = 100000 # maximum number of bytes to transmit at a time def video_kafka(video_url, regionOfInterest, kafka_prod, kafka_topic='VideoFrame', frame_modulo=24, send_wait=.25, debug=False): """Send video via Kafka :param video_url: url of video to pull in and send :param kafka_prod: the handle to sent out messages on kafka :param frame_modulo: send every x frames :param send_wait: after sending a frame wait time :param debug: decode image and write out to verify :return: None """ frame_num = 0 cap = cv2.VideoCapture(video_url) while(cap.isOpened()): ret, frame = cap.read() if ret is False: break frame_num += 1 if not(frame_num % frame_modulo): # crop each frame before sending it. orginal_encoded =encode_img(Image.fromarray(frame, 'RGB')) img_raw = decode_img(orginal_encoded) cropped = img_raw.crop(regionOfInterest) image_encoded = encode_img(cropped) if debug: # debugging - render what we will send. img_raw = decode_img(image_encoded) plt.imshow(img_raw) plt.show() # break down frame into chunks chunks = [image_encoded[i * CHUNK_SIZE:(i + 1) * CHUNK_SIZE] for i in range((len(image_encoded) + CHUNK_SIZE - 1) // CHUNK_SIZE)] # send the chunks. 
for idx, chunk in enumerate(chunks): logging.debug("chunking - {} #chunks :{} idx:{} len(chunk):{}".format(video_url, len(chunks), idx, len(chunk))) chunk_content = {'video': video_url, 'frame': frame_num, 'chunk_idx':idx, 'chunk_total':len(chunks), 'timestamp': datetime.datetime.utcnow().isoformat() + 'Z', 'data': chunk } kafka_prod.send(kafka_topic, value=json.dumps(chunk_content).encode('utf-8')) ## finish the frame frame chunk_complete = {'video': video_url, 'frame': frame_num, 'chunk_idx': len(chunks), 'chunk_total': len(chunks), 'timestamp': datetime.datetime.utcnow().isoformat() + 'Z', 'data': "" } logging.info("Transmit frame #{}".format(chunk_content["frame"])) kafka_prod.send(kafka_topic, value=json.dumps(chunk_complete).encode('utf-8')) time.sleep(send_wait) if cv2.waitKey(1) & 0xFF == ord('q'): break cap.release() cv2.destroyAllWindows() return @aid.catchInterrupt def videoStream(topic="VideoFrame", videoUrl=None, regionOfInterest=None): creds = json.loads(credential.magsEventStream) prod = kafka_producer(creds,) video_kafka(videoUrl, regionOfInterest, prod, kafka_topic=topic, send_wait=1, frame_modulo=24, debug=False) prod.close() TOPIC="VideoFrame" videoStream(topic=TOPIC, videoUrl=StaticVideo, regionOfInterest=RegionOfInterest) ###Output _____no_output_____
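###Markdown For reference, the receiving side has to buffer chunks until the terminating message (the one whose `chunk_idx` equals `chunk_total` and whose `data` is empty) arrives, then reassemble and decode the frame. The sketch below is one way to do that with `kafka.KafkaConsumer`; the consumer construction and credentials are only indicated in a comment, and this is not taken from the VideoRcvKafka notebook itself.

```python
import json
import kafka

def consume_frames(consumer):
    """Reassemble frames chunked by video_kafka(); yields (frame_number, base64_image)."""
    buffers = {}  # frame number -> list of chunk payloads received so far
    for msg in consumer:
        chunk = json.loads(msg.value.decode('utf-8'))
        frame, idx, total = chunk['frame'], chunk['chunk_idx'], chunk['chunk_total']
        if idx < total:                       # ordinary chunk: keep its payload
            buffers.setdefault(frame, []).append(chunk['data'])
        else:                                 # terminating message: frame is complete
            yield frame, ''.join(buffers.pop(frame, []))

# Hypothetical usage (same SASL settings as kafka_producer above):
# consumer = kafka.KafkaConsumer(TOPIC, bootstrap_servers=creds["kafka_brokers_sasl"], ...)
# for frame_num, image_b64 in consume_frames(consumer):
#     plt.imshow(decode_img(image_b64))  # decode_img() is defined earlier in this notebook
```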
_build/html/_sources/lectures/lecture6.ipynb
###Markdown Naive Bayes and Hyperparameter Optimization*Hayley Boyce, May 5th, 2021* ###Code # Importing our libraries import pandas as pd import altair as alt import numpy as np from sklearn.tree import DecisionTreeClassifier from sklearn.dummy import DummyClassifier, DummyRegressor from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor from sklearn.model_selection import cross_validate, train_test_split from sklearn.svm import SVR, SVC import sys sys.path.append('code/') from display_tree import display_tree from plot_classifier import plot_classifier import matplotlib.pyplot as plt # Preprocessing and pipeline from sklearn.impute import SimpleImputer from sklearn.metrics.pairwise import euclidean_distances from sklearn.pipeline import Pipeline, make_pipeline from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, StandardScaler, MinMaxScaler ###Output _____no_output_____ ###Markdown House Keeping - Quiz Today!- Result of Polls- Assignment due Monday- Project groups this week- Project instructions next week- Heavy class today, more learning, less practice (sorry!) Lecture Learning Objectives - Explain the naive assumption of naive Bayes. - Predict targets by hands-on toy examples using naive Bayes.- Use `scikit-learn`'s `MultiNomialNB`.- Use `predict_proba` and explain its usefulness. - Explain the need for smoothing in naive Bayes.- Explain how `alpha` controls the fundamental tradeoff. - Explain the need for hyperparameter optimization - Carry out hyperparameter optimization using `sklearn`'s `GridSearchCV` and `RandomizedSearchCV`. Five Minute Recap/ Lightning Questions - What kind of preprocessing must I do if I have a feature with categories that have an order to them?- How many columns do I need for a binary feature?- What tool do we use to preprocess all our pipelines and build a model without breaking the golden rule? - Between `Pipeline()` and `make_pipeline()`, which one assigns names to the steps on our behalf? - In text data, what are our features made up of? Some lingering questions- How do I tune multiple hyperparameters at once?- What algorithm works well with our `spam`, `non spam` problem? Naive Bayes introduction - spam/non spamLast lecture we saw this spam classification problem where we used `CountVectorizer()` to vectorize the text into features and used an `SVC` to classify each text message into either a class of `spam` or `non spam`. $X = \begin{bmatrix}\text{"URGENT!! You have been selected to receive a £900 prize reward!",}\\ \text{"Lol your always so convincing."}\\ \text{"Congrats! 1 year special cinema pass for 2 is yours. call 09061209465 now!"}\\ \end{bmatrix}$ and $y = \begin{bmatrix}\text{spam} \\ \text{non spam} \\ \text{spam} \end{bmatrix}$ ###Code For years, the best spam filtering methods used naive Bayes. Naive Bayes is based on Bayes' Theorem: <img src='imgs/bayes.png' width="50%"> - This is our first probabilistic classifier where we think of learning as a problem of statistical inference. - Other applications of Naive Bayes: - Folder ordering, document clustering, etc. - Sentiment analysis (e.g., movies, restaurants, etc.) - Classifying products into groups based on descriptions ###Output _____no_output_____ ###Markdown Naive Bayes from scratch Let's do some naive Bayes calculations **by hand**🖐 🤚 . Yes, there is going to be some math here but it's going to be really helpful in understanding how this algorithm works! Below we have a few texts and they are classed as either being **spam** or **non spam**. 
###Code df = pd.DataFrame({'X': [ "URGENT!! As a valued network customer you have been selected to receive a £900 prize reward!", "Lol you are always so convincing.", "Sauder has interesting courses.", "URGENT! You have won a 1 week FREE membership in our £100000 prize Jackpot!", "Had your mobile 11 months or more? U R entitled to Update to the latest colour mobiles with camera for Free!", "Sauder has been interesting so far." ], 'y': ["spam", "non spam", "non spam", "spam", "spam", "non spam"]}) df ###Output _____no_output_____ ###Markdown We know that we need to encode categorical data and transform it to numeric data to use it with machine learning since categoric columns throw an error when we try to fit our model.This sounds like a job for `CountVectorizer()` since we have words that need to be converted into features! Here we are going to set `max_features=4` to make our calculations a little easier and `stop_words='english'` so we are getting meaningful words as features and not stop words. ###Code from sklearn.feature_extraction.text import CountVectorizer count_vect = CountVectorizer(max_features = 4, stop_words='english') data = count_vect.fit_transform(df['X']) train_bow_df = pd.DataFrame(data.toarray(), columns=sorted(count_vect.vocabulary_), index=df['X']) train_bow_df['target'] = df['y'].tolist() train_bow_df ###Output _____no_output_____ ###Markdown Suppose we are given 2 text messages in and we want to find the targets for these examples, how do we do it using naive Bayes?First, let's get a numeric representation of our text messages. ###Code test_texts = ["URGENT! Free!!", "I like Sauder"] data = count_vect.transform(test_texts).toarray() test_bow_df = pd.DataFrame(data, columns=count_vect.vocabulary_, index=test_texts) test_bow_df ###Output _____no_output_____ ###Markdown Let's look at the text: "**URGENT! Free!!**"> Is this **spam** or **non spam**? So what we want to know is: $$P(\textrm{spam}|\textrm{"URGENT! Free!!"})$$$$ \text{and} $$$$P(\textrm{non spam}|\textrm{"URGENT! Free!!"})$$We really only care which one of these is bigger and whichever probability is larger is how we can classify our sentence as **spam** or **non spam**.$$P(\textrm{spam}|\textrm{"URGENT! Free!!"}) > P(\textrm{non spam}|\textrm{"URGENT! Free!!"})$$ Remember our Bayes' Theorem is the following:$$\text{P}(Y|X) = \frac{\text{P}(X | Y) \text{P}(Y)}{\text{P}(X)}$$In this case:$X$ is the representation of the words in our text ie; $\text{free} = 1, \text{prize} = 0, \text{sauder} = 0, \text{urgent} = 1$ $y$ is our target either spam or non spam Substituting into Bayes rule we get:$$\frac{P(\text{free} = 1, \text{prize} = 0,\text{sauder} = 0, \text{urgent} = 1 |\textrm{spam})*P(\textrm{spam})}{P(\text{free} = 1, \text{prize} = 0,\text{sauder} = 0, \text{urgent} = 1 )}>\frac{P(\text{free} = 1, \text{prize} = 0, \text{sauder} = 0, \text{urgent} = 1 |\textrm{non spam})*P(\textrm{non spam})}{P(\text{free} = 1, \text{prize} = 0,\text{sauder} = 0, \text{urgent} = 1 )}$$ Now, there are two reasons naive Bayes is so easy:1. We can cancel out the denominator which leads us to this: $$P(\text{free} = 1, \text{prize} = 0,\text{sauder} = 0, \text{urgent} = 1|\textrm{spam})*P(\textrm{spam}) > P(\text{free} = 1, \text{prize} = 0,\text{sauder} = 0, \text{urgent} = 1|\textrm{non spam})*P(\textrm{non spam})$$2. We can simplify the numerator Naive Bayes' approximationWe assume each feature (word) is conditionally independent. 
(Assume that all features in $X$ are mutually independent, conditional on the target class.)- In general, $$P(\text{message} \mid \text{spam}) = P(w_1, w_2, . . . , w_d \mid \text{spam}) \approx \prod_{i=1}^{d}P(w_i \mid \text{spam})$$$$P(\text{message} \mid \text{non spam}) = P(w_1, w_2, . . . , w_d \mid \text{non spam}) \approx \prod_{i=1}^{d}P(w_i \mid \text{non spam})$$ That means simply:$$\begin{equation}\begin{split}& P(\text{free} = 1, \text{prize} = 0,\text{sauder} = 0, \text{urgent} = 1 \mid \text{spam}) \\&\approx P(\text{free} = 1 \mid \text{spam}) \times P(\text{prize} = 0 \mid \text{spam}) \times P(\text{sauder} = 0 \mid \text{spam}) \times P(\text{urgent} = 1 \mid \text{spam})\end{split}\end{equation}$$And for the other class **non spam**:$$\begin{equation}\begin{split}& P(\text{free} = 1, \text{prize} = 0,\text{sauder} = 0, \text{urgent} = 1 \mid \text{non spam}) \\&\approx P(\text{free} = 1 \mid \text{non spam}) \times P(\text{prize} = 0 \mid \text{non spam}) \times P(\text{sauder} = 0 \mid \text{non spam}) \times P(\text{urgent} = 1 \mid \text{non spam})\end{split}\end{equation}$$ So our equation has boiled down to is:$$ P(\text{free} = 1 \mid \text{spam}) \times P(\text{prize} = 0 \mid \text{spam}) \times P(\text{sauder} = 0 \mid \text{spam}) \times P(\text{urgent} = 1 \mid \text{spam})*P(\textrm{spam}) >$$ $$ P(\text{free} = 1 \mid \text{non spam}) \times P(\text{prize} = 0 \mid \text{non spam}) \times P(\text{sauder} = 0 \mid \text{non spam}) \times P(\text{urgent} = 1 \mid \text{non spam}) *P(\textrm{non spam})$$ - Now we just need to calculate each of those probabilities which is easy! Estimating $P(\text{spam} \mid \text{message})$ (The left side of our equation)$$P(\text{free} = 1 \mid \text{spam}) \times P(\text{prize} = 0 \mid \text{spam}) \times P(\text{sauder} = 0 \mid \text{spam}) \times P(\text{urgent} = 1 \mid \text{spam})*P(\textrm{spam}) $$ We need the following: 1. Prior probability: $P(\text{spam})$ 2. Conditional probabilities: 1. $P(\text{free} = 1 \mid \text{spam})$ 2. $P(\text{prize} = 0 \mid \text{spam})$ 3. $P(\text{sauder} = 0 \mid \text{spam})$ 4. $P(\text{urgent} = 1 \mid \text{spam})$ ###Code train_bow_df ###Output _____no_output_____ ###Markdown - Prior probability - $P(\text{spam}) = 3/6$ - Conditional probabilities - What is $P(\text{free} = 1 \mid \text{spam})$ ?? - Given target is spam, how often "free"= 1? $= 2/3$ - $P(\text{prize} = 0 \mid \text{spam}) = 1/3$ - $P(\text{sauder} = 0 \mid \text{spam}) = 3/3$ - $P(\text{urgent} = 1 \mid \text{spam}) = 2/3$ Now we have everything we need to do our calculations! $$P(\textrm{spam}|\text{free} = 1, \text{prize} = 0, \text{sauder} = 0, \text{urgent} = 1) = P(\text{free} = 1|\textrm{spam})*P(\text{prize} = 0|\textrm{spam})*P(\textrm{sauder = 0}|\textrm{spam})*P(\text{urgent} = 1|\textrm{spam})*P(\textrm{spam})$$$$= \frac{2}{3} * \frac{1}{3}* \frac{3}{3} * \frac{2}{3} *\frac{3}{6} $$ ###Code spam_prior = 3/6 sauder0_spam = 3/3 free1_spam = 2/3 prize0_spam = 1/3 urgent1_spam = 2/3 spam_prob = spam_prior * sauder0_spam * free1_spam * prize0_spam * urgent1_spam spam_prob ###Output _____no_output_____ ###Markdown Ok, So we've done our left side! Now we have to do the right! Estimating $P(\text{non spam} \mid \text{message})$ (The right side of our equation)$$P(\text{free} = 1 \mid \text{ non spam}) \times P(\text{prize} = 0 \mid \text{non spam}) \times P(\text{sauder} = 0 \mid \text{non spam}) \times P(\text{urgent} = 1 \mid \text{non spam})*P(\textrm{non spam}) $$ Now we need the following:1. 
Prior probability: $P(\text{non spam})$ 2. Conditional probabilities: 1. $P(\text{free} = 1 \mid \text{non spam})$ 2. $P(\text{prize} = 0 \mid \text{non spam})$ 3. $P(\text{sauder} = 0 \mid \text{non spam})$ 4. $P(\text{urgent} = 1 \mid \text{non spam})$Again we use the data to calculate these probabilities. ###Code train_bow_df ###Output _____no_output_____ ###Markdown - Prior probability - $P(\text{non spam}) = 3/6$- Conditional probabilities - What is $P(\text{free} = 1 \mid \text{non spam})$ ? - Given the target is non spam, how often "free"=1? $0/3$ - $P(\text{prize} = 0 \mid \text{non spam}) = 3/3$ - $P(\text{sauder} = 0 \mid \text{non spam}) = 1/3$ - $P(\text{urgent} = 1 \mid \text{non spam}) = 0/3$ Time for our calculation:$$P(\textrm{non spam}|\text{free} = 1, \text{prize} = 0,\text{sauder} = 0, \text{urgent} = 1) = P(\text{free} = 1|\textrm{non spam})*P( \text{prize} = 0|\textrm{non spam})*P(\textrm{sauder = 0}|\textrm{non spam})*P(\text{urgent} = 1|\textrm{non spam})*P(\textrm{non spam})$$$$= \frac{0}{3} * \frac{3}{3} * \frac{1}{3}* \frac{0}{3} *\frac{3}{6} $$ ###Code non_spam_prior = 3/6 sauder0_non_spam = 1/3 free1_non_spam = 0/3 prize0_non_spam = 3/3 urgent1_non_spam = 0/3 non_spam_prob = non_spam_prior * sauder0_non_spam * free1_non_spam * prize0_non_spam * urgent1_non_spam non_spam_prob ###Output _____no_output_____ ###Markdown so our equation: $$ P(\text{free} = 1 \mid \text{spam}) \times P(\text{prize} = 0 \mid \text{spam}) \times P(\text{sauder} = 0 \mid \text{spam}) \times P(\text{urgent} = 1 \mid \text{spam})*P(\textrm{spam}) >$$ $$ P(\text{free} = 1 \mid \text{non spam}) \times P(\text{prize} = 0 \mid \text{non spam}) \times P(\text{sauder} = 0 \mid \text{non spam}) \times P(\text{urgent} = 1 \mid \text{non spam}) *P(\textrm{non spam})$$has been calculated to 0.07407407407407407 > 0.0Since our left side is greater than the right side, our text is classified as **spam**!We could normalize this result and say 100% spam and 0% non spam so that the probabilities add up to 100%. Now let's verify our result using sklearn. Naive Bayes classifierThe main Naive Bayes classifier in sklearn is called `MultinomialNB` and exists in the `naive_bayes` module. ###Code from sklearn.naive_bayes import MultinomialNB train_bow_df ###Output _____no_output_____ ###Markdown Let's split up our data into our features and targets: ###Code X_train = train_bow_df.drop(columns='target') y_train = train_bow_df['target'] ###Output _____no_output_____ ###Markdown Here I am selecting the first row of our test set which was the **URGENT! Free!!** text. ###Code test_bow_df.iloc[[0]] ###Output _____no_output_____ ###Markdown Here we get a prediction of spam: ###Code nb = MultinomialNB(alpha=0) nb.fit(X_train, y_train) nb.predict(test_bow_df.iloc[[0]]) ###Output /usr/local/lib/python3.8/site-packages/sklearn/naive_bayes.py:511: UserWarning: alpha too small will result in numeric errors, setting alpha = 1.0e-10 warnings.warn('alpha too small will result in numeric errors, ' ###Markdown Instead of using `predict`, we can use something called `predict_proba()` with the Naive Bayes classifier, which gives us the ***proba***bilities of each class happening. - `predict` returns the class with the highest probability.- `predict_proba` gives us the actual probability scores. - Looking at the probabilities can help us understand the model.We will look more into this in Lecture 7. 
###Code prediction = nb.predict_proba(test_bow_df.iloc[[0]]) pd.DataFrame(data =prediction,columns = nb.classes_) ###Output _____no_output_____ ###Markdown We get the same probabilities as we did it by hand. (Ok 2.250000e-20 is essentially 0 but due to computing and storage, python specifies this 0 as an extremely small number.)What about this warning we see? > 'alpha too small will result in numeric errors'Well, let's look at our conditional probabilities again from the right side of our equation. - Conditional probabilities - $P(\text{free} = 1 \mid \text{non spam}) = 0/3$ - $P(\text{prize} = 0 \mid \text{non spam}) = 3/3$ - $P(\text{sauder} = 0 \mid \text{non spam}) = 1/3$ - $P(\text{urgent} = 1 \mid \text{non spam}) = 0/3$ Is it wise to say that given a text that is non spam the probability of free occurring is 0? Not really. We only are using 6 examples here and setting this to 0 (and $P(\text{urgent} = 1 \mid \text{non spam}) = 0$) is making the whole right side of the equation equal to 0. Naive Bayes naively multiplies all the feature likelihoods together, and if any of the terms is zero, it's going to void all other evidence and the probability of the class is going to be zero. This is somewhat problematic. We have limited data and if we do not see a feature occurring with a class, it doesn't mean it would never occur with that class. How can we fix this? A simple solution: Laplace smoothing- The simplest way to avoid zero probabilities is to add a value($\alpha$) to all the counts. This is called **Laplace smoothing**Generally, we set alpha ($\alpha$) equal to 1 and in `scikit-learn` we control it using hyperparameter `alpha`.This means that we give an instance of every word appearing once with a target of spam, as well as a target of non spam. By default `alpha=1.0` in `scikit-learn`.Let's see what our probabilities are now using alpha=1. ###Code nb = MultinomialNB(alpha=1) nb.fit(X_train, y_train) pd.DataFrame(data = nb.predict_proba(test_bow_df.iloc[[0]]), columns = nb.classes_) ###Output _____no_output_____ ###Markdown A bit smoother now, wouldn't you say? `alpha` hyperparameter and the fundamental tradeoff - High alpha $\rightarrow$ underfitting - means we are adding large counts to everything and so we are diluting the data- Low alpha $\rightarrow$ overfitting Naive Bayes on Real Datalet's try `scikit-learn`'s implementation of Naive Bayes on a modified version of Kaggle's [Disaster Tweets](https://www.kaggle.com/vstepanenko/disaster-tweets). ###Code tweets_df = pd.read_csv("data/tweets_mod.csv") tweets_df ###Output _____no_output_____ ###Markdown Let's split it into our training and test sets as well as our features and target objects. ###Code train_df, test_df = train_test_split(tweets_df, test_size=0.2, random_state=123) X_train, y_train = train_df["text"], train_df["target"] X_test, y_test = test_df["text"], test_df["target"] train_df.head() ###Output _____no_output_____ ###Markdown Next, we make a pipeline and cross-validate! ###Code pipe_nb = make_pipeline(CountVectorizer(), MultinomialNB(alpha=1)) scores = cross_validate(pipe_nb, X_train, y_train, return_train_score=True) pd.DataFrame(scores) pd.DataFrame(scores).mean() ###Output _____no_output_____ ###Markdown Let's PracticeUsing naive Bayes by hand, what class would naive Bayes predict for the second example "I like Sauder". ###Code train_bow_df test_bow_df.iloc[[1]] ###Output _____no_output_____ ###Markdown Let's do some of the steps here: **spam side**1\. Prior probability: $P(\text{spam}) = $ 2\. 
Conditional probabilities: 2.1 $P(\text{free} = 0 \mid \text{spam}) = $ 2.2 $P(\text{prize} = 0 \mid \text{spam}) = $2.3 $P(\text{sauder} = 1 \mid \text{spam}) = $2.4 $P(\text{urgent} = 0 \mid \text{spam}) = $ 3\. $P(\textrm{spam}|\text{free} = 0, \text{prize} = 0, \text{sauder} = 1, \text{urgent} = 0) = $**non spam side** 4\. Prior probability: $P(\text{non spam}) = $ 5\. Conditional probabilities: 5.1 $P(\text{free} = 0 \mid \text{non spam}) = $ 5.2 $P(\text{prize} = 0 \mid \text{non spam}) = $ 5.3 $P(\text{sauder} = 1 \mid \text{non spam}) = $ 5.4 $P(\text{urgent} = 0 \mid \text{non spam}) = $ 6\. $P(\textrm{non spam}|\text{free} = 0, \text{prize} = 0, \text{sauder} = 1, \text{urgent} = 0) =$ **Final Class** 7\. CLASS AS: ```{admonition} Solutions!:class: dropdown1/. $3/6$ 2.1 $1/3$ 2.2 $1/3$ 2.3 $0/3$ 2.4 $1/3$ 3\. $\frac{1}{3} * \frac{1}{3}* \frac{0}{3} * \frac{1}{3} *\frac{3}{6} = 0$ 4\. $3/6$ 5.1 $3/3$ 5.2 $3/3$ 5.3 $2/3$ 5.4 $3/3$ 6\. $\frac{3}{3} * \frac{3}{3}* \frac{2}{3} * \frac{3}{3} *\frac{3}{6} = 1/3$ 7\. Non spam ``` Automated Hyperparameter OptimizationWe’ve seen quite a few different hyperparameters for different models. We’ve seen `max_depth` and `min_samples_split` for decision trees. We’ve seen `n_neighbors` and `weights` for K-Nearest Neighbours and we’ve seen `gamma` and `C` for SVMs with RBF.We’ve even seen hyperparameters for our transformations like `strategy` for our `SimpleImputer()`. They are important and we’ve seen they can really help optimize your model, but we’ve also seen how difficult it can be to figure out how to set them. The problem with hyperparameters- We may have a lot of them. (deep learning!)- Picking reasonable hyperparameters is important -> it helps avoid underfit or overfit models. - Nobody knows exactly how to choose them.- May interact with each other in unexpected ways.- The best settings depend on the specific data/problem.- Can take a long time to execute. How to pick hyperparameters - Manual hyperparameter optimization (What we've done so far) - We may have some intuition about what might work. - It takes a lot of work. **OR...**- **Automated hyperparameter optimization** (hyperparameter tuning) - Reduce human effort. - Less prone to error. - Data-driven approaches may be effective. - It may be hard to incorporate intuition. - Overfitting on the validation set. Automated hyperparameter optimization- Exhaustive grid search: [`sklearn.model_selection.GridSearchCV`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html)- Randomized hyperparameter optimization: `sklearn.model_selection.RandomizedSearchCV` Let's Apply itLet's bring back the cities dataset we worked with in previous lectures. ###Code cities_df = pd.read_csv("data/canada_usa_cities.csv") train_df, test_df = train_test_split(cities_df, test_size=0.2, random_state=123) X_train, y_train = train_df.drop(columns=['country']), train_df['country'] X_test, y_test = test_df.drop(columns=['country']), test_df['country'] X_train.head() ###Output _____no_output_____ ###Markdown Exhaustive grid search - Trying ALL the optionsWe import `GridSearchCV` from `sklearn.model_selection` ###Code from sklearn.model_selection import GridSearchCV ###Output _____no_output_____ ###Markdown We need to first decide on our model and which hyperparameters we want to tune. We are going to use an SVC classifier. After that, we built a dictionary called `param_grid` and we specify the values we wish to look over for the hyperparameter. 
###Code param_grid = {"gamma": [0.1, 1.0, 10, 100]} ###Output _____no_output_____ ###Markdown Then we initiate our model: ###Code svc = SVC() grid_search = GridSearchCV(svc, param_grid, verbose=2) ###Output _____no_output_____ ###Markdown Assigning `verbose` tells `GridSearchCV` to print some output while it's running. ###Code grid_search.fit(X_train, y_train) ###Output Fitting 5 folds for each of 4 candidates, totalling 20 fits [CV] gamma=0.1 ....................................................... [CV] ........................................ gamma=0.1, total= 0.0s [CV] gamma=0.1 ....................................................... [CV] ........................................ gamma=0.1, total= 0.0s [CV] gamma=0.1 ....................................................... [CV] ........................................ gamma=0.1, total= 0.0s [CV] gamma=0.1 ....................................................... [CV] ........................................ gamma=0.1, total= 0.0s [CV] gamma=0.1 ....................................................... [CV] ........................................ gamma=0.1, total= 0.0s [CV] gamma=1.0 ....................................................... [CV] ........................................ gamma=1.0, total= 0.0s [CV] gamma=1.0 ....................................................... [CV] ........................................ gamma=1.0, total= 0.0s [CV] gamma=1.0 ....................................................... [CV] ........................................ gamma=1.0, total= 0.0s [CV] gamma=1.0 ....................................................... [CV] ........................................ gamma=1.0, total= 0.0s [CV] gamma=1.0 ....................................................... [CV] ........................................ gamma=1.0, total= 0.0s [CV] gamma=10 ........................................................ [CV] ......................................... gamma=10, total= 0.0s [CV] gamma=10 ........................................................ [CV] ......................................... gamma=10, total= 0.0s [CV] gamma=10 ........................................................ [CV] ......................................... gamma=10, total= 0.0s [CV] gamma=10 ........................................................ [CV] ......................................... gamma=10, total= 0.0s [CV] gamma=10 ........................................................ [CV] ......................................... gamma=10, total= 0.0s [CV] gamma=100 ....................................................... [CV] ........................................ gamma=100, total= 0.0s [CV] gamma=100 ....................................................... [CV] ........................................ gamma=100, total= 0.0s [CV] gamma=100 ....................................................... [CV] ........................................ gamma=100, total= 0.0s [CV] gamma=100 ....................................................... [CV] ........................................ gamma=100, total= 0.0s [CV] gamma=100 ....................................................... [CV] ........................................ gamma=100, total= 0.0s ###Markdown The nice thing about this is we can do this for multiple hyperparameters simultaneously as well. 
###Code param_grid = { "gamma": [0.1, 1.0, 10, 100], "C": [0.1, 1.0, 10, 100] } svc = SVC() grid_search = GridSearchCV(svc, param_grid, cv= 5, verbose=2, n_jobs=-1) grid_search.fit(X_train, y_train) ###Output Fitting 5 folds for each of 16 candidates, totalling 80 fits ###Markdown The grid in `GridSearchCV` stands for the way that it’s checking the hyperparameters. Since there 4 options for each, grid search is checking every value in each hyperparameter to one another. That means it’s checking 4 x 4 = 16 different combinations of hyperparameter values for the model. In `GridSearchCV` we can specify the number of folds of cross-validation with the argument `cv`. Since we are specifying `cv=5` that means that fit is called a total of 80 times (16 different combinations x 5 cross-validation folds). Something new we've added here is `n_jobs=-1`. This is a little more complex. Setting this to -1 helps make this process faster by running hyperparameter optimization in parallel instead of in a sequence. Implement with Pipelines ###Code pipe = Pipeline( steps=[ ("imputer", SimpleImputer(strategy="median")), ("scaler", StandardScaler()), ("clf", SVC())]) ###Output _____no_output_____ ###Markdown After specifying the steps in a pipeline, a user must specify a set of values for each hyperparameter in `param_grid` as we did before but this time we specify the name of the step followed by two underscores `__` and the name of the hyperparameter.This is because the pipeline would not know which hyperparameter goes with each step. Does `gamma` correspond to the hyperparameter in `SimpleImputer()` or `StandardScaler()`?This now gives the pipeline clear instructions on which hyperparameters correspond with which step. ###Code param_grid = { "clf__gamma": [0.1, 1.0, 10, 100], "clf__C": [0.1, 1.0, 10, 100] } ###Output _____no_output_____ ###Markdown Notice that we named our steps in the pipeline, so `clf` corresponds to the model initialization of the SVM classifier. If we used `make_pipeline()` remember that the function names the steps by default the lower case name of each transformation or model. ###Code pipe = make_pipeline(SimpleImputer(strategy="median"), StandardScaler(), SVC()) pipe param_grid = { "svc__gamma": [0.1, 1.0, 10, 100], "svc__C": [0.1, 1.0, 10, 100] } ###Output _____no_output_____ ###Markdown Now when we initiate `GridSearchCV`, we set the first argument to the pipeline name instead of the model name this time. ###Code grid_search = GridSearchCV(pipe, param_grid, cv=5, return_train_score=True, verbose=2, n_jobs=-1) grid_search.fit(X_train, y_train); ###Output Fitting 5 folds for each of 16 candidates, totalling 80 fits ###Markdown Looking a bit closer these are the steps being performed with `GridSearchCV`. ```for gamma in [0.1, 1.0, 10, 100]: for C in [0.1, 1.0, 10, 100]: for fold in folds: fit in training portion with the given C and gamma score on validation portion compute average score pick hyperparameters with the best score```In this case, we can see from the output that 80 executions are done, just like we calculated (4 x 4 x 5 = 80). Why a grid? If we fix `C` with a value of 1 and loop over the values of 1, 10 and 100 for `gamma`.This results in `100` having the best score with 0.82. Next, we fix `gamma` at `100` since that was what we found was the most optimal when `C` was equal to 1. When we loop over the values of 1, 10 and 100 for `C` we get the most optimal value to be 10. So naturally, we would pick the values `100` for `gamma` and `10` for `C`. 
HOWEVER - if we had performed every possible combination, we would have seen that the optimal values would have actually been `10` for both `gamma` and `C`. The same thing is shown if we did it the other way around, first fixing `gamma` at a value of 1 and then looping over all possible values of `C`. This time the most optimal combination is `gamma` equal to 1 and `C` equal to 100 which is again not the optimal value of 10 for each. This is why it is so important not to fix either of the hyperparameters since it won’t necessarily help you find the most optimal values. Now what?How do we know what the best hyperparameter values are after fitting?We can extract the best hyperparameter values with `.best_params_` and their corresponding score with `.best_score_`. ###Code grid_search.best_params_ grid_search.best_score_ ###Output _____no_output_____ ###Markdown We can extract the optimal classifier inside with `.best_estimator_`. ###Code best_model = grid_search.best_estimator_ ###Output _____no_output_____ ###Markdown This has already been fully fitted on with all the data and not just a portion from cross-validation so all we need to do is score! ###Code best_model.score(X_train, y_train) best_model.score(X_test, y_test) ###Output _____no_output_____ ###Markdown We can either save it as a new model and fit and score on this new one *or* we can use the `grid_search` object directly and it will by default score using the optimal model. These both give the same results. ###Code grid_search.score(X_train, y_train) grid_search.score(X_test, y_test) ###Output _____no_output_____ ###Markdown The same can be done for `.predict()` as well, either using the saved model or using the `grid_search` object directly. ###Code best_model.predict(X_test) grid_search.predict(X_test) ###Output _____no_output_____ ###Markdown Notice any problems? This seems pretty nice and obeys the golden rule however the new problem is the execution time. Think about how much time it would take if we had 5 hyperparameters each with 10 different values.That would mean we would be needing to call `cross_validate()` 100,000 times!Exhaustive search may become infeasible fairly quickly.**Enter randomized hyperparameter search!** Randomized hyperparameter optimization ###Code from sklearn.model_selection import RandomizedSearchCV param_grid = { "svc__gamma": [0.1, 1.0, 10, 100], "svc__C": [0.1, 1.0, 10, 100] } random_search = RandomizedSearchCV(pipe, param_grid, cv=5, verbose=2, n_jobs=-1, n_iter=10) random_search.fit(X_train, y_train); ###Output Fitting 5 folds for each of 10 candidates, totalling 50 fits ###Markdown Notice that we use the same arguments in `RandomizedSearchCV()` as in `GridSearchCV()` however with 1 new addition - `n_iter`. This argument gives us more control and lets us restrict how many candidates are searched over. `GridSearchCV()` conducts `cross_validate()` on every single possible combination of the hyperparameters specified in `param_grid`. Now we can change that and control that using `n_iter` which will pick a random subset containing the specified number of combinations.The last time when we used exhaustive grid search, we had 80 fits (4 x 4 x 5). This time we see only 50 fits (10 combinations instead of 16 and 5 folds)! Continuous values for hyperparameter tuning - optionalFor randomized grid search we can search over a range of continuous values instead of discrete values like in `GridSearchCV()`. We can specify a range of values instead of a list of values for each hyperparameter. 
###Code import scipy param_grid = { "svc__C": scipy.stats.uniform(0, 100), "svc__gamma": scipy.stats.uniform(0, 100)} random_gs = RandomizedSearchCV(pipe, param_grid, n_jobs=-1, cv=10, return_train_score=True, n_iter=10) random_gs.fit(X_train, y_train); random_gs.best_params_ random_gs.best_score_ random_gs.score(X_test, y_test) ###Output _____no_output_____ ###Markdown **How differently does exhaustive and random search score?** ###Code grid_search.score(X_test, y_test) random_search.score(X_test, y_test) ###Output _____no_output_____ ###Markdown Here, (and often) they produce similar scores. The problem with hyperparameter tuning - overfitting the validation setSince we are repeating cross-validation over and over again, it’s not necessarily unseen data anymore.This may produce overly optimistic results. If our dataset is small and if our validation set is hit too many times, we suffer from **optimization bias** or **overfitting the validation set**. Example: overfitting the validation setAttribution: [Mark Scmidt](https://www.cs.ubc.ca/~schmidtm/)This exercise helps explain the concept of overfitting on the validation set.Consider a multiple-choice (a,b,c,d) "test" with 10 questions:- If you choose answers randomly, the expected grade is 25% (no bias).- If you fill out two tests randomly and pick the best, the expected grade is 33%. - overfitting ~8%.- If you take the best among 10 random tests, the expected grade is ~47%.- If you take the best among 100, the expected grade is ~62%.- If you take the best among 1000, the expected grade is ~73%. - You have so many "chances" that you expect to do well. **But on new questions, the "random choice" accuracy is still 25%.** ###Code # Code attributed to Rodolfo Lourenzutti number_tests = [1, 2, 10, 100, 1000] for ntests in number_tests: y = np.zeros(10000) for i in range(10000): y[i] = np.max(np.random.binomial(10.0, 0.25, ntests)) print( "The expected grade among the best of %d tests is : %0.2f" % (ntests, np.mean(y) / 10.0) ) ###Output The expected grade among the best of 1 tests is : 0.25 The expected grade among the best of 2 tests is : 0.33 The expected grade among the best of 10 tests is : 0.47 The expected grade among the best of 100 tests is : 0.62 The expected grade among the best of 1000 tests is : 0.74 ###Markdown If we instead used a 100-question test then: - Expected grade from best over 1 randomly-filled tests is 25%.- Expected grade from best over 2 randomly-filled tests is ~27%.- Expected grade from best over 10 randomly-filled tests is ~32%.- Expected grade from best over 100 randomly-filled tests is ~36%.- Expected grade from best over 1000 randomly-filled tests is ~40%. ###Code # Code attributed to Rodolfo Lourenzutti number_tests = [1, 2, 10, 100, 1000] for ntests in number_tests: y = np.zeros(10000) for i in range(10000): y[i] = np.max(np.random.binomial(100.0, 0.25, ntests)) print( "The expected grade among the best of %d tests is : %0.2f" % (ntests, np.mean(y) / 100.0) ) ###Output The expected grade among the best of 1 tests is : 0.25 The expected grade among the best of 2 tests is : 0.27 The expected grade among the best of 10 tests is : 0.32 The expected grade among the best of 100 tests is : 0.36 The expected grade among the best of 1000 tests is : 0.40 ###Markdown The optimization bias **grows with the number of things we try**. But, optimization bias **shrinks quickly with the number of examples**. But it’s still non-zero and growing if you over-use your validation set! 
Essentially our odds of doing well on a multiple-choice exam (if we are guessing) increases the more times we can repeat and randomly take the exam again. Because we have so many chances you’ll eventually do well and perhaps this is not representative of your knowledge (remember you are randomly guessing) The same occurs with selecting hyperparameters. The more hyperparameters values and combinations we try, the more likely we will randomly get a better scoring model by chance and not because the model represents the data well. This overfitting can be decreased somewhat by increasing the number of questions or in our case, the number of examples we have. TLDR: If your test score is lower than your validation score, it may be because did so much hyperparameter optimization that you got lucky and the bigger data set that you have, the better. Let's Practice1\. Which method will attempt to find the optimal hyperparameter for the data by searching every combination possible of hyperparameter values given? 2\. Which method gives you fine-grained control over the amount of time spent searching? 3\. If I want to search for the most optimal hyperparameter values among 3 different hyperparameters each with 3 different values how many trials of cross-validation would be needed? $x= [1,2,3]$ $y= [4,5,6]$ $z= [7,8,9]$ **True or False** 4\. A Larger `n_iter` will take longer but will search over more hyperparameter values. 5\. Automated hyperparameter optimization can only be used for multiple hyperparameters. ```{admonition} Solutions!:class: dropdown1. Exhaustive Grid Search (`GridSearchCV`)2. Randomized Grid Search (`RandomizedSearchCV`)3. $3 * 3 * 3 = 27$4. True5. False``` Let's Practice - Coding We are going to practice grid search using our basketball dataset that we have seen before. ###Code # Loading in the data bball_df = pd.read_csv('data/bball.csv') bball_df = bball_df[(bball_df['position'] =='G') | (bball_df['position'] =='F')] # Define X and y X = bball_df.loc[:, ['height', 'weight', 'salary']] y = bball_df['position'] # Split the dataset X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=7) bb_pipe = Pipeline( steps=[("imputer", SimpleImputer(strategy="median")), ("scaler", StandardScaler()), ("knn", KNeighborsClassifier())]) ###Output _____no_output_____
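###Markdown One possible way to finish this practice exercise (this sketch is not part of the original lecture) is to tune the KNN step of `bb_pipe` with `GridSearchCV`. The candidate values below are arbitrary choices for illustration; the important detail is that the `knn__` prefix matches the name we gave that step in the pipeline. ###Code
from sklearn.model_selection import GridSearchCV

# Illustrative grid for the KNN step of bb_pipe (values are example choices, not prescribed by the lecture)
param_grid = {
    "knn__n_neighbors": [1, 5, 10, 20, 30, 40, 50],
    "knn__weights": ["uniform", "distance"],
}

bb_grid = GridSearchCV(bb_pipe, param_grid, cv=5, n_jobs=-1, return_train_score=True)
bb_grid.fit(X_train, y_train)

# Best hyperparameter values and their mean cross-validation score
print(bb_grid.best_params_)
print(bb_grid.best_score_)

# The refitted best model can then be scored on the test set
bb_grid.score(X_test, y_test)
###Output _____no_output_____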
soln/test_units_off.ipynb
###Markdown Modeling and Simulation in PythonCopyright 2017 Allen DowneyLicense: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0) ###Code # Configure Jupyter so figures appear in the notebook %matplotlib inline # Configure Jupyter to display the assigned value after an assignment %config InteractiveShell.ast_node_interactivity='last_expr_or_assign' # import functions from the modsim.py module from modsim import * ###Output _____no_output_____ ###Markdown Low pass filter ###Code with units_off(): for i, name in enumerate(dir(UNITS)): unit = getattr(UNITS, name) try: res = 1*unit - 1 if res == 0: print(name, 1*unit - 1) except TypeError: pass if i > 10000: break with units_off(): print(2 * UNITS.farad - 1) with units_off(): print(2 * UNITS.volt - 1) with units_off(): print(2 * UNITS.newton - 1) mN = UNITS.gram * UNITS.meter / UNITS.second**2 with units_off(): print(2 * mN - 1) ###Output 1.0 dimensionless ###Markdown Now I'll create a `Params` object to contain the quantities we need. Using a Params object is convenient for grouping the system parameters in a way that's easy to read (and double-check). ###Code params = Params( R1 = 1e6, # ohm C1 = 1e-9, # farad A = 5, # volt f = 1000, # Hz ) ###Output _____no_output_____ ###Markdown Now we can pass the `Params` object to `make_system`, which computes some additional parameters and defines `init`.`make_system` uses `R1` and `C1` to compute the circuit's time constant and cutoff frequency, and uses the input frequency `f` to set `omega` and the simulation end time `t_end`. ###Code def make_system(params): """Makes a System object for the given conditions. params: Params object returns: System object """ unpack(params) init = State(V_out = 0) omega = 2 * np.pi * f tau = R1 * C1 cutoff = 1 / R1 / C1 t_end = 3 / f return System(params, init=init, t_end=t_end, omega=omega, cutoff=cutoff) ###Output _____no_output_____ ###Markdown Let's make a `System` ###Code system = make_system(params) ###Output _____no_output_____ ###Markdown Here's the slope function, ###Code def slope_func(state, t, system): """Compute derivatives of the state. state: output voltage V_out t: time system: System object returns: derivative of V_out """ V_out, = state unpack(system) V_in = A * np.cos(omega * t) V_R1 = V_in - V_out I_R1 = V_R1 / R1 I_C1 = I_R1 dV_out = I_C1 / C1 return dV_out ###Output _____no_output_____ ###Markdown As always, let's test the slope function with the initial conditions. ###Code slope_func(system.init, 0, system) ###Output _____no_output_____ ###Markdown And then run the simulation. ###Code ts = linspace(0, system.t_end, 301) results, details = run_ode_solver(system, slope_func, t_eval=ts) details ###Output The solver successfully reached the end of the integration interval. ###Markdown Here are the results. ###Code # results ###Output _____no_output_____ ###Markdown Here's the plot of the output voltage $V_{out}$ as a function of time. 
###Code def plot_results(results): xs = results.V_out.index ys = results.V_out.values t_end = get_last_label(results) if t_end < 10: xs *= 1000 xlabel = 'Time (ms)' else: xlabel = 'Time (s)' plot(xs, ys) decorate(xlabel=xlabel, ylabel='$V_{out}$ (volt)', legend=False) plot_results(results) ###Output _____no_output_____ ###Markdown Now let's sweep the input frequency and plot $V_{out}$ for each value of `f`: ###Code fs = [1, 10, 100, 1000, 10000, 100000] for i, f in enumerate(fs): system = make_system(Params(params, f=f)) ts = linspace(0, system.t_end, 301) results, details = run_ode_solver(system, slope_func, t_eval=ts) subplot(3, 2, i+1) plot_results(results) ###Output The solver successfully reached the end of the integration interval. The solver successfully reached the end of the integration interval. The solver successfully reached the end of the integration interval. The solver successfully reached the end of the integration interval. The solver successfully reached the end of the integration interval. The solver successfully reached the end of the integration interval.
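###Markdown As a quick cross-check (not part of the original notebook), the steady-state amplitude seen in the frequency sweep above should roughly follow the analytic gain of a first-order RC low-pass filter, $|H(f)| = 1/\sqrt{1 + (f/f_c)^2}$ with $f_c = 1/(2\pi R_1 C_1)$. The sketch below only uses NumPy and repeats the parameter values from the `Params` object, so it stands on its own; agreement is approximate because each simulation runs for just three input cycles. ###Code
import numpy as np

# Same component values as in the Params object above
R1 = 1e6    # ohm
C1 = 1e-9   # farad
A = 5       # volt (input amplitude)

f_c = 1 / (2 * np.pi * R1 * C1)   # cutoff frequency in Hz (about 159 Hz)

# Analytic output amplitude for each frequency used in the sweep
fs = np.array([1, 10, 100, 1000, 10000, 100000])
gain = 1 / np.sqrt(1 + (fs / f_c) ** 2)
for f, g in zip(fs, gain):
    print(f"f = {f:>6d} Hz   expected steady-state amplitude ~ {A * g:.3f} V")
###Output _____no_output_____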
Introduction to Data Science/Removing Values.ipynb
###Markdown Removing ValuesYou have seen:1. sklearn break when introducing missing values2. reasons for dropping missing valuesIt is time to make sure you are comfortable with the methods for dropping missing values in pandas. You can drop values by row or by column, and you can drop based on whether **any** value is missing in a particular row or column or **all** are values in a row or column are missing.A useful set of many resources in pandas is available [here](https://chrisalbon.com/). Specifically, Chris takes a close look at missing values [here](https://chrisalbon.com/python/data_wrangling/pandas_dropping_column_and_rows/). Another resource can be found [here](https://stackoverflow.com/questions/13413590/how-to-drop-rows-of-pandas-dataframe-whose-value-in-certain-columns-is-nan). ###Code import numpy as np import pandas as pd import RemovingValues as t import matplotlib.pyplot as plt %matplotlib inline small_dataset = pd.DataFrame({'col1': [1, 2, np.nan, np.nan, 5, 6], 'col2': [7, 8, np.nan, 10, 11, 12], 'col3': [np.nan, 14, np.nan, 16, 17, 18]}) small_dataset ###Output _____no_output_____ ###Markdown Question 1**1.** Drop any row with a missing value. ###Code all_drop = small_dataset.dropna()# Drop any row with a missing value #print result all_drop t.all_drop_test(all_drop) #test ###Output Nice job! That looks right! ###Markdown Question 2**2.** Drop only the row with all missing values. ###Code all_row = small_dataset.dropna(how='all')# Drop only rows with all missing values #print result all_row t.all_row_test(all_row) #test ###Output Nice job! That looks right! ###Markdown Question 3**3.** Drop only the rows with missing values in column 3. ###Code only3_drop =small_dataset.dropna(how='any',subset=['col3']) # Drop only rows with missing values in column 3 #print result only3_drop t.only3_drop_test(only3_drop) #test ###Output Nice job! That looks right! ###Markdown Question 4**4.** Drop only the rows with missing values in column 3 or column 1. ###Code only3or1_drop = small_dataset.dropna(how='any',subset=['col1','col3'])# Drop rows with missing values in column 1 or column 3 #print result only3or1_drop t.only3or1_drop_test(only3or1_drop) #test ###Output Nice job! That looks right!
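###Markdown The exercises above all drop rows. As an extra illustration (not part of the original questions), `dropna` can also drop by column with `axis=1`, and the `thresh` parameter keeps only rows (or columns) with at least a given number of non-missing values. ###Code
# Drop any column with a missing value. Every column of small_dataset contains at least one NaN,
# so this removes all three columns and leaves an empty frame with 6 rows.
col_drop = small_dataset.dropna(axis=1)
print(col_drop.shape)

# Keep only rows that have at least 2 non-missing values (only the all-NaN row is removed here)
thresh_drop = small_dataset.dropna(thresh=2)
thresh_drop
###Output _____no_output_____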
mars_demo.ipynb
###Markdown First, we import the necessary TOM Toolkit classes: ###Code from tom_alerts.brokers.mars import MARSQueryForm, MARSBroker from tom_alerts.models import BrokerQuery ###Output _____no_output_____ ###Markdown We can instantiate the `MARSQueryForm` class with the desired parameters, and calling `is_valid()` ensures that our datatypes are consistent with what's specified in the form class. As you can see, we end up with a dictionary of our query parameters. ###Code mars_form = MARSQueryForm({ 'time__since': 864000, 'objectcone': 'm101, 5', 'rb__gte': 0.95 }) mars_form.is_valid() print(mars_form.cleaned_data) ###Output _____no_output_____ ###Markdown Now, we can go ahead and run the query on MARS. With our `MARSQueryForm` instance, it's as simple as instantiating the `MARSBroker` and calling `fetch_alerts()` on the cleaned data. ###Code mars = MARSBroker() alerts = mars.fetch_alerts(mars_form.cleaned_data) print(sum(1 for _ in alerts)) ###Output _____no_output_____ ###Markdown With the TOM Toolkit, we can also store our query in our TOM as a `BrokerQuery` object with just a few parameters--a name for the query, the name of the `MARSBroker`, and the query parameters. ###Code query = BrokerQuery.objects.create( name='Test Query', broker=mars.name, parameters=mars_form.serialize_parameters() ) ###Output _____no_output_____ ###Markdown It's now trivial to simply get the query out of the database, and run it again on MARS: ###Code query = BrokerQuery.objects.filter(name='Test Query', broker=mars.name)[0] alerts = mars.fetch_alerts(query.parameters_as_dict) print(sum(1 for _ in alerts)) ###Output _____no_output_____ ###Markdown With the available alerts from a query, we can convert each of them to a TOM `Target` object and store them in our local TOM DB. ###Code alerts = mars.fetch_alerts(query.parameters_as_dict) for alert in alerts: mars.to_target(alert) ###Output _____no_output_____ ###Markdown And as expected, we now have as many `Target` objects in our TOM as we had alerts, which can be used for later reference. ###Code from tom_targets.models import Target print(len(Target.objects.all())) ###Output _____no_output_____
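###Markdown As a final check (not part of the original demo), we can loop over the stored targets. `Target` is a Django model, so the usual queryset methods such as iteration and `count()` are available; printing an instance relies on the model's string representation, so no particular field names are assumed here. ###Code
# List the Target records that were created from the MARS alerts
for target in Target.objects.all():
    print(target)

# Equivalent count without loading every object into memory
print(Target.objects.count())
###Output _____no_output_____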
old/analysis.ipynb
###Markdown Flip Flop Analysis ###Code def rhc(problem_fit, problem_name, max_iters= 100): start = time.time() fitness_score = mlrose.random_hill_climb(problem_fit, max_attempts=100, max_iters= max_iters, restarts=10, random_state = rs)[1] return [max_iters, "random_hill_climb", problem_name,fitness_score, time.time()-start] def sa(problem_fit, problem_name, max_iters= 100): start = time.time() fitness_score = mlrose.simulated_annealing(problem_fit, max_attempts=100, max_iters= max_iters, random_state = rs)[1] return [max_iters, "simulated_annealing", problem_name,fitness_score, time.time()-start] def ga(problem_fit, problem_name, max_iters= 100): start = time.time() fitness_score = mlrose.genetic_alg(problem_fit, max_attempts=100, max_iters= max_iters, pop_size= 200, mutation_prob=0.1, random_state = rs)[1] return [max_iters, "genetic_alg", problem_name,fitness_score, time.time()-start] def mimic(problem_fit, problem_name, max_iters= 100): start = time.time() fitness_score = mlrose_old.mimic(problem_fit, pop_size=200, keep_pct=0.2, max_attempts=10, max_iters=max_iters, curve=False, random_state=rs, fast_mimic=True)[1] return [max_iters, "mimic", problem_name,fitness_score, time.time()-start] fitness = mlrose.FlipFlop() results = [] edges = [(0, 1), (0, 2), (0, 4), (1, 3), (2, 0), (2, 3), (3, 4)] problems_name = ["Flip Flop", "One Max", "Max-K Color"] fitness_functions = [mlrose.FlipFlop(), mlrose.OneMax(), mlrose.MaxKColor(edges)] problems = [mlrose.DiscreteOpt(length = 100, fitness_fn = fitness_function, maximize=True, max_val = 2) for fitness_function in fitness_functions] for j in range(len(problems)): for i in range(0, 100, 10): results.append(rhc(problems[j], problems_name[j], max_iters= i)) results.append(sa(problems[j], problems_name[j], max_iters= i)) results.append(ga(problems[j], problems_name[j], max_iters= i)) results.append(mimic(problems[j], problems_name[j], max_iters= i)) print(i, end=" ") df = pd.DataFrame(results, columns=["Iteration", "Algorithm", "Problem","Fitness", "Time"]) sns.lineplot(data=df[df['Problem']==problems_name[2]], x="Iteration", y="Fitness", hue="Algorithm") sns.lineplot(data=df[df['Problem']==problems_name[1]], x="Iteration", y="Time", hue="Algorithm") df.to_csv("problems.csv", index=False) ###Output _____no_output_____
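###Markdown For completeness (this cell is not in the original notebook), the CSV written above can be reloaded to inspect the remaining curves, for example fitness versus iteration for the Flip Flop problem. The imports are repeated so the cell stands on its own. ###Code
import pandas as pd
import seaborn as sns

# Reload the saved results and plot fitness for the Flip Flop problem
df_saved = pd.read_csv("problems.csv")
sns.lineplot(data=df_saved[df_saved['Problem'] == "Flip Flop"],
             x="Iteration", y="Fitness", hue="Algorithm")
###Output _____no_output_____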
Muriel/TidalEllipses/JuandeFucaEllipses.ipynb
###Markdown This notebook will be used to look at and understand the model output currents through Juan de Fuca with the use of ellipses ###Code import os import datetime import matplotlib.pylab as plt from matplotlib.patches import Ellipse import numpy as np from IPython.display import display, Math, Latex import csv import pandas as pd import subprocess as sp from subprocess import Popen, PIPE import seaborn as sns import netCDF4 as nc from salishsea_tools import viz_tools %matplotlib inline font = {'family' : 'Arial', 'weight' : 'normal', 'size' : 20} axisfont = {'family' : 'Arial', 'weight' : 'light', 'size' : 16} ###Output _____no_output_____ ###Markdown Which grid poitns are we interested in? ###Code thalweg = np.loadtxt('/data/dlatorne/MEOPAR/tools/bathymetry/thalweg_working.txt', dtype=int, unpack=True) grid = '/data/nsoontie/MEOPAR/NEMO-forcing/grid/bathy_meter_SalishSea2.nc' fB = nc.Dataset(grid) bathy = fB.variables['Bathymetry'][:] lons = fB.variables['nav_lon'][:] lats = fB.variables['nav_lat'][:] ###Output _____no_output_____ ###Markdown Forman locations ###Code #Juan de Fuca West iJW=8 jJW=408 #Juan de Fuca East iJE = 151 jJE = 275 cmap = plt.get_cmap('winter_r') cmap.set_bad('burlywood') fig, ax = plt.subplots(1,1) plt.pcolormesh(bathy, cmap=cmap) plt.plot(iJW,jJW,'ro') plt.plot(iJE,jJE,'ro') plt.plot(thalweg[1,0:350:25],thalweg[0,0:350:25], 'yo') plt.axis([0,220,200,450]) ax.set_title('Juan de Fuca', **font) sns.set_style('darkgrid') lats[jJW,iJW], lons[jJW, iJW], lats[jJE,iJE], lons[jJE, iJE] phi = 29 fig, ax = plt.subplots(1,1, figsize=(8,8)) viz_tools.set_aspect(ax) ax.contourf(lons,lats,bathy.data,cmap=plt.get_cmap('winter_r')) ax.plot(lons[jJW, iJW], lats[jJW, iJW],'ro') ax.plot(lons[jJE, iJE], lats[jJE, iJE],'ro') ax.plot(lons[thalweg[0,0:350:25],thalweg[1,0:350:25]], lats[thalweg[0,0:350:25],thalweg[1,0:350:25]], 'mo') viz_tools.plot_land_mask(ax, fB, coords='map') viz_tools.plot_coastline(ax, fB, coords='map', isobath=5) ax.set_xlim([lons[jJW, iJW]-0.12, lons[jJE, iJE]+0.3]) ax.set_ylim([lats[jJE, iJE]-0.18, lats[jJW, iJW]+0.18]) ax.set_ylabel('Latitude (degrees N)', **axisfont) ax.set_xlabel('Longitude (degrees W)', **axisfont) ax.set_title('Juan de Fuca Strait', **font) ax.tick_params(axis='both', which='major', labelsize=14) ###Output _____no_output_____ ###Markdown Load parameters ###Code to = datetime.datetime(2014,11,26) tf = datetime.datetime(2015, 7, 13) t_o = to.strftime('%d%b%y').lower() t_f = tf.strftime('%d%b%y').lower() loc = np.arange(0,350,25) count = np.arange(0,len(loc)) parj = np.zeros((len(loc),39,5)) parj_av = np.zeros((len(loc),8)) cols=np.arange(0,5) i = np.zeros((14)) j = np.zeros((14)) for k,l in zip(loc,count): i = thalweg[1,loc] j = thalweg[0,loc] runname1 = '{}_{}_JdF_{}'.format(t_o, t_f, k) runname2 = '{}_{}_JdF_{}_depav(20-400)'.format(t_o, t_f, k) df1 = pd.read_csv('/ocean/mdunn/MEOPAR/analysis/Muriel/TidalEllipseData/JuandeFuca/'+runname1+'.csv', usecols=cols) df2 = pd.read_csv('/ocean/mdunn/MEOPAR/analysis/Muriel/TidalEllipseData/JuandeFuca/'+runname2+'.csv', index_col=False) parj[l,:,:] = df1.as_matrix() parj_av[l,:] = df2.as_matrix() ###Output _____no_output_____ ###Markdown Plot model ellipses ###Code # Set up the figure and axes fig, (axl, axcb) = plt.subplots(1, 2, figsize=(20, 10)) land_colour = 'burlywood' axl.set_axis_bgcolor(land_colour) axl.set_position((0.125, 0.125, 0.6, 0.775)) axcb.set_position((0.73, 0.125, 0.02, 0.775)) smin, smax, dels = 29, 37, 0.5 cmap = plt.get_cmap('Blues') cmap.set_bad(land_colour) tracers = 
nc.Dataset('/ocean/dlatorne/MEOPAR/SalishSea/results/spin-up/18sep27sep/SalishSea_1d_20030918_20030927_grid_T.nc') sal = tracers.variables['vosaline'] npsal = sal[:] zlevels = tracers.variables['deptht'] sal_0 = npsal[-1, :, thalweg[0], thalweg[1]] sal_tzyx = np.ma.masked_values(sal_0, 0) x, z = np.meshgrid(np.arange(thalweg.shape[1]), zlevels) mesh = axl.pcolormesh(x, z, sal_tzyx.T, cmap=cmap, vmin=smin, vmax=smax) cbar = plt.colorbar(mesh, cax=axcb) cbar.set_label('Practical Salinity') scale = 15 for c, ind in zip(count, loc): for dep, value in zip(np.arange(0,39), parj[0,:,0]): if parj[c,dep,2] > 0: thec = 'b' else: thec = 'r' ellsc = Ellipse(xy=(ind, value), width=scale*parj[c,dep,1], height=scale*parj[c,dep,2], angle=parj[c, dep,3], color=thec) axl.add_artist(ellsc) ellsc.set_facecolor(thec) axl.set_ylim(-2,400) axl.invert_yaxis() axl.set_xlim(-5,355) axl.set_title('Tidal ellipses in Juan de Fuca') axl.set_ylabel('Depth (m)') axl.set_xlabel('Distance along the thalweg') ###Output _____no_output_____ ###Markdown Get Foreman values ###Code i = thalweg[1,loc] j = thalweg[0,loc] lon = lons[j,i] lat = lats[j,i] parjf = np.zeros((14,8)) os.chdir(r'/ocean/mdunn/Tides/') for k in count: p = Popen(["matlab","-nodesktop","-nodisplay","-r", "findparam(%s,%s)" % (lon[k], lat[k])], stdout=PIPE, stderr=PIPE) output, err = p.communicate() parjf[k] = np.loadtxt('output.txt') ###Output _____no_output_____ ###Markdown Compare modelsMaps ###Code csv1 = pd.read_csv('/ocean/mdunn/MEOPAR/analysis/Muriel/param_mat.csv', index_col=False) param_mat = csv1.as_matrix() csv2 = pd.read_csv('/ocean/mdunn/MEOPAR/analysis/Muriel/params.csv', index_col=False) params = csv2.as_matrix() phi=29 fig = plt.figure(figsize=(20,10)) k = np.zeros((898,398)); m = np.zeros((898,398)) ax = fig.add_subplot(111) viz_tools.set_aspect(ax) ex = 2 scale = 25 imin = min(i) imax = max(i) jmin = min(j) jmax = 425 for q in np.arange(jmin-ex,jmax+ex): for l in np.arange(imin-ex,imax+ex): k[q,l] = q*np.cos(phi*np.pi/180.)+l*np.sin(phi*np.pi/180.) m[q,l] = -q*np.sin(phi*np.pi/180.)+l*np.cos(phi*np.pi/180.) 
for index, x, y in zip(count, i ,j): if parj_av[index,1] > 0: thec = 'b' else: thec = 'r' ellsc = Ellipse(xy=(m[y,x],k[y,x]), width=scale*parj_av[index,0], height=scale*parj_av[index,1], angle=parj_av[index,2], color=thec) ax.add_artist(ellsc) ellsc.set_facecolor(thec) if parjf[index,1] > 0: thec = [0,0,0.6, 0.3] else: thec = [0.6,0,0, 0.3] ellsc = Ellipse(xy=(m[y,x],k[y,x]), width=scale*parjf[index,0], height=scale*parjf[index,1], angle=parjf[index,2], color=thec) ax.add_artist(ellsc) ellsc.set_facecolor(thec) #Plot the Haro Strait current meter location ax.plot(m[jJE,iJE], k[jJE,iJE],'g^', markersize=12) ax.plot(m[jJW,iJW], k[jJW,iJW],'g^', markersize=12) #Values from EllipsesComparisonswithForemanModel notebook [Salishparam,Foremanparam] for ind, x, y in zip([0,1], [iJW,iJE], [jJW,jJE]): if params[ind,1] > 0: thec = 'b' else: thec = 'r' ellsc = Ellipse(xy=(m[y,x],k[y,x]), width=scale*params[0,0], height=scale*params[0,1], angle=params[0,2], color=thec) ax.add_artist(ellsc) ellsc.set_facecolor(thec) if param_mat[ind,1] > 0: thec = [0,0,0.6, 0.3] else: thec = [0.6,0,0, 0.3] ellsc = Ellipse(xy=(m[y,x],k[y,x]), width=scale*param_mat[ind,0], height=scale*param_mat[ind,1], angle=param_mat[ind,2], color=thec) ax.add_artist(ellsc) ellsc.set_facecolor(thec) # y-axis in k, but labelled in latitude ax.set_ylim(280,395) slope = (lats[jmax,imax]-lats[jmin,imin])/(k[jmax,imax]-k[jmin,imin]) mylist = (k[jmax,imax]+(np.arange(47.13, 47.8, 0.1)- lats[jmin,imin])/slope).tolist() labels = ['48.3','48.35', '48.4', '48.45', '48.5', '48.55'] ax.set_yticks(mylist) ax.set_yticklabels(labels) ax.set_ylabel('Latitude (degrees N)') #x-axis in m, but labelled in longitude ax.set_xlim(0,60) slope = (lons[jmax,imax]-lons[jmin,imin])/(m[jmax,imax]-m[jmin,imin]) mylist = (m[jmax,imax]+(np.arange(-125.01,-124.5,0.5)- lons[jmin,imin])/slope).tolist() labels = ['124.75','124.25',] ax.set_xticks(mylist) ax.set_xticklabels(labels) ax.set_xlabel('Longitude (degrees W)') # land, and 5 m contour contour_interval = [-0.01, 0.01] ax.contourf(m[jmin-ex:jmax+ex,imin-ex:imax+ex],k[jmin-ex:jmax+ex,imin-ex:imax+ex], bathy.data[jmin-ex:jmax+ex,imin-ex:imax+ex],contour_interval,colors='black') ax.contour(m[jmin-ex:jmax+ex,imin-ex:imax+ex],k[jmin-ex:jmax+ex,imin-ex:imax+ex], bathy.data[jmin-ex:jmax+ex,imin-ex:imax+ex],[5],colors='black') ax.set_title('Depth averaged M2 tidal ellipses through Juan de Fuca') print "red is clockwise" ###Output red is clockwise ###Markdown Line Graphs ###Code sns.set_style('darkgrid') parameter = ['major-axis', 'minor-axis', 'inclination'] unit = ['m/s' ,'m/s','deg'] JWobs = 0.39 JEobs = 0.73 JWpha = [294, 196] JEpha = [304, 21] JWlon = 124.713 JElon = 123.530 model = [0.39, 0.51, 0.79, 0.93] fig, (ax0, ax1, ax2, ax3, ax4)= plt.subplots(5,1,figsize=(20,20), sharex=True) for ax , k, par, u in zip([ax0, ax1, ax2], [0,1,2], parameter, unit): ax.plot(-lon[:],parj_av[:,k], '-', label='Salish Sea Model') ax.plot(-lon[:], parjf[:,k], '-', label='Foreman Model') ax.set_title('Comparing the {}.'.format(par), **font) ax.set_ylabel('{} ({})'.format(par,u), **axisfont) ax.set_xlim([124.9, 123.25]) ax.tick_params(axis='both', which='major', labelsize=14) ax0.plot(JWlon, JWobs, 'm*', markersize = 18) ax0.plot(JElon, JEobs, 'm*', markersize = 18, label='Observations by CM') ax0.plot(JWlon, params[0,0], 'bo') ax0.plot(JElon, params[1,0], 'bo', label = 'Salish Sea Model at CM') ax0.plot(JWlon, param_mat[0,0], 'go') ax0.plot(JElon, param_mat[1,0], 'go', label = 'Foreman Model at CM') ax0.legend(loc=0, fontsize=16) for ax, 
ind ,k in zip([ax3,ax4], [0,1],[3,7]): ax.plot(-lon[:], parj_av[:,k], '-', label='Salish Sea Model') ax.plot(-lon[:],parjf[:,k],'-', label='Foreman Model') ax.plot(JWlon, JWpha[ind], 'm*', markersize = 18) ax.plot(JElon, JEpha[ind], 'm*', markersize = 18, label='Observations by CM') ax.plot(JWlon, params[0,k], 'bo', label = 'Salish Sea Model at CM') ax.plot(JWlon, param_mat[0,k], 'go', label = 'Foreman Model at CM') ax.set_ylabel('Phase (deg)', **axisfont) ax.plot(JElon, params[1,k], 'bo') ax.plot(JElon, param_mat[1,k], 'go') ax.invert_xaxis() ax.set_ylim([0,370]) ax.tick_params(axis='both', which='major', labelsize=14) ax3.set_title('Comparing the M2 Phase', **font) ax4.set_title('Comparing the K1 Phase', **font) ax4.set_xlabel('Longitude', **axisfont) ###Output _____no_output_____
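###Markdown One way to quantify the comparison in the map and line plots (not part of the original notebook) is to tabulate the differences between the depth-averaged Salish Sea model parameters (`parj_av`) and the Foreman values (`parjf`) at the thalweg points. This sketch assumes the same column layout used in the plots above: column 0 is the semi-major axis, 1 the semi-minor axis, 2 the inclination, and 3 the M2 phase; phase differences are raw, with no handling of 360-degree wrap-around. ###Code
import pandas as pd

# Model-minus-Foreman differences at each thalweg point, indexed by longitude (degrees W)
labels = ['major-axis (m/s)', 'minor-axis (m/s)', 'inclination (deg)', 'M2 phase (deg)']
diffs = pd.DataFrame({label: parj_av[:, k] - parjf[:, k] for k, label in enumerate(labels)},
                     index=-lon)
diffs.index.name = 'Longitude (deg W)'

# Summary of how closely the two models agree along Juan de Fuca
diffs.describe().loc[['mean', 'std']]
###Output _____no_output_____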
course_material/03_Regular_Expressions/03_Regex_lab_solution.ipynb
###Markdown Laboratory 03 1. Write regular expressions that match any number of digits greedily. Do not match anything else. ###Code import re patt = re.compile(r'^\d+$') assert(patt.match("123")) assert(patt.match("123ab") is None) ###Output _____no_output_____ ###Markdown 2. Match Hungarian phone numbers. The numbers do not include spaces or hyphens. ###Code patt = re.compile(r'^\+36\d{9}$') assert(patt.match("+36301234567")) assert(patt.match("+37000000000") is None) assert(patt.match("+363012345678") is None) ###Output _____no_output_____ ###Markdown 3. Match Hungarian phone numbers that may be grouped in the following ways. ###Code patt = re.compile(r'^\+36[ -]?\d{2}[ -]?\d{3}[ -]?\d{4}$') assert(patt.match("+36301234567")) assert(patt.match("+37000000000") is None) assert(patt.match("+363012345678") is None) assert(patt.match("+36 30 123 4566")) assert(patt.match("+36-30-123-4566")) assert(patt.match("+36-30-123-45667") is None) ###Output _____no_output_____ ###Markdown 4. Match any floating point numbers. Do not match invalid numbers such as 0.34.1 ###Code patt = re.compile(r'^\d+(\.\d+)?$') assert(patt.match("1.9")) assert(patt.match("1.9.2") is None) ###Output _____no_output_____ ###Markdown 5. Match email addresses. ###Code patt = re.compile(r'^[a-zA-Z._][a-zA-Z0-9._]*@[a-zA-Z0-9._]+\.[a-z]{2,3}$') assert(patt.match("[email protected]")) assert(patt.match("abc") is None) assert(patt.match("abc@[email protected]") is None) ###Output _____no_output_____ ###Markdown 6. Match any number in hexadecimal format. Hexadecimal numbers are prefixed with 0x. ###Code patt = re.compile(r'^0x[a-fA-F0-9]+$') assert(patt.match("0xa")) assert(patt.match("0x16FA")) assert(patt.match("16FA") is None) assert(patt.match("0x16FG") is None) ###Output _____no_output_____ ###Markdown 7. Remove everything between parentheses. Be careful about the next pair of parantheses. ###Code patt = re.compile(r'\([^)]*\)') assert(patt.sub("", "(a)bc") == "bc") assert(patt.sub("", "abc") == "abc") assert(patt.sub("", "a (bc) de (12)") == "a de ") ###Output _____no_output_____ ###Markdown 8. Create a simple word tokenizer using regular expressions. What patterns should you split on? Add more tests. ###Code patt = re.compile(r'[\s.]+') assert(patt.split("simple sentence.") == ["simple", "sentence", ""]) assert(patt.split("multiple \t whitespaces") == ["multiple", "whitespaces"]) ###Output _____no_output_____
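###Markdown The prompt in exercise 8 asks for more tests. Here are a few extra cases (not part of the original solution) that the same whitespace-and-period pattern handles; they follow the assert style used throughout the lab. ###Code
# Additional test cases for the simple tokenizer from exercise 8
assert(patt.split("hello.world test") == ["hello", "world", "test"])
assert(patt.split("ends with spaces   ") == ["ends", "with", "spaces", ""])
assert(patt.split("dots...and spaces") == ["dots", "and", "spaces"])
###Output _____no_output_____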
stochastic_model/Fig_S30_stochastic_model.ipynb
###Markdown Robust nucleation control via crisscross polymerization of highly coordinated DNA slatsDionis Minev, Christopher M. Wintersinger, Anastasia Ershova, William M. Shih Stochastic model simulations for reproducing results used in figure 3G and S30v6.1 at experimental conditions of 40°C, 20 mM MgCl2, and 1 µM each slat, considering only seed-initiated assembly Import dependencies and data ###Code import matplotlib.pyplot as plt %matplotlib inline import numpy as np import pandas as pd import seaborn as sns from scipy.stats import sem from scipy.stats import ks_2samp from scipy.optimize import minimize plt.rcParams['axes.grid'] = True plt.rcParams['grid.linestyle'] = '--' plt.rcParams['font.sans-serif'] = 'Arial' plt.rcParams['lines.linewidth'] = 5 plt.rcParams['lines.markersize'] = 9 plt.rcParams['axes.labelsize'] = 20 plt.rcParams['axes.labelweight'] = 'bold' plt.rcParams['xtick.labelsize'] = 20 plt.rcParams['ytick.labelsize'] = 20 plt.rcParams['figure.figsize'] = [10,8] plt.rcParams['legend.fontsize'] = 18 plt.rcParams['legend.title_fontsize'] = 18 plt.rcParams['axes.titlesize'] = 20 plt.rcParams['axes.titleweight'] = 'bold' plt.rcParams['figure.titlesize'] = 20 plt.rcParams['figure.titleweight'] = 'bold' sns.set_palette('colorblind') filename = 'TEM_measurements_v6_1_40C_20mM_MgCl2_1uM_slat' df = pd.read_csv(f'./data/{filename}.csv') t = [60, 120, 180, 480, 1500] ###Output _____no_output_____ ###Markdown Fit model to data ###Code random_seed = 42 seed_num = 150 mean_lengths = [df.mean()[i]/1000 for i in range(len(t))] # in micrometres sem_lengths = [df.sem()[i]/1000 for i in range(len(t))] # in micrometres filament_counts = [len(df.iloc[:,i].dropna()) for i in range(len(t))] p_term_init = 0.0001 growth_rate_init = 2.5 p_stall_init = 0.05 def run_fitting(variables): timesteps = t[:] # use all data for fitting # scipy.optimize.minimize uses a step size of 5% - too small for our purposes # therefore here I convert a 5% change into a 50% change p_term,growth_rate,p_stall = variables p_term = p_term_init-20*(p_term_init-p_term) growth_rate = growth_rate_init-20*(growth_rate_init-growth_rate) p_stall = p_stall_init-20*(p_stall_init-p_stall) # avoid negative values due to conversion above if p_term < 0: p_term = 0 if growth_rate < 0: growth_rate = 0 if p_stall < 0: p_stall = 0 print(p_term,growth_rate,p_stall) total_filaments = [] for i,step in enumerate(t): # run simulation for all incubation times if i == 0: # initialise simulation for first timestep np.random.seed(random_seed) term_filaments = np.array([]) growing_filaments = np.full(seed_num,0) stall_filaments = np.array([]) prev_step = 0 # perform reaction for length of incubation for ts in np.arange(prev_step,step): recover_inds = [] for j,filament in enumerate(stall_filaments): if np.random.choice([0,1],p=[p_stall,1-p_stall]) == 0: # recover growth of some stalled filaments recover_inds.append(j) growing_filaments = np.append(growing_filaments,stall_filaments[j]) stall_filaments = np.delete(stall_filaments,recover_inds) # grow all filaments growing_filaments = growing_filaments + np.full(len(growing_filaments),growth_rate) term_inds = [] for j,filament in enumerate(growing_filaments): if np.random.choice([0,1],p=[p_term,1-p_term]) == 0: # terminate growth of filaments term_inds.append(j) term_filaments = np.append(term_filaments,growing_filaments[j]) growing_filaments = np.delete(growing_filaments,term_inds) term_inds = [] for j,filament in enumerate(stall_filaments): if np.random.choice([0,1],p=[p_term,1-p_term]) == 0: # 
terminate growth of stalled filaments term_inds.append(j) term_filaments = np.append(term_filaments,stall_filaments[j]) stall_filaments = np.delete(stall_filaments,term_inds) stall_inds = [] for j,filament in enumerate(growing_filaments): if np.random.choice([0,1],p=[p_stall,1-p_stall]) == 0: # pause growth of filaments stall_inds.append(j) stall_filaments = np.append(stall_filaments,growing_filaments[j]) growing_filaments = np.delete(growing_filaments,stall_inds) total_filaments.append(np.concatenate((growing_filaments,term_filaments,stall_filaments),axis=None)) prev_step = step ks = [] for i,step in enumerate(timesteps): data = df.iloc[:,i].dropna() model = total_filaments[i] ks.append(ks_2samp(model,data)[0]) return np.mean(ks) minim_results = minimize(run_fitting,(p_term_init,growth_rate_init,p_stall_init), method='Nelder-Mead',options={'fatol':0.001}) # extract parameters from minimisation and rescale p_term,growth_rate,p_stall = minim_results.x p_term = p_term_init-20*(p_term_init-p_term) growth_rate = growth_rate_init-20*(growth_rate_init-growth_rate) p_stall = p_stall_init-20*(p_stall_init-p_stall) ###Output _____no_output_____ ###Markdown Run model for in-between timesteps ###Code def run_model(t,p_term,growth_rate,p_stall,tem=True): total_filaments = [] for i,step in enumerate(t): # run simulation for all incubation times if i == 0: # initialise simulation for first timestep np.random.seed(random_seed) term_filaments = np.array([]) growing_filaments = np.full(seed_num,0) stall_filaments = np.array([]) prev_step = 0 # perform reaction for length of incubation for ts in np.arange(prev_step,step): recover_inds = [] for j,filament in enumerate(stall_filaments): if np.random.choice([0,1],p=[p_stall,1-p_stall]) == 0: # recover growth of some stalled filaments recover_inds.append(j) growing_filaments = np.append(growing_filaments,stall_filaments[j]) stall_filaments = np.delete(stall_filaments,recover_inds) # grow all filaments growing_filaments = growing_filaments + np.full(len(growing_filaments),growth_rate) term_inds = [] for j,filament in enumerate(growing_filaments): if np.random.choice([0,1],p=[p_term,1-p_term]) == 0: # terminate growth of filaments term_inds.append(j) term_filaments = np.append(term_filaments,growing_filaments[j]) growing_filaments = np.delete(growing_filaments,term_inds) term_inds = [] for j,filament in enumerate(stall_filaments): if np.random.choice([0,1],p=[p_term,1-p_term]) == 0: # terminate growth of stalled filaments term_inds.append(j) term_filaments = np.append(term_filaments,stall_filaments[j]) stall_filaments = np.delete(stall_filaments,term_inds) stall_inds = [] for j,filament in enumerate(growing_filaments): if np.random.choice([0,1],p=[p_stall,1-p_stall]) == 0: # pause growth of filaments stall_inds.append(j) stall_filaments = np.append(stall_filaments,growing_filaments[j]) growing_filaments = np.delete(growing_filaments,stall_inds) total_filaments.append(np.concatenate((growing_filaments,term_filaments,stall_filaments),axis=None)) prev_step = step if tem == True: ks = [] for i,step in enumerate(t): data = df.iloc[:,i].dropna() model = total_filaments[i] ks.append(ks_2samp(model,data)) return total_filaments,ks else: return total_filaments filaments,ks = run_model(t,p_term,growth_rate,p_stall) ###Output _____no_output_____ ###Markdown Evaulate fit by comparing CDFs via KS statistic ###Code ks plt.figure(figsize=[13,20]) for i,step in enumerate(t): plt.subplot(f'32{i+1}') data = df.iloc[:,i].dropna() model = filaments[i] cdf_data = 1. 
* np.arange(len(data))/(len(data)-1) cdf_model = 1. * np.arange(len(model))/(len(model)-1) plt.plot(np.sort(data),cdf_data,'o',label='Data') plt.plot(np.sort(model),cdf_model,'o',label='Model') plt.ylabel('CDF') plt.xlabel('Filament length (nm)') plt.title(f'{step} minutes, KS statistic: {np.round(ks[i][0],6)}',fontsize=15) plt.legend(loc=2) plt.subplots_adjust(hspace=0.3,wspace=0.3) plt.suptitle(filename) extended_times = np.linspace(0,2000,251) extended_filaments = run_model(extended_times,p_term,growth_rate,p_stall,tem=False) p_term growth_rate p_stall ###Output _____no_output_____ ###Markdown Generate plots for figure S30 ###Code bins = [np.linspace(0,max(df.iloc[:,-1])/1000,65),np.linspace(0,max(df.iloc[:,-1])/1000,65), np.linspace(0,max(df.iloc[:,-1])/1000,65),np.linspace(0,max(df.iloc[:,-1])/1000,35), np.linspace(0,max(df.iloc[:,-1])/1000,20)] plt.figure(figsize=[13,13]) plt.subplot(221) model_means = [np.mean(extended_filaments[i])/1000 for i in range(len(extended_times))] model_sems = [sem(extended_filaments[i])/1000 for i in range(len(extended_times))] plt.plot(extended_times/60,model_means,'-',label='Model') plt.fill_between(extended_times/60,np.asarray(model_means)-np.asarray(model_sems), np.array(model_means)+np.array(model_sems),alpha=0.4) plt.errorbar(np.array(t)/60,mean_lengths,yerr=sem_lengths,fmt='o',label='Data',zorder=3) plt.legend() plt.ylabel('Mean length (μm)') plt.xlabel('Assembly time (hours)') plt.annotate('a',xy=(-0.2,1),xycoords='axes fraction',fontsize=35,fontweight='bold',annotation_clip=False) plt.subplot(222) for i,step in enumerate(t): sns.kdeplot(np.array(df.iloc[:,i].dropna())/1000,kernel='gau',ls='-',alpha=1,label=int(step/60)) plt.legend(title='Incubation time (hours)') plt.gca().set_prop_cycle(None) for i,step in enumerate(t): sns.kdeplot(np.array(filaments[i])/1000,kernel='gau',ls='--',alpha=0.6) plt.xlabel('Filament length (μm)') plt.ylabel('Relative count') plt.xlim([0,None]) plt.annotate('b',xy=(-0.2,1),xycoords='axes fraction',fontsize=35,fontweight='bold',annotation_clip=False) plt.subplot(223) for i,step in enumerate(t): sns.distplot(np.array(filaments[i])/1000,kde=False,norm_hist=True,label=int(step/60),hist_kws=dict(alpha=0.65), bins=bins[i]) plt.legend(title='Incubation time (hours)') plt.xlabel('Filament length (μm)') plt.ylabel('Relative count') plt.ylim([0,8.3]) plt.xlim([0,max(df.iloc[:,-1])/1000]) plt.title('Model') plt.annotate('c',xy=(-0.2,1),xycoords='axes fraction',fontsize=35,fontweight='bold',annotation_clip=False) plt.subplot(224) for i,step in enumerate(t): sns.distplot(np.array(df.iloc[:,i].dropna())/1000,kde=False,norm_hist=True,label=int(step/60), hist_kws=dict(alpha=0.65),bins=bins[i]) plt.legend(title='Incubation time (hours)') plt.xlabel('Filament length (μm)') plt.ylabel('Relative count') plt.ylim([0,8.3]) plt.xlim([0,max(df.iloc[:,-1])/1000]) plt.title('Data') plt.annotate('d',xy=(-0.2,1),xycoords='axes fraction',fontsize=35,fontweight='bold',annotation_clip=False) plt.subplots_adjust(hspace=0.3,wspace=0.3) ###Output _____no_output_____
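The `p_term_init - 20*(p_term_init - p_term)` transform inside `run_fitting` can look opaque: it simply amplifies the small relative steps that `scipy.optimize.minimize` takes around the initial guess, as the comment there notes. A tiny worked example with the notebook's `p_term_init` makes the effect concrete (printed values are approximate):

```python
p_term_init = 0.0001

def rescale(p):
    # Same transform as in run_fitting: deviations from the initial value
    # are amplified 20x, so minimize's ~5% steps become ~100% steps.
    return p_term_init - 20 * (p_term_init - p)

print(rescale(p_term_init))         # 0.0001 (unchanged at the initial guess)
print(rescale(p_term_init * 1.05))  # ~0.0002 (a +5% step doubles the value)
print(rescale(p_term_init * 0.95))  # ~0.0    (a -5% step drives it to zero)
```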
Tabular Playground Series - Oct 2021/other/lightgbm-feat-sel-all.ipynb
###Markdown IMPORT DEL CLASS LIST ###Code possible_del_class = [] for featute_sel in PATH_FEATURE: with open(os.path.join(featute_sel, 'feature_dic.pkl'), 'rb') as file: feature_dic = pickle.load(file) possible_del_class += [x for x in ALL_FEATURE_LIST if x not in feature_dic['feature']] possible_del_class = list(sorted(set(possible_del_class))) #CONSTANT FEATURE = feature_dic['feature'] CAT_COL = feature_dic['categorical'] NUMERIC_COL = feature_dic['numerical'] FOLD_LIST = list(range(N_FOLD)) gc.collect() ###Output _____no_output_____ ###Markdown FEATURE SELECTION PHASEIterate over POSSIBLE_DEL_* list and take out 1 feature at time ###Code %%time del_list = feat_selection_pipeline(data = train, feature_list = FEATURE, possible_del_list = possible_del_class, params = PARAMS_LGB_BASE, cat_col = CAT_COL, target_name = TARGET_COL, fold_strat_name = REDUCED_FOLD_NAME, new_learning_rate = .05, verbose_eval = -1, early_stopping_rounds = 5) ###Output ###Markdown TRAIN ###Code del_list FEATURE = [x for x in FEATURE if x not in del_list] CAT_COL = [x for x in CAT_COL if x not in del_list] NUMERIC_COL = [x for x in NUMERIC_COL if x not in del_list] feature_dic = { 'feature': FEATURE, 'categorical': CAT_COL, 'numerical': NUMERIC_COL, } score = 0 model_list = [] for i, fold_ in enumerate(FOLD_LIST): mask_train = (train[FOLD_STRAT_NAME] != fold_) mask_test = (train[FOLD_STRAT_NAME] == fold_) train_x, train_y = train.loc[mask_train, FEATURE], train.loc[mask_train, TARGET_COL] test_x, test_y = train.loc[mask_test, FEATURE], train.loc[mask_test, TARGET_COL] model = lgb.train( PARAMS_LGB_BASE, lgb.Dataset(train_x, label=train_y,categorical_feature=CAT_COL), 100000, valid_sets = lgb.Dataset(test_x, label=test_y,categorical_feature=CAT_COL), valid_names ='validation', verbose_eval=20, early_stopping_rounds = 30, ) #evaluate score and save model for importance/prediction score_fold = model.best_score['validation']['auc'] score += score_fold/N_FOLD model_list.append(model) print('\nFold: {}; Auc: {:.5f}\n'.format(fold_, score_fold)) print('-'*50) print('\n\n\n') gc.collect() print('CV-Auc: {:.5f}\n'.format(score)) ###Output Training until validation scores don't improve for 30 rounds [20] validation's auc: 0.839391 [40] validation's auc: 0.841858 [60] validation's auc: 0.8445 [80] validation's auc: 0.846309 [100] validation's auc: 0.84768 [120] validation's auc: 0.848984 [140] validation's auc: 0.849713 [160] validation's auc: 0.850186 [180] validation's auc: 0.850615 [200] validation's auc: 0.850807 [220] validation's auc: 0.850995 [240] validation's auc: 0.851105 [260] validation's auc: 0.8512 [280] validation's auc: 0.851237 [300] validation's auc: 0.851215 Early stopping, best iteration is: [283] validation's auc: 0.851261 Fold: 0; Auc: 0.85126 -------------------------------------------------- Training until validation scores don't improve for 30 rounds [20] validation's auc: 0.838843 [40] validation's auc: 0.841474 [60] validation's auc: 0.844246 [80] validation's auc: 0.846072 [100] validation's auc: 0.847492 [120] validation's auc: 0.848755 [140] validation's auc: 0.849456 [160] validation's auc: 0.84991 [180] validation's auc: 0.850341 [200] validation's auc: 0.85069 [220] validation's auc: 0.850818 [240] validation's auc: 0.85087 [260] validation's auc: 0.850873 [280] validation's auc: 0.850938 [300] validation's auc: 0.850903 [320] validation's auc: 0.850949 [340] validation's auc: 0.851004 [360] validation's auc: 0.850969 [380] validation's auc: 0.851033 [400] validation's auc: 0.850945 Early 
stopping, best iteration is: [379] validation's auc: 0.851044 Fold: 1; Auc: 0.85104 -------------------------------------------------- Training until validation scores don't improve for 30 rounds [20] validation's auc: 0.839764 [40] validation's auc: 0.842365 [60] validation's auc: 0.844896 [80] validation's auc: 0.846802 [100] validation's auc: 0.848322 [120] validation's auc: 0.849548 [140] validation's auc: 0.85028 [160] validation's auc: 0.85078 [180] validation's auc: 0.851085 [200] validation's auc: 0.851279 [220] validation's auc: 0.851348 [240] validation's auc: 0.851465 [260] validation's auc: 0.851529 [280] validation's auc: 0.851549 [300] validation's auc: 0.851497 Early stopping, best iteration is: [284] validation's auc: 0.851558 Fold: 2; Auc: 0.85156 -------------------------------------------------- Training until validation scores don't improve for 30 rounds [20] validation's auc: 0.840081 [40] validation's auc: 0.842532 [60] validation's auc: 0.845226 [80] validation's auc: 0.847052 [100] validation's auc: 0.848426 [120] validation's auc: 0.849634 [140] validation's auc: 0.850394 [160] validation's auc: 0.850934 [180] validation's auc: 0.851302 [200] validation's auc: 0.851584 [220] validation's auc: 0.851554 [240] validation's auc: 0.851737 [260] validation's auc: 0.851848 [280] validation's auc: 0.85181 Early stopping, best iteration is: [262] validation's auc: 0.85187 Fold: 3; Auc: 0.85187 -------------------------------------------------- Training until validation scores don't improve for 30 rounds [20] validation's auc: 0.839186 [40] validation's auc: 0.841506 [60] validation's auc: 0.8441 [80] validation's auc: 0.845929 [100] validation's auc: 0.847345 [120] validation's auc: 0.848437 [140] validation's auc: 0.849117 [160] validation's auc: 0.849636 [180] validation's auc: 0.849994 [200] validation's auc: 0.850219 [220] validation's auc: 0.850346 [240] validation's auc: 0.850468 [260] validation's auc: 0.850538 [280] validation's auc: 0.850617 [300] validation's auc: 0.850589 [320] validation's auc: 0.850623 [340] validation's auc: 0.850638 [360] validation's auc: 0.850619 Early stopping, best iteration is: [333] validation's auc: 0.850657 Fold: 4; Auc: 0.85066 -------------------------------------------------- CV-Auc: 0.85128 ###Markdown Feature importance ###Code feature_importances = pd.DataFrame() feature_importances['feature'] = FEATURE for fold_, model in enumerate(model_list): feature_importances['fold_{}'.format(fold_ + 1)] = model.feature_importance(importance_type='gain') scaler = MinMaxScaler(feature_range=(0, 100)) average_importance = feature_importances.drop('feature', axis = 1).mean(axis=1) feature_importances['average'] = scaler.fit_transform(X=pd.DataFrame(average_importance)) feature_importances = feature_importances.sort_values(by='average', ascending=False) feature_importances[['feature', 'average']].to_csv('feature_importances.csv',index=False) fig = plt.figure(figsize=(12,8)) sns.barplot(data=feature_importances.head(50), x='average', y='feature'); plt.title(f'50 TOP feature importance over {N_FOLD} average') ###Output _____no_output_____ ###Markdown TEST Blending ###Code del train gc.collect() test = pd.read_pickle( os.path.join(PATH_NOTEBOOK, 'test_unscaled.pkl') ) pred_test = np.zeros(test.shape[0]) for fold_, model in enumerate(model_list): pred_test += model.predict(test[FEATURE])/N_FOLD ###Output _____no_output_____ ###Markdown SAVE RESULT ###Code #FINDED NEW FEATURE with open('feature_dic.pkl', 'wb') as file_name: pickle.dump(feature_dic, 
file_name) submission = pd.read_csv(os.path.join(INPUT_PATH, 'sample_submission.csv')) submission['target'] = pred_test submission.to_csv('submission.csv', index = False) ###Output _____no_output_____
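`feat_selection_pipeline` is defined earlier in the original notebook and is not shown in this excerpt. As a rough illustration of the idea it implements (drop one candidate feature at a time and keep the drop only if the cross-validated score does not get worse), a heavily simplified sketch follows; the function name, signature and scoring loop here are assumptions for illustration, not the notebook's actual code.

```python
import lightgbm as lgb
from sklearn.model_selection import cross_val_score

def simple_feature_elimination(X, y, feature_list, candidates, params):
    """Drop each candidate in turn; keep the drop if the CV AUC does not decrease."""
    kept = list(feature_list)
    base = cross_val_score(lgb.LGBMClassifier(**params), X[kept], y,
                           scoring='roc_auc', cv=3).mean()
    dropped = []
    for col in candidates:
        trial = [c for c in kept if c != col]
        score = cross_val_score(lgb.LGBMClassifier(**params), X[trial], y,
                                scoring='roc_auc', cv=3).mean()
        if score >= base:          # the feature did not help: drop it for good
            kept, base = trial, score
            dropped.append(col)
    return dropped
```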
Proj2/analysis/framework_debug.ipynb
###Markdown Useful Functions ###Code def print_y_vs_yhat(X, y, y_hat=None): if y_hat is not None: fig, axes = plt.subplots(1, 2) axes[0].scatter(X[:, 0], X[:, 1], c=y) axes[0].set_title("Ground truth") axes[1].scatter(X[:, 0], X[:, 1], c=y_hat) axes[1].set_title("Predicted") else: fig, axes = plt.subplots(1, 1) axes.scatter(X[:, 0], X[:, 1], c=y) axes.set_title("Ground truth") axes.legend(['1', '0']) def print_learning_curves(loss_history_train, loss_history_test, accuracy_history_train, accuracy_history_test): fig, axes = plt.subplots(1, 2, figsize=(10, 5)) axes[0].plot(loss_history_train, label='train') axes[0].plot(loss_history_test, label='test') axes[0].set_xlabel("epochs") axes[0].set_ylabel("Loss") axes[0].legend(title='set') axes[1].plot(accuracy_history_train, label='train') axes[1].plot(accuracy_history_test, label='test') axes[1].set_xlabel("epochs") axes[1].set_ylabel("% accuracy") axes[1].legend(title='set') plt.show() return fig ###Output _____no_output_____ ###Markdown Data generator ###Code dg = DataGenerator(1000) X_train, y_train, X_test, y_test = dg.get_data() print_y_vs_yhat(X_train, y_train.argmax(axis=1)) ###Output _____no_output_____ ###Markdown Xor test ###Code x_tr = torch.tensor([[0, 0], [0, 1], [1, 0], [1, 1]]).float() y_tr = torch.tensor([[1, 0], [0, 1], [0, 1], [1, 0]]).float() xor_model = Sequential() xor_model.add(Feedforward(2, 14)) xor_model.add(ReLU()) xor_model.add(Feedforward(14, 2)) xor_model.add(Tanh()) optimizer = SGD(0.01) loss = MSE() loss_history = [] for e in range(2000): loss_stack = 0. counter = 0 for val, tar in zip(x_tr, y_tr): output = xor_model.forward(val) loss(output, tar) loss_stack += loss.value if output.abs().argmax() == tar.argmax(): counter += 1 xor_model.backward(loss) optimizer.step(xor_model) optimizer.zero_grad(xor_model) l = loss_stack / X_train.size(0) print("epoch: ", e, "| loss: ", l, " | accuracy: ", counter / x_tr.size(0)) loss_history.append(l) ## Test that work for Xor problem counter = 0 for i in range(100): rd = random.randint(0, 3) output = xor_model.forward(x_tr[rd]) target = y_tr[rd] y_hat = output.abs().argmax().item() y = target.argmax().item() # print("y: {} | y_hat: {}".format(y, y_hat)) if y_hat == y: counter += 1 print("xor_model accuracy: ", counter) ###Output xor_model accuracy: 77 ###Markdown Testing ###Code ### Working model mlp = Sequential() mlp.add(Feedforward(2, 25)) mlp.add(Tanh()) mlp.add(Feedforward(25, 25)) mlp.add(Tanh()) mlp.add(Feedforward(25, 25)) mlp.add(Tanh()) mlp.add(Feedforward(25, 2)) mlp.add(Tanh()) lr = 0.01 optimizer = SGD(lr) loss = MSE() ### Working model mlp = Sequential() mlp.add(Feedforward(2, 25)) mlp.add(ReLU()) mlp.add(Feedforward(25, 25)) mlp.add(ReLU()) mlp.add(Feedforward(25, 25)) mlp.add(ReLU()) mlp.add(Feedforward(25, 2)) mlp.add(Tanh()) lr = 0.001 optimizer = SGD(lr) loss = MSE() def training(mlp, optimizer, loss, epochs): loss_history_train = [] loss_history_test = [] accuracy_history_train = [] accuracy_history_test = [] gradient_checker = defaultdict(list) for e in range(epochs): correct_train = 0 correct_test = 0 loss_stack_train = 0. loss_stack_test = 0. 
# Training for i, (val, tar) in enumerate(zip(X_train, y_train)): optimizer.zero_grad(mlp) output = mlp.forward(val) loss(output, tar) if output.abs().argmax() == tar.argmax(): correct_train += 1 loss_stack_train += loss.value.item() mlp.backward(loss) optimizer.step(mlp) # Gradient Checker: for i, l in enumerate(mlp.param()): if l != []: gradient_checker["layer{}_w".format(i)].append(l[1].mean().item()) gradient_checker["layer{}_b".format(i)].append(l[3].mean().item()) # Testing for val, tar in zip(X_test, y_test): output = mlp.forward(val) loss(output, tar) if output.abs().argmax() == tar.argmax(): correct_test += 1 loss_stack_test += loss.value.item() # Metrics evaluation and printing l_train = loss_stack_train / X_train.shape[0] l_test = loss_stack_test / X_test.shape[0] acc_train = correct_train / X_train.shape[0] acc_test = correct_test / X_test.shape[0] accuracy_history_train.append(acc_train) accuracy_history_test.append(acc_test) loss_history_train.append(l_train) loss_history_test.append(l_test) print("epoch: ", e, "| train_loss: ", l_train, " | train_acc: ", acc_train, " | test_loss: ", l_test, " | test_acc: ", acc_test) return loss_history_train, loss_history_test, accuracy_history_train, accuracy_history_test epochs = 40 loss_history_train, loss_history_test, accuracy_history_train, accuracy_history_test = training(mlp, optimizer, loss, epochs) plt.plot(gradient_checker['layer0_w']) plt.plot(gradient_checker['layer0_b']) tanh = print_learning_curves(loss_history_train_tanh, loss_history_test_tanh, accuracy_history_train_tanh, accuracy_history_test_tanh) tanh.savefig("figures/tanh_3layers") ## Tanh activation loss_history_train_tanh = loss_history_train loss_history_test_tanh = loss_history_test accuracy_history_train_tanh = accuracy_history_train accuracy_history_test_tanh = accuracy_history_test loss_history_train_relu = loss_history_train loss_history_test_relu = loss_history_test accuracy_history_train_relu = accuracy_history_train accuracy_history_test_relu = accuracy_history_test # # Save # with open("loss_history_train_tanh.txt", "wb") as f: #Pickling # pickle.dump(loss_history_train_tanh, f) # with open("loss_history_test_tanh.txt", "wb") as f: #Pickling # pickle.dump(loss_history_test_tanh, f) # with open("accuracy_history_train_tanh.txt", "wb") as f: #Pickling # pickle.dump(accuracy_history_train_tanh, f) # with open("accuracy_history_test_tanh.txt", "wb") as f: #Pickling # pickle.dump(accuracy_history_test_tanh, f) # Load with open("loss_history_train_tanh.txt", "rb") as fp: # Unpickling loss_history_train_tanh = pickle.load(fp) with open("loss_history_test_tanh.txt", "rb") as fp: # Unpickling loss_history_test_tanh = pickle.load(fp) with open("accuracy_history_train_tanh.txt", "rb") as fp: # Unpickling accuracy_history_train_tanh = pickle.load(fp) with open("accuracy_history_test_tanh.txt", "rb") as fp: # Unpickling accuracy_history_test_tanh = pickle.load(fp) print_y_vs_yhat(X_test, y_test.argmax(axis=1), y_preds) ###Output _____no_output_____ ###Markdown Torch module ###Code import torch import torch.nn as nn from torch.optim import SGD torch.manual_seed(0) class pytorch_mlp(nn.Module): def __init__(self): super(pytorch_mlp, self).__init__() self.lin1 = nn.Linear(2, 25) self.lin2 = nn.Linear(25, 25) # self.lin3 = nn.Linear(25, 25) self.lin4 = nn.Linear(25, 2) self.activation = nn.Tanh() self.last_activation = nn.Tanh() def forward(self, x): x = self.activation(self.lin1(x)) x = self.activation(self.lin2(x)) # x = self.activation(self.lin3(x)) x = 
self.last_activation(self.lin4(x)) return x torch_mlp = pytorch_mlp() epochs = 40 lr = 0.01 optimizer = SGD(torch_mlp.parameters(), lr) criterion = nn.MSELoss() loss_history_train = [] loss_history_test = [] accuracy_history_train = [] accuracy_history_test = [] for e in range(epochs): correct_train = 0 correct_test = 0 loss_stack_train = 0. loss_stack_test = 0. # Training for val, tar in zip(X_train, y_train): optimizer.zero_grad() output = torch_mlp(val) loss = criterion(output, tar) loss_stack_train += loss loss.backward() optimizer.step() if output.abs().argmax() == tar.argmax(): correct_train += 1 # Testing for val, tar in zip(X_test, y_test): output = torch_mlp(val) loss = criterion(output, tar) loss_stack_test += loss if output.abs().argmax() == tar.argmax(): correct_test += 1 l_train = loss_stack_train / X_train.shape[0] l_test = loss_stack_test / X_test.shape[0] acc_train = correct_train / X_train.shape[0] acc_test = correct_test / X_test.shape[0] loss_history_train.append(l_train) loss_history_test.append(l_test) accuracy_history_train.append(acc_train) accuracy_history_test.append(acc_test) print("epoch: ", e, "| train_loss: ", l_train.item(), " | train_acc: ", acc_train, " | test_loss: ", l_test.item(), " | test_acc: ", acc_test) pytorch_relu = print_learning_curves(loss_history_train, loss_history_test, accuracy_history_train, accuracy_history_test) pytorch_tanh.savefig("figures/pytorch_tanh_3layers") pytorch_loss_history_train_tanh = loss_history_train pytorch_loss_history_test_tanh = loss_history_test pytorch_accuracy_history_train_tanh = accuracy_history_train pytorch_accuracy_history_test_tanh = accuracy_history_test pytorch_loss_history_train_relu = loss_history_train pytorch_loss_history_test_relu = loss_history_test pytorch_accuracy_history_train_relu = accuracy_history_train pytorch_accuracy_history_test_relu = accuracy_history_test ###Output _____no_output_____ ###Markdown Framework | activation comparison ###Code fig, ax = plt.subplots(figsize=(10, 8)) ax.plot(accuracy_history_train_tanh, label='self | tanh') ax.plot(accuracy_history_train_relu, label='self | relu') ax.plot(pytorch_accuracy_history_train_tanh, label='pytorch | tanh') ax.plot(pytorch_accuracy_history_train_relu, label='pytorch | relu') ax.set_xlabel('epochs') ax.set_ylabel('accuracy') ax.legend(title='framework | activation') fig.savefig("figures/framwork_activation comparison") ###Output _____no_output_____
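The `gradient_checker` dictionary above only logs mean gradient magnitudes per layer. A common complementary check when debugging a hand-rolled backward pass is a finite-difference comparison; the sketch below is framework-agnostic and assumes only a zero-argument callable that recomputes the scalar loss after the parameter tensor is perturbed in place.

```python
import torch

def numerical_grad(loss_fn, param, eps=1e-5):
    """Central-difference estimate of d(loss)/d(param), element by element."""
    grad = torch.zeros_like(param)
    with torch.no_grad():
        flat, gflat = param.view(-1), grad.view(-1)
        for i in range(flat.numel()):
            orig = flat[i].item()
            flat[i] = orig + eps
            loss_plus = float(loss_fn())
            flat[i] = orig - eps
            loss_minus = float(loss_fn())
            flat[i] = orig                      # restore the parameter
            gflat[i] = (loss_plus - loss_minus) / (2 * eps)
    return grad

# Usage idea: compare numerical_grad(...) against the gradient your framework
# stores for the same parameter and flag element-wise differences above ~1e-4.
```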
doc/manual.ipynb
###Markdown Jollity Manual Jollity is a small library of Python functions that process Jupyter notebooks.Jollity does _not_ convert notebooks from/to other formats,like Markdown, PDF and HTML.There are plenty of tools for that, including [pandoc](https://pandoc.org),[nbconvert](https://nbconvert.readthedocs.io), [Jupytext](https://jupytext.readthedocs.io),[nbsphinx](https://nbsphinx.readthedocs.io) and [Jupyter Book](https://jupyterbook.org). Example Let's first see an example of how Jollity is used in practice.I'm authoring in Markdown a textbook for M269,the algorithms and data structures module at The Open University, UK.I wrote a Python script that:1. processes the Markdown files2. converts the Markdown files to Jupyter notebooks3. processes the notebooks4. executes the notebooks with nbconvert5. converts the notebooks to PDF and HTML with nbsphinx6. processes the notebooks7. zips all files into an archive for uploading to the M269 website.Markdown cells at the end of stage 2 may look like this: ```The best-case complexity is ø(1) and the worst-case complexity is ø(2^n).Exponential functions were introduced in [MU123](mu123).**Exercise:** Explain why the worst-case is exponential.In practice the worst-case may only occur very rarely.**Exercise**: Edit the next cell to complete the sentence.The average-case complexity is ...``` This example shows the four kinds of special comments used in M269.(You can define your own.)- The `ANSWER` comment becomes a separate Markdown cell with text '_Write your answer here._' in the final notebook that goes to students, but nothing appears in the PDF or HTML.- The `INFO` and `NOTE` comments are replaced with HTML code that puts the text in a coloured box (different colours for info and note boxes).- The text within `EDIT` comments is put in a separate Markdown cell, so that students don't edit by mistake all other surrounding text.The example also shows the use of special characters or character combinationsto make typing the text faster. The script uses Jollity to replace alloccurrences of ø with Θ and of ^ followed by n with ⁿ.The ø letter is quick to type on my keyboard (Alt-o), but there's noshortcut for uppercase theta.The script also replaces occurrences of `mu123` within a linkwith the corresponding URL. 
Keeping a mapping of abbreviations to URLsavoids repeatedly writing and updating the same URL in several notebooks.Code cells often include `%timeit` commands to measure the run-time of code.This slows down the execution of the code cells in stage 4, so the scriptcan be run in a 'draft' mode that comments out the `%timeit` commands\before the notebooks are executed.Stage 3 uses Jollity to:- Put the four special comments into their own Markdown cells.- Add HTML code at the start and end of the info and note cells to generate the boxes.- Strip spaces and newlines from the start and end of each cell.- Do text replacements to obtain special characters like superscripts.- Replace `_text_` with `*text*` in certain contexts to avoid a Jupyter bug when rendering italicised text.- Replace one or more spaces between certain words and digits with a single non-breaking space, for example `step 1.2 takes 45 µs` becomes `step&nbsp;1.2 takes 45&nbsp;µs`.- Expand abbreviated URLs and check all URLs lead to existing pages.- Comment out all timing commands in code cells, if in draft mode.- Report code lines longer than 69 characters: they wrap around in the PDF.- Report invisible line breaks in Markdown.- Report when a heading level is skipped.- Remove all code between ` skip` and ` \skip`, as illustrated in the [README](../README.md).- If the notebook is the start of a chapter, add at the end a reminder to check the M269 website for news and errata before working through the chapter.Stage 6 (after execution and conversion to PDF and HTML) uses Jollity to:- Convert the Markdown syntax within the info and note boxes to HTML. Nbsphinx correctly converts the boxes' Markdown content to PDF and HTML, but the Jupyter interface can't render Markdown within HTML.- Insert the boilerplate text in the empty `ANSWER` cells.- If a notebook has code, extract it to a separate file, with a copyright notice before the code. Comment out all IPython commands starting with `%`.File `m269.py` has the code that calls Jollity to do all the above.The Jollity functions are explained below. Using Jollity Jollity uses the `NotebookNode` class of the `nbformat` module torepresent a notebook in memory.Each function takes an instance of that class and modifies it.To process your notebooks, you will need to write code like this: ```pyimport globimport jollityimport nbformat go through all notebooks in `folder` or a subfolder of itfor file in glob.iglob('folder/**/*.ipynb', recursive=True): notebook = nbformat.read(file, 4) 4 is the notebook format version if 'introduction' in file: pre-process chapter introductions jollity.one_function(notebook) jollity.another_function(notebook) same processing for all notebooks nbformat.write(notebook, file) overwrites the original file``` In most authoring workflows you will wish to preserve the originaland write the processed notebook to a different file or folder.For an alternative way of going through files in a folder,see script `generate_doc.py`.It reads the source Markdown file of this manual in folder `md`and writes the notebook to folder `doc`.The script uses [Jupytext](https://jupytext.readthedocs.io) to converta Markdown file to a Jupyter notebook. Jollity requires Python 3.8 or later. 
Logging The Jollity functions log any warnings and errors as they process notebooks. By default, the warning and error messages are printed on the screen, but you can collect them in a file. If you're using a bash-like command line, you can type `python your_script.py 2> log_file` to redirect the messages, for example `python generate_doc.py 2> log.txt`. This will overwrite the log file every time you run your script. Alternatively, add the following to your script: ###Code import logging logging.basicConfig(filename='log.txt') ###Output _____no_output_____ ###Markdown This will append the messages to the file, if it exists. In this way you can preserve the log of previous runs of your script. If you want the log file to start afresh every time you run the script, write ###Code logging.basicConfig(filename='log.txt', filemode='w') ###Output _____no_output_____
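If you want a bit more control than the bare `basicConfig(filename=...)` calls above, the standard `logging` module accepts a minimum level and a message format in the same call; this is ordinary `logging` usage rather than anything Jollity-specific:

```python
import logging

logging.basicConfig(
    filename='log.txt',
    filemode='w',                        # start afresh on every run
    level=logging.WARNING,               # Jollity logs warnings and errors
    format='%(levelname)s:%(name)s:%(message)s',
)
```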
notebooks/list.ipynb
###Markdown Creating list objects ###Code l = [] type(l), l l = [0, 1, 2, None, "5"] type(l), l l = list("Beautiful") type(l), l l = list((0, 1, 2)) type(l), l ###Output _____no_output_____ ###Markdown Arithmetic with lists ###Code [0] + [1] l = [0] l = l + [1] l l = [0] l += [1] l [0] * 4 ###Output _____no_output_____ ###Markdown List methods ###Code l = [0] l l.append(1) l l.extend([2]) l e = l.pop() e l l.remove(0) l l.remove(0) l.index(1) # !pip install see from see import see see(l) ###Output _____no_output_____ ###Markdown Using lists as sequences: accessing elements with [ ] ###Code weeks = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] weeks len(weeks) weeks[0] weeks[-1] weeks[:5] ###Output _____no_output_____ ###Markdown Accessing elements with for ###Code for day in weeks: print(day) ###Output Mon Tue Wed Thu Fri Sat Sun ###Markdown Confirming the mutability of lists ###Code l = [0, 1, 2, None, "5"] l[3] = 4 l print(id(l), l) l += [6] print(id(l), l) print(id(l), l) l = l + [7] print(id(l), l) ###Output 139659788592944 [0, 1, 2, 4, '5', 6] 139658836055856 [0, 1, 2, 4, '5', 6, 7] ###Markdown Nested lists (when list elements are mutable) and references ###Code l = [[0, 1], [2, 3]] l[0][1] l = [[0]] * 4 l l[0][0] = 1 l l = [[0], [0], [0], [0]] l l[0][0] = 1 l l = [0] * 4 l l[0] = 1 l ###Output _____no_output_____
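The `[[0]] * 4` cells above show that repeating a list of lists copies references rather than contents, which is why mutating one inner list appears to change all four. When independent inner lists are needed, a list comprehension (the explicit `[[0], [0], [0], [0]]` cell does the same thing by hand) avoids the aliasing:

```python
aliased = [[0]] * 4                     # four references to the same inner list
independent = [[0] for _ in range(4)]   # four distinct inner lists

aliased[0][0] = 1
independent[0][0] = 1

print(aliased)      # [[1], [1], [1], [1]]
print(independent)  # [[1], [0], [0], [0]]
```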
modules/Pyjokes/Pyjokes.ipynb
###Markdown Usage **1. Command line** - Run `pyjoke` at the command line to get a random joke: `$ pyjoke` Why did the programmer quit his job? Because he didn't get arrays. **2. Import** - Import the pyjokes module in a Python file and use the `get_joke` function to easily drop a random joke into your application: `import pyjokes` then `print(pyjokes.get_joke())` ###Code import pyjokes print(pyjokes.get_joke()) ###Output I would tell you a joke about UDP, but you would never get it.
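If memory serves, `get_joke` also accepts `language` and `category` keyword arguments and `get_jokes` returns a whole list; treat the exact parameter values below as assumptions to be checked against the pyjokes documentation:

```python
import pyjokes

# Parameter names and values are assumptions -- check the pyjokes docs.
print(pyjokes.get_joke(language="en", category="neutral"))
print(len(pyjokes.get_jokes(language="en", category="all")))
```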
Day 10/Day_10_Puzzle_4.ipynb
###Markdown Puzzle 4: FIZZ BUZZ Create a function that takes a number as a parameter. This function will print out (console.log()) every number from 1 to the number passed as the parameter, with these exceptions: - if the number is divisible by 3, the function prints the word 'Fizz' instead of the number. - if the number is divisible by 5, the function prints the word 'Buzz' instead of the number. - if the number is divisible by both 3 and 5, the function prints 'Fizz Buzz' instead of the number. Submit the GitHub link to the repo containing the code, your reasoning and pseudo code of the solution for the problem (code comments). You may use Google search to solve the problem but if you do YOU MUST: 1. cite where you got it from. 2. state why you didn't try to solve the problem on your own. 3. write a detailed summary of every line of code you copied explaining what is happening on said line of code. PSEUDO CODE: 1. Initialize variables 2. If the number is divisible by 3 and 5, set text to FIZZ BUZZ 3. Else if the number is divisible by 3, set text to FIZZ 4. Else if the number is divisible by 5, set text to BUZZ 5. Otherwise, set text to "" 6. Print the text ###Code //Initialize Variables let text=""; let number=30; for( let i=1; i<=number; i++) { //If the number is divisible by 3 and 5, set text to FIZZ BUZZ if(i%3===0 && i%5===0) { text = "FIZZ BUZZ"; } //Else if the number is divisible by 3, set text to FIZZ else if(i%3===0) { text += "FIZZ"; } //Else if the number is divisible by 5, set text to BUZZ else if(i%5===0) { text += "BUZZ"; } //Otherwise, leave text empty else { text = ""; } //Print the text console.log(i+": "+text) text = ""; } 1: 2: 3: FIZZ 4: 5: BUZZ 6: FIZZ 7: 8: 9: FIZZ 10: BUZZ 11: 12: FIZZ 13: 14: 15: FIZZ BUZZ 16: 17: 18: FIZZ 19: 20: BUZZ 21: FIZZ 22: 23: 24: FIZZ 25: BUZZ 26: 27: FIZZ 28: 29: 30: FIZZ BUZZ ###Output _____no_output_____
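For comparison with the JavaScript version above, the same logic in Python (the language used in the rest of this collection) is only a few lines:

```python
def fizz_buzz(number):
    for i in range(1, number + 1):
        if i % 3 == 0 and i % 5 == 0:
            text = "FIZZ BUZZ"
        elif i % 3 == 0:
            text = "FIZZ"
        elif i % 5 == 0:
            text = "BUZZ"
        else:
            text = ""
        print(f"{i}: {text}")

fizz_buzz(30)
```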
Data/Dataset_Cleaning.ipynb
###Markdown CIC-Darknet2020 Dataset CleaningHere we load data from the [CIC-Darknet2020](https://www.unb.ca/cic/datasets/darknet2020.html) dataset remove any invalid values from the dataset and save the cleaned dataset to a new file. First we import all relevant libraries, set a random seed, and print python and library versions for reproducability ###Code import datetime, os, platform, pprint, sys import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import pandas as pd seed: int = 14 # set up pretty printer for easier data evaluation pretty = pprint.PrettyPrinter(indent=4, width=30).pprint # set up pandas display options pd.set_option('display.max_colwidth', None) pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 100) print( f''' Last Execution: {datetime.datetime.now()} python:\t{platform.python_version()} \tmatplotlib:\t{mpl.__version__} \tnumpy:\t\t{np.__version__} \tpandas:\t\t{pd.__version__} ''' ) ###Output Last Execution: 2022-02-16 20:59:06.427550 python: 3.7.10 matplotlib: 3.3.4 numpy: 1.20.3 pandas: 1.2.5 ###Markdown Next we prepare some helper functions to help process the data ###Code def get_file_path(directory: str): ''' Closure that will return a function. Function will return the filepath to the directory given to the closure ''' def func(file: str) -> str: return os.path.join(directory, file) return func def load_data(filePath): ''' Loads the Dataset from the given filepath and caches it for quick access in the future Function will only work when filepath is a .csv file ''' # slice off the ./CSV/ from the filePath if filePath[0] == '.' and filePath[1] == '/': filePathClean: str = filePath[11::] pickleDump: str = f'./cache/{filePathClean}.pickle' else: pickleDump: str = f'./cache/{filePath}.pickle' print(f'Loading Dataset: {filePath}') print(f'\tTo Dataset Cache: {pickleDump}\n') # check if data already exists within cache if os.path.exists(pickleDump): df = pd.read_pickle(pickleDump) df['Label1'] = df['Label1'].str.lower() df.Label1.unique() # if not, load data and clean it before caching it else: df = pd.read_csv(filePath, low_memory=True) df['Label1'] = df['Label1'].str.lower() df.Label1.unique() df.to_pickle(pickleDump) return df def features_with_bad_values(df: pd.DataFrame, datasetName: str) -> pd.DataFrame: ''' Function will scan the dataframe for features with Inf, NaN, or Zero values. Returns a new dataframe describing the distribution of these values in the original dataframe ''' # Inf and NaN values can take different forms so we screen for every one of them invalid_values: list = [ np.inf, np.nan, 'Infinity', 'inf', 'NaN', 'nan', 0 ] infs : list = [ np.inf, 'Infinity', 'inf' ] NaNs : list = [ np.nan, 'NaN', 'nan' ] # We will collect stats on the dataset, specifically how many instances of Infs, NaNs, and 0s are present. 
# using a dictionary that will be converted into a (3, 2+88) dataframe stats: dict = { 'Dataset':[ datasetName, datasetName, datasetName ], 'Value' :['Inf', 'NaN', 'Zero'] } i = 0 for col in df.columns: i += 1 feature = np.zeros(3) for value in invalid_values: if value in infs: j = 0 elif value in NaNs: j = 1 else: j = 2 indexNames = df[df[col] == value].index if not indexNames.empty: feature[j] += len(indexNames) stats[col] = feature return pd.DataFrame(stats) def clean_data(df: pd.DataFrame, prune: list) -> pd.DataFrame: ''' Function will take a dataframe and remove the columns that match a value in prune Inf values will also be removed from Flow Bytes/s and Flow Packets/s once appropriate rows and columns have been removed, we will return the dataframe with the appropriate values ''' # remove the features in the prune list for col in prune: if col in df.columns: df.drop(columns=[col], inplace=True) # drop missing values/NaN etc. df.dropna(inplace=True) # Search through dataframe for any Infinite or NaN values in various forms that were not picked up previously invalid_values: list = [ np.inf, np.nan, 'Infinity', 'inf', 'NaN', 'nan' ] for col in df.columns: for value in invalid_values: indexNames = df[df[col] == value].index if not indexNames.empty: print(f'deleting {len(indexNames)} rows with Infinity in column {col}') df.drop(indexNames, inplace=True) return df ###Output _____no_output_____ ###Markdown Before we do any processing on the data, we need to list out all their filepaths. If trying to reproduce the process carried out here, place files in the same location relative to the notebook. ###Code # This code is used to scale to processing numerous datasets, even though we currently are only looking at one now data_path_1: str = './original/' data_set_1: list = [ 'Darknet.csv', ] data_set: list = data_set_1 file_path_1 = get_file_path(data_path_1) file_set: list = list(map(file_path_1, data_set_1)) current_job: int = 0 ###Output _____no_output_____ ###Markdown Some more helper functions that process the data using the file and dataset information above ###Code def examine_dataset(job_id: int) -> dict({'File': str, 'Dataset': pd.DataFrame, 'Feature_stats': pd.DataFrame, 'Data_composition': pd.DataFrame}): ''' Function will return a dictionary containing dataframe of the job_id passed in as well as that dataframe's feature stats, data composition, and file name. ''' job_id = job_id - 1 # adjusts for indexing while enumerating jobs from 1 print(f'Dataset {job_id+1}/{len(data_set)}: We now look at {file_set[job_id]}\n\n') # Load the dataset df: pd.DataFrame = load_data(file_set[job_id]) # print the data composition print(f''' File:\t\t\t\t{file_set[job_id]} Job Number:\t\t\t{job_id+1} Shape:\t\t\t\t{df.shape} Samples:\t\t\t{df.shape[0]} Features:\t\t\t{df.shape[1]} ''') # return the dataframe and the feature stats data_summary: dict = { 'File': file_set[job_id], 'Dataset': df, 'Feature_stats': features_with_bad_values(df, file_set[job_id]), } return data_summary def package_data_for_inspection(df: pd.DataFrame) -> dict({'File': str, 'Dataset': pd.DataFrame, 'Feature_stats': pd.DataFrame, 'Data_composition': pd.DataFrame}): ''' Function will return a dictionary containing dataframe passed in as well as that dataframe's feature stats. 
''' # print the data composition print(f''' Shape:\t\t\t\t{df.shape} Samples:\t\t\t{df.shape[0]} Features:\t\t\t{df.shape[1]} ''') # return the dataframe and the feature stats data_summary: dict = { 'File': '', 'Dataset': df, 'Feature_stats': features_with_bad_values(df, ''), } return data_summary def package_data_for_inspection_with_label(df: pd.DataFrame, label: str) -> dict({'File': str, 'Dataset': pd.DataFrame, 'Feature_stats': pd.DataFrame, 'Data_composition': pd.DataFrame}): ''' Function will return a dictionary containing dataframe passed in as well as that dataframe's feature stats. ''' # print the data composition print(f''' Shape:\t\t\t\t{df.shape} Samples:\t\t\t{df.shape[0]} Features:\t\t\t{df.shape[1]} ''') # return the dataframe and the feature stats data_summary: dict = { 'File': f'{label}', 'Dataset': df, 'Feature_stats': features_with_bad_values(df, f'{label}'), } return data_summary def check_infs(data_summary: dict) -> pd.DataFrame: ''' Function will return a dataframe of features with a value of Inf. ''' vals: pd.DataFrame = data_summary['Feature_stats'] inf_df = vals[vals['Value'] == 'Inf'].T return inf_df[inf_df[0] != 0] def check_nans(data_summary: dict) -> pd.DataFrame: ''' Function will return a dataframe of features with a value of NaN. ''' vals: pd.DataFrame = data_summary['Feature_stats'] nan_df = vals[vals['Value'] == 'NaN'].T return nan_df[nan_df[1] != 0] def check_zeros(data_summary: dict) -> pd.DataFrame: ''' Function will return a dataframe of features with a value of 0. ''' vals: pd.DataFrame = data_summary['Feature_stats'] zero_df = vals[vals['Value'] == 'Zero'].T return zero_df[zero_df[2] != 0] def check_zeros_over_threshold(data_summary: dict, threshold: int) -> pd.DataFrame: ''' Function will return a dataframe of features with a value of 0. ''' vals: pd.DataFrame = data_summary['Feature_stats'] zero_df = vals[vals['Value'] == 'Zero'].T zero_df_bottom = zero_df[2:] return zero_df_bottom[zero_df_bottom[2] > threshold] def check_zeros_over_threshold_percentage(data_summary: dict, threshold: float) -> pd.DataFrame: ''' Function will return a dataframe of features with all features with a frequency of 0 values greater than the threshold ''' vals: pd.DataFrame = data_summary['Feature_stats'] size: int = data_summary['Dataset'].shape[0] zero_df = vals[vals['Value'] == 'Zero'].T zero_df_bottom = zero_df[2:] return zero_df_bottom[zero_df_bottom[2] > threshold*size] def remove_infs_and_nans(data_summary: dict) -> pd.DataFrame: ''' Function will return the dataset with all inf and nan values removed. ''' df: pd.DataFrame = data_summary['Dataset'] df = clean_data(df, []) return df def create_new_prune_candidates(zeros_df: pd.DataFrame) -> list: ''' Function creates a list of prune candidates from a dataframe of features with a high frequency of 0 values ''' return list(zeros_df.T.columns) def intersection_of_prune_candidates(pruneCandidates: list, newPruneCandidates: list) -> list: ''' Function will return a list of features that are in both pruneCandidates and newPruneCandidates ''' return list(set(pruneCandidates).intersection(newPruneCandidates)) def test_infs(data_summary: dict) -> bool: ''' Function asserts the dataset has no inf values. 
''' vals: pd.DataFrame = data_summary['Feature_stats'] inf_df = vals[vals['Value'] == 'Inf'].T assert inf_df[inf_df[0] != 0].shape[0] == 2, 'Dataset has inf values' return True def test_nans(data_summary: dict) -> bool: ''' Function asserts the dataset has no NaN values ''' vals: pd.DataFrame = data_summary['Feature_stats'] nan_df = vals[vals['Value'] == 'NaN'].T assert nan_df[nan_df[1] != 0].shape[0] == 2, 'Dataset has NaN values' return True ###Output _____no_output_____ ###Markdown This gives us a set of file locations. Lets look at the set of files that we will be cleaning ###Code print(f'We will be cleaning {len(file_set)} files:') pretty(file_set) ###Output We will be cleaning 1 files: ['./original/Darknet.csv'] ###Markdown The Original CIC-Darknet2020 Dataset ###Code dataset_1: dict = examine_dataset(1) print(f""" Labels in the first layer: {dataset_1['Dataset'].groupby('Label').size()} Labels in the second layer: {dataset_1['Dataset'].groupby('Label1').size()} """) ###Output Dataset 1/1: We now look at ./original/Darknet.csv Loading Dataset: ./original/Darknet.csv To Dataset Cache: ./cache/Darknet.csv.pickle File: ./original/Darknet.csv Job Number: 1 Shape: (141530, 85) Samples: 141530 Features: 85 Labels in the first layer: Label Non-Tor 93356 NonVPN 23863 Tor 1392 VPN 22919 dtype: int64 Labels in the second layer: Label1 audio-streaming 18064 browsing 32808 chat 11478 email 6145 file-transfer 11182 p2p 48520 video-streaming 9767 voip 3566 dtype: int64 ###Markdown Data InspectionHere we just check to see how many inf and nan values are in the dataset. ###Code df = check_infs(dataset_1) df.shape check_nans(dataset_1) check_zeros_over_threshold(dataset_1, dataset_1['Dataset'].shape[0]-1) ###Output _____no_output_____ ###Markdown Data CleaningNow we remove the infs and nan values and then verify that the dataset no longer contains any infs or nans. ###Code dataset_2 = package_data_for_inspection(remove_infs_and_nans(dataset_1)) check_infs(dataset_2) check_nans(dataset_2) check_zeros_over_threshold(dataset_2, dataset_2['Dataset'].shape[0]-1) if(test_infs(dataset_2) and test_nans(dataset_2)): print('Dataset is clean') ###Output Dataset is clean ###Markdown Saving the Cleaned DatasetNow we save the cleaned dataset to a new file. ###Code dataset_2['Dataset'].to_csv('./cleaned/Darknet_cleaned.csv', index=False) print(f'Last Execution: {datetime.datetime.now()}') assert False, 'Nothing is complete after this point' ###Output Last Execution: 2022-02-16 20:59:20.803303
Validate/old_2_daily_validate_mosaic.ipynb
###Markdown run dirs ws-out ###Code ! for i in `cat ../Orchestration/junk` ; do echo $i; aws s3 ls ws-out/CONUS/$i ;echo '---------------' ; done ###Output Run02_17_2022/ PRE r50t7/ --------------- Run02_24_2022/ PRE conus_r50t6/ --------------- Run02_25_2022/ PRE conus_r50t5/ PRE conus_r50t8/ --------------- Run03_01_2022/ PRE conus_r37t0/ --------------- Run03_03_2022/ PRE conus_r37t1/ PRE conus_r37t2/ --------------- Run03_07_2022/ PRE conus_r37t3/ --------------- Run03_11_2022/ PRE conus_r50t9/ --------------- ###Markdown output dirs ###Code ! aws s3 ls ws-enduser/CONUS/ import boto3 def s3_bucket_analyze(bucket, prefix): objs = [] print("bucket", bucket) print("prefix", prefix) bucket_name = bucket prefix = prefix s3 = boto3.resource('s3') bucket = s3.Bucket(bucket_name) cnt=0 storage_class_h = {'STANDARD' : 0, 'GLACIER' : 0, 'INTELLIGENT_TIERING' : 0, } sum_class_h = {'STANDARD' : 0, 'GLACIER' : 0, 'INTELLIGENT_TIERING' : 0, } sum = 0 for obj in bucket.objects.filter(Prefix=prefix): storage_class_h[obj.storage_class] = storage_class_h[obj.storage_class] + 1 cnt = cnt + 1 if not cnt%1000: print(bucket, "bucket object count = ", cnt, flush=True) sum = sum + obj.size sum_class_h[obj.storage_class] = sum_class_h[obj.storage_class] + obj.size my_obj = { 'bucket_name':obj.bucket_name, 'key':obj.key, 'size':obj.size, 'class':obj.storage_class } objs.append(my_obj) print ("COUNT=", cnt) for ky in storage_class_h.keys(): print(ky, storage_class_h[ky]) sum = sum_class_h[ky] print(ky, sum_class_h[ky]) gig = sum/(1024*1024*1024) print (ky, "GBYTES=", gig) if ky == 'GLACIER': cost=.007 else: cost=.023 print (ky, "Cost/Month=", gig * cost) print ("----" * 25) print("END LOOP") my_key = 'STANDARD' ret_gbytes = sum_class_h[my_key]/(1024*1024*1024) if (ret_gbytes < 1): ret_gbytes = 1 ret_costs = .023 * ret_gbytes print('G:', ret_gbytes, ret_costs) return objs bucket='ws-enduser' my_prefix='CONUS/' objs = s3_bucket_analyze(bucket, my_prefix) objs[405] new_objs=[] for o in objs: a = o['key'].split('/') project = a[0] tile = a[1] year = a[2] basenm = a[3] new_o = { 'bucket_name': o['bucket_name'], 'project': project, 'tile': tile, 'year': year, 'basename': basenm, 'size': o['size'] } new_objs.append(new_o) import pandas as pd df = pd.DataFrame(new_objs) df tiles = df['tile'].unique() tiles tdf = df[df['tile'] == tiles[0]] tdf tdf.describe() variables = ['etasw','srf', 'dd','netet', 'etc'] #variables = ['etasw'] start_year=2000 end_year=2000 import re def validate_monthly_data(tdf, tile): total_work_left = 0 records = [] for variable in variables: for year in range(2000,2020+1): ytdf = tdf[tdf['year'] == str(year)] basenames = ytdf['basename'].to_list() #print(basenames) #print(ytdf) #for m in range(1,12+1): rematch=f'{variable}_{year}\d\d\d.tif' r = re.compile(rematch) my_monthly_list_by_var = [x for x in basenames if r.match(x)] my_len = len(my_monthly_list_by_var) if my_len < 366: total_work_left = total_work_left + 365 - my_len o={ 'tile': tile, 'variable': variable, 'year': year, 'completed:': len(my_monthly_list_by_var) } print(o) records.append(o) print('work remaining:',total_work_left) return records tiles for tile in tiles[0:1]: tdf = df[df['tile'] == tile] records = validate_monthly_data(tdf, tile) print(tile, total_work_left) ###Output {'tile': 'r37.0_tile0', 'variable': 'etasw', 'year': 2001, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etasw', 'year': 2002, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etasw', 'year': 2003, 'completed:': 365} {'tile': 'r37.0_tile0', 
'variable': 'etasw', 'year': 2005, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etasw', 'year': 2006, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etasw', 'year': 2007, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etasw', 'year': 2009, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etasw', 'year': 2010, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etasw', 'year': 2011, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etasw', 'year': 2013, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etasw', 'year': 2014, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etasw', 'year': 2015, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etasw', 'year': 2017, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etasw', 'year': 2018, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etasw', 'year': 2019, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'srf', 'year': 2001, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'srf', 'year': 2002, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'srf', 'year': 2003, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'srf', 'year': 2005, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'srf', 'year': 2006, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'srf', 'year': 2007, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'srf', 'year': 2009, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'srf', 'year': 2010, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'srf', 'year': 2011, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'srf', 'year': 2013, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'srf', 'year': 2014, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'srf', 'year': 2015, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'srf', 'year': 2017, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'srf', 'year': 2018, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'srf', 'year': 2019, 'completed:': 91} {'tile': 'r37.0_tile0', 'variable': 'srf', 'year': 2020, 'completed:': 89} {'tile': 'r37.0_tile0', 'variable': 'dd', 'year': 2000, 'completed:': 69} {'tile': 'r37.0_tile0', 'variable': 'dd', 'year': 2001, 'completed:': 69} {'tile': 'r37.0_tile0', 'variable': 'dd', 'year': 2002, 'completed:': 90} {'tile': 'r37.0_tile0', 'variable': 'dd', 'year': 2003, 'completed:': 68} {'tile': 'r37.0_tile0', 'variable': 'dd', 'year': 2004, 'completed:': 67} {'tile': 'r37.0_tile0', 'variable': 'dd', 'year': 2005, 'completed:': 87} {'tile': 'r37.0_tile0', 'variable': 'dd', 'year': 2006, 'completed:': 88} {'tile': 'r37.0_tile0', 'variable': 'dd', 'year': 2007, 'completed:': 88} {'tile': 'r37.0_tile0', 'variable': 'dd', 'year': 2008, 'completed:': 88} {'tile': 'r37.0_tile0', 'variable': 'dd', 'year': 2009, 'completed:': 88} {'tile': 'r37.0_tile0', 'variable': 'dd', 'year': 2010, 'completed:': 88} {'tile': 'r37.0_tile0', 'variable': 'dd', 'year': 2011, 'completed:': 88} {'tile': 'r37.0_tile0', 'variable': 'dd', 'year': 2012, 'completed:': 87} {'tile': 'r37.0_tile0', 'variable': 'dd', 'year': 2013, 'completed:': 87} {'tile': 'r37.0_tile0', 'variable': 'dd', 'year': 2014, 'completed:': 88} {'tile': 'r37.0_tile0', 'variable': 'dd', 'year': 2015, 'completed:': 0} {'tile': 'r37.0_tile0', 'variable': 'dd', 'year': 2016, 'completed:': 0} {'tile': 'r37.0_tile0', 'variable': 'dd', 'year': 2017, 'completed:': 0} {'tile': 'r37.0_tile0', 'variable': 'dd', 'year': 2018, 'completed:': 0} {'tile': 'r37.0_tile0', 
'variable': 'dd', 'year': 2019, 'completed:': 0} {'tile': 'r37.0_tile0', 'variable': 'dd', 'year': 2020, 'completed:': 0} {'tile': 'r37.0_tile0', 'variable': 'netet', 'year': 2001, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'netet', 'year': 2002, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'netet', 'year': 2003, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'netet', 'year': 2005, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'netet', 'year': 2006, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'netet', 'year': 2007, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'netet', 'year': 2009, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'netet', 'year': 2010, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'netet', 'year': 2011, 'completed:': 205} {'tile': 'r37.0_tile0', 'variable': 'netet', 'year': 2013, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'netet', 'year': 2014, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'netet', 'year': 2015, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'netet', 'year': 2017, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'netet', 'year': 2018, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'netet', 'year': 2019, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etc', 'year': 2001, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etc', 'year': 2002, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etc', 'year': 2003, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etc', 'year': 2005, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etc', 'year': 2006, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etc', 'year': 2007, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etc', 'year': 2009, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etc', 'year': 2010, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etc', 'year': 2011, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etc', 'year': 2013, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etc', 'year': 2014, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etc', 'year': 2015, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etc', 'year': 2017, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etc', 'year': 2018, 'completed:': 365} {'tile': 'r37.0_tile0', 'variable': 'etc', 'year': 2019, 'completed:': 365} work remaining: 7135 r37.0_tile0 None
Lab5_1_Modeling_KMeans/assignment1.ipynb
###Markdown K-Means on Chicago crime dataset (gambling) ###Code import pandas as pd from sklearn.cluster import KMeans import matplotlib.pyplot as plt # load the crime data (gambling) flc = '/Users/pinqingkan/Desktop/DataScience/PythonDataScience/DAT210x-master/Module5/Datasets/' fname = flc + 'Crimes_-_2001_to_present.csv' X = pd.read_csv(fname) X.head() # remove NaN entries Y = X.dropna(axis = 0) # convert string to datetime Z = pd.to_datetime(Y.Date) Y2 = Y.drop(axis = 1, labels = ['Date']) Y2 = pd.concat([Y2, Z], axis = 1) Y2.dtypes # K-Means clustering on the crime location Z = Y2.loc[:, ['Longitude', 'Latitude']] kmeans = KMeans(n_clusters = 7) kmeans.fit(Z) KMeans(n_clusters = 7, max_iter = 300, n_init = 10, init = 'k-means++', copy_x = True, n_jobs = 1, precompute_distances = 'auto', random_state = None, tol = 1e-4, verbose = 0) centroids = kmeans.cluster_centers_ print(centroids) # visualize the clustering plt.scatter(Z.Longitude, Z.Latitude, marker = '.', alpha = 0.3) plt.scatter(centroids[:,0], centroids[:,1], marker='x', c='red', alpha=0.5, linewidths=3, s=169) plt.show() # slice the data to keep those occurred after 2011-01-01 idc = Y2.Date > '2011-01-01' Z2 = Y2.loc[idc, ['Longitude', 'Latitude']] # K-Means clustering on crime location kmeans2 = KMeans(n_clusters = 7) kmeans2.fit(Z2) KMeans(n_clusters = 7, max_iter = 300, n_init = 10, init = 'k-means++', copy_x = True, n_jobs = 1, precompute_distances = 'auto', random_state = None, tol = 1e-4, verbose = 0) centroids2 = kmeans2.cluster_centers_ # visualize the clustering plt.scatter(Z2.Longitude, Z2.Latitude, marker = '.', alpha = 0.3) plt.scatter(centroids2[:,0], centroids2[:,1], marker='x', c='red', alpha=0.5, linewidths=3, s=169) plt.show() ###Output _____no_output_____
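###Markdown A note on the choice of `n_clusters`: the assignment fixes it at 7, but an elbow plot over `inertia_` is a quick way to sanity-check that choice. The cell below is only an illustrative sketch: it assumes `Z` (the Longitude/Latitude frame built above) is still in memory, and the 2-11 search range is arbitrary. ###Code
inertias = []
cluster_range = range(2, 12)   # arbitrary search range for the sanity check
for k in cluster_range:
    km = KMeans(n_clusters=k, n_init=10, random_state=0)
    km.fit(Z)
    inertias.append(km.inertia_)   # within-cluster sum of squared distances
plt.plot(list(cluster_range), inertias, marker='o')
plt.xlabel('n_clusters')
plt.ylabel('inertia')
plt.title('Elbow check for gambling crime locations')
plt.show()
###Output _____no_output_____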
nb/test_data.ipynb
###Markdown Adapting a new dataset ###Code import pandas as pd import numpy as np df = pd.read_pickle('../data/btc_30m.pkl') df.head() print('start_date: {}, end_date: {}, days: {}'.format(df.index[0], df.index[-1], (df.index[-1] - df.index[0]).days)) assert not df.isna().sum().any() # we leave btc_usdt as the first column, that we treat as the main time series. All the other trades will be as ancillary series df.describe() # %load ../utils/dataset_utils.py # This is the function that format the data for the Dataset iterator in dataset.py # x is the time series before seq_len # y is the time series after seq_len steps # if not ar we x will contain all the ancillary features # you can edit this function as you like but keep the same output interface def make_features(df, seq_len=12, preprocess="normalize", lags=(12,), ar=False, use_x=False): mu = 0. std = 1. x = df.iloc[:, :1].values[:-seq_len] y = df.iloc[:, :1].values[seq_len:] y_features = np.hstack( [df.iloc[:, :1].shift(lag).fillna(0).values[:-seq_len] for lag in lags]) dates = { 'x': df.index[:-seq_len].tolist(), 'y': df.index[seq_len:].tolist(), } if preprocess == "normalize": mu = x.mean() std = x.std() x = (x - mu) / std y_features = (y_features - y_features.mean()) / y_features.std() if not ar: x_features = df.iloc[:, 1:].values[:-seq_len] if preprocess == "normalize": x_features = (x_features - x_features.mean(axis=0)) / (x_features.std(axis=0)) if use_x: x = np.concatenate([x, x_features], axis=-1) else: x = x_features assert np.isnan(x).sum() == 0 return { 'x': x, 'y': y, 'y_features': y_features, }, dates, {'mu': mu, 'std': std} ###Output _____no_output_____
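###Markdown A quick usage sketch of `make_features` on the frame loaded above, mainly to confirm the output interface it promises (`x`, `y`, `y_features`, the date lists, and the `mu`/`std` needed to de-normalize predictions). The arguments are just the defaults; `seq_len=12` amounts to 12 half-hour bars (6 hours), going by the `btc_30m` filename. ###Code
arrays, dates, stats = make_features(df, seq_len=12, preprocess="normalize", lags=(12,), ar=False)
print('x:', arrays['x'].shape, 'y:', arrays['y'].shape, 'y_features:', arrays['y_features'].shape)
print('first x date:', dates['x'][0], '-> first y date:', dates['y'][0])
print('mu/std for de-normalizing predictions:', stats['mu'], stats['std'])
###Output _____no_output_____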
Testing the product review attributes.ipynb
###Markdown Importing the required libraries ###Code import requests from bs4 import BeautifulSoup import pandas as pd import time import math ###Output _____no_output_____ ###Markdown Making a get_soup() function for extracting the soup ###Code def get_soup(url): proxylist = ['46.4.96.137:1080', '198.199.86.148:47638', '31.7.232.178:1080', '51.81.83.181:59427', '5.182.39.88:9988', '13.250.64.147:48540', '69.163.163.59:37884', '159.203.61.169:1080', '103.252.196.163:1081', '47.240.226.173:65432', '104.238.111.194:49228', '88.80.119.7:1080', '91.198.137.31:3550', '47.119.141.40:6111', '172.245.92.238:9300', '47.112.156.174:6111', '43.132.245.147:1080', '123.18.206.50:1080', '130.185.77.48:31870', '51.79.243.234:54892', '161.202.101.222:1080', '82.146.63.105:9050', '91.198.137.31:3598', '159.75.131.107:10808', '93.89.239.90:8080', '90.188.255.99:9100', '69.163.166.113:37884', '72.206.181.105:64935', '95.213.154.54:31337', '69.163.163.180:24260', '103.249.100.40:9050', '104.248.48.211:30588', '178.62.79.115:21945', '174.77.111.196:4145', '178.150.246.144:1088', '112.78.141.76:1080', '184.178.172.14:4145', '167.86.95.73:1088', '104.255.170.66:62625', '143.110.238.119:999', '35.223.101.235:9050', '5.8.41.182:10086', '123.129.219.228:1080', '154.86.9.154:1080', '104.255.170.64:58890', '31.25.243.40:9507', '47.106.232.112:6111', '147.135.114.12:3080', '103.139.163.168:1080', '13.250.96.83:48540', '167.86.92.43:8085', '104.255.170.65:57848', '119.23.230.154:6111', '5.182.39.88:9608', '103.73.74.179:1080', '47.119.128.33:6111', '39.108.100.34:1080', '47.242.202.187:25503', '162.144.64.112:38067', '207.148.74.151:55056', '87.98.156.77:1080', '70.166.167.55:57745', '69.163.162.172:37884', '42.193.123.162:8888', '186.126.179.13:1080', '69.163.163.164:24260', '8.129.232.207:6111', '185.179.196.19:1090', '69.163.166.25:37884', '47.119.133.44:6111', '129.226.178.47:1080', '116.203.128.117:9050', '3.234.82.104:1080', '192.111.138.29:4145', '132.148.156.206:33388', '154.86.12.62:1080', '8.129.5.209:6111', '84.32.188.34:1157', '91.198.137.31:3565', '116.238.54.32:1080', '47.119.130.75:6111', '98.162.96.52:4145', '5.61.51.86:8088', '31.25.243.40:9188', '178.62.79.49:23668', '91.198.137.31:3537', '118.31.46.195:1080', '95.216.123.39:9051', '81.169.171.198:8050', '154.86.10.133:1080', '86.123.22.142:10801', '97.74.230.87:45668', '13.250.32.231:48540', '223.199.160.185:10801', '120.25.164.49:34568', '192.252.211.197:14921', '151.106.34.139:1080', '212.237.59.132:10138', '101.89.115.254:1080', '184.178.172.28:15294', '8.210.163.246:50003', '210.16.73.83:1080', '104.255.170.63:59641', '178.62.79.49:51591', '37.28.155.148:63669', '47.119.112.85:6111', '45.56.91.72:63321', '8.129.57.62:6111', '13.250.122.126:48540', '180.109.41.27:1080', '202.59.9.184:1080', '69.163.162.195:1645', '70.166.167.38:57728', '155.138.202.69:12345', '72.206.181.103:4145', '35.152.32.243:1080', '47.119.124.118:6111', '27.184.248.81:1080', '5.182.39.88:9520', '142.93.245.236:30588', '154.86.12.46:1080', '157.245.254.44:8083', '3.37.249.70:1080', '113.103.8.197:1080', '72.49.49.11:31034', '103.85.232.146:1080', '106.37.96.165:1080', '31.25.243.40:9219', '47.115.164.11:6111', '123.112.230.135:1080', '112.218.231.43:1080', '167.71.249.173:40801', '47.242.14.133:25503', '104.255.170.67:53763', '27.116.51.178:6667', '182.92.148.224:9000', '185.179.196.19:1101', '178.23.36.156:1080', '95.179.158.147:22413', '95.169.186.58:9059', '104.255.170.63:55034', '176.9.75.42:1080', '118.194.233.52:1080', '5.182.39.88:9925', '104.255.170.63:61324', 
'5.182.39.88:9999', '154.86.11.141:1080', '64.126.160.161:31337', '139.59.1.14:1080', '49.68.228.60:28388', '103.216.82.43:6667', '123.120.54.226:1080', '45.79.252.252:4145', '104.255.170.68:58163', '8.129.1.243:6111', '206.189.92.74:7777', '91.198.137.31:3582', '159.69.204.30:10104', '129.204.227.27:1080', '192.252.215.5:16137', '79.124.62.26:443', '104.255.170.91:53763', '8.210.163.246:50015', '98.162.25.23:4145', '54.254.195.169:1437', '132.148.15.129:38844', '13.250.14.216:48540', '132.148.159.119:7529', '192.169.201.24:48550', '83.167.246.51:52441', '181.3.94.75:1080', '47.115.11.247:6111', '13.250.36.159:48540', '195.242.110.246:1080', '104.255.170.65:58890', '13.250.100.145:48540', '13.250.3.72:48540', '47.106.248.60:6111', '72.195.34.42:4145', '192.111.139.163:19404', '132.148.157.55:4492', '154.86.8.158:1080', '47.115.186.97:6111', '192.111.137.37:18762', '91.198.137.31:3558', '104.255.170.67:62625', '162.144.64.112:10555', '114.249.25.22:1080', '20.52.130.140:16379', '69.163.163.45:49538', '132.148.156.177:7529', '138.68.60.8:1080', '45.221.76.114:8080', '91.210.229.26:1080', '49.232.36.127:1080', '43.129.236.229:9090', '106.15.32.137:1080', '104.248.48.172:30588', '8.129.230.61:6111', '47.242.206.86:25503', '95.169.187.58:9054', '181.3.134.235:1080', '43.226.158.247:1080', '173.236.183.131:31353', '5.8.41.178:10086', '181.3.35.141:1080', '119.23.43.231:6111', '85.175.100.26:1059', '47.112.158.63:6111', '68.183.111.220:12563', '208.102.51.6:58208', '142.93.245.253:30588', '43.249.31.28:8888', '8.210.163.246:50013', '95.216.181.107:9090', '209.97.150.167:1080', '159.75.49.140:10808', '31.25.243.40:9469', '163.172.45.77:9050', '91.198.137.31:3553', '139.198.179.174:3128', '47.115.121.150:6111', '154.81.181.120:1080', '67.205.145.40:10581', '91.198.137.31:3595', '184.181.217.210:4145', '97.74.230.87:26236', '1.65.196.114:1080', '125.115.168.4:1080', '114.249.28.246:1080', '91.198.137.31:3517', '51.68.37.126:9050', '43.224.10.36:6667', '104.255.170.67:53468', '72.223.168.67:4145', '154.86.10.149:1080', '88.202.177.242:1085', '8.210.251.244:6666', '192.111.129.150:4145', '154.86.9.158:1080', '45.147.177.8:1080', '180.106.23.105:1080', '202.98.37.78:1080', '31.25.243.40:9102', '139.162.78.109:1080', '72.210.252.137:4145', '69.163.163.183:1645', '8.210.163.246:50045', '54.254.195.169:8293', '185.107.82.164:3128', '104.255.170.63:57529', '192.111.135.18:18301', '49.71.71.82:1080', '134.209.29.120:1080', '192.227.223.211:8090', '69.163.162.163:24260', '47.115.172.213:6111', '8.129.130.96:6111', '207.148.74.163:32389', '181.102.46.87:8000', '192.169.249.15:63868', '192.111.139.165:19402', '132.148.155.205:4492', '5.8.41.180:10086', '103.21.163.76:6667', '47.119.156.127:6111', '154.86.8.154:1080', '91.198.137.31:3568', '60.168.238.52:1080', '72.195.34.41:4145', '104.255.170.63:58163', '104.255.170.63:58890', '186.126.159.195:1080', '3.144.47.49:9300', '45.150.236.45:8888', '176.9.65.8:36916', '216.126.198.206:1080', '154.86.8.135:1080', '114.246.180.18:10808', '61.38.252.171:1080', '119.23.42.70:6111', '18.117.219.250:9999', '144.202.78.73:20052', '134.175.38.75:8080', '8.129.233.218:6111', '8.129.231.107:6111', '119.23.54.238:6111', '184.181.217.204:4145', '27.128.245.87:9999', '194.190.83.68:1080', '104.248.48.186:30588', '158.255.208.212:8888', '163.172.45.77:9051', '123.168.202.103:1080', '119.23.108.149:6111', '18.183.192.89:10086', '47.243.44.22:80', '142.93.245.214:30588', '51.81.6.160:10366', '91.198.137.31:3592', '47.106.238.117:6111', '31.128.248.2:1080', '186.126.32.23:1080', 
'23.235.157.31:10086', '182.116.239.165:1080', '188.166.104.152:50725', '194.79.29.196:12345', '185.117.244.136:9050', '97.74.230.87:47150', '146.59.178.57:9300', '8.135.28.152:1080', '154.86.11.137:1080', '66.135.227.181:4145', '67.201.33.10:25283', '174.64.199.82:4145', '103.241.227.110:6667', '88.198.50.103:1080', '161.35.70.249:1080', '5.182.39.88:9897', '13.250.30.88:48540', '172.105.36.167:10843', '104.255.170.91:59874', '120.78.227.197:6111', '167.99.117.75:1081', '192.169.201.24:27229', '47.115.173.157:6111', '27.19.85.186:1080', '47.106.178.207:6111', '8.129.112.106:6111', '43.224.10.13:6667', '104.255.170.63:62625', '54.254.195.169:43778', '5.182.39.88:9682', '178.170.39.27:8081', '75.119.201.215:44771', '154.86.14.38:1080', '174.75.211.222:4145', '185.179.196.19:1083', '5.182.39.88:9712', '85.175.100.26:2067', '8.129.231.35:6111', '24.249.199.12:4145', '31.25.243.40:9274', '47.115.80.78:6111', '117.27.76.153:1080', '107.172.5.146:1080', '104.255.170.68:53763', '162.144.64.112:23779', '8.129.38.124:6111', '39.108.118.227:6111', '191.96.42.80:1080', '27.128.173.36:9999', '220.134.51.150:5678', '45.79.193.124:7158', '104.255.170.70:58163', '159.69.204.30:10738', '43.224.10.43:6667', '72.221.172.203:4145', '69.163.165.137:1645', '69.163.163.63:15268', '119.23.234.50:6111', '104.255.170.89:58890', '130.185.77.48:34424', '167.172.194.230:34454', '154.81.174.49:1080', '116.237.130.74:1080', '185.179.196.19:1084', '85.216.127.185:1080', '119.123.76.14:1080', '5.182.39.88:9739', '94.189.237.49:1080', '104.255.170.90:59874', '119.23.41.225:6111', '174.138.174.210:80', '198.199.86.11:1080', '154.81.181.145:1080', '104.238.111.167:19828', '80.78.73.81:1080', '98.178.72.8:4145', '72.221.196.157:35904', '185.150.117.41:20790', '91.198.137.31:3505', '47.119.112.206:6111', '98.162.25.29:31679', '123.54.49.27:28388', '72.195.34.58:4145', '23.235.157.78:10086', '181.6.76.144:1080', '106.75.173.103:3000', '192.252.214.20:15864', '154.86.14.56:1080', '116.30.220.172:1080', '106.55.25.18:1080', '192.252.215.2:4145', '8.129.60.111:6111', '8.129.186.1:6111', '167.71.5.83:1080', '181.6.111.203:8080', '47.112.119.149:6111', '213.136.89.190:34477', '117.35.253.242:3000', '45.77.209.51:1080', '5.182.39.88:9516', '8.129.230.113:6111', '154.86.14.53:1080', '192.111.135.17:18302', '47.112.166.43:6111', '113.116.122.40:1080', '120.79.192.116:1080', '160.251.45.174:5432', '47.119.152.7:6111', '47.119.157.183:6111', '159.69.204.30:10062', '106.75.171.132:3000', '88.198.24.108:1080', '43.129.97.182:1080', '47.100.138.228:8023', '104.255.170.63:51656', '134.209.191.118:8528', '106.225.137.86:1080', '183.15.89.250:1088', '47.106.183.183:6111', '91.198.137.31:3508', '66.49.207.24:9050', '198.199.86.148:15076', '119.123.173.95:1080', '132.148.129.108:44783', '123.129.219.11:1080', '150.109.93.240:12345', '178.62.79.49:17173', '103.73.75.33:1080', '192.254.94.21:38888', '210.16.73.84:1080', '5.199.174.242:27931', '20.78.252.183:1080', '144.202.103.75:50001', '176.9.119.170:1080', '8.210.163.246:50041', '134.209.191.118:8522', '198.199.86.148:22676', '31.25.243.40:9603', '86.107.197.228:4145', '47.106.98.34:1080', '104.255.170.65:53468', '91.198.137.31:3548', '222.129.38.138:57114', '47.92.33.145:1080', '72.210.252.134:46164', '47.115.134.144:6111', '166.62.85.224:64386', '183.15.123.232:1088', '13.250.100.165:48540', '27.116.51.85:6667', '47.106.187.183:6111', '119.136.91.74:1080', '154.86.13.62:1080', '47.105.138.127:8888', '89.162.185.34:1080', '146.71.84.122:1080', '176.114.216.181:9050', '95.169.187.58:9052', 
'95.169.186.58:9052', '1.116.77.99:8080', '176.9.65.8:18204', '98.162.25.7:31653', '104.255.170.66:59874', '31.25.243.40:9632', '134.209.191.118:8529', '151.106.30.102:6776', '103.122.246.192:5588', '70.166.167.36:4145', '109.72.226.74:31337', '113.58.16.164:1080', '37.49.127.229:1080', '8.210.163.246:50031', '154.16.63.16:1080', '69.163.164.68:37884', '43.231.160.77:10808', '206.189.14.224:1080', '184.185.2.190:4145', '154.86.9.134:1080', '45.77.217.118:1080', '98.170.57.231:4145', '5.182.39.88:9921', '167.172.155.178:25881', '104.255.170.64:62625', '166.62.52.74:64064', '150.129.151.83:6667', '165.22.223.92:1718', '46.101.218.6:16092', '123.118.109.83:1080', '98.188.47.132:4145', '72.217.216.239:4145', '159.69.204.30:10558', '47.112.165.43:6111', '5.182.39.88:10000', '47.119.118.187:6111', '58.218.17.17:28388', '47.119.119.149:6111', '81.70.29.123:1080', '198.8.94.170:4145', '69.61.200.104:36181', '5.252.161.48:1080', '159.65.180.9:9050', '44.192.95.151:1080', '185.132.2.134:8080', '195.201.130.228:59435', '69.163.163.117:1645', '47.119.114.31:6111', '159.69.204.30:10278', '8.129.233.32:6111', '27.116.51.92:6667', '15.160.45.11:1080', '46.105.235.26:8083', '104.255.170.69:58163', '80.87.194.27:9050', '84.32.188.37:1454', '91.198.137.31:3584', '91.198.137.31:3530', '118.114.189.241:1080', '132.148.12.126:38844', '217.19.216.222:5678', '104.255.170.66:53763', '43.224.10.33:6667', '114.246.181.142:1080', '43.224.10.48:6667', '47.106.168.103:6111', '91.198.137.31:3509', '69.163.160.15:37884', '119.23.43.208:6111', '120.196.237.162:1081', '119.96.92.145:9999', '154.86.12.47:1080', '154.86.8.149:1080', '47.115.83.91:6111', '192.169.249.15:2364', '104.128.237.63:10086', '188.166.104.152:60966', '107.172.96.169:1088', '125.33.81.128:1080', '164.132.58.157:9050', '5.182.39.88:9968', '31.25.243.40:9230', '91.198.137.31:3519', '159.75.138.11:10808', '46.101.6.187:35828', '158.255.208.243:8888', '183.23.73.5:28388', '43.224.10.37:6667', '13.59.148.91:9999', '176.114.204.28:9050', '167.71.251.145:42461', '198.199.86.148:36404', '47.112.136.89:6111', '193.112.118.89:38888', '69.163.160.61:1645', '154.86.12.56:1080', '103.216.82.37:6667', '119.28.137.105:9150', '185.107.82.172:3128', '154.86.12.58:1080', '98.178.72.21:10919', '167.172.155.178:18321', '47.115.188.17:6111', '34.81.119.243:1080', '120.77.252.167:6111', '104.255.170.63:60757', '104.255.170.91:62625', '145.239.75.187:37607', '192.154.246.211:9000', '110.138.12.134:1080', '192.154.247.182:9000', '103.250.166.12:6667', '31.25.243.40:9451', '45.9.148.138:1080', '131.221.192.158:5678', '150.109.148.234:1234', '164.155.196.179:8081', '164.155.195.180:8081', '120.237.253.142:1080', '183.236.164.121:1081', '164.155.198.152:8081', '164.155.193.168:8081', '164.155.192.154:8081', '91.198.137.31:3520', '128.199.202.122:1080'] headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36 OPR/78.0.4093.153","Accept-Encoding":"gzip, deflate", "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "DNT":"1","Connection":"close", "Upgrade-Insecure-Requests":"1"} for proxy in proxylist: page = requests.get(url,headers=headers,proxies={'http://':proxy,'https://':proxy},timeout=2) if page.status_code==200: break else: continue soup1 = BeautifulSoup(page.content,"html.parser") soup2 = BeautifulSoup(soup1.prettify(), "html.parser") return soup2 ###Output _____no_output_____ ###Markdown Extracting the review data attributes ###Code def get_review_data(soup): 
reviewlist = [] reviews = soup.find_all('div',{'data-hook':'review'}) try: for item in reviews: review_dict = { 'product':soup.title.text.replace('Amazon.in:Customer reviews: ','').strip(), 'title': item.find(class_="a-size-base a-link-normal review-title a-color-base review-title-content a-text-bold").get_text().strip(), 'rating': float(item.find('i',{'data-hook':'review-star-rating'}).get_text().strip().replace('out of 5 stars','')), 'review': item.find('span',{'data-hook':'review-body'}).get_text().strip() } reviewlist.append(review_dict) except: pass return reviewlist ###Output _____no_output_____ ###Markdown Checking out the functions ###Code url = 'https://www.amazon.in/SQL-Complete-Reference-James-Groff/product-reviews/1259003884/ref=cm_cr_arp_d_paging_btm_next_2?ie=UTF8&reviewerType=all_reviews&pageNumber=2' soup = get_soup(url) reviewlist = get_review_data(soup) reviewlist ###Output _____no_output_____ ###Markdown Function for progress of extraction ###Code def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"): """ Call in a loop to create terminal progress bar @params: iteration - Required : current iteration (Int) total - Required : total iterations (Int) prefix - Optional : prefix string (Str) suffix - Optional : suffix string (Str) decimals - Optional : positive number of decimals in percent complete (Int) length - Optional : character length of bar (Int) fill - Optional : bar fill character (Str) printEnd - Optional : end character (e.g. "\r", "\r\n") (Str) """ percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total))) filledLength = int(length * iteration // total) bar = fill * filledLength + '-' * (length - filledLength) print(f'\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd) # Print New Line on Complete if iteration == total: print() ###Output _____no_output_____ ###Markdown Finding the number of pages to extract ###Code def find_limit(soup): limit = soup.find('div',{'data-hook':'cr-filter-info-review-rating-count'}) limit = limit.get_text().strip() limit_array = limit.split(" ") limit = int(limit_array[4].replace(',','')) return limit ###Output _____no_output_____ ###Markdown Final review extracting function ###Code def product_review_data(url,no_of_pages): product_data_list = [] product_data_list_final = [] printProgressBar(0,20,prefix = 'Progress:', suffix = 'Complete', length = 50) x = 1 timeout = time.time() + 5*no_of_pages while time.time()<timeout: url_array = url.split("/") if x==1: url_1 = 'https://www.amazon.in/'+url_array[3]+'/product-reviews/'+url_array[5]+'/ref=cm_cr_dp_d_show_all_btm?ie=UTF8&reviewerType=all_reviews' else: url_1 = 'https://www.amazon.in/'+url_array[3]+'/product-reviews/'+url_array[5]+'/ref=cm_cr_arp_d_paging_btm_next_'+str(x)+'?ie=UTF8&reviewerType=all_reviews&pageNumber='+str(x) try: soup_var = get_soup(url_1) product_data_list = get_review_data(soup_var) limit = find_limit(soup_var) num = math.ceil(limit/10) if num<no_of_pages: n=num elif num>=no_of_pages: n=no_of_pages if product_data_list==[]: continue except: continue product_data_list_final.extend(product_data_list) #print(len(product_data_list_final)) #time.sleep(0.1) printProgressBar(x,n,prefix = 'Progress:', suffix = 'Complete', length = 50) if x==n: break x = x + 1 if not soup_var.find('li',{'class':'a-disabled a-last'}): pass else: break df = pd.DataFrame(product_data_list_final) return df def load_Amazon_product_review_data(url,no_of_pages): #keyword = input("Enter a keyword to 
search : ") df_1 = product_review_data(url,no_of_pages) print("Loading Successful, Size of Dataframe : ",df_1.shape) return df_1 df = load_Amazon_product_review_data("https://www.amazon.in/SQL-Complete-Reference-James-Groff/dp/1259003884/ref=sr_1_3?keywords=SQL-Complete-Reference-James-Groff&qid=1641720819&sr=8-3",10) ###Output Progress: |██████████████████████████████████████████████████| 100.0% Complete Loading Successful, Size of Dataframe : (13, 4) ###Markdown Checking out the final Dataframe ###Code df ###Output _____no_output_____
Section 3/Part 3/3-3 Sequential Data.ipynb
###Markdown History-Based Regression RNN's[Documentation on PyTorch's Website](https://pytorch.org/docs/stable/nn.html)[LSTM's tutorial](https://pytorch.org/tutorials/beginner/nlp/sequence_models_tutorial.html) ###Code import torch import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable import torch.optim as optim import numpy as np from matplotlib import pyplot as plt %matplotlib inline seed = 42 # Allows exactly reproducible experiments np.random.seed(seed) torch.manual_seed(seed) # Can also fix Python's random library # Pytorch LSTM's can take a single input, hidden_state pair at a time, or an entire sequence. # For simplicity, we will design our network's forward operations to operate on the entire sequence at once. # The only reason to do otherwise would be if you want to pass information to an earlier part of the network in the next timestep. class SimpleRNN(torch.nn.Module): #Our batch shape for input x is (3, 32, 32) def __init__(self, breadth=100): super(SimpleRNN, self).__init__() # Process each input (mapping from 1 to breadth) self.fc1 = torch.nn.Linear(1, breadth) #Use an LSTM to process the features and create some history-updated information hidden_size = breadth # Heuristic for amount of information to output & preserve across time self.lstm = nn.LSTM(breadth, hidden_size) #Use a final layer to map back down to a single output self.fc2 = torch.nn.Linear(breadth, 1) def forward(self, sequence): # Assumes (in view) that only a single training example is passed. # This is for illustration purposes. #Computes the activation of the first fully connected layer inputs = F.relu(self.fc1(sequence)) # applies operation to each input in the sequence. hidden_sequence, (last_output, last_memory_state) = self.lstm(inputs.view(len(sequence), 1, -1)) # For every timestep, for this one training instance, there is a set of input features (of size breadth) #print("hidden", hidden_sequence) #print("last", last_output) final_result = self.fc2(last_output.view(1, -1)) # Assumes a single training example return(final_result) test_input = Variable(torch.from_numpy(np.array([0, 1, 2, 3]).astype(np.float32))) net=SimpleRNN() outputs = net(test_input.view(len(test_input), 1, -1)) print(outputs) inputs = Variable(torch.arange(0,10,.05)) # 200 points #true_vals = torch.mul(inputs, inputs) true_vals = torch.sin(inputs * inputs) plt.plot(list(inputs.data), list(true_vals.data)) plt.show() training_cutoff = len(inputs)//2 # A good idea to separate train and test # The following are examples: example_sequence_input = inputs[:10] example_sequence_output = inputs[10] print(example_sequence_input, example_sequence_output) example_fixed_history_input = inputs[6:10] example_fixed_history_output = inputs[10] print(example_fixed_history_input, example_fixed_history_output) ###Output _____no_output_____
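###Markdown To make the fixed-history idea concrete, here is a minimal training sketch that slides a window over the `sin(x*x)` series and asks the network for the next value. It assumes `net`, `true_vals` and `training_cutoff` from the cells above are still in scope; the window length, optimizer, learning rate and epoch count are illustrative choices rather than anything prescribed by the material. ###Code
history = 10                       # illustrative window length
criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=1e-3)

for epoch in range(5):             # a handful of epochs, just to show the mechanics
    total_loss = 0.0
    for t in range(history, training_cutoff):
        window = true_vals[t - history:t].view(history, 1, -1)   # (seq_len, batch=1, features=1)
        target = true_vals[t].view(1, 1)
        optimizer.zero_grad()
        prediction = net(window)                                 # shape (1, 1)
        loss = criterion(prediction, target)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    print(f"epoch {epoch}: mean training loss {total_loss / (training_cutoff - history):.4f}")
###Output _____no_output_____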
notebooks/cloudsat_precip_Fanapi-PA.ipynb
###Markdown Table of Contents1&nbsp;&nbsp;Introduction2&nbsp;&nbsp;Read in the height and reflectivity fields3&nbsp;&nbsp;Now go get the rain rate4&nbsp;&nbsp;Make a masked array of the reflectivity so that pcolormesh will plot it5&nbsp;&nbsp;Find the part of the orbiting that corresponds to the 3 minutes containing the storm6&nbsp;&nbsp;convert time to distance by using pyproj to get the greatcircle distance between shots7&nbsp;&nbsp;Make the plot assuming that height is the same for every shot8&nbsp;&nbsp;Now add the rain rate9&nbsp;&nbsp;Repeat for precipitatable liquid water and retrieval uncertainty ###Code import pdb ###Output _____no_output_____ ###Markdown IntroductionThis notebook finds the point at which the ECMWF temperature=0 deg C for each radar pulse and overlays that on the reflectivity plot to check to see whether the bright band occurs at the freezing level Read in the height and reflectivity fields ###Code from importlib import reload import numpy as np import datetime as dt from datetime import timezone as tz from matplotlib import pyplot as plt import pyproj from numpy import ma import a301 from a301.cloudsat import get_geo from pathlib import Path from pyhdf.SD import SD, SDC # # new functions to read vdata and sds arrays # from a301.cloudsat import HDFvd_read, HDFsd_read plt.style.use('ggplot') z_file= list(a301.data_dir.glob('*GEOPROF_GRANULE*hdf'))[1] lats,lons,date_times,prof_times,dem_elevation=get_geo(z_file) radar_reflectivity, radar_attrs = HDFsd_read(z_file,'Radar_Reflectivity') radar_scale = radar_attrs['factor'] radar_missing = radar_attrs['missing'] radar_height, height_attrs = HDFsd_read(z_file,'Height') meters2km=1.e3 print(z_file) ###Output C:\Users\Pearl\Desktop\ATSC 301\a301_code\data\2010259161922_23334_CS_2B-GEOPROF_GRANULE_P_R04_E03.hdf ###Markdown Now go get the rain ratemask all values ###Code r_file= list(a301.data_dir.glob('*2C-RAIN-PROFILE*hdf'))[1] rain_rate = HDFvd_read(r_file,'rain_rate',vgroup='Data Fields') invalid = (rain_rate == -9999.) rain_rate[invalid] = np.nan hit = rain_rate < 0.
rain_rate[hit] = np.abs(rain_rate[hit]) plt.plot(rain_rate); ###Output C:\Users\Pearl\Anaconda3\lib\site-packages\ipykernel_launcher.py:5: RuntimeWarning: invalid value encountered in less """ ###Markdown Make a masked array of the reflectivity so that pcolormesh will plot itnote that I need to find the missing data before I divide by factor=100 toconvert from int16 to float ###Code hit=(radar_reflectivity == radar_missing) radar_reflectivity=radar_reflectivity.astype(np.float) radar_reflectivity[hit]=np.nan zvals = radar_reflectivity/radar_scale zvals=ma.masked_invalid(zvals) ###Output _____no_output_____ ###Markdown Find the part of the orbiting that corresponds to the 3 minutes containing the stormYou need to enter the start_hour and start_minute for the start time of your cyclone in the granule ###Code first_time=date_times[0] print(f'orbit start: {first_time}') start_hour=17 start_minute=16 storm_start=starttime=dt.datetime(first_time.year,first_time.month,first_time.day, start_hour,start_minute,0,tzinfo=tz.utc) # # get 3 minutes of data from the storm_start # storm_stop=storm_start + dt.timedelta(minutes=3) print(f'storm start: {storm_start}') time_hit = np.logical_and(date_times > storm_start,date_times < storm_stop) print(time_hit.shape) print(rain_rate.shape) storm_lats = lats[time_hit] storm_lons=lons[time_hit] storm_prof_times=prof_times[time_hit] storm_zvals=zvals[time_hit,:] storm_height=radar_height[time_hit,:] storm_date_times=date_times[time_hit] rain_rate=rain_rate[time_hit] ###Output orbit start: 2010-09-16 16:19:29.606000+00:00 storm start: 2010-09-16 17:16:00+00:00 (37081,) (37081, 1) ###Markdown convert time to distance by using pyproj to get the greatcircle distance between shots ###Code great_circle=pyproj.Geod(ellps='WGS84') distance=[0] start=(storm_lons[0],storm_lats[0]) for index in np.arange(1,len(storm_lons)): azi12,azi21,step= great_circle.inv(storm_lons[index-1],storm_lats[index-1], storm_lons[index],storm_lats[index]) distance.append(distance[index-1] + step) distance=np.array(distance)/meters2km ###Output _____no_output_____ ###Markdown Make the plot assuming that height is the same for every shotWe need to customize the subplots so we can share the x axis between the radar reflectivityand the rain_rate, and adjust the sizes to hold a colorbar ###Code %matplotlib inline from matplotlib import cm from matplotlib.colors import Normalize from mpl_toolkits.axes_grid1 import make_axes_locatable def plot_field2(distance,height,field,fig,cmap=None,norm=None): """ draw a 2 panel plot with different panel sizes. Put the radar reflectivity in the top panel with a colorbar along the bottom, and pass the second axis back to be filled in later uses the sharex keyword to give both plots the same x axis (distance) and the gridspec class to lay out the grid https://stackoverflow.com/questions/10388462/matplotlib-different-size-subplots """ from matplotlib import gridspec gs = gridspec.GridSpec(2, 1, height_ratios=[2, 1]) ax1 = fig.add_subplot(gs[0]) ax2 = fig.add_subplot(gs[1],sharex=ax1) if cmap is None: cmap=cm.inferno col=ax1.pcolormesh(distance,height,field,cmap=cmap, norm=the_norm) #https://stackoverflow.com/questions/18195758/set-matplotlib-colorbar-size-to-match-graph # create an axes on the bottom side of ax1. The height of cax will be 5% # of ax and the padding between cax and ax will be fixed at 0.55 inch. 
divider = make_axes_locatable(ax1) cax = divider.append_axes("bottom", size="5%", pad=0.55) ax1.figure.colorbar(col,extend='both',cax=cax,orientation='horizontal') return ax1, ax2 vmin=-30 vmax=20 the_norm=Normalize(vmin=vmin,vmax=vmax,clip=False) cmap_ref=cm.plasma cmap_ref.set_over('w') cmap_ref.set_under('b',alpha=0.2) cmap_ref.set_bad('0.75') #75% grey cloud_height_km=radar_height[0,:]/meters2km fig = plt.figure(figsize=(15, 8)) ax1, ax2 = plot_field2(distance,cloud_height_km,storm_zvals.T,fig,cmap=cmap_ref,norm=the_norm) ax1.set(ylim=[0,17],xlim=(0,1200)) ax1.set(xlabel='distance (km)',ylabel='height (km)', title='equivalent radar reflectivity in dbZe'); ###Output _____no_output_____ ###Markdown Now add the rain rateUse the second axis to draw the rain rate and redraw the figure ###Code ax2.plot(distance,rain_rate) ax2.set(xlabel='distance (km)',ylabel='rain rate (mm/hour)') display(fig) ###Output _____no_output_____ ###Markdown Repeat for precipitatable liquid water and retrieval uncertaintyMake a new plot pair -- for variable information see the [dataset docs](http://www.cloudsat.cira.colostate.edu/data-products/level-2c/2c-rain-profile?term=56) ###Code liquid_water, lw_attributes = HDFsd_read(r_file,'precip_liquid_water') lw_attributes precip_uncertainty = HDFvd_read(r_file,'rain_rate_uncertainty',vgroup='Data Fields') storm_liquid=liquid_water[time_hit,:]/lw_attributes['factor'] precip_uncert=precip_uncertainty[time_hit] vmin=0.01 vmax=0.5 the_norm=Normalize(vmin=vmin,vmax=vmax,clip=False) cmap_ref=cm.plasma cmap_ref.set_over('w') cmap_ref.set_under('b',alpha=0.02) cmap_ref.set_bad('0.75') #75% grey fig = plt.figure(figsize=(15, 8)) ax1, ax2 = plot_field2(distance,cloud_height_km,storm_liquid.T,fig,cmap=cmap_ref,norm=the_norm) ax1.set(ylim=[0,6],xlim=(0,1200)) ax1.set(xlabel='distance (km)',ylabel='height (km)', title='precipitable liquid water content (g/m^3)') ax2.plot(distance,precip_uncert) ax2.set(ylim=[0,15],xlabel='distance (km)',ylabel='rain rate uncertainty (%)'); lw_attributes ###Output _____no_output_____
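###Markdown One small design note on the distance calculation: `pyproj.Geod.inv` also accepts array arguments, so the per-shot loop in the "convert time to distance" section can be written without an explicit Python loop. The sketch below assumes `storm_lons`, `storm_lats`, `great_circle`, `meters2km` and the loop-built `distance` array from the cells above, and just checks that the two versions agree. ###Code
azi12_vec, azi21_vec, steps = great_circle.inv(storm_lons[:-1], storm_lats[:-1],
                                               storm_lons[1:], storm_lats[1:])
distance_vec = np.concatenate(([0.], np.cumsum(steps))) / meters2km
print('loop and vectorized distances agree:', np.allclose(distance_vec, distance))
###Output _____no_output_____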
_notebooks/2021-10-06-Logistic-Regression-For_Real.ipynb
###Markdown Create dataset ###Code f = lambda x: x X, y = z.binary_classification_dataset(f, 1000, scale=2) fig = plt.figure(figsize = (6, 6)) ax1 = fig.add_subplot(111) ax1.set_xlim(left = -1.05, right=1.05) ax1.set_ylim(bottom = -1.05, top=1.05) z.plot_2Dpoints(X.numpy(), y.numpy(), ax1) model = z.LogisticClassifier() criterion = nn.BCELoss() optimizer = optim.Adam(model.parameters(), lr=0.001) ###Output _____no_output_____ ###Markdown Train model ###Code n_epochs = 20000 losses = [] fig = plt.figure(figsize=(6,6)) ax = fig.add_axes([0,0,1,1]) ax.set_xlim(left = -1.05, right=1.05) ax.set_ylim(bottom = -1.05, top=1.05) for epoch in range(n_epochs): optimizer.zero_grad() outputs = model(X) loss = criterion(outputs, y) loss.backward() optimizer.step() losses.append(loss.item()) if epoch % (n_epochs // 100) == 0: weights = list(model.parameters())[0][0].detach().numpy() bias = list(model.parameters())[1][0].detach().numpy() z.plot_line(-weights[0]/weights[1], bias/weights[1], ax, color='g--') weights = list(model.parameters())[0][0].detach().numpy() bias = list(model.parameters())[1][0].detach().numpy() z.plot_line(-weights[0]/weights[1], bias/weights[1], ax, color='black') z.plot_2Dpoints(X.detach().numpy(), y.detach().numpy(), ax) plt.show() # Plotting the loss plt.title("Training Loss") plt.xlabel('Number of epochs') plt.ylabel('Loss') plt.plot(list(range(len(losses))), losses) plt.show() print("Final training loss:", losses[-1]) fig = plt.figure(figsize=(6,6)) ax=fig.add_axes([0,0,1,1]) ax_list = fig.axes print(ax_list) ###Output [<matplotlib.axes._axes.Axes object at 0x7f490be0a280>]
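###Markdown A quick accuracy check on the training points. This sketch assumes `model`, `X` and `y` from above, and that `z.LogisticClassifier` ends in a sigmoid so its outputs are probabilities in (0, 1), which the use of `nn.BCELoss` already implies. ###Code
import torch   # harmless if torch is already imported

with torch.no_grad():
    probs = model(X)                  # probabilities, given the BCELoss setup
    preds = (probs > 0.5).float()     # threshold at 0.5
    accuracy = (preds == y).float().mean().item()
print(f"training accuracy: {accuracy:.3f}")
###Output _____no_output_____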
examples/denoising2D_probabilistic/2_prediction.ipynb
###Markdown Demo: Probabilistic CARE model for denoising of synthetic 2D dataThis notebook demonstrates applying a probabilistic CARE model for a 2D denoising task, assuming that training was already completed via [1_training.ipynb](1_training.ipynb). The trained model is assumed to be located in the folder `models` with the name `my_model`.More documentation is available at http://csbdeep.bioimagecomputing.com/doc/. ###Code from __future__ import print_function, unicode_literals, absolute_import, division import numpy as np import matplotlib.pyplot as plt %matplotlib inline %config InlineBackend.figure_format = 'retina' from csbdeep.utils import Path, download_and_extract_zip_file, plot_some from csbdeep.io import load_training_data, save_tiff_imagej_compatible from csbdeep.models import CARE ###Output _____no_output_____ ###Markdown Download example dataThe example data should have been downloaded in [1_training.ipynb](1_training.ipynb). Just in case, we will download it here again if it's not already present. ###Code download_and_extract_zip_file ( url = 'http://csbdeep.bioimagecomputing.com/example_data/synthetic_disks.zip', targetdir = 'data', ) ###Output _____no_output_____ ###Markdown Load the validation images using during model training. ###Code X_val, Y_val = load_training_data('data/synthetic_disks/data.npz', validation_split=0.1, verbose=True)[1] ###Output _____no_output_____ ###Markdown We will apply the trained CARE model here to restore one validation image `x` (with associated ground truth `y`). ###Code y = Y_val[2,...,0] x = X_val[2,...,0] axes = 'YX' ###Output _____no_output_____ ###Markdown Input image and associated ground truthPlot the test image pair. ###Code print('image size =', x.shape) print('image axes =', axes) plt.figure(figsize=(16,10)) plot_some(np.stack([x,y]), title_list=[['input','target (GT)']]); ###Output _____no_output_____ ###Markdown CARE modelLoad trained model (located in base directory `models` with name `my_model`) from disk. The configuration was saved during training and is automatically loaded when `CARE` is initialized with `config=None`. ###Code model = CARE(config=None, name='my_model', basedir='models') ###Output _____no_output_____ ###Markdown Typical CARE predictionPredict the restored image as in the non-probabilistic case if you're only interested in a restored image. But actually, the network returns the expected restored image for the probabilistic network outputs.Note 1: Since the synthetic image is already normalized, we don't need to do additional normalization.**Note 2**: *Out of memory* problems during `model.predict` often indicate that the GPU is used by another process. In particular, shut down the training notebook before running the prediction (you may need to restart this notebook). ###Code restored = model.predict(x, axes, normalizer=None) plt.figure(figsize=(16,10)) plot_some(np.stack([x,restored]), title_list=[['input','expected restored image']]); ###Output _____no_output_____ ###Markdown Save restored imageSave the restored image stack as a ImageJ-compatible TIFF image, i.e. the image can be opened in ImageJ/Fiji with correct axes semantics. ###Code Path('results').mkdir(exist_ok=True) save_tiff_imagej_compatible('results/%s_validation_image.tif' % model.name, restored, axes) ###Output _____no_output_____ ###Markdown Probabilistic CARE predictionWe now predict the per-pixel Laplace distributions and return an object to work with these. 
###Code restored_prob = model.predict_probabilistic(x, axes, normalizer=None) ###Output _____no_output_____ ###Markdown Plot the *mean* and *scale* parameters of the per-pixel Laplace distributions. ###Code plt.figure(figsize=(16,10)) plot_some(np.stack([restored_prob.mean(),restored_prob.scale()]), title_list=[['mean','scale']]); ###Output _____no_output_____ ###Markdown Plot the *variance* and *entropy* parameters of the per-pixel Laplace distributions. ###Code plt.figure(figsize=(16,10)) plot_some(np.stack([restored_prob.var(),restored_prob.entropy()]), title_list=[['variance','entropy']]); ###Output _____no_output_____ ###Markdown Sampling restored imagesDraw 50 samples of the distribution of the restored image. Plot the first 3 samples. ###Code samples = np.stack(restored_prob.sampling_generator(50)) plt.figure(figsize=(16,5)) plot_some(samples[:3], pmin=0.1,pmax=99.9); ###Output _____no_output_____ ###Markdown Make an animation of the 50 samples. ###Code from matplotlib import animation from IPython.display import HTML fig = plt.figure(figsize=(8,8)) im = plt.imshow(samples[0], vmin=np.percentile(samples,0.1), vmax=np.percentile(samples,99.9), cmap='magma') plt.close() def updatefig(j): im.set_array(samples[j]) return [im] anim = animation.FuncAnimation(fig, updatefig, frames=len(samples), interval=100) HTML(anim.to_jshtml()) ###Output _____no_output_____ ###Markdown Inspect predicted distribution along line profile with credible intervals ###Code i = 61 line = restored_prob[i] n = len(line) plt.figure(figsize=(16,9)) plt.subplot(211) plt.imshow(restored_prob.mean()[i-15:i+15], cmap='magma') plt.plot(range(n),15*np.ones(n),'--w',linewidth=2) plt.title('expected restored image') plt.xlim(0,n-1); plt.axis('off') plt.subplot(212) q = 0.025 plt.fill_between(range(n), line.ppf(q), line.ppf(1-q), alpha=0.5, label='%.0f%% credible interval'%(100*(1-2*q))) plt.plot(line.mean(),linewidth=3, label='expected restored image') plt.plot(y[i],'--',linewidth=3, label='ground truth') plt.plot(x[i],':',linewidth=1, label='input image') plt.title('line profile') plt.xlim(0,n-1); plt.legend(loc='lower right') None; ###Output _____no_output_____
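###Markdown As a complement to the line profile, the 50 samples drawn above can also be turned into a whole-image uncertainty map with plain numpy; nothing beyond the `samples` array is assumed here. ###Code
lo, hi = np.percentile(samples, [2.5, 97.5], axis=0)   # pixelwise bounds over the sample axis
ci_width = hi - lo                                     # width of the central 95% credible interval
plt.figure(figsize=(8,8))
plt.imshow(ci_width, cmap='magma')
plt.colorbar(label='95% interval width')
plt.title('sampling-based per-pixel uncertainty')
plt.axis('off')
None;
###Output _____no_output_____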
image-classificaton/2.2.tensorflow_training.ipynb
###Markdown [모듈 3.2] SageMaker TensorFlow 훈련 Download | Structure | Preprocessing (TensorFlow) | **Train Model (TensorFlow)** (4단계 중의 4/4) [알림] conda_tensorflow2_p36 커널 과 함께 사용해야 합니다.* 이 노트북은 `1.1.download_data`, `1.2.structuring_data` 및 `3.1.tensorflow_preprocessing`으로 시작하는 일련의 노트북의 일부입니다. 노트북 요약---- SageMaker 에서 관리형 EC2 인스턴스로 훈련을 하기 위해서 "훈련 스크립트"의 주요한 내용을 확인 합니다.- SageMaker Estimator 설정을 하고 모델 훈련을 합니다.- 훈련된 모델의 가중치를 S3에서 다운로드 받아서 모델을 생성합니다.- 테스트 데이터 셋트를 생성 합니다.- 생성된 모델 및 테스트 세트를 통해서 추론을 하고 실제 예측이 잘 되었는지를 확인 합나다. 참고- SageMaker-Tensorflow-Step-by-Step 워크샵 - 세이지 메이커 TF Getting Started, Horovod, Data Distributed Parallelism 포함 - https://github.com/gonsoomoon-ml/SageMaker-Tensorflow-Step-By-Step 0. 환경 설정___ ###Code import pickle import pathlib import sagemaker import subprocess import numpy as np import tensorflow as tf import matplotlib.pyplot as plt import tensorflow_datasets as tfds from sagemaker.tensorflow import TensorFlow print(f"tensorflow : {tf.__version__}") ###Output tensorflow : 2.3.1 ###Markdown 버킷 이름 및 훈련/검증 데이터 셋 로딩 ###Code %store -r bucket_name %store -r train_tf_s3_uri %store -r val_tf_s3_uri ###Output _____no_output_____ ###Markdown 1. 훈련 스크립트 리뷰___ Helper 함수이러한 도우미 함수는 훈련 전에 TFRecords 데이터 세트에 수행해야 하는 변환을 정의합니다. 더 자세한 정보는 이 시리즈의 전처리 가이드를 참조하세요. ###Code !pygmentize "training_tensorflow/tensorflow_train.py" | sed -n 7,27p ###Output def tfrecord_parser(record): features = { "height": tf.io.FixedLenFeature([], tf.int64), "width": tf.io.FixedLenFeature([], tf.int64), "depth": tf.io.FixedLenFeature([], tf.int64), "label": tf.io.FixedLenFeature([], tf.int64), "image_raw": tf.io.FixedLenFeature([], tf.string), } parsed_features = tf.io.parse_single_example(record, features) return tf.io.decode_jpeg(parsed_features["image_raw"]), parsed_features["label"] def augment(image, label): image = tf.image.random_flip_left_right(image) image = tf.image.random_flip_up_down(image) image = tf.image.random_brightness(image, 0.2) image = tf.image.random_hue(image, 0.1) return (image, label) ###Markdown 훈련 스크립트 메인 함수훈련 스크립트는 if 문에서 훈련 코드를 래핑합니다. ###Code !pygmentize "training_tensorflow/tensorflow_train.py" | sed -n 29p ###Output if __name__ == "__main__": ###Markdown 입력 파라미터 분석이러한 입력 파라미터는 하이퍼파라미터 인수와 fit 메소드에 대한 입력 인수를 통해 전달됩니다. ###Code !pygmentize "training_tensorflow/tensorflow_train.py" | sed -n 29,39p ###Output if __name__ == "__main__": arg_parser = argparse.ArgumentParser() arg_parser.add_argument("--epochs", type=int, default=50) arg_parser.add_argument("--batch-size", type=int, default=4) arg_parser.add_argument("--learning-rate", type=float, default=0.001) arg_parser.add_argument("--train-dir", type=str, default=os.environ.get("SM_CHANNEL_TRAINING")) arg_parser.add_argument( "--validation-dir", type=str, default=os.environ.get("SM_CHANNEL_VALIDATION") ) ###Markdown 병렬화 구성을 위해 autotune 사용- 훈련 속도를 높이기 위해 TensorFlow는 특정 작업을 여러 코어에 분산할 수 있습니다. 작업을 분산할 최적의 작업자 수를 결정하는 것은 어려울 수 있습니다(너무 적으면 GPU 활용도가 낮고 너무 많으면 작업 예약 오버헤드로 인해 지연이 발생함). - TensorFlow는 훈련을 수행하는 컴퓨터를 기반으로 적절한 양을 결정하는 방법과 함께 제공됩니다. ###Code !pygmentize "training_tensorflow/tensorflow_train.py" | sed -n 42p ###Output AUTOTUNE = tf.data.experimental.AUTOTUNE ###Markdown 데이터세트 로드- 훈련 및 검증 데이터세트가 로드됩니다. - 이미지를 TFRecord 파일로 변환할 때 크기 조정이나 스케일을 했기 때문에 다시 수행할 필요가 없습니다.- 훈련 데이터에는 증강이 적용되지만 검증 데이터에는 적용되지 않습니다. 
###Code !pygmentize "training_tensorflow/tensorflow_train.py" | sed -n 47,65p ###Output train_ds = tf.data.TFRecordDataset( filenames=[train_data.as_posix()], num_parallel_reads=AUTOTUNE ) val_ds = tf.data.TFRecordDataset(filenames=[val_data.as_posix()], num_parallel_reads=AUTOTUNE) train_ds = ( train_ds.map(tfrecord_parser, num_parallel_calls=AUTOTUNE) .map(augment, num_parallel_calls=AUTOTUNE) .batch(args.batch_size) .prefetch(AUTOTUNE) ) val_ds = ( val_ds.map(tfrecord_parser, num_parallel_calls=AUTOTUNE) .batch(args.batch_size) .prefetch(AUTOTUNE) ) ###Markdown GPU를 사용할 수 있는지 확인GPU를 사용할 수 있는 경우 훈련 장치를 GPU로 설정하고, 그렇지 않으면 CPU를 사용합니다. ###Code !pygmentize "training_tensorflow/tensorflow_train.py" | sed -n 66,71p ###Output gpu_devices = tf.config.experimental.list_physical_devices("GPU") if any(gpu_devices): device = gpu_devices[0].device_type else: device = "/cpu:0" print(f"Training with: {device}") ###Markdown 기본 모델 생성 및 수정- 먼저 장치 컨텍스트를 설정하여 적절한 장치(GPU 또는 CPU)를 사용하고 있는지 확인합니다. - 그런 다음 ResNet50 아키텍처를 사용하고 ImageNet 데이터 세트에서 사전 훈련된 가중치로 가중치를 초기화합니다. - Pretained 모델의 최상위 계층은 ImageNet 이미지에 대해 구성되어 있으므로 분류 계층(`inlcude_top=False`)을 제거- 11마리 동물에 대한 분류 계층으로 교체해야 합니다. ###Code !pygmentize "training_tensorflow/tensorflow_train.py" | sed -n 72,80p ###Output with tf.device(device): base_model = tf.keras.applications.ResNet50(include_top=False, weights="imagenet") global_avg = tf.keras.layers.GlobalAveragePooling2D()(base_model.output) output = tf.keras.layers.Dense(11, activation="softmax")(global_avg) model = tf.keras.Model(inputs=base_model.input, outputs=output) ###Markdown 옵티마이저 정의 및 모델 학습이 예에서는 SGD를 사용하여 모델의 가중치를 최적화합니다. 훈련이 끝나면 최고의 검증 정확도를 수행한 에포크에 대한 가중치가 저장되므로 나중에 테스트 데이터 세트에 대한 예측을 위해 모델을 로드할 수 있습니다. ###Code !pygmentize "training_tensorflow/tensorflow_train.py" | sed -n 80,90p ###Output optimizer = tf.keras.optimizers.SGD(lr=args.learning_rate, momentum=0.9, decay=0.01) model.compile( loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"] ) print("Beginning Training...") model.fit(train_ds, epochs=args.epochs, validation_data=val_ds, verbose=2) model.save("/opt/ml/model/model") ###Markdown 2. SageMaker Estimator 설정 ___교육에 사용할 리소스와 리소스 구성 방법을 정의합니다. 상세 사항은 아래를 참조 하세요.- [TensorFlow Estimator](https://sagemaker.readthedocs.io/en/stable/frameworks/tensorflow/sagemaker.tensorflow.htmltensorflow-estimator) 알고리즘 하이퍼파라미터___- 하이퍼파라미터는 훈련이 시작되기 전에 설정한 알고리즘의 튜닝 파라미터를 나타냅니다. 일반적으로 기본값으로 미리 설정되어 있으므로 지정하지 않으면 훈련 알고리즘을 계속 실행할 수 있지만 일반적으로 최적의 결과를 얻으려면 조정이 필요합니다. 이러한 값이 무엇인지는 전적으로 데이터 세트에 따라 다릅니다. - 불행히도 최고의 설정이 무엇인지 알려주는 공식은 없습니다. 직접 시도하고 결과를 확인해야 하지만 선택하는 데 도움이 되는 모범 사례와 팁이 있습니다.* **학습률** - 교육의 각 배치 후에 우리는 해당 배치에 대해 가능한 최상의 결과를 제공하기 위해 모델의 가중치를 업데이트합니다. 학습률은 가중치를 업데이트해야 하는 정도를 제어합니다. 모범 사례는 0.2에서 .001 사이의 값을 지정하며 일반적으로 1보다 높지 않습니다. 학습률이 높을수록 훈련이 최적의 가중치로 더 빨리 수렴되지만 너무 빠르면 목표를 초과할 수 있습니다. 이 예에서는 사전 훈련된 모델의 가중치를 사용하므로 가중치가 이미 최적화되어 있고 가중치에서 너무 멀리 이동하고 싶지 않기 때문에 더 낮은 학습률로 시작하려고 합니다.* **에포크** - 에포크는 훈련 세트의 한 주기를 나타내며 훈련할 에포크가 많다는 것은 정확도를 향상시킬 기회가 더 많다는 것을 의미합니다. 적절한 값은 시간과 예산 제약에 따라 5~25 Epoch 범위입니다. 이상적으로는 검증 정확도가 안정되기 직전에 값이 올바른 Epoch 수 입니다.* **Batch Size** - 일괄 학습은 RAM에 보관해야 하는 데이터의 양을 줄이고 학습 알고리즘의 속도를 높일 수 있습니다. 이러한 이유로 훈련 데이터는 거의 항상 일괄 처리됩니다. 최적의 배치 크기는 데이터 세트, 이미지 크기 및 훈련 컴퓨터의 RAM 용량에 따라 다릅니다. 우리와 같은 데이터 세트의 경우 합리적인 값은 배치당 18개에서 64개 이미지입니다. 훈련을 위한 하이퍼파라미터 정의 ###Code hyperparameters = { "epochs": 10, "batch-size": 32, "learning-rate": 0.001, } ###Output _____no_output_____ ###Markdown Estimator 설정현재 노트북의 TF Version 으로 훈련 EC2 인스턴스안으로 다운로드 받을 TF Docker image의 버전을 맞춥니다. 
###Code estimator_config = { "entry_point": "tensorflow_train.py", "source_dir": "training_tensorflow", "framework_version": "2.3", "py_version": "py37", "instance_type": "ml.p3.2xlarge", "instance_count": 1, "role": sagemaker.get_execution_role(), "hyperparameters": hyperparameters, "output_path": f"s3://{bucket_name}", } tf_estimator = TensorFlow(**estimator_config) ###Output _____no_output_____ ###Markdown 훈련 및 검증 데이터 채널 설정 ###Code s3_data_channels = { "training": train_tf_s3_uri, "validation": val_tf_s3_uri, } ###Output _____no_output_____ ###Markdown 3. 모델 훈련- 총 소요시간이 약 5분 걸립니다. ###Code tf_estimator.fit(s3_data_channels) ###Output 2022-01-04 02:00:55 Starting - Starting the training job... 2022-01-04 02:00:57 Starting - Launching requested ML instances...... 2022-01-04 02:02:15 Starting - Preparing the instances for training...... 2022-01-04 02:03:23 Downloading - Downloading input data... 2022-01-04 02:03:46 Training - Downloading the training image............ 2022-01-04 02:05:37 Training - Training image download completed. Training in progress.2022-01-04 02:05:38.698252: W tensorflow/core/profiler/internal/smprofiler_timeline.cc:460] Initializing the SageMaker Profiler. 2022-01-04 02:05:38.702062: W tensorflow/core/profiler/internal/smprofiler_timeline.cc:105] SageMaker Profiler is not enabled. The timeline writer thread will not be started, future recorded events will be dropped. 2022-01-04 02:05:38.946257: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudart.so.10.2 2022-01-04 02:05:39.038293: W tensorflow/core/profiler/internal/smprofiler_timeline.cc:460] Initializing the SageMaker Profiler. 2022-01-04 02:05:42,406 sagemaker-training-toolkit INFO Imported framework sagemaker_tensorflow_container.training 2022-01-04 02:05:42,893 sagemaker-training-toolkit INFO Installing dependencies from requirements.txt: /usr/local/bin/python3.7 -m pip install -r requirements.txt Collecting tensorflow-datasets==3.2.1 Downloading tensorflow_datasets-3.2.1-py3-none-any.whl (3.4 MB) Requirement already satisfied: termcolor in /usr/local/lib/python3.7/site-packages (from tensorflow-datasets==3.2.1->-r requirements.txt (line 1)) (1.1.0) Collecting promise Downloading promise-2.3.tar.gz (19 kB) Collecting future Downloading future-0.18.2.tar.gz (829 kB) Requirement already satisfied: requests>=2.19.0 in /usr/local/lib/python3.7/site-packages (from tensorflow-datasets==3.2.1->-r requirements.txt (line 1)) (2.24.0) Collecting tensorflow-metadata Downloading tensorflow_metadata-1.5.0-py3-none-any.whl (48 kB) Collecting tqdm Downloading tqdm-4.62.3-py2.py3-none-any.whl (76 kB) Requirement already satisfied: protobuf>=3.6.1 in /usr/local/lib/python3.7/site-packages (from tensorflow-datasets==3.2.1->-r requirements.txt (line 1)) (3.17.2) Requirement already satisfied: wrapt in /usr/local/lib/python3.7/site-packages (from tensorflow-datasets==3.2.1->-r requirements.txt (line 1)) (1.12.1) Requirement already satisfied: six in /usr/local/lib/python3.7/site-packages (from tensorflow-datasets==3.2.1->-r requirements.txt (line 1)) (1.16.0) Requirement already satisfied: numpy in /usr/local/lib/python3.7/site-packages (from tensorflow-datasets==3.2.1->-r requirements.txt (line 1)) (1.18.5) Requirement already satisfied: dill in /usr/local/lib/python3.7/site-packages (from tensorflow-datasets==3.2.1->-r requirements.txt (line 1)) (0.3.3) Requirement already satisfied: absl-py in /usr/local/lib/python3.7/site-packages (from tensorflow-datasets==3.2.1->-r 
requirements.txt (line 1)) (0.10.0) Requirement already satisfied: attrs>=18.1.0 in /usr/local/lib/python3.7/site-packages (from tensorflow-datasets==3.2.1->-r requirements.txt (line 1)) (21.2.0) Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/site-packages (from requests>=2.19.0->tensorflow-datasets==3.2.1->-r requirements.txt (line 1)) (1.25.11) Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/site-packages (from requests>=2.19.0->tensorflow-datasets==3.2.1->-r requirements.txt (line 1)) (2.10) Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/site-packages (from requests>=2.19.0->tensorflow-datasets==3.2.1->-r requirements.txt (line 1)) (2021.5.30) Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/site-packages (from requests>=2.19.0->tensorflow-datasets==3.2.1->-r requirements.txt (line 1)) (3.0.4) Collecting googleapis-common-protos<2,>=1.52.0 Downloading googleapis_common_protos-1.54.0-py2.py3-none-any.whl (207 kB) Building wheels for collected packages: future, promise Building wheel for future (setup.py): started  Building wheel for future (setup.py): finished with status 'done' Created wheel for future: filename=future-0.18.2-py3-none-any.whl size=491070 sha256=7c804126baa219e64ece6cad70de48f3b1c04a975cc3851980c7539cb1b1af01 Stored in directory: /root/.cache/pip/wheels/56/b0/fe/4410d17b32f1f0c3cf54cdfb2bc04d7b4b8f4ae377e2229ba0 Building wheel for promise (setup.py): started Building wheel for promise (setup.py): finished with status 'done' Created wheel for promise: filename=promise-2.3-py3-none-any.whl size=21502 sha256=6c1b086517174f9b4e83d72168f9f8c25dc00b0ba7fe5ebff2ee4e7e6fd7bbec Stored in directory: /root/.cache/pip/wheels/29/93/c6/762e359f8cb6a5b69c72235d798804cae523bbe41c2aa8333d Successfully built future promise Installing collected packages: googleapis-common-protos, tqdm, tensorflow-metadata, promise, future, tensorflow-datasets Successfully installed future-0.18.2 googleapis-common-protos-1.54.0 promise-2.3 tensorflow-datasets-3.2.1 tensorflow-metadata-1.5.0 tqdm-4.62.3 WARNING: Running pip as root will break packages and permissions. You should install packages reliably by using venv: https://pip.pypa.io/warnings/venv WARNING: You are using pip version 21.1.2; however, version 21.3.1 is available. You should consider upgrading via the '/usr/local/bin/python3.7 -m pip install --upgrade pip' command. 
2022-01-04 02:05:49,703 sagemaker-training-toolkit INFO Invoking user script Training Env: { "additional_framework_parameters": {}, "channel_input_dirs": { "training": "/opt/ml/input/data/training", "validation": "/opt/ml/input/data/validation" }, "current_host": "algo-1", "framework_module": "sagemaker_tensorflow_container.training:main", "hosts": [ "algo-1" ], "hyperparameters": { "batch-size": 32, "learning-rate": 0.001, "model_dir": "s3://sagemaker-ap-northeast-2-057716757052/tensorflow-training-2022-01-04-02-00-55-413/model", "epochs": 10 }, "input_config_dir": "/opt/ml/input/config", "input_data_config": { "training": { "TrainingInputMode": "File", "S3DistributionType": "FullyReplicated", "RecordWrapperType": "None" }, "validation": { "TrainingInputMode": "File", "S3DistributionType": "FullyReplicated", "RecordWrapperType": "None" } }, "input_dir": "/opt/ml/input", "is_master": true, "job_name": "tensorflow-training-2022-01-04-02-00-55-413", "log_level": 20, "master_hostname": "algo-1", "model_dir": "/opt/ml/model", "module_dir": "s3://sagemaker-ap-northeast-2-057716757052/tensorflow-training-2022-01-04-02-00-55-413/source/sourcedir.tar.gz", "module_name": "tensorflow_train", "network_interface_name": "eth0", "num_cpus": 8, "num_gpus": 1, "output_data_dir": "/opt/ml/output/data", "output_dir": "/opt/ml/output", "output_intermediate_dir": "/opt/ml/output/intermediate", "resource_config": { "current_host": "algo-1", "hosts": [ "algo-1" ], "network_interface_name": "eth0" }, "user_entry_point": "tensorflow_train.py" } Environment variables: SM_HOSTS=["algo-1"] SM_NETWORK_INTERFACE_NAME=eth0 SM_HPS={"batch-size":32,"epochs":10,"learning-rate":0.001,"model_dir":"s3://sagemaker-ap-northeast-2-057716757052/tensorflow-training-2022-01-04-02-00-55-413/model"} SM_USER_ENTRY_POINT=tensorflow_train.py SM_FRAMEWORK_PARAMS={} SM_RESOURCE_CONFIG={"current_host":"algo-1","hosts":["algo-1"],"network_interface_name":"eth0"} SM_INPUT_DATA_CONFIG={"training":{"RecordWrapperType":"None","S3DistributionType":"FullyReplicated","TrainingInputMode":"File"},"validation":{"RecordWrapperType":"None","S3DistributionType":"FullyReplicated","TrainingInputMode":"File"}} SM_OUTPUT_DATA_DIR=/opt/ml/output/data SM_CHANNELS=["training","validation"] SM_CURRENT_HOST=algo-1 SM_MODULE_NAME=tensorflow_train SM_LOG_LEVEL=20 SM_FRAMEWORK_MODULE=sagemaker_tensorflow_container.training:main SM_INPUT_DIR=/opt/ml/input SM_INPUT_CONFIG_DIR=/opt/ml/input/config SM_OUTPUT_DIR=/opt/ml/output SM_NUM_CPUS=8 SM_NUM_GPUS=1 SM_MODEL_DIR=/opt/ml/model SM_MODULE_DIR=s3://sagemaker-ap-northeast-2-057716757052/tensorflow-training-2022-01-04-02-00-55-413/source/sourcedir.tar.gz 
SM_TRAINING_ENV={"additional_framework_parameters":{},"channel_input_dirs":{"training":"/opt/ml/input/data/training","validation":"/opt/ml/input/data/validation"},"current_host":"algo-1","framework_module":"sagemaker_tensorflow_container.training:main","hosts":["algo-1"],"hyperparameters":{"batch-size":32,"epochs":10,"learning-rate":0.001,"model_dir":"s3://sagemaker-ap-northeast-2-057716757052/tensorflow-training-2022-01-04-02-00-55-413/model"},"input_config_dir":"/opt/ml/input/config","input_data_config":{"training":{"RecordWrapperType":"None","S3DistributionType":"FullyReplicated","TrainingInputMode":"File"},"validation":{"RecordWrapperType":"None","S3DistributionType":"FullyReplicated","TrainingInputMode":"File"}},"input_dir":"/opt/ml/input","is_master":true,"job_name":"tensorflow-training-2022-01-04-02-00-55-413","log_level":20,"master_hostname":"algo-1","model_dir":"/opt/ml/model","module_dir":"s3://sagemaker-ap-northeast-2-057716757052/tensorflow-training-2022-01-04-02-00-55-413/source/sourcedir.tar.gz","module_name":"tensorflow_train","network_interface_name":"eth0","num_cpus":8,"num_gpus":1,"output_data_dir":"/opt/ml/output/data","output_dir":"/opt/ml/output","output_intermediate_dir":"/opt/ml/output/intermediate","resource_config":{"current_host":"algo-1","hosts":["algo-1"],"network_interface_name":"eth0"},"user_entry_point":"tensorflow_train.py"} SM_USER_ARGS=["--batch-size","32","--epochs","10","--learning-rate","0.001","--model_dir","s3://sagemaker-ap-northeast-2-057716757052/tensorflow-training-2022-01-04-02-00-55-413/model"] SM_OUTPUT_INTERMEDIATE_DIR=/opt/ml/output/intermediate SM_CHANNEL_TRAINING=/opt/ml/input/data/training SM_CHANNEL_VALIDATION=/opt/ml/input/data/validation SM_HP_BATCH-SIZE=32 SM_HP_LEARNING-RATE=0.001 SM_HP_MODEL_DIR=s3://sagemaker-ap-northeast-2-057716757052/tensorflow-training-2022-01-04-02-00-55-413/model SM_HP_EPOCHS=10 PYTHONPATH=/opt/ml/code:/usr/local/bin:/usr/local/lib/python37.zip:/usr/local/lib/python3.7:/usr/local/lib/python3.7/lib-dynload:/usr/local/lib/python3.7/site-packages Invoking script with the following command: /usr/local/bin/python3.7 tensorflow_train.py --batch-size 32 --epochs 10 --learning-rate 0.001 --model_dir s3://sagemaker-ap-northeast-2-057716757052/tensorflow-training-2022-01-04-02-00-55-413/model Training with: GPU [2022-01-04 02:05:56.241 ip-10-0-146-144.ap-northeast-2.compute.internal:52 INFO utils.py:27] RULE_JOB_STOP_SIGNAL_FILENAME: None [2022-01-04 02:05:56.529 ip-10-0-146-144.ap-northeast-2.compute.internal:52 INFO profiler_config_parser.py:102] Unable to find config at /opt/ml/input/config/profilerconfig.json. Profiler is disabled. Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5 #015 8192/94765736 [..............................] - ETA: 18s#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 49152/94765736 [..............................] - ETA: 1:40#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#010#015 172032/94765736 [..............................] 
94773248/94765736 [==============================] - 2s 0us/step Beginning Training... [2022-01-04 02:06:02.908 ip-10-0-146-144.ap-northeast-2.compute.internal:52 INFO json_config.py:91] Creating hook from json_config at /opt/ml/input/config/debughookconfig.json. [2022-01-04 02:06:02.908 ip-10-0-146-144.ap-northeast-2.compute.internal:52 INFO hook.py:201] tensorboard_dir has not been set for the hook. SMDebug will not be exporting tensorboard summaries. [2022-01-04 02:06:02.909 ip-10-0-146-144.ap-northeast-2.compute.internal:52 INFO hook.py:255] Saving to /opt/ml/output/tensors [2022-01-04 02:06:02.909 ip-10-0-146-144.ap-northeast-2.compute.internal:52 INFO state_store.py:77] The checkpoint config file /opt/ml/input/config/checkpointconfig.json does not exist.
[2022-01-04 02:06:02.910 ip-10-0-146-144.ap-northeast-2.compute.internal:52 INFO hook.py:425] Monitoring the collections: losses, sm_metrics, metrics [2022-01-04 02:06:02.951 ip-10-0-146-144.ap-northeast-2.compute.internal:52 INFO hook.py:425] Monitoring the collections: losses, sm_metrics, metrics Epoch 1/10 69/69 - 11s - loss: 1.5290 - accuracy: 0.4900 - val_loss: 0.8731 - val_accuracy: 0.7091 - batch: 0.0000e+00 Epoch 2/10 69/69 - 9s - loss: 0.8040 - accuracy: 0.7464 - val_loss: 0.6871 - val_accuracy: 0.7745 - batch: 1.0000 Epoch 3/10 69/69 - 9s - loss: 0.6364 - accuracy: 0.7991 - val_loss: 0.6181 - val_accuracy: 0.7927 - batch: 2.0000 Epoch 4/10 69/69 - 9s - loss: 0.5498 - accuracy: 0.8373 - val_loss: 0.5936 - val_accuracy: 0.8073 - batch: 3.0000 Epoch 5/10 69/69 - 9s - loss: 0.4993 - accuracy: 0.8500 - val_loss: 0.5895 - val_accuracy: 0.8000 - batch: 4.0000 Epoch 6/10 69/69 - 9s - loss: 0.4493 - accuracy: 0.8723 - val_loss: 0.5778 - val_accuracy: 0.8182 - batch: 5.0000 Epoch 7/10 69/69 - 9s - loss: 0.4073 - accuracy: 0.8873 - val_loss: 0.5473 - val_accuracy: 0.8291 - batch: 6.0000 Epoch 8/10 69/69 - 9s - loss: 0.3782 - accuracy: 0.8977 - val_loss: 0.5410 - val_accuracy: 0.8182 - batch: 7.0000 Epoch 9/10 69/69 - 9s - loss: 0.3697 - accuracy: 0.8991 - val_loss: 0.5365 - val_accuracy: 0.8364 - batch: 8.0000 Epoch 10/10 69/69 - 9s - loss: 0.3456 - accuracy: 0.9082 - val_loss: 0.5307 - val_accuracy: 0.8400 - batch: 9.0000 2022-01-04 02:05:50.060220: W tensorflow/core/profiler/internal/smprofiler_timeline.cc:460] Initializing the SageMaker Profiler. 2022-01-04 02:05:50.060367: W tensorflow/core/profiler/internal/smprofiler_timeline.cc:105] SageMaker Profiler is not enabled. The timeline writer thread will not be started, future recorded events will be dropped. 2022-01-04 02:05:50.110319: W tensorflow/core/profiler/internal/smprofiler_timeline.cc:460] Initializing the SageMaker Profiler. WARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/tensorflow/python/training/tracking/tracking.py:111: Model.state_updates (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version. Instructions for updating: This property should not be used in TensorFlow 2.0, as updates are applied automatically. WARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/tensorflow/python/training/tracking/tracking.py:111: Model.state_updates (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version. Instructions for updating: This property should not be used in TensorFlow 2.0, as updates are applied automatically. 2022-01-04 02:07:55.260226: W tensorflow/python/util/util.cc:348] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them. WARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/tensorflow/python/training/tracking/tracking.py:111: Layer.updates (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version. Instructions for updating: This property should not be used in TensorFlow 2.0, as updates are applied automatically. WARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/tensorflow/python/training/tracking/tracking.py:111: Layer.updates (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version. Instructions for updating: This property should not be used in TensorFlow 2.0, as updates are applied automatically. 
INFO:tensorflow:Assets written to: /opt/ml/model/model/assets INFO:tensorflow:Assets written to: /opt/ml/model/model/assets 2022-01-04 02:08:11,726 sagemaker_tensorflow_container.training WARNING Your model will NOT be servable with SageMaker TensorFlow Serving containers. The SavedModel bundle is under directory "model", not a numeric name. 2022-01-04 02:08:11,727 sagemaker-training-toolkit INFO Reporting training SUCCESS 2022-01-04 02:08:15 Uploading - Uploading generated training model 2022-01-04 02:09:34 Completed - Training job completed Training seconds: 371 Billable seconds: 371 ###Markdown 4. Load the trained model and the test data___- The model weights are saved to S3; after downloading them, you can reload the weights into the model and generate predictions. - It is important to evaluate the model on the test data after training. Download and extract the trained model from S3 ###Code sagemaker.s3.S3Downloader().download(tf_estimator.model_data, "training_tensorflow") import tarfile tfile = tarfile.open("training_tensorflow/model.tar.gz") tfile.extractall("training_tensorflow") ###Output _____no_output_____ ###Markdown Load the trained model- When you run the cell below, it may stall and stop making progress. - One possible cause is an OOM (Out of Memory) condition. - Free up memory (for example, shut down other notebook kernels or kill processes) and run it again. ###Code try: model = tf.keras.models.load_model("training_tensorflow/model") except Exception: import traceback traceback.print_exc() ###Output _____no_output_____ ###Markdown Load images from the test dataset for prediction ###Code image_folder = tfds.ImageFolder("./data_structured") def tfrecord_parser(record): features = { "height": tf.io.FixedLenFeature([], tf.int64), "width": tf.io.FixedLenFeature([], tf.int64), "depth": tf.io.FixedLenFeature([], tf.int64), "label": tf.io.FixedLenFeature([], tf.int64), "image_raw": tf.io.FixedLenFeature([], tf.string), } parsed_features = tf.io.parse_single_example(record, features) return tf.io.decode_jpeg(parsed_features["image_raw"]), parsed_features["label"] test_ds = tf.data.TFRecordDataset(filenames=["data_tfrecord/test.tfrecord"], num_parallel_reads=2) test_ds = test_ds.map(tfrecord_parser, num_parallel_calls=2).as_numpy_iterator() ###Output _____no_output_____ ###Markdown Map the model predictions (0 through 9) back to the original class names (bear through zebra) ###Code with open("pickled_data/category_labels.pickle", "rb") as f: category_labels = pickle.load(f) category_labels = {idx: name for idx, name in enumerate(sorted(category_labels.values()))} category_labels ###Output _____no_output_____ ###Markdown 5. Display validation images with model inference. Re-run the cell to see more predictions. ###Code fig, axs = plt.subplots(3, 4, figsize=(10, 7)) for ax in axs.flatten(): sample = next(iter(test_ds)) image = sample[0] pred = model.predict(tf.expand_dims(image, axis=0)) pred_name = category_labels[np.argmax(pred)] ax.imshow(image) ax.axis("off") ax.set_title(f"prediction: {pred_name}") ###Output _____no_output_____
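###Markdown The section above notes that it is important to evaluate the model on the test data after training, but the cells only display per-image predictions. The cell below is a minimal aggregate-evaluation sketch, assuming the `model`, `tfrecord_parser`, numpy (`np`) and `data_tfrecord/test.tfrecord` objects defined above; it simply re-reads the TFRecord file and counts correct top-1 predictions.
###Code
# Minimal evaluation sketch (assumes model, tfrecord_parser and the test TFRecord defined above)
eval_ds = tf.data.TFRecordDataset(filenames=["data_tfrecord/test.tfrecord"]).map(tfrecord_parser)
n_correct, n_total = 0, 0
for image, label in eval_ds.as_numpy_iterator():
    # predict one image at a time, matching how the prediction cell above calls the model
    pred = model.predict(tf.expand_dims(image, axis=0))
    n_correct += int(np.argmax(pred) == label)
    n_total += 1
print(f"top-1 test accuracy: {n_correct / n_total:.3f} ({n_correct}/{n_total})")
###Output _____no_output_____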
cn/.ipynb_checkpoints/sicp-3-37-checkpoint.ipynb
###Markdown title ###Code (define (constant value connector) (define (me request) (error "Unknow request -- CONSTANT" request)) (connect connector me) (set-value! connector value me) me) (define (probe name connector) (define (print-probe value) (newline) (display "Probe: ") (display name) (display " = ") (display value)) (define (process-new-value) (print-probe (get-value connector))) (define (process-forget-value) (print-probe "?")) (define (me request) (cond ((eq? request 'I-have-a-value) (process-new-value)) ((eq? request 'I-lost-my-value) (process-forget-value)) (else (error "Unknow request -- PROBE " request)))) (connect connector me) me) (define (make-connector) (let ((value false) (informant false) (constraints '())) (define (set-my-value newval setter) (cond ((not (has-value? me)) (set! value newval) (set! informant setter) (for-each-except setter inform-about-value constraints)) ((not (= value newval)) (error "Constradiction" (list value newval))) (else 'ignored))) (define (forget-my-value retractor) (if (eq? retractor informant) (begin (set! informant false) (for-each-except retractor inform-about-no-value constraints)) 'ignored)) (define (connect new-constraint) (if (not (memq new-constraint constraints)) (set! constraints (cons new-constraint constraints))) (if (has-value? me) (inform-about-value new-constraint)) 'done) (define (me request) (cond ((eq? request 'has-value?) (if informant true false)) ((eq? request 'value ) value) ((eq? request 'set-value!) set-my-value) ((eq? request 'forget) forget-my-value) ((eq? request 'connect) connect) (else (error "Unknown operation -- CONNECTOR" request)))) me)) (define (inform-about-value constraint) (constraint 'I-have-a-value)) (define (inform-about-no-value constraint) (constraint 'I-lost-my-value)) (define (for-each-except exception procedure list) (define (loop items) (cond ((null? items) 'done) ((eq? (car items) exception ) (loop ( cdr items))) (else (procedure (car items)) (loop (cdr items))))) (loop list)) (define (has-value? connector) (connector 'has-value?)) (define (get-value connector) (connector 'value)) (define (set-value! connector new-value informant) ((connector 'set-value!) new-value informant)) (define (forget-value! connector retractor) ((connector 'forget) retractor)) (define (connect connector new-constraint) ((connector 'connect) new-constraint)) (define (adder a1 a2 sum) (define (process-new-value) (cond ((and (has-value? a1) (has-value? a2)) (set-value! sum (+ (get-value a1) (get-value a2)) me)) ((and (has-value? a1) (has-value? sum)) (set-value! a2 (- (get-value sum) (get-value a1)) me)) ((and (has-value? a2) (has-value? sum)) (set-value! a1 (- (get-value sum) (get-value a2)) me)))) (define (process-forget-value) (forget-value! sum me) (forget-value! a1 me) (forget-value! a2 me) (process-new-value)) (define (me request) (cond ((eq? request 'I-have-a-value) (process-new-value)) ((eq? request 'I-lost-my-value) (process-forget-value)) (else (error "Unknown request -- ADDER" request)))) (connect a1 me) (connect a2 me) (connect sum me) me) (define (multiplier m1 m2 product) (define (process-new-value) (cond ((or (and (has-value? m1) (= (get-value m1) 0)) (and (has-value? m2) (= (get-value m2) 0))) (set-value! product 0 me)) ((and (has-value? m1) (has-value? m2)) (set-value! product (* (get-value m1) (get-value m2)) me)) ((and (has-value? product) (has-value? m1)) (set-value! m2 (/ (get-value product) (get-value m1)) me)) ((and (has-value? product) (has-value? m2)) (set-value! 
m1 (/ (get-value product) (get-value m2)) me)))) (define (process-forget-value) (forget-value! product me) (forget-value! m1 me) (forget-value! m2 me) (process-new-value)) (define (me request) (cond ((eq? request 'I-have-a-value) (process-new-value)) ((eq? request 'I-lost-my-value) (process-forget-value)) (else (error "Unknown request -- MULTIPLIER " request)))) (connect m1 me) (connect m2 me) (connect product me) me) (define (start-unit-test-adder) (define value1 (make-connector)) (define value2 (make-connector)) (define my-sum (make-connector)) (adder value1 value2 my-sum) (probe 'value1 value1) (probe 'value2 value2) (probe 'my-sum my-sum) (set-value! value1 1 'user) (set-value! value2 2 'user) (forget-value! value1 'user) ; (forget-value! value2 'user) (set-value! value1 4 'user) (forget-value! value1 'user) (forget-value! my-sum 'user) (set-value! my-sum 19 'user) ) (define (start-unit-test-multiplier) (define value1 (make-connector)) (define value2 (make-connector)) (define my-product (make-connector)) (multiplier value1 value2 my-product) (probe 'value1 value1) (probe 'value2 value2) (probe 'my-product my-product) (set-value! value1 1 'user) (set-value! value2 2 'user) (forget-value! value1 'user) ; (forget-value! value2 'user) (set-value! value1 4 'user) (forget-value! value1 'user) (forget-value! my-product 'user) (set-value! my-product 19 'user) ) (define (averager a b c) (define number-2 (make-connector)) (define sum-value (make-connector)) (adder a b sum-value) (multiplier c number-2 sum-value) (constant 2 number-2) 'ok) (define (start-test-3-33) (define a (make-connector)) (define b (make-connector)) (define c (make-connector)) (averager a b c) (probe 'a a) (probe 'b b) (probe 'c c) (set-value! a 3 'user) (set-value! b 5 'user) ) (define (squarer a b) (define (process-new-value) (if (has-value? b) (if (< (get-value b) 0) (error "square less than 0 -- SQUARE" (get-value b)) (set-value! a (sqrt (get-value b)) me)) (if (has-value? a) (set-value! b (* (get-value a) (get-value a)) me) ))) (define (process-forget-value ) (forget-value! a me) (forget-value! b me) (process-new-value)) (define (me request) (cond ((eq? request 'I-have-a-value) (process-new-value)) ((eq? request 'I-lost-my-value) (process-forget-value)) (else (error "Unknown request -- SQUARER " request)))) (connect a me) (connect b me) me) (define (start-test-3-35) (define a (make-connector)) (define b (make-connector)) (squarer a b) (probe 'a a) (probe 'b b) (set-value! b 16 'user) ) (define (c+ x y) (let ((z (make-connector))) (adder x y z) z)) (define (c- x y) (let ((z (make-connector))) (adder y z x) z)) (define (c* x y) (let ((z (make-connector))) (multiplier x y z) z)) (define (c/ x y) (let ((z (make-connector))) (multiplier z y x) z)) (define (cv x) (let ((z (make-connector))) (constant x z) z)) (define (celsius-fahrenheit-converter x) (c+ (c* (c/ (cv 9) (cv 5)) x) (cv 32))) (define C (make-connector)) (define F (celsius-fahrenheit-converter C)) (define (start-test-3-37) (set-value! C 10 'user) (display "F is:") (display (get-value F)) (newline)) ###Output _____no_output_____
mgnify/src/notebooks/answers/ANSWER_ERP001736_go_temperature_analysis.ipynb
###Markdown Plotting temperature and photosynthesis-related GO term counts, normalised by number of InterPro annotations, for Tara Oceans project PRJEB1787. The following task shows how metadata and annotations retrieved from the EMG API are analysed and combined on the fly to generate the visualisations. ###Code import copy try: from urllib import urlencode except ImportError: from urllib.parse import urlencode from pandas import DataFrame import matplotlib.pyplot as plt import numpy as np from jsonapi_client import Session, Filter API_BASE = 'https://www.ebi.ac.uk/metagenomics/api/latest/' ###Output _____no_output_____ ###Markdown List all analyses in the project: https://www.ebi.ac.uk/metagenomics/api/latest/analyses?experiment_type=metagenomic&study_accession=PRJEB1787 ###Code def find_metadata(metadata, key): """ Extract metadata value for given key """ for m in metadata: if m['key'].lower() == key.lower(): return m['value'] return None metadata_key = 'temperature' normalize_key = 'Predicted CDS with InterProScan match' # map GO terms to the temperature result = {} with Session(API_BASE) as s: # temporary dict to store accession and metadata metadata_map = {} # list of runs missing metadata missing_meta = list() print('Loading data from API.', end='', flush=True) # preparing url params = { 'experiment_type': 'metagenomic', 'study_accession': 'PRJEB1787', 'page_size': 100, 'include': 'sample', } f = Filter(urlencode(params)) # list runs for anls in s.iterate('analyses', f): print('.', end='', flush=True) # find temperature for each run try: m_value = float(find_metadata(anls.sample.sample_metadata, metadata_key)) except: m_value = None if m_value is not None: metadata_map[anls.accession] = m_value else: # missing value, skip run! missing_meta.append(anls.accession) continue try: _pcds = int(find_metadata(anls.analysis_summary, normalize_key)) except (TypeError, ValueError): _pcds = None if _pcds is None: # missing value, skip run! 
continue _temperature = metadata_map[anls.accession] try: result[_temperature] except KeyError: result[_temperature] = {} # list a summary of GO terms derived from InterPro matches for ann in anls.go_slim: try: result[_temperature][ann.accession] except KeyError: result[_temperature][ann.accession] = list() # normalize annotation counts, adjusting value _norm = int(ann.count)/_pcds # assign value result[_temperature][ann.accession].append(_norm) print("DONE") # print("Missing: ", missing_meta) ###Output Loading data from API..........................................................................................................................................................................................................................................................DONE ###Markdown Clean up data ###Code # remove invalid temperatures for k in copy.deepcopy(list(result.keys())): if k > 1000: del result[k] # average value of the same temperature for k in result: for k1 in result[k]: result[k][k1] = np.mean(result[k][k1]) ###Output _____no_output_____ ###Markdown Calculate correlation ###Code from scipy.stats import spearmanr df = DataFrame(result) df_go = df.T[['GO:0009579','GO:0015979']].copy() x = df_go.index.tolist() correl = [] correl_p = [] for k in df_go.keys(): y = list(df_go[k]) rho, p = spearmanr(x, y) correl.append(rho) correl_p.append(p) df_go.loc['rho'] = correl df_go.loc['p'] = correl_p df_go ###Output _____no_output_____ ###Markdown Plot ###Code df = DataFrame(result) df_go_plot = df.T[['GO:0009579','GO:0015979']].copy() pl = df_go_plot.plot( y=['GO:0009579', 'GO:0015979'], use_index=True, style='o', figsize=(8,5), title='Temperature and photosynthesis-related GO term counts, normalised by number of InterPro annotations, for Tara Oceans project PRJEB1787', ) pl.set_xlabel("Temperature °C") # pl.set_xlabel("Depth m") pl.set_ylabel("Relative abundance") plt.show() ###Output _____no_output_____
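###Markdown As a quick sanity check of the `find_metadata` helper defined above, the cell below runs it on a small hand-made metadata list; the sample keys and values are invented purely for illustration. The lookup is case-insensitive and returns None when the key is absent.
###Code
# Illustration only: invented entries in the same {'key': ..., 'value': ...} shape returned by the API
sample_metadata = [
    {'key': 'Temperature', 'value': '18.2'},
    {'key': 'Depth', 'value': '5'},
]
print(find_metadata(sample_metadata, 'temperature'))  # '18.2' despite the different capitalisation
print(find_metadata(sample_metadata, 'salinity'))     # None, because the key is not present
###Output _____no_output_____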
Proposed Method/1_online_tensor_decomposition.ipynb
###Markdown Online Tensor Decomposition to optimize online tensor decomposition (streaming analysis) ###Code import time import numpy as np import tensorly as tl from tensorly.decomposition import parafac from tensorly.decomposition.candecomp_parafac import initialize_factors, unfolding_dot_khatri_rao, KruskalTensor # for sample video from cv2 import VideoWriter, VideoWriter_fourcc def make_video(tensor, filename): start = time.time() height = tensor.shape[1] width = tensor.shape[2] FPS = 24 fourcc = VideoWriter_fourcc(*'MP42') video = VideoWriter(filename, fourcc, float(FPS), (width, height)) for frame in tensor: video.write(np.uint8(frame)) video.release() print('created', filename, time.time()-start) def construct_tensor(factors): weights = tl.ones(factors[0].shape[1]) est_tensor = tl.kruskal_to_tensor((weights, factors)) return est_tensor def print_tensor(X, n_digit=1): print(np.round(X, n_digit)) def compare_tensors(A, B): print('||A-B||:', tl.norm(A - B)) def create_tensor_stream(X, start_to_stream, batch_sizes): total_batch_size = np.sum(batch_sizes) if X.shape[0] != start_to_stream + total_batch_size: raise ValueError('Total batch size should be the size of streaming part of the tensor.') X_stream = [X[:start_to_stream]] batch_start = start_to_stream for batch_size in batch_sizes: batch_end = batch_start + batch_size X_stream.append(X[batch_start:batch_end]) batch_start = batch_end return np.asarray(X_stream) def get_KhatriRao(factors): n_dim = len(factors) lefts = [factors[n_dim-1]] rights = [factors[0]] if n_dim > 2: for mode in range(1, n_dim-1): lefts.append(tl.tenalg.khatri_rao((lefts[mode-1], factors[n_dim-mode-1]))) rights.append(tl.tenalg.khatri_rao((factors[mode], rights[mode-1]))) K = lefts.copy() K[0] = lefts[n_dim-2] K.append(rights[n_dim-2].copy()) if n_dim > 2: for mode in range(1, n_dim-1): K[mode] = tl.tenalg.khatri_rao((lefts[n_dim-mode-2], rights[mode-1])) return K def get_KhatriRao_except0(factors): n_dim = len(factors) lefts = np.empty((n_dim), dtype=object) rights = np.empty((n_dim), dtype=object) K = np.empty((n_dim), dtype=object) lefts[1] = factors[n_dim-1] rights[1] = factors[1] if n_dim > 3: for mode in range(2, n_dim-1): lefts[mode] = tl.tenalg.khatri_rao((factors[n_dim-mode], lefts[mode-1])) rights[mode] = tl.tenalg.khatri_rao((rights[mode-1], factors[mode])) K[1] = lefts[n_dim-2] K[n_dim-1] = rights[n_dim-2] if n_dim > 3: for mode in range(2, n_dim-1): K[mode] = tl.tenalg.khatri_rao((rights[mode-1], lefts[n_dim-mode-1])) return K def get_Hadamard(factors): rank = factors[0].shape[1] H = tl.tensor(np.ones((rank, rank))) for factor in factors: H = H * tl.dot(tl.transpose(factor), factor) return H ###Output _____no_output_____ ###Markdown Online CP ###Code def online_cp(factors_old, X_old, X_new, rank, P, Q, n_iter=1, mu=1, verbose=False, transformed=False): weights = tl.ones(rank) if verbose: X = tl.tensor(np.concatenate((X_old, X_new))) n_dim = tl.ndim(X_old) U = factors_old.copy() if not transformed: K = get_KhatriRao_except0(factors_old) H = get_Hadamard(factors_old[1:]) for i in range(n_iter): # temporal mode for A1 if not transformed: mttkrp = tl.dot(tl.unfold(X_new, 0), tl.tenalg.khatri_rao((U[1], K[1]))) else: # for higher accracy, lower speed mttkrp_parts = [] for r in range(rank): component = tl.tenalg.multi_mode_dot(X_new, [f[:, r] for f in U], skip=0) mttkrp_parts.append(component) mttkrp = np.stack(mttkrp_parts, axis=1) A1 = tl.transpose(tl.solve(tl.transpose(H), tl.transpose(mttkrp))) # non-temporal mode for mode in range(1, n_dim): if not 
transformed: dP = tl.dot(tl.unfold(X_new, mode), tl.tenalg.khatri_rao((A1, K[mode]))) UTU = tl.dot(tl.transpose(U[mode]), U[mode]) dQ = tl.dot(tl.transpose(A1), A1) * H / UTU U[mode] = tl.transpose(tl.solve(tl.transpose(mu*Q[mode] + dQ), tl.transpose(mu*P[mode] + dP))) # K = updated K due to non-temporal mode change # H = H_mode * tl.dot(tl.transpose(U[mode]), U[mode]) / UTU P[mode] = P[mode] + dP Q[mode] = Q[mode] + dQ else: U1 = U.copy() U1[0] = A1 H_mode = H / tl.dot(tl.transpose(U[mode]), U[mode]) V = (mu * tl.dot(tl.transpose(U[0]), U[0]) + tl.dot(tl.transpose(A1), A1)) * H_mode mttkrp0 = unfolding_dot_khatri_rao(X_old, (None, U), mode) mttkrp1 = unfolding_dot_khatri_rao(X_new, (None, U1), mode) U[mode] = tl.transpose(tl.solve(tl.transpose(V), tl.transpose(mu*mttkrp0 + mttkrp1))) H = H_mode * tl.dot(tl.transpose(U[mode]), U[mode]) # temporal mode for A0 if transformed: mttkrp = unfolding_dot_khatri_rao(X_old, (None, U), 0) # mttkrp = tl.dot(tl.unfold(X_old, 0), tl.tenalg.khatri_rao((U[1], K[1]))) U[0] = tl.transpose(tl.solve(tl.transpose(H), tl.transpose(mttkrp))) if verbose: U1 = U.copy() U1[0] = np.concatenate((U[0], A1)) X_est = construct_tensor(U1) compare_tensors(X, X_est) U[0] = np.concatenate((U[0], A1)) return (KruskalTensor((weights, U)), P, Q) ###Output _____no_output_____ ###Markdown DTD ###Code def dtd(factors_old, X_old, X_new, rank, n_iter=1, mu=1, verbose=False): weights = tl.ones(rank) if verbose: X = tl.tensor(np.concatenate((X_old, X_new))) n_dim = tl.ndim(X_old) U = factors_old.copy() for i in range(n_iter): # temporal mode for A1 V = tl.tensor(np.ones((rank, rank))) for j, factor in enumerate(U): if j != 0: V = V * tl.dot(tl.transpose(factor), factor) mttkrp = unfolding_dot_khatri_rao(X_new, (None, U), 0) A1 = tl.transpose(tl.solve(tl.transpose(V), tl.transpose(mttkrp))) # non-temporal mode for mode in range(1, n_dim): U1 = U.copy() U1[0] = A1 V = tl.tensor(np.ones((rank, rank))) W = tl.tensor(np.ones((rank, rank))) for j, factor in enumerate(U): factor_old = factors_old[j] if j != mode: W = W * tl.dot(tl.transpose(factor_old), factor) if j == 0: V = V * (mu*tl.dot(tl.transpose(factor), factor) + tl.dot(tl.transpose(A1), A1)) else: V = V * tl.dot(tl.transpose(factor), factor) mttkrp0 = mu * tl.dot(factors_old[mode], W) mttkrp1 = unfolding_dot_khatri_rao(X_new, (None, U1), mode) U[mode] = tl.transpose(tl.solve(tl.transpose(V), tl.transpose(mttkrp0 + mttkrp1))) # temporal mode for A0 V = tl.tensor(np.ones((rank, rank))) W = tl.tensor(np.ones((rank, rank))) for j, factor in enumerate(U): factor_old = factors_old[j] if j != 0: V = V * tl.dot(tl.transpose(factor), factor) W = W * tl.dot(tl.transpose(factor_old), factor) mttkrp = tl.dot(factors_old[0], W) U[0] = tl.transpose(tl.solve(tl.transpose(V), tl.transpose(mttkrp))) if verbose: U1 = U.copy() U1[0] = np.concatenate((U[0], A1)) X_est = construct_tensor(U1) compare_tensors(X, X_est) U[0] = np.concatenate((U[0], A1)) return KruskalTensor((weights, U)) ###Output _____no_output_____ ###Markdown Online Tensor Decomposition* `onlinecp`, `transformed_onlinecp`, `dtd` ###Code def online_tensor_decomposition(X_stream, X, rank, n_iter=1, mu=1, verbose=False, method='onlinecp'): if method == 'onlinecp': onlinecp = True transformed = False elif method == 'transformed_onlinecp': onlinecp = True transformed = True elif method == 'dtd': onlinecp = False else: raise ValueError('The method does not exist.') X_old = X_stream[0] n_dim = tl.ndim(X_old) start = time.time() (weights, factors) = parafac(X_old, rank, init='random') 
print('making init decomposition result:', time.time()-start) if verbose: X_est = construct_tensor(factors) compare_tensors(X_old, X_est) if onlinecp: start = time.time() print('\n >> onlinecp rank-{} n_iter-{} mu-{} transformed-{}'.format(rank, n_iter, mu, transformed)) K = get_KhatriRao_except0(factors) H = get_Hadamard(factors) P = np.empty((n_dim), dtype=object) Q = np.empty((n_dim), dtype=object) for mode in range(1, n_dim): P[mode] = tl.dot(tl.unfold(X_old, mode), tl.tenalg.khatri_rao((factors[0], K[mode]))) Q[mode] = H / tl.dot(tl.transpose(factors[mode]), factors[mode]) print('init_time:', time.time()-start) else: print('\n >> dtd rank-{} n_iter-{} mu-{}'.format(rank, n_iter, mu)) for i, X_new in enumerate(X_stream[1:]): start = time.time() if onlinecp: ((weights, factors), P, Q) = online_cp(factors, X_old, X_new, rank, P, Q, n_iter=n_iter, mu=mu, verbose=False, transformed=transformed) else: (weights, factors) = dtd(factors, X_old, X_new, rank, n_iter=n_iter, mu=mu, verbose=False) U = factors.copy() U[0] = U[0][-X_new.shape[0]-1:-1] dX_est = construct_tensor(U) print('{}th_iter:'.format(i+1), time.time()-start, tl.norm(X_new-dX_est)) X_old = np.concatenate((X_old, X_new)) if verbose: X_est = construct_tensor(factors) compare_tensors(X_old, X_est) weights = tl.ones(rank) return KruskalTensor((weights, factors)) ###Output _____no_output_____ ###Markdown Single tensor example ###Code tensor = tl.tensor(np.arange(12000000, dtype='d').reshape((500, 40, 30, 20))) tensor_old = tensor[:300,:,:,:] tensor_new = tensor[300:,:,:,:] rank = 4 n_dim = tl.ndim(tensor) start = time.time() (weights, factors_old) = parafac(tensor_old, rank) print('making prev decomposition result:', time.time()-start) start = time.time() K = get_KhatriRao_except0(factors_old) H = get_Hadamard(factors_old) P = np.empty((n_dim), dtype=object) Q = np.empty((n_dim), dtype=object) for mode in range(1, n_dim): P[mode] = tl.dot(tl.unfold(tensor_old, mode), tl.tenalg.khatri_rao((factors_old[0], K[mode]))) Q[mode] = H / tl.dot(tl.transpose(factors_old[mode]), factors_old[mode]) print('init time:', time.time()-start) start = time.time() print('\n >> online_cp start') ((weights, factors), P, Q) = online_cp(factors_old, tensor_old, tensor_new, rank, P, Q, n_iter=10, mu=0.95, verbose=False, transformed=True) print('exec time:', time.time()-start) tensor_est = construct_tensor(factors) compare_tensors(tensor, tensor_est) print_tensor(np.asarray((tensor, tensor_est))[:,0,0,0,:10]) start = time.time() print('\n >> online_cp start') ((weights, factors), P, Q) = online_cp(factors_old, tensor_old, tensor_new, rank, P, Q, mu=1, verbose=False, transformed=False) print('exec time:', time.time()-start) tensor_est = construct_tensor(factors) compare_tensors(tensor, tensor_est) print_tensor(np.asarray((tensor, tensor_est))[:,0,0,0,:10]) start = time.time() print('\n >> dtd start') (weights, factors) = dtd(factors_old, tensor_old, tensor_new, rank, mu=0.7, verbose=False) print('exec time:', time.time()-start) tensor_est = construct_tensor(factors) compare_tensors(tensor, tensor_est) print_tensor(np.asarray((tensor, tensor_est))[:,0,0,0,:10]) ###Output init time: 0.19338297843933105 >> online_cp start exec time: 12.507805109024048 ||A-B||: 808.3492851254828 [[ 0. 1. 2. 3. 4. 5. 6. 7. 8. 9.] [ 1. 2. 3. 4. 5. 6. 7. 8. 9. 10.]] >> online_cp start exec time: 0.40058422088623047 ||A-B||: 905.4862415426394 [[ 0. 1. 2. 3. 4. 5. 6. 7. 8. 9. 
] [ 1.5 2.5 3.5 4.5 5.5 6.5 7.5 8.5 9.5 10.5]] >> dtd start exec time: 0.6710240840911865 ||A-B||: 831.7680802854289 [[ 0. 1. 2. 3. 4. 5. 6. 7. 8. 9. ] [ 1.1 2.1 3.1 4.1 5.1 6.1 7.1 8.1 9.1 10.1]] ###Markdown Load Sample Video Dataset ###Code import csv X = tl.tensor(np.zeros([205, 240, 320, 3], dtype='d')) for i in range(41): start = time.time() with open('../Data/sample_video/data/video{}.tensor'.format(i)) as file: reader = csv.reader(file, delimiter='\t') for row in reader: indices = [[index] for index in np.int64(np.asarray(row[:-1]))-1] X[tuple(indices)] = np.double(row[-1]) print('>> sample_video{} loaded '.format(i), time.time() - start) make_video(X, './sample_video/original.avi') ###Output created ./sample_video/original.avi 0.449080228805542 ###Markdown Usage for Online Tensor Decomposition* Create a tensor stream (sum of batch sizes should match with total size of the tensor) * `create_tensor_stream(tensor, start_to_stream, batch_sizes)`* Invoke online tensor decomposition * `online_tensor_decomposition(tensor stream, original tensor, rank, verbose, method)`* Construct an estimated tensor w. factors, leaving weights (=identity matrix) * `construct_tensor(factors)` ###Code X_stream = create_tensor_stream(X, start_to_stream=10, batch_sizes=np.full((39), 5, dtype=int)) %%capture cap --no-stderr rank = 10 (weights, factors) = online_tensor_decomposition(X_stream, X, rank, verbose=False, method='onlinecp') X_est = construct_tensor(factors) compare_tensors(X, X_est) print_tensor(np.asarray((X, X_est))[:,100,100,100:110,0]) make_video(X_est, './sample_video/online_cp-{}.avi'.format(rank)) with open('./sample_video/online_cp-{}.txt'.format(rank), 'w') as f: f.write(cap.stdout) %%capture cap --no-stderr rank = 10 (weights, factors) = online_tensor_decomposition(X_stream, X, rank, n_iter=1, verbose=False, method='transformed_onlinecp') X_est = construct_tensor(factors) compare_tensors(X, X_est) print_tensor(np.asarray((X, X_est))[:,100,100,100:110,0]) make_video(X_est, './sample_video/transformed_online_cp-{}.avi'.format(rank)) with open('./sample_video/transformed_online_cp-{}.txt'.format(rank), 'w') as f: f.write(cap.stdout) %%capture cap --no-stderr rank = 10 (weights, factors) = online_tensor_decomposition(X_stream, X, rank, n_iter=1, verbose=False, method='dtd') X_est = construct_tensor(factors) compare_tensors(X, X_est) print_tensor(np.asarray((X, X_est))[:,100,100,100:110,0]) make_video(X_est, './sample_video/dtd-{}.avi'.format(rank)) with open('./sample_video/dtd-{}.txt'.format(rank), 'w') as f: f.write(cap.stdout) ###Output _____no_output_____
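###Markdown The three usage steps listed above can also be exercised without the video data. The cell below is a minimal sketch on a small synthetic tensor; the shapes, rank, batch sizes and the choice of the 'dtd' method are arbitrary illustration values, not recommendations.
###Code
# Minimal synthetic run of the 3-step usage above (arbitrary shapes and rank, illustration only)
X_demo = tl.tensor(np.random.rand(60, 8, 6, 4))
# 20 initial time slices, then 8 incoming batches of 5 slices each (20 + 8*5 = 60)
X_demo_stream = create_tensor_stream(X_demo, start_to_stream=20, batch_sizes=np.full((8), 5, dtype=int))
(weights_demo, factors_demo) = online_tensor_decomposition(X_demo_stream, X_demo, rank=3, method='dtd')
X_demo_est = construct_tensor(factors_demo)
compare_tensors(X_demo, X_demo_est)
###Output _____no_output_____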
Intro_to_Hyperparameter_Sweeps_with_W&B.ipynb
###Markdown Introduction to Hyperparameter Sweeps – A Battle Royale To Find The Best Model In 3 StepsSearching through high dimensional hyperparameter spaces to find the most performant model can get unwieldy very fast. Hyperparameter sweeps provide an organized and efficient way to conduct a battle royale of models and pick the most accurate model. They enable this by automatically searching through combinations of hyperparameter values (e.g. learning rate, batch size, number of hidden layers, optimizer type) to find the most optimal values.In this tutorial we'll see how you can run sophisticated hyperparameter sweeps in 3 easy steps using Weights and Biases.We'll train a plethora of convolutional neural networks and our battle royale will surface the model that classifies Simpsons characters with the highest accuracy. We'll work with [this dataset](https://www.kaggle.com/alexattia/the-simpsons-characters-dataset) from Kaggle.We'll also use Weights & Biases to log models metrics, inspect performance and share findings about the best architecture for the network. In this example we're using Google Colab as a convenient hosted environment, but you can run your own training scripts from anywhere and visualize metrics with W&B's experiment tracking tool. Getting Started1. Click "Open in playground" to create a copy of this notebook for yourself.2. Save a copy in Google Drive for yourself.3. To enable a GPU, please click Edit > Notebook Settings. Change the "hardware accelerator" to GPU.4. Step through each section below, pressing play on the code blocks to run the cells.Results will be logged to a [shared W&B project page](https://app.wandb.ai/sweep/simpsons?workspace=user-lavanyashukla).![](https://paper-attachments.dropbox.com/s_A8A9577ACEF2EF9A66A68CAA0D798FE3970C9A78CA8BF44A10FA307611490E90_1572034121760_Screenshot+2019-10-25+13.08.13.png) Sweeps: An OverviewRunning a hyperparameter sweep with Weights & Biases is very easy. There are just 3 simple steps:1. **Define the sweep:** we do this by creating a dictionary or a [YAML file](https://docs.wandb.com/library/sweeps/configuration) that specifies the parameters to search through, the search strategy, the optimization metric et all.2. **Initialize the sweep:** with one line of code we initialize the sweep and pass in the dictionary of sweep configurations:`sweep_id = wandb.sweep(sweep_config)`3. **Run the sweep agent:** also accomplished with one line of code, we call wandb.agent() and pass the sweep_id to run, along with a function that defines your model architecture and trains it:`wandb.agent(sweep_id, function=train)`And voila! That's all there is to running a hyperparameter sweep! In the notebook below, we'll walk through these 3 steps in more detail.You can also find the full sweeps docs with all configuration options [here](https://docs.wandb.com/library/sweeps).We highly encourage you to fork this notebook, tweak the parameters, or try the model with your own dataset! Let's Dive In!You know how sweeps work on a fundamental level. Now let's use them with a real model. 
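###Markdown Before wiring a sweep up to the full Simpsons model below, here is a minimal, self-contained sketch of the three steps in one place; it assumes the `wandb` library is already installed (the Setup section below installs and imports it). The single `learning_rate` parameter, the placeholder metric value and the `minimal_*` names are invented purely to show the shape of the calls, and the initialize/run lines are left commented out so nothing is launched before setup.
###Code
import wandb

# 1. Define the sweep: random search over one hyperparameter, maximizing 'accuracy'
minimal_sweep_config = {
    'method': 'random',
    'metric': {'name': 'accuracy', 'goal': 'maximize'},
    'parameters': {'learning_rate': {'values': [1e-2, 1e-3, 1e-4]}},
}

def minimal_train():
    # Each agent invocation starts a new run; the sampled hyperparameters land in wandb.config
    wandb.init()
    lr = wandb.config.learning_rate  # would be passed to the optimizer of a real model
    # ... build and train a model with this learning rate, then log the metric the sweep optimizes
    wandb.log({'accuracy': 0.0})  # placeholder value for illustration

# 2. Initialize the sweep (uncomment to create it in your W&B project)
# minimal_sweep_id = wandb.sweep(minimal_sweep_config, project="simpsons")
# 3. Run the sweep agent
# wandb.agent(minimal_sweep_id, function=minimal_train)
###Output _____no_output_____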
###Code # Essentials import pandas as pd import numpy as np import seaborn as sns from sklearn import model_selection from sklearn.model_selection import train_test_split, learning_curve, KFold, cross_val_score, StratifiedKFold # Models import tensorflow from tensorflow import keras from keras import backend as K from keras import regularizers from keras.models import Sequential, model_from_json from keras.preprocessing.image import ImageDataGenerator from keras.utils import np_utils from keras.layers import Dense, Flatten, Conv2D, BatchNormalization, Dropout, MaxPooling2D, Activation from keras.optimizers import RMSprop, SGD, Adam, Nadam from keras import callbacks from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, Callback, EarlyStopping # Image Libraries from PIL import Image, ImageFilter, ImageStat import random random.seed(42) import imageio import PIL import os import itertools import glob import cv2, glob import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import AxesGrid %matplotlib inline # Ignore excessive warnings os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' import tensorflow as tf tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) ###Output _____no_output_____ ###Markdown SetupStart out by installing the experiment tracking library and setting up your free W&B account:* **pip install wandb** – Install the W&B library* **import wandb** – Import the wandb library ###Code # WandB – Install the W&B library %pip install wandb -q import wandb from wandb.keras import WandbCallback ###Output  |████████████████████████████████| 1.3MB 9.4MB/s  |████████████████████████████████| 256kB 74.3MB/s  |████████████████████████████████| 460kB 70.5MB/s  |████████████████████████████████| 92kB 14.3MB/s  |████████████████████████████████| 92kB 12.6MB/s  |████████████████████████████████| 102kB 13.8MB/s  |████████████████████████████████| 184kB 70.5MB/s  |████████████████████████████████| 71kB 11.6MB/s [?25h Building wheel for shortuuid (setup.py) ... [?25l[?25hdone Building wheel for watchdog (setup.py) ... [?25l[?25hdone Building wheel for gql (setup.py) ... [?25l[?25hdone Building wheel for subprocess32 (setup.py) ... [?25l[?25hdone Building wheel for pathtools (setup.py) ... 
[?25l[?25hdone ###Markdown Explore The Simpsons Dataset ###Code # Fetch the dataset form Github !git clone https://github.com/lavanyashukla/simpsons-dataset.git # Visualize images in the dataset characters = glob.glob('simpsons-dataset/kaggle_simpson_testset/kaggle_simpson_testset/**') plt.figure(figsize=(10,10)) plt.subplots_adjust(wspace=0, hspace=0) i = 0 for character in characters[:25]: img = cv2.imread(character) img = cv2.resize(img, (250, 250)) plt.axis('off') plt.subplot(5, 5, i+1) #.set_title(l) plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) i += 1 # Define the labels for the Simpsons characters we're detecting character_names = {0: 'abraham_grampa_simpson', 1: 'apu_nahasapeemapetilon', 2: 'bart_simpson', 3: 'charles_montgomery_burns', 4: 'chief_wiggum', 5: 'comic_book_guy', 6: 'edna_krabappel', 7: 'homer_simpson', 8: 'kent_brockman', 9: 'krusty_the_clown', 10: 'lenny_leonard', 11:'lisa_simpson', 12: 'marge_simpson', 13: 'mayor_quimby',14:'milhouse_van_houten', 15: 'moe_szyslak', 16: 'ned_flanders', 17: 'nelson_muntz', 18: 'principal_skinner', 19: 'sideshow_bob'} img_size = 64 num_classes = 20 dir = "simpsons-dataset/simpsons_dataset/simpsons_dataset" # Load training data X_train = [] y_train = [] for label, name in character_names.items(): list_images = os.listdir(dir+'/'+name) for image_name in list_images: image = imageio.imread(dir+'/'+name+'/'+image_name) X_train.append(cv2.resize(image, (img_size,img_size))) y_train.append(label) X_train = np.array(X_train) y_train = np.array(y_train) # Split data for cross validation X_train = X_train[:1000] y_train = y_train[:1000] X_test = X_train[-100:] y_test = y_train[-100:] # Normalize the data X_train = X_train / 255.0 X_test = X_test / 255.0 # One hot encode the labels (neural nets only like numbers) y_train = np_utils.to_categorical(y_train, num_classes) y_test = np_utils.to_categorical(y_test, num_classes) ###Output _____no_output_____ ###Markdown Run A SweepAs you'll recall there are just 3 simple steps to running a sweep:**1. Define the sweep****2. Initialize the sweep****3. Run the sweep agent**Let's walk through each step in more detail.![](https://paper-attachments.dropbox.com/s_A8A9577ACEF2EF9A66A68CAA0D798FE3970C9A78CA8BF44A10FA307611490E90_1572034183402_Screenshot+2019-10-25+13.09.37.png) 1. Define the SweepWeights & Biases sweeps give you powerful levers to configure your sweeps exactly how you want them, with just a few lines of code. The sweeps config can be defined as a dictionary or a [YAML file](https://docs.wandb.com/library/sweeps).Let's walk through some of them together:* **Metric** – This is the metric the sweeps are attempting to optimize. Metrics can take a `name` (this metric should be logged by your training script) and a `goal` (maximize or minimize). * **Search Strategy** – Specified using the 'method' variable. We support several different search strategies with sweeps. * **Grid Search** – Iterates over every combination of hyperparameter values. * **Random Search** – Iterates over randomly chosen combinations of hyperparameter values. * **Bayesian Search** – Creates a probabilistic model that maps hyperparameters to probability of a metric score, and chooses parameters with high probability of improving the metric. The objective of Bayesian optimization is to spend more time in picking the hyperparameter values, but in doing so trying out fewer hyperparameter values.* **Stopping Criteria** – The strategy for determining when to kill off poorly peforming runs, and try more combinations faster. 
We offer several custom scheduling algorithms like [HyperBand](https://arxiv.org/pdf/1603.06560.pdf) and Envelope.* **Parameters** – A dictionary containing the hyperparameter names, and discreet values, max and min values or distributions from which to pull their values to sweep over.You can find a list of all configuration options [here](https://docs.wandb.com/library/sweeps/configuration). A Note To Advanced UsersAdvanced users can modify sweep algorithms or write their own based on the W&B base classes `wandb.sweeps.base.Search` and `wandb.sweeps.base.EarlyTerminate`, which you can find in our [open source client library](https://github.com/wandb/client).They can also create a [local controller](https://docs.wandb.com/library/sweeps/local-controller), which lets users take full control of search and stopping criteria, inspect and modify the code in order to debug issues as well as develop new features which can be incorporated into the cloud service. ###Code # Configure the sweep – specify the parameters to search through, the search strategy, the optimization metric et all. sweep_config = { 'method': 'random', #grid, random 'metric': { 'name': 'accuracy', 'goal': 'maximize' }, 'parameters': { 'epochs': { 'values': [10, 50, 100] }, 'batch_size': { 'values': [256, 128, 64, 32] }, 'weight_decay': { 'values': [0.0005, 0.005, 0.05] }, 'learning_rate': { 'values': [1e-2, 1e-3] }, 'optimizer': { 'values': ['adam', 'nadam', 'sgd', 'rmsprop'] } } } ###Output _____no_output_____ ###Markdown 2. Initialize the Sweep ###Code # Initialize a new sweep # Arguments: # – sweep_config: the sweep config dictionary defined above # – entity: Set the username for the sweep # – project: Set the project name for the sweep sweep_id = wandb.sweep(sweep_config, entity="sweep", project="simpsons") ###Output Create sweep with ID: bfozkh0e Sweep URL: https://app.wandb.ai/sweep/simpsons/sweeps/bfozkh0e ###Markdown Define Your Neural NetworkBefore we can run the sweep, let's define a function that creates and trains our neural network.In the function below, we define a simplified version of a VGG19 model in Keras, and add the following lines of code to log models metrics, visualize performance and output and track our experiments easily:* **wandb.init()** – Initialize a new W&B run. Each run is single execution of the training script.* **wandb.config** – Save all your hyperparameters in a config object. This lets you use our app to sort and compare your runs by hyperparameter values.* **callbacks=[WandbCallback()]** – Fetch all layer dimensions, model parameters and log them automatically to your W&B dashboard.* **wandb.log()** – Logs custom objects – these can be images, videos, audio files, HTML, plots, point clouds etc. Here we use wandb.log to log images of Simpson characters overlaid with actual and predicted labels. ###Code # Configure the sweep – specify the parameters to search through, the search strategy, the optimization metric et all. 
sweep_config = { 'method': 'random', #grid, random 'metric': { 'name': 'accuracy', 'goal': 'maximize' }, 'parameters': { 'epochs': { 'values': [10, 20, 50] }, 'dropout': { 'values': [0.3, 0.4, 0.5] }, 'conv_layer_size': { 'values': [16, 32, 64] }, 'encoder_size': { 'values': [128, 256, 512] }, 'decoder_size': { 'values': [256, 512, 1024] }, 'weight_decay': { 'values': [0.0005, 0.005, 0.05] }, 'learning_rate': { 'values': [1e-2, 1e-3, 1e-4, 3e-4, 3e-5, 1e-5] }, 'optimizer': { 'values': ['adam', 'nadam', 'sgd', 'rmsprop'] }, 'activation': { 'values': ['relu', 'elu', 'selu', 'softmax'] }, 'layer': { 'values': ['LSTM', 'GRU'] } } } # The sweep calls this function with each set of hyperparameters def train(): # Default values for hyper-parameters we're going to sweep over config_defaults = { 'epochs': 2, 'batch_size': 128, 'weight_decay': 0.0005, 'learning_rate': 1e-3, 'activation': 'relu', 'optimizer': 'nadam', 'seed': 42 } # Initilize a new wandb run wandb.init(config=config_defaults) # Config is a variable that holds and saves hyperparameters and inputs config = wandb.config # Determine input shape input_shape = (X_train.shape[1], X_train.shape[2], 3) # Define the model architecture - This is a simplified version of the VGG19 architecture model = Sequential() # Set of Conv2D, Conv2D, MaxPooling2D layers with 32 and 64 filters model.add(Conv2D(filters = 32, kernel_size = (3, 3), padding = 'same', activation ='relu', input_shape = input_shape, kernel_regularizer=regularizers.l2(config.weight_decay))) model.add(BatchNormalization()) model.add(Dropout(0.3)) model.add(Conv2D(filters = 64, kernel_size = (3, 3), padding = 'same', activation ='relu', input_shape = input_shape, kernel_regularizer=regularizers.l2(config.weight_decay))) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) # Another set of Conv2D, Conv2D, MaxPooling2D layers with 128 filters model.add(Conv2D(filters = 128, kernel_size = (3, 3), padding = 'same', activation ='relu', input_shape = input_shape, kernel_regularizer=regularizers.l2(config.weight_decay))) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Conv2D(filters = 128, kernel_size = (3, 3), padding = 'same', activation ='relu', input_shape = input_shape, kernel_regularizer=regularizers.l2(config.weight_decay))) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) # Another set of Conv2D, Conv2D, MaxPooling2D layers with 256 filters model.add(Conv2D(filters = 256, kernel_size = (3, 3), padding = 'same', activation ='relu', input_shape = input_shape, kernel_regularizer=regularizers.l2(config.weight_decay))) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Conv2D(filters = 256, kernel_size = (3, 3), padding = 'same', activation ='relu', input_shape = input_shape, kernel_regularizer=regularizers.l2(config.weight_decay))) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) # Another set of Conv2D, Conv2D, MaxPooling2D layers with 512 filters model.add(Conv2D(filters = 512, kernel_size = (3, 3), padding = 'same', activation ='relu', input_shape = input_shape, kernel_regularizer=regularizers.l2(config.weight_decay))) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Conv2D(filters = 512, kernel_size = (3, 3), padding = 'same', activation ='relu', input_shape = input_shape, kernel_regularizer=regularizers.l2(config.weight_decay))) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) # Another set of Conv2D, Conv2D, MaxPooling2D layers with 512 
filters model.add(Conv2D(filters = 1024, kernel_size = (3, 3), padding = 'same', activation ='relu', input_shape = input_shape, kernel_regularizer=regularizers.l2(config.weight_decay))) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Conv2D(filters = 1024, kernel_size = (3, 3), padding = 'same', activation ='relu', input_shape = input_shape, kernel_regularizer=regularizers.l2(config.weight_decay))) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) # Flatten model.add(Flatten()) model.add(Dense(512, activation ='relu', kernel_regularizer=regularizers.l2(config.weight_decay))) model.add(BatchNormalization()) model.add(Dropout(0.5)) model.add(Dense(num_classes, activation = "softmax")) # Define the optimizer if config.optimizer=='sgd': optimizer = SGD(lr=config.learning_rate, decay=1e-5, momentum=0.9, nesterov=True) elif config.optimizer=='rmsprop': optimizer = RMSprop(lr=config.learning_rate, decay=1e-5) elif config.optimizer=='adam': optimizer = Adam(lr=config.learning_rate, beta_1=0.9, beta_2=0.999, clipnorm=1.0) elif config.optimizer=='nadam': optimizer = Nadam(lr=config.learning_rate, beta_1=0.9, beta_2=0.999, clipnorm=1.0) model.compile(loss = "categorical_crossentropy", optimizer = optimizer, metrics=['accuracy']) #data augmentation datagen = ImageDataGenerator( featurewise_center=False, # set input mean to 0 over the dataset samplewise_center=False, # set each sample mean to 0 featurewise_std_normalization=False, # divide inputs by std of the dataset samplewise_std_normalization=False, # divide each input by its std zca_whitening=False, # apply ZCA whitening rotation_range=15, # randomly rotate images in the range (degrees, 0 to 180) width_shift_range=0.1, # randomly shift images horizontally (fraction of total width) height_shift_range=0.1, # randomly shift images vertically (fraction of total height) horizontal_flip=True, # randomly flip images vertical_flip=False) # randomly flip images # (std, mean, and principal components if ZCA whitening is applied). datagen.fit(X_train) model.fit_generator(datagen.flow(X_train, y_train, batch_size=config.batch_size), steps_per_epoch=len(X_train) / 32, epochs=config.epochs, validation_data=(X_test, y_test), callbacks=[WandbCallback(data_type="image", validation_data=(X_test, y_test), labels=character_names), keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True)]) predicted_images = [] for i in range(20): character = character_names[i] # Read in a character image from the test dataset image = cv2.imread(np.random.choice([k for k in glob.glob('simpsons-dataset/kaggle_simpson_testset/kaggle_simpson_testset/*.*') if character in k])) img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # Resize image and normalize it pic = cv2.resize(image, (64, 64)).astype('float32') / 255. 
# Get predictions for the character prediction = model.predict(pic.reshape(1, 64, 64,3))[0] # Get true name of the character name = character.split('_')[0].title() # Format predictions to string to overlay on image text = sorted(['{:s} : {:.1f}%'.format(character_names[k].split('_')[0].title(), 100*v) for k,v in enumerate(prediction)], key=lambda x:float(x.split(':')[1].split('%')[0]), reverse=True)[:3] # Upscale image img = cv2.resize(img, (352, 352)) # Create background to overlay text on cv2.rectangle(img, (0,260),(215,352),(255,255,255), -1) # Add text to image font = cv2.FONT_HERSHEY_DUPLEX cv2.putText(img, 'True Name : %s' % name, (10, 280), font, 0.7,(73,79,183), 2, cv2.LINE_AA) for k, t in enumerate(text): cv2.putText(img, t, (10, 300+k*18), font, 0.65,(0,0,0), 2, cv2.LINE_AA) # Add predicted image from test dataset with annotations to array predicted_images.append(wandb.Image(img, caption="Actual: %s" % name)) # Log images from test set to wandb automatically, along with predicted and true labels by passing pytorch tensors with image data into wandb.Image wandb.log({"predictions": predicted_images}) ###Output _____no_output_____ ###Markdown 3. Run the sweep agent ###Code # Initialize a new sweep # Arguments: # – sweep_id: the sweep_id to run - this was returned above by wandb.sweep() # – function: function that defines your model architecture and trains it wandb.agent(sweep_id, train) ###Output wandb: Agent Starting Run: qhzyiysj with config: batch_size: 128 epochs: 100 learning_rate: 0.001 optimizer: sgd weight_decay: 0.05 wandb: Agent Started Run: qhzyiysj ###Markdown Visualize Predictions Live Project Overview1. Check out the [project page](https://app.wandb.ai/sweep/simpsons) to see your results in the shared project. 1. Press 'option+space' to expand the runs table, comparing all the results from everyone who has tried this script. 1. Click on the name of a run to dive in deeper to that single run on its own run page.![](https://paper-attachments.dropbox.com/s_A8A9577ACEF2EF9A66A68CAA0D798FE3970C9A78CA8BF44A10FA307611490E90_1572034121760_Screenshot+2019-10-25+13.08.13.png) Visualize Sweep ResultsUse a parallel coordinates chart to see which hyperparameter values led to the best accuracy.![](https://paper-attachments.dropbox.com/s_A8A9577ACEF2EF9A66A68CAA0D798FE3970C9A78CA8BF44A10FA307611490E90_1572034183402_Screenshot+2019-10-25+13.09.37.png)We can tweak the slides in the parallel co-ordinates chart to only view the runs that led to the best accuracy values. This can help us hone in on ranges of hyperparameter values to sweep over next.![](https://paper-attachments.dropbox.com/s_A8A9577ACEF2EF9A66A68CAA0D798FE3970C9A78CA8BF44A10FA307611490E90_1572035368646_Screenshot+2019-10-25+13.29.23.png) Visualize PerformanceClick through to a single run to see more details about that run. For example, on [this run page](https://app.wandb.ai/sweep/simpsons/runs/adyi8vpr) you can see the performance metrics I logged when I ran this script.![](https://paper-attachments.dropbox.com/s_A8A9577ACEF2EF9A66A68CAA0D798FE3970C9A78CA8BF44A10FA307611490E90_1572034810322_Screenshot+2019-10-25+13.19.54.png) Visualize PredictionsYou can visualize predictions made at everystep by clicking on the Media tab.![](https://paper-attachments.dropbox.com/s_A8A9577ACEF2EF9A66A68CAA0D798FE3970C9A78CA8BF44A10FA307611490E90_1572035073411_Screenshot+2019-10-25+13.24.04.png) Review CodeThe overview tab picks up a link to the code. In this case, it's a link to the Google Colab. 
If you're running a script from a git repo, we'll pick up the SHA of the latest git commit and give you a link to that version of the code in your own GitHub repo.![](https://paper-attachments.dropbox.com/s_A8A9577ACEF2EF9A66A68CAA0D798FE3970C9A78CA8BF44A10FA307611490E90_1572034739245_Screenshot+2019-10-25+13.18.45.png) Visualize System MetricsThe System tab on the runs page lets you visualize how resource efficient your model was. It lets you monitor the GPU, memory, CPU, disk, and network usage in one spot.![](https://paper-attachments.dropbox.com/s_A8A9577ACEF2EF9A66A68CAA0D798FE3970C9A78CA8BF44A10FA307611490E90_1572034693236_Screenshot+2019-10-25+13.17.16.png) Next StepsAs you can see running sweeps is super easy! We highly encourage you to fork this notebook, tweak the parameters, or try the model with your own dataset! More about Weights & BiasesWe're always free for academics and open source projects. Email [email protected] with any questions or feature suggestions. Here are some more resources:1. [Documentation](http://docs.wandb.com) - Python docs2. [Gallery](https://app.wandb.ai/gallery) - example reports in W&B3. [Articles](https://www.wandb.com/articles) - blog posts and tutorials4. [Community](bit.ly/wandb-forum) - join our Slack community forum ###Code ###Output _____no_output_____
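###Markdown Early Termination With HyperBand Since HyperBand was mentioned above as one of the available scheduling algorithms, here is a minimal sketch of what a sweep config with early termination might look like. The `early_terminate` block follows the sweep configuration options linked earlier; the `min_iter` and `eta` values below are placeholder choices, not tuned recommendations. ###Code
# A sketch of a sweep config that adds HyperBand-style early termination.
# 'min_iter' and 'eta' are illustrative placeholder values.
sweep_config_with_stopping = {
    'method': 'random',
    'metric': {'name': 'accuracy', 'goal': 'maximize'},
    'early_terminate': {
        'type': 'hyperband',  # stop under-performing runs early
        'min_iter': 3,        # evaluate the first bracket after 3 iterations (assumption)
        'eta': 3              # downsampling rate between brackets
    },
    'parameters': {
        'learning_rate': {'values': [1e-2, 1e-3]},
        'optimizer': {'values': ['adam', 'sgd']}
    }
}

# Usage would mirror the cells above (entity/project names as before):
# sweep_id = wandb.sweep(sweep_config_with_stopping, entity="sweep", project="simpsons")
# wandb.agent(sweep_id, train)
###Output _____no_output_____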
Section2_SQL_Statements/postgres_using_psycopg2.ipynb
###Markdown Table of Contents1&nbsp;&nbsp;Module imports2&nbsp;&nbsp;Load the database3&nbsp;&nbsp;Run some queries4&nbsp;&nbsp;Show all table names in given database5&nbsp;&nbsp;Show given tables info6&nbsp;&nbsp;Show tables primary keys7&nbsp;&nbsp;Get pandas dataframe8&nbsp;&nbsp;Make dataframes columns dtype good9&nbsp;&nbsp;Create pandas dataframe of all tables and all columns Module imports ###Code import numpy as np import pandas as pd import os import time import yaml import psycopg2 ###Output _____no_output_____ ###Markdown Load the database ###Code import sys sys.path.append('../utils') import psycopg2 import util_database from util_database import * def show_method_attributes(method): x = [i for i in dir(method) if i[0].islower()] x = [i for i in x if i not in 'os np pd sys time psycopg2'.split()] return pd.DataFrame(np.array_split(x,2)).T.fillna('') show_method_attributes(util_database) print(get_postgres_configs.__doc__) dbname, dbuser, dbpass, dbport = get_postgres_configs('dvdrental') %load_ext sql %sql postgres://postgres:$dbpass@localhost:$dbport/$dbname ###Output _____no_output_____ ###Markdown Run some queries ###Code q = "select * from customer limit 2;" execute_query(q,dbname) ##%%sql q = "select * from customer limit 2;" execute_query(q,dbname) %sql select * from actor limit 2; %%sql select * from film limit 2; ###Output * postgres://postgres:***@localhost:5432/dvdrental 2 rows affected. ###Markdown Show all table names in given database ###Code from util_database import get_all_tables_names_in_database get_all_tables_names_in_database('dvdrental') ###Output _____no_output_____ ###Markdown Show given tables info ###Code from util_database import show_given_tables_info show_given_tables_info(dbname, 'customer') ###Output _____no_output_____ ###Markdown Show tables primary keys ###Code def show_tables_primary_keys(table_name): sql_query = """SELECT KU.table_name as TABLENAME,column_name as PRIMARYKEYCOLUMN FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS TC INNER JOIN INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KU ON TC.CONSTRAINT_TYPE = 'PRIMARY KEY' AND TC.CONSTRAINT_NAME = KU.CONSTRAINT_NAME AND KU.table_name='{}' ORDER BY KU.TABLE_NAME, KU.ORDINAL_POSITION;""".format(table_name) ###Output _____no_output_____ ###Markdown Get pandas dataframe ###Code customer = get_pandas_dataframe(dbname, 'customer') customer.head() ###Output _____no_output_____ ###Markdown Make dataframes columns dtype good ###Code show_method_attributes(util_database) get_all_tables_names_in_database('dvdrental') tables_names = ['actor', 'store', 'address', 'category', 'city', 'country', 'customer', 'film_actor', 'film_category', 'inventory', 'language', 'rental', 'staff', 'payment', 'film'] from util_database import show_pandas_dataframes_list (actor, store, address, category, city, country, customer, film_actor, film_category, inventory, language, rental, staff, payment, film) = [get_pandas_dataframe(dbname, i) for i in tables_names] df_tables = [actor, store, address, category, city, country, customer, film_actor, film_category, inventory, language, rental, staff, payment, film ] len(df_tables) from util_database import show_df_tables_first_value_and_dtype show_df_tables_first_value_and_dtype(df_tables, tables_names = tables_names, num=14) customer['create_date'] = pd.to_datetime(customer.create_date, errors='coerce') ###Output _____no_output_____ ###Markdown Create pandas dataframe of all tables and all columns ###Code from util_database import get_dataframe_of_all_tables_and_all_columns 
get_dataframe_of_all_tables_and_all_columns(df_tables, tables_names, style=True) ###Output _____no_output_____
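###Markdown Note that the `show_tables_primary_keys` helper above builds its SQL string but never runs it or returns anything. A minimal completion sketch, assuming the `execute_query(query, dbname)` helper from `util_database` used in the earlier cells returns the query results, might look like this: ###Code
# Completion sketch: run the primary-key query and return its result.
# Assumes execute_query(query, dbname) from util_database returns the rows,
# as in the earlier cells of this notebook.
def show_tables_primary_keys(dbname, table_name):
    sql_query = """SELECT KU.table_name AS tablename, KU.column_name AS primarykeycolumn
                   FROM information_schema.table_constraints AS TC
                   INNER JOIN information_schema.key_column_usage AS KU
                       ON TC.constraint_type = 'PRIMARY KEY'
                       AND TC.constraint_name = KU.constraint_name
                       AND KU.table_name = '{}'
                   ORDER BY KU.table_name, KU.ordinal_position;""".format(table_name)
    return execute_query(sql_query, dbname)

# Example usage with the same database as above:
# show_tables_primary_keys('dvdrental', 'customer')
###Output _____no_output_____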
_notebooks/28_12_20_Primeira_Post.ipynb
###Markdown my page > learning how to create a blog page with a jupyter notebook - toc: false - branch: master - badges: false - comments: false - author: Leandro - categories: (blog) ###Code import pandas as pd import numpy as np data = np.random.randint(0,1000,(10,10)) data df1 = pd.DataFrame(data) df1 #hide_input df2 = pd.DataFrame(data) df2 #hide df3 = pd.DataFrame(data) df3 ###Output _____no_output_____
mapping_models/examples/run_inference.ipynb
###Markdown Mapping Model Inference Install Dependencies ###Code import os os.chdir("/content") repo_dir = "rg_sound_generation" if not os.path.exists(repo_dir): !git clone https://github.com/TheSoundOfAIOSR/$repo_dir os.chdir(repo_dir) os.chdir('mapping_models') !python -m pip -q install -e . os.chdir('examples') ###Output _____no_output_____ ###Markdown ImportsRestart kernel after installing `mapping_models` above ###Code %matplotlib inline import os import tensorflow as tf import numpy as np import matplotlib.pyplot as plt import ddsp.training import gin from ddsp.training.preprocessing import F0LoudnessPreprocessor from mapping_models.trainer import create_dataset from IPython.display import Audio os.chdir("/content/rg_sound_generation/mapping_models/examples") from causal_conv_test import features_map, create_model ###Output _____no_output_____ ###Markdown Mount Google Drive ###Code from google.colab import drive drive.mount('/content/drive') ###Output _____no_output_____ ###Markdown Load Data and Model CheckpointsIf you don't have the checkpoints, please take a look at the following:Training a mapping model: [Notebook](https://github.com/TheSoundOfAIOSR/rg_sound_generation/blob/main/mapping_models/examples/run_training.ipynb)Training DDSP on Nsynth Guitar Subset: [Notebook](https://github.com/TheSoundOfAIOSR/rg_sound_generation/blob/main/members/fabio/train_ddsp_nsynth_guitar.ipynb) ###Code dataset_dir = '/content/drive/MyDrive/complete_dataset' set_name = 'test' batch_size = 1 mapping_model_checkpoint_path = '/content/drive/MyDrive/causal_single_stage/cp.ckpt' gin_file_path = '/content/drive/MyDrive/ddsp_trained_30k/operative_config-30000.gin' ddsp_model_path = '/content/drive/MyDrive/ddsp_trained_30k' sr = 16000 def map_func(features): inputs, outputs = features_map(features) return inputs, outputs, features dataset = create_dataset( dataset_dir=dataset_dir, split=set_name, batch_size=batch_size, map_func=map_func ) datagen = iter(dataset) mapping_model = create_model(model_type='single_stage') _ = mapping_model.load_weights(mapping_model_checkpoint_path) gin.parse_config_file(gin_file_path) ddsp_model = ddsp.training.models.Autoencoder() ddsp_model.restore(ddsp_model_path) ###Output _____no_output_____ ###Markdown Get Predictions ###Code def get_preds(): inputs, outputs, features = next(datagen) f0_scaled = np.squeeze(outputs.get('f0_scaled').numpy()) ld_scaled = np.squeeze(outputs.get('ld_scaled').numpy()) f0_pred, ld_pred = mapping_model.predict(inputs) features['f0_scaled'] = tf.convert_to_tensor(f0_pred) features['ld_scaled'] = tf.convert_to_tensor(ld_pred) f0_pred = np.squeeze(f0_pred) ld_pred = np.squeeze(ld_pred) f0_hz, loudness_db = F0LoudnessPreprocessor.invert_scaling(f0_pred, ld_pred) features['f0_hz'] = f0_hz features['loudness_db'] = loudness_db plt.figure(figsize=(12, 6)) plt.subplot(1, 2, 1) plt.plot(f0_scaled, label='f0_gt') plt.plot(f0_pred, label='f0_pred') plt.xlabel('time') plt.ylabel('f0_scaled') plt.legend() plt.subplot(1, 2, 2) plt.plot(ld_scaled, label='ld_gt') plt.plot(ld_pred, label='ld_pred') plt.xlabel('time') plt.ylabel('ld_scaled') plt.legend() plt.show() ddsp_outputs = ddsp_model(features, training=False) audio_pred = ddsp_model.get_audio_from_outputs(ddsp_outputs) return features['audio'], audio_pred audio, audio_pred = get_preds() print('Original Audio') Audio(audio, rate=sr) print('Predicted Audio') Audio(audio_pred, rate=sr) ###Output _____no_output_____
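###Markdown Save Audio to Disk A minimal sketch for writing the original and predicted audio to WAV files, assuming the `soundfile` package is available and that the audio tensors may carry a batch dimension: ###Code
# Sketch: write the audio returned by get_preds() to WAV files.
# Assumes the soundfile package is installed; sr = 16000 as defined above.
import numpy as np
import soundfile as sf

def save_wav(audio_tensor, path, sample_rate=sr):
    samples = np.squeeze(np.asarray(audio_tensor))  # drop any batch dimension
    sf.write(path, samples, sample_rate)

save_wav(audio, 'original.wav')
save_wav(audio_pred, 'predicted.wav')
###Output _____no_output_____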
01 - Data retrieval system.ipynb
###Markdown Code for part 1: Data retrieval system ###Code import json import requests def get_top_stories(top=10): sess = requests.Session() url = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty' html = sess.get(url) ids = json.loads(html.content.decode('utf-8')) ids = ids[:top] return ids ids = get_top_stories(top=10) ids def get_item_dict(ids): item_dict = {} sess = requests.Session() for item in ids: url = 'https://hacker-news.firebaseio.com/v0/item/{}.json?print=pretty'.format(item) html = sess.get(url) item_data = json.loads(html.content.decode('utf-8')) item_dict[item] = item_data return item_dict item_dict = get_item_dict(ids) item_dict def process_info(item_dict): titles = [] for key in item_dict.keys(): titles.append(item_dict[key].get('title')) item_info = "... ".join([x for x in titles]) return item_info process_info(item_dict) def get_headlines(): top_stories_ids = get_top_stories() item_dict = get_item_dict(top_stories_ids) data = process_info(item_dict) return data get_headlines() ###Output _____no_output_____
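###Markdown Some Hacker News items can come back without a `title` field (for example deleted or dead items), in which case `process_info` would try to join a `None` value. A more defensive variant, as a minimal sketch, might filter those out first: ###Code
# Sketch: skip items without a 'title' before joining the headlines.
def process_info_safe(item_dict):
    titles = [item.get('title') for item in item_dict.values() if item]
    titles = [t for t in titles if t]  # drop None or empty titles
    return "... ".join(titles)

# Usage mirrors the original pipeline:
# ids = get_top_stories(top=10)
# item_dict = get_item_dict(ids)
# process_info_safe(item_dict)
###Output _____no_output_____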
Mathematics/InterpretingStatisticalData/interpreting-statistical-data.ipynb
###Markdown ![Callysto.ca Banner](https://github.com/callysto/curriculum-notebooks/blob/master/callysto-notebook-banner-top.jpg?raw=true) ###Code from IPython.display import HTML HTML('''<script> code_show=true; function code_toggle() { if (code_show){ $('div.input').hide(); } else { $('div.input').show(); } code_show = !code_show } $( document ).ready(code_toggle); </script> Raw code hidden. To show code, click <a href="javascript:code_toggle()">here</a>. <b>To begin the notebook, click Kernel then click Restart & Run All. </b> ''') import random import math import scipy as sp import scipy.stats as st import pandas as pd import numpy as np import matplotlib.pyplot as plt from numpy import linspace from scipy.stats import truncnorm from ipywidgets import widgets, interact, Layout, Button, Box, interact_manual, fixed from IPython.display import display, Markdown, Javascript # Function: obtains z-value to use given confidence level # Input: confidence level (enter value from 0 to 1.00) # Output: positive z-value def get_z_value(confidence_level): z_value_to_obtain = 1 - ((1-confidence_level)/2) z_value = st.norm.ppf(z_value_to_obtain) return round( z_value , 4 ) # Function: calculate confidence inteval using 95% confidence level # Input: data set (as a list), confidence level, and a string for output 'confidence interval','mean','std','error' # Output: confidence interval # Example: calculate_confidence_interval([12,19,32,1,9,10,23,23,12,19],'confidence interval') def calculate_confidence_interval(data_set,get_output,confidence_level): # Associated z-value with 95% confidence level z = get_z_value(confidence_level) # Convert data set data_set = np.array(data_set) # Data set parameters n = data_set.size x = np.mean(data_set) s = np.std(data_set) moe = z*(s/n**(1/2)) # Output confidence_interval = ( x-z*(s/n**(1/2)) , x+z*(s/n**(1/2)) ) if(get_output == 'confidence interval'): return confidence_interval if(get_output == 'mean'): return x if(get_output == 'std'): return s if(get_output == 'margin of error'): return moe # Function: modified truncated normal # Input: mean, standard deviation, lower bound, upper bound # Output: truncated normal value # Example: get_truncated_normal(mean=8,sd=2,low=1,upp=10) def get_truncated_normal(mean=0, sd=1, low=0, upp=10): return truncnorm( (low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd) # Function: generate data points that are noramlly distributed # Input: mean, standard deviation, lower bound, upper bound, number of data points # Output: list of data points where the data follow a normal distribution # Example: generate_data_points(175,15,150,200,100) def generate_data_values(mean,st_dev,lower_bd,uper_bd,data_pts): x = get_truncated_normal(mean=mean,sd=st_dev,low=lower_bd,upp=uper_bd) return x.rvs(data_pts) # Function: create a list of intervals # Input: start value, size of interval, number of intervals # Output: list of intervals with fixed sizes # Example: create_intervals(150,2.5,20) def create_intervals(start,interval_size,number_of_intervals): list_of_intervals = [] for i in range(number_of_intervals): current_interval = [ start + i*interval_size , start + (i+1)*interval_size ] list_of_intervals.append(current_interval) return list_of_intervals # Function: create a dictionary with interval as keys # Input: list of intervals # Output: dictionary, with keys as intervals, values intiated at 0 # Example: interval_dictionary( [ [1,2] , [3,4] ]) def interval_dict(interval_list): interval_item_count = {} for interval in interval_list: interval_key 
= "[" + str(interval[0]) + "," + str(interval[1]) + "]" interval_item_count[interval_key] = 0 return interval_item_count # Function: counts the number of values that belong in a certain interval # Input: values of data, list of intervals, dictionary with intervals as key # Ouput: dictionary with interval as key and value as the number of times a value in the data set lives in that interval def interval_value_count(data_values,interval_list,interval_dict): for value in data_values: for interval in interval_list: lower_bd = interval[0] upper_bd = interval[1] interval_key = "[" + str(lower_bd) + "," + str(upper_bd) + "]" if(lower_bd <= value and value < upper_bd): interval_dict[interval_key] += 1 return interval_dict # Function: plots confidence intervals, showing where the real mean lies and showing intervals that "miss" the mean # Input: data set, lower bound and upper bound of data set, sample set, iteration (number of intervals to generate) # Output: confidence interval plots def ci_plot(data, lower_bound, upper_bound, sample, iterations, print_statement,confidence_level): fig = plt.figure(figsize=(20, 0.75*iterations)) ax = fig.add_subplot(111) # Obtain data set statistics data = np.array(data) data_mean = data.mean() # We could use the data set's minimum and maximum to set as x_min and x_max, but we've set it to be 150,200 # in this case, just to make the plot visually appealing. # (i.e. the dataset is randomly generated from 150-200, however, we might have 151.3,198.7 as min,max. xmin = lower_bound xmax = upper_bound # Plot confidence intervals y = 2*iterations counter = 0 for i in range(iterations): sample_set = random.sample(data.tolist(), sample) confidence_interval = calculate_confidence_interval(sample_set, 'confidence interval',confidence_level) plt.hlines(0.75*i , xmin, confidence_interval[0]) plt.hlines(0.75*i, confidence_interval[1], xmax) if (data_mean < confidence_interval[0] or data_mean > confidence_interval[1]): plt.hlines(0.75*i, confidence_interval[0], confidence_interval[1], color = 'r', linewidth = 4) plt.text(confidence_interval[0], 0.75*i -0.1, '(', horizontalalignment='center', fontsize = 25, color = 'r', weight = 'bold') plt.text(confidence_interval[1], 0.75*i -0.1, ')', horizontalalignment='center', fontsize = 25, color = 'r', weight = 'bold') counter += 1 else: plt.hlines(0.75*i, confidence_interval[0], confidence_interval[1], color = 'g', linewidth = 4) plt.text(confidence_interval[0], 0.75*i -0.1, '(', horizontalalignment='center', fontsize = 25, color = 'g', weight = 'bold') plt.text(confidence_interval[1], 0.75*i -0.1, ')', horizontalalignment='center', fontsize = 25, color = 'g', weight = 'bold') ci_min = str("{0:.2f}".format(confidence_interval[0])) ci_max = str("{0:.2f}".format(confidence_interval[1])) plt.text(confidence_interval[0], 0.75*i -0.4, ci_min, horizontalalignment='right', fontsize = 12) plt.text(confidence_interval[1], 0.75*i -0.4, ci_max, horizontalalignment='left', fontsize = 12) plt.text(lower_bound - 3, 0.75*i, lower_bound, verticalalignment = 'center', fontsize = 15) plt.text(upper_bound + 2, 0.75*i, upper_bound, verticalalignment = 'center', fontsize = 15) if (print_statement == True): percentage_containing_true = float("{0:.2f}".format((iterations-counter)/iterations))*100 percentage_not_containing_true = (100 - percentage_containing_true) display(Markdown("<center>" + str(round(percentage_containing_true,2)) +"% of the confidence intervals contain the true mean value. 
That is, " + str(round(percentage_not_containing_true,2)) + "% do not. <center>")) plt.text(data_mean, 0.75*iterations+0.25 , str("{0:.2f}".format(data_mean)), horizontalalignment = 'center', fontsize = 15, color = 'b') plt.vlines(data_mean, -0.25, iterations, linestyle = '--', color = 'b', linewidth = 3) ax.set_xlim(lower_bound-5,upper_bound+10) ax.set_ylim(-.25,0.75*iterations) plt.axis('off') plt.show() # Function: executes the cell below on click event using a Jupyter button widget def run_cells(ev): display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1,IPython.notebook.get_selected_index()+2)')) # Function: executes the cell above on click event using Jupyter button widget # Note: I tried parametrizing this function by allowing for which range of cells to run, but the JS command # does not work for some reason. def run_cell_above(ev): display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()-1,IPython.notebook.get_selected_index())')) # Function: executes the cell previous on click event using a Jupyter button widget def run_prev(ev): display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()-2,IPython.notebook.get_selected_index()+0)')) def run_above_current(ev): display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()-2,IPython.notebook.get_selected_index()+1)')) # Function: checks if input is float # Input: string # Output: boolean def isfloat(val): try: float(val) return True except ValueError: return False # Function: check if input is positive float # Input: string # Output: 1 or 0 def is_positive_float(val): if(isfloat(val) == True and float(val) >= 0): return 1 else: return 0 # Parameters for data generation data_points = 25000 mean = 175 standard_error = 15 lower_bound = 150 upper_bound = 200 # Functions to call to set up dictionary containing height intervals and population count data_values = generate_data_values(mean,standard_error,lower_bound,upper_bound,data_points) interval_list = create_intervals(150,2.5,20) interval_dictionary = interval_dict(interval_list) data_dict = interval_value_count(data_values,interval_list,interval_dictionary) # Note: # This interval is added with 0 value to get the last ticker value of 200 to show up in the bar plot later # It's for improving the presentation of the bar plot data_dict['200.0,202.25'] = 0 ###Output _____no_output_____ ###Markdown ---Developing Statistical Reasoning: Interpreting Statistical Data AbstractStatistical reasoning plays an increasingly large role in everyday's life, from politics (opinion polls) to sports (statistics about sports teams), through many news about health or economy for example. The goal of this notebook is to introduce and illustrate two important notions about interpreting statistical data, the notions of confidence interval and confidence level.This notebook is split into two parts. The first part is a short lecture about the statistical concepts we want to work with. The second one provides some interactive tools to experiment with them and illustrates the use of these concepts in some real-world applications. Part 1. Statistical Concepts 1.A. Mean and Standard DeviationWe first introduce well known concepts, mean and standard deviation.Definition: mean. Given a data set of $N$ numbers $\{x_1,x_2,\dots,x_N\}$, we denote the **mean** of the data set $\overline{x}$ (pronounced "x bar") to be$$ $$$$\overline{x} = \frac{x_1+x_2+\cdots+x_N}{N}.$$**Example. 
** Suppose we have the following set of data: $\{13, 19, 7, 3\}$. Here, we have $4$ numbers, so $N=4$. The mean of this data set is given by:$$\begin{align} \overline{x} &= \frac{x_1+x_2+x_3+x_4}{4} \\&= \frac{13+19+7+3}{4} \\&= 10.5 \end{align}$$ ###Code # Generate random mean for question expected_answer = round( random.random()*100 , 1) # Display exercise prompt display(Markdown("**Exercise. ** Produce a data set with five different positive distinct values with a mean of " + "$" + str(expected_answer) + "$")) display(Markdown("In the input box below, separate each of your values with a comma.")) display(Markdown("For example: `52.7, 39.2, 11.3, 42.1 , 56.5`")) # Set up text and button widget parameters mean_exercise_text = widgets.Text( placeholder='Enter your numbers here.', description='', disabled=False , layout=Layout(width='40%') ) mean_exercise_button = widgets.Button( button_style='info',description="Enter", layout=Layout(width='20%', height='30px') ) generate_new_mean = widgets.Button( button_style='info',description="Generate New Exercise", layout=Layout(width='20%', height='30px') ) # Display widgets display(mean_exercise_text) display(mean_exercise_button) display(generate_new_mean) # On button click, execute the next cell mean_exercise_button.on_click( run_cells ) generate_new_mean.on_click( run_prev ) # Obtain user input user_input = mean_exercise_text.value user_entries = user_input.split(",") # Check user input validity input_valid = 1 user_input_total = 0 user_input_set = set() # Check for number of entries if(len(user_entries) > 5): display(Markdown("Too much entries. Please enter five distinct decimal values.")) input_valid = 0 if(user_input != ""): # Check entries are of positive decimal values only for entry in user_entries: if(is_positive_float(entry) == False): display(Markdown("`" + entry + "`" + " is not a valid input. Please enter positive decimal values only.")) input_valid = 0 else: user_input_total += float(entry) user_input_set.add(float(entry)) # Check if five distinct positive decimal entries if(input_valid == 1 and len(user_input_set) < 5): display(Markdown("Please enter five distinct positive decimal values only.")) input_valid = 0 # Prepare string output format string_out = "" for entry in user_input_set: string_out += "$" + str(entry) + "$" + ", " string_out = string_out[0:len(string_out) - 2] # Check user mean user_mean = round( user_input_total/5 , 2 ) user_mean_str = "$" + str(user_mean) + "$" if(input_valid == 1 and user_mean == expected_answer): display(Markdown("Your data set " + string_out + " has a mean value of " + user_mean_str + " which is the expected mean value!")) mean_exercise_button.close() mean_exercise_text.close() elif(input_valid == 1 and user_mean != expected_answer): display(Markdown("Your data set " + string_out + " has a mean value of " + user_mean_str + ". The expected mean value is " + "$" + str(expected_answer) + "$" + " Please try again.")) ###Output _____no_output_____ ###Markdown $$\cdots$$Definition: standard deviation. Given a data set of $N$ numbers $\{x_1,x_2,\dots,x_N\}$, we define the **standard deviation** of the data set, that we denote by $s$ as follows:$$\displaystyle s = \sqrt{ \frac{1}{N}\left( (x_1-\overline{x})^2 + (x_2-\overline{x})^2 + \cdots + (x_N-\overline{x})^2 \right) }.$$**Example. ** Suppose again that we have the following set of data: $\{13, 19, 7, 3\}$. In the previous example, we've calculated $N=4$ and $\overline{x} = 10.5$. 
The standard deviation of this data set is given to be:$$\begin{align} s &= \sqrt{ \frac{1}{4}\left( (x_1-\overline{x})^2 + (x_2-\overline{x})^2 + (x_3-\overline{x})^2 + (x_4-\overline{x})^2 \right) } \\&= \sqrt{ \frac{1}{4}\left( (13 - 10.5)^2 + (19-10.5)^2 + (7-10.5)^2 + (3-10.5)^2 \right) } \\&= \sqrt{ \frac{1}{4}\left( 6.25 + 72.25 + 12.25 + 56.25 \right) } \\&= \sqrt{ \frac{147}{4} } \\&\approx 6.06217\dots \\\end{align}$$**Significance. ** The standard deviation complements the mean by telling us how much we can expect that an element of our data set deviates from the mean. The mean alone does not differentiate between the case where all elements of our data set have the same value (for example $\{2,2,2,2\}$ with mean $2$ and standard deviation $0$) or have values alternating around the mean ($\{1,3,1,3\}$ also has mean $2$ but standard deviation $1$). The standard deviation of a data set is a measure that quantifies the amount of variation in the data set. What a low standard deviation says is that the data points tend to be closer to the mean of the data set, whereas a high standard deviation says that the data points are more spread apart and take over a wider range of values that are further from the mean. **Exercise: ** Produce a data set of four values with a mean value of $5$ and standard deviation of $2.24$.**Hint: ** Use integer values only. In the input box below, separate each of your values with a comma. Example: `1, 7, 8, 9` ###Code # Set up text and button widget parameters std_exercise_text = widgets.Text( placeholder='Enter your numbers here.', description='', disabled=False , layout=Layout(width='40%') ) std_exercise_button = widgets.Button( button_style='info',description="Enter", layout=Layout(width='20%', height='30px') ) # Display widgets display(std_exercise_text) display(std_exercise_button) # On button click, execute the next cell std_exercise_button.on_click( run_cells ) # Vector holding mean, std values mean_vector = [] std_vector = [] dataset_vector = [] # Expected input expected_mean = 5 expected_std = 2.24 # Obtain user input user_input = std_exercise_text.value user_entries = user_input.split(",") # Check user input validity input_valid = 1 user_input_total = 0 user_input_set = set() # Check for number of entries if(len(user_entries) > 4): display(Markdown("Too many entries. Please enter four decimal values only.")) input_valid = 0 if(user_input != ""): # Check entries are of positive decimal values only for entry in user_entries: if(is_positive_float(entry) == False): display(Markdown("`" + entry + "`" + " is not a valid input.
Please enter positive decimal values only.")) input_valid = 0 else: user_input_total += float(entry) user_input_set.add(float(entry)) # Check if four distinct positive decimal entries if(input_valid == 1 and len(user_input_set) < 4): display(Markdown("Please enter four distinct positive decimal values only.")) input_valid = 0 if(input_valid == 1): # Calculate user mean user_mean = 0 for entry in user_entries: user_mean += float(entry) user_mean = round( user_mean/len(user_entries) , 2 ) # Calculate user STD total_sum = 0 for entry in user_entries: entry = float(entry) total_sum += (entry-user_mean)**2 user_std = round( (total_sum/len(user_entries))**(1/2) , 2 ) # Correct input if(input_valid == 1 and user_mean == expected_mean and user_std == expected_std): display(Markdown("Your dataset has the expected mean of $5$ and standard deviation of $2.24!$")) # Close tabs std_exercise_button.close() std_exercise_text.close() # Display values elif(input_valid == 1 and user_mean != expected_mean and user_std != expected_std): wrong_mean_message = "Your data set has a mean of " + "$" + str(user_mean) + "$" + " which does not equal the expected mean of $5$." display(Markdown(wrong_mean_message)) display(Markdown("Your data set gives the following values: ")) display(Markdown("Mean: " + "$" + str(user_mean) + "$")) display(Markdown("Standard Deviation: " + "$" + str(round(user_std,2)) + "$")) elif(input_valid == 1 and user_mean == expected_mean and user_std != expected_std): display(Markdown("Your data set gives the following values: ")) display(Markdown("Mean: " + "$" + str(user_mean) + "$")) display(Markdown("Standard Deviation: " + "$" + str(round(user_std,2)) + "$")) mean_vector.append(user_mean) std_vector.append(user_std) dataset_vector.append(user_entries) # Check for std the user came up with that is closest to the expected answer diff_vector = [] for entry in std_vector: diff = abs(entry-expected_std) diff_vector.append( round(diff,2) ) # Obtain index of minimum difference in the list index_of_best = np.argmin(diff_vector) # Check if user's current answer is better than their best answer so far display(Markdown("**Best input so far: **")) current_diff = abs(user_std - expected_std) best_dataset = "" for entry in dataset_vector[index_of_best]: best_dataset += entry + "," best_dataset_msg = "Data set with values: " + "$" + str(best_dataset[:-1]) + "$" + " has standard value of " + "$" + str(std_vector[index_of_best]) + "$" + " which is closest to the expected standard value of $2.24.$" display(Markdown(best_dataset_msg)) ###Output _____no_output_____ ###Markdown 1.B. Sampling Data We consider now the following context, inspired by real-world applications of statistics. We want to obtain the average (mean) value of a statistic over a very large number of cases. For example, we could be interested in the average height of people in Canada. In order to obtain the *exact answer*, we would need to measure the height of all people living in Canada. This is obviously impossible. So we need to proceed in a different way: we will select a **random sample** of the Canadian population and obtain the height measurements of the people in that sample. And the question that naturally arises is: how can we deduce anything about the average height of the whole population from this sample? Surprisingly, we will see that we can say something, using the notions of **confidence level**, **confidence interval** and **margin of error**, concepts you might have heard about especially in opinion polls.
This is based on non-trivial statistics, and we will not explore the technical aspects, but illustrate these notions with concrete examples. Confidence interval and confidence levelSo there exists a quantity $x$ we want to estimate because we can not obtain it exactly (the average height of people living in Canada in our example). We have measured the height of a random sample of $N$ people, let's denote it by $\{x_1,x_2,\dots,x_N\}$.The first important point related to sampling data is to estimate the mean of a quantity, that is, we will not compute a single number to approximate $x$, but an **interval** $(x_\text{low},x_\text{high})$. So we decide to lose *precision*, as an interval is less precise than a single number. The key point is that we want to be **confident** that the value $x$ is in that interval. This notion of confidence is important and we want to quantify it in some way. We would like to be able to say something like "I am $95\%$ confident that the value $x$ is in the interval $(x_\text{low},x_\text{high})$ I computed". This is a sentence we read often with opinion polls, aimed at describing their **accuracy**.Now, for us, we want to understand how to interpret the expression "$95\%$ confident". In our example about averaging the height of Canadian people, it can be understood as follows: if we were going to repeat *often* (ideally an infinite number of times ... obviously this is impossible) and *independently* (our samples are truly random) the process of* selecting a random set of $N$ people* measuring the height of the sampled people, $\{x_1,x_2,\dots,x_N\}$* using the same method to deduce from $\{x_1,x_2,\dots,x_N\}$ the interval $(x_\text{low},x_\text{high})$then $95\%$ of the intervals generated as above would contain the exact value $x$ we want to estimate.So we have **no guarantee** about the precise interval $(x_\text{low},x_\text{high})$ we computed from a specific sample $\{x_1,x_2,\dots,x_N\}$ but we expect it contains $x$ with probability $95\%$. Computing a confidence interval for a given confidence levelThe question now is: if we want to have a given confidence level (so we chose it), what is the method to compute $(x_\text{low},x_\text{high})$ from $\{x_1,x_2,\dots,x_N\}$? Let's denote the confidence level by $\beta$, a real number between $0$ and $1$ ($\beta=0.95$ means a confidence level of $95\%$).The method to compute $(x_\text{low},x_\text{high})$ is as follows:* Step 1. We calculate the mean $\overline{x}$ of the sampled set $\{x_1,x_2,\dots,x_N\}$, and its standard deviation $s$.* Step 2. We find the **z-score** $z$ corresponding to the chosen confidence level $\beta$ (we will not discuss in this notebook how these z-scores are obtained, but they can be read in a table, see http://www.statisticshowto.com/tables/z-table/ and we provide a separate notebook to compute it given $\beta$).* Step 3. Calculate $\displaystyle x_\text{low}=\overline{x}- z\frac{s}{\sqrt{N}}$ and $\displaystyle x_\text{high}= \overline{x}+ z\frac{s}{\sqrt{N}}$. So our confidence interval is:$$(x_\text{low},x_\text{high}) = \displaystyle \left(\overline{x}- z\frac{s}{\sqrt{N}} ,\overline{x}+ z\frac{s}{\sqrt{N}} \right).$$ Comments.This formula is actually quite intuitive. We can see that this confidence interval is centred around the mean of the sampled data ($\overline{x}$), with some slack on both sides that depends on the standard deviation, the number of samples and the mysterious z-score. 
So intuitively, we assume that $\overline{x}$ is more or less our approximation of $x$; this makes sense. But if the standard deviation is large (i.e. we see lots of variation in the sampled measurements) then we widen the interval. Conversely, if we sample many data points (i.e. if $N$ is large) then we shrink the interval (because we divide by $\sqrt{N}$); again this make sense, as the more we sample the more we expect to get a good idea of the quantity we want to approximate. What is nice is that all these ideas combine into a quite simple formula, thanks to the z-score. Example. Engineers are concerned with designing devices operated by workers to achieve high productivity and comfort. The article "Studies on Ergonomically Designed Alphanumeric Keyboards" (_Human Factors_, 185: 175-187) reports a study of preferred height for a keyboard. A sample of $N=31$ is given and a preferred keyboard height was reported by each of the $31$ sampled typists, with the sample average preferred being $\overline{x} = 80.0$ cm and the standard deviation being $s=2.0$ cm. Using the steps above, how may we obtain a 95% confidence interval for the real mean - the real preferred height for the population of experienced typists.* Step 1. We know $N=31, \overline{x}=80$, and $s=2$.* Step 2. Since we want a confidence level of 95%, we obtain an associated z-score of $1.96$. * Step 3. Calculating the estimates, we have: $\overline{x} \pm 1.96\cdot \frac{s}{\sqrt{n}} = 80 \pm (1.96)\frac{2.0}{\sqrt{31}} = 80.0 \pm 0.7 $. So $x_\text{low} = 79.3$ and $x_\text{high} = 80.7$. So our confidence interval is $(79.3,80.7)$ and the confidence level of this interval (the probability it contains the mean preferred height for the population of experienced typists) is 95%. we can observe that the interval is quite narrow, thanks to the small standard deviation and the large value of $N$. Margin of error.Last we introduce the notion of margin of error.Definition: margin of error. In a confidence interval, we define the value $z\frac{s}{\sqrt{n}}$ to be the **margin of error** of the confidence interval.So we can see that actually the margin of error is nothing else that the half-size of the interval, based on the standard deviation, the sample size and the z-score associated to the confidence level. This is exactly how you should understand the notion of margin of error that we see in many opinion polls: they have polled a sample of $N$ people, chosen a confidence level, computed the corresponding confidence interval and report the centre value $\overline{x}$ and the half-size of the interval as margin of error. Part 2: Interactive Activities and Real Examples. How Does the Sample Size, Confidence Level, and Margin of Error Affect Confidence Intervals?When calculating confidence interval, margin of error, and determining what confidence level to use, the size of the random sample we use is important. In the example we develop now, we look at the height distribution of men, among a population size of 25,000 individuals. In an interactive tool below, we will be looking at the impact of the random sample size on the size of the confidence interval and the margin of error. 
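###Markdown Before turning to the interactive tool, here is a minimal code sketch of the three-step recipe from Part 1.B, applied to the keyboard-height example ($N = 31$, $\overline{x} = 80.0$ cm, $s = 2.0$ cm, $95\%$ confidence level): ###Code
# Sketch of the three-step confidence interval recipe from Part 1.B.
import math
import scipy.stats as st

def confidence_interval(sample_mean, sample_std, n, confidence_level=0.95):
    # Step 1 (computing the sample mean and standard deviation) is assumed already done.
    # Step 2: z-score for the chosen confidence level (same idea as get_z_value above).
    z = st.norm.ppf(1 - (1 - confidence_level) / 2)
    # Step 3: margin of error and the interval centred on the sample mean.
    margin_of_error = z * sample_std / math.sqrt(n)
    return sample_mean - margin_of_error, sample_mean + margin_of_error

# Keyboard-height example: N = 31, mean 80.0 cm, standard deviation 2.0 cm.
low, high = confidence_interval(80.0, 2.0, 31, 0.95)
print(round(low, 1), round(high, 1))  # approximately 79.3 and 80.7, as in the text
###Output _____no_output_____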
###Code x_values = data_dict.keys() y_values = list( data_dict.values() ) # Fix labeling for barchart x_labels = linspace(150,200, len(y_values) ) # Bar plot fig = plt.figure( figsize=(20,5) ) ax = fig.add_subplot(111) bars = plt.bar( x_labels + (1.25) , y_values , 2.5 , hatch = '//' , alpha = 1 , edgecolor = 'black' , color = 'orange' , linewidth=2 ) mean_line = plt.vlines(data_values.mean(), 0, 2000, linestyle = '--', color = 'b', linewidth = 3) mean_text = plt.text(data_values.mean(), 2150 , 'Mean = ' + str("{0:.2f}".format(data_values.mean())), horizontalalignment = 'center', fontsize = 15, color = 'b') # Note: # A dummy bar is intoduced at the right end of the bar plot, this is to have the maximum value of the plot # on the x-axis show up for presentation purposes. bars[-1].set_color('white') # Plot display commands ax.set_ylim(0,2500) plt.title("Height Distribution of Men \n (Population Size = 25000)",fontsize = 20) plt.xlabel('Height (in centimeters)',fontsize = 16) plt.ylabel('Number of people',fontsize = 16) plt.xticks(fontsize = 16) plt.yticks(fontsize = 16) plt.show() ###Output _____no_output_____ ###Markdown In the bar chart above, each bar represents the number of men from the population that belongs in a certain height range. We have used intervals of size 2.5 to group our countings. For instance, the first bar counts the number of men with height 150-152.5, and we see that they are approximately 500 of the 25,000 population size. The next bar counts the number of men with height 152.5-150, with the count approximately at 600, and so on. The dashed line in blue shows the location of the mean in the bar plot. Using a 95% confidence level means that if we record the heights of say 1,000 men from the population over and over again and calculate the confidence interval, we expect about 95% of the confidence intervals will obtain the true average height of men in the population.Try it for yourself! The interactive tool below generates confidence intervals using 95% confidence levels for a sample size of your choice. The green intervals obtain the true mean whereas red intervals do not. With this tool you can appreciate visually how increasing the number of samples impacts positively the likelihood that the confidence intervals actually contain the mean height. ###Code # Counter to prevent plotting intervals until run interact is clicked counter_ci = 0 # Initialize widgets style = {'description_width': 'initial'} sample_size = widgets.IntSlider(value = 2500, min = 20, max = 5000, step = 20, description = 'Sample Size', style=style, readout_format='d') iterations = widgets.IntSlider(value = 50,min = 5, max = 100, description = 'Number of Intervals', style=style, readout_format='d') ci_button = widgets.Button(button_style='info', description="Run Interact", layout=Layout(width='15%', height='30px')) # Display widgets display(sample_size) display(iterations) display(ci_button) # Plot intervals on click ci_button.on_click( run_cells ) # Plot intervals once run interact is clicked counter_ci += 1 if counter_ci >= 2: ci_plot(data_values, lower_bound, upper_bound, sample_size.value , iterations.value, True, 0.95) ###Output _____no_output_____ ###Markdown What happens to the size of the confidence intervals and margin of errors as sample sizes increase? Justify your answer. 
###Code # Text widget for this interactives question prompt samplesize_interp_text = widgets.Textarea( value='', placeholder='', description='', disabled=False , layout=Layout(width='100%', height='100px') ) # Counter to question attempts counter_samplesize_q = 0 # Other widgets for this interactive's question prompt box_layout = Layout(display='flex', flex_flow='row', align_items='stretch', width='100%', justify_content = 'center') save_button = widgets.Button(button_style='info',description="Save", layout=Layout(width='15%', height='30px')) save_box = Box(children=[save_button], layout=box_layout) display(samplesize_interp_text) display(save_box) save_button.on_click( run_cells ) def run_edit_cells( b ): display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()-1,IPython.notebook.get_selected_index())')) edit_button.close() samplesize_interp = samplesize_interp_text.value edit_button = widgets.Button(button_style='info',description="Edit", layout=Layout(width='15%', height='30px')) answer_button = widgets.Button(button_style='info',description="Reveal Answer", layout=Layout(width='15%', height='30px')) options_box = Box(children=[edit_button, answer_button], layout=box_layout) if ( samplesize_interp != ''): samplesize_interp_text.close() save_button.close() samplesize_interp_text = widgets.Textarea( value= samplesize_interp, placeholder='', description='', disabled=False , layout=Layout(width='100%', height='100px') ) display(Markdown('<center>' + samplesize_interp + '</center>')) display(options_box) edit_button.on_click( run_edit_cells ) answer_button.on_click(run_cells) counter_samplesize_q += 1 if (counter_samplesize_q >= 2): display(Markdown("<b> Expected Answer: </b> ")) display(Markdown("As sample size increases, confidence intervals and margin of errors decrease.")) display(Markdown("<b> Explanation: </b> ")) display(Markdown("By increasing sample size, we are increasing our data set and thus, we can acheive a more precise" + " estimate of the true average height of men in the population. Note that the margin of error is half" + " of the size of the confidence interval. Hence, we see that if margin of error increases, then confidence increases and if margin of error decreases, then confidence intervals decreases.")) ###Output _____no_output_____ ###Markdown In the interactive tool below, we will fix our random sample size to be 1,000 individuals and compute 50 confidence intervals. So the only parameter we need to set is the confidence level, anbd we will see how it impacts the margin of error, i.e. the size of the intervals. Indeed the margin of error depends on the sample size (fixed to 1000 here) and the z-value, that gets lower as the confidence level decreases. Below, we can see that as we choose a higher value for the confidence level, the greater the z-value we need to use, which in turns makes the margin of error term larger, thus making the confidence interval larger. Intuitively, this is what we expect as a larger interval is more likely to contain the exact mean we want to estimate. With a low confidence level, the confidence intervals are very tight, and have a higher probability of not containing the true mean. 
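###Markdown To make the link between confidence level, z-value and margin of error concrete, a short sketch like the one below prints the z-value and the resulting margin of error for a few confidence levels, using a sample size of $N = 1000$ and the standard deviation of $15$ used to generate the height data above: ###Code
# Sketch: higher confidence level -> larger z-value -> larger margin of error.
import math
import scipy.stats as st

n = 1000  # sample size fixed as in the interactive tool below
s = 15    # standard deviation used when generating the height data
for confidence_level in [0.50, 0.80, 0.90, 0.95, 0.99]:
    z = st.norm.ppf(1 - (1 - confidence_level) / 2)
    margin_of_error = z * s / math.sqrt(n)
    print("{:.0%}: z = {:.4f}, margin of error = {:.2f}".format(confidence_level, z, margin_of_error))
###Output _____no_output_____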
###Code # Tried a step size of 0.1, but IntSlider widget does not have this feature confidence_level_slider = widgets.IntSlider( value = 50, min = 1, max = 99, description = 'Confidence Level', style=style, readout_format='d') confidence_level_button = widgets.Button(button_style='info', description="Run Interact", layout=Layout(width='15%', height='30px')) display(confidence_level_slider) display(confidence_level_button) confidence_level_button.on_click( run_cells ) # Obtain confidence level and z-values confidence_level = (confidence_level_slider.value)*(0.01) z = get_z_value(confidence_level) # Display confidence_level_str = str( float(confidence_level)*100)[:-2] + "\%" message_out = "To obtain a confidence interval with $" + confidence_level_str + "$ confidence level requires a z-value of " + "$" + str(z) + ".$" display(Markdown(message_out)) # Interval plots sample_size_value = 1000 number_of_intervals = 50 ci_plot(data_values, lower_bound, upper_bound, sample_size_value , number_of_intervals, True, confidence_level) ###Output _____no_output_____ ###Markdown Confidence Intervals and Confidence Levels in Media In 2017, [The Hockey News](https://thehockeynews.com/news/article/front-office-confidence-rankings-part-2-fans-weigh-in-on-how-each-team-is-doing) created a survey asking people to mark each NHL team's front office on a scale of 5 in six different categories: roster building, cap management, draft and develop, trading, free agency, and their overall vision. Over 1300 people contributed in the survey and since not everyone was familiar with every team's management group, each NHL team had roughly 800 to 1000 votes. The goal was to **rank** the NHL teams in each of the 6 categories. For a given category, this would have been easy if each team had received as many marks as there were people contributing to the survey: it would have been sufficient to rank according to the mean mark. But as noted above, not every participant marked every team, so The Hockey news faced an issue akin to sampling marks, where a participant not marking a team in a category was considered as this (missing) mark being un-sampled. So instead of a mean mark per category for each team, The Hockey News computed a 95% confidence interval. That is, for each of the 30 teams, they obtained six confidence intervals (one for each ranking category), with 95% certainty that the intervals contains the actual mark value. Now, each of these confidence intervals have their associated margin of error (there are 180 intervals in total) which averaged out to be 0.06, which is quite small.Take for example the Nashville Predators who ranked first overall out of the 30 teams. 
Since the margin of error is about 0.06, the true mark (out of 5) of the team's ability in roster building likely (with probability 95%) lies in the confidence interval:$$(4.4 - 0.06, 4.4 + 0.06) = (4.34, 4.46).$$ Confidence Intervals and Margin of Error in Media ###Code # Create nested column labels inner_columns = ['Men, Sample Size','Men, Mean','Men, Standard Deviation','Women, Sample Size','Women, Mean','Women, Standard Deviation'] # Row names indices_labels = ['Systolic Blood Pressure','Diastolic Blood Pressure','Total Serum Cholesterol', 'Weight', 'Height', 'Body Mass Index'] dataframe = pd.DataFrame( [ [1623, 128.2, 17.5, 1911,126.5, 20.1] , [1622, 75.6, 9.8, 1910, 72.6, 9.7] , [1544, 192.4, 35.2, 1766, 207.1, 36.7], [1612, 194.0, 33.8, 1984, 157.7, 34.6], [1545, 68.9, 2.7, 1781, 63.4, 2.5], [1545, 28.8, 4.6, 1781, 27.6, 5.9] ] , columns = inner_columns) # Group the labels as split by " , " labels = dataframe.columns.str.split(', ', expand=True).values dataframe.columns = pd.MultiIndex.from_tuples([x for x in labels]) original_index_list = dataframe.index.tolist() for current_index in range(len(original_index_list)): idx = original_index_list.index(current_index) original_index_list[current_index] = indices_labels[current_index] d = dict(selector="th", props=[('text-align', 'left')]) dataframe.index = original_index_list ###Output _____no_output_____ ###Markdown [Boston University of Public Health](http://sphweb.bumc.bu.edu/otlt/MPH-Modules/BS/BS704_Confidence_Intervals/BS704_Confidence_Intervals_print.html) analyzed data of 3,539 participants who attended the 7th Examination of the Offspring Cohort in The Framingham Heart Study. For people participating to the study, several health measures were taken, again with a missing data issue as not all participants submitted their information for each category, an issue similar to the hockey example above. The summarized data set is presented below. ###Code pd.options.display.max_rows = 120 display(dataframe.style.set_properties(**{'width':'12em', 'text-align':'center'})\ .set_table_styles([d])) ###Output _____no_output_____ ###Markdown With 95% confidence, the confidence interval of Men's Systolic Blood Pressure is $\left(127.35, 129.05\right)$ and the margin of error is $0.85$. That is, there is a 95% chance that the actual average of Men's Systolic Blood Pressure in the population belongs in this confidence interval. ###Code def run_edit1_cells( b ): display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()-1,IPython.notebook.get_selected_index())')) edit_button_1.close() def run_edit2_cells( b ): display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()-1,IPython.notebook.get_selected_index())')) edit_button_2.close() ###Output _____no_output_____ ###Markdown **Exercise 1.** In the next two exercises, we will be computing confidence intervals with a certain confidence level. In the following input box below, enter the confidence level desired to obtain the appropriate z-value to use. 
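###Markdown As a quick worked check (not needed for the exercise itself), the systolic blood pressure interval quoted above can be reproduced directly from the table values: ###Code
# Sketch: reproduce the 95% interval for Men's Systolic Blood Pressure
# from the table above (n = 1623, mean = 128.2, s = 17.5, z = 1.96).
import math

n, mean, s, z = 1623, 128.2, 17.5, 1.96
margin_of_error = z * s / math.sqrt(n)
print(round(margin_of_error, 2))                                           # ~0.85
print(round(mean - margin_of_error, 2), round(mean + margin_of_error, 2))  # ~(127.35, 129.05)
###Output _____no_output_____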
###Code # Set up text and button widget parameters get_zv_text = widgets.Text( placeholder='Enter a value between 0 and 100.', description='', disabled=False , layout=Layout(width='25%') ) get_zv_button = widgets.Button(button_style='info',description="Enter", layout=Layout(width='10%', height='30px') ) # Display widgets display(get_zv_text) display(get_zv_button) # On button click, execute the next cell get_zv_button.on_click( run_cells ) # Obtain user input confidence_level = get_zv_text.value # Check range & display output if value is valid invalid_input_msg = "Enter a value between 0 and 100." if(isfloat(confidence_level) == 1): confidence_level = float(get_zv_text.value)*(0.01) if( (confidence_level > 0) and (confidence_level < 1) ): z_value = get_z_value(confidence_level) md_print = "The z-value you need to use for a " + "$" + str(get_zv_text.value) + "\%$" + " confidence interval is " + "$" + str(z_value) + ".$" display(Markdown( md_print) ) else: display(Markdown(invalid_input_msg)) else: display(Markdown(invalid_input_msg)) ###Output _____no_output_____ ###Markdown **(i)** Calculate the confidence interval of Women's Weight with a $95\%$ confidence level. Round each value to the nearest hundredth. ###Code # Widgets for interval question box_layout = Layout(display='flex', flex_flow='row', align_items='stretch', width='100%', justify_content = 'center') # Interval display format comma_a = widgets.HTMLMath(value="$,$") open_bracket_a = widgets.HTMLMath(value="$\Big($") close_bracket_a = widgets.HTMLMath(value="$\Big)$") # Text input for left and right interval left_endpoint_text_a = widgets.Textarea( value='', placeholder='', description='', disabled=False , layout=Layout(width='10%', height='32.5px') ) right_endpoint_text_a = widgets.Textarea( value='', placeholder='', description='', disabled=False , layout=Layout(width='10%', height='32.5px') ) # Display widgets confidence_interval = [open_bracket_a, left_endpoint_text_a, comma_a, right_endpoint_text_a, close_bracket_a] ci_box = Box(children=confidence_interval, layout=box_layout) display(ci_box) submit_button_1 = widgets.Button(button_style='info',description="Submit", layout=Layout(width='15%', height='30px')) submit_box = Box(children=[submit_button_1], layout=box_layout) display(submit_box) # Run next cell on submit click submit_button_1.on_click( run_cells ) # Question attempt counter counter_a = 0 # Obtain user's interval values min_range_a = left_endpoint_text_a.value max_range_a = right_endpoint_text_a.value # Fix format correct_min_range_a = "{0:.2f}".format(157.7-1.96*34.6/math.sqrt(1984)) correct_max_range_a = "{0:.2f}".format(157.7+1.96*34.6/math.sqrt(1984)) if ( min_range_a == correct_min_range_a and max_range_a == correct_max_range_a): # Close question prompt widgets open_bracket_a.close() close_bracket_a.close() comma_a.close() left_endpoint_text_a.close() right_endpoint_text_a.close() submit_button_1.close() confidence_interval_string = '(' + min_range_a + ',' + max_range_a + ')' # Display user answer display(Markdown("**You answered: **")) display(Markdown("<center>$"+confidence_interval_string+"$ </center>")) display(Markdown("This is correct!")) # Display expected answer + explanations display(Markdown("<b> Expected Answer: </b> ")) display(Markdown("$(156.18, 159.22)$")) display(Markdown("<b> Explanation: </b> ")) display(Markdown("For Women's weight, the sample size is $N = 1984$, the mean is $\overline x = 157.7$, and the standard deviation is $s = 34.6$." 
+ " Since the z-value for a $95\%$ confidence level is $1.96$, the confidence interval is <center> $\Bigg( 157.7 - 1.96 \cdot \displaystyle " + "{34.6 \over \sqrt{1984} },157.7 + 1.96 \cdot \displaystyle {34.6 \over \sqrt{1984}} \Bigg) = (156.18,159.22)$</center>")) else: # Increment question counter counter_a += 1 if (counter_a != 1): incorrect = widgets.HTML(value=" <center>Incorrect! Please try again. </center>") display(incorrect) # Question attempts maximum limit if (counter_a > 3): # Close question prompt widgets open_bracket_a.close() close_bracket_a.close() comma_a.close() left_endpoint_text_a.close() right_endpoint_text_a.close() submit_button_1.close() incorrect.close() # Display expected answer + explanation display(Markdown("<b> Expected Answer: </b> ")) display(Markdown("$(156.18, 159.22)$")) display(Markdown("<b> Explanation: </b> ")) display(Markdown("For Women's weight, the sample size is $N = 1984$, the mean is $\overline x = 157.7$, and the standard deviation is $s = 34.6$." + " Since the z-value for a $95\%$ confidence level is $1.96$, the confidence interval is <center> $\Bigg( 157.7 - 1.96 \cdot \displaystyle " + "{34.6 \over \sqrt{1984} },157.7 + 1.96 \cdot \displaystyle {34.6 \over \sqrt{1984}} \Bigg) = (156.18,159.22)$</center>")) ###Output _____no_output_____ ###Markdown **(ii)** Use the terms confidence level, confidence interval and margin of error to interpret the true measurement of women's average weight in Boston. ###Code interpretation_text_a = widgets.Textarea( value='', placeholder='', description='', disabled=False , layout=Layout(width='100%', height='100px') ) save_button_1 = widgets.Button(button_style='info',description="Save", layout=Layout(width='15%', height='30px')) save_box = Box(children=[save_button_1], layout=box_layout) display(interpretation_text_a) display(save_box) save_button_1.on_click( run_cells ) interp_a = interpretation_text_a.value edit_button_1 = widgets.Button(button_style='info',description="Edit", layout=Layout(width='15%', height='30px')) edit_box = Box(children=[edit_button_1], layout=box_layout) if ( interp_a != ''): interpretation_text_a.close() save_button_1.close() interpretation_text_a = widgets.Textarea( value= interp_a, placeholder='', description='', disabled=False , layout=Layout(width='100%', height='100px') ) display(Markdown('<center>' + interp_a + '</center>')) display(edit_box) edit_button_1.on_click( run_edit1_cells ) ###Output _____no_output_____ ###Markdown **Exercise 2.****(i)** Calculate the confidence interval of Men's Body Mass Index (BMI) with a $99\%$ confidence level. Round each value to the nearest hundredth. 
###Code # Text display for interval comma_b = widgets.HTMLMath(value="$,$") open_bracket_b = widgets.HTMLMath(value="$\Big($") close_bracket_b = widgets.HTMLMath(value="$\Big)$") # Text input for user's interval left_endpoint_text_b = widgets.Textarea( value='', placeholder='', description='', disabled=False , layout=Layout(width='10%', height='32.5px') ) right_endpoint_text_b = widgets.Textarea( value='', placeholder='', description='', disabled=False , layout=Layout(width='10%', height='32.5px') ) # Display widgets confidence_interval = [open_bracket_b, left_endpoint_text_b, comma_b, right_endpoint_text_b, close_bracket_b] ci_box = Box(children=confidence_interval, layout=box_layout) display(ci_box) submit_button_2 = widgets.Button(button_style='info',description="Submit", layout=Layout(width='15%', height='30px')) submit_box = Box(children=[submit_button_2], layout=box_layout) display(submit_box) # Run next cell on submit click submit_button_2.on_click( run_cells ) # Question counter counter_b = 0 # Obtain user's values min_range_b = left_endpoint_text_b.value max_range_b = right_endpoint_text_b.value # z-value for this exercise z_value = 2.5758 # Format range correct_min_range_b = "{0:.2f}".format(28.8-z_value*4.6/math.sqrt(1545)) correct_max_range_b = "{0:.2f}".format(28.8+z_value*4.6/math.sqrt(1545)) if ( min_range_b == correct_min_range_b and max_range_b == correct_max_range_b): # Close question prompt widget open_bracket_b.close() close_bracket_b.close() comma_b.close() left_endpoint_text_b.close() right_endpoint_text_b.close() submit_button_2.close() # Display user answer confidence_interval_string = '(' + min_range_b + ',' + max_range_b + ')' display(Markdown("**Your answer: **")) display(Markdown("<center>$"+confidence_interval_string+"$ </center>")) # Display expected answe + explanations display(Markdown("<b> Expected Answer: </b> ")) display(Markdown("$(28.50, 29.10)$")) # Change display(Markdown("<b> Explanation: </b> ")) display(Markdown("For Men's Body Mass Index, the sample size is $N = 1545$, the mean is $\overline x = 28.8$, and the standard deviation is $s = 4.6$." + " Since the z-value for a $99\%$ confidence level is $2.5758$, the confidence interval is:")) display(Markdown("<center> $\displaystyle \Bigg( 28.8 - 2.5758 \cdot {4.6 \over \sqrt{1545} },28.8 + 2.5758 \cdot {4.6 \over \sqrt{1545}} \Bigg) = (28.50,29.10)$</center>")) else: # Increment question counter on failed attempts counter_b += 1 if (counter_b != 1): incorrect = widgets.HTML(value="<center>Incorrect! Please try again. </center>") display(incorrect) # Question attempt maximum limit if (counter_b > 3): # Close question prompts open_bracket_b.close() close_bracket_b.close() comma_b.close() left_endpoint_text_b.close() right_endpoint_text_b.close() submit_button_2.close() # Display expected answer + explanation incorrect.close() display(Markdown("<b> Expected Answer: </b> ")) display(Markdown("$(28.50, 29.10)$")) # Change display(Markdown("<b> Explanation: </b> ")) display(Markdown("For Men's Body Mass Index, the sample size is $N = 1545$, the mean is $\overline x = 28.8$, and the standard deviation is $s = 4.6$." 
+ " Since the z-value for a $99\%$ confidence level is $2.5758$, the confidence interval is:")) display(Markdown("<center> $\displaystyle \Bigg( 28.8 - 2.5758 \cdot {4.6 \over \sqrt{1545} },28.8 + 2.5758 \cdot {4.6 \over \sqrt{1545}} \Bigg) = (28.50,29.10)$</center>")) ###Output _____no_output_____ ###Markdown **(ii)** Use the terms confidence level, confidence interval and margin of error to interpret the true measurement of Men's average BMI in Boston. ###Code interpretation_text_b = widgets.Textarea( value='', placeholder='', description='', disabled=False , layout=Layout(width='100%', height='100px') ) save_button_2 = widgets.Button(button_style='info',description="Save", layout=Layout(width='15%', height='30px')) save_box = Box(children=[save_button_2], layout=box_layout) display(interpretation_text_b) display(save_box) save_button_2.on_click( run_cells ) interp_b = interpretation_text_b.value edit_button_2 = widgets.Button(button_style='info',description="Edit", layout=Layout(width='15%', height='30px')) edit_box = Box(children=[edit_button_2], layout=box_layout) if ( interp_b != ''): interpretation_text_b.close() save_button_2.close() interpretation_text_b = widgets.Textarea( value= interp_b, placeholder='', description='', disabled=False , layout=Layout(width='100%', height='100px') ) display(Markdown('<center>' + interp_b + '</center>')) display(edit_box) edit_button_2.on_click( run_edit2_cells ) ###Output _____no_output_____
2016-2017.Meetings/01.pca-mnist.ipynb
###Markdown Each image is 8 $\times$ 8. We can flatten these into ${8*8 = 64}$ dimensional vectors such that every image is described by a single point in $\mathbb{R}^{64}$. Here is one of those points: ###Code print(zeroes[0]) ###Output [ 0. 0. 5. 13. 9. 1. 0. 0. 0. 0. 13. 15. 10. 15. 5. 0. 0. 3. 15. 2. 0. 11. 8. 0. 0. 4. 12. 0. 0. 8. 8. 0. 0. 5. 8. 0. 0. 9. 8. 0. 0. 4. 11. 0. 1. 12. 7. 0. 0. 2. 14. 5. 10. 12. 0. 0. 0. 0. 6. 13. 10. 0. 0. 0.] ###Markdown How can we visualize the distribution of these points in $\mathbb{R}^{64}$? We need to approximate the relative positions of the points in 1, 2 or 3 dimensions in order to get a sense for how they are distributed. We can do this with principal components analysis (PCA). The overarching idea here is that our points approximate some high dimensional manifold (a manifold with dimensionality less than or equal to 64), and we can approximate this approximation with dimensionality reduction techniques such as PCA. If this is all very confusing to you, don't worry about it! I'm purposely being vague in my description - a more thorough explanation would require a more rigorous treatment of the math behind PCA. Let's try to approximate the relative positions of our points in $\mathbb{R}^3$, then: ###Code from sklearn.decomposition import PCA both = [X[i] for i in range(len(y)) if y[i] == 0 or y[i] == 1] labels = [y_ for y_ in y if y_ == 0 or y_ == 1] pca = PCA(n_components=3) Xproj3d = pca.fit_transform(both) ###Output _____no_output_____ ###Markdown `zeroes[0]` is now approximated by this point in $\mathbb{R}^3$: ###Code print(Xproj3d[labels.index(0)]) # labels.index(0) gives us the first index that is 0, i.e., what used to be zeroes[0] ###Output [-24.84615349 -0.89358165 0.58202371] ###Markdown Of course, this is completely meaningless without the rest of the points for context. ###Code print(Xproj3d) from mpl_toolkits.mplot3d import Axes3D fig = plt.figure() ax3d = Axes3D(fig) ax3d.set_xlabel('X') ax3d.set_ylabel('Y') ax3d.set_zlabel('Z') ax3d.view_init(elev=25., azim=120) colors = ['orange' if y == 0 else 'lightblue' for y in labels] ax3d.scatter(Xproj3d.T[0], Xproj3d.T[1], Xproj3d.T[2], c=colors, s=30, alpha=0.7) ###Output _____no_output_____ ###Markdown The 0s are an amber color and the 1s are turquoise in the above plot. It looks like they form two (three?) distinct clusters! How does the two-dimensional projection look? ###Code pca = PCA(n_components=2) Xproj2d = pca.fit_transform(both) plt.scatter(Xproj2d.T[0], Xproj2d.T[1], c=colors, s=30, alpha=0.7) ###Output _____no_output_____
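###Markdown One way to judge how faithful these 2D and 3D pictures are is the fraction of the original 64-dimensional variance that the retained components explain. The sketch below assumes the `both` list built above and simply refits PCA at both sizes to report the cumulative ratio. ###Code
from sklearn.decomposition import PCA  # already imported above

# cumulative explained variance of the low-dimensional projections
for k in (2, 3):
    pca_k = PCA(n_components=k).fit(both)
    print(k, 'components explain', round(pca_k.explained_variance_ratio_.sum(), 3), 'of the variance')
###Output _____no_output_____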
preprocesamiento/preprocesamiento_IEBS.ipynb
###Markdown Preprocesamiento de Datos Estudiantiles: caso de estudio IEBS Metodología- Paso 1: Limpieza de datos- Paso 2: Transformar direcciones a coordenadas de latidud y longitud- Paso 3: Normalización y escalado de datos- Paso 4: Transformación de características estudiantiles basada en métricas Configuración Inicial ###Code %matplotlib notebook %matplotlib inline import numpy as np import pandas as pd import seaborn as sb from googlemaps import Client import pylab import pingouin as pg import math from math import sin, cos, sqrt, atan2, radians, asin, pi import matplotlib.pyplot as plt from matplotlib import cm from sklearn.feature_selection import SelectKBest,chi2, mutual_info_classif, f_classif from sklearn.cluster import KMeans from sklearn.metrics import pairwise_distances_argmin_min from sklearn.model_selection import train_test_split from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler, MinMaxScaler, LabelEncoder import scipy.stats as stats from scipy.stats import shapiro, ks_2samp, ks_1samp, kstest, chi2_contingency, norm dataset_ini = pd.read_csv('/Users/Downloads/Dataset/Final_Dataset/Data_original_IEBS.csv') #dataset_ini.info() ###Output _____no_output_____ ###Markdown Paso 1: Limpieza de datos- Se eliminan los registros vacíos e inconsistentes.- Al final también se deben eliminar los datos atípico (ruido en los datos). ###Code missing_data = dataset_ini.isnull() for column in missing_data.columns.values.tolist(): print(column) print(missing_data[column].value_counts()) print('') dataset_ini.replace('?', np.nan, inplace = True) #dataset_ini.dropna(subset=['age'], axis = 0, inplace= True) dataset_ini.reset_index(drop = True, inplace = True) #dataset_ini['gender'].replace(np.nan, 'Top string', inplace = True) #average_age = dataset_ini['age'].astype(float).mean(axis = 0) #dataset_ini['course'].replace(np.nan, average_column_name, inplace = True) ###Output _____no_output_____ ###Markdown Paso 2: Transformar direcciones a coordenadas de latidud y longitud- Se utiliza una clave para utilizar la API de google (tiene un costo) y georreferenciar las direcciones a partir de las características dirección de residencia y ciudad de residencia. Luego, se devuelve el valor en coordenadas de latitud y longitud. Es importante tener en cuenta que se debe especificar la ciudad junto con la dirección. Ejemplo: CL 104 81 -13, medellín colombia. 
###Code # Convertir direcciones con la API de google #KEY = 'AIzaSyCAHqWE9DbBOGsOjVxIojZMrYt_ZuxNoD' # Key para usar API de google (key de prueba) gmaps = Client(key=KEY) df_address = dataset_ini def get_coordinates(address): #city = 'medellin, Colombia' #geocode_result = gmaps.geocode(str(address) +' '+ city) geocode_result = gmaps.geocode(str(address)) if len(geocode_result) > 0: return list(geocode_result[0]['geometry']['location'].values()) else: return [np.NaN, np.NaN] coordinates = df_address['address'].apply(lambda x: pd.Series(get_coordinates(x), index=['LATITUDE', 'LONGITUDE'])) df_address = pd.concat([df_address[:], coordinates[:]], axis="columns") df_address ###Output _____no_output_____ ###Markdown Paso 3: Normalización y escalado de datos- Se utiliza el escalado estandar de los datos, este tipo de escalado de los datos, transforma cada valor en un rango alrededor de la media 0 y la desviación estándar 1, es decir, cada valor será escalado restando la media y dividiendo por la desviación estándar.\begin{equation}\label{equ:standard}\begin{split}\hspace{6cm} z& = \frac{x - u}{s}\end{split}\end{equation} ###Code scaler= StandardScaler() dataset_ini_ = dataset_ini.drop(['Class'], axis=1) # quito la variable dependiente "Y" scaler.fit(dataset_ini_) # calculo la media para poder hacer la transformacion dataset_ini_scaled=scaler.transform(dataset_ini_)# Ahora si, escalo los datos ###Output _____no_output_____ ###Markdown Paso 4: Transformación de características estudiantiles basada en métricas (característica demográficas)- Aquí se utilizan las características de latitud y longitud de ubicación de la residencia del estudiante para calcular la distancia que existe entre ese punto y la institución educativa.Fórmula de Haversine: \begin{equation} d=2r\,sen^{-1}\Biggl(\sqrt{sen^2\biggl(\frac{lat2-lat1}{2}\biggr)+cos(lat1)\,cos(lat2)\,sen^2\biggl(\frac{lon2-lon1}{2}\biggr)}\Biggr) \end{equation}- Se calcula la edad del estudiante con respecto a una fecha dada (fracción)- Se calcula la extradad:\begin{equation}Edad_{teorica}=Grado_{ingreso} + 5\end{equation}\begin{equation}Riesgo_{1}= Edad_{ingreso} - Edad_{teorica}\end{equation}- Se calcula la repitencia de grados\begin{equation}Riesgo_{2}= (Year_{actual} - Year_{in} + Grado_{in}) - Grado_{2019}\end{equation}- Se transforma los valores de la característica Puntaje sisbén: Nivel 1: 0 - 11, Nivel 2: 12 - 22, Nivel 3: 23 - 43, Nivel 4: 44 - 65, Nivel 5: 66 - 79, Nivel 6: 80 - 100\begin{equation}Riesgo_{3}= Nivel Sisben III\end{equation}- se binariza los valores de la característica de hermanos en el colegio: si tiene hermanos = 1 sino = 0\begin{equation}Riesgo_{4}= binarizacion\end{equation} ###Code import datetime as dt from datetime import datetime, date, time, timedelta #import calendar fech1_str = '8/11/2008' #fech1 = dt.datetime(2010, 08, 01) fech1 = datetime.strptime(fech1_str, '%d/%m/%Y').date() fech2 = datetime.now().date()# criterio de medición (año) #edad = round((fech2-fech1)/dt.timedelta(365,0,0,0),2) #print(edad) edad_ = round((fech2-fech1)/dt.timedelta(365,5,49,12),2) #promedio de años comunes y bisiestos #print(edad_) df_address = pd.read_csv('/Users/Downloads/Dataset/AcademicDATA/1/20201121/address/output_address_3017.csv') #df_address = pd.read_csv('/Users//Downloads/Dataset/Final_dataset/datasets/Data_1449_416.csv') # convert decimal degrees to radians df_address['lat1'] = df_address['latitude'].apply(math.radians) df_address['lon1'] = df_address['longitude'].apply(math.radians) lon2 = math.radians(-75.57433) lat2 = 
math.radians(6.307670) #lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2]) # haversine formula df_address['dlon'] = lon2 - df_address['lon1'] df_address['dlat'] = lat2 - df_address['lat1'] R = 6371 #radio de la tierra d_1 = (np.sin((df_address['dlat'])/2))**2 + np.cos(df_address['lat1'])* np.cos(lat2) * (np.sin((df_address['dlon'])/2))**2 df_address['distance'] = 2 * R * np.arcsin(np.sqrt(d_1)) #df_address # visualizar datos atípicos con respecto a la ubicación geográfica para luego eliminarlos lat = np.array(df_address['latitude']) lon = np.array(df_address['longitude']) media = lat.mean() std_x = lat.std()*2 media_y =lon.mean() std_y = lon.std()*2 print(media) print(std_x) print(media_y) print(std_y) colors = ['blue']*len(lat) for index, x in enumerate(lat): if abs(x-media) > std_x: colors[index] = 'red' for index, x in enumerate(lon): if abs(x-media_y) > std_y: colors[index] = 'red' plt.figure() plt.scatter(lat, lon, s=2, color=colors) plt.axhline(media_y, color='k', linestyle='--') plt.axvline(media, color='k', linestyle='--') v=media #x-position of the center u=media_y #y-position of the center b=std_x #radius on the x-axis a=std_y #radius on the y-axis t = np.linspace(0, 2*pi, 100) plt.plot(v+b*np.cos(t), u+a*np.sin(t)) plt.xlabel('Latitude') plt.ylabel('Longitude') plt.show() ###Output _____no_output_____
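###Markdown The markdown above defines the remaining Step 4 indicators (extra-age, grade repetition, Sisbén level, and siblings at the school) but their code is not shown in this excerpt. Below is a hedged sketch of how they could be computed; every column name used here (edad_ingreso, grado_ingreso, year_in, grado_in, grado_2019, puntaje_sisben, hermanos_colegio) is a hypothetical placeholder for the dataset's actual fields. ###Code
# sketch of the Step 4 risk indicators described above; all column names are assumptions
import numpy as np
import pandas as pd

def add_risk_features(df, current_year=2019):
    out = df.copy()
    # Riesgo_1: age at entry minus theoretical age (entry grade + 5)
    out['riesgo_1'] = out['edad_ingreso'] - (out['grado_ingreso'] + 5)
    # Riesgo_2: grade repetition = (current year - entry year + entry grade) - grade in 2019
    out['riesgo_2'] = (current_year - out['year_in'] + out['grado_in']) - out['grado_2019']
    # Riesgo_3: Sisbén score binned into the six levels listed above
    bins = [-np.inf, 11, 22, 43, 65, 79, np.inf]
    out['riesgo_3'] = pd.cut(out['puntaje_sisben'], bins=bins, labels=[1, 2, 3, 4, 5, 6]).astype(int)
    # Riesgo_4: 1 if the student has siblings at the school, 0 otherwise
    out['riesgo_4'] = (out['hermanos_colegio'] > 0).astype(int)
    return out
###Output _____no_output_____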
AV- ML Hikeathon Predicting Links - Modelling.ipynb
###Markdown ![title](hike.png) ML Hikeathon Problem Statement**Link Prediction on Hike’s Social Network**Social networks are highly dynamic; they grow and change quickly over time through the addition of new edges, signifying the appearance of new interactions in the underlying social structure. The fundamental computational problem underlying social-network evolution is the Link Prediction problem: Hike is a social platform and predicting links in their network forms the basis for recommending new friends to our users with whom they can possibly start a chat. High-quality recommendations help in strengthening the network by aiding the creation of new social connections between existing users. It also helps in the retention of new users by helping them find friends as they join the platform.Can you develop an algorithm to predict whether a Hike user will chat another Hike user who is part of his/her phone contact book? ![title](pic.png) Data DescriptionThe data for this competition is a subset of the Hike’s social graph and the anonymised features of users. Explicitly, the training data is of the following form:train.zip contains 2 files namely **train.csv, user_features.csv****train.csv****node1_id, node2_id, is_chat**Where node1_id and node2_id are anonymised identifiers for users who are in each other’s phone address book. is_chat signifies their chat relationship. is_chat is 1, if the first user sends a chat message with the second user, and 0 otherwise. **user_features.csv****node_id, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13**This file contains some anonymised features for all nodes/users. Here node_id (corresponding to node1_id and node2_id in train/test files) represents the user for whom we have features from f1 to f13Mostly these features convey information around how active the users are in the app for the given time period - different slices of user engagement metrics. f13 is a categorical feature, f1-f12 are ordinal features each representing no. of days a user did some specific activity on the app in the last 31 days.**test.csv (contained in test.zip)**Build a model that can learn to predict probability of a node-pair in the test set to have a chat relation. The test set contains an id and a pairs of nodes**id, node1_id, node2_id**for which participants are required to predict is_chat on the test set. (Note that id is just here to identify a unique row in test set and is used in the submission format section) Submission FormatSubmission file must be in a zipped format containing only one csv corresponding to the submission. The format to be followed is: |id|is_chat||---|---||1|0.25||2|0.26||3|0.69||4|0.27||5|0.22| Evaluation MetricThe submitted output will be evaluated by the **AUC-ROC score** Modelling ###Code # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load in import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory import os print(os.listdir("../input/test")) # Any results you write to the current directory are saved as output. 
user=pd.read_csv('../input/train/user_features.csv') train=pd.read_csv('../input/train/train.csv') train.head() train['is_chat'].value_counts() from sklearn.utils import shuffle dftrain=shuffle(train[train['is_chat']==1].append(train[train['is_chat']==0].sample(13003568,random_state=1994))) dftrain.shape del train import gc gc.collect() dftrain.head() gc.collect() df = pd.merge(pd.merge(dftrain, user.rename(columns={'node_id':'node1_id'}), how='left',on='node1_id'), user.rename(columns={'node_id':'node2_id'}), how='left',on='node2_id') df.head() del user gc.collect() df.head() from lightgbm import LGBMClassifier from sklearn.metrics import accuracy_score,confusion_matrix,roc_auc_score m=LGBMClassifier(n_estimators=3000,random_state=1994) m.fit(df.drop(['is_chat'],axis=1),df['is_chat'],eval_metric=['auc'],verbose=100,categorical_feature=[0,1]) print('done') import pickle pickle.dump(m, open('lgbm_model5.sav', 'wb')) ###Output _____no_output_____
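###Markdown The notebook stops after pickling the model. Below is a hedged sketch of how the test pairs could be scored and the required (id, is_chat) submission written; it assumes the test file sits at ../input/test/test.csv and mirrors the merge used for training so the feature columns line up. ###Code
# sketch: score the test node pairs with the trained model and write the submission
test = pd.read_csv('../input/test/test.csv')                 # assumed path
user = pd.read_csv('../input/train/user_features.csv')       # reload (deleted earlier)

test_feats = pd.merge(pd.merge(test, user.rename(columns={'node_id': 'node1_id'}),
                               how='left', on='node1_id'),
                      user.rename(columns={'node_id': 'node2_id'}),
                      how='left', on='node2_id')

# probability of the positive class (is_chat = 1)
test_feats['is_chat'] = m.predict_proba(test_feats.drop(['id'], axis=1))[:, 1]
test_feats[['id', 'is_chat']].to_csv('submission.csv', index=False)
###Output _____no_output_____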
lab2_group_9.ipynb
###Markdown Lab2 John Carbo - jmc1367, Singwa Cheng - sc5553, Ghazal Rezaei - gr2312, Aaron Thomas - at5201 ###Code Group = "9" Course = "ISSEM" Lab = "Lab2" print("Group:"+Group, Lab, "Course:"+Course) ###Output Group:9 Lab2 Course:ISSEM ###Markdown Question 1: Please explain the following terms: Cryptography - Cryptography is the practice and study of techniques for secure communications in the presence of adversaries (ref: https://en.wikipedia.org/wiki/Cryptography). Cryptography allows two parties to communicate with each other in secret even if a third party is listening. It is used to protect the confidentiality and integrity of the message. The authorized parties are the only ones that can interpret the message (confidentiality) and know if it has been tampered with (integrity). Encryption - Encryption is the process that takes a message (data - physical or digital) and converts (encodes) it into an unintelligible form. Encryption makes it possible to send a secret message in plain view in a way that only intended recipients can make sense of it. Plain Text - Plain text is data or information that has not been encrypted. Data in this form can be read and understood by anyone, and it offers no protection if captured by an adversary. Cipher Text - When plain text is run through or encoded by an encryption algorithm, it becomes cipher text. In this form, the data is not human or computer readable unless the proper cipher or key is known. Decryption - Decryption is the process of reverting cipher text back into plain text. To do this, the original encryption algorithm or key must be known. Double Strength Encryption - This is the process of encrypting an already encrypted message using the same or a different encryption algorithm. Hybrid Encryption - Hybrid encryption uses different ciphers or encryption algorithms to gain the benefits of different encryption schemes. For example, a message can be encrypted with a symmetric key and then the symmetric key is encrypted with an asymmetric key. This gives the efficiency of symmetric encryption and the convenience of asymmetric encryption. In this example, both parties can encrypt and decrypt the message without having to coordinate the exchange of the symmetric key. ###Code ###Output _____no_output_____ ###Markdown Question 2: Implement the above shift cipher in Python. You are expected to write two functions: 2a - Encryption ###Code ###Output _____no_output_____ ###Markdown 2b - Decryption ###Code ###Output _____no_output_____
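###Markdown The two functions requested in Question 2 are left blank above. A minimal sketch, assuming a simple alphabetic shift (Caesar-style) cipher with an integer key and a 26-letter alphabet; the lab's intended interface may differ. ###Code
def encrypt(plain_text, key):
    # shift each letter forward by `key` positions, leaving other characters unchanged
    result = []
    for ch in plain_text:
        if ch.isalpha():
            base = ord('A') if ch.isupper() else ord('a')
            result.append(chr((ord(ch) - base + key) % 26 + base))
        else:
            result.append(ch)
    return ''.join(result)

def decrypt(cipher_text, key):
    # decryption is just encryption with the opposite shift
    return encrypt(cipher_text, -key)

print(encrypt("Attack at dawn", 3))   # Dwwdfn dw gdzq
print(decrypt("Dwwdfn dw gdzq", 3))   # Attack at dawn
###Output _____no_output_____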
docs/lectures/lecture33/notebook/Lec33_solutions.ipynb
###Markdown CS-109A Introduction to Data Science Lecture 33 Notebook: Training a FFN **Harvard University****Fall 2020****Instructors:** Pavlos Protopapas, Kevin Rader, Chris Tanner**Authors:** Eleni Kaxiras, David Sondak, and Pavlos Protopapas. ###Code ## RUN THIS CELL TO PROPERLY HIGHLIGHT CELLS import requests from IPython.core.display import HTML styles = requests.get("https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/cs109.css").text HTML(styles) import numpy as np import matplotlib.pyplot as plt import matplotlib.image as mpimg import numpy as np import pandas as pd from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img from sklearn.preprocessing import StandardScaler %matplotlib inline from PIL import Image, ImageOps from __future__ import absolute_import, division, print_function, unicode_literals # TensorFlow and tf.keras import tensorflow as tf tf.keras.backend.clear_session() # For easy reset of notebook state. print(tf.__version__) # You should see a 2.0.0 here! # set the seed for reproducability seed = 7 np.random.seed(seed) ###Output _____no_output_____ ###Markdown Tensorflow 2.0: All references to Keras should be written as `tf.keras`. For example: ```model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(input_shape=(28, 28)), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(10, activation='softmax')])model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) tf.keras.models.Sequentialtf.keras.layers.Dense, tf.keras.layers.Activation, tf.keras.layers.Dropout, tf.keras.layers.Flatten, tf.keras.layers.Reshapetf.keras.optimizers.SGDtf.keras.preprocessing.image.ImageDataGeneratortf.keras.regularizerstf.keras.datasets.mnist ```You could avoid the long names by using```from tensorflow import kerasfrom tensorflow.keras import layers```These imports do not work on some systems, however, because they pick up previous versions of `keras` and `tensorflow`. TensorsWe can think of tensors as multidimensional arrays of real numerical values; their job is to generalize matrices to multiple dimensions. - **scalar** = just a number = rank 0 tensor ($a$ ∈ $F$,) - **vector** = 1D array = rank 1 tensor ( $x = (\;x_1,...,x_i\;)⊤$ ∈ $F^n$ ) - **matrix** = 2D array = rank 2 tensor ( $\textbf{X} = [a_{ij}] ∈ F^{m×n}$ ) - **3D array** = rank 3 tensor ( $\mathscr{X} =[t_{i,j,k}]∈F^{m×n×l}$ ) First you build the network- **The input layer**: our dataset.- **The internal architecture or hidden layers** the number of layers, the activation functions, the learnable parameters and other hyperparameters)- **The output layer**: what we want from the network, a probability for belonging in a class (classification) or a number (regression).1. Load and pre-process the data2. Define the layers of the model.3. Compile the model. ... and then you train it!4. Fit the model to the train set (also using a validation set). Save the model.5. Evaluate the model on the test set.6. We learn a lot by studying history: metric traceplots. 7. Regularize the model.8. Now let's use the Network to predict on the test set.9. Try our model on a sandal from the Kanye West collection! 1. Load the data Fashion MNIST **Fashion-MNIST** is a dataset of clothing article images (created by [Zalando](https://github.com/zalandoresearch/fashion-mnist)), consisting of a training set of 60,000 examples and a test set of 10,000 examples. 
Each example is a **28 x 28** grayscale image, associated with a label from **10 classes**. The creators intend Fashion-MNIST to serve as a direct drop-in replacement for the original MNIST dataset for benchmarking machine learning algorithms. It shares the same image size and structure of training and testing splits. Each pixel is 8 bits so its value ranges from 0 to 255. These images do not have a `channel` dimension because they are B&W.Let's load and look at it! ###Code # get the data from keras fashion_mnist = tf.keras.datasets.fashion_mnist # load the data splitted in train and test! (x_train, y_train),(x_test, y_test) = fashion_mnist.load_data() print(x_train.shape, y_train.shape, '\n\n', x_train[56][:2], '\n\n', set(y_train)) # checking the min and max of x_train and x_test print(x_train.min(), x_train.max(), x_test.min(), x_test.max()) # normalize the data by dividing with pixel intensity # (each pixel is 8 bits so its value ranges from 0 to 255) x_train, x_test = x_train / 255.0, x_test / 255.0 print(x_train.min(), x_train.max(), x_test.min(), x_test.max()) # inspect a single image array print(x_train[45].shape) print(x_train[45][:2][:2]) # Give names to classes for clarity class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] # plot plt.figure(figsize=(10,10)) for i in range(25): plt.subplot(5,5,i+1) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.imshow(x_train[i], cmap=plt.cm.binary) plt.xlabel(class_names[y_train[i]]) plt.show() ###Output _____no_output_____ ###Markdown 2. Define the layers of the model. ###Code model = tf.keras.Sequential() model.add(tf.keras.layers.Flatten(input_shape=(28, 28))) model.add(tf.keras.layers.Dense(154, activation='relu')) model.add(tf.keras.layers.Dense(154, activation='relu')) model.add(tf.keras.layers.Dense(154, activation='relu')) model.add(tf.keras.layers.Dense(64, activation='relu')) model.add(tf.keras.layers.Dense(10, activation='softmax')) ###Output _____no_output_____ ###Markdown 3. Compile the model ###Code loss_fn = tf.keras.losses.SparseCategoricalCrossentropy() optimizer = tf.keras.optimizers.Adam() model.compile(optimizer=optimizer, loss=loss_fn, metrics=['accuracy']) model.summary() tf.keras.utils.plot_model( model, #to_file='model.png', # if you want to save the image show_shapes=True, # True to see more details show_layer_names=True, rankdir='TB', expand_nested=True, dpi=150 ) ###Output _____no_output_____ ###Markdown [Everything you wanted to know about a Keras Model and were afraid to ask](https://www.tensorflow.org/api_docs/python/tf/keras/Model) 4. Fit the model to the train set (also using a validation set)This is the part that takes the longest.-----------------------------------------------------------**ep·och** noun: epoch; plural noun: epochs. A period of time in history or a person's life, typically one marked by notable events or particular characteristics. Examples: "the Victorian epoch", "my Neural Netwok's epochs". ----------------------------------------------------------- ###Code %%time # Fit the model <--- always a good idea to time it! 
history = model.fit(x_train, y_train, validation_split=0.33, epochs=50, verbose=2) ###Output Train on 40199 samples, validate on 19801 samples Epoch 1/50 40199/40199 - 10s - loss: 0.5395 - accuracy: 0.8052 - val_loss: 0.4072 - val_accuracy: 0.8530 Epoch 2/50 40199/40199 - 10s - loss: 0.3894 - accuracy: 0.8593 - val_loss: 0.3931 - val_accuracy: 0.8596 Epoch 3/50 40199/40199 - 11s - loss: 0.3483 - accuracy: 0.8709 - val_loss: 0.3556 - val_accuracy: 0.8706 Epoch 4/50 40199/40199 - 11s - loss: 0.3260 - accuracy: 0.8798 - val_loss: 0.3400 - val_accuracy: 0.8767 Epoch 5/50 40199/40199 - 10s - loss: 0.3089 - accuracy: 0.8853 - val_loss: 0.3654 - val_accuracy: 0.8718 Epoch 6/50 40199/40199 - 10s - loss: 0.2904 - accuracy: 0.8919 - val_loss: 0.3781 - val_accuracy: 0.8647 Epoch 7/50 40199/40199 - 11s - loss: 0.2792 - accuracy: 0.8946 - val_loss: 0.3177 - val_accuracy: 0.8836 Epoch 8/50 40199/40199 - 10s - loss: 0.2676 - accuracy: 0.8994 - val_loss: 0.3233 - val_accuracy: 0.8830 Epoch 9/50 40199/40199 - 10s - loss: 0.2600 - accuracy: 0.9024 - val_loss: 0.3434 - val_accuracy: 0.8786 Epoch 10/50 40199/40199 - 10s - loss: 0.2476 - accuracy: 0.9068 - val_loss: 0.3325 - val_accuracy: 0.8841 Epoch 11/50 40199/40199 - 11s - loss: 0.2408 - accuracy: 0.9089 - val_loss: 0.3075 - val_accuracy: 0.8908 Epoch 12/50 40199/40199 - 10s - loss: 0.2324 - accuracy: 0.9116 - val_loss: 0.3568 - val_accuracy: 0.8737 Epoch 13/50 40199/40199 - 10s - loss: 0.2233 - accuracy: 0.9149 - val_loss: 0.3407 - val_accuracy: 0.8841 Epoch 14/50 40199/40199 - 10s - loss: 0.2166 - accuracy: 0.9179 - val_loss: 0.3270 - val_accuracy: 0.8909 Epoch 15/50 40199/40199 - 10s - loss: 0.2141 - accuracy: 0.9173 - val_loss: 0.3320 - val_accuracy: 0.8845 Epoch 16/50 40199/40199 - 10s - loss: 0.2038 - accuracy: 0.9213 - val_loss: 0.3584 - val_accuracy: 0.8883 Epoch 17/50 40199/40199 - 10s - loss: 0.1957 - accuracy: 0.9248 - val_loss: 0.4190 - val_accuracy: 0.8666 Epoch 18/50 40199/40199 - 10s - loss: 0.1914 - accuracy: 0.9263 - val_loss: 0.3391 - val_accuracy: 0.8911 Epoch 19/50 40199/40199 - 10s - loss: 0.1842 - accuracy: 0.9286 - val_loss: 0.3530 - val_accuracy: 0.8879 Epoch 20/50 40199/40199 - 10s - loss: 0.1834 - accuracy: 0.9299 - val_loss: 0.3606 - val_accuracy: 0.8950 Epoch 21/50 40199/40199 - 10s - loss: 0.1755 - accuracy: 0.9336 - val_loss: 0.3612 - val_accuracy: 0.8878 Epoch 22/50 40199/40199 - 11s - loss: 0.1706 - accuracy: 0.9349 - val_loss: 0.3670 - val_accuracy: 0.8924 Epoch 23/50 40199/40199 - 10s - loss: 0.1674 - accuracy: 0.9359 - val_loss: 0.3781 - val_accuracy: 0.8909 Epoch 24/50 40199/40199 - 10s - loss: 0.1599 - accuracy: 0.9382 - val_loss: 0.3840 - val_accuracy: 0.8913 Epoch 25/50 40199/40199 - 11s - loss: 0.1620 - accuracy: 0.9374 - val_loss: 0.4030 - val_accuracy: 0.8837 Epoch 26/50 40199/40199 - 11s - loss: 0.1515 - accuracy: 0.9406 - val_loss: 0.4329 - val_accuracy: 0.8872 Epoch 27/50 40199/40199 - 11s - loss: 0.1509 - accuracy: 0.9429 - val_loss: 0.3789 - val_accuracy: 0.8935 Epoch 28/50 40199/40199 - 10s - loss: 0.1492 - accuracy: 0.9428 - val_loss: 0.4366 - val_accuracy: 0.8852 Epoch 29/50 40199/40199 - 10s - loss: 0.1427 - accuracy: 0.9463 - val_loss: 0.4263 - val_accuracy: 0.8892 Epoch 30/50 40199/40199 - 10s - loss: 0.1435 - accuracy: 0.9443 - val_loss: 0.4443 - val_accuracy: 0.8922 Epoch 31/50 40199/40199 - 10s - loss: 0.1361 - accuracy: 0.9483 - val_loss: 0.3965 - val_accuracy: 0.8922 Epoch 32/50 40199/40199 - 10s - loss: 0.1351 - accuracy: 0.9490 - val_loss: 0.4376 - val_accuracy: 0.8932 Epoch 33/50 40199/40199 - 
11s - loss: 0.1283 - accuracy: 0.9510 - val_loss: 0.5149 - val_accuracy: 0.8864 Epoch 34/50 40199/40199 - 10s - loss: 0.1303 - accuracy: 0.9502 - val_loss: 0.4671 - val_accuracy: 0.8952 Epoch 35/50 40199/40199 - 10s - loss: 0.1245 - accuracy: 0.9521 - val_loss: 0.4765 - val_accuracy: 0.8871 Epoch 36/50 40199/40199 - 10s - loss: 0.1272 - accuracy: 0.9528 - val_loss: 0.4459 - val_accuracy: 0.8937 Epoch 37/50 40199/40199 - 10s - loss: 0.1209 - accuracy: 0.9532 - val_loss: 0.4638 - val_accuracy: 0.8910 Epoch 38/50 40199/40199 - 10s - loss: 0.1206 - accuracy: 0.9535 - val_loss: 0.4426 - val_accuracy: 0.8882 Epoch 39/50 40199/40199 - 10s - loss: 0.1128 - accuracy: 0.9560 - val_loss: 0.5459 - val_accuracy: 0.8908 Epoch 40/50 40199/40199 - 10s - loss: 0.1190 - accuracy: 0.9559 - val_loss: 0.4887 - val_accuracy: 0.8910 Epoch 41/50 40199/40199 - 10s - loss: 0.1108 - accuracy: 0.9586 - val_loss: 0.4818 - val_accuracy: 0.8899 Epoch 42/50 40199/40199 - 10s - loss: 0.1078 - accuracy: 0.9592 - val_loss: 0.5823 - val_accuracy: 0.8910 Epoch 43/50 40199/40199 - 10s - loss: 0.1119 - accuracy: 0.9576 - val_loss: 0.5467 - val_accuracy: 0.8899 Epoch 44/50 40199/40199 - 10s - loss: 0.1038 - accuracy: 0.9593 - val_loss: 0.5568 - val_accuracy: 0.8920 Epoch 45/50 40199/40199 - 10s - loss: 0.1008 - accuracy: 0.9604 - val_loss: 0.5974 - val_accuracy: 0.8837 Epoch 46/50 40199/40199 - 10s - loss: 0.1053 - accuracy: 0.9610 - val_loss: 0.5819 - val_accuracy: 0.8915 Epoch 47/50 40199/40199 - 10s - loss: 0.0977 - accuracy: 0.9632 - val_loss: 0.5327 - val_accuracy: 0.8895 Epoch 48/50 40199/40199 - 10s - loss: 0.1019 - accuracy: 0.9621 - val_loss: 0.5178 - val_accuracy: 0.8938 Epoch 49/50 40199/40199 - 10s - loss: 0.0966 - accuracy: 0.9641 - val_loss: 0.5877 - val_accuracy: 0.8888 Epoch 50/50 40199/40199 - 10s - loss: 0.0923 - accuracy: 0.9636 - val_loss: 0.6040 - val_accuracy: 0.8901 CPU times: user 28min 22s, sys: 28min 31s, total: 56min 54s Wall time: 8min 24s ###Markdown Save the modelYou can save the model so you do not have `.fit` everytime you reset the kernel in the notebook. Network training is expensive!For more details on this see [https://www.tensorflow.org/guide/keras/save_and_serialize](https://www.tensorflow.org/guide/keras/save_and_serialize) ###Code # save the model so you do not have to run the code everytime model.save('fashion_model.h5') # Recreate the exact same model purely from the file #model = tf.keras.models.load_model('fashion_model.h5') ###Output _____no_output_____ ###Markdown 5. Evaluate the model on the test set. ###Code test_loss, test_accuracy = model.evaluate(x_test, y_test, verbose=0) print(f'Test accuracy={test_accuracy}') ###Output Test accuracy=0.8830999732017517 ###Markdown 6. We learn a lot by studying history: metric traceplots. You can learn a lot about neural networks by observing how they perform while training. The networks's performance is stored in a variable named `history` which can be plotted. 
###Code print(history.history.keys()) # plot accuracy and loss for the test set fig, ax = plt.subplots(1,2, figsize=(20,6)) ax[0].plot(history.history['accuracy']) ax[0].plot(history.history['val_accuracy']) ax[0].set_title('Model accuracy') ax[0].set_ylabel('accuracy') ax[0].set_xlabel('epoch') ax[0].legend(['train', 'val'], loc='best') ax[1].plot(history.history['loss']) ax[1].plot(history.history['val_loss']) ax[1].set_title('Model loss') ax[1].set_ylabel('loss') ax[1].set_xlabel('epoch') ax[1].legend(['train', 'val'], loc='best') ###Output _____no_output_____ ###Markdown What do you observe in these traceplots? 7. RegularizationLet's try adding a regularizer in our model. For more see `tf.keras` [regularizers](https://www.tensorflow.org/api_docs/python/tf/keras/regularizers).1. Norm penalties: `kernel_regularizer= tf.keras.regularizers.l2(l=0.1)`2. Early stopping via `tf.keras.callbacks`. Callbacks provide a way to interact with the model while it's training and inforce some decisions automatically. Callbacks need to be instantiated and are added to the `.fit()` function via the `callbacks` argument.3. Dropout ###Code model_regular = tf.keras.models.Sequential() model_regular.add(tf.keras.layers.Flatten(input_shape=(28, 28))) model_regular.add(tf.keras.layers.Dense(154, activation='relu')) model_regular.add(tf.keras.layers.Dense(64, activation='relu')) model_regular.add(tf.keras.layers.Dropout(0.2)) model_regular.add(tf.keras.layers.Dense(10, activation='softmax')) # alternative ways of regularizing # model_regular = tf.keras.Sequential() # model_regular.add(tf.keras.layers.Flatten(input_shape=(28, 28))) # model_regular.add(tf.keras.layers.Dense(154, activation='relu', # kernel_regularizer= tf.keras.regularizers.l2(l=0.001))) # model_regular.add(tf.keras.layers.Dense(64, activation='relu', # kernel_regularizer= tf.keras.regularizers.l2(l=0.001))) # model_regular.add(tf.keras.layers.Dropout(0.2)) # model_regular.add(tf.keras.layers.Dense(10, activation='softmax')) # callbacks: watch validation loss and be "patient" for 10 epochs of no improvement es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', verbose=1, patience=20) # compile loss_fn = tf.keras.losses.SparseCategoricalCrossentropy() optimizer = tf.keras.optimizers.Adam() model_regular.compile(optimizer=optimizer, loss=loss_fn, metrics=['accuracy']) %%time # fit history_regular = model_regular.fit(x_train, y_train, validation_split=0.2, epochs=50, verbose=1, callbacks=[es]) test_loss_regular, test_accuracy_regular = model_regular.evaluate(x_test, y_test, verbose=0) print(f'Test accuracy:\nBaseline model={test_accuracy:.4f}\nRegularized model={test_accuracy_regular:.4f}') # plot accuracy and loss for the test set fig, ax = plt.subplots(1,2, figsize=(20,6)) ax[0].plot(history_regular.history['accuracy']) ax[0].plot(history_regular.history['val_accuracy']) ax[0].set_title('Model accuracy') ax[0].set_ylabel('accuracy') ax[0].set_xlabel('epoch') ax[0].legend(['train', 'val'], loc='best') ax[1].plot(history_regular.history['loss']) ax[1].plot(history_regular.history['val_loss']) ax[1].set_title('Model loss') ax[1].set_ylabel('loss') ax[1].set_xlabel('epoch') ax[1].legend(['train', 'val'], loc='best') ###Output _____no_output_____ ###Markdown ConclusionWe notice that Dropout helped our first model achive a 0.88 accuracy. In our second model which also used L2 regularization, we get a lower accuracy. There is no simple recipe for regularizing neural nets. They are all different. 
Different are also the tasks that each is called to solve. 7. Now let's predict in the test set ###Code predictions = model_regular.predict(x_test) # choose a specific item to predict item = 6000 predictions[item] np.argmax(predictions[item]), class_names[np.argmax(predictions[item])] ###Output _____no_output_____ ###Markdown Let's see if our network predicted right! Is the first item what was predicted? ###Code plt.figure() plt.imshow(x_test[item], cmap=plt.cm.binary) plt.xlabel(class_names[y_test[item]]) plt.colorbar() # code source: https://www.tensorflow.org/tutorials/keras/classification def plot_image(i, predictions_array, true_label, img): predictions_array, true_label, img = predictions_array, true_label[i], img[i] plt.grid(False) plt.xticks([]) plt.yticks([]) plt.imshow(img, cmap=plt.cm.binary) predicted_label = np.argmax(predictions_array) if predicted_label == true_label: color = 'blue' else: color = 'red' plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label], 100*np.max(predictions_array), class_names[true_label]), color=color) def plot_value_array(i, predictions_array, true_label): predictions_array, true_label = predictions_array, true_label[i] plt.grid(False) plt.xticks(range(10)) plt.yticks([]) thisplot = plt.bar(range(10), predictions_array, color="#777777") plt.ylim([0, 1]) predicted_label = np.argmax(predictions_array) thisplot[predicted_label].set_color('red') thisplot[true_label].set_color('blue') i = item plt.figure(figsize=(6,3)) plt.subplot(1,2,1) plot_image(i, predictions[i], y_test, x_test) plt.subplot(1,2,2) plot_value_array(i, predictions[i], y_test) plt.show() ###Output _____no_output_____ ###Markdown 8. Try our model on a sandal from the Kanye West collection!The true usefullness of a NN is to be able to classigy unseen data and not only on the test set. Let's see if our network can generalize beyond the MNIST fashion dataset. Let's give it a trendy shoe and see what it predicts. This image is not part of the test set, it was downloaded from the internet. In class discussion : What kinds of images can our model predict?Bias measures how much the network's output, averaged over all possible data sets differs from the true function. Variance measures how much the network output varies between datasets. ###Code # Let'see the tensor shape shoe = Image.open('kanye_shoe.jpg') imgplot = plt.imshow(shoe) ###Output _____no_output_____ ###Markdown Pre-processing ###Code # Resize image to 28x28 shoe = shoe.resize((28, 28)) imgplot = plt.imshow(shoe) shoe = ImageOps.mirror(shoe) imgplot = plt.imshow(shoe) # Delete the other 2 channels to make image B&W. shoe_data = np.array(shoe) # cast into munpy array shoe_data = shoe_data[:,:,0] print(shoe_data.shape) imgplot = plt.imshow(shoe_data, cmap=plt.cm.binary) ###Output (28, 28) ###Markdown `tf.keras` models are optimized to make predictions on a batch, or collection, of examples at once. Accordingly, even though you're using a single image, you need to add it to a list: ###Code # Add the image to a batch where it's the only member. shoe_batch = (np.expand_dims(shoe_data,0)) print(shoe_batch.shape) predictions_single = model_regular.predict(shoe_batch) print(predictions_single[0]) print(np.argmax(predictions_single[0]), class_names[np.argmax(predictions_single[0])]) shoe_data = np.ones(shoe_data.shape) * 255 - shoe_data plt.figure() plt.imshow(shoe_data, cmap=plt.cm.binary) plt.xlabel('a cool shoe') plt.colorbar() # Add the image to a batch where it's the only member. 
shoe_batch = (np.expand_dims(shoe_data,0)) print(shoe_batch.shape) predictions_single = model_regular.predict(shoe_batch) print(predictions_single[0]) print(np.argmax(predictions_single[0]), class_names[np.argmax(predictions_single[0])]) ###Output [0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00 1.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00 9.767409e-18] 5 Sandal ###Markdown Data augementationData augmentation generates more training data by applying a series of random transformations that yield belivable images. Be careful of transformations that result in unlikely images. ###Code datagen = ImageDataGenerator( rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest') shoe_augm = (np.expand_dims(shoe_data,0)) shoe_augm = (np.expand_dims(shoe_augm, 3)) print(shoe_augm.shape) img = shoe_augm img.shape from keras.preprocessing import image # the .flow() command below generates batches of randomly transformed images # and saves the results to the `preview/` directory (this directory must exist) i = 0 for batch in datagen.flow(shoe_augm, batch_size=1, save_to_dir='preview', save_prefix='shoe', save_format='jpeg'): plt.figure(i) imgplot = plt.imshow(image.array_to_img(batch[0]), cmap=plt.cm.binary) i += 1 if i % 6 == 0: break # otherwise the generator would loop indefinitely plt.show() ###Output _____no_output_____
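###Markdown The cells above only preview augmented images. Below is a sketch of how augmented batches could be fed back into training; it is an assumption about usage, not part of the original notebook. A fresh generator without `rescale` is defined because `x_train` is already scaled to [0, 1], and the channel axis is squeezed back out so the batch matches the model's (28, 28) input. ###Code
# sketch: one extra training pass on an augmented copy of a small subset
augmenter = ImageDataGenerator(rotation_range=20, width_shift_range=0.1,
                               height_shift_range=0.1, horizontal_flip=True)

x_subset = np.expand_dims(x_train[:5000], -1)   # (5000, 28, 28, 1) for the generator
y_subset = y_train[:5000]

x_aug, y_aug = next(augmenter.flow(x_subset, y_subset, batch_size=5000, shuffle=False))
x_aug = np.squeeze(x_aug, -1)                   # back to (5000, 28, 28) for the Flatten input

model_regular.fit(x_aug, y_aug, epochs=1, verbose=0)
###Output _____no_output_____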
data-science/projetos/Projeto1-Analisando-os-dados-do-Airbnb/Analisando_os_Dados_do_Airbnb_(New_York_City)[Projeto].ipynb
###Markdown **Projeto do módulo 1 | Curso de Data Science na Prática** *by João Victor Loureiro* Analise de dados do Airbnb | New York City A maior rede hoteleira do mundo que não possui nenhum quarto de hotel O [Airbnb](https://www.airbnb.com.br/) é um serviço muito procurado por turistas, viajantes e profissionais que buscam por acomodações ao redor do mundo, seu grande diferencial está nos custos mais baixos e na facilidade de alugar um imóvel sem muita burocracia. Pessoas do mundo inteiro podem oferecer suas casas para usuários que buscam estadia, é possível oferecer desde apenas um quarto ou até mesmo a casa completa. A plataforma criada em 2008 já se tornou uma das principais no ramo de hospedagens do mundo (mesmo sem possuir nenhum quarto de hotel próprio), e trás um serviço sem muita complexidade, o Airbnb funciona como qualquer outra rede de compartilhamento e oferta de serviços, é necessário apenas criar um perfil na rede e decidir se deseja buscar alguma acomodação ou então, se deseja disponibilizar a sua. Hoje o serviço está presente em mais de 30 mil cidades e 192 países, e pode ser a solução ideal para quem busca uma acomodação. Nesta análise, serão usados os dados do Airbnb referentes a cidade de Nova York, **a maior cidade dos Estados Unidos** e um relevante local de interesse para turistas e profissonais à negócios. Nova York, "*a cidade que nunca dorme*" Com um horizonte conhecido na televisão e nos cinemas de todas as partes do mundo, a cidade de Nova York oferece atrações de todos os tipos. Localizada na costa atlântica nordeste dos Estados Unidos, com cerca de 8,4 milhões de habitantes, a cidade é composta por cinco distritos: Bronx, Brooklyn, Manhattan, Queens e Staten Island. A cidade é popularmente referida como New York City, para diferenciá-la do estado de Nova York, do qual faz parte. Também é conhecida como “a cidade que nunca dorme”, em referência a sua rede de transportes, cuja maioria funciona 24h por dia. Assim como outras cidades famosas, Nova York também possui seus cartões-postais e monumentos. Na **Times Square** (figura 1) os grandes painéis fazem a noite parecer dia. Na região de Manhattan é possível encontrar centenas de arranha-céus, incluindo o **Empire State Building** (figura 2) e as extintas torres gêmeas do World Trade Center. Figura 1 - Times Square, localizada na região central de Manhattan. Imagem de Bruce Emmerling por Pixabay Figura 2 - Empire State Building, um arranha-céu de 102 andares. Imagem de Free-Photos por Pixabay Além de ser a cidade mais populosa dos Estados Unidos, Nova York está entre as áreas urbanas mais populosas do mundo, e exerce uma poderosa influência sobre o comércio mundial. Também sendo localidade sede da Organização das Nações Unidas. Um ponto turístico conhecido mundialmente, é a **Estátua da Liberdade** (figura 3), que recebeu milhões de imigrantes que vieram para o país no final do século XIX. Figura 3 - Um dos principais símbolos norte-americano, a Estátua da Liberdade. Imagem de Bruce Emmerling por Pixabay Agora que já foi entendindo um pouco mais sobre o tema, chegou a hora de trabalhar com os dados. Neste notebook, como já dito anteriormente, serão analisados os dados do Airbnb referentes aos imóveis localizados na cidade de Nova York. O objetivo é identificar algumas informações sobre os imóveis, disponibilizados para locação, e também para extrair *insights* que o conjunto de dados poderá fornecer. 
Análise exploratória Obtenção dos dados Todos os dados que serão importados aqui foram obtidos no site [Inside Airbnb](http://insideairbnb.com/get-the-data.html), que fornece um conjunto de ferramentas e dados que permitem explorar como o Airbnb está realmente sendo usado nas cidades ao redor do mundo. O arquivo selecionado para importação dos dados será o `listings.csv - Summary information and metrics for listings in New York City (good for visualisations)`. ###Code # Importando os pacotes necessários para realizar a Análise Exploratória import pandas as pd import matplotlib.pyplot as plt import numpy as np from matplotlib.patches import Circle from matplotlib.offsetbox import (TextArea, DrawingArea, OffsetImage, AnnotationBbox) from matplotlib.cbook import get_sample_data %matplotlib inline # Importando o arquivo, contendo os dados, para um dataframe df = pd.read_csv('http://data.insideairbnb.com/united-states/ny/new-york-city/2020-12-10/visualisations/listings.csv') # Exibindo as primeiras entradas do dataframe, para verificar se o mesmo foi importado corretamente df.head(3) ###Output _____no_output_____ ###Markdown Dimensionamento e identificação dos dados É importante ter em mente o volume dos dados que serão analisados e os tipos de variáveis que eles representam. Esta etapa tem como objetivo criar uma consciência situacional inicial e permitir um entendimento de como os dados estão estruturados. ###Code # Identificando o volume de dados do dataframe print("\tVOLUME DE DADOS") print("Número de entradas (linhas): \t {}".format(df.shape[0])) print("Número de variáveis (colunas): \t {}".format(df.shape[1])) # Identificand o tipo das variáveis df.dtypes ###Output _____no_output_____ ###Markdown Dicionário das variáveis * `id` - Número de ID gerado para identificar o imóvel. * `name` - Nome do imóvel anunciado. * `host_id` - Número de ID do proprietário (anfitrião) do imóvel. * `host_name` - Nome do proprietário (anfitrião) do imóvel. * `neighbourhood_group` - Distrito onde o imóvel fica localizado. * `neighbourhood` - Bairro onde o imóvel fica localizado. * `latitude` - Coordenada da latitude do imóvel. * `longitude` - Coordenada da longitude do imóvel. * `room_type` - Informa o tipo de quarto que é oferecido. * `price` - Preço para alugar o imóvel. * `minimum_nights` - Quantidade mínima de noites para reservar. * `number_of_reviews` - Número de reviews (avaliações) que a propriedade possui. * `last_review` - Data do último review (avaliação). * `reviews_per_month` - Quantidade de reviews por mês (avaliação). * `calculated_host_listings_count` - Quantidade de imóveis do mesmo anfitrião. * `availability_365` - Número de dias de disponibilidade do imóvel (dentro de 365 dias). Dados ausentes e/ou inválidos A qualidade de um dataset está diretamente relacionada à quantidade de valores ausentes. É importante entender logo no início se esses valores nulos são significativos comparados ao total de entradas. ###Code # Identificando a porcentagem de valores ausentes no dataframe round((df.isnull().sum() / df.shape[0] * 100), 2).sort_values(ascending=False) ###Output _____no_output_____ ###Markdown * Apenas 4 variáveis possuem valores nulos, sendo apenas 2 em quantidade significativa. * As variáveis `reviews_per_month` e `last_review` possuem cerca de 1/4 de seus valores ausentes, cada uma. * As variáveis `name` e `host_name` possuem menos de 0,1% de seus valores nulos, ou seja, não é uma quantidade significante comparado ao total de entradas. 
Remoção de dados desnecessários Nem todas as variáveis presentes no conjunto de dados são úteis para a análise exploratória que está sendo feita. Portanto, elas serão removidas a fim de simplificar o dataframe. São elas: * `reviews_per_month` * `last_review` * `availability_365` * `calculated_host_listings_count` * `host_id` * `id` ###Code # Eliminando as variáveis com alto valor de dados ausentes e outras que não serão úteis para a análise df.drop(['reviews_per_month', 'last_review', 'availability_365', 'calculated_host_listings_count', 'host_id','id'], axis=1, inplace=True) # Exibindo as entradas do dataframe novamente, agora sem as variáveis que foram removidas df.head() # Recalculando o volume de dados do dataframe após a remoção das variáveis específicas acima print("\tVOLUME DE DADOS") print("Número de entradas (linhas): \t {}".format(df.shape[0])) print("Número de variáveis (colunas): \t {}".format(df.shape[1])) ###Output VOLUME DE DADOS Número de entradas (linhas): 36923 Número de variáveis (colunas): 10 ###Markdown * O número de variáveis (colunas) que antes era 16, agora passou a ser 10. Análise estatística Distribução das variáveis Para identificar a distribuição das variáveis, será plotado os histogramas das variáveis numéricas `price`, `minimum_nights` e `number_of_reviews`. ###Code # Plotando o histograma das variáveis df[['price', 'minimum_nights', 'number_of_reviews']].hist(bins=10, figsize=(15,10)) plt.show() ###Output _____no_output_____ ###Markdown Observando os histogramas acima, pode-se concluir que: * É possível verificar indícios da presença de outliers nos dados das três variáveis (`price`, `minimum_nights` e `number_of_reviews`). Os valores não seguem uma distribuição, e distorcem toda a representação gráfica Identificação e remoção de *outliers* Há duas maneiras rápidas que auxiliam na detecção de outliers, o resumo estatístico das variáveis e o `boxplot` de cada uma delas. Veja abaixo: Resumo estatístico Segue abaixo o resumo estatístico das variáveis `price` e `minimum_nights`, que são as que mais interessam. ###Code # Resumo estatístico das variáveis df[['price', 'minimum_nights']].describe() ###Output _____no_output_____ ###Markdown Observando o resumo estatístico acima, podemos confirmar algumas hipóteses como: * A variável `price` possui 75% dos valores abaixo de 159, porém seu valor máximo é 10000. * A variável `minimum_nights` possui 75% dos valores abaixo de 30, porém seu valor máximo é 1250, muito acima de 1 ano. Boxplot A fim de observar melhor esses outliers, que foram detectados com base no resumo estatístico das váriaveis, serão plotados os boxplot para cada variável. Boxplot `price` ###Code # Boxplot da variável 'price' df.price.plot(kind='box', vert=False, figsize=(15,3)) plt.show() ###Output _____no_output_____ ###Markdown Como era esperado, a variável `price` possui outliers. Baseando-se no resumo estatístico e após testar alguns índices, pôde-se concluir que são os valores acima de 250. Portanto, é preciso calcular o volume desses outliers, para uma possível remoção. ###Code # Ver quantidade de valores acima de 250 para 'price' print("\nprice - valores acima de 250:") print("\t{} entradas".format(len(df[df.price > 250]))) print("\t{:.2f}% do total de entradas".format((len(df[df.price > 250]) / df.shape[0]) * 100)) ###Output price - valores acima de 250: 3504 entradas 9.49% do total de entradas ###Markdown Sendo assim, os dados considerados outliers da variável `price` representam 9,49% do total de entradas. 
Boxplot `minimum_nights` ###Code # Boxplot da variável 'minimum_nights' df.minimum_nights.plot(kind='box', vert=False, figsize=(15,3)) plt.show() ###Output _____no_output_____ ###Markdown Como era esperado, a variável `minimum_nights` possui outliers. E provavelmente, baseando-se no resumo estatístico, são os valores acima de 30. Portanto, é preciso calcular o volume desses outliers, para uma possível remoção. ###Code # Ver quantidade de valroes acime de 30 dias para 'minimum_nights' print("\nminimum_nights - valores acima de 30:") print("\t{} entradas".format(len(df[df.minimum_nights > 30]))) print("\t{:.2f}% do total de entradas".format((len(df[df.minimum_nights > 30]) / df.shape[0]) * 100)) ###Output minimum_nights - valores acima de 30: 1172 entradas 3.17% do total de entradas ###Markdown Sendo assim, os dados considerados outliers da variável `minimum_nights` representam 3,17% do total de entradas. Coeficiente de variação Serão calculados os coeficientes de variação das variáveis `price` e `minimum_nights` para analisar a variabilidade, em termos relativos a seu valor médio, de seus dados. Indicadores do coeficiente: * Menor ou igual a 15% → Baixa dispersão: dados homogêneos * Entre 15% e 30% → Média dispersão * Maior que 30% → Alta dispersão: dados heterogêneos ###Code # Calculando os coeficientes de variação print("Coeficiente de variação | 'price' = {}%".format(round((df.price.std() / df.price.mean()) * 100, 2))) print("Coeficiente de variação | 'minimum_nights' = {}%".format(round((df.minimum_nights.std() / df.minimum_nights.mean()) * 100, 2))) ###Output Coeficiente de variação | 'price' = 183.93% Coeficiente de variação | 'minimum_nights' = 113.11% ###Markdown Como pode ser observado acima, os coeficientes de variação estão altíssimos. Deixando claro que os dados estão muito dispersos, provalvemente, por conta dos outliers presentes em ambas as variáveis. Eliminando os outliers Como calculado mais acima, os outliers das variáveis `price` e `minimum_nights` representam 9,49% e 3,17%, respectivamente, do valor total de entradas de cada variável. Sendo assim, eles serão removidos. ###Code # Removendo os outliers das variáveis 'price' e 'minimum_nights' df.drop(df[df.minimum_nights > 30].index, axis=0, inplace=True) df.drop(df[df.price > 250].index, axis=0, inplace=True) ###Output _____no_output_____ ###Markdown Após a remoção dos outliers, pode-se calcular novamente os coeficientes de variação e observar se eles apresentam um resultado mais próximo do esperado. ###Code # Calculando os coeficientes de variação print("Coeficiente de variação | 'price' = {}%".format(round((df.price.std() / df.price.mean()) * 100, 2))) print("Coeficiente de variação | 'minimum_nights' = {}%".format(round((df.minimum_nights.std() / df.minimum_nights.mean()) * 100, 2))) ###Output Coeficiente de variação | 'price' = 53.63% Coeficiente de variação | 'minimum_nights' = 58.99% ###Markdown Apesar dos coeficientes ainda continuarem altos, houve uma boa redução em comparação ao mesmos antes da limpeza dos outliers. Portanto, a visualização gráfica dos dados já deve ter tido uma melhora. Histogramas Agora, sem a presença dos outliers, serão plotados os histogramas das variáveis `price`e `minimum_nights` novamente. Para se ter uma melhor observação de suas distribuições. 
###Code # Plotting the histograms of 'minimum_nights' and 'price' df[['minimum_nights']].hist(bins=6, figsize=(10,5)); df[['price']].hist(bins=10, figsize=(10,5)); ###Output _____no_output_____ ###Markdown The histograms now reveal some useful information, such as: * Most listings require a minimum stay of somewhere between 25 and 30 nights to make a booking. * The rental price of most listings falls roughly between 25 and 125 dollars. Extracting *insights* After this brief statistical analysis, some insights can be extracted by manipulating the dataset. Types of property available for rent in New York ###Code # Percentage of each available property type tipo_de_imoveis = round((df.room_type.value_counts() / df.shape[0]) * 100, 2).sort_values(ascending=False) print(tipo_de_imoveis) # Keeping only the value of each property type tipo_de_imoveis = tipo_de_imoveis.values # Plotting the chart of available property types entire_home = tipo_de_imoveis[0] private_room = tipo_de_imoveis[1] shared_room = tipo_de_imoveis[2] hotel_room = tipo_de_imoveis[3] fig, ax = plt.subplots(figsize=(5, 8)) imoveis = [''] ax.bar(imoveis, entire_home, color="#0AA439", width=0.5) ax.bar(imoveis, private_room, color="#23C554", bottom=entire_home, width=0.5) ax.bar(imoveis, shared_room, color="#98F3B4", bottom=entire_home+private_room, width=0.5) ax.bar(imoveis, hotel_room, color="#CDFCDB", bottom=entire_home+private_room+shared_room, width=0.5) ax.bar(imoveis, height=0, width=1) ax.set_xlabel('') ax.set_title('Properties available for rent in New York\n', horizontalalignment='right', fontsize=18, color='#8f8d8d') ax.axis('off') ax.annotate('48.8%', fontsize=30, xy=(0.125, entire_home / 2), xytext=(0.5, entire_home / 2), color='#0AA439') ax.annotate('Entire home / apartment', fontsize=20, xy=(0.125, entire_home / 2), xytext=(0.9, entire_home / 2), color='#0AA439') ax.annotate('48.7%', fontsize=30, xy=(0.125, private_room / 2), xytext=(0.5, (private_room / 2) + entire_home), color='#23C554') ax.annotate('Private room', fontsize=20, xy=(0.125, private_room / 2), xytext=(0.9, (private_room / 2) + entire_home), color='#23C554') ax.annotate('1.9%', fontsize=15, xy=(0.125, shared_room / 2), xytext=(0.5, (shared_room / 2) + entire_home + private_room - 1), color='#b3b1b1') ax.annotate('Shared room', fontsize=12, xy=(0.125, shared_room / 2), xytext=(0.7, (shared_room / 2) + entire_home + private_room - 1), color='#b3b1b1') ax.annotate('0.6%', fontsize=15, xy=(0.125, 2), xytext=(0.5, (hotel_room / 2) + entire_home + private_room + shared_room + 1), color='#b3b1b1') ax.annotate('Hotel room', fontsize=12, xy=(0.125, 2), xytext=(0.7, (hotel_room / 2) + entire_home + private_room + shared_room + 1), color='#b3b1b1') # Fix the display limits to see everything ax.set_xlim(0, 1) ax.set_ylim(0, 100) ax.invert_yaxis() #plt.savefig('grafico-imoveis-disponiveis-em-nova-york.jpeg', format='jpeg', quality=100, dpi = 300, bbox_inches='tight') plt.show() # Count of each available property type df.room_type.value_counts() ###Output _____no_output_____ ###Markdown As the chart and the data show, most of the listings available for rent in New York City are either entire homes/apartments or private rooms. This suggests that the vast majority of guests prefer more private, individual accommodation.
The share of listings offering a shared room is extremely low, around 2% of all available listings, which reinforces the idea that demand for this type of accommodation is quite low. Most expensive locations ###Code # Most expensive locations (by borough) distritos_mais_caros = tuple(df.groupby(['neighbourhood_group']).price.mean().sort_values(ascending=False).index) distritos_mais_caros_values = tuple(df.groupby(['neighbourhood_group']).price.mean().sort_values(ascending=False).values) # Statistical summary of the 'price' variable df.price.describe() # Plotting the chart of the most expensive locations plt.rcdefaults() fig, ax = plt.subplots() ax.barh(distritos_mais_caros, distritos_mais_caros_values, align='center', color=['#0AA439', '#23C554', '#54E07E', '#98F3B4', '#CDFCDB']) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['bottom'].set_color('#d9d9d9') ax.spines['left'].set_color('#d9d9d9') ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() ax.set_yticklabels(distritos_mais_caros, fontsize=11) ax.invert_yaxis() # labels read top-to-bottom ax.tick_params(axis='x', colors='#786d6d') ax.tick_params(axis='y', colors='#786d6d') ax.set_title('Most expensive locations in New York\n', color='#8f8d8d', horizontalalignment='right', fontsize=15) ax.set_xlabel("Average rental price of the listings (in dollars)", labelpad=10, size=9, color='#9e9191') ax.axvline(df.price.mean(), color='#09897B', alpha=0.5) ax.annotate('New York City average: $103', fontsize=10, xy=(0.5, 2), xytext=(102, -0.67), color='#09897B') ax.annotate('$119', fontsize=15, xy=(0.5, 2), xytext=(105, 0.1), color='#fff') ax.annotate('$97', fontsize=15, xy=(0.5, 2), xytext=(86.5, 1.1), color='#fff') ax.annotate('$90', fontsize=15, xy=(0.5, 2), xytext=(79.5, 2.1), color='#fff') # ax.annotate('$79', fontsize=15, xy=(0.5, 2), xytext=(69, 3.1), color='#dbdbdb') # ax.annotate('$78', fontsize=15, xy=(0.5, 2), xytext=(67, 4.1), color='#dbdbdb') plt.show() ###Output _____no_output_____ ###Markdown As the chart above shows, the borough with the most expensive rentals is Manhattan, with an average price of \$119. Queens and the Bronx, in contrast, have rentals well below the New York City average, at around $80. Most popular places in New York on Airbnb The next chart shows the areas whose listings have the largest number of reviews on the Airbnb platform. This indicator suggests that these areas, with a large number of reviews, are probably also the most popular in the city, since the number of reviews is directly related to the number of visitors who rented the listings.
###Code # Listings with the largest number of reviews maior_reviews = tuple(df.groupby(['neighbourhood', 'neighbourhood_group']).number_of_reviews.sum().sort_values(ascending=False).head(5).values) print(df.groupby(['neighbourhood', 'neighbourhood_group']).number_of_reviews.sum().sort_values(ascending=False).head(5)) # Plotting the chart of the most popular places # Note: 'distritos_mais_caros' is reused below only as five categorical y positions; # the tick labels are overwritten right after with the neighbourhood names plt.rcdefaults() fig, ax = plt.subplots() ax.barh(distritos_mais_caros, maior_reviews, align='center', color=['#0AA439', '#23C554', '#54E07E', '#98F3B4', '#CDFCDB']) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['bottom'].set_color('#d9d9d9') ax.spines['left'].set_color('#d9d9d9') ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() ax.set_yticklabels(('Bedford-Stuyvesant | Brooklyn', 'Williamsburg | Brooklyn', 'Harlem | Manhattan', 'Bushwick | Brooklyn', 'Hell\'s Kitchen | Manhattan'), fontsize=11) ax.invert_yaxis() # labels read top-to-bottom ax.tick_params(axis='x', colors='#786d6d') ax.tick_params(axis='y', colors='#786d6d') ax.set_title('Most popular places in New York on Airbnb\n', color='#8f8d8d', horizontalalignment='right', fontsize=15) ax.set_xlabel("Number of reviews of the listings", labelpad=10, size=9, color='#9e9191') ax.axvline(df.price.mean(), color='#09897B', alpha=0.5) plt.show() ###Output _____no_output_____
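###Markdown The chart above positions the bars on the five names stored in `distritos_mais_caros` and then overwrites the tick labels by hand. A minimal sketch of an alternative, built only from the objects defined above, derives the labels from the same groupby that produces the values, so labels and values cannot drift apart. ###Code # Sketch: build the y labels from the groupby result itself
populares = (df.groupby(['neighbourhood', 'neighbourhood_group'])
               .number_of_reviews.sum()
               .sort_values(ascending=False)
               .head(5))
labels = ['{} | {}'.format(n, g) for n, g in populares.index]
fig, ax = plt.subplots()
ax.barh(labels, populares.values, align='center', color='#0AA439')
ax.invert_yaxis()
ax.set_xlabel('Number of reviews of the listings')
plt.show() ###Output _____no_output_____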
scripts/structure_optimize_dms/Interactive structure-optimized DM analysis.ipynb
###Markdown Notebook descriptionThis notebook can be used to interactively fit dispersion measure by maximizing burst temporal structure. This code is an extension of https://github.com/danielemichilli/DM_phase/.-D.V. 2020-2021 ###Code %load_ext autoreload %autoreload 2 %config InlineBackend.figure_format='retina' import os import sys import warnings warnings.simplefilter('ignore') from IPython.display import display # Add modules paths to system path module_paths = ['.', '/usr/local/lib/python3.8/site-packages'] for module_path in module_paths: if os.path.abspath(os.path.join(module_path)) not in sys.path: sys.path.append(module_path) import pickle import numpy as np import pandas as pd import builtins import matplotlib.pyplot as plt from ipywidgets import interact, fixed import ipywidgets as widgets from struct_opt_dms.utils import save, load from struct_opt_dms.dataset import load_dataframe from struct_opt_dms.interactive_analysis import initialize, prep_power, select_frequency_range current_input_id = -1 input_filterbanks_repository = '../../../data/filterbanks/R3/' state_variable_name = 'df_arts_R3' ###Output _____no_output_____ ###Markdown Prepare bursts metadata(detection parameters, repository name, local and arts paths) ###Code df_R3 = load_dataframe(input_filterbanks_repository, state_variable_name) # df_missing = df_R3.loc[df_R3['struct_opt_dm'].isnull()] df_R3 ###Output _____no_output_____ ###Markdown Load input data ###Code # Main verbose = True # Go from one file to the other in the list in ascending (True) or decending (False) order # `None` will reload the same file. incr = True current_input_id = -1 if incr: current_input_id += 1 elif incr is not None: current_input_id -= 1 # Or access directly to a burst by name # current_input_id = df_R3.loc[df_R3['paper_name'] == 'A17'].index[0] print ('Current burst (index=%d):' % current_input_id) print (df_R3.iloc[current_input_id]) print () if df_R3.iloc[current_input_id]['file_location'] == '': if incr: current_input_id += 1 elif incr is not None: current_input_id -= 1 spectra, dm_trials, filename = initialize(df_R3.iloc[current_input_id]['file_location'], df_R3.iloc[current_input_id]['detection_dm'], df_R3.iloc[current_input_id]['detection_downsampling'], around_peak=False, verbose=verbose) detection_mjd = df_R3.iloc[current_input_id]['detection_mjd'] power_vs_dm, d_power_vs_dm = prep_power(spectra, dm_trials, verbose=verbose) ###Output _____no_output_____ ###Markdown Interactive visualisation ###Code fig = plt.figure(constrained_layout=True, figsize=(10, 7)) # plt.rcParams["font.family"] = "Times New Roman" # plt.rcParams["font.size"] = "12" gs = fig.add_gridspec(10, 6) def load_value(param, df, default=0): if pd.isnull(df.loc[df['detection_mjd'] == detection_mjd, param].values[0]): return default else: return df.loc[df['detection_mjd'] == detection_mjd, param] interact( select_frequency_range, spectra = fixed(spectra), dm_trials = fixed(dm_trials), power_vs_dm = fixed(power_vs_dm), d_power_vs_dm = fixed(d_power_vs_dm), fig=fixed(fig), gs=fixed(gs), descriptor = fixed(df_R3.loc[df_R3['detection_mjd'] == detection_mjd, 'paper_name'].values[0]), fitting_method = widgets.Dropdown(options=['Gaussian', 'dm_phase'], value='Gaussian'), fluct_id_low = widgets.IntSlider(min = 0, max = power_vs_dm.shape[0], step = 1, value = load_value('fluct_id_low', df_R3, 0), continuous_update=False), fluct_id_high = widgets.IntSlider(min = 0, max = power_vs_dm.shape[0], step = 1, value = load_value('fluct_id_high', df_R3, power_vs_dm.shape[0]), 
continuous_update=False), freq_id_low = widgets.IntSlider(min = 0, max = spectra.data.shape[0], step = 1, value = load_value('freq_id_low', df_R3, 0), continuous_update=False), freq_id_high = widgets.IntSlider(min = 1, max = spectra.data.shape[0], step = 1, value = load_value('freq_id_high', df_R3, spectra.data.shape[0]), continuous_update=False), t0 = widgets.IntSlider(min = 0, max = spectra.data.shape[1], step = 1, value = load_value('t0', df_R3, 0), continuous_update=False), t1 = widgets.IntSlider(min = 1, max = spectra.data.shape[1], step = 1, value = load_value('t1', df_R3, spectra.data.shape[1]), continuous_update=False), ds_freq = widgets.IntSlider(min=1, max=32, step=1, value=load_value('ds_freq', df_R3, 1), continuous_update=False), ds_time = widgets.IntSlider(min=1, max=32, step=1, value=load_value('ds_time', df_R3, 1), continuous_update=False), delta_dm = widgets.FloatSlider(min = -10, max = 10, step = 0.01, value = load_value('delta_dm', df_R3, 0), continuous_update=False), smooth = widgets.IntSlider(min=0, max=4, step=1, value=load_value('smooth', df_R3, 0), continuous_update=False), ) # Save button button = widgets.Button(description="Save figure") display(button) button_raw = widgets.Button(description="Save raw figure") display(button_raw) button_clear = widgets.Button(description="Clear params") display(button_clear) def save_to_df(): print (detection_mjd) df_R3.loc[df_R3['detection_mjd'] == detection_mjd, 'struct_opt_dm'] = builtins.struct_opt_dm df_R3.loc[df_R3['detection_mjd'] == detection_mjd, 'struct_opt_dm_err'] = builtins.struct_opt_dm_err # Widgets values df_R3.loc[df_R3['detection_mjd'] == detection_mjd, 'fluct_id_low'] = builtins.fluct_id_low df_R3.loc[df_R3['detection_mjd'] == detection_mjd, 'fluct_id_high'] = builtins.fluct_id_high df_R3.loc[df_R3['detection_mjd'] == detection_mjd, 'freq_id_low'] = builtins.freq_id_low df_R3.loc[df_R3['detection_mjd'] == detection_mjd, 'freq_id_high'] = builtins.freq_id_high df_R3.loc[df_R3['detection_mjd'] == detection_mjd, 't0'] = builtins.t0 df_R3.loc[df_R3['detection_mjd'] == detection_mjd, 't1'] = builtins.t1 df_R3.loc[df_R3['detection_mjd'] == detection_mjd, 'ds_freq'] = builtins.ds_freq df_R3.loc[df_R3['detection_mjd'] == detection_mjd, 'ds_time'] = builtins.ds_time df_R3.loc[df_R3['detection_mjd'] == detection_mjd, 'delta_dm'] = builtins.delta_dm df_R3.loc[df_R3['detection_mjd'] == detection_mjd, 'smooth'] = builtins.smooth save(state_variable_name, df_R3) df_R3.to_csv('arts_r3.csv', index=False) def check_dir(folder): if not os.path.exists(folder): os.makedirs(folder) def save_figure(b): check_dir('images/manual_opt') check_dir('images/manual_opt/data') _filename = df_R3.loc[df_R3['detection_mjd'] == detection_mjd, 'paper_name'].values[0] print ("saved to images/manual_opt/%s.png" % _filename) fig.savefig("images/manual_opt/%s.png" % _filename, dpi=300) with open("images/manual_opt/data/%s_waterfall.npy" % _filename, 'wb') as f: np.savetxt(f, builtins.sub_waterfall) with open("images/manual_opt/data/%s_fluctuation.npy" % _filename, 'wb') as f: np.savetxt(f, builtins.power_vs_dm) save_to_df() def save_figure_raw(b): check_dir('images/manual_opt/raw') check_dir('images/manual_opt/raw/data') _filename = df_R3.loc[df_R3['detection_mjd'] == detection_mjd, 'paper_name'].values[0] print ("saved to images/manual_opt/raw/%s.png" % _filename) fig.savefig("images/manual_opt/raw/%s.png" % _filename, dpi=300) with open("images/manual_opt/raw/data/%s_waterfall.npy" % _filename, 'wb') as f: np.savetxt(f, builtins.sub_waterfall) with 
open("images/manual_opt/raw/data/%s_fluctuation.npy" % _filename, 'wb') as f: np.savetxt(f, builtins.power_vs_dm) save_to_df() def clear_struct_params(b): for param in ['struct_opt_dm', 'struct_opt_dm_err', 'fluct_id_low', 'fluct_id_high', 'freq_id_low', 'freq_id_high', 't0', 't1', 'ds_freq', 'ds_time', 'delta_dm', 'smooth']: df_R3.loc[df_R3['detection_mjd'] == detection_mjd, param] = np.nan save(state_variable_name, df_R3) print ('Parameters cleared') button.on_click(save_figure) button_raw.on_click(save_figure_raw) button_clear.on_click(clear_struct_params) ###Output _____no_output_____ ###Markdown Plot DM vs Phase / time ###Code df = df_R3 plt.scatter(df['detection_phase'], df['struct_opt_dm']) plt.errorbar(df['detection_phase'], df['struct_opt_dm'], yerr=df['struct_opt_dm_err'], ls='none') plt.hlines(np.nanmedian(df['struct_opt_dm']), np.min(df['detection_phase']), np.max(df['detection_phase']), linestyle='dashed', colors='black', alpha = 0.5) plt.annotate(r'$\~{\tt{DM}}$ = %.2f pc/cc' % (np.nanmedian(df['struct_opt_dm'])), xy=(0, 0.5), xycoords='axes fraction', xytext=(0.05, 0.85),) plt.xlabel('Phase') plt.ylabel('DM (pc/cc)') plt.tight_layout() plt.savefig('images/DM_v_Time_R3.png', dpi=300) df_R3[['detection_isot', 'detection_mjd', 'detection_phase', 'struct_opt_dm', 'struct_opt_dm_err']].to_csv('R3_arts.csv', index=False) ###Output _____no_output_____
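###Markdown As a quick way to summarise the structure-optimised DMs saved above, a minimal sketch reads 'R3_arts.csv' back and computes the median alongside an uncertainty-weighted mean (it assumes the error column is finite and non-zero for the bursts being combined). ###Code # Sketch: summary statistics from the CSV written in the cell above
summary = pd.read_csv('R3_arts.csv')
valid = summary.dropna(subset=['struct_opt_dm', 'struct_opt_dm_err'])
weights = 1.0 / valid['struct_opt_dm_err'] ** 2
weighted_mean = (weights * valid['struct_opt_dm']).sum() / weights.sum()
print('Median DM: {:.2f} pc/cc'.format(valid['struct_opt_dm'].median()))
print('Weighted mean DM: {:.2f} pc/cc'.format(weighted_mean)) ###Output _____no_output_____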
abs/State Eligible Populations.ipynb
###Markdown State Populations Table of Contents1&nbsp;&nbsp;Set-up2&nbsp;&nbsp;Capture Set-up ###Code import numpy as np import pandas as pd files = { 'NSW': '3101051.xls', 'Victoria': '3101052.xls', 'Queensland': '3101053.xls', 'SA': '3101054.xls', 'WA': '3101055.xls', 'Tasmania': '3101056.xls', 'NT': '3101057.xls', 'ACT': '3101058.xls', } ###Output _____no_output_____ ###Markdown Capture ###Code age_and_up = 0 # 0 or 16 totals = {} for state, filename in files.items(): xl = pd.ExcelFile(filename) data_sheets = [x for x in xl.sheet_names if 'Data' in x] frame = [xl.parse(name, index_col=0, header=0) for name in data_sheets] frame = pd.concat(frame, axis=1) frame = frame[frame.columns[frame.columns.str.contains('Persons')]] state_total = frame[frame.columns[age_and_up:]].iloc[-1].sum() totals[state] = state_total totals ###Output _____no_output_____
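###Markdown Since the `age_and_up` switch above simply skips that many leading single-year-of-age columns before summing, the eligible (16 and over) totals can be produced with the same loop. A minimal sketch follows, assuming the ABS sheets keep those columns in ascending age order, as the slicing above implies. ###Code # Sketch: totals for ages 16 and over, reusing the aggregation above
age_and_up = 16
totals_16_plus = {}
for state, filename in files.items():
    xl = pd.ExcelFile(filename)
    data_sheets = [x for x in xl.sheet_names if 'Data' in x]
    frame = pd.concat([xl.parse(name, index_col=0, header=0) for name in data_sheets], axis=1)
    frame = frame[frame.columns[frame.columns.str.contains('Persons')]]
    totals_16_plus[state] = frame[frame.columns[age_and_up:]].iloc[-1].sum()
totals_16_plus ###Output _____no_output_____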
code/create_landmarkDB.ipynb
###Markdown This notebook contains the scripts to download the annotations of landmarks of Archey's frogs from Zooniverse and create the landmark DB Requirements Install required packages We use the "panoptes_client" package to communicate with Zooniverse. If you don't have it installed, run the command below. ###Code !pip install panoptes_client ###Output Requirement already satisfied: panoptes_client in /usr/local/lib/python3.7/dist-packages (1.3.0) Requirement already satisfied: six>=1.9 in /usr/local/lib/python3.7/dist-packages (from panoptes_client) (1.15.0) Requirement already satisfied: redo>=1.7 in /usr/local/lib/python3.7/dist-packages (from panoptes_client) (2.0.4) Requirement already satisfied: python-magic<0.5,>=0.4 in /usr/local/lib/python3.7/dist-packages (from panoptes_client) (0.4.22) Requirement already satisfied: future<0.19,>=0.16 in /usr/local/lib/python3.7/dist-packages (from panoptes_client) (0.16.0) Requirement already satisfied: requests<2.25,>=2.4.2 in /usr/local/lib/python3.7/dist-packages (from panoptes_client) (2.23.0) Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<2.25,>=2.4.2->panoptes_client) (2020.12.5) Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<2.25,>=2.4.2->panoptes_client) (2.10) Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<2.25,>=2.4.2->panoptes_client) (3.0.4) Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<2.25,>=2.4.2->panoptes_client) (1.24.3) ###Markdown Load required libraries Load generic libraries ###Code import io import zipfile import json import pandas as pd import numpy as np import os import re import matplotlib.pyplot as plt import cv2 import getpass from google.colab import drive from datetime import date from panoptes_client import ( SubjectSet, Subject, Project, Panoptes, ) from google.colab import drive drive.mount('/content/gdrive') import sys sys.path.append('./gdrive/MyDrive/wildlife/Archeys_frogs') import config ###Output _____no_output_____ ###Markdown Connect to Zooniverse You need to specify your Zooniverse username and password. Uploading and downloading information from Zooniverse is only accessible to those user with access to the project. ###Code # Your user name and password for Zooniverse. zoo_user = getpass.getpass('Enter your Zooniverse user') zoo_pass = getpass.getpass('Enter your Zooniverse password') # Connect to Zooniverse with your username and password auth = Panoptes.connect(username=zoo_user, password=zoo_pass) if not auth.logged_in: raise AuthenticationError("Your credentials are invalid. 
Please try again.") # Connect to the Zooniverse project (our frog project # is 13355) project = Project(13355) ###Output Enter your Zooniverse user·········· Enter your Zooniverse password·········· ###Markdown Download Zooniverse annotations ###Code WHAREORINA_DIR_FORMAT = "/content/gdrive/MyDrive/wildlife/Archeys_frogs/whareorino_{grid_letter_lower}" PUKEOKAHU_DIR = "/content/gdrive/MyDrive/wildlife/Archeys_frogs/pukeokahu/Pukeokahu Frog Monitoring" GRID_DIR_FORMAT = os.path.join(WHAREORINA_DIR_FORMAT, "Grid {grid_letter}") INDIVIDUAL_FROGS_DIR_NAME = "Individual Frogs" NEW_FROGS_DIR_NAME = "New Frogs" GRID_LETTER_REGEX = re.compile("Grid (.)") PUKEOKAHU_IDENTIFIER = "Pukeokahu" # Determines the maximum amount of pixel difference between min and max of x,y # As a percentage of image size, differences bigger than # MAX_PIXEL_PERCENT_DIFF will be discarded MAX_PIXEL_PERCENT_DIFF = 0.03 def get_file_path(subject_data_dict): # Searches for grid letter, using match because currently only at the beginning grid_letter = GRID_LETTER_REGEX.match(subject_data_dict['grid']) if grid_letter != None: grid_letter = grid_letter.group(1) # Find file in individual frogs dir grid_dir = GRID_DIR_FORMAT.format(grid_letter_lower = grid_letter.lower(), grid_letter = grid_letter) elif PUKEOKAHU_IDENTIFIER in subject_data_dict['grid']: grid_dir = PUKEOKAHU_DIR else: return None file_path = os.path.join(grid_dir, INDIVIDUAL_FROGS_DIR_NAME, subject_data_dict['frog_id'], subject_data_dict['filename']) if os.path.isfile(file_path): return file_path # Find file in new frog dir file_path = os.path.join(grid_dir, NEW_FROGS_DIR_NAME, subject_data_dict['filename']) if os.path.isfile(file_path): return file_path return None # Get the export classifications export = project.get_export("classifications") # Save the response as pandas data frame classifications = pd.read_csv( io.StringIO(export.content.decode("utf-8")), usecols=[ "user_name", "subject_ids", "subject_data", "classification_id", #"workflow_id", #"workflow_version", "annotations" ], ) # Convert JSON strings into Python dictionaries, providing access to key-value pairs. 
classifications['annotations'] = [json.loads(q) for q in classifications.annotations] classifications['subject_data'] = [json.loads(q) for q in classifications.subject_data] classifications['image_path'] = [get_file_path(next(iter(q.values()))) for q in classifications.subject_data] # Flatten annotations x =[] y = [] label = [] classification_id = [] for i,row in classifications.iterrows(): class_id = row['classification_id'] for t in row['annotations']: # Select survey Task = T0 if t['task'] == 'T0': if len(t['value']) > 0: for l in t['value']: x.append(l['x']) y.append(l['y']) label.append(l['tool_label']) classification_id.append(class_id) else: x.append('') y.append('') label.append('') classification_id.append(class_id) # Combine all the annotations into a data frame annotations = pd.concat([ pd.DataFrame(x, columns =['x']), pd.DataFrame(y, columns =['y']), pd.DataFrame(label, columns =['label']), pd.DataFrame(classification_id, columns =['classification_id'])], axis=1) # Drop metadata and index columns from original df classifications = classifications.drop(columns=["annotations"]) # Add metadata information based on the classification id flat_anotations = pd.merge(annotations, classifications, how="left", on=["classification_id"]) ###Output _____no_output_____ ###Markdown Analyze Landmark distribution ###Code flat_anotations.info() flat_anotations.head() ###Output _____no_output_____ ###Markdown Building the annotation DB ###Code df = flat_anotations.copy() df.dropna() df = df.drop(['user_name', 'subject_data', 'subject_ids'], axis = 1) pivot_columns = ['x','y'] df = df.pivot_table(pivot_columns, index = 'classification_id', columns = 'label') df.columns = ['_'.join([col[0],str(col[1]).replace(' ', '_')]).strip() for col in df.columns.values] df = pd.merge(df, flat_anotations[['classification_id','image_path']], left_on = 'classification_id', right_on = 'classification_id', how = 'left') df = df.groupby(['classification_id']).first() df.head() ###Output _____no_output_____ ###Markdown Generating a few statistics How many images were classified 1,2,3,.... times ###Code df.value_counts('image_path').value_counts() ###Output _____no_output_____ ###Markdown Percentage of images that are classified 1,2,3,... 
times ###Code df.value_counts('image_path').value_counts(normalize = True) ###Output _____no_output_____ ###Markdown Creating a new dataframe that is easier to work with for finding the distances between classification points ###Code df_v = flat_anotations.copy() df_v.dropna() df_v = df_v.drop(['user_name', 'subject_data', 'subject_ids'], axis = 1) df_v = df_v.groupby(['image_path', 'label']).agg(x = ('x', 'mean'), y = ('y', 'mean'), x_range=('x', lambda x: x.max() - x.min()), y_range=('y', lambda y: y.max() - y.min()) ) df_v['point_max_diff'] = df_v.apply(lambda row: np.linalg.norm((row.x_range,row.y_range)), axis=1) ###Output _____no_output_____ ###Markdown Max point diff: ###Code df_v[df_v['point_max_diff'] == np.max(df_v['point_max_diff'])] ###Output _____no_output_____ ###Markdown Looking at the distribution of the distances between duplicate annotations ###Code image_sizes = [] diff_ratios = [] for im_path in df_v.index.get_level_values('image_path').unique(): im = cv2.imread(im_path) im_size = im.shape[:2] image_sizes.append(im_size) im_diff_width = np.max(df_v.loc[[im_path]]['x_range']) im_diff_height = np.max(df_v.loc[[im_path]]['y_range']) im_diff_p = np.divide((im_diff_height, im_diff_width), im_size) diff_ratios.append([np.linalg.norm(im_diff_p), im_path]) image_sizes = np.array(image_sizes) diff_ratios = np.array(diff_ratios) plt.scatter(*zip(*image_sizes)) plt.show() diffs = np.float32(diff_ratios[:,0]) fig, ax = plt.subplots() ax.hist(diffs[np.where(diffs != 0)], bins = 20) plt.setp(ax.get_xticklabels(), rotation=60, horizontalalignment='right') plt.show() np.max(diffs) np.mean(diffs[diffs != 0]) ###Output _____no_output_____ ###Markdown The highest point difference is currently about 4 percent of the image, so it is not a substantial difference and whole classifications are kept. MAX_PIXEL_DIFF = 0.05 (defined beforehand with all the constants) ###Code non_valid_images = diff_ratios[np.where(np.float32(diff_ratios[:,0]) > MAX_PIXEL_PERCENT_DIFF)][:, 1] df = df[~np.isin(df['image_path'], non_valid_images)] df = df.groupby(['image_path']).mean() df.reset_index(inplace = True) # cols = ['image_path', 'x_Left_eye', 'y_Left_eye', 'x_Left_front_leg', 'y_Left_front_leg', # 'x_Right_eye', 'y_Right_eye', 'x_Right_front_leg', 'y_Right_front_leg', # 'x_Tip_of_snout', 'y_Tip_of_snout', 'x_Vent', 'y_Vent'] cols = ['image_path'] + config.COLS_DF_NAMES df = df[cols] df.head() # Using loop and not apply because this is considerably faster image_sizes = [] for im_path in df.image_path: im = cv2.imread(im_path) im_size = im.shape[:2] image_sizes.append(im_size) image_sizes = np.array(image_sizes) df['original_width_size'] = image_sizes[:, 1] df['original_height_size'] = image_sizes[:, 0] df.to_pickle("./gdrive/MyDrive/wildlife/Archeys_frogs/image_path_anotations_db.pkl") # Quick validation that nothing weird happened r_l = [] for i in df.index: row = df.loc[i] im = cv2.imread(row.image_path) r_l.append(not (im.shape[1] == row.original_width_size and im.shape[0] == row.original_height_size)) r_l = np.array(r_l) np.sum(r_l) len(df) ###Output _____no_output_____
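###Markdown A minimal sketch of reading the saved table back (the same Drive path passed to `to_pickle` above), to check what downstream code will receive: ###Code # Sketch: reload the landmark table written above and inspect one row
landmarks = pd.read_pickle("./gdrive/MyDrive/wildlife/Archeys_frogs/image_path_anotations_db.pkl")
print(landmarks.shape)
print(landmarks.iloc[0][['image_path', 'original_width_size', 'original_height_size']]) ###Output _____no_output_____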
Modified Nodal Analysis Report.ipynb
###Markdown Author: Philippe Nadon Project Description Introduction Oftentimes in electrical engineering, the voltage at each node in a circuit needs to be known to understand how the circuit works. As the complexity of the circuit grows, it becomes increasingly difficult or even impossible to apply many of the common circuit-solving techniques, let alone to solve the circuit by hand. Problem Definition To solve this problem, one needs to take a circuit and represent it in a textual manner (i.e. a dataframe). The dataframe is then fed into an algorithm which analyzes each component and outputs the value of every unknown in the circuit (e.g. the voltage at each node) based on the relationships between the components, at discrete time intervals, to simulate the current flow throughout the circuit. The challenge for such a tool is speeding up the simulation: because of the data dependency between time steps, the work cannot be distributed across processes one time step at a time. The most parallelizable part is solving the system of linear equations. Solution Thus, the solution is to implement Modified Nodal Analysis (MNA) algorithmically, a technique which is guaranteed to solve any circuit. To speed up computation, the MNA implementation can be parallelized, which is greatly appreciated when working with very large and complex circuits. The MNA algorithm takes the textual representation of the circuit, assembles a system of linear equations from the known component values, and solves for the unknowns in these equations. Because the current sources vary, the voltages and currents in the circuit vary with respect to time, so the solver repeatedly solves for these unknowns at each time step. Thus, the computation takes the form Ax = b. Introduction to Tool This project explores methods by which one can implement a circuit analysis tool using [Modified Nodal Analysis](https://en.wikipedia.org/wiki/Modified_nodal_analysis). A large portion of the work in this project is thanks to [this tutorial](https://lpsa.swarthmore.edu/Systems/Electrical/mna/MNA1.html), which was immensely useful in developing this software. The code was implemented in Python, using the typical scientific libraries (NumPy, SciPy, Pandas), as well as CuPy, which serves as a wrapper that lets Python code interact with CUDA. Techniques Used Modified Nodal Analysis The algorithm uses Modified Nodal Analysis to construct a system of linear equations, which is then solved on the CPU or GPU. As a result, both algorithm design and parallelism were part of the learning experience for making this tool. Modified Nodal Analysis is a way of systematically writing Kirchhoff's current law equations at every node of a circuit, augmented with one extra equation and one extra unknown (the branch current) for each voltage source, so that any circuit can be expressed as a single matrix equation. Random Circuit Generator To test circuits of varying sizes and densities, a random circuit generator was developed. This generator ensures that any two nodes in the circuit form a cycle, and that the connections are well varied and distributed, using various algorithms as well as a [gamma distribution](https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.random.gamma.html). Thus, this project also involves some graph theory. Sparse Linear Solver Both a [CPU-based solver](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.sparse.linalg.lsqr.html) and a [GPU-based solver](https://docs-cupy.chainer.org/en/stable/reference/generated/cupyx.scipy.sparse.linalg.lsqr.html) were used, to compare the performance of the two.
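To make the Ax = b form concrete, a minimal hand-worked sketch follows (a toy circuit, not one produced by the project's generator): a 6 V source drives two series resistors, the unknowns are the two node voltages plus the source current, and the SciPy routine linked above solves the system. ###Code import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import lsqr

# Toy MNA system: 6 V source at node 1, R1 = 2 ohm between nodes 1 and 2,
# R2 = 4 ohm from node 2 to ground. Unknowns: x = [v1, v2, i_source].
R1, R2, Vs = 2.0, 4.0, 6.0
A = csr_matrix(np.array([
    [ 1.0 / R1, -1.0 / R1,            1.0],  # KCL at node 1 (source current enters here)
    [-1.0 / R1,  1.0 / R1 + 1.0 / R2, 0.0],  # KCL at node 2
    [ 1.0,       0.0,                 0.0],  # branch equation for the source: v1 = Vs
]))
b = np.array([0.0, 0.0, Vs])

x = lsqr(A, b)[0]
print(x)  # expected: roughly [6., 4., -1.] (node voltages, then the source current) ###Output _____no_output_____ ###Markdown The third unknown is the current through the voltage source, with its sign fixed by the MNA convention; for the time-stepped simulation described above, the same solve is simply repeated with an updated right-hand side b at every time step. ###Markdown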
Some knowledge of how CUDA works was required to use the GPU-based solver, which is explained below. Both solvers compute the least-squares solution of a sparse linear system of equations; that is, the returned values of x minimize $||b - Ax||^2$ to within a certain tolerance. About CUDA The following information is from Cheng, Jie. “Programming Massively Parallel Processors. A Hands-on Approach.” *Scalable Computing: Practice and Experience 11* (2010): n. pag. CUDA is a parallel computing platform released by NVIDIA in 2007, designed to tackle numerically intensive tasks in parallel. CUDA offers developers an interface through which they can execute code directly on the cores of a GPU. Because a GPU has many more cores than a CPU, its total throughput is orders of magnitude greater than that of a typical CPU. Since GPUs are typically used for graphics processing, they are especially adept at matrix computations, and thus performing linear solves on a GPU should have some real-world advantages over typical CPUs. One significant bottleneck in CUDA computing is the latency between starting the transfer of data to the GPU over the PCIe lanes and starting the computation on the GPU. Heavy traffic between main memory and the dedicated GPU memory can slow down the computation, so the amount of time spent transferring data must be kept minimal. About Least Squares Solver From: Prikopa, Karl E. et al. “Parallel iterative refinement linear least squares solvers based on all-reduce operations.” *Parallel Computing 57* (2016): 167-184. There are many variations of the least-squares solver, the main one being the All-Reduce Parallel Linear Least Squares (ARPLS) solver. There are 3 main steps involved in this solver: 1. Parallel QR factorization 2. Parallel matrix-vector multiplication, with one or more local triangular solves 3. Iterative refinement (IR) to stabilize and improve the solution (only if there is more than one local triangular solve) There are also 2 types of QR factorization: 1. All-Reduce Modified Gram-Schmidt, best for average-case matrices 2. Tall-Skinny QR, best for narrow matrices In summary, the matrix A is factored as A = QR, with Q an orthogonal matrix and R an upper triangular matrix; the two factors are then combined into the solution using matrix-vector multiplication and an all-reduce operation.
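A rough sketch of how the two code paths differ is shown below (assuming a working CuPy installation for the GPU branch; it follows the documented lsqr interfaces linked above rather than reproducing the project's benchmark code). The point to notice is the explicit host-to-device copy that the GPU path pays before it can start solving, which is exactly the transfer cost discussed in the CUDA section. ###Code import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import lsqr as cpu_lsqr

# A random sparse, square system standing in for an assembled MNA matrix
n = 2000
A_cpu = sp.random(n, n, density=0.01, format='csr', dtype=np.float64) + sp.eye(n, format='csr')
b_cpu = np.random.rand(n)

x_cpu = cpu_lsqr(A_cpu, b_cpu)[0]  # CPU path: the data already lives in main memory

try:
    import cupy as cp
    import cupyx.scipy.sparse as cusp
    from cupyx.scipy.sparse.linalg import lsqr as gpu_lsqr

    A_gpu = cusp.csr_matrix(A_cpu)  # host -> device transfer of the matrix over PCIe
    b_gpu = cp.asarray(b_cpu)       # host -> device transfer of the right-hand side
    x_gpu = cp.asnumpy(gpu_lsqr(A_gpu, b_gpu)[0])  # solve on the GPU, copy the result back
except ImportError:
    x_gpu = None  # no CuPy / CUDA device available ###Output _____no_output_____ ###Markdown The results below quantify this trade-off on the hardware listed next. ###Markdown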
Specifications of Hardware UsedIntel i5 4690, 4 cores at 3.5GHz, turbo boosting up to 3.9GHz16GB DDR3 main memoryNVIDIA GTX 970 with 4GB of GDDR5 memory, 1664 CUDA cores at 1050MHz ResultsThe results are displayed below, first is the resulting CSV containing the times for each type of solver, per matrix size and node density: ###Code import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt ###Output _____no_output_____ ###Markdown Time per matrix size, CPU vs GPU (CUDA) ###Code df = pd.read_csv("stats.csv") df = df.dropna().drop(columns=['fname', 'transfer', 'solve']) pd.set_option('display.max_rows', 20) df ###Output _____no_output_____ ###Markdown Plotting the time taken versus the matrix size gives us the following: ###Code fig=plt.figure(figsize=(9, 8), dpi= 160, facecolor='w', edgecolor='k') g = sns.scatterplot(x="matrix_size", y="total", hue="avg_connect", style="processor", size="num_nodes", data=df) ###Output _____no_output_____ ###Markdown - The avg_connect represents the average number of connections each node has, as the average connectivity increases, so does the complexity and density of the matrix.- processor denotes which processor was used to compute the solution, either the GPU or the CPU- The matrix size is simply the height or width of the A matrix- total denotes the total computation time required to solve the linear system, including data transfer to each respective processor We can see here that the GPU is consistently faster than the CPU for almost every matrix size. However, for matrix sizes / connectivity higher than the ones displayed, the GPU failed to produce any output as a result of insufficient bandwidth / memory. Speedup (CPU time / GPU time) ###Code cuda_df = df[df["processor"] == "cuda"].copy().reset_index().drop(columns=['index', 'processor']) cpu_df = df[df["processor"] == "cpu"].copy().reset_index() cuda_df["speedup"] = cpu_df["total"] / cuda_df["total"] cuda_df fig=plt.figure(figsize=(9, 8), dpi= 160, facecolor='w', edgecolor='k') cmap = sns.cubehelix_palette(light=0.8, dark=0, as_cmap=True) g = sns.scatterplot(x="matrix_size", y="speedup", hue="total", palette=cmap, style="avg_connect", size="num_nodes", data=cuda_df) ###Output _____no_output_____
_notebooks/2020-05-30-Deep_learning_approach_for_text_classification_with_spacy.ipynb
###Markdown "Mother's day Sentiment analysis - with spaCy"> "In this notebook I try to use a competition dataset of tweets reacting to Mother's day and classify their sentiments with spaCy"- toc: true- branch: master- badges: true- comments: true- categories: [nlp, eda, sentiment]- hide: false ###Code #hide import requests zip_file = requests.get('https://he-s3.s3.amazonaws.com/media/hackathon/hackerearth-test-draft-1-102/predicting-tweet-sentiments-231101b4/fa62f5d69a9f11ea.zip?Signature=2yxQgjub3w4jc%2BhnFKq0GEwmNEE%3D&Expires=1590825609&AWSAccessKeyId=AKIA6I2ISGOYH7WWS3G5') with open('data.zip', 'wb') as f: f.write(zip_file.content) #hide !cp /content/drive/My\ Drive/Data/tweets_mother_day.zip ./data.zip !unzip data.zip ###Output Archive: data.zip creating: dataset/ inflating: dataset/train.csv inflating: dataset/test.csv ###Markdown Setup paths We will use the method from my previous [post](https://mani2106.github.io/Blog-Posts/nlp/eda/sentiment/2020/05/23/_Hackerearth_mothers_day_sentiment.html) to clean the text. ###Code from pathlib import Path import pandas as pd DATA_PATH = Path('dataset/') DRIVE_PATH = Path(r"/content/drive/My Drive/Spacy/Pretrained") train_data = pd.read_csv(DATA_PATH/'train.csv', index_col=0) train_data.head() ###Output _____no_output_____ ###Markdown Let's check average length of text before cleaning. ###Code #collapse print(sum( train_data['original_text'].apply(len).tolist() )/train_data.shape[0]) ###Output 227.42102009273572 ###Markdown Clean links with regex ###Code train_data['original_text'].replace( # Regex is match : the text to replace with {'(https?:\/\/.*|pic.*)[\r\n]*' : ''}, regex=True, inplace=True) ###Output _____no_output_____ ###Markdown Let's check the average length again. ###Code #hide print(sum( train_data['original_text'].apply(len).tolist() )/train_data.shape[0]) ###Output 185.95672333848532 ###Markdown The regex did it's job I suppose. ###Code train_data.head() ###Output _____no_output_____ ###Markdown In my previous exploratory [post](https://mani2106.github.io/Blog-Posts/nlp/eda/sentiment/2020/05/23/_Hackerearth_mothers_day_sentiment.html), I have seen the data and I think that the features other than the text may not be required, (ie)- lang- retweet_count- original_author Class distribution - `0` must mean `Neutral`- `1` means `Positive`- `-1` means `Negative` ###Code train_data['sentiment_class'].value_counts().plot(kind='bar') ###Output _____no_output_____ ###Markdown Let's see some sentences with negative examples, I am interested why they should be negative on a happy day(Mother's day) ###Code list_of_neg_sents = train_data.loc[train_data['sentiment_class'] == -1, 'original_text'].tolist() #collapse pprint(list_of_neg_sents[:5]) ###Output ['Happy mothers day To all This doing a mothers days work. Today been quiet ' 'but Had time to reflect. Dog walk, finish a jigsaw do the garden, learn few ' 'more guitar chords, drunk some strawberry gin and tonic and watch Lee evens ' 'on DVD. My favourite place to visit. #isolate ', 'Remembering the 3 most amazing ladies who made me who I am! My late ' 'grandmother iris, mum carol and great grandmother Ethel. Missed but never ' 'forgotten! Happy mothers day to all those great mums out there! Love sent to ' 'all xxxx ', 'Happy Mothers Day to everyone tuning in. This is the 4th Round game between ' 'me and @CastigersJ Live coverage on @Twitter , maybe one day @SkySportsRL or ' 'on the OurLeague app', "Happy Mothers Day ! We hope your mums aren't planning to do any work around " 'the house today! 
Surely it can wait until next week? #plumbers ' '#heatingspecialists #mothersday #mothersday ', "Happy mothers day to all those mums whos children can't be with them today. " 'My son Dylan lives in heaven I wish I could see him for one more hug. I wish ' 'I could tell him how much I love and miss him. Huge happy mothers day to ' 'your mum too.'] ###Markdown Well some tweets actually express their feelings for their deceased mothers. This is understandable. We can use traditional NLP methods or deep learning methods to model the text. We will try the deep learning in this notebook . Deep Learning approach with Spacy It's recommended [here](https://spacy.io/usage/trainingtransfer-learning) that to improve performance of the classifier, **Language model pretraining** is one way to do so. Spacy requires a `.jsonl` format of input to train text Get texts from the dataframe and store in `jsonl` format more about that [here](https://spacy.io/api/clipretrain-jsonl). We can also load the test data to get some more sample for the `pretraining`, this will not cause **Data Leakage** because we are not giving any labels to the model. ###Code test_data = pd.read_csv(DATA_PATH/'test.csv', index_col=0) test_data.head() ###Output _____no_output_____ ###Markdown Let's clean the test set for links as well ###Code test_data['original_text'].replace( # Regex pattern to match : the text to replace with {'(https?:\/\/.*|pic.*)[\r\n]*' : ''}, regex=True, inplace=True) test_data.shape texts_series = pd.concat([train_data['original_text'], test_data['original_text']], axis='rows') ###Output _____no_output_____ ###Markdown Let's check the length ###Code texts_series.shape[0], train_data.shape[0]+test_data.shape[0] ###Output _____no_output_____ ###Markdown So now we can use this `texts_series` to create the `jsonl` file. ###Code list_of_texts = [ # Form dictionary with 'text' key {'text': value} for _, value in texts_series.items() ] ###Output _____no_output_____ ###Markdown I will use `srsly` to write this list of dictionaries to a `jsonl` file ###Code import srsly # saving to my Google drive srsly.write_jsonl(DRIVE_PATH/'pretrain_texts.jsonl', list_of_texts) ###Output _____no_output_____ ###Markdown We can see a few lines from the saved file. ###Code #collapse from pprint import pprint with Path(DRIVE_PATH/'pretrain_texts.jsonl').open() as f: lines = [next(f) for x in range(5)] pprint(lines) ###Output ['{"text":"Happy #MothersDay to all you amazing mothers out there! I know ' "it's hard not being able to see your mothers today but it's on all of us to " 'do what we can to protect the most vulnerable members of our society. ' '#BeatCoronaVirus "}\n', '{"text":"Happy Mothers Day Mum - I\'m sorry I can\'t be there to bring you ' "Mothers day flowers & a cwtch - honestly at this point I'd walk on hot coals " "to be able to. But I'll be there with bells on as soon as I can be. Love you " 'lots xxx (p.s we need more photos!) "}\n', '{"text":"Happy mothers day To all This doing a mothers days work. Today been ' 'quiet but Had time to reflect. Dog walk, finish a jigsaw do the garden, ' 'learn few more guitar chords, drunk some strawberry gin and tonic and watch ' 'Lee evens on DVD. My favourite place to visit. #isolate "}\n', '{"text":"Happy mothers day to this beautiful woman...royalty soothes you ' 'mummy jeremy and emerald and more #PrayForRoksie #UltimateLoveNG "}\n', '{"text":"Remembering the 3 most amazing ladies who made me who I am! My late ' 'grandmother iris, mum carol and great grandmother Ethel. 
Missed but never ' 'forgotten! Happy mothers day to all those great mums out there! Love sent to ' 'all xxxx "}\n'] ###Markdown Start Pretraining We should download a pretrained model to use; here I am using _en_core_web_md_ from `Spacy`. This can be confusing (i.e. why train a pretrained model if I can just download one?). The idea is that the downloaded pretrained model was trained on a **very different** type of dataset, but it already has some knowledge of interpreting words in English sentences. Here, however, we have a dataset of tweets which the downloaded pretrained model may or may not have seen during its training, so we use our dataset to **fine-tune** the downloaded model, so that with minimal training it can start understanding the tweets right away. ###Code #collapse !python -m spacy download en_core_web_md ###Output _____no_output_____ ###Markdown Training results ###Code #collapse %%bash # Command to pretrain a language model # Path to jsonl file with data # Using md model as the base # saving the model on my Drive folder # training for 50 iterations with seed set to 0 python -m spacy pretrain /content/drive/My\ Drive/Spacy/Pretrained/pretrain_texts.jsonl \ /usr/local/lib/python3.6/dist-packages/en_core_web_md/en_core_web_md-2.2.5 \ /content/drive/My\ Drive/Spacy/Pretrained/ \ -i 50 -s 0 \ ###Output ℹ Not using GPU ⚠ Output directory is not empty It is better to use an empty directory or refer to a new output path, then the new directory will be created for you. ✔ Saved settings to config.json ⠙ Loading input texts... ✔ Loaded input texts
✔ Loaded model '/usr/local/lib/python3.6/dist-packages/en_core_web_md/en_core_web_md-2.2.5'  ============== Pre-training tok2vec layer - starting at epoch 0 ============== # # Words Total Loss Loss w/s 0 115177 114619.719 114619 7308 0 177090 174673.695 60053 8123 1 291933 282095.656 107421 8353 1 354180 337893.113 55797 8156 2 468951 432705.457 94812 8398 2 531270 479373.527 46668 8271 3 646206 557380.137 78006 8368 3 708360 595962.348 38582 8151 4 823108 662773.332 66810 8349 4 885450 696823.672 34050 8125 5 1000591 756743.684 59920 8254 5 1062540 787816.266 31072 7943 6 1177552 844198.828 56382 8380 6 1239630 874128.219 29929 7996 7 1354814 928725.262 54597 8291 7 1416720 957604.215 28878 8081 8 1531685 1010607.96 53003 8310 8 1593810 1039022.08 28414 8006 9 1708981 1091185.60 52163 8248 9 1770900 1118857.03 27671 8032 10 1885776 1169906.66 51049 8240 10 1947990 1197361.49 27454 8015 11 2062486 1247384.96 50023 8344 11 2125080 1274605.26 27220 8153 12 2239188 1323843.16 49237 8376 12 2302170 1350702.81 26859 7941 13 2416942 1399298.78 48595 8213 13 2479260 1425267.50 25968 7968 14 2594827 1473118.10 47850 8357 14 2656350 1498307.73 25189 7879 15 2770909 1544661.96 46354 7572 15 2833440 1569846.84 25184 7980 16 2948496 1615382.64 45535 8137 16 3010530 1639715.23 24332 7848 17 3125700 1684541.43 44826 8316 17 3187620 1708564.64 24023 7941 18 3302357 1753053.58 44488 8039 18 3364710 1776999.23 23945 7234 19 3480485 1821211.88 44212 8191 19 3541800 1844436.19 23224 7859 20 3657480 1887923.85 43487 8170 20 3718890 1911031.40 23107 7902 21 3833871 1954109.82 43078 8253 21 3895980 1977017.92 22908 7840 22 4011686 2019201.90 42183 8096 22 4073070 2041539.05 22337 7906 23 4188348 2083159.02 41619 8148 23 4250160 2105332.99 22173 7882 24 4364945 2146261.39 40928 8127 24 4427250 2168323.78 22062 8017 25 4542920 2209202.75 40878 8059 25 4604340 2230725.28 21522 7823 26 4719450 2271131.96 40406 8272 26 4781430 2292648.11 21516 7986 27 4896609 2332466.73 39818 8243 27 4958520 2353768.18 21301 8030 28 5073022 2392817.96 39049 8217 28 5135610 2414267.76 21449 8032 29 5250013 2452981.53 38713 8250 29 5312700 2474212.04 21230 8098 30 5427418 2512822.70 38610 8251 30 5489790 2533517.18 20694 8083 31 5604738 2572000.05 38482 8291 31 5666880 2592500.56 20500 8071 32 5781993 2630529.82 38029 8278 32 5843970 2651013.32 20483 8080 33 5959305 2689165.69 38152 8269 33 6021060 2709293.04 20127 8086 34 6136501 2746749.93 37456 8278 34 6198150 2766661.32 19911 8105 35 6313772 2803964.79 37303 8262 35 6375240 2823781.55 19816 8095 36 6490354 2860641.99 36860 8235 36 6552330 2880476.80 19834 8056 37 6667255 2917062.19 36585 8231 37 6729420 2936732.01 19669 8108 38 6844248 2972992.59 36260 8276 38 6906510 2992718.12 19725 8074 39 7021001 3028854.93 36136 8275 39 7083600 3048416.73 19561 8092 40 7199191 3084584.77 36168 8304 40 7260690 3103795.33 19210 8085 41 7375380 3139675.02 35879 8276 41 7437780 3158848.65 19173 8136 42 7552544 3194214.46 35365 8276 42 7614870 3213532.30 19317 8072 43 7730130 3248964.60 35432 7724 43 7791960 3267930.77 18966 8096 44 7906035 3302653.15 34722 8263 44 7969050 3321900.34 19247 8103 45 8083488 3356590.95 34690 8251 45 8146140 3375484.65 18893 8084 46 8261612 3410429.05 34944 8228 46 8323230 3429043.30 18614 8121 47 8437925 3463440.15 34396 8268 47 8500320 3482173.21 18733 7931 48 8615136 3516342.36 34169 8294 48 8677410 3534982.44 18640 8098 49 8791889 3568641.36 33658 8265 49 8854500 3587280.92 18639 8102 ⚠ Skipped 250 empty values ✔ Successfully finished pretrain ###Markdown I have 
chosen to use the default parameters, though one might need to tune them for a specific problem. We can see from the logs that the loss value in the last iteration is `18639`; since the batch size was `3000` and there are `4622` texts, the data was split into `2` batches, so we should also take the previous log entry, a loss of `33658`, into account, giving an average of `26148.5`. This number might look intimidating, but the only way to check whether the pretraining actually helps is to train a model with it. If it doesn't help, we can resume the pretraining from the model saved at the last epoch. We keep only the last model from the pretraining.
###Code #hide !mv /content/drive/My\ Drive/Spacy/Pretrained/model49.bin /content/drive/My\ Drive/Spacy/ !mv /content/drive/My\ Drive/Spacy/Pretrained/*.json* /content/drive/My\ Drive/Spacy/ #hide !rm /content/drive/My\ Drive/Spacy/Pretrained/*.bin #hide !mkdir /content/drive/My\ Drive/Spacy/Pretrained/fifty_iter !mv /content/drive/My\ Drive/Spacy/model49.bin /content/drive/My\ Drive/Spacy/Pretrained/fifty_iter !mv /content/drive/My\ Drive/Spacy/*g.json* /content/drive/My\ Drive/Spacy/Pretrained/fifty_iter ###Output _____no_output_____
###Markdown Let's train a text classifier with `Spacy` Text classifier with Spacy Now that we have a pretrained model, we need to prepare data for training the text classifier. Let's have a look at the [data format](https://spacy.io/usage/training#training-simple-style) that Spacy expects the data to be in. Data Generation ```json{ "entities": [(0, 4, "ORG")], "heads": [1, 1, 1, 5, 5, 2, 7, 5], "deps": ["nsubj", "ROOT", "prt", "quantmod", "compound", "pobj", "det", "npadvmod"], "tags": ["PROPN", "VERB", "ADP", "SYM", "NUM", "NUM", "DET", "NOUN"], "cats": {"BUSINESS": 1.0},}``` This format works for training via code, as given in the examples above; there is also another format mentioned [here](https://spacy.io/api/annotation#json-input). `cats` is the only part we need to worry about, since that is where the categories/classes go. We have three classes in our dataset - `0` for `Neutral` - `1` for `Positive` - `-1` for `Negative` and they are **mutually-exclusive** (there can be only one label per sentence). We also need to split the training data into training and evaluation sets so that we can see how well our model has learnt the problem. Let's try to programmatically generate the training data from the pandas dataframe.
###Code label_map = {1:'POSITIVE', -1:'NEGATIVE', 0:'NEUTRAL'} ###Output _____no_output_____
###Markdown We need a list of tuples of the text and the annotation details in a dictionary, as mentioned above.
###Code # Adapted from sample data in https://spacy.io/usage/training#training-simple-style train_json = [ # Get the text from dataframe row (tweet.original_text, {'cats':{ label_map[tweet.sentiment_class]:1.0 } }) for tweet in train_data[['original_text', 'sentiment_class']].itertuples(index=False, name='Tweet') ] train_json[0] ###Output _____no_output_____
###Markdown Now we will split the training data.
###Code from sklearn.model_selection import train_test_split # Stratified split with labels train_split, eval_split = train_test_split(train_json, test_size=0.2, stratify=train_data['sentiment_class']) len(train_split), len(eval_split) ###Output _____no_output_____
###Markdown We should save them as `json` files to give them as input to the command line `train` utility in spacy.
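###Markdown (An optional aside, not part of the original workflow:) before saving, we can quickly confirm that the stratified split preserved the label balance. Each entry of `train_split` is a `(text, {'cats': {...}})` tuple with exactly one label, so counting the single `cats` key per example gives the class distribution.
###Code # Optional sanity check on the stratified split (illustrative aside).
from collections import Counter

train_label_counts = Counter(next(iter(annotations['cats'])) for _, annotations in train_split)
eval_label_counts = Counter(next(iter(annotations['cats'])) for _, annotations in eval_split)
print("train:", train_label_counts)
print("eval:", eval_label_counts)
###Output _____no_output_____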
###Code import json with Path(DRIVE_PATH/'train_clas.json').open('w') as f: json.dump(train_split, f) with Path(DRIVE_PATH/'eval_clas.json').open('w') as f: json.dump(eval_split, f) ###Output _____no_output_____
###Markdown Validate data input for spacy Now we should check whether we have enough correctly formatted data to train the model with spacy's `train` CLI command; for that I will use Spacy's `debug-data` CLI command.
###Code !python -m spacy debug-data -h %%bash (python -m spacy debug-data en \ /content/drive/My\ Drive/Spacy/Pretrained/train_clas.json \ /content/drive/My\ Drive/Spacy/Pretrained/eval_clas.json \ -p 'textcat' \ ) ###Output =========================== Data format validation =========================== ✔ Corpus is loadable =============================== Training stats =============================== Training pipeline: textcat Starting with blank model 'en' 0 training docs 0 evaluation docs ✘ No evaluation docs ✔ No overlap between training and evaluation data ✘ Low number of examples to train from a blank model (0) ============================== Vocab & Vectors ============================== ℹ 0 total words in the data (0 unique) ℹ No word vectors present in the model ============================ Text Classification ============================ ℹ Text Classification: 0 new label(s), 0 existing label(s) ℹ The train data contains only instances with mutually-exclusive classes. ================================== Summary ================================== ✔ 2 checks passed ✘ 2 errors
###Markdown Data Generation (again) There must be something I missed. I asked a question on [stackoverflow](https://stackoverflow.com/q/62003962/7752347) regarding this; it turns out we need the `.jsonl` format (again) and the script provided in the [repo](https://github.com/explosion/spaCy/tree/master/examples/training/textcat_example_data) to convert it to the required json format for training, so I need to change the data generation a little bit.
###Code train_jsonl = [ # Get the text from dataframe row {'text': tweet.original_text, 'cats': {v: 1.0 if tweet.sentiment_class == k else 0.0 for k, v in label_map.items()}, 'meta':{"id": str(tweet.Index)} } for tweet in train_data[['original_text', 'sentiment_class']].itertuples(index=True, name='Tweet') ] train_jsonl[0] ###Output _____no_output_____
###Markdown So instead of a list of `tuples`, I now have a list of `dictionaries`. We need to split again to get an evaluation set.
###Code # Stratified split with labels train_split, eval_split = train_test_split(train_jsonl, test_size=0.2, stratify=train_data['sentiment_class']) len(train_split), len(eval_split) #hide srsly.write_jsonl(DRIVE_PATH.parent/'train_texts.jsonl', train_split) srsly.write_jsonl(DRIVE_PATH.parent/'eval_texts.jsonl', eval_split) ###Output _____no_output_____
###Markdown We still need to convert the `jsonl` files to the required `json` format; for that I will use the script named `textcatjsonl_to_trainjson.py` from this [repo](https://github.com/explosion/spaCy/tree/master/examples/training/textcat_example_data). Let's download the script from the repo.
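###Markdown (Another optional check, not in the original notebook:) before converting, we can read back the first record of the `.jsonl` file we just wrote and confirm it still has the `text`, `cats`, and `meta` keys we populated when building `train_jsonl`.
###Code # Optional: peek at the first record of the jsonl file written above.
first_record = next(iter(srsly.read_jsonl(DRIVE_PATH.parent/'train_texts.jsonl')))
print(sorted(first_record.keys()))
###Output _____no_output_____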
###Code !wget -O script.py https://raw.githubusercontent.com/explosion/spaCy/master/examples/training/textcat_example_data/textcatjsonl_to_trainjson.py %%bash python script.py -m en /content/drive/My\ Drive/Spacy/train_texts.jsonl /content/drive/My\ Drive/Spacy python script.py -m en /content/drive/My\ Drive/Spacy/eval_texts.jsonl /content/drive/My\ Drive/Spacy ###Output _____no_output_____
###Markdown Let's try to debug again Validate (again)
###Code #hide_input %%bash (python -m spacy debug-data en \ /content/drive/My\ Drive/Spacy/train_texts.json \ /content/drive/My\ Drive/Spacy/eval_texts.json \ -p 'textcat' \ ) ###Output =========================== Data format validation =========================== ✔ Corpus is loadable =============================== Training stats =============================== Training pipeline: textcat Starting with blank model 'en' 2584 training docs 647 evaluation docs ⚠ 5 training examples also in evaluation data ============================== Vocab & Vectors ============================== ℹ 98859 total words in the data (10688 unique) ℹ No word vectors present in the model ============================ Text Classification ============================ ℹ Text Classification: 3 new label(s), 0 existing label(s) ℹ The train data contains only instances with mutually-exclusive classes. ================================== Summary ================================== ✔ 1 check passed ⚠ 1 warning
###Markdown It worked! Thanks to the answerer of this [question](https://stackoverflow.com/q/62003962/7752347), we now know that our data format is correct. It also turns out there is a `convert` command for producing spacy's JSON format, which is mentioned [here](https://spacy.io/api/cli#convert). The output points out that the evaluation set has some **data leakage**. I will try to remove that now.
###Code new_eval = [annot for annot in eval_split if all([annot['text'] != t['text'] for t in train_split])] len(new_eval), len(eval_split) ###Output _____no_output_____
###Markdown We thought there were 5 samples overlapping with the training data, but it turns out to be six here; anyway, let's validate the data again.
###Code #hide srsly.write_jsonl(DRIVE_PATH.parent/'eval_texts.jsonl', new_eval) #hide !python script.py -m en /content/drive/My\ Drive/Spacy/eval_texts.jsonl /content/drive/My\ Drive/Spacy %%bash (python -m spacy debug-data en \ /content/drive/My\ Drive/Spacy/train_texts.json \ /content/drive/My\ Drive/Spacy/eval_texts.json \ -p 'textcat' \ ) ###Output =========================== Data format validation ===========================
⠙ Loading corpus... ⠹ Loading corpus... ⠸ Loading corpus... ⠼ Loading corpus... ⠴ Loading corpus... ⠦ Loading corpus... ⠧ Loading corpus... ⠇ Loading corpus... ⠏ Loading corpus... ⠙ Loading corpus... ⠹ Loading corpus... ⠸ Loading corpus... ⠼ Loading corpus... ⠴ Loading corpus... ⠦ Loading corpus... ⠧ Loading corpus... ⠇ Loading corpus... ⠏ Loading corpus... ⠙ Loading corpus... ⠹ Loading corpus... ⠸ Loading corpus... ⠼ Loading corpus... ⠴ Loading corpus... ⠦ Loading corpus... ⠧ Loading corpus... ⠇ Loading corpus... ⠏ Loading corpus... ⠙ Loading corpus... ⠹ Loading corpus... ⠸ Loading corpus... ⠼ Loading corpus... ✔ Corpus is loadable  =============================== Training stats =============================== Training pipeline: textcat Starting with blank model 'en' 2584 training docs 641 evaluation docs ✔ No overlap between training and evaluation data  ============================== Vocab & Vectors ============================== ℹ 98859 total words in the data (10688 unique) ℹ No word vectors present in the model  ============================ Text Classification ============================ ℹ Text Classification: 3 new label(s), 0 existing label(s) ℹ The train data contains only instances with mutually-exclusive classes.  ================================== Summary ================================== ✔ 2 checks passed ###Markdown We are all set to start training now! Classifier Training I have made the command to train in CLI, Please refer the comments for details in the order of the arguments given here ###Code %%bash ## Arguement info # Language of text in which the Model is going to be trained # Path to store model # Training data json path # Evaluation data json path # Pipeline components that we are going to train # Number of iterations in total # Nummber of iterations to wait before improvement in eval accuracy # Pretrained model to start with # version # Augmentation for data(2 params) # Model Architecture for text classifier (cnn + bow) (python -m spacy train \ en \ -b en_core_web_sm \ /content/drive/My\ Drive/Spacy/Classifier \ /content/drive/My\ Drive/Spacy/train_texts.json \ /content/drive/My\ Drive/Spacy/train_texts.json \ -p "textcat" \ -n 100 \ -ne 10 \ -t2v /content/drive/My\ Drive/Spacy/Pretrained/fifty_iter/model49.bin \ -V 0.1 \ -nl 0.1 \ -ovl 0.1) ###Output Training pipeline: ['textcat'] Starting with base model 'en_core_web_sm' Adding component to base model 'textcat' Counting training words (limit=0) Loaded pretrained tok2vec for: [] Textcat evaluation score: F1-score macro-averaged across the labels 'POSITIVE, NEGATIVE, NEUTRAL' Itn Textcat Loss Textcat Token % CPU WPS --- ------------ ------- ------- ------- 1 26.738 39.853 100.000 177034 2 5.179 65.120 100.000 157933 3 1.483 76.615 100.000 178008 4 0.686 83.266 100.000 177567 5 0.288 86.236 100.000 169033 6 0.151 88.381 100.000 176679 7 0.090 90.099 100.000 166485 8 0.057 91.000 100.000 171279 9 0.135 92.472 100.000 175907 10 0.028 93.237 100.000 171838 11 0.023 94.147 100.000 175174 12 0.022 94.729 100.000 155840 13 0.021 95.248 100.000 161975 14 0.021 95.485 100.000 168029 15 0.019 95.980 100.000 161440 16 0.018 96.226 100.000 167550 17 0.019 96.713 100.000 172607 18 0.017 96.849 100.000 169682 19 0.017 97.026 100.000 167330 20 0.015 97.299 100.000 173145 21 0.016 97.405 100.000 173020 22 0.015 97.526 100.000 165310 23 0.014 97.704 100.000 165994 24 0.013 97.865 100.000 176089 25 0.013 98.106 100.000 172153 26 0.013 98.201 100.000 172878 27 0.013 98.241 100.000 175909 28 0.012 98.320 100.000 170099 29 0.013 
98.400 100.000 175274 30 0.012 98.481 100.000 170135 31 0.012 98.521 100.000 164726 32 0.011 98.536 100.000 171204 33 0.011 98.536 100.000 163467 34 0.011 98.576 100.000 150728 35 0.011 98.696 100.000 172780 36 0.010 98.735 100.000 163459 37 0.010 98.695 100.000 162075 38 0.010 98.750 100.000 165827 39 0.010 98.790 100.000 165852 40 0.010 98.830 100.000 174490 41 0.009 98.870 100.000 165485 42 0.010 98.990 100.000 164896 43 0.009 99.045 100.000 172563 44 0.008 99.045 100.000 169908 45 0.009 99.005 100.000 152600 46 0.008 99.084 100.000 166329 47 0.009 99.084 100.000 173841 48 0.008 99.164 100.000 163433 49 0.008 99.203 100.000 162648 50 0.008 99.258 100.000 177108 51 0.009 99.298 100.000 173468 52 0.008 99.298 100.000 169904 53 0.008 99.298 100.000 171979 54 0.008 99.298 100.000 166437 55 0.008 99.298 100.000 170520 56 0.007 99.337 100.000 172712 57 0.007 99.337 100.000 174966 58 0.007 99.392 100.000 173173 59 0.008 99.392 100.000 173910 60 0.007 99.392 100.000 169447 61 0.007 99.431 100.000 161931 62 0.007 99.471 100.000 106123 63 0.007 99.471 100.000 177625 64 0.007 99.511 100.000 172946 65 0.007 99.511 100.000 173579 66 0.007 99.511 100.000 172204 67 0.007 99.550 100.000 172994 68 0.006 99.550 100.000 174403 69 0.007 99.590 100.000 173900 70 0.006 99.630 100.000 169824 71 0.007 99.630 100.000 171172 72 0.006 99.669 100.000 172633 73 0.006 99.669 100.000 159052 74 0.007 99.669 100.000 174377 75 0.007 99.669 100.000 163376 76 0.006 99.669 100.000 174366 77 0.007 99.669 100.000 175517 78 0.007 99.669 100.000 175583 79 0.006 99.669 100.000 174024 80 0.006 99.669 100.000 174381 81 0.006 99.669 100.000 177120 82 0.006 99.708 100.000 175032 83 0.006 99.708 100.000 173298 84 0.007 99.708 100.000 171622 85 0.006 99.709 100.000 163705 86 0.006 99.709 100.000 175330 87 0.006 99.709 100.000 178355 88 0.006 99.709 100.000 170868 89 0.006 99.709 100.000 164401 90 0.005 99.709 100.000 173884 91 0.006 99.709 100.000 159754 92 0.006 99.709 100.000 177335 93 0.006 99.709 100.000 169868 94 0.006 99.709 100.000 168164 95 0.005 99.709 100.000 151894 96 0.006 99.709 100.000 171580 97 0.005 99.709 100.000 169471 98 0.005 99.724 100.000 156458 99 0.005 99.724 100.000 168167 100 0.006 99.724 100.000 172201 ✔ Saved model to output directory /content/drive/My Drive/Spacy/Classifier/model-final ⠙ Creating best model... ⠹ Creating best model... ⠸ Creating best model... ⠼ Creating best model... ⠴ Creating best model... ✔ Created best model /content/drive/My Drive/Spacy/Classifier/model-best ###Markdown I also tried to train without the pretrained model (ie)`en_core_web_sm`, The logs for that are here below. (Uncollapse to view), the results are not very different, the evaluation metrics are off the roof. We need to predict the test data and try to submit to the competition for a better picture of the model. 
###Code #collapse %%bash ## Arguement info # Language of text in which the Model is going to be trained # Path to store model # Training data json path # Evaluation data json path # Pipeline components that we are going to train # Number of iterations in total # Nummber of iterations to wait before improvement in eval accuracy # Pretrained model to start with # version # Augmentation for data(2 params) # Model Architecture for text classifier (cnn + bow) (python -m spacy train \ en \ /content/drive/My\ Drive/Spacy/Classifier_without_using_websm \ /content/drive/My\ Drive/Spacy/train_texts.json \ /content/drive/My\ Drive/Spacy/train_texts.json \ -p "textcat" \ -n 100 \ -ne 10 \ -t2v /content/drive/My\ Drive/Spacy/Pretrained/fifty_iter/model49.bin \ -V 0.1 \ -nl 0.1 \ -ovl 0.1) ###Output ✔ Created output directory: /content/drive/My Drive/Spacy/Classifier_without_using_websm Training pipeline: ['textcat'] Starting with blank model 'en' Counting training words (limit=0) Loaded pretrained tok2vec for: [] Textcat evaluation score: F1-score macro-averaged across the labels 'POSITIVE, NEGATIVE, NEUTRAL' Itn Textcat Loss Textcat Token % CPU WPS --- ------------ ------- ------- ------- 1 26.755 40.980 100.000 166278 2 5.293 65.846 100.000 172083 3 1.506 76.992 100.000 175595 4 0.695 83.314 100.000 173543 5 0.293 86.284 100.000 172609 6 0.156 88.784 100.000 171486 7 0.091 90.136 100.000 161118 8 0.056 91.761 100.000 156752 9 0.112 92.442 100.000 167948 10 0.028 93.329 100.000 162446 11 0.024 94.144 100.000 165753 12 0.022 95.206 100.000 168336 13 0.021 95.769 100.000 161408 14 0.020 96.150 100.000 162562 15 0.019 96.474 100.000 163309 16 0.018 96.775 100.000 168399 17 0.018 97.140 100.000 169412 18 0.017 97.357 100.000 171364 19 0.017 97.503 100.000 172552 20 0.016 97.584 100.000 167923 21 0.016 97.678 100.000 168228 22 0.015 97.934 100.000 158830 23 0.014 98.055 100.000 170587 24 0.013 98.216 100.000 161772 25 0.014 98.256 100.000 160948 26 0.013 98.296 100.000 163401 27 0.013 98.391 100.000 168392 28 0.012 98.351 100.000 162147 29 0.012 98.391 100.000 171460 30 0.012 98.511 100.000 171279 31 0.012 98.511 100.000 161304 32 0.011 98.511 100.000 171576 33 0.011 98.511 100.000 171248 34 0.011 98.591 100.000 166902 35 0.010 98.710 100.000 164750 36 0.011 98.830 100.000 164097 37 0.011 98.790 100.000 170317 38 0.010 98.790 100.000 163521 39 0.010 98.830 100.000 162378 40 0.009 98.964 100.000 164281 41 0.009 98.964 100.000 173645 42 0.011 99.004 100.000 165681 43 0.009 99.044 100.000 165916 44 0.009 99.044 100.000 168503 45 0.008 99.044 100.000 166608 46 0.008 99.123 100.000 170394 47 0.009 99.084 100.000 171932 48 0.008 99.124 100.000 172888 49 0.009 99.084 100.000 169469 50 0.009 99.084 100.000 167170 51 0.008 99.084 100.000 169762 52 0.008 99.124 100.000 166178 53 0.008 99.124 100.000 161415 54 0.008 99.164 100.000 164241 55 0.008 99.164 100.000 172629 56 0.008 99.164 100.000 164923 57 0.008 99.243 100.000 160153 58 0.007 99.243 100.000 171699 59 0.007 99.283 100.000 165604 60 0.008 99.323 100.000 161672 61 0.007 99.362 100.000 157016 62 0.007 99.417 100.000 171005 63 0.007 99.417 100.000 168709 64 0.007 99.417 100.000 170886 65 0.007 99.417 100.000 164144 66 0.007 99.417 100.000 154789 67 0.007 99.417 100.000 162214 68 0.006 99.457 100.000 164467 69 0.006 99.457 100.000 169052 70 0.006 99.496 100.000 168125 71 0.007 99.496 100.000 164085 72 0.006 99.575 100.000 163078 73 0.006 99.575 100.000 162955 74 0.006 99.575 100.000 166206 75 0.007 99.575 100.000 164477 76 0.006 99.575 100.000 169814 77 0.006 
99.575 100.000 162547 78 0.006 99.575 100.000 168980 79 0.007 99.575 100.000 172534 80 0.006 99.575 100.000 161797 81 0.007 99.575 100.000 162510 82 0.006 99.575 100.000 172787 83 0.005 99.535 100.000 159187 84 0.006 99.535 100.000 168200 85 0.005 99.614 100.000 167757 86 0.006 99.614 100.000 158842 87 0.006 99.654 100.000 166849 88 0.005 99.654 100.000 162507 89 0.006 99.654 100.000 167156 90 0.005 99.654 100.000 97872 91 0.006 99.654 100.000 162397 92 0.006 99.708 100.000 168693 93 0.005 99.708 100.000 167645 94 0.005 99.708 100.000 163485 95 0.006 99.708 100.000 171732 96 0.005 99.708 100.000 165686 97 0.005 99.708 100.000 167604 98 0.005 99.708 100.000 166435 99 0.005 99.708 100.000 161645 100 0.005 99.708 100.000 171467 ✔ Saved model to output directory /content/drive/My Drive/Spacy/Classifier_without_using_websm/model-final ⠙ Creating best model... ⠹ Creating best model... ⠸ Creating best model... ✔ Created best model /content/drive/My Drive/Spacy/Classifier_without_using_websm/model-best ###Markdown Prediction on test data ###Code test_data = pd.read_csv(DATA_PATH/'test.csv', index_col=0) test_data.head() ###Output _____no_output_____ ###Markdown Clean test data We will clean the test data of links with regex as well. ###Code test_data['original_text'].replace( # Regex pattern to match : the text to replace with {'(https?:\/\/.*|pic.*)[\r\n]*' : ''}, regex=True, inplace=True) test_data.shape list_of_test_texts = test_data['original_text'].tolist() ###Output _____no_output_____ ###Markdown Let's load the Spacy model from our training ###Code import spacy textcat_mod = spacy.load(DRIVE_PATH.parent/'Classifier/model-best') ###Output _____no_output_____ ###Markdown I will try to fasten the prediction by using multithreading as mentioned [here](https://explosion.ai/blog/multithreading-with-cython) ###Code d = textcat_mod(list_of_test_texts[0]) d.cats max(d.cats, key=lambda x: d.cats[x]) # to facilitate mapping the predictions label_map = {'POSITIVE':1, 'NEGATIVE':-1, 'NEUTRAL':0} # to gather predictions preds = [] for doc in textcat_mod.pipe(list_of_test_texts, n_threads=4, batch_size=100): pred_cls = max(doc.cats, key=lambda x: doc.cats[x]) preds.append(label_map[pred_cls]) len(preds), len(list_of_test_texts) ###Output _____no_output_____ ###Markdown Let's form the submission ###Code sub_df = pd.DataFrame( preds, index=test_data.index, columns=['sentiment_class'] ) sub_df.shape sub_df.head() sub_df.to_csv(DRIVE_PATH.parent/'submission.csv') ###Output _____no_output_____ ###Markdown The submitted predictions scored a mere `39/100` in weighted f1-score, that's disappointing. -_- Let's analyze the predictions Prediction distribution ###Code sub_df['sentiment_class'].value_counts().plot(kind='bar') sub_df['sentiment_class'].value_counts() ###Output _____no_output_____ ###Markdown This looks very similar to the train data ###Code train_data['sentiment_class'].value_counts() ###Output _____no_output_____
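###Markdown (An optional follow-up, not in the original notebook:) to compare the two distributions more directly, we can put the normalized class frequencies of the training labels and of the test predictions side by side.
###Code # Optional: compare normalized class distributions (training labels vs. test predictions).
import pandas as pd

comparison = pd.DataFrame({
    'train': train_data['sentiment_class'].value_counts(normalize=True),
    'predictions': sub_df['sentiment_class'].value_counts(normalize=True),
})
comparison
###Output _____no_output_____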
examples/ch07/snippets_ipynb/07_05selfcheck.ipynb
###Markdown ![Self Check Exercises check mark image](files/art/check.png) 7.5 Self Check **2. _(IPython Session)_** Use NumPy function `arange` to create an `array` of 20 even integers from 2 through 40, then reshape the result into a 4-by-5 `array`.**Answer:** ###Code import numpy as np np.arange(2, 41, 2).reshape(4, 5) ########################################################################## # (C) Copyright 2019 by Deitel & Associates, Inc. and # # Pearson Education, Inc. All Rights Reserved. # # # # DISCLAIMER: The authors and publisher of this book have used their # # best efforts in preparing the book. These efforts include the # # development, research, and testing of the theories and programs # # to determine their effectiveness. The authors and publisher make # # no warranty of any kind, expressed or implied, with regard to these # # programs or to the documentation contained in these books. The authors # # and publisher shall not be liable in any event for incidental or # # consequential damages in connection with, or arising out of, the # # furnishing, performance, or use of these programs. # ########################################################################## ###Output _____no_output_____
examples/buffering_observations.ipynb
###Markdown Buffering Observations
###Code import numpy as np import matplotlib.pyplot as plt from river import naive_bayes from river import stream from river.metrics import Accuracy, Rolling from river.drift import ADWIN from streamselect.concept_representations import ErrorRateRepresentation from streamselect.states import State from streamselect.repository import Repository, AbsoluteValueComparer from streamselect.adaptive_learning import BaseAdaptiveLearner, BaseBufferedAdaptiveLearner, get_constant_max_buffer_scheduler, get_increasing_buffer_scheduler ###Output _____no_output_____
###Markdown Let us consider a data set featuring concept drift.
###Code rng = np.random.default_rng(seed=1) X = rng.normal(0.5, 0.5, 10000) T = np.linspace(0, 10000, 10000) Y = X > (0.0) concept_drift_t = 5000 Y[concept_drift_t:] = X[concept_drift_t:] > (1.0) plt.scatter(T, X, c=Y) plt.xlabel("Time") plt.ylabel("x") ###Output _____no_output_____
###Markdown In this univariate data set, the task is to predict a class y from the labels {0, 1} determined by some threshold on x. Halfway through the dataset the threshold changes, so observations that would previously have been labelled as 1 (those with x between the old and new thresholds) are now labelled as 0. This is an example of concept drift. In the example shown in the `detecting_concept_drift.ipynb` file, we adapt to this concept drift using an adaptive learning system, which detects the change and constructs a new classifier to handle the new distribution of data.
###Code # Set some parameters for our system window_size = 25 update_period = 1 classifier: BaseAdaptiveLearner = BaseAdaptiveLearner( classifier_constructor=naive_bayes.GaussianNB, representation_constructor=ErrorRateRepresentation, representation_comparer=AbsoluteValueComparer(), drift_detector_constructor=ADWIN, representation_window_size=window_size, representation_update_period=update_period, drift_detection_mode="lower" ) accuracy = [] acc_metric = Rolling(metric=Accuracy(), window_size=window_size) observations = [] for t, (x, y) in enumerate(stream.iter_array(X.reshape(-1, 1), Y)): p = classifier.predict_one(x, timestep=t) classifier.learn_one(x, y, timestep=t) acc_metric.update(y, p) accuracy.append(acc_metric.get()) observations.append(classifier.performance_monitor.last_observation) print(f"Overall average accuracy was: {np.mean(accuracy)}") plt.plot(T, accuracy, label="Rolling Accuracy") plt.ylim([0, 1]) plt.legend() plt.show() ###Output Overall average accuracy was: 0.9722441416977828
###Markdown However, let's inspect which observations were classified by each state.
###Code state_ids = list(classifier.repository.states) for s_id in state_ids: ob_X = [ob.x[list(ob.x)[0]] for ob in observations if ob is not None and ob.active_state_id == s_id] ob_T = [ob.seen_at for ob in observations if ob is not None and ob.active_state_id == s_id] ob_y = [ob.y for ob in observations if ob is not None and ob.active_state_id == s_id] plt.scatter(ob_T, ob_X, c=ob_y) plt.xlim([0, 10000]) plt.xlabel("Time") plt.ylabel("x") plt.title(f"Classified by state {s_id}") plt.show() ###Output _____no_output_____
###Markdown We observe that the adaptive learning system was able to roughly split the data into the two concepts; however, there is a short period where state 0 is classifying and learning from observations drawn from the following concept. This overlap period is inevitable, because there will always be some delay in detecting concept drift.
This period harms performance in two ways:
- We are classifying observations with a state representing the wrong concept.
- We are training a state with observations from a different concept.
While we can't avoid classifying with the wrong state (it is our best guess), we *can* avoid training a state on these wrong observations by introducing some delay. The basic idea is that we add observations to a *buffer*, which releases observations after some delay. If we do not detect a concept drift while an observation is buffered, we can assume it correctly represents the current state and we can learn from it. We can use the BaseBufferedAdaptiveLearner to utilize a buffer. First, we show that it behaves identically with a buffer which delays observations by 0 timesteps, the default.
###Code # Set some parameters for our system window_size = 25 update_period = 1 classifier: BaseAdaptiveLearner = BaseBufferedAdaptiveLearner( classifier_constructor=naive_bayes.GaussianNB, representation_constructor=ErrorRateRepresentation, representation_comparer=AbsoluteValueComparer(), drift_detector_constructor=ADWIN, representation_window_size=window_size, representation_update_period=update_period, drift_detection_mode="lower", buffer_timeout_max=0.0, ) accuracy = [] acc_metric = Rolling(metric=Accuracy(), window_size=window_size) observations_buffered = [] for t, (x, y) in enumerate(stream.iter_array(X.reshape(-1, 1), Y)): p = classifier.predict_one(x, timestep=t) classifier.learn_one(x, y, timestep=t) acc_metric.update(y, p) accuracy.append(acc_metric.get()) observations_buffered.append(classifier.performance_monitor.last_observation) # Since we haven't set a buffer_timeout, the buffered system should behave identically. assert observations_buffered[t].active_state_id == observations[t].active_state_id assert observations_buffered[t].predictions[observations_buffered[t].active_state_id] == observations[t].predictions[observations[t].active_state_id] print(f"Overall average accuracy was: {np.mean(accuracy)}") plt.plot(T, accuracy, label="Rolling Accuracy") plt.ylim([0, 1]) plt.legend() plt.show() ###Output Overall average accuracy was: 0.9722441416977828
###Markdown Now we test with a longer buffer_timeout.
###Code # Set some parameters for our system window_size = 25 update_period = 1 # We will test setting the buffer_timeout to 500, i.e., delay learning from observations for 500 # timesteps.
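# Note (added for clarity): get_constant_max_buffer_scheduler keeps the buffer timeout fixed at
# buffer_timeout_max for the whole run, unlike the increasing schedule tried later in this notebook.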
buffer_timeout_max = 500 classifier: BaseAdaptiveLearner = BaseBufferedAdaptiveLearner( classifier_constructor=naive_bayes.GaussianNB, representation_constructor=ErrorRateRepresentation, representation_comparer=AbsoluteValueComparer(), drift_detector_constructor=ADWIN, representation_window_size=window_size, representation_update_period=update_period, drift_detection_mode="lower", buffer_timeout_max=buffer_timeout_max, buffer_timeout_scheduler=get_constant_max_buffer_scheduler() ) accuracy = [] acc_metric = Rolling(metric=Accuracy(), window_size=window_size) observations_buffered = [] for t, (x, y) in enumerate(stream.iter_array(X.reshape(-1, 1), Y)): p = classifier.predict_one(x, timestep=t) classifier.learn_one(x, y, timestep=t) acc_metric.update(y, p) accuracy.append(acc_metric.get()) observations_buffered.append(classifier.performance_monitor.last_observation) print(f"Overall average accuracy was: {np.mean(accuracy)}") plt.plot(T, accuracy, label="Rolling Accuracy") plt.ylim([0, 1]) plt.legend() plt.show() state_ids = list(classifier.repository.states) for s_id in state_ids: ob_X = [ob.x[list(ob.x)[0]] for ob in observations_buffered if ob is not None and ob.active_state_id == s_id] ob_T = [ob.seen_at for ob in observations_buffered if ob is not None and ob.active_state_id == s_id] ob_y = [ob.y for ob in observations_buffered if ob is not None and ob.active_state_id == s_id] plt.scatter(ob_T, ob_X, c=ob_y) plt.xlim([0, 10000]) plt.xlabel("Time") plt.ylabel("x") plt.title(f"Classified by state {s_id}") plt.show() ###Output _____no_output_____ ###Markdown The buffer seems correctly only trained states on observations from a single concept, but it also seems to have made performance worse! Why is this?Well, the buffer delays training on new observations by buffer_timeout. This means with a buffer_timeout of 500 observations, we cannot even start learning until 500 observations have been predicted.There seems to be some risk/reward tradeoff. The risk of training on a new observation is that we may introduce errors by training on an observation from a different concept. The reward is that we may improve performance be training on an observation from the same concept.How can we incorporate this into our buffer_timeout?We note that at the start of a classifiers life, it's possible reward is very high as a new observation may improve accuracy substantially. As it trains, the possible reward gets lower as the classifier converges to some maximum performance. On the otherhand, risk is relatively stationary (we could look at other ways of measuring risk on a per observation level, this is future work as per my thesis!). This means we could schedule buffer_timeout to account for this changing tradeoff by increasing over time.In the example below, we schedule buffer_timeout to be the amount of training weight seen by the active state, up to some maximum. 
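###Markdown As a standalone illustration of that schedule (separate from the streamselect API used in the example below), the timeout simply grows with the training weight the active state has seen, capped at a maximum, mirroring the `min(seen_weight, buffer_timeout_max)` rule noted in the code comments below.
###Code # Minimal sketch of the increasing buffer-timeout schedule (illustration only;
# the real behaviour comes from get_increasing_buffer_scheduler below).
def schedule_buffer_timeout(seen_weight: float, timeout_max: float = 500.0) -> float:
    """Delay learning by min(seen_weight, timeout_max) timesteps."""
    return min(seen_weight, timeout_max)

# Early in a state's life the delay is small (little has been learned yet, so the potential
# reward of training immediately is high); it saturates at timeout_max once the state has
# seen plenty of data.
print([schedule_buffer_timeout(w) for w in (0, 10, 100, 500, 5000)])
###Output _____no_output_____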
###Code # Set some parameters for out system window_size = 25 update_period = 1 # We will test setting the maximum buffer_timout buffer_timeout_max = 500 classifier: BaseAdaptiveLearner = BaseBufferedAdaptiveLearner( classifier_constructor=naive_bayes.GaussianNB, representation_constructor=ErrorRateRepresentation, representation_comparer=AbsoluteValueComparer(), drift_detector_constructor=ADWIN, representation_window_size=window_size, representation_update_period=update_period, drift_detection_mode="lower", buffer_timeout_max=buffer_timeout_max, buffer_timeout_scheduler=get_increasing_buffer_scheduler(1) ) accuracy = [] acc_metric = Rolling(metric=Accuracy(), window_size=window_size) observations_buffered = [] for t, (x, y) in enumerate(stream.iter_array(X.reshape(-1, 1), Y)): # The buffer scheduling using get_increasing_buffer_scheduler is equivalent to calling: # classifier.buffer_timeout = min(classifier.get_active_state().seen_weight, buffer_timeout_max) p = classifier.predict_one(x, timestep=t) classifier.learn_one(x, y, timestep=t) acc_metric.update(y, p) accuracy.append(acc_metric.get()) observations_buffered.append(classifier.performance_monitor.last_observation) print(f"Overall average accuracy was: {np.mean(accuracy)}") plt.plot(T, accuracy, label="Rolling Accuracy") plt.ylim([0, 1]) plt.legend() plt.show() ###Output Overall average accuracy was: 0.9741441416977826 ###Markdown Accuracy has now increased! ###Code state_ids = list(classifier.repository.states) for s_id in state_ids: ob_X = [ob.x[list(ob.x)[0]] for ob in observations_buffered if ob is not None and ob.active_state_id == s_id] ob_T = [ob.seen_at for ob in observations_buffered if ob is not None and ob.active_state_id == s_id] ob_y = [ob.y for ob in observations_buffered if ob is not None and ob.active_state_id == s_id] plt.scatter(ob_T, ob_X, c=ob_y) plt.xlim([0, 10000]) plt.xlabel("Time") plt.ylabel("x") plt.title(f"Classified by state {s_id}") plt.show() ###Output _____no_output_____
09_multi_omics/09_a_genes_to_peaks from class.ipynb
###Markdown ChIP-Seq and RNA-SeqWe now want to combine our ChIP-Seq and RNA-Seq data, using pandas. ###Code import pandas as pd ###Output _____no_output_____ ###Markdown Gene annotationsWe can read our GFF file of gene annotations. It's just a text table.There are no headers, though. The columns of the file are:```['seqid', 'source', 'type', 'start', 'end', 'score', 'strand', 'phase', 'attributes']``` ###Code genes = pd.read_csv("~/shared/MCB280A_data/S288C_R64-3-1/saccharomyces_cerevisiae_R64-3-1_20210421.gff", delimiter='\t', header=None, names=['seqid', 'source', 'type', 'start', 'end', 'score', 'strand', 'phase', 'attributes']) genes.head() ###Output _____no_output_____ ###Markdown There are many different "types" of entries in the file. We can use the `value_counts()` method on the `type` column to see all of them. ###Code genes['type'].value_counts() ###Output _____no_output_____ ###Markdown We only want the `"gene"` type of entry and so we'll pick out just these rows out of the data frame. ###Code genes = genes[genes['type'] == "gene"] genes.head() ###Output _____no_output_____ ###Markdown The systematic name of the gene is buried inside the `attributes` column. This column has a bunch of labeled data in the form _key_=_value_.We can extract it using the `.str.split()` method and then use `.str.replace()` to get rid of the wanted `ID=` part of it to create a `name` column. ###Code genes['name'] = genes['attributes'].str.split(';').str[0].str.replace("ID=", "") genes.head() ###Output _____no_output_____ ###Markdown The data table lists the coordinates of genes, but we want to look for ChIP-Seq peaks in the promoter regions. For a `+` strand gene, the promoter is to the left (smaller coordinate numbers) and for a `-` strand gene it is to the right.We'll use a 1 kb window here.For a `+` strand gene, the promoter is _start_-1000 to _start_-1.For a `-` strand gene, the promoter is _end_+1 to _end_+1000.We can use the `np.where(...)` function from `numpy` to handle this situation. We'll compute the starting position for each promoter in the `prmstart` column. ###Code import numpy as np genes['prmstart'] = np.where(genes['strand'] == "+", genes['start'] - 1000, genes['end'] + 1) genes.head() ###Output _____no_output_____ ###Markdown We'll be sure to check our coordinate math for genes on each strand.Next, we'll make a `prmend` column that is 1000 bases after the start. We don't need to do anything different based on the strand here. ###Code genes['prmend'] = genes['prmstart'] + 999 genes.head() ###Output _____no_output_____ ###Markdown ChIP-Seq PeaksNow, we'll read in the table of ChIP-Seq peaks. ###Code peaks = pd.read_csv("~/Hsf1/ChIP-Seq/macs2/Hsf1_ChIP_heatshk_peaks.xls", delimiter = '\t', comment = '#') peaks.head() ###Output _____no_output_____ ###Markdown At this point we need to loop over each peak and find out which promoter(s) it affects.We'll do this in a multi-step process.1. Generate a dictionary where keys are gene names and values are associated ChIP-Seq peak names. There won't be an entry for every gene.1. Convert this dictionary into a pandas `Series` and merge it in to a new column of the genes data frame, holding peak names. It will have many `NaN` entries for "missing" data.1. Merge the whole table of peaks (with enrichment and p-values) into the gene table based on the names.First, we will loop over each peak, using the `.itertuples()` method. In class, we'll just use the top 10 peaks by significance. 
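###Markdown (A small illustrative aside, not from the class:) the test inside the loop below just asks whether a peak summit falls inside a promoter window, so we can check the idea on made-up coordinates first.
###Code # Toy illustration of the promoter-overlap test used in the loop below (made-up values).
def summit_in_promoter(summit, prmstart, prmend):
    """True when a peak summit lies within a promoter window (inclusive)."""
    return prmstart <= summit <= prmend

# A hypothetical + strand gene starting at 12000 has promoter 11000..11999.
print(summit_in_promoter(11500, 11000, 11999))  # inside the window
print(summit_in_promoter(12500, 11000, 11999))  # outside the window
###Output _____no_output_____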
###Code gene_peaks = {} top_peaks = peaks.sort_values('fold_enrichment', ascending=False).head(10) for peak in top_peaks.itertuples(): for gene in genes.itertuples(): if (peak.chr == gene.seqid) and (peak.abs_summit >= gene.prmstart) and (peak.abs_summit <= gene.prmend): gene_peaks[gene.name] = peak.name gene_peaks ###Output _____no_output_____
###Markdown Now we need to convert this into a `Series` and give it a name.
###Code gene_peaks = pd.Series(gene_peaks, name='peak') gene_peaks ###Output _____no_output_____
###Markdown We can merge our named `Series` into the data frame of genes. We want to match up the `name` column in the genes table with the "index" of the `Series`.
###Code genes2 = pd.merge(genes, gene_peaks, left_on='name', right_index=True, how='left') genes2.head() ###Output _____no_output_____
###Markdown To make sure this worked, check on the row for the gene `YGR210C`, which does have a peak.
###Code genes2[genes2['name'] == "YGR210C"] ###Output _____no_output_____
###Markdown Now, we will merge in the peaks table by matching up the `peak` column with the `name` column in the peaks table.
###Code genes3 = pd.merge(genes2, peaks, left_on='peak', right_on='name', how='left') genes3.head() ###Output _____no_output_____
###Markdown Again, we'll need to check on a row that has a peak to be sure it worked.
###Code genes3[genes3['name_x'] == "YGR210C"] ###Output _____no_output_____
###Markdown RNA-Seq data Finally, we're ready to read in the table of RNA-Seq results.
###Code results = pd.read_csv("~/Hsf1/RNA-Seq/full.results.csv", index_col = 0) results.head() ###Output _____no_output_____
###Markdown Again we can pull out some statistically significant genes to be sure it worked. Notice that almost every significant gene is down-regulated.
###Code results[results['padj'] < 0.1] ###Output _____no_output_____
###Markdown Finally, we're ready to merge the results with the genes. We'll match up the `name_x` column of the genes (which was renamed because the peak table also had a name column) with the "index" of the results table.
###Code genes4 = pd.merge(genes3, results, left_on='name_x', right_index=True, how='left') genes4.head() ###Output _____no_output_____
###Markdown Let's check out the ChIP-Seq genes in the RNA-Seq data.
###Code genes4.sort_values('fold_enrichment', ascending=False).head() ###Output _____no_output_____
###Markdown Of course, most genes have a `NaN` missing value for fold enrichment. We can use the `pd.isna()` function to test whether a value is `NaN` or not. Rows where `fold_enrichment` is _not_ `NaN` are genes with a potential ChIP-Seq peak.
###Code pd.isna(genes4['fold_enrichment']).value_counts() ###Output _____no_output_____
###Markdown Many of these genes have significant adjusted p-values and negative log fold-changes. Let's look at this trend more rigorously by plotting the histogram of fold-changes for these groups. Start by importing matplotlib.pyplot.
###Code import matplotlib.pyplot as plt ###Output _____no_output_____
###Markdown We'll make a histogram of `log2FoldChange` values for all genes.
###Code plt.hist(genes4['log2FoldChange']) ###Output _____no_output_____
###Markdown Then, we'll make a similar histogram, for genes with a ChIP-Seq peak. We'll use the `range` parameter so the histograms are easier to compare.
###Code genes_with_peak = genes4[~pd.isna(genes4['fold_enrichment'])] plt.hist(genes_with_peak['log2FoldChange'], range=(-4,4)) ###Output _____no_output_____ ###Markdown This looks different—but is it significant?We'll use the `mannwhitneyu` function from `scipy.stats` to run a statistical test. To do this, we'll need to remove the `NaN` values using the `.dropna()` method. ###Code import scipy.stats as stats nochip = genes4[pd.isna(genes4['fold_enrichment'])]['log2FoldChange'].dropna() yeschip = genes4[~pd.isna(genes4['fold_enrichment'])]['log2FoldChange'].dropna() stats.mannwhitneyu(nochip, yeschip) ###Output _____no_output_____ ###Markdown Finally, we'll make a table of the high-confidence targets that have a ChIP-Seq peak and an expression change. ###Code targets = genes4[(~pd.isna(genes4['fold_enrichment'])) & (genes4['padj'] < 0.1)] targets ###Output _____no_output_____
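###Markdown (An optional addition, not from the class notebook:) the Mann-Whitney test above only reports a p-value, so it can help to also look at the median log2 fold-change of each group to see the direction of the shift.
###Code # Optional: medians of the two groups compared by the Mann-Whitney test above.
print("median log2FC, genes without a ChIP-Seq peak:", nochip.median())
print("median log2FC, genes with a ChIP-Seq peak:", yeschip.median())
###Output _____no_output_____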
courses/Neural Networks and Deep Learning/WEEK 4/Building_your_Deep_Neural_Network_Step_by_Step_v8a.ipynb
###Markdown Building your Deep Neural Network: Step by StepWelcome to your week 4 assignment (part 1 of 2)! You have previously trained a 2-layer Neural Network (with a single hidden layer). This week, you will build a deep neural network, with as many layers as you want!- In this notebook, you will implement all the functions required to build a deep neural network.- In the next assignment, you will use these functions to build a deep neural network for image classification.**After this assignment you will be able to:**- Use non-linear units like ReLU to improve your model- Build a deeper neural network (with more than 1 hidden layer)- Implement an easy-to-use neural network class**Notation**:- Superscript $[l]$ denotes a quantity associated with the $l^{th}$ layer. - Example: $a^{[L]}$ is the $L^{th}$ layer activation. $W^{[L]}$ and $b^{[L]}$ are the $L^{th}$ layer parameters.- Superscript $(i)$ denotes a quantity associated with the $i^{th}$ example. - Example: $x^{(i)}$ is the $i^{th}$ training example.- Lowerscript $i$ denotes the $i^{th}$ entry of a vector. - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the $l^{th}$ layer's activations).Let's get started! Updates to Assignment If you were working on a previous version* The current notebook filename is version "4a". * You can find your work in the file directory as version "4".* To see the file directory, click on the Coursera logo at the top left of the notebook. List of Updates* compute_cost unit test now includes tests for Y = 0 as well as Y = 1. This catches a possible bug before students get graded.* linear_backward unit test now has a more complete unit test that catches a possible bug before students get graded. 1 - PackagesLet's first import all the packages that you will need during this assignment. - [numpy](www.numpy.org) is the main package for scientific computing with Python.- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.- dnn_utils provides some necessary functions for this notebook.- testCases provides some test cases to assess the correctness of your functions- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work. Please don't change the seed. ###Code import numpy as np import h5py import matplotlib.pyplot as plt from testCases_v4a import * from dnn_utils_v2 import sigmoid, sigmoid_backward, relu, relu_backward %matplotlib inline plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' %load_ext autoreload %autoreload 2 np.random.seed(1) ###Output _____no_output_____ ###Markdown 2 - Outline of the AssignmentTo build your neural network, you will be implementing several "helper functions". These helper functions will be used in the next assignment to build a two-layer neural network and an L-layer neural network. Each small helper function you will implement will have detailed instructions that will walk you through the necessary steps. Here is an outline of this assignment, you will:- Initialize the parameters for a two-layer network and for an $L$-layer neural network.- Implement the forward propagation module (shown in purple in the figure below). - Complete the LINEAR part of a layer's forward propagation step (resulting in $Z^{[l]}$). - We give you the ACTIVATION function (relu/sigmoid). - Combine the previous two steps into a new [LINEAR->ACTIVATION] forward function. 
- Stack the [LINEAR->RELU] forward function L-1 time (for layers 1 through L-1) and add a [LINEAR->SIGMOID] at the end (for the final layer $L$). This gives you a new L_model_forward function.- Compute the loss.- Implement the backward propagation module (denoted in red in the figure below). - Complete the LINEAR part of a layer's backward propagation step. - We give you the gradient of the ACTIVATE function (relu_backward/sigmoid_backward) - Combine the previous two steps into a new [LINEAR->ACTIVATION] backward function. - Stack [LINEAR->RELU] backward L-1 times and add [LINEAR->SIGMOID] backward in a new L_model_backward function- Finally update the parameters. **Figure 1****Note** that for every forward function, there is a corresponding backward function. That is why at every step of your forward module you will be storing some values in a cache. The cached values are useful for computing gradients. In the backpropagation module you will then use the cache to calculate the gradients. This assignment will show you exactly how to carry out each of these steps. 3 - InitializationYou will write two helper functions that will initialize the parameters for your model. The first function will be used to initialize parameters for a two layer model. The second one will generalize this initialization process to $L$ layers. 3.1 - 2-layer Neural Network**Exercise**: Create and initialize the parameters of the 2-layer neural network.**Instructions**:- The model's structure is: *LINEAR -> RELU -> LINEAR -> SIGMOID*. - Use random initialization for the weight matrices. Use `np.random.randn(shape)*0.01` with the correct shape.- Use zero initialization for the biases. Use `np.zeros(shape)`. ###Code # GRADED FUNCTION: initialize_parameters def initialize_parameters(n_x, n_h, n_y): """ Argument: n_x -- size of the input layer n_h -- size of the hidden layer n_y -- size of the output layer Returns: parameters -- python dictionary containing your parameters: W1 -- weight matrix of shape (n_h, n_x) b1 -- bias vector of shape (n_h, 1) W2 -- weight matrix of shape (n_y, n_h) b2 -- bias vector of shape (n_y, 1) """ np.random.seed(1) ### START CODE HERE ### (≈ 4 lines of code) W1 = np.random.randn(n_h, n_x) * 0.01 b1 = np.zeros((n_h, 1)) W2 = np.random.randn(n_y, n_h) * 0.01 b2 = np.zeros((n_y, 1)) ### END CODE HERE ### assert(W1.shape == (n_h, n_x)) assert(b1.shape == (n_h, 1)) assert(W2.shape == (n_y, n_h)) assert(b2.shape == (n_y, 1)) parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2} return parameters parameters = initialize_parameters(3,2,1) print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) ###Output W1 = [[ 0.01624345 -0.00611756 -0.00528172] [-0.01072969 0.00865408 -0.02301539]] b1 = [[ 0.] [ 0.]] W2 = [[ 0.01744812 -0.00761207]] b2 = [[ 0.]] ###Markdown **Expected output**: **W1** [[ 0.01624345 -0.00611756 -0.00528172] [-0.01072969 0.00865408 -0.02301539]] **b1** [[ 0.] [ 0.]] **W2** [[ 0.01744812 -0.00761207]] **b2** [[ 0.]] 3.2 - L-layer Neural NetworkThe initialization for a deeper L-layer neural network is more complicated because there are many more weight matrices and bias vectors. When completing the `initialize_parameters_deep`, you should make sure that your dimensions match between each layer. Recall that $n^{[l]}$ is the number of units in layer $l$. 
Thus for example if the size of our input $X$ is $(12288, 209)$ (with $m=209$ examples) then: **Shape of W** **Shape of b** **Activation** **Shape of Activation** **Layer 1** $(n^{[1]},12288)$ $(n^{[1]},1)$ $Z^{[1]} = W^{[1]} X + b^{[1]} $ $(n^{[1]},209)$ **Layer 2** $(n^{[2]}, n^{[1]})$ $(n^{[2]},1)$ $Z^{[2]} = W^{[2]} A^{[1]} + b^{[2]}$ $(n^{[2]}, 209)$ $\vdots$ $\vdots$ $\vdots$ $\vdots$ $\vdots$ **Layer L-1** $(n^{[L-1]}, n^{[L-2]})$ $(n^{[L-1]}, 1)$ $Z^{[L-1]} = W^{[L-1]} A^{[L-2]} + b^{[L-1]}$ $(n^{[L-1]}, 209)$ **Layer L** $(n^{[L]}, n^{[L-1]})$ $(n^{[L]}, 1)$ $Z^{[L]} = W^{[L]} A^{[L-1]} + b^{[L]}$ $(n^{[L]}, 209)$ Remember that when we compute $W X + b$ in python, it carries out broadcasting. For example, if: $$ W = \begin{bmatrix} j & k & l\\ m & n & o \\ p & q & r \end{bmatrix}\;\;\; X = \begin{bmatrix} a & b & c\\ d & e & f \\ g & h & i \end{bmatrix} \;\;\; b =\begin{bmatrix} s \\ t \\ u\end{bmatrix}\tag{2}$$Then $WX + b$ will be:$$ WX + b = \begin{bmatrix} (ja + kd + lg) + s & (jb + ke + lh) + s & (jc + kf + li)+ s\\ (ma + nd + og) + t & (mb + ne + oh) + t & (mc + nf + oi) + t\\ (pa + qd + rg) + u & (pb + qe + rh) + u & (pc + qf + ri)+ u\end{bmatrix}\tag{3} $$ **Exercise**: Implement initialization for an L-layer Neural Network. **Instructions**:- The model's structure is *[LINEAR -> RELU] $ \times$ (L-1) -> LINEAR -> SIGMOID*. I.e., it has $L-1$ layers using a ReLU activation function followed by an output layer with a sigmoid activation function.- Use random initialization for the weight matrices. Use `np.random.randn(shape) * 0.01`.- Use zeros initialization for the biases. Use `np.zeros(shape)`.- We will store $n^{[l]}$, the number of units in different layers, in a variable `layer_dims`. For example, the `layer_dims` for the "Planar Data classification model" from last week would have been [2,4,1]: There were two inputs, one hidden layer with 4 hidden units, and an output layer with 1 output unit. This means `W1`'s shape was (4,2), `b1` was (4,1), `W2` was (1,4) and `b2` was (1,1). Now you will generalize this to $L$ layers! - Here is the implementation for $L=1$ (one layer neural network). 
It should inspire you to implement the general case (L-layer neural network).```python if L == 1: parameters["W" + str(L)] = np.random.randn(layer_dims[1], layer_dims[0]) * 0.01 parameters["b" + str(L)] = np.zeros((layer_dims[1], 1))``` ###Code # GRADED FUNCTION: initialize_parameters_deep def initialize_parameters_deep(layer_dims): """ Arguments: layer_dims -- python array (list) containing the dimensions of each layer in our network Returns: parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL": Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1]) bl -- bias vector of shape (layer_dims[l], 1) """ np.random.seed(3) parameters = {} L = len(layer_dims) # number of layers in the network for l in range(1, L): ### START CODE HERE ### (≈ 2 lines of code) parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1]) * 0.01 parameters['b' + str(l)] = np.zeros((layer_dims[l], 1)) ### END CODE HERE ### assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1])) assert(parameters['b' + str(l)].shape == (layer_dims[l], 1)) return parameters parameters = initialize_parameters_deep([5,4,3]) print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) ###Output W1 = [[ 0.01788628 0.0043651 0.00096497 -0.01863493 -0.00277388] [-0.00354759 -0.00082741 -0.00627001 -0.00043818 -0.00477218] [-0.01313865 0.00884622 0.00881318 0.01709573 0.00050034] [-0.00404677 -0.0054536 -0.01546477 0.00982367 -0.01101068]] b1 = [[ 0.] [ 0.] [ 0.] [ 0.]] W2 = [[-0.01185047 -0.0020565 0.01486148 0.00236716] [-0.01023785 -0.00712993 0.00625245 -0.00160513] [-0.00768836 -0.00230031 0.00745056 0.01976111]] b2 = [[ 0.] [ 0.] [ 0.]] ###Markdown **Expected output**: **W1** [[ 0.01788628 0.0043651 0.00096497 -0.01863493 -0.00277388] [-0.00354759 -0.00082741 -0.00627001 -0.00043818 -0.00477218] [-0.01313865 0.00884622 0.00881318 0.01709573 0.00050034] [-0.00404677 -0.0054536 -0.01546477 0.00982367 -0.01101068]] **b1** [[ 0.] [ 0.] [ 0.] [ 0.]] **W2** [[-0.01185047 -0.0020565 0.01486148 0.00236716] [-0.01023785 -0.00712993 0.00625245 -0.00160513] [-0.00768836 -0.00230031 0.00745056 0.01976111]] **b2** [[ 0.] [ 0.] [ 0.]] 4 - Forward propagation module 4.1 - Linear Forward Now that you have initialized your parameters, you will do the forward propagation module. You will start by implementing some basic functions that you will use later when implementing the model. You will complete three functions in this order:- LINEAR- LINEAR -> ACTIVATION where ACTIVATION will be either ReLU or Sigmoid. - [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID (whole model)The linear forward module (vectorized over all the examples) computes the following equations:$$Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}\tag{4}$$where $A^{[0]} = X$. **Exercise**: Build the linear part of forward propagation.**Reminder**:The mathematical representation of this unit is $Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}$. You may also find `np.dot()` useful. If your dimensions don't match, printing `W.shape` may help. ###Code # GRADED FUNCTION: linear_forward def linear_forward(A, W, b): """ Implement the linear part of a layer's forward propagation. 
Arguments: A -- activations from previous layer (or input data): (size of previous layer, number of examples) W -- weights matrix: numpy array of shape (size of current layer, size of previous layer) b -- bias vector, numpy array of shape (size of the current layer, 1) Returns: Z -- the input of the activation function, also called pre-activation parameter cache -- a python tuple containing "A", "W" and "b" ; stored for computing the backward pass efficiently """ ### START CODE HERE ### (≈ 1 line of code) Z = np.dot(W,A)+b ### END CODE HERE ### assert(Z.shape == (W.shape[0], A.shape[1])) cache = (A, W, b) return Z, cache A, W, b = linear_forward_test_case() Z, linear_cache = linear_forward(A, W, b) print("Z = " + str(Z)) ###Output Z = [[ 3.26295337 -1.23429987]] ###Markdown **Expected output**: **Z** [[ 3.26295337 -1.23429987]] 4.2 - Linear-Activation ForwardIn this notebook, you will use two activation functions:- **Sigmoid**: $\sigma(Z) = \sigma(W A + b) = \frac{1}{ 1 + e^{-(W A + b)}}$. We have provided you with the `sigmoid` function. This function returns **two** items: the activation value "`a`" and a "`cache`" that contains "`Z`" (it's what we will feed in to the corresponding backward function). To use it you could just call: ``` pythonA, activation_cache = sigmoid(Z)```- **ReLU**: The mathematical formula for ReLu is $A = RELU(Z) = max(0, Z)$. We have provided you with the `relu` function. This function returns **two** items: the activation value "`A`" and a "`cache`" that contains "`Z`" (it's what we will feed in to the corresponding backward function). To use it you could just call:``` pythonA, activation_cache = relu(Z)``` For more convenience, you are going to group two functions (Linear and Activation) into one function (LINEAR->ACTIVATION). Hence, you will implement a function that does the LINEAR forward step followed by an ACTIVATION forward step.**Exercise**: Implement the forward propagation of the *LINEAR->ACTIVATION* layer. Mathematical relation is: $A^{[l]} = g(Z^{[l]}) = g(W^{[l]}A^{[l-1]} +b^{[l]})$ where the activation "g" can be sigmoid() or relu(). Use linear_forward() and the correct activation function. ###Code # GRADED FUNCTION: linear_activation_forward def linear_activation_forward(A_prev, W, b, activation): """ Implement the forward propagation for the LINEAR->ACTIVATION layer Arguments: A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples) W -- weights matrix: numpy array of shape (size of current layer, size of previous layer) b -- bias vector, numpy array of shape (size of the current layer, 1) activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu" Returns: A -- the output of the activation function, also called the post-activation value cache -- a python tuple containing "linear_cache" and "activation_cache"; stored for computing the backward pass efficiently """ if activation == "sigmoid": # Inputs: "A_prev, W, b". Outputs: "A, activation_cache". ### START CODE HERE ### (≈ 2 lines of code) Z, linear_cache = linear_forward(A_prev,W,b) A, activation_cache = sigmoid(Z) ### END CODE HERE ### elif activation == "relu": # Inputs: "A_prev, W, b". Outputs: "A, activation_cache". 
### START CODE HERE ### (≈ 2 lines of code) Z, linear_cache = linear_forward(A_prev,W,b) A, activation_cache = relu(Z) ### END CODE HERE ### assert (A.shape == (W.shape[0], A_prev.shape[1])) cache = (linear_cache, activation_cache) return A, cache A_prev, W, b = linear_activation_forward_test_case() A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "sigmoid") print("With sigmoid: A = " + str(A)) A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "relu") print("With ReLU: A = " + str(A)) ###Output With sigmoid: A = [[ 0.96890023 0.11013289]] With ReLU: A = [[ 3.43896131 0. ]] ###Markdown **Expected output**: **With sigmoid: A ** [[ 0.96890023 0.11013289]] **With ReLU: A ** [[ 3.43896131 0. ]] **Note**: In deep learning, the "[LINEAR->ACTIVATION]" computation is counted as a single layer in the neural network, not two layers. d) L-Layer Model For even more convenience when implementing the $L$-layer Neural Net, you will need a function that replicates the previous one (`linear_activation_forward` with RELU) $L-1$ times, then follows that with one `linear_activation_forward` with SIGMOID. **Figure 2** : *[LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID* model**Exercise**: Implement the forward propagation of the above model.**Instruction**: In the code below, the variable `AL` will denote $A^{[L]} = \sigma(Z^{[L]}) = \sigma(W^{[L]} A^{[L-1]} + b^{[L]})$. (This is sometimes also called `Yhat`, i.e., this is $\hat{Y}$.) **Tips**:- Use the functions you had previously written - Use a for loop to replicate [LINEAR->RELU] (L-1) times- Don't forget to keep track of the caches in the "caches" list. To add a new value `c` to a `list`, you can use `list.append(c)`. ###Code # GRADED FUNCTION: L_model_forward def L_model_forward(X, parameters): """ Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation Arguments: X -- data, numpy array of shape (input size, number of examples) parameters -- output of initialize_parameters_deep() Returns: AL -- last post-activation value caches -- list of caches containing: every cache of linear_activation_forward() (there are L-1 of them, indexed from 0 to L-1) """ caches = [] A = X L = len(parameters) // 2 # number of layers in the neural network # Implement [LINEAR -> RELU]*(L-1). Add "cache" to the "caches" list. for l in range(1, L): A_prev = A ### START CODE HERE ### (≈ 2 lines of code) A, cache = linear_activation_forward(A_prev,parameters['W'+str(l)],parameters['b'+str(l)],activation='relu') caches.append(cache) ### END CODE HERE ### # Implement LINEAR -> SIGMOID. Add "cache" to the "caches" list. ### START CODE HERE ### (≈ 2 lines of code) AL, cache = linear_activation_forward(A,parameters['W'+str(L)],parameters['b'+str(L)],activation='sigmoid') caches.append(cache) ### END CODE HERE ### assert(AL.shape == (1,X.shape[1])) return AL, caches X, parameters = L_model_forward_test_case_2hidden() AL, caches = L_model_forward(X, parameters) print("AL = " + str(AL)) print("Length of caches list = " + str(len(caches))) ###Output AL = [[ 0.03921668 0.70498921 0.19734387 0.04728177]] Length of caches list = 3 ###Markdown **AL** [[ 0.03921668 0.70498921 0.19734387 0.04728177]] **Length of caches list ** 3 Great! Now you have a full forward propagation that takes the input X and outputs a row vector $A^{[L]}$ containing your predictions. It also records all intermediate values in "caches". Using $A^{[L]}$, you can compute the cost of your predictions. 
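Before moving on, it can help to sanity-check the shape bookkeeping of the forward pass. The short sketch below is a standalone toy example (it re-implements equation (4) with throwaway names rather than calling the graded functions above); the [3, 4, 1] layer sizes and the 5 examples are arbitrary choices for illustration.

```python
import numpy as np

np.random.seed(0)
layer_dims = [3, 4, 1]                      # toy sizes: 3 inputs -> 4 hidden units -> 1 output
m = 5                                       # number of examples
A = np.random.randn(layer_dims[0], m)       # plays the role of X = A[0], shape (3, 5)

for l in range(1, len(layer_dims)):
    W = np.random.randn(layer_dims[l], layer_dims[l - 1]) * 0.01
    b = np.zeros((layer_dims[l], 1))
    Z = np.dot(W, A) + b                    # Z[l] = W[l] A[l-1] + b[l]
    if l < len(layer_dims) - 1:
        A = np.maximum(0, Z)                # ReLU for the hidden layers
    else:
        A = 1 / (1 + np.exp(-Z))            # sigmoid for the output layer
    print("layer", l, ": W", W.shape, ", Z", Z.shape, ", A", A.shape)

# layer 1 : W (4, 3) , Z (4, 5) , A (4, 5)
# layer 2 : W (1, 4) , Z (1, 5) , A (1, 5)
```

Each weight matrix has shape $(n^{[l]}, n^{[l-1]})$ and every $Z^{[l]}$ and $A^{[l]}$ keeps the number of examples $m$ as its second dimension, matching the table in section 3.2.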
5 - Cost functionNow you will implement forward and backward propagation. You need to compute the cost, because you want to check if your model is actually learning.**Exercise**: Compute the cross-entropy cost $J$, using the following formula: $$-\frac{1}{m} \sum\limits_{i = 1}^{m} (y^{(i)}\log\left(a^{[L] (i)}\right) + (1-y^{(i)})\log\left(1- a^{[L](i)}\right)) \tag{7}$$ ###Code # GRADED FUNCTION: compute_cost def compute_cost(AL, Y): """ Implement the cost function defined by equation (7). Arguments: AL -- probability vector corresponding to your label predictions, shape (1, number of examples) Y -- true "label" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples) Returns: cost -- cross-entropy cost """ m = Y.shape[1] # Compute loss from aL and y. ### START CODE HERE ### (≈ 1 lines of code) cost = -1/m * np.sum(np.multiply(Y, np.log(AL))+np.multiply(1-Y,np.log(1-AL))) ### END CODE HERE ### cost = np.squeeze(cost) # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17). assert(cost.shape == ()) return cost Y, AL = compute_cost_test_case() print("cost = " + str(compute_cost(AL, Y))) ###Output cost = 0.279776563579 ###Markdown **Expected Output**: **cost** 0.2797765635793422 6 - Backward propagation moduleJust like with forward propagation, you will implement helper functions for backpropagation. Remember that back propagation is used to calculate the gradient of the loss function with respect to the parameters. **Reminder**: **Figure 3** : Forward and Backward propagation for *LINEAR->RELU->LINEAR->SIGMOID* *The purple blocks represent the forward propagation, and the red blocks represent the backward propagation.* <!-- For those of you who are expert in calculus (you don't need to be to do this assignment), the chain rule of calculus can be used to derive the derivative of the loss $\mathcal{L}$ with respect to $z^{[1]}$ in a 2-layer network as follows:$$\frac{d \mathcal{L}(a^{[2]},y)}{{dz^{[1]}}} = \frac{d\mathcal{L}(a^{[2]},y)}{{da^{[2]}}}\frac{{da^{[2]}}}{{dz^{[2]}}}\frac{{dz^{[2]}}}{{da^{[1]}}}\frac{{da^{[1]}}}{{dz^{[1]}}} \tag{8} $$In order to calculate the gradient $dW^{[1]} = \frac{\partial L}{\partial W^{[1]}}$, you use the previous chain rule and you do $dW^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial W^{[1]}}$. During the backpropagation, at each step you multiply your current gradient by the gradient corresponding to the specific layer to get the gradient you wanted.Equivalently, in order to calculate the gradient $db^{[1]} = \frac{\partial L}{\partial b^{[1]}}$, you use the previous chain rule and you do $db^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial b^{[1]}}$.This is why we talk about **backpropagation**.!-->Now, similar to forward propagation, you are going to build the backward propagation in three steps:- LINEAR backward- LINEAR -> ACTIVATION backward where ACTIVATION computes the derivative of either the ReLU or sigmoid activation- [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID backward (whole model) 6.1 - Linear backwardFor layer $l$, the linear part is: $Z^{[l]} = W^{[l]} A^{[l-1]} + b^{[l]}$ (followed by an activation).Suppose you have already calculated the derivative $dZ^{[l]} = \frac{\partial \mathcal{L} }{\partial Z^{[l]}}$. You want to get $(dW^{[l]}, db^{[l]}, dA^{[l-1]})$. 
**Figure 4** The three outputs $(dW^{[l]}, db^{[l]}, dA^{[l-1]})$ are computed using the input $dZ^{[l]}$.Here are the formulas you need:$$ dW^{[l]} = \frac{\partial \mathcal{J} }{\partial W^{[l]}} = \frac{1}{m} dZ^{[l]} A^{[l-1] T} \tag{8}$$$$ db^{[l]} = \frac{\partial \mathcal{J} }{\partial b^{[l]}} = \frac{1}{m} \sum_{i = 1}^{m} dZ^{[l](i)}\tag{9}$$$$ dA^{[l-1]} = \frac{\partial \mathcal{L} }{\partial A^{[l-1]}} = W^{[l] T} dZ^{[l]} \tag{10}$$ **Exercise**: Use the 3 formulas above to implement linear_backward(). ###Code # GRADED FUNCTION: linear_backward def linear_backward(dZ, cache): """ Implement the linear portion of backward propagation for a single layer (layer l) Arguments: dZ -- Gradient of the cost with respect to the linear output (of current layer l) cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer Returns: dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev dW -- Gradient of the cost with respect to W (current layer l), same shape as W db -- Gradient of the cost with respect to b (current layer l), same shape as b """ A_prev, W, b = cache m = A_prev.shape[1] ### START CODE HERE ### (≈ 3 lines of code) dW = 1/m * np.dot(dZ,cache[0].T) db = 1/m * np.sum(dZ,axis=1,keepdims=True) dA_prev = np.dot(cache[1].T,dZ) ### END CODE HERE ### assert (dA_prev.shape == A_prev.shape) assert (dW.shape == W.shape) assert (db.shape == b.shape) return dA_prev, dW, db # Set up some test inputs dZ, linear_cache = linear_backward_test_case() dA_prev, dW, db = linear_backward(dZ, linear_cache) print ("dA_prev = "+ str(dA_prev)) print ("dW = " + str(dW)) print ("db = " + str(db)) ###Output dA_prev = [[-1.15171336 0.06718465 -0.3204696 2.09812712] [ 0.60345879 -3.72508701 5.81700741 -3.84326836] [-0.4319552 -1.30987417 1.72354705 0.05070578] [-0.38981415 0.60811244 -1.25938424 1.47191593] [-2.52214926 2.67882552 -0.67947465 1.48119548]] dW = [[ 0.07313866 -0.0976715 -0.87585828 0.73763362 0.00785716] [ 0.85508818 0.37530413 -0.59912655 0.71278189 -0.58931808] [ 0.97913304 -0.24376494 -0.08839671 0.55151192 -0.10290907]] db = [[-0.14713786] [-0.11313155] [-0.13209101]] ###Markdown ** Expected Output**: ```dA_prev = [[-1.15171336 0.06718465 -0.3204696 2.09812712] [ 0.60345879 -3.72508701 5.81700741 -3.84326836] [-0.4319552 -1.30987417 1.72354705 0.05070578] [-0.38981415 0.60811244 -1.25938424 1.47191593] [-2.52214926 2.67882552 -0.67947465 1.48119548]]dW = [[ 0.07313866 -0.0976715 -0.87585828 0.73763362 0.00785716] [ 0.85508818 0.37530413 -0.59912655 0.71278189 -0.58931808] [ 0.97913304 -0.24376494 -0.08839671 0.55151192 -0.10290907]]db = [[-0.14713786] [-0.11313155] [-0.13209101]]``` 6.2 - Linear-Activation backwardNext, you will create a function that merges the two helper functions: **`linear_backward`** and the backward step for the activation **`linear_activation_backward`**. To help you implement `linear_activation_backward`, we provided two backward functions:- **`sigmoid_backward`**: Implements the backward propagation for SIGMOID unit. You can call it as follows:```pythondZ = sigmoid_backward(dA, activation_cache)```- **`relu_backward`**: Implements the backward propagation for RELU unit. You can call it as follows:```pythondZ = relu_backward(dA, activation_cache)```If $g(.)$ is the activation function, `sigmoid_backward` and `relu_backward` compute $$dZ^{[l]} = dA^{[l]} * g'(Z^{[l]}) \tag{11}$$. **Exercise**: Implement the backpropagation for the *LINEAR->ACTIVATION* layer. 
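The provided `relu_backward` and `sigmoid_backward` helpers live in the course utility file and are not shown in this notebook. Purely for intuition, a minimal stand-in consistent with equation (11) might look like the sketch below; it assumes the activation cache simply holds `Z`, which is an assumption about those helpers, not the graded code.

```python
import numpy as np

def relu_backward_sketch(dA, activation_cache):
    Z = activation_cache                  # assumed: the cache holds the pre-activation Z
    dZ = np.array(dA, copy=True)
    dZ[Z <= 0] = 0                        # g'(Z) = 1 where Z > 0, else 0
    return dZ

def sigmoid_backward_sketch(dA, activation_cache):
    Z = activation_cache
    s = 1 / (1 + np.exp(-Z))
    return dA * s * (1 - s)               # dZ = dA * sigma(Z) * (1 - sigma(Z))
```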
###Code # GRADED FUNCTION: linear_activation_backward def linear_activation_backward(dA, cache, activation): """ Implement the backward propagation for the LINEAR->ACTIVATION layer. Arguments: dA -- post-activation gradient for current layer l cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu" Returns: dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev dW -- Gradient of the cost with respect to W (current layer l), same shape as W db -- Gradient of the cost with respect to b (current layer l), same shape as b """ linear_cache, activation_cache = cache if activation == "relu": ### START CODE HERE ### (≈ 2 lines of code) dZ = relu_backward(dA,activation_cache) dA_prev, dW, db = linear_backward(dZ,linear_cache) ### END CODE HERE ### elif activation == "sigmoid": ### START CODE HERE ### (≈ 2 lines of code) dZ = sigmoid_backward(dA,activation_cache) dA_prev, dW, db = linear_backward(dZ,linear_cache) ### END CODE HERE ### return dA_prev, dW, db dAL, linear_activation_cache = linear_activation_backward_test_case() dA_prev, dW, db = linear_activation_backward(dAL, linear_activation_cache, activation = "sigmoid") print ("sigmoid:") print ("dA_prev = "+ str(dA_prev)) print ("dW = " + str(dW)) print ("db = " + str(db) + "\n") dA_prev, dW, db = linear_activation_backward(dAL, linear_activation_cache, activation = "relu") print ("relu:") print ("dA_prev = "+ str(dA_prev)) print ("dW = " + str(dW)) print ("db = " + str(db)) ###Output sigmoid: dA_prev = [[ 0.11017994 0.01105339] [ 0.09466817 0.00949723] [-0.05743092 -0.00576154]] dW = [[ 0.10266786 0.09778551 -0.01968084]] db = [[-0.05729622]] relu: dA_prev = [[ 0.44090989 -0. ] [ 0.37883606 -0. ] [-0.2298228 0. ]] dW = [[ 0.44513824 0.37371418 -0.10478989]] db = [[-0.20837892]] ###Markdown **Expected output with sigmoid:** dA_prev [[ 0.11017994 0.01105339] [ 0.09466817 0.00949723] [-0.05743092 -0.00576154]] dW [[ 0.10266786 0.09778551 -0.01968084]] db [[-0.05729622]] **Expected output with relu:** dA_prev [[ 0.44090989 0. ] [ 0.37883606 0. ] [-0.2298228 0. ]] dW [[ 0.44513824 0.37371418 -0.10478989]] db [[-0.20837892]] 6.3 - L-Model Backward Now you will implement the backward function for the whole network. Recall that when you implemented the `L_model_forward` function, at each iteration, you stored a cache which contains (X,W,b, and z). In the back propagation module, you will use those variables to compute the gradients. Therefore, in the `L_model_backward` function, you will iterate through all the hidden layers backward, starting from layer $L$. On each step, you will use the cached values for layer $l$ to backpropagate through layer $l$. Figure 5 below shows the backward pass. **Figure 5** : Backward pass ** Initializing backpropagation**:To backpropagate through this network, we know that the output is, $A^{[L]} = \sigma(Z^{[L]})$. Your code thus needs to compute `dAL` $= \frac{\partial \mathcal{L}}{\partial A^{[L]}}$.To do so, use this formula (derived using calculus which you don't need in-depth knowledge of):```pythondAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) derivative of cost with respect to AL```You can then use this post-activation gradient `dAL` to keep going backward. 
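As a quick numeric sanity check of this formula (toy numbers, not part of the assignment):

```python
import numpy as np

AL = np.array([[0.9, 0.2]])   # toy predictions
Y  = np.array([[1.0, 0.0]])   # toy labels
dAL = -(np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
print(dAL)                    # [[-1.11111111  1.25]]
```

The gradient is negative where the label is 1 (the loss falls if AL moves up towards 1) and positive where the label is 0.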
As seen in Figure 5, you can now feed in `dAL` into the LINEAR->SIGMOID backward function you implemented (which will use the cached values stored by the L_model_forward function). After that, you will have to use a `for` loop to iterate through all the other layers using the LINEAR->RELU backward function. You should store each dA, dW, and db in the grads dictionary. To do so, use this formula : $$grads["dW" + str(l)] = dW^{[l]}\tag{15} $$For example, for $l=3$ this would store $dW^{[l]}$ in `grads["dW3"]`.**Exercise**: Implement backpropagation for the *[LINEAR->RELU] $\times$ (L-1) -> LINEAR -> SIGMOID* model. ###Code # GRADED FUNCTION: L_model_backward def L_model_backward(AL, Y, caches): """ Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group Arguments: AL -- probability vector, output of the forward propagation (L_model_forward()) Y -- true "label" vector (containing 0 if non-cat, 1 if cat) caches -- list of caches containing: every cache of linear_activation_forward() with "relu" (it's caches[l], for l in range(L-1) i.e l = 0...L-2) the cache of linear_activation_forward() with "sigmoid" (it's caches[L-1]) Returns: grads -- A dictionary with the gradients grads["dA" + str(l)] = ... grads["dW" + str(l)] = ... grads["db" + str(l)] = ... """ grads = {} L = len(caches) # the number of layers m = AL.shape[1] Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL # Initializing the backpropagation ### START CODE HERE ### (1 line of code) dAL= -1*(np.divide(Y,AL)-np.divide(1-Y,1-AL)) ### END CODE HERE ### # Lth layer (SIGMOID -> LINEAR) gradients. Inputs: "dAL, current_cache". Outputs: "grads["dAL-1"], grads["dWL"], grads["dbL"] ### START CODE HERE ### (approx. 2 lines) dA_prev, dW, db = linear_activation_backward(dAL,caches[L - 1],'sigmoid'); grads["dA" + str(L-1)], grads["dW" + str(L)], grads["db" + str(L)] = dA_prev, dW, db; ### END CODE HERE ### # Loop from l=L-2 to l=0 for l in reversed(range(L-1)): # lth layer: (RELU -> LINEAR) gradients. # Inputs: "grads["dA" + str(l + 1)], current_cache". Outputs: "grads["dA" + str(l)] , grads["dW" + str(l + 1)] , grads["db" + str(l + 1)] ### START CODE HERE ### (approx. 5 lines) dA = dA_prev; dA_prev, dW, db = linear_activation_backward(dA, caches[l],'relu'); grads["dA" + str(l)] = dA_prev; grads["dW" + str(l + 1)] = dW; grads["db" + str(l + 1)] = db; ### END CODE HERE ### return grads AL, Y_assess, caches = L_model_backward_test_case() grads = L_model_backward(AL, Y_assess, caches) print_grads(grads) ###Output dW1 = [[ 0.41010002 0.07807203 0.13798444 0.10502167] [ 0. 0. 0. 0. ] [ 0.05283652 0.01005865 0.01777766 0.0135308 ]] db1 = [[-0.22007063] [ 0. ] [-0.02835349]] dA1 = [[ 0.12913162 -0.44014127] [-0.14175655 0.48317296] [ 0.01663708 -0.05670698]] ###Markdown **Expected Output** dW1 [[ 0.41010002 0.07807203 0.13798444 0.10502167] [ 0. 0. 0. 0. ] [ 0.05283652 0.01005865 0.01777766 0.0135308 ]] db1 [[-0.22007063] [ 0. ] [-0.02835349]] dA1 [[ 0.12913162 -0.44014127] [-0.14175655 0.48317296] [ 0.01663708 -0.05670698]] 6.4 - Update ParametersIn this section you will update the parameters of the model, using gradient descent: $$ W^{[l]} = W^{[l]} - \alpha \text{ } dW^{[l]} \tag{16}$$$$ b^{[l]} = b^{[l]} - \alpha \text{ } db^{[l]} \tag{17}$$where $\alpha$ is the learning rate. After computing the updated parameters, store them in the parameters dictionary. 
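For example, one update step with a hypothetical parameter and gradient (toy numbers, $\alpha = 0.1$):

```python
import numpy as np

learning_rate = 0.1
W1  = np.array([[0.5, -0.2]])
dW1 = np.array([[0.3,  0.1]])
W1 = W1 - learning_rate * dW1
print(W1)                     # [[ 0.47 -0.21]]
```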
**Exercise**: Implement `update_parameters()` to update your parameters using gradient descent.**Instructions**:Update parameters using gradient descent on every $W^{[l]}$ and $b^{[l]}$ for $l = 1, 2, ..., L$. ###Code # GRADED FUNCTION: update_parameters def update_parameters(parameters, grads, learning_rate): """ Update parameters using gradient descent Arguments: parameters -- python dictionary containing your parameters grads -- python dictionary containing your gradients, output of L_model_backward Returns: parameters -- python dictionary containing your updated parameters parameters["W" + str(l)] = ... parameters["b" + str(l)] = ... """ L = len(parameters) // 2 # number of layers in the neural network # Update rule for each parameter. Use a for loop. ### START CODE HERE ### (≈ 3 lines of code) for l in range(L): parameters["W" + str(l+1)] = parameters["W" + str(l+1)]-learning_rate*grads['dW'+str(l+1)] parameters["b" + str(l+1)] = parameters["b" + str(l+1)]-learning_rate*grads['db'+str(l+1)] ### END CODE HERE ### return parameters parameters, grads = update_parameters_test_case() parameters = update_parameters(parameters, grads, 0.1) print ("W1 = "+ str(parameters["W1"])) print ("b1 = "+ str(parameters["b1"])) print ("W2 = "+ str(parameters["W2"])) print ("b2 = "+ str(parameters["b2"])) ###Output W1 = [[-0.59562069 -0.09991781 -2.14584584 1.82662008] [-1.76569676 -0.80627147 0.51115557 -1.18258802] [-1.0535704 -0.86128581 0.68284052 2.20374577]] b1 = [[-0.04659241] [-1.28888275] [ 0.53405496]] W2 = [[-0.55569196 0.0354055 1.32964895]] b2 = [[-0.84610769]]
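###Markdown To see how the helpers fit together, here is a rough training-loop skeleton in the spirit of this assignment. It is only a sketch: `X`, `Y`, `layer_dims`, `learning_rate` and `num_iterations` are hypothetical placeholders, it assumes the functions defined above (and the provided activation helpers) are available in the notebook's namespace, and the graded assembly of the full model is done separately.

```python
# Sketch only -- X, Y, layer_dims, learning_rate, num_iterations are placeholders.
def l_layer_model_sketch(X, Y, layer_dims, learning_rate=0.0075, num_iterations=2500):
    parameters = initialize_parameters_deep(layer_dims)
    for i in range(num_iterations):
        AL, caches = L_model_forward(X, parameters)                       # forward pass
        cost = compute_cost(AL, Y)                                        # cross-entropy cost
        grads = L_model_backward(AL, Y, caches)                           # backward pass
        parameters = update_parameters(parameters, grads, learning_rate)  # gradient step
        if i % 100 == 0:
            print("Cost after iteration %i: %f" % (i, cost))
    return parameters
```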
Datacamp Projects/Who Is Drunk and When in Ames, Iowa_/notebook.ipynb
###Markdown 1. Breath alcohol tests in Ames, Iowa, USAAmes, Iowa, USA is the home of Iowa State University, a land grant university with over 36,000 students. By comparison, the city of Ames, Iowa, itself only has about 65,000 residents. As with any other college town, Ames has had its fair share of alcohol-related incidents. (For example, Google 'VEISHEA riots 2014'.) We will take a look at some breath alcohol test data from Ames that is published by the State of Iowa. The data file 'breath_alcohol_ames.csv' contains 1,556 readings from breath alcohol tests administered by the Ames and Iowa State University Police Departments from January 2013 to December 2017. The columns in this data set are year, month, day, hour, location, gender, Res1, Res2. ###Code # import pandas # ... YOUR CODE FOR TASK 1 ... import pandas as pd # read the data into your workspace ba_data = pd.read_csv("datasets/breath_alcohol_ames.csv") # quickly inspect the data print(ba_data.head()) # obtain counts for each year ba_year = ba_data["year"].value_counts() ba_year ###Output year month day hour location gender Res1 Res2 0 2017 12 17 1 Ames PD M 0.046 0.046 1 2017 12 14 3 ISU PD F 0.121 0.120 2 2017 12 10 5 ISU PD F 0.068 0.067 3 2017 12 10 3 ISU PD F 0.077 0.077 4 2017 12 9 2 ISU PD M 0.085 0.084 ###Markdown 2. What is the busiest police department in Ames?There are two police departments in the data set: the Iowa State University Police Department and the Ames Police Department. Which one administers more breathalyzer tests? ###Code # use value_counts to tally up the totals for each department pds = ba_data["location"].value_counts() pds ###Output _____no_output_____ ###Markdown 3. Nothing Good Happens after 2amWe all know that "nothing good happens after 2am." Thus, there are inevitably some times of the day when breath alcohol tests, especially in a college town like Ames, are most and least common. Which hours of the day have the most and least breathalyzer tests? ###Code %matplotlib inline # count by hour hourly = ba_data.groupby(by = ["hour"]).size() # create a vertical bar graph of the arrest count by hour hourly.plot.bar(x="hour"); ###Output _____no_output_____ ###Markdown 4. Breathalyzer tests by monthNow that we have discovered which time of day is most common for breath alcohol tests, we will determine which time of the year has the most breathalyzer tests. Which month will have the most recorded tests? ###Code # count by month and arrange by descending frequency monthly = ba_data.groupby(['month']).size() # use plot.bar to make the appropriate bar chart monthly.plot.bar(x='month') ###Output _____no_output_____ ###Markdown 5. COLLEGE When we think of (binge) drinking in college towns in America, we usually think of something like this image at the left. And so, one might suspect that breath alcohol tests are given to men more often than women and that men drink more than women. ###Code # count by gender counts_gender = ba_data["gender"].value_counts() # create a dataset with no NAs in gender gen = ba_data.dropna(subset=["gender"]) # create a mean test result variable mean_bas = gen.assign(meanRes=(gen["Res1"]+gen["Res2"])/len(gen)) # # create side-by-side boxplots to compare the mean blood alcohol levels of men and women mean_bas.boxplot(['meanRes'], by = "gender") ###Output _____no_output_____ ###Markdown 6. Above the legal limitIn the USA, it is illegal to drive with a blood alcohol concentration (BAC) above 0.08%. This is the case for all 50 states. 
Assuming everyone tested in our data was driving (though we have no way of knowing this from the data), if either of the results (Res1, Res2) are above 0.08, the person would be charged with DUI (driving under the influence). ###Code # Filter the data duis = ba_data[(ba_data["Res1"] > 0.08) | (ba_data["Res2"] > 0.08)] # proportion of tests that would have resulted in a DUI p_dui = duis.shape[0] / ba_data.shape[0] p_dui ###Output _____no_output_____ ###Markdown 7. Breathalyzer tests: is there a pattern over time?We previously saw that 2am is the most common time of day for breathalyzer tests to be administered, and August is the most common month of the year for breathalyzer tests. Now, we look at the weeks in the year over time. ###Code # Create date variable ba_data['date'] = pd.to_datetime(ba_data[['year', 'month', 'day']]) # Create a week variable ba_data['week'] = ba_data['date'].dt.week # Check your work ba_data.head() ###Output _____no_output_____ ###Markdown 8. Looking at timelinesHow do the weeks differ over time? One of the most common data visualizations is the time series, a line tracking the changes in a variable over time. We will use the new week variable to look at test frequency over time. We end with a time series plot showing the frequency of breathalyzer tests by week in year, with one line for each year. ###Code # choose and count the variables of interest timeline = ba_data.groupby(['week','year']).count()['Res1'] # unstack and plot timeline.unstack().plot(title='VEISHEA DUIs', legend=True) ###Output _____no_output_____ ###Markdown 9. The end of VEISHEAFrom Wikipedia: "VEISHEA was an annual week-long celebration held each spring on the campus of Iowa State University in Ames, Iowa. The celebration featured an annual parade and many open-house demonstrations of the university facilities and departments. Campus organizations exhibited products, technologies, and held fundraisers for various charity groups. In addition, VEISHEA brought speakers, lecturers, and entertainers to Iowa State. [...] VEISHEA was the largest student-run festival in the nation, bringing in tens of thousands of visitors to the campus each year."This over 90-year tradition in Ames was terminated permanently after riots in 2014, where drunk celebrators flipped over multiple vehicles and tore light poles down. This was not the first incidence of violence and severe property damage in VEISHEA's history. Did former President Leath make the right decision by canceling VEISHEA? ###Code ## Was it right to permanently cancel VEISHEA? TRUE or FALSE? canceling_VEISHEA_was_right = True ###Output _____no_output_____
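###Markdown As a small follow-up to sections 5 and 7: if the intended "mean test result" is the per-test average of the two readings, that average is `(Res1 + Res2) / 2`, and on recent pandas versions (1.1+) the ISO week number comes from `dt.isocalendar().week` rather than the removed `Series.dt.week`. The sketch below reuses the variable and column names from the cells above and assumes the `date` column from section 7 has already been created.

```python
# Per-test mean of the two breathalyzer readings (average of Res1 and Res2).
gen = ba_data.dropna(subset=["gender"])
mean_bas = gen.assign(meanRes=(gen["Res1"] + gen["Res2"]) / 2)
mean_bas.boxplot(["meanRes"], by="gender")

# ISO week number on pandas >= 1.1 (Series.dt.week was deprecated and later removed).
ba_data["week"] = ba_data["date"].dt.isocalendar().week
```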
notes/15 KNN/KNN_MAIN.ipynb
###Markdown KNN

KNN stands for K-Nearest Neighbours. KNN is a simple classification algorithm, generally used for datasets in which the data points are separated into several classes and we have to predict the class of a new sample point.

KNN is a non-parametric, lazy learning algorithm. Non-parametric means that the algorithm makes no assumptions about the underlying data distribution: non-parametric techniques do not rely on the data belonging to a particular distribution and do not fix the structure of the model in advance. KNN is therefore useful as a classifier when we do not have much information about how the data is distributed.

KNN is referred to as a lazy algorithm because it does not use the training points to build any generalization, which means there is no separate training phase. KNN keeps all of the training data and uses it during the testing phase. In other words, KNN does not learn a model; it makes predictions on the fly by computing the similarity between the testing point and each training data point.

The KNN algorithm is based on feature similarity: we classify a testing point according to how closely its features resemble those of the training set.![title](knn_intro.png)

The test sample (green circle) should be classified either into Class 1 (blue squares) or Class 2 (red triangles). The class of the testing point is decided by a majority vote among its neighbours. If K=1, the nearest training point belongs to Class 1, so the testing sample is assigned to Class 1. If instead we take K=3, the majority vote among the three nearest training points gives Class 2 (red triangles), since two of them belong to Class 2 and one belongs to Class 1. In the same way we can predict the class of the testing sample for any pre-decided value of K.

KNN can be used for classification as well as regression. In classification the output is a class membership: the sample point is assigned the most common class among its K nearest neighbours, decided by majority vote. In regression the output is a property value of the object, usually the average or median of the values of its K nearest neighbours. So in classification KNN predicts a discrete value (a class), whereas in regression it predicts a continuous value.

Why don't we choose the value K=1?

Choosing K=1 makes our model more prone to outliers and overfitting. K=1 means that only the single closest neighbour decides the class of the testing sample, and in most cases this leads to overfitting.![title](knn_k1.jpg)

Consider the case here: if we choose K=1, the given sample gets classified as a dot (Class 2) instead of as a cross (Class 1). In such cases an appropriate value of K should be chosen to get good results. Choosing K=1 leads to complex decision boundaries and hence to overfitting. Since we use the majority-vote technique, K is taken to be odd, so that the vote gives a clear result for the class of the testing sample.

Distance Metric for KNN

There are various distance metrics to choose from, such as the Manhattan distance, the Euclidean distance, etc.
\begin{equation*}\textbf{MANHATTAN DISTANCE} = \sum_{i=1}^n \left| X_1^i - X_2^i \right|\end{equation*}

\begin{equation*}\textbf{EUCLIDEAN DISTANCE} = \sqrt{ \sum_{i=1}^n \left( X_1^i - X_2^i \right)^2 }\end{equation*}

where X1 and X2 are two different data points and 'i' runs over all the features in the given dataset.

Variations in KNN

Variations of KNN differ in how the neighbouring points vote. The weight of a neighbour's vote can be made inversely proportional to its distance from the testing point, so we can use either uniform voting or weighted voting. With weighted voting, a point nearer to the testing sample has a larger say in the vote than a point that is farther away.

Feature Scaling before KNN

In KNN we look for the points closest to the testing point. Without feature scaling, if one feature takes values in the thousands while another takes values in much smaller units, the first feature completely dominates the distance and drowns out the effect of the second in the final output. It is therefore essential to apply feature scaling before KNN, so that all features contribute equally to the predicted output for the given testing point (a scaled pipeline sketch is given at the end of this notebook).

KNN in Sklearn

We will apply KNN to the Breast Cancer dataset using the KNN classifier built into sklearn.

###Code
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt

cancer = datasets.load_breast_cancer()
cancer.feature_names
cancer.target

x_train, x_test, y_train, y_test = train_test_split(cancer.data, cancer.target, test_size = 0.2, random_state = 0)

clf = KNeighborsClassifier()
clf.fit(x_train, y_train)
clf.score(x_test, y_test)
###Output _____no_output_____
###Markdown The default value of K (number of neighbors) in Sklearn is 5. By default, Sklearn uses the Minkowski distance metric, whose general form is:

\begin{equation*}\textbf{MINKOWSKI DISTANCE} = \left( \sum_{i=1}^n \left| X_1^i - X_2^i \right|^p \right)^{1/p}\end{equation*}

With p = 1 this reduces to the Manhattan distance, and with p = 2 to the Euclidean distance.

Cross Validation

The best value of K is the one that gives the lowest error on unseen data. We could repeatedly fit the model for different values of K using both the training and the testing data and then pick the K with the minimum test error, but that makes the testing data part of the training process, so it should not be done. On the other hand, if we tune K on the training data alone, we overfit: the error on the training data is low but the error on the testing data is comparatively high. To obtain a good value of K we therefore use a method known as CROSS VALIDATION. Cross validation sets aside a subset of the training data that is not used for fitting the model; this held-out subset is called the 'validation set'. There are various techniques available for cross validation; we will use the most common one, known as K-fold cross validation.
![title](knn_crossval.png) In K fold cross validation, the training data is randomly split into K different samples(or folds). One of the sample is taken to be the validation set and the model is fitted on the remaining K-1 samples. The accuracy of the model is then computed. The same process is repeated K times, each time taking a different sample of points to be in the validation set. This results in K value for test error and these values are averaged out to obtain the overall result.Cross Validation is used to estimate the test error and generate more robust models. Cross Validation in Sklearn ###Code x_axis = [] k_scores = [] for k in range(1,50, 2): x_axis.append(k) clf = KNeighborsClassifier(n_neighbors=k) scores = cross_val_score(clf, x_train, y_train, cv=10, scoring='accuracy') k_scores.append(scores.mean()) #Printing values print("K = ",k) print("Scores : ") print(scores) print("Mean Score = ",scores.mean()) print() plt.plot(x_axis, k_scores) plt.show() optimal_k = x_axis[k_scores.index(max(k_scores))] optimal_k ###Output _____no_output_____ ###Markdown By this way, we can choose the optimal value of K using cross validation. If the value of K is very less, for eg, say K=1, it will lead to overfitting and result in formation of complex decision boundaries. On the the hand, if the value of K is very high, we are basically underfitting and not actually taking class of neighbors into consideration. In this case, we predict the class of the testing sample from the majority class of the overall training data rather than from the majority class given by the neighbors. Lower values of K corresponds to low bias but high variance and result in jagged and complex decision boundaries, while higher value of K corresponds to lower variance but increased bias and leads to formation of smoother decision boundaries.K = 1 | K = 20:-------------------------:|:-------------------------:![](knn_dec1.png) | ![](knn_dec20.png) Implement KNN ###Code from sklearn import datasets from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import train_test_split from collections import Counter from sklearn.metrics import accuracy_score cancer = datasets.load_breast_cancer() X_train, X_test, Y_train, Y_test = train_test_split(cancer.data, cancer.target, test_size = 0.2, random_state = 0) clf = KNeighborsClassifier(n_neighbors = 7) clf.fit(X_train, Y_train) clf.score(X_test,Y_test) def train(x,y): return def predict_one(x_train, y_train, x_test, k): distances = [] for i in range(len(x_train)): distance = ((x_train[i,:] - x_test)**2).sum() distances.append([distance,i]) distances = sorted(distances) targets = [] for i in range(k): index_of_training_data = distances[i][1] targets.append(y_train[index_of_training_data]) return Counter(targets).most_common(1)[0][0] def predict(x_train, y_train, x_test_data, k): predictions = [] for x_test in x_test_data: predictions.append(predict_one(x_train, y_train, x_test, k)) return predictions y_pred = predict(X_train, Y_train, X_test, 7) accuracy_score(Y_test, y_pred) ###Output _____no_output_____
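###Markdown The sections above stress feature scaling and mention distance-weighted voting, but the cells run `KNeighborsClassifier` on the raw features with uniform weights. The sketch below shows one way the two ideas could be combined with a scikit-learn pipeline, so the scaler is fit on each training fold only; the parameter grid values are illustrative, not tuned.

```python
from sklearn import datasets
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

cancer = datasets.load_breast_cancer()
x_train, x_test, y_train, y_test = train_test_split(
    cancer.data, cancer.target, test_size=0.2, random_state=0)

# Scaling inside the pipeline keeps the scaler from seeing the validation fold.
pipe = Pipeline([
    ("scale", StandardScaler()),
    ("knn", KNeighborsClassifier()),
])

param_grid = {
    "knn__n_neighbors": list(range(1, 30, 2)),    # odd values of K
    "knn__weights": ["uniform", "distance"],      # plain vs distance-weighted voting
}

search = GridSearchCV(pipe, param_grid, cv=10, scoring="accuracy")
search.fit(x_train, y_train)

print(search.best_params_)
print(search.score(x_test, y_test))
```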
Solution_5_Pytorch_of_Solutiion4.ipynb
###Markdown [Kaggle常用图片增强包 很好用!!!](https://github.com/aleju/imgaug) [效果展示案例](https://imgaug.readthedocs.io/en/latest/source/augmenters.html) ###Code import imgaug as ia from imgaug import augmenters as iaa import numpy as np ia.seed(1) # Example batch of images. # The array has shape (32, 64, 64, 3) and dtype uint8. images = np.array( [ia.quokka(size=(64, 64)) for _ in range(32)], dtype=np.uint8 ) seq = iaa.Sequential([ iaa.Fliplr(0.5), # horizontal flips iaa.Crop(percent=(0, 0.1)), # random crops # Small gaussian blur with random sigma between 0 and 0.5. # But we only blur about 50% of all images. iaa.Sometimes(0.5, iaa.GaussianBlur(sigma=(0, 0.5)) ), # Strengthen or weaken the contrast in each image. iaa.ContrastNormalization((0.75, 1.5)), # Add gaussian noise. # For 50% of all images, we sample the noise once per pixel. # For the other 50% of all images, we sample the noise per pixel AND # channel. This can change the color (not only brightness) of the # pixels. iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5), # Make some images brighter and some darker. # In 20% of all cases, we sample the multiplier once per channel, # which can end up changing the color of the images. iaa.Multiply((0.8, 1.2), per_channel=0.2), # Apply affine transformations to each image. # Scale/zoom them, translate/move them, rotate them and shear them. iaa.Affine( #scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}, translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)}, rotate=(-25, 25), shear=(-8, 8) ) ], random_order=True) # apply augmenters in random order images_aug = seq.augment_images(images) # 显示增强后的效果 import matplotlib.pyplot as plt plt.figure() for i in range(images_aug.shape[0]): plt.imshow(images_aug[i]) plt.axis('off') plt.show() ###Output _____no_output_____ ###Markdown 作者原实现 80epoch 水平翻转测试加权(0.011) 多阶段LR BCELoss ![](http://op4a94iq8.bkt.clouddn.com/18-9-2/22394851.jpg) ###Code import os import sys import random import pandas as pd import numpy as np import matplotlib.pyplot as plt plt.style.use('seaborn-white') import seaborn as sns sns.set_style("white") %matplotlib inline # import cv2 from sklearn.model_selection import train_test_split # 分层分割数据集超方便的 from tqdm import tqdm_notebook, tnrange from itertools import chain from skimage.io import imread, imshow, concatenate_images from skimage.transform import resize from skimage.morphology import label from keras.preprocessing.image import load_img # Set some parameters im_width = 101 im_height = 101 im_chan = 1 basicpath = '' path_train = basicpath + 'TrainData/' path_test = basicpath + 'TestData/' path_train_images = path_train + 'images/' path_train_masks = path_train + 'masks/' path_test_images = path_test + 'images/' print("Training imgs path:",path_train_images) print("Masks of training imgs path:",path_train_masks) print("Test imgs path:",path_test_images) img_size_ori = 101 img_size_target = 101 def upsample(img):# not used if img_size_ori == img_size_target: return img return resize(img, (img_size_target, img_size_target), mode='constant', preserve_range=True) #res = np.zeros((img_size_target, img_size_target), dtype=img.dtype) #res[:img_size_ori, :img_size_ori] = img #return res def downsample(img):# not used if img_size_ori == img_size_target: return img return resize(img, (img_size_ori, img_size_ori), mode='constant', preserve_range=True) #return img[:img_size_ori, :img_size_ori] # Loading of training/testing ids and depths train_df = pd.read_csv("train.csv", index_col="id", usecols=[0]) depths_df = pd.read_csv("depths.csv", 
index_col="id") train_df = train_df.join(depths_df) # 每个id链接上相应的深度 test_df = depths_df[~depths_df.index.isin(train_df.index)] print("number of train:",len(train_df)) print(train_df.head(5)) print(depths_df.head(5)) test_df.head(5) train_df["images"] = [np.array(load_img("TrainData/images/{}.png".format(idx), grayscale=True)) / 255 for idx in tqdm_notebook(train_df.index)] train_df["masks"] = [np.array(load_img("TrainData/masks/{}.png".format(idx), grayscale=True))/ 255 for idx in tqdm_notebook(train_df.index)] train_df.head(10) train_df["coverage"] = train_df.masks.map(np.sum) / pow(img_size_ori, 2) # 统计图片中像素为1的个数占整个图片101x101=10201个像素的比例 def cov_to_class(val): for i in range(0, 11): if val * 10 <= i : return i train_df["coverage_class"] = train_df.coverage.map(cov_to_class) #按照10%递增一档的比例进行阶梯分类 train_df.head(15) fig, axs = plt.subplots(1, 2, figsize=(15,5)) sns.distplot(train_df.coverage, kde=False, ax=axs[0]) sns.distplot(train_df.coverage_class, bins=10, kde=False, ax=axs[1]) plt.suptitle("Salt coverage") axs[0].set_xlabel("Coverage") axs[1].set_xlabel("Coverage class") #Plotting the depth distributions¶ sns.distplot(train_df.z, label="Train") sns.distplot(test_df.z, label="Test") plt.legend() plt.title("Depth distribution") # Create train/validation split stratified by salt coverage ids_train, ids_valid, x_train, x_valid, y_train, y_valid, cov_train, cov_test, depth_train, depth_test = train_test_split( train_df.index.values, np.array(train_df.images.map(upsample).tolist()).reshape(-1, img_size_target, img_size_target, 1), # [4000,101,101,1] np.array(train_df.masks.map(upsample).tolist()).reshape(-1, img_size_target, img_size_target, 1), # [4000,101,101,1] train_df.coverage.values,# 4000个值 train_df.z.values, # 4000个值 test_size=0.05, stratify=train_df.coverage_class, random_state= 1234) # !!!! 
stratify 分层split的指标 覆盖比例的等级 后面还可以使用深度等级分层split print(ids_train.shape) print(x_train.shape) print(y_train.shape) print(cov_train.shape) print(depth_train.shape) type(x_train) # all are np.ndarray ###Output (3800,) (3800, 101, 101, 1) (3800, 101, 101, 1) (3800,) (3800,) ###Markdown 模型定义模块 ###Code from torch import nn from torch.nn import functional as F from torch.autograd import Variable import torch from torchvision import models import torchvision # # 反卷积计算示例 # a_Deconv=nn.ConvTranspose2d(256,128,3,stride=2,\ # padding=1,output_padding=1,bias=True, dilation=1) # x=torch.randn(2,256,6,6) # out=a_Deconv(x) # out.shape class Conv2D_block(nn.Module): def __init__(self,C_in,C_out,filter_size=3,paddingType="SAME",ActionType="Relu"): super().__init__() padding={} if filter_size==3: padding["SAME"]=1 if filter_size==1: padding["SAME"]=0 padding["VALID"]=0 self.Conv=nn.Conv2d(C_in, C_out, filter_size, stride=1,padding=padding[paddingType]) self.BN=nn.BatchNorm2d(C_out) self.ActionType=ActionType def forward(self,x): out=self.Conv(x) out=self.BN(out) if self.ActionType=="Relu": out=F.relu6(out,inplace=True) return out class Residual_block(nn.Module): def __init__(self,C_in,C_out,filter_size=3,paddingType="SAME",ActionType="Relu"): super().__init__() self.BN=nn.BatchNorm2d(C_in) self.Conv1=Conv2D_block(C_in,C_out,filter_size,paddingType,ActionType) self.Conv2=Conv2D_block(C_out,C_out,filter_size,paddingType,ActionType=None) def forward(self,x): out=F.relu6(x,inplace=True) out=self.BN(out) out=self.Conv1(out) out=self.Conv2(out) out=out+x # skip connection return out class Eecoder_with_residual_block(nn.Module): def __init__(self,C_in,C_out,DropoutRatio = 0.5,filter_size=3,paddingType="SAME"): super().__init__() padding={} if filter_size==3: padding["SAME"]=1 if filter_size==1: padding["SAME"]=0 padding["VALID"]=0 self.Conv_1=nn.Conv2d(C_in,C_out,filter_size,stride=1,padding=padding[paddingType]) self.Res_1=Residual_block(C_out,C_out) self.Res_2=Residual_block(C_out,C_out) def forward(self,x): out=self.Conv_1(x) out=self.Res_1(out) out=self.Res_2(out) out=F.relu6(out,inplace=True) return out class Decoder_with_residual_block(nn.Module): def __init__(self,C_in,C_out,DropoutRatio = 0.5,filter_size=3,paddingType="SAME"): super().__init__() padding={} if filter_size==3: padding["SAME"]=1 if filter_size==1: padding["SAME"]=0 padding["VALID"]=0 if paddingType=="VALID": output_padding=0 else: output_padding=1 self.Deconv_1=nn.ConvTranspose2d(C_in,C_out,filter_size, stride=2,\ padding=padding[paddingType],output_padding=output_padding,bias=True, dilation=1) self.Dropout=nn.Dropout(DropoutRatio) self.Conv_1=nn.Conv2d(C_in,C_out,filter_size,stride=1,padding=1) self.Res_1=Residual_block(C_out,C_out) self.Res_2=Residual_block(C_out,C_out) def forward(self,x,skipx): out=self.Deconv_1(x) out=torch.cat([out,skipx],dim=1) out=self.Dropout(out) out=self.Conv_1(out) out=self.Res_1(out) out=self.Res_2(out) out=F.relu6(out,inplace=True) return out class Middle_with_residual_block(nn.Module): def __init__(self,C_in,C_out,DropoutRatio = 0.5,filter_size=3,paddingType="SAME"): super().__init__() padding={} if filter_size==3: padding["SAME"]=1 if filter_size==1: padding["SAME"]=0 padding["VALID"]=0 self.Conv_1=nn.Conv2d(C_in,C_out,filter_size,stride=1,padding=padding[paddingType]) self.Res_1=Residual_block(C_out,C_out) self.Res_2=Residual_block(C_out,C_out) def forward(self,x): out=self.Conv_1(x) out=self.Res_1(out) out=self.Res_2(out) out=F.relu6(out,inplace=True) return out class U_net_with_residual(nn.Module): def 
__init__(self,filter_num=16,DropoutRatio = 0.5,filter_size=3,paddingType="SAME"): super().__init__() self.Dropout_1=nn.Dropout(DropoutRatio/2) self.Dropout_2=nn.Dropout(DropoutRatio) im_chan=1 self.Encoder_L1=Eecoder_with_residual_block(im_chan,filter_num,DropoutRatio/2) self.Encoder_L2=Eecoder_with_residual_block(filter_num*1,filter_num*2,DropoutRatio) self.Encoder_L3=Eecoder_with_residual_block(filter_num*2,filter_num*4,DropoutRatio) self.Encoder_L4=Eecoder_with_residual_block(filter_num*4,filter_num*8,DropoutRatio) self.Middle=Middle_with_residual_block(filter_num*8,filter_num*16,DropoutRatio) self.Decoder_L4=Decoder_with_residual_block(filter_num*16,filter_num*8,DropoutRatio,paddingType="SAME") self.Decoder_L3=Decoder_with_residual_block(filter_num*8,filter_num*4,DropoutRatio,paddingType="VALID") self.Decoder_L2=Decoder_with_residual_block(filter_num*4,filter_num*2,DropoutRatio,paddingType="SAME") self.Decoder_L1=Decoder_with_residual_block(filter_num*2,filter_num*1,DropoutRatio,paddingType="VALID") self.Out=nn.Conv2d(filter_num,1,1,stride=1,padding=0) def forward(self,x): Encoder_L1=self.Encoder_L1(x) Pool_L1=F.max_pool2d(Encoder_L1,2, stride=2) Pool_L1=self.Dropout_1(Pool_L1) Encoder_L2=self.Encoder_L2(Pool_L1) Pool_L2=F.max_pool2d(Encoder_L2,2, stride=2) Pool_L2=self.Dropout_2(Pool_L2) Encoder_L3=self.Encoder_L3(Pool_L2) Pool_L3=F.max_pool2d(Encoder_L3,2, stride=2) Pool_L3=self.Dropout_2(Pool_L3) Encoder_L4=self.Encoder_L4(Pool_L3) Pool_L4=F.max_pool2d(Encoder_L4,2, stride=2) Pool_L4=self.Dropout_2(Pool_L4) Middle=self.Middle(Pool_L4) Summary=True if Summary: print("1:",Encoder_L1.shape) print("2:",Encoder_L2.shape) print("3:",Encoder_L3.shape) print("4:",Encoder_L4.shape) print("Middle:",Middle.shape) Decoder_L4=self.Decoder_L4(Middle,Encoder_L4) Decoder_L3=self.Decoder_L3(Decoder_L4,Encoder_L3) Decoder_L2=self.Decoder_L2(Decoder_L3,Encoder_L2) Decoder_L1=self.Decoder_L1(Decoder_L2,Encoder_L1) if Summary: print("6:",Decoder_L4.shape) print("7:",Decoder_L3.shape) print("8:",Decoder_L2.shape) print("9:",Decoder_L1.shape) logits=self.Dropout_1(Decoder_L1) logits=self.Out(logits) loits=F.sigmoid(logits) if Summary: print("10:",logits.shape) return logits import time since=time.time() x=torch.randn(32,1,101,101) model=U_net_with_residual(32) if torch.cuda.is_available(): x=x.cuda() model.cuda() im_chan=1 out=model(x) print("cost time:",time.time()-since) # ############################################# # ## 网络架构及Pytorch版本的Fine-tune手法 ## # ############################################# # from torch import nn # from torch.nn import functional as F # import torch # from torchvision import models # import torchvision # def conv3x3(in_, out): # return nn.Conv2d(in_, out, 3, padding=1) # class ConvRelu(nn.Module): # def __init__(self, in_, out): # super().__init__() # self.conv = conv3x3(in_, out) # self.activation = nn.ReLU(inplace=True) # def forward(self, x): # x = self.conv(x) # x = self.activation(x) # return x # class DecoderBlock(nn.Module): # def __init__(self, in_channels, middle_channels, out_channels): # super().__init__() # self.block = nn.Sequential( # ConvRelu(in_channels, middle_channels), # nn.ConvTranspose2d(middle_channels, out_channels, kernel_size=3, stride=2, padding=1, output_padding=1), # nn.ReLU(inplace=True) # ) # def forward(self, x): # return self.block(x) # class UNet11(nn.Module): # def __init__(self, num_filters=32, pretrained=False): # """ # :param num_classes: # :param num_filters: # :param pretrained: # False - no pre-trained network is used # True - encoder is 
pre-trained with VGG11 # """ # super().__init__() # self.pool = nn.MaxPool2d(2, 2) # self.encoder = models.vgg11(pretrained=pretrained).features # self.relu = self.encoder[1] # #print(self.relu) # output ==> RELU(inplace) # self.conv1 = self.encoder[0] # self.conv2 = self.encoder[3] # self.conv3s = self.encoder[6] # self.conv3 = self.encoder[8] # self.conv4s = self.encoder[11] # self.conv4 = self.encoder[13] # self.conv5s = self.encoder[16] # self.conv5 = self.encoder[18] # self.center = DecoderBlock(num_filters * 8 * 2, num_filters * 8 * 2, num_filters * 8) # self.dec5 = DecoderBlock(num_filters * (16 + 8), num_filters * 8 * 2, num_filters * 8) # self.dec4 = DecoderBlock(num_filters * (16 + 8), num_filters * 8 * 2, num_filters * 4) # self.dec3 = DecoderBlock(num_filters * (8 + 4), num_filters * 4 * 2, num_filters * 2) # self.dec2 = DecoderBlock(num_filters * (4 + 2), num_filters * 2 * 2, num_filters) # self.dec1 = ConvRelu(num_filters * (2 + 1), num_filters) # self.final = nn.Conv2d(num_filters, 1, kernel_size=1) # 1x1卷积 输出是1通道 # def forward(self, x): # conv1 = self.relu(self.conv1(x)) # conv2 = self.relu(self.conv2(self.pool(conv1))) # conv3s = self.relu(self.conv3s(self.pool(conv2))) # conv3 = self.relu(self.conv3(conv3s)) # conv4s = self.relu(self.conv4s(self.pool(conv3))) # conv4 = self.relu(self.conv4(conv4s)) # conv5s = self.relu(self.conv5s(self.pool(conv4))) # conv5 = self.relu(self.conv5(conv5s)) # center = self.center(self.pool(conv5)) # dec5 = self.dec5(torch.cat([center, conv5], 1)) # dec4 = self.dec4(torch.cat([dec5, conv4], 1)) # dec3 = self.dec3(torch.cat([dec4, conv3], 1)) # dec2 = self.dec2(torch.cat([dec3, conv2], 1)) # dec1 = self.dec1(torch.cat([dec2, conv1], 1)) # return self.final(dec1) # def unet11(pretrained=False, **kwargs): # """ # pretrained: # False - no pre-trained network is used # True - encoder is pre-trained with VGG11 # carvana - all weights are pre-trained on # Kaggle: Carvana dataset https://www.kaggle.com/c/carvana-image-masking-challenge # """ # if pretrained== True: # model = UNet11(pretrained=pretrained, **kwargs) # else: # model= UNet11(pretrained=False, **kwargs) # # 根据 pretrained 加载 'carvana'数据集上训练好的网络全部权重 # if pretrained == 'carvana': # state = torch.load('TernausNet.pt',map_location={'cuda:0': 'cpu'}) # # model.load_state_dict(state['model']) #,map_location='cpu' # return model # class DecoderBlockV2(nn.Module): # def __init__(self, in_channels, middle_channels, out_channels, is_deconv=True): # super(DecoderBlockV2, self).__init__() # self.in_channels = in_channels # if is_deconv: # """ # Paramaters for Deconvolution were chosen to avoid artifacts, following # link https://distill.pub/2016/deconv-checkerboard/ # """ # self.block = nn.Sequential( # ConvRelu(in_channels, middle_channels), # nn.ConvTranspose2d(middle_channels, out_channels, kernel_size=4, stride=2, # padding=1), # nn.ReLU(inplace=True) # ) # else: # self.block = nn.Sequential( # nn.Upsample(scale_factor=2, mode='bilinear'), # ConvRelu(in_channels, middle_channels), # ConvRelu(middle_channels, out_channels), # ) # def forward(self, x): # return self.block(x) # class AlbuNet(nn.Module): # """ # UNet (https://arxiv.org/abs/1505.04597) with Resnet34(https://arxiv.org/abs/1512.03385) encoder # Proposed by Alexander Buslaev: https://www.linkedin.com/in/al-buslaev/ # """ # def __init__(self, num_classes=1, num_filters=32, pretrained=False, is_deconv=False): # """ # :param num_classes: # :param num_filters: # :param pretrained: # False - no pre-trained network is used # True - encoder is 
pre-trained with resnet34 # :is_deconv: # False: bilinear interpolation is used in decoder # True: deconvolution is used in decoder # """ # super().__init__() # self.num_classes = num_classes # self.pool = nn.MaxPool2d(2, 2) # self.encoder = torchvision.models.resnet34(pretrained=pretrained) # self.relu = nn.ReLU(inplace=True) # self.conv1 = nn.Sequential(self.encoder.conv1, # self.encoder.bn1, # self.encoder.relu, # self.pool) # self.conv2 = self.encoder.layer1 # self.conv3 = self.encoder.layer2 # self.conv4 = self.encoder.layer3 # self.conv5 = self.encoder.layer4 # self.center = DecoderBlockV2(512, num_filters * 8 * 2, num_filters * 8, is_deconv) # self.dec5 = DecoderBlockV2(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv) # self.dec4 = DecoderBlockV2(256 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv) # self.dec3 = DecoderBlockV2(128 + num_filters * 8, num_filters * 4 * 2, num_filters * 2, is_deconv) # self.dec2 = DecoderBlockV2(64 + num_filters * 2, num_filters * 2 * 2, num_filters * 2 * 2, is_deconv) # self.dec1 = DecoderBlockV2(num_filters * 2 * 2, num_filters * 2 * 2, num_filters, is_deconv) # self.dec0 = ConvRelu(num_filters, num_filters) # self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1) # def forward(self, x): # conv1 = self.conv1(x) # conv2 = self.conv2(conv1) # conv3 = self.conv3(conv2) # conv4 = self.conv4(conv3) # conv5 = self.conv5(conv4) # center = self.center(self.pool(conv5)) # dec5 = self.dec5(torch.cat([center, conv5], 1)) # dec4 = self.dec4(torch.cat([dec5, conv4], 1)) # dec3 = self.dec3(torch.cat([dec4, conv3], 1)) # dec2 = self.dec2(torch.cat([dec3, conv2], 1)) # dec1 = self.dec1(dec2) # dec0 = self.dec0(dec1) # if self.num_classes > 1: # x_out = F.log_softmax(self.final(dec0), dim=1) # else: # x_out = self.final(dec0) # return x_out # class UNet16(nn.Module): # def __init__(self, num_classes=1, num_filters=32, pretrained=False, is_deconv=False): # """ # :param num_classes: # :param num_filters: # :param pretrained: # False - no pre-trained network used # True - encoder pre-trained with VGG16 # :is_deconv: # False: bilinear interpolation is used in decoder # True: deconvolution is used in decoder # """ # super().__init__() # self.num_classes = num_classes # self.pool = nn.MaxPool2d(2, 2) # self.encoder = torchvision.models.vgg16(pretrained=pretrained).features # self.relu = nn.ReLU(inplace=True) # self.conv1 = nn.Sequential(self.encoder[0], # self.relu, # self.encoder[2], # self.relu) # self.conv2 = nn.Sequential(self.encoder[5], # self.relu, # self.encoder[7], # self.relu) # self.conv3 = nn.Sequential(self.encoder[10], # self.relu, # self.encoder[12], # self.relu, # self.encoder[14], # self.relu) # self.conv4 = nn.Sequential(self.encoder[17], # self.relu, # self.encoder[19], # self.relu, # self.encoder[21], # self.relu) # self.conv5 = nn.Sequential(self.encoder[24], # self.relu, # self.encoder[26], # self.relu, # self.encoder[28], # self.relu) # self.center = DecoderBlockV2(512, num_filters * 8 * 2, num_filters * 8, is_deconv) # self.dec5 = DecoderBlockV2(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv) # self.dec4 = DecoderBlockV2(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv) # self.dec3 = DecoderBlockV2(256 + num_filters * 8, num_filters * 4 * 2, num_filters * 2, is_deconv) # self.dec2 = DecoderBlockV2(128 + num_filters * 2, num_filters * 2 * 2, num_filters, is_deconv) # self.dec1 = ConvRelu(64 + num_filters, num_filters) # self.final = nn.Conv2d(num_filters, 
num_classes, kernel_size=1) # def forward(self, x): # conv1 = self.conv1(x) # conv2 = self.conv2(self.pool(conv1)) # conv3 = self.conv3(self.pool(conv2)) # conv4 = self.conv4(self.pool(conv3)) # conv5 = self.conv5(self.pool(conv4)) # center = self.center(self.pool(conv5)) # dec5 = self.dec5(torch.cat([center, conv5], 1)) # dec4 = self.dec4(torch.cat([dec5, conv4], 1)) # dec3 = self.dec3(torch.cat([dec4, conv3], 1)) # dec2 = self.dec2(torch.cat([dec3, conv2], 1)) # dec1 = self.dec1(torch.cat([dec2, conv1], 1)) # if self.num_classes > 1: # x_out = F.log_softmax(self.final(dec1), dim=1) # else: # x_out = self.final(dec1) # return x_out ###Output _____no_output_____ ###Markdown 指标计算 ###Code #Score the model and do a threshold optimization by the best IoU. # src: https://www.kaggle.com/aglotero/another-iou-metric def iou_metric(y_true_in, y_pred_in, print_table=False): labels = y_true_in y_pred = y_pred_in true_objects = 2 pred_objects = 2 # Jiaxin fin that if all zeros, then, the background is treated as object temp1 = np.histogram2d(labels.flatten(), y_pred.flatten(), bins=([0,0.5,1], [0,0.5, 1])) # temp1 = np.histogram2d(labels.flatten(), y_pred.flatten(), bins=(true_objects, pred_objects)) #print(temp1) intersection = temp1[0] #print("temp2 = ",temp1[1]) #print(intersection.shape) # print(intersection) # Compute areas (needed for finding the union between all objects) #print(np.histogram(labels, bins = true_objects)) area_true = np.histogram(labels,bins=[0,0.5,1])[0] #print("area_true = ",area_true) area_pred = np.histogram(y_pred, bins=[0,0.5,1])[0] area_true = np.expand_dims(area_true, -1) area_pred = np.expand_dims(area_pred, 0) # Compute union union = area_true + area_pred - intersection # Exclude background from the analysis intersection = intersection[1:,1:] intersection[intersection == 0] = 1e-9 union = union[1:,1:] union[union == 0] = 1e-9 # Compute the intersection over union iou = intersection / union # Precision helper function def precision_at(threshold, iou): matches = iou > threshold true_positives = np.sum(matches, axis=1) == 1 # Correct objects false_positives = np.sum(matches, axis=0) == 0 # Missed objects false_negatives = np.sum(matches, axis=1) == 0 # Extra objects tp, fp, fn = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives) return tp, fp, fn # Loop over IoU thresholds prec = [] if print_table: print("Thresh\tTP\tFP\tFN\tPrec.") for t in np.arange(0.5, 1.0, 0.05): tp, fp, fn = precision_at(t, iou) if (tp + fp + fn) > 0: p = tp / (tp + fp + fn) else: p = 0 if print_table: print("{:1.3f}\t{}\t{}\t{}\t{:1.3f}".format(t, tp, fp, fn, p)) prec.append(p) if print_table: print("AP\t-\t-\t-\t{:1.3f}".format(np.mean(prec))) return np.mean(prec) def iou_metric_batch(y_true_in, y_pred_in): y_pred_in = y_pred_in > 0.5 # added by sgx 20180728 batch_size = y_true_in.shape[0] metric = [] for batch in range(batch_size): value = iou_metric(y_true_in[batch], y_pred_in[batch]) metric.append(value) #print("this batch metric = ",metric) return np.mean(metric) def my_iou_metric(label, pred): ''' tf.py_func接收的是tensor,然后将其转化为numpy array送入func函数,最后再将func函数输出的numpy array转化为tensor返回。 ref: https://blog.csdn.net/tiankongtiankong01/article/details/80568311 ''' metric_value = tf.py_func(iou_metric_batch, [label, pred], tf.float64) return metric_value ###Output _____no_output_____ ###Markdown 准备训练数据并训练 ###Code #Data augmentation x_train2 = np.transpose(np.append(x_train, [np.fliplr(x) for x in x_train], axis=0),(0,3,1,2)).astype(np.float32) y_train2 = 
np.transpose(np.append(y_train, [np.fliplr(x) for x in y_train], axis=0),(0,3,1,2)).astype(np.float32) x_valid2 = np.transpose(x_valid,(0,3,1,2)).astype(np.float32) y_valid2 = np.transpose(y_valid,(0,3,1,2)).astype(np.float32) print(x_train2.shape) print(y_train2.shape) print(x_valid2.shape) print(y_valid2.shape) # !!!不要对valid进行数据增强 print(type(x_train2[0,0,0,0])) print(type(x_valid2[0,0,0,0])) # 自定义一个Pytorch的数据加载器,必须继承自torch.utils.data.Dataset # https://stackoverflow.com/questions/50052295/how-do-you-load-images-into-pytorch-dataloader class saltIDDataset(torch.utils.data.Dataset): def __init__(self,preprocessed_images,train=True, preprocessed_masks=None): """ Args: text_file(string): path to text file root_dir(string): directory with all train images """ self.train = train self.images = preprocessed_images if self.train: self.masks = preprocessed_masks def __len__(self): return len(self.images) def __getitem__(self, idx): image = self.images[idx] mask = None if self.train: mask = self.masks[idx] return (image, mask) salt_ID_dataset_train = saltIDDataset(x_train2, train=True, preprocessed_masks=y_train2) salt_ID_dataset_val = saltIDDataset(x_valid2, train=True, preprocessed_masks=y_valid2) batch_size = 32 train_loader = torch.utils.data.DataLoader(dataset=salt_ID_dataset_train, batch_size=batch_size, shuffle=True) val_loader = torch.utils.data.DataLoader(dataset=salt_ID_dataset_val, batch_size=batch_size, shuffle=False) print("Trainng samples:",train_loader.dataset.__len__()) print("Validation samples:",val_loader.dataset.__len__()) class ArgParser(object): def __init__(self): self.MaxEpoch=150 self.LR_Policy="MultiStage" self.n_gpus=1 self.load_pretrained_model=False self.pretarined_model_path="Results" self.device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu") self.start_save_epoch=30 self.save_step=20 import time import shutil def custom_weights_init(m): pass def save_checkpoint(state, is_best, path,args): save_path=os.path.join(path,'saved_Gpu_model_state.pt') torch.save(state, save_path) if is_best: shutil.copyfile(save_path, os.path.join(args.pretarined_model_path ,'best_Gpu_model_state.pt')) def LearningRateAdjust(LR_Policy,optimizer,cur_epoch,history_metirc): if LR_Policy=="MultiSatge": if cur_epoch==1: lr=0.001 elif cur_epoch<=9: lr=(cur_epoch)*0.001 elif cur_epoch==10: lr=0.01 elif cur_epoch==80: lr=0.003 elif cur_epoch==110: lr=0.001 elif cur_epoch==130: lr=0.0001 elif cur_epoch==140: lr=0.00001 else: return for param_group in optimizer.param_groups: param_group["lr"]=lr def train(model,train_loader,optimizer,criterion,args,epoch,class_weights): since=time.time() model.train() train_losses = [] for batch_idx, (images, masks) in enumerate(train_loader): images, masks = images.to(args.device), masks.to(args.device) images, masks = Variable(images), Variable(masks) logits = model(images) loss = criterion(logits, masks) train_losses.append(loss.data) #dice_loss = bioloss.dice_error(output, target) optimizer.zero_grad() loss.backward() optimizer.step() # if batch_idx%20==0: # now=time.time() # print("Loss:{},escaped time:{}".format(loss,now-since)) # since=now # #break now=time.time() mean_loss=np.mean(train_losses) print("Mean Train Loss:{},escaped time:{}".format(mean_loss,now-since)) return mean_loss def test(model,test_loader,criterion,args,epoch,class_weights): since=time.time() model.eval() test_losses = [] for batch_idx, (images, masks) in enumerate(test_loader): images, masks = images.to(args.device), masks.to(args.device) images, masks = Variable(images), 
Variable(masks) logits = model(images) loss = criterion(logits, masks) test_losses.append(loss.data) #dice_loss = bioloss.dice_error(output, target) now=time.time() mean_loss=np.mean(test_losses) print("Mean Val Loss:{},escaped time:{}".format(mean_loss,now-since)) return mean_loss train_losses_history=[] val_losses_history=[] def main(): args=ArgParser() torch.cuda.manual_seed_all(4200)# 设置全局固定随机种子方便复现 np.random.seed(133700) model=U_net_with_residual(16) # 16 is the basic filter_nums, for controlling the width of U-Net # gpu_ids=range(args.n_gpus) # model=nn.parallel.DataParallel(model,device_ids=gpu_ids) if args.load_pretrained_model==True: state=torch.load(args.pretarined_model_path) model.load_state_dict(state) else: model.apply(custom_weights_init) # deploy model to GPU or CPU model.to(args.device) # -------------------------------------------------------------------------------- # #optimizer = torch.optim.Adam(model.parameters(), lr=0.001) optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4) #criterion = nn.CrossEntropyLoss() criterion = nn.BCEWithLogitsLoss() # -------------------------------------------------------------------------------- # class_weights=[0.5,0.5] best_metric=100 # 注意指标的大小与性能的关系 metirc_history=[] for epoch in range(1,args.MaxEpoch): LearningRateAdjust(args.LR_Policy,optimizer,epoch,metirc_history) mean_train_loss_=train(model,train_loader,optimizer,criterion,args,epoch,class_weights) train_losses_history.append(mean_train_loss_) mean_val_loss_=test(model,val_loader,criterion,args,epoch,class_weights) val_losses_history.append(mean_val_loss_) if epoch>=args.start_save_epoch : if mean_val_loss_<best_metric: best_metric=mean_val_loss_ is_best=True save_checkpoint(model.state_dict(), is_best, args.pretarined_model_path,args) else: is_best=False if epoch % args.save_step ==0: save_path=os.path.join(args.pretarined_model_path,str(epoch)) if not os.path.exists(save_path): os.mkdir(save_path) save_checkpoint(model.state_dict(), is_best, save_path, args) torch.save(model.state_dict(),"Results/mm.pt") return model model=main() # 模型运行时间和申请到的GPU新旧有关 第一次运行一个epoch只需要21s torch.save(model.state_dict(),"Results/mm.pt") ###Output _____no_output_____ ###Markdown 加载最优模型 ###Code import os model=U_net_with_residual(16) state = torch.load('Results/mm.pt')#,map_location={'cuda:0': 'cpu'}) # model.load_state_dict(state) #,map_location='cpu' model=model.cuda() train_loss_series = pd.Series(train_losses_history) val_loss_series = pd.Series(val_losses_history) train_loss_series.plot(label="train") val_loss_series.plot(label="validation") plt.legend() ###Output _____no_output_____ ###Markdown ![](http://op4a94iq8.bkt.clouddn.com/18-9-3/31054653.jpg) ###Code mask_pred_valid = [] mask_truth_valid=[] for images, masks in train_loader: images = Variable(images.cuda()) # images = Variable(images) y_preds = model(images) for i, _ in enumerate(images): y_pred = y_preds[i] y_pred = y_pred.cpu().data.numpy() mask_pred_valid.append(y_pred) mask_truth_valid.append(masks[i].numpy()) mask_pred_valid=np.array(mask_pred_valid) mask_truth_valid=np.array(mask_truth_valid) print(mask_truth_valid.shape) print(mask_truth_valid.shape) ## Scoring for last model thresholds = np.linspace(0.3, 0.7, 31) ious = np.array([iou_metric_batch(mask_truth_valid, np.int32(mask_pred_valid > threshold)) for threshold in tqdm_notebook(thresholds)]) threshold_best_index = np.argmax(ious) iou_best = ious[threshold_best_index] threshold_best = thresholds[threshold_best_index] plt.plot(thresholds, 
ious) plt.plot(threshold_best, iou_best, "xr", label="Best threshold") plt.xlabel("Threshold") plt.ylabel("IoU") plt.title("Threshold vs IoU ({}, {})".format(threshold_best, iou_best)) plt.legend() ###Output _____no_output_____ ###Markdown 准备测试数据并测试 ###Code """ used for converting the decoded image to rle mask Fast compared to previous one """ def rle_encode(im): ''' im: numpy array, 1 - mask, 0 - background Returns run length as string formated ''' pixels = im.flatten(order = 'F') pixels = np.concatenate([[0], pixels, [0]]) runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return ' '.join(str(x) for x in runs) x_test = np.array([(np.array(load_img("TestData/images/{}.png".format(idx), grayscale = True))) / 255 for idx in tqdm_notebook(test_df.index)]).reshape(-1, img_size_target, img_size_target, 1) x_test.shape x_test2=np.transpose(x_test,(0,3,1,2)).astype(np.float32) print(x_test2.shape) y_test2=np.zeros_like(x_test2).astype(np.float32) salt_ID_dataset_test = saltIDDataset(x_test2, train=True, preprocessed_masks=y_test2) batch_size = 32 test_loader = torch.utils.data.DataLoader(dataset=salt_ID_dataset_test, batch_size=batch_size, shuffle=False) x_test_Sym=np.transpose(np.array([np.fliplr(x) for x in x_test]),(0,3,1,2)).astype(np.float32) print(x_test_Sym.shape) y_test_Sym=np.zeros_like(x_test_Sym).astype(np.float32) salt_ID_dataset_test_Sym = saltIDDataset(x_test_Sym, train=True, preprocessed_masks=y_test_Sym) batch_size = 32 test_loader_Sym = torch.utils.data.DataLoader(dataset=salt_ID_dataset_test_Sym, batch_size=batch_size, shuffle=False) mask_pred_test_Sym = [] for images, masks in test_loader_Sym: images = Variable(images.cuda()) # images = Variable(images) y_preds = model(images) for i, _ in enumerate(images): y_pred = y_preds[i] y_pred = y_pred.cpu().data.numpy() mask_pred_test_Sym.append(y_pred[:,:,::-1]) mask_pred_test_Sym=np.array(mask_pred_test_Sym) print(mask_pred_test_Sym.shape) mask_pred_test = [] for images, masks in test_loader: images = Variable(images.cuda()) # images = Variable(images) y_preds = model(images) for i, _ in enumerate(images): y_pred = y_preds[i] y_pred = y_pred.cpu().data.numpy() mask_pred_test.append(y_pred) mask_pred_test=np.array(mask_pred_test) print(mask_pred_test.shape) mask_pred_test=0.5*(mask_pred_test+mask_pred_test_Sym) import time t1 = time.time() pred_dict = {idx: rle_encode(np.round(downsample(mask_pred_test[i]) > threshold_best)) for i, idx in enumerate(tqdm_notebook(test_df.index.values))} t2 = time.time() print(f"Usedtime = {t2-t1} s") sub = pd.DataFrame.from_dict(pred_dict,orient='index') sub.index.names = ['id'] sub.columns = ['rle_mask'] sub.to_csv('submission_solution5.csv') ###Output _____no_output_____
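###Markdown The `rle_encode` helper above is the only step between the predicted masks and the submission file, so it is worth sanity-checking in isolation. The cell below is an illustrative addition, not part of the original notebook: it sketches a decoder that assumes the same 1-indexed, column-major (`order='F'`) run-length convention as `rle_encode`, then round-trips a toy mask. The names `rle_decode` and `toy_mask` are new and used only for this example.
###Code
# Round-trip check for the run-length encoding (illustrative sketch only).
import numpy as np

def rle_decode(rle, shape):
    """Rebuild a binary mask from a run-length string produced by rle_encode."""
    mask = np.zeros(shape[0] * shape[1], dtype=np.uint8)
    if rle.strip():
        runs = np.asarray(rle.split(), dtype=int)
        starts, lengths = runs[0::2] - 1, runs[1::2]   # runs are 1-indexed (start, length) pairs
        for start, length in zip(starts, lengths):
            mask[start:start + length] = 1
    return mask.reshape(shape, order='F')              # same column-major order as the encoder

# Encode then decode a small random mask; True means the two functions are consistent.
toy_mask = (np.random.rand(101, 101) > 0.5).astype(np.uint8)
print(np.array_equal(rle_decode(rle_encode(toy_mask), toy_mask.shape), toy_mask))
###Output _____no_output_____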
Recomendacoes.ipynb
###Markdown Carregando os dados ###Code recomendacoes=pd.read_csv('Dados/recomendacao_2010_2020.csv', sep = ';', low_memory=False) recomendacoes print(recomendacoes.columns.values) recomendacoes.info() recomendacoes.duplicated().sum() ###Output _____no_output_____ ###Markdown Removendo a coluna recomendacao_dia_feedback e a linha cujo recomendacao_conteudo está vazio ###Code recomendacoes_filtrada = recomendacoes.drop(['recomendacao_dia_feedback'], axis = 1) recomendacoes_filtrada = recomendacoes_filtrada[recomendacoes_filtrada['recomendacao_conteudo'].notna()] recomendacoes_filtrada ###Output _____no_output_____ ###Markdown Observando quais palavras são mais relevantes no conteúdo das recomendações das ocorrências ###Code def nuvem_palavras(coluna_dataframe): palavras = '' stopwords= set(STOPWORDS) # Lista de palavras que não serão contabilizadas novas_palavras = ["de", "da", "do", "para", "dos", "das", "em", "por", "registro", "ltda", "fim", "realizar", "um", "através", "na", "no", "ou", "voo", "aeronave", "aeronaves"] with open("stop_words_pt.txt", 'r') as f: [novas_palavras.append(palavras) for linha in f for palavras in linha.split()] nova_stopwords = stopwords.union(novas_palavras) for val in coluna_dataframe: # converter cada linha para string val = str(val) # splitar cada linha em uma lista de palavras (tokens) tokens = val.split() # Converter cada token em letra minuscula for i in range(len(tokens)): tokens[i] = tokens[i].lower() palavras += " ".join(tokens)+" " # Formando a nunvem de palavras wordcloud = WordCloud(width = 500, height = 500, background_color ='white', stopwords = nova_stopwords, min_font_size = 10).generate(palavras) # Plotando a imagem de nuvem de palavras plt.figure(figsize = (8, 8), facecolor = None) plt.imshow(wordcloud) plt.axis("off") plt.tight_layout(pad = 0) plt.show() nuvem_palavras(recomendacoes_filtrada['recomendacao_conteudo']) ###Output _____no_output_____
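###Markdown The word cloud shows relative importance visually, but exact counts are hard to read off it. The cell below is an illustrative addition (not part of the original notebook): it rebuilds roughly the same stopword set used by `nuvem_palavras` — the WordCloud defaults plus the custom Portuguese list from `stop_words_pt.txt` and the extra domain words — and prints the most frequent remaining tokens in `recomendacao_conteudo` as plain counts. The function name and arguments here are new, introduced only for this sketch.
###Code
from collections import Counter
from wordcloud import STOPWORDS

def count_top_words(column, top_n=20, stopword_file="stop_words_pt.txt"):
    # Rebuild the stopword set used above: WordCloud defaults + custom Portuguese list.
    stopwords = set(STOPWORDS)
    extras = ["de", "da", "do", "para", "dos", "das", "em", "por", "registro", "ltda",
              "fim", "realizar", "um", "através", "na", "no", "ou", "voo", "aeronave", "aeronaves"]
    with open(stopword_file) as f:
        extras.extend(word for line in f for word in line.split())
    stopwords = stopwords.union(extras)

    # Count lowercase tokens that are not in the stopword set.
    counter = Counter()
    for val in column:
        for token in str(val).lower().split():
            if token not in stopwords:
                counter[token] += 1
    return counter.most_common(top_n)

count_top_words(recomendacoes_filtrada['recomendacao_conteudo'])
###Output _____no_output_____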
examples/ipynb/highstock/lazy-loading.ipynb
###Markdown Highstock Demos=========1.7 million points with async loading: http://www.highcharts.com/stock/demo/lazy-loading------------------------------------------------------------------------------------This example generates a candlestick chart, which updates (async loading) when a different time period is selectedby the navigation bar due to the large dataset.Due to the update, this chart requires JS function in the beginning and xAxis.events options. ###Code from highcharts import Highstock H = Highstock() data_url = 'http://www.highcharts.com/samples/data/from-sql.php?callback=?' H.add_data_from_jsonp(data_url, 'json_data', 'candlestick', dataGrouping = {'enabled': False}) script = """json_data = [].concat(json_data, [[Date.UTC(2011, 9, 14, 19, 59), null, null, null, null]]);""" H.add_JSscript(script, 'head') H.add_navi_series_from_jsonp() # not really useful, but it shows in highstock demo options = { 'chart' : { 'zoomType': 'x' }, 'navigator' : { 'adaptToUpdatedData': False, }, 'scrollbar': { 'liveRedraw': False }, 'title': { 'text': 'AAPL history by the minute from 1998 to 2011' }, 'subtitle': { 'text': 'Displaying 1.7 million data points in Highcharts Stock by async server loading' }, 'rangeSelector' : { 'buttons': [{ 'type': 'hour', 'count': 1, 'text': '1h' }, { 'type': 'day', 'count': 1, 'text': '1d' }, { 'type': 'month', 'count': 1, 'text': '1m' }, { 'type': 'year', 'count': 1, 'text': '1y' }, { 'type': 'all', 'text': 'All' }], 'inputEnabled': False, # it supports only days 'selected' : 4 # all }, 'xAxis' : { 'events' : { 'afterSetExtremes' : """function afterSetExtremes(e) { var chart = $('#container').highcharts(); chart.showLoading('Loading data from server...'); $.getJSON('http://www.highcharts.com/samples/data/from-sql.php?start=' + Math.round(e.min) + '&end=' + Math.round(e.max) + '&callback=?', function (data) { chart.series[0].setData(data); chart.hideLoading(); }); }""" }, 'minRange': 3600 * 1000 # one hour }, 'yAxis': { 'floor': 0 }, } H.set_dict_options(options) H ###Output _____no_output_____
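###Markdown The chart above renders inline, which only works while the notebook front-end can reach the Highcharts CDN and the JSONP endpoint. As an extra, illustrative step (not part of the original demo), the cell below writes the configured chart out as a standalone HTML page; it assumes the installed `python-highcharts` version exposes the `save_file` method described in the project README, and the output filename is arbitrary.
###Code
# Save the configured Highstock chart as a standalone HTML page; the asynchronous
# JSONP loading still happens client-side when the page is opened in a browser.
H.save_file('lazy_loading_demo')   # writes lazy_loading_demo.html to the working directory
###Output _____no_output_____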
PyTorch Tutorials/Learning_PyTorch.ipynb
###Markdown Converting Numpy to PyTorch Tensors ###Code tensors = torch.from_numpy(arr) tensors tensors[:2] tensors.dtype tensors[1:4] ## Disadvantage of from_numpy. The array and tensor use the same memory location tensors[3] = 100 tensors arr # Prevent this by using torch.tensor tensor_arr = torch.tensor(arr) tensor_arr tensor_arr[3] = 20 print(tensor_arr) print(arr) # Zeroes and Ones torch.zeros(2, 3, dtype=torch.float64) torch.ones(2, 3, dtype=torch.float64) a = torch.tensor(np.arange(0, 15).reshape(5,3)) a[:, 0:2] ###Output _____no_output_____ ###Markdown Arithmetic Operation ###Code a = torch.tensor([3, 4, 5], dtype=torch.float) b = torch.tensor([4, 5, 6], dtype=torch.float) print(a + b) torch.add(a, b) c = torch.zeros(3) torch.add(a, b, out=c) c # Some more operations a = torch.tensor([3, 4, 5], dtype=torch.float) b = torch.tensor([4, 5, 6], dtype=torch.float) torch.add(a, b).sum() ###Output _____no_output_____ ###Markdown Dot Products and Multiplication Operations ###Code x = torch.tensor([3, 4, 5], dtype=torch.float) y = torch.tensor([4, 5, 6], dtype=torch.float) # 3*4, 4*5, 6*5 x.mul(y) # 3*4 + 4*5 + 6*5 x.dot(y) ###Output _____no_output_____ ###Markdown Matrix Multiplication ###Code x = torch.tensor([[1, 4, 2], [1, 5, 5]], dtype=torch.float) y = torch.tensor([[5, 7], [8, 6], [9, 11]], dtype=torch.float) torch.matmul(x, y) torch.mm(x, y) x@y ###Output _____no_output_____
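###Markdown `torch.mm`, `torch.matmul` and the `@` operator all give the same result for the 2-D case above, but they are not interchangeable in general: `torch.mm` accepts strictly 2-D tensors, while `torch.matmul` (and `@`) also broadcast over leading batch dimensions. The cell below is a small illustrative check of that difference, added here as an example rather than taken from the original tutorial.
###Code
import torch

x = torch.randn(4, 2, 3)    # a batch of four 2x3 matrices
y = torch.randn(4, 3, 5)    # a batch of four 3x5 matrices

print(torch.matmul(x, y).shape)   # torch.Size([4, 2, 5]) -- batched matrix product

try:
    torch.mm(x, y)                # torch.mm is strictly 2-D, so this raises a RuntimeError
except RuntimeError as err:
    print("torch.mm on 3-D tensors failed as expected:", err)
###Output _____no_output_____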
Movie recommendation/[MovieLens] Rating.ipynb
###Markdown ``` This is formatted as code``` Load DatasetMovieLens 1M dataset (https://grouplens.org/datasets/movielens/) ###Code import os from google.colab import drive drive.mount('/content/gdrive') my_drive = '/content/gdrive/MyDrive' path = os.path.join(my_drive, "AISIA/Winter School 2020") data_dir = os.path.join(path, "data") import pandas as pd movies_df = pd.read_csv("/content/gdrive/MyDrive/AISIA/Winter School 2020/data/movies.dat", sep="::", names=['movie_id', 'title', 'genre']) movies_df.head() ratings_df = pd.read_csv("/content/gdrive/MyDrive/AISIA/Winter School 2020/data/ratings.dat", sep="::", names=['user_id', 'movie_id', 'rating', 'timestamp']) ratings_df.head() users_df = pd.read_csv("/content/gdrive/MyDrive/AISIA/Winter School 2020/data/users.dat", sep="::", names=['user_id', 'gender', 'age', 'occupation', 'zipcode']) users_df.head() ###Output /usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:3: ParserWarning: Falling back to the 'python' engine because the 'c' engine does not support regex separators (separators > 1 char and different from '\s+' are interpreted as regex); you can avoid this warning by specifying engine='python'. This is separate from the ipykernel package so we can avoid doing imports until ###Markdown Merge dataset ###Code #Merge the ratings and users with movieID and UserID ratings_user = pd.merge(ratings_df, users_df, on=['user_id']) ratings_movie = pd.merge(ratings_df, movies_df, on=['movie_id']) master_data = pd.merge(ratings_user,ratings_movie, on=['user_id', 'movie_id', 'rating'])[['movie_id', 'title', 'user_id', 'age', 'gender', 'occupation', "rating"]] master_data.head() ###Output _____no_output_____ ###Markdown Data Analysis ###Code #User age distribution import matplotlib.pyplot as plt users_df['age'].hist(bins=50) plt.xlabel('age') plt.ylabel('population') plt.show #User rating of the movie “Toy Story” res = master_data[master_data.title == "Toy Story (1995)"] plt.plot(res.groupby("age")["movie_id"].count(),'--bo') res.groupby("age")["movie_id"].count() #Top 25 movies by viewership rating res = master_data.groupby("title").size().sort_values(ascending=False)[:25] plt.ylabel("title") plt.xlabel("viewership count") res.plot(kind="barh") #Find the ratings for all the movies reviewed by for a particular user of user id = 2696 res = master_data[master_data["user_id"] == 2696] plt.scatter(y=res["title"], x=res["rating"]) #Feature Engineering val = movies_df["genre"].str.split("|") res_col = [] for v in val: for i in v: if i not in res_col: res_col.append(i) res_col.append("gender") res_col.append("age") res_col.append("rating") import numpy as np # Join master data with movies dataframe res = master_data.merge(movies_df, on = ['movie_id'], how="left")[["genre", "rating", "gender", "age"]] # Create main df to input into the model df = pd.DataFrame(columns=res_col) df[["genre", "rating", "gender", "age"]] = res[["genre", "rating", "gender", "age"]] df.fillna(0, inplace=True) def expand_genres(row): for genre in row["genre"].split("|"): row[genre] = 1 return row df = df.apply(expand_genres, axis=1) del df["genre"] df.head() ###Output _____no_output_____ ###Markdown Model ###Code len(df) from sklearn import datasets from sklearn.metrics import mean_absolute_error from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder X = df[df.columns[~df.columns.isin(["rating"])]] y = df["rating"] # dividing X, y into train and test data X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 0) 
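# (Illustrative aside, not part of the original notebook.) Fitting a separate LabelEncoder
# on the train and test splits, as done just below, can assign different integer codes to
# the same category whenever the two splits contain different value sets. A safer pattern,
# sketched here on throwaway copies so the original cells keep running unchanged, is to fit
# one encoder on the training data and reuse it for the test data:
_gender_encoder = LabelEncoder().fit(X_train["gender"].astype("str"))
_X_train_demo = X_train.copy()
_X_test_demo = X_test.copy()
_X_train_demo["gender"] = _gender_encoder.transform(_X_train_demo["gender"].astype("str"))
_X_test_demo["gender"] = _gender_encoder.transform(_X_test_demo["gender"].astype("str"))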
number = LabelEncoder() X_train["gender"] = number.fit_transform(X_train["gender"].astype("str")) X_test["gender"] = number.fit_transform(X_test["gender"].astype("str")) y_train = number.fit_transform(y_train.astype("int")) y_test = number.fit_transform(y_test.astype("int")) # Linear model from sklearn.linear_model import LinearRegression linear_reg = LinearRegression(n_jobs=-1) linear_reg.fit(X_train, y_train) predictions = linear_reg.predict(X_test) mean_absolute_error(y_test, predictions) from sklearn.ensemble import RandomForestRegressor rf_reg = RandomForestRegressor(n_jobs=-1) rf_reg.fit(X_train, y_train) predictions = rf_reg.predict(X_test) mean_absolute_error(y_test, predictions) from sklearn.neural_network import MLPRegressor mlp_reg = MLPRegressor(solver='adam', activation="relu", # alpha=1e-5, learning_rate="adaptive", learning_rate_init=0.001, hidden_layer_sizes=(100), max_iter=100, tol=0.001, random_state=1, verbose=True) mlp_reg.fit(X_train, y_train) predictions = mlp_reg.predict(X_test) mean_absolute_error(y_test, predictions) ###Output _____no_output_____
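###Markdown Each regressor above is scored with mean absolute error, but a raw MAE value is hard to judge on its own. The cell below is an illustrative addition (not part of the original notebook): it scores a trivial baseline that always predicts the mean label-encoded training rating, which the linear, random-forest and MLP models should all beat to justify their extra cost.
###Code
import numpy as np
from sklearn.metrics import mean_absolute_error

# Constant-prediction baseline: always predict the average (label-encoded) training rating.
baseline_predictions = np.full_like(y_test, fill_value=y_train.mean(), dtype=np.float64)
print("Baseline MAE (always predict the mean rating):",
      mean_absolute_error(y_test, baseline_predictions))
###Output _____no_output_____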
notebooks/CalculateObservationsThatWentIntoWaterBodyIdentification.ipynb
###Markdown Calculate the number of observations that went into detecting each waterbody
* **Compatibility:** Notebook currently compatible with the `NCI` environment only. You can make this notebook `Sandbox` compatible by pointing it to the DEA Waterbodies timeseries located in AWS.
* **Products used:** None.
* **Prerequisites:** This notebook explores the individual waterbody timeseries csvs contained within the DEA Waterbodies dataset. It has been designed with that very specific purpose in mind, and is not intended as a general analysis notebook.

Description: This notebook loops through all of the individual waterbody timeseries produced within DEA Waterbodies, and generates statistics on the number of observations within each of the individual records.
1. Load the required python modules
2. Set up the directory where the timeseries data are all located
3. Glob through that directory to get a list of all the files to loop through
4. Loop through each file and make a note of its length
5. Calculate length statistics
***
Getting started: To run this analysis, run all the cells in the notebook, starting with the "Load packages" cell.
Load packages: Import Python packages that are used for the analysis.
###Code %matplotlib inline import matplotlib.pyplot as plt import xarray as xr import pandas as pd import glob
###Output Populating the interactive namespace from numpy and matplotlib
###Markdown Analysis parameters
* `TimeseriesDir`: Folder where the DEA Waterbodies timeseries are saved
* `AnalysisStartDate`: e.g. `'1985-01-01'`. Date to start counting observations from. The dataset begins in 1987. If you want to select a shorter date range over which to count observations, set this date to your custom range.
* `AnalysisEndDate`: e.g. `'2019-01-01'`. Final date to finish counting observations. The dataset is being continually updated. If you want to select a shorter date range over which to count observations, set this date to your custom range.
###Code TimeseriesDir = '/g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid' AnalysisStartDate = '1985-01-01' AnalysisEndDate = '2019-01-01' ###Output _____no_output_____ ###Markdown Get a list of all of the csv files ###Code CSVFiles = glob.glob(f'{TimeseriesDir}/**/*.csv', recursive=True) ###Output _____no_output_____ ###Markdown Open each file, then work out how many observations occur between the observation period ###Code AllObs = [] for FileName in CSVFiles: try: TimeHistory = pd.read_csv(FileName) TimeHistory['Observation Date'] = pd.to_datetime(TimeHistory['Observation Date']) NumObs = len(TimeHistory[(TimeHistory['Observation Date'] > AnalysisStartDate) & (TimeHistory['Observation Date'] < AnalysisEndDate)]) if NumObs < 50: print(FileName) AllObs.append(NumObs) except: print(FileName +' did not work') ###Output /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/rhvj/rhvj0znm9.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/rhg9/rhg9rbvhc.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/r7cd/r7cd57z1h.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/r1r1/r1r1hjuyv.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/r764/r7648hec6.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/r30n/r30nf45jq.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/r5mw/r5mwkrvkb.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/rk22/rk22bz0n6.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/r6up/r6upjhtyq.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/r1cv/r1cvw9450.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/r5er/r5er1jbfq.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/rh38/rh38exyjw.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/r7g5/r7g5z931y.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/r1wk/r1wk02791.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/qvut/qvut32ubz.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/r280/r280t15z8.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/qvzf/qvzfjh7uf.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/qv6p/qv6p0jr15.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/r5ec/r5eczv3vx.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/r6sq/r6sq7bke5.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/r1xr/r1xr43j7d.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/r4z9/r4z9h4k55.csv did not work 
/g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/r74d/r74dznf97.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/rjn4/rjn4r6nh3.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/r1k2/r1k2rccu9.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/rj2h/rj2httbdp.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/r5u3/r5u3juecs.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/rhut/rhute581e.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/r1qz/r1qz238cv.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/rhrz/rhrzd1f33.csv did not work /g/data/r78/cek156/dea-notebooks/Scientific_workflows/DEAWaterbodies/timeseries_aus_uid/r38p/r38pwsvwh.csv did not work ###Markdown Calculate some statistics on observation lengthYou can edit these cells to generate different length statistics. ###Code AllObs.sort() AllObsNP = np.array(AllObs) plt.hist(AllObsNP, bins=20) plt.xlabel(f'Number of Observations') plt.title(f'Number of Observations between {AnalysisStartDate} and {AnalysisEndDate} \n' 'for individual DEA Waterbodies') ###Output _____no_output_____ ###Markdown Interrogate the length some moreYou can change the statistic here depending on what you're interested in. ###Code AllObsNP.min() ###Output _____no_output_____
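###Markdown The histogram and the minimum give a first look, but the distribution is easier to judge with a few more numbers. The cell below is an illustrative addition in the same spirit as the note above about changing the statistic: it summarises `AllObsNP` with the median and selected percentiles, plus a count of waterbodies with fewer than 50 observations, mirroring the threshold printed during the loop above.
###Code
import numpy as np

print("Number of waterbody timeseries counted:", AllObsNP.size)
print("Median observations per waterbody:", np.median(AllObsNP))
print("5th / 95th percentiles:", np.percentile(AllObsNP, [5, 95]))
print("Waterbodies with fewer than 50 observations:", int((AllObsNP < 50).sum()))
###Output _____no_output_____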