# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import cv2

# Load a color image in grayscale
img = cv2.imread('images/patrick.jpg', 0)

# Display the loaded image
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()

# Display the image in a resizable named window
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()

cv2.imwrite('images/patric_gray.jpg', img)

# Clone the gray patrick image with a keyboard command
img = cv2.imread('images/patric_gray.jpg', 0)
cv2.imshow('image', img)
k = cv2.waitKey(0)
if k == 27:           # wait for ESC key to exit
    cv2.destroyAllWindows()
elif k == ord('s'):   # wait for 's' key to save and exit
    cv2.imwrite('images/patric_gray_clone.jpg', img)
    cv2.destroyAllWindows()

# Use Matplotlib
from matplotlib import pyplot as plt

# Read the image with OpenCV and display it with matplotlib
img = cv2.imread('images/patrick.jpg', 0)
plt.imshow(img, cmap='gray', interpolation='bicubic')
plt.xticks([]), plt.yticks([])  # hide tick values on the X and Y axes
plt.show()
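# A small companion sketch (my addition, not part of the original notebook): when a *colour*
# image loaded with OpenCV is shown through matplotlib, the channels usually need to be
# reordered, because `cv2.imread` returns BGR while matplotlib expects RGB. The path reuses
# the sample image from above.

import cv2
from matplotlib import pyplot as plt

img_bgr = cv2.imread('images/patrick.jpg')           # no flag -> load in colour (BGR order)
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)   # reorder channels for matplotlib
plt.imshow(img_rgb)
plt.xticks([]), plt.yticks([])                       # hide tick values on the X and Y axes
plt.show()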
Samples/BasicImages.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.6 64-bit (''fastai2'': conda)'
#     language: python
#     name: python37664bitfastai2condaf3e9781124be45a78083b472977e8c5c
# ---

# # Paper Implementations
#
# > List of papers implemented using fastai2
# - [CAM](https://kshitij09.github.io/fast_impl/visualize.cam)
# - [Grad-CAM](https://kshitij09.github.io/fast_impl/visualize.grad_cam)
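# A rough, hedged sketch of the CAM idea listed above, written in plain PyTorch rather than the
# fast_impl/fastai2 API: the class activation map is the classifier's weight vector for one class
# applied as a channel-wise weighted sum over the final convolutional feature maps. The toy model,
# input shape, and class count below are illustrative assumptions, not the repository's code.

import torch

conv = torch.nn.Conv2d(3, 16, kernel_size=3, padding=1)   # toy feature extractor
fc = torch.nn.Linear(16, 10)                              # classifier after global average pooling

x = torch.randn(1, 3, 32, 32)                             # dummy image batch
feats = torch.relu(conv(x))                               # (1, 16, 32, 32) feature maps
logits = fc(feats.mean(dim=(2, 3)))                       # GAP over H, W, then classify
cls = logits.argmax(dim=1).item()                         # class to explain

# Weight each feature map by that class's FC weight and sum over channels.
cam = torch.einsum('c,chw->hw', fc.weight[cls], feats[0])
cam = (cam - cam.min()) / (cam.max() - cam.min() + 1e-8)  # normalise to [0, 1] for display
print(cam.shape)                                          # torch.Size([32, 32])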
nbs/paper_impl.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: infinity mirror
#     language: python
#     name: infinity-mirror
# ---

import netlsd as net
import networkx as nx
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import decomposition
from sklearn.manifold import TSNE

# +
import sys
import os

sys.path.extend(['./..'])  # have to add the project path manually to the Python path
os.chdir('./..')
# -

from src.utils import load_pickle
from src.Tree import TreeNode

# %matplotlib inline
plt.rcParams['figure.figsize'] = (10, 7)


def get_graph_vec(g: nx.Graph, kernel: str = 'heat', dim: int = 250, eigenvalues: int = 20) -> np.ndarray:
    return net.netlsd(g, kernel=kernel, timescales=np.logspace(-2, 2, dim), eigenvalues=eigenvalues)


def compare_graphs(g1: nx.Graph, g2: nx.Graph, kernel: str = 'heat', dim: int = 250, eigenvalues: int = 20) -> float:
    g_vec1 = get_graph_vec(g=g1, kernel=kernel, dim=dim, eigenvalues=eigenvalues)
    g_vec2 = get_graph_vec(g=g2, kernel=kernel, dim=dim, eigenvalues=eigenvalues)
    return net.compare(g_vec1, g_vec2)


# ## Let's try out some plotting

dim = 250
g_orig = nx.karate_club_graph()
random_graphs = [nx.configuration_model(deg_sequence=[d for n, d in g_orig.degree()], create_using=nx.Graph)
                 for _ in range(10)]

rows = {'name': ['orig'] + [f'rand_{i+1}' for i in range(len(random_graphs))],
        'graph': [g_orig] + random_graphs}
df = pd.DataFrame(rows)

vecs = np.zeros((1 + len(random_graphs), dim))
vecs[0, :] = get_graph_vec(g_orig, dim=dim)
for i, random_graph in enumerate(random_graphs):
    vecs[i+1, :] = get_graph_vec(random_graph, dim=dim)

df = pd.concat([df, pd.DataFrame(vecs)], axis=1)

pca = decomposition.PCA(n_components=2)
graph_pca = pca.fit_transform(df.iloc[:, 2:])

# +
plt.title('PCA')
sns.scatterplot(x=graph_pca[:, 0], y=graph_pca[:, 1]);

ax = plt.gca()
for i in range(graph_pca.shape[0]):
    x, y = graph_pca[i, :]
    ax.text(x + .01, y, str(i))
# -

tsne = TSNE(n_components=2)
graph_tsne = tsne.fit_transform(df.iloc[:, 2:])

# +
plt.title('tSNE')
sns.scatterplot(x=graph_tsne[:, 0], y=graph_tsne[:, 1]);

ax = plt.gca()
for i in range(graph_tsne.shape[0]):
    x, y = graph_tsne[i, :]
    ax.text(x + 0.015, y + 0.75, str(i))
# -

# ## Let's do the infinity mirror stuff

tsne_2 = TSNE(n_components=2)
pca_2 = decomposition.PCA(n_components=2)
tsne_1 = TSNE(n_components=1)
pca_1 = decomposition.PCA(n_components=1)


def get_row(root, cols, name, model):
    for tnode in [root] + list(root.descendants):
        row = {'name': name, 'level': tnode.depth, 'model': model}
        for i, x in enumerate(get_graph_vec(tnode.graph, dim=dim)):
            row[f'v{i}'] = x
        for col in cols[-4:]:
            row[col] = None
        yield row


# +
data_path = '/data/dgonza26'
name = 'clique-ring-500-4'
model = 'CNRG'
dim = 250

cols = ['name', 'model', 'level']
cols.extend([f'v{i}' for i in range(dim)])
cols.extend(['pca_x', 'pca_y', 'tsne_x', 'tsne_y'])

rows = {col: [] for col in cols}
T = 20
for trial in range(1, T+1):
    pickle_path = f'{data_path}/{name}/{model}/fast_20_{trial}.pkl.gz'
    print('reading', pickle_path)
    root = load_pickle(pickle_path)
    for row in get_row(root, cols, name, model):
        for key, val in row.items():
            rows[key].append(val)

df = pd.DataFrame(rows)
# -

df.to_csv(f'./analysis/csv/{name}-{model}-{T}-embed.csv', index=False)

pwd

df.shape

df.head()

np.round(df.iloc[:, 3:-6].to_numpy().std(axis=0), 3)

# ## A few choices for PCA/t-SNE
# * Do we compute the stats for all the data at once? Maybe.
# * Should we segment by each generation? Probably not.

# ### Global PCA/t-SNE

df.head()

df[['pca_x', 'pca_y']] = pca_2.fit_transform(df.iloc[:, 3:-6])
df[['tsne_x', 'tsne_y']] = tsne_2.fit_transform(df.iloc[:, 3:-6])
df['pca_1'] = pca_1.fit_transform(df.iloc[:, 3:-6]); df['tsne_1'] = tsne_1.fit_transform(df.iloc[:, 3:-6])

df.head()

# make level an int here; the filtered copy below converts it to a string for plotting
df['level'] = df['level'].apply(int)
filtered_df = df[df.level < 6]
filtered_df['level'] = filtered_df.level.apply(str)

sns.scatterplot(x='tsne_x', y='tsne_y', marker='o', hue='level', data=filtered_df);

sns.catplot(x='level', y='tsne_1', hue='level', data=filtered_df);

sns.catplot(x='level', y='pca_1', hue='level', data=filtered_df);

sns.scatterplot(x='tsne_x', y='tsne_y', data=df[df.level == 0]);

sns.scatterplot(x='pca_x', y='pca_y', data=df[df.level == 0]);

sns.scatterplot(x='pca_x', y='pca_y', hue='level', marker='o', data=df);

df[['pca_x', 'pca_y']]
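# A short usage sketch (my addition, not in the original analysis): `compare_graphs` defined above
# returns a NetLSD distance between two graphs, so the karate-club graph should sit at distance ~0
# from itself and at some positive distance from a degree-preserving configuration-model rewiring.

g_orig = nx.karate_club_graph()
g_rand = nx.configuration_model(deg_sequence=[d for _, d in g_orig.degree()], create_using=nx.Graph)

d_self = compare_graphs(g_orig, g_orig)   # identical heat-trace signatures -> ~0.0
d_rand = compare_graphs(g_orig, g_rand)   # smaller distance means more similar spectral signatures
print(f'self: {d_self:.4f}, vs rewired: {d_rand:.4f}')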
analysis/graph-embedding.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# **Download** (right-click, save target as ...) this page as a jupyterlab notebook from:
#
# [Homework 0](https://atomickitty.ddns.net/engr-1330-webroot/8-Labs/Lab00/Lab00.ipynb)
2-Homework/ES00/ENGR-1330-2021-3-ES0.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Create data set from file # + import pandas as pd import numpy as np import matplotlib.pyplot as plt from scipy import stats input_file = "CompareComplementation.csv" data_raw = pd.read_csv(input_file) data_raw # - # ## Reorder columns # + col_order = ["NAM 1 Temp BME280", "NAM 2 Temp BME280", "NAM 1 Hum BME280", "NAM 2 Hum BME280", "NAM 1 Temp SHT30", "NAM 2 Temp SHT30", "NAM 1 Hum SHT30", "NAM 2 Hum SHT30", "Luftdaten 1 PM2.5","NAM 1 PM2.5", "Luftdaten 2 PM2.5", "NAM 2 PM2.5", "Reference 1 PM2.5", "Reference 2 PM2.5", "Reference 3 PM2.5", "Luftdaten 1 PM10","NAM 1 PM10", "Luftdaten 2 PM10", "NAM 2 PM10", "Reference 1 PM10", "Reference 2 PM10", "Reference 3 PM10"] data = data_raw[col_order].copy() data # - # ## Remove rows without data from all sensors data.replace('', np.nan, inplace=True) data.dropna(inplace=True) data # ## Calculate average values of relative humidity, temperature, PM2.5 and PM10 # + data.insert(2, "NAM average Temp", (data["NAM 1 Temp BME280"] + data["NAM 2 Temp BME280"]) / 2) data.insert(5, "NAM average Hum", (data["NAM 1 Hum BME280"] + data["NAM 2 Hum BME280"]) / 2) data.insert(17, "Reference PM2.5", (data["Reference 2 PM2.5"] + data["Reference 1 PM2.5"] + data["Reference 3 PM2.5"]) / 3) data.insert(25, "Reference PM10", (data["Reference 2 PM10"] + data["Reference 1 PM10"] + data["Reference 3 PM10"]) / 3) data # - # ## Helper functions # + def add_empty_line(filename): with open(filename,'a') as file: file.write("\n") def add_text(filename, text): with open(filename,'a') as file: file.write(text + "\n") def add_separator(filename): with open(filename,'a') as file: file.write("##########\n\n") def clear_file(filename): with open(filename,'w') as file: file.write("") # - # ## Define conditions # + humid = data["NAM average Hum"] >= 70.0 dry = data["NAM average Hum"] <= 40.0 cold = data["NAM average Temp"] <= 5.0 hot = data["NAM average Temp"] >= 10.0 heater_on_NAM_1 = data["NAM 1 Hum BME280"] - data["NAM 1 Hum SHT30"] >= 15.0 heater_on_NAM_2 = data["NAM 2 Hum BME280"] - data["NAM 2 Hum SHT30"] >= 15.0 high_PM25 = data["Reference PM2.5"] >= 30 low_PM25 = data["Reference PM2.5"] <= 15 high_PM10 = data["Reference PM10"] >= 50 low_PM10 = data["Reference PM10"] <= 25 # Full data set data_sel = data.copy() # Constrains applied #data_sel = data[high_PM10 & high_PM25 & cold] comment = "Data description" output_file = "ResultsComplementation.csv" clear_file(output_file) add_separator(output_file) add_text(output_file, comment) add_empty_line(output_file) data_sel # - # ## Calculations # ### Mean values # + output_PM25 = "PM2.5\n\ Luftdaten 1:,{0:.2f},±,{1:.2f}\n\ NAM 1:,{2:.2f},±,{3:.2f}\n\ Luftdaten 2:,{4:.2f},±,{5:.2f}\n\ NAM 2:,{6:.2f},±,{7:.2f}\n\ Reference 1:,{8:.2f},±,{9:.2f}\n\ Reference 2:,{10:.2f},±,{11:.2f}\n\ Reference 3:,{12:.2f},±,{13:.2f}".format(data_sel["Luftdaten 1 PM2.5"].mean(), data_sel["Luftdaten 1 PM2.5"].std(), data_sel["NAM 1 PM2.5"].mean(), data_sel["NAM 1 PM2.5"].std(), data_sel["Luftdaten 2 PM2.5"].mean(), data_sel["Luftdaten 2 PM2.5"].std(), data_sel["NAM 2 PM2.5"].mean(), data_sel["NAM 2 PM2.5"].std(), data_sel["Reference 1 PM2.5"].mean(), data_sel["Reference 1 PM2.5"].std(), data_sel["Reference 2 PM2.5"].mean(), data_sel["Reference 2 PM2.5"].std(), data_sel["Reference 3 PM2.5"].mean(), data_sel["Reference 3 PM2.5"].std()) 
output_PM10 = "PM10\n\ Luftdaten 1:,{0:.2f},±,{1:.2f}\n\ NAM 1:,{2:.2f},±,{3:.2f}\n\ Luftdaten 2:,{4:.2f},±,{5:.2f}\n\ NAM 2:,{6:.2f},±,{7:.2f}\n\ Reference 1:,{8:.2f},±,{9:.2f}\n\ Reference 2:,{10:.2f},±,{11:.2f}\n\ Reference 3:,{12:.2f},±,{13:.2f}".format(data_sel["Luftdaten 1 PM10"].mean(), data_sel["Luftdaten 1 PM10"].std(), data_sel["NAM 1 PM10"].mean(), data_sel["NAM 1 PM10"].std(), data_sel["Luftdaten 2 PM10"].mean(), data_sel["Luftdaten 2 PM10"].std(), data_sel["NAM 2 PM10"].mean(), data_sel["NAM 2 PM10"].std(), data_sel["Reference 1 PM10"].mean(), data_sel["Reference 1 PM10"].std(), data_sel["Reference 2 PM10"].mean(), data_sel["Reference 2 PM10"].std(), data_sel["Reference 3 PM10"].mean(), data_sel["Reference 3 PM10"].std()) output_conditions = "Conditions\n\ Temperature:,{0:.2f},±,{1:.2f}\n\ Humidity:,{2:.2f},±,{3:.2f}".format(data_sel["NAM average Temp"].mean(), data_sel["NAM average Temp"].std(), data_sel["NAM average Hum"].mean(), data_sel["NAM average Hum"].std()) add_text(output_file, "Average values of PM2.5 and PM10 (ug/m3)") add_text(output_file, output_PM25) add_empty_line(output_file) add_text(output_file, output_PM10) add_empty_line(output_file) add_text(output_file, output_conditions) add_empty_line(output_file) print(output_PM25.replace(","," ")) print() print(output_PM10.replace(","," ")) print() print(output_conditions.replace(","," ")) # - # ### Select PM data only data_sel_pm25 = data_sel.iloc[:,[10,11,12,13,17]].copy() data_sel_pm25 data_sel_pm10 = data_sel.iloc[:,[18,19,20,21,25]].copy() data_sel_pm10 # ## PM2.5 # ### Comparison of reference devices for PM2.5 concentrations # + fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(19,5)) x = data_sel["Reference 1 PM2.5"] y = data_sel["Reference 2 PM2.5"] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) ax1.plot(x, y, "k.") ax1.plot(x, intercept + slope*x, "r") text_pos_x = 0.02 text_pos_y = 0.95 ax1.text(text_pos_x, text_pos_y, "(a) PM2.5", transform=ax1.transAxes) ax1.text(text_pos_x, text_pos_y - 0.05, "a = {0:.3f}".format(slope), transform=ax1.transAxes) ax1.text(text_pos_x, text_pos_y - 0.10, "b = {0:.3f}".format(intercept), transform=ax1.transAxes) ax1.text(text_pos_x, text_pos_y - 0.15, "R² = {0:.3f}".format(r_value), transform=ax1.transAxes) ax1.set_xlabel("Reference 1 (µg/m³)", fontweight="bold") ax1.set_ylabel("Reference 2 (µg/m³)", fontweight="bold") x = data_sel["Reference 1 PM2.5"] y = data_sel["Reference 3 PM2.5"] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) ax2.plot(x, y, "k.") ax2.plot(x, intercept + slope*x, "r") text_pos_x = 0.02 text_pos_y = 0.95 ax2.text(text_pos_x, text_pos_y, "(b) PM2.5", transform=ax2.transAxes) ax2.text(text_pos_x, text_pos_y - 0.05, "a = {0:.3f}".format(slope), transform=ax2.transAxes) ax2.text(text_pos_x, text_pos_y - 0.10, "b = {0:.3f}".format(intercept), transform=ax2.transAxes) ax2.text(text_pos_x, text_pos_y - 0.15, "R² = {0:.3f}".format(r_value), transform=ax2.transAxes) ax2.set_xlabel("Reference 1 (µg/m³)", fontweight="bold") ax2.set_ylabel("Reference 3 (µg/m³)", fontweight="bold") x = data_sel["Reference 2 PM2.5"] y = data_sel["Reference 3 PM2.5"] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) ax3.plot(x, y, "k.") ax3.plot(x, intercept + slope*x, "r") text_pos_x = 0.02 text_pos_y = 0.95 ax3.text(text_pos_x, text_pos_y, "(c) PM2.5", transform=ax3.transAxes) ax3.text(text_pos_x, text_pos_y - 0.05, "a = {0:.3f}".format(slope), transform=ax3.transAxes) ax3.text(text_pos_x, text_pos_y - 0.10, "b = 
{0:.3f}".format(intercept), transform=ax3.transAxes) ax3.text(text_pos_x, text_pos_y - 0.15, "R² = {0:.3f}".format(r_value), transform=ax3.transAxes) ax3.set_xlabel("Reference 2 (µg/m³)", fontweight="bold") ax3.set_ylabel("Reference 3 (µg/m³)", fontweight="bold") plt.savefig("Complementation_reference_PM25.png") plt.show() # - # ### Pearson correlations for PM2.5 result = data_sel_pm25.corr(method="pearson").round(3) add_text(output_file, "Pearson correlations for PM2.5") result.to_csv(output_file, mode="a") add_empty_line(output_file) result # ### Comparison between Luftdaten and NAM for PM2.5 # + fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12,5)) x = data_sel["Luftdaten 1 PM2.5"] y = data_sel["NAM 1 PM2.5"] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) ax1.plot(x, y, "k.") ax1.plot(x, intercept + slope*x, "r") text_pos_x = 0.02 text_pos_y = 0.95 ax1.text(text_pos_x, text_pos_y, "(a) PM2.5", transform=ax1.transAxes) ax1.text(text_pos_x, text_pos_y - 0.05, "a = {0:.3f}".format(slope), transform=ax1.transAxes) ax1.text(text_pos_x, text_pos_y - 0.10, "b = {0:.3f}".format(intercept), transform=ax1.transAxes) ax1.text(text_pos_x, text_pos_y - 0.15, "R² = {0:.3f}".format(r_value), transform=ax1.transAxes) ax1.set_xlabel("Luftdaten 1 (µg/m³)", fontweight="bold") ax1.set_ylabel("NAM 1 (µg/m³)", fontweight="bold") x = data_sel["Luftdaten 2 PM2.5"] y = data_sel["NAM 2 PM2.5"] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) ax2.plot(x, y, "k.") ax2.plot(x, intercept + slope*x, "r") text_pos_x = 0.15 text_pos_y = 0.95 ax2.text(text_pos_x, text_pos_y, "(b) PM2.5", transform=ax2.transAxes) ax2.text(text_pos_x, text_pos_y - 0.05, "a = {0:.3f}".format(slope), transform=ax2.transAxes) ax2.text(text_pos_x, text_pos_y - 0.10, "b = {0:.3f}".format(intercept), transform=ax2.transAxes) ax2.text(text_pos_x, text_pos_y - 0.15, "R² = {0:.3f}".format(r_value), transform=ax2.transAxes) ax2.set_xlabel("Luftdaten 2 (µg/m³)", fontweight="bold") ax2.set_ylabel("NAM 2 (µg/m³)", fontweight="bold") plt.savefig("Complementation_LC_regressions_PM25.png") plt.show() # - # ### Scatter plots with linear regressions for PM2.5 # + fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(12,12)) x = data_sel["Reference PM2.5"] y = data_sel["Luftdaten 1 PM2.5"] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) ax1.plot(x, y, "k.") ax1.plot(x, intercept + slope*x, "r") text_pos_x = 0.02 text_pos_y = 0.95 ax1.text(text_pos_x, text_pos_y, "(a) PM2.5", transform=ax1.transAxes) ax1.text(text_pos_x, text_pos_y - 0.05, "a = {0:.3f}".format(slope), transform=ax1.transAxes) ax1.text(text_pos_x, text_pos_y - 0.10, "b = {0:.3f}".format(intercept), transform=ax1.transAxes) ax1.text(text_pos_x, text_pos_y - 0.15, "R² = {0:.3f}".format(r_value), transform=ax1.transAxes) ax1.set_xlabel("Average reference (µg/m³)", fontweight="bold") ax1.set_ylabel("Luftdaten 1 (µg/m³)", fontweight="bold") y = data_sel["NAM 1 PM2.5"] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) ax2.plot(x, y, "k.") ax2.plot(x, intercept + slope*x, "r") text_pos_x = 0.02 text_pos_y = 0.95 ax2.text(text_pos_x, text_pos_y, "(b) PM2.5", transform=ax2.transAxes) ax2.text(text_pos_x, text_pos_y - 0.05, "a = {0:.3f}".format(slope), transform=ax2.transAxes) ax2.text(text_pos_x, text_pos_y - 0.10, "b = {0:.3f}".format(intercept), transform=ax2.transAxes) ax2.text(text_pos_x, text_pos_y - 0.15, "R² = {0:.3f}".format(r_value), transform=ax2.transAxes) ax2.set_xlabel("Average reference (µg/m³)", 
fontweight="bold") ax2.set_ylabel("NAM 1 (µg/m³)", fontweight="bold") y = data_sel["Luftdaten 2 PM2.5"] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) ax3.plot(x, y, "k.") ax3.plot(x, intercept + slope*x, "r") text_pos_x = 0.02 text_pos_y = 0.95 ax3.text(text_pos_x, text_pos_y, "(c) PM2.5", transform=ax3.transAxes) ax3.text(text_pos_x, text_pos_y - 0.05, "a = {0:.3f}".format(slope), transform=ax3.transAxes) ax3.text(text_pos_x, text_pos_y - 0.10, "b = {0:.3f}".format(intercept), transform=ax3.transAxes) ax3.text(text_pos_x, text_pos_y - 0.15, "R² = {0:.3f}".format(r_value), transform=ax3.transAxes) ax3.set_xlabel("Average reference (µg/m³)", fontweight="bold") ax3.set_ylabel("Luftdaten 2 (µg/m³)", fontweight="bold") y = data_sel["NAM 2 PM2.5"] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) ax4.plot(x, y, "k.") ax4.plot(x, intercept + slope*x, "r") text_pos_x = 0.20 text_pos_y = 0.95 ax4.text(text_pos_x, text_pos_y, "(d) PM2.5", transform=ax4.transAxes) ax4.text(text_pos_x, text_pos_y - 0.05, "a = {0:.3f}".format(slope), transform=ax4.transAxes) ax4.text(text_pos_x, text_pos_y - 0.10, "b = {0:.3f}".format(intercept), transform=ax4.transAxes) ax4.text(text_pos_x, text_pos_y - 0.15, "R² = {0:.3f}".format(r_value), transform=ax4.transAxes) ax4.set_xlabel("Average reference (µg/m³)", fontweight="bold") ax4.set_ylabel("NAM 2 (µg/m³)", fontweight="bold") plt.savefig("Complementation_regressions_PM25.png") plt.show() # - # ## PM10 # ### Comparison of reference devices for PM10 concentrations # + fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(19,5)) x = data_sel["Reference 1 PM10"] y = data_sel["Reference 2 PM10"] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) ax1.plot(x, y, "k.") ax1.plot(x, intercept + slope*x, "r") text_pos_x = 0.02 text_pos_y = 0.95 ax1.text(text_pos_x, text_pos_y, "(a) PM10", transform=ax1.transAxes) ax1.text(text_pos_x, text_pos_y - 0.05, "a = {0:.3f}".format(slope), transform=ax1.transAxes) ax1.text(text_pos_x, text_pos_y - 0.10, "b = {0:.3f}".format(intercept), transform=ax1.transAxes) ax1.text(text_pos_x, text_pos_y - 0.15, "R² = {0:.3f}".format(r_value), transform=ax1.transAxes) ax1.set_xlabel("Reference 1 (µg/m³)", fontweight="bold") ax1.set_ylabel("Reference 2 (µg/m³)", fontweight="bold") x = data_sel["Reference 1 PM10"] y = data_sel["Reference 3 PM10"] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) ax2.plot(x, y, "k.") ax2.plot(x, intercept + slope*x, "r") text_pos_x = 0.02 text_pos_y = 0.95 ax2.text(text_pos_x, text_pos_y, "(b) PM10", transform=ax2.transAxes) ax2.text(text_pos_x, text_pos_y - 0.05, "a = {0:.3f}".format(slope), transform=ax2.transAxes) ax2.text(text_pos_x, text_pos_y - 0.10, "b = {0:.3f}".format(intercept), transform=ax2.transAxes) ax2.text(text_pos_x, text_pos_y - 0.15, "R² = {0:.3f}".format(r_value), transform=ax2.transAxes) ax2.set_xlabel("Reference 1 (µg/m³)", fontweight="bold") ax2.set_ylabel("Reference 3 (µg/m³)", fontweight="bold") x = data_sel["Reference 2 PM10"] y = data_sel["Reference 3 PM10"] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) ax3.plot(x, y, "k.") ax3.plot(x, intercept + slope*x, "r") text_pos_x = 0.02 text_pos_y = 0.95 ax3.text(text_pos_x, text_pos_y, "(c) PM10", transform=ax3.transAxes) ax3.text(text_pos_x, text_pos_y - 0.05, "a = {0:.3f}".format(slope), transform=ax3.transAxes) ax3.text(text_pos_x, text_pos_y - 0.10, "b = {0:.3f}".format(intercept), transform=ax3.transAxes) ax3.text(text_pos_x, text_pos_y - 0.15, 
"R² = {0:.3f}".format(r_value), transform=ax3.transAxes) ax3.set_xlabel("Reference 2 (µg/m³)", fontweight="bold") ax3.set_ylabel("Reference 3 (µg/m³)", fontweight="bold") plt.savefig("Complementation_reference_PM10.png") plt.show() # - # ### Pearson correlations for PM10 result = data_sel_pm10.corr(method="pearson").round(3) add_text(output_file, "Pearson correlations for PM10") result.to_csv(output_file, mode="a") add_empty_line(output_file) result # ### Comparison between Luftdaten and NAM for PM10 # + fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12,5)) x = data_sel["Luftdaten 1 PM10"] y = data_sel["NAM 1 PM10"] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) ax1.plot(x, y, "k.") ax1.plot(x, intercept + slope*x, "r") text_pos_x = 0.02 text_pos_y = 0.95 ax1.text(text_pos_x, text_pos_y, "(a) PM10", transform=ax1.transAxes) ax1.text(text_pos_x, text_pos_y - 0.05, "a = {0:.3f}".format(slope), transform=ax1.transAxes) ax1.text(text_pos_x, text_pos_y - 0.10, "b = {0:.3f}".format(intercept), transform=ax1.transAxes) ax1.text(text_pos_x, text_pos_y - 0.15, "R² = {0:.3f}".format(r_value), transform=ax1.transAxes) ax1.set_xlabel("Luftdaten 1 (µg/m³)", fontweight="bold") ax1.set_ylabel("NAM 1 (µg/m³)", fontweight="bold") x = data_sel["Luftdaten 2 PM10"] y = data_sel["NAM 2 PM10"] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) ax2.plot(x, y, "k.") ax2.plot(x, intercept + slope*x, "r") text_pos_x = 0.25 text_pos_y = 0.95 ax2.text(text_pos_x, text_pos_y, "(b) PM10", transform=ax2.transAxes) ax2.text(text_pos_x, text_pos_y - 0.05, "a = {0:.3f}".format(slope), transform=ax2.transAxes) ax2.text(text_pos_x, text_pos_y - 0.10, "b = {0:.3f}".format(intercept), transform=ax2.transAxes) ax2.text(text_pos_x, text_pos_y - 0.15, "R² = {0:.3f}".format(r_value), transform=ax2.transAxes) ax2.set_xlabel("Luftdaten 2 (µg/m³)", fontweight="bold") ax2.set_ylabel("NAM 2 (µg/m³)", fontweight="bold") plt.savefig("Complementation_LC_regressions_PM10.png") plt.show() # - # ### Scatter plots with linear regressions for PM10 # + fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(12,12)) x = data_sel["Reference PM10"] y = data_sel["Luftdaten 1 PM10"] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) ax1.plot(x, y, "k.") ax1.plot(x, intercept + slope*x, "r") text_pos_x = 0.02 text_pos_y = 0.95 ax1.text(text_pos_x, text_pos_y, "(a) PM10", transform=ax1.transAxes) ax1.text(text_pos_x, text_pos_y - 0.05, "a = {0:.3f}".format(slope), transform=ax1.transAxes) ax1.text(text_pos_x, text_pos_y - 0.10, "b = {0:.3f}".format(intercept), transform=ax1.transAxes) ax1.text(text_pos_x, text_pos_y - 0.15, "R² = {0:.3f}".format(r_value), transform=ax1.transAxes) ax1.set_xlabel("Average reference (µg/m³)", fontweight="bold") ax1.set_ylabel("Luftdaten 1 (µg/m³)", fontweight="bold") y = data_sel["NAM 1 PM10"] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) ax2.plot(x, y, "k.") ax2.plot(x, intercept + slope*x, "r") text_pos_x = 0.02 text_pos_y = 0.95 ax2.text(text_pos_x, text_pos_y, "(b) PM10", transform=ax2.transAxes) ax2.text(text_pos_x, text_pos_y - 0.05, "a = {0:.3f}".format(slope), transform=ax2.transAxes) ax2.text(text_pos_x, text_pos_y - 0.10, "b = {0:.3f}".format(intercept), transform=ax2.transAxes) ax2.text(text_pos_x, text_pos_y - 0.15, "R² = {0:.3f}".format(r_value), transform=ax2.transAxes) ax2.set_xlabel("Average reference (µg/m³)", fontweight="bold") ax2.set_ylabel("NAM 1 (µg/m³)", fontweight="bold") y = data_sel["Luftdaten 2 PM10"] slope, 
intercept, r_value, p_value, std_err = stats.linregress(x, y) ax3.plot(x, y, "k.") ax3.plot(x, intercept + slope*x, "r") text_pos_x = 0.02 text_pos_y = 0.95 ax3.text(text_pos_x, text_pos_y, "(c) PM10", transform=ax3.transAxes) ax3.text(text_pos_x, text_pos_y - 0.05, "a = {0:.3f}".format(slope), transform=ax3.transAxes) ax3.text(text_pos_x, text_pos_y - 0.10, "b = {0:.3f}".format(intercept), transform=ax3.transAxes) ax3.text(text_pos_x, text_pos_y - 0.15, "R² = {0:.3f}".format(r_value), transform=ax3.transAxes) ax3.set_xlabel("Average reference (µg/m³)", fontweight="bold") ax3.set_ylabel("Luftdaten 2 (µg/m³)", fontweight="bold") y = data_sel["NAM 2 PM10"] slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) ax4.plot(x, y, "k.") ax4.plot(x, intercept + slope*x, "r") text_pos_x = 0.20 text_pos_y = 0.95 ax4.text(text_pos_x, text_pos_y, "(d) PM10", transform=ax4.transAxes) ax4.text(text_pos_x, text_pos_y - 0.05, "a = {0:.3f}".format(slope), transform=ax4.transAxes) ax4.text(text_pos_x, text_pos_y - 0.10, "b = {0:.3f}".format(intercept), transform=ax4.transAxes) ax4.text(text_pos_x, text_pos_y - 0.15, "R² = {0:.3f}".format(r_value), transform=ax4.transAxes) ax4.set_xlabel("Average reference (µg/m³)", fontweight="bold") ax4.set_ylabel("NAM 2 (µg/m³)", fontweight="bold") plt.savefig("Complementation_regressions_PM10.png") plt.show()
Analyze_Complementation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="6aD1ALMdAy6O" # # PySpark en Google Colab # # Instalacion Octubre/2021 # # 1. Instalacion Java # 2. Instalacion de Spark # 3. Instalar PySpark # # ## De forma General para usar pyspark en Colab (2021) siga los siguientes pasos en una celda en Colab: # + id="8JuWZdteAmvv" # instalar Java # !apt-get install openjdk-8-jdk-headless -qq > /dev/null # + id="SoApfKfpRunp" # Descargar la ultima versión de java ( comprobar que existen los path de descarga) # Download latest release. Update if necessary # !wget -q https://downloads.apache.org/spark/spark-3.1.2/spark-3.1.2-bin-hadoop2.7.tgz # + id="PzHJ0bpuN-od" # %ls -la /content/ # + id="uXpriDwJLP1I" # !tar xf spark-3.1.2-bin-hadoop2.7.tgz # + id="aRYGR8EsycZ1" # %ls -la # + [markdown] id="7Qih8Kt1MJla" # # + id="gKlOUyKuMEr7" # instalar pyspark # !pip install -q pyspark # + [markdown] id="EzkJ4JKkCgno" # # Variables de entorno # + id="WVEIOATyCYc6" import os # libreria de manejo del sistema operativo os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" os.environ["SPARK_HOME"] = "/content/spark-3.1.2-bin-hadoop2.7" # + [markdown] id="9UCH8MwWGA-Q" # # Cargar pyspark en el sistema # + id="38_3RcUSCiwb" from pyspark.sql import SparkSession APP_NAME = "PDGE-tutorialSpark1" SPARK_URL = "local[*]" spark = SparkSession.builder.appName(APP_NAME).master(SPARK_URL).getOrCreate() spark # + [markdown] id="TX-3Z9fcy5bB" # Pruebas # # + colab={"base_uri": "https://localhost:8080/", "height": 196} id="zRK16gv0zAnN" outputId="dd4a473b-daf1-49ce-9de6-6dd2141876f7" sc = spark.sparkContext #obtener el contexto de ejecución de Spark del Driver. sc # + id="DoAGmb4GY829" array = sc.parallelize([1,2,3,4,5,6,7,8,9,10], 2) array # + id="eOLHf13HZ2h1" print(array.collect()) # + id="iuZ5GDGqVwrS" print(array.count()) # + id="gy-anBhZ4f0-" num3 = array.map(lambda elemento: 3*elemento) # + id="MKCuCvPI4sgZ" print(num3.collect()) # + [markdown] id="ogJwPyVjZWcd" # # Ejemplos de operaciones con RDDs de Spark # + id="h_sTML7yZzuH" numeros = sc.parallelize([1,2,3,4,5,6,7,8,9,10],2) print(numeros.reduce(lambda e1,e2: e1+e2)) # + id="7PjaW8UhaPoj" pnumeros = sc.parallelize([1,2,3,4,5,6,7,8,9,10]) rdd = pnumeros.map(lambda e: 2*e) print(rdd.collect()) # + [markdown] id="7EObylHIaZKC" # ## Pregunta TS1.1 ¿Cómo hacer para obtener una lista de los elementos al cuadrado? # + id="320brSCvaj9N" # + id="IpED25-ga6MX" numeros = sc.parallelize([1,2,3,4,5,6,7,8,9,10]) rdd = numeros.filter(lambda elemento: elemento%2==0) print (rdd.collect()) # + [markdown] id="2LtU6J3ia8Hw" # ## Pregunta TS1.2 ¿Cómo filtrar los impares? 
# + id="r5pQG0VDZzkY" rddi = numeros.filter (lambda e: e%2==1) print (rddi.collect()) # + [markdown] id="qc_udeL9kDXu" # #Operaciones con números # + [markdown] id="RsbMVChlP5r0" # Sumar n numeros # + id="ial-4tcjjMUj" rddnum=sc.range(1000000) rddnum.reduce(lambda a,b: a+b) # + id="EYsRY4yMoB3C" numeros = sc.parallelize([1,1,2,2,5]) unicos = numeros.distinct() print (unicos.collect()) # + id="5uRY-KHycVim" numeros = sc.parallelize([1,2,3,4,5]) rdd = numeros.flatMap(lambda elemento: [elemento, 10*elemento]) print (rdd.collect()) # + [markdown] id="2Iz55urM7Kl3" # Muestreo # + id="4m70js9d7N8x" numeros = sc.parallelize([1,2,3,4,5,6,7,8,9,10]) rdd = numeros.sample(True, 1.0) # True = sin reemplazar # 1.0 = fracción de elementos = 100% print (rdd.collect()) # + [markdown] id="6MlNG4--Pns0" # Union # + id="Nib7-OKadSEn" pares = sc.parallelize([2,4,6,8,10]) impares = sc.parallelize([1,3,5,7,9]) numeros = pares.union(impares) print (numeros.collect()) # + [markdown] id="YexhndIY7ikM" # reducción # # + id="x3R31PVc7k_L" numeros = sc.parallelize([1,2,3,4,5]) # operación asociativa y conmutativa # orden no está definido print (numeros.reduce(lambda elem1,elem2: elem2+elem1)) # + [markdown] id="Z-KQK9fj78uU" # ## Pregunta TS1.3 ¿Tiene sentido esta operación? ¿Si se repite se obtiene siempre el mismo resultado? # + id="Gvr3unXn8NOO" #Tiene sentido esta operación? numeros = sc.parallelize([1,2,3,4,5]) print (numeros.reduce(lambda elem1,elem2: elem1-elem2)) # + id="A6c147To8T4s" #Tiene sentido esta operación? numeros = sc.parallelize([1,2,4,3,5]) print (numeros.reduce(lambda elem1,elem2: elem1-elem2)) # + [markdown] id="c9iVsHyW8lBj" # #Acciones # + id="vNyMypQj8o__" numeros = sc.parallelize([5,3,2,1,4]) print (numeros.take(3)) #¿Qué sucede si ponemos 30 en vez de 3 elementos? # + id="RRuJczUX8kSv" numeros = sc.parallelize([3,2,1,4,5]) # print (numeros.takeOrdered(3, lambda elem: -elem)) # La función lambda se está utilizando para crear el índice de la lista de ordenación # + [markdown] id="ltQB9md2ME1l" # ## Pregunta TS1.4 ¿Cómo lo ordenarías para que primero aparezcan los impares y luego los pares? # + id="BIaKzQy1M98H" # + [markdown] id="ctm29gO0ZIbZ" # # RDDs con string # + id="wzdO64iKaC1q" palabras = sc.parallelize(['HOLA', 'Que', 'TAL', 'Bien']) pal_minus = palabras.map(lambda elemento: elemento.lower()) print (pal_minus.collect()) # + id="_7Pn8FjtakNG" palabras = sc.parallelize(['HOLA', 'Que', 'TAL', 'Bien']) pal_long = palabras.map(lambda elemento: len(elemento)) print (pal_long.collect()) # + id="rmZwI0MGbAOS" log = sc.parallelize(['E: e21', 'W: w12', 'W: w13', 'E: e45']) errors = log.filter(lambda elemento: elemento[0]=='E') print (errors.collect()) # + id="KKDB9UzIcbau" lineas = sc.parallelize(['', 'a', 'a b', 'a b c']) palabras = lineas.flatMap(lambda elemento: elemento.split()) print (palabras.collect()) # + id="ZpM4oMF_cnW4" lineas = sc.parallelize(['', 'a', 'a b', 'a b c']) palabras_flat = lineas.flatMap(lambda elemento: elemento.split()) palabras_map = lineas.map(lambda elemento: elemento.split()) print (palabras_flat.collect()) print (palabras_map.collect()) # + [markdown] id="saVy72uncpMC" # ## Pregunta TS1.5 ¿Cuántos elementos tiene cada rdd? ¿Cuál tiene más? # # + id="pfLDcQrV3e7j" # + [markdown] id="cDgvc1iU3MA2" # ## Pregunta TS1.6 ¿De qué tipo son los elementos del rdd palabras_map? ¿Por qué palabras_map tiene el primer elemento vacío? 
# + id="_bDH0EwE3Kdi" # + id="zsa9OFbhc0-i" palabras_flat.take(1) # + id="Gwn33hfO0XoJ" palabras_map # + [markdown] id="1Lv-KrP0dNdE" # ## Pregunta TS1.7. Prueba la transformación distinct si lo aplicamos a cadenas. # + id="6oyxAO8KdIWH" # + id="Y2NdOdomdoHP" log = sc.parallelize(['E: e21', 'I: i11', 'W: w12', 'I: i11', 'W: w13', 'E: e45']) infos = log.filter(lambda elemento: elemento[0]=='I') errors = log.filter(lambda elemento: elemento[0]=='E') inferr = infos.union(errors) print (inferr.collect()) # + [markdown] id="mjuin2jgcqCo" # ## Pregunta TS1.8 ¿Cómo se podría obtener la misma salida pero utilizando una sola transformación y sin realizar la unión? # + id="mv4rsT2Fd_1p" numeros = sc.parallelize([1,2,3,4,5]) print (numeros.reduce(lambda elem1,elem2: elem2+elem1)) # + id="Ej3-MXZDeFmy" #Tiene sentido esta operación? numeros = sc.parallelize([1,2,3,4,5]) print (numeros.reduce(lambda elem1,elem2: elem1-elem2)) # + id="jgXmpVzveNaW" palabras = sc.parallelize(['HOLA', 'Que', 'TAL', 'Bien']) pal_minus = palabras.map(lambda elemento: elemento.lower()) print (pal_minus.reduce(lambda elem1,elem2: elem1+"-"+elem2)) #y esta tiene sentido esta operación? # Qué pasa si ponemos elem2+"-"+elem1 # + id="1lzjZV5xeVze" r = sc.parallelize([('A', 1),('C', 4),('A', 1),('B', 1),('B', 4)]) rr = r.reduceByKey(lambda v1,v2:v1+v2) print (rr.collect()) # + id="EZ3sOeL5ebOw" r = sc.parallelize([('A', 1),('C', 4),('A', 1),('B', 1),('B', 4)]) rr1 = r.reduceByKey(lambda v1,v2:v1+v2) print (rr1.collect()) rr2 = rr1.reduceByKey(lambda v1,v2:v1) print (rr2.collect()) # + [markdown] id="LLPvqqELz-Pt" # ## Pregunta TS1.9 ¿Cómo explica el funcionamiento de las celdas anteriores? # + id="fR2ruD-TeeK2" r = sc.parallelize([('A', 1),('C', 4),('A', 1),('B', 1),('B', 4)]) rr1 = r.reduceByKey(lambda v1,v2:'hola') print (rr1.collect()) rr2 = rr1.reduceByKey(lambda v1,v2:'hola') print (rr2.collect()) # + id="gKbP8VKzeitK" r = sc.parallelize([('A', 1),('C', 2),('A', 3),('B', 4),('B', 5)]) rr = r.groupByKey() res= rr.collect() for k,v in res: print (k, list(v)) # Que operación realizar al RDD rr para que la operacion sea como un reduceByKey #¿Y simular un group con un reduceByKey y un map? # + id="1UBMxjgi19V8" rdd1 = sc.parallelize([('A',1),('B',2),('C',3)]) rdd2 = sc.parallelize([('A',4),('B',5),('C',6)]) rddjoin = rdd1.join(rdd2) print (rddjoin.collect()) # Prueba a cambiar las claves del rdd1 y rdd2 para ver cuántos elementos se crean # + id="XXVC6hoC2Kw-" rdd1 = sc.parallelize([('A',1),('B',2),('C',3)]) rdd2 = sc.parallelize([('A',4),('A',5),('B',6),('D',7)]) rddjoin = rdd1.join(rdd2) print (rddjoin.collect()) #Modifica join por leftOuterJoin, rightOuterJoin y fullOuterJoin ¿Qué sucede? # + id="quNvzEdK2gv5" rdd = sc.parallelize([('A',1),('B',2),('C',3),('A',4),('A',5),('B',6)]) res = rdd.sortByKey(False) print (res.collect()) # + [markdown] id="zOir6ry549KP" # # Utilización de ficheros # + id="0HSk78ba3DTv" # Crea una lista de 1000 números y la guarda. # Da error si 'salida' existe, descomenta la linea de borrar en ese caso. # %rm -rf salida numeros = sc.parallelize(range(0,1000)) numeros.saveAsTextFile('salida') # + id="r1oOMj9U3azd" # ls -la # + id="y3T8t98g3koT" # %ls -la salida/* # + id="2yWrIYA83q_c" # %cat salida/part-00000 # + id="D6_KGg9w3Rqm" # Recupera el fichero guardado y realiza la suma n2 = sc.textFile('salida').map(lambda a:int(a)) print(n2.reduce(lambda v1,v2: v1 + v2)) # Prueba este código y mira qué genera? # Borra la salida y cambia las particiones en parallelize ¿Qué sucede? 
# (pe c.parallelize(xrange(0,1000),8)) # + [markdown] id="33FiLTWp6LQ2" # ## Pregunta TS1.10 Borra la salida y cambia las particiones en parallelize ¿Qué sucede? # (pe c.parallelize(xrange(0,1000),8)) # # + id="H4lLsBhy4Yxq" # %rm -rf salida # %ls -la # + [markdown] id="SyfefweJenb8" # # El quijote # + [markdown] id="xcxH-o_ZQSje" # Montar el directorio de trabajo utilizando Google Drive # Loading Your Data into Google Colaboratory. # 1. First of all, Upload your Data to your Google Drive. # 2. Run the following script in colab shell. # 3. Copy the authorization code of your account. # 4. Paste the authorization code into the output shell. # 5. Congrats! Now your Google Drive is mounted to this location /content/gdrive/MyDrive/ # # # # + [markdown] id="5aS_Xvk2_kSN" # # + id="Vl0Fq5nChoYj" from google.colab import drive #drive.flush_and_unmount() drive.mount('/content/gdrive') # + id="jL-wqZOCZzrL" # %ls /content/gdrive/MyDrive/ # + id="YpBiHwBjauJk" # !cp "/content/gdrive/My Drive/elquijote.txt" . # + id="TmlBII6cbiIt" # %ls -la # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="OgqJUkLqb5ZG" outputId="c0121fe0-2789-4838-bc10-92cabdc8c9fd" # %pwd # + [markdown] id="y9E6_qc-a7GE" # # Procesando el QUIJOTE # + id="_Li3mX8ebMSi" quijote = sc.textFile("elquijote.txt") quijote.take(10) # + [markdown] id="GDVSZOrYbrEt" # Transformaciones # + id="ITsCl47JbfM_" charsPerLine = quijote.map(lambda s: len(s)) allWords = quijote.flatMap(lambda s: s.split()) allWordsNoArticles = allWords.filter(lambda a: a.lower() not in ["el", "la"]) allWordsUnique = allWords.map(lambda s: s.lower()).distinct() sampleWords = allWords.sample(withReplacement=True, fraction=0.2, seed=666) weirdSampling = sampleWords.union(allWordsNoArticles.sample(False, fraction=0.3)) # cómo funciona cada transformación # + id="4kpZSOS7cAYr" allWordsUnique.take(10) # + [markdown] id="PpAuT-AkcRyf" # ## Pregunta TS2.1 Explica la utilidad de cada transformación y detalle para cada una de ellas si cambia el número de elementos en el RDD resultante. Es decir si el RDD de partida tiene N elementos, y el de salida M elementos, indica si N>M, N=M o N<M. # + [markdown] id="-0Xj_MR9eVCf" # * map # * flatmap # * filter # * distinct # * sample # * union # # + id="PWJlgOUDcQhc" numLines = quijote.count() numChars = charsPerLine.reduce(lambda a,b: a+b) # also charsPerLine.sum() sortedWordsByLength = allWordsNoArticles.takeOrdered(20, key=lambda x: -len(x)) numLines, numChars, sortedWordsByLength # + id="A33tgNqLePMn" # + [markdown] id="hIpuogySgB4N" # ## Pregunta TS2.2 Explica el funcionamiento de cada acción anterior # # Implementa la opción count de otra manera: # # # * utilizando transformaciones map y reduce # * utilizando solo reduce en caso de que sea posible. 
# # # + [markdown] id="PkXwuhGuhrQD" # # Operaciones K-V (Clave Valor) # + id="5JAWu58dhpWk" import requests import re allWords = allWords.flatMap(lambda w: re.sub(""";|:|\.|,|-|–|"|'|\s"""," ", w.lower()).split(" ")).filter(lambda a: len(a)>0) allWords2 = sc.parallelize(requests.get("https://gist.githubusercontent.com/jsdario/9d871ed773c81bf217f57d1db2d2503f/raw/585de69b0631c805dabc6280506717943b82ba4a/el_quijote_ii.txt").iter_lines()) allWords2 = allWords2.flatMap(lambda w: re.sub(""";|:|\.|,|-|–|"|'|\s"""," ", w.decode("utf8").lower()).split(" ")).filter(lambda a: len(a)>0) # + id="NCsub2OPjCyD" allWords.take(10) # + id="1W2pNxR1jKib" allWords2.take(10) # + id="L61OV1NHkBLZ" words = allWords.map(lambda e: (e,1)) words2 = allWords2.map(lambda e: (e,1)) words.take(10) # + id="wqtEo2j9kPuk" frequencies = words.reduceByKey(lambda a,b: a+b) frequencies2 = words2.reduceByKey(lambda a,b: a+b) frequencies.takeOrdered(10, key=lambda a: -a[1]) # + id="CtlQosYPk3M7" res = words.groupByKey().takeOrdered(10, key=lambda a: -len(a)) res # To see the content, res[i][1].data # + id="7nmgk3ZmlFlW" joinFreq = frequencies.join(frequencies2) joinFreq.take(10) # + id="UOY-R88Fl1vq" joinFreq.map(lambda e: (e[0], (e[1][0] - e[1][1])/(e[1][0] + e[1][1]))).takeOrdered(10, lambda v: -v[1]), joinFreq.map(lambda e: (e[0], (e[1][0] - e[1][1])/(e[1][0] + e[1][1]))).takeOrdered(10, lambda v: +v[1]) # + [markdown] id="Xzt9cj-RmLta" # ## Pregunta TS2.3 Explica el proposito de cada una de las operaciones anteriores # + [markdown] id="I9Hx1bZ2mc-V" # ## Pregunta TS2.4 ¿Cómo puede implementarse la frecuencia con groupByKey y transformaciones? # + id="HiuL6clvl-JS" # + [markdown] id="BiZwz7sOnBvF" # # Optimizaciones # # + [markdown] id="avupEVWQnIL5" # ## Pregunta TS2.5 ¿Cuál de las dos siguientes celdas es más eficiente? Justifique la respuesta. # + id="I0_XRWcOndr6" joinFreq.map(lambda e: (e[0], (e[1][0] - e[1][1])/(e[1][0] + e[1][1]))).takeOrdered(10, lambda v: -v[1]), joinFreq.map(lambda e: (e[0], (e[1][0] - e[1][1])/(e[1][0] + e[1][1]))).takeOrdered(10, lambda v: +v[1]) # + id="XKw7IizNnaKA" result = joinFreq.map(lambda e: (e[0], (e[1][0] - e[1][1])/(e[1][0] + e[1][1]))) result.cache() result.takeOrdered(10, lambda v: -v[1]), result.takeOrdered(10, lambda v: +v[1]) # + colab={"base_uri": "https://localhost:8080/"} id="Nq0ME9-NqJ2w" outputId="f2b3d4b2-cc3e-4726-a28d-7ca6b5a74f3e" result.coalesce(numPartitions=2) # Avoids the data movement, so it tries to balance inside each machine result.repartition(numPartitions=2) # We don't care about data movement, this balance the whole thing to ensure all machines are used # + id="79DLzM3WqaEF" result.take(10) allWords.cache() # allWords RDD must stay in memory after computation, we made a checkpoint (well, it's a best effort, so must might be too strong) result.take(10) # + id="NmdxjbAAqjIC" from pyspark import StorageLevel # https://spark.apache.org/docs/2.2.0/rdd-programming-guide.html#rdd-persistence allWords2.persist(StorageLevel.MEMORY_AND_DISK) # Now it will be preserved on disk also # + id="lPbqq1tYqo5H" # !rm -rf palabras_parte2 allWords2.saveAsTextFile("palabras_parte2") # + [markdown] id="nW3ELXvSqtdn" # ## Pregunta TS2.6 Antes de guardar el fichero, utilice coalesce con diferente valores ¿Cuál es la diferencia? 
# + id="wjBusUNgrMdB" # + [markdown] id="Rrlv7SpGrOvA" # Variables Globales # + id="QhsyqMMXrVZ9" articles = sc.broadcast(["el", "la"]) articles.value # + id="Z51vqUrasDqz" acc = sc.accumulator(0) def incrementar(x): global acc acc += x allWords.map(lambda l:1).foreach(incrementar) acc # + id="N2CkixrL7Ams" print (rdd.getStorageLevel()) rdd = sc.textFile('elquijote.txt') # %time print (rdd.count()) # %time print (rdd.count()) rdd.cache() # %time print (rdd.count()) # %time print (rdd.count()) # + id="bYVHFmzs7Ow3" pals_a_eliminar = ['a', 'ante', 'bajo', 'segun', 'que', 'de'] def elimPalabras(p): global pals_a_eliminar return p not in pals_a_eliminar lineas = sc.textFile('elquijote.txt', 8) pals = (lineas.flatMap(lambda linea: linea.lower().split()) .filter(elimPalabras) .map(lambda pal: (pal, 1)) .reduceByKey(lambda elem1,elem2: elem1 + elem2)) print (pals.takeOrdered(15, key=lambda a:-a[1])) # + id="SBPm64s-7wsO" counter = sc.accumulator(0) rdd = sc.textFile('elquijote.txt') def increment_counter(x): global counter counter += x rdd.map(lambda l:len(l)).foreach(increment_counter) print ("Counter value: %d" % counter.value)
MapReduce/Parte 2/InstalarSpark+Examples.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.9.6 64-bit
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
import seaborn as sn
# -

data_df = pd.read_csv("train.csv")
data_df.drop("Id", axis=1, inplace=True)

# Dropping rows because of my laziness ;)
data_df.drop(index=1379, inplace=True)  # One NaN in "Electrical"
# print(data_df)

wip_data_df = data_df.fillna(
    {"LotFrontage": 0,
     "Alley": "ANoAccess",
     "MasVnrType": "MVNotExist",
     "MasVnrArea": 0,
     "BsmtQual": "BNotExist",
     "BsmtExposure": "BNotExist",
     "BsmtFinType1": "BNotExist",
     "BsmtFinSF1": 0,
     "BsmtFinType2": "BNotExist",
     "BsmtFinSF2": 0,
     "BsmtUnfSF": 0,
     "BsmtCond": "BNotExist",
     "TotalBsmtSF": 0,
     "FireplaceQu": "FPNotExist",
     "GarageType": "GNotExist",
     "GarageYrBlt": 0,
     "GarageFinish": "GNotExist",
     "GarageQual": "GNotExist",
     "GarageCond": "GNotExist",
     "PoolQC": "PNotExist",
     "Fence": "FCNotExist",
     "MiscFeature": "MCNotExist"
     })

to_delete_dict = {"BNotExist": ["BsmtExposure", "BsmtFinType1", "BsmtFinType2", "BsmtCond"],
                  "GNotExist": ["GarageFinish", "GarageQual", "GarageCond"]}
to_delete = set()
for variable in to_delete_dict:
    for name in to_delete_dict[variable]:
        to_delete.add(name + "_" + variable)
del to_delete_dict
# print(to_delete)

na = wip_data_df.isnull().sum()
print(na.sum())
del na

data_strings = list(data_df.select_dtypes("object").columns)
# print(data_strings)

# +
# I thought that (as one tutorial has shown) my one-hot variables would be named
# "Col_<No. of given column>_<categorical variable>", so I wanted to make sure
# that their names would be easy to read. 😅

# for name in data_strings:
#     for i in wip_data_df.index:
#         wip_data_df[name][i] = wip_data_df[name][i] + name

# print(wip_data_df)
# -

dummies = pd.get_dummies(wip_data_df)
columns = set(dummies.columns)
# print(to_delete)
# print(columns.intersection(to_delete))
# print(columns.intersection(to_delete) == to_delete)
# They are the same :D
dummies.drop(to_delete, axis=1, inplace=True)
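# A quick check (my addition, relying only on pandas' default `get_dummies` behaviour): one-hot
# columns are already named `<column>_<value>`, which is why the commented-out renaming loop above
# is unnecessary and why `to_delete` entries such as "BsmtCond_BNotExist" match the dummy columns directly.

import pandas as pd

demo = pd.get_dummies(pd.DataFrame({"Alley": ["ANoAccess", "Pave", "Grvl"]}))
print(demo.columns.tolist())   # expected: ['Alley_ANoAccess', 'Alley_Grvl', 'Alley_Pave']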
main.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/tuankhoin/COMP30027-Practical-Solutions/blob/main/Week%203.ipynb) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 2968, "status": "ok", "timestamp": 1646969415452, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "17831145053126140220"}, "user_tz": -660} id="3cE3edmmBKEN" outputId="4d742712-e827-4e02-dcbd-d55607c622e0" # FYI, this mounts your Google Drive directory to this Colab notebook, # in case you want to run the worksheet on Colab like I do ;) from google.colab import drive drive.mount('/content/gdrive') path = "gdrive/My Drive/COMP30027 (T)/W3/" # + [markdown] id="ZJj8V9Prah0n" # In case you forgot, my name is Khoi. # ### When assignment comes out: # * Follow-up questions for my prac: `<EMAIL>` || `<EMAIL>` # * 'Dog ate my homework', 'I got covid',...: `<EMAIL>` # * 'I want more marks', 'I want remarks': Same as above # + [markdown] id="wQRuLK4K3rjw" # ### The University of Melbourne, School of Computing and Information Systems # # COMP30027 Machine Learning, 2022 Semester 1 # # ## Week 3 - workshop # + [markdown] id="aWRZJonD3rj2" # * Reading `.csv` files # * Data processing - on an extraction of Iris dataset # * Plotting with `matplotlib` # * Observing plots # + [markdown] id="OvLKE-rA3rj2" # Please ensure that the numpy, matplotlib, defaultdict and re packages are installed. # + executionInfo={"elapsed": 32, "status": "ok", "timestamp": 1646969415454, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "17831145053126140220"}, "user_tz": -660} id="yFY-jNNt3rj3" import numpy as np from collections import defaultdict import re import matplotlib.pyplot as plt # %matplotlib inline # + [markdown] id="DXWJbAX_3rj4" # ### Exercise 1 # Begin by visually inspecting the raw data contained within the (textual) file `iris-data.csv`. Note that “inspecting the data” is mostly ineffectual for very large datasets. In fact, this is one of the main motivators for Machine Learning! :-) # + [markdown] id="mNKYOYAM3rj4" # - How many instances are there? How many attributes? The instances have been labelled with class information—so this dataset is suitable for **supervised** machine learning — how many classes are there? # + [markdown] id="u4XzVe_E3rj5" # - Confirm your observations by writing a function to count the instances, and keep track of how many instances of each class there are. (You might find the `string.split()` method useful.) # + colab={"base_uri": "https://localhost:8080/", "height": 206} executionInfo={"elapsed": 31, "status": "ok", "timestamp": 1646969415454, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "17831145053126140220"}, "user_tz": -660} id="rNVgzoElAIW_" outputId="72efa541-ce65-4e6b-aa89-57966c8b1417" import pandas as pd pd.read_csv(path + 'iris-data.csv').sample(5) # + executionInfo={"elapsed": 30, "status": "ok", "timestamp": 1646969415455, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "17831145053126140220"}, "user_tz": -660} id="KPFXqBf6B9qP" # str.split? 
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 48, "status": "ok", "timestamp": 1646969415475, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "17831145053126140220"}, "user_tz": -660} id="Le96Ok8n3rj6" outputId="27db5dca-14c9-4c43-a4b1-aafa7f82cfd1" def count_inst(filestream, class_dict): n_instances = 0 for line in filestream.readlines()[1:]: n_instances += 1 class_dict[line.strip().split(",")[-1]] += 1 return n_instances class_dict = defaultdict(int) f = open(path + "iris-data.csv",'r') print('Our total number of instances is:',count_inst(f,class_dict)) for lbl in class_dict.keys(): print(lbl, ':', class_dict[lbl], 'instances.') f.close() # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 459, "status": "ok", "timestamp": 1646977621363, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "17831145053126140220"}, "user_tz": -660} id="9kVDKC4ffS6-" outputId="3f145732-3fd6-4413-9508-14324399b0ab" # Another approach using Counter from collections import Counter # Can use pd.read_csv as well arr = np.genfromtxt(path+'iris-data.csv', delimiter=',', dtype=None, encoding=None)[:,-1] Counter(arr) # + [markdown] id="uGM5Ifut3rj8" # - You should notice a couple of problems with the class labels; make a copy of the CSV file called `iris-clean.csv`, and edit the erroneous class labels with their (likely) correct values (hint: `re`). # * "setosa" was misspelled to "setossa" for one instance # * A few instances use "versicolor" instead of "Iris-versicolor" # + executionInfo={"elapsed": 46, "status": "ok", "timestamp": 1646969415477, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "17831145053126140220"}, "user_tz": -660} id="7bQxqNdK3rj8" f = open(path + "iris-data.csv",'r') f_out = open(path + "iris-clean.csv",'w') for line in f: # What does the $ do here? # Why do you need the coma before versicolor? line = re.sub(r'Iris-setossa$','Iris-setosa',line) line = re.sub(r',versicolor$',",Iris-versicolor",line) f_out.write(line) f.close() f_out.close() # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 47, "status": "ok", "timestamp": 1646969415478, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "17831145053126140220"}, "user_tz": -660} id="JDcInmln3rj9" outputId="694cfc9c-66ac-40e8-c130-a160a93d7727" n_instances = 0 class_dict = defaultdict(int) f = open(path + "iris-clean.csv",'r') print('Our total number of instances is:',count_inst(f,class_dict)) for lbl in class_dict.keys(): print(lbl, ':', class_dict[lbl], 'instances.') f.close() # + [markdown] id="Gb6XuoTn3rj9" # ### Exercise 2 # Before we can think about building a classifier, we should double–check that the data is formatted **correctly**. Confirm that the first line of the dataset is a header, which describes the expected format. # + [markdown] id="gnG0Rjl73rj-" # **(a)** Write a function `check_csv()` which returns `True` if every instance has the same number of fields (in this case, attribute values separated by commas), and `False` otherwise. Then check if your file has the correct format. 
# + executionInfo={"elapsed": 43, "status": "ok", "timestamp": 1646969415479, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "17831145053126140220"}, "user_tz": -660} id="rlE3O5w92yFf" # str.strip? # + executionInfo={"elapsed": 55, "status": "ok", "timestamp": 1646969415491, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "17831145053126140220"}, "user_tz": -660} id="mDASWHwb3rj-" def check_csv(filestream,n_fields): ret_val = True for line in filestream: if (len(line.strip().split(",")) != n_fields): ret_val = False return ret_val # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 55, "status": "ok", "timestamp": 1646969415492, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "17831145053126140220"}, "user_tz": -660} id="dHKx5YAe3rj-" outputId="f80fec6c-32e8-46f6-a540-f61fe222cf0c" f = open(path + "iris-clean.csv",'r') header = f.readline() print("Header: ",header.strip()) n_fields = len(header.strip().split(",")) print("CSV passes? ",check_csv(f,n_fields)) f.close() # + [markdown] id="bBXyWH_33rj_" # **(b)** Write a function which, for each attribute, prints the number of instances where the attribute # value is numeric. What do you observe? # + executionInfo={"elapsed": 54, "status": "ok", "timestamp": 1646969415492, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "17831145053126140220"}, "user_tz": -660} id="_VHAFx_d3rj_" def numeric_att_vals(filestream,n_fields): att_counts = [0]*n_fields for line in filestream: instance = line.strip().split(",") for ctr in range(n_fields): try: float(instance[ctr]) att_counts[ctr]+=1 except ValueError: print("A 'not numeric' value found in field :",ctr+1, ", and it's value is", instance[ctr]) return att_counts # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 54, "status": "ok", "timestamp": 1646969415493, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "17831145053126140220"}, "user_tz": -660} id="1qAslpyg3rj_" outputId="ae09f5c5-a642-4d8c-fcd4-dcf9a6a20679" f = open(path + "iris-clean.csv",'r') f.readline() print("Numeric attribute value counts:",numeric_att_vals(f,n_fields-1)) #not including class f.close() # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 52, "status": "ok", "timestamp": 1646969415493, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "17831145053126140220"}, "user_tz": -660} id="Bw8rEt7r1ove" outputId="b5ada3db-2105-4280-86c7-6bfc7ff6dc16" # A pandas alternative: df = pd.read_csv(path + 'iris-data.csv') df.isna().sum() # + [markdown] id="LiYniLBh3rj_" # `It seems that "NA" is used for measurements that weren't recorded. Of course, this might also appear in various other ways, like "?", or "N/A", and so on, but not in the data that we're given.` # + [markdown] id="dIfKgm2h3rj_" # For the instances with non-numeric attribute values, the values appear to be missing. We can’t know what the true values of these attribute were supposed to be, but leaving them unknown might cause problems for our model. # # One possible approach toward solving this issue is replacing the missing values with the **mean of the observed values** for that attribute. 
# # To check if it's an appropriate method for our data. We can use the `matplotlib` library to plot a histogram for the data as follows: # + colab={"base_uri": "https://localhost:8080/", "height": 281} executionInfo={"elapsed": 723, "status": "ok", "timestamp": 1646969416167, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "17831145053126140220"}, "user_tz": -660} id="JAeyuLYa3rkA" outputId="16db4f19-280f-4b9e-ec71-f3db45019f20" sepal_lengths = [] sepal_widths = [] petal_lengths = [] petal_widths = [] with open(path + "iris-clean.csv",'r') as f: f.readline() for line in f: att_vals = line.strip().split(",") sl = att_vals[0] if not(sl) == "NA": sepal_lengths.append(float(sl)) sw = att_vals[1] if not(sw) == "NA": sepal_widths.append(float(sw)) pl = att_vals[2] if not(pl) == "NA": petal_lengths.append(float(pl)) pw = att_vals[3] if not(pw) == "NA": petal_widths.append(float(pw)) _,p = plt.subplots(2,2, gridspec_kw={'hspace':0.5}) p[0,0].set_title("Sepal Lengths (cm)") p[0,0].hist(np.array(sepal_lengths)) p[0,1].set_title("Sepal Widths (cm)") p[0,1].hist(np.array(sepal_widths)) p[1,0].set_title("Petal Lengths (cm)") p[1,0].hist(np.array(petal_lengths)) p[1,1].set_title("Petal Widths (cm)") p[1,1].hist(np.array(petal_widths)) plt.show() # + [markdown] id="YbEV-sRp3rkA" # **(c)** Explain how these histograms can help us to see check if using the "mean of the observed values" is the proper method for solving the issue of missing value for an attribute # + [markdown] id="iFnUogjF3rkA" # **(d)** Explain why using the **mean of the observed values** is a bad idea for the feature 4 ('Petal Widths'), based on the data you observe? Is there anything else we could do, so as to make mean imputation a plausible strategy here? # + [markdown] id="iDKngVlX3rkA" # `Unlike, say, sepal widths (that has a roghly normal distrubution), the mean value of the petal width distribution (our forth attribute with missing values) doesn't appear to be representative of much of the data.` # # `Based on these bins, the mean isn't even the mode! One possible reason for this is that different classes of Iris might have a different mean petal width: for example, there appear to be roughly three peaks in this distribution (one around 0.3, one around 1.3, and one around 2.0), and three classes. 
It could also be just a coincidence; we would have to re-plot the data to get a better sense of it:` # + colab={"base_uri": "https://localhost:8080/", "height": 281} executionInfo={"elapsed": 676, "status": "ok", "timestamp": 1646969416831, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "17831145053126140220"}, "user_tz": -660} id="3obDwmZN3rkA" outputId="b3ccc5f7-982f-497f-e5a0-5edfb3e3efc8" iris_setosa = [] iris_versicolor = [] iris_virginica = [] with open(path + "iris-clean.csv",'r') as f: f.readline() for line in f: att_vals = line.strip().split(",") pw = att_vals[3] if not(pw) == "NA": if att_vals[4] == "Iris-setosa": iris_setosa.append(float(pw)) elif att_vals[4] == "Iris-versicolor": iris_versicolor.append(float(pw)) elif att_vals[4] == "Iris-virginica": iris_virginica.append(float(pw)) else: print("Unexpected class: ",att_vals[4]) plt.title("Petal Widths (cm) by Iris type") plt.hist(np.array(iris_setosa)) plt.hist(np.array(iris_versicolor)) plt.hist(np.array(iris_virginica)) plt.legend(['Iris-setosa','Iris-versicolor','Iris-virginica']) plt.show() # + [markdown] id="v04hgxFr3rkB" # `Here, it appears that if we wanted to guess an unseen value of petal width for an instance of a given class, then the mean value for the instances of that class would appear to be reasonable.` # # `Of course, if we didn't know the class in advance (for example, for an instance from the test data, rather than the training) then this style of imputation wouldn't work.` # + [markdown] id="FuVvxviS3rkB" # ### Exercise 3 # Use the method above to plot the histograms for the other attributes. You should observe some clear **outliers** in the attribute values. Take a look at the raw data, and see if you can guess what could have caused the outliers. Modify your `iris-clean.csv` copy of the data, to fix the outlying attribute values. 
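# In addition to the histograms plotted below, a quick numeric summary can already hint at suspicious values. This is a minimal sketch using pandas, assuming the `iris-clean.csv` copy created earlier (which, at this point, still contains the raw attribute values):

# +
summary_df = pd.read_csv(path + "iris-clean.csv", na_values=["NA"])
print(summary_df.iloc[:, :4].describe())   # unusually small minima or large maxima stand out here
# -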
# + colab={"base_uri": "https://localhost:8080/", "height": 809} executionInfo={"elapsed": 597, "status": "ok", "timestamp": 1646969417420, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "17831145053126140220"}, "user_tz": -660} id="hsl6xbUo3rkB" outputId="804a44b2-6662-4b54-8d25-4cd76808c79b" # Using a 2-d (or 3-d) list here would probably be better than several named lists # But the attribute set is small enough that it should be okay in this context sl_setosa = [] sw_setosa = [] pl_setosa = [] sl_versicolor = [] sw_versicolor = [] pl_versicolor = [] sl_virginica = [] sw_virginica = [] pl_virginica = [] with open(path + "iris-clean.csv",'r') as f: f.readline() for line in f: att_vals = line.strip().split(",") sl = att_vals[0] sw = att_vals[1] pl = att_vals[2] if att_vals[4] == "Iris-setosa": if not(sl) == "NA": sl_setosa.append(float(sl)) if not(sw) == "NA": sw_setosa.append(float(sw)) if not(pl) == "NA": pl_setosa.append(float(pl)) elif att_vals[4] == "Iris-versicolor": if not(sl) == "NA": sl_versicolor.append(float(sl)) if not(sw) == "NA": sw_versicolor.append(float(sw)) if not(pl) == "NA": pl_versicolor.append(float(pl)) elif att_vals[4] == "Iris-virginica": if not(sl) == "NA": sl_virginica.append(float(sl)) if not(sw) == "NA": sw_virginica.append(float(sw)) if not(pl) == "NA": pl_virginica.append(float(pl)) else: print("Unexpected class: ",att_vals[4]) plt.title("Sepal Lengths (cm) by Iris type") plt.hist(np.array(sl_setosa)) plt.hist(np.array(sl_versicolor)) plt.hist(np.array(sl_virginica)) plt.legend(['Iris-setosa','Iris-versicolor','Iris-virginica']) plt.show() plt.title("Sepal Widths (cm) by Iris type") plt.hist(np.array(sw_setosa)) plt.hist(np.array(sw_versicolor)) plt.hist(np.array(sw_virginica)) plt.legend(['Iris-setosa','Iris-versicolor','Iris-virginica']) plt.show() plt.title("Petal Lengths (cm) by Iris type") plt.hist(np.array(pl_setosa)) plt.hist(np.array(pl_versicolor)) plt.hist(np.array(pl_virginica)) plt.legend(['Iris-setosa','Iris-versicolor','Iris-virginica'],bbox_to_anchor=(1.05, 1)) plt.show() # + [markdown] id="e_liFdwg3rkB" # `Most of the sepal lengths of all three kinds of Iris are between 4cm and 8cm, however, there are a small number of Iris-versicolor instances for which the sepal lengths are less than 1cm, which looks a little strange. Let's take a look at them, compared to a sample of the rest of the data:` # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 10, "status": "ok", "timestamp": 1646969417421, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "17831145053126140220"}, "user_tz": -660} id="pjqF62Tl3rkB" outputId="7eb1737f-aecc-4d49-e599-056de2b68d49" print("Short sepal lengths:") print([x for x in sl_versicolor if x < 1.0]) print("Some regular sepal lengths:") print([x for x in sl_versicolor if x > 1.0][:10]) # + [markdown] id="0sJBuZt23rkC" # `Looking at the data, the smaller values have the same number of significant digits, but are smaller by two orders of magnitude. One possible explanation is that this information was being recorded in m instead of cm on some instances (perhaps because there were multiple people who were doing the data collection, or the software parameters changed, or various other possiblew reasons).` # # `We would want to double-check this with the data curators; are these values actually correct, or are they erroneous, and therefore should we attempt to fix them? 
Let's say that we did discover that these are truly erroneous readings, and that we should multiply them by a factor of 100.` # + executionInfo={"elapsed": 8, "status": "ok", "timestamp": 1646969417422, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "17831145053126140220"}, "user_tz": -660} id="rq8LrEgs3rkC" f = open(path + "iris-data.csv",'r') f_out = open(path + "iris-clean.csv",'w') #Pass through the header f_out.write(f.readline()) for line in f: atts = line.strip().split(",") if atts[-1] == "Iris-setossa": atts[-1] = "Iris-setosa" if atts[-1] == "versicolor": atts[-1] = "Iris-versicolor" if float(atts[0]) < 1.0: atts[0]=str(100*float(atts[0])) f_out.write(",".join(atts)+"\n") f.close() f_out.close() # + [markdown] id="JueCXhvz3rkF" # `For posterity, let's sanity-check the histogram of sepal lengths for the Iris-versicolor instances now.` # + colab={"base_uri": "https://localhost:8080/", "height": 281} executionInfo={"elapsed": 802, "status": "ok", "timestamp": 1646969418217, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "17831145053126140220"}, "user_tz": -660} id="5RpWZEVu3rkF" outputId="af11489a-12d7-4910-9ea7-a8180ec2c9d7" sl_versicolor = [] with open(path + "iris-clean.csv",'r') as f: f.readline() for line in f: att_vals = line.strip().split(",") sl = att_vals[0] if att_vals[4] == "Iris-versicolor": if not(sl) == "NA": sl_versicolor.append(float(sl)) plt.title("Sepal Lengths (cm) of Iris-versicolor") plt.hist(np.array(sl_versicolor)) plt.show() # + [markdown] id="JnQCwfOS3rkG" # ### Exercise 4 # Let’s attempt to visualise some relationships in the data. # + [markdown] id="jFeMlgyK3rkG" # First let's make a scatter-plot of the sepal length vs. sepal width by using the following: # + colab={"base_uri": "https://localhost:8080/", "height": 281} executionInfo={"elapsed": 14, "status": "ok", "timestamp": 1646969418218, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "17831145053126140220"}, "user_tz": -660} id="SR2G0RGt3rkG" outputId="72ec41ac-8d5d-4a0d-ca18-e2ade8964790" sepal_lengths = [] sepal_widths = [] petal_lengths = [] petal_widths = [] with open(path + "iris-clean.csv",'r') as f: f.readline() for line in f: att_vals = line.strip().split(",") sl = att_vals[0] if not(sl) == "NA": sepal_lengths.append(float(sl)) sw = att_vals[1] if not(sw) == "NA": sepal_widths.append(float(sw)) pl = att_vals[2] if not(pl) == "NA": petal_lengths.append(float(pl)) pw = att_vals[3] if not(pw) == "NA": petal_widths.append(float(pw)) # Obviously, we would want a better strategy here # But it's enough for now to record the handful of missing values as an impossible value else: petal_widths.append("0") plt.title("Sepal length (cm) vs Sepal Width (cm)") plt.scatter(np.array(sepal_lengths),np.array(sepal_widths)) plt.show() # + [markdown] id="e8bFcJyY3rkG" # **(a)** Can you identify a certain pattern in it? # + [markdown] id="Uyzf4-lP3rkG" # > `It is suggestive, but without a clear indication of the classes of the instances, we can’t really be certain of any patterns in the data.` # + [markdown] id="5HRjSbup3rkG" # The scatter function takes an optional argument for colouring points, based on a list of strings (like red or blue). Let's replot our data using different colours for different classes. 
# + colab={"base_uri": "https://localhost:8080/", "height": 281} executionInfo={"elapsed": 11, "status": "ok", "timestamp": 1646969418218, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "17831145053126140220"}, "user_tz": -660} id="rDl0W9lF3rkG" outputId="c4400520-3f2d-4239-a540-984652e3ca85" # The data has the instances ordered by class # The first 50 are Iris-setosa, the next 50 are Iris-versicolor, and the final 50 are Iris-virginica # If the data wasn't sorted by class, we would have to do some more work here colours = 50*["red"]+50*["green"]+50*["blue"] # As a brief aside, colourblind readers would want to discriminate using value/saturation instead of hue # and ideally also different shapes for the points of each class plt.title("Sepal length (cm) vs Sepal Width (cm)") plt.scatter(np.array(sepal_lengths),np.array(sepal_widths),c=colours) plt.show() # + [markdown] id="4UvJErpH3rkH" # **(b)** Now can you identify a certain pattern? # + [markdown] id="ul0Laz103rkH" # > At a casual glance, it appears that most of the Iris-setosa instances (red dots) occupy a distinct "region" of this space (relatively short sepal length, but relatively high sepal width), but for the other two classes, the points are somewhat "mixed up". # + [markdown] id="ecGEwTso3rkH" # Now let's try making scatter-plots for different pairs of attributes. # + colab={"base_uri": "https://localhost:8080/", "height": 504} executionInfo={"elapsed": 1849, "status": "ok", "timestamp": 1646969420058, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "17831145053126140220"}, "user_tz": -660} id="USRFEykM3rkH" outputId="e56951f2-58de-48aa-e1e1-51bfd90da523" colours = 50*["red"]+50*["green"]+50*["blue"] _,p = plt.subplots(2,3, gridspec_kw={'hspace':0.2, 'wspace':0.2}, figsize=(20,10)) p[0,0].set_title("Sepal length (cm) vs Petal length (cm)") p[0,0].scatter(np.array(sepal_lengths),np.array(petal_lengths),c=colours) p[0,1].set_title("Sepal length (cm) vs Petal width (cm)") p[0,1].scatter(np.array(sepal_lengths),np.array(petal_widths),c=colours) p[0,2].set_title("Sepal width (cm) vs Petal length (cm)") p[0,2].scatter(np.array(sepal_widths),np.array(petal_lengths),c=colours) p[1,0].set_title("Sepal width (cm) vs Petal width (cm)") p[1,0].scatter(np.array(sepal_widths),np.array(petal_widths),c=colours) p[1,1].set_title("Petal length (cm) vs Petal width (cm)") p[1,1].scatter(np.array(petal_lengths),np.array(petal_widths),c=colours) plt.show() # + [markdown] id="GREka1zl3rkH" # **(c)** # - Do you notice anything that might suggest that one class is distinguishable from the others? # - How might this information be utilised by the Naive Bayes algorithm? # - What about other supervised machine learning methods? # + [markdown] id="ObDqV3Xx3rkH" # > Each of these plots gives us a little bit of information about some distinguishing characteristics of the data; one simple observation for the first four graphs is that the red instances (Iris-setosa) occupy the lower-most area (low petal length/petal width), the green instances (Iris-versicolor) are in the middle, and the blue instances are near the top (high petal length/petal width).` # # > The final graph appears to show a good relationship between two attributes (petal length and petal width), which seems to strongly suggest how to distinguish one class from another. 
However, the question asks us about Naive Bayes in particular, which might have some slight problems in this case because it uses every attribute value when making a prediction; sepal width doesn't look like a useful predictor in this context, even when combined with each of the other attributes. # # > As the semester continues, we'll talk more about how Naive Bayes manipulates these numerical attributes, and some other methods (like Support Vector Machines) that can better discover some of the discriminating factors that we can readily observe in this case of the Iris data. This is important, because with a larger attribute set, say 1000 or so, we might not be able to sensibly examine all of the pairwise scatterplots; and even if we could, it might be the case that it is the combination of three (or more) attributes which allows us to discriminate the data; we can't realistically observe such a thing by inspection. #
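# As an aside, here is a minimal sketch (an illustration, not part of the exercise) of fitting scikit-learn's Gaussian Naive Bayes to the cleaned data. It assumes scikit-learn is available, that the first four columns of `iris-clean.csv` are the measurements with the class label in the fifth column, and it simply drops the few rows that still contain missing values.

# +
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split

iris_df = pd.read_csv(path + "iris-clean.csv", na_values=["NA"]).dropna()
X = iris_df.iloc[:, :4].to_numpy()   # the four measurements
y = iris_df.iloc[:, 4].to_numpy()    # the class label

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
nb = GaussianNB().fit(X_train, y_train)
print("Held-out accuracy:", nb.score(X_test, y_test))
# -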
Week 3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from CalcLigRMSD import * # # CalcLigRMSD # CalcLigRMSD calculates the Root-mean-square deviation (RMSD) between two prealigned molecules. # # This jupyter notebook shows examples of how to use the CalcLigRMSD function. # This function is particularly useful in 2 cases: # # 1. when the atom names in the compared structures do not match, i.e. when the same atoms present different names in the coordinate files. # 2. when one or both structures have missing atoms. This occurs, for example, in crystallographic structures when some atoms are not well defined in the electron density map. # # Below, there are also examples of how to align protein structures and extract ligand coordinates using a python/pymol function. The code below may be used both when the protein structures have multiple chains/domains and when protein structures present multiple copies of the ligand of interest, either in the binding pocket or on the surface. # #### How to use the CalcLigRMSD function: help(CalcLigRMSD) # # RMSD between Pre-aligned Ligands with not Matching Atom Names # ## Basic Example # #### Complex between the kinase protein AURKA and the JVE ligand # # Comparison between the crystal structure of JVE (pink, PDB code: 4UZH) and the docked pose of JVE (yellow). # JVE was docked into the the AURKA structure 2C6E using AutodockVina. # + docked_ligand = Chem.MolFromPDBFile("data/docked_2c6e_JVE_pH74_netcharge1.pdb") crystal_ligand = Chem.MolFromPDBFile("data/4uzh_JVE.pdb") rmsd2 = CalcLigRMSD(docked_ligand, crystal_ligand, rename_lig2 = True, output_filename = 'data/JVE_renamed.pdb') print(f"RMSD: {rmsd2:.2f}") # - # The atom names in the docked structure and in the crystal structure are different. However, the function is able to match the atoms and calculate the RMSD. # # <img src="./figures/JVE.png" width=600> # ## Examples where symmetry needs to be considered # #### Complex between the kinase protein AURKA and the N15 ligand # + docked_ligand = Chem.MolFromPDBFile("data/docked_2c6e_N15_pH74_netcharge1.pdb") crystal_ligand = Chem.MolFromPDBFile("data/3w2c_N15.pdb") rmsd2 = CalcLigRMSD(docked_ligand, crystal_ligand, rename_lig2 = True, output_filename = 'data/N15_renamed.pdb') print(f"RMSD: {rmsd2:.2f}") # - # The N15 ligand contains two symmetric groups (highlighted in the Figure below). The algorithm takes symmetry into consideration: it calculates the RMSD between all possible "symmetrical" combinations and returns the minimum RMSD. The structures with matched atom names corresponding to the lowest RMSD are reported in the figure below. # # <img src="./figures/N15.png" width=800> # #### Complex between the kinase protein AURKA and the AKI ligand docked_ligand = Chem.MolFromPDBFile("data/docked_2c6e_AKI_pH74_netcharge1.pdb") crystal_ligand = Chem.MolFromPDBFile("data/3m11_AKI.pdb") rmsd2 = CalcLigRMSD(docked_ligand, crystal_ligand, rename_lig2 = True, output_filename = 'data/AKI_renamed.pdb') print(f"RMSD: {rmsd2:.2f}") # The AKI ligand contains four symmetric groups (highlighted in the Figure below). Again, the code takes symmetry into consideration! 
# # <img src="./figures/AKI.png" width=800> # ### Example for a very symmetrical molecule # # <img src="./figures/BEG.png" width=300> # BEG is a very symmetrical molecule (see [this ref](https://jcheminf.biomedcentral.com/articles/10.1186/s13321-019-0362-7)). Not only it does it contain two benzene groups but the entire molecule is symmetrical. We have downloaded the crystal structure 1D4I and extracted the coordinates of the BEG molecule (1d4i_BEG.pdb). In a copy (BEG_test.pdb), we have then renamed the atoms randomly. If the alghoritm is able to correctly match the atoms, then it will return an RMSD of zero. And indeed: docked_ligand = Chem.MolFromPDBFile("data/BEG_test.pdb") crystal_ligand = Chem.MolFromPDBFile("data/1d4i_BEG.pdb") rmsd2 = CalcLigRMSD(docked_ligand, crystal_ligand, rename_lig2 = False, output_filename = 'data/AKI_renamed.pdb') print(f"RMSD: {rmsd2:.2f}") # ## Example where there are missing atoms in the crystal structure # Two crystal structures are available for the EGFR/1C9 complex. However, the electron density of the 1C9 is not well defined in any of the structures (see figure below). In such cases, one can calculate the RMSD between the docked and crystal structures by excluding the atoms which are not defined in the crystal structure. # # This is achived by calculating the RMSD for the maximum common substructure. docked_ligand = Chem.MolFromPDBFile("data/docked_3w2p_1C9_pH74_netcharge1.pdb") crystal_ligand = Chem.MolFromPDBFile("data/4i23_1C9.pdb") rmsd2 = CalcLigRMSD(docked_ligand, crystal_ligand, rename_lig2 = True, output_filename = 'data/1C9_renamed.pdb') print(f"RMSD: {rmsd2:.2f}") docked_ligand = Chem.MolFromPDBFile("data/docked_3w2p_1C9_pH74_netcharge1.pdb") crystal_ligand = Chem.MolFromPDBFile("data/4i24_1C9.pdb") rmsd2 = CalcLigRMSD(docked_ligand, crystal_ligand, rename_lig2 = True, output_filename = 'data/1C9_renamed.pdb') print(f"RMSD: {rmsd2:.2f}") # <img src="./figures/1C9.png" width=800> # In this figure, the missing groups are highlighted in green in the docked pose while the truncation point in the crystal structure is highlighted in red. # # Align docking and crystal structures prior to the RMSD calculation # The function above takes as input pre-aligned ligand structures. In this section, we show how one could align crystallographic protein-ligand structure to the docked structure and extract the ligand coordinates from the crystal structure using Pymol. from pymol import cmd # ## 1. Load the crystallographic and docked protein-ligand structures in pymol # # Docking programs often return as output only the coordinates of the docked ligand. 
# Therefore, in order to be able to perform the protein alignment, # we first need to load both the protein structure used for docking and the docked ligand structure in pymol # and merge the coordinates in a temporary PDB file, complex.pdb # # # Input files: # - `docked_lig_file` : name of file containing the coordinates of the docked ligand # - `protein_file` : name of file containing the the protein structure used for docking # - `crystal_file`: name of file containing the crystal structure of the complex # + # Function to merge the ligand and protein PDB files in a unique PDB file def merge_prot_lig_pdbs(protein_file, docked_lig_file, output_file = 'complex.pdb'): cmd.reinitialize() cmd.load(protein_file, 'protein') cmd.load(docked_lig_file, 'docked_lig') cmd.save(output_file, "protein or docked_lig") # + docked_lig_file = "data/docked_2c6e_SKE_pH74_netcharge1.pdb" protein_file = "data/aurka_protein_2c6e.pdb" crystal_file = "data/5dt0.pdb" merge_prot_lig_pdbs(protein_file, docked_lig_file) # - # Now we can reinitialize Pymol and load both the docked and crystal complexes in Pymol. The step above can be skipped if the docked complex is directly available. # load both the docked and crystallographic protein-ligand structures cmd.reinitialize() cmd.load("complex.pdb", "docked") cmd.load(crystal_file, "crystal") # ## 2. Proteins Alignment # ### Simple Example # Align the proteins on the backbone alpha carbons and extract the coordinates of the ligand from the crystal structure: # # # + # align the protein structures on the CA cmd.align("crystal and name CA", "docked and name CA") # save the coordinates of the ligand in a PDB file cmd.save("data/lig_crystal_aligned.pdb", "crystal and resn SKE") # - # Now, you can calculate the RMSD: docked_ligand = Chem.MolFromPDBFile(docked_lig_file) crystal_ligand = Chem.MolFromPDBFile("data/lig_crystal_aligned.pdb") rmsd2 = CalcLigRMSD(docked_ligand, crystal_ligand, rename_lig2 = True, output_filename = 'data/1C9_renamed.pdb') print(f"RMSD: {rmsd2:.2f}") # ### Alignment for Structures with Multiple Chains/Domains and Different/Multiple Ligands # The structure alignment and ligand extraction becomes more complicated for crystal structures that contain dimers, or higher oligomers, multi-protein complexes, multiple and different ligands! # In these case, the code below can be used. It reads as input the crystallographic and docked structures. # The function does the following: # # - check for the presence of organic molecules in the crystal structure. # - check for the presence of organic molecules in each of the chains # - if a chain contains an organic molecule, then that chain is aligned to the docked structure based on the C-alpha backbone atoms. # - calculate the distance between the organic molecule in the aligned crystal structure and the organic molecule in the aligned docked structure. # - if the distance is lower than a given threshold (max_dist), the coordinates of the organic molecule from the aligned crystal structure are stored in a PDB file. 
# # + from pymol import stored import numpy as np def find_organic_ligs(obj_name): cmd.select(f"ligs", f"{obj_name} and organic") stored.residues = set() cmd.iterate(f"ligs", 'stored.residues.add(resn)') ligs_resname = list(stored.residues) return list(set(ligs_resname) - set(['ACE', 'NME'])) def extract_lig_from_aligned_crystal(crystal_file, docked_file, max_dist = 7): count = 0 # load structures cmd.reinitialize() cmd.load(docked_file, "docked") cmd.load(crystal_file, "crystal") # identify organic molecules in both the docked and crystal structures lig_docked_name = find_organic_ligs('docked')[0] lig_crystal_names = find_organic_ligs('crystal') # check the number of chains in the crystal structure chains = cmd.get_chains("crystal") print(f"The crystal structure {crystal_file} contains {len(chains)} chains and {len(lig_crystal_names)} ligands {lig_crystal_names}") # loop over the organic molecules for lig_crystal_name in lig_crystal_names: print(f"\n{lig_crystal_name}") # loop over the chains for chain in chains: sel = cmd.select(f"ligs{chain}", f"crystal and chain {chain} and resn {lig_crystal_name}") if sel == 0: continue else: cmd.align(f"crystal and chain {chain} and name CA", "docked and name CA") cmd.save("data/crystal_aligned.pdb", "crystal") cmd.select(f"ligs{chain}", f"crystal and chain {chain} and resn {lig_crystal_name}") stored.residues = set() cmd.iterate(f"ligs{chain}", 'stored.residues.add(resv)') lig_res = list(stored.residues) # measure the distance between the center of mass of the ligands in docked and crystal structures. center2 = cmd.centerofmass(f"docked and resn {lig_docked_name}") # there may be multiple ligand copies in the crystal structure. Loop over them. for lig1 in lig_res: center1 = cmd.centerofmass(f"crystal and chain {chain} and resi {lig1}") # calculate distance dist = np.linalg.norm(np.array(center1)-np.array(center2)) # if the distance is lower than the threshold, the ligand is in the binding pocket. # Therefore, the coordinates of this crystal ligand are written out in a PDB file if dist < max_dist: count += 1 cmd.save(f"data/{lig_crystal_name}_crystal_aligned_{count}.pdb", f"crystal and chain {chain} and resi {lig1}") print(f"The coordinates of {lig_crystal_name} extracted from the crystal structure and chain {chain} are stored in data/{lig_crystal_name}_crystal_aligned_{count}.pdb") else: print(f"PDB {crystal_file} cmpd {lig_crystal_name} res {lig1} is out of the binding pocket") # - # #### Example where the crystal structure contains multiple chains # # The PDB structure 6C83 contains four protein chains, two of the target of interest AURKA and two of another protein. # The two monomers of AURKA, both contain the compound of interest in the binding pocket. The other two proteins do not contain any organic molecule. # # In this case, we can use the function described above to perform the protein alignment and extract the coordinates of the ligand. As both monomers contain the ligand, two PDB files will be outputted. 
# # <img src="./figures/6c83_complex.png" width=400> # + # merge protein and docked ligand docked_lig_file = "data/docked_2c6e_ACP_pH74_netcharge1.pdb" protein_file = "data/aurka_protein_2c6e.pdb" crystal_file = "data/6c83.pdb" merge_prot_lig_pdbs(protein_file, docked_lig_file) # - # extract the coordinated of the ligand from the crystal structure extract_lig_from_aligned_crystal(crystal_file, "complex.pdb", max_dist = 7) # Calculate RMSD for ACP extracted from chain A: docked_ligand = Chem.MolFromPDBFile(docked_lig_file) crystal_ligand = Chem.MolFromPDBFile("data/ACP_crystal_aligned_1.pdb") rmsd2 = CalcLigRMSD(docked_ligand, crystal_ligand, rename_lig2 = True, output_filename = 'data/ACP_renamed.pdb') print(f"RMSD: {rmsd2:.2f}") # + # Calculate RMSD for ACP extracted from chain B: docked_ligand = Chem.MolFromPDBFile(docked_lig_file) crystal_ligand = Chem.MolFromPDBFile("data/ACP_crystal_aligned_2.pdb") rmsd2 = CalcLigRMSD(docked_ligand, crystal_ligand, rename_lig2 = True, output_filename = 'data/ACP_renamed.pdb') print(f"RMSD: {rmsd2:.2f}") # - # #### Example where the crystal structure contains different ligands # # The PDB structure 5DPV of AURKA contains two different ligands, 5DN and SKE. Tthe figure below shows that SKE (blue) is located in the binding pocket while 5DN (orange) lies on the surface of the protein. # # It occurs very often that a crystal structure contains different ligands or multiple copies of the same ligand. It is important to extract only the coordinates of the ligand located in the binding pocket, corresponding to the ligand of interest. # # The function described above checks the location of the ligand before extracting the coordinates. And this is achived by measuring the distance between the center of mass of the ligand in the docked structure (assuming that it is located in the binding pocket) and that of the ligand in the crystal structure. # # <img src="./figures/5dpv_complex.png" width=300> # + docked_lig_file = "data/docked_2c6e_SKE_pH74_netcharge1.pdb" protein_file = "data/aurka_protein_2c6e.pdb" crystal_file = "data/5dpv.pdb" merge_prot_lig_pdbs(protein_file, docked_lig_file) # - extract_lig_from_aligned_crystal(crystal_file, "complex.pdb", max_dist = 7) # Calculare RMSD: docked_ligand = Chem.MolFromPDBFile(docked_lig_file) crystal_ligand = Chem.MolFromPDBFile("data/SKE_crystal_aligned_1.pdb") rmsd2 = CalcLigRMSD(docked_ligand, crystal_ligand, rename_lig2 = True, output_filename = 'data/SKE_renamed.pdb') print(f"RMSD: {rmsd2:.2f}")
Contrib/CalcLigRMSD/Examples_CalcLigRMSD.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: py37talos # language: python # name: py37talos # --- # # Classifiers - HP - low vs mid vs high complexity - with TFv1 # Exploring different classifiers with with the contractive autoencoder for the NC task. It needs the TFv1 compatibility. # #### Table of contents: # autoencoders: # [Contractive Autoencoder](#Contractive-Autoencoder) # # classifiers: # [Simple dense classifier](#Simple-dense-classifier) # [LSTM-based classifier](#LSTM-based-classifier) # [kNN](#kNN) # [SVC](#SVC) # [Random Forest](#Random-Forest) # [XGBoost](#XGBoost) # + import datareader # made by the previous author for reading the collected data import dataextractor # same as above import pandas import numpy as np import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers, regularizers from tensorflow.keras.preprocessing import sequence from tensorflow.keras.models import Sequential, Model from tensorflow.keras.layers import Dense, Dropout, Activation, Input from tensorflow.keras.layers import LSTM from tensorflow.keras.layers import Conv1D, MaxPooling1D from tensorflow.keras.utils import to_categorical from tensorflow.keras.optimizers import Adam, Nadam # need to disable eager execution for .get_weights() in contractive autoencoder loss to work tf.compat.v1.disable_eager_execution() # required for the contractive autoencoder import tensorflow.keras.backend as K tf.keras.backend.set_floatx('float32') # call this, to set keras to use float32 to avoid a warning message metrics = ['accuracy'] from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.multiclass import OneVsRestClassifier import json from datetime import datetime import warnings import matplotlib.pyplot as plt # + # from https://github.com/ageron/handson-ml/blob/master/extra_tensorflow_reproducibility.ipynb config = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1) with tf.compat.v1.Session(config=config) as sess: #... this will run single threaded pass # + import random random.seed(1) np.random.seed(4) tf.random.set_seed(2) # - # Start the notebook in the terminal with "PYTHONHASHSEED=0 jupyter notebook" # or in anaconda "set PYTHONHASHSEED=0" then start jupyter notebook import os if os.environ.get("PYTHONHASHSEED") != "0": raise Exception("You must set PYTHONHASHSEED=0 when starting the Jupyter server to get reproducible results.") # This is modfied original author's code for reading data: def model_train(model, x_train, y_train, batch_size, epochs, x_valid, y_valid, x_test, y_test): """Train model with the given training, validation, and test set, with appropriate batch size and # epochs.""" epoch_data = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_valid, y_valid), verbose=0) score = model.evaluate(x_test, y_test, batch_size=batch_size, verbose=0) acc = score[1] score = score[0] return score, acc, epoch_data def get_task_complexities_timeframes_br_hb(path, ident, seconds, checkIfValid=True): """Returns raw data along with task complexity class. TODO: join functions. 
Add parameter to choose different task types and complexities""" dataread = datareader.DataReader(path, ident) # initialize path to data data = dataread.read_grc_data() # read from files samp_rate = int(round(len(data[1]) / max(data[0]))) cog_res = dataread.read_cognitive_load_study(str(ident) + '-primary-extract.txt') tasks_data = np.empty((0, seconds*samp_rate)) tasks_y = np.empty((0, 1)) breathing = np.empty((0,12)) heartbeat = np.empty((0,10)) busy_n = dataread.get_data_task_timestamps(return_indexes=True) for i in cog_res['task_number']: task_num_table = i - 225 # 0 - 17 tmp_tasks_data = np.empty((0, seconds*samp_rate)) tmp_tasks_y = np.empty((0, 1)) tmp_breathing = np.empty((0,12)) tmp_heartbeat = np.empty((0,10)) ### task complexity classification # if cog_res['task_complexity'][task_num_table] == 'medium': # continue if cog_res['task_label'][task_num_table] != 'HP': continue map_compl = { 'low': 0, 'medium': 2, 'high': 1 } for j in range(10): new_end = int(busy_n[task_num_table][1] - j * samp_rate) new_start = int(new_end - samp_rate*30) dataextract = dataextractor.DataExtractor(data[0][new_start:new_end], data[1][new_start:new_end], samp_rate) # get extracted features for breathing tmpBR = dataextract.extract_from_breathing_time(data[0][new_start:new_end], data[1][new_start:new_end]) #get extracted features for heartbeat tmpHB = dataextract.extract_from_heartbeat_time(data[0][new_start:new_end], data[1][new_start:new_end]) if checkIfValid and not(tmpBR['br_ok'][0]): continue try: tmp_tasks_data = np.vstack((tmp_tasks_data, dataextract.y[-samp_rate * seconds:])) tmp_tasks_y = np.vstack((tmp_tasks_y, map_compl.get(cog_res['task_complexity'][task_num_table]))) tmp_breathing = np.vstack((tmp_breathing, tmpBR.to_numpy(dtype='float64', na_value=0)[0][:-1])) tmp_heartbeat = np.vstack((tmp_heartbeat, tmpHB.to_numpy(dtype='float64', na_value=0)[0][:-1])) except ValueError: # print(ident) continue tasks_data = np.vstack((tasks_data, dataextract.y)) tasks_y = np.vstack((tasks_y, map_compl.get(cog_res['task_complexity'][task_num_table]))) breathing = np.vstack((breathing, tmpBR.to_numpy(dtype='float64', na_value=0)[0][:-1])) heartbeat = np.vstack((heartbeat, tmpHB.to_numpy(dtype='float64', na_value=0)[0][:-1])) return tasks_data, tasks_y, breathing, heartbeat def get_data_from_idents_br_hb(path, idents, seconds): """Go through all user data and take out windows of only <seconds> long time frames, along with the given class (from 'divide_each_task' function). 
""" samp_rate = 43 # hard-coded sample rate data, ys = np.empty((0, samp_rate*seconds)), np.empty((0, 1)) brs = np.empty((0,12)) hbs = np.empty((0,10)) combined = np.empty((0,22)) # was gettign some weird warnings; stack overflow said to ignore them with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) for i in idents: #x, y, br, hb = get_busy_vs_relax_timeframes_br_hb(path, i, seconds) # either 'get_busy_vs_relax_timeframes', # get_engagement_increase_vs_decrease_timeframes, get_task_complexities_timeframes or get_TLX_timeframes x, y, br, hb = get_task_complexities_timeframes_br_hb(path, i, seconds) data = np.vstack((data, x)) ys = np.vstack((ys, y)) brs = np.vstack((brs, br)) hbs = np.vstack((hbs, hb)) combined = np.hstack((brs,hbs)) return data, ys, brs, hbs, combined # Accs is a dictionary which holds 1d arrays of accuracies in each key # except the key 'test id' which holds strings of the id which yielded the coresponding accuracies def print_accs_stats(accs): printDict = {} # loop over each key for key in accs: if (key == 'test id'): # skip calculating ids continue printDict[key] = {} tmpDict = printDict[key] # calculate and print some statistics tmpDict['min'] = np.min(accs[key]) tmpDict['max'] = np.max(accs[key]) tmpDict['mean'] = np.mean(accs[key]) tmpDict['median'] = np.median(accs[key]) print(pandas.DataFrame.from_dict(printDict).to_string()) def set_random_seeds(): # clear session and set seeds again # cannot clear session due to tf.compat.v1 graphs, but add tf.compat.v1.set_random_seed # K.clear_session() tf.compat.v1.set_random_seed(2) random.seed(1) np.random.seed(4) tf.random.set_seed(2) # ## Prepare data # Initialize variables: # + # initialize a dictionary to store accuracies for comparison accuracies = {} # used for reading the data into an array seconds = 30 # time window length samp_rate = 43 # hard-coded sample rate phase_shape = np.empty((0, samp_rate*seconds)) y_shape = np.empty((0, 1)) breathing_shape = np.empty((0,12)) heartbeat_shape = np.empty((0,10)) combined_shape = np.empty((0,22)) idents = ['2gu87', 'iz2ps', '1mpau', '7dwjy', '7swyk', '94mnx', 'bd47a', 'c24ur', 'ctsax', 'dkhty', 'e4gay', 'ef5rq', 'f1gjp', 'hpbxa', 'pmyfl', 'r89k1', 'tn4vl', 'td5pr', 'gyqu9', 'fzchw', 'l53hg', '3n2f9', '62i9y'] path = '../../../../../StudyData/' # change to len(idents) at the end to use all the data n = len(idents) # - # Holds all the data so it doesnt have to be read from file each time data_dict = {} # Fill the data dictionary: # + for ident in idents.copy(): # read data phase, y, breathing, heartbeat, combined = get_data_from_idents_br_hb(path, [ident], seconds) if (y.shape[0] <= 0): idents.remove(ident) print(ident) continue # initialize ident in data_dict[ident] = {} tmpDataDict = data_dict[ident] # load data into dictionary tmpDataDict['phase'] = phase tmpDataDict['y'] = y tmpDataDict['breathing'] = breathing tmpDataDict['heartbeat'] = heartbeat tmpDataDict['combined'] = combined print(n) n = len(idents) print(n) # + # load all phase data to use for training autoencoders phase_all_train = get_data_from_idents_br_hb(path, idents[:-2], seconds)[0] # Scale each row with MinMax to range [0,1] phase_all_train = MinMaxScaler().fit_transform(phase_all_train.T).T # load all validation phase data to use for training autoencoders phase_all_valid = get_data_from_idents_br_hb(path, idents[-2:], seconds)[0] # Scale each row with MinMax to range [0,1] phase_all_valid = MinMaxScaler().fit_transform(phase_all_valid.T).T # - # ## Autoencoders # Train 
autoencoders to save their encoded representations in the data dictionary: # AE Training params batch_size = 128 epochs = 1000 encoding_dim = 30 ae_encoded_shape = np.empty((0,encoding_dim)) def compare_plot_n(data1, data2, data3, plot_n=5): #plot data1 values plt.figure() plt.figure(figsize=(20, 4)) for i in range(plot_n): plt.subplot(plot_n/5, 6, i+1) plt.plot(data1[i]) #plot data2 values plt.figure() plt.figure(figsize=(20, 4)) for i in range(plot_n): plt.subplot(plot_n/5, 6, i+1) plt.plot(data2[i]) #plot data3 values plt.figure() plt.figure(figsize=(20, 4)) for i in range(plot_n): plt.subplot(plot_n/5, 6, i+1) plt.plot(data3[i]) # #### Contractive Autoencoder # From: https://wiseodd.github.io/techblog/2016/12/05/contractive-autoencoder/ # define a function to be able to access the autoencoder in the loss funciton def loss_with_params(autoencoder): # loss function from https://wiseodd.github.io/techblog/2016/12/05/contractive-autoencoder/ def contractive_loss(y_pred, y_true): lam = 1e-4 mse = K.mean(K.square(y_true - y_pred), axis=1) W = K.variable(value=autoencoder.get_layer('encoded').get_weights()[0]) # N x N_hidden W = K.transpose(W) # N_hidden x N h = autoencoder.get_layer('encoded').output dh = h * (1 - h) # N_batch x N_hidden # N_batch x N_hidden * N_hidden x 1 = N_batch x 1 contractive = lam * K.sum(dh**2 * K.sum(W**2, axis=1), axis=1) return mse + contractive return contractive_loss def contractive_ae(x, encoding_dim=64, encoded_as_model=False): # From https://wiseodd.github.io/techblog/2016/12/05/contractive-autoencoder/ input_data = Input(shape=x[0].shape, name="input") encoded = Dense(encoding_dim, activation='relu', name='encoded')(input_data) outputs = Dense(x[0].shape[0], activation='sigmoid', name="output")(encoded) autoencoder = Model(input_data, outputs, name="autoencoder") # compile the model autoencoder.compile(optimizer='adam', loss=loss_with_params(autoencoder), metrics=metrics) # if return encoder in the encoded variable if encoded_as_model: encoded = Model(input_data, encoded) return autoencoder, encoded # Train autoencoder on data: set_random_seeds() ct_ae, ct_enc = contractive_ae(phase_all_train, encoding_dim=encoding_dim, encoded_as_model=True) ct_ae.fit(phase_all_train, phase_all_train, validation_data=(phase_all_valid, phase_all_valid), batch_size=batch_size, shuffle=True, epochs=epochs, verbose=0) # Plot signal, reconstruction and encoded representation: data2 = ct_ae.predict(phase_all_valid) data3 = ct_enc.predict(phase_all_valid) compare_plot_n(phase_all_valid, data2, data3) # Store the encoded representations in the data dictionary: for ident in data_dict: tmpDataDict = data_dict[ident] # read data phase = tmpDataDict['phase'] ct_data = ct_enc.predict(phase) # load data into dictionary tmpDataDict['contractive_encoded'] = ct_data # Helper function to get data from the dictionary: def get_ident_data_from_dict(idents, data_dict): # Initialize data variables y = y_shape.copy() phase = phase_shape.copy() contractive_encoded = ae_encoded_shape.copy() # Stack data form each ident into the variables for tmp_id in idents: phase = np.vstack((phase, data_dict[tmp_id]['phase'])) y = np.vstack((y, data_dict[tmp_id]['y'])) contractive_encoded = np.vstack((contractive_encoded, data_dict[tmp_id]['contractive_encoded'])) return y, phase, contractive_encoded # ## Classifiers # #### Helper loop function definition # A function that loops over all the data and calls the classifiers with it then stores the returned accuracies. 
def helper_loop(classifier_function_train, idents, n=5, num_loops_to_average_over=1, should_scale_data=True): #returns a dictionary with accuracies # set the variables in the dictionary accs = {} accs['phase'] = [] accs['contractive'] = [] accs['test id'] = [] start_time = datetime.now() with tf.compat.v1.Session(config=config) as sess: # leave out person out validation for i in range(n): # print current iteration and time elapsed from start print("iteration:", i+1, "of", n, "; time elapsed:", datetime.now()-start_time) ## ----- Data preparation: validation_idents = [idents[i]] test_idents = [idents[i-1]] train_idents = [] for ident in idents: if (ident not in test_idents) and (ident not in validation_idents): train_idents.append(ident) # save test id to see which id yielded which accuracies accs['test id'].append(test_idents[0]) # Load train data train_data = get_ident_data_from_dict(train_idents, data_dict) y_train = train_data[0] # Load validation data valid_data = get_ident_data_from_dict(validation_idents, data_dict) y_valid = valid_data[0] # Load test data test_data = get_ident_data_from_dict(test_idents, data_dict) y_test = test_data[0] data_names_by_index = ['y', 'phase', 'contractive'] # Loop over all data that will be used for classification and send it to the classifier # index 0 is y so we skip it for index in range(1, len(test_data)): set_random_seeds() train_x = train_data[index] valid_x = valid_data[index] test_x = test_data[index] # Scale data if should_scale_data: # Scale with standard scaler sscaler = StandardScaler() sscaler.fit(train_x) train_x = sscaler.transform(train_x) # Scale valid and test with train's scaler valid_x = sscaler.transform(valid_x) test_x = sscaler.transform(test_x) # Initialize variables tmp_acc = [] data_name = data_names_by_index[index] for index in range(num_loops_to_average_over): curr_acc = classifier_function_train(train_x, y_train, valid_x, y_valid, test_x, y_test, data_name) tmp_acc.append(curr_acc) # Store accuracy curr_acc = np.mean(tmp_acc) accs[data_name].append(curr_acc) # Print total time required to run this end_time = datetime.now() elapsed_time = end_time - start_time print("Completed!", "Time elapsed:", elapsed_time) return accs # #### Simple dense classifier # Define the classifier: params_dense_phase = { 'dropout': 0.3, 'hidden_size': 28, 'activation': 'sigmoid', 'loss': 'categorical_crossentropy', 'optimizer': Adam, 'batch_size': 128, 'learning_rate': 0.001, 'epochs': 300 } params_dense_ae_enc = { 'dropout': 0.1, 'hidden_size': 30, 'activation': 'relu', 'loss': 'categorical_crossentropy', 'optimizer': Adam, 'learning_rate': 0.01, 'batch_size': 106, 'epochs': 300 } def dense_train(x_train, y_train, x_valid, y_valid, x_test, y_test, data_name): # change Ys to categorical (one hot encoding) y_train = to_categorical(y_train, num_classes=3) y_valid = to_categorical(y_valid, num_classes=3) y_test = to_categorical(y_test, num_classes=3) params = params_dense_ae_enc if (data_name == 'phase'): params = params_dense_phase # Define the model model = Sequential() model.add(Dropout(params['dropout'])) model.add(Dense(params['hidden_size'])) model.add(Activation(params['activation'])) model.add(Dense(3)) model.add(Activation('sigmoid')) # Compile the model model.compile(loss=params['loss'], optimizer=params['optimizer'](learning_rate=params['learning_rate']), metrics=metrics) # Train the model and return the accuracy sc, curr_acc, epoch_data = model_train(model, x_train, y_train, params['batch_size'], params['epochs'], x_valid, y_valid, 
x_test, y_test) return curr_acc # Combine the autoencoders with the classifier: accs = helper_loop(dense_train, idents, n=n, should_scale_data=True) accuracies['simple_dense'] = accs # print accuracies of each method and corresponding id which yielded that accuracy (same row) pandas.DataFrame.from_dict(accs) # print some statistics for each method print_accs_stats(accs) # #### LSTM-based classifier # based on the original author's code params_lstm_phase = { 'kernel_size': 4, 'filters': 32, 'strides': 2, 'pool_size': 4, 'dropout': 0.01, 'lstm_output_size': 22, 'activation': 'relu', 'last_activation': 'sigmoid', 'loss': 'categorical_crossentropy', 'optimizer': Nadam, 'learning_rate': 0.005, 'batch_size': 186, 'epochs': 200 } params_lstm_ae_enc = { 'kernel_size': 2, 'filters': 6, 'strides': 2, 'pool_size': 2, 'dropout': 0.01, 'lstm_output_size': 32, 'activation': 'relu', 'last_activation': 'sigmoid', 'loss': 'categorical_crossentropy', 'optimizer': Nadam, 'learning_rate': 0.001, 'batch_size': 64, 'epochs': 100 } def LSTM_train(x_train, y_train, x_valid, y_valid, x_test, y_test, data_name): # change Ys to categorical (one hot encoding) y_train = to_categorical(y_train, num_classes=3) y_valid = to_categorical(y_valid, num_classes=3) y_test = to_categorical(y_test, num_classes=3) params = params_lstm_ae_enc if (data_name == 'phase'): params = params_lstm_phase # Reshape data to fit some layers xt_train = x_train.reshape(-1, x_train[0].shape[0], 1) xt_valid = x_valid.reshape(-1, x_valid[0].shape[0], 1) xt_test = x_test.reshape(-1, x_test[0].shape[0], 1) # Define the model model = Sequential() model.add(Dropout(params['dropout'])) model.add(Conv1D(params['filters'], params['kernel_size'], padding='valid', activation=params['activation'], strides=params['strides'])) model.add(MaxPooling1D(pool_size=params['pool_size'])) if (data_name == 'phase'): model.add(Conv1D(params['filters'], params['kernel_size'], padding='valid', activation=params['activation'], strides=params['strides'])) model.add(MaxPooling1D(pool_size=params['pool_size'])) model.add(Dropout(params['dropout'])) model.add(LSTM(params['lstm_output_size'])) model.add(Dense(3)) model.add(Activation(params['last_activation'])) # Compile the model model.compile(loss=params['loss'], optimizer=params['optimizer'](learning_rate=params['learning_rate']), metrics=['acc']) # Train the model and return the accuracy sc, curr_acc, epoch_data = model_train(model, xt_train, y_train, params['batch_size'], params['epochs'], xt_valid, y_valid, xt_test, y_test) return curr_acc # Combine the autoencoders with the classifier: accs = helper_loop(LSTM_train, idents, n=n) accuracies['LSTM'] = accs # print accuracies of each method and corresponding id which yielded that accuracy (same row) pandas.DataFrame.from_dict(accs) # print some statistics for each method print_accs_stats(accs) # #### kNN params_knn_phase = { 'n_neighbors': 3, 'metric': 'l2' } params_knn_ae_enc = { 'n_neighbors': 5, 'metric': 'manhattan' } # + from sklearn.neighbors import KNeighborsClassifier def KNN_classifier(params): model = KNeighborsClassifier(n_neighbors=params['n_neighbors'], metric=params['metric']) return model # - def KNN_train(x_train, y_train, x_valid, y_valid, x_test, y_test, data_name): params = params_knn_ae_enc if (data_name == 'phase'): params = params_knn_phase model = OneVsRestClassifier(KNN_classifier(params)) model.fit(x_train, y_train.ravel()) curr_acc = np.sum(model.predict(x_test) == y_test.ravel()) / len(y_test.ravel()) return curr_acc # Combine the autoencoders 
with the classifier: accs = helper_loop(KNN_train, idents, n) accuracies['kNN'] = accs # print accuracies of each method and corresponding id which yielded that accuracy (same row) pandas.DataFrame.from_dict(accs) # print some statistics for each method print_accs_stats(accs) # #### SVC params_svc_phase = { 'C': 3, 'kernel': 'rbf', 'gamma': 'scale' } params_svc_ae_enc = { 'C': 5, 'kernel': 'rbf', 'gamma': 'scale' } # + from sklearn.svm import SVC def SVC_classifier(params): model = SVC(random_state=42, C=params['C'], kernel=params['kernel'], gamma=params['gamma']) return model # - def SVC_train(x_train, y_train, x_valid, y_valid, x_test, y_test, data_name): params = params_svc_ae_enc if (data_name == 'phase'): params = params_svc_phase model = OneVsRestClassifier(SVC_classifier(params)) model.fit(x_train, y_train.ravel()) curr_acc = np.sum(model.predict(x_test) == y_test.ravel()) / len(y_test.ravel()) return curr_acc # Combine the autoencoders with the classifier: accs = helper_loop(SVC_train, idents, n=n, num_loops_to_average_over=5) accuracies['SVC'] = accs # print accuracies of each method and corresponding id which yielded that accuracy (same row) pandas.DataFrame.from_dict(accs) # print some statistics for each method print_accs_stats(accs) # #### Random Forest params_rf_phase = { 'n_estimators': 190, 'max_depth': 50, 'min_samples_split': 4, 'min_samples_leaf': 2, 'oob_score': False, 'ccp_alpha': 0.005 } params_rf_ae_enc = { 'n_estimators': 130, 'max_depth': 100, 'min_samples_split': 5, 'min_samples_leaf': 5, 'oob_score': True, 'ccp_alpha': 0.005 } from sklearn.ensemble import RandomForestClassifier def random_forest_classifier(params): model = RandomForestClassifier(random_state=42, n_estimators = params['n_estimators'], criterion = 'entropy', max_depth = params['max_depth'], min_samples_split = params['min_samples_split'], min_samples_leaf = params['min_samples_leaf'], oob_score = params['oob_score'], ccp_alpha = params['ccp_alpha'], max_features = 'log2', bootstrap = True) return model def random_forest_train(x_train, y_train, x_valid, y_valid, x_test, y_test, data_name): params = params_rf_ae_enc if (data_name == 'phase'): params = params_rf_phase model = OneVsRestClassifier(random_forest_classifier(params)) model.fit(x_train, y_train.ravel()) curr_acc = np.sum(model.predict(x_test) == y_test.ravel()) / len(y_test.ravel()) return curr_acc # Combine the autoencoders with the classifier: accs = helper_loop(random_forest_train, idents, n=n, num_loops_to_average_over=5, should_scale_data=False) accuracies['random_forest'] = accs # print accuracies of each method and corresponding id which yielded that accuracy (same row) pandas.DataFrame.from_dict(accs) # print some statistics for each method print_accs_stats(accs) # #### Naive Bayesian # + from sklearn.naive_bayes import GaussianNB def naive_bayesian_classifier(): model = GaussianNB() return model # - def naive_bayesian_train(x_train, y_train, x_valid, y_valid, x_test, y_test, data_name): model = OneVsRestClassifier(naive_bayesian_classifier()) model.fit(x_train, y_train.ravel()) curr_acc = np.sum(model.predict(x_test) == y_test.ravel()) / len(y_test.ravel()) return curr_acc # Combine the autoencoders with the classifier: accs = helper_loop(naive_bayesian_train, idents, n=n, should_scale_data=True) accuracies['naive_bayesian'] = accs # print accuracies of each method and corresponding id which yielded that accuracy (same row) pandas.DataFrame.from_dict(accs) # print some statistics for each method print_accs_stats(accs) # #### 
XGBoost params_xgb_phase = { 'n_estimators': 50, 'max_depth': 50, 'booster': 'gbtree' } params_xgb_ae_enc = { 'n_estimators': 130, 'max_depth': 4, 'booster': 'gbtree' } # + from xgboost import XGBClassifier def XGBoost_classifier(params): model = XGBClassifier(random_state=42, n_estimators=params['n_estimators'], max_depth=params['max_depth']) return model # - def XGBoost_train(x_train, y_train, x_valid, y_valid, x_test, y_test, data_name): params = params_xgb_ae_enc if (data_name == 'phase'): params = params_xgb_phase model = OneVsRestClassifier(XGBoost_classifier(params)) model.fit(x_train, y_train.ravel()) curr_acc = np.sum(model.predict(x_test) == y_test.ravel()) / len(y_test.ravel()) return curr_acc # Combine the autoencoders with the classifier: accs = helper_loop(XGBoost_train, idents, n=n, num_loops_to_average_over=5, should_scale_data=False) accuracies['XGBoost'] = accs # print accuracies of each method and corresponding id which yielded that accuracy (same row) pandas.DataFrame.from_dict(accs) # print some statistics for each method print_accs_stats(accs) # ### Compare Accuracies # Save all accuracies to results csv file: # + results_path = "../../results/LvMvH/LvMvH-HP-tfv1.csv" # Make a dataframe from the accuracies accs_dataframe = pandas.DataFrame(accuracies).T # Save dataframe to file accs_dataframe.to_csv(results_path, mode='w') # - # Print min, max, mean, median for each clasifier/autoencoder combination: for classifier in accuracies: print("-----------", classifier + ":", "-----------") accs = accuracies[classifier] print_accs_stats(accs) print("\n") # Print all accuracies in table form: for classifier in accuracies: print(classifier + ":") # print(pandas.DataFrame.from_dict(accuracies[classifier])) # Using .to_string() gives nicer loooking results (doesn't split into new line) print(pandas.DataFrame.from_dict(accuracies[classifier]).to_string()) print("\n")
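# Finally, the mean test accuracy per classifier and per data representation can be collected into a single overview table; this is a small sketch reusing the `accuracies` dictionary built above:

# +
# Collect mean accuracies into one table (rows: classifiers, columns: data representations)
mean_accs = {}
for classifier in accuracies:
    accs = accuracies[classifier]
    mean_accs[classifier] = {key: np.mean(accs[key]) for key in accs if key != 'test id'}

print(pandas.DataFrame.from_dict(mean_accs).T.to_string())
# -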
FeatureExtractionModule/src/autoencoder_approach/Per task approach/HP/autoencoder_classifiers-HP-low-vs-mid-vs-high-with-TFv1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # VacationPy # ---- # # #### Note # * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps. # + # Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import numpy as np import requests import gmaps import os import json # Import API key from api_keys import g_key # - # ### Store Part I results into DataFrame # * Load the csv exported in Part I to a DataFrame # + weather_city_data = pd.read_csv("../WeatherPy/WeatherCityData.csv") #types_df = pd.read_csv(weather_city_data) weather_city_data.dropna() weather_city_data.head(10) # - # ### Humidity Heatmap # * Configure gmaps. # * Use the Lat and Lng as locations and Humidity as the weight. # * Add Heatmap layer to map. # + gmaps.configure(api_key=g_key) locations = weather_city_data.head(10)[["Lat","Lng"]] humidity = weather_city_data.head(10)["Humidity"].astype(float) # + #Plot Heatmap fig = gmaps.figure(center = [0,0], zoom_level = 1) #Heat layer heat_layer = gmaps.heatmap_layer(locations, weights=humidity, dissipating=False, max_intensity=10, point_radius=4) #Add layer fig.add_layer(heat_layer) fig # - # ### Create new DataFrame fitting weather criteria # * Narrow down the cities to fit weather conditions. # * Drop any rows will null values. # + new_weather_df = pd.DataFrame(weather_city_data, columns = ["City","Max Temp", "Wind Speed", "Cloudiness"]) max_temp = (new_weather_df["Max Temp"] <= 80) & (new_weather_df["Max Temp"] > 70) wind_speed = new_weather_df["Wind Speed"] < 10 cloudiness = new_weather_df["Cloudiness"] == 0 new_weather_df.head() # - # ### Hotel Map # * Store into variable named `hotel_df`. # * Add a "Hotel Name" column to the DataFrame. # * Set parameters to search for hotels with 5000 meters. # * Hit the Google Places API for each city's coordinates. # * Store the first Hotel result into the DataFrame. # * Plot markers on top of the heatmap. 
hotel_df = pd.DataFrame(weather_city_data, columns=["City", "Country", "Lat", "Lng"])
hotel_df["Hotel Name"] = ""
hotel_df.head()

# +
parameters_coordinates = f"{hotel_df['Lat'][0]},{hotel_df['Lng'][0]}"
parameters_search = "hotels"
parameters_radius = 5000
parameters_type = "hotel"

parameters_dict = {
    "location": parameters_coordinates,
    "keyword": parameters_search,
    "radius": parameters_radius,
    "type": parameters_type,
    "key": g_key
}

base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"

# run a test request using our parameters dictionary
response = requests.get(base_url, params=parameters_dict)
# -

hotel_df.iloc[0][0]

# +
# create a list to capture hotel names to insert into the dataframe
hotel_names = []

# params dictionary to update each iteration
parameters_dict = {
    "radius": 5000,
    "types": "hotel",
    "keyword": "hotels",
    "key": g_key
}

base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"

for index, row in hotel_df.iterrows():
    lat = row["Lat"]
    lng = row["Lng"]

    # change location each iteration while leaving the other params in place
    parameters_dict["location"] = f"{lat},{lng}"

    # make the request and convert the response to json
    response = requests.get(base_url, params=parameters_dict).json()
    # print(json.dumps(response, indent=4, sort_keys=True))  # uncomment to inspect the raw response

    # store the first hotel result; fall back to NaN when nothing is returned
    try:
        hotel_names.append(response['results'][0]['name'])
    except (IndexError, KeyError):
        hotel_names.append(np.nan)

hotel_df["Hotel Name"] = hotel_names
hotel_df.head()

# +
# NOTE: Do not change any of the code in this cell

# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]

# +
# Add marker layer (with the hotel info boxes) on top of the heat map
markers = gmaps.marker_layer(locations, info_box_content=hotel_info)
fig.add_layer(markers)

fig

# Display Map
# -
VacationPY/VacationPy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ___ # NLP in Practice # # ___ # # spaCy Basics # # **spaCy** (https://spacy.io/) is an open-source Python library that parses and "understands" large volumes of text. Separate models are available that cater to specific languages (English, French, German, etc.). # # In this section we'll install and setup spaCy to work with Python, and then introduce some concepts related to Natural Language Processing. # # Installation and Setup # # First, install spaCy using either conda or pip. Next, download the specific model you want, based on language.<br> For more info visit https://spacy.io/usage/ # # ### 1. From the command line or terminal: # > `conda install -c conda-forge spacy` # > <br>*or*<br> # > `pip install -U spacy` # # > ### Alternatively you can create a virtual environment: # > `conda create -n spacy2_env python=3 spacy=2` # # ### 2. Next, also from the command line (you must run this as admin or use sudo): # # > `python -m spacy download en` # # > ### If successful, you should see a message like: # # > **`Linking successful`**<br> # > ` C:\Anaconda3\envs\spacyenv\lib\site-packages\en_core_web_sm -->`<br> # > ` C:\Anaconda3\envs\spacyenv\lib\site-packages\spacy\data\en`<br> # > ` `<br> # > ` You can now load the model via spacy.load('en')` # # # Working with spaCy in Python # # This is a typical set of instructions for importing and working with spaCy. Don't be surprised if this takes awhile - spaCy has a fairly large library to load: # + # Import spaCy and load the language library import spacy nlp = spacy.load('en_core_web_sm') # Create a Doc object doc = nlp('Tesla is looking at buying U.S. startup for $6 million') # Print each token separately for token in doc: print(token.text, token.pos_, token.dep_) # - # This doesn't look very user-friendly, but right away we see some interesting things happen: # 1. Tesla is recognized to be a Proper Noun, not just a word at the start of a sentence # 2. U.S. is kept together as one entity (we call this a 'token') # # As we dive deeper into spaCy we'll see what each of these abbreviations mean and how they're derived. We'll also see how spaCy can interpret the last three tokens combined `$6 million` as referring to ***money***. # ___ # # spaCy Objects # # After importing the spacy module in the cell above we loaded a **model** and named it `nlp`.<br>Next we created a **Doc** object by applying the model to our text, and named it `doc`.<br>spaCy also builds a companion **Vocab** object that we'll cover in later sections.<br>The **Doc** object that holds the processed text is our focus here. # ___ # # Pipeline # When we run `nlp`, our text enters a *processing pipeline* that first breaks down the text and then performs a series of operations to tag, parse and describe the data. Image source: https://spacy.io/usage/spacy-101#pipelines from IPython.core.display import SVG SVG(url='https://spacy.io/pipeline-fde48da9b43661abcdf62ab70a546d71.svg') # We can check to see what components currently live in the pipeline. In later sections we'll learn how to disable components and add new ones as needed. nlp.pipeline nlp.pipe_names # ___ # ## Tokenization # The first step in processing text is to split up all the component parts (words & punctuation) into "tokens". 
These tokens are annotated inside the Doc object to contain descriptive information. We'll go into much more detail on tokenization in an upcoming lecture. For now, let's look at another example: # + doc2 = nlp(u"Tesla isn't looking into startups anymore.") for token in doc2: print(token.text, token.pos_, token.dep_) # - # Notice how `isn't` has been split into two tokens. spaCy recognizes both the root verb `is` and the negation attached to it. Notice also that both the extended whitespace and the period at the end of the sentence are assigned their own tokens. # # It's important to note that even though `doc2` contains processed information about each token, it also retains the original text: doc2 doc2[0] type(doc2) # ___ # ## Part-of-Speech Tagging (POS) # The next step after splitting the text up into tokens is to assign parts of speech. In the above example, `Tesla` was recognized to be a ***proper noun***. Here some statistical modeling is required. For example, words that follow "the" are typically nouns. # # For a full list of POS Tags visit https://spacy.io/api/annotation#pos-tagging doc2[0].pos_ # ___ # ## Dependencies # We also looked at the syntactic dependencies assigned to each token. `Tesla` is identified as an `nsubj` or the ***nominal subject*** of the sentence. # # For a full list of Syntactic Dependencies visit https://spacy.io/api/annotation#dependency-parsing # <br>A good explanation of typed dependencies can be found [here](https://nlp.stanford.edu/software/dependencies_manual.pdf) doc2[0].dep_ # To see the full name of a tag use `spacy.explain(tag)` spacy.explain('PROPN') spacy.explain('nsubj') # ___ # ## Additional Token Attributes # We'll see these again in upcoming lectures. For now we just want to illustrate some of the other information that spaCy assigns to tokens: # |Tag|Description|doc2[0].tag| # |:------|:------:|:------| # |`.text`|The original word text<!-- .element: style="text-align:left;" -->|`Tesla`| # |`.lemma_`|The base form of the word|`tesla`| # |`.pos_`|The simple part-of-speech tag|`PROPN`/`proper noun`| # |`.tag_`|The detailed part-of-speech tag|`NNP`/`noun, proper singular`| # |`.shape_`|The word shape – capitalization, punctuation, digits|`Xxxxx`| # |`.is_alpha`|Is the token an alpha character?|`True`| # |`.is_stop`|Is the token part of a stop list, i.e. the most common words of the language?|`False`| # Lemmas (the base form of the word): print(doc2[4].text) print(doc2[4].lemma_) # Simple Parts-of-Speech & Detailed Tags: print(doc2[4].pos_) print(doc2[4].tag_ + ' / ' + spacy.explain(doc2[4].tag_)) # Word Shapes: print(doc2[0].text+': '+doc2[0].shape_) print(doc[5].text+' : '+doc[5].shape_) # Boolean Values: print(doc2[0].is_alpha) print(doc2[0].is_stop) # ___ # ## Spans # Large Doc objects can be hard to work with at times. A **span** is a slice of Doc object in the form `Doc[start:stop]`. doc3 = nlp(u'Although commmonly attributed to <NAME> from his song "Beautiful Boy", \ the phrase "Life is what happens to us while we are making other plans" was written by \ cartoonist <NAME> and published in Reader\'s Digest in 1957, when Lennon was 17.') life_quote = doc3[16:30] print(life_quote) type(life_quote) # In upcoming lectures we'll see how to create Span objects using `Span()`. This will allow us to assign additional information to the Span. # ___ # ## Sentences # Certain tokens inside a Doc object may also receive a "start of sentence" tag. 
While this doesn't immediately build a list of sentences, these tags enable the generation of sentence segments through `Doc.sents`. Later we'll write our own segmentation rules. doc4 = nlp(u'This is the first sentence. This is another sentence. This is the last sentence.') doc4 doc4[0] for sent in doc4.sents: print(sent) doc4[6].is_sent_start # ## Next up: Tokenization
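
# Before moving on, a quick sketch (reusing `doc4` from the cells above) that makes the
# sentence-start tags visible: `is_sent_start` is truthy exactly where `Doc.sents` begins
# a new sentence.

for token in doc4:
    print(token.i, token.text, token.is_sent_start)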
notebooks/1_NLPBasics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Exercise 14 import PyPDF2 import os os.chdir(r'/home/scarlet-speedester/Documents/University/SIC Cource/SIC-Course-Notes/Samsung Course/Exercises/Exercise 14') os.getcwd() my_doc= open('Artificial Intelligence.pdf', 'rb') my_reader= PyPDF2.PdfFileReader(my_doc) n = my_reader.numPages print(n) my_page = my_reader.getPage(10) print(my_page.extractText()) my_page = my_reader.getPage(15) print(my_page.extractText()) # # Working with Word Files: import docx import os os.getcwd() os.chdir(r'/home/scarlet-speedester/Documents/University/SIC Cource/SIC-Course-Notes/Samsung Course/Exercises/Exercise 14') # 1.1: working with Existing documents my_doc= docx.Document('Artificial Intelligence.docx') # this will check the toal number of paragraphs in a documents n = len(my_doc.paragraphs) print(n) # this wil print the display the content of the of a paragraphy print(my_doc.paragraphs[10].text) print(my_doc.paragraphs[12].text) print(my_doc.paragraphs[34].text) m = len(my_doc.paragraphs[34].runs) print(m) # run has attributes as text, bold, italic, underline, etc. print(my_doc.paragraphs[34].runs[0].text) # text attribute af a run. def getAllParagraphs(filename): a_doc = docx.Document(filename) fullText = [] for a_para in a_doc.paragraphs: fullText.append(a_para.text) return '\n'.join(fullText) print(getAllParagraphs('Artificial Intelligence.docx'))
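
# The same idea as `getAllParagraphs` can be applied to the PDF opened earlier. The helper
# below is a sketch (the name `getAllPdfText` is ours, not part of the exercise) that joins
# the text of every page using the PyPDF2 calls already shown above.

# +
import PyPDF2

def getAllPdfText(filename):
    with open(filename, 'rb') as f:
        a_reader = PyPDF2.PdfFileReader(f)
        pages = []
        for i in range(a_reader.numPages):
            # extractText() may return an empty string for image-only pages
            pages.append(a_reader.getPage(i).extractText())
        return '\n'.join(pages)

print(getAllPdfText('Artificial Intelligence.pdf')[:500])  # preview the first 500 characters
# -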
Rafay notes/Samsung Course/Chapter 2/Exercise/Exercise 14/Exercise Solution 14.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: conda_python3
#     language: python
#     name: conda_python3
# ---

# # [Module 2.2] Prepare the Item-Meta data file
#
# **[Important]: the Item-Meta file holds attribute values for item_id, the target we want to forecast. Here the attribute is StoreType. Note the renaming of Store --> item_id described below.**
# The forecasting target is weekly sales per Store. Each Store is made up of Depts. To keep the problem simple, the sales of all Depts were summed and the Store name was renamed to item_id.
# In other words, item_id can be read as the sales of a particular Store.

import boto3
import os

import pandas as pd
import numpy as np

# ## Create the Item-Meta dataset

data_dir = 'data'
store_file_name = 'stores.csv'
store_data = pd.read_csv(os.path.join(data_dir, store_file_name))

store_df = store_data.copy()
store_df = store_df.drop('Size', axis=1)
store_df = store_df.rename(columns={'Store':'item_id', 'Type':'StoreType'})
store_df = store_df.set_index('item_id')
store_df.head()

# ## Save the Item-Meta dataset as CSV

# With the data in a great state, save it off as a CSV
store_meta_filename = "store_meta.csv"
store_meta_path = data_dir + "/" + store_meta_filename
store_df.to_csv(store_meta_path, header=False)

# %store store_meta_filename
# %store store_meta_path
# %store store_df
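
# Because the file is written without a header, a quick read-back (a sanity-check sketch,
# assuming the file produced above) confirms that Forecast will see exactly two columns:
# item_id and StoreType.

# +
check_df = pd.read_csv(store_meta_path, header=None, names=["item_id", "StoreType"])
check_df.head()
# -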
WalmartSale/2.2.Prepare_Item_Meta_Data_File.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Standard Import # + # %config IPCompleter.greedy = True # %load_ext autoreload # %autoreload 2 import qiskit_metal as metal from qiskit_metal import designs, qlibrary, draw from qiskit_metal import MetalGUI, Dict # - design = designs.DesignPlanar() gui = MetalGUI(design) # + from qiskit_metal.qlibrary.qubits.transmon_pocket import TransmonPocket from qiskit_metal.qlibrary.interconnects.meandered import RouteMeander connection_pads = dict(connection_pads=dict( a = dict(loc_W=+1,loc_H=+1), b = dict(loc_W=-1,loc_H=+1), c = dict(loc_W=+1,loc_H=-1), d = dict(loc_W=-1,loc_H=-1) )) q1 = TransmonPocket(design, 'Q1', options = dict(pos_x='-1.5mm', pos_y='+0.0mm', **connection_pads)) q2 = TransmonPocket(design, 'Q2', options = dict(pos_x='+1.5mm', pos_y='+0.0mm', **connection_pads)) q3 = TransmonPocket(design, 'Q3', options = dict(pos_x='+0.0mm', pos_y='+1.3mm', **connection_pads)) options = Dict( pin_inputs=Dict( start_pin=Dict( component= 'Q1', pin= 'a'), end_pin=Dict( component= 'Q2', pin= 'b')), ) design.variables.cpw_width = '10um' design.variables.cpw_gap = '6um' cpw = RouteMeander(design, 'cpw', options) gui.rebuild() gui.autoscale() # - design.parse_value("['10um', '1nm']") metal.config.DefaultMetalOptions.default_generic.units # + # outdated: gui.ui.tabWidget.setTabPosition(0) # - table = gui.variables_window.ui.tableView table.resizeColumnsToContents() from PySide2.QtWidgets import QAbstractItemView table.setVerticalScrollMode(QAbstractItemView.ScrollPerPixel) table.setHorizontalScrollMode(QAbstractItemView.ScrollPerPixel) table.adjustSize() gui.config.clear() gui.screenshot() # + # Set variables in the design design.variables.pad_width = '400 um' # Assign variables to component options q1.options.pad_width = 'pad_width' q2.options.pad_width = 'pad_width' q3.options.pad_width = 'pad_width' # Rebuild all compoinent and refresh the gui gui.rebuild() gui.autoscale() # + # Set variables in the design design.variables.pad_width = '300 um' # Rebuild all compoinent and refresh the gui gui.rebuild() gui.autoscale() # + # Set variables in the design design.variables.pad_width = '550 um' # Rebuild all compoinent and refresh the gui gui.rebuild() gui.autoscale() # - q1.qgeometry_bounds() # + # outdated: q1.qgeometry_list() # - q1.qgeometry_plot type(q1.qgeometry_table('poly')) q1.qgeometry_table('poly') # + # outdated: gui.component_window.model.index(1,0) # -
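
# The three manual `pad_width` updates above repeat the same pattern; as a small
# illustration (a sketch that only uses calls already shown in this notebook), the sweep
# can be written as a loop:

# +
for width in ['400 um', '300 um', '550 um']:
    design.variables.pad_width = width   # update the shared design variable
    gui.rebuild()                        # rebuild all components
    gui.autoscale()
    gui.screenshot()
# -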
tutorials/Appendix/Quick Topic Tutorials Notebooks/Variables Example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
# ---

# # BLOCO 2: Lecture 4
#
# * link https://drive.google.com/file/d/1tpfDFrb5jM8fA0mTg3eJcncDAH0btiUf/view?usp=sharing

# ## Nature-inspired computing: artificial intelligence vs. adaptivity
#
# * Artificial intelligence is not about what the algorithm does, but about how it adapts to what is new
#   * The more accurate term is actually **computational intelligence**
#
# * Within AI we also have: neural networks, fuzzy logic, ...
#
# * Evolutionary algorithm != genetic algorithm
#   * A GA has no variable mutation and is more specific
#   * In other words, everything just gets called an Evolutionary Algorithm (EA) and that is fine
#
# * Swarm intelligence is apparently pretty wild, and Simoes really likes it

# ### Evolutionary control: objectives
#
# * Automatic design, "eliminating" the specialist
#
# * The motivation is to reduce design errors, etc.
#
# * The most important premise:
#   * Specify *WHAT* is expected of the robot
#   * Without defining *HOW* it should do it
#
Bloco2/aula4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.3 64-bit ('venv') # metadata: # interpreter: # hash: 4ce963eec525c72576d8dfcde812f8487bbeefd0db94f66638320c10fe71db33 # name: Python 3.8.3 64-bit ('venv') # --- import numpy as np import pandas as pd # ### Load files # # - Files should be in a csv format with two colums [x, y] # - Pressure and volume should be loaded separetely (not on same file) # load files pr_df = pd.read_csv("./raw/Pressure.csv", header=None) vl_df = pd.read_csv("./raw/Volume.csv", header=None) # set columns pr_df.columns = ["x", "y"] vl_df.columns = ["x", "y"] pr_df.plot(x="x", y="y", kind="scatter") vl_df.plot(x="x", y="y", kind="scatter") # ## Adjust Dataset # # Here we will be doining automatic and manual adjustments based on the previous plots # There are three main adjustments: # - crop endpoints that are outside [[0,1]] range # - average datapoints based on median of three for all x and y (this assures that there is no overlapping datapoint) # - adjust min and max of each dataset def m3_avg(arr, reshape=True): for i in range(len(arr) - 2): x1 = arr[i] x2 = arr[i+1] x3 = arr[i+2] x2 = (x1 + x3) / 2 arr[i+1] = x2 if reshape == True: return arr.reshape((len(arr), )) return arr # + # crop data that is x < 0 pr_df.drop(pr_df[pr_df["x"] < 0].index, inplace=True) vl_df.drop(vl_df[vl_df["x"] < 0].index, inplace=True) # crop data that is x > 1 pr_df.drop(pr_df[pr_df["x"] > 1].index, inplace=True) vl_df.drop(vl_df[vl_df["x"] > 1].index, inplace=True) # - # Adjust pressure range (min pressure is zero) pr_df["y"] = pr_df["y"] + (-pr_df["y"].min()) # fix start and end points pr_df["y"].loc[1] = 0.0 pr_df["y"].loc[len(pr_df)] = 0.0 pr_df["x"].loc[1] = 0.0 pr_df["x"].loc[len(pr_df)] = 1.0 # Average dataset in x and y by median of 3 values pr_df["x"][::] = m3_avg(pr_df["x"].values) pr_df["y"][::] = m3_avg(pr_df["y"].values) pr_df.plot(x="x", y="y", kind="scatter") # Adjust volume range (max volume is 1.0) vl_df["y"] = (vl_df["y"] - vl_df["y"].max()) + 1.0 # fix start and end points vl_df["y"][0:2] = 1.0 vl_df["y"][-3:] = 1.0 vl_df["x"][0:1] = 0.0 vl_df["x"].loc[len(vl_df) - 1] = 1.0 vl_df.plot(x="x", y="y", kind="scatter") # # Smooth original data # # To facilitate the interpolation, we can run an fft filter to smooth and scale points from scipy import fftpack def smooth_data_fft(arr, span): # the scaling of "span" is open to suggestions w = fftpack.rfft(arr) spectrum = w ** 2 cutoff_idx = spectrum < (spectrum.max() * (1 - np.exp(-span / 2000))) w[cutoff_idx] = 0 return fftpack.irfft(w) smooth_pr = smooth_data_fft(pr_df["y"].values, 0.01) smooth_vl = smooth_data_fft(vl_df["y"].values, 0.01) # # Interpolate data # # Now we are ready to interpolate and create new datasets with a set length and pairing points # - For pressure, linear interpolation worked better # - For Volume, quadratic interpolation was the best. 
# At the end, we run an fft filter again to adjust and smooth data # + # Define new x_space xmin = max(pr_df["x"].min(), vl_df["x"].min()) xmax = min(pr_df["x"].max(), vl_df["x"].max()) x_space = [xmin, 0.15, 0.25, 0.36, 0.399, xmax] x_cnt = [150, 150, 150, 150, 150, 150] x_new = np.array([]) for i in range(len(x_space) - 1): bp1 = x_space[i] bp2 = x_space[i+1] x_s = np.linspace(bp1, bp2, x_cnt[i]) x_new = np.append(x_new, x_s) # - # define an interpolation wrapper from scipy.interpolate import interp1d def interp_wrap(x,y,x_new, kind='linear'): f = interp1d(x, y, kind=kind) y_new = f(x_new) return y_new.reshape((y_new.shape[0], 1)) # interpolate pressure and volume pressure_interp = interp_wrap(pr_df["x"], pr_df["y"], x_new) volume_interp = interp_wrap(vl_df["x"], vl_df["y"], x_new, kind="quadratic") # + from matplotlib import pyplot as plt fig, ax = plt.subplots() ax.scatter(x_new, pressure_interp) plt.show() # + fig, ax = plt.subplots() ax.scatter(x_new, volume_interp) plt.show() # - smooth_pr = smooth_data_fft(pressure_interp, 0.0001) smooth_vl = smooth_data_fft(volume_interp, 0.0001) # remove values above 1 from volume smooth_vl[smooth_vl > 1] = 1 # remove values below 0 from pressure (if any) smooth_pr[smooth_pr < 0] = 0 # + fig, ax = plt.subplots() ax.scatter(x_new, volume_interp) ax.plot(x_new, smooth_vl, c="r") plt.show() # + fig, ax = plt.subplots() ax.scatter(x_new, pressure_interp) ax.plot(x_new, smooth_pr, c="r") plt.show() # - # # Final step: create a dataframe # # The final dataframe should have pressure-volume columns, which are going to be used for saving the data # + # combine into df data = np.hstack([smooth_pr, smooth_vl, x_new]) df = pd.DataFrame(data, columns=["pressure", "volume", "timestep"]) # - df.plot(x="volume", y="pressure", kind="scatter") # + from os import path, curdir outdir = path.join(curdir, "out") pickle_file = path.join(outdir, "pv_curve.pickle") excel_file = path.join(outdir, "pv_curve.xlsm") df.to_pickle(pickle_file) df.to_excel(excel_file) # + fig, ax = plt.subplots() ax.scatter(x_new *0.8 , (0.7* volume_interp + 0.3) * 123) plt.show() # + fig, ax = plt.subplots() ax.scatter(x_new *0.8, (13.33 * pressure_interp) * 0.95 ) plt.show() # - a = (13.33 * pressure_interp) * 0.95 a.max()
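
# The interpolate-then-smooth steps above are applied separately to pressure and volume;
# a small helper (a sketch reusing `interp_wrap` and `smooth_data_fft` defined above, with
# a name of our choosing) makes the repeated pipeline explicit:

# +
def resample_and_smooth(curve_df, x_new, kind="linear", span=0.0001):
    """Interpolate a raw x/y curve onto x_new and smooth it with the fft filter."""
    y_interp = interp_wrap(curve_df["x"], curve_df["y"], x_new, kind=kind)
    return smooth_data_fft(y_interp, span)

# Equivalent to the cells above (quadratic interpolation works better for volume)
smooth_pr_alt = resample_and_smooth(pr_df, x_new, kind="linear")
smooth_vl_alt = resample_and_smooth(vl_df, x_new, kind="quadratic")
# -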
notebooks/smoothData/smooth_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # ********************************* # Typesetting With XeLaTeX/LuaLaTeX # ********************************* # # How to typeset text with the ``pgf`` backend in Matplotlib. # # Using the ``pgf`` backend, matplotlib can export figures as pgf drawing commands # that can be processed with pdflatex, xelatex or lualatex. XeLaTeX and LuaLaTeX # have full unicode support and can use any font that is installed in the operating # system, making use of advanced typographic features of OpenType, AAT and # Graphite. Pgf pictures created by ``plt.savefig('figure.pgf')`` can be # embedded as raw commands in LaTeX documents. Figures can also be directly # compiled and saved to PDF with ``plt.savefig('figure.pdf')`` by either # switching to the backend # # .. code-block:: python # # matplotlib.use('pgf') # # or registering it for handling pdf output # # .. code-block:: python # # from matplotlib.backends.backend_pgf import FigureCanvasPgf # matplotlib.backend_bases.register_backend('pdf', FigureCanvasPgf) # # The second method allows you to keep using regular interactive backends and to # save xelatex, lualatex or pdflatex compiled PDF files from the graphical user interface. # # Matplotlib's pgf support requires a recent LaTeX_ installation that includes # the TikZ/PGF packages (such as TeXLive_), preferably with XeLaTeX or LuaLaTeX # installed. If either pdftocairo or ghostscript is present on your system, # figures can optionally be saved to PNG images as well. The executables # for all applications must be located on your :envvar:`PATH`. # # Rc parameters that control the behavior of the pgf backend: # # ================= ===================================================== # Parameter Documentation # ================= ===================================================== # pgf.preamble Lines to be included in the LaTeX preamble # pgf.rcfonts Setup fonts from rc params using the fontspec package # pgf.texsystem Either "xelatex" (default), "lualatex" or "pdflatex" # ================= ===================================================== # # <div class="alert alert-info"><h4>Note</h4><p>TeX defines a set of special characters, such as:: # # # $ % & ~ _ ^ \ { } # # Generally, these characters must be escaped correctly. For convenience, # some characters (_,^,%) are automatically escaped outside of math # environments.</p></div> # # # # Multi-Page PDF Files # ==================== # # The pgf backend also supports multipage pdf files using ``PdfPages`` # # .. code-block:: python # # from matplotlib.backends.backend_pgf import PdfPages # import matplotlib.pyplot as plt # # with PdfPages('multipage.pdf', metadata={'author': 'Me'}) as pdf: # # fig1, ax1 = plt.subplots() # ax1.plot([1, 5, 3]) # pdf.savefig(fig1) # # fig2, ax2 = plt.subplots() # ax2.plot([1, 5, 3]) # pdf.savefig(fig2) # # # Font specification # ================== # # The fonts used for obtaining the size of text elements or when compiling # figures to PDF are usually defined in the matplotlib rc parameters. You can # also use the LaTeX default Computer Modern fonts by clearing the lists for # ``font.serif``, ``font.sans-serif`` or ``font.monospace``. Please note that # the glyph coverage of these fonts is very limited. 
If you want to keep the # Computer Modern font face but require extended unicode support, consider # installing the `Computer Modern Unicode <https://sourceforge.net/projects/cm-unicode/>`_ # fonts *CMU Serif*, *CMU Sans Serif*, etc. # # When saving to ``.pgf``, the font configuration matplotlib used for the # layout of the figure is included in the header of the text file. # # .. literalinclude:: ../../gallery/userdemo/pgf_fonts.py # :end-before: plt.savefig # # # # Custom preamble # =============== # # Full customization is possible by adding your own commands to the preamble. # Use the ``pgf.preamble`` parameter if you want to configure the math fonts, # using ``unicode-math`` for example, or for loading additional packages. Also, # if you want to do the font configuration yourself instead of using the fonts # specified in the rc parameters, make sure to disable ``pgf.rcfonts``. # # .. only:: html # # .. literalinclude:: ../../gallery/userdemo/pgf_preamble_sgskip.py # :end-before: plt.savefig # # .. only:: latex # # .. literalinclude:: ../../gallery/userdemo/pgf_preamble_sgskip.py # :end-before: import matplotlib.pyplot as plt # # # # Choosing the TeX system # ======================= # # The TeX system to be used by matplotlib is chosen by the ``pgf.texsystem`` # parameter. Possible values are ``'xelatex'`` (default), ``'lualatex'`` and # ``'pdflatex'``. Please note that when selecting pdflatex the fonts and # unicode handling must be configured in the preamble. # # .. literalinclude:: ../../gallery/userdemo/pgf_texsystem.py # :end-before: plt.savefig # # # # Troubleshooting # =============== # # * Please note that the TeX packages found in some Linux distributions and # MiKTeX installations are dramatically outdated. Make sure to update your # package catalog and upgrade or install a recent TeX distribution. # # * On Windows, the :envvar:`PATH` environment variable may need to be modified # to include the directories containing the latex, dvipng and ghostscript # executables. See `environment-variables` and # `setting-windows-environment-variables` for details. # # * A limitation on Windows causes the backend to keep file handles that have # been opened by your application open. As a result, it may not be possible # to delete the corresponding files until the application closes (see # `#1324 <https://github.com/matplotlib/matplotlib/issues/1324>`_). # # * Sometimes the font rendering in figures that are saved to png images is # very bad. This happens when the pdftocairo tool is not available and # ghostscript is used for the pdf to png conversion. # # * Make sure what you are trying to do is possible in a LaTeX document, # that your LaTeX syntax is valid and that you are using raw strings # if necessary to avoid unintended escape sequences. # # * The ``pgf.preamble`` rc setting provides lots of flexibility, and lots of # ways to cause problems. When experiencing problems, try to minimalize or # disable the custom preamble. # # * Configuring an ``unicode-math`` environment can be a bit tricky. The # TeXLive distribution for example provides a set of math fonts which are # usually not installed system-wide. XeTeX, unlike LuaLatex, cannot find # these fonts by their name, which is why you might have to specify # ``\setmathfont{xits-math.otf}`` instead of ``\setmathfont{XITS Math}`` or # alternatively make the fonts available to your OS. See this # `tex.stackexchange.com question <http://tex.stackexchange.com/questions/43642>`_ # for more details. 
# # * If the font configuration used by matplotlib differs from the font setting # in yout LaTeX document, the alignment of text elements in imported figures # may be off. Check the header of your ``.pgf`` file if you are unsure about # the fonts matplotlib used for the layout. # # * Vector images and hence ``.pgf`` files can become bloated if there are a lot # of objects in the graph. This can be the case for image processing or very # big scatter graphs. In an extreme case this can cause TeX to run out of # memory: "TeX capacity exceeded, sorry" You can configure latex to increase # the amount of memory available to generate the ``.pdf`` image as discussed on # `tex.stackexchange.com <http://tex.stackexchange.com/questions/7953>`_. # Another way would be to "rasterize" parts of the graph causing problems # using either the ``rasterized=True`` keyword, or ``.set_rasterized(True)`` as per # :doc:`this example </gallery/misc/rasterization_demo>`. # # * If you still need help, please see `reporting-problems` # #
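
# As a minimal end-to-end sketch (not part of the original tutorial; it assumes a working
# xelatex installation with the PGF/TikZ packages, and is best run in a fresh session
# rather than after `%matplotlib inline`), the rc parameters described above can be
# combined to save a figure as both .pgf and .pdf:

# +
import matplotlib
matplotlib.use('pgf')                 # route all output through the pgf backend
import matplotlib.pyplot as plt

plt.rcParams.update({
    "pgf.texsystem": "xelatex",       # or "lualatex" / "pdflatex"
    "pgf.rcfonts": False,             # do not set fonts from the rc parameters
})

fig, ax = plt.subplots()
ax.plot([1, 5, 3])
ax.set_title("pgf backend demo")
fig.savefig("figure.pgf")             # raw pgf commands, ready to \input in LaTeX
fig.savefig("figure.pdf")             # compiled with the chosen TeX system
# -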
matplotlib/tutorials_jupyter/text/pgf.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Import method 1 # Get current working directory and append to path for package import # + active="" # import os # import sys # p = os.path.abspath(path_to_install_directory) # sys.path.insert(0, p) # - # ### Import method 2 # Add install directory to # `~/.local/lib/python3.x/site-packages/local_code.pth` # where python3.x is your version of python # ### Import method 3 # Use pip to install package # `$ pip install meerkat-iot` # ### Import method 4 # Use Conda to install package # `$ conda install meerkat-iot` import meerkat meerkat.version
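
# A sketch of method 2 (the file name `local_code.pth` and the install path below are
# placeholders): any directory listed in a `.pth` file inside site-packages is appended to
# `sys.path` at interpreter startup. Kept as a raw cell, like method 1, so it is not run
# by accident.

# + active=""
# import site, pathlib
# pth_file = pathlib.Path(site.getusersitepackages()) / "local_code.pth"
# pth_file.parent.mkdir(parents=True, exist_ok=True)   # create user site-packages if needed
# pth_file.write_text("/path/to/meerkat/install/directory\n")
# -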
notebooks/import_example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: projet_S5 # language: python # name: projet_s5 # --- # List all device from tensorflow.python.client import device_lib # print(device_lib.list_local_devices()) # Check available GPU from keras import backend as K K.tensorflow_backend._get_available_gpus() import os os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"; # The GPU id to use, usually either "0" or "1"; os.environ["CUDA_VISIBLE_DEVICES"]="0"; # Importing the libraries import numpy as np import pandas as pd from keras.models import Sequential from keras.layers import Dense, LSTM, Dropout, Reshape, Lambda, GRU, BatchNormalization, Bidirectional from keras.preprocessing.sequence import TimeseriesGenerator from keras.callbacks import EarlyStopping, ModelCheckpoint from keras.activations import softmax from keras.optimizers import SGD, RMSprop import math import pickle import matplotlib.pyplot as plt from keras.utils import to_categorical from sklearn.preprocessing import StandardScaler index = "dowjones" # index = "frankfurt" with open(f"../data/{index}_calculated/periods750_250_240.txt", "rb") as fp: # Unpickling dataset = pickle.load(fp) i = 7 timestep = 240 feature = 31 # + x_train = dataset[0][i][0].values x_test = dataset[1][i][0].values scaler = StandardScaler().fit(x_train) x_train = scaler.transform(x_train) x_test = scaler.transform(x_test) y_train = to_categorical(dataset[0][i][1].values, 2) y_test = to_categorical(dataset[1][i][1].values, 2) # - print(f"x train shape: {x_train.shape}") print(f"y train shape: {y_train.shape}") print(f"x test shape: {x_test.shape}") print(f"y test shape: {y_test.shape}") x_series = [x_train[i:i+timestep, j] for i in range(1, x_train.shape[0] - timestep) for j in range(feature)] y_series = [y_train[i+timestep, j] for i in range(y_train.shape[0] - timestep - 1) for j in range(feature)] x = np.array(x_series) y = np.array(y_series) print(f"x shape: {x.shape}") print(f"y shape: {y.shape}") x_series1 = [x_test[i:i+timestep, j] for i in range(1, x_test.shape[0] - timestep) for j in range(feature)] y_series1 = [y_test[i+timestep, j] for i in range(y_test.shape[0] - timestep - 1) for j in range(feature)] x1 = np.array(x_series1) y1 = np.array(y_series1) print(f"x1 shape: {x1.shape}") print(f"y1 shape: {y1.shape}") x = np.reshape(x, (x.shape[0], x.shape[1], 1)) x1 = np.reshape(x1, (x1.shape[0], x1.shape[1], 1)) regressor = Sequential() regressor.add(LSTM(units=25, input_shape=(timestep, 1))) # regressor.add(Dense(feature * 2, activation='relu')) # regressor.add(Reshape((feature, 2))) # regressor.add(Lambda(lambda x: softmax(x, axis=-1))) regressor.add(Dense(2, activation='softmax')) regressor.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy']) regressor.summary() # result = regressor.fit_generator(train_gen, steps_per_epoch=len(train_gen), epochs=1000) result = regressor.fit(x, y,batch_size=1000, epochs=1000, validation_data=(x1, y1), callbacks = [EarlyStopping(monitor='val_loss', mode='min', patience=10), ModelCheckpoint(filepath='best_model.h5', monitor='val_acc', save_best_only=True)]) plt.plot(result.history["acc"], label='sine') plt.plot(result.history["val_acc"]) plt.plot(result.history["loss"]) plt.plot(result.history["val_loss"])
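
# The `ModelCheckpoint` callback above wrote the best weights to `best_model.h5`; a short
# sketch of reloading that file and scoring the held-out series:

# +
from keras.models import load_model

best_model = load_model('best_model.h5')
val_loss, val_acc = best_model.evaluate(x1, y1, batch_size=1000)
print(f"validation loss: {val_loss:.4f}, validation accuracy: {val_acc:.4f}")
# -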
notebook/[Experiment] Long Short Term Memory - Sanity Check - 1 feature.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Import Modules # + import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from sklearn.feature_extraction.text import CountVectorizer from sklearn import feature_extraction, linear_model, model_selection, preprocessing from sklearn.metrics import accuracy_score from sklearn.model_selection import train_test_split from sklearn.feature_extraction.text import TfidfTransformer from sklearn.pipeline import Pipeline from sklearn.svm import LinearSVC from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.naive_bayes import BernoulliNB from sklearn.naive_bayes import MultinomialNB from sklearn.metrics import plot_confusion_matrix from sklearn.ensemble import GradientBoostingClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import SGDClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import LabelEncoder from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.metrics import accuracy_score, confusion_matrix,classification_report,roc_curve,auc from sklearn.utils import shuffle import nltk import nltk as nlp import string import re import pickle from nltk.tokenize import word_tokenize import re import string from nltk.corpus import stopwords from nltk.stem.wordnet import WordNetLemmatizer from tqdm import tqdm, tqdm_notebook tqdm.pandas(tqdm_notebook) wordnet = WordNetLemmatizer() regex = re.compile('[%s]' % re.escape(string.punctuation)) # - # ## Load in and prepare datasets # + true = pd.read_csv(r"data/True.csv") fake = pd.read_csv(r"data/Fake.csv") true["target"] = 0 fake["target"] = 1 df = pd.concat([true,fake]) df = shuffle(df) df.head() # - # ## Functions # + def basic_text_cleaning(line_from_column): # This function takes in a string, not a list or an array for the arg line_from_column tokenized_doc = word_tokenize(line_from_column) new_review = [] for token in tokenized_doc: new_token = regex.sub(u'', token) if not new_token == u'': new_review.append(new_token) new_term_vector = [] for word in new_review: if not word in stopwords.words('english'): new_term_vector.append(word) final_doc = [] for word in new_term_vector: final_doc.append(wordnet.lemmatize(word)) return ' '.join(final_doc) def generate_roc_curve(model,y_test, probs, title): preds = probs[:,1] fpr, tpr, threshold = roc_curve(y_test, preds) roc_auc = auc(fpr, tpr) plt.title('Receiver Operating Characteristic - {}'.format(title)) plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc) plt.legend(loc = 'lower right') plt.plot([0, 1], [0, 1],'r--') plt.xlim([0, 1]) plt.ylim([0, 1]) plt.ylabel('True Positive Rate') plt.xlabel('False Positive Rate') plt.show() def plot_con_matrix(classifier,X_test,y_test): class_names = df["target"].value_counts() np.set_printoptions(precision=2) titles_options = [("Confusion matrix, without normalization", None), ("Normalized confusion matrix", None)] for title, normalize in titles_options: disp = plot_confusion_matrix(classifier, X_test, y_test, display_labels=class_names, cmap=plt.cm.Blues, normalize=normalize) disp.ax_.set_title(title) print(title) print(disp.confusion_matrix) def load_up_classifier(classifier): x_train,x_test,y_train,y_test = 
train_test_split(df['text'], df["target"], test_size=0.25, random_state=2020) pipe = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('model', classifier)]) model = pipe.fit(x_train, y_train) prediction = model.predict(x_test) probs = model.predict_proba(x_test) plot_con_matrix(classifier=model,X_test=x_test,y_test=y_test) return model, probs, y_test # - # ## Training classifiers without changing hyperparameters (Before text cleaning) # #### Logistic Regression model_LR, probs, y_test = load_up_classifier(classifier=LogisticRegression()) generate_roc_curve(model=model_LR,y_test=y_test, probs=probs, title="Logistic Regression") # #### Multinomial Naive Bayes model_MNB, probs, y_test = load_up_classifier(classifier=MultinomialNB()) generate_roc_curve(model=model_MNB,y_test=y_test, probs=probs, title = "Multinomial Naive Bayes") # #### Bernoulli Naive Bayes model_BNB, probs, y_test = load_up_classifier(classifier=BernoulliNB()) generate_roc_curve(model=model_BNB,y_test=y_test, probs=probs, title="Bernoulli Naive Bayes ") # #### Gradient Boost Classifier model_GBC, probs, y_test = load_up_classifier(classifier=GradientBoostingClassifier()) generate_roc_curve(model=model_GBC,y_test=y_test, probs=probs, title="Gradient Boosting Classifier") # #### Decision Tree model_DT, probs, y_test = load_up_classifier(classifier=DecisionTreeClassifier()) generate_roc_curve(model=model_DT,y_test=y_test, probs=probs, title="Decision Tree") # #### RFC Classifier model_RFC, probs, y_test = load_up_classifier(classifier=RandomForestClassifier()) generate_roc_curve(model=model_RFC,y_test=y_test, probs=probs, title="Random Forest Classifier") # The evaluation results without tweaking hyperparameter are not too bad, we will save these models first as a backup # + model_file_list = [r"models/LR_model.pkl", r"models/MNVBC_model.pkl", r"models/BNBC_model.pkl", r"models/GBC_model.pkl", r"models/DT_model.pkl", r"models/RFC_model.pkl"] model_list = [model_LR,model_MNB,model_BNB,model_GBC,model_DT,model_RFC] for model, filename in zip(model_list, model_file_list): pickle.dump(model, open(filename, 'wb')) # - # ## Cleaning text data df["clean_text"] = df["text"].progress_map(basic_text_cleaning) df.head() # ## Training classifiers without changing hyperparameters def load_up_classifier_clean(classifier): x_train,x_test,y_train,y_test = train_test_split(df['clean_text'], df["target"], test_size=0.25, random_state=2020) pipe = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('model', classifier)]) model = pipe.fit(x_train, y_train) prediction = model.predict(x_test) probs = model.predict_proba(x_test) plot_con_matrix(classifier=model,X_test=x_test,y_test=y_test) return model, probs, y_test # #### Logistic Regression model_LR, probs, y_test = load_up_classifier_clean(classifier=LogisticRegression()) generate_roc_curve(model=model_LR,y_test=y_test, probs=probs, title="Logistic Regression") # #### Multinomial Naive Bayes model_MNB, probs, y_test = load_up_classifier_clean(classifier=MultinomialNB()) generate_roc_curve(model=model_MNB,y_test=y_test, probs=probs, title="MultinomialNB") # #### Bernoulli Naive Bayes model_BNB, probs, y_test = load_up_classifier_clean(classifier=BernoulliNB()) generate_roc_curve(model=model_BNB,y_test=y_test, probs=probs, title="Bernoulli NB") # #### Gradient Boost Classifier model_GBC, probs, y_test = load_up_classifier_clean(classifier=GradientBoostingClassifier()) generate_roc_curve(model=model_GBC,y_test=y_test, probs=probs, title="Gradient Boosting 
Classifier") # #### Decision Tree model_DT, probs, y_test = load_up_classifier_clean(classifier=DecisionTreeClassifier()) generate_roc_curve(model=model_DT,y_test=y_test, probs=probs, title="Decision Tree Classifier") # #### RFC model_RFC, probs, y_test = load_up_classifier_clean(classifier=RandomForestClassifier()) generate_roc_curve(model=model_RFC,y_test=y_test, probs=probs, title="Decision Tree Classifier")
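
# Because each fitted `Pipeline` bundles the vectorizer, the tf-idf transform and the
# classifier, scoring new text needs no extra preprocessing. A sketch (the headline below
# is made up purely for illustration) using the in-memory logistic-regression pipeline and
# one of the pickled models saved earlier (0 = true, 1 = fake):

# +
sample_text = ["The president met with congressional leaders on Tuesday to discuss the budget."]

print(model_LR.predict(sample_text))

# The serialized pipelines can be reloaded and used the same way
loaded_model = pickle.load(open(r"models/LR_model.pkl", "rb"))
print(loaded_model.predict(sample_text))
# -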
Model_Building_using_traditional_ML.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Baidu OCR
# [Official documentation](https://ai.baidu.com/docs#/OCR-Python-SDK/top)

# ### Example: recognizing a captcha with text recognition

# +
from aip import AipOcr

def GetCaptcha(filename):
    APP_ID = "your app id"
    API_KEY = "your api key"
    SECRET_KEY = "your secret key"
    # Create the OCR client with the application credentials
    client = AipOcr(APP_ID, API_KEY, SECRET_KEY)

    # Read the image as raw bytes
    def get_image_bytes(filename):
        with open(filename, "rb") as fp:
            return fp.read()

    image = get_image_bytes(filename)

    # Call the general text-recognition endpoint
    ret = client.basicGeneral(image)
    code = ret["words_result"][0]["words"]
    return code
# -
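
# A quick usage sketch (the image path is a placeholder; it assumes valid credentials have
# been filled in above):

captcha_text = GetCaptcha("captcha.png")
print(captcha_text)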
python/modules/jupyter/Ocr.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Import Libraries import pandas as pd import matplotlib.pyplot as plt from datetime import datetime from sklearn.preprocessing import LabelEncoder import numpy as np from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn import linear_model import statsmodels.api as sm # # Import Files test = pd.read_csv("data/test.csv") train = pd.read_csv("data/train.csv") sample = pd.read_csv("data/sample_submission.csv") # # Functions # to calculate score def smape(a, f): return 1/len(a) * np.sum(2 * np.abs(f-a) / (np.abs(a) + np.abs(f))*100) # make to csv def export_me(model, testz, name): result = pd.DataFrame({"id": range(1,len(testz)+1), "sales": model.predict(testz)}) result.to_csv(name+".csv", index=False) return None # # Explore Data train.describe() train.dtypes train.columns test.columns print(len(train)) print(len(test)) print(train.Date.min(), train.Date.max()) print(test.Date.min(), test.Date.max()) print(train.Store.unique()) print(test.Store.unique()) datetime.strptime(train["Date"][0], "%d-%b-%y") # # Preprocessing # # Make Date into separate features train["Date"] = pd.to_datetime(train["Date"], format="%d-%b-%y") train["Year"] = train["Date"].dt.year train["Month"] = train["Date"].dt.month train["Day"] = train["Date"].dt.day train["Store"] = LabelEncoder().fit(train["Store"]).transform(train["Store"]) # # Trial 1 # # If we uncategorize it it would make a lower SMAPE score # + #train1["Item"] = train["Item"].astype("category") #train1["Store"] = train["Store"].astype("category") # - # split feature and target features = train[["Store", "Item", "Year", "Month", "Day"]] target = train["Sales"] # + # Split the data X_train,X_test,y_train,y_test = train_test_split(features,target,test_size = 0.2) # + # Try different Algorithms model = LinearRegression().fit(X_train,y_train) # - predictions = model.predict(X_test) smape(y_test, predictions) model1 = sm.OLS(y_train, X_train).fit() predictions = model1.predict(X_test) smape(y_test, predictions) test["Date"] = pd.to_datetime(test["Date"], format="%d-%b-%y") test["Year"] = test["Date"].dt.year test["Month"] = test["Date"].dt.month test["Day"] = test["Date"].dt.day #test["Item"] = test["Item"].astype("category") test["Store"] = LabelEncoder().fit(test["Store"]).transform(test["Store"]) #test["Store"] = test["Store"].astype("category") testz = test[["Store", "Item", "Year", "Month", "Day"]] predictions # # Trial 2 # # Change linear regression # # Choose columns: Store, Item, Year round(train.corr(),3) # + # split feature and target features = train[["Store", "Year"]] target = train["Sales"] # Split the data X_train,X_test,y_train,y_test = train_test_split(features,target,test_size = 0.2) # Try different Algorithms model = LinearRegression().fit(X_train,y_train) predictions = model.predict(X_test) # - smape(y_test, predictions) train # # Trial 3 # Bayesian Ridge train3 = train train3["Item"] = train["Item"].astype("category") train3["Store"] = train["Store"].astype("category") train3["Month"] = train["Month"].astype("category") train3["Day"] = train["Day"].astype("category") # + # split feature and target features = train3[["Store", "Item", "Year", "Month", "Day"]] target = train3["Sales"] # Split the data X_train,X_test,y_train,y_test = 
train_test_split(features,target,test_size = 0.2) model3 = linear_model.BayesianRidge().fit(X_train,y_train) prediction3 = np.round(model3.predict(X_test)) # - smape(y_test, prediction3) # # Trial 4 # # Bro its multiple linear regression bro # + # split feature and target features = train[["Store", "Item", "Year", "Month", "Day"]] target = train["Sales"] # Split the data X_train,X_test,y_train,y_test = train_test_split(features,target,test_size = 0.2) # make model X_trainz = sm.add_constant(X_train) model4 = sm.OLS(y_train, X_trainz).fit() X_testz = sm.add_constant(X_test) prediction4 = model4.predict(X_testz) # - model4.summary() smape(y_test, prediction4) # # Trial 5 # + # split feature and target features = train[["Store", "Item", "Year", "Month", "Day"]] target = train["Sales"] from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler() # Applying scaler() to all the columns except the 'yes-no' and 'dummy' variables features = scaler.fit_transform(features) # Split the data X_train,X_test,y_train,y_test = train_test_split(features,target,test_size = 0.2) # make model X_trainz = sm.add_constant(X_train) model5 = sm.OLS(y_train, X_trainz).fit() X_testz = sm.add_constant(X_test) prediction5 = model5.predict(X_testz) # - smape(y_test, prediction5) prediction5 # # Trial # # Try to separate each group store_list = train.Store.unique() train2 = {} for u in store_list: train2[u] = train[train["Store"] == u] train2["KMart"]
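
# A sketch of where the per-store split could go next: fit one simple model per store on
# its own slice and compare SMAPE scores. It reuses the `smape` helper and the feature
# columns from the trials above, and assumes the Item codes are numeric (as they were in
# Trial 1).

# +
per_store_scores = {}
for store, df_store in train2.items():
    X = df_store[["Item", "Year", "Month", "Day"]].astype(float)
    y = df_store["Sales"]
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2)
    store_model = LinearRegression().fit(X_tr, y_tr)
    per_store_scores[store] = smape(y_te, store_model.predict(X_te))

pd.Series(per_store_scores).sort_values()
# -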
sales-forecast-py.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + slideshow={"slide_type": "skip"} import pybrain import numpy as np import pandas as pd from matplotlib import pyplot as plt pd.set_option('notebook_repr_html',True) from notebook.services.config import ConfigManager cm = ConfigManager() cm.update('livereveal', { 'theme': 'league', 'transition': 'fade', 'center': 'false', 'overview' : 'true', 'start_slideshow_at': 'selected' }) # %matplotlib inline # + [markdown] slideshow={"slide_type": "slide"} # # Quick and Dirty Introduction to Neural Networks # # [<NAME>](http://dis.unal.edu.co/~fgonza/), Universidad Nacional de Colombia # + [markdown] slideshow={"class": "center", "slide_type": "slide"} # ## Artificial Neuron # # <img src="http://upload.wikimedia.org/wikipedia/commons/thumb/6/60/ArtificialNeuronModel_english.png/600px-ArtificialNeuronModel_english.png" > # # $$o_j^{(n)} = \varphi\left(\sum_{i\; in\; layer (n-1)}w_{ij}o_i^{(n-1)} \right)$$ # + [markdown] slideshow={"slide_type": "subslide"} # ## Step activation function # <img src="https://c.mql5.com/2/4/act1.png" align="middle"> # + [markdown] slideshow={"slide_type": "subslide"} # ## Logistic activation function # # $$\varphi(x) = \frac{1}{1 - e^{-(x-b)}}$$ # # <img width= 300 src="http://upload.wikimedia.org/wikipedia/commons/thumb/b/b5/SigmoidFunction.png/400px-SigmoidFunction.png" align="middle"> # + [markdown] slideshow={"slide_type": "slide"} # ### Question: How to program an artificial neuron to calculate the *and* function? # + [markdown] slideshow={"slide_type": "fragment"} # <br> # <table> # <tr> # <th>$X$</th> # <th>$Y$</th> # <th>$X$ and $Y$</th> # </tr> # <tr> # <td>0</td> # <td>0</td> # <td style="text-align:center">0</td> # </tr> # <tr> # <td>0</td> # <td>1</td> # <td style="text-align:center">0</td> # </tr> # <tr> # <td>1</td> # <td>0</td> # <td style="text-align:center">0</td> # </tr> # <tr> # <td>1</td> # <td>1</td> # <td style="text-align:center">1</td> # </tr> # </table> # + [markdown] slideshow={"slide_type": "subslide"} # ## AND Neural Network # # <img width=500 src="2in-neuron.jpg" align="middle"> # + slideshow={"slide_type": "fragment"} from pybrain.tools.shortcuts import buildNetwork net = buildNetwork(2, 1, outclass=pybrain.SigmoidLayer) print net.params # + slideshow={"slide_type": "subslide"} def print_pred2(dataset, network): df = pd.DataFrame(dataset.data['sample'][:dataset.getLength()],columns=['X', 'Y']) prediction = np.round(network.activateOnDataset(dataset),3) df['output'] = pd.DataFrame(prediction) return df from pybrain.datasets import UnsupervisedDataSet, SupervisedDataSet D = UnsupervisedDataSet(2) # define a dataset in pybrain D.addSample([0,0]) D.addSample([0,1]) D.addSample([1,0]) D.addSample([1,1]) print_pred2(D, net) # + [markdown] slideshow={"slide_type": "subslide"} # ## AND Neural Network # <img width=500 src="2in-neuron.jpg" align="middle"> # + slideshow={"slide_type": "fragment"} net.params[:] = [0, 0, 0] print_pred2(D, net) # + [markdown] slideshow={"slide_type": "slide"} # ### Question: How to program an artificial neuron to calculate the *xor* function? 
# <br/> # <table> # <tr> # <th>$X$</th> # <th>$Y$</th> # <th>$X$ xor $Y$</th> # </tr> # <tr> # <td>0</td> # <td>0</td> # <td style="text-align:center">0</td> # </tr> # <tr> # <td>0</td> # <td>1</td> # <td style="text-align:center">1</td> # </tr> # <tr> # <td>1</td> # <td>0</td> # <td style="text-align:center">1</td> # </tr> # <tr> # <td>1</td> # <td>1</td> # <td style="text-align:center">0</td> # </tr> # </table> # + [markdown] slideshow={"slide_type": "subslide"} # ## Plotting the NN Output # + slideshow={"slide_type": "-"} def plot_nn_prediction(N): # a function to plot the binary output of a network on the [0,1]x[0,1] space x_list = np.arange(0.0,1.0,0.025) y_list = np.arange(1.0,0.0,-0.025) z = [0.0 if N.activate([x,y])[0] <0.5 else 1.0 for y in y_list for x in x_list] z = np.array(z) grid = z.reshape((len(x_list), len(y_list))) plt.imshow(grid, extent=(x_list.min(), x_list.max(), y_list.min(), y_list.max()),cmap=plt.get_cmap('Greys_r')) plt.show() # + [markdown] slideshow={"slide_type": "subslide"} # ## Plotting the NN Output # + slideshow={"slide_type": "-"} net.params[:] = [-30, 20, 20] plot_nn_prediction(net) # + [markdown] slideshow={"slide_type": "subslide"} # <br/> # <br/> # ## Answer: It is impossible with only one neuron! # <br/> # <br/> # + [markdown] slideshow={"slide_type": "fragment"} # # ## We need to use more than one neuron.... # + [markdown] slideshow={"slide_type": "slide"} # ## Multilayer Neural Network # <img src="http://www.cs.nott.ac.uk/~gxk/courses/g5aiai/006neuralnetworks/images/ffnet.jpg"> # + [markdown] slideshow={"slide_type": "subslide"} # ## Learning an XOR NN # + slideshow={"slide_type": "-"} Dtrain = SupervisedDataSet(2,1) # define a dataset in pybrain Dtrain.addSample([0,0],[0]) Dtrain.addSample([0,1],[1]) Dtrain.addSample([1,0],[1]) Dtrain.addSample([1,1],[0]) from pybrain.supervised.trainers import BackpropTrainer net = buildNetwork(2, 2, 1, hiddenclass=pybrain.SigmoidLayer, outclass=pybrain.SigmoidLayer) T = BackpropTrainer(net, learningrate=0.1, momentum=0.9) T.trainOnDataset(Dtrain, 1000) print_pred2(D, net) # + [markdown] slideshow={"slide_type": "subslide"} # ## XOR NN Output Plot # + slideshow={"slide_type": "-"} plot_nn_prediction(net) # + [markdown] slideshow={"slide_type": "slide"} # ## The Little Red Riding Hood Neural Network # # <img src="http://themaleharem.com/wp-content/uploads/2014/06/Walter-crane-little-red-riding-hood-meets-the-wolf-in-the-woods.jpg"> # + [markdown] slideshow={"slide_type": "subslide"} # ## LRRH Network Architecture # # <img src="lrrh net.jpg"> # + [markdown] slideshow={"slide_type": "subslide"} # ## Training # # + slideshow={"slide_type": "fragment"} from pybrain.tools.validation import Validator validator = Validator() Dlrrh = SupervisedDataSet(4,4) Dlrrh.addSample([1,1,0,0],[1,0,0,0]) Dlrrh.addSample([0,1,1,0],[0,0,1,1]) Dlrrh.addSample([0,0,0,1],[0,1,1,0]) df = pd.DataFrame(Dlrrh['input'],columns=['Big Ears', 'Big Teeth', 'Handsome', 'Wrinkled']) print df.join(pd.DataFrame(Dlrrh['target'],columns=['Scream', 'Hug', 'Food', 'Kiss'])) net = buildNetwork(4, 3, 4, hiddenclass=pybrain.SigmoidLayer, outclass=pybrain.SigmoidLayer) # + [markdown] slideshow={"slide_type": "subslide"} # ## Backpropagation # + slideshow={"slide_type": "fragment"} T = BackpropTrainer(net, learningrate=0.01, momentum=0.99) scores = [] for i in xrange(1000): T.trainOnDataset(Dlrrh, 1) prediction = net.activateOnDataset(Dlrrh) scores.append(validator.MSE(prediction, Dlrrh.getField('target'))) plt.ylabel('Mean Square Error') 
plt.xlabel('Iteration') plt.plot(scores) # + [markdown] slideshow={"slide_type": "subslide"} # ## Prediction # + slideshow={"slide_type": "-"} def lrrh_input(vals): return pd.DataFrame(vals,index=['big ears', 'big teeth', 'handsome', 'wrinkled'], columns=['input']) def lrrh_output(vals): return pd.DataFrame(vals,index=['scream', 'hug', 'offer food', 'kiss cheek'], columns=['output']) # + slideshow={"slide_type": "fragment"} in_vals = [1, 1, 0, 0] lrrh_input(in_vals) # + slideshow={"slide_type": "fragment"} lrrh_output(net.activate(in_vals))
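
# The trained network can also be probed with an input pattern that never appeared in the
# three training samples (here a made-up character who is handsome *and* wrinkled) to see
# how it generalizes:

# +
in_vals = [0, 0, 1, 1]   # no big ears, no big teeth, handsome, wrinkled
lrrh_input(in_vals)
# -

lrrh_output(net.activate(in_vals))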
4.Docs/quickIntro2NN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] toc=true # <h1>Table of Contents<span class="tocSkip"></span></h1> # <div class="toc"><ul class="toc-item"><li><span><a href="#Load-Libraries" data-toc-modified-id="Load-Libraries-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Load Libraries</a></span></li><li><span><a href="#Prepare-Data" data-toc-modified-id="Prepare-Data-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Prepare Data</a></span><ul class="toc-item"><li><span><a href="#Adjust-Units-of-Measurements" data-toc-modified-id="Adjust-Units-of-Measurements-2.1"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Adjust Units of Measurements</a></span></li><li><span><a href="#Consolidate-Information-about-Lines" data-toc-modified-id="Consolidate-Information-about-Lines-2.2"><span class="toc-item-num">2.2&nbsp;&nbsp;</span>Consolidate Information about Lines</a></span></li><li><span><a href="#Convert-Runtime-into-Expected-Drive-Time-and-Dwell-Time" data-toc-modified-id="Convert-Runtime-into-Expected-Drive-Time-and-Dwell-Time-2.3"><span class="toc-item-num">2.3&nbsp;&nbsp;</span>Convert Runtime into Expected Drive Time and Dwell Time</a></span></li></ul></li><li><span><a href="#Exploratory-Data-Analysis" data-toc-modified-id="Exploratory-Data-Analysis-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Exploratory Data Analysis</a></span><ul class="toc-item"><li><span><a href="#Distance-Between-Stations-(m)" data-toc-modified-id="Distance-Between-Stations-(m)-3.1"><span class="toc-item-num">3.1&nbsp;&nbsp;</span>Distance Between Stations (m)</a></span></li><li><span><a href="#Drive-Time-between-Stations-(s)" data-toc-modified-id="Drive-Time-between-Stations-(s)-3.2"><span class="toc-item-num">3.2&nbsp;&nbsp;</span>Drive Time between Stations (s)</a></span></li><li><span><a href="#Dwell-Times-(s)" data-toc-modified-id="Dwell-Times-(s)-3.3"><span class="toc-item-num">3.3&nbsp;&nbsp;</span>Dwell Times (s)</a></span></li></ul></li><li><span><a href="#Modelling" data-toc-modified-id="Modelling-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>Modelling</a></span><ul class="toc-item"><li><span><a href="#The-Infrastructure-(Stations,-Depots-etc)" data-toc-modified-id="The-Infrastructure-(Stations,-Depots-etc)-4.1"><span class="toc-item-num">4.1&nbsp;&nbsp;</span>The Infrastructure (Stations, Depots etc)</a></span><ul class="toc-item"><li><span><a href="#Depots" data-toc-modified-id="Depots-4.1.1"><span class="toc-item-num">4.1.1&nbsp;&nbsp;</span>Depots</a></span></li><li><span><a href="#Stations" data-toc-modified-id="Stations-4.1.2"><span class="toc-item-num">4.1.2&nbsp;&nbsp;</span>Stations</a></span></li><li><span><a href="#Cross-Overs" data-toc-modified-id="Cross-Overs-4.1.3"><span class="toc-item-num">4.1.3&nbsp;&nbsp;</span>Cross-Overs</a></span></li></ul></li><li><span><a href="#The-Network" data-toc-modified-id="The-Network-4.2"><span class="toc-item-num">4.2&nbsp;&nbsp;</span>The Network</a></span></li><li><span><a href="#Trains" data-toc-modified-id="Trains-4.3"><span class="toc-item-num">4.3&nbsp;&nbsp;</span>Trains</a></span></li></ul></li><li><span><a href="#Utilities" data-toc-modified-id="Utilities-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>Utilities</a></span><ul class="toc-item"><li><span><a href="#Global-Parameters" data-toc-modified-id="Global-Parameters-5.1"><span 
class="toc-item-num">5.1&nbsp;&nbsp;</span>Global Parameters</a></span></li><li><span><a href="#Time-Parsing-and-Formating" data-toc-modified-id="Time-Parsing-and-Formating-5.2"><span class="toc-item-num">5.2&nbsp;&nbsp;</span>Time Parsing and Formating</a></span></li></ul></li><li><span><a href="#System-Verification" data-toc-modified-id="System-Verification-6"><span class="toc-item-num">6&nbsp;&nbsp;</span>System Verification</a></span></li><li><span><a href="#References" data-toc-modified-id="References-7"><span class="toc-item-num">7&nbsp;&nbsp;</span>References</a></span></li></ul></div> # - # # Load Libraries # + import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt import scipy.stats as stats import math import numpy as np import simpy # - # # Prepare Data dt=pd.read_csv('LondonTube.csv') dt.head() # ## Adjust Units of Measurements # Convert distances to meters dt['Distance']=dt['Distance'].apply(lambda x: int(x*1000)) # Convert running times into seconds dt['Running Time']=dt['Running Time'].apply(lambda x: int(x*60)) dt['Peak Running Time']=dt['Peak Running Time'].apply(lambda x: int(x*60)) dt['Off-Peak Running Time']=dt['Off-Peak Running Time'].apply(lambda x: int(x*60)) dt.head() # ## Consolidate Information about Lines # Note: A short looping branch line of the Central Line has been deleted from the original data set. list(dt['Line'].unique()) list(dt.Direction.unique()) # Note: Each line has its specific rolling stock with drive parameters depending on the track geometry, electrical power supply, and the year of construction. For simplicity we assume fixed parameters across the network. # + lines=dt['Line'].unique() dl=pd.DataFrame(lines, columns=['Line']) dl=dl.set_index('Line') dl['Direction 1']=None dl['Direction 2']=None dl['Stations']=None dl['Total Dist']=None dl['Min Dist']=None dl['Avg Dist']=None dl['Max Dist']=None dl['max a']=0.7 dl['max v']=25 for l in lines: dirs = dt[dt['Line']==l]['Direction'].unique() dirs = sorted(list(dirs)) dl.at[l, 'Direction 1']=dirs[0] dl.at[l, 'Direction 2']=dirs[1] dist = dt[dt['Line']==l]['Distance'] dl.at[l, 'Stations']=int(dist.count()/2) dl.at[l, 'Total Dist']=int(dist.sum()/2) dl.at[l, 'Min Dist']=int(dist.min()) dl.at[l, 'Avg Dist']=int(dist.mean()) dl.at[l, 'Max Dist']=int(dist.max()) dl # - # ## Convert Runtime into Expected Drive Time and Dwell Time # Physics Calculation: time to travel a given distance based on acceleration and max velocity def timeTo(a, maxV, d): # a constant acceleration, m/s² # maxV maximumum velocity, m/s # d distance, km # return time in seconds required to travel # ta = maxV/a # time to accelerate to maxV da = a*ta**2 # distance traveled during acceleration from 0 to maxV and back to 0 if (d < da): # train never reaches full speed? 
return np.sqrt(4.0*d/a) # time needed to accelerate to half-way point then decelerate to destination else: return 2*ta + (d-da)/maxV # time to accelerate to maxV plus travel at maxV plus decelerate to destination dt['Drive Time'] = None for i in range(len(dt)): dist = dt.at[i, 'Distance'] l = dt.at[i, 'Line'] a = dl.at[l, 'max a'] v = dl.at[l, 'max v'] dt.at[i, 'Drive Time']=int(timeTo(a, v, dist)) dt['Dwell Time']=dt['Running Time']-dt['Drive Time'] dt['Peak Dwell Time']=dt['Peak Running Time']-dt['Drive Time'] dt['Off-Peak Dwell Time']=dt['Off-Peak Running Time']-dt['Drive Time'] dt=dt.drop(columns=['Running Time', 'Peak Running Time', 'Off-Peak Running Time']) dt.head() # # Exploratory Data Analysis # ## Distance Between Stations (m) dt['Distance'].min() dt['Distance'].max() dt['Distance'].mean() dt['Distance'].hist(bins=np.linspace(0, 12000, 49)) # ## Drive Time between Stations (s) dt['Drive Time'].min() dt['Drive Time'].max() dt['Drive Time'].hist(bins=np.linspace(0, 500, 51)) # ## Dwell Times (s) dt['Peak Dwell Time'].min() dt['Peak Dwell Time'].max() dt['Peak Dwell Time'].mean() dt['Peak Dwell Time'].std() dt['Peak Dwell Time'].hist(bins=np.linspace(0, 300, 61)) dt['Off-Peak Dwell Time'].min() dt['Off-Peak Dwell Time'].max() dt['Off-Peak Dwell Time'].hist(bins=np.linspace(0, 300, 31)) # # Modelling # Components of the system will be described by Python classes. The parameters are passed as data frames. # # We assume that the global variable `env` is initialised before any component of the system is actually generated. # ## The Infrastructure (Stations, Depots etc) # The class NetworkNode is used as the common base class for all infrastructure classes. Each NetworkNode (like a station) has a name and belongs to a track, i.e. a line and a direction. class NetworkNode: def __init__(self, location, line_name): self.location=location self.line_name=line_name self.tracing=False def setLine(self, line): self.line=line def getLine(self): return self.line def isTracing(self): return self.tracing def traceOn(self): self.tracing=True def traceOff(self): self.tracing=False def trace_prefix(self, train): global max_line_name_length global max_direction_length return f"{now():s} " \ f"{self.line_name:{max_line_name_length}s} " \ f"[Train {train.getNo():2d} " \ f"{train.getDirection():{max_direction_length}s}]" def arr(self, train): if self.isTracing() or self.line.isTracing() or train.isTracing(): print(f"{self.trace_prefix(train):s} arr {self.loc():s}") def dep(self, train, dest): if self.isTracing() or self.line.isTracing() or train.isTracing(): print(f"{self.trace_prefix(train):s} dep {self.loc():s} for {dest.loc():s}") def getTrack(self): pass def loc(self): pass def track(self): pass # ### Depots # A train depot serves a line and feeds trains into a particular direction. class Depot(NetworkNode): def __init__(self, location, line, direction): super().__init__(location, line) self.location=location self.line=line self.direction=direction def initialise(self, capacity): self.trains=simpy.Container(env, capacity, init=capacity) def driveTime(self): return 20 def dwellTime(self): return 10 def dep(self, train, dest): train.setDirection(self.direction) super().dep(train, dest) def getTrack(self): return None def loc(self): return self.location+" (DEPOT)" def track(self): return self.location+" (DEPOT)" # ### Stations # Every Station has a common name describing the location and belongs to a line. 
We found it practical # to have a station object for each of the directions of a line going through the station. Normally we have therefore two stations in a location. When two lines are crossing at an interchange, we will have actually 4 station objects sharing the same common name: two (for each direction) for each of the lines. As such a "station" in our terminology is more like a platform or a track in a real life station. class Station(NetworkNode): def __init__(self, location, line_name, direction): super().__init__(location, line_name) self.location=location self.line_name=line_name self.direction=direction self.tracing=False self.tracks=simpy.Resource(env, capacity=1) dd=dt[dt['Line']==self.line_name] dd=dd[dd['Direction']==self.direction] dd=dd[dd['From']==location] # dd=pd.DataFrame(dt[dt['Line']==self.line_name] \ # [dt['Direction']==self.direction] \ # [dt['From']==location], copy=True) if len(dd)==0: # last node in a direction, has to be adjust for CrossOver or Depot self.drive_time=0 self.off_peak_dwell_time=0 self.peak_dwell_time=0 else: self.drive_time=dd.iloc[0].at['Drive Time'] self.off_peak_dwell_time=dd.iloc[0].at['Off-Peak Dwell Time'] self.peak_dwell_time=dd.iloc[0].at['Peak Dwell Time'] def driveTime(self): return self.drive_time def dwellTime(self): return self.peak_dwell_time if isPeakTime() else self.off_peak_dwell_time def getTrack(self): return self.tracks def loc(self): return self.location def track(self): return self.location+" ("+self.direction+")" # ### Cross-Overs # A cross over node is attached to the last station at the end of a line and allows trains to switch direction. # A cross over node shares the location name and the line with the station it is attached to, however is serves actually both directions. A cross over node may have a limited capacity holding trains. # # The drive time (in and out of the cross over node) is assumed as a fixed constant, as is the dwell time in the cross over node. These parameters could in the future also be stored in the line table. class CrossOver(NetworkNode): def __init__(self, location, line_name, direction_in, direction_out, capacity=2): super().__init__(location, line_name) self.location=location self.line_name=line_name self.direction_in=direction_in self.direction_out=direction_out self.tracks=simpy.Resource(env, capacity=2) def driveTime(self): return 20 def dwellTime(self): return 300 def dep(self, train, dest): train.setDirection(self.direction_out) super().dep(train, dest) def getTrack(self): return self.tracks def loc(self): return self.location+" (X)" def track(self): return self.location+" ("+self.direction_in+" X "+self.direction_out+")" # ## The Network # A track describes a direction of a line. The parameters describing a track are: # - The start and stop times of the line (for example 6:00 to 23:00), # - The time between trains (for example every 5 minutes), but this may vary during the day. # # The parameters for a line will later be taken from the line table. For the time being we use a simple procedure running the simulation of a single track. 
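# A quick illustration of these timing parameters (the numbers below are hypothetical and are not taken from the line table): a track operating from 6:00 to 23:00 with a train dispatched every 300 seconds launches `int((stop - start) / timing)` trains over the service day, which is the same expression used in `Track.process` further down.

# +
start, stop, timing = 6 * 3600, 23 * 3600, 300  # service window and headway, in seconds
print(int((stop - start) / timing))             # -> 204 trains dispatched over the day
# -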
class Track(object): def __init__(self, network, line_name, direction, depot, stations): self.network=network self.line_name=line_name self.direction=direction # create a slice of data for the line dd = dt[dt['Line']==line_name] dd = dd[dd['Direction']==direction] self.data = dd.reset_index(drop=True) self.depot=depot self.stations=[depot]+stations+[depot] self.tracing=False self.tracingTrains=False self.tracingTrainNo=None for s in self.stations: s.setLine(self) def isTracing(self): return self.network.isTracing() or self.tracing def traceOn(self, loc=None, train=None): if loc==None and train==None: self.tracing=True else: for s in self.stations: if s.loc()==loc: s.traceOn() if train!=None: # print(f"traceOn {self.line_name:s} train {str(train):s}") self.tracingTrains=True self.tracingTrainNo=train def traceOff(self, loc=None, train=None): if loc==None and train==None: self.tracing=False else: for s in self.stations: if s.loc()==loc: s.traceOff() if train!=None: self.tracingTrains=False self.tracingTrainNo=train def getNetwork(self): return self.network def getLineName(self): return self.line_name def getStations(self): return self.stations def setTiming(self, start, stop, timing): self.start=start*3600 self.stop=stop*3600 self.timing=timing def process(self): yield env.timeout(self.start-env.now) # the line starts operating at 6am for i in range(int((self.stop-self.start)/self.timing)): t=Train(i, self.data, self) if self.tracingTrains and self.tracingTrainNo==i: t.traceOn() env.process(t.process()) yield env.timeout(self.timing) def to_string(self): names=[ s.track() for s in self.stations ] return self.line+"-"+",".join(names) def allStations(line, direction): dl=dt[dt['Line']==line] ds=dl[dl['Direction']==direction] stations=ds['From'].to_list() stations+=[ds['To'].to_list()[-1]] return [ Station(s, line, direction) for s in stations ] def allTracks(network): names=dt['Line'].unique() lines=[] for line in names: directions=dt[dt['Line']==line]['Direction'].unique() stations=[ allStations(line, d) for d in directions] for i in range(len(directions)): if stations[i]==None or directions[i]==None: continue if stations[i][0].track()==stations[i][-1].track(): # print(line, directions[i], "is cyclic") depot=Depot(stations[i][0].loc(), line, directions[i]) l=Track(network, line, directions[i], depot, stations[i]) lines+=[l] else: found_reverse=False for j in range(i+1, len(directions)): if stations[j]==None or directions[j]==None: continue if stations[i][-1].loc()==stations[j][0].loc() and \ stations[i][0].loc()==stations[j][-1].loc(): depot=Depot(stations[i][0].loc(), line, directions[i]) x=CrossOver(stations[i][-1].loc(), line, directions[i], directions[j]) l=Track(network, line, directions[i], depot, stations[i]+[x]+stations[j]) lines+=[l] stations[j]=None directions[j]=None found_reverse=True break if found_reverse==False: print("Ignoring", line, directions[i]) return lines class Network(object): def __init__(self, start, stop, timing): self.tracks=allTracks(self) self.start=start self.stop=stop self.timing=timing self.tracing=False def isTracing(self): return self.tracing def traceOn(self, line=None, loc=None, train=None): if line==None and loc==None and train==None: self.tracing=True elif line==None: for t in self.tracks: t.traceOn(loc=loc, train=train) else: for t in self.tracks: if t.getLineName()==line: t.traceOn(loc=loc, train=train) def traceOff(self, line=None, loc=None, train=None): if line==None and loc==None and train==None: self.tracing=False elif line==None: for t in 
self.tracks: t.traceOn(loc=loc, train=train) else: for t in self.tracks: if t.getLineName()==line: t.traceOff(loc=loc, train=train) def getLines(self, line=None): tracks=[] for t in self.tracks: if line==None or t.getLine()==line: tracks+=[t] return tracks def process(self): for t in self.tracks: # print("Initialising:", t.description()) t.setTiming(self.start, self.stop, self.timing) yield env.timeout(self.start-env.now) # the line starts operating at 6am env.process(t.process()) # print("Initialising finished") # ## Trains class Train(object): def __init__(self, i, data, track): self.no=i self.data = data self.track = track self.name = f"{track.line_name:s}-{track.direction:s} [Train {i:2d}]" self.direction = None self.tracing = False def setDirection(self, direction): self.direction=direction def getDirection(self): return self.direction def getNo(self): return self.no def isTracing(self): return self.tracing def traceOn(self): print(f"Tracing train: {self.track.line_name:s} [Train {self.no:2d}]") self.tracing=True def traceOff(self): self.tracing=False def process(self): stations = self.track.getStations() here=stations[0] here_req=None for dest in stations[1:]: drivetime=here.driveTime() dwelltime=here.dwellTime() yield env.timeout(dwelltime) if dest.getTrack() is not None: dest_req=dest.getTrack().request() yield dest_req # if the train doesn't get immediate access, it continues waiting # in the current station ('here'). # if the train got access the train departs from 'here'for 'dest' here.dep(self, dest) # Once the train has completely left the station, the train # will release the track of station ('here'). # We assume that the train has left the station after 10 secs delaytime=min(drivetime, 10) yield env.timeout(delaytime) if here_req!=None: here.getTrack().release(here_req) # the train proceeds to drive to the next station ('dest') yield env.timeout(drivetime-delaytime) dest.arr(self) here = dest here_req=dest_req # # Utilities # ## Global Parameters max_line_name_length = max([len(line) for line in dt['Line'].unique()]) max_direction_length = max([len(line) for line in dt['Direction'].unique()]) def isPeakTime(): now=env.now return (3600*7<=now and now<=3600*10) or \ (3600*16<=now and now<=3600*19) # ## Time Parsing and Formating # Format time in seconds as hh:mm:ss def daytime(t): t=int(t) return f"{t//3600:02d}:{(t%3600)//60:02d}:{t%60:02d}" def now(): return daytime(env.now) # # System Verification # As a first step we verify that the trains are running in parallel. env = simpy.Environment() network=Network(6, 7, 300) # network.traceOn(line='Bakerloo') # network.traceOn(line='Bakerloo', loc='KENTON') network.traceOn(line='Bakerloo', train=0) # network.traceOn(loc='KINGS CROSS ST PANCRAS') env.process(network.process()) env.run() # # References # [SimPy](https://simpy.readthedocs.io/en/latest/contents.html)
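# An optional follow-up to the verification run above (a sketch, not part of the original notebook): the same classes can simulate a full service day from 6:00 to 23:00 with a 300-second headway. Expect a considerably longer runtime and little console output unless tracing is enabled.

# +
env = simpy.Environment()
network = Network(6, 23, 300)  # full service day, one train every 300 s per track
env.process(network.process())
env.run()
# -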
9,10_SingleServerApplication/London_Tube/London Underground Step 3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import tensorflow as tf # a = 2 a = tf.constant(2, name="tb_a") # b = 3 b = tf.constant(3, name="tb_b") # x = a + b x = tf.add(a, b, name="tb_x") # for example, # (a, b, x) are the variable names in the code # (tb_a, tb_b, tb_x) are the node names shown in TensorBoard. init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) # run the TensorFlow Session and evaluate the graph (the node names above appear in TensorBoard) print (sess.run(x)) # -
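# The cell above names the graph nodes but never writes the graph to disk, so there is nothing for TensorBoard to load yet. Below is a minimal sketch of that step, assuming the same TensorFlow 1.x API as above; the `./logs` directory and the rebuilt constants are illustrative additions, not part of the original example.

# +
import tensorflow as tf

tf.reset_default_graph()                 # start from a clean graph for this sketch
a = tf.constant(2, name="tb_a")
b = tf.constant(3, name="tb_b")
x = tf.add(a, b, name="tb_x")

with tf.Session() as sess:
    # Write the graph definition so it can be viewed with: tensorboard --logdir ./logs
    writer = tf.summary.FileWriter("./logs", sess.graph)
    print(sess.run(x))
    writer.close()
# -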
code/class_2/example1_constant.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_python3 # language: python # name: conda_python3 # --- # ## Profiling PyTorch training job using Amazon SageMaker Debugger # In this notebook we will demonstrate the deep profiling capability of Amazon SageMaker Debugger # # 1. Setup # 2. Train a PyTorch Tensorflow model for weather prediction with debugger enabled. # 3. Analyze and visualize the system and framework metrics generated by the profiler. # 4. Analyze the profiler report generated by SageMaker Debugger. # 5. Review and implement recommendations from the profiler report. # # # ### Step 1 - Setup # #### 1.1 Import libraries # + import sagemaker import boto3 from sagemaker import image_uris from sagemaker.session import Session from sagemaker.inputs import TrainingInput from sagemaker.pytorch import PyTorch from sagemaker.debugger import ProfilerConfig, FrameworkProfile # - # #### 1.2 Define variables #Set the s3_bucket to the correct bucket name created in your datascience environment s3_bucket = 'datascience-environment-notebookinstance--06dc7a0224df' s3_prefix = 'prepared' region = boto3.Session().region_name # #### 1.3 Setup service clients sagemaker_client = boto3.client("sagemaker") s3_client = boto3.client('s3', region_name=region) # #### 1.4 Training and validation data inputs to training job ##Get the file name at index from the 'prefix' folder def get_file_in_bucket(prefix,index): response = s3_client.list_objects( Bucket=s3_bucket, Prefix=s3_prefix + "/" + prefix ) ## At '0' index you will find the SUCCESS/FAILURE of file uploades to S3. First data file is at index 1 file_name = response['Contents'][index]['Key'] print("Returing file name : " + file_name) return file_name # + content_type = "csv" # Define the data type and paths to the training and validation datasets #Since we are using powerful CPU/GPU instances for training over hours, you can choose to use a single file #for training and validation instead of the entrie dataset to save some time and trainging costs. Change the variable #use_full_data to True to use the complete dataset use_full_data=False #Different train and validation inputs #define the data type and paths to the training and validation datasets if use_full_data == False: ##Update the csv file names to match the contents in your S3 bucket #train_input = TrainingInput("s3://{}/{}/{}/part-00000-2554f113-947e-46bd-be31-9cd75cb4661c-c000.csv".format(s3_bucket, s3_prefix, 'train'), content_type=content_type) #validation_input = TrainingInput("s3://{}/{}/{}/part-00000-85addac2-a753-4bc2-b157-26ff8f5d5952-c000.csv".format(s3_bucket, s3_prefix, 'validation'), content_type=content_type) train_input = TrainingInput("s3://{}/{}".format(s3_bucket, get_file_in_bucket('train',1)), content_type=content_type) validation_input = TrainingInput("s3://{}/{}".format(s3_bucket, get_file_in_bucket('validation',1)), content_type=content_type) else: train_input = TrainingInput("s3://{}/{}/{}/".format(s3_bucket, s3_prefix, 'train'), content_type=content_type, distribution='ShardedByS3Key') validation_input = TrainingInput("s3://{}/{}/{}/".format(s3_bucket, s3_prefix, 'validation'), content_type=content_type, distribution='ShardedByS3Key') # - # ### Step 2 - Train a PyTorch Tensorflow model for weather prediction with debugger enabled. 
# #### 2.1 Define training related variables train_instance_type = "ml.p3.2xlarge" instance_count = 2 # #### 2.2 Define profiler configuration # # With the following profiler_config parameter configuration, Debugger calls the default settings of monitoring, collecting system metrics every 500 milliseconds. For collecting framework metrics, you can set target steps and target time intervals in detail. profiler_config = ProfilerConfig( framework_profile_params=FrameworkProfile(start_step=2, num_steps=7) ) # With this profiler_config settings, Debugger will collect system metrics every 500 milliseconds and framework metrics on the specified steps (from step 2 to 9). For a complete list of parameters and profiling configurations, see Configure Debugger Using Amazon SageMaker Python SDK in the Amazon SageMaker Debugger developer guide. # #### 2.3 Create PyTorch Estimator hyperparameters = { "nproc_per_node": 4, "nnodes": 2, } # + pt_estimator = PyTorch( entry_point="train_pytorch.py", source_dir="code", role=sagemaker.get_execution_role(), instance_count=instance_count, instance_type=train_instance_type, framework_version="1.6", py_version="py3", volume_size=1024, hyperparameters=hyperparameters, #Debugger-specific parameters profiler_config=profiler_config, ) # - ## Kickoff training pt_estimator.fit({'train': train_input, 'test': validation_input}) # ### Step 3 - Analyze and visualize the system and framework metrics generated by the profiler. # #### 3.1 Manual Analysis from smdebug.profiler.system_metrics_reader import S3SystemMetricsReader import time #All collected metrics are persisted in S3. Define path to the profiler artifacts path = pt_estimator.latest_job_profiler_artifacts_path() #Create a reader for the system metrics system_metrics_reader = S3SystemMetricsReader(path) training_job_name = pt_estimator.latest_training_job.name print(f"Training job name: {training_job_name}") # + training_job_status = "" training_job_secondary_status = "" ##Wait till the profiler data is available while system_metrics_reader.get_timestamp_of_latest_available_file() == 0: system_metrics_reader.refresh_event_file_list() client = sagemaker_client.describe_training_job(TrainingJobName=training_job_name) if "TrainingJobStatus" in client: training_job_status = f"TrainingJobStatus: {client['TrainingJobStatus']}" if "SecondaryStatus" in client: training_job_secondary_status = f"TrainingJobSecondaryStatus: {client['SecondaryStatus']}" print( f"Profiler data from system not available yet. {training_job_status}. {training_job_secondary_status}." ) time.sleep(20) print("\n\nProfiler data from system is available") # + from datetime import datetime def timestamp_to_utc(timestamp): utc_dt = datetime.utcfromtimestamp(timestamp) return utc_dt.strftime("%Y-%m-%d %H:%M:%S") # - # Now that the data is available we can query and inspect it. We get the latest available timestamp and query all the events within the given time range: # + system_metrics_reader.refresh_event_file_list() #Get the latest event last_timestamp = system_metrics_reader.get_timestamp_of_latest_available_file() events = system_metrics_reader.get_events(0, last_timestamp * 1000000) # UTC time in micro seconds print( "Found", len(events), "recorded system metric events. Latest recorded event:", timestamp_to_utc(last_timestamp / 1000000), ) # UTC time in seconds to datetime # - # We can iterate over the list of recorded events. Let's have a look on the first event. 
#Show the first system metric event collected print( "Event name:", events[0].name, "\nTimestamp:", timestamp_to_utc(events[0].timestamp), "\nValue:", events[0].value, ) # #### 3.2 GPU and CPU usage # MetricHistogram computes a histogram on GPU and CPU utilization values. Bins are between 0 and 100. Good system utilization means that the center of the distribution should be between 80 to 90. In case of multi-GPU training: if distributions of GPU utilization values are not similar it indicates an issue with workload distribution. # # The following cell will plot the histograms per metric. In order to only plot specific metrics, define the list select_dimensions and select_events. A dimension can be CPUUtilization, GPUUtilization, GPUMemoryUtilization IOPS. With CPUUtilization dimension, CPU uiltization histogram for each single core and total CPU usage will be plotted. In case of GPU, it will visualize utilization and memory for each GPU. In case of IOPS, it will plot IO wait time per CPU. If select_events is specified then only metrics that match the name in select_metrics will be shown. If neither select_dimensions nor select_events are specified, all available metrics will be visualized. One can also specify a start and endtime. # + from smdebug.profiler.analysis.notebook_utils.metrics_histogram import MetricsHistogram system_metrics_reader.refresh_event_file_list() metrics_histogram = MetricsHistogram(system_metrics_reader) metrics_histogram.plot() # - # #### 3.3 Read profiling data: framework annotations # + from smdebug.profiler.algorithm_metrics_reader import S3AlgorithmMetricsReader framework_metrics_reader = S3AlgorithmMetricsReader(path) events = [] while framework_metrics_reader.get_timestamp_of_latest_available_file() == 0 or len(events) == 0: framework_metrics_reader.refresh_event_file_list() last_timestamp = framework_metrics_reader.get_timestamp_of_latest_available_file() events = framework_metrics_reader.get_events(0, last_timestamp) print("Profiler data from framework not available yet") time.sleep(20) print("\n\n Profiler data from framework is available") # - # The following code cell retrieves all recorded events from Amazon S3. # + framework_metrics_reader.refresh_event_file_list() last_timestamp = framework_metrics_reader.get_timestamp_of_latest_available_file() events = framework_metrics_reader.get_events(0, last_timestamp) print( "Found", len(events), "recorded framework annotations. Latest event recorded ", timestamp_to_utc(last_timestamp / 1000000), ) # - # Like before we can inspect the recorded events. Since we are reading framework metrics there is now a start and end time for each event. print( "Event name:", events[0].event_name, "\nStart time:", timestamp_to_utc(events[0].start_time / 1000000000), "\nEnd time:", timestamp_to_utc(events[0].end_time / 1000000000), "\nDuration:", events[0].duration, "nanosecond", ) # #### 3.4 Outliers in step duration # # StepHistogram creates histograms of step duration values. Significant outliers are indication of system bottlenecks. In contrast to SetpTimelineChart it helps identify clusters of step duration values. As a simple example: time spent during training phase (forward and backward pass) will likely be different to time spent during validation phase (forward pass), so we would expect at least two clusters. 
# + from smdebug.profiler.analysis.notebook_utils.step_histogram import StepHistogram framework_metrics_reader.refresh_event_file_list() step_histogram = StepHistogram(framework_metrics_reader) step_histogram.plot() # - # #### 3.5 Heatmap # The following code cell creates a heatmap where each row corresponds to one metric (CPU core and GPU utilizations) and x-axis is the duration of the training job. It allows you to more easily spot CPU bottlenecks (utilization on GPU is low but a utilization of one or more cores is high). # + from smdebug.profiler.analysis.notebook_utils.heatmap import Heatmap view_heatmap = Heatmap( system_metrics_reader, framework_metrics_reader, select_dimensions=["CPU", "GPU"], # optional - comment this line out to see all dimensions. # select_events=["total"], # optional - comment this line out to see all events. plot_height=900, ) # - # ### Section 4 - Analyze the profiler report generated by SageMaker Debugger.<a id='profiler-report'></a> # # In this section we will analyze the report generated by debugger. We will showcase a few sections of the report. For complete details, please download the report from the S3 bucket and review. # # Also note that the exact details in the report generated for your training job may be different from what you see in this section. # #### 4.1 View the location of the report generated. rule_output_path = pt_estimator.output_path + pt_estimator.latest_training_job.job_name + "/rule-output" print( f"You will find the profiler report under `{rule_output_path}/` after the training has finished" ) # To check if the report is generated, list directories and files recursively # ! aws s3 ls {rule_output_path} --recursive # #### Download the report and rule output files recursively using `aws s3 cp` # The following command saves all of the rule output files to the **ProfilerReport-1234567890** folder under your current working directory. # ! aws s3 cp {rule_output_path} ./ --recursive # The following script automatically finds the **ProfilerReport** folder name and returns a link to the downloaded report. # + from IPython.display import FileLink profiler_report_name = [ rule["RuleConfigurationName"] for rule in pt_estimator.latest_training_job.rule_job_summary() if "Profiler" in rule["RuleConfigurationName"] ][0] profiler_report_name display( "Click link below to view the profiler report", FileLink(profiler_report_name + "/profiler-output/profiler-report.html"), ) # - # For more information about how to find, download, and browse Debugger profiling reports, see SageMaker Debugger Profiling Report in the Amazon SageMaker Debugger developer guide. # ### Section 5 - Analyze and Implement recommendations from the report<a id='analyze-profiler-recommendations'></a> # The **Rules Summary** section of the report aggregates all of the rule evaluation results, analysis, rule descriptions, and suggestions. The following table shows a summary of the executed profiler rules. The table is sorted by the rules that triggered most frequently. In training job this was the case for rule LowGPUUtilization. # It has processed 1001 datapoints and triggered 8 times. # # You may see a different rule summary based on the data and the training configuration you use. # # <IMG src = 'images/RulesSummary.png'/> # From the analysis so far and the top recommendations from the table above, there seems to be scope for improving resource utilization and make our training efficient. Based on this change the training configuration settings and re run the training. 
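# Before implementing the changes, the rule results can also be inspected programmatically instead of opening the HTML report. The sketch below only reuses the `rule_job_summary()` call already used above to locate the profiler report; the exact keys in the response may vary by SDK version, so treat it as illustrative.

# +
# List which profiler rules were evaluated and their status (keys assumed from the
# DescribeTrainingJob response; adjust if your SDK version returns different fields).
for rule in pt_estimator.latest_training_job.rule_job_summary():
    print(rule["RuleConfigurationName"], "->", rule.get("RuleEvaluationStatus", "n/a"))
# -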
# #### 5.1 Implement recommendations from the report # # Next, we will rerun the training job with the changed configuration. We will keep the same training cluster of two ml.p3.2xlarge instances and update the batch_size to 1024, increasing it from the default value of 64. # # After the second training job with the new settings is complete, there are new system metrics, framework metrics and a new report generated. ## Change the batch size to 1024 from the default of 64 hyperparameters = { "batch_size": 1024, "nproc_per_node": 4, "nnodes": 2, } pt_estimator_updated_batch_size = PyTorch( entry_point="train_pytorch.py", source_dir="code", role=sagemaker.get_execution_role(), instance_count=instance_count, instance_type=train_instance_type, framework_version="1.6", py_version="py3", volume_size=1024, hyperparameters=hyperparameters, # Debugger-specific parameters profiler_config=profiler_config, ) # Kick off training pt_estimator_updated_batch_size.fit({'train': train_input, 'test': validation_input}) # #### 5.2 Compare the training jobs # # To understand the impact of the training configuration changes, compare the report analysis from the two training jobs. Repeat the process of analyzing the profiler report, implementing the recommendations and comparing with the previous run, until you are satisfied. # rule_output_path = ( pt_estimator_updated_batch_size.output_path + pt_estimator_updated_batch_size.latest_training_job.job_name + "/rule-output" ) print( f"You will find the profiler report under {rule_output_path}/ after the training has finished" ) # #### Download the new report and files recursively using `aws s3 cp` # ! aws s3 cp {rule_output_path} ./ --recursive # Retrieve a file link to the new profiling report. # + from IPython.display import FileLink profiler_report_name = [ rule["RuleConfigurationName"] for rule in pt_estimator_updated_batch_size.latest_training_job.rule_job_summary() if "Profiler" in rule["RuleConfigurationName"] ][0] profiler_report_name display( "Click link below to view the profiler report", FileLink(profiler_report_name + "/profiler-output/profiler-report.html"), ) # - # You should see that the training time has decreased for the training job executed with the updated configuration. But there is still scope for improvement. For the next round of training, let's keep the updated batch size and change the training instance to a less powerful instance, "ml.p2.8xlarge". # #### Next round of training # + train_instance_type = "ml.p2.8xlarge" pt_estimator_updated_batch_size_instance_size = PyTorch( entry_point="train_pytorch.py", source_dir="code", role=sagemaker.get_execution_role(), instance_count=instance_count, instance_type=train_instance_type, framework_version="1.6", py_version="py3", volume_size=1024, hyperparameters=hyperparameters, # Debugger-specific parameters profiler_config=profiler_config, ) # - pt_estimator_updated_batch_size_instance_size.fit({'train': train_input, 'test': validation_input}) # Now, repeat the steps of retrieving the profiler report and analyzing it. This time you might notice that the training time increased slightly, but because we used a p2 instance instead of a p3, your overall training cost will be reduced. # ## Conclusion # # The profiling feature of Amazon SageMaker Debugger is a powerful tool to gain visibility into machine learning training jobs. This notebook provided insight into training resource utilization to identify bottlenecks, analyzed the various phases of training, and identified expensive framework functions.
The notebook also demonstrated how to analyze and implement profiler recommendations.
Chapter07/weather-prediction-debugger-profiler.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + #### """ Simple User Interface """ from movielens import * from sklearn.cluster import KMeans import numpy as np import pickle import random import sys import time user = [] item = [] d = Dataset() d.load_users("data/u.user", user) d.load_items("data/u.item", item) n_users = len(user) n_items = len(item) utility_matrix = pickle.load( open("utility_matrix.pkl", "rb") ) # Find the average rating for each user and stores it in the user's object for i in range(0, n_users): x = utility_matrix[i] user[i].avg_r = sum(a for a in x if a > 0) / sum(a > 0 for a in x) # Find the Pearson Correlation Similarity Measure between two users def pcs(x, y, ut): num = 0 den1 = 0 den2 = 0 A = ut[x - 1] B = ut[y - 1] num = sum((a - user[x - 1].avg_r) * (b - user[y - 1].avg_r) for a, b in zip(A, B) if a > 0 and b > 0) den1 = sum((a - user[x - 1].avg_r) ** 2 for a in A if a > 0) den2 = sum((b - user[y - 1].avg_r) ** 2 for b in B if b > 0) den = (den1 ** 0.5) * (den2 ** 0.5) if den == 0: return 0 else: return num / den # Perform clustering on items movie_genre = [] for movie in item: movie_genre.append([movie.unknown, movie.action, movie.adventure, movie.animation, movie.childrens, movie.comedy, movie.crime, movie.documentary, movie.drama, movie.fantasy, movie.film_noir, movie.horror, movie.musical, movie.mystery, movie.romance, movie.sci_fi, movie.thriller, movie.war, movie.western]) movie_genre = np.array(movie_genre) cluster = KMeans(n_clusters=19) cluster.fit_predict(movie_genre) ask = random.sample(item, 10) new_user = np.zeros(100) #print (utility_matrix.shape) #print (new_user.shape) print ("Please rate the following movies (1-5):") for movie in ask: print (movie.title + ": ") a = int(input()) if new_user[cluster.labels_[movie.id - 1]] != 0: new_user[cluster.labels_[movie.id - 1]] = (new_user[cluster.labels_[movie.id - 1]] + a) / 2 else: new_user[cluster.labels_[movie.id - 1]] = a utility_new = np.vstack((utility_matrix, new_user)) user.append(User(944, 21, 'M', 'student', 110018)) pcs_matrix = np.zeros(n_users) print ("Finding users which have similar preferences.") for i in range(0, n_users + 1): if i != 943: pcs_matrix[i] = pcs(944, i + 1, utility_new) user_index = [] for i in user: user_index.append(i.id - 1) user_index = user_index[:943] user_index = np.array(user_index) top_5 = [x for (y,x) in sorted(zip(pcs_matrix, user_index), key=lambda pair: pair[0], reverse=True)] top_5 = top_5[:5] top_5_genre = [] for i in range(0, 5): maxi = 0 maxe = 0 for j in range(0, 19): if maxe < utility_matrix[top_5[i]][j]: maxe = utility_matrix[top_5[i]][j] maxi = j top_5_genre.append(maxi) print ("Movie genres you'd like:") for i in top_5_genre: if i == 0: print ("unknown") elif i == 1: print ("action") elif i == 2: print ("adventure") elif i == 3: print ("animation") elif i == 4: print ("childrens") elif i == 5: print ("comedy") elif i == 6: print ("crime") elif i == 7: print ("documentary") elif i == 8: print ("drama") elif i == 9: print ("fantasy") elif i == 10: print ("film_noir") elif i == 11: print ("horror") elif i == 12: print ("musical") elif i == 13: print ("mystery") elif i == 14: print ("romance") elif i == 15: print ("science fiction") elif i == 16: print ("thriller") elif i == 17: print ("war") else: print ("western") # -
Untitled1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Rendimiento vs. Riesgo. ¿Cómo medirlos? # # <img style="float: left; margin: 15px 15px 15px 15px;" src="http://www.creative-commons-images.com/clipboard/images/return-on-investment.jpg" width="300" height="100" /> # <img style="float: right; margin: 15px 15px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/5/5a/Risk-dice-example.jpg" title="github" width="300" height="100" /> # > En mercados competitivos, **rendimientos esperados** más altos solo se dan a un precio: necesitas asumir un **riesgo** mayor. # # *Objetivos:* # - Recordar elementos básicos de probabilidad. # - Entender el equilibrio entre rendimiento y riesgo. # - Entender el concepto de riesgo. # - Desarrollar medidas cuantitativas de rendimiento y riesgo para activos. # # Referencia general: # Notas del curso "Portfolio Selection and Risk Management", Rice University, disponible en Coursera. # ___ # ## 0. Antes de empezar... recorderis de probabilidad # # ### 0.1. Variables aleatorias # En teoría de probabilidad, una variable aleatoria (cantidad aleatoria o variable estocástica) es una variable cuyos posibles valores dependen del resultado de un fenómeno aleatorio. # # Es común que dichos resultados dependan de ciertas variables físicas (o económicas) que no están del todo entendidas o conocidas. Por ejemplo, cuando se tira una moneda justa, el resultado final de cara o sello depende de ciertas cantidades físicas con incertidumbre. # # Referencia: https://en.wikipedia.org/wiki/Probability_theory # En mercados financieros, los precios de las acciones responden a como el mercado agrega información (ese proceso depende de la calidad del mercado). Incluso, aunque ese proceso sea conocido, hay eventos que no podemos anticipar. De modo que los precios y rendimientos de los instrumentos los tratamos como variables aleatorias. # ### 0.2. Función discreta de probabilidad # Consideramos un conjunto finito (o contable) $\Omega$ de todos los posibles resultados (o realizaciones) de una variable aleatoria $X$. Entonces a cada elemento en $x\in\Omega$ se le asocia una probabilidad intrínseca $P(X=x)$ que satisface: # # 1. $0\leq P(X=x)\leq1$ para cada $x\in\Omega$, # 2. $\sum_{x\in\Omega} P(X=x)=1$. # # Referencia: https://en.wikipedia.org/wiki/Probability_theory # Para nuestros fines, el conjunto $\Omega$ lo estimaremos con un conjunto finito de la forma $\Omega=\left\lbrace x_j\,:\,j=1,\dots,m\right\rbrace$. Entonces, la segunda condición se puede escribir como # # $$\sum_{j=1}^m P(X=x_j)=1.$$ # # Equivalentemente, si definimos $p_j=P(X=x_j)$ # # $$\sum_{j=1}^m p_j=1.$$ # ### 0.3. Valor esperado # # <img style="float: left; margin: 0px 15px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/f/f9/Largenumbers.svg" width="400" height="200" /> # # El valor esperado de una variable aleatoria es, intuitivamente, el valor promedio a largo plazo de las repeticiones del experimento que representa. Informalmente, la ley de los grandes números afirma que la media aritmética de los resultados de un experimento aleatorio converge al valor esperado cuando el número de repeticiones tiende a infinito. *Ver ejemplo del dado*. # # Para una variable aleatoria discreta $X$ # # $$E[X]=\sum_{x\in\Omega} xP(X=x).$$ # En el caso finito # # $$E[X]=\sum_{j=1}^{m} p_jx_j.$$ # ### 0.4. 
Varianza # # <img style="float: right; margin: 0px 15px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/f/f9/Comparison_standard_deviations.svg" width="400" height="200" /> # # La varianza es el valor esperado de la desviación al cuadrado de una variable aleatoria de su media. Informalmente, mide que tan dispersos (lejos) están los datos de su valor esperado. La desviación estándar es la raiz cuadrada de la varianza. # # Para una variable aleatoria discreta $X$ # # $$Var(X)=\sigma_X^2=E[(X-E[X])^2]=\sum_{x\in\Omega} P(X=x)(x-E[X])^2.$$ # En el caso finito # # $$\sigma_X^2=\sum_{j=1}^{m} p_j(x_j-E[X])^2.$$ # ___ # ## 1. Introducción # ### 1.1. Compensación rendimiento/riesgo # - Cuando se realiza una inversión, pobablemente se anticipan ciertos rendimientos (esperados) futuros. # - Sin embargo, dichos rendimientos futuros no pueden ser predichos con precisión. # - Siempre hay cierto riesgo asociado. # - El rendimiento real casi siempre se desviará de lo que inicialmente se esperaba al inicio del periodo de inversión. # **Ejemplo:** # <img style="float: right; margin: 15px 15px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/7/7e/S_and_P_500_chart_1950_to_2016_with_averages.png" title="github" width="300" height="100" /> # - ¿Qué es el índice S&P500? # - En su peor año, el índice S&P500 cayó un 46% (1926). # - En el 2010, el índice subió un 55%. # # Los inversionistas nunca anticiparon estos resultados extremos cuando realizaron sus inversiones en estos periodos. # - Obviamente, todos nosotros preferimos los rendimientos esperados de inversión más altos posibles. # - En economía no hay torta gratis. Si deseamos renimientos esperados más altos, nos sometemos a un nivel de riesgo más alto. ¿Porqué? Intuitivamente: # - Si pudiéramos obtener más rendimiento sin riesgo extra en un activo: todo el mundo compraría este activo, el precio aumentaría y el rendimiento caería. # - Si los rendimientos no estuvieran correlacionados con el riesgo,todo el mundo vendería activos con riesgo: ¿para qué tener activos riesgosos si los rendimientos no están relacionados con el riesgo? mejor tener activos con el mismo rendimiento y sin riesgo. # **Conclusión: hay una compensación de equilibrio entre rendimiento y riesgo.** # ### 1.2. Concepto de riesgo # # <img style="float: left; margin: 0px 15px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/0/09/Playing-risk-venezuela.JPG" title="github" width="300" height="100" /> # # Hasta acá todo bien, pero, ¿qué es **riesgo**?, ¿cómo se mide? # # - Riesgo significa que en realidad no sabemos qué es lo que va a pasar (incertidumbre). # - "Más cosas pueden pasar de las que pasarán en realidad". # - Existen varias posibilidades pero no sabemos cuál será el resultado. # Instintivamente, asociamos riesgo con peligro. Sin embargo, solo porque más cosas pueden pasar de las que pasarán, no significa que cosas malas pasarán: el resultado puede ser mejor de lo que inicialmente esperamos. # # - Piensen en la expresión "me arriesgué". # - Ahora, de acuerdo a lo anterior, ¿cómo podríamos cuantificar el riesgo? # - Debe tener relación con la *dispersión* de los rendimientos de un activo (fluctuación). # - Esta es una parte de la historia; cuando combinamos activos en un portafolio, también debemos pensar cómo los rendimientos de los activos se mueven en relación a los demás. # ___ # ## 2. Midiendo el rendimiento # ### 2.1. 
Rendimiento medio geométrico # # El **rendimiento** que se obtiene al invertir en un activo sobre un periodo se puede calcular directamente. # **Ejemplo:** # - Suponga que usted invierte en un fondo de acciones. Cada acción se vende actualmente en $\$100$. # - Suponga que su horizonte de inversión es de un año. Si el precio de la acciónal final del año es $\$110$ y los dividendos en el año son $\$5$, ¿cuál es su rendimiento en el periodo de tenencia? # **Ejemplo:** suponga que tiene una serie de rendimientos anuales para el índice S&P500 # Importamos la librería pandas import pandas as pd # Creamos tabla tabla = pd.DataFrame(columns=['ret'], index=range(1,6)) tabla.index.name = 'year' tabla['ret']=[-0.1189,-0.2210,0.2869,0.1088,0.0491] tabla # 1. ¿Cuál es el rendimiento en el periodo de tenencia de los cinco años? # # 2. ¿Cuál es el rendimiento promedio anual a través de los cinco años? # $$r_T=\prod_{i=1}^{T}(1+r_i) - 1,$$ # Respuesta a la pregunta 1 # Respuesta a la pregunta 2 # En general, el **rendimiento medio geométrico** $\bar{r}_g$ satisface # # $$(1+\bar{r}_g)^T=\prod_{i=1}^{T}(1+r_i),$$ # # o equivalentemente # # $$\bar{r}_g=\left[\prod_{i=1}^{T}(1+r_i)\right]^{1/T}-1.$$ # ### 2.2. Rendimiento medio aritmético # # Si pudiéramos obtener escenarios probables para la economía, asociados con ciertas probabilidades, podríamos calcular el **rendimiento esperado** como el promedio ponderado (valor esperado) por probabilidad de los posibles resultados. # # Esto es # # $$E[r]=\sum_{j=1}^{m}p_jr_j,$$ # # donde $r_j$ para $j=1,2,\dots,m$ son los posibles rendimientos y $p_j$ es la probabilidad asociada a que ocurra el rendimiento $r_j$. # **Ejemplo:** # - Suponga que usted invierte en un fondo de acciones. Cada acción se vende actualmente en \$100. # - Suponga que hay cuatro posibles estados futuros de la economía, los cuales se resumen en la siguiente tabla # Creamos tabla tabla2 = pd.DataFrame(columns=['prob', 'price', 'div', 'ret'], index=['excellent', 'good', 'poor', 'crash']) tabla2.index.name = 'state' tabla2['prob']=[0.25,0.45,0.25,0.05] tabla2['price']=[126.50,110.00,89.75,46.00] tabla2['div']=[4.50,4.00,3.50,2.00] tabla2 # Llenar celdas faltantes # Calcular el rendimiento esperado # **Ejemplo:** para la serie de rendimientos anuales para el índice S&P500, podríamos considerar cada uno de los rendimientos observados como posibles resultados igualmente probables... # Entonces el rendimiento esperado se obtiene simplemente como el promedio aritmético de los rendimientos # Primer momento alrededor del cero es la media o valor esperado de la variable aleatoria. La media de una variable aleatoria se considera como una cantidad numérica alrededor de la cual los valores de la variable aleatoria tienden a agruparse. Por lo tanto, la media es una medida de tendencia central. # # **Conclusión: los rendimientos esperados están relacionados con la media (valor esperado) o primer momento alrededor del cero. ** # ### 2.3. Ejercicios # # En esta sección se dejarán algunos ejercicios para ustedes. Si alcanza el tiempo, se harán en clase. # **Ejercicio.** Considere el siguiente reporte de rendimientos de cierta acción en los últimos tres años # | Año | Rendimiento | # | --- | ----------- | # | 1 | -0.10 | # | 2 | 0.20 | # | 3 | 0.30 | # - Calcular el rendimiento medio geométrico. ¿Cuál es su significado? # - Calcular el rendimiento medio aritmético. ¿Cuál es su significado? # importamos pandas # Creamos data frame # rendimiento medio geometrico # rendimiento medio aritmetico # ## 3. 
Midiendo el riesgo # ### 3.1. La volatilidad como medida de riesgo # Dado que el riesgo está estrechamente relacionado con *cuánto no sabemos* acerca de lo que va a pasar, lo podemos cantificar con alguna medida de dispersión de la variable aleatoria de rendimientos. # **Ejemplo:** # - Tiramos una moneda que no está cargada. # - Definimos la variable aleatoria $X$, la cual toma el valor de $+1$ cuando la moneda cae cara y el valor de $-1$ cuando cae sello. # - Como la moneda no está cargada, los eventos tienen igual probabilidad $P(X=1)=P(X=-1)=0.5$. # # El valor esperado de la variable aleatoria $X$ es: # # $$E[X]=P(X=1) \times (1) + P(X=-1) \times (-1)=0.5\times(1)+0.5\times(-1)=0.$$ # # Aunque el resultado real nunca va a ser cero, el resultado esperado es cero. *Necesitamos otra medida adicional para describir la distribución*. # **Ejemplo:** # - Suponga que cada acción de la compañía XYZ en $t=0$ cuestan \$100. # - Existen tres posibilidades para el precio de una acción de XYZ en $t=1$: # - El precio subirá a \$140 (probabilidad del 25%) # - El precio subirá a \$110 (probabilidad del 50%) # - El precio bajará a \$80 (probabilidad del 25%) # Entonces, ¿cómo describimos una distribución de rendimiento? # 1. Tendencia central: # - Usaremos el valor esperado de los rendimientos como su tendencia central (ya vimos porqué). # $$E[r]=\sum_{j=1}^{m}p_jr_j.$$ # 2. Medida de dispersión: # - Usaremos la desviación estándar (volatilidad) o varianza como medida de dispersión para las distribuciones de rendimiento... # $$\sigma_r^2=\sum_{j=1}^{m} p_j(r_j-E[r])^2.$$ # $$\sigma_r=\sqrt{\sum_{j=1}^{m} p_j(r_j-E[r])^2}.$$ # En el ejemplo anterior # Creamos tabla # Calculamos rendimiento esperado # Calculamos varianza # Calculamos volatilidad # **Conclusión: la varianza y la desviación estándar nos brindad una medida de riesgo (incertidumbre, dispersión, volatilidad) en las realizaciones.** # ### 3.2. Ejercicios # # En esta sección se dejarán algunos ejercicios para ustedes. Si alcanza el tiempo, se harán en clase. # **Ejercicio 1.** A partir del análisis de un asesor financiero, se obtuvieron los siguientes datos de rendimientos de activos de cómputo y de telecomunicaciones, relativos a posibles situaciones económicas futuras # | Condición  económica | Rendimiento activo de cómputo | Rendimiento activo telecomunicaciones | Probabilidad | # | -------------------- | ----------------------------- | ------------------------------------- | ------------ | # | Declive | -0.04 | 0.07 | 0.2 | # | Estable | 0.02 | 0.04 | 0.5 | # | Mejora | 0.10 | 0.03 | 0.3 | # Calcular, para cada activo, su rendimiento esperado y volatilidad. # **Ejercicio 2.** Con base en la siguiente distribución de rendimientos para el activo A, calcular la desviación estándar. # | Probabilidad | Rendimiento | # | ------------ | ----------- | # | 0.3 | 0.10 | # | 0.4 | 0.05 | # | 0.3 | 0.30 | # Creamos tabla # ### 3.3. Más acerca de medición de riesgo # # - Entonces, con lo que hemos visto hasta ahora, la distribución de rendimientos de un activo se puede describir simplemente con el rendimiento esperado y la desviación estándar. # - Esto es porque todos los análisis en finanzas se simplifican increiblemente si podemos aproximar los rendimientos con una distribución nomal. # # Pero, ¿qué pasa si la distribución de rendimientos difiere significativamente de una distribución normal? # **Ejemplo.** # # Referencia: Asset Management: A Systematic Approach to Factor Investing. <NAME>, 2014. ISBN: 9780199959327. 
# # Las siguientes gráficas presentan la riqueza acumulada de una inversión de \$1 en el índice S&P500, y en una estrategia de volatilidad sobre el mismo índice. # ![image1](figures/VolStrat_S&P500_1) # - Una *estrategia de volatilidad* es una estrategia de inversión que recibe primas durante periodos estables, pero tiene amplias pérdidas en periodos volátiles. # - Veamos histogramas de los rendimientos de las diferentes estrategias # <img style="float: left; margin: 15px 15px 15px 15px;" src="figures/VolStrat_S&P500_2" width="450" height="100" /> # <img style="float: right; margin: 15px 15px 15px 15px;" src="figures/VolStrat_S&P500_3" width="450" height="100" /> # ¿Diferencias notables? # # Se resumen los cuatro momentos para cada uno de los rendimientos en las siguientes tablas. # | Medida | Estrategia Vol. | Índice S&P500 | # | -------------- | ----------------| ------------- | # | Media | 0.099 | 0.097 | # | Desv. Estándar | 0.152 | 0.151 | # | Asimetría | -8.3 | -0.6 | # | Curtosis | 104.4 | 4.0 | # **Asimetría (Skewness):** # - Una distribución normal tiene medida de asimetría de cero. # - Cuando una distribución es asimétrica hacia la izquierda, los valores negativos extremos (lejos de la media a la izquierda) dominan y la medida es negativa. # - Cuando una distribución es asimétrica hacia la derecha, los valores positivos extremos (lejos de la media a la derecha) dominan y la medida es positiva. # - La volatilidad subestima el riesgo cuando hay asimetría significativa. # # ¿Es siempre inconveniente tener asimetría?, ¿cuando sí?, ¿cuando no? # **Curtosis (Kurtosis):** # - Es una medida de cuán pesadas son las colas. # - Una distribución normal tiene medida de curtosis de 3. # - Colas pesadas implican que hay mayor probabilidad de ocurrencia de eventos extremos (lejos de la media). # - De nuevo, la desviación estándar subestima el riesgo cuando hay curtosis significativa. # ### Recapitulando... # # No es que la desviación estándar sólo aplique para distribuciones normales. Más bien, la volatilidad no captura bien la probabilidad de eventos extremos para distribuciones no normales. # # - Para rendimientos normalmente distribuidos, un rendimiento alejado $2\sigma$ del rendimiento esperado es muy poco probable. Un rendimiento alejado $5\sigma$ del rendimiento esperado es casi imposible que suceda. # - Sin embargo, para ciertas estrategias de cobertura, $E[r]\pm 2\sigma$ es común y $E[r]\pm 5\sigma$ podría llegar a pasar. # # Entonces, la volatilidad es una buena medida de riesgo, siempre y cuando tengamos distribuciones simétricas y sin mucho riesgo de eventos extremos. En otro caso, no es una medida apropiada. # Otras medidas de riesgo que intentan capturar estos fenómenos son: # - VaR (valor en riesgo) # - CVaR (valor en riesgo condicional) # # Anuncios parroquiales # # ## 1. Quiz la siguiente clase... ¡estar presentes temprano! # <script> # $(document).ready(function(){ # $('div.prompt').hide(); # $('div.back-to-top').hide(); # $('nav#menubar').hide(); # $('.breadcrumb').hide(); # $('.hidden-print').hide(); # }); # </script> # # <footer id="attribution" style="float:right; color:#808080; background:#fff;"> # Created with Jupyter by <NAME>. # </footer>
Modulo1/Clase3_RentRiesgo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %run -n main.py # !mkdir -p {DATA_DIR} s3 = S3() # - # %run -n main.py if not exists(LENTA): s3.download(S3_LENTA, LENTA) # # syntax # + # sudo docker run -it --rm --gpus all \ # -p 8082:8080 \ # -e SEQ_LEN=256 \ # -e BATCH_SIZE=1024 \ # natasha/slovnet-syntax-bert # + # # %run -n main.py # records = load_lenta(LENTA) # records = log_progress(records, total=LENTA_TOTAL) # items = process_syntax(records) # lines = format_jl(items) # dump_gz_lines(lines, SYNTAX) # + # s3.upload(SYNTAX, S3_SYNTAX) # -
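# Optional sanity check (an assumption, not part of the original pipeline): before uncommenting the processing cell above, verify that the syntax service published on host port 8082 by the `docker run` command is actually reachable.

# +
import socket

with socket.socket() as sock:
    sock.settimeout(2)
    reachable = sock.connect_ex(("localhost", 8082)) == 0  # 8082 is the host port mapped above
print("syntax service reachable:", reachable)
# -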
scripts/03_syntax.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Riskfolio-Lib Tutorial: # <br>__[Financionerioncios](https://financioneroncios.wordpress.com)__ # <br>__[Orenji](https://www.orenj-i.net)__ # <br>__[Riskfolio-Lib](https://riskfolio-lib.readthedocs.io/en/latest/)__ # <br>__[<NAME>](https://www.linkedin.com/in/dany-cajas/)__ # ## Part VII: Index Tracking/Replicating Portfolios # # ## 1. Downloading the data: # + import numpy as np import pandas as pd import yfinance as yf import warnings warnings.filterwarnings("ignore") yf.pdr_override() pd.options.display.float_format = '{:.4%}'.format # Date range start = '2016-01-01' end = '2019-12-30' # Tickers of assets assets = ['JCI', 'TGT', 'CMCSA', 'CPB', 'MO', 'NBL', 'APA', 'MMC', 'JPM', 'ZION', 'PSA', 'BAX', 'BMY', 'LUV', 'PCAR', 'TXT', 'DHR', 'DE', 'MSFT', 'HPQ', 'SEE', 'VZ', 'CNP', 'NI'] assets.sort() market_index = ['^GSPC'] all_assets = assets + market_index all_assets.sort() # Downloading data data = yf.download(all_assets, start = start, end = end) data = data.loc[:,('Adj Close', slice(None))] data.columns = all_assets # + # Calculating returns Y = data[all_assets].pct_change().dropna() display(Y.head()) # - # ## 2. Estimating Mean Variance Portfolios with Turnover Constraints # # ### 2.1 Calculating the portfolio that maximizes Sharpe ratio. # + import riskfolio.Portfolio as pf # Building the portfolio object port = pf.Portfolio(returns=Y[assets]) # Calculating optimum portfolio # Select method and estimate input parameters: method_mu='hist' # Method to estimate expected returns based on historical data. method_cov='hist' # Method to estimate covariance matrix based on historical data. 
port.assets_stats(method_mu=method_mu, method_cov=method_cov, d=0.94) # Create Turnover Constraints port.allowTO = True # Allows to use Turnover Constraints port.turnover = 0.05 # Maximum deviation in absolute value respect to benchmark weights # By default benchweights is the equally weighted portfolio, # if you want to use a different benchmark weights, you must # specify a weights dataframe with assets names in columns #port.benchweights = weights # Use a dataframe # Estimate optimal portfolio: model='Classic' # Could be Classic (historical), BL (Black Litterman) or FM (Factor Model) rm = 'MV' # Risk measure used, this time will be variance obj = 'Sharpe' # Objective function, could be MinRisk, MaxRet, Utility or Sharpe hist = True # Use historical scenarios for risk measures that depend on scenarios rf = 0 # Risk free rate l = 0 # Risk aversion factor, only useful when obj is 'Utility' w = port.optimization(model=model, rm=rm, obj=obj, rf=rf, l=l, hist=hist) display(w.T) # - # ### 2.2 Plotting portfolio composition # + import riskfolio.PlotFunctions as plf # Plotting the composition of the portfolio ax = plf.plot_pie(w=w, title='Sharpe Mean Variance', others=0.05, nrow=25, cmap = "tab20", height=6, width=10, ax=None) # - # ### 2.3 Calculate efficient frontier # + points = 50 # Number of points of the frontier frontier = port.efficient_frontier(model=model, rm=rm, points=points, rf=rf, hist=hist) display(frontier.T.head()) # + # Plotting the efficient frontier label = 'Max Risk Adjusted Return Portfolio' # Title of point mu = port.mu # Expected returns cov = port.cov # Covariance matrix returns = port.returns # Returns of the assets ax = plf.plot_frontier(w_frontier=frontier, mu=mu, cov=cov, returns=returns, rm=rm, rf=rf, alpha=0.01, cmap='viridis', w=w, label=label, marker='*', s=16, c='r', height=6, width=10, ax=None) # + # Plotting efficient frontier composition ax = plf.plot_frontier_area(w_frontier=frontier, cmap="tab20", height=6, width=10, ax=None) # - # ## 3. Estimating Mean Variance Portfolios with Tracking Error Constraints # # ### 3.1 Calculating the portfolio that maximizes Sharpe ratio. # + import riskfolio.Portfolio as pf # Building the portfolio object port = pf.Portfolio(returns=Y[assets]) # Calculating optimum portfolio # Select method and estimate input parameters: method_mu='hist' # Method to estimate expected returns based on historical data. method_cov='hist' # Method to estimate covariance matrix based on historical data. 
port.assets_stats(method_mu=method_mu, method_cov=method_cov, d=0.94) # Create Tracking Error Constraints port.kindbench = False # True if you have benchmark weights, False if you have an index port.benchindex = Y[market_index] # Index Returns port.allowTE = True # Allows to use Tracking Error Constraints port.TE = 0.005 # Maximum Tracking Error respect to benchmark returns # Estimate optimal portfolio: model='Classic' # Could be Classic (historical), BL (Black Litterman) or FM (Factor Model) rm = 'MV' # Risk measure used, this time will be variance obj = "Sharpe" # Objective function, could be MinRisk, MaxRet, Utility or Sharpe hist = True # Use historical scenarios for risk measures that depend on scenarios rf = 0 # Risk free rate l = 0 # Risk aversion factor, only useful when obj is 'Utility' w = port.optimization(model=model, rm=rm, obj=obj, rf=rf, l=l, hist=hist) display(w.T) # - # ### 3.2 Plotting portfolio composition # + import riskfolio.PlotFunctions as plf # Plotting the composition of the portfolio ax = plf.plot_pie(w=w, title='Sharpe Mean Variance', others=0.05, nrow=25, cmap = "tab20", height=6, width=10, ax=None) # - # ### 3.3 Calculate efficient frontier # + points = 50 # Number of points of the frontier frontier = port.efficient_frontier(model=model, rm=rm, points=points, rf=rf, hist=hist) display(frontier.T.head()) # + # Plotting the efficient frontier label = 'Max Risk Adjusted Return Portfolio' # Title of point mu = port.mu # Expected returns cov = port.cov # Covariance matrix returns = port.returns # Returns of the assets ax = plf.plot_frontier(w_frontier=frontier, mu=mu, cov=cov, returns=returns, rm=rm, rf=rf, alpha=0.01, cmap='viridis', w=w, label=label, marker='*', s=16, c='r', height=6, width=10, ax=None) # + # Plotting efficient frontier composition ax = plf.plot_frontier_area(w_frontier=frontier, cmap="tab20", height=6, width=10, ax=None) # - # ## 3. Estimating Mean Risk Portfolios with Tracking Error Constraints # # In this part I will calculate optimal portfolios for several risk measures using a __Tracking Error Constraint respect to an Index__. First I'm going to calculate the portfolio that maximizes risk adjusted return when CVaR is the risk measure, then I'm going to calculate the portfolios that maximize the risk adjusted return for all available risk measures. # # ### 3.1 Calculating the portfolio that maximizes Return/CVaR ratio. # + rm = 'CVaR' # Risk measure w = port.optimization(model=model, rm=rm, obj=obj, rf=rf, l=l, hist=hist) display(w.T) # - # ### 3.2 Plotting portfolio composition ax = plf.plot_pie(w=w, title='Sharpe Mean CVaR', others=0.05, nrow=25, cmap = "tab20", height=6, width=10, ax=None) # ### 3.3 Calculate efficient frontier # + points = 50 # Number of points of the frontier frontier = port.efficient_frontier(model=model, rm=rm, points=points, rf=rf, hist=hist) display(frontier.T.head()) # + label = 'Max Risk Adjusted Return Portfolio' # Title of point ax = plf.plot_frontier(w_frontier=frontier, mu=mu, cov=cov, returns=returns, rm=rm, rf=rf, alpha=0.01, cmap='viridis', w=w, label=label, marker='*', s=16, c='r', height=6, width=10, ax=None) # + # Plotting efficient frontier composition ax = plf.plot_frontier_area(w_frontier=frontier, cmap="tab20", height=6, width=10, ax=None) # - # ### 3.4 Calculate Optimal Portfolios for Several Risk Measures # + # Risk Measures available: # # 'MV': Standard Deviation. # 'MAD': Mean Absolute Deviation. # 'MSV': Semi Standard Deviation. # 'FLPM': First Lower Partial Moment (Omega Ratio). 
# 'SLPM': Second Lower Partial Moment (Sortino Ratio). # 'CVaR': Conditional Value at Risk. # 'WR': Worst Realization (Minimax) # 'MDD': Maximum Drawdown of uncompounded returns (Calmar Ratio). # 'ADD': Average Drawdown of uncompounded returns. # 'CDaR': Conditional Drawdown at Risk of uncompounded returns. rms = ['MV', 'MAD', 'MSV', 'FLPM', 'SLPM', 'CVaR', 'WR', 'MDD', 'ADD', 'CDaR'] w_s = pd.DataFrame([]) for i in rms: w = port.optimization(model=model, rm=i, obj=obj, rf=rf, l=l, hist=hist) w_s = pd.concat([w_s, w], axis=1) w_s.columns = rms # - w_s.style.format("{:.2%}").background_gradient(cmap='YlGn') # + import matplotlib.pyplot as plt # Plotting a comparison of assets weights for each portfolio fig = plt.gcf() fig.set_figwidth(14) fig.set_figheight(6) ax = fig.subplots(nrows=1, ncols=1) w_s.plot.bar(ax=ax)
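# -

# As a quick sanity check (a sketch, not part of the original tutorial), the realized tracking
# error of the last optimized weights `w` can be compared against the `port.TE` bound set above.
# Tracking error is taken here simply as the standard deviation of the active returns
# (portfolio minus index), which may differ slightly from the exact definition used internally
# by Riskfolio-Lib.

# +
active_returns = Y[assets] @ w.iloc[:, 0] - Y[market_index].iloc[:, 0]
realized_te = active_returns.std()
print('Realized TE: {:.4%} vs. bound: {:.4%}'.format(realized_te, port.TE))
# -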
examples/Tutorial 7.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from __future__ import print_function import torch # + x = torch.empty(5, 3) print(x) ############################################################### # Construct a randomly initialized matrix: x = torch.rand(5, 3) print(x) ############################################################### # Construct a matrix filled zeros and of dtype long: x = torch.zeros(5, 3, dtype=torch.long) print(x) ############################################################### # Construct a tensor directly from data: x = torch.tensor([5.5, 3]) print(x) # - print(x.abs()) # + x = x.new_ones(5, 3, dtype=torch.double) # new_* methods take in sizes print(x) x = torch.randn_like(x, dtype=torch.float) # override dtype! print(x) # - print(x.size()) # + ############################################################### # Resizing: If you want to resize/reshape tensor, you can use ``torch.view``: x = torch.randn(4, 4) y = x.view(16) z = x.view(-1, 8) # the size -1 is inferred from other dimensions print(x.size(), y.size(), z.size()) ############################################################### # If you have a one element tensor, use ``.item()`` to get the value as a # Python number x = torch.randn(1) print(x) print(x.item()) # + a = torch.ones(5) print(a) ############################################################### # b = a.numpy() print(b) ############################################################### # See how the numpy array changed in value. a.add_(1) print(a) print(b) # - print (torch.cuda.is_available())
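# A small follow-up sketch (not part of the original tutorial): the usual device-agnostic
# pattern once CUDA availability has been checked.

# +
# Move tensors to the GPU if one is available, otherwise stay on the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

x = torch.randn(4, 4)
y = torch.ones_like(x, device=device)  # create a tensor directly on the target device
x = x.to(device)                       # or move an existing tensor
z = x + y
print(z)
print(z.to("cpu", torch.double))       # .to() can also change the dtype while moving back
# -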
beginner_source/blitz/tensor-tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt from funcs import * import seaborn as seabornInstance #from sklearn.model_selection import train_test_split #from sklearn.linear_model import LinearRegression from sklearn import metrics # %matplotlib inline # ### Import data # + #data = pd.concat([X, y_recovered, y_deaths, y_recovered_smoothed, y_deaths_smoothed], axis=1) # + #Number of infected for past two weeks X = pd.read_csv('data.csv').iloc[:,1:-3].values #Number of recovered y_recovered = pd.read_csv('data.csv').iloc[:,-3].values #Number of recovered with transformation to smooth data y_rec_smoothed = pd.read_csv('data.csv').iloc[:,-1].values # - # # Smoothing # All different smoothing that I have tried: # - simple exponential smoothing: smaller error:0.19 # - from statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt def find_best_beta(): """Returns optimal alpha such that MAPE error is minimized,along with the MAPE index error in question, and its value""" X_new = np.zeros(X.shape) betas = np.linspace(0,1,100) mapes = [] pct_80 = int(np.ceil(80*len(X)/100)) for beta in betas: for j in range(X.shape[1]): #X_new[:,j]= SimpleExpSmoothing(X[:,j]).fit(smoothing_level=alpha,optimized=False).fittedvalues X_new[:,j]= ExponentialSmoothing(X[:,j], damped=False).fit(smoothing_level=0.9595959595959597, smoothing_slope=beta).fittedvalues X_train, X_test = X_new[:pct_80], X_new[pct_80:] y_train, y_test =y_rec_smoothed[:pct_80], y_rec_smoothed[pct_80:] index = find_best_k(X_train, y_train, X_test, y_test, 'mape') P, q, G, h = generate_params(X_train, y_train, index) gamma = cvxopt_solve_qp(P, q, G, h) y_pred = X_test@gamma mapes.append(mape(y_test, y_pred)) return beta[np.argmin(mapes)],np.argmin(mapes), min(mapes) df1 = pd.DataFrame({r'$\alpha=0.2$': exponential_smoothing(X[:,0], 0.2)}) df1.plot() f = find_best_beta() f # f find best alpha with beta=0.2 f # + # f finding best beta with alpha = 0.95959595 Holt damped f # - # f finding the best beta with alpha = 0.9596 ExponentialSmoothing not damped f # + new_X = np.zeros(X.shape) find_best_alpha() for j in range(X.shape[1]): new_X[:,j] = exponential_smoothing(X[:,j], 0.1, 5) # - new_X.shape def exponential_smoothing(x, rho, K): const = (1-rho)/(1-rho**(K+1)) new_x = [] # range of x r_x = np.arange(K, len(x)-K) # range of k r_k = np.arange(0,K) for i in range(len(x)): if i not in r_x: new_x.append(x[i]) else: ls = [] for k in r_k: ls.append(int(const*rho**k*x[i-k])) new_x.append(np.sum(ls)) return new_x def find_best_alpha(): """Returns optimal alpha such that MAPE error is minimized,along with the MAPE index error in question, and its value""" X_new = np.zeros(X.shape) rhos = np.linspace(0,1,10) pct_80 = int(np.ceil(80*len(X)/100)) Ks = np.linspace(3,10) mapes = np.zeros((len(Ks), len(rhos))) for i, K in enumerate(Ks): for j, rho in enumerate(rhos): for j in range(X.shape[1]): X_new[:,j]= exponential_smoothing(X[:,j], rho, 5) X_train, X_test = X_new[:pct_80], X_new[pct_80:] y_train, y_test =y_rec_smoothed[:pct_80], y_rec_smoothed[pct_80:] index = find_best_k(X_train, y_train, X_test, y_test, 'mape') P, q, G, h = generate_params(X_train, y_train, index) gamma = cvxopt_solve_qp(P, q, G, h) y_pred = X_test@gamma mapes[i, j] = mape(y_test, y_pred) # return optimal K, optimal rho, return 
rhos[np.argmin(mapes)[0]], rhos[np.argmin(mapes)[1]], np.argmin(mapes), min(mapes), mapes f = find_best_alpha() f # + X_new = np.zeros(X.shape) for j in range(X.shape[1]): X_new[:,j] = exponential_smoothing(X[:,j], 0.12121212121212122,5) X_train, X_test = X_new[:pct_80], X_new[pct_80:] y_train, y_test =y_rec_smoothed[:pct_80], y_rec_smoothed[pct_80:] index = find_best_k(X_train, y_train, X_test, y_test, 'mape') P, q, G, h = generate_params(X_train, y_train, index) gamma = cvxopt_solve_qp(P, q, G, h) y_pred = X_test@gamma # - # ## Quadratic Regularization N = X.shape[1] # To do: # - Create matrix M # - Create matrix X (DONE) # - Compute X^TX # - Compute M^TM # - Verify M^TM value, if it coincides with the one G.O. wrote in report # - install library, define instances, run optimizer # + pct_90 = int(np.ceil(90*len(X)/100)) pct_80 = int(np.ceil(80*len(X)/100)) pct_70 = int(np.ceil(70*len(X)/100)) X_train, X_test = X[:pct_80], X[pct_80:] y_train, y_test =y_rec_smoothed[:pct_80], y_rec_smoothed[pct_80:] # + import cvxopt def create_M(N): M = np.zeros((N,N)) for i in range(N): for j in range(N): if i==0: if j == 0: M[i,j]=1 else: M[i,j]=0 elif (i==j): M[i,j]=1 elif (j == (i-1)): M[i,j] = -1 else: M[i,j]=0 return M def generate_G(index): """index: represents k^*, gamma_{k^*} is such that gamma_0 <= gamma_1 <= ...<= gamma_{k^*} >= ... >= gamma_N This function generates a matrix G such that either gamma_index or gamma_{index+1} is the maximum """ #this constraint verifies the gaussian-like distribution of the gamma G = np.zeros((N,N)) for i in range(0, index): for j in range(N): if (i==j): G[i,j] = 1 elif (j == i+1): G[i,j] = -1 for i in range(index, N): for j in range(N): if (i==j): G[i,j] = -1 elif (j == i+1): G[i,j] = 1 # we do not put any condition on idx_th element, and use this line to verify that all gammas are superior or # equal to zero #G[index,:] = 0 #G[index, 0] = -1 #this constraint verifies that -gamma_i <= 0 <=> gamma_i >= 0 forall i # for i in range(N, 2*N): # for j in range(N): # if (i==N+j): # G[i,j]=-1 return G def generate_params(X_train, y_train,k,lambda_=1.0): M = create_M(N) M_tilde = M.T @ M X_tilde = X_train.T @ X_train P = X_tilde + lambda_*(M_tilde) q = -X_train.T@y_train G = generate_G(k) h = np.zeros((N,1)) for i in range(len(h)): h[i] = -0.0000001 return P, q, G, h def find_best_k(X_train, y_train, X_test, y_test, loss): """Returns index of maximum gamma that minimizes the mae loss""" loss = {} for k in range(N): P, q, G, h = generate_params(X_train, y_train, k) gammas = cvxopt_solve_qp(P,q, G, h) y_pred = X_test@gammas loss[k] = mape(y_test,y_pred) return min(loss, key=loss.get) def cvxopt_solve_qp(P, q, G=None, h=None, A=None, b=None): P = .5 * (P + P.T) # make sure P is symmetric args = [cvxopt.matrix(P), cvxopt.matrix(q)] if G is not None: args.extend([cvxopt.matrix(G), cvxopt.matrix(h)]) if A is not None: args.extend([cvxopt.matrix(A), cvxopt.matrix(b)]) sol = cvxopt.solvers.qp(*args) if 'optimal' not in sol['status']: return None return np.array(sol['x']).reshape((P.shape[1],)) # ----------------------------# # LOSS FUNCTIONS # ----------------------------# def mape(y_test, y_pred): return np.mean(np.abs((y_pred-y_test)/y_test)) def mspe(y_test, y_pred): return np.mean(np.square((y_pred-y_test)/y_test)) # ----------------------------# # SMOOTHING # ----------------------------# def simple_exponential_smoothing(series, alpha): result = [series[0]] # first value is same as series for n in range(1, len(series)): result.append(alpha * series[n] + (1 - alpha) * 
result[n-1]) return result def exponential_smoothing(x, rho, K): const = (1-rho)/(1-rho**(K+1)) new_x = [] # range of x r_x = np.arange(K, len(x)-K) # range of k r_k = np.arange(0,K) for i in range(len(x)): if i not in r_x: new_x.append(x[i]) else: ls = [] for k in r_k: ls.append(int(rho**k*x[i-k])) new_x.append(np.sum(ls)) return new_x def find_best_alpha(): """Returns optimal alpha such that MAPE error is minimized,along with the MAPE index error in question, and its value""" X_new = np.zeros(X.shape) alphas = np.linspace(0,1,100) mapes = [] pct_80 = int(np.ceil(80*len(X)/100)) for alpha in alphas: for j in range(X.shape[1]): X_new[:,j]= exponential_smoothing(X[:,j], alpha) X_train, X_test = X_new[:pct_80], X_new[pct_80:] y_train, y_test =y_rec_smoothed[:pct_80], y_rec_smoothed[pct_80:] index = find_best_k(X_train, y_train, X_test, y_test, 'mape') P, q, G, h = generate_params(X_train, y_train, index) gamma = cvxopt_solve_qp(P, q, G, h) y_pred = X_test@gamma mapes.append(mape(y_test, y_pred)) return alphas[np.argmin(mapes)],np.argmin(mapes), min(mapes) # ----------------------------# # GENERATE PREDICTIONS # ----------------------------# index = find_best_k(X_train, y_train, X_test, y_test, 'mape') P, q, G, h = generate_params(X_train, y_train, index) gamma = cvxopt_solve_qp(P, q, G, h) y_pred = X_test@gamma # - gamma pd.DataFrame({'gammas': gamma}).plot() index df = pd.DataFrame({'Actual': y_test.flatten(), 'Predicted': y_pred.flatten()}) df df.plot(kind='bar',figsize=(10,8)) plt.grid(which='major', linestyle='-', linewidth='0.5', color='green') plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black') plt.show() # + print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred)) print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred)) print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) print('Mean Absolute percentage error:', mape(y_test, y_pred)) print('Mean Square percentage error:', mspe(y_test, y_pred)) # - # ### Run same but delete columns 6: and then delete columns 4: (see diff) # + X_4 = X[:,:4] X_6 = X[:,:6] N = X_4.shape[1] pct_90 = int(np.ceil(90*len(X_4)/100)) pct_80 = int(np.ceil(80*len(X_4)/100)) pct_70 = int(np.ceil(70*len(X_4)/100)) X_train, X_test = X_4[:pct_80], X_4[pct_80:] y_train, y_test =y_rec_smoothed[:pct_80], y_rec_smoothed[pct_80:] # + index = find_best_k(X_train, y_train, X_test, y_test, 'mape') P, q, G, h = generate_params(X_train, y_train, index) gamma = cvxopt_solve_qp(P, q, G, h) y_pred = X_test@gamma # - df = pd.DataFrame({'Actual': y_test.flatten(), 'Predicted': y_pred.flatten()}) df.plot(kind='bar',figsize=(10,8)) plt.grid(which='major', linestyle='-', linewidth='0.5', color='green') plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black') plt.show() gamma # + # X_4 print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred)) print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred)) print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) print('Mean Absolute percentage error:', mape(y_test, y_pred)) print('Mean Square percentage error:', mspe(y_test, y_pred)) # - # ## Cross Validation # ### Four independent splits # Here, we use the model with **4 gammas** to perform cross validation in order to find the best parameters. As we have around 70 data points, and need at least twice the number of gammas as number of training points, we start with 10 data points. 
We estimate 10 data points for training, and three for validation. As we have $\frac{69}{13}= 5.3$, we will do five folds. Each fold has 14 points, except for last fold with 13. We validate on two last data points. # If we split every 13 data points, this is what we get: splits_X = np.array_split(X_4, 4, axis=0) splits_y = np.array_split(y_rec_smoothed, 4, axis=0) # + def cross_val(splits_X, splits_y,lambda_=1.0): y_vals = [] y_preds = [] mapes = [] maes = [] for X, y in zip(splits_X, splits_y): pct_90 = int(np.floor(90*len(X)/100)) X_train = X[:pct_90] X_val = X[pct_90:] y_train = y[:pct_90] y_val = y[pct_90:] index = find_best_k(X_train, y_train, X_val, y_val, 'mape') P, q, G, h = generate_params(X_train, y_train, index,lambda_) gamma = cvxopt_solve_qp(P, q, G, h) y_pred = X_val@gamma y_vals.append(y_val) y_preds.append(y_pred) mapes.append(mape(y_val, y_pred)) maes.append(metrics.mean_absolute_error(y_val, y_pred)) y_vals = [item for sublist in y_vals for item in sublist] y_preds =[item for sublist in y_preds for item in sublist] return mapes, maes, y_vals, y_preds mapes, maes, y_vals, y_preds = cross_val(splits_X, splits_y) # - mapes maes df = pd.DataFrame({'Actual': y_vals, 'Predicted': y_preds}) df df.plot(kind='bar',figsize=(10,8)) plt.grid(which='major', linestyle='-', linewidth='0.5', color='green') plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black') plt.show() # ### Advancement validation # + # We want our train set to be of size 40, and then we shift of 10 data points at each new iteration. # the size of our test set is the rest of the dataset points splits = int(np.floor((X_4.shape[0] - 40)/10)) ## mapes = [] maes = [] y_vals = [] y_preds = [] for i in range(splits): begin = 10*i end = 40 + 10*i X_tr = X_4[begin:end,:] y_tr = y_rec_smoothed[begin:end] X_te = X_4[end:,:] y_te = y_rec_smoothed[end:] # Run the model index = find_best_k(X_tr, y_tr, X_te, y_te, 'mape') P, q, G, h = generate_params(X_tr, y_tr, index,10e-5) gamma = cvxopt_solve_qp(P, q, G, h) y_pred = X_te@gamma y_vals.append(y_te) y_preds.append(y_pred) mapes.append(mape(y_te, y_pred)) maes.append(metrics.mean_absolute_error(y_te, y_pred)) y_vals = [item for sublist in y_vals for item in sublist] y_preds =[item for sublist in y_preds for item in sublist] # - print('for each split we have the following MAPE losses: {}, \nResulting in a mean MAPE of {}'.format(mapes, np.mean(mapes))) # # Find best hyperparameter $\lambda$ # this is the function we want to minimize # we want to minimize the mean loss function MAE from our cross validation run def f(lambda_): mapes, maes, y_vals, y_preds = cross_val(splits_X, splits_y, lambda_) return np.mean(maes) # + from scipy.optimize import minimize minimize(f,1.0,method='SLSQP') # + from skopt import gp_minimize from skopt.space import Real, Integer space = [Real(10**-5, 10**0, name='learning_rate')] res = gp_minimize(f,space) lambda_ = res['x'][0] # + def plot_loss_per_lambda(): lambdas = [-10,-1,0, 10e-5, 10e-4, 10e-3, 10e-2, 10e-1, 1, 10] mapes = [] for l in lambdas: X_train = X_4[:pct_80] X_test = X_4[pct_80:] y_train = y_recovered[:pct_80] y_test = y_recovered[pct_80:] #print(X_test@gamma) #print(y_test) index = find_best_k(X_train, y_train, X_test, y_test, 'mape') P, q, G, h = generate_params(X_train, y_train, index,l) gamma = cvxopt_solve_qp(P, q, G, h) y_pred = X_test@gamma mapes.append(format(100*mape(y_test, y_pred),'.20')) print(mapes) print(len(mapes) == len(np.unique(mapes))) lambdas1 = ['-10','-1','0','10e-5', '10e-4', '10e-3', '10e-2', '10e-1', 
'1', '10'] plt.plot(lambdas1, mapes, 'b') #plt.xlabel('Day') #plt.ylabel('Number of Daily Recovered') #plt.legend(['Predicted value','True value']) #plt.title('Baseline Prediction model for k=' + str(k)) #plt.axvline(x=pct_80-1) # - plot_loss_per_lambda() # + def plot_gammas_per_lambda(): lambdas = [-10, -1, 0, 10e-5, 10e-4, 10e-3, 10e-2, 10e-1, 1, 10] gammas = [] for l in lambdas: X_train = X_4[:pct_80] X_test = X_4[pct_80:] y_train = y_recovered[:pct_80] y_test = y_recovered[pct_80:] #print(X_test@gamma) #print(y_test) index = find_best_k(X_train, y_train, X_test, y_test, 'mape') P, q, G, h = generate_params(X_train, y_train, index,l) gamma = cvxopt_solve_qp(P, q, G, h) y_pred = X_test@gamma gammas.append(format(np.mean(gamma), '.20f')) print(gammas) lambdas1 = ['-10','-1','0','10e-5', '10e-4', '10e-3', '10e-2', '10e-1', '1', '10'] plt.plot(lambdas1, gammas, 'b') #plt.xlabel('Day') #plt.ylabel('Number of Daily Recovered') #plt.legend(['Predicted value','True value']) #plt.title('Baseline Prediction model for k=' + str(k)) #plt.axvline(x=pct_80-1) # - plot_gammas_per_lambda()
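# A tiny synthetic sanity check (a sketch, not part of the original analysis): with
# $\lambda = 0$ and no inequality constraints, the quadratic program built from
# $P = X^\top X$ and $q = -X^\top y$ minimizes $\frac{1}{2}\gamma^\top X^\top X \gamma - y^\top X \gamma$,
# which is ordinary least squares, so `cvxopt_solve_qp` should agree with `np.linalg.lstsq`
# on a small random problem. `X_toy`/`y_toy` are made-up data for this check only.

# +
np.random.seed(0)
X_toy = np.random.rand(30, 4)
y_toy = X_toy @ np.array([0.1, 0.2, 0.3, 0.4]) + 0.01 * np.random.randn(30)

P_toy = X_toy.T @ X_toy      # lambda_ = 0, so the M'M regularization term drops out
q_toy = -X_toy.T @ y_toy

gamma_qp = cvxopt_solve_qp(P_toy, q_toy)            # no G, h -> unconstrained QP
gamma_ls, *_ = np.linalg.lstsq(X_toy, y_toy, rcond=None)
print(np.allclose(gamma_qp, gamma_ls, atol=1e-5))
# -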
.ipynb_checkpoints/without_output-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # we consider a least square problem, f(x) = (Ax-b)^2 import numpy as np import numpy.linalg as la from scipy.stats import ortho_group import matplotlib.pyplot as plt from tqdm.notebook import tqdm from math import sqrt import warnings warnings.filterwarnings("error") # + np.random.seed(seed=42) agent_num = 10 d=100 data_num = 20 # x_opt = np.ones((d))/d x_opt = np.random.randn(d)+10 x_local_opt = np.tile(x_opt,(agent_num, 1)) x_local_opt = x_local_opt+ np.random.rand(agent_num, d) # print(x_local_opt[:,0]) # P = ortho_group.rvs(d) A = np.zeros((agent_num, data_num, d)) b = np.zeros((agent_num, data_num)) for i in range(agent_num): P = ortho_group.rvs(d) eigv = (np.random.rand(d)) eigv[0:5] = np.zeros(5) A_temp = np.matmul( np.diag(eigv), P) A[i] = A_temp[0:data_num,:] # A = np.matmul(np.transpose(P), np.matmul( np.diag(eigv), P)) # x_opt = np.random.rand(d) b[i] = np.matmul( A[i], x_local_opt[i]) # filename = "d_1000_n_10" # np.save(filename+"_A", A) # np.save(filename+"_b", b) # np.save(filename+"_x", x_opt) print(np.linalg.matrix_rank(np.matmul(np.transpose(A[1]), A[1]))) # + # gamma = np.load("gamma.npy") # # we first try to calculate the real optimal x_star A_stack = np.reshape(A, (200,100)) b_stack = np.reshape(b, (200)) # C = np.zeros((d,d-1)) # for i in range(d-1): # C[i,i] = 1 # C[d-1,:]=-1 # # print(C) # A_prime = np.matmul(A_stack, C) # c_vector = np.zeros((d)) # c_vector[-1]=1 # # print(c_vector) # b_prime = b_stack - np.matmul(A_stack, c_vector) # pinv_A = np.linalg.pinv(A_prime) # x_temp = np.matmul(pinv_A, b_prime) # x_direct = np.zeros((d)) # x_direct[:d-1] = x_temp # x_direct[d-1] = 1 - np.sum(x_temp) # print(x_direct[0]) # error = np.matmul(A_stack, x_direct)-b_stack # loss_star = np.matmul(np.transpose(error), error) # print(np.sum(loss_star)) pinv_A = np.linalg.pinv(A_stack) x_direct = np.matmul(pinv_A, b_stack) # print(x_direct[0]) error = np.matmul(A_stack, x_direct)-b_stack loss_star = np.matmul(np.transpose(error), error) plt.plot(x_direct) print(np.min(x_direct)) agent_num = 10 rho = 1 P = np.zeros((10,10)) beta = 0.1 for i in range(10): for j in range(10): if i == j: P[i,j] = 1 - beta if np.abs(i-j) == 1: # if np.abs(i-j) == 1 or np.abs(i-j) == 2: P[i, j] = beta/2 if (10 - np.abs(i-j)) == 1 : # if (10 - np.abs(i-j)) == 1 or (10 - np.abs(i-j)) == 2: P[i, j] = beta/2 print(P) P_tilde = (P+np.eye(agent_num))/2 # print(P_tilde) # x_0 = np.random.rand( d) x_0 = np.ones((agent_num, d))/d # x_0 = np.random.rand(agent_num, d) print(np.linalg.matrix_rank(P)) ev = la.eigvals(P) print(ev) # + jupyter={"outputs_hidden": true} import time x_0 = np.random.rand(agent_num, d) #first is gradient Descent iter_num = 1000000 lr = 10 time_int = 0.01 x_k = np.zeros((agent_num,d)) x_k1 = np.zeros((agent_num,d)) z_k = np.zeros((agent_num,d)) z_k1 = np.zeros((agent_num,d)) # x_k2 = np.zeros((agent_num,d)) y_k = np.zeros((agent_num,d)) y_k1 = np.zeros((agent_num,d)) # y_k2 = np.zeros((agent_num,d)) # x_accumulate = np.zeros((agent_num,d)) # y_accumulate = np.zeros((agent_num,d)) # z = np.zeros(d) # loss_z = np.zeros((iter_num)) loss_x_global = np.zeros((agent_num, iter_num)) # accu_x_record = np.zeros((iter_num)) # accu_y_record = np.zeros((iter_num)) x_record = np.zeros((iter_num)) x_mean = np.zeros((iter_num)) loss = np.zeros((agent_num, iter_num)) # z = np.mean(x_0, 
axis = 0) # for i in range(agent_num): # x_k[i] = np.copy(x_0) x_k = np.copy(x_0) # # z_k = np.copy(x_k) z_k = np.log(x_k)+1 # for k in tqdm(range( iter_num)): for i in range(agent_num): local_x_average = np.matmul(np.expand_dims(P[i,:], axis=0), x_k ) z_k[i,:] = np.log(local_x_average)+1 gradient_k1 = np.matmul(np.transpose(A[i]), (np.matmul(A[i], x_k[i,:])-b[i])) # y_k1[i,:] = y_k[i,:] + time_int*(x_k[(i+1)%agent_num,:]+x_k[(i-1)%agent_num,:] - 2*x_k[i,:]) z_k1[i,:] = z_k[i,:] - time_int*(lr*(1/np.sqrt((k*time_int)+1))*gradient_k1) error = np.matmul(A[i], x_k[i,:])-b[i] loss[i, k] = np.matmul(np.transpose(error), error) # change mirror method for i in range(agent_num): x_k[i,:] = np.exp(z_k1[i,:])/np.exp(1) # print(np.max(x_k[i,:])) # print(np.max(x_k[i,:])) # x_k[i,:] = np.exp(z_k1[i,:])/sum(np.exp(z_k1[i,:])) # z_k = np.copy(z_k1) y_k = np.copy(y_k1) # z = np.mean(x_k, axis = 0) # print(z[0]) # for i in range(agent_num): # error = np.matmul(A_stack, z)-b_stack # loss_z[k] = np.matmul(np.transpose(error), error) temp_sum = 0 # # for agent in range(agent_num): # temp_sum += np.linalg.norm(x_k[agent,:]-x_direct) x_bar = np.mean(x_k, axis=0) for agent in range(agent_num): temp_sum += np.linalg.norm(x_k[agent,:]-x_bar) x_record[k] = temp_sum/agent_num for i in range(agent_num): error = np.matmul(A_stack, x_k[i,:])-b_stack loss_x_global[i,k] = np.matmul(np.transpose(error), error) if((k+1)%10000 == 0): print(loss_x_global[i,k]-loss_star) # print(loss_star) # if((loss_x_global[i,k]-loss_star)< 0.01): # print("reached") # reach=0 # time.sleep(3600) # + plt.plot(x_record, label='x value at agent 1') # plt.plot(x_mean, label='x value of global average') # plt.scatter(accu_x_record[1:]) # plt.plot(gamma[1:20000]) y_range = 0.5 plt.ylim((-y_range,y_range)) plt.xlabel('iteration (k)') plt.ylabel('$x - x^*$') # ax.annotate('', xy=(0.99, 0), ha='left', va='top', xycoords='axes fraction', fontsize=8) plt.title("Diminishing Step-size") plt.legend(loc='upper right') # print(accu_x_record[-1])x_k[i,:] # plt.xlabel("x_tilde value") # print(gamma[100000]) # plt.savefig("consensus_diminish") # print(gamma[100000]) # + # plt.plot(x_record[1000000:]) # plt.plot(gamma[1:20000]) # plt.ylim((-5e10,5000000)) # print(x_record[-1]) # plt.xlabel("local x value") # plt.savefig("non_accel_EXTRA x_record 5m_it over_k") # + # loss_z_k2 = (loss_z-loss_star)*range(iter_num)*range(iter_num) # loss_z_k1 = (loss_z-loss_star)*range(iter_num) plt.plot(loss_x_global[0,:]) # plt.ylim((-0.01,0.5)) # print(loss_z_k2[1:].min()) # print((loss_z*range(iter_num))[-1]) # plt.xlabel("(loss-loss*)xk^2") # plt.savefig("non_accel_EXTRA (loss-loss_star)k 5m_it over_k") # plt.savefig("diminishing_100k_simplex_loss") # + proposed_loss = np.load("100d_simplex_100k_loss.npy") for i in range(1): # loss_x_k2 = (loss_x_global[i,:]-loss_star)*range(iter_num)*range(iter_num) proposedloss_x = (proposed_loss[i,:]-loss_star) loss_x = (loss_x_global[i,:]-loss_star) # loss_x_k2 = (loss_x_global[i,:])*range(iter_num)*range(iter_num) # loss_x_k2 = (loss_x_global[i,:]-loss_star)*range(iter_num) plt.plot(np.log(loss_x[1:])) plt.plot(np.log(proposedloss_x[1:])) # loss_x2_k2 = (loss[1,:]-loss_star)*range(iter_num)*range(iter_num) # plt.savefig("comparison_100k_simplex_log(local_loss-loss_star)") # plt.plot(loss_x2_k2[1:]) # plt.ylim((0,1)) # print(loss_x_k2[1:].min()) # print(loss_z*range(30000)[-1]) # plt.xlabel("(loss-loss*)xk^2") # + proposed_loss = np.load("100d_simplex_100k_loss.npy") # print(proposed_loss) for i in range(1): # loss_x_k2 = 
(loss_x_global[i,:]-loss_star)*range(iter_num)*range(iter_num) proposedloss_x = (proposed_loss[i,:]-loss_star) loss_x = (loss_x_global[i,:]-loss_star) # loss_x_k2 = (loss_x_global[i,:])*range(iter_num)*range(iter_num) # loss_x_k2 = (loss_x_global[i,:]-loss_star)*range(iter_num) plt.plot((loss_x[:])) plt.plot((proposedloss_x[:])) # loss_x2_k2 = (loss[1,:]-loss_star)*range(iter_num)*range(iter_num) # plt.savefig("comparison_100k_simplex_log(local_loss-loss_star)") # plt.plot(loss_x2_k2[1:]) # plt.ylim((0,1)) # print(loss_x_k2[1:].min()) # print(loss_z*range(30000)[-1])probability simplex diminishing_step-Copy1 # plt.xlabel("(loss-loss*)xk^2") # - # np.save("100d_diminishing_simplex_1m_loss", loss_x_global) # np.save("100d_diminishing_simplex_1m_loss_star", loss_star) np.save("100d_diminishing_simplex_1m_x_minus_bar", x_record)
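# The z/x mapping above matches the negative-entropy mirror map, i.e. $\nabla\phi(x) = \log x + 1$
# with inverse $x = e^{z-1}$. As a hedged single-agent sketch (not part of the distributed
# experiment above), the same idea can be written as exponentiated gradient with an explicit
# normalization back onto the probability simplex:

# +
x_eg = np.ones(d) / d
eta = 1e-3
for k in range(2000):
    grad = A_stack.T @ (A_stack @ x_eg - b_stack)
    x_eg = x_eg * np.exp(-eta * grad)   # multiplicative (mirror) step
    x_eg = x_eg / x_eg.sum()            # Bregman projection onto the simplex

# Loss of the simplex-feasible iterate; it need not reach loss_star, because the
# unconstrained least-squares solution x_direct does not lie on the simplex.
print(np.linalg.norm(A_stack @ x_eg - b_stack) ** 2, loss_star)
# -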
probability simplex diminishing_step.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import matplotlib.pyplot as plt import numpy as np x = np.arange(-5.0, 5.0, 0.01) y = np.sin(np.pi*x) plt.plot(x,y) plt.title('Plot Example') plt.xlabel('X-axis') plt.ylabel('Y-axis') plt.show() x = np.arange(20) y = np.random.randint(1,100, 20) plt.plot(x,y,marker='o',color='black') plt.title('Line Chart') plt.xlabel('X-Axis') plt.ylabel('Y-Axis') plt.show() y=np.random.normal(100,5,30) plt.plot(y,color='red',marker='o',linestyle='--') plt.title('Line Plot') plt.xlabel('X-Axis') plt.ylabel('Y-Axis') plt.show() n = 100 columns = ['Brand '+str(number) for number in range(1,5)] date = pd.date_range(start = '2021-1-1', periods = n) data = np.random.randint(low = 1, high = [100, 330, 45, 77], size = (n, 4)) df = pd.DataFrame(data,columns = columns, index = date) df # + plt.figure(figsize=(10,10)) plt.plot(df.index,df['Brand 1'],color='black') plt.title('Brand A') plt.xlabel('Time') plt.ylabel('Values') plt.grid(True) plt.show() # - df['Brand 1'].plot() data = df.sum().sort_values(ascending = False) print(data) #plot plt.bar(x = data.index, height = data) plt.ylabel('Values') plt.title('DF') plt.xlabel('Brands') plt.show() n = 100 columns = ['Brand '+str(number) for number in range(1,5)] date = pd.date_range(start = '2021-1-1', periods = n) data = np.random.randint(low = 1, high = [100, 330, 45, 77], size = (n, 4)) df = pd.DataFrame(data,columns = columns, index = date) df data=df.sum().sort_values() data plt.barh(data.index,data) plt.xlabel('Values') plt.ylabel('Brands') plt.show() from sklearn.datasets import make_regression X, y = make_regression(n_samples = 100, n_features = 1, noise = 20) plt.scatter(X,y,marker = '^',alpha = 0.3) plt.xlabel('Feature') plt.ylabel('Target') plt.title('Relation B/W X & Y') plt.grid(True) plt.show() data = np.random.normal(loc = 100, scale = 0.5, size = 100) data plt.hist(data) plt.title('Histogram') plt.xlabel('Data') x = np.arange(1,11) y1 = x**2 y2 = x**3 y3 = x**4 plt.plot(y1, 'o', y2, '^', y3, 'bs') plt.legend(['y1', 'y2','y3'], loc = 'upper left') plt.title('Values', fontsize = 20) plt.xlabel('X', fontsize = 15) plt.ylabel('Y', fontsize = 15) plt.grid(linestyle = '--', alpha = 0.8, color = 'red') plt.show() brand_A = [120, 130, 145, 177, 270, 211] brand_B = [90, 41, 140, 150, 230, 193] months = ['January','February','March','April','May','June'] data={ 'brand_A':brand_A, 'brand_B':brand_B } df=pd.DataFrame(data,index=months) df plt.bar(x = months, height = brand_A, color = 'blue', label = 'Brand A') plt.bar(x = months, height = brand_B, color = 'red', label = 'Brand B') plt.title('Revenue over time') plt.savefig('fig.jpeg') plt.show() sales = [1500.2, 1378, 671, 431, 700] labels = ['Brand '+str(number) for number in range(1,len(sales)+1)] plt.pie(sales,labels=labels) plt.show() # + sales = [1500.2, 1378, 671, 431, 700] labels = ['Brand '+str(number) for number in range(1,len(sales)+1)] explode = (0.1, 0.1, 0.1, 0, 0) plt.pie(sales, labels = labels, autopct = '%1.2f%%', explode = explode, shadow = True) #plt.legend(labels, loc = 'upper left') plt.show() # - np.random.seed(7) data = np.random.randint(low = 100, high = [250, 300, 500, 1000], size = (12, 4)) # + sales = [1500.2, 1378, 671, 431, 700] labels = ['Brand '+str(number) for number in range(1,len(sales)+1)] explode = (0.1, 0.1, 0.1, 0, 0) plt.pie(sales, labels = 
labels, autopct = '%1.2f%%', explode = explode, shadow = True) plt.legend(labels, loc = 'lower right') plt.show() # - np.random.seed(7) data = np.random.randint(low = 100, high = [250, 300, 500, 1000], size = (12, 4)) date=pd.date_range('01-01-2020',periods=12,freq='M') index=date.month_name() plt.figure(figsize=(10,10)) plt.legend(index,loc='best') plt.plot(index,data) plt.show() # + np.random.seed(7) #dataframe data = np.random.randint(low = 100, high = [250, 300, 500, 1000], size = (12, 4)) index = pd.date_range(start = '2021-1-1', periods = 12, freq = 'M') df = pd.DataFrame(data, columns = list('ABCD'), index = index.month_name()) #plot plt.figure(figsize = (15, 5)) plt.plot(df) plt.title('Values over time') plt.legend(df.columns, loc = 'best') plt.grid(linestyle = '--', alpha = 0.5) plt.xlabel('X-axis') plt.ylabel('Y-axis') plt.ylim(80, 1000) plt.savefig('fig.jpeg') plt.show() # - df # + np.random.seed(7) #dataframe data = np.random.randint(low = 100, high = [250, 300, 500, 1000], size = (12, 4)) index = pd.date_range(start = '2021-1-1', periods = 12, freq = 'M') df = pd.DataFrame(data, columns = list('ABCD'), index = index.month_name()) frequency = df.sum().sort_values() #a) plt.bar(x = frequency.index, height = frequency) plt.title('Bar plot') plt.show() #b) plt.barh(y = frequency.index, width = frequency) plt.title('Horizontal bar plot') plt.show() #c) explode = (0, 0.1, 0.1, 0.1) plt.pie(frequency, shadow = True, autopct='%1.2f%%', explode = explode) plt.legend(df.columns, loc = 'upper left') plt.show() # - df=pd.read_csv('Iris.csv') df.head() df.drop(columns=['Id'],inplace=True) a=df.Species.value_counts() a plt.bar(a.index,a) plt.show() plt.barh(a.index,a) plt.show() plt.pie(a,labels=a.index) plt.scatter(df.SepalLengthCm,df.SepalWidthCm) plt.show() df df.iloc[:,0:4] plt.hist(df.iloc[:,0:4]) plt.show() plt.boxplot(df.iloc[:,0:4]) plt.show() df=pd.read_csv('Airline_passengers.csv') df.head() series = pd.read_csv( 'Airline_passengers.csv' , header=0, index_col=0, parse_dates=True, squeeze=True) series series.describe() series.shape plt.plot(series) plt.boxplot(series) plt.show() plt.hist(series) import seaborn as sns sns.heatmap(df.isnull(), cbar=False) from pandas.plotting import lag_plot lag_plot(series)
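# A small add-on sketch (not one of the original exercises): the same passenger series shown
# in a single figure with two panels using `plt.subplots`.

# +
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 4))
axes[0].plot(series)
axes[0].set_title('Airline passengers over time')
axes[1].hist(series, bins=20)
axes[1].set_title('Distribution of monthly passengers')
plt.tight_layout()
plt.show()
# -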
matplotlib.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Python statistics essential training - 04_04_testing # Standard imports import math import io # + import numpy as np import pandas as pd import matplotlib import matplotlib.pyplot as pp # %matplotlib inline # - import scipy.stats import scipy.optimize import scipy.spatial pumps = pd.read_csv('pumps.csv') pumps cholera = pd.read_csv('cholera.csv') cholera.loc[0::20] pp.figure(figsize=(6,6)) pp.scatter(pumps.x,pumps.y,color='b') pp.scatter(cholera.x,cholera.y,color='r',s=3) img = matplotlib.image.imread('london.png') # + pp.figure(figsize=(10,10)) pp.imshow(img,extent=[-0.38,0.38,-0.38,0.38]) pp.scatter(pumps.x,pumps.y,color='b') pp.scatter(cholera.x,cholera.y,color='r',s=3) # - cholera.closest.value_counts() cholera.groupby('closest').deaths.sum() def simulate(n): return pd.DataFrame({'closest': np.random.choice([0,1,4,5],size=n,p=[0.65,0.15,0.10,0.10])}) simulate(489).closest.value_counts() sampling = pd.DataFrame({'counts': [simulate(489).closest.value_counts()[0] for i in range(10000)]}) sampling.counts.hist(histtype='step') scipy.stats.percentileofscore(sampling.counts,340) 100 - 98.14
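# The cell above hard-codes the percentile printed by `percentileofscore`; a hedged alternative
# sketch computes the simulated one-sided p-value (in percent) directly from the sampling
# distribution, which matches `100 - percentileofscore(...)` up to tie handling.

100 * (sampling.counts >= 340).mean()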
04_04_testing_end.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + # Copyright 2021 Google LLC # Use of this source code is governed by an MIT-style # license that can be found in the LICENSE file or at # https://opensource.org/licenses/MIT. # Notebook authors: <NAME> (<EMAIL>) # and <NAME> (<EMAIL>) # This notebook reproduces figures for chapter 13 from the book # "Probabilistic Machine Learning: An Introduction" # by <NAME> (MIT Press, 2021). # Book pdf is available from http://probml.ai # - # <a href="https://opensource.org/licenses/MIT" target="_parent"><img src="https://img.shields.io/github/license/probml/pyprobml"/></a> # <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/book1/figures/chapter13_neural_networks_for_structured_data_figures.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # ## Figure 13.1:<a name='13.1'></a> <a name='xor'></a> # # (a) Illustration of the fact that the XOR function is not linearly separable, but can be separated by the two layer model using Heaviside activation functions. Adapted from Figure 10.6 of <a href='#Geron2019'>[Aur19]</a> . # Figure(s) generated by [xor_heaviside.py](https://github.com/probml/pyprobml/blob/master/scripts/xor_heaviside.py) #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') google.colab.files.view("./xor_heaviside.py") # %run xor_heaviside.py # ## Figure 13.2:<a name='13.2'></a> <a name='activationFns2'></a> # # (a) Illustration of how the sigmoid function is linear for inputs near 0, but saturates for large positive and negative inputs. Adapted from 11.1 of <a href='#Geron2019'>[Aur19]</a> . (b) Plots of some popular non-saturating activation functions. # Figure(s) generated by [activation_fun_plot.py](https://github.com/probml/pyprobml/blob/master/scripts/activation_fun_plot.py) #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') google.colab.files.view("./activation_fun_plot.py") # %run activation_fun_plot.py # ## Figure 13.3:<a name='13.3'></a> <a name='mlp-playground'></a> # # An MLP with 2 hidden layers applied to a set of 2d points from 2 classes, shown in the top left corner. 
The visualizations associated with each hidden unit show the decision boundary at that part of the network. The final output is shown on the right. The input is $\mathbf x \in \mathbb R ^2$, the first layer activations are $\mathbf z _1 \in \mathbb R ^4$, the second layer activations are $\mathbf z _2 \in \mathbb R ^2$, and the final logit is $a_3 \in \mathbb R $, which is converted to a probability using the sigmoid function. This is a screenshot from the interactive demo at http://playground.tensorflow.org . #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') show_image("/pyprobml/book1/figures/images/Figure_13.3.png") # ## Figure 13.4:<a name='13.4'></a> <a name='mlpMnistStructure'></a> # # Structure of the MLP used for MNIST classification. Note that $100,480=(784+1) \times 128$, and $16,512 = (128+1) \times 128$. # To reproduce this figure, click the open in colab button: <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/mlp_mnist_tf.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') show_image("/pyprobml/book1/figures/images/Figure_13.4.png") # ## Figure 13.5:<a name='13.5'></a> <a name='mlpMnist'></a> # # Results of applying an MLP (with 2 hidden layers with 128 units and 1 output layer with 10 units) to some MNIST images (cherry picked to include some errors). Red is incorrect, blue is correct. (a) After 1 epoch of training. (b) After 2 epochs. # To reproduce this figure, click the open in colab button: <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/mlp_mnist_tf.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." 
# one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') show_image("/pyprobml/book1/figures/images/Figure_13.5_A.png") show_image("/pyprobml/book1/figures/images/Figure_13.5_B.png") # ## Figure 13.6:<a name='13.6'></a> <a name='mlpImdbStructure'></a> # # Structure of the MLP used for IMDB review classification. We use a vocabulary size of $V=1000$, an embedding size of $E=16$, and a hidden layer of size $16$. The embedding matrix $\mathbf W _1$ has size $10,000 \times 16$, the hidden layer (labeled ``dense'') has a weight matrix $\mathbf W _2$ of size $16 \times 16$ and bias $\mathbf b _2$ of size 16 (note that $16 \times 16 + 16 = 272$), and the final layer (labeled ``dense\_1'') has a weight vector $\mathbf w _3$ of size $16$ and a bias $b_3$ of size 1. The global average pooling layer has no free parameters. # To reproduce this figure, click the open in colab button: <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/mlp_imdb_tf.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') show_image("/pyprobml/book1/figures/images/Figure_13.6.png") # ## Figure 13.7:<a name='13.7'></a> <a name='twoHeaded'></a> # # Illustration of an MLP with a shared ``backbone'' and two output ``heads'', one for predicting the mean and one for predicting the variance. From https://brendanhasz.github.io/2019/07/23/bayesian-density-net.html . Used with kind permission of <NAME>. #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') show_image("/pyprobml/book1/figures/images/Figure_13.7.png") # ## Figure 13.8:<a name='13.8'></a> <a name='twoHeadedSineWaves'></a> # # Illustration of predictions from an MLP fit using MLE to a 1d regression dataset with growing noise. (a) Output variance is input-dependent, as in \cref fig:twoHeaded . (b) Mean is computed using same model as in (a), but output variance is treated as a fixed parameter $\sigma ^2$, which is estimated by MLE after training, as in \cref sec:linregSigmaMLE . 
# To reproduce this figure, click the open in colab button: <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/mlp_1d_regression_hetero_tfp.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') show_image("/pyprobml/book1/figures/images/Figure_13.8_A.png") show_image("/pyprobml/book1/figures/images/Figure_13.8_B.png") # ## Figure 13.9:<a name='13.9'></a> <a name='reluPolytope2d'></a> # # A decomposition of $\mathbb R ^2$ into a finite set of linear decision regions produced by an MLP with \ensuremath \mathrm ReLU \xspace activations with (a) one hidden layer of 25 hidden units and (b) two hidden layers. From Figure 1 of <a href='#Hein2019'>[MMJ19]</a> . Used with kind permission of <NAME>. #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') show_image("/pyprobml/book1/figures/images/Figure_13.9_A.png") show_image("/pyprobml/book1/figures/images/Figure_13.9_B.png") # ## Figure 13.10:<a name='13.10'></a> <a name='axons'></a> # # Illustration of two neurons connected together in a ``circuit''. The output axon of the left neuron makes a synaptic connection with the dendrites of the cell on the right Electrical charges, in the form of ion flows, allow the cells to communicate. From https://en.wikipedia.org/wiki/Neuron . Used with kind permission of Wikipedia author BruceBlaus. #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') show_image("/pyprobml/book1/figures/images/Figure_13.10.png") # ## Figure 13.11:<a name='13.11'></a> <a name='DNN-size-vs-time'></a> # # Plot of neural network sizes over time. 
Models 1, 2, 3 and 4 correspond to the perceptron <a href='#Rosenblatt58'>[Ros58]</a> , the adaptive linear unit <a href='#Widrow1960'>[BH60]</a> the neocognitron <a href='#Fukushima1980'>[K80]</a> , and the first MLP trained by backprop <a href='#Rumelhart86'>[RHW86]</a> . From Figure 1.11 of <a href='#GoodfellowBook'>[GBC16]</a> . Used with kind permission of Ian Goodfellow. #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') show_image("/pyprobml/book1/figures/images/Figure_13.11.png") # ## Figure 13.12:<a name='13.12'></a> <a name='feedforward-graph'></a> # # A simple linear-chain feedforward model with 4 layers. Here $\mathbf x $ is the input and $\mathbf o $ is the output. From <a href='#Blondel2020'>[Mat20]</a> . #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') show_image("/pyprobml/book1/figures/images/Figure_13.12.png") # ## Figure 13.13:<a name='13.13'></a> <a name='computation-graph'></a> # # An example of a computation graph with 2 (scalar) inputs and 1 (scalar) output. From <a href='#Blondel2020'>[Mat20]</a> . #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') show_image("/pyprobml/book1/figures/images/Figure_13.13.png") # ## Figure 13.14:<a name='13.14'></a> <a name='backwardsDiff'></a> # # Notation for automatic differentiation at node $j$ in a computation graph. From <a href='#Blondel2020'>[Mat20]</a> . #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." 
# one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') show_image("/pyprobml/book1/figures/images/Figure_13.14.png") # ## Figure 13.15:<a name='13.15'></a> <a name='compGraphD2l'></a> # # Computation graph for an MLP with input $\mathbf x $, hidden layer $\mathbf h $, output $\mathbf o $, loss function $L=\ell (\mathbf o ,y)$, an $\ell _2$ regularizer $s$ on the weights, and total loss $J=L+s$. From Figure 4.7.1 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of <NAME>. #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') show_image("/pyprobml/book1/figures/images/Figure_13.15.png") # ## Figure 13.16:<a name='13.16'></a> <a name='activationWithGrad'></a> # # (a) Some popular activation functions. (b) Plot of their gradients. # To reproduce this figure, click the open in colab button: <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/activation_fun_deriv_pytorch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') show_image("/pyprobml/book1/figures/images/Figure_13.16_A.png") show_image("/pyprobml/book1/figures/images/Figure_13.16_B.png") # ## Figure 13.17:<a name='13.17'></a> <a name='residualVanishing'></a> # # (a) Illustration of a residual block. (b) Illustration of why adding residual connections can help when training a very deep model. Adapted from Figure 14.16 of <a href='#Geron2019'>[Aur19]</a> . #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." 
# one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') show_image("/pyprobml/book1/figures/images/Figure_13.17_A.png") show_image("/pyprobml/book1/figures/images/Figure_13.17_B.png") # ## Figure 13.18:<a name='13.18'></a> <a name='mlpHyper'></a> # # The effects of changing the hyperparameters on an MLP with one hidden layer. (a) Random functions sampled from a Gaussian prior with hyperparameters $\alpha _ 1 =5$, $\beta _1=1$, $\alpha _2=1$, $\beta _2=1$. (b) Increasing $\alpha _ 1 $ by factor of 5. (c) Increasing $\beta _ 1 $ by factor of 5. (d) Inreasing $\alpha _ 2 $ by factor of 5. # Figure(s) generated by [mlpPriorsDemo2.py](https://github.com/probml/pyprobml/blob/master/scripts/mlpPriorsDemo2.py) #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') google.colab.files.view("./mlpPriorsDemo2.py") # %run mlpPriorsDemo2.py # ## Figure 13.19:<a name='13.19'></a> <a name='multiGPU'></a> # # Calculation of minibatch stochastic gradient using data parallelism and two GPUs. From Figure 12.5.2 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of <NAME>. #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') show_image("/pyprobml/book1/figures/images/Figure_13.19.png") # ## Figure 13.20:<a name='13.20'></a> <a name='sparseNnet'></a> # # (a) A deep but sparse neural network. The connections are pruned using $\ell _1$ regularization. At each level, nodes numbered 0 are clamped to 1, so their outgoing weights correspond to the offset/bias terms. (b) Predictions made by the model on the training set. 
# To reproduce this figure, click the open in colab button: <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/sparse_mlp.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') show_image("/pyprobml/book1/figures/images/Figure_13.20_A.png") show_image("/pyprobml/book1/figures/images/Figure_13.20_B.png") # ## Figure 13.21:<a name='13.21'></a> <a name='dropout'></a> # # Illustration of dropout. (a) A standard neural net with 2 hidden layers. (b) An example of a thinned net produced by applying dropout with $p_0=0.5$. Units that have been dropped out are marked with an x. From Figure 1 of <a href='#Srivastava2014'>[Nit+14]</a> . Used with kind permission of <NAME>. #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') show_image("/pyprobml/book1/figures/images/Figure_13.21_A.png") show_image("/pyprobml/book1/figures/images/Figure_13.21_B.png") # ## Figure 13.22:<a name='13.22'></a> <a name='flatMinima'></a> # # Flat vs sharp minima. From Figures 1 and 2 of <a href='#Hochreiter1997'>[SJ97]</a> . Used with kind permission of <NAME>. #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') show_image("/pyprobml/book1/figures/images/Figure_13.22.png") # ## Figure 13.23:<a name='13.23'></a> <a name='sgd-minima-unstable'></a> # # Each curve shows how the loss varies across parameter values for a given minibatch. (a) A stable local minimum. (b) An unstable local minimum. From https://www.inference.vc/notes-on-the-origin-of-implicit-regularization-in-stochastic-gradient-descent/ . 
Used with kind permission of <NAME> #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') show_image("/pyprobml/book1/figures/images/Figure_13.23_A.png") show_image("/pyprobml/book1/figures/images/Figure_13.23_B.png") # ## Figure 13.24:<a name='13.24'></a> <a name='xorRBF'></a> # # (a) xor truth table. (b) Fitting a linear logistic regression classifier using degree 10 polynomial expansion. (c) Same model, but using an RBF kernel with centroids specified by the 4 black crosses. # Figure(s) generated by [logregXorDemo.py](https://github.com/probml/pyprobml/blob/master/scripts/logregXorDemo.py) #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') google.colab.files.view("./logregXorDemo.py") # %run logregXorDemo.py # ## Figure 13.25:<a name='13.25'></a> <a name='rbfDemo'></a> # # Linear regression using 10 equally spaced RBF basis functions in 1d. Left column: fitted function. Middle column: basis functions evaluated on a grid. Right column: design matrix. Top to bottom we show different bandwidths for the kernel function: $\sigma =0.5, 10, 50$. # Figure(s) generated by [linregRbfDemo.py](https://github.com/probml/pyprobml/blob/master/scripts/linregRbfDemo.py) #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') google.colab.files.view("./linregRbfDemo.py") # %run linregRbfDemo.py # ## Figure 13.26:<a name='13.26'></a> <a name='mixexp'></a> # # (a) Some data from a one-to-many function. (b) The responsibilities of each expert for the input domain. (c) Prediction of each expert. (d) Overeall prediction. Mean is red cross, mode is black square. Adapted from Figures 5.20 and 5.21 of <a href='#BishopBook'>[Bis06]</a> . 
# Figure(s) generated by [mixexpDemoOneToMany.m](https://github.com/probml/pmtk3/blob/master/demos/mixexpDemoOneToMany.m) #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') show_image("/pyprobml/book1/figures/images/Figure_13.26.png") show_image("/pyprobml/book1/figures/images/Figure_13.26.png") show_image("/pyprobml/book1/figures/images/Figure_13.26.png") show_image("/pyprobml/book1/figures/images/Figure_13.26.png") # ## Figure 13.27:<a name='13.27'></a> <a name='deepMOE'></a> # # Deep MOE with $m$ experts, represented as a neural network. From Figure 1 of <a href='#Chazan2017'>[SJS17]</a> . Used with kind permission of <NAME>. #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') show_image("/pyprobml/book1/figures/images/Figure_13.27.png") # ## Figure 13.28:<a name='13.28'></a> <a name='HMENN'></a> # # A 2-level hierarchical mixture of experts as a neural network. The top gating network chooses between the left and right expert, shown by the large boxes; the left and right experts themselves choose between their left and right sub-experts. #@title Click me to run setup { display-mode: "form" } try: if PYPROBML_SETUP_ALREADY_RUN: print('skipping setup') except: PYPROBML_SETUP_ALREADY_RUN = True print('running setup...') # !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts import pyprobml_utils as pml import colab_utils import os os.environ["PYPROBML"] = ".." # one above current scripts directory import google.colab from google.colab.patches import cv2_imshow # %reload_ext autoreload # %autoreload 2 def show_image(img_path,size=None,ratio=None): img = colab_utils.image_resize(img_path, size) cv2_imshow(img) print('finished!') show_image("/pyprobml/book1/figures/images/Figure_13.28.png") # ## References: # <a name='Geron2019'>[Aur19]</a> <NAME> "Hands-On Machine Learning with Scikit-Learn and TensorFlow: Concepts, Tools, and Techniques for BuildingIntelligent Systems (2nd edition)". (2019). # # <a name='Widrow1960'>[BH60]</a> <NAME> and <NAME>. "Adaptive Switching Circuits". (1960). # # <a name='BishopBook'>[Bis06]</a> C. Bishop "Pattern recognition and machine learning". (2006). # # <a name='GoodfellowBook'>[GBC16]</a> I. Goodfellow, <NAME> and <NAME>. "Deep Learning". (2016). 
#
# <a name='Fukushima1980'>[K80]</a> <NAME> "Neocognitron: a self-organizing neural network model for a mechanism of pattern recognition unaffected by shift in position". In: Biol. Cybern. (1980).
#
# <a name='Hein2019'>[MMJ19]</a> <NAME>, <NAME> and <NAME>. "Why ReLU networks yield high-confidence predictions far away from the training data and how to mitigate the problem". (2019).
#
# <a name='Blondel2020'>[Mat20]</a> <NAME> "Automatic differentiation". (2020).
#
# <a name='Srivastava2014'>[Nit+14]</a> <NAME>, <NAME>, <NAME>, <NAME> and <NAME>. "Dropout: A Simple Way to Prevent Neural Networks from Overfitting". In: JMLR (2014).
#
# <a name='Rumelhart86'>[RHW86]</a> <NAME>, <NAME> and <NAME>. "Learning internal representations by error propagation". (1986).
#
# <a name='Rosenblatt58'>[Ros58]</a> <NAME> "The Perceptron: A Probabilistic Model for Information Storage and Organization in the Brain". In: Psychological Review (1958).
#
# <a name='Hochreiter1997'>[SJ97]</a> <NAME> and <NAME>. "Flat minima". In: Neural Comput. (1997).
#
# <a name='Chazan2017'>[SJS17]</a> <NAME>, <NAME> and <NAME>. "Speech Enhancement using a Deep Mixture of Experts". abs/1703.09302 (2017). arXiv: 1703.09302
#
# <a name='dive'>[Zha+20]</a> <NAME>, <NAME>, <NAME> and <NAME>. "Dive into deep learning". (2020).
#
#
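# ## Supplementary sketch: reverse-mode differentiation on a small graph
#
# The cell below is a supplementary sketch (it is not one of the book's figure scripts): it walks
# the kind of computation graph shown in Figures 13.12-13.15 by hand for a tiny two-layer MLP with
# an $\ell _2$ regularizer, computing the total objective $J = L + s$ forward and then sweeping
# backwards through the graph. All shapes, weights and the regularization strength are made-up
# values chosen only for illustration, and the analytic gradient is checked against a finite
# difference.

# +
import numpy as np

rng = np.random.default_rng(0)
x, y = rng.normal(size=(3, 1)), rng.normal(size=(2, 1))
W1, W2 = rng.normal(size=(4, 3)), rng.normal(size=(2, 4))
lam = 0.1

def forward(W1, W2):
    z1 = W1 @ x
    h = np.maximum(z1, 0)                      # ReLU hidden layer
    o = W2 @ h                                 # linear output
    L = 0.5 * np.sum((o - y) ** 2)             # squared-error loss
    s = 0.5 * lam * (np.sum(W1 ** 2) + np.sum(W2 ** 2))
    return z1, h, o, L + s                     # total objective J = L + s

# forward pass, then a single reverse sweep through the graph
z1, h, o, J = forward(W1, W2)
dJ_do = o - y                                  # gradient at the output node
dJ_dW2 = dJ_do @ h.T + lam * W2                # leaf W2: loss path plus regularizer path
dJ_dh = W2.T @ dJ_do                           # gradient at the hidden node
dJ_dz1 = dJ_dh * (z1 > 0)                      # back through the ReLU
dJ_dW1 = dJ_dz1 @ x.T + lam * W1               # leaf W1

# finite-difference check on one entry of W1
eps = 1e-6
W1p = W1.copy()
W1p[0, 0] += eps
numerical = (forward(W1p, W2)[3] - J) / eps
print(dJ_dW1[0, 0], numerical)                 # the two values should agree closely
# -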
book1/figures/chapter13_neural_networks_for_structured_data_figures.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import sqlite3
import sys

# +
con = None
try:
    con = sqlite3.connect('data.db')
    cur = con.cursor()
    cur.execute('SELECT SQLITE_VERSION()')
    data = cur.fetchone()[0]
    print("SQLite version: {}".format(data))
except sqlite3.Error as e:
    print("Error {}:".format(e.args[0]))
    con.rollback()
    sys.exit(1)
finally:
    if con:
        con.close()

# +
con = sqlite3.connect('data.db')

with con:
    cur = con.cursor()
    cur.execute('create table book(id Int, title Text, author Text)')

    # Insert elements into the table
    cur.execute('insert into book values(1, "Eleanor y Park", "Rainbow Rowell")')
    cur.execute('insert into book values(2, "La insoportable levedad del ser", "Milan Kundera")')
    cur.execute('insert into book values(3, "1984", "George Orwell")')
    cur.execute('insert into book values(4, "Rebelión en la granja", "George Orwell")')
    cur.execute('insert into book values(5, "Amor líquido", "Zygmunt Bauman")')

# +
con = sqlite3.connect('data.db')

books = (
    (6, "Eleanor y Park", "Rainbow Rowell"),
    (7, "La insoportable levedad del ser", "Milan Kundera"),
    (8, "1984", "George Orwell"),
    (9, "Rebelión en la granja", "George Orwell"),
    (10, "Amor líquido", "Zygmunt Bauman")
)

with con:
    cur = con.cursor()
    cur.execute('drop table if exists book')
    cur.execute('create table book(id Int, title Text, author Text)')
    cur.executemany('insert into book values(?, ?, ?)', books)

# +
con = sqlite3.connect('data.db')

with con:
    cur = con.cursor()
    cur.execute("select * from book")
    rows = cur.fetchall()
    for row in rows:
        print(row)
# -

with con:
    cur = con.cursor()
    cur.execute("select * from book")
    while True:
        row = cur.fetchone()
        if row is None:
            break
        print("{} -- {} -- {}".format(row[0], row[1], row[2]))

with con:
    con.row_factory = sqlite3.Row
    cur = con.cursor()
    cur.execute("select * from book")
    rows = cur.fetchall()
    for row in rows:
        print("{} -- {} -- {}".format(row["id"], row["title"], row["author"]))

# +
with con:
    con.row_factory = sqlite3.Row
    cur = con.cursor()
    cur.execute("insert into book values(6, 'some title', 'without author')")
    cur.execute("select * from book")
    rows = cur.fetchall()
    for row in rows:
        print("{} -- {} -- {}".format(row["id"], row["title"], row["author"]))

print("===============================")

with con:
    book_id = 6
    title = "De la brevedad de la vida"
    author = "Séneca"
    con.row_factory = sqlite3.Row
    cur = con.cursor()
    cur.execute("update book set title=?, author=? where id=?", (title, author, book_id))
    print("Row counts: {}".format(cur.rowcount))
    cur.execute("select * from book")
    rows = cur.fetchall()
    for row in rows:
        print("{} -- {} -- {}".format(row["id"], row["title"], row["author"]))
# -

with con:
    params = {"id": 4}
    cur = con.cursor()
    cur.execute("select title from book where id=:id", params)
    row = cur.fetchone()
    print("Title for book with id={} is: {}".format(params["id"], row[0]))
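# The cell below is a supplementary sketch (not part of the original examples): it illustrates
# that using the connection as a context manager commits the transaction on success and rolls it
# back when an exception escapes the block. The id value 99 and the "simulated failure" are
# hypothetical, used only to trigger a rollback against the `book` table created above.

# +
con = sqlite3.connect('data.db')

try:
    with con:
        cur = con.cursor()
        cur.execute("insert into book values(99, 'temporary title', 'temporary author')")
        # raising here makes the context manager roll the transaction back,
        # so the row inserted above never becomes visible
        raise RuntimeError("simulated failure")
except RuntimeError:
    pass

with con:
    cur = con.cursor()
    cur.execute("select count(*) from book where id=?", (99,))
    print("rows with id=99: {}".format(cur.fetchone()[0]))  # expected: 0, because of the rollback

con.close()
# -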
book 2/slite3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/", "height": 128} colab_type="code" executionInfo={"elapsed": 43986, "status": "ok", "timestamp": 1593377849423, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgzE51W_GUZ7p7e7vvuyiuD-4fVF9W7cz8suYA=s64", "userId": "10371228985714256553"}, "user_tz": -330} id="6OeMQj5lc3tE" outputId="6b5448c7-735f-41dd-a310-b773e4e260f7" from google.colab import drive drive.mount('/content/drive') # + #import libraries # + colab={} colab_type="code" executionInfo={"elapsed": 6985, "status": "ok", "timestamp": 1593378480076, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgzE51W_GUZ7p7e7vvuyiuD-4fVF9W7cz8suYA=s64", "userId": "10371228985714256553"}, "user_tz": -330} id="k2Y4C2-mQfLZ" import numpy as np import cv2 import matplotlib.pyplot as plt from math import sqrt from helper_functions import * import sys # + ## rectangle 530 289 657 338 592 412 442 348 ## he,vr pt 568 374 585 387 602 378 ## machine centroid 508 343 # - # ### harrcascade import matplotlib.pyplot as plt # ## person+centroid + distance measurement-- without birdeyeview # + import cv2 import math face_cascade=cv2.CascadeClassifier('haarcascade_fullbody.xml') frame=cv2.imread('walk_final.png') gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray, 1.1, 4) print(faces) p1 = 508 # image point p2 = 343 cv2.circle(frame, (p1, p2), 5, ((139,0,0)), -1) for face in faces: (x, y, w, h) = face (x1, y1, w1, h1) = faces[0] cx1, cy1 = int(x + w / 2), int(y + h / 2) cx2, cy2 = int(x1 + w1 / 2), int(y1 + h1 / 2) cv2.circle(frame, (cx1, cy1), 2, (0, 245, 30), -1) cv2.circle(frame, (cx2, cy2), 2, (0, 245, 30), -1) # dist=math.sqrt(((cx1-cx2)**2)+(cy2-cy1)**2)) cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 3) dis = (math.sqrt(((cx2 - cx1) ** 2) + ((cy2 - cy1) ** 2)) ** 0.5) dis = int(dis) print("distance is ",dis) cv2.line(frame, (cx1, cy1), (p1,p2 ), (0, 234, 76), 2) (x,y,w,h)=face cv2.putText(frame, 'distance={}'.format(dis), (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.6, (139, 0, 0)) ''' for (x, y, w, h) in faces: cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 3) print(w) print(h) # put text and highlight the center #x=xmin y=ymax cX = int(x+(w/2.0)) cY = int(y +(h/ 2.0)) # put text and highlight the center cv2.circle(frame, (cX, cY), 10, (205, 0, 255), -1) ''' cv2.imshow('img',frame) cv2.waitKey(0) cv2.destroyAllWindows() # - # ## person + centroid detection-video # + import cv2 import math face_cascade=cv2.CascadeClassifier('haarcascade_fullbody.xml') cap=cv2.VideoCapture('walking.avi') fourcc = cv2.VideoWriter_fourcc(*'XVID') out = cv2.VideoWriter('output12345.avi',fourcc, 20.0, (640,480)) #gray = cv2.cvtColor(cap, cv2.COLOR_BGR2GRAY) while cap.isOpened(): ret,frame=cap.read() gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray, 1.1, 4) print(faces) for face in faces: (x, y, w, h) = face (x1, y1, w1, h1) = faces[0] cx1, cy1 = int(x + w / 2), int(y + h / 2) cx2, cy2 = int(x1 + w1 / 2), int(y1 + h1 / 2) # cv2.circle(frame,(cx1,cy1),2,(0,245,30),-1) # cv2.circle(frame, (cx2, cy2), 2, (0, 245, 30), -1) # dist=math.sqrt(((cx1-cx2)**2)+(cy2-cy1)**2)) cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 3) dis = 
(math.sqrt(((cx2 - cx1) ** 2) + ((cy2 - cy1) ** 2)) ** 0.5) dis = int(dis) print(dis) cv2.line(frame, (cx1, cy1), (cx2, cy2), (0, 234, 76), 5) cv2.putText(frame, 'distance={}'.format(dis), (10, 10), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 76)) out.write(frame) ''' for (x, y, w, h) in faces: cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 3) print(w) print(h) # put text and highlight the center #x=xmin y=ymax cX = int(x+(w/2.0)) cY = int(y +(h/ 2.0)) # put text and highlight the center cv2.circle(frame, (cX, cY), 10, (205, 0, 255), -1) ''' cv2.imshow('frame',frame) if cv2.waitKey(1) & 0xFF == ord('q'): break cap.release() out.release() cv2.destroyAllWindows() # - # + ####### video save & b&w import numpy as np import cv2 import time cap = cv2.VideoCapture('walking.avi') start = time.time() while(cap.isOpened()): ret, frame = cap.read() start = time.time() # gray = cv2.cvtColor(frame, cv2.C/OLOR_BGR2GRAY) end = time.time() seconds = end - start print ("Time taken : {0} seconds".format(seconds)) fps = frame / seconds fps_text="FPS".format(fps) cv2.putText(frame,fps_text, (5, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255),1) cv2.imshow('frame',frame) if cv2.waitKey(1) & 0xFF == ord('q'): break cap.release() cv2.destroyAllWindows() # + #fps start = time.time() 31 32 # Grab a few frames 33 for i in range(0, num_frames) : 34 ret, frame = video.read() 35 36 # End time 37 end = time.time() 38 39 # Time elapsed 40 seconds = end - start 41 print ("Time taken : {0} seconds".format(seconds)) 42 43 # Calculate frames per second 44 fps = num_frames / seconds 45 print("Estimated frames per second : {0}".format(fps)) 46 47 # Release video 48 video.release() # + ## video saving import numpy as np import cv2 cap = cv2.VideoCapture(0) fps_start_time=datetime.datetime.now() fps=0 total_frames=0 # Define the codec and create VideoWriter object fourcc = cv2.VideoWriter_fourcc(*'XVID') out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480)) while(cap.isOpened()): ret, frame = cap.read() if ret==True: frame = cv2.flip(frame,0) total_frames=total_frames+1 fps_end_time=datetime.datetime.now() time_diff=fps_start_time-fps_end_time if time_diff.seconds==0: fps=0.0 else: fps=(total_frames/time_diff.seconds) fps_text="FPS:{:.2f}".format(fps) cv2.putText(frame,fps_text, (5, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255),1) # write the flipped frame out.write(frame) cv2.imshow('frame',frame) if cv2.waitKey(1) & 0xFF == ord('q'): break else: break # Release everything if job is finished cap.release() out.release() cv2.destroyAllWindows() # - # + colab={} colab_type="code" executionInfo={"elapsed": 2933, "status": "ok", "timestamp": 1593378613419, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgzE51W_GUZ7p7e7vvuyiuD-4fVF9W7cz8suYA=s64", "userId": "10371228985714256553"}, "user_tz": -330} id="_niN4lz_NDvA" # video_path = 'data/Student_video.mp4' # frame_path = 'data/Student_video_frame_{}.jpg' # frame_number = [100,200,300,400,500] # video = cv2.VideoCapture(video_path) # frame_count = 0 # while True: # ret,frame = video.read() # if not ret: # break # frame_count+=1 # if frame_count in frame_number: # print("Frame number : ",frame_count, "Saved") # cv2.imwrite(frame_path.format(frame_count),frame) # video.release() # - # # We use this mouce click events function to get 4 points in the image # + #We use this mouce click events function to get 4 points in the image def getting_points(event,x,y,flags,param): if event == cv2.EVENT_LBUTTONDOWN: 
cv2.circle(img,(x,y),5,(255,0,0),-1) print(x,y) img = cv2.imread("walk_final.png") cv2.namedWindow('image') cv2.setMouseCallback('image',getting_points) while(1): cv2.imshow('image',img) if cv2.waitKey(20) & 0xFF == 27: #press esc for exiting the program break cv2.destroyAllWindows() # - # # load image # + frame=cv2.imread("walk_final.png") original_image_RGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) original_image_RGB_copy = original_image_RGB.copy() cv2.imshow('img',frame) cv2.waitKey(0) cv2.destroyAllWindows() # - '''256 91 lt 397 134 rt 281 272 rb 109 183 lb''' 556 372 697 420 541 533 423 419 #latest mouse_pts= np.float32([[530,289], [657,338], [592,412], [442,348]]) # + import matplotlib.pyplot as plt for point in mouse_pts: cv2.circle(original_image_RGB_copy, tuple(point), 5, (255, 0, 0), -1) points = mouse_pts.reshape((-1,1,2)).astype(np.int32) cv2.polylines(original_image_RGB_copy, [points], True, (0,255,0), thickness=4) plt.figure(figsize=(12, 12)) plt.imshow(original_image_RGB_copy) plt.show() # + ## rectangle 530 289 657 338 592 412 442 348 ## he,vr pt 568 374 585 387 602 378 ## machine centroid 508 343 # - # # perspectiveTransform and warped_pt # + import cv2 import numpy as np (H, W) = frame.shape[:2] points = mouse_pts # - pts_1=[[572,380],[590 ,391],[609 ,380]] pts=np.float32([pts_1]) # + src = np.float32(np.array(points[:4])) dst = np.float32([[0, H], [W, H], [W, 0], [0, 0]]) prespective_transform = cv2.getPerspectiveTransform(src, dst) #276 197 #317 212 #343 185 pts_1=[[568,374],[585,387],[602,378]] pts=np.float32([pts_1]) #pts = np.float32(np.array([points[3:7]])) warped_pt = cv2.perspectiveTransform(pts, prespective_transform)[0] # since bird eye view has property that all points are equidistant in horizontal and vertical direction. # distance_w and distance_h will give us 180 cm distance in both horizontal and vertical directions # (how many pixels will be there in 180cm length in horizontal and vertical direction of birds eye view distance_w = np.sqrt((warped_pt[0][0] - warped_pt[1][0]) ** 2 + (warped_pt[0][1] - warped_pt[1][1]) ** 2) distance_h = np.sqrt((warped_pt[0][0] - warped_pt[2][0]) ** 2 + (warped_pt[0][1] - warped_pt[2][1]) ** 2) h_ratio=100/distance_h w_ratio=50/distance_w #tf.get_logger().setLevel("ERROR") # - prespective_transform print(pts) warped_pt dst_size=(1800,900) dst = dst * np.float32(dst_size) # + warped = cv2.warpPerspective(original_image_RGB_copy, prespective_transform, dst_size) plt.figure(figsize=(12, 12)) plt.imshow(warped) plt.show() # - # # image testing- birdeye view # + import cv2 import math face_cascade=cv2.CascadeClassifier('H:\CV projects\Basic_CV\haarcascade_fullbody.xml') frame=cv2.imread('walk_final.png') gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray, 1.1, 4) print(faces) #681. 
453 p1 = 508 # image pont p2 = 343 #cv2.rectangle(frame,10,20, (255, 0, 0), 3) cv2.circle(frame, (p1, p2), 5, ((139,0,0)), -1) for face in faces: (x, y, w, h) = face (x1, y1, w1, h1) = faces[0] cx1, cy1 = int(x + w / 2), int(y + h / 2) cx2, cy2 = int(x1 + w1 / 2), int(y1 + h1 / 2) cv2.circle(frame, (cx1, cy1), 2, (0, 245, 30), -1) cv2.circle(frame, (cx2, cy2), 2, (0, 245, 30), -1) # dist=math.sqrt(((cx1-cx2)**2)+(cy2-cy1)**2)) cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 3) cv2.circle(frame,(pts_1[0][0],pts_1[0][1]), 4, (0, 255, 255), -1) cv2.circle(frame,(pts_1[1][0],pts_1[1][1]), 4, (0, 255, 255), -1) cv2.circle(frame,(pts_1[2][0],pts_1[2][1]),4 , (0, 255, 255), -1) person_centroid=[[cx1,cy1]] person_pts=np.float32([person_centroid]) warped_person_centre_pt=cv2.perspectiveTransform(person_pts, prespective_transform)[0] print('person_centroid',warped_person_centre_pt) machine_centroid=[[p1,p2]] machine_pt=np.float32([machine_centroid]) warped_machine_centre_pt=cv2.perspectiveTransform(machine_pt,prespective_transform)[0] wrapped_x1=warped_machine_centre_pt[0][0] wrapped_y1=warped_machine_centre_pt[0][1] wrapped_x2=warped_person_centre_pt[0][0] wrapped_y2=warped_person_centre_pt[0][1] wrapped_x1=wrapped_x1*w_ratio wrapped_x2=wrapped_x2*w_ratio wrapped_y1=wrapped_y1*h_ratio wrapped_y2=wrapped_y2*h_ratio dis = (math.sqrt(((wrapped_x1 -wrapped_x2 ) ** 2) + (( wrapped_y1-wrapped_y2 ) ** 2)) ** 0.5) dis = int(dis) print("distance is ",dis) cv2.line(frame, (cx1, cy1), (p1,p2), (0, 234, 76), 2) (x,y,w,h)=face cv2.putText(frame, '{}CM'.format(dis), (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.4, (255,255, 0)) ''' for (x, y, w, h) in faces: cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 3) print(w) print(h) # put text and highlight the center #x=xmin y=ymax cX = int(x+(w/2.0)) cY = int(y +(h/ 2.0)) # put text and highlight the center cv2.circle(frame, (cX, cY), 10, (205, 0, 255), -1) ''' cv2.imshow('img',frame) cv2.waitKey(0) cv2.destroyAllWindows() # - # ### video testing----- # # + import cv2 import math import datetime import numpy face_cascade=cv2.CascadeClassifier('haarcascade_fullbody.xml') cap=cv2.VideoCapture('walking.avi') ### h,w of video frame_width = int(cap.get(3)) frame_height = int(cap.get(4)) size = (frame_width, frame_height) ## save video fourcc = cv2.VideoWriter_fourcc(*'XVID') out = cv2.VideoWriter('akash_final_dist.avi',fourcc, 20.0, size) #fps fps_start_time = datetime.datetime.now() fps = 0 total_frames = 0 while(cap.isOpened()): ret,frame=cap.read() frame = imutils.resize(frame) total_frames = total_frames + 1 fps_end_time = datetime.datetime.now() time_diff = fps_end_time - fps_start_time if time_diff.seconds == 0: fps = 0.0 else: fps = (total_frames / time_diff.seconds) fps_text = "FPS: {:.2f}".format(fps) cv2.putText(frame, fps_text, (5, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray, 1.1, 4) print(faces) p1 = 508 # image pont p2 = 343 cv2.circle(frame, (p1, p2), 3, ((139,0,0)), -1) for face in faces: (x, y, w, h) = face (x1, y1, w1, h1) = faces[0] cx1, cy1 = int(x + w / 2), int(y + h / 2) cx2, cy2 = int(x1 + w1 / 2), int(y1 + h1 / 2) cv2.circle(frame, (cx1, cy1), 2, (0, 245, 30), -1) cv2.circle(frame, (cx2, cy2), 2, (0, 245, 30), -1) # dist=math.sqrt(((cx1-cx2)**2)+(cy2-cy1)**2)) cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 3) cv2.circle(frame,(pts_1[0][0],pts_1[0][1]), 5, (0, 255, 255), -1) cv2.circle(frame,(pts_1[1][0],pts_1[1][1]), 5, (0, 255, 
255), -1) cv2.circle(frame,(pts_1[2][0],pts_1[2][1]), 5, (0, 255, 255), -1) person_centroid=[[cx1,cy1]] person_pts=np.float32([person_centroid]) warped_person_centre_pt=cv2.perspectiveTransform(person_pts, prespective_transform)[0] print('person_centroid',warped_person_centre_pt) machine_centroid=[[p1,p2]] machine_pt=np.float32([machine_centroid]) warped_machine_centre_pt=cv2.perspectiveTransform(machine_pt,prespective_transform)[0] wrapped_x1=warped_machine_centre_pt[0][0] wrapped_y1=warped_machine_centre_pt[0][1] wrapped_x2=warped_person_centre_pt[0][0] wrapped_y2=warped_person_centre_pt[0][1] wrapped_x1=wrapped_x1*w_ratio wrapped_x2=wrapped_x2*w_ratio wrapped_y1=wrapped_y1*h_ratio wrapped_y2=wrapped_y2*h_ratio dis = (math.sqrt(((wrapped_x1 -wrapped_x2 ) ** 2) + (( wrapped_y1-wrapped_y2 ) ** 2)) ** 0.5) dis = int(dis) print("distance is ",dis) cv2.line(frame, (cx1, cy1), (p1,p2), (0, 234, 76), 2) (x,y,w,h)=face cv2.putText(frame, '{}CM'.format(dis), (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.6, (255,255, 0)) out.write(frame) cv2.imshow('frame',frame) if cv2.waitKey(1) & 0xFF == ord('q'): break cap.release() out.release() cv2.destroyAllWindows() # - # + import cv2 import datetime import imutils def main(): cap = cv2.VideoCapture('walking.avi') fps_start_time = datetime.datetime.now() fps = 0 total_frames = 0 while True: ret, frame = cap.read() frame = imutils.resize(frame, width=400) total_frames = total_frames + 1 fps_end_time = datetime.datetime.now() time_diff = fps_end_time - fps_start_time if time_diff.seconds == 0: fps = 0.0 else: fps = (total_frames / time_diff.seconds) fps_text = "FPS: {:.2f}".format(fps) cv2.putText(frame, fps_text, (5, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1) cv2.imshow("Application", frame) key = cv2.waitKey(1) if key == ord('q'): break cv2.destroyAllWindows() main() # - # + ## FPS # + import cv2 import numpy import datetime import math import imutils face_cascade=cv2.CascadeClassifier('haarcascade_fullbody.xml') cap=cv2.VideoCapture('walking.avi') fourcc = cv2.VideoWriter_fourcc(*'XVID') out = cv2.VideoWriter('final_ad_output.avi',fourcc, 20.0, (640,480)) #gray = cv2.cvtColor(cap, cv2.COLOR_BGR2GRAY) fps_start_time=datetime.datetime.now() fps=0 total_frames=0 while (cap.isOpened()): ret,frame=cap.read() if ret == True: # frame=imutils.resize(frame,width=500) total_frames=total_frames+1 fps_end_time=datetime.datetime.now() time_diff=fps_start_time-fps_end_time if time_diff.seconds==0: fps=0.0 else: fps=(total_frames/time_diff.seconds) fps_text="FPS:{:.2f}".format(fps) cv2.putText(frame,fps_text, (5, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255),1) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray, 1.1, 4) print(faces) for face in faces: (x, y, w, h) = face (x1, y1, w1, h1) = faces[0] cx1, cy1 = int(x + w / 2), int(y + h / 2) cx2, cy2 = int(x1 + w1 / 2), int(y1 + h1 / 2) # cv2.circle(frame,(cx1,cy1),2,(0,245,30),-1) # cv2.circle(frame, (cx2, cy2), 2, (0, 245, 30), -1) # dist=math.sqrt(((cx1-cx2)**2)+(cy2-cy1)**2)) cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 3) dis = (math.sqrt(((cx2 - cx1) ** 2) + ((cy2 - cy1) ** 2)) ** 0.5) dis = int(dis) print(dis) cv2.line(frame, (cx1, cy1), (cx2, cy2), (0, 234, 76), 5) cv2.putText(frame, 'distance={}'.format(dis), (10, 10), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 76)) # cv2.imshow(frame) # out.write(frame) out.write(frame) cv2.imshow('frame',frame) if cv2.waitKey(1) & 0xFF == ord('q'): break ''' for (x, y, w, h) in faces: cv2.rectangle(frame, (x, y), (x + w, y 
+ h), (255, 0, 0), 3) print(w) print(h) # put text and highlight the center #x=xmin y=ymax cX = int(x+(w/2.0)) cY = int(y +(h/ 2.0)) # put text and highlight the center cv2.circle(frame, (cX, cY), 10, (205, 0, 255), -1) ''' cap.release() out.release() cv2.destroyAllWindows() # - # + ##fpn+video # + import cv2 import numpy import datetime import math import imutils face_cascade=cv2.CascadeClassifier('/content/drive/MyDrive/birdseye/haarcascade_fullbody.xml') cap=cv2.VideoCapture('/content/drive/MyDrive/birdseye/walking.avi') fourcc = cv2.VideoWriter_fourcc(*'XVID') out = cv2.VideoWriter('outputtttt.avi',fourcc, 20.0, (640,480)) frame_width=int(cap.get(3)) frame_height=int(cap.get(4)) fps_start_time=datetime.datetime.now() fps=0 total_frames=0 while(cap.isOpened()): ret,frame=cap.read() frame=imutils.resize(frame) total_frames=total_frames+1 fps_end_time=datetime.datetime.now() time_diff=fps_start_time-fps_end_time if time_diff.seconds==0: fps=0.0 else: fps=(total_frames/time_diff.seconds) fps_text="FPS:{:.2f}".format(fps) cv2.putText(frame,fps_text, (5, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255),1) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray, 1.1, 4) print(faces) for face in faces: (x, y, w, h) = face (x1, y1, w1, h1) = faces[0] cx1, cy1 = int(x + w / 2), int(y + h / 2) cx2, cy2 = int(x1 + w1 / 2), int(y1 + h1 / 2) # cv2.circle(frame,(cx1,cy1),2,(0,245,30),-1) # cv2.circle(frame, (cx2, cy2), 2, (0, 245, 30), -1) # dist=math.sqrt(((cx1-cx2)**2)+(cy2-cy1)**2)) cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 3) dis = (math.sqrt(((cx2 - cx1) ** 2) + ((cy2 - cy1) ** 2)) ** 0.5) dis = int(dis) print(dis) cv2.line(frame, (cx1, cy1), (cx2, cy2), (0, 234, 76), 5) cv2.putText(frame, 'distance={}'.format(dis), (10, 10), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 76)) out.write(frame) cv2_imshow(frame) ''' for (x, y, w, h) in faces: cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 3) print(w) print(h) # put text and highlight the center #x=xmin y=ymax cX = int(x+(w/2.0)) cY = int(y +(h/ 2.0)) # put text and highlight the center cv2.circle(frame, (cX, cY), 10, (205, 0, 255), -1) ''' cap.release() out.release() #cv2.destroyAllWindows() cv2.destroyAllWindows() # - # ### all rough work # + import cv2 import math face_cascade=cv2.CascadeClassifier('H:\CV projects\Basic_CV\haarcascade_fullbody.xml') frame=cv2.imread('H:\CV projects\Basic_CV\person.png') gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray, 1.1, 4) print(faces) p1 = 361 # image pont p2 = 237 cv2.circle(frame, (p1, p2), 5, ((139,0,0)), -1) for face in faces: (x, y, w, h) = face (x1, y1, w1, h1) = faces[0] cx1, cy1 = int(x + w / 2), int(y + h / 2) cx2, cy2 = int(x1 + w1 / 2), int(y1 + h1 / 2) cv2.circle(frame, (cx1, cy1), 2, (0, 245, 30), -1) cv2.circle(frame, (cx2, cy2), 2, (0, 245, 30), -1) # dist=math.sqrt(((cx1-cx2)**2)+(cy2-cy1)**2)) cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 3) cv2.circle(frame,(pts_1[0][0],pts_1[0][1]), 5, (0, 255, 255), -1) cv2.circle(frame,(pts_1[1][0],pts_1[1][1]), 5, (0, 255, 255), -1) cv2.circle(frame,(pts_1[2][0],pts_1[2][1]), 5, (0, 255, 255), -1) person_centroid=[[cx1,cy1]] person_pts=np.float32([person_centroid]) warped_person_centre_pt=cv2.perspectiveTransform(person_pts, prespective_transform)[0] print('person_centroid',warped_person_centre_pt) machine_centroid=[[p1,p2]] machine_pt=np.float32([machine_centroid]) 
warped_machine_centre_pt=cv2.perspectiveTransform(machine_pt,prespective_transform)[0] wrapped_x1=warped_machine_centre_pt[0][0] wrapped_y1=warped_machine_centre_pt[0][1] wrapped_x2=warped_person_centre_pt[0][0] wrapped_y2=warped_person_centre_pt[0][1] wrapped_x1=wrapped_x1*w_ratio wrapped_x2=wrapped_x2*w_ratio wrapped_y1=wrapped_y1*h_ratio wrapped_y2=wrapped_y2*h_ratio dis = (math.sqrt(((wrapped_x1 -wrapped_x2 ) ** 2) + (( wrapped_y1-wrapped_y2 ) ** 2)) ** 0.5) dis = int(dis) print("distance is ",dis) cv2.line(frame, (cx1, cy1), (p1,p2), (0, 234, 76), 2) (x,y,w,h)=face cv2.putText(frame, '{}CM'.format(dis), (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.6, (255,255, 0)) ''' for (x, y, w, h) in faces: cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 3) print(w) print(h) # put text and highlight the center #x=xmin y=ymax cX = int(x+(w/2.0)) cY = int(y +(h/ 2.0)) # put text and highlight the center cv2.circle(frame, (cX, cY), 10, (205, 0, 255), -1) ''' cv2.imshow('img',frame) cv2.waitKey(0) cv2.destroyAllWindows() # - # + person_centroid=[[cx1,cy1]] person_pts=np.float32([person_centroid]) warped_person_centre_pt=cv2.perspectiveTransform(person_pts, prespective_transform)[0] machine_centroid=[[p1,p2]] machine_pt=np.float32([machine_centroid]) warped_machine_centre_pt=cv2.perspectiveTransform(machine_pt,prespective_transform)[0] wrapped_x1=warped_machine_centre_pt[0][0] wrapped_y1=warped_machine_centre_pt[0][1] wrapped_x2=warped_person_centre_pt[0][0] wrapped_y2=warped_person_centre_pt[0][1] wraped_x1=wraped_x1*w_ratio wraped_x2=wraped_x2*w_ratio wraped_y1=wraped_y1*h_ratio wraped_y2=wraped_y2*h_ratio # - # + distance=np.sqrt((wrapped_x1-wrapped_x2)**2)+(wrapped_y1-wrapped_y2)**2) frame=cv2.putText(frame, ('{}CM'.format(np.round(distance))), (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.6, (139, 0, 0)) p1=(int(),int()) p2=(int(),int()) frame=cv2.rectangle(frame, p1,p2, (255, 0, 0), 3) # + person_centroid=[[cx1,cy1]] person_pts=np.float32([person_centroid]) warped_person_centre_pt=cv2.perspectiveTransform(person_pts, prespective_transform)[0] print('person_pts',person_pts) machine_centroid=(p1,p2) machine_pt=np.float32([machine_centroid]) warped_machine_centre_pt=cv2.perspectiveTransform(machine_pt,prespective_transform)[0] print('warped_machine_centre_pt',warped_machine_centre_pt) wrapped_x1=warped_machine_centre_pt[0][0] wrapped_y1=warped_machine_centre_pt[0][1] wrapped_x2=warped_person_centre_pt[0][0] wrapped_y2=warped_person_centre_pt[0][1] wrapped_x1=wrapped_x1*w_ratio wrapped_x2=wrapped_x2*w_ratio wrapped_y1=wrapped_y1*h_ratio wrapped_y2=wrapped_y2*h_ratio distance=math.sqrt((wrapped_x1 - wrapped_x2)**2+(wrapped_y1-wrapped_y2)**2) print(distance) cv2.putText(frame, ('{}CM'.format(np.round(distance))), (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.6, (139, 0, 0 #cv2.rectangle(frame,p1,p2,5, (255, 0, 0), 3) # - # + import cv2 import numpy as np weightsPath = "yolov3.weights" configPath = "yolov3.cfg" vid_path = "walking.avi" net_yl = cv2.dnn.readNetFromDarknet(configPath, weightsPath) ln = net_yl.getLayerNames() ln1 = [ln[i[0] - 1] for i in net_yl.getUnconnectedOutLayers()] vs = cv2.VideoCapture(vid_path) count = 0 while True: (grabbed, frame) = vs.read() if not grabbed: print('here') break (H, W) = frame.shape[:2] blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False) net.setInput(blob) layerOutputs = net.forward(ln1) boxes = [] confidences = [] classIDs = [] for output in layerOutputs: for detection in output: scores = detection[5:] classID = np.argmax(scores) confidence = 
scores[classID] # detecting humans in frame if classID == 0: if confidence > confid: box = detection[0:4] * np.array([W, H, W, H]) (centerX, centerY, width, height) = box.astype("int") x = int(centerX - (width / 2)) y = int(centerY - (height / 2)) boxes.append([x, y, int(width), int(height)]) confidences.append(float(confidence)) classIDs.append(classID) idxs = cv2.dnn.NMSBoxes(boxes, confidences, confid, thresh) font = cv2.FONT_HERSHEY_PLAIN boxes1 = [] for i in range(len(boxes)): if i in idxs: boxes1.append(boxes[i]) x,y,w,h = boxes[i] print(boxes1) # - # + import cv2 import numpy as np (H, W) = frame.shape[:2] points = mouse_pts # Using first 4 points or coordinates for perspective transformation. The region marked by these 4 points are # considered ROI. This polygon shaped ROI is then warped into a rectangle which becomes the bird eye view. # This bird eye view then has the property property that points are distributed uniformally horizontally and # vertically(scale for horizontal and vertical direction will be different). So for bird eye view points are # equally distributed, which was not case for normal view. src = np.float32(np.array(points[:4])) dst = np.float32([[0, H], [W, H], [W, 0], [0, 0]]) prespective_transform = cv2.getPerspectiveTransform(src, dst) # using next 3 points for horizontal and vertical unit length(in this case 180 cm) pts = np.float32(np.array([points[4:7]])) warped_pt = cv2.perspectiveTransform(pts, prespective_transform)[0] # since bird eye view has property that all points are equidistant in horizontal and vertical direction. # distance_w and distance_h will give us 180 cm distance in both horizontal and vertical directions # (how many pixels will be there in 180cm length in horizontal and vertical direction of birds eye view), # which we can use to calculate distance between two humans in transformed view or bird eye view distance_w = np.sqrt((warped_pt[0][0] - warped_pt[1][0]) ** 2 + (warped_pt[0][1] - warped_pt[1][1]) ** 2) distance_h = np.sqrt((warped_pt[0][0] - warped_pt[2][0]) ** 2 + (warped_pt[0][1] - warped_pt[2][1]) ** 2) pnts = np.array(points[:4], np.int32) cv2.polylines(frame, [pnts], True, (70, 70, 70), thickness=2) # + ########################### # - # + [markdown] colab_type="text" id="O6Kh5doF8DLr" # ## Select Points for Perspective Transform # + colab={} colab_type="code" executionInfo={"elapsed": 8656, "status": "ok", "timestamp": 1593378693835, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgzE51W_GUZ7p7e7vvuyiuD-4fVF9W7cz8suYA=s64", "userId": "10371228985714256553"}, "user_tz": -330} id="Ke8OyaOVSoMT" # For Select Manualy # source_points = get_points(original_image_BGR_copy, 4, image_size=(image_width,image_height)) # points for TownCenter.mp4 or TownCenter_frames and pedestrians.avi or pedestrians_frames # source_points = np.float32([[ 796., 180.], # [1518., 282.], # [1080., 719.], # [ 128., 480.]]) # points for pedestrianWalking.mp4 or pedestrianWalking_frames source_points = np.float32([[361., 212.], [673., 248.], [597., 338.], [265., 286.]]) # points for Student_video.mp4 or Student_video_frames # source_points = np.float32([[ 141., 546.], # [ 695., 449.], # [1014., 519.], # [ 437., 692.]]) # points for MOT20_02_raw.mp4 or MOT20_02_raw_frames #source_points = np.float32([[142., 298.], # [784., 315.], # [811., 371.], # [ 82., 347.]]) # - # + colab={"base_uri": "https://localhost:8080/", "height": 428} colab_type="code" executionInfo={"elapsed": 6345, "status": "ok", "timestamp": 1593378693844, 
"user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgzE51W_GUZ7p7e7vvuyiuD-4fVF9W7cz8suYA=s64", "userId": "10371228985714256553"}, "user_tz": -330} id="xFq4qePi8bk3" outputId="bee40581-1584-48b7-ba8c-81ec4b57a6fb" for point in source_points: cv2.circle(original_image_RGB_copy, tuple(point), 8, (255, 0, 0), -1) points = source_points.reshape((-1,1,2)).astype(np.int32) cv2.polylines(original_image_RGB_copy, [points], True, (0,255,0), thickness=4) plt.figure(figsize=(12, 12)) plt.imshow(original_image_RGB_copy) plt.show() # + [markdown] colab_type="text" id="3kdOhWWa8RDC" # ## perspective transform matrix # + colab={"base_uri": "https://localhost:8080/", "height": 90} colab_type="code" executionInfo={"elapsed": 6225, "status": "ok", "timestamp": 1593378729072, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgzE51W_GUZ7p7e7vvuyiuD-4fVF9W7cz8suYA=s64", "userId": "10371228985714256553"}, "user_tz": -330} id="ERrknA3o87ls" outputId="fae939ee-2f36-4d01-af8e-f71536a2c80a" src=source_points # destination points for Towncenter.mp4 or TownCenter_frames # dst=np.float32([(0.1,0.5), (0.69, 0.5), (0.69,0.8), (0.1,0.8)]) # destination points for pedestrianWalking.mp4 or pedestrianWalking_frames # dst=np.float32([(0.49,0.5), (0.77, 0.5), (0.77,0.65), (0.49,0.65)]) # destination points for Student_video.mp4 or Student_video_frames # dst=np.float32([(0.1,0.8), (0.37, 0.8), (0.37,0.90), (0.1,0.90)]) # destination points for MOT20_02_raw.mp4 or MOT20_02_raw_frames dst=np.float32([(0.2,0.82), (0.80, 0.82), (0.80,0.87), (0.2,0.87)]) dst_size=(800,1080) dst = dst * np.float32(dst_size) H_matrix = cv2.getPerspectiveTransform(src, dst) print("The perspective transform matrix:") print(H_matrix) # + [markdown] colab_type="text" id="7sbfDZ9c9San" # ## Get warpPerspective Image (TOP View) # + colab={"base_uri": "https://localhost:8080/", "height": 704} colab_type="code" executionInfo={"elapsed": 10795, "status": "ok", "timestamp": 1593378736976, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgzE51W_GUZ7p7e7vvuyiuD-4fVF9W7cz8suYA=s64", "userId": "10371228985714256553"}, "user_tz": -330} id="XzJ9DSgM9ETD" outputId="621ba860-0dae-4364-aead-ebb3d77d6248" warped = cv2.warpPerspective(original_image_RGB_copy, H_matrix, dst_size) plt.figure(figsize=(12, 12)) plt.imshow(warped) plt.show() # + [markdown] colab_type="text" id="-IQ6H8YA93-e" # ## Create YOLO model with weights and set parameters # + colab={} colab_type="code" executionInfo={"elapsed": 11263, "status": "ok", "timestamp": 1593378755719, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgzE51W_GUZ7p7e7vvuyiuD-4fVF9W7cz8suYA=s64", "userId": "10371228985714256553"}, "user_tz": -330} id="lgBjN70J92Un" confidence_threshold = 0.2 nms_threshold = 0.6 min_distance = 60 width = 608 height = 608 config = 'yolov3.cfg' weights = 'yolov3.weights' classes = 'coco.names' with open(classes, 'rt') as f: coco_classes = f.read().strip('\n').split('\n') model = create_model(config, weights) output_layers = get_output_layers(model) # + [markdown] colab_type="text" id="VzAL7zaf-P7A" # ## prediction with YOLO # + colab={} colab_type="code" executionInfo={"elapsed": 7980, "status": "ok", "timestamp": 1593378767586, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgzE51W_GUZ7p7e7vvuyiuD-4fVF9W7cz8suYA=s64", "userId": "10371228985714256553"}, "user_tz": -330} id="EShByS8l9dvU" blob = 
blob_from_image(original_image_RGB, (width, height)) outputs = predict(blob, model, output_layers) # - print(outputs) # + [markdown] colab_type="text" id="392AbEs3-XdQ" # ## Get Detected Person Boxes # + colab={} colab_type="code" executionInfo={"elapsed": 6850, "status": "ok", "timestamp": 1593378767592, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgzE51W_GUZ7p7e7vvuyiuD-4fVF9W7cz8suYA=s64", "userId": "10371228985714256553"}, "user_tz": -330} id="qBOiqGOD-W9e" boxes = get_image_boxes(outputs, 100, 150, coco_classes) # + [markdown] colab_type="text" id="2QTJc0h6-zyb" # ## Get Points as Birds Eye View # + colab={} colab_type="code" executionInfo={"elapsed": 6149, "status": "ok", "timestamp": 1593378767598, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgzE51W_GUZ7p7e7vvuyiuD-4fVF9W7cz8suYA=s64", "userId": "10371228985714256553"}, "user_tz": -330} id="mIIHae86_BlR" birds_eye_points = compute_point_perspective_transformation(H_matrix, boxes) # + [markdown] colab_type="text" id="0or1yRp-_GdI" # ## Get Red and Green Box Cordinates # + colab={} colab_type="code" executionInfo={"elapsed": 6982, "status": "ok", "timestamp": 1593378770333, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgzE51W_GUZ7p7e7vvuyiuD-4fVF9W7cz8suYA=s64", "userId": "10371228985714256553"}, "user_tz": -330} id="_-5Pq8z-_J0k" green_box, red_box = get_red_green_boxes(min_distance, birds_eye_points, boxes) # + [markdown] colab_type="text" id="pXs2RTnp_Yzg" # ## Generate Birds-Eye-View Image # + colab={"base_uri": "https://localhost:8080/", "height": 486} colab_type="code" executionInfo={"elapsed": 5945, "status": "ok", "timestamp": 1593378772526, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgzE51W_GUZ7p7e7vvuyiuD-4fVF9W7cz8suYA=s64", "userId": "10371228985714256553"}, "user_tz": -330} id="RB3fNtgP_ef0" outputId="9957e84b-5001-41f8-dc98-9545639b47d3" birds_eye_view_image = get_birds_eye_view_image(green_box, red_box,eye_view_height=image_height,eye_view_width=image_width//2) plt.figure(figsize=(8, 8)) plt.imshow(cv2.cvtColor(birds_eye_view_image, cv2.COLOR_RGB2BGR)) plt.show() # + [markdown] colab_type="text" id="WCi68syq_rfS" # ## Draw red and green boxes on detected Human # + colab={"base_uri": "https://localhost:8080/", "height": 596} colab_type="code" executionInfo={"elapsed": 11391, "status": "ok", "timestamp": 1593378789421, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgzE51W_GUZ7p7e7vvuyiuD-4fVF9W7cz8suYA=s64", "userId": "10371228985714256553"}, "user_tz": -330} id="AtYfFric_6BE" outputId="5688279d-1576-4d42-e919-32392db95308" box_red_green_image = get_red_green_box_image(original_image_BGR.copy(),green_box,red_box) plt.figure(figsize=(20, 20)) plt.imshow(cv2.cvtColor(box_red_green_image, cv2.COLOR_RGB2BGR)) plt.show() # + [markdown] colab_type="text" id="ToLxRNtwAGRh" # ## Combine both image horizontally and template # + colab={"base_uri": "https://localhost:8080/", "height": 464} colab_type="code" executionInfo={"elapsed": 12386, "status": "ok", "timestamp": 1593378796353, "user": {"displayName": "<NAME>", "photoUrl": "https://<KEY>", "userId": "10371228985714256553"}, "user_tz": -330} id="S81eHLzyAf9O" outputId="e932970b-53e5-499c-ba5a-dc388d21397e" combined_image = np.concatenate((birds_eye_view_image,box_red_green_image), axis=1) main_header = 
cv2.resize(main_header,(combined_image.shape[1],main_header.shape[0])) deshboard_image = np.concatenate((main_header,combined_image), axis=0) plt.figure(figsize=(25, 20)) plt.imshow(cv2.cvtColor(deshboard_image, cv2.COLOR_RGB2BGR)) plt.show() # + [markdown] colab_type="text" id="-ryWuL5NCgR0" # ## Social Distance On Video # + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="fgmEbEvIBddr" outputId="871fac1f-6e22-40c5-dec1-0d142ba821f1" # %%time video = cv2.VideoCapture('data/MOT20-02-raw.webm') writer = None frame_number = 0 print('%-20s%-26s%-26s%-26s' % ('Processing Frame','|Total Detected Person','|Red Markerd Person','|Green Marked Person')) while True: ret,frame = video.read() if not ret: break image_height, image_width = frame.shape[:2] image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) blob = blob_from_image(image, (width, height)) outputs = predict(blob, model, output_layers) boxes = get_image_boxes(outputs, image_width, image_height, coco_classes) birds_eye_points = compute_point_perspective_transformation(H_matrix, boxes) green_box, red_box = get_red_green_boxes(min_distance, birds_eye_points,boxes) birds_eye_view_image = get_birds_eye_view_image(green_box, red_box, eye_view_height=image_height,eye_view_width=image_width//2) box_red_green_image = get_red_green_box_image(frame.copy(), green_box,red_box) combined_image = np.concatenate((birds_eye_view_image,box_red_green_image), axis=1) main_header = cv2.resize(main_header,(combined_image.shape[1],main_header.shape[0])) deshboard_image = np.concatenate((main_header,combined_image), axis=0) frame_number += 1 sys.stdout.write('%-20i|%-25i|%-25i|%-25i\n' % (frame_number,len(boxes),len(red_box),len(green_box))) # if frame_number >=20: # break if writer is None: fourcc = cv2.VideoWriter_fourcc(*"DIVX") writer = cv2.VideoWriter('result/result_15_MOT20-02-raw_1.avi', fourcc, 15, (deshboard_image.shape[1], deshboard_image.shape[0]), True) writer.write(deshboard_image) del image,outputs,combined_image,deshboard_image,birds_eye_view_image print(' ') writer.release() video.release() # + colab={} colab_type="code" id="-RXpfVUyKtYz"
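# ## Supplementary sketch: ground-plane distance helper
#
# The helper below is a minimal, self-contained sketch of the approach used throughout this
# notebook: warp two image points into the bird's-eye plane with `cv2.perspectiveTransform` and
# take the Euclidean distance there. The ROI corners, destination size and cm-per-pixel ratios are
# placeholder values for illustration only; for real measurements substitute the calibrated values
# from above (`prespective_transform`, `w_ratio`, `h_ratio`). Note that, unlike the
# `math.sqrt(...) ** 0.5` expression used in some cells above (which applies the square root
# twice), this helper returns the plain Euclidean distance.

# +
import cv2
import numpy as np

# hypothetical ROI corners in the image and the rectangle they map to in the bird's-eye view
src = np.float32([[530, 289], [657, 338], [592, 412], [442, 348]])
W, H = 800, 600
dst = np.float32([[0, H], [W, H], [W, 0], [0, 0]])
M = cv2.getPerspectiveTransform(src, dst)

def warp_point(pt, M):
    # cv2.perspectiveTransform expects an array of shape (N, 1, 2)
    p = np.float32([[pt]])
    return cv2.perspectiveTransform(p, M)[0][0]

def ground_distance(pt_a, pt_b, M, cm_per_px_x=1.0, cm_per_px_y=1.0):
    # Euclidean distance between two image points, measured in the warped ground plane
    ax, ay = warp_point(pt_a, M)
    bx, by = warp_point(pt_b, M)
    dx = (ax - bx) * cm_per_px_x
    dy = (ay - by) * cm_per_px_y
    return float(np.sqrt(dx * dx + dy * dy))

# example: a person centroid vs the machine centroid used above (scale ratios are hypothetical)
print(ground_distance((568, 374), (508, 343), M, cm_per_px_x=0.5, cm_per_px_y=0.8))
# -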
person_distance measurement_final_100.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/shalabh147/Brain-Tumor-Segmentation-and-Survival-Prediction-using-Deep-Neural-Networks/blob/master/2d_4class_models/Axis%202/2d_4class_axis2train.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" id="OOrTsoBlCBsO" colab_type="code" colab={} import random import pandas as pd import numpy as np import matplotlib.pyplot as plt # #%matplotlib inline import tensorflow as tf import keras import keras.backend as K from keras.utils import to_categorical from keras import metrics from keras.models import Model, load_model from keras.layers import Input, BatchNormalization, Activation, Dense, Dropout,Maximum from keras.layers.core import Lambda, RepeatVector, Reshape from keras.layers.convolutional import Conv2D, Conv2DTranspose,Conv3D,Conv3DTranspose from keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D,MaxPooling3D from keras.layers.merge import concatenate, add from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau from keras.optimizers import Adam from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img from skimage.io import imread, imshow, concatenate_images from skimage.transform import resize from sklearn.utils import class_weight from keras.callbacks import ModelCheckpoint from keras.callbacks import CSVLogger from keras.callbacks import EarlyStopping import os from skimage.io import imread, imshow, concatenate_images from skimage.transform import resize # from medpy.io import load import numpy as np #import cv2 import nibabel as nib from PIL import Image def conv_block(input_mat,num_filters,kernel_size,batch_norm): X = Conv2D(num_filters,kernel_size=(kernel_size,kernel_size),strides=(1,1),padding='same')(input_mat) if batch_norm: X = BatchNormalization()(X) X = Activation('relu')(X) X = Conv2D(num_filters,kernel_size=(kernel_size,kernel_size),strides=(1,1),padding='same')(X) if batch_norm: X = BatchNormalization()(X) X = Activation('relu')(X) return X def Unet_with_slice(input_img, n_filters = 16 , dropout = 0.3 , batch_norm = True): c1 = Conv2D(16,kernel_size = (1,6) , strides = (1,1) ,padding = 'valid')(input_img) if batch_norm: c1 = BatchNormalization()(c1) #print(c1.shape) c1 = Activation('relu')(c1) c1 = Conv2D(n_filters,kernel_size=(3,3),strides=(1,1),padding='same')(c1) if batch_norm: c1 = BatchNormalization()(c1) c1 = Activation('relu')(c1) p1 = MaxPooling2D(pool_size = (2,2) , strides = 2)(c1) p1 = Dropout(dropout)(p1) #print(p1.shape) c2 = conv_block(p1 , n_filters*2,3,batch_norm) p2 = MaxPooling2D(pool_size=(3,3), strides=3)(c2) p2 = Dropout(dropout)(p2) #print(p2.shape) c3 = conv_block(p2, n_filters*4,3,batch_norm) #print(c3.shape) p3 = MaxPooling2D(pool_size = (2,1) , strides = (2,1))(c3) p3 = Dropout(dropout)(p3) #print(p3.shape) c4 = conv_block(p3, n_filters*8,3,batch_norm) p4 = MaxPooling2D(pool_size = (4,4) , strides = (4,5))(c4) p4 = Dropout(dropout)(p4) c5 = conv_block(p4,n_filters*16,3,batch_norm) u6 = Conv2DTranspose(n_filters*8,kernel_size = (4,4) , strides = (4,5) , padding = 'same')(c5) u6 = 
concatenate([u6,c4]) c6 = conv_block(u6,n_filters*8,3,batch_norm) c6 = Dropout(dropout)(c6) u7 = Conv2DTranspose(n_filters*4,kernel_size = (3,3) , strides = (2,1) , padding = 'same')(c6) u7 = concatenate([u7,c3]) c7 = conv_block(u7,n_filters*4,3,batch_norm) c7 = Dropout(dropout)(c7) u8 = Conv2DTranspose(n_filters*2,kernel_size = (3,3) , strides = (3,3) , padding = 'same')(c7) u8 = concatenate([u8,c2]) c8 = conv_block(u8,n_filters*2,3,batch_norm) c8 = Dropout(dropout)(c8) u9 = Conv2DTranspose(n_filters,kernel_size = (3,3) , strides = (2,2) , padding = 'same')(c8) u9 = concatenate([u9,c1]) c9 = conv_block(u9,n_filters,3,batch_norm) c9 = Dropout(dropout)(c9) c10 = Conv2DTranspose(n_filters, kernel_size = (1,6) , strides = (1,1), padding = 'valid')(c9) outputs = Conv2D(4, kernel_size = (1,1), activation = 'softmax')(c10) model = Model(inputs = input_img , outputs = outputs) return model def standardize(image): standardized_image = np.zeros(image.shape) # # iterate over the `z` dimension for z in range(image.shape[2]): # get a slice of the image # at channel c and z-th dimension `z` image_slice = image[:,:,z] # subtract the mean from image_slice centered = image_slice - np.mean(image_slice) # divide by the standard deviation (only if it is different from zero) if(np.std(centered)!=0): centered = centered/np.std(centered) # update the slice of standardized image # with the scaled centered and scaled image standardized_image[:, :, z] = centered ### END CODE HERE ### return standardized_image def dice_coef(y_true, y_pred, epsilon=0.00001): """ Dice = (2*|X & Y|)/ (|X|+ |Y|) = 2*sum(|A*B|)/(sum(A^2)+sum(B^2)) ref: https://arxiv.org/pdf/1606.04797v1.pdf """ axis = (0,1,2) dice_numerator = 2. * K.sum(y_true * y_pred, axis=axis) + epsilon dice_denominator = K.sum(y_true*y_true, axis=axis) + K.sum(y_pred*y_pred, axis=axis) + epsilon return K.mean((dice_numerator)/(dice_denominator)) def dice_coef_loss(y_true, y_pred): return 1-dice_coef(y_true, y_pred) input_img = Input((240,155,4)) model = Unet_with_slice(input_img,32,0.3,True) learning_rate = 0.00095 #epochs = 5000 decay_rate = 0.0000002 model.compile(optimizer=Adam(lr=learning_rate, decay = decay_rate), loss=dice_coef_loss, metrics=[dice_coef]) model.summary() path = '../input/vs-brats2018/miccai_brats_2018_data_training/HGG' all_images = os.listdir(path) #print(len(all_images)) all_images.sort() data = np.zeros((240,240,155,4)) image_data2=np.zeros((240,240,155)) loss_hist = [] accu_hist = [] epoch_wise_loss = [] epoch_wise_accu = [] for epochs in range(40): epoch_loss = 0 epoch_accu = 0 for image_num in range(180): x_to = [] y_to = [] print(epochs) print(image_num) # data preprocessing starts here x = all_images[image_num] print(x) folder_path = path + '/' + x; modalities = os.listdir(folder_path) modalities.sort() #data = [] w = 0 for j in range(len(modalities)): #print(modalities[j]) image_path = folder_path + '/' + modalities[j] if not(image_path.find('seg.nii') == -1): img = nib.load(image_path); image_data2 = img.get_data() image_data2 = np.asarray(image_data2) print("Entered ground truth") else: img = nib.load(image_path); image_data = img.get_data() image_data = np.asarray(image_data) image_data = standardize(image_data) data[:,:,:,w] = image_data print("Entered modality") w = w+1 print(data.shape) print(image_data2.shape) ''' reshaped_data=data[56:184,75:203,13:141,:] reshaped_data=reshaped_data.reshape(1,128,128,128,4) reshaped_image_data2=image_data2[56:184,75:203,13:141] reshaped_image_data2=reshaped_image_data2.reshape(1,128,128,128) 
reshaped_image_data2[reshaped_image_data2==4] = 3 hello = reshaped_image_data2.flatten() #y_to = keras.utils.to_categorical(y_to,num_classes=2) print(reshaped_image_data2.shape) #print(hello[hello==3].shape) print("Number of classes",np.unique(hello)) class_weights = class_weight.compute_class_weight('balanced',np.unique(hello),hello) print(class_weights) ''' for slice_no in range(0,240): a = slice_no X = data[:,slice_no,:,:] Y = image_data2[:,slice_no,:] # imgplot = plt.imshow(X[:,:,2]) # plt.show(block=False) # plt.pause(0.3) # plt.close() # imgplot = plt.imshow(Y) # plt.show(block=False) # plt.pause(0.3) # plt.close() if(X.any()!=0 and Y.any()!=0 and len(np.unique(Y)) == 4): #print(slice_no) x_to.append(X) y_to.append(Y) if len(y_to)>=63: break; #reshaped_image_data2 = to_categorical(reshaped_image_data2, num_classes = 4) #print(reshaped_data.shape) #print(reshaped_image_data2.shape) #print(type(reshaped_data)) x_to = np.asarray(x_to) y_to = np.asarray(y_to) print(x_to.shape) print(y_to.shape) y_to[y_to==4] = 3 #y_to = one_hot_encode(y_to) #y_to[y_to==2] = 1 #y_to[y_to==1] = 1 #y_to[y_to==0] = 0 print(y_to.shape) from sklearn.utils import shuffle x_to,y_to = shuffle(x_to,y_to) hello = y_to.flatten() #print(hello[hello==3].shape) print("Number of classes",np.unique(hello)) class_weights = class_weight.compute_class_weight('balanced',np.unique(hello),hello) #class_weights.insert(3,0) print("class_weights",class_weights) y_to = keras.utils.to_categorical(y_to,num_classes=4) history = model.fit(x=x_to,y=y_to, epochs = 1 , batch_size = 63 ,class_weight = class_weights) print(history.history['loss']) epoch_loss += history.history['loss'][0] epoch_accu += history.history['dice_coef'][0] loss_hist.append(history.history['loss']) accu_hist.append(history.history['dice_coef']) model.save('../working/2d_4class_axis2.h5') epoch_loss = epoch_loss/180 epoch_accu = epoch_accu/180 epoch_wise_loss.append(epoch_loss) epoch_wise_accu.append(epoch_accu) plt.plot(epoch_wise_loss) plt.title('Model_loss vs epochs') plt.ylabel('Loss') plt.xlabel('epochs') s = '../working/epochwise_loss_' + str(epochs) plt.savefig(s) plt.show() plt.close() plt.plot(epoch_wise_accu) plt.title('Model_Accuracy vs epochs') plt.ylabel('Accuracy') plt.xlabel('epochs') s = '../working/epochwise_accu_' + str(epochs) plt.savefig(s) plt.show() plt.close() plt.plot(accu_hist) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') s = '../working/accuracy_plot_' + str(epochs) plt.savefig(s) plt.show() plt.close() plt.plot(loss_hist) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') s = '../working/loss_plot_' + str(epochs) plt.savefig(s) plt.show() plt.close() model.save('../working/2d_4class_axis2.h5') # + _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" id="vBrqb4WFCBsU" colab_type="code" colab={}
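# As a quick sanity check of the soft Dice score defined in dice_coef above, the same
# formula can be evaluated with plain NumPy on tiny one-hot masks (trailing class axis,
# like the y_to tensors used for training). This is only an illustrative sketch and is
# not part of the training graph.

# +
import numpy as np

def dice_coef_np(y_true, y_pred, epsilon=0.00001):
    # same formula as dice_coef: sum over the (slice, H, W) axes, average over classes
    axis = (0, 1, 2)
    numerator = 2. * np.sum(y_true * y_pred, axis=axis) + epsilon
    denominator = np.sum(y_true * y_true, axis=axis) + np.sum(y_pred * y_pred, axis=axis) + epsilon
    return np.mean(numerator / denominator)

# two tiny 2x2 "segmentations" with 2 classes, one-hot encoded
a = np.eye(2)[np.array([[0, 1], [1, 0]])][None]   # shape (1, 2, 2, 2)
b = np.eye(2)[np.array([[1, 0], [0, 1]])][None]   # every pixel disagrees with a

print(dice_coef_np(a, a))   # close to 1.0 (perfect overlap)
print(dice_coef_np(a, b))   # close to 0.0 (no overlap)
# -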
2d_4class_models/Axis 2/2d_4class_axis2train.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import pandas as pd import numpy as np import ipywidgets as widgets import bqplot.pyplot as plt import bqplot as bq # - # ## Image # + image_path = '../figures/image_nagra_nab14-17.png' with open(image_path, 'rb') as f: raw_image = f.read() ipyimage = widgets.Image(value=raw_image, format='png') ipyimage # - # ## With pyplot-API of bqplot plt.figure(padding_y=0) axes_options = {'x': {'visible': False}, 'y': {'visible': False}} plt.imshow(image_path, 'filename') plt.show() # ### Display inside a bqplot Figure fig = plt.figure(title='Cross-Section', padding_x=0, padding_y=0) image = plt.imshow(ipyimage, 'widget') fig # # Scatter Chart - Pyplot API # data generation price_data = pd.DataFrame(np.cumsum(np.random.randn(150, 2).dot([[1.0, -0.8], [-0.8, 1.0]]), axis=0) + 100, columns=['Security 1', 'Security 2'], index=pd.date_range(start='01-01-2007', periods=150)) size = 100 np.random.seed(0) x_data = range(size) y_data = np.cumsum(np.random.randn(size) * 100.0) ord_keys = np.array(['A', 'B', 'C', 'D', 'E', 'F']) ordinal_data = np.random.randint(5, size=size) # + # Scatter plot where you can move points and have an updated mean fig2 = plt.figure() scat = plt.scatter(x_data[:10], y_data[:10], colors=['SeaGreen'], enable_move=True, restrict_y=True) lin = plt.plot([], [], line_style='dotted', colors=['DodgerBlue']) def update_mean(change=10): with lin.hold_sync(): lin.x = [np.min(scat.x), np.max(scat.x)] lin.y = [np.mean(scat.y), np.mean(scat.y)] update_mean() # update line on change of x or y of scatter scat.observe(update_mean, names=['x']) scat.observe(update_mean, names=['y']) fig2 # + # Scatter plot where you can add and move points fig2 = plt.figure() scat = plt.scatter(x_data[:10], y_data[:10], colors=['SeaGreen'], enable_move=True, restrict_y=True) lin = plt.plot([], [], line_style='dotted', colors=['DodgerBlue']) def update_mean(change=None): with lin.hold_sync(): lin.x = [np.min(scat.x), np.max(scat.x)] lin.y = [np.mean(scat.y), np.mean(scat.y)] with scat.hold_sync(): scat.interactions = {'click': 'add'} update_mean() # update line on change of x or y of scatter scat.observe(update_mean, names=['x']) scat.observe(update_mean, names=['y']) fig2 # + # Image and scatter in one fig3 = plt.figure(padding_x=0, padding_y=0) # xz dimensions in km xdim = np.array([0, 30.]) ydim = np.array([-6.5, 1.]) plt.scales(scales = {'x': bq.LinearScale(min=np.min(xdim), max=np.max(xdim)), 'y': bq.LinearScale(min=np.min(ydim), max=np.max(ydim))}) image = plt.imshow(x=xdim, y=ydim, image=ipyimage, format='widget') scat = plt.scatter(x=[], y=[], colors=['SeaGreen'], enable_move=True, restrict_y=False) def add_points(change=None): with scat.hold_sync(): scat.interactions = {'click': 'add'} add_points() fig3 # + code_folding=[] # Image and scatter in one with buttons fig4 = plt.figure(padding_x=0, padding_y=0) # dict for layers and colors - replace with architecture in gempy cord=['DodgerBlue', 'DarkSlateGray', 'Yellow', 'HotPink', 'OrangeRed'] layers = ['GrabenFill', 'PostGraben1', 'Postgraben2', 'Jura', 'MainFault'] laycol = dict(zip(layers, cord)) # buttons add_btn = widgets.Button(description='add', button_style='success') delete_btn = widgets.Button(description='delete', button_style='danger') series_btn = widgets.Button(description='save to file', button_style='warning') 
layers_btn = widgets.Dropdown(description='unit', options=layers)

# xz dimensions in km
xdim = np.array([0, 30.])
ydim = np.array([-6.5, 1.])

plt.scales(scales = {'x': bq.LinearScale(min=np.min(xdim), max=np.max(xdim)),
                     'y': bq.LinearScale(min=np.min(ydim), max=np.max(ydim))})
image = plt.imshow(x=xdim, y=ydim, image=ipyimage, format='widget')
scat = plt.scatter(x=[], y=[], colors=['SeaGreen'], enable_move=True, restrict_y=False)
plt.xlabel('x [km]')
plt.ylabel('z [km]')

def add_points():
    with image.hold_sync():
        scat.interactions = {'click': 'add'}

def delete_points():
    with scat.hold_sync():
        scat.interactions = {'click': 'delete'}

def save_fid():
    # stack the picked coordinates (converted from km to m) and write them to a csv file
    xy = np.stack([scat.x*1000, scat.y*1000], axis=1)
    np.savetxt(layers_btn.value+'.csv', xy, fmt='%.2f', header='x[m],z[m]', delimiter=',')

# create a callback which updates the plot when a dropdown item is selected
# (caution: switching the unit clears all currently picked points)
def change_interface(*args):
    selected_ticker = layers_btn.value
    scat.x = []
    scat.y = []
    # recolor the (now empty) scatter with the color assigned to the selected unit
    scat.colors = [laycol[selected_ticker]]

# register the callback by using the 'observe' method
layers_btn.observe(change_interface, 'value')

# register the on_click functions
add_btn.on_click(lambda btn: add_points())
delete_btn.on_click(lambda btn: delete_points())
series_btn.on_click(lambda btn: save_fid())

# stack button and figure using VBox
widgets.VBox([fig4, widgets.HBox([add_btn, delete_btn, series_btn, layers_btn])])
# -

# The problem is that this does not work well: new points cannot be added on top of the image.
# One has to click beside the plot (next to the image) and drag the point to its destination.
# This needs to be fixed, maybe by loading the image differently?
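# As a quick check of the csv export from save_fid above, the picked points can be read
# back and re-plotted. This is only a sketch: it assumes save_fid() has already written a
# file such as 'Jura.csv' (i.e. the currently selected unit, with at least two points
# picked), and it uses plain matplotlib so it does not interfere with the bqplot figure.

# +
import numpy as np
import matplotlib.pyplot as mpl_plt

picked = np.loadtxt('Jura.csv', delimiter=',')   # columns: x[m], z[m]; the header line starts with '#'
x_km, z_km = picked[:, 0] / 1000., picked[:, 1] / 1000.

mpl_plt.plot(x_km, z_km, 'o-')
mpl_plt.xlabel('x [km]')
mpl_plt.ylabel('z [km]')
mpl_plt.show()
# -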
notebooks/00_Test_notebook-pyplot_API.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib notebook # %matplotlib widget # %matplotlib inline import json prefix = "../data/output/adversarial/" tc_normal = json.load(open(prefix + "tourist_tc_normal.json")) tc_advs = json.load(open(prefix + "tourist_tc_adversarial.json")) mmr_normal = json.load(open(prefix + "tourist_mmr_normal.json")) mmr_advs = json.load(open(prefix + "tourist_mmr_adversarial.json")) # + #import seaborn as sns from seaborn import lineplot import matplotlib.pyplot as plt #plt.rcParams["figure.figsize"] = (3.5,4*3.5/5) #sns.set(rc={'figure.figsize':(3.7,3.7)}) get_ratings = lambda js : [1500] + [c['rating_mu'] for c in js] jsons = [tc_normal, tc_advs, mmr_normal, mmr_advs] zorder = [1, 0, 1, 0] names = ["Topcoder (honest)", "Topcoder (adversarial)", "Elo-MMR (honest)", "Elo-MMR (adversarial)"] for idx, (rating_json, name, z) in enumerate(zip(jsons, names, zorder)): ratings = get_ratings(rating_json) contestnum = list(range(len(ratings))) # if name == names[0] or name == names[2]: # plt.figure(figsize=(6.4, 4)) plot = lineplot(contestnum, ratings, label=name, zorder=z) plot.legend(frameon=False, fontsize=14, loc="upper left") plot.set_xlabel("Contest #", fontsize=14) plot.set_ylabel("Rating", fontsize=14) plot.set_xlim(0, 105) plot.set_ylim(1750, 3850) # plot.axes.set_aspect(1.0/15) # if idx == 2 or idx == 3: # plot.axes.get_yaxis().set_visible(False) plt.tight_layout() if idx == 1: plt.savefig('topcoder.eps', bbox_inches='tight', dpi = 400) plt.figure() if idx == 3: plt.savefig('elo-mmr.eps', bbox_inches='tight', dpi = 400) # - len(ratings) # get_ratings(jsons[0])
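# A few summary numbers to go with the plots above: final rating, peak rating and number
# of contests per trajectory. This is a small sketch that only relies on the structure
# already used by get_ratings (a list of contest dicts with a 'rating_mu' field).

# +
for rating_json, name in zip(jsons, names):
    ratings = get_ratings(rating_json)
    print(f"{name}: final = {ratings[-1]:.0f}, peak = {max(ratings):.0f}, contests = {len(ratings) - 1}")
# -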
scripts/notebooks/adversary-plot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [Root] # language: python # name: Python [Root] # --- # + # %matplotlib inline from __future__ import division import numpy as np import scipy.optimize as op import time import copy as cp import matplotlib.pyplot as plt import seaborn from scipy.stats import multivariate_normal seaborn.set(font_scale=2) seaborn.set_style("white") # - # # Define Hyperparameter Functions def kernel(t,t2,tav,k,sigma_s,clean=False): a0 =np.meshgrid(t,t2) dst = a0[0] - a0[1] K = k*np.exp(-.5*tav*dst**2) if clean: return K,dst else: K += np.eye(len(t))*sigma_s return K,dst def logP(params,t,t2,y): tav = params[0]; k = params[1]; eps = params[2] tav = np.exp(tav); k = np.exp(k); eps = np.exp(eps) K, _ = kernel(t,t2,tav,k=k,sigma_s = eps) K_inv = np.linalg.inv(K) a,b = np.linalg.slogdet(K) t1 = -.5*np.dot(np.dot(y,K_inv),y) - .5*(a*b) - len(t)*np.log(1*np.pi)/2 return -t1 def logP_grad_Y(params,t,t2,y): #this one seems to work tav = params[0]; k = params[1]; eps = params[2] tav = np.exp(tav); k = np.exp(k); eps = np.exp(eps) K, dst = kernel(t,t2,tav,k=k,sigma_s=eps) sqDst = .5*tav*dst**2 K_inv = np.linalg.inv(K) a0 = np.dot(K_inv,y) alpha = np.array([a0]) t1 = alpha.T.dot(alpha) - K_inv dKdt = sqDst*K dKdk = -(K-np.eye(len(t))*eps) dKdS = -eps*np.eye(len(t)) g0 = .5*np.trace(t1.dot(dKdt)) g1 = .5*np.trace(t1.dot(dKdk)) g2 = .5*np.trace(t1.dot(dKdS)) return np.array([g0,g1,g2]) # ## Generate Some Fake Data # + n_timePoints = 200 t = np.arange(n_timePoints) k = 1; sigmaS = .2; tav = .02 true_params = {'tav':tav, 'k': k, 'sigmaS': sigmaS} K,_ = kernel(t,t,tav,k,sigmaS) # - plt.imshow(K,interpolation='None') samp = np.random.multivariate_normal(np.zeros(n_timePoints),K) plt.plot(samp) #samp = np.concatenate([[.1,.4,1.,1.5,2.2,4],np.linspace(4,0,num=20)]) import pickle samp = pickle.load(open('/home/yves/Desktop/testD.p')) t = np.arange(len(samp)) # + #ans2 = op.approx_fprime([2,1,.1], # logP, # 1e-7, # t,t,samp)# # #print ans2 # + #logP_grad_Y([2,1,.1],t,t,samp) # - res = op.fmin_tnc(func=logP, x0=np.array([0,.1,1]), fprime=logP_grad_Y, args=(t,t,samp), maxfun=1000) np.exp(res[0]) print true_params st = time.time() res = op.minimize(fun=logP, x0=np.array([0,0,0]), jac=logP_grad_Y, args=(t,t,samp), method='TNC', options = {'disp': 1,'gtol':1e-16,'xtol':1e-16,'ftol':1e-16,'maxiter':200}) print time.time() - st print res print np.exp(res.x) print [i for i in np.exp(res.x)] print true_params # $$ # + def fit(y,t,t2,params): tav = params[0] sigmaS = params[2] k = params[1] if np.array_equal(t,t2): print 'smoothing' K,_ = kernel(t,t2,tav,k,sigmaS,clean=True) new_mu = K.dot(np.linalg.inv(K+np.eye(len(y))*sigmaS)).dot(y) new_cov = K - K.dot(np.linalg.inv(K+np.eye(len(y))*sigmaS)).dot(K) else: print 'inference' K,_ = kernel(t,t,tav,k,sigmaS,clean=True) Kst,_ = kernel(t,t2,tav,k,sigmaS,clean=True) Kstst, _ = kernel(t2,t2,tav,k,sigmaS,clean=True) temp1 = Kst.dot( np.linalg.inv(K + np.eye(len(t))*sigmaS)) new_mu = np.dot(temp1,y) new_cov = Kstst - np.dot(temp1,Kst.T) return new_mu, new_cov def predict(x,y,y_pred): return None # - params = np.exp(res.x) ssst = samp[20] # + sti = 45;ndi = 55 t = np.arange(len(samp)) #t1 = np.concatenate([t[:sti],t[ndi:]]) #t2 = t[sti:ndi] #samp[20] = ssst #y = np.concatenate([samp[:sti],samp[ndi:]]) mu,c = fit(samp,t,t,params) varnc = 2*np.sqrt(np.diag(c)) plt.plot(mu,color='k') plt.plot(t1,y,'o',alpha=.4) 
plt.plot(t2,samp[sti:ndi],'o',alpha=.4) plt.fill_between(t,mu+varnc,mu-varnc,color=[.5]*3) seaborn.despine(trim=0,offset=20) plt.xlabel('$x$') plt.ylabel('$f(x)$') # + sti = 45;ndi = 55 t = np.arange(len(samp)) t1 = np.concatenate([t[:sti],t[ndi:]]) t2 = t[sti:ndi] samp[20] = 10 y = np.concatenate([samp[:sti],samp[ndi:]]) mu,c = fit(y,t1,t,params) varnc = 2*np.sqrt(np.diag(c)) plt.plot(mu,color='k') plt.plot(t1,y,'o',alpha=.4) plt.plot(t2,samp[sti:ndi],'o',alpha=.4) plt.fill_between(t,mu+varnc,mu-varnc,color=[.5]*3) seaborn.despine(trim=0,offset=20) plt.xlabel('$x$') plt.ylabel('$f(x)$') # -
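# The cells above fit the hyperparameters and then call fit() for the posterior. The cell
# below is a self-contained NumPy sketch of the same posterior mean/covariance formulas,
# run on freshly generated toy data so it does not depend on the pickled samp array; the
# hyperparameter values are arbitrary illustrative choices.

# +
import numpy as np
import matplotlib.pyplot as plt

def rbf_np(t, t2, tav, k):
    # simplified version of kernel() above, without the added observation noise
    d = t[:, None] - t2[None, :]
    return k * np.exp(-.5 * tav * d**2)

tav, k, sigmaS = 0.05, 1.0, 0.1
t_train = np.linspace(0, 20, 30)
y_train = np.sin(t_train) + np.sqrt(sigmaS) * np.random.randn(len(t_train))
t_star = np.linspace(0, 20, 200)

K = rbf_np(t_train, t_train, tav, k)
K_s = rbf_np(t_star, t_train, tav, k)
K_ss = rbf_np(t_star, t_star, tav, k)

A = np.linalg.inv(K + sigmaS * np.eye(len(t_train)))
mu_star = K_s.dot(A).dot(y_train)          # posterior mean, as in fit()
cov_star = K_ss - K_s.dot(A).dot(K_s.T)    # posterior covariance, as in fit()
varnc_star = 2 * np.sqrt(np.diag(cov_star))

plt.plot(t_star, mu_star, color='k')
plt.fill_between(t_star, mu_star + varnc_star, mu_star - varnc_star, color=[.5] * 3)
plt.plot(t_train, y_train, 'o', alpha=.4)
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
# -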
GP_Regression_1D.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Control Structures in Python

# ### if

# So far we have only seen code that runs from top to bottom. In an **if-else structure**, certain sections of the code are executed only when certain conditions are met. This lets us program decisions!
#
# Let's look at an example:

# +
n = 30

if n < 42:
    print("The number n is less than 42")
    print("I am indented, too")

print("I am no longer indented!")

# +
a = 10

if a < 12:
    print("The number a is less than 12.")
# -

# It is important that the code below the condition is indented!

# ### else

# This if structure can be extended with an **else**, so that a specific block of code also runs when the condition is not met. If the if condition is met, the else block is of course not executed.

# +
m = 4

if m < 5:
    print("m is less than 5")
else:
    print("that is not the case")
# -

# ## elif - even more decisions

# In an if-else structure we can only check two cases, namely whether a condition is true (True) or not (False). If we want to program a decision model that checks even more conditions, so far we have had to nest several if-else structures inside each other.

# +
currency = "€"

if currency == "$":
    print("US dollar")
else:
    if currency == "¥":
        print("Japanese yen")
    else:
        if currency == "€":
            print("Euro")
        else:
            if currency == "฿":
                print("Thai baht")
# -

# With the elif extension we can check any number of conditions within a single if-else structure.
#
# elif is short for else if: an elif branch covers the case where the if condition is not met (False) but a further condition is True.

# +
currency = "HKD"

if currency == "$":
    print("US dollar")
elif currency == "¥":
    print("Japanese yen")
elif currency == "€":
    print("Euro")
elif currency == "฿":
    print("Thai baht")
else:
    print("I have no idea what currency that is.")
# -

# ## Exercise

# We want to run a discount campaign in an online shop to boost business.
# Your task is to simplify the calculation of the reduced prices with an if-elif-else structure.
#
# Note the following:
#
# * Items that cost between 0 and 20 CHF (inclusive) are reduced by 20 %;
# * Items that cost between 20 (exclusive) and 50 CHF (inclusive) are reduced by 40 %.
# * All other items, i.e. those that cost more than 50 CHF, are reduced by 60 %.
#

# +
price = 60

if price <= 20:
    print("This item is 20 % off.")
elif price <= 50:
    print("This item is 40 % off.")
elif price > 50:
    print("This item is 60 % off.")
else:
    print("It's free.")

# +
price = 33.45

if price <= 20:
    newprice = price*0.8
elif price <= 50:
    newprice = price*0.6
else:
    newprice = price*0.4

print("This item costs " + str(round(newprice, 2)) + " instead of " + str(price) + " francs.")

# +
print("%.2f" % 1.2399)  # rounds to two decimal places
# -
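# The same pricing rules can be wrapped in a small function so they are easy to reuse and
# test. This is just one possible sketch of the exercise solution above; the function name
# is made up.

# +
def discounted_price(price):
    if price <= 20:
        return round(price * 0.8, 2)   # 20 % off
    elif price <= 50:
        return round(price * 0.6, 2)   # 40 % off
    else:
        return round(price * 0.4, 2)   # 60 % off

for price in [10, 33.45, 60]:
    print(price, "->", discounted_price(price))
# -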
04 Python Teil 2/01 Kontrollstrukturen.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Documentation generator # + hide_input=true from fastai.gen_doc.nbdoc import * # - # Generates documentation for `fastai` classes, functions, types. # View documentation inside a Jupyter Notebook with [`show_doc`](/gen_doc.nbdoc.html#show_doc) or at [`docs.fast.ai`](http://docs.fast.ai) # ## Show the documentation of a function # + hide_input=true show_doc(show_doc, doc_string=False) # - # Show the documentation of an `elt` (function, class or enum). `doc_string` decides if we show the doc string of the element or not, `full_name` will override the name shown, `arg_comments` is a dictionary that will then show list the arguments with comments. `title_level` is the level of the corresponding cell in the TOC, `alt_doc_string` is a text that can replace the `doc_string`. `ignore_warn` will ignore warnings if you pass arguments in `arg_comments` that don't appear to belong to this function and `markdown` decides if the return is a Markdown cell or plain text. # # Plenty of examples of uses of this cell can been seen through the documentation, and you will want to *hide input* those cells for a clean final result. # ## Convenience functions # + hide_input=true show_doc(get_source_link) # + hide_input=true show_doc(show_video) # + hide_input=true show_doc(show_video_from_youtube) # - # ## Functions for internal fastai library use # + hide_input=true show_doc(get_exports) # - # Get the exports of `mod`. # + hide_input=true show_doc(get_fn_link) # + hide_input=true show_doc(get_ft_names) # + hide_input=true show_doc(is_enum) # - # Check if something is an enumerator. # + hide_input=true show_doc(import_mod) # + hide_input=true show_doc(link_docstring) # - # ## Undocumented Methods - Methods moved below this line will intentionally be hidden # ## New Methods - Please document or move to the undocumented section # + hide_input=true show_doc(jekyll_important) # - # # + hide_input=true show_doc(jekyll_warn) # - # # + hide_input=true show_doc(jekyll_note) # - # # + hide_input=true show_doc(doc) # - #
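# As an illustration of the keyword arguments described above, the cell below documents
# get_fn_link with a custom TOC level and an arg_comments entry. This is only a usage
# sketch: the argument name used in arg_comments is a guess, which is why ignore_warn=True
# is passed so that a possible mismatch is not reported as a warning.

# + hide_input=true
show_doc(get_fn_link, title_level=3,
         arg_comments={'ft': 'function, class or enum to link to'},
         ignore_warn=True)
# -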
docs_src/gen_doc.nbdoc.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Target data: import pandas target_df = pandas.read_csv('../data/model_data_2.csv') target_df.head() len(target_df) # 250374 different clients (unique clients). There a multiple policies (300K). # # Let's use the above dataset to split the pictures across the folder structure: training_one_df = target_df[(target_df['DESIRED_CL_CCPCEC'] == 1) & (target_df['sample'] == 'training')] training_one_df.head() len(training_one_df) training_zero_df = target_df[(target_df['DESIRED_CL_CCPC'] == 0) & (target_df['sample'] == 'training')] training_zero_df.head() len(training_zero_df) 12556 + 187952 validation_one_df = target_df[(target_df['DESIRED_CL_CCPC'] == 1) & (target_df['sample'] == 'holdout')] validation_zero_df = target_df[(target_df['DESIRED_CL_CCPC'] == 0) & (target_df['sample'] == 'holdout')] training_one_df = training_one_df['FILE_NAME'] training_zero_df = training_zero_df['FILE_NAME'] validation_one_df = validation_one_df['FILE_NAME'] validation_zero_df = validation_zero_df['FILE_NAME'] training_one = training_one_df.tolist() training_zero = training_zero_df.tolist() validation_one = validation_one_df.tolist() validation_zero = validation_zero_df.tolist() 'file_1886.png' in training_one # + import os from os import path import shutil src = "../imgs/" dst = "../data/train/positive/" files = [i for i in training_one] for f in files: try: shutil.copy(src + str(f),dst) except: pass # + import os from os import path import shutil src = "../imgs/" dst = "../data/train/negative/" files = [i for i in training_zero] for f in files: try: shutil.copy(src + str(f),dst) except: pass # + import os from os import path import shutil src = "../imgs/" dst = "../data/validation/positive/" files = [i for i in validation_one] for f in files: try: shutil.copy(src + str(f),dst) except: pass # + import os from os import path import shutil src = "../imgs/" dst = "../data/validation/negative/" files = [i for i in validation_zero] for f in files: try: shutil.copy(src + str(f),dst) except: pass # + import os path, dirs, files = next(os.walk("../data/train/positive/")) file_count = len(files) print(file_count) print(len(training_one_df)) # - path, dirs, files = next(os.walk("../data/train/negative/")) file_count = len(files) print(file_count) print(len(training_zero_df)) path, dirs, files = next(os.walk("../data/validation/positive/")) file_count = len(files) print(file_count) print(len(validation_one_df)) path, dirs, files = next(os.walk("../data/validation/negative/")) file_count = len(files) print(file_count) print(len(validation_zero_df)) 8735 + 16139 + 2065 + 10482 12556 + 187952 + 3123 + 46743 # + import os path, dirs, files = next(os.walk("../imgs/")) file_count = len(files) print(file_count) # -
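# The four copy cells above repeat the same try/except pattern. The sketch below collects
# it into a helper; the function name is made up and the behaviour matches the cells above
# (files missing from the source folder are silently skipped).

# +
import shutil

def copy_images(file_names, dst, src="../imgs/"):
    copied = 0
    for f in file_names:
        try:
            shutil.copy(src + str(f), dst)
            copied += 1
        except Exception:
            # same behaviour as the cells above: ignore files that do not exist in src
            pass
    return copied

# e.g.:
# copy_images(training_one, "../data/train/positive/")
# copy_images(validation_zero, "../data/validation/negative/")
# -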
code/Target-Preparation-v01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import matplotlib.pyplot as plt true_predictions = np.load('PCA_Coefficients_q1_test.npy') pred_predictions = np.load('PCA_Coefficients_q1_pred.npy') mode_num = 0 for i in range(10): plt.figure() plt.plot(true_predictions[mode_num,i*10:(i+1)*10],label='True') plt.plot(pred_predictions[mode_num,i*10:(i+1)*10],label='Predicted') plt.legend() plt.title('Test simulation '+str(i)) plt.show() mode_num = 1 for i in range(10): plt.figure() plt.plot(true_predictions[mode_num,i*10:(i+1)*10],label='True') plt.plot(pred_predictions[mode_num,i*10:(i+1)*10],label='Predicted') plt.legend() plt.title('Test simulation '+str(i)) plt.show() mode_num = 2 for i in range(10): plt.figure() plt.plot(true_predictions[mode_num,i*10:(i+1)*10],label='True') plt.plot(pred_predictions[mode_num,i*10:(i+1)*10],label='Predicted') plt.legend() plt.title('Test simulation '+str(i)) plt.show() mode_num = 3 for i in range(10): plt.figure() plt.plot(true_predictions[mode_num,i*10:(i+1)*10],label='True') plt.plot(pred_predictions[mode_num,i*10:(i+1)*10],label='Predicted') plt.legend() plt.title('Test simulation '+str(i)) plt.show()
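# Besides the per-simulation plots above, a single relative L2 error per PCA mode gives a
# compact summary of how closely the predicted coefficients track the truth. This is a
# small sketch on the arrays already loaded above.

# +
for mode_num in range(4):
    err = np.linalg.norm(pred_predictions[mode_num] - true_predictions[mode_num]) \
          / np.linalg.norm(true_predictions[mode_num])
    print('Mode', mode_num, 'relative L2 error:', err)
# -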
ROM_Demos/Nonlinear_SWE_GP/Visualization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # DATASET :D read, seed, urm import pandas as pd import numpy as np import scipy.sparse as sp from tqdm import tqdm import seaborn as sns import math import random train = pd.read_csv("original/train_interactions.csv", sep='\t') np.random.seed(0) # + # collect data to build urm playlists = train['pid'].values tracks = train['tid'].values n_tracks = 2262292 n_interactions = tracks.size # - urm = sp.csr_matrix((np.ones(n_interactions), (playlists,tracks)), shape=(1000000, n_tracks), dtype=np.int32) #riepilogo def riepilogo(df): df_copy = df.copy() df_copy['quantity'] = np.zeros(len(df_copy)) df_copy['quantity'] = df_copy.groupby(df_copy['pid']).transform('count') print("numero di tracks>\t", len(np.unique(df_copy.tid.values))) print("numero di playlist\t", len(np.unique(df_copy.pid.values))) print("lunghezza media>\t", len(df)/len(np.unique(df_copy.pid.values))) print("max songs> \t\t", max(np.unique(df_copy.quantity.values))) print("num inter> \t\t", len(df_copy)) valori = df_copy.groupby('pid').count()['quantity'].values sns.distplot(valori, bins=np.arange(min(valori),max(valori))) # # costanti prime_n_tracks = 28000 prime_n_playlists = 75000 max_playlist_lenght = 100 num_interactions_cut = 2 # # TAGLIO TRAIN alle prime 21k canzoni. e 90k pl. tolgo le>100songs num_tracks = np.diff(urm.indptr) mask_cut = np.where(num_tracks<=max_playlist_lenght )[0] print(len(mask_cut),mask_cut) train_cut = train[train.pid.isin(mask_cut)] train_cut = train_cut[(train_cut.tid < prime_n_tracks)] train_cut = train_cut[(train_cut.pid < prime_n_playlists)] train_cut = train_cut.reset_index(drop=True) # # qui ho 18kk interactions, 20k songs, pl sotto i 100 # ### taglio via i duplicati ora che posso fare una groupby in tempi umani train_finale = train_cut.groupby(['pid','tid'], as_index=False )['pos'].min() train_finale = train_finale.reset_index(drop=True) # # qualche stat e tolgo le playlist sotto le 5 interazioni # train_finale['quantity'] = np.zeros(len(train_finale)) train_finale['quantity'] = train_finale.groupby(train_finale['pid']).transform('count') train_finale = train_finale[train_finale.quantity>=5] riepilogo(train_finale) train_finale.head() # # taglio canzoni con meno di 3 interazioni interazioni = train_finale.groupby('tid').count().values[:, [0]].ravel() print(len(interazioni), interazioni) mask_cut_interazioni = np.where(interazioni>=num_interactions_cut )[0] print(len(mask_cut_interazioni),mask_cut_interazioni) train_finale = train_finale[train_finale.tid.isin(mask_cut_interazioni)] train_finale.drop('quantity', axis=1, inplace=True) train_finale['quantity'] = np.zeros(len(train_finale)) train_finale['quantity'] = train_finale.groupby(train_finale['pid']).transform('count') train_finale = train_finale[train_finale.quantity>=5] # # riepilogo riepilogo(train_finale) # # scelgole target pl playlist_con_almeno_10 = train_finale[(train_finale.quantity>=10)] test_pl = np.sort(np.random.choice(np.unique(playlist_con_almeno_10.pid.values), 10000,replace=False)) prime_5k_pl = test_pl[:5000] prime_5k_pl seconde_5k_pl = test_pl[5000:10000] seconde_5k_pl # + len( prime_5k_pl)+len( seconde_5k_pl) # - # # init TRAIN_DATASET = pd.DataFrame({'pid' : [] , 'tid':[]}) TEST_DATASET = pd.DataFrame({'pid' : [] , 'tid':[]}) print(len(train_finale)) train_finale.head() # + TRAIN_DATASET = 
pd.DataFrame({'pid' : [] , 'tid':[]}, dtype=np.int32) TEST_DATASET = pd.DataFrame({'pid' : [] , 'tid':[]}, dtype=np.int32) df_test = 0 df_train = 0 df = 0 set_tracce_tolte = set() set_tracce_riman = set() count_tolte = 0 tolte_display = list() rimanenti_display = list() # - # # taglio 20% delle prime 5k, solo ultime track # # # train for pl in tqdm(np.unique(train_finale.pid.values)): df = train_finale[train_finale.pid == pl ] if pl in prime_5k_pl: pass elif pl in seconde_5k_pl: pass else: df = df.sample(frac=1).reset_index(drop=True) TRAIN_DATASET = TRAIN_DATASET.append( df[['pid','tid']]) # # prime, sequenziali for pl in tqdm(prime_5k_pl): df = train_finale[train_finale.pid == pl ] df = df.sort_values(['pos']) da_togliere = int(round(len(df)*0.2)) + random.randint(0, 1) da_tenere = len(df)-da_togliere df_train = df[:-da_togliere] df_test = df[da_tenere:] TRAIN_DATASET = TRAIN_DATASET.append( df_train[['pid','tid']]) TEST_DATASET = TEST_DATASET.append( df_test[['pid','tid']]) # # seconde, random # + for pl in tqdm(seconde_5k_pl): df = train_finale[train_finale.pid == pl ] da_togliere = int(round(len(df)*0.2)) + random.randint(0, 1) da_tenere = len(df)-da_togliere canzoni = TRAIN_DATASET.tid.values prog = 0 while True: df = df.sample(frac=1).reset_index(drop=True) da_togliere = math.ceil(len(df)*0.2) df_train = df[:-da_togliere] df_test = df[da_tenere:] tolta_da_non_togliere = False for tid_to_check in df_test.tid.values: if tid_to_check not in canzoni: tolta_da_non_togliere=True if not tolta_da_non_togliere: break if prog ==10: print('prog') break prog+=1 TRAIN_DATASET = TRAIN_DATASET.append( df_train[['pid','tid']]) TEST_DATASET = TEST_DATASET.append( df_test[['pid','tid']]) # - print( len(TRAIN_DATASET), len(TEST_DATASET), len(TRAIN_DATASET)+len(TEST_DATASET)) print(len(train_finale)) print( len(TRAIN_DATASET), len(TEST_DATASET), len(TRAIN_DATASET)+len(TEST_DATASET)) print(len(train_finale)) print(len(np.unique(TRAIN_DATASET.pid.values))) print(len(np.unique(TEST_DATASET.pid.values))) # # check len(np.unique(train_finale.pid.values)) import gc gc.collect() del(train,playlists ) del(train_cut) del(urm) gc.collect() len(np.unique(train_finale.tid.values)) len(np.unique(TRAIN_DATASET.pid.values)) len( np.unique(TEST_DATASET.pid.values) ) # # controlli pre scrittura file TRAIN_DATASET.reset_index(inplace=True, drop=True) TRAIN_DATASET TEST_DATASET.reset_index(inplace=True, drop=True) TEST_DATASET # # SHUFFLE # ### shuffle playlist # + playlists_ids = np.sort(np.unique(TRAIN_DATASET.pid.values)) my_range_p = np.arange(len(playlists_ids)) np.random.shuffle(my_range_p) print(len(playlists_ids), len(my_range_p)) dict_playlists = dict(zip(playlists_ids, my_range_p)) # dict_playlists # + tracks_ids = np.sort(np.unique(TRAIN_DATASET.tid.values)) my_range_t = np.arange(len(tracks_ids)) np.random.shuffle(my_range_t) print(len(tracks_ids), len(my_range_t)) dict_tracks = dict(zip(tracks_ids, my_range_t)) # dict_tracks # - # # dict fatti> dict_playlists dict_tracks TRAIN_DATASET['new_tid'] = TRAIN_DATASET['tid'].map(dict_tracks) TRAIN_DATASET['new_pid'] = TRAIN_DATASET['pid'].map(dict_playlists) TRAIN_DATASET TRAIN_DATASET[TRAIN_DATASET.isnull().any(axis=1)] TEST_DATASET['new_tid'] = TEST_DATASET['tid'].map(dict_tracks) TEST_DATASET['new_pid'] = TEST_DATASET['pid'].map(dict_playlists) TEST_DATASET TEST_DATASET[TEST_DATASET.isnull().any(axis=1)] TEST_DATASET.dropna(inplace=True) TEST_DATASET[TEST_DATASET.isnull().any(axis=1)] # # SCRITTURA DI TRAIN recsys18_train = TRAIN_DATASET.copy() 
recsys18_train.drop(['pid','tid'], axis=1, inplace=True) recsys18_train.rename(index=str, columns={"new_pid": "playlist_id", "new_tid": "track_id"}, inplace=True) recsys18_train recsys18_train.sort_values(['playlist_id'],inplace=True) recsys18_train = recsys18_train.reset_index(drop=True) recsys18_train = recsys18_train[['playlist_id','track_id']] recsys18_train recsys18_train.to_csv("recsys18_train.csv", sep=',',index=False) # # SCRITTURA DI TEST recsys18_test = TEST_DATASET.copy() recsys18_test.drop(['pid','tid'], axis=1, inplace=True) recsys18_test.rename(index=str, columns={"new_pid": "playlist_id", "new_tid": "track_id"}, inplace=True) recsys18_test.sort_values(['playlist_id'],inplace=True) recsys18_test = recsys18_test.reset_index(drop=True) recsys18_test = recsys18_test[['playlist_id','track_id']] recsys18_test.track_id = recsys18_test.track_id.astype(np.int32) recsys18_test.head() recsys18_test.to_csv("recsys18_test_per_noi.csv", sep=',',index=False) test_series = recsys18_test.groupby('playlist_id')['track_id'].apply(list) test_indices = test_series.index test_values = test_series.values file = open("recsys18_solution.csv","w") file.write("playlist_id, track_ids\n") for i, target_pl_tmp in enumerate(test_indices): file.write(str(target_pl_tmp)+",") for num_to_write in test_values[i]: file.write(str(num_to_write)+" ") file.write("\n") file.close() # # WRITE SAMPLE SUBMISSION file = open("recsys18_sample_submission.csv","w") file.write("playlist_id, track_ids\n") for i, target_pl_tmp in enumerate(test_indices): file.write(str(target_pl_tmp)+",") for j in range(0,9): file.write(str(j)+" ") file.write(str(j+1)) file.write("\n") file.close() # # scrittura di target target_sequenziali = np.sort(np.array([dict_playlists[x] for x in prime_5k_pl])) print(len(target_sequenziali),np.sort( np.array(target_sequenziali))) target_rng = np.sort(np.array([dict_playlists[x] for x in seconde_5k_pl])) print(len(target_rng), np.sort(np.array(target_rng))) target_playlists = pd.DataFrame({'target_playlist' : [] }) target_playlists['target_playlist'] = np.concatenate([target_sequenziali,target_rng]) target_playlists.to_csv("recsys18_target_playlists.csv", sep=',',index=False) # # TRACKS.csv tracks = pd.read_csv("original/tracks.csv", sep='\t') tracks[tracks.tid.isin(TRAIN_DATASET.tid)] tracks = tracks[tracks.tid.isin(TRAIN_DATASET.tid)] tracks['track_id'] = tracks['tid'].map(dict_tracks) # + def apply_random_perc_and_cut(x): x = x + x*random.choice(list(range(-20,0))+list(range(1,21)))*0.01 x = x/1000 return int(round(x)) tracks['duration_sec'] = tracks['duration_ms'].apply(lambda x: apply_random_perc_and_cut(x)) tracks # - # ### albums e artisti da dentro tracks # + album_ids = np.sort(np.unique(tracks.alid.values)) artist_ids = np.sort(np.unique(tracks.arid.values)) my_range_album = np.arange(len(album_ids)) my_range_artist = np.arange(len(artist_ids)) np.random.shuffle(my_range_album) np.random.shuffle(my_range_artist) print("albums:",len(album_ids), len(my_range_album)) print("artist:",len(artist_ids), len(my_range_artist)) dict_albums = dict(zip(album_ids, my_range_album)) dict_artist = dict(zip(artist_ids, my_range_artist)) # - tracks['album_id'] = tracks['alid'].map(dict_albums) tracks['artist_id'] = tracks['arid'].map(dict_artist) recsys18_tracks = tracks[['track_id','album_id','artist_id','duration_sec']] recsys18_tracks = recsys18_tracks.sort_values('track_id') recsys18_tracks.reset_index(drop=True,inplace=True) recsys18_tracks.to_csv("recsys18_tracks.csv", sep=',',index=False) # # 
PLAYLISTS.csv playlists = pd.read_csv("original/train_playlists.csv", sep='\t') playlists = playlists[playlists.pid.isin(TRAIN_DATASET.pid.values)] playlists['playlist_id'] = playlists['pid'].map(dict_playlists) recsys18_playlists = playlists[['playlist_id','num_edits','num_followers']] recsys18_playlists = recsys18_playlists.sort_values('playlist_id') recsys18_playlists.reset_index(drop=True,inplace=True) recsys18_playlists.head() recsys18_playlists.to_csv("recsys18_playlists.csv", sep=',',index=False) # # ALBUMS.csv albums = pd.read_csv("original/albums.csv", sep='\t') albums = albums[albums.alid.isin(tracks.alid.values)] albums['album_id'] = albums['alid'].map(dict_albums) albums.head() recsys18_albums = albums[['album_id']] recsys18_albums = recsys18_albums.sort_values('album_id') recsys18_albums.reset_index(drop=True,inplace=True) recsys18_albums.head() recsys18_albums.to_csv("recsys18_albums.csv", sep=',',index=False) # # ARTIST.csv # + artists = pd.read_csv("original/artists.csv", sep='\t') artists = artists[artists.arid.isin(tracks.arid.values)] artists['artist_id'] = artists['arid'].map(dict_artist) recsys18_artists = artists[['artist_id']] recsys18_artists = recsys18_artists.sort_values('artist_id') recsys18_artists.reset_index(drop=True,inplace=True) recsys18_artists.head() # - recsys18_artists.to_csv("recsys18_artists.csv", sep=',',index=False) # # DICTS # + import json def json_write(name, data): with open(name+'.json', 'w') as fp: json.dump(data, fp) def json_read(name): with open(name+'.json', 'r') as fp: data = json.load(fp) return data # + json_write("recsys18_dict_albums",dict_albums) json_write("recsys18_dict_artist",dict_artist) json_write("recsys18_dict_playlists",dict_playlists) json_write("recsys18_dict_tracks",dict_tracks) dict_albums dict_artist dict_playlists dict_tracks
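# A quick consistency check on the split written above: every playlist in the test set
# should still have interactions in the training set (only about 20 % of its tracks were
# held out), and ideally every test track also occurs somewhere in the training
# interactions. This is just a sanity-check sketch on the dataframes already in memory.

# +
train_pls = set(recsys18_train.playlist_id.unique())
test_pls = set(recsys18_test.playlist_id.unique())
train_trs = set(recsys18_train.track_id.unique())
test_trs = set(recsys18_test.track_id.unique())

print("test playlists missing from train:", len(test_pls - train_pls))
print("test tracks missing from train:   ", len(test_trs - train_trs))
# -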
splitter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from flockers import * import numpy as np import matplotlib.pyplot as plt # %matplotlib inline def draw_boids(model): x_vals = [] y_vals = [] for boid in model.schedule.agents: x, y = boid.pos x_vals.append(x) y_vals.append(y) fig = plt.figure(figsize=(10,10)) ax = fig.add_subplot(111) ax.scatter(x_vals, y_vals) model = BoidModel(100, 100, 100, speed=5, vision=5, separation=1) for i in range(50): model.step() draw_boids(model)
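# In addition to the scatter plot, a simple cohesion measure (mean distance of the boids
# to the flock centroid) shows how the flock evolves over the steps. This is a small
# sketch that only uses the attributes already accessed in draw_boids; the metric itself
# is an illustrative choice, not part of the Flockers example.

# +
def mean_distance_to_centroid(model):
    positions = np.array([boid.pos for boid in model.schedule.agents])
    centroid = positions.mean(axis=0)
    return np.linalg.norm(positions - centroid, axis=1).mean()

model = BoidModel(100, 100, 100, speed=5, vision=5, separation=1)
cohesion = []
for i in range(50):
    model.step()
    cohesion.append(mean_distance_to_centroid(model))

plt.plot(cohesion)
plt.xlabel('step')
plt.ylabel('mean distance to centroid')
plt.show()
# -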
examples/Flockers/Flocker Test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/TMUITLab/EAFR/blob/master/EA6_break_2_sub.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="Mv8gecYb39Zy" # !git clone https://github.com/TMUITLab/EAFR # !wget -O 'data.zip' 'https://efsgpq-ch3301.files.1drv.com/y4muoyVficiYL6mAlmm7s9m65fhNRboLtxg7FmaufA9QzY2tVhsyi-nXNtgahgN8NhrumVKCHB-d_lfi_5OTy1e5NFe2walhCu2Z1zF3zcp_hammSHuJHk5BeG6YbT7STynmA3SDPP39sNzn9V2Iv2suqlHkIrDRvRuvvM_r6IKuiRmJ35YirCUrY_Rojf5d-oQrxyQTj86Wz70JyiwrAYxfA' # !unzip '/content/data.zip' -d '/content/EAFR' # !pip install torch-scatter -f https://data.pyg.org/whl/torch-1.10.0+cu111.html # !pip install torch-sparse -f https://data.pyg.org/whl/torch-1.10.0+cu111.html # !pip install torch-geometric # !pip install igraph # !git pull https://github.com/TMUITLab/EAFR # + id="6NSkNdKdtj1a" # %cd '/content/EAFR' # !CUDA_VISIBLE_DEVICES=0 python3 run.py # + id="5h5lXsCs4mR3" # !fuser -v /dev/nvidia* # + id="7Z4iLewwRq_u" # !kill -9 1661 # + id="kCsR8niV5tpH" # !nvcc --version https://github.com/MaoXinn/RREA # + id="hwZ9zE0n-vpZ" colab={"base_uri": "https://localhost:8080/"} outputId="587eb546-e80c-439b-c42a-2e10e7771dc3" # !python -c "import torch; print(torch.__version__)" # !python -c "import torch; print(torch.version.cuda)" # + id="ijyhjY1p5EnG" # !git pull https://github.com/TMUITLab/EAFR # + id="nim_lcF35NhG" # %cd '/content/EAFR' # !CUDA_VISIBLE_DEVICES=0 python3 run.py # + id="m_DHAfBCiV1r" # !git clone 'https://github.com/vinhsuhi/EMGCN' # + id="Uvfm_eLo-hdT" import requests def download_file_from_google_drive(id, destination): URL = "https://docs.google.com/uc?export=download" session = requests.Session() response = session.get(URL, params = { 'id' : id }, stream = True) token = get_confirm_token(response) if token: params = { 'id' : id, 'confirm' : token } response = session.get(URL, params = params, stream = True) save_response_content(response, destination) def get_confirm_token(response): for key, value in response.cookies.items(): if key.startswith('download_warning'): return value return None def save_response_content(response, destination): CHUNK_SIZE = 32768 with open(destination, "wb") as f: for chunk in response.iter_content(CHUNK_SIZE): if chunk: # filter out keep-alive new chunks f.write(chunk) # + id="p_lCpFlo95Z0" # %cd /content/ download_file_from_google_drive('12XL08tB8zplCNhzLE-9qbsFFum7RoV6r','emgcn.rar') # !pip install patool import patoolib patoolib.extract_archive("/content/emgcn.rar", outdir="/content/EMGCN/") # + id="IMQwyPaP-TQI" # %cd '/content/EMGCN' # !python -u network_alignment.py --dataset_name zh_en --source_dataset data/networkx/zh_enDI/zh/graphsage/ --target_dataset data/networkx/zh_enDI/en/graphsage --groundtruth data/networkx/zh_enDI/dictionaries/groundtruth EMGCN --sparse --log # + id="p-rVgHOzSMNK" # !pip install torch-scatter -f https://data.pyg.org/whl/torch-1.10.0+cu111.html # !pip install torch-sparse -f https://data.pyg.org/whl/torch-1.10.0+cu111.html # !pip install torch-geometric # + id="Eotb-M7RyvfH" # %cd '/content' # !git clone https://github.com/zhurboo/RAGA # + id="6aw346iWS4CL" download_file_from_google_drive('1uJ2omzIs0NCtJsGQsyFCBHCXUhoK1mkO','/content/RAGA/data.tar.gz') # + id="Qh-_IAUFTOzV" 
outputId="aa6da7be-03a7-49de-a53d-fbb02bff94af" colab={"base_uri": "https://localhost:8080/"} # %cd '/content/RAGA' # !tar -xf data.tar.gz # + id="tIshNjf2OnKv" outputId="95441fa7-ae6e-4877-bb17-5c03191430bb" colab={"base_uri": "https://localhost:8080/"} # %%writefi!le setup.sh git clone https://github.com/NVIDIA/apex pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./apex # + id="IVdwj8EhXAkv" # !sh setup.sh # + id="wFL8jtmeOakt" # %cd '/content/RAGA' # !python train.py # + colab={"base_uri": "https://localhost:8080/"} id="WjActARX7sCY" outputId="3621522f-7eb5-4e17-a04a-3bccdcf11053" # %cd '/content' # !git clone https://github.com/1049451037/GCN-Align # + colab={"base_uri": "https://localhost:8080/"} id="0G6_LeBQ3PFb" outputId="13999c44-4ddc-4c75-dd7c-03798800f256" # %cd '/content' # !git clone https://github.com/MaoXinn/RREA # + id="ZB2YBVkb4pYB" import warnings warnings.filterwarnings('ignore') from importlib.machinery import SourceFileLoader layer = SourceFileLoader("layer", "/content/RREA/CIKM/layer.py").load_module() utils = SourceFileLoader("utils", "/content/RREA/CIKM/utils.py").load_module() CSLS = SourceFileLoader("CSLS", "/content/RREA/CIKM/CSLS.py").load_module() import tensorflow as tf import os import random import keras from tqdm import * import numpy as np from utils import * from CSLS import * import tensorflow as tf import keras.backend as K from keras.layers import * from layer import NR_GraphAttention os.environ["CUDA_VISIBLE_DEVICES"] = "0" os.environ["TF_CPP_MIN_LOG_LEVEL"]="2" tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) # + colab={"base_uri": "https://localhost:8080/"} id="oYvCCmAe62rP" outputId="3e2d686c-4d3f-4868-c8b2-f05d3397242e" lang = 'zh' radj,train_pair,dev_pair,adj_matrix,r_index,r_val,adj_features,rel_features = load_data('/content/GCN-Align/data/%s_en/'%lang,train_ratio=0.30) train_pair_main=train_pair adj_matrix = np.stack(adj_matrix.nonzero(),axis = 1) rel_matrix,rel_val = np.stack(rel_features.nonzero(),axis = 1),rel_features.data ent_matrix,ent_val = np.stack(adj_features.nonzero(),axis = 1),adj_features.data # + colab={"base_uri": "https://localhost:8080/"} id="CGPkuTC4lYS6" outputId="586a7930-027f-43c9-a826-1a3d232facfd" print(r_index[:,0].max(),adj_matrix.shape,r_val.__len__()) # + id="qjqNt7oeBXjK" entity1, rel1, triples1 = load_triples('/content/GCN-Align/data/%s_en/'%lang + 'triples_1') num_entity_1 = len(entity1) num_rel_1 = len(rel1) # + id="t29mIfJT68i3" layer = SourceFileLoader("layer", "/content/RREA/CIKM/layer.py").load_module() from layer import NR_GraphAttention,N3,AvgClass tf.keras.backend.clear_session() node_size = adj_features.shape[0] rel_size = rel_features.shape[1] triple_size = len(adj_matrix) batch_size = node_size class TokenEmbedding(keras.layers.Embedding): """Embedding layer with weights returned.""" def compute_output_shape(self, input_shape): return self.input_dim, self.output_dim def compute_mask(self, inputs, mask=None): return None def call(self, inputs): return self.embeddings def get_embedding(): inputs = [adj_matrix,r_index,r_val,rel_matrix,ent_matrix,radj] inputs = [np.expand_dims(item,axis=0) for item in inputs] return get_emb.predict_on_batch(inputs) def get_losses_values(): inputs = [adj_matrix,r_index,r_val,rel_matrix,ent_matrix,radj,train_set] inputs = [np.expand_dims(item,axis=0) for item in inputs] return get_losses.predict_on_batch(inputs ) def test(wrank = None): vec = get_embedding() return get_hits(vec,dev_pair,wrank=wrank) def 
CSLS_test(thread_number = 16, csls=10,accurate = True): A = 0 for [model,get_losses,get_losses] in models: vec = get_embedding() Lvec = np.array([vec[e1] for e1, e2 in dev_pair]) Rvec = np.array([vec[e2] for e1, e2 in dev_pair]) Lvec = Lvec / np.linalg.norm(Lvec,axis=-1,keepdims=True) Rvec = Rvec / np.linalg.norm(Rvec,axis=-1,keepdims=True) A += sim_handler(Lvec, Rvec, csls, thread_number) eval_alignment_by_mat(A, [1, 5, 10], thread_number, csls=csls, accurate=accurate) return None def get_train_set(batch_size = batch_size): negative_ratio = batch_size // len(train_pair) + 1 train_set = np.reshape(np.repeat(np.expand_dims(train_pair,axis=0),axis=0,repeats=negative_ratio),newshape=(-1,2)) np.random.shuffle(train_set); train_set = train_set[:batch_size] train_set = np.concatenate([train_set,np.random.randint(0,node_size,[train_set.shape[0],3])],axis = -1) return train_set def get_train_set1(batch_size = batch_size): train_set = train_pair np.random.shuffle(train_set); train_set = np.concatenate([train_set,np.random.randint(0,node_size,train_set.shape)],axis = -1) return train_set def get_trgat(node_size,rel_size,node_hidden,rel_hidden,triple_size,n_attn_heads = 2,dropout_rate = 0,gamma = 3,lr = 0.005,depth = 2): adj_input = Input(shape=(None,2)) index_input = Input(shape=(None,2),dtype='int64') val_input = Input(shape = (None,)) rel_adj = Input(shape=(None,2)) ent_adj = Input(shape=(None,2)) radj = Input(shape=(None,3),dtype='int64') ent_emb = TokenEmbedding(node_size,node_hidden,trainable = True)(val_input) rel_emb = TokenEmbedding(rel_size,node_hidden,trainable = True)(val_input) E = TokenEmbedding(node_hidden,node_hidden,trainable = True)(val_input) R = TokenEmbedding(node_hidden,node_hidden,trainable = True)(val_input) # avg = AvgClass(node_size,rel_size) # ent_feature,rel_feature = avg([ent_emb,rel_emb,radj]) def avg(tensor,size,highway = None): adj = K.cast(K.squeeze(tensor[0],axis = 0),dtype = "int64") adj = tf.SparseTensor(indices=adj, values=tf.ones_like(adj[:,0],dtype = 'float32'), dense_shape=(node_size,size)) adj = tf.compat.v1.sparse_softmax(adj) l_adj = tf.compat.v1.sparse_tensor_dense_matmul(adj,tensor[1]) if highway != None: #l_adj = highway([l_adj,tensor[1]]) l_adj = 0.5 * l_adj + 0.5 * tensor[1] return l_adj rel_feature = Lambda(avg,arguments={'size':rel_size})([rel_adj,rel_emb]) ent_feature = Lambda(avg,arguments={'size':node_size})([ent_adj,ent_emb]) encoder = NR_GraphAttention(node_size,activation="relu", rel_size = rel_size, depth = depth, attn_heads=n_attn_heads, triple_size = triple_size, attn_heads_reduction='average', dropout_rate=dropout_rate) opt1 = [rel_emb,adj_input,index_input,val_input,radj] #att = Attention((node_size,node_size)) elements = [ent_emb] + encoder([rel_feature]+opt1)+encoder([ent_feature]+opt1) #elements = [0.3 * ent_emb[:,:node_hidden//2]]+ el[0:3]+el[6:9]+[0.2 * ent_emb[:,node_hidden//2:]] + el[3:6]+el[9:12] num_el = elements.__len__() weight = tf.Variable(1 * [-0.5] + (num_el-1) * [1.0], trainable=False) weight = 5 * tf.math.softmax(weight) elements = [weight[idx]*el for idx, el in enumerate(elements)] out_feature = Concatenate(-1)(elements) out_feature = Dropout(dropout_rate)(out_feature) alignment_input = Input(shape=(None,5),dtype = "int32") #find = Lambda(lambda x:K.gather(reference=x[0],indices=K.cast(K.squeeze(x[1],axis=0), 'int32')))([out_feature,alignment_input]) I = K.squeeze(alignment_input,axis=0) A = K.sum(K.abs(K.gather(out_feature,I[:,0])-K.gather(out_feature,I[:,1])),axis=-1,keepdims=True) B = 
K.sum(K.abs(K.gather(out_feature,I[:,0])-K.gather(out_feature,I[:,3])),axis=-1,keepdims=True) C = K.sum(K.abs(K.gather(out_feature,I[:,2])-K.gather(out_feature,I[:,1])),axis=-1,keepdims=True) D = K.sum(K.abs(K.gather(out_feature,I[:,0])-K.gather(out_feature,I[:,2])),axis=-1,keepdims=True) E = K.sum(K.abs(K.gather(out_feature,I[:,3])-K.gather(out_feature,I[:,1])),axis=-1,keepdims=True) A1 = K.sum(K.abs(K.gather(out_feature,I[:,0])-K.gather(out_feature,I[:,1])),axis=0,keepdims=True) losses = K.mean(tf.reshape(A1,[-1,node_hidden]),axis=-1)/ (batch_size) loss = K.relu(gamma + A - B) + K.relu(gamma + A-C)+ K.relu(gamma + A-D)+ K.relu(gamma + A-E) loss = tf.compat.v1.reduce_sum(loss,keep_dims=True) / (batch_size) inputs = [adj_input,index_input,val_input,rel_adj,ent_adj,radj] train_model = keras.Model(inputs = inputs + [alignment_input],outputs = loss) train_model.compile(loss=lambda y_true,y_pred: y_pred,optimizer=tf.keras.optimizers.RMSprop(lr=lr)) feature_model = keras.Model(inputs = inputs,outputs = out_feature) losses_model = keras.Model(inputs = inputs + [alignment_input],outputs = losses) return train_model,feature_model,losses_model # + id="Nwoj1iWf69WX" models = [] for i in range(5): model,get_emb,get_losses = get_trgat(dropout_rate=0.30,node_size=node_size,rel_size=rel_size,n_attn_heads = 1,depth=2,gamma =3,node_hidden=20,rel_hidden = 20,triple_size = triple_size) #model.summary(); initial_weights = model.get_weights() models.append([model,get_losses,get_losses]) # + colab={"base_uri": "https://localhost:8080/"} id="lMdo-YeF7Hp_" outputId="d5707e08-34b0-4779-a39f-550804dcd6f5" train_pair = train_pair_main tf.keras.backend.clear_session() config = tf.compat.v1.ConfigProto() config.gpu_options.allow_growth=True sess = tf.compat.v1.Session(config=config) rest_set_1 = [e1 for e1, e2 in dev_pair] rest_set_2 = [e2 for e1, e2 in dev_pair] np.random.shuffle(rest_set_1) np.random.shuffle(rest_set_2) epoch = 1200 for turn in range(5): print("iteration %d start."%turn) for i in trange(epoch): train_set = get_train_set() inputs = [adj_matrix,r_index,r_val,rel_matrix,ent_matrix,radj,train_set] inputs = [np.expand_dims(item,axis=0) for item in inputs] for [model,_,_] in models: model.train_on_batch(inputs,np.zeros((1,1))) if i%100 == 99: #print(get_losses_values()) CSLS_test() new_pair = [] vec = get_embedding() Lvec = np.array([vec[e] for e in rest_set_1]) Rvec = np.array([vec[e] for e in rest_set_2]) Lvec = Lvec / np.linalg.norm(Lvec,axis=-1,keepdims=True) Rvec = Rvec / np.linalg.norm(Rvec,axis=-1,keepdims=True) A,_ = eval_alignment_by_sim_mat(Lvec, Rvec, [1, 5, 10], 16,10,True,False) B,_ = eval_alignment_by_sim_mat(Rvec, Lvec,[1, 5, 10], 16,10,True,False) A = sorted(list(A)); B = sorted(list(B)) for a,b in A: if B[b][1] == a: new_pair.append([rest_set_1[a],rest_set_2[b]]) print("generate new semi-pairs: %d." 
% len(new_pair)) train_pair = np.concatenate([train_pair,np.array(new_pair)],axis = 0) for e1,e2 in new_pair: if e1 in rest_set_1: rest_set_1.remove(e1) for e1,e2 in new_pair: if e2 in rest_set_2: rest_set_2.remove(e2) # + id="WB2inR--Jwyy" outputId="d0b2d3c5-0dbc-47ca-c546-1b8f77de9b56" colab={"base_uri": "https://localhost:8080/"} # !git clone https://github.com/MaoXinn/MRAEA # + id="83YKGWHdVOLz" # + id="QjQ1F-zW-VvX" import os import tqdm import numpy as np import tensorflow as tf import keras from importlib.machinery import SourceFileLoader utils = SourceFileLoader("utils", "/content/MRAEA/utils.py").load_module() model = SourceFileLoader("model", "/content/MRAEA/model.py").load_module() from utils import * from model import * os.environ["CUDA_VISIBLE_DEVICES"] = "0" os.environ["TF_CPP_MIN_LOG_LEVEL"]="2" tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) config = tf.compat.v1.ConfigProto() config.gpu_options.allow_growth=True sess = tf.compat.v1.Session(config=config) # + id="fWmEVYhMJ0-s" outputId="2082368b-5f75-4948-8331-a9ff8a7279cd" colab={"base_uri": "https://localhost:8080/"} lang = 'zh' train_pair,test_pair,adj_matrix,r_index,r_val,adj_features,rel_features = load_data('/content/GCN-Align/data/%s_en/'%lang,train_ratio=0.3) adj_matrix = np.stack(adj_matrix.nonzero(),axis = 1) rel_matrix,rel_val = np.stack(rel_features.nonzero(),axis = 1),rel_features.data ent_matrix,ent_val = np.stack(adj_features.nonzero(),axis = 1),adj_features.data # + id="1OROgbA2J2fa" outputId="45effb85-0cc3-4761-a041-006da6a9004b" colab={"base_uri": "https://localhost:8080/"} model = SourceFileLoader("model", "/content/MRAEA/model.py").load_module() node_size = adj_features.shape[1] rel_size = rel_features.shape[1] triple_size = len(adj_matrix) batch_size = node_size model,get_emb = get_model(lr=0.001,dropout_rate=0.30,node_size=node_size,rel_size=rel_size,n_attn_heads = 2, depth=2,gamma = 3,node_hidden=100,rel_hidden = 100,triple_size = triple_size,batch_size = batch_size) model.summary(); # + id="_EFRVgwIJ3yT" def get_train_set(batch_size,train_pair): negative_ratio = batch_size // len(train_pair) + 1 train_set = np.reshape(np.repeat(np.expand_dims(train_pair,axis=0),axis=0,repeats=negative_ratio),newshape=(-1,2)) np.random.shuffle(train_set); train_set = train_set[:batch_size] train_set = np.concatenate([train_set,np.random.randint(0,node_size,train_set.shape)],axis = -1) return train_set def test(): inputs = [adj_matrix,r_index,r_val,rel_matrix,ent_matrix] inputs = [np.expand_dims(item,axis=0) for item in inputs] se_vec = get_emb.predict_on_batch(inputs) get_hits(se_vec,test_pair) print() return se_vec for epoch in tqdm.tnrange(5000): train_set = get_train_set(batch_size,train_pair) inputs = [adj_matrix,r_index,r_val,rel_matrix,ent_matrix,train_set] inputs = [np.expand_dims(item,axis=0) for item in inputs] model.train_on_batch(inputs,np.zeros((1,1))) if (epoch%1000 == 999): test() # + id="piOgQ6jLVPVa" outputId="2570e659-3f92-4266-d56c-3cbbef7e3896" colab={"base_uri": "https://localhost:8080/"} # !git clone https://github.com/MaoXinn/Dual-AMN # + id="aVawtMnSVT6i" import warnings warnings.filterwarnings('ignore') from importlib.machinery import SourceFileLoader utils = SourceFileLoader("utils", "/content/Dual-AMN/utils.py").load_module() evaluate = SourceFileLoader("evaluate", "/content/Dual-AMN/evaluate.py").load_module() layer = SourceFileLoader("layer", "/content/Dual-AMN/layer.py").load_module() import os import keras import numpy as np import numba as nb from utils import * from tqdm 
import * from evaluate import evaluate import tensorflow as tf import keras.backend as K from keras.layers import * from layer import NR_GraphAttention os.environ["CUDA_VISIBLE_DEVICES"] = "0" os.environ["TF_CPP_MIN_LOG_LEVEL"]="2" tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) config = tf.compat.v1.ConfigProto() config.gpu_options.allow_growth=True sess = tf.compat.v1.Session(config=config) seed = 12306 np.random.seed(seed) tf.compat.v1.set_random_seed(seed) # + id="qd3oaK-AWBkF" outputId="eac5a4df-91e7-4d11-9e4f-18be1dbfb61e" colab={"base_uri": "https://localhost:8080/"} train_pair,dev_pair,adj_matrix,r_index,r_val,adj_features,rel_features = load_data("/content/GCN-Align/data/zh_en/",train_ratio=0.30) adj_matrix = np.stack(adj_matrix.nonzero(),axis = 1) rel_matrix,rel_val = np.stack(rel_features.nonzero(),axis = 1),rel_features.data ent_matrix,ent_val = np.stack(adj_features.nonzero(),axis = 1),adj_features.data # + id="Cryqiq2FWMFd" node_size = adj_features.shape[0] rel_size = rel_features.shape[1] triple_size = len(adj_matrix) node_hidden = 100 rel_hidden = 100 batch_size = 1024 dropout_rate = 0.3 lr = 0.005 gamma = 1 depth = 2 # + id="7GvbgAPbWNNN" colab={"base_uri": "https://localhost:8080/", "height": 306} outputId="a99d5620-f52d-4580-b4e3-ff026211371c" layer = SourceFileLoader("layer", "/content/Dual-AMN/layer.py").load_module() from layer import NR_GraphAttention def get_embedding(index_a,index_b,vec = None): if vec is None: inputs = [adj_matrix,r_index,r_val,rel_matrix,ent_matrix] inputs = [np.expand_dims(item,axis=0) for item in inputs] vec = get_emb.predict_on_batch(inputs) Lvec = np.array([vec[e] for e in index_a]) Rvec = np.array([vec[e] for e in index_b]) Lvec = Lvec / (np.linalg.norm(Lvec,axis=-1,keepdims=True)+1e-5) Rvec = Rvec / (np.linalg.norm(Rvec,axis=-1,keepdims=True)+1e-5) return Lvec,Rvec class TokenEmbedding(keras.layers.Embedding): """Embedding layer with weights returned.""" def compute_output_shape(self, input_shape): return self.input_dim, self.output_dim def compute_mask(self, inputs, mask=None): return None def call(self, inputs): return self.embeddings def get_trgat(node_hidden,rel_hidden,triple_size=triple_size,node_size=node_size,rel_size=rel_size,dropout_rate = 0,gamma = 3,lr = 0.005,depth = 2): adj_input = Input(shape=(None,2)) index_input = Input(shape=(None,2),dtype='int64') val_input = Input(shape = (None,)) rel_adj = Input(shape=(None,2)) ent_adj = Input(shape=(None,2)) ent_emb = TokenEmbedding(node_size,node_hidden,trainable = True)(val_input) rel_emb = TokenEmbedding(rel_size,node_hidden,trainable = True)(val_input) def avg(tensor,size): adj = K.cast(K.squeeze(tensor[0],axis = 0),dtype = "int64") adj = tf.SparseTensor(indices=adj, values=tf.ones_like(adj[:,0],dtype = 'float32'), dense_shape=(node_size,size)) adj = tf.compat.v1.sparse_softmax(adj) return tf.compat.v1.sparse_tensor_dense_matmul(adj,tensor[1]) opt = [rel_emb,adj_input,index_input,val_input] ent_feature = Lambda(avg,arguments={'size':node_size})([ent_adj,ent_emb]) rel_feature = Lambda(avg,arguments={'size':rel_size})([rel_adj,rel_emb]) e_encoder = NR_GraphAttention(node_size,activation="tanh", rel_size = rel_size, use_bias = True, depth = depth, triple_size = triple_size) r_encoder = NR_GraphAttention(node_size,activation="tanh", rel_size = rel_size, use_bias = True, depth = depth, triple_size = triple_size) out_feature = Concatenate(-1)([e_encoder([ent_feature]+opt),r_encoder([rel_feature]+opt)]) out_feature = Dropout(dropout_rate)(out_feature) alignment_input = 
Input(shape=(None,2)) def align_loss(tensor): def squared_dist(x): A,B = x row_norms_A = tf.reduce_sum(tf.square(A), axis=1) row_norms_A = tf.reshape(row_norms_A, [-1, 1]) # Column vector. row_norms_B = tf.reduce_sum(tf.square(B), axis=1) row_norms_B = tf.reshape(row_norms_B, [1, -1]) # Row vector. return row_norms_A + row_norms_B - 2 * tf.matmul(A, B,transpose_b=True) emb = tensor[1] l,r = K.cast(tensor[0][0,:,0],'int32'),K.cast(tensor[0][0,:,1],'int32') l_emb,r_emb = K.gather(reference=emb,indices=l),K.gather(reference=emb,indices=r) pos_dis = K.sum(K.square(l_emb-r_emb),axis=-1,keepdims=True) r_neg_dis = squared_dist([r_emb,emb]) l_neg_dis = squared_dist([l_emb,emb]) l_loss = pos_dis - l_neg_dis + gamma l_loss = l_loss *(1 - K.one_hot(indices=l,num_classes=node_size) - K.one_hot(indices=r,num_classes=node_size)) r_loss = pos_dis - r_neg_dis + gamma r_loss = r_loss *(1 - K.one_hot(indices=l,num_classes=node_size) - K.one_hot(indices=r,num_classes=node_size)) r_loss = (r_loss - K.stop_gradient(K.mean(r_loss,axis=-1,keepdims=True))) / K.stop_gradient(K.std(r_loss,axis=-1,keepdims=True)) l_loss = (l_loss - K.stop_gradient(K.mean(l_loss,axis=-1,keepdims=True))) / K.stop_gradient(K.std(l_loss,axis=-1,keepdims=True)) lamb,tau = 30, 10 l_loss = K.logsumexp(lamb*l_loss+tau,axis=-1) r_loss = K.logsumexp(lamb*r_loss+tau,axis=-1) return K.mean(l_loss + r_loss) loss = Lambda(align_loss)([alignment_input,out_feature]) inputs = [adj_input,index_input,val_input,rel_adj,ent_adj] train_model = keras.Model(inputs = inputs + [alignment_input],outputs = loss) train_model.compile(loss=lambda y_true,y_pred: y_pred,optimizer=tf.keras.optimizers.RMSprop(lr)) feature_model = keras.Model(inputs = inputs,outputs = out_feature) return train_model,feature_model # + id="nOEzAJqbWUwt" outputId="a209cc6e-9c88-41b0-aee1-3a4bcecee0f7" colab={"base_uri": "https://localhost:8080/"} model,get_emb = get_trgat(dropout_rate=dropout_rate, node_size=node_size, rel_size=rel_size, depth=depth, gamma =gamma, node_hidden=node_hidden, rel_hidden=rel_hidden, lr=lr) evaluater = evaluate(dev_pair) model.summary() # + [markdown] id="rofNjxmJXeZb" # # + id="vaq-VKGCWXWU" outputId="5696af3d-d30e-4ae0-ba7f-26a6ee59263e" colab={"base_uri": "https://localhost:8080/", "height": 675} rest_set_1 = [e1 for e1, e2 in dev_pair] rest_set_2 = [e2 for e1, e2 in dev_pair] np.random.shuffle(rest_set_1) np.random.shuffle(rest_set_2) epoch = 20 for turn in range(10): for i in trange(epoch): np.random.shuffle(train_pair) for pairs in [train_pair[i*batch_size:(i+1)*batch_size] for i in range(len(train_pair)//batch_size + 1)]: if len(pairs) == 0: continue inputs = [adj_matrix,r_index,r_val,rel_matrix,ent_matrix,pairs] inputs = [np.expand_dims(item,axis=0) for item in inputs] model.train_on_batch(inputs,np.zeros((1,1))) if i==epoch-1: Lvec,Rvec = get_embedding(dev_pair[:,0],dev_pair[:,1]) evaluater.test(Lvec,Rvec) new_pair = [] Lvec,Rvec = get_embedding(rest_set_1,rest_set_2) A,B = evaluater.CSLS_cal(Lvec,Rvec,False) for i,j in enumerate(A): if B[j] == i: new_pair.append([rest_set_1[j],rest_set_2[i]]) train_pair = np.concatenate([train_pair,np.array(new_pair)],axis = 0) for e1,e2 in new_pair: if e1 in rest_set_1: rest_set_1.remove(e1) for e1,e2 in new_pair: if e2 in rest_set_2: rest_set_2.remove(e2) epoch = 5
EA6_break_2_sub.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (TensorFlow 2.1 Python 3.6 CPU Optimized)
#     language: python
#     name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/tensorflow-2.1-cpu-py36
# ---

# !pip install smdebug matplotlib

# + [markdown] papermill={"duration": 0.015745, "end_time": "2021-06-01T00:12:47.822109", "exception": false, "start_time": "2021-06-01T00:12:47.806364", "status": "completed"} tags=[]
# # Amazon SageMaker Debugger Tutorial: How to Use the Built-in Debugging Rules

# + [markdown] papermill={"duration": 0.015761, "end_time": "2021-06-01T00:12:47.853783", "exception": false, "start_time": "2021-06-01T00:12:47.838022", "status": "completed"} tags=[]
# [Amazon SageMaker Debugger](https://docs.aws.amazon.com/sagemaker/latest/dg/train-debugger.html) is a feature that lets you debug the training jobs of your machine learning model and identify training problems in real time. Even when a training job appears to run smoothly, the model can still suffer from common problems such as a non-decreasing loss, overfitting, or underfitting. To understand what is going on, practitioners have to debug the training job, yet tracking and analyzing all of the output tensors by hand is challenging.
#
# SageMaker Debugger covers the major deep learning frameworks (TensorFlow, PyTorch, and MXNet) and the XGBoost machine learning algorithm, so you can debug training jobs with minimal coding. Debugger automatically detects training problems through its built-in rules, and you can find a full list of the built-in rules for debugging at [List of Debugger Built-in Rules](https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-built-in-rules.html).
#
# In this tutorial, you will learn how to use SageMaker Debugger and its built-in rules to debug your model.
#
# The workflow is as follows:
# * [Step 1: Import SageMaker Python SDK and the Debugger client library smdebug](#step1)
# * [Step 2: Create a Debugger built-in rule list object](#step2)
# * [Step 3: Construct a SageMaker estimator](#step3)
# * [Step 4: Run the training job](#step4)
# * [Step 5: Check training progress on Studio Debugger insights dashboard and the built-in rules evaluation status](#step5)
# * [Step 6: Create a Debugger trial object to access the saved tensors](#step6)

# + [markdown] papermill={"duration": 0.015708, "end_time": "2021-06-01T00:12:47.885275", "exception": false, "start_time": "2021-06-01T00:12:47.869567", "status": "completed"} tags=[]
# <a class="anchor" id="step2"></a>
# ## Step 1: Import SageMaker Python SDK and the SMDebug client library

# + [markdown] papermill={"duration": 0.015697, "end_time": "2021-06-01T00:12:47.916797", "exception": false, "start_time": "2021-06-01T00:12:47.901100", "status": "completed"} tags=[]
# <font color='red'>**Important**</font>: To use the new Debugger features, you need to upgrade the SageMaker Python SDK and the SMDebug library. In the following cell, change the third line to `install_needed=True` and run it to upgrade the libraries.
# + papermill={"duration": 0.023813, "end_time": "2021-06-01T00:12:47.956349", "exception": false, "start_time": "2021-06-01T00:12:47.932536", "status": "completed"} tags=[] import sys import IPython install_needed = False # Set to True to upgrade if install_needed: print("installing deps and restarting kernel") # !{sys.executable} -m pip install -U sagemaker # !{sys.executable} -m pip install -U smdebug IPython.Application.instance().kernel.do_shutdown(True) # + [markdown] papermill={"duration": 0.015886, "end_time": "2021-06-01T00:12:47.988259", "exception": false, "start_time": "2021-06-01T00:12:47.972373", "status": "completed"} tags=[] # Check the SageMaker Python SDK and the SMDebug library versions. # + papermill={"duration": 0.469517, "end_time": "2021-06-01T00:12:48.473846", "exception": false, "start_time": "2021-06-01T00:12:48.004329", "status": "completed"} tags=[] import sagemaker sagemaker.__version__ # + papermill={"duration": 0.102182, "end_time": "2021-06-01T00:12:48.592688", "exception": true, "start_time": "2021-06-01T00:12:48.490506", "status": "failed"} tags=[] import smdebug smdebug.__version__ # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # <a class="anchor" id="step1"></a> # ## Step 2: Create a Debugger built-in rule list object # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] from sagemaker.debugger import Rule, ProfilerRule, rule_configs # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # The following code cell shows how to configure a rule object for debugging and profiling. For more information about the Debugger built-in rules, see [List of Debugger Built-in Rules](https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-built-in-rules.html). # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] built_in_rules = [ Rule.sagemaker(rule_configs.overfit()), ProfilerRule.sagemaker(rule_configs.ProfilerReport()), ] # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # <a class="anchor" id="step3"></a> # ## Step 3: Construct a SageMaker estimator # # Using the rule object created in the previous cell, construct a SageMaker estimator. # # The estimator can be one of the SageMaker framework estimators, [TensorFlow](https://sagemaker.readthedocs.io/en/stable/frameworks/tensorflow/sagemaker.tensorflow.html#tensorflow-estimator), [PyTorch](https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/sagemaker.pytorch.html), [MXNet](https://sagemaker.readthedocs.io/en/stable/frameworks/mxnet/sagemaker.mxnet.html#mxnet-estimator), and [XGBoost](https://sagemaker.readthedocs.io/en/stable/frameworks/xgboost/xgboost.html), or the [SageMaker generic estimator](https://sagemaker.readthedocs.io/en/stable/api/training/estimators.html#sagemaker.estimator.Estimator). For more information about what framework versions are supported, see [Debugger-supported Frameworks and Algorithms](https://docs.aws.amazon.com/sagemaker/latest/dg/train-debugger.html#debugger-supported-aws-containers). # # In this tutorial, the SageMaker TensorFlow estimator is constructed to run a TensorFlow training script with the Keras ResNet50 model and the cifar10 dataset. 
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] import boto3 from sagemaker.tensorflow import TensorFlow session = boto3.session.Session() region = session.region_name estimator = TensorFlow( role=sagemaker.get_execution_role(), instance_count=1, instance_type="ml.p3.8xlarge", image_uri=f"763104351884.dkr.ecr.{region}.amazonaws.com/tensorflow-training:2.3.1-gpu-py37-cu110-ubuntu18.04", # framework_version='2.3.1', # py_version="py37", max_run=3600, source_dir="./src", entry_point="tf-resnet50-cifar10.py", # Debugger Parameters rules=built_in_rules, ) # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # <a class="anchor" id="step4"></a> # ## Step 4: Run the training job # With the `wait=False` option, you can proceed to the next notebook cell without waiting for the training job logs to be printed out. # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] estimator.fit(wait=False) # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # <a class="anchor" id="step5"></a> # ## Step 5: Check training progress on Studio Debugger insights dashboard and the built-in rules evaluation status # # - **Option 1** - Use SageMaker Studio Debugger insights and Experiments. This is a non-coding approach. # - **Option 2** - Use the following code cells. This is a code-based approach. # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # #### Option 1 - Open Studio Debugger insights dashboard to get insights into the training job # # Through the Debugger insights dashboard on Studio, you can check the training jobs status, system resource utilization, and suggestions to optimize model performance. The following screenshot shows the Debugger insights dashboard interface. # # <IMG src=images/studio-debugger-insights-dashboard.png/> # # The following heatmap shows the `ml.p3.8xlarge` instance utilization while the training job is running or after the job has completed. To learn how to access the Debugger insights dashboard, see [Debugger on Studio](https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-on-studio.html) in the [SageMaker Debugger developer guide](https://docs.aws.amazon.com/sagemaker/latest/dg/train-debugger.html). # # <IMG src=images/studio-debugger-insights-heatmap.png/> # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # #### Option 2 - Run the following scripts for the code-based option # # The following two code cells return the current training job name, status, and the rule status in real time. 
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ##### Print the training job name # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] job_name = estimator.latest_training_job.name print("Training job name: {}".format(job_name)) # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ##### Print the training job and rule evaluation status # # The following script returns the status in real time every 15 seconds, until the secondary training status turns to one of the descriptions, `Training`, `Stopped`, `Completed`, or `Failed`. Once the training job status turns into the `Training`, you will be able to retrieve tensors from the default S3 bucket. # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] import time client = estimator.sagemaker_session.sagemaker_client description = client.describe_training_job(TrainingJobName=job_name) if description["TrainingJobStatus"] != "Completed": while description["SecondaryStatus"] not in {"Training", "Stopped", "Completed", "Failed"}: description = client.describe_training_job(TrainingJobName=job_name) primary_status = description["TrainingJobStatus"] secondary_status = description["SecondaryStatus"] print( "Current job status: [PrimaryStatus: {}, SecondaryStatus: {}] | {} Rule Evaluation Status: {}".format( primary_status, secondary_status, estimator.latest_training_job.rule_job_summary()[0]["RuleConfigurationName"], estimator.latest_training_job.rule_job_summary()[0]["RuleEvaluationStatus"], ) ) time.sleep(30) # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # <a class="anchor" id="step6"></a> # ## Step 6: Create a Debugger trial object to access the saved model parameters # # To access the saved tensors by Debugger, use the `smdebug` client library to create a Debugger trial object. The following code cell sets up a `tutorial_trial` object, and waits until it finds available tensors from the default S3 bucket. # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] from smdebug.trials import create_trial tutorial_trial = create_trial(estimator.latest_job_debugger_artifacts_path()) # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # The Debugger trial object accesses the SageMaker estimator's Debugger artifact path, and fetches the output tensors saved for debugging. 
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # #### Print the default S3 bucket URI where the Debugger output tensors are stored # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] tutorial_trial.path # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # #### Print the Debugger output tensor names # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] tutorial_trial.tensor_names() # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # #### Print the list of steps where the tensors are saved # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # The smdebug `ModeKeys` class provides training phase mode keys that you can use to sort training (`TRAIN`) and validation (`EVAL`) steps and their corresponding values. # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] from smdebug.core.modes import ModeKeys # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] tutorial_trial.steps(mode=ModeKeys.TRAIN) # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] tutorial_trial.steps(mode=ModeKeys.EVAL) # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # #### Plot the loss curve # # The following script plots the loss and accuracy curves of training and validation loops. # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] trial = tutorial_trial def get_data(trial, tname, mode): tensor = trial.tensor(tname) steps = tensor.steps(mode=mode) vals = [tensor.value(s, mode=mode) for s in steps] return steps, vals # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import host_subplot def plot_tensor(trial, tensor_name): tensor_name = tensor_name steps_train, vals_train = get_data(trial, tensor_name, mode=ModeKeys.TRAIN) steps_eval, vals_eval = get_data(trial, tensor_name, mode=ModeKeys.EVAL) fig = plt.figure(figsize=(10, 7)) host = host_subplot(111) par = host.twiny() host.set_xlabel("Steps (TRAIN)") par.set_xlabel("Steps (EVAL)") host.set_ylabel(tensor_name) (p1,) = host.plot(steps_train, vals_train, label=tensor_name) (p2,) = par.plot(steps_eval, vals_eval, label="val_" + tensor_name) leg = plt.legend() host.xaxis.get_label().set_color(p1.get_color()) leg.texts[0].set_color(p1.get_color()) par.xaxis.get_label().set_color(p2.get_color()) leg.texts[1].set_color(p2.get_color()) plt.ylabel(tensor_name) plt.show() plot_tensor(trial, "loss") plot_tensor(trial, "accuracy") # - # > ## Note : Rerun the above cell if you don't see any plots! # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ## Conclusion # # In this tutorial, you learned how to use SageMaker Debugger with the minimal coding through SageMaker Studio and Jupyter notebook. 
The Debugger built-in rules detect training anomalies while concurrently reading in the output tensors, such as weights, activation outputs, gradients, accuracy, and loss, from your training jobs. In the next tutorial videos, you will learn more features of Debugger, such as how to analyze the tensors, change the built-in debugging rule parameters and thresholds, and save the tensors at your preferred S3 bucket URI. # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
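# The conclusion above mentions tuning the built-in rule parameters/thresholds and
# saving tensors to an S3 bucket of your choice. The cell below is a minimal sketch of
# both; the bucket path and the specific parameter values are placeholders, not settings
# prescribed by this tutorial.
from sagemaker.debugger import CollectionConfig, DebuggerHookConfig

# A built-in rule with custom thresholds (loss_not_decreasing is another built-in rule).
custom_rule = Rule.sagemaker(
    rule_configs.loss_not_decreasing(),
    rule_parameters={"diff_percent": "10", "num_steps": "20"},
)

# A hook configuration that saves the "losses" collection every 50 steps to a custom S3 URI.
custom_hook_config = DebuggerHookConfig(
    s3_output_path="s3://your-bucket/debugger-tensors",  # placeholder bucket
    collection_configs=[CollectionConfig(name="losses", parameters={"save_interval": "50"})],
)

# These objects would be passed to the estimator in Step 3 via
# rules=[custom_rule] and debugger_hook_config=custom_hook_config.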
sagemaker-debugger/tensorflow_builtin_rule/tf-mnist-builtin-rule.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + raw_mimetype="text/restructuredtext" active="" # ==================== # Parsing and Visiting # ==================== # # LibCST provides helpers to parse source code string as concrete syntax tree. In order to perform static analysis to identify patterns in the tree or modify the tree programmatically, we can use visitor pattern to traverse the tree. In this tutorial, we demonstrate a common three-step-workflow to build an automated refactoring (codemod) application: # # 1. `Parse Source Code <#Parse-Source-Code>`_ # 2. `Build Visitor or Transformer <#Build-Visitor-or-Transformer>`_ # 3. `Generate Source Code <#Generate-Source-Code>`_ # # Parse Source Code # ================= # LibCST provides various helpers to parse source code as concrete syntax tree: :func:`~libcst.parse_module`, :func:`~libcst.parse_expression` and :func:`~libcst.parse_statement` (see :doc:`Parsing <parser>` for more detail). The default :class:`~libcst.CSTNode` repr provides pretty print formatting for reading the tree easily. # + nbsphinx="hidden" import sys sys.path.append("../../") # + import libcst as cst cst.parse_expression("1 + 2") # + raw_mimetype="text/restructuredtext" active="" # Example: add typing annotation from pyi stub file to Python source # ------------------------------------------------------------------ # Python `typing annotation <https://mypy.readthedocs.io/en/latest/cheat_sheet_py3.html>`_ was added in Python 3.5. Some Python applications add typing annotations in separate ``pyi`` stub files in order to support old Python versions. When applications decide to stop supporting old Python versions, they'll want to automatically copy the type annotation from a pyi file to a source file. Here we demonstrate how to do that easliy using LibCST. The first step is to parse the pyi stub and source files as trees. # + py_source = ''' class PythonToken(Token): def __repr__(self): return ('TokenInfo(type=%s, string=%r, start_pos=%r, prefix=%r)' % self._replace(type=self.type.name)) def tokenize(code, version_info, start_pos=(1, 0)): """Generate tokens from a the source code (string).""" lines = split_lines(code, keepends=True) return tokenize_lines(lines, version_info, start_pos=start_pos) ''' pyi_source = ''' class PythonToken(Token): def __repr__(self) -> str: ... def tokenize( code: str, version_info: PythonVersionInfo, start_pos: Tuple[int, int] = (1, 0) ) -> Generator[PythonToken, None, None]: ... ''' source_tree = cst.parse_module(py_source) stub_tree = cst.parse_module(pyi_source) # + raw_mimetype="text/restructuredtext" active="" # Build Visitor or Transformer # ============================ # For traversing and modifying the tree, LibCST provides Visitor and Transformer classes similar to the `ast module <https://docs.python.org/3/library/ast.html#ast.NodeVisitor>`_. To implement a visitor (read only) or transformer (read/write), simply implement a subclass of :class:`~libcst.CSTVisitor` or :class:`~libcst.CSTTransformer` (see :doc:`Visitors <visitors>` for more detail). # In the typing example, we need to implement a visitor to collect typing annotation from the stub tree and a transformer to copy the annotation to the function signature. In the visitor, we implement ``visit_FunctionDef`` to collect annotations. 
Later in the transformer, we implement ``leave_FunctionDef`` to add the collected annotations. # + from typing import List, Tuple, Dict, Optional class TypingCollector(cst.CSTVisitor): def __init__(self): # stack for storing the canonical name of the current function self.stack: List[Tuple[str, ...]] = [] # store the annotations self.annotations: Dict[ Tuple[str, ...], # key: tuple of cononical class/function name Tuple[cst.Parameters, Optional[cst.Annotation]], # value: (params, returns) ] = {} def visit_ClassDef(self, node: cst.ClassDef) -> Optional[bool]: self.stack.append(node.name.value) def leave_ClassDef(self, node: cst.ClassDef) -> None: self.stack.pop() def visit_FunctionDef(self, node: cst.FunctionDef) -> Optional[bool]: self.stack.append(node.name.value) self.annotations[tuple(self.stack)] = (node.params, node.returns) return ( False ) # pyi files don't support inner functions, return False to stop the traversal. def leave_FunctionDef(self, node: cst.FunctionDef) -> None: self.stack.pop() class TypingTransformer(cst.CSTTransformer): def __init__(self, annotations): # stack for storing the canonical name of the current function self.stack: List[Tuple[str, ...]] = [] # store the annotations self.annotations: Dict[ Tuple[str, ...], # key: tuple of cononical class/function name Tuple[cst.Parameters, Optional[cst.Annotation]], # value: (params, returns) ] = annotations def visit_ClassDef(self, node: cst.ClassDef) -> Optional[bool]: self.stack.append(node.name.value) def leave_ClassDef( self, original_node: cst.ClassDef, updated_node: cst.ClassDef ) -> cst.CSTNode: self.stack.pop() return updated_node def visit_FunctionDef(self, node: cst.FunctionDef) -> Optional[bool]: self.stack.append(node.name.value) return ( False ) # pyi files don't support inner functions, return False to stop the traversal. def leave_FunctionDef( self, original_node: cst.FunctionDef, updated_node: cst.FunctionDef ) -> cst.CSTNode: key = tuple(self.stack) self.stack.pop() if key in self.annotations: annotations = self.annotations[key] return updated_node.with_changes( params=annotations[0], returns=annotations[1] ) return updated_node visitor = TypingCollector() stub_tree.visit(visitor) transformer = TypingTransformer(visitor.annotations) modified_tree = source_tree.visit(transformer) # + raw_mimetype="text/restructuredtext" active="" # Generate Source Code # ==================== # Generating the source code from a cst tree is as easy as accessing the :attr:`~libcst.Module.code` attribute on :class:`~libcst.Module`. After the code generation, we often use `Black <https://black.readthedocs.io/en/stable/>`_ and `isort <https://isort.readthedocs.io/en/stable/>`_ to reformate the code to keep a consistent coding style. # - print(modified_tree.code) # + # Use difflib to show the changes to verify type annotations were added as expected. import difflib print( "".join( difflib.unified_diff(py_source.splitlines(1), modified_tree.code.splitlines(1)) ) ) # + raw_mimetype="text/restructuredtext" active="" # For the sake of efficiency, we don't want to re-write the file when the transformer doesn't change the source code. We can use :meth:`~libcst.CSTNode.deep_equals` to check whether two trees have the same source code. Note that ``==`` checks the identity of tree object instead of representation. # - if not modified_tree.deep_equals(source_tree): ... # write to file
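# Below is a minimal sketch of the "write to file" step left open above. The destination
# path is hypothetical; in practice it would be the file the original source was read from.

# +
from pathlib import Path

output_path = Path("parso_tokenize_annotated.py")  # hypothetical destination

if not modified_tree.deep_equals(source_tree):
    # Persist the annotated source only when the transformer actually changed the tree.
    output_path.write_text(modified_tree.code)
# -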
docs/source/tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline import numpy as np import matplotlib.pyplot as plt import pandas as pd from glob import glob import xarray as xr import os os.environ["CUDA_VISIBLE_DEVICES"] = "7" from keras.models import load_model from keras.utils import plot_model from deepsky.gan import gan_loss, rescale_data, rescale_multivariate_data, unscale_multivariate_data from IPython.display import Image from netCDF4 import Dataset import h5py from os.path import join def load_storm_patch_data(data_path, variable_names): data_patches = [] data_files = sorted(glob(join(data_path, "*20160526*.nc"))) for data_file in data_files: print(data_file.split("/")[-1]) ds = xr.open_dataset(data_file) patch_arr = [] for variable in variable_names: patch_arr.append(ds[variable].values) data_patches.append(np.stack(patch_arr, axis=-1)) data = np.vstack(data_patches) return data def load_storm_patch_data(data_path, variable_names): data_patches = [] centers = [] valid_dates = [] data_files = sorted(glob(join(data_path, "*20160526*.nc"))) for data_file in data_files: print(data_file) ds = xr.open_dataset(data_file) patch_arr = [] all_vars = list(ds.variables.keys()) if np.all(np.in1d(variable_names, all_vars)): centers.append(np.array([ds["longitude"][:, 32, 32], ds["latitude"][:, 32, 32]]).T) valid_dates.append(ds["valid_date"].values) for variable in variable_names: patch_arr.append(ds[variable][:, 16:-16, 16:-16].values) data_patches.append(np.stack(patch_arr, axis=-1)) ds.close() center_arr = np.vstack(centers) valid_date_index = pd.DatetimeIndex(np.concatenate(valid_dates)) data = np.vstack(data_patches) return data, center_arr, valid_date_index data_path = "/scratch/dgagne/ncar_ens_storm_patches/" #variable_names = ["composite_reflectivity_entire_atmosphere_prev", # "temperature_2_m_above_ground_prev", # "dew_point_temperature_2_m_above_ground_prev", # "u-component_of_wind_10_m_above_ground_prev", # "v-component_of_wind_10_m_above_ground_prev"] variable_names = ['composite_reflectivity_entire_atmosphere_current', 'precipitable_water_entire_atmosphere_(considered_as_a_single_layer)_current', 'geopotential_height_level_of_adiabatic_condensation_from_sfc_prev', 'convective_available_potential_energy_180-0_mb_above_ground_prev', 'vertical_u-component_shear_0-6000_m_above_ground_prev', 'vertical_v-component_shear_0-6000_m_above_ground_prev', ] storm_data, storm_centers, storm_dates = load_storm_patch_data(data_path, variable_names) scaled_storm_data, scaling_values = rescale_multivariate_data(storm_data) plt.contourf(storm_data[203, :, :, 0], np.arange(0, 80, 5), cmap="gist_ncar") gan_history_files = sorted(glob("/scratch/dgagne/storm_gan_20170622/gan_loss_history_*.csv")) gan_history_coll = [] for gan_history_file in gan_history_files: print(gan_history_file) gan_history_coll.append(pd.read_csv(gan_history_file, index_col="Time", parse_dates=["Time"])) fig, axes = plt.subplots(3,2, figsize=(10, 10)) axef = axes.ravel() for g in range(len(gan_history_coll)): axef[g].plot(np.arange(gan_history_coll[g].shape[0]), gan_history_coll[g]["Gen Loss"]) axef[g].plot(np.arange(gan_history_coll[g].shape[0]), gan_history_coll[g]["Disc Loss"]) epoch_vals = np.where(gan_history_coll[g]["Batch"].values == 0)[0] print(epoch_vals) axef[g].set_xticks(epoch_vals) 
    axef[g].set_xticklabels(gan_history_coll[g].iloc[epoch_vals]["Epoch"].values.astype(int) - 1)
    axef[g].set_yscale("log")
    axef[g].set(title="GAN Config {0:d}".format(g))

for c in range(5):
    print(gan_history_coll[c].index[-1] - gan_history_coll[c].index[0])

fig, axes = plt.subplots(3,2, figsize=(10, 10))
axef = axes.ravel()
for g in range(len(gan_history_coll)):
    axef[g].plot(np.arange(gan_history_coll[g].shape[0]), gan_history_coll[g]["Gen_Enc Loss"])
    epoch_vals = np.where(gan_history_coll[g]["Batch"].values == 0)[0]
    print(epoch_vals)
    axef[g].set_xticks(epoch_vals)
    axef[g].set_xticklabels(gan_history_coll[g].iloc[epoch_vals]["Epoch"].values.astype(int) - 1)
    #axef[g].set_yscale("log")
    axef[g].set(title="GAN Config {0:d}".format(g))

# +
ds = xr.open_dataset("/scratch/dgagne/storm_gan_20170622/gan_gen_patches_003_epoch_010.nc")
fig, axes = plt.subplots(4,4, figsize=(10, 10))
plt.subplots_adjust(0.03, 0.03, 0.95, 0.95, wspace=0, hspace=0)
axef = axes.ravel()
for a, ax in enumerate(axef):
    ax.contourf(ds["gen_patch"][a, :, :, 0], np.arange(0, 80, 5), cmap="gist_ncar")
    # Subsample every 4th grid point of the shear fields to match the quiver grid spacing above
    ax.quiver(np.arange(0, 32, 4), np.arange(0, 32, 4),
              ds["gen_patch"][a, 0:32:4, 0:32:4, -2],
              ds["gen_patch"][a, 0:32:4, 0:32:4, -1], scale=150)
    ax.axes.get_xaxis().set_ticks([])
    ax.axes.get_yaxis().set_ticks([])
fig.suptitle("GAN Generated Radar Reflectivity and 0-6 km Shear Vectors", fontsize=20)
ds.close()
plt.savefig("/scratch/dgagne/hail_models/gan_radar_shear.pdf", bbox_inches="tight", transparent=True)
# -

gan_param_configs = pd.read_csv("/scratch/dgagne/storm_gan_20170622/gan_param_combos.csv")

gan_param_configs

# +
enc= load_model("/scratch/dgagne/storm_gan_20170622/gan_encoder_000004_epoch_0010.h5")
gen = load_model("/scratch/dgagne/storm_gan_20170622/gan_generator_000004_epoch_0010.h5")
# -

print(enc.summary())

print(gen.summary())

max_vals = storm_data.max(axis=0).max(axis=0).max(axis=0)
min_vals = storm_data.min(axis=0).min(axis=0).min(axis=0)

enc_vecs = enc.predict(scaled_storm_data[104:204])

regen = unscale_multivariate_data(gen.predict(enc_vecs), scaling_values)

fig, axes = plt.subplots(2, 6, figsize=(12, 4))
plt.subplots_adjust(wspace=0, hspace=0.05)
short_var_names = ["Reflectivity", "Precipitable Water", "LCL Height", "CAPE", "0-6 km Shear-U", "0-6 km Shear-V"]
axes[0, 0].contourf(storm_data[105, :, :, 0], np.arange(0, 80, 5), cmap="gist_ncar")
axes[1, 0].contourf(regen[1, :, :, 0], np.arange(0, 80, 5), cmap="gist_ncar")
axes[0, 0].set_ylabel("Original")
axes[1, 0].set_ylabel("Reconstructed")
axes[0, 0].set_title(short_var_names[0])
axes[0, 0].axes.get_xaxis().set_ticks([])
axes[0, 0].axes.get_yaxis().set_ticks([])
axes[1, 0].axes.get_xaxis().set_ticks([])
axes[1, 0].axes.get_yaxis().set_ticks([])
for var in range(1, 6):
    axes[0, var].contourf(storm_data[105, :, :, var], np.linspace(min_vals[var], max_vals[var], 20))
    axes[1, var].contourf(regen[1, :, :, var], np.linspace(min_vals[var], max_vals[var], 20))
    axes[0, var].set_title(short_var_names[var])
    axes[0, var].axes.get_xaxis().set_ticks([])
    axes[0, var].axes.get_yaxis().set_ticks([])
    axes[1, var].axes.get_xaxis().set_ticks([])
    axes[1, var].axes.get_yaxis().set_ticks([])

interp_vecs = np.zeros((12, enc_vecs.shape[1]))
interp_vecs[0] = enc_vecs[0]
interp_vecs[-1] = enc_vecs[-50]
for i in range(interp_vecs.shape[1]):
    interp_vecs[:, i] = np.linspace(interp_vecs[0, i], interp_vecs[-1, i], 12)

plt.pcolormesh(interp_vecs)
plt.colorbar()
interp_regen = unscale_multivariate_data(gen.predict(interp_vecs), scaling_values) fig, axes = plt.subplots(3, 4, figsize=(16, 12)) axef = axes.ravel() for a, ax in enumerate(axef): ax.contourf(interp_regen[a, :, :, 0], np.arange(0, 80, 5), cmap="gist_ncar") ax.axes.get_xaxis().set_ticks([]) ax.axes.get_yaxis().set_ticks([]) ax.set_title(a) input_const_vecs = np.tile(np.linspace(-3, 3, 20), 32).reshape(32, 20).T print(input_const_vecs) output_const_vecs = enc.predict(gen.predict(input_const_vecs)) output_const_vecs[15] # + plt.figure(figsize=(5, 5)) plt.fill_between(input_const_vecs.mean(axis=1), np.percentile(output_const_vecs, 90, axis=1), np.percentile(output_const_vecs, 10, axis=1), color='r', alpha=0.3) plt.scatter(np.mean(input_const_vecs, axis=1), np.median(output_const_vecs, axis=1), color='r') plt.plot(np.arange(-3, 3.1, 0.1), np.arange(-3, 3.1, 0.1), 'k--') plt.xlabel("Input Vector Value") plt.ylabel("Encoded Vector Value") plt.title("") # - plot_model(enc, to_file="/scratch/dgagne/storm_gan_20170622/gan_encoder_000.png", show_shapes=True) Image("/scratch/dgagne/storm_gan_20170622/gan_encoder_000.png") d = h5py.File("/scratch/dgagne/storm_gan_20170519/gan_encoder_000001_epoch_0010.h5") dir(d) for key in d.keys(): print(key) list(d["model_weights"].keys()) list(d["model_weights"]["conv2d_1"]["conv2d_1"].keys())
notebooks/StormGANs20170622.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # !pip3 install matplotlib import pandas as pd # !pip3 install xlrd df = pd.read_excel("richpeople.xlsx") # # 1)What country are most billionaires from? For the top ones, how many billionaires per billion people? df.head(3) df.columns recent = df[df['year']==2014] recent.head() df['citizenship'].value_counts().head(5) #I am going to skip the second part of the question #because we would have to create a new column with the number of people per country. Easier joining tables? # # 2)Who are the top 10 richest billionaires? recent.sort_values(by='rank').head(10) # # 3)What's the average wealth of a billionaire? Male? Female? recent['networthusbillion'].describe() # + females = recent[recent['gender'] == 'female'] males = recent[recent['gender'] == 'male'] females['networthusbillion'].describe() # + males['networthusbillion'].describe() # - # # 4)Who is the poorest billionaire? Who are the top 10 poorest billionaires? recent.sort_values(by='rank').tail(1) recent.sort_values(by='rank').tail(10) # # 5)'What is relationship to company'? And what are the most common relationships? recent['relationshiptocompany'].value_counts().head(10) # # 6)Most common source of wealth? Male vs. female? # recent['sourceofwealth'].value_counts().head(10) # + females = recent[recent['gender'] == 'female'] males = recent[recent['gender'] == 'male'] females['sourceofwealth'].value_counts().head(10) # - males['sourceofwealth'].value_counts().head(10) # # 9)What are the most common industries for billionaires to come from? What's the total amount of billionaire money from each industry? recent['industry'].value_counts().head(10) recent.groupby('industry')['networthusbillion'].sum() # # 10)How many self made billionaires vs. others? # recent['selfmade'].value_counts() # # 11)How old are billionaires? How old are billionaires self made vs. non self made? or different industries? # + billionaires_age = ['name', 'age'] recent[billionaires_age] # - recent.groupby('selfmade')['age'].describe() recent.groupby('industry')['age'].describe() # # 12)Who are the youngest billionaires? The oldest? Age distribution - maybe make a graph about it? recent.sort_values('age', ascending=True).head(10) df.sort_values('age', ascending=False).head(10) import matplotlib.pyplot as plt # %matplotlib inline import matplotlib.pyplot as plt plt.style.available plt.style.use('dark_background') young_age_ordered = recent.sort_values('age', ascending=True).head(10) young_age_ordered.plot(kind='scatter', x='age', y='networthusbillion') #oops misread instructions # + old_age_ordered = recent.sort_values('age', ascending=False).head(10) old_age_ordered.plot(kind='scatter', x='age', y='networthusbillion') #oops misread instructions # - plt.style.use('seaborn-bright') age_distribution = recent['age'].value_counts() age_distribution.describe() age_distribution.head(30).plot(kind='bar', x='', y='') #i am not sure how to comple x,y fields in this case # # Maybe just made a graph about how wealthy they are in general? 
# # + recent.plot(kind='bar', x='name', y='networthusbillion') #I know this is awful but looks cool lol # - ordered_by_wealth = recent.sort_values('networthusbillion', ascending=False) ordered_by_wealth.head(30).plot(kind='bar', x='rank', y='networthusbillion', color=['g']) # # Maybe plot their net worth vs age (scatterplot) # recent.plot(kind='scatter', x='age', y='networthusbillion') # # Make a bar graph of the top 10 or 20 richest # + top_10 = recent.sort_values(by='networthusbillion', ascending=False).head(10) top_10.plot(kind='barh', x='name', y='networthusbillion', color="r") # -
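# # Bonus: the age distribution plot from earlier, without x/y
# A minimal sketch of one way to finish the age-distribution chart left open above — a single column/Series already carries its own values, so no x and y fields are needed for a histogram.

recent['age'].dropna().plot(kind='hist', bins=20)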
foundations_hw/07/07/Homework7_part2_benzaquen.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.6.13 64-bit (''open3d'': conda)' # name: python3 # --- # # Clutter Removal Experiment # # Use this notebook to analyze the results of a clutter removal experiment. import os os.chdir('..') # + from pathlib import Path from vgn.experiments import clutter_removal # - # Path to the log directory of the experiment. logdir = Path("data/experiments/...") data = clutter_removal.Data(logdir) # First, we compute the following metrics for the experiment: # # * **Success rate**: the ratio of successful grasp executions, # * **Percent cleared**: the percentage of objects removed during each round, # * **Planning time**: the time between receiving a voxel grid/point cloud and returning a list of grasp candidates. print("Num grasps: ", data.num_grasps()) print("Success rate: ", data.success_rate()) print("Percent cleared: ", data.percent_cleared()) print("Avg planning time: ", data.avg_planning_time()) # Next, we visualize the failure cases. Make sure to have a ROS core running and open `config/sim.rviz` in RViz. # + import rospy from vgn import vis rospy.init_node("vgn_vis", anonymous=True) # - failures = data.grasps[data.grasps["label"] == 0].index.tolist() iterator = iter(failures) # + i = next(iterator) points, grasp, score, label = data.read_grasp(i) vis.clear() vis.draw_workspace(0.3) vis.draw_points(points) vis.draw_grasp(grasp, label, 0.05) # -
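# To step through every failure in one pass instead of re-running the cell above, here is a small sketch of the same visualization in a loop (the pause length is arbitrary).

# +
import time

for i in failures:
    points, grasp, score, label = data.read_grasp(i)
    vis.clear()
    vis.draw_workspace(0.3)
    vis.draw_points(points)
    vis.draw_grasp(grasp, label, 0.05)
    time.sleep(2.0)  # arbitrary pause so each failure stays visible in RViz
# -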
notebooks/clutter_removal.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import numpy as np from tqdm import tqdm # import pandas as pd import modin.pandas as pd # ! python -m pip install "dask[dataframe]" import dask.dataframe as dd # - # ## Load metadata from HD5 ECGs fpaths_mrns_old = os.path.expanduser("~/dropbox/ecg/explore/mgh/tensors_all_union.csv") df_old = pd.read_csv(fpaths_mrns_old) print(f"Loaded {fpaths_mrns_old} into df") # ## Load new VM metadata (MRNs, ECG datetime, location) # + # If current CSV does nost exist, create it by merging MRN + locations list fpath_mrns_new = os.path.expanduser("~/dropbox/ecg/new-vms/mgh-updated.csv") fpath_mrns_new_locs = os.path.expanduser("~/dropbox/ecg/new-vms/mgh-updated-locations.csv") fpath_mrns_updated = os.path.expanduser("~/dropbox/ecg/new-vms/mgh-updated-merged.csv") ecg_datetime_key = 'ECG_datetime' ecg_mrn_key = 'PatientID' if os.path.exists(fpath_mrns_updated): df_new = pd.read_csv(fpath_mrns_updated) # Convert column to datetime format df_new[ecg_datetime_key] = pd.to_datetime(df_new[ecg_datetime_key]) print(f"Loaded {fpath_mrns_updated} into df with keys {df_new.keys()}") else: df_new = dd.read_csv(fpath_mrns_new) print(f"Read {fpath_mrns_new} to dask dataframe with {len(df_new.index)} rows") df_new_locs = dd.read_csv(fpath_mrns_new_locs) print(f"Read {fpath_mrns_new_locs} to dask dataframe with {len(df_new_locs.index)} rows") # Merge the csv files. df_merged = dd.merge(df_new, df_new_locs, how='outer', on=['PatientID', 'ECG_datetime']) print(f'Merged two Dask dfs into one df with length {len(df_merged.index)}') # Convert Dask DF to Pandas DF, and overwrite df_new df_new = df_merged.compute() # Convert column to datetime format df_new[ecg_datetime_key] = pd.to_datetime(df_new[ecg_datetime_key]) # Write the output df_new.to_csv(fpath_mrns_updated, index=False) print(f"Saved merged df to {fpath_mrns_updated}") # - # Preview merged new DF df_new.head() # ## Load CSV of reference cohort # + # STS # fpath_ref = os.path.expanduser("~/dropbox/sts-data/mgh-all-features-labels.csv") # cohort_name = "sts" # date_key = "surgdt" # ref_key = "medrecn" # outcome_key = 'mtopd' # Apollo fpath_ref = os.path.expanduser("~/dropbox/apollo/ecg_pressures_labs_processed.csv") cohort_name = "apollo" date_key = "Date_of_Cath" ref_key = "Patient_ID" df_ref = pd.read_csv(fpath_ref) print(f"Loaded {fpath_ref} into df") df_ref[date_key] = pd.to_datetime(df_ref[date_key]) df_ref # - # Get all MRNs from reference CSV mrn_ref = pd.to_numeric(df_ref[ref_key], errors="coerce") mrn_ref_unique = np.unique(mrn_ref) df_ref[ref_key] = mrn_ref print(f'Reference: {len(mrn_ref)} total MRNs and {len(mrn_ref_unique)} unique MRNs') # + # Get all MRNs in tensors_all mrn_old = pd.to_numeric(df_old['ecg_patientid_clean'], errors="coerce") mrn_old_unique = np.unique(mrn_old) df_old['ecg_patientid_clean'] = mrn_old print(f'Existing HD5 dataset: {len(mrn_old)} total MRNs and {len(mrn_old_unique)} unique MRNs') mrn_intersect_old_ref = np.intersect1d(mrn_ref_unique, mrn_old_unique) print(f'Intersect between existing HD5 dataset and reference cohort: {len(mrn_intersect_old_ref)} unique MRNs found in both') # - # Get all MRNs from new VM CSV mrn_new = pd.to_numeric(df_new['PatientID'], errors="coerce") mrn_new_unique = np.unique(mrn_new) df_new['PatientID'] = mrn_new print(f'CSV from new VM: {len(mrn_new)} total MRNs and 
{len(mrn_new_unique)} unique MRNs') # Get intersect between reference cohort and MRN mrn_intersect_new_ref = np.intersect1d(mrn_ref_unique, mrn_new_unique) print(f'Intersect between new VM CSVs and reference cohort: {len(mrn_intersect_new_ref)} unique MRNs found in both') new_mrns = set(mrn_intersect_new_ref) - set(mrn_intersect_old_ref) new_mrns = list(new_mrns) print(f"Found {len(new_mrns)} new reference cohort MRNs in new VM CSV") fpaths_mrns_prioritize = os.path.expanduser(f"~/dropbox/ecg/new-vms/mrns-to-prioritize-mgh-{cohort_name}.csv") mrns_formatted = [f"{int(mrn):09}" for mrn in new_mrns] pd.DataFrame(mrns_formatted).to_csv(fpaths_mrns_prioritize, index=False, header=False) print(f"Saved new reference cohort MRNs to {fpaths_mrns_prioritize}") # Print a few formatted MRNs to ensure zero padding mrns_formatted[0:10] # + # ecg_hits = {} # for mrn in tqdm(new_mrns): # ecg_hits[mrn] = {} # # Isolate DF rows for this MRN from reference cohort # df_ref_mrn = df_ref[df_ref[ref_key] == mrn] # # Save label for this MRN # ecg_hits[mrn][outcome_key] = int(df_ref_mrn[outcome_key]) # # Isolate DF rows for this MRN from CSV from new VM # df_new_ecgs = df_new[df_new[ecg_mrn_key] == mrn] # # Get date of surgery for latest row of reference data for this patient # start_date = df_ref_mrn[date_key].iloc[-1] - pd.Timedelta(value=30, unit='days') # end_date = df_ref_mrn[date_key].iloc[-1] # # Get boolean mask of all ECGs that are within window # mask = (df_new_ecgs[ecg_datetime_key] > start_date) & (df_new_ecgs[ecg_datetime_key] < end_date) # # If any hits, get the first date of the hit # if mask.any(): # ecg_hits[mrn]['hit_dates'] = df_new_ecgs[ecg_datetime_key][mask].to_list() # else: # ecg_hits[mrn]['hit_dates'] = [] # # Add info to dicts # ecg_hits[mrn]['start_date'] = start_date # ecg_hits[mrn]['end_date'] = end_date # ecg_hits[mrn]['hit_count'] = sum(mask) # df_ecg_hits = pd.DataFrame(ecg_hits).T # + # df_ecg_hits # + # print(f"Reference MRNs (total new from VM): {df_ecg_hits.shape[0]}") # print(f"Reference MRNs (1+ ECG in window): {sum(df_ecg_hits['hit_count'] > 0)}") # print(f"Total ECGs in window: {df_ecg_hits['hit_count'].sum()}") # print(f"Positive labels: {df_ecg_hits[outcome_key].sum()}") # print(f"Positive labels (with ECGs in window): {df_ecg_hits[df_ecg_hits['hit_count'] > 0]['mtopd'].sum()}")
notebooks/ecg/assess-ecg-coverage.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Hourly Hashtag Tweet Collection # * Iterate seed's most recent tweets ( restrict by `config.collection.search_languages`) # * Gather all hashtags (should these be filtered?) # * Merge these hashtags with the `config.seed.hashtags` # * Search Twitter for those hashtags and insert those tweets # Maybe create an index on `created_at` if this query starts getting too slow # required imports to access api_db, misc, misc.CONFIG, ... import sys sys.path = ['.', '..', '../..'] + sys.path from collection import * # ### Conditional Execution # Each file needs to verify if it should be executed or not based on the configurations (for some files this is not optional but all should have this section, even if it is tautological). Example: # ```python # if not misc.CONFIG["collection"]["execute_this_script"]: exit() # ``` # Conditional execution pass # <hr> # <h1 align="center">driver code</h1> # Get the seed ids from pytictoc import TicToc with TicToc(): print("Loading seed_ids from database...", end="", flush=True) seed_ids = [s["_id"] for s in api_db.col_users.find({"depth": 0}, {}).limit(len(misc.CONFIG["seed"]["usernames"]))] print("got %d seed users, done." % len(seed_ids)) # Query the database for the tweets since yesterday at midnight yesterday = datetime.datetime.now() - datetime.timedelta(days=1) print("Yesterday at this time: %s" % yesterday) yesterday_seed_tweets = list(api_db.col_tweets.find({ "user": {"$in": seed_ids}, "created_at": {"$gte": yesterday} }).limit(10_000)) # in practice this limit is unlikely to be reached unless for a very large seed print("Found %d/10000 seed tweets" % len(yesterday_seed_tweets)) # Extract the hastags from those tweets hashtags_l = [h.lower() for t in yesterday_seed_tweets for h in dict_key_or_default(t, "hashtags", []) ] from collections import Counter print("hashtag counter: %s" % Counter(hashtags_l)) hashtags = set(hashtags_l) print("Found a total of %d unique hashtags" % len(hashtags)) # ### perform the search # #### Search goals # * between yesterday(`since`) and today(`until`) in `YYYY-MM-DD` format # * perform the search once for each hastag # * perform the search once for each language # * result_type `mixed`: recent and popular s_yesterday = yesterday.strftime("%Y-%m-%d") s_today = datetime.date.today().strftime("%Y-%m-%d") print("yesterday: %s, today: %s" % (s_yesterday, s_today)) langs = list(set(misc.CONFIG["collection"]["search_languages"]) - {"und"}) for lang in langs: print("Searching for tweets in language=%s" % lang) for h in hashtags: print(" with hashtag [#%s]..." % h, end="", flush=True) tweets = search_hashtag(h, since=s_yesterday, until=s_today, lang=lang) insert_tweets(tweets) print("got %d tweets, done." % len(tweets)) print("DONE")
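# ### note on the configured seed hashtags
# The workflow description at the top also calls for merging the hashtags gathered from the seed tweets with the configured seed hashtags before searching. The cells above skip that step; the sketch below shows what it could look like. It assumes the configuration exposes a `seed.hashtags` list (as the description suggests), and it would run just before the language loop above.

seed_hashtags = {h.lower() for h in misc.CONFIG["seed"].get("hashtags", [])}  # assumed config key
hashtags = hashtags | seed_hashtags
print("After merging configured seed hashtags: %d unique hashtags" % len(hashtags))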
core/collection/ignored/30_hashtag_tweet_collection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # ================================================= # SVM-Anova: SVM with univariate feature selection # ================================================= # # This example shows how to perform univariate feature selection before running a # SVC (support vector classifier) to improve the classification scores. We use # the iris dataset (4 features) and add 36 non-informative features. We can find # that our model achieves best performance when we select around 10% of features. # # # + print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import load_iris from sklearn.feature_selection import SelectPercentile, chi2 from sklearn.model_selection import cross_val_score from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC # ############################################################################# # Import some data to play with X, y = load_iris(return_X_y=True) # Add non-informative features np.random.seed(0) X = np.hstack((X, 2 * np.random.random((X.shape[0], 36)))) # ############################################################################# # Create a feature-selection transform, a scaler and an instance of SVM that we # combine together to have an full-blown estimator clf = Pipeline([('anova', SelectPercentile(chi2)), ('scaler', StandardScaler()), ('svc', SVC(gamma="auto"))]) # ############################################################################# # Plot the cross-validation score as a function of percentile of features score_means = list() score_stds = list() percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100) for percentile in percentiles: clf.set_params(anova__percentile=percentile) this_scores = cross_val_score(clf, X, y, cv=5) score_means.append(this_scores.mean()) score_stds.append(this_scores.std()) plt.errorbar(percentiles, score_means, np.array(score_stds)) plt.title( 'Performance of the SVM-Anova varying the percentile of features selected') plt.xticks(np.linspace(0, 100, 11, endpoint=True)) plt.xlabel('Percentile') plt.ylabel('Accuracy Score') plt.axis('tight') plt.show() # -
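# A small follow-up to the claim in the introduction above: report which percentile gave the best mean cross-validation score on this run. This is a convenience addition, not part of the original scikit-learn example.

best_idx = int(np.argmax(score_means))
print("Best percentile: {}% (mean CV accuracy: {:.3f})".format(percentiles[best_idx], score_means[best_idx]))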
svm_sklearn/plot_svm_anova.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.5 64-bit (''cv'': conda)' # name: python385jvsc74a57bd02089a5dd6ace16632af9925dc9b4a12cb9f6cef15bceb80b067c3287157600a6 # --- from model.model import VireoNet from model.model_dev import VireoDev from model.x3dm import X3Dm from model.attention3d import Attention3d import torch import torch.nn.functional as F import math model_v = VireoNet(51) sum(p.numel() for p in model_v.parameters() if p.requires_grad) # for k,v in model.named_parameters(): # print(k, v.numel()) model = X3Dm(51) sum(p.numel() for p in model.parameters() if p.requires_grad) x = torch.rand((1, 3, 16, 224, 224)) model(x).shape model_v(x).shape x = torch.rand((1, 160, 14, 14, 14)) att = Attention3d(160) sum(p.numel() for p in att.parameters() if p.requires_grad)
test_model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.5 64-bit (''venv'': venv)' # language: python # name: python37564bitvenvvenv7153db2e85324c8ea2b6eb355f534b21 # --- # # Example of DOV search methods for interpretations (gecodeerde lithologie) # [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/DOV-Vlaanderen/pydov/master?filepath=docs%2Fnotebooks%2Fsearch_gecodeerde_lithologie.ipynb) # ## Use cases explained below # * Get 'gecodeerde lithologie' in a bounding box # * Get 'gecodeerde lithologie' with specific properties within a distance from a point # * Get 'gecodeerde lithologie' in a bounding box with specific properties # * Get 'gecodeerde lithologie' based on fields not available in the standard output dataframe # * Get 'gecodeerde lithologie' data, returning fields not available in the standard output dataframe # %matplotlib inline import inspect, sys # check pydov path import pydov # ## Get information about the datatype 'Gecodeerde lithologie' from pydov.search.interpretaties import GecodeerdeLithologieSearch itp = GecodeerdeLithologieSearch() # A description is provided for the 'Gecodeerde lithologie' datatype: itp.get_description() # The different fields that are available for objects of the 'Gecodeerde lithologie' datatype can be requested with the get_fields() method: # + fields = itp.get_fields() # print available fields for f in fields.values(): print(f['name']) # - # You can get more information of a field by requesting it from the fields dictionary: # * *name*: name of the field # * *definition*: definition of this field # * *cost*: currently this is either 1 or 10, depending on the datasource of the field. It is an indication of the expected time it will take to retrieve this field in the output dataframe. # * *notnull*: whether the field is mandatory or not # * *type*: datatype of the values of this field fields['Datum'] # ## Example use cases # ### Get 'Gecodeerde lithologie' in a bounding box # Get data for all the 'Gecodeerde lithologie' interpretations that are geographically located within the bounds of the specified box. # # The coordinates are in the Belgian Lambert72 (EPSG:31370) coordinate system and are given in the order of lower left x, lower left y, upper right x, upper right y. # + from pydov.util.location import Within, Box df = itp.search(location=Within(Box(153145, 206930, 153150, 206935))) df.head() # - # The dataframe contains one 'Gecodeerde lithologie' interpretation where five layers ('laag') were identified. The available data are flattened to represent unique attributes per row of the dataframe. # # Using the *pkey_interpretatie* field one can request the details of this interpretation in a webbrowser: for pkey_interpretatie in set(df.pkey_interpretatie): print(pkey_interpretatie) # ### Get 'Gecodeerde lithologie' with specific properties within a distance from a point # Next to querying interpretations based on their geographic location within a bounding box, we can also search for interpretations matching a specific set of properties. For this we can build a query using a combination of the 'Gecodeerde lithologie' fields and operators provided by the WFS protocol. 
#
# A list of possible operators can be found below:

[i for i,j in inspect.getmembers(sys.modules['owslib.fes'], inspect.isclass) if 'Property' in i]

# In this example we build a query using the *PropertyIsGreaterThan* and *PropertyIsEqualTo* operators to find all interpretations that are at least 20 m deep and have a reliability ('Betrouwbaarheid') of 'goed', within a distance of 1 km from a defined point:

# +
from owslib.fes import And, PropertyIsGreaterThan, PropertyIsEqualTo
from pydov.util.location import WithinDistance, Point

query = And([PropertyIsEqualTo(propertyname='Betrouwbaarheid',
                               literal='goed'),
             PropertyIsGreaterThan(propertyname='diepte_tot_m',
                                   literal='20'),
             ])

df = itp.search(query=query,
                location=WithinDistance(Point(153145, 206930), 1000))
df.head()
# -

# Once again we can use the *pkey_interpretatie* field as a permanent link to the information of these interpretations:

for pkey_interpretatie in set(df.pkey_interpretatie):
    print(pkey_interpretatie)

# ### Get 'Gecodeerde lithologie' in a bounding box based on specific properties
#
# We can combine a query on attributes with a query on geographic location to get the interpretations within a bounding box that have specific properties.
#
# The following example requests the interpretations of boreholes only, within the given bounding box.
#
# (Note that the datatype of the *literal* parameter should be a string, regardless of the datatype of this field in the output dataframe.)

# +
from owslib.fes import PropertyIsEqualTo

query = PropertyIsEqualTo(
            propertyname='Type_proef',
            literal='Boring')

df = itp.search(
    location=Within(Box(153145, 206930, 154145, 207930)),
    query=query
    )

df.head()
# -

# We can look at one of the interpretations in a web browser using its *pkey_interpretatie*:

for pkey_interpretatie in set(df.pkey_interpretatie):
    print(pkey_interpretatie)

# ### Get 'Gecodeerde lithologie' based on fields not available in the standard output dataframe
#
# To keep the output dataframe size acceptable, not all available WFS fields are included in the standard output. However, one can use this information to select interpretations, as illustrated below.
#
# For example, make a selection of the interpretations in the municipality of Antwerp, dated before 1/1/2010:
#
# !*remark: mind that the municipality attribute is merely an attribute that is defined by the person entering the data. It can be ok, empty, outdated or wrong*!

# +
from owslib.fes import And, PropertyIsEqualTo, PropertyIsLessThan

query = And([PropertyIsEqualTo(propertyname='gemeente',
                               literal='Antwerpen'),
             PropertyIsLessThan(propertyname='Datum',
                                literal='2010-01-01')]
            )

df = itp.search(query=query,
                return_fields=('pkey_interpretatie', 'Datum'))
df.head()
# -

# ### Get 'Gecodeerde lithologie' data, returning fields not available in the standard output dataframe
#
# As denoted in the previous example, not all available fields are included in the default output dataframe to keep its size limited. However, you can request any available field by including it in the *return_fields* parameter of the search:

# +
query = PropertyIsEqualTo(
            propertyname='gemeente',
            literal='Leuven')

df = itp.search(query=query,
                return_fields=('pkey_interpretatie', 'pkey_boring', 'x', 'y', 'Z_mTAW',
                               'gemeente', 'Auteurs', 'Proefnummer'))
df.head()
# -

# ## Visualize results
#
# Using Folium, we can display the results of our search on a map.

# import the necessary modules (not included in the requirements of pydov!)
import folium from folium.plugins import MarkerCluster from pyproj import Transformer # + # convert the coordinates to lat/lon for folium def convert_latlon(x1, y1): transformer = Transformer.from_crs("epsg:31370", "epsg:4326", always_xy=True) x2,y2 = transformer.transform(x1, y1) return x2, y2 df['lon'], df['lat'] = zip(*map(convert_latlon, df['x'], df['y'])) # convert to list loclist = df[['lat', 'lon']].values.tolist() # - # initialize the Folium map on the centre of the selected locations, play with the zoom until ok fmap = folium.Map(location=[df['lat'].mean(), df['lon'].mean()], zoom_start=12) marker_cluster = MarkerCluster().add_to(fmap) for loc in range(0, len(loclist)): folium.Marker(loclist[loc], popup=df['Proefnummer'][loc]).add_to(marker_cluster) fmap
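# If needed, the interactive map above can also be written to a standalone HTML file using standard
# Folium functionality (the file name below is just an example, not part of the original notebook):
fmap.save('gecodeerde_lithologie_results.html')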
docs/notebooks/search_gecodeerde_lithologie.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia
#     language: julia
#     name: julia
# ---

# + [markdown]
# # This work was completed by:
#
# * <NAME>
# * <NAME>

# + [markdown]
# # Topics:
#
# * Data types
# * Strings
# * Functions
# * Data arrays

# + [markdown]
# # Types

# + [markdown]
# ## A few general words about data types

# + [markdown]
# Julia has many kinds of variables: integers (for example 3, -10), floating-point numbers (1.0 and -3.9), bools (True / False) and BitArrays (similar to bool variables, but more memory-efficient), strings ("hello"), dates (2021-01-25) and many others.

# + [markdown]
# ## Integers and Floats

# +
a = 5 #integer, Int (Int64 in most cases)
b = 5.0 #floating point (Float64 in most cases)
A = [1;2]
B = [1.0;2.0]

# + [markdown]
# Let's check the types with the typeof command

# +
println("a: ",typeof(a))
println(a)

# +
println("\nb: ",typeof(b))
println(b)

# +
println("\nA: ",typeof(A))
println(A)

# +
println("\nB: ",typeof(B))
println(B)

# + [markdown]
# ## Bools and BitArrays

# +
c = 5 > 1.1
println("c: ",typeof(c))
println(c)

# +
A = [1;2]

# +
C = A .> 1.5
println("\nC: ",typeof(C))
println(C)

# +
println("A BitArray is a more memory-efficient version of Bool.\n",
        "Note that typeof(C[1]) gives: ",typeof(C[1]))

# + [markdown]
# It is worth noting that, in order for an operation to be applied not to an array as a single object but to each of its elements (which also holds for other arrays), you need to use the dot syntax.

# + [markdown]
# ## Operations on variables of different types and type conversion

# + [markdown]
# Adding an "integer" + "float" works, and the result is a float. Similarly, bool + integer gives an integer.

# +
#Adding an integer and a float
println("Int + Float64: ",15 + 27.0)

# +
#Adding a bool and an int
println("Bool + Int: ",(10 .> 0) + 1)

# + [markdown]
# ## Converting from Int to Float and back

# +
A = [2.5 ; 5.3; 100.1]
println("A: ",typeof(A))
println(A)

# + [markdown]
# Convert the array elements from Float64 to Int

# +
A_to_Int = round.(Int,A)
println("Rounding A to Int: ",typeof(A_to_Int))
println(A_to_Int)

# +
B = [5;6]
println("B: ",typeof(B))
println(B)

# + [markdown]
# Type conversion: Int -> Float64

# +
B_to_Float64 = convert.(Float64,B)
println("After converting B to Float64: ",typeof(B_to_Float64))
println(B_to_Float64) #Float64.(B) also works

# + [markdown]
# ## Converting between Bool/BitArray and Int

# + [markdown]
# C = A .> 5
#
# Converting from BitArray to Int

# +
A = [3.9 ; 7.3; 20.1]
C = A .> 5

#Converting from BitArray to Int
C_to_Int = convert.(Int,C)
println(typeof(C_to_Int)) #Int.(C) also works
println(C_to_Int)

# + [markdown]
# Converting from Int to Bool

# +
D = [1;0;1]
D_to_Bool = convert.(Bool,D)
println(typeof(D_to_Bool)) #Bool.(D) also works
println(D_to_Bool)

# + [markdown]
# ## Checking a variable's type

# + [markdown]
# The simplest way to check whether a variable belongs to a particular type is the function isa(variable, Type). The type can also be given as a combination (Union) of several types. Examples below.

# +
A = 2.5
B = [3.5,6.5]

# + [markdown]
# Is A a number?

# +
println("$A is a Number: ",isa(A,Number))

# + [markdown]
# Is the number A an Int?

# +
println("$A is an Int: ",isa(A,Int))

# + [markdown]
# Is the number A an Int or a Float?

# +
println("$A is an Int or a Float64: ",isa(A,Union{Int,Float64}))

# + [markdown]
# Is B a Float64?

# +
println("$B is a Float64: ",isa(B,Float64))

# + [markdown]
# Is B an Array?

# +
println("$B is an Array: ",isa(B,Array))

# + [markdown]
# # Strings

# + [markdown]
# ## Basic operations

# + [markdown]
# Below we show how to:
#
# * join several strings into one string using string(str1, str2) or str1 * str2
# * check whether a string contains a specific substring
# * replace part of a string with something else
# * split a string into an array of words (and then join them back into a string)
# * sort a vector of words alphabetically

# +
str1 = "Привет"
str2 = "Мир!\n"
str3 = "Как твои дела?"

new_str = string(str1," ",str2,str3) #join into a single string
println(new_str)

# + [markdown]
# Check whether the string "Я посетил США" contains the word "США".

# +
str4 = "Я посетил США"

if occursin("США",str4)
    println("The sentence")
    println(str4)
    println("contains the word 'США'")
end

# + [markdown]
# Replace the word "США" with "Германию"

# +
str4 = replace(str4,"США" => "Германию")
println("\nThe string after replacement: ")
println(str4)

# + [markdown]
# Split the sentence into an array of words

# +
words = split(str4)
println("Splitting the sentence into an array of words:")
println(words)

# + [markdown]
# And now join the words back into a sentence

# +
println("\nJoining the words back into a string:")
println(join(words," "))

# + [markdown]
# Sort the words alphabetically

# +
println("Sorted alphabetically:")
println(sort(words,lt=isless))

# + [markdown]
# ## Strings and indices

# + [markdown]
# To access the i-th element of a string str you can use str[i]

# +
str = "Привет, мир!"
println(str)
println(string("The second element of the string: ", 'р'))

# + [markdown]
# Note that an element of a string may fail to be returned this way, because a character can take more than one byte of storage, in which case an error is thrown. In that case the workaround is the following command:

# +
str = "Δx = -0.9x"
println(str[nextind(str,1)])

# + [markdown]
# ## Iterating over all characters in a string

# +
i = 1
for j in str
    #global i
    println(i," ",j)
    i = nextind(str,i)
end

# + [markdown]
# ## More common operations

# + [markdown]
# You can compare strings lexicographically using the standard comparison operators:

# +
"abracadabra" < "xylophone"

# +
"abracadabra" == "xylophone"

# +
"Hello, world." != "Goodbye, world."

# +
"1 + 2 = 3" == "1 + 2 = $(1 + 2)"

# + [markdown]
# You can find the index of a specific character using the findfirst and findlast functions:

# +
findfirst(isequal('а'), "машина")

# +
findlast(isequal('и'), "велосипед")

# +
findfirst(isequal('т'), "самолет")

# + [markdown]
# You can start searching for a character from a given offset using the findnext and findprev functions:

# +
findnext(isequal('o'), "xylophone", 1)

# +
findnext(isequal('o'), "xylophone", 5)

# +
findprev(isequal('o'), "xylophone", 5)

# +
findnext(isequal('o'), "xylophone", 8)

# + [markdown]
# # Functions

# + [markdown]
# ## Functions with a single output

# + [markdown]
# Code often has to run the same set of commands repeatedly; for example, I need to compute a discriminant. The most convenient way to implement such commands is a function. The basic approach to writing a function looks like this.
#
# ```
# function yourFunction(a, b, c)
#     ... your code
#     return D
# end
# ```
#
# Once the function is defined, I can call it wherever I like. For example,
#
# ```
# someVariable = yourFunction(1, 2, 4)
# ```
#
# The variable `someVariable` will hold the value computed by `yourFunction` with the coefficient values $1, 2, 4$.
#
# Let me move straight to writing the function `Discriminant`, which takes the coefficients of a quadratic equation $a, b, c$ ($ax^2+bx+c=0$) as input and returns the discriminant.

# +
function Discriminant(a, b, c)
    D = b^2 - 4 * a * c
    return D
end

# + [markdown]
# Consider the simple equation $x^2 + 4x + 3$. Its discriminant is $D = b^2 - 4ac = 4^2 - 4 \cdot 1 \cdot 3 = 16 - 12 = 4$. Now let's call our function with the parameters (1, 4, 3).

# +
Discriminant(1, 4, 3)

# + [markdown]
# We get the expected answer, 4. The beauty of functions is that I can plug any coefficient values into them without writing the same code again. Now let's look at the equation $x^2+7x+6=0$.

# +
Discriminant(1, 7, 6)

# + [markdown]
# ### Default argument values

# + [markdown]
# In Julia you can give arguments default values. Suppose I want to compute the length of a confidence interval for regression coefficients. Empirically, I know that my $t_{crit}$ is almost always 2 in absolute value ($t_{crit} = |2|$). Accordingly, I can write a function with the $t_{crit}$ value I need built in.
#
# Note that arguments with default values must come at the very end of the argument list.

# +
function ciLength(se_bhat, t_crit = 2)
    ciLength = 2 * se_bhat * t_crit
    return ciLength
end

# + [markdown]
# Suppose $s.e.(\hat{b}_0) = 0.5$. Let's compute the length of the confidence interval.

# +
ciLength(0.5)

# + [markdown]
# Note that I did not have to pass the default value of $t_{crit}$. Now I can call this function anywhere in my code to get the length of a confidence interval.

# + [markdown]
# ### Applying functions elementwise

# + [markdown]
# My critical values are not always $|2|$. In some cases they equal, for example, $1.64$, which noticeably changes the answer.
#
# Now I want to call the function over my candidate critical values $[1.64, 1.96, 2]$ for the following standard errors $[0.5, 0.8, 1.5]$, so that the standard error $0.5$ is paired with the critical value $1.64$.
#
# The output should be three values. The basic approach is as follows: define the arrays and put a dot `.` after the function name.
#
# ```
# firstArray = [1, 2, 3]
# secondArray = [4, 5, 6]
# yourFunction.(firstArray, secondArray)
# ```

# +
seArray = [0.5, 0.8, 1.5]
tcritArray = [1.64, 1.96, 2]
ciLength.(seArray, tcritArray)

# + [markdown]
# As expected, I get 3 values. For the first case it equals 1.64; in essence, the following call is made when the index equals one.

# +
ciLength(seArray[1], tcritArray[1])

# + [markdown]
# ## Functions with multiple outputs

# + [markdown]
# ### The basic approach to functions with multiple outputs

# + [markdown]
# Of course, functions can return several values, not just one. Let's return to the discriminant example: now we want to return not only the discriminant but also the roots. I implement it as follows.

# +
function solveQuadratic(a, b, c)
    D = b^2 - 4 * a * c
    x1 = (-b - D^0.5) / (2 * a)
    x2 = (-b + D^0.5) / (2 * a)
    return D, x1, x2
end

# + [markdown]
# Now take a quadratic, for example $x^2 - 8x + 12 = 0$. By Vieta's theorem the roots are obviously $x_1 = 2, x_2 = 6$. Let's use the function to solve this equation.

# +
solveQuadratic(1, -8, 12)

# + [markdown]
# We get the desired values in the form of a tuple.

# + [markdown]
# Suppose I only need the roots of the equation and do not want to see the discriminant at all; it distracts me, and out of carelessness I might copy it down on a test. This can be done as follows.
#
# ```
# (_, Output1, Output2) = yourFunction(a, b, c)
# ```

# +
(_, x1, x2) = solveQuadratic(1, -8, 12)
println("First solution: ", x1, " | Second solution: ", x2)

# + [markdown]
# And now I want to get only the discriminant, without the roots. This is done as follows.

# +
(D,) = solveQuadratic(1, -8, 12)
println("Discriminant: ", D)

# + [markdown]
# Writing `First solution`, `Second solution`, `Discriminant` and so on in every print is a bit tedious, and it can also lead to unexpected bugs in the code. It is better to give names to the values returned by the function.

# +
function solveQuadraticWithNames(a, b, c)
    D = b^2 - 4 * a * c
    x1 = (-b - D^0.5) / (2 * a)
    x2 = (-b + D^0.5) / (2 * a)
    solveTuple = (Discriminant = D, x_1 = x1, x_2 = x2)
    return solveTuple
end

# +
solveQuadraticWithNames(1, -8, 12)

# + [markdown]
# Now the returned values are labeled, so the risk of mixing something up in a print is minimal and the code becomes more readable.

# + [markdown]
# Earlier I wrote that a function can be broadcast over an array. Let's try the same with a function that has multiple outputs.

# +
solveQuadraticWithNames.([5, 1, 2], 10, 5)

# + [markdown]
# Of course, everything also works for a function with multiple outputs. In this case we get the roots of the equations
# $(1)\ 5x^2 + 10x + 5 = 0;\ (2)\ x^2 + 10x + 5 = 0;\ (3)\ 2x^2+10x+5=0$

# + [markdown]
# These are perhaps some of the most important details concerning functions in Julia. Next I move on to data containers.

# + [markdown]
# # Data Containers

# + [markdown]
# ## Arrays
# One of the most popular kinds of data structure.

# + [markdown]
# The basic way to create an array is with square brackets. Suppose I have data on the trading volume of Sberbank shares on the Moscow Exchange over 5 days. Then my array looks like this.

# +
sberbankArray = [1 2 1 3 4]

# + [markdown]
# If I want to know the Sberbank trading volume on day i, I can use the following command.
#
# ```
# sberbankArray[i]
# ```

# + [markdown]
# Let's look up the Sberbank trading volume for the first day

# +
sberbankArray[1]

# + [markdown]
# <NAME> scouted out more data, and now I also have the trading volumes for Facebook and Google shares over 5 days. Facebook has $[4, 3, 2, 1, 5]$ million trades, Google has $[1, 5, 3, 7, 2]$.

# +
sellyourdataArray = [4 3 2 1 5;1 5 7 3 2]

# + [markdown]
# Now I can combine these data; to do so, I create a new array from the two above.

# +
stocksArray = [sberbankArray; sellyourdataArray]

# + [markdown]
# Now I have a 3x5 array that stores the share trading volumes :)

# + [markdown]
# ## Tuples

# + [markdown]
# The point of tuples is that they are immutable. Tuples are used as inputs and outputs of functions; in fact, we already used them when working with functions.

# +
randomArray = rand(1, 3) ### Generate a random 1x3 array
loveJulia = "I love julia" ### Create a string
randomTuple = (randomArray, loveJulia) ### Create a tuple

# + [markdown]
# The tuple above is not particularly meaningful; however, sometimes you want deliberate, named tuples. Now I will create a tuple with names for the discriminant and the roots of a quadratic equation.

# +
solveTuple = (Discriminant = 10, x_1 = 1, x_2 = 2)

# + [markdown]
# We used the same logic when writing the function that finds the roots of a quadratic equation.
#
# Of course, we can get the discriminant by its index in the tuple.

# +
solveTuple[1]

# + [markdown]
# The discriminant can also be retrieved differently, by its name, as follows:

# +
solveTuple.Discriminant

# + [markdown]
# Or we can pull the string or the array out of the first tuple

# +
randomTuple[1]

# + [markdown]
# Or loveJulia

# +
randomTuple[2]

# + [markdown]
# So we have learned how to create tuples and put arrays, strings, integers and more into them. Moreover, we have learned how to name the data in a tuple and access them by name.

# + [markdown]
# ## Dictionaries

# + [markdown]
# I have the names of listed companies and their tickers; for example, the company Apple is, unsurprisingly, called APPLE, but its ticker on the exchange is "AAPL", and Facebook's is "FB".
#
# These data need to be kept together in one place, and dictionaries are used for that. Dictionaries in Julia are created as follows:
#
# ```
# yourDict = Dict("key"=>"value")
# ```

# +
stockDict = Dict("APPL"=>"Apple", "FB"=>"Facebook")

# + [markdown]
# Suppose I want to find out which company the ticker "FB" belongs to

# +
stockDict["FB"]

# + [markdown]
# Now I know it belongs to Facebook!

# + [markdown]
# Crocodile Utundriy discovered a new company for us: Google, with the ticker "GOOGL". Let's add it to our dictionary.

# +
stockDict["GOOGL"] = "Google"

# + [markdown]
# Now let's look at our dictionary

# +
display(stockDict)

# + [markdown]
# I want more readable output, so let's use a loop and spell out what is what in these data.

# +
for (key,value) in stockDict #loop over a dictionary
    println("Ticker: ", "$key | ", "Company name: ", "$value")
end

# + [markdown]
# ## A custom container

# + [markdown]
# Sometimes it is useful to create your own container for data.
#
# My data type will consist of a number first, then a string, and then an array.

# +
dogAge = 3
dogName = "Chop"
dogGirlfriendsArray = ["Sara", "Ri", "Eric"]

# + [markdown]
# I define the type with struct

# +
struct dog
    dogAge::Int # The age must be a number
    dogName::String # The name must be a string
    dogGirlfriendsArray::Array # An array of dog girlfriends
end

# +
Chop = dog(dogAge, dogName, dogGirlfriendsArray)

# + [markdown]
# I can easily look up the age of the dog named Chop

# +
Chop.dogAge

# + [markdown]
# Find out his full name

# +
Chop.dogName

# + [markdown]
# and see his girlfriends

# +
Chop.dogGirlfriendsArray

# + [markdown]
# And finally look at Chop's full record

# +
Chop

# + [markdown]
# So creating your own data structures is often convenient. A practical example: a dog's record card for a veterinary clinic.
Basic data operations/Julia_Домашнее_задание_ipynb""-2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <center>
#     <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork/labs/Module%204/logo.png" width="300" alt="cognitiveclass.ai logo" />
# </center>
#
# # Assignment
#
# * [Story](#story)
# * [Components of the report items](#components-of-the-report-items)
# * [Expected layout](#expected-layout)
# * [Requirements to create the dashboard](#requirements-to-create-the-dashboard)
# * [What is new in this exercise compared to other labs?](#what-is-new-in-this-exercise-compared-to-other-labs?)
# * [Review](#review)
# * [Hints to complete TODOs](#hints-to-complete-todos)
# * [Application](#application)
#
# ## Story:
#
# As a data analyst, you have been given the task of monitoring and reporting US domestic airline flight performance. The goal is to analyze the performance of the reporting airlines to improve flight reliability, thereby improving customer satisfaction.
#
# Below are the key report items,
#
# * Yearly airline performance report
# * Yearly average flight delay statistics
#
# *NOTE:* Year range is between 2005 and 2020.
#
# ## Components of the report items
#
# 1. Yearly airline performance report
#
# For the chosen year provide,
#
# * Number of flights under different cancellation categories using bar chart.
# * Average flight time by reporting airline using line chart.
# * Percentage of diverted airport landings per reporting airline using pie chart.
# * Number of flights flying from each state using choropleth map.
# * Number of flights flying to each state from each reporting airline using treemap chart.
#
# 2. Yearly average flight delay statistics
#
# For the chosen year provide,
#
# * Monthly average carrier delay by reporting airline for the given year.
# * Monthly average weather delay by reporting airline for the given year.
# * Monthly average national air system delay by reporting airline for the given year.
# * Monthly average security delay by reporting airline for the given year.
# * Monthly average late aircraft delay by reporting airline for the given year.
#
# *NOTE:* You have already created the same dashboard components in the `Flight Delay Time Statistics Dashboard` section. We will be reusing them here.
#
# ## Expected Layout
#
# <center>
#     <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork/labs/Module%205/images/Layout.png" width="2000" alt="cognitiveclass.ai logo"/>
# </center>
#
# ## Requirements to create the dashboard
#
# * Create a dropdown using the reference [here](https://dash.plotly.com/dash-core-components/dropdown?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01)
# * Create two HTML divisions that can accommodate two components side by side in one division. One is an HTML heading and the other one is a dropdown.
# * Add graph components.
# * Callback function to compute data, create graphs and return them to the layout.
#
# ## What's new in this exercise compared to other labs?
#
# * Make sure the layout is clean without any default graphs or graph layouts. We will do this by 3 changes:
#
# 1.
Add `app.config.suppress_callback_exceptions = True` right after `app = JupyterDash(__name__)`. # # 2. Having empty html.Div and use the callback to Output the dcc.graph as the Children of that Div. # # 3. Add a state variable in addition to callback decorator input and output parameter. This will allow us to pass extra values without firing the callbacks. # Here, we need to pass two inputs `chart type` and `year`. Input is read only after user entering all the information. # # * Use new html display style `flex` to arrange the dropdown menu with description. # # * Update app run step to avoid getting error message before initiating callback. # # *NOTE:* These steps are only for review. # # ## Review # # Search/Look for review to know how commands are used and computations are carried out. There are 7 review items. # # * REVIEW1: Clear the layout and do not display exception till callback gets executed. # * REVIEW2: Dropdown creation. # * REVIEW3: Observe how we add an empty division and providing an id that will be updated during callback. # * REVIEW4: Holding output state till user enters all the form information. In this case, it will be chart type and year. # * REVIEW5: Number of flights flying from each state using choropleth # * REVIEW6: Return dcc.Graph component to the empty division # * REVIEW7: This covers chart type 2 and we have completed this exercise under Flight Delay Time Statistics Dashboard section # # ## Hints to complete TODOs # # ### TODO1 # # Reference [link](https://dash.plotly.com/dash-html-components/h1?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01) # # * Provide title of the dash application title as `US Domestic Airline Flights Performance`. # * Make the heading center aligned, set color as `#503D36`, and font size as `24`. # Sample: style={'textAlign': 'left', 'color': '#000000', 'font-size': 0} # # ### TODO2 # # Reference [link](https://dash.plotly.com/dash-core-components/dropdown?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01) # # Create a dropdown menu and add two chart options to it. # # Parameters to be updated in `dcc.Dropdown`: # # * Set `id` as `input-type`. # * Set `options` to list containing dictionaries with key as `label` and user provided value for labels in `value`. # # *1st dictionary* # # * label: Yearly Airline Performance Report # * value: OPT1 # # *2nd dictionary* # # * label: Yearly Airline Delay Report # * value: OPT2 # * Set placeholder to `Select a report type`. # * Set width as `80%`, padding as `3px`, font size as `20px`, text-align-last as `center` inside style parameter dictionary. # # #### Skeleton: # # ``` # dcc.Dropdown(id='....', # options=[ # {'label': '....', 'value': '...'}, # {'label': '....', 'value': '...'} # ], # placeholder='....', # style={....}) # ``` # # ### TODO3 # # Add a division with two empty divisions inside. For reference, observe how code under `REVIEW` has been structured. # # Provide division ids as `plot4` and `plot5`. Display style as `flex`. # # #### Skeleton # # ``` # html.Div([ # html.Div([ ], id='....'), # html.Div([ ], id='....') # ], style={....}) # ``` # # ### TODO4 # # Our layout has 5 outputs so we need to create 5 output components. 
Review how input components are constructured to fill in for output component. # # It is a list with 5 output parameters with component id and property. Here, the component property will be `children` as we have created empty division and passing in `dcc.Graph` after computation. # # Component ids will be `plot1` , `plot2`, `plot2`, `plot4`, and `plot5`. # # #### Skeleton # # ``` # [Output(component_id='plot1', component_property='children'), # Output(....), # Output(....), # Output(....), # Output(....)] # ``` # # ### TODO5 # # Deals with creating line plots using returned dataframes from the above step using `plotly.express`. Link for reference is [here](https://plotly.com/python/line-charts/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01) # # Average flight time by reporting airline # # * Set figure name as `line_fig`, data as `line_data`, x as `Month`, y as `AirTime`, color as `Reporting_Airline` and `title` as `Average monthly flight time (minutes) by airline`. # # #### Skeleton # # ``` # carrier_fig = px.line(avg_car, x='Month', y='CarrierDelay', color='Reporting_Airline', title='Average carrrier delay time (minutes) by airline')` # ``` # # ) # # ### TODO6 # # Deals with creating treemap plot using returned dataframes from the above step using `plotly.express`. Link for reference is [here](https://plotly.com/python/treemaps/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01) # # Number of flights flying to each state from each reporting airline # # * Set figure name as `tree_fig`, data as `tree_data`, path as `['DestState', 'Reporting_Airline']`, values as `Flights`, colors as `Flights`, color_continuous_scale as `'RdBu'`, and title as `'Flight count by airline to destination state'` # # #### Skeleton # # ``` # tree_fig = px.treemap(data, path=['...', '...'], # values='...', # color='...', # color_continuous_scale='...', # title='...' # ) # ``` # # ## Application # # + # Import required libraries import pandas as pd import dash from dash import dcc from dash import html from dash.dependencies import Input, Output, State from jupyter_dash import JupyterDash import plotly.graph_objects as go import plotly.express as px from dash import no_update # Create a dash application app = JupyterDash(__name__) JupyterDash.infer_jupyter_proxy_config() # REVIEW1: Clear the layout and do not display exception till callback gets executed app.config.suppress_callback_exceptions = True # Read the airline data into pandas dataframe airline_data = pd.read_csv('https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork/Data%20Files/airline_data.csv', encoding = "ISO-8859-1", dtype={'Div1Airport': str, 'Div1TailNum': str, 'Div2Airport': str, 'Div2TailNum': str}) # List of years year_list = [i for i in range(2005, 2021, 1)] """Compute graph data for creating yearly airline performance report Function that takes airline data as input and create 5 dataframes based on the grouping condition to be used for plottling charts and grphs. Argument: df: Filtered dataframe Returns: Dataframes to create graph. 
""" def compute_data_choice_1(df): # Cancellation Category Count bar_data = df.groupby(['Month','CancellationCode'])['Flights'].sum().reset_index() # Average flight time by reporting airline line_data = df.groupby(['Month','Reporting_Airline'])['AirTime'].mean().reset_index() # Diverted Airport Landings div_data = df[df['DivAirportLandings'] != 0.0] # Source state count map_data = df.groupby(['OriginState'])['Flights'].sum().reset_index() # Destination state count tree_data = df.groupby(['DestState', 'Reporting_Airline'])['Flights'].sum().reset_index() return bar_data, line_data, div_data, map_data, tree_data """Compute graph data for creating yearly airline delay report This function takes in airline data and selected year as an input and performs computation for creating charts and plots. Arguments: df: Input airline data. Returns: Computed average dataframes for carrier delay, weather delay, NAS delay, security delay, and late aircraft delay. """ def compute_data_choice_2(df): # Compute delay averages avg_car = df.groupby(['Month','Reporting_Airline'])['CarrierDelay'].mean().reset_index() avg_weather = df.groupby(['Month','Reporting_Airline'])['WeatherDelay'].mean().reset_index() avg_NAS = df.groupby(['Month','Reporting_Airline'])['NASDelay'].mean().reset_index() avg_sec = df.groupby(['Month','Reporting_Airline'])['SecurityDelay'].mean().reset_index() avg_late = df.groupby(['Month','Reporting_Airline'])['LateAircraftDelay'].mean().reset_index() return avg_car, avg_weather, avg_NAS, avg_sec, avg_late # Application layout app.layout = html.Div(children=[ # TODO1: Add title to the dashboard # REVIEW2: Dropdown creation # Create an outer division html.Div([ # Add an division html.Div([ # Create an division for adding dropdown helper text for report type html.Div( [ html.H2('Report Type:', style={'margin-right': '2em'}), ] ), # TODO2: Add a dropdown # Place them next to each other using the division style ], style={'display':'flex'}), # Add next division html.Div([ # Create an division for adding dropdown helper text for choosing year html.Div( [ html.H2('Choose Year:', style={'margin-right': '2em'}) ] ), dcc.Dropdown(id='input-year', # Update dropdown values using list comphrehension options=[{'label': i, 'value': i} for i in year_list], placeholder="Select a year", style={'width':'80%', 'padding':'3px', 'font-size': '20px', 'text-align-last' : 'center'}), # Place them next to each other using the division style ], style={'display': 'flex'}), ]), # Add Computed graphs # REVIEW3: Observe how we add an empty division and providing an id that will be updated during callback html.Div([ ], id='plot1'), html.Div([ html.Div([ ], id='plot2'), html.Div([ ], id='plot3') ], style={'display': 'flex'}), # TODO3: Add a division with two empty divisions inside. See above disvision for example. ]) # Callback function definition # TODO4: Add 5 ouput components @app.callback( [....], [Input(component_id='input-type', component_property='value'), Input(component_id='input-year', component_property='value')], # REVIEW4: Holding output state till user enters all the form information. 
In this case, it will be chart type and year [State("plot1", 'children'), State("plot2", "children"), State("plot3", "children"), State("plot4", "children"), State("plot5", "children") ]) # Add computation to callback function and return graph def get_graph(chart, year, children1, children2, c3, c4, c5): # Select data df = airline_data[airline_data['Year']==int(year)] if chart == 'OPT1': # Compute required information for creating graph from the data bar_data, line_data, div_data, map_data, tree_data = compute_data_choice_1(df) # Number of flights under different cancellation categories bar_fig = px.bar(bar_data, x='Month', y='Flights', color='CancellationCode', title='Monthly Flight Cancellation') # TODO5: Average flight time by reporting airline # Percentage of diverted airport landings per reporting airline pie_fig = px.pie(div_data, values='Flights', names='Reporting_Airline', title='% of flights by reporting airline') # REVIEW5: Number of flights flying from each state using choropleth map_fig = px.choropleth(map_data, # Input data locations='OriginState', color='Flights', hover_data=['OriginState', 'Flights'], locationmode = 'USA-states', # Set to plot as US States color_continuous_scale='GnBu', range_color=[0, map_data['Flights'].max()]) map_fig.update_layout( title_text = 'Number of flights from origin state', geo_scope='usa') # Plot only the USA instead of globe # TODO6: Number of flights flying to each state from each reporting airline # REVIEW6: Return dcc.Graph component to the empty division return [dcc.Graph(figure=tree_fig), dcc.Graph(figure=pie_fig), dcc.Graph(figure=map_fig), dcc.Graph(figure=bar_fig), dcc.Graph(figure=line_fig) ] else: # REVIEW7: This covers chart type 2 and we have completed this exercise under Flight Delay Time Statistics Dashboard section # Compute required information for creating graph from the data avg_car, avg_weather, avg_NAS, avg_sec, avg_late = compute_data_choice_2(df) # Create graph carrier_fig = px.line(avg_car, x='Month', y='CarrierDelay', color='Reporting_Airline', title='Average carrrier delay time (minutes) by airline') weather_fig = px.line(avg_weather, x='Month', y='WeatherDelay', color='Reporting_Airline', title='Average weather delay time (minutes) by airline') nas_fig = px.line(avg_NAS, x='Month', y='NASDelay', color='Reporting_Airline', title='Average NAS delay time (minutes) by airline') sec_fig = px.line(avg_sec, x='Month', y='SecurityDelay', color='Reporting_Airline', title='Average security delay time (minutes) by airline') late_fig = px.line(avg_late, x='Month', y='LateAircraftDelay', color='Reporting_Airline', title='Average late aircraft delay time (minutes) by airline') return[dcc.Graph(figure=carrier_fig), dcc.Graph(figure=weather_fig), dcc.Graph(figure=nas_fig), dcc.Graph(figure=sec_fig), dcc.Graph(figure=late_fig)] # Run the app if __name__ == '__main__': # REVIEW8: Adding dev_tools_ui=False, dev_tools_props_check=False can prevent error appearing before calling callback function app.run_server(mode="inline", host="localhost", debug=False, dev_tools_ui=False, dev_tools_props_check=False) # - # ## Summary # # Congratulations for completing your dash and plotly assignment. 
# # More information about the libraries can be found [here](https://dash.plotly.com/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01) # # ## Author # # [<NAME>](https://www.linkedin.com/in/saishruthi-swaminathan/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01) # # ## Changelog # # | Date | Version | Changed by | Change Description | # | ---------- | ------- | ---------- | ------------------------------------ | # | 12-18-2020 | 1.0 | Nayef | Added dataset link and upload to Git | # # ## <h3 align="center"> © IBM Corporation 2020. All rights reserved. <h3/> #
8_Data Visualization with Python/5_Peer_Graded_Assignment_Questions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Load Data # + # %load_ext autoreload # %autoreload 2 import os; import sys; sys.path.append('../') import pandas as pd import tqdm import warnings import copy warnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning) import networkx as nx import numpy as np from collections import Counter from collections import OrderedDict import matplotlib.pyplot as plt # - ## Configure file and folder names datafolder = "../data" spadl_h5 = os.path.join(datafolder,"spadl-statsbomb.h5") games = pd.read_hdf(spadl_h5,"games") games = games[games.competition_name == "FIFA World Cup"] print("nb of games:", len(games)) # # Helper Functions def teams_in_game(actions): team1 = actions.loc[0]["team_name"] for i in range(1, len(actions)): team2 = actions.loc[i]["team_name"] if (team2 != team1): return team1, team2 def game_pass_list(actions, team1): edges1 = [] edges2 = [] pass_action = ['pass', 'throw_in', 'corner_crossed', 'freekick_crossed', 'cross', 'freekick_short', 'goalkick', 'corner_short'] for i in range(len(actions)): action = actions.loc[i]["type_name"] if action in pass_action: if actions.loc[i]["result_name"] == 'success': passer = actions.loc[i]["player_name"] team = actions.loc[i]["team_name"] j = 1 while i+j < len(actions) and (actions.loc[i+j]["team_name"] != team): j += 1 try: passer = actions.loc[i]["player_name"] receiver = actions.loc[i+j]["player_name"] if team == team1: edges1.append((passer, receiver)) else: edges2.append((passer, receiver)) except: continue return edges1, edges2 def create_graph(passes): G = nx.DiGraph((x, y, {'weight': v}) for (x, y), v in Counter(passes).items()) return G def get_total_links(G): DV = G.degree(weight='weight') return sum(deg for n, deg in DV)/2 def get_metrics(G): total_links = get_total_links(G) density = nx.density(G) diameter = nx.diameter(G.to_undirected()) average_clustering = nx.average_clustering(G) return total_links, density, diameter, average_clustering def compute_average(team_props): average = {} for team in team_props: average[team] = [np.mean(team_props[team][0]), np.mean(team_props[team][1]), np.mean(team_props[team][2]), np.mean(team_props[team][3])] return average def world_cup_team_placements(): placements = {} placements['France'] = 1.5 placements['Croatia'] = 1.5 placements['Belgium'] = 3.5 placements['England'] = 3.5 placements['Uruguay'] = 6.5 placements['Brazil'] = 6.5 placements['Russia'] = 6.5 placements['Sweden'] = 6.5 placements['Portugal'] = 12.5 placements['Argentina'] = 12.5 placements['Mexico'] = 12.5 placements['Japan'] = 12.5 placements['Spain'] = 12.5 placements['Denmark'] = 12.5 placements['Switzerland'] = 12.5 placements['Colombia'] = 12.5 placements['Saudi Arabia'] = 24.5 placements['Iran'] = 24.5 placements['Peru'] = 24.5 placements['Nigeria'] = 24.5 placements['Serbia'] = 24.5 placements['South Korea'] = 24.5 placements['Tunisia'] = 24.5 placements['Senegal'] = 24.5 placements['Egypt'] = 24.5 placements['Morocco'] = 24.5 placements['Australia'] = 24.5 placements['Iceland'] = 24.5 placements['Costa Rica'] = 24.5 placements['Germany'] = 24.5 placements['Panama'] = 24.5 placements['Poland'] = 24.5 return placements # # Compute Network Metrics players = pd.read_hdf(spadl_h5,"players") teams = pd.read_hdf(spadl_h5,"teams") actiontypes = pd.read_hdf(spadl_h5, "actiontypes") 
bodyparts = pd.read_hdf(spadl_h5, "bodyparts") results = pd.read_hdf(spadl_h5, "results") # + team_props = {} for game in tqdm.tqdm(list(games.itertuples())): actions = pd.read_hdf(spadl_h5,f"actions/game_{game.game_id}") actions = ( actions.merge(actiontypes) .merge(results) .merge(bodyparts) .merge(players,"left",on="player_id") .merge(teams,"left",on="team_id") .sort_values(["period_id", "time_seconds", "timestamp"]) .reset_index(drop=True) ) team1, team2 = teams_in_game(actions) if team1 is np.nan or team2 is np.nan: continue passes1, passes2 = game_pass_list(actions, team1) G1 = create_graph(passes1) G2 = create_graph(passes2) total_links1, density1, diameter1, avg_clustering1 = get_metrics(G1) total_links2, density2, diameter2, avg_clustering2 = get_metrics(G2) if team1 in team_props: team_props[team1][0].append(total_links1) team_props[team1][1].append(density1) team_props[team1][2].append(diameter1) team_props[team1][3].append(avg_clustering1) else: team_props[team1] = [[total_links1], [density1], [diameter1], [avg_clustering1]] if team2 in team_props: team_props[team2][0].append(total_links2) team_props[team2][1].append(density2) team_props[team2][2].append(diameter2) team_props[team2][3].append(avg_clustering2) else: team_props[team2] = [[total_links2], [density2], [diameter2], [avg_clustering2]] # - average = compute_average(team_props) placements = world_cup_team_placements() ordered_teams = OrderedDict(sorted(average.items(), key=lambda x: x[1][0], reverse=True)) for team in ordered_teams: print(team + " (" + str(placements[team]) + ") : " + str(ordered_teams[team])) # # Placements import scipy.stats as stats # + place = [24.5, 12.5, 6.5, 3.5, 1.5] total_links = {} density = {} diameter = {} clustering_coefficient = {} for p in place: total_links[p] = [] density[p] = [] diameter[p] = [] clustering_coefficient[p] = [] for team in team_props: total_links[placements[team]] += team_props[team][0] density[placements[team]] += team_props[team][1] diameter[placements[team]] += team_props[team][2] clustering_coefficient[placements[team]] += team_props[team][3] for p in place: print("Placement:", p) print("Total Links:", np.mean(total_links[p])) print("Density:", np.mean(density[p])) print("Diameter:", np.mean(diameter[p])) print("Clustering Coefficient:", np.mean(clustering_coefficient[p])) print() # - # # Group vs Knockout ANOVA Test # + net1 = [] net2 = [] net3 = [] net4 = [] for i in range(1, len(place)): net1 += total_links[place[i]] net2 += density[place[i]] net3 += diameter[place[i]] net4 += clustering_coefficient[place[i]] f_scores = [] f_scores.append(stats.f_oneway(total_links[24.5], net1)) f_scores.append(stats.f_oneway(density[24.5], net2)) f_scores.append(stats.f_oneway(diameter[24.5], net3)) f_scores.append(stats.f_oneway(clustering_coefficient[24.5], net4)) for f in f_scores: print(f) # - # # Quarters vs Semifinals ANOVA Test # + f_scores = [] f_scores.append(stats.f_oneway(total_links[6.5], total_links[3.5]+total_links[1.5])) f_scores.append(stats.f_oneway(density[6.5], density[3.5]+density[1.5])) f_scores.append(stats.f_oneway(diameter[6.5], diameter[3.5]+diameter[1.5])) f_scores.append(stats.f_oneway(clustering_coefficient[6.5], clustering_coefficient[3.5]+clustering_coefficient[1.5])) for f in f_scores: print(f) # - # # World Cup Regression from scipy import stats # + metrics = ["Total Links", "Density", "Diameter", "Clustering"] for i in range(4): X = [] y = [] for team in ordered_teams: X.append(ordered_teams[team][i]) y.append(placements[team]) slope, intercept, 
r_value, p_value, std_err = stats.linregress(X,y) yPred1 = [intercept + slope * x for x in X] plt.scatter(X, y,alpha=0.5) plt.plot(X, yPred1, 'r', label="Linear") plt.title(metrics[i] + " L2 Linear Regression") plt.ylabel("Placement") plt.xlabel(metrics[i]) plt.show() print("slope:", slope) print("r:", r_value) print("p:", p_value) print("std_err", std_err) print() # -
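# As a quick, illustrative sanity check of the helper functions defined at the top of this notebook,
# they can be run on a tiny hand-made pass list (the player names here are made up, not from the data):
toy_passes = [("A", "B"), ("B", "C"), ("C", "A"), ("A", "B")]
G_toy = create_graph(toy_passes)
print(get_metrics(G_toy))  # (total_links, density, diameter, average_clustering)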
Network Analysis/World Cup Network Properties.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from fastai.text import * import pandas as pd from path import Path as path path2 = path("model/").mkdir_p() df = pd.DataFrame() for f in path("./").files(): if f.ext == ".csv": df = df.append(pd.read_csv(f,index_col = 0).dropna().sample(1200)) print(len(df)) df.to_csv("model/mbti.csv", index=None) # Language model data data_lm = TextLMDataBunch.from_csv(path2, 'mbti.csv', num_workers = 0) # Classifier model data data_clas = TextClasDataBunch.from_csv(path2, 'mbti.csv', vocab=data_lm.train_ds.vocab, bs=32, num_workers = 0) data_lm.save('data_lm_export.pkl') data_clas.save('data_clas_export.pkl') data_lm = load_data(path2, 'data_lm_export.pkl') data_clas = load_data(path2, 'data_clas_export.pkl', bs=16) learn = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.5) learn.fit_one_cycle(1, 1e-2) learn.unfreeze() learn.fit_one_cycle(1, 1e-3) learn.predict("This is a review about", n_words=10) learn.save_encoder('ft_enc') learn = text_classifier_learner(data_clas, AWD_LSTM, drop_mult=0.5) learn.load_encoder('ft_enc') data_clas.show_batch() learn.fit_one_cycle(1, 1e-2) learn.freeze_to(-2) learn.fit_one_cycle(1, slice(5e-3/2., 5e-3)) learn.export("model") learn.save("model1") learn.unfreeze() learn.fit_one_cycle(1, slice(2e-3/100, 2e-3))
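# A minimal inference sketch, assuming the export above succeeded (fastai v1 API, as used throughout
# this notebook; the exported file name 'model' comes from learn.export("model") earlier, and the input
# sentence is just an arbitrary example):
learn_inf = load_learner(path2, 'model')
category, _, probs = learn_inf.predict("I spent the evening quietly planning next week in detail.")
print(category, probs.max())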
mbti.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Final Report # ## NCAA Predictor # <NAME> and <NAME> # Spring 2021, May 5 # CPSC322 - Data Science Algorithms (Sprint) # Final Project # # ## Introduction # We decided to use Men's Basketball NCAA Statistics from the 2020-21 season to try to come up with classifiers that would determine winning percentage of the individual teams based on their statistics in the areas studied. # # We found that there were several moderately strong indicators of winning percentage, particularly in Rebound margin and in Effective Field Goal Percentage, which we will cover further down in this report. We found that Scoring Margin was a fantastic predictor thereto, so much so that we dropped it as a used attribute towards finding interesting results. # # ## Data Analysis # ### Preface # We'll be using the following statistics, as noted by the side-by-side notation in LaTeX: # - $PTS$: The number of points the team scored. # - $PTS_{opp}$: The number of points the team allowed. # - $3PM$: The number of 3-Point shots made by the team. # - $2PM$: The number of 2-Point shots made by the team. # - $FGA$: The number of Field Goals (3's and 2's) attempted by the team. # - $REB$: The number of rebounds the team recovered. # - $REB_{opp}$: The number of rebounds the team's opponent recovered. # - $SPG$: The number of steals per game the team got. # - $BPG$: The number of blocks per game the team achieved. # - $W$: The number of wins a team has on the season. # - $L$: The number of losses a team has on the season. # - $G$: The number of games a team played on the season. # # Additionally, if you wish to see the data we used in a csv format, check out [NCAA_Statistics.csv](input_data/NCAA_Statistics.csv). # # ### Attribute Selection # There are four attributes used in the classification schemas, and one classification. They are as follows: # # - _**Scoring Margin**_: $SCM = \frac{PTS}{PTS_{opp}}$ # The margin of a team's scored points to their opponent's scored points. Average should be 1.0, when weighting for points scored for the whole game (which we are not doing). # # - _**Effective Field Goal Percentage**_: $EFG\% = \frac{3PM * 1.5 + 2PM}{FGA}$ # A team's likelihood of making a given shot given historical data, with added weight to three pointers (for their point value). Has no default average. # # - _**Rebound Margin**_: $RBM = \frac{REB}{REB_{opp}}$ # A team's ratio of rebounds taken versus their opponent. Average should be 1.0, given weight for number of rebounds recovered for the whole game. # # - _**Steals Plus Blocks Per Game**_: $SPB = SPG + BPG$ # The total number of steals and blocks a team gets in a game. No default average. # # And the classification... # - _**Winning Percentage**_: $W\% = \frac{W}{G}$ # The percentage of games a team played over the season that they won. Expressed as a value $x$ such that $0 \le x \le 1$. # # You can find all of these data in [NCAA_Statistics_Parsed.csv](input_data/NCAA_Statistics_Parsed.csv), in the input_data folder. To see the code used, check out [data_parser.ipynb](data_parser.ipynb). # ### Normalization # We normalized using min-max scaling on each attribute, from its minimum to its maximum. 
The exception was in the winning percentage, which we normalized along 0 to 1, the possible minimum and maximum for the value, and one acheivable historically, in both directions. # # You can see the result of this in [NCAA_Statistics_Normalized.csv](input_data/NCAA_Statistics_Normalized.csv). # # ### Discretization # We checked a ton of splitting methods here, and upon looking at a ton of ditribution charts and generation of many decision trees, we decided to split each attribute into 4 discrete labels. We also decided here to drop the Scoring Margin feature, as we felt it was not helpful, and if one had the information thereof, it would be trivial to take a stab at what classification each deserves. With this discretization schema, we were able to generate interesting data, as seen below, further down. # # Upon this Discretization, we got [NCAA_Statistics_44444.csv](input_data/NCAA_Statistics_44444.csv). # # See how we did both of these tasks in [norm_and_disc.ipynb](norm_and_disc.ipynb). # ### Summary Statistics # Using the data at the end of [EDA.ipynb](EDA.ipynb)... # ![Summary Stats](img/SummaryStats.png) # Seeing this, our earlier inferences align with the proof here. # # ### Distributions and Regression Analysis # We made several histograms for each attribute to gather the frequency between certain bounds, and linear regression plots to show our work. Rather than copy over the work, [here's a link to EDA.ipynb instead](EDA.ipynb). # # Some analysis is twofold, here. For one, it's visible to the naked eye that these are normal distributions, skewed as they may be. The bell curve shape is obvious enough. Second, we have one amazing correlative stat, being Scoring Margin, with around a .9 correlation coefficient, two moderately good ones in Rebound Margin and Effective Field Goal Percentage, each with around .6 r, and a poor one, in Steals + Bloacks, which has around a .2 r. # # ## Classification # You can see all of our classification work in [classification_eval.ipynb](classification_eval.ipynb), alongside the confusion matrices and the like listed below. # # ### kNN Classification # We used our standard kNN classification upon the dataset, with n_neighbors=10. The kNN performed without issue using stratified k-fold cross-validation with 10 folds (we'll use this for all our classifiers). We got the following info: # ![kNN Confusion Matrix](img/knnConfusionMatrix.png) # A 53% accuracy rate is nothing to scoff at with 4 classification possibilities. Notably, this classifier was excellent in detecting bad teams; A 71% recognition is notable therewith. The detection rates in 2's and 3's were also solid, but they were abysmal for detecting 4's. It is tough to detect 4's in general, as there are few trees that lead there, but alas there should be some. Overall, for our worst classifier, kNN performed adaquetly. # # ### Decision Tree Classification # Moving to decision trees, we decided to run two separate subprojects. # # The first subproject (found in [decision_tree.ipynb](decision_tree.ipynb)) helped with testing, and let us flex our pruning muscles. We used this to decide our splits, to make them interesting, and this is how we got the \_4444 splits. We manually pruned [this tree](tree_vis/_4444_tree.pdf) to get [this tree](tree_vis/_4444_tree_pruned.pdf); A good contrast. Because we used the whole dataset to build the former, we decided not to run tests upon this tree, instead using unpruned trees for our testing. 
On a side note, some of the alternate trees we got with Scoring Margin were, um, [bad](tree_vis/24444_tree.pdf). It was a good call to drop Scoring Margin. # # The second subproject involved actually testing out decision tree results using our defined function. Using the \_4444 format, we came out with these results: # ![Decision Tree Confusion Matrix](img/DecisionTreeConfMatrix.PNG) # Excuse the difference in formatting; it's a result of us splitting up the work even to the most minor of problems. # # Anyway, here we see that the classifier worked well at detecting 3's, which, by the distributions shown, were proven to be (ever so slightly) the most populous group, with 2's leading right behind. This suggests the classifier is good at detecting prevalent cases as themselves, and terrible at pointing out fringe cases, as shown by the bad detection results above. To improve this in the future, we'd implement automatic pruning. # # ### Random Forest Classification # On to our ensemble classifier, Random Forests. We implemented Forests with two varying aspects for randomization: the attributes used and the subdataset used. Let's go over each. # # For the problem of using attributes, I (Ben) decided to implement my own strategy to get a good mix, but with weight towards using all the attributes. It's the following, as mentioned in [myclassifiers.py](mysklearn/myclassifiers.py): # # # 1. Set n to num_atts # # 2. If n == min_atts, return the current atts # # 3. Flip a coin, heads or tails # # 4. If heads, return the current attributes # # 5. Otherwise: # # 6. Remove a random attribute from the list # # 7. n -= 1 # # 8. Repeat from Step 2 onwards # # Essentially, we wanted a formula that continuously split the odds in half of using the set as is or not. This was successful, as we got a generally good number of removals via testing in our [testing file](random_forest.ipynb) (about half of the generated sets had removals, as verified through prints). A short sketch of this scheme is included at the end of this report. # # As for the latter problem, we implemented bagging. Not much to see here, though it should be noted we stored validation accuracy in order to implement optional weighted voting. In essence, if the user wanted to weight the votes of accurate "experts" over the common citizenry, they could set weighted=True, and each vote would be multiplied by the validation accuracy. Upon testing this, there wasn't a noticeable difference in accuracy, so we didn't use it for our testing. # # We ran trials on seeds 10-15, and found Random Forests had an approximate average accuracy of 57%, contributed to by the following run on seed 15: # ![Random Forest Confusion Matrix](img/RandomForestConfusionMatrix.png) # As you probably see, this is very similar to the Decision Tree classifier's results, which makes a ton of sense! We also decided to use m=10 and n=50, giving a community-of-experts approach. This was our best result (though not by much), and we used it for our Heroku rollout. # # Before that, a reminder that you can find all this work in [classification_eval.ipynb](classification_eval.ipynb). # ### Heroku Rollout of Random Forests # For our Heroku implementation of our best classifier, we implemented our MyRandomForestClassifier. # # [Home Screen](https://ncaa-predictor-app-bclark.herokuapp.com/) # # [Predicting Screen Example](https://ncaa-predictor-app-bclark.herokuapp.com/predict?Scoring_Margin=1&efg=2&spg_bpg=2&rebound_margin=2) # # ## Conclusion # We conclude that the attributes and advanced statistics we used were very helpful in finding classifications. 
While there is some degree of variability in a team's winning percentage, these 3 or 4 attributes really help to decipher such a performance's occurrence. # # The data we used was from [the NCAA Statistics Page and API](http://stats.ncaa.org/rankings/change_sport_year_div). We simply moved the API data from the site to Excel sheets, which were converted into csv files via Windows Excel. From there, we did all the work explained above in this report. There were no problems for classification. # # As for our approach and challenges, I have to say candidly that we crushed splitting up the work and accomplishing our set tasks. Each of us did equal parts and communicated when we were done, leading to a smooth rollout. Here's the list of who did what: # # Brandon: # - Put together data from API # - Exploratory Data Analysis # - kNN implementation # - Heroku implementation and creation # # Ben: # - Statistic Parsing # - Discretization and Normalization # - Decision Tree implementation # - Random Forest implementation # # The challenges we ran into were technicalities, such as some bugs in our Heroku setup, struggles to normalize the data (given my (Ben's) faulty myutils functions up to that point), and git merges, though we were able to get through them fairly steadily. # # To improve the performance, we think adding pruning to the decision trees, and therefore to the random forests, would help a ton. Additionally, we could expand the scope of our data beyond the current season. This would have been fairly easy, though tedious in setup and in computational time. # # If you have any questions, feel free to shoot either of us a message through your preferred service. Have a great summer!
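# ## Appendix: Sketch of the Attribute-Selection Scheme
# The following is an illustrative Python sketch of the coin-flip attribute-selection
# scheme described in the Random Forest Classification section above. It is not the
# actual code from [myclassifiers.py](mysklearn/myclassifiers.py); the function name
# `select_attributes`, the `min_atts` default, and the example attribute names are
# placeholders used only to make the steps concrete.

# +
import random

def select_attributes(attributes, min_atts=1):
    """Repeatedly flip a coin: on heads keep the current attribute set,
    on tails drop one random attribute, stopping once min_atts remain."""
    atts = list(attributes)                 # step 1: start from all attributes
    while len(atts) > min_atts:             # step 2: stop at the minimum size
        if random.random() < 0.5:           # steps 3-4: heads -> keep current set
            return atts
        atts.remove(random.choice(atts))    # steps 5-7: tails -> drop one at random
    return atts                             # steps 2/8: minimum reached

# Example run: roughly half of the calls should return the full set
print(select_attributes(["efg", "rebound_margin", "spg_bpg"]))
# -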
final_report.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Download data at: https://data.4tu.nl/repository/uuid:20f1c784-1143-4c61-a03d-7a3454914abb # Run all cells to generate the figures used in the paper. # + import numpy as np import holoviews as hv import holoviews_rc import kwant from fun import * import os def ticks(plot, x=True, y=True): hooks = [tick_marks] if x: xticks = [0, 1, 2] else: xticks = [(0,''), (1,''), (2,'')] hooks.append(hide_x) if y: yticks = [0, 17, 35] else: yticks = [(0, ''), (17, ''), (35, '')] hooks.append(hide_y) return plot(plot={'Image': {'xticks': xticks, 'yticks': yticks}, 'Overlay': {'final_hooks': hooks}}) def tick_marks(plot, element): ax = plot.handles['axis'] fig = plot.state ax.tick_params(which='major', color='k', size=3) def hide_x(plot, element): ax = plot.handles['axis'] ax.set_xlabel('') def hide_y(plot, element): ax = plot.handles['axis'] ax.set_ylabel('') hv.notebook_extension() # %output size=100 dpi=250 css={'width': '3.4in'} renderer = hv.Store.renderers['matplotlib'].instance(fig='pdf', size=100, dpi=250) from holoviews.plotting.mpl import MPLPlot MPLPlot.fig_inches = (3.4, None) # - # # Load data and create a custom cmap import matplotlib.cm import matplotlib.colors as mcolors colors1 = matplotlib.cm.binary_r(np.linspace(0.5, 1, 128)) colors2 = matplotlib.cm.gist_heat_r(np.linspace(0, 0.8, 127)) colors = np.vstack((colors1, colors2)) mymap = mcolors.LinearSegmentedColormap.from_list('my_colormap', colors) sc_on_side_alpha100 = create_holoviews('data/0_to_2T_1x1_angles_sc_on_side_mu_ranging_from_minus_2_to_plus_2_full_phase_diagram_with_correction_A_alpha100.h5') sc_on_side_no_orb_alpha100 = create_holoviews('data/0_to_2T_1x1_angles_sc_on_side_mu_ranging_from_minus_2_to_plus_2_full_phase_diagram_with_correction_A_no_orbital_alpha100.h5') sc_on_side = create_holoviews('data/0_to_2T_1x1_angles_sc_on_side_mu_ranging_from_minus_2_to_plus_2_full_phase_diagram_with_correction_A.h5') sc_on_side_no_orb = create_holoviews('data/0_to_2T_1x1_angles_sc_on_side_mu_ranging_from_minus_2_to_plus_2_full_phase_diagram_with_correction_A_no_orbital.h5') # # Full phase diagram for superconductor on side of wire # ## Band gaps # + # %%opts Layout [vspace=0] Image (cmap=mymap clims=(-197, 197)) # %%opts Layout [sublabel_position=(-0.4, 0.9) sublabel_format='({alpha})' sublabel_size=13] # %%opts Path (color='g') im1 = sc_on_side_no_orb.Phase_diagram.Band_gap[0.5, 0] im2 = sc_on_side.Phase_diagram.Band_gap[0.5, 0] im1 = im1.relabel(r"$\bm{B} \parallel x, \; \bm{A} = 0$", depth=1) im2 = im2.relabel(r"$\bm{B} \parallel x, \; \bm{A} \ne 0$", depth=1) max1 = np.nanmax(im1.Im.Band_gap.data) max2 = np.nanmax(im2.Im.Band_gap.data) max_gap = np.max((max1, max2)) sc_on_side_hist = (ticks(im1, x=False).hist(bin_range=(0, max_gap)) + ticks(im2).hist(bin_range=(0, max_gap))) sc_on_side_hist.cols(1) # - # print the maximum band gaps print("""The maximum band gap of the top plot is {:.4} meV. 
The maximum band gap of the lower plot is {:.4} meV.""".format(max1, max2)) # + # %%opts Layout [vspace=0] Image (cmap=mymap clims=(-197, 197)) # %%opts Layout [sublabel_position=(-0.4, 0.9) sublabel_format='({alpha})' sublabel_size=13] # %%opts Path (color='g') im1_alpha100 = sc_on_side_no_orb_alpha100.Phase_diagram.Band_gap[0.5, 0] im2_alpha100 = sc_on_side_alpha100.Phase_diagram.Band_gap[0.5, 0] im1_alpha100 = im1_alpha100.relabel(r"$\bm{B} \parallel x, \; \bm{A} = 0$", depth=1) im2_alpha100 = im2_alpha100.relabel(r"$\bm{B} \parallel x, \; \bm{A} \ne 0$", depth=1) max1_alpha100 = np.nanmax(im1_alpha100.Im.Band_gap.data) max2_alpha100 = np.nanmax(im2_alpha100.Im.Band_gap.data) max_gap_alpha100 = np.max((max1_alpha100, max2_alpha100)) sc_on_side_hist_alpha100 = (ticks(im1_alpha100, x=False).hist(bin_range=(0, max_gap_alpha100)) + ticks(im2_alpha100).hist(bin_range=(0, max_gap_alpha100))) (sc_on_side_hist_alpha100).cols(1) # + # renderer.save(sc_on_side_hist, 'paper/figures/sc_on_side_hist', fmt='pdf') # - # print the maximum band gaps print("""The maximum band gap of the top plot is {:.4} meV. The maximum band gap of the lower plot is {:.4} meV.""".format(max1_alpha100, max2_alpha100)) # ## Inverse decay length # + # %%opts Layout [vspace=0] Image (clims=(0, 1.5)) # %%opts Layout [sublabel_position=(-0.4, 0.9) sublabel_format='({alpha})' sublabel_size=13] # %%opts Path (color='g') im1 = sc_on_side_no_orb.Phase_diagram.Inverse_decay_length[0.5, 0] im2 = sc_on_side.Phase_diagram.Inverse_decay_length[0.5, 0] im1 = im1.relabel(r"$\bm{B} \parallel x, \; \bm{A} = 0$", depth=1) im2 = im2.relabel(r"$\bm{B} \parallel x, \; \bm{A} \ne 0$", depth=1) dat1 = im1.Im.Inverse_decay_length.data dat2 = im2.Im.Inverse_decay_length.data dat1[dat1<0] = np.nan dat2[dat2<0] = np.nan sc_on_side_length = (ticks(im1, x=False).hist(bin_range=(0, 1)) + ticks(im2).hist(bin_range=(0, 1))) sc_on_side_length.cols(1) # + # %%opts Layout [vspace=0] Image (clims=(0, 1.5)) # %%opts Layout [sublabel_position=(-0.4, 0.9) sublabel_format='({alpha})' sublabel_size=13] # %%opts Path (color='g') im1_alpha100 = sc_on_side_no_orb_alpha100.Phase_diagram.Inverse_decay_length[0.5, 0] im2_alpha100 = sc_on_side_alpha100.Phase_diagram.Inverse_decay_length[0.5, 0] im1_alpha100 = im1_alpha100.relabel(r"$\bm{B} \parallel x, \; \bm{A} = 0$", depth=1) im2_alpha100 = im2_alpha100.relabel(r"$\bm{B} \parallel x, \; \bm{A} \ne 0$", depth=1) dat1_alpha100 = im1_alpha100.Im.Inverse_decay_length.data dat2_alpha100 = im2_alpha100.Im.Inverse_decay_length.data dat1_alpha100[dat1_alpha100<0] = np.nan dat2_alpha100[dat2_alpha100<0] = np.nan sc_on_side_length = (ticks(im1_alpha100, x=False).hist(bin_range=(0, 1)) + ticks(im2_alpha100).hist(bin_range=(0, 1))) sc_on_side_length.cols(1) # + # renderer.save(sc_on_side_length, 'paper/figures/sc_on_side_length', fmt='pdf') # - # print the minimum decay lengths in nm print("""The minimum decay length of the top plot is {:.3} nm. The minimum decay length of the lower plot is {:.3} nm.""".format(1000 / np.nanmax(dat1), 1000 / np.nanmax(dat2))) # print the mode of the decay lengths frequencies, edges = np.histogram(dat1[dat1>0].reshape(-1), bins=400) max_mode1 = edges[np.argmax(frequencies)] frequencies, edges = np.histogram(dat2[dat2>0].reshape(-1), bins=400) max_mode2 = edges[np.argmax(frequencies)] print("""The maximum mode of the top plot is {:.2} µm^-1. The maximum mode of the lower plot is {:.2} µm^-1. 
The ratio is {:.3}""".format(max_mode1, max_mode2, max_mode1 / max_mode2)) # # Band structures # + p = make_params(mu=4.8, orbital=True, V=lambda x,y,z: 2/50 * z, t_interface=7*constants.t/8, Delta=5, alpha=50, A_correction=False) momenta = np.linspace(-0.6, 0.6, 200) def bands(B): p.B_x, p.B_y, p.B_z = B bands_fun = kwant.physics.Bands(lead, args=[p]) _bands = np.array([bands_fun(k=k) for k in momenta]) return hv.Path((momenta, _bands), kdims=[r'$k$', r'$E$']) E = (-1.5, 1.5) k = (-0.65, 0.65) lead = make_3d_wire_external_sc(a=constants.a, angle=0) x1 = bands((0.5, 0, 0)).select(E=E, k=k) y1 = bands((0, 0.5, 0)).select(E=E, k=k) z1 = bands((0, 0, 0.5)).select(E=E, k=k) lead = make_3d_wire_external_sc(a=constants.a) x2 = bands((0.5, 0, 0)).select(E=E, k=k) y2 = bands((0, 0.5, 0)).select(E=E, k=k) z2 = bands((0, 0, 0.5)).select(E=E, k=k) # + # %%output fig='svg' # %%opts Layout [vspace=0.1 hspace=0.1 sublabel_format=''] # %%opts Path (color='k') def labels(plot, x=False, y=False, label=''): hooks = [tick_marks] if not x: hooks.append(hide_x) if not y: hooks.append(hide_y) plot *= hv.HLine(0)(style=dict(lw=0.5, color='k', ls=(1, (3.0, 3.0)))) return plot.relabel(label)(plot={'Path': {'xticks': 0, 'yticks': 0}, 'Overlay': {'final_hooks': hooks}}) opts = {'x': -0.62, 'y': 1.40, 'fontsize': 10, 'valign':'top', 'halign':'left'} def rectangle(x=opts['x'], y=opts['y']-0.38, width=0.55, height=0.47): box = np.array([(x,y), (x+width, y), (x+width, y+height), (x, y+height)]) return hv.Polygons([box])(style={'facecolor': '#F0F0F0'}) box2 = rectangle(width=0.55) box3 = rectangle(width=0.80) x1_txt = hv.Text(text="$\mathcal{P}$, $\mathcal{R}_x$, $\mathcal{C}'$", **opts) * box3 y1_txt = hv.Text(text="$\mathcal{P}$", **opts) z1_txt = hv.Text(text="$\mathcal{P}$, $\mathcal{C}'$", **opts) * box2 x2_txt = hv.Text(text="$\mathcal{P}$, $\mathcal{R}_x$", **opts) * box2 y2_txt = hv.Text(text="$\mathcal{P}$", **opts) z2_txt = hv.Text(text="$\mathcal{P}$", **opts) gap_line = lambda x: hv.HLine(np.abs(np.array(x.data)[:, :, 1]).min())(style=dict(lw='0.5', c='r', ls=(1., (3., 3.)))) bands_layout = (labels(x1 * x1_txt * gap_line(x1), label=r"$\bm{B}\parallel \hat{x}$", y=True)+ labels((y1 * y1_txt),label=r"$\bm{B}\parallel \hat{y}$") + labels((z1 * z1_txt * gap_line(z1)), label=r"$\bm{B}\parallel \hat{z}$") + labels((x2 * x2_txt * gap_line(x2)), x=True, y=True) + labels((y2 * y2_txt), x=True) + labels((z2 * z2_txt), x=True)).cols(3) bands_layout # + # renderer.save(bands_layout, 'paper/figures/bandstructure_annotated', fmt='pdf') # - # # Comparing phase diagrams orb = create_holoviews('data/0_to_2T_4x4_angles_misaligned_with_electric_field.h5') no_orb = create_holoviews('data/0_to_2T_4x4_angles_misaligned_no_orbital_with_electric_field.h5') # + # %%opts Path (color='g') # %%opts Image.d [colorbar=True cbar_ticks=np.linspace(0, 140, 5).tolist()] # %%opts Layout [vspace=0.20 hspace=0.15 sublabel_position=(-0.07, 0.79) sublabel_size=10 sublabel_format='({alpha})'] # %%opts VLine (linewidth=0.5 color='k') test = orb.Phase_diagram.Band_gap[0, 0.5] comparing_phase_diagrams = ( ticks((no_orb.Phase_diagram.Band_gap * hv.VLine(1)).relabel(r"$\bm{B} \parallel \hat{x}, \; \bm{A} = 0$")[0.5, 0], x=False) + ticks(no_orb.Phase_diagram.Band_gap.relabel(label=r"$\bm{B} \parallel \hat{z}, \; \bm{A} = 0$")[0, 0.5], x=False, y=False) + ticks(orb.Phase_diagram.Band_gap.relabel(r"$\bm{B} \parallel \hat{x}, \; \bm{A} \ne 0$")[0.5, 0]) + ticks(orb.Phase_diagram.Band_gap.relabel(label=r"$\bm{B} \parallel \hat{z}, \; \bm{A} \ne 0$", 
group='d', depth=2)[0, 0.5], y=False)).cols(2) comparing_phase_diagrams # + # renderer.save(comparing_phase_diagrams, 'paper/figures/comparing_phase_diagrams', fmt='pdf') # - # # Comparing phase diagrams, misaligned fields # + # %%opts Path (color='g') # %%opts Image.d [colorbar=True cbar_ticks=np.linspace(0, 120, 5).tolist()] # %%opts Layout [vspace=0.20 hspace=0.15 sublabel_position=(-0.07, 0.79) sublabel_size=10 sublabel_format='({alpha})'] kys = no_orb.Phase_diagram.Band_gap.keys() test = orb.Phase_diagram.Band_gap[nearest(kys, 0.05), 0.5] misaligned = ( ticks(no_orb.Phase_diagram.Band_gap.relabel(label=r"$\bm{B} \parallel (10, 1, 0)^T, \; \bm{A} = 0$")[0.5, nearest(kys, 0.05)], x=False) + ticks(no_orb.Phase_diagram.Band_gap.relabel(label=r"$\bm{B} \parallel (0, 1, 10)^T, \; \bm{A} = 0$")[nearest(kys, 0.05), 0.5], x=False, y=False) + ticks(orb.Phase_diagram.Band_gap.relabel(label=r"$\bm{B} \parallel (10, 1, 0)^T, \; \bm{A} \ne 0$")[0.5, nearest(kys, 0.05)]) + ticks(orb.Phase_diagram.Band_gap.relabel(label=r"$\bm{B} \parallel (0, 1, 10)^T, \; \bm{A} \ne 0$", group='d', depth=2)[nearest(kys, 0.05), 0.5], y=False)).cols(2) misaligned # + # renderer.save(misaligned, 'paper/figures/misaligned', fmt='pdf') # - # # Eigenvalue problem graphic # Uncomment the lower cells and start an `ipcluster` to calculate the spectrum. # + # import os # from scripts.hpc05 import HPC05Client # os.environ['SSH_AUTH_SOCK'] = os.path.join(os.path.expanduser('~'), 'ssh-agent.socket') # cluster = HPC05Client() # + # v = cluster[:] # v.use_dill() # lview = cluster.load_balanced_view() # len(v) # + # # %%px # import sys # import os # sys.path.append(os.path.join(os.path.expanduser('~'), 'orbitalfield')) # import kwant # import numpy as np # from fun import * # lead = make_3d_wire() # p = make_params(orbital=False, B_x=1) # + lead = make_3d_wire() p = make_params(orbital=False, B_x=1) mus = np.linspace(0, 35, 2000) if os.path.exists('data/gaps_plot.npy'): gaps = np.load('data/gaps_plot.npy') else: print('Start cluster with the cells above.') gaps = lview.map_async(lambda mu: find_gap(lead, p, ((1, 0, 0), mu, True), tol=1e-4), mus).result() np.save('data/gaps_plot', gaps) if os.path.exists('data/spectrum_ev_plot.npy'): Es = np.load('data/spectrum_ev_plot.npy') else: Es = np.array([kwant.physics.Bands(lead, args=[p])(k=0) for p.mu in mus]) np.save('data/spectrum_ev_plot', Es) # + # %%output fig='svg' # %%opts VLine (lw=0.5) HLine (lw=0.5, color='g') # %%opts Layout [vspace=.35 aspect_weight=1 sublabel_position=(-0.3, 0.9) sublabel_format='({alpha})' sublabel_size=13] # %%opts Overlay [yticks=3 aspect=1.5 vspace=0.] 
E_dim = hv.Dimension(('E_k0', r'$E(k=0)$'), unit='meV') spectrum = hv.Path((mus, Es), kdims=[dimensions.mu, E_dim]) ind_E = 100 idx = np.argsort(np.min(np.abs(Es), axis=1)) VPoints = hv.Points([(mus[ind_E], E) for E in Es[ind_E]]) p.mu = 0 phase_bounds = np.sort(find_phase_bounds(lead, p, (1, 0, 0), num_bands=40).real)[::2] HPoints = hv.Points([(x, 0) for x in phase_bounds if x > 0])(style={'color': 'g'}) ev_plot = (spectrum * hv.VLine(mus[ind_E]) * VPoints * HPoints * hv.HLine(0))[:35, -10:10] bool_array = np.array(np.digitize(mus, phase_bounds)%2, dtype=bool) gaps_plot = (spectrum * hv.Area((mus, np.array(gaps) * bool_array))(style={'facecolor': '#FF6700'}) * hv.Area((mus, np.array(gaps) * ~bool_array))(style={'facecolor': '#a9a9a9'}) * hv.HLine(0) * HPoints) gaps_plot = gaps_plot.map(lambda x: x.clone(extents=(0, 0, 35, 0.2)), [hv.Element]) ev_problem = (ev_plot[:, -8:8](plot={'xticks':[(0, ''), (8, ''), (16, ''), (24, ''), (32, '')], 'final_hooks': [tick_marks, hide_x]}) + gaps_plot(plot={'xticks': 5, 'final_hooks': [tick_marks]})).cols(1) ev_problem # + # renderer.save(ev_problem, 'paper/figures/ev_problem', fmt='pdf')
Paper-figures.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # # fa-archive #install.packages(c("FactoMineR", "factoextra")) library("FactoMineR") library("factoextra") library(readxl) library(dplyr) library(psych) #install.packages("GPArotation") library(GPArotation) #install.packages("psy") library(psy) #install.packages("nFactors") library(nFactors) library(mvtnorm) # col.norm library(tilting) # col.norm library(corrplot) library(writexl) #install.packages("scales") library(scales) #install.packages("Rcsdp") library(Rcsdp) #install.packages('reshape2') library(reshape2) library(ggplot2) library(devtools) library(factoextra) # sources: http://www.di.fc.ul.pt/~jpn/r/factoranalysis/factoranalysis.html # Pricipal Components Analysis # entering raw data and extracting PCs # from the correlation matrix # fit <- princomp(df.wa[,c(2:18)], cor=TRUE) summary(fit) # print variance accounted for loadings(fit) # pc loadings plot(fit,type="lines") # scree plot # # n.factors = 7 fit <- factanal(df.wa[,c(2:18)], n.factors , # number of factors to extract scores=c("Bartlett"), rotation="promax") # # par(mar=c(1,1,1,1)) # # png( "scree-plot.png", width = 5, height = 5, units = "in", res = 700, #pointsize = 6 ) # # pdf(file = "../../docs/manuscript/pdf-image/scree-plot.pdf" , width = 5, height = 5 ) # # scree.plot(fit$correlation) dev.off() # + df.wa <- read_excel("../../results/df-water-access.xlsx",sheet=1) df.wa.nocountry <- df.wa[,c(2:18)] options(repr.plot.width=10, repr.plot.height=10) ev <- eigen(cor(df.wa.nocountry)) # get eigenvalues ap <- parallel(subject=nrow(df.wa.nocountry),var=ncol(df.wa.nocountry), rep=100, cent=.05) nS <- nScree(x=ev$values, aparallel=ap$eigen$qevpea) plotnScree(nS) scree(df.wa.nocountry,factors=TRUE,pc=TRUE,main="Scree plot",hline=NULL,add=FALSE) # + solution.mr <- fa(r = cor(df.wa.nocountry), nfactors = 5, rotate = "oblimin", fm = "minrank" , scores= "tenBerge" , impute ="mean", max.iter = 1000) fa.diagram(solution.mr) # - # FA # The goal of this study is to analyze the characteristics of water accessibility # # sources: http://www.sthda.com/english/articles/31-principal-component-methods-in-r-practical-guide/115-famd-factor-analysis-of-mixed-data-in-r-essentials/ # + df.wa <- read_excel("../../results/df-water-access.xlsx",sheet=1) res.famd <- FAMD(df.wa, graph = FALSE) print(res.famd) #Eigenvalues / Variances eig.val <- get_eigenvalue(res.famd) head(eig.val) options(repr.plot.width=10, repr.plot.height=10) fviz_screeplot(res.famd) #All variables var <- get_famd_var(res.famd) var # Coordinates of variables head(var$coord,7) # Cos2: quality of representation on the factore map head(var$cos2,7) # Contributions to the dimensions head(var$contrib,7) # Plot of variables fviz_famd_var(res.famd, repel = TRUE) # Contribution to the first dimension fviz_contrib(res.famd, "var", axes = 1) # Contribution to the second dimension fviz_contrib(res.famd, "var", axes = 2) #The red dashed line on the graph above indicates the expected average value, If the contributions were uniform. #From the plots above, it can be seen that: #variables that contribute the most to the first dimension are: iwsp and watp. #variables that contribute the most to the second dimension are: basw and ptap. 
# Quantitative variables quanti.var <- get_famd_var(res.famd, "quanti.var") quanti.var fviz_famd_var(res.famd, "quanti.var", col.var = "contrib", gradient.cols = c("#66c2a5", "#fc8d62", "#8da0cb"), repel = TRUE) # Color by cos2 values: quality on the factor map fviz_famd_var(res.famd, "quanti.var", col.var = "cos2", gradient.cols = c("#66c2a5", "#fc8d62", "#8da0cb"), repel = TRUE) # Graph of qualitative variables fviz_famd_var(res.famd, "quali.var", col.var = "contrib", gradient.cols = c("#66c2a5", "#fc8d62", "#8da0cb") ) # Graph of individuals ind <- get_famd_ind(res.famd) ind fviz_famd_ind(res.famd, col.ind = "cos2", gradient.cols = c("#66c2a5", "#fc8d62", "#8da0cb"), repel = TRUE) #The factor analysis of mixed data (FAMD) makes it possible to analyze a data set, in which individuals are described by both qualitative and quantitative variables. result <- PCA(df.wa[,(2:18)]) result # PCA head(df.wa.nocountry,7) # PCA Scree plot without standardizing data hpca_dfsimple <- prcomp(df.wa.nocountry, scale=FALSE) # Scale to 0 to 1 # We can extract the information summarized above (and much more) # from the attributes of the object hpca_dfsimple standard_deviation_of_each_component <- hpca_dfsimple$sdev var_per_dim <- standard_deviation_of_each_component^2 var_tot <- sum(var_per_dim) var_tot var_per_dim/var_tot var_prop <- var_per_dim / sum(var_per_dim) var_prop cum_var <- cumsum(var_prop) cum_var plot(cum_var,xlab="Principal component", ylab="Proportion of variance explained", ylim=c(0,1), type='b') apply(df.wa.nocountry, 2, mean) apply(df.wa.nocountry, 2, var) hpca_cor <- prcomp(df.wa.nocountry, scale=TRUE) #Using the correlation matrix to obtain the eigenvalue #Single decomposition of the variance matrix. standard_deviation_of_each_component <- hpca_cor$sdev var_per_dim <- standard_deviation_of_each_component^2 var_tot <- sum(var_per_dim) var_prop <- var_per_dim / sum(var_per_dim) cum_var <- cumsum(var_prop) plot(cum_var,xlab="Principal component", ylab="Proportion of variance explained", ylim=c(0,1), type='b') eigenvectors <- hpca_cor$rotation col.norm(eigenvectors) eigenvectors PC_contr <- eigenvectors[,c("PC1")] # Let us plot the contribution of the original dimension to the 1st PCA # PC_contr ord <- order( -abs(PC_contr) )# We order by the magnitude of the contribution # We use the - sign because we want a descending order PC_contr <- PC_contr[ord] #PC_contr PC_contr1 <- PC_contr[1:7] # We just select the 7 highest contributing dimensions (highest loading) PC_contr1 barplot(PC_contr1, main="Contribution to the 1st component", xlab="Original Dimensions") # Second principal component vector PC_contr <- eigenvectors[,c("PC2")] # We order by the magnitude of the contribution ord <- order( -abs(PC_contr) ) # We use the - sign because we want a descending order PC_contr <- PC_contr[ord] PC_contr2 <- PC_contr[1:7] options(repr.plot.width=12, repr.plot.height=5) barplot(PC_contr2, main="Contribution to the 2nd component",xlab="Original Dimensions") # Third principal component vector PC_contr <- eigenvectors[,c("PC3")] ord <- order( -abs(PC_contr) ) PC_contr <- PC_contr[ord] PC_contr3 <- PC_contr[1:7] options(repr.plot.width=12, repr.plot.height=8) barplot(PC_contr3, main="Contribution to the 3rd component",xlab="Original Dimensions") # 4 principal component vector PC_contr <- eigenvectors[,c("PC4")] ord <- order( -abs(PC_contr) ) PC_contr <- PC_contr[ord] PC_contr4 <- PC_contr[1:7] options(repr.plot.width=12, repr.plot.height=8) barplot(PC_contr4, main="Contribution to the 4th 
component",xlab="Original Dimensions") # 5 principal component vector PC_contr <- eigenvectors[,c("PC5")] ord <- order( -abs(PC_contr) ) PC_contr <- PC_contr[ord] PC_contr5 <- PC_contr[1:7] options(repr.plot.width=12, repr.plot.height=8) barplot(PC_contr5, main="Contribution to the 5th component",xlab="Original Dimensions") # 6 principal component vector PC_contr <- eigenvectors[,c("PC6")] ord <- order( -abs(PC_contr) ) PC_contr <- PC_contr[ord] PC_contr6 <- PC_contr[1:7] options(repr.plot.width=12, repr.plot.height=8) barplot(PC_contr6, main="Contribution to the 6th component",xlab="Original Dimensions") # 7 principal component vector PC_contr <- eigenvectors[,c("PC7")] ord <- order( -abs(PC_contr) ) PC_contr <- PC_contr[ord] PC_contr7 <- PC_contr[1:7] options(repr.plot.width=12, repr.plot.height=8) barplot(PC_contr7, main="Contribution to the 7th component",xlab="Original Dimensions") res.pca <- PCA(df.wa.nocountry, graph = FALSE) print(res.pca) eig.val <- get_eigenvalue(res.pca) #The object that is created using the function PCA() contains many information found in many different lists and matrices. These values are described in the next section. head(eig.val,7) #An eigenvalue > 1 indicates that PCs account for more variance than accounted by one of the original variables in #standardized data. This is commonly used as a cutoff point for which PCs are retained. This holds true only when #the data are standardized. fviz_eig(res.pca, addlabels = TRUE, ylim = c(0, 76)) var <- get_pca_var(res.pca) var # Coordinates head(var$coord, 17) # Cos2: quality on the factor map head(var$cos2,17) # Contributions to the principal components head(var$contrib, 17) #For all the 17 variables. fviz_pca_var(res.pca, col.var = "black") #The plot above is also known as variable correlation plots. It shows the relationships between all variables. It can be interpreted as follow: #Positively correlated variables are grouped together. #Negatively correlated variables are positioned on opposite sides of the plot origin (opposed quadrants). #The distance between variables and the origin measures the quality of the variables on the factor map. Variables that #are away from the origin are well represented on the factor map. corrplot(var$cos2, is.corr=FALSE, tl.cex = 1 ) # Total cos2 of variables on Dim.1 and Dim.2 fviz_cos2(res.pca, choice = "var", axes = 1:2, tl.cex = 1) #A high cos2 indicates a good representation of the variable on the principal component. In this case the variable is #positioned close to the circumference of the correlation circle. #A low cos2 indicates that the variable is not perfectly represented by the PCs. #In this case the variable is close to the center of the circle. #The cos2 values are used to estimate the quality of the representation #The closer a variable is to the circle of correlations, #the better its representation on the factor map (and the more important it is to interpret these components) #Variables that are closed to the center of the plot are less important for the first components. # Color by cos2 values: quality on the factor map fviz_pca_var(res.pca, col.var = "cos2", gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"), repel = TRUE # Avoid text overlapping, ) #variables with low cos2 values will be colored in "white" #variables with mid cos2 values will be colored in "blue" #variables with high cos2 values will be colored in red corrplot(var$contrib, is.corr=FALSE,tl.cex = 1) #The larger the value of the contribution, the more the variable contributes to the component. 
# Contributions of variables to PC1 fviz_contrib(res.pca, choice = "var", axes = 1, top = 10) # Contributions of variables to PC2 fviz_contrib(res.pca, choice = "var", axes = 2, top = 10) # Contributions of variables to PC3 fviz_contrib(res.pca, choice = "var", axes = 3, top = 10) # Contributions of variables to PC4 fviz_contrib(res.pca, choice = "var", axes = 4, top = 10) # Contributions of variables to PC5 fviz_contrib(res.pca, choice = "var", axes = 5, top = 10) fviz_contrib(res.pca, choice = "var", axes = 1:2, top = 10) #It can be seen that the variables H IWS P and H W - contribute the most to the dimensions 1 and 2. fviz_pca_var(res.pca, col.var = "contrib", gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07") ) res.desc <- dimdesc(res.pca, axes = c(1,2), proba = 0.05) # Description of dimension 1 res.desc$Dim.1 res.desc$Dim.2 ind <- get_pca_ind(res.pca) # Coordinates of individuals head(ind$coord) # Quality of individuals head(ind$cos2) # Contributions of individuals fviz_pca_ind(res.pca, col.ind = "cos2", pointsize = "cos2", gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"), repel = TRUE # Avoid text overlapping (slow if many points) ) # Total contribution on PC1 and PC2 fviz_contrib(res.pca, choice = "ind", axes = 1:2, tl.cex = .5) # -
bin/jupyter/code-archive/.ipynb_checkpoints/fa-archive-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Overview # # This notebook sets up the following directories for the final project. # ``` # project # ├── data # ├── LICENSE # ├── models # ├── notebooks # │ └── 0_setup_project_folders.ipynb # ├── README.md # ├── report # │ ├── interim.md # │ ├── proposal.md # │ └── report.md # └── src # ``` # # You are free to rename any of the directories or add other directories. You can also switch the LICENSE to any other license type that you prefer. # + import os basedir = os.path.dirname(os.getcwd()) subdirs = ['src', 'data', 'models'] for d in subdirs: full_path = os.path.join(basedir, d) if not os.path.exists(full_path): os.makedirs(full_path) assert all([os.path.exists(os.path.join(basedir, d)) for d in subdirs]) # - # !tree ../
notebooks/0_setup_project_folders.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.3.1 # language: julia # name: julia-1.3 # --- # # Bechmark: Vern9 vs IRKGL16 (Float64) # # <ul id="top"> # <li><a href="#Loading-packages"> # Loading Packages</a></li> # # <li><a href="#Initial-value-problem:-Burrau-problem"> # Initial value problem: Burrau problem</a></li> # # <li><a href="#Integration-with-Vern9"> # Integration with Vern9</a></li> # # <li><a href="#Integration-with-IRKGL16-(Adaptive-step)"> # Integration with IRKGL16 (Adaptive-step)</a></li> # # <li><a href="#Error-in-energy"> # Error in energy</a></li> # # </ul> # # ## Loading packages using Pkg using IRKGaussLegendre,LinearAlgebra,Plots,Statistics using OrdinaryDiffEq using BenchmarkTools,DiffEqDevTools using RecursiveArrayTools using JLD2, FileIO BenchmarkTools.DEFAULT_PARAMETERS.samples=10 # <a href="#top">Back to the top</a> # # ## Initial value problem: Solar-System15 problem # The vector $Gm = (G\, m_1, \ldots, G\, m_N)$ include("../examples/InitialNBody15.jl") u0, Gm =InitialNBody15() N = length(Gm) (size(u0), typeof(u0),typeof(Gm)) # Just to check that the output of the function NbodyODE has the expected type: include("../examples/Nbody.jl") du0=similar(u0) NbodyODE!(du0,u0,Gm,0.) @show size(du0); # + t0 = 0. T = 100. # Final time q0=u0[2,:,:] v0=u0[1,:,:] tspan=(t0,T) prob=ODEProblem(NbodyODE!,u0,tspan,Gm); probdyn = DynamicalODEProblem(NbodyODEq!,NbodyODEv!,q0,v0,tspan,Gm); # - setprecision(BigFloat, 106) u0128, Gm =InitialNBody15(BigFloat) tspan128=(BigFloat(0.0),BigFloat(T)) prob128=ODEProblem(NbodyODE!,u0128,tspan128,Gm); # <a href="#top">Back to the top</a> # # # ## Integration with Vern9 # Recommend methods: Non-Stiff Problems # # - For high accuracy non-stiff solving ( BigFloat and tolerances like <1e-12 ), JuliaDiffeq recommend # the Vern9 method # #sol0 = solve(prob128,Vern9(),saveat=10,abstol=1e-20,reltol=1e-20); #@save "./Data/solarsystem15small_test0_solF64.jld2" sol0 @load "./Data/solarsystem15small_test0_solF64.jld2" sol0 test_sol = TestSolution(sol0); sol0.destats # ## Integration with IRKGL16 (Adaptive-step) # (sol1,iter1,steps1)=solve(prob,IRKGL16(), reltol=1e-12, abstol=1e-12,myoutputs=true,initial_interp=true); sol1.destats (sol2,iter2,steps2)=solve(probdyn,IRKGL163(), reltol=1e-12, abstol=1e-12,myoutputs=true,initial_interp=true); sol1.destats plot(sol1.t[2:end],iter1[2:end], title="Iteration numbers", legend=false) plot(sol1.t,steps1, title="step sizes in the integration", legend=false) plot!(sol2.t,steps2) # ## Integration with IRKGL16 (Fixed-step) dt = 10 # time-step length (sol3,iter3,steps3)=solve(prob,IRKGL16(),adaptive=false,dt=dt,myoutputs=true,initial_interp=true) sol3.destats (sol4,iter4,steps4)=solve(probdyn,IRKGL163(),adaptive=false,dt=dt,myoutputs=true,initial_interp=true) sol4.destats # ### Plots # + bodylist = ["sun", "mercury", "venus", "EMB", "Mars"] ulist0 = sol0.u tlist0 = sol0.t pl0 = plot(title="Vern9",aspect_ratio=1) for j = 1:5 xlist = map(u->u[2,1,j], ulist0) ylist = map(u->u[2,2,j], ulist0) pl0 = plot!(xlist,ylist,label = bodylist[j]) end pl1 = plot(title="IRKGL16(adaptive-ODEProblem)",aspect_ratio=1) ulist1 = sol1.u[1:end] tlist1 = sol1.t[1:end] for j = 1:5 xlist = map(u->u[2,1,j], ulist1) ylist = map(u->u[2,2,j], ulist1) pl1 = plot!(xlist,ylist,label = bodylist[j]) end pl2 = plot(title="IRKGL16 (adaptive-Dynamic)",aspect_ratio=1) ulist2 = sol2.u[1:end] tlist2 = 
sol2.t[1:end] for j = 1:5 xlist = map(u->u[1,1,j], ulist2) # erantzunak alderantziz itzultzen ditu ylist = map(u->u[1,2,j], ulist2) # erantzunak alderantziz itzultzen ditu pl2 = plot!(xlist,ylist,label = bodylist[j]) end pl3 = plot(title="IRKGL16 (fixed-ODEProblem)",aspect_ratio=1) ulist3 = sol3.u[1:end] tlist3 = sol3.t[1:end] for j = 1:5 xlist = map(u->u[2,1,j], ulist3) ylist = map(u->u[2,2,j], ulist3) pl3 = plot!(xlist,ylist,label = bodylist[j]) end pl4 = plot(title="IRKGL16 (fixed-Dynamic)",aspect_ratio=1) ulist4 = sol4.u[1:end] tlist4 = sol4.t[1:end] for j = 1:5 xlist = map(u->u[1,1,j], ulist4) # erantzunak alderantziz itzultzen ditu ylist = map(u->u[1,2,j], ulist4) # erantzunak alderantziz itzultzen ditu pl4 = plot!(xlist,ylist,label = bodylist[j]) end #plot(pl) plot(pl1,pl2, layout=2) # - plot(pl3,pl4, layout=2) plot(pl2) savefig("InnerPlanets.png") # <a href="#top">Back to the top</a> # ## Error in energy # + setprecision(BigFloat, 256) u0128, Gm128 =InitialNBody15(BigFloat) E0=NbodyEnergy(u0128,Gm128) ΔE0 = map(x->NbodyEnergy(BigFloat.(x),Gm128), sol0.u)./E0.-1 ΔE1 = map(x->NbodyEnergy(BigFloat.(x),Gm128), sol1.u)./E0.-1 ΔE2 = map(x->NbodyEnergy2(BigFloat.(x),Gm128), sol2.u)./E0.-1 ΔE3 = map(x->NbodyEnergy(BigFloat.(x),Gm128), sol3.u)./E0.-1 ΔE4 = map(x->NbodyEnergy2(BigFloat.(x),Gm128), sol4.u)./E0.-1 (Float32(maximum(abs.(ΔE0))),Float32(maximum(abs.(ΔE1))),Float32(maximum(abs.(ΔE2))),Float32(maximum(abs.(ΔE3))), Float32(maximum(abs.(ΔE4)))) # - plot(sol0.t,log10.(abs.(ΔE0)), label="Test solution") plot(sol1.t,log10.(abs.(ΔE1)), label="IRKGL16 (Adaptive-ODEProblem)") plot!(sol2.t,log10.(abs.(ΔE2)), label="IRKGL16 (Adaptive-Dynamic)") plot(sol3.t,log10.(abs.(ΔE3)), label="IRKGL16 (Fixed-ODEProblem)") plot!(sol4.t,log10.(abs.(ΔE4)), label="IRKGL16 (Fixed-Dynamic)") # ## Work-Precision abstols = 1.0 ./ 10.0 .^ (8:14) reltols = 1.0 ./ 10.0 .^ (8:14); dts=20.0./2.0.^(0:length(reltols)-1) setups = [ # Dict(:alg=>DP8()) Dict(:alg=>Vern9()) # Dict(:alg=>Vern8()) Dict(:alg=>IRKGL16()) Dict(:alg=>IRKGL16(),:adaptive=>false,:dts=>dts) ]; wp = WorkPrecisionSet(prob,abstols,reltols,setups;appxsol=test_sol,save_everystep=false,numruns=1) plot(wp)
Tutorials/.ipynb_checkpoints/Solar-System15-Work-Precision-Float64-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Create a binary tree # ## Task 01: build a node # # * on a piece of paper, draw a tree. # * Define a node; what are the three things you'd expect in a node? # * Define a class called `Node`, and define a constructor that takes no arguments, and sets the three instance variables to `None`. # * Note: coding from a blank cell (or blank piece of paper) is good practice for interviews! ## Define a node ## Solution class Node(object): def __init__(self): self.value = None self.left = None self.right = None node0 = Node() print(f""" value: {node0.value} left: {node0.left} right: {node0.right} """) # ## Task 02: add a constructor that takes the value as a parameter # # Copy what you just made, and modify the constructor so that it takes in an optional value, which it assigns as the node's value. Otherwise, it sets the node's value to `None`. # # + ## Your code here # + ## Solution class Node(object): def __init__(self,value=None): self.value = value self.left = None self.right = None # + ## Check node0 = Node() print(f""" value: {node0.value} left: {node0.left} right: {node0.right} """) node0 = Node("apple") print(f""" value: {node0.value} left: {node0.left} right: {node0.right} """) # - # ## Task 03: add functions to set and get the value of the node # # Add functions `get_value` and `set_value` # + # add set_value and get_value functions # + # solution class Node(object): def __init__(self,value=None): self.value = value self.left = None self.right = None def set_value(self,value): self.value = value def get_value(self): return self.value # - # ## Task 04: add functions that assign a left child, or right child # # Define a function `set_left_child` and a function `set_right_child`. Each function takes in a node that it assigns as the left or right child, respectively. Note that we can assume that this will replace any existing node if it's already assigned as a left or right child. # # Also, define `get_left_child` and `get_right_child` functions. # + ## your code here # + ## Solution class Node(object): def __init__(self,value = None): self.value = value self.left = None self.right = None def set_value(self,value): self.value = value def get_value(self): return self.value def set_left_child(self,node): self.left = node def set_right_child(self, node): self.right = node def get_left_child(self): return self.left def get_right_child(self): return self.right # + ## check node0 = Node("apple") node1 = Node("banana") node2 = Node("orange") node0.set_left_child(node1) node0.set_right_child(node2) print(f""" node 0: {node0.value} node 0 left child: {node0.left.value} node 0 right child: {node0.right.value} """) # - # ## Task 05: check if left or right child exists # # Define functions `has_left_child` and `has_right_child`, so that they return True if the node has a left child or a right child, respectively. 
# + ## Solution class Node(object): def __init__(self,value = None): self.value = value self.left = None self.right = None def set_value(self,value): self.value = value def get_value(self): return self.value def set_left_child(self,node): self.left = node def set_right_child(self, node): self.right = node def get_left_child(self): return self.left def get_right_child(self): return self.right def has_left_child(self): return self.left != None """ #alternative solutions if self.left != None: return True else: return False """ def has_right_child(self): return self.right != None # + ## check node0 = Node("apple") node1 = Node("banana") node2 = Node("orange") print(f"has left child? {node0.has_left_child()}") print(f"has right child? {node0.has_right_child()}") print("adding left and right children") node0.set_left_child(node1) node0.set_right_child(node2) print(f"has left child? {node0.has_left_child()}") print(f"has right child? {node0.has_right_child()}") # - # ## Task 06: Create a binary tree # # Create a class called `Tree` that has a "root" instance variable of type `Node`. # # Also define a get_root method that returns the root node. # + # define a Tree class here # - # solution class Tree(): def __init__(self): self.root = None # ## Task 07: setting root node in constructor # # Let's modify the `Tree` constructor so that it takes an input that initializes the root node. Choose between one of two options: # 1) the constructor takes a `Node` object # 2) the constructor takes a value, then creates a new `Node` object using that value. # # Which do you think is better? # + # choose option 1 or 2 (you can try both), and explain why you made this choice # - # solution for 1 class Tree(): def __init__(self,node=None): self.root = node # solution 2 class Tree(): def __init__(self,value=None): self.root = Node(value) # #### Discussion # It would be easier for others to use your Tree class if you let them pass in the value that they want to store, rather than ask them to create the Node object, because that requires that they know about the Node class and how to use it. # ## Task 08: add get method for root node # # + # add a get_root() # - class Tree(): def __init__(self,value=None): self.root = Node(value) def get_root(self): return self.root # ## Next: # # Before we learn how to insert values into a tree, we'll first want to learn how to traverse a tree. We'll practice tree traversal next!
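# Before moving on, here is a short usage sketch that ties the pieces together. It
# assumes the final `Node` (Task 05 solution) and `Tree` (with `get_root`) definitions
# above have been run; it adds no new functionality.

# +
## usage sketch
tree = Tree("apple")
tree.get_root().set_left_child(Node("banana"))
tree.get_root().set_right_child(Node("orange"))

print(f"""
root value: {tree.get_root().get_value()}
root has left child? {tree.get_root().has_left_child()}
left child value: {tree.get_root().get_left_child().get_value()}
right child value: {tree.get_root().get_right_child().get_value()}
""")
# -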
2/4/01 Create_a_binary_tree_solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib inline import matplotlib.pyplot as plt import SimpleITK as sitk print(sitk.Version()) from myshow import myshow # Download data to work on # %run update_path_to_download_script from downloaddata import fetch_data as fdata OUTPUT_DIR = "Output" # - # This section of the Visible Human Male is about 1.5GB. To expedite processing and registration we crop the region of interest, and reduce the resolution. Take note that the physical space is maintained through these operations. fixed_rgb = sitk.ReadImage(fdata("vm_head_rgb.mha")) fixed_rgb = fixed_rgb[735:1330,204:975,:] fixed_rgb = sitk.BinShrink(fixed_rgb,[3,3,1]) moving = sitk.ReadImage(fdata("vm_head_mri.mha")) myshow(moving) # Segment blue ice seeds = [[10,10,10]] fixed_mask = sitk.VectorConfidenceConnected(fixed_rgb, seedList=seeds, initialNeighborhoodRadius=5, numberOfIterations=4, multiplier=8) # Invert the segment and choose largest component fixed_mask = sitk.RelabelComponent(sitk.ConnectedComponent(fixed_mask==0))==1 myshow(sitk.Mask(fixed_rgb, fixed_mask)); # + # pick red channel fixed = sitk.VectorIndexSelectionCast(fixed_rgb,0) fixed = sitk.Cast(fixed,sitk.sitkFloat32) moving = sitk.Cast(moving,sitk.sitkFloat32) # - initialTransform = sitk.Euler3DTransform() initialTransform = sitk.CenteredTransformInitializer(sitk.Cast(fixed_mask,moving.GetPixelID()), moving, initialTransform, sitk.CenteredTransformInitializerFilter.MOMENTS) print(initialTransform) def command_iteration(method) : print(f"{method.GetOptimizerIteration()} = {method.GetMetricValue()} : {method.GetOptimizerPosition()}", end='\n'); sys.stdout.flush(); tx = initialTransform R = sitk.ImageRegistrationMethod() R.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50) R.SetOptimizerAsGradientDescentLineSearch(learningRate=1,numberOfIterations=100) R.SetOptimizerScalesFromIndexShift() R.SetShrinkFactorsPerLevel([4,2,1]) R.SetSmoothingSigmasPerLevel([8,4,2]) R.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn() R.SetMetricSamplingStrategy(R.RANDOM) R.SetMetricSamplingPercentage(0.1) R.SetInitialTransform(tx) R.SetInterpolator(sitk.sitkLinear) # + import sys R.RemoveAllCommands() R.AddCommand( sitk.sitkIterationEvent, lambda: command_iteration(R) ) outTx = R.Execute(sitk.Cast(fixed,sitk.sitkFloat32), sitk.Cast(moving,sitk.sitkFloat32)) print("-------") print(tx) print(f"Optimizer stop condition: {R.GetOptimizerStopConditionDescription()}") print(f" Iteration: {R.GetOptimizerIteration()}") print(f" Metric value: {R.GetMetricValue()}") # + tx = sitk.CompositeTransform([initialTransform, sitk.AffineTransform(3)]) R.SetOptimizerAsGradientDescentLineSearch(learningRate=1,numberOfIterations=100) R.SetOptimizerScalesFromIndexShift() R.SetShrinkFactorsPerLevel([2,1]) R.SetSmoothingSigmasPerLevel([4,1]) R.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn() R.SetInitialTransform(tx) # - outTx = R.Execute(sitk.Cast(fixed,sitk.sitkFloat32), sitk.Cast(moving,sitk.sitkFloat32)) R.GetOptimizerStopConditionDescription() resample = sitk.ResampleImageFilter() resample.SetReferenceImage(fixed_rgb) resample.SetInterpolator(sitk.sitkBSpline) resample.SetTransform(outTx) resample.AddCommand(sitk.sitkProgressEvent, lambda: print(f"\rProgress: {100*resample.GetProgress():03.1f}%...",end='')) resample.AddCommand(sitk.sitkProgressEvent, lambda: sys.stdout.flush()) 
resample.AddCommand(sitk.sitkEndEvent, lambda: print("Done")) out = resample.Execute(moving) out_rgb = sitk.Cast( sitk.Compose( [sitk.RescaleIntensity(out)]*3), sitk.sitkVectorUInt8) vis_xy = sitk.CheckerBoard(fixed_rgb, out_rgb, checkerPattern=[8,8,1]) vis_xz = sitk.CheckerBoard(fixed_rgb, out_rgb, checkerPattern=[8,1,8]) vis_xz = sitk.PermuteAxes(vis_xz, [0,2,1]) myshow(vis_xz,dpi=30) # + import os sitk.WriteImage(out, os.path.join(OUTPUT_DIR, "example_registration.mha")) sitk.WriteImage(vis_xy, os.path.join(OUTPUT_DIR, "example_registration_xy.mha")) sitk.WriteImage(vis_xz, os.path.join(OUTPUT_DIR, "example_registration_xz.mha"))
Python/56_VH_Registration1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Code to Display Daily Maximum Winds expected over Fire Danger Zones # # ### Objective: # This Python 3 Jupyter notebook calculates the daily maximum wind speed (mph) expected at Fire Danger Climate Zones in California (Zones 1-9, does NOT include the Central Valley). # # The code should run in about 10 minutes. # # The result is a map of maximum daily wind speed by zone. # # ### Inputs: # WRF_FILE : the WRF model dataset with hourly weather forecasts for a random day, in NetCDF format # # SHAPE_PATH : Path to folder with the FDCZ (Fire Danger Climate Zone) polygons, as shape files # # MASK_FILE : land/sea mask file, in NetCDF format # # ### Outputs: # OUTPUT_FILE : a map displaying each climate zone color coded by the maximum daily wind speed, in miles per hour. # # # #### Created by <NAME>, on 2020 Apr.27: Last modification on 2020 Apr.27 by <EMAIL> # # ## Environment Setup # Global Input Variables WRF_FILE = 'wrf_daily_ts_2017-10-08.nc' MASK_FILE='invariant_d01.nc' SHAPE_PATH='./fdcz_pl' OUTPUT_FILE = 'fdcz_max_winds_mph.png' # Dependencies import numpy as np import pandas as pd from netCDF4 import Dataset import geopandas import matplotlib.pyplot as plt # ## Read NetCDF files # # References: # # https://anaconda.org/anaconda/netcdf4 # http://atmos.colostate.edu/~btrabing/Read_WRF.html # https://stackoverflow.com/questions/35297771/wrf-netcdf-file-subset-smaller-array-out-of-dataset-based-on-coordinate-bounda # Input WRF dataset wrf_data = Dataset(WRF_FILE, "r") mask_data= Dataset(MASK_FILE, "r") # Get the horizontal grid lats = mask_data.variables['XLAT'][:] # degree_north longs = mask_data.variables['XLONG'][:] # degree_east # Get the Wind speed u10 = wrf_data.variables['U10'][:] # U component of wind at 10m, in m/s v10 = wrf_data.variables['V10'][:] # V component of wind at 10m, in m/s # ## Read Shape files with GeoPandas # # Using only Climate zones 1-9 (don’t include the Central Valley). # # References: # # https://pypi.org/project/geopandas/ # https://geopandas.org/projections.html # # Ingest dataset geodf = geopandas.read_file(SHAPE_PATH) # Remove Central Valley zone geodf = geodf.drop(geodf[geodf['FDCZ']=='CV'].index).reset_index() # Convert geometries into Latitude and Longitude geodf_geo = geodf.to_crs(epsg=4326) # ## Expand Multipolygons into one Polygon per row # Use reset_index to avoid complex indices that would complicate FOR loops geodf_geo_exploded = geodf_geo.explode().reset_index() # Create a new column that combines information on the climate zone and the shape geometry # (to identify geometries comming from a multipolygon) geodf_geo_exploded["polygon"] = geodf_geo_exploded.apply(lambda x: f'{x["FDCZ"]}_{x["level_1"]}', axis=1) # ## Calculate the maximum daily wind speeds # # Using the Point in Polygon (Ray Casting) algorithm, to associate WRF grid points to particular Fire Danger Climate Zones. 
# # References: # # https://en.wikipedia.org/wiki/Point_in_polygon#Ray_casting_algorithm # # https://automating-gis-processes.github.io/CSC18/lessons/L4/point-in-polygon.html max_winds_df = pd.DataFrame(columns=['polygon']) max_winds_df['polygon'] = geodf_geo_exploded['polygon'] max_winds_df['speed'] = np.nan # This part should run in less than 9 minutes for wrf_x_indx in range(lats.shape[1]): # Loop over WRF grid points for wrf_y_indx in range(lats.shape[2]): # Create point shapes from WRF grid point df_pnt = pd.DataFrame( {'Latitude': [ lats[ 0,wrf_x_indx,wrf_y_indx] ], 'Longitude':[ longs[0,wrf_x_indx,wrf_y_indx] ] }) geodf_geo_pnt = geopandas.GeoDataFrame( df_pnt, geometry=geopandas.points_from_xy(df_pnt.Longitude, df_pnt.Latitude)) # Is this point in the Fire Danger Climatic Zone? for polygon_indx in range(geodf_geo_exploded.shape[0]): # Loop over the Fire Danger Climate Zones which_polygon = str( geodf_geo_exploded['polygon'].iloc[polygon_indx] ) within = geodf_geo_pnt.within(geodf_geo_exploded.loc[polygon_indx,'geometry']) if within[0]: # Calculate daily maximum wind speed only if inside a Fire Danger Climate Zone u10_time = u10[:,wrf_x_indx,wrf_y_indx] v10_time = v10[:,wrf_x_indx,wrf_y_indx] speed_time = (u10_time**2 + v10_time**2)**0.5 current_max_wind = max_winds_df['speed'].loc[ max_winds_df['polygon'] == which_polygon ] current_max_wind = current_max_wind.item() new_max_wind = max( max(speed_time),current_max_wind ) max_winds_df['speed'].loc[ max_winds_df['polygon'] == which_polygon ] = new_max_wind factor_from_mps_to_mph = 2.23694 # 1 m/s = 2.23694 miles per hour geodf_geo_exploded['MaxWind'] = max_winds_df["speed"] # In meters/second geodf_geo_exploded['MaxWind_mph'] = max_winds_df["speed"] * factor_from_mps_to_mph # In miles/hour # ## Results # - Dataframe with desired values # - Map displaying each climate zone color coded by the maximum daily wind speed. print(geodf_geo_exploded) # + # Visualize exploded geometries in Lat Lon coordinates map ax = geodf_geo_exploded.plot(column='MaxWind_mph',legend=True) ax.set_title("Maximum Daily Wind Speeds (mph)\n at Fire Danger Climate Zones") ax.set_xlabel("Longitude (deg.East)") ax.set_ylabel("Latitude (deg.North)") # Save figure as PNG file plt.savefig(OUTPUT_FILE) # -
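# The nested loops above build a one-row GeoDataFrame for every single WRF grid
# point, which is what makes that cell take several minutes. Below is a rough
# sketch of a vectorised alternative; it reuses the `lats`, `longs` and
# `geodf_geo_exploded` objects from this notebook, and the printed counts are
# purely illustrative.

# +
import numpy as np
import geopandas

# One GeoDataFrame holding every WRF grid point (row-major flattening of the grid)
points = geopandas.GeoDataFrame(
    {'wrf_x': np.repeat(np.arange(lats.shape[1]), lats.shape[2]),
     'wrf_y': np.tile(np.arange(lats.shape[2]), lats.shape[1])},
    geometry=geopandas.points_from_xy(np.asarray(longs[0]).ravel(),
                                      np.asarray(lats[0]).ravel()))

# One vectorised point-in-polygon test per climate-zone polygon
for polygon_indx in range(geodf_geo_exploded.shape[0]):
    inside = points.within(geodf_geo_exploded.loc[polygon_indx, 'geometry'])
    print(geodf_geo_exploded['polygon'].iloc[polygon_indx], int(inside.sum()), 'grid points')
# -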
ComputeFireZoneWinds.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_python3 # language: python # name: conda_python3 # --- # # Getting Data Ready # # The overall process for using Amazon Forecast is the following: # # 1. Create a Dataset Group, this is the large box that isolates models and the data they are trained on from each other. # 1. Create a Dataset, in Forecast there are 3 types of dataset, Target Time Series, Related Time Series, and Item Metadata. The Target Time Series is required, the others provide additional context with certain algorithms. # 1. Import data, this moves the information from S3 into a storage volume where the data can be used for training and validation. # 1. Train a model, Forecast automates this process for you but you can also select particular algorithms, and you can provide your own hyper parameters or use Hyper Parameter Optimization(HPO) to determine the most performant values for you. # 1. Deploy a Predictor, here you are deploying your model so you can use it to generate a forecast. # 1. Query the Forecast, given a request bounded by time for an item, return the forecast for it. Once you have this you can evaluate its performance or use it to guide your decisions about the future. # # In this notebook we will be walking through the first 3 steps outlined above. One additional task that will be done here is to trim part of our training and validation data so that we can measure the accuracy of a forecast against our predictions. # # # ## Table Of Contents # * Setup # * Data Preparation # * Creating the Dataset Group and Dataset # * Next Steps # # # **Read Every Cell FULLY before executing it** # # For more informations about APIs, please check the [documentation](https://docs.aws.amazon.com/forecast/latest/dg/what-is-forecast.html) # ## Setup # Import the standard Python libraries that are used in this lesson. # + import sys import os import json import time import pandas as pd import boto3 # importing forecast notebook utility from notebooks/common directory sys.path.insert( 0, os.path.abspath("../../common") ) import util # - # Configure the S3 bucket name and region name for this lesson. # # - If you don't have an S3 bucket, create it first on S3. If you used CloudFormation Wizard to set up the environment, use same bucket name as you specified in the setup process. # - Although we have set the region to us-west-2 as a default value below, you can choose any of the regions that the service is available in. text_widget_bucket = util.create_text_widget( "bucket_name", "input your S3 bucket name" ) text_widget_region = util.create_text_widget( "region", "input region name.", default_value="us-east-1" ) # + bucket_name = text_widget_bucket.value assert bucket_name, "bucket_name not set." region = text_widget_region.value assert region, "region was not set." # - # The last part of the setup process is to validate that your account can communicate with Amazon Forecast, the cell below does just that. session = boto3.Session(region_name=region) forecast = session.client(service_name='forecast') forecastquery = session.client(service_name='forecastquery') # ## Data Preparation<a class="anchor" id="DataPrep"></a> # # For this exercise, we use the individual household electric power consumption dataset. (<NAME>. and <NAME>. (2017). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. 
Irvine, CA: University of California, School of Information and Computer Science.) We aggregate the usage data hourly. # # To begin, use Pandas to read the CSV and to show a sample of the data. df = pd.read_csv("../../common/data/res_splits_history.csv", dtype = object, names=['timestamp','value','item']) df.head(3) # Notice in the output above there are 3 columns of data: # # 1. The Timestamp # 1. A Value # 1. An Item # # These are the 3 key required pieces of information to generate a forecast with Amazon Forecast. More can be added but these 3 must always remain present. # # The dataset happens to span January 01, 1995 to December 31, 2019. For our testing we would like to keep the last month of information in a different CSV. We are also going to save January to November to a different CSV as well. # # You may notice a variable named `df` this is a popular convention when using Pandas if you are using the library's dataframe object, it is similar to a table in a database. You can learn more here: https://pandas.pydata.org/pandas-docs/stable/getting_started/10min.html # # + # Select January 1995 to December 2007 for one dataframe. jan_to_dec = df[(df['timestamp'] >= '1995-01-01') & (df['timestamp'] <= '2007-12-01')] # Select January 2008 to December 2019 for another dataframe. df = pd.read_csv("../../common/data/res_splits_history.csv", dtype = object, names=['timestamp','value','item']) remaining_df = df[(df['timestamp'] >= '2008-01-01') & (df['timestamp'] <= '2019-12-01')] # - # Now export them to CSV files and place them into your `data` folder. jan_to_dec.to_csv("data/item-demand-time-train.csv", header=False, index=False) remaining_df.to_csv("data/item-demand-time-validation.csv", header=False, index=False) # At this time the data is ready to be sent to S3 where Forecast will use it later. The following cells will upload the data to S3. # + key="elec_data/item-demand-time-train.csv" boto3.Session().resource('s3').Bucket(bucket_name).Object(key).upload_file("data/item-demand-time-train.csv") # - # ## Creating the Dataset Group and Dataset <a class="anchor" id="dataset"></a> # # In Amazon Forecast , a dataset is a collection of file(s) which contain data that is relevant for a forecasting task. A dataset must conform to a schema provided by Amazon Forecast. # # More details about `Domain` and dataset type can be found on the [documentation](https://docs.aws.amazon.com/forecast/latest/dg/howitworks-domains-ds-types.html) . For this example, we are using [CUSTOM](https://docs.aws.amazon.com/forecast/latest/dg/custom-domain.html) domain with 3 required attributes `timestamp`, `target_value` and `item_id`. # # # It is importan to also convey how Amazon Forecast can understand your time-series information. That the cell immediately below does that, the next one configures your variable names for the Project, DatasetGroup, and Dataset. DATASET_FREQUENCY = "D" TIMESTAMP_FORMAT = "yyyy-MM-dd" project = 'ressplits_demand_forecastdemo' datasetName= project+'_ds' datasetGroupName= project +'_dsg' s3DataPath = "s3://"+bucket_name+"/"+key # Now save things # %store project # ### Create the Dataset Group create_dataset_group_response = forecast.create_dataset_group(DatasetGroupName=datasetGroupName, Domain="CUSTOM", ) datasetGroupArn = create_dataset_group_response['DatasetGroupArn'] forecast.describe_dataset_group(DatasetGroupArn=datasetGroupArn) # ### Create the Schema # Specify the schema of your dataset here. Make sure the order of columns matches the raw data files. 
schema ={ "Attributes":[ { "AttributeName":"timestamp", "AttributeType":"timestamp" }, { "AttributeName":"item_id", "AttributeType":"string" }, { "AttributeName":"target_value", "AttributeType":"float" } ] } # ### Create the Dataset response=forecast.create_dataset( Domain="CUSTOM", DatasetType='TARGET_TIME_SERIES', DatasetName=datasetName, DataFrequency=DATASET_FREQUENCY, Schema = schema ) datasetArn = response['DatasetArn'] forecast.describe_dataset(DatasetArn=datasetArn) # ### Add Dataset to Dataset Group forecast.update_dataset_group(DatasetGroupArn=datasetGroupArn, DatasetArns=[datasetArn]) # + [markdown] toc-hr-collapsed=false # ### Create IAM Role for Forecast # # Like many AWS services, Forecast will need to assume an IAM role in order to interact with your S3 resources securely. In the sample notebooks, we use the get_or_create_iam_role() utility function to create an IAM role. Please refer to ["notebooks/common/util/fcst_utils.py"](../../common/util/fcst_utils.py) for implementation. # - # Create the role to provide to Amazon Forecast. role_name = "ForecastNotebookRole-Tutorial" role_arn = util.get_or_create_iam_role( role_name = role_name ) # ### Create Data Import Job # # # Now that Forecast knows how to understand the CSV we are providing, the next step is to import the data from S3 into Amazon Forecaast. datasetImportJobName = 'RS_DSIMPORT_JOB_TARGET' ds_import_job_response=forecast.create_dataset_import_job(DatasetImportJobName=datasetImportJobName, DatasetArn=datasetArn, DataSource= { "S3Config" : { "Path":s3DataPath, "RoleArn": role_arn } }, TimestampFormat=TIMESTAMP_FORMAT ) ds_import_job_arn=ds_import_job_response['DatasetImportJobArn'] print(ds_import_job_arn) # Check the status of dataset, when the status change from **CREATE_IN_PROGRESS** to **ACTIVE**, we can continue to next steps. Depending on the data size. It can take 10 mins to be **ACTIVE**. This process will take 5 to 10 minutes. # + status_indicator = util.StatusIndicator() while True: status = forecast.describe_dataset_import_job(DatasetImportJobArn=ds_import_job_arn)['Status'] status_indicator.update(status) if status in ('ACTIVE', 'CREATE_FAILED'): break time.sleep(10) status_indicator.end() # - forecast.describe_dataset_import_job(DatasetImportJobArn=ds_import_job_arn) # ## Next Steps # # At this point you have successfully imported your data into Amazon Forecast and now it is time to get started in the next notebook to build your first model. To Continue, execute the cell below to store important variables where they can be used in the next notebook, then open `2.Building_Your_Predictor.ipynb`. # %store datasetGroupArn # %store datasetArn # %store role_name # %store key # %store bucket_name # %store region # %store ds_import_job_arn
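# For reference, the follow-on notebook can pull these stored variables back with
# the same magic and the `-r` flag. The cell below is a hypothetical sketch of
# what the first cell of `2.Building_Your_Predictor.ipynb` would look like; it
# only mirrors the variable names stored above.

# %store -r datasetGroupArn
# %store -r datasetArn
# %store -r role_name
# %store -r bucket_name
# %store -r region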
notebooks/basic/Tutorial/1.Getting_Data_Ready.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import numpy import pickle from glob import glob from tqdm.auto import tqdm import math import torch # %pylab inline from IPython.display import set_matplotlib_formats from matplotlib.ticker import FormatStrFormatter set_matplotlib_formats('png') import scipy from run_experiment import generate_sequence_space, config_factory # - plt.rcParams['font.family'] = 'serif' plt.rcParams['font.serif'] = ['Times'] + plt.rcParams['font.serif'] plt.rcParams['font.size'] = 10 config = config_factory(1) seq_space = generate_sequence_space(config) lengths = (seq_space != 7).sum(1) df = pickle.load(open('<PATH TO RESULTS FROM compute_pkl.py>', 'rb')) # + # df1 = pickle.load(open('../../mode_recovery_processed_pkls/02_12_wintrs.pkl', 'rb')) # df2 = pickle.load(open('../../mode_recovery_processed_pkls/02_14_wintrs_moreseeds.pkl', 'rb')) # df = pd.concat([df1, df2], axis=0, ignore_index=True) # + # Fig.1 gt_confs = { r'$\alpha = 0.0$': { "gt_interpolation": 0.0, "gt_interpolation_mode": 'p', }, r'$\alpha = 0.3$': { "gt_interpolation": 0.3, "gt_interpolation_mode": 'logp', }, r'$\alpha = 1.0$': { "gt_interpolation": 1.0, "gt_interpolation_mode": 'p', }, } fig, ax = plt.subplots(1,1, figsize=(6,3)) k_list = df['k_list'][0] training_set_sizes = df['training_set_size'].sort_values().unique() ts_to_plot = training_set_sizes[1] metric_name = 'cost_true_emp' ylabel = { "cost_true_emp": r"$\mathcal{O}_{k}(p^*_{\alpha}|| p_\mathrm{emp})$" } for i_offset, (gt_label, gt_config) in enumerate(gt_confs.items()): true_emp_cost = numpy.array(df.loc[(df['gt_interpolation'] == gt_config['gt_interpolation']) & (df['gt_interpolation_mode'] == gt_config['gt_interpolation_mode']) & (df['true_hs'] > 0) & (df['training_set_size'] == ts_to_plot) & (df['model_hs'] == 512)][metric_name].to_list()) median_cost = numpy.quantile(true_emp_cost, axis=0, q=0.5) lower_quantile = numpy.quantile(true_emp_cost, axis=0, q=0.25) #print(lower_quantile) upper_quntile = numpy.quantile(true_emp_cost, axis=0, q=0.75) ax.errorbar(k_list, y=median_cost, yerr=[median_cost-lower_quantile, upper_quntile-median_cost], label=gt_label, errorevery=10, elinewidth=0.5, capsize=5., marker='o', markersize=4, markevery=10) #plot_len_segments(ax) ax.plot(k_list, k_list, label=r'$\mathcal{O}_{k}(p^*_{\alpha}|| p_\mathrm{emp})=k$', ls='--') ax.set_ylabel(ylabel[metric_name]) ax.set_xlabel(r'$k$') ax.set_ylim(-100,1200) ax.grid(lw=0.2, ls='--') #ax.set_title(ts_to_plot) ax.legend() plt.tight_layout() # + # Fig. 
2 gt_confs = { r'$\alpha = 0.0$': { "gt_interpolation": 0.0, "gt_interpolation_mode": 'p', }, r'$\alpha = 0.3$': { "gt_interpolation": 0.3, "gt_interpolation_mode": 'logp', }, r'$\alpha = 1.0$': { "gt_interpolation": 1.0, "gt_interpolation_mode": 'p', }, } cost_ks = [200] fig, ax = plt.subplots(1,1, figsize=(6,3)) for axi, cost_k in enumerate(cost_ks): metric_name = f'cost_true_emp' training_set_sizes = df['training_set_size'].sort_values().unique() k_list = df['k_list'][0] k_mapped = k_list.index(cost_k) for i_offset, (gt_label, gt_config) in enumerate(gt_confs.items()): cost_fixed_k = [] for ts in training_set_sizes: cost_all_k = numpy.array(df.loc[(df['gt_interpolation'] == gt_config['gt_interpolation']) & (df['gt_interpolation_mode'] == gt_config['gt_interpolation_mode']) & (df['true_hs'] > 0) & (df['training_set_size'] == ts) & (df['model_hs'] == 512)][metric_name].to_list()) cost_fixed_k.append(cost_all_k[:, k_mapped, None]) cost_fixed_k = numpy.concatenate(cost_fixed_k, axis=1) median_cost = numpy.quantile(cost_fixed_k, axis=0, q=0.5) lower_quantile = numpy.quantile(cost_fixed_k, axis=0, q=0.25) upper_quntile = numpy.quantile(cost_fixed_k, axis=0, q=0.75) xs = training_set_sizes ax.errorbar(xs, y=median_cost, yerr=[median_cost-lower_quantile, upper_quntile-median_cost], label=gt_label, elinewidth=0.5, capsize=5.5) ax.plot(xs, [cost_k]*xs.size, linestyle='--', label=r'$\mathcal{O}_{k}(p^*_{\alpha}|| p_\mathrm{emp})=k$') ax.legend() ax.set_yscale('log') ax.set_xscale('log') ax.set_xlabel(r'$N_\mathrm{train}$') ax.set_ylabel(r'$\mathcal{O}_{k}(p^*_{\alpha}|| p_\mathrm{emp}), k=%d$' % cost_k) ax.grid(lw=0.2) plt.tight_layout() plt.show() # + # Fig. 3 gt_confs = { r'$\alpha = 0.0$': { "gt_interpolation": 0.0, "gt_interpolation_mode": 'p', }, r'$\alpha = 0.3$': { "gt_interpolation": 0.3, "gt_interpolation_mode": 'logp', }, r'$\alpha = 1.0$': { "gt_interpolation": 1.0, "gt_interpolation_mode": 'p', }, } fig, ax = plt.subplots(1,1, figsize=(6,3)) k_list = df['k_list'][0] training_set_sizes = df['training_set_size'].sort_values().unique() ts_to_plot = training_set_sizes[1] metric_name1 = 'cost_true_emp' metric_name2 = 'cost_true_model' for i_offset, (gt_label, gt_config) in enumerate(gt_confs.items()): true_emp_cost = numpy.array(df.loc[(df['gt_interpolation'] == gt_config['gt_interpolation']) & (df['gt_interpolation_mode'] == gt_config['gt_interpolation_mode']) & (df['true_hs'] > 0) & (df['training_set_size'] == ts_to_plot) & (df['model_hs'] > 0)][metric_name1].to_list()) true_model_cost = numpy.array(df.loc[(df['gt_interpolation'] == gt_config['gt_interpolation']) & (df['gt_interpolation_mode'] == gt_config['gt_interpolation_mode']) & (df['true_hs'] > 0) & (df['training_set_size'] == ts_to_plot) & (df['model_hs'] > 0)][metric_name2].to_list()) cost_reduction = numpy.log(true_emp_cost) - numpy.log(true_model_cost) median_cost = numpy.quantile(cost_reduction, axis=0, q=0.5) lower_quantile = numpy.quantile(cost_reduction, axis=0, q=0.25) upper_quntile = numpy.quantile(cost_reduction, axis=0, q=0.75) ax.errorbar(k_list, y=median_cost, yerr=[median_cost-lower_quantile, upper_quntile-median_cost], label=gt_label, errorevery=15+i_offset, elinewidth=0.5, capsize=5.5) ax.set_ylabel(r'$\log \frac{O_k(p^*_{\alpha}|| p_\mathrm{emp})}{O_k(p^*_{\alpha}|| p_\mathrm{model})}$') ax.set_xlabel(r'$k$') ax.set_xlim(10,500) ax.grid(lw=0.2, ls='--') ax.legend() plt.tight_layout() # + # Fig. 
4 gt_confs = { r'$\alpha = 0.0$': { "gt_interpolation": 0.0, "gt_interpolation_mode": 'p', }, r'$\alpha = 0.3$': { "gt_interpolation": 0.3, "gt_interpolation_mode": 'logp', }, r'$\alpha = 1.0$': { "gt_interpolation": 1.0, "gt_interpolation_mode": 'p', }, } cost_ks = [200] fig, ax = plt.subplots(1,1, figsize=(6,3)) cost_from = 'emp' for axi, cost_k in enumerate(cost_ks): metric_name = f'cost_{cost_from}_model' training_set_sizes = df['training_set_size'].sort_values().unique() k_list = df['k_list'][0] k_mapped = k_list.index(cost_k) for i_offset, (gt_label, gt_config) in enumerate(gt_confs.items()): cost_fixed_k = [] for ts in training_set_sizes: cost_all_k = numpy.array(df.loc[(df['gt_interpolation'] == gt_config['gt_interpolation']) & (df['gt_interpolation_mode'] == gt_config['gt_interpolation_mode']) & (df['true_hs'] > 0) & (df['training_set_size'] == ts) & (df['model_hs'] == 512)][metric_name].to_list()) cost_fixed_k.append(cost_all_k[:, k_mapped, None]) cost_fixed_k = numpy.concatenate(cost_fixed_k, axis=1) median_cost = numpy.quantile(cost_fixed_k, axis=0, q=0.5) lower_quantile = numpy.quantile(cost_fixed_k, axis=0, q=0.25) upper_quntile = numpy.quantile(cost_fixed_k, axis=0, q=0.75) xs = training_set_sizes ax.errorbar(xs, y=median_cost, yerr=[median_cost-lower_quantile, upper_quntile-median_cost], label=gt_label, elinewidth=0.5, capsize=5.5) ax.axhline(y=cost_k, color='red', linestyle='--', label=r'$\mathcal{O}_{k}(p_\mathrm{%s}|| p_\mathrm{model})=k$'%cost_from) ax.legend() ax.set_yscale('log') ax.set_xscale('log') ax.set_xlabel(r'$N_\mathrm{train}}$') ax.set_ylabel(r'$\mathcal{O}_{k}(p_\mathrm{%s}|| p_\mathrm{model}), k=$'%cost_from+f"{cost_k}") ax.grid(lw=0.2) plt.tight_layout() plt.show() # + # 6 structurally different datasets dataset_confs = { r'$N_\mathrm{train}=5 \times 10^5, \alpha=0.0$': { 'training_set_size': 5*int(1e5), 'gt_interpolation': 0.0, }, r'$N_\mathrm{train}=5 \times 10^6, \alpha=0.0$': { 'training_set_size': 5*int(1e6), 'gt_interpolation': 0.0, }, r'$N_\mathrm{train}=5 \times 10^5, \alpha=0.3$': { 'training_set_size': 5*int(1e5), 'gt_interpolation': 0.3, }, r'$N_\mathrm{train}=5 \times 10^6, \alpha=0.3$': { 'training_set_size': 5*int(1e6), 'gt_interpolation': 0.3, }, r'$N_\mathrm{train}=5 \times 10^5, \alpha=1.0$': { 'training_set_size': 5*int(1e5), 'gt_interpolation': 1.0, }, r'$N_\mathrm{train}=5 \times 10^6, \alpha=1.0$': { 'training_set_size': 5*int(1e6), 'gt_interpolation': 1.0, }, } colors = { 0.0: 'blue', 0.3: 'green', 1.0: 'purple', } lstyles = { 5*int(1e5): '-', 5*int(1e6): '--', } # + # Fig. 
5 left dec_width = 500 model_size = 512 algo = 'beam' p_distr = 'model' k_list = df['k_list'][0] fig, ax = plt.subplots(1,1, figsize=(6,3)) for data_descr, data_conf in dataset_confs.items(): subdf = df.loc[(df['training_set_size'] == data_conf['training_set_size']) & (df['gt_interpolation'] == data_conf['gt_interpolation']) & (df['model_hs'] == model_size)] if algo == 'beam': intersection = subdf[f'intersection_{p_distr}_{algo}{dec_width}'] elif algo == 'anc': intersection = subdf[f'intersection_{p_distr}_{algo}{dec_width}_t_10'] numpy_intersection = numpy.array(intersection.to_list()) median_intersection = numpy.quantile(numpy_intersection, axis=0, q=0.5) lower_quantile = numpy.quantile(numpy_intersection, axis=0, q=0.25) upper_quntile = numpy.quantile(numpy_intersection, axis=0, q=0.75) ax.errorbar(k_list, y=median_intersection, yerr=[median_intersection-lower_quantile, upper_quntile-median_intersection], label=data_descr, errorevery=15, elinewidth=0.5, capsize=5.5, color=colors[data_conf['gt_interpolation']], ls=lstyles[data_conf['training_set_size']]) ax.plot(k_list, k_list, ls='dotted', label='Recovery succeeds', color='red') ax.set_xlabel(r'$k$') ax.set_ylabel(r'$\mathcal{I}_k(p_\mathrm{%s}||p_\mathrm{%s})$' % (p_distr,algo)) ax.set_ylim(-2, dec_width+10) ax.grid(lw=0.2) ax.legend(loc='upper left') # + # Fig. 5 right dec_width = 500 model_size = 512 algo = 'anc' p_distr = 'model' k_list = df['k_list'][0] fig, ax = plt.subplots(1,1, figsize=(6,3)) for data_descr, data_conf in dataset_confs.items(): subdf = df.loc[(df['training_set_size'] == data_conf['training_set_size']) & (df['gt_interpolation'] == data_conf['gt_interpolation']) & (df['model_hs'] == model_size)] if algo == 'beam': intersection = subdf[f'intersection_{p_distr}_{algo}{dec_width}'] elif algo == 'anc': intersection = subdf[f'intersection_{p_distr}_{algo}{dec_width}_t_10'] numpy_intersection = numpy.array(intersection.to_list()) median_intersection = numpy.quantile(numpy_intersection, axis=0, q=0.5) lower_quantile = numpy.quantile(numpy_intersection, axis=0, q=0.25) upper_quntile = numpy.quantile(numpy_intersection, axis=0, q=0.75) ax.errorbar(k_list, y=median_intersection, yerr=[median_intersection-lower_quantile, upper_quntile-median_intersection], label=data_descr, errorevery=15, elinewidth=0.5, capsize=5.5, color=colors[data_conf['gt_interpolation']], ls=lstyles[data_conf['training_set_size']]) ax.plot(k_list, k_list, ls='dotted', label='Recovery succeeds', color='red') ax.set_xlabel(r'$k$') ax.set_ylabel(r'$\mathcal{I}_k(p_\mathrm{%s}||p_\mathrm{%s})$' % (p_distr,algo)) ax.set_ylim(-2, dec_width+10) ax.grid(lw=0.2) ax.legend(loc='upper left') # + # Fig. 
6 left dec_width = 500 model_size = 512 algo = 'beam' k_list = df['k_list'][0] fig, ax = plt.subplots(1,1, figsize=(6,3)) for data_descr, data_conf in dataset_confs.items(): pdistr_intersections = [] for p_distr in ['true', 'model']: subdf = df.loc[(df['training_set_size'] == data_conf['training_set_size']) & (df['gt_interpolation'] == data_conf['gt_interpolation']) & (df['model_hs'] == model_size)] if algo == 'beam': intersection = subdf[f'intersection_{p_distr}_{algo}{dec_width}'] elif algo == 'anc': intersection = subdf[f'intersection_{p_distr}_{algo}{dec_width}_t_10'] numpy_intersection = numpy.array(intersection.to_list()) pdistr_intersections.append(numpy_intersection) intersection_reduction = pdistr_intersections[0] - pdistr_intersections[1] median_intersection = numpy.quantile(intersection_reduction, axis=0, q=0.5) lower_quantile = numpy.quantile(intersection_reduction, axis=0, q=0.25) upper_quntile = numpy.quantile(intersection_reduction, axis=0, q=0.75) ax.errorbar(k_list, y=median_intersection, yerr=[median_intersection-lower_quantile, upper_quntile-median_intersection], label=data_descr, errorevery=15, elinewidth=0.5, capsize=5.5, color=colors[data_conf['gt_interpolation']], ls=lstyles[data_conf['training_set_size']]) ax.set_xlabel(r'$k$') ax.set_ylabel(r'$\mathcal{I}_k(p^*_{\alpha}||p_\mathrm{%s}) - \mathcal{I}_k(p_\mathrm{model}||p_\mathrm{%s})$' % (algo,algo)) ax.set_ylim(-100,10) ax.grid(lw=0.2) ax.legend(loc='lower left') # + # Fig. 6 right dec_width = 500 model_size = 512 algo = 'anc' k_list = df['k_list'][0] fig, ax = plt.subplots(1,1, figsize=(6,3)) for data_descr, data_conf in dataset_confs.items(): pdistr_intersections = [] for p_distr in ['true', 'model']: subdf = df.loc[(df['training_set_size'] == data_conf['training_set_size']) & (df['gt_interpolation'] == data_conf['gt_interpolation']) & (df['model_hs'] == model_size)] if algo == 'beam': intersection = subdf[f'intersection_{p_distr}_{algo}{dec_width}'] elif algo == 'anc': intersection = subdf[f'intersection_{p_distr}_{algo}{dec_width}_t_10'] numpy_intersection = numpy.array(intersection.to_list()) pdistr_intersections.append(numpy_intersection) intersection_reduction = pdistr_intersections[0] - pdistr_intersections[1] median_intersection = numpy.quantile(intersection_reduction, axis=0, q=0.5) lower_quantile = numpy.quantile(intersection_reduction, axis=0, q=0.25) upper_quntile = numpy.quantile(intersection_reduction, axis=0, q=0.75) ax.errorbar(k_list, y=median_intersection, yerr=[median_intersection-lower_quantile, upper_quntile-median_intersection], label=data_descr, errorevery=15, elinewidth=0.5, capsize=5.5, color=colors[data_conf['gt_interpolation']], ls=lstyles[data_conf['training_set_size']]) ax.set_xlabel(r'$k$') ax.set_ylabel(r'$\mathcal{I}_k(p^*_{\alpha}||p_\mathrm{%s}) - \mathcal{I}_k(p_\mathrm{model}||p_\mathrm{%s})$' % (algo,algo)) ax.set_ylim(-10, 20) ax.grid(lw=0.2) ax.legend(loc='upper left') # -
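# The figure cells above all repeat the same median / inter-quartile errorbar
# recipe. If one wanted to factor that out, a small helper along the lines below
# would do; it is plain numpy + matplotlib and the names are illustrative only.

# +
import numpy as np

def errorbar_median_iqr(ax, x, samples, label, every=15, **kwargs):
    """Plot the median of `samples` (seeds on axis 0) with 25-75% quantile error bars."""
    samples = np.asarray(samples)
    med = np.quantile(samples, 0.5, axis=0)
    lo = np.quantile(samples, 0.25, axis=0)
    hi = np.quantile(samples, 0.75, axis=0)
    ax.errorbar(x, med, yerr=[med - lo, hi - med], label=label,
                errorevery=every, elinewidth=0.5, capsize=5.5, **kwargs)
    return med
# -

# Each `ax.errorbar(...)` call above could then collapse to something like
# `errorbar_median_iqr(ax, k_list, true_emp_cost, gt_label)`.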
plots.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Explaining Performance Drivers # ### Summary # # In this note I team up <NAME> from Rates Structuring Strats to take a closer look at hedging a popular bond trade and present a new framework for explaining pnl that can be applied across any trade or portfolio in `gs_quant`. Traders, PMs, risk managers or operationally oriented users can deploy this framework to improve their understanding of historical performance drivers and in turn drive better hedging, risk management and cash management decisions. # # While foreign fixed income assets can present attractive investment opportunities, domestic investors may need to use overlays if they want to receive domestic currency. One popular example of this has been buying JGBs and using a fixfix swap to receive in local. In this bond + swap package, FX of the swap will fully match the bond FX component but clients can still face pnl volatility due to basis and IR differences not matched by the bond accounted at cost. # # In this notebook I will take a closer look at the fixfix swap tailored for a specific JGB bond and decompose its historical pnl drivers into rates, cross currency, fx and cashflow components to better understand the drivers of this volatility. # # # The content of this notebook is split into: # * [1 - Let's get started with gs quant](#1---Let's-get-started-with-gs-quant) # * [2 - Model bond as FixFix swap](#2---Model-bond-as-fixfix-swap) # * [3 - Attribute pnl and calculate cashflows](#3---Attribute-pnl-and-calculate-cashflows) # * [4 - Putting it all together](#4---Putting-it-all-together) # * [What's New](#What's-New) # # ### 1 - Let's get started with gs quant # Start every session with authenticating with your unique client id and secret. If you don't have a registered app, create one [here](https://marquee.gs.com/s/developer/myapps/register). `run_analytics` scope is required for the risk functionality and `read_product_data` is required for pulling data covered in this example. Below produced using gs-quant version 0.8.155. from gs_quant.session import GsSession GsSession.use(client_id=None, client_secret=None, scopes=('run_analytics', 'read_product_data')) # ## 2 - Model bond as fixfix swap # # Let's pick a JGB bond to analyze - in this example we will look at `JGB #53 JP1300531GC0` (ISIN) and fill in the relative details. Note here we are manually inputting the details but we'll remove this step once you're able to model bonds in gs quant directly. # + from datetime import date bond_notional = 1e8 bond_coupon = 0.006 coupon_freq = '6m' bond_maturity = date(2046, 12, 20) last_cpn_date = date(2018, 12, 20) # Last paid coupon date as seen from backtest start date maturity = bond_maturity bond_dirty_price = 97.66 # historical window we'll examine start_date = date(2019, 1, 2) end_date = date(2019, 11, 1) CSA = 'EUR-OIS' # - # Now, in order to receive payments in local - let's say that is EUR rather than JPY - we can structure a `IRXccySwapFixFix` swap that matches our bond's characteristics outlined above. # # To do this, we need to size it to the bond notional using the EUR/JPY FX rate at the start of our window. Let's pull it from [Marquee data catalogue](https://marquee.gs.com/s/discover/data-services/catalog) as a first step. 
# + from gs_quant.data import Dataset ds = Dataset('FXSPOT_STANDARD') eurjpy_data = ds.get_data(start_date, end_date, bbid='JPYEUR') fx_rate = eurjpy_data.loc[start_date].spot # - # With the FX spot as of `start_date` in hand, let's define our swap and `resolve()` to fix any relative parameters as of the same `start date`. We can use `as_dict()` to view what these are. I'll also calculate swap cashflows here that we will use later to add to the total pv we're attributing. Note below we can choose between par or proceeds asset swap format that impact the fixed notional and fee used. # + from gs_quant.markets import PricingContext from gs_quant.instrument import IRXccySwapFixFix from gs_quant import risk ASWType = 'Proceeds' # Can either be par or proceeds fixed_notional = bond_notional * fx_rate * bond_dirty_price / 100 if ASWType=='Proceeds' else bond_notional * fx_rate fee = 0 if ASWType=='Proceeds' else -(bond_dirty_price - 100) / 100 * bond_notional swap = IRXccySwapFixFix(effective_date=last_cpn_date, termination_date=bond_maturity, notional_amount=bond_notional, payer_currency='JPY', receiver_notional_amount=fixed_notional, payer_rate=bond_coupon, receiver_currency='EUR', payer_frequency=coupon_freq, receiver_frequency='1y', payer_day_count_fraction='act/act ISDA', fee=fee, receiver_rate='ATM') with PricingContext(pricing_date=start_date, market_data_location='LDN', csa_term=CSA): swap.resolve() cf = swap.calc(risk.Cashflows) swap.as_dict() # - # ## 3 - Attribute pnl and calculate cashflows # # Now, let's break out our swap pv into contributions from various drivers - to do this, we'll use a newly minted gs-quant analytic called `PnlExplain` which attributes the change in value of an individual trade (or portfolio) to market moves. Note the values returned are in USD. In the below, I look at `PnLExplain` every day to get a daily attribution but you can use this for any time period. I also group by`mkt_type` for clarity but you can use the measure to get a more granular view by removing the grouping. You'll notice a `CROSSES` PnL, which represents the cross-effects among the other types. # + from gs_quant.markets import CloseMarket result_dict = {} for d in eurjpy_data.index: exp_measure = risk.PnlExplain(CloseMarket(date=d.date())) with PricingContext(pricing_date=start_date, market_data_location='LDN', is_async=True, csa_term=CSA): exp_res = swap.calc(exp_measure) result_dict[d.date()] = exp_res # + def format_res(r, d): # here we group and sum by market type - you can get a more granular view when skipping this step df = r.groupby('mkt_type').sum().reset_index() df['date'] = d return df.set_index('date') result_clean = pd.concat([format_res(r.result(), d) for d, r in result_dict.items() if len(r.result())]) result = result_clean.groupby(['date', 'mkt_type']).sum().unstack() result.columns = result.columns.droplevel(0) result.loc[start_date] = 0 # first day is 0 pnl result.head() # let's take a peak! # - # Now for the final bit - cashflows. We have already calculated our cashflows in [step 2](#2---Model-bond-as-fixfix-swap). Let's remove anything that's later than our `end_date` and convert the cashflow amount into USD. # + import pandas as pd cfs = cf.result() cfs = cfs[cfs['payment_date'] < end_date] cash = pd.Series(cfs.payment_amount.values, index=cfs.payment_date) cash = (cash * eurjpy_data.spot).fillna(0) result['CASH'] = cash # - # ## 4 - Putting it all together # Finally, with all the results in hand, let's take a look! 
result['Total'] = result.sum(axis=1)
result.plot(figsize=(12, 8), title='PnL Drivers')

# As we can see in the chart above, over the backtest period rates drive most of the positive swap performance while cross currency effects are largely a negative drag over the period. FX, although a small positive contributor through mid 2019, ultimately drives much of the negative contribution through the remainder of the year. Remember we are looking at the swap hedge only here so FX will be entirely offset by the bond in the bond+swap package.
#
# In this note we decomposed the fixfix swap but you can use this framework to analyse any trade or portfolio - looking forward to hearing your feedback!

# ### What's New
# * `PnlExplain` which we covered in this note!
# * `Portfolio.from_frame` and `Portfolio.from_csv` to help map and represent your portfolio object from a dataframe or csv file.
# * `to_frame` to view complex results - see example [here](https://nbviewer.jupyter.org/github/goldmansachs/gs-quant/blob/master/gs_quant/examples/01_pricing_and_risk/00_rates/010014_spread_option_grid_pricing.ipynb)
# * Solvers for different fields to allow to solve for a strike or fixed rate such that PV=x. Examples to come - please reach out in the meantime.
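# As a quick follow-up to the chart, the daily attribution in `result` can be
# collapsed into a per-driver total over the whole window with plain pandas,
# which makes the rates / xccy / FX ranking explicit. A small sketch using the
# frame built above:

# +
driver_totals = result.drop(columns='Total').sum().sort_values()
print(driver_totals)

# share of the absolute attributed PnL coming from each driver
print((driver_totals.abs() / driver_totals.abs().sum()).round(3))
# -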
gs_quant/content/made_with_gs_quant/10-Explaining Performance Drivers.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + id="Qajx0lAWa1Jb" import numpy as np import pandas as pd from konlpy.tag import Okt callcount = 0 def to_noun_sentence(text): global callcount global twitter callcount += 1 if callcount % 100 == 0: print(callcount) stopwords = ['질문', '문의', '관련', '그대로', '계속', '답변', '선생님', '관련문의', '한지', '자주', '좀', '쪽', '자꾸', '요즘', '몇개', '무조건', '하나요', '안해','요', '경우', '최근', '및', '몇', '달', '일반', '전날', '저번', '말', '일어나지', '며칠', '먹기', '지난번', '글', '때문', '너', '무', '오늘', '시', '잔', '뒤', '지속', '막', '것', '이건', '뭔가', '다시', '그', '무슨', '안', '난', '도', '기', '후', '거리', '이', '뭘', '저', '뭐', '답젼', '평생', '회복', '반', '감사', '의사', '보험', '학생', '제발', '살짝', '느낌', '제', '대해','갑자기','문제', '전','정도', '왜', '거', '가요', '의심', '어제', '추천', '를', '지금', '무엇', '내일', '관해', '리', '세', '로', '목적', '그냥', '거의', '고민', '다음', '이틀', '항상', '뭐', '때', '요', '가끔', '이후', '혹시', ] twitter = Okt() nouns = twitter.nouns(text) for word in nouns: if word in stopwords: while word in nouns: nouns.remove(word) return ' '.join(nouns) def preprocess_sentence(sentence): twitter = Okt() nouns = twitter.nouns(sentence) for word in nouns: if word in stopwords: while word in nouns: nouns.remove(word) nouns. def csv_to_datasets(filepath, vocab_num, sequencelength=10): import numpy as np import pandas as pd import tensorflow as tf import tensorflow.keras as keras from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences from sklearn.model_selection import train_test_split from sklearn.utils import class_weight df = pd.read_csv(filepath) df['nounlist'] = df['nouns'].str.split() df.dropna(inplace=True) from sklearn.model_selection import train_test_split x_train_, x_test_, y_train, y_test = train_test_split( df['nounlist'], df['label'], test_size=0.2, random_state=1234, stratify=df['label'] ) vocab_size = vocab_num t = Tokenizer(num_words=vocab_size) t.fit_on_texts(df['nounlist']) sequence_length = sequencelength trunc_type = 'post' padding_type = 'post' x_train = t.texts_to_sequences(x_train_) padded_x_train = pad_sequences(x_train, truncating=trunc_type, padding=padding_type, maxlen=sequence_length) x_test = t.texts_to_sequences(x_test_) padded_x_test = pad_sequences(x_test, truncating=trunc_type, padding=padding_type, maxlen=sequence_length) weight = class_weight.compute_class_weight('balanced', np.unique(y_train), y_train) weight = {i : weight[i] for i in range(26)} y_train = keras.utils.to_categorical(y_train) y_test = keras.utils.to_categorical(y_test) def preprocess_csv(filepath, exportpath, startindex=0, endindex=None): df = pd.read_csv(filepath) if endindex != None: df = df[startindex:endindex] df.dropna(inplace=True) df['symptom'] = df['symptom'].str.replace(pat='([ㄱ-ㅎㅏ-ㅣ]+)', repl=r' ', regex=True) df['symptom'] = df['symptom'].str.replace(pat='[^\w\s]', repl=r' ', regex=True) df['nouns'] = df['symptom'].apply(to_noun_sentence) #라벨 정수화 class_to_label = {'DERM': 0, 'GS': 1, 'IP': 2, 'GI':3, 'OPH':4, 'NR': 5, 'ENT': 6, 'PSY': 7, 'HEON': 8, 'RHEU': 9, 'REHM': 10, 'NS': 11, 'AN': 12, 'DENT': 13, 'PS': 14, 'CS': 15, 'INFC': 16, 'OS': 17, 'EMR': 18, 'ENDO': 19, 'CA': 20, 'KTM': 21, 'OBGY': 22, 'URO': 23, 'ALL': 24, 'NPH': 25,'LAB': 26} df['label'] = df['class'].map(class_to_label) # 2개 컬럼만 남김 df = df[['nouns', 'label']] # NaN 삭제 df.dropna(inplace=True) df['label'] = df['label'].astype(int) 
df.to_csv(exportpath, encoding='utf-8', index=False) print('done')
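# One detail worth flagging in `csv_to_datasets`: newer scikit-learn releases make
# `classes` and `y` keyword-only in `compute_class_weight`, so the positional call
# above fails there. A self-contained sketch of the keyword form (toy labels,
# illustrative only):

# +
import numpy as np
from sklearn.utils import class_weight

y_toy = np.array([0, 0, 0, 1, 1, 2])  # stand-in for y_train

weights = class_weight.compute_class_weight(class_weight='balanced',
                                            classes=np.unique(y_toy),
                                            y=y_toy)
weight_dict = {i: w for i, w in enumerate(weights)}
print(weight_dict)
# -

# Note also that `csv_to_datasets` as written builds `padded_x_train`,
# `padded_x_test`, the one-hot labels and `weight` but never returns them, so a
# caller would presumably want a final `return` handing those objects (and the
# fitted tokenizer `t`) back.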
models/Okt_LSTM/preprocess.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: PyCharm (machine-learning-lab) # language: python # name: pycharm-7004eb84 # --- # %matplotlib inline # # Training a Classifier # ===================== # # This is it. You have seen how to define neural networks, compute loss and make # updates to the weights of the network. # # Now you might be thinking, # # What about data? # ---------------- # # Generally, when you have to deal with image, text, audio or video data, # you can use standard python packages that load data into a numpy array. # Then you can convert this array into a ``torch.*Tensor``. # # - For images, packages such as Pillow, OpenCV are useful # - For audio, packages such as scipy and librosa # - For text, either raw Python or Cython based loading, or NLTK and # SpaCy are useful # # Specifically for vision, we have created a package called # ``torchvision``, that has data loaders for common datasets such as # Imagenet, CIFAR10, MNIST, etc. and data transformers for images, viz., # ``torchvision.datasets`` and ``torch.utils.data.DataLoader``. # # This provides a huge convenience and avoids writing boilerplate code. # # For this tutorial, we will use the CIFAR10 dataset. # It has the classes: ‘airplane’, ‘automobile’, ‘bird’, ‘cat’, ‘deer’, # ‘dog’, ‘frog’, ‘horse’, ‘ship’, ‘truck’. The images in CIFAR-10 are of # size 3x32x32, i.e. 3-channel color images of 32x32 pixels in size. # # .. figure:: /_static/img/cifar10.png # :alt: cifar10 # # cifar10 # # # Training an image classifier # ---------------------------- # # We will do the following steps in order: # # 1. Load and normalizing the CIFAR10 training and test datasets using # ``torchvision`` # 2. Define a Convolutional Neural Network # 3. Define a loss function # 4. Train the network on the training data # 5. Test the network on the test data # # 1. Loading and normalizing CIFAR10 # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # Using ``torchvision``, it’s extremely easy to load CIFAR10. # # import torch import torchvision import torchvision.transforms as transforms # The output of torchvision datasets are PILImage images of range [0, 1]. # We transform them to Tensors of normalized range [-1, 1]. # <div class="alert alert-info"><h4>Note</h4><p>If running on Windows and you get a BrokenPipeError, try setting # the num_worker of torch.utils.data.DataLoader() to 0.</p></div> # # # + transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) trainset = torchvision.datasets.CIFAR10(root='../data', train=True, download=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2) testset = torchvision.datasets.CIFAR10(root='../data', train=False, download=True, transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2) classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') # - # Let us show some of the training images, for fun. 
# # # + import matplotlib.pyplot as plt import numpy as np # functions to show an image def imshow(img): img = img / 2 + 0.5 # unnormalize npimg = img.numpy() plt.imshow(np.transpose(npimg, (1, 2, 0))) plt.show() # get some random training images dataiter = iter(trainloader) images, labels = dataiter.next() # show images imshow(torchvision.utils.make_grid(images)) # print labels print(' '.join('%5s' % classes[labels[j]] for j in range(4))) # - # 2. Define a Convolutional Neural Network # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # Copy the neural network from the Neural Networks section before and modify it to # take 3-channel images (instead of 1-channel images as it was defined). # # # + import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 16 * 5 * 5) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x net = Net() # - # 3. Define a Loss function and optimizer # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # Let's use a Classification Cross-Entropy loss and SGD with momentum. # # # + import torch.optim as optim criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9) # - # 4. Train the network # ^^^^^^^^^^^^^^^^^^^^ # # This is when things start to get interesting. # We simply have to loop over our data iterator, and feed the inputs to the # network and optimize. # # # + pycharm={"is_executing": true} for epoch in range(2): # loop over the dataset multiple times running_loss = 0.0 for i, data in enumerate(trainloader, 0): # get the inputs; data is a list of [inputs, labels] inputs, labels = data # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs = net(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # print statistics running_loss += loss.item() if i % 2000 == 1999: # print every 2000 mini-batches print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000)) running_loss = 0.0 print('Finished Training') # - # Let's quickly save our trained model: # # # + pycharm={"is_executing": true} PATH = './cifar_net.pth' torch.save(net.state_dict(), PATH) # - # See `here <https://pytorch.org/docs/stable/notes/serialization.html>`_ # for more details on saving PyTorch models. # # 5. Test the network on the test data # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # We have trained the network for 2 passes over the training dataset. # But we need to check if the network has learnt anything at all. # # We will check this by predicting the class label that the neural network # outputs, and checking it against the ground-truth. If the prediction is # correct, we add the sample to the list of correct predictions. # # Okay, first step. Let us display an image from the test set to get familiar. 
# # # + pycharm={"is_executing": true} dataiter = iter(testloader) images, labels = dataiter.next() # print images imshow(torchvision.utils.make_grid(images)) print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4))) # - # Next, let's load back in our saved model (note: saving and re-loading the model # wasn't necessary here, we only did it to illustrate how to do so): # # # + pycharm={"is_executing": true} net = Net() net.load_state_dict(torch.load(PATH)) # - # Okay, now let us see what the neural network thinks these examples above are: # # # + pycharm={"is_executing": true} outputs = net(images) # - # The outputs are energies for the 10 classes. # The higher the energy for a class, the more the network # thinks that the image is of the particular class. # So, let's get the index of the highest energy: # # # + pycharm={"is_executing": true} _, predicted = torch.max(outputs, 1) print('Predicted: ', ' '.join('%5s' % classes[predicted[j]] for j in range(4))) # - # The results seem pretty good. # # Let us look at how the network performs on the whole dataset. # # # + pycharm={"is_executing": true} correct = 0 total = 0 with torch.no_grad(): for data in testloader: images, labels = data outputs = net(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 10000 test images: %d %%' % ( 100 * correct / total)) # - # That looks way better than chance, which is 10% accuracy (randomly picking # a class out of 10 classes). # Seems like the network learnt something. # # Hmmm, what are the classes that performed well, and the classes that did # not perform well: # # # + pycharm={"is_executing": true} class_correct = list(0. for i in range(10)) class_total = list(0. for i in range(10)) with torch.no_grad(): for data in testloader: images, labels = data outputs = net(images) _, predicted = torch.max(outputs, 1) c = (predicted == labels).squeeze() for i in range(4): label = labels[i] class_correct[label] += c[i].item() class_total[label] += 1 for i in range(10): print('Accuracy of %5s : %2d %%' % ( classes[i], 100 * class_correct[i] / class_total[i])) # - # Okay, so what next? # # How do we run these neural networks on the GPU? # # Training on GPU # ---------------- # Just like how you transfer a Tensor onto the GPU, you transfer the neural # net onto the GPU. # # Let's first define our device as the first visible cuda device if we have # CUDA available: # # # + pycharm={"is_executing": true} device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # Assuming that we are on a CUDA machine, this should print a CUDA device: print(device) # - # The rest of this section assumes that ``device`` is a CUDA device. # # Then these methods will recursively go over all modules and convert their # parameters and buffers to CUDA tensors: # # .. code:: python # # net.to(device) # # # Remember that you will have to send the inputs and targets at every step # to the GPU too: # # .. code:: python # # inputs, labels = data[0].to(device), data[1].to(device) # # Why dont I notice MASSIVE speedup compared to CPU? Because your network # is really small. # # **Exercise:** Try increasing the width of your network (argument 2 of # the first ``nn.Conv2d``, and argument 1 of the second ``nn.Conv2d`` – # they need to be the same number), see what kind of speedup you get. # # **Goals achieved**: # # - Understanding PyTorch's Tensor library and neural networks at a high level. 
# - Train a small neural network to classify images # # Training on multiple GPUs # ------------------------- # If you want to see even more MASSIVE speedup using all of your GPUs, # please check out :doc:`data_parallel_tutorial`. # # Where do I go next? # ------------------- # # - :doc:`Train neural nets to play video games </intermediate/reinforcement_q_learning>` # - `Train a state-of-the-art ResNet network on imagenet`_ # - `Train a face generator using Generative Adversarial Networks`_ # - `Train a word-level language model using Recurrent LSTM networks`_ # - `More examples`_ # - `More tutorials`_ # - `Discuss PyTorch on the Forums`_ # - `Chat with other users on Slack`_ # # # # + pycharm={"is_executing": true}
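# Tying the GPU remarks above together, the cell below is a minimal sketch of the
# same two-epoch training loop run on the selected `device`. It reuses the `Net`,
# `trainloader`, `nn` and `optim` names defined earlier in this tutorial and adds
# nothing beyond the `.to(device)` calls discussed above.

# +
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

net = Net().to(device)                     # move parameters and buffers to the GPU
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

for epoch in range(2):
    for inputs, labels in trainloader:
        inputs, labels = inputs.to(device), labels.to(device)   # move each batch too
        optimizer.zero_grad()
        loss = criterion(net(inputs), labels)
        loss.backward()
        optimizer.step()
# -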
notebooks/cifar10_tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/sauravkumarkash1/Coursera-Full-Stack-Web-Development/blob/master/Missing_data_handle.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="ltCeDC8OE5nc" import numpy as np import pandas as pd # + colab={"base_uri": "https://localhost:8080/", "height": 357} id="g1utSz2KHfOB" outputId="dba10bf3-e143-4534-c5ee-56a14eae8c31" df=pd.read_csv("/content/student_data.csv") df # + colab={"base_uri": "https://localhost:8080/", "height": 357} id="dOvyPJmmIv-1" outputId="85392520-2338-418a-bbb7-07e3af528e3e" df.fillna(0) # + colab={"base_uri": "https://localhost:8080/", "height": 357} id="l4oimtBKI3NM" outputId="b0a2957c-d882-4b5d-9752-ff5ce177d679" new_df=df.fillna({ 'CS':0,'Math':0,'Name':'No Category' }) new_df # + colab={"base_uri": "https://localhost:8080/", "height": 357} id="W_H9no-OOOgZ" outputId="e90f10d0-2466-4f8b-aef9-baf14a6db17e" new_df=df.fillna(method="ffill") new_df # + colab={"base_uri": "https://localhost:8080/", "height": 357} id="8mEu394KqGmn" outputId="c64f5dd9-e23f-4172-bcb3-b5950fb90d6e" new_df=df.fillna(method="bfill") new_df # + colab={"base_uri": "https://localhost:8080/", "height": 357} id="IP9RYcfKqnon" outputId="b34f4965-6b9a-4d83-b54b-c0ef3b4d75e4" newdf=df.fillna(method='bfill',axis="columns") #index for row,columns for columns newdf # + colab={"base_uri": "https://localhost:8080/", "height": 357} id="OE5Rd8BPrGCc" outputId="77942a6a-7721-4879-f4cb-a774b43fd008" newdf=df.fillna(method='ffill',axis="columns") #index for row,columns for columns newdf # + colab={"base_uri": "https://localhost:8080/", "height": 357} id="6R-ngUaRrxNo" outputId="25fe3431-8c4a-478e-c44e-5b39b9150b96" newdf=df.fillna(method='bfill',axis="index") #index for row,columns for columns newdf # + colab={"base_uri": "https://localhost:8080/", "height": 357} id="tA0-Mrkkr9KU" outputId="dcd0f4f0-aa34-4f96-ac58-753148bf0aa5" newdf=df.fillna(method='ffill',axis="columns") #index for row,columns for columns newdf # + colab={"base_uri": "https://localhost:8080/", "height": 357} id="T0v06gICsBZc" outputId="11a394b8-e977-4388-ae9e-b1b7a356e6b1" newdf=df.fillna(method='bfill',limit=1) #index for row,columns for columns newdf # + colab={"base_uri": "https://localhost:8080/", "height": 357} id="va_wQ97GsWKf" outputId="14ab4afc-f4c0-43fe-c4ce-97ef321ac5a3" newdf=df.fillna(method='ffill',limit=3) #index for row,columns for columns newdf # + colab={"base_uri": "https://localhost:8080/", "height": 357} id="_UNxDQ9xtJx8" outputId="1b655111-4c89-4595-8c69-9385ec945ad5" newdf=df.fillna(method='bfill',limit=3,inplace=True) #index for row,columns for columns newdf df # + colab={"base_uri": "https://localhost:8080/", "height": 357} id="L4zvVxFHtXys" outputId="7d3b7f2f-a688-4146-c5cb-6835cc29d459" newdf=df.interpolate() newdf # + colab={"base_uri": "https://localhost:8080/", "height": 172} id="u_jzfdrCuvV9" outputId="11163e2c-9c91-486f-fbe0-9967deada0c8" newdf=df.dropna() newdf # + colab={"base_uri": "https://localhost:8080/", "height": 357} id="9lzfHzA4u42Q" outputId="ebd45d63-9d9f-4c7a-ec78-19e408ac0555" newdf=df.dropna(how="all") newdf # + colab={"base_uri": "https://localhost:8080/", "height": 265} id="TMU1EekkvODO" 
outputId="cb6a59c4-df33-4d6a-dbad-03011bcf6e7a" newdf=df.dropna(thresh=3) newdf # + colab={"base_uri": "https://localhost:8080/", "height": 357} id="DbV-nSaRvyXj" outputId="494c29b6-2901-4d3e-f7b1-fc441dd29dbc" newdf=df.replace(np.NaN,999) newdf # + colab={"base_uri": "https://localhost:8080/", "height": 357} id="_okpGqZ01U41" outputId="151dcba1-6e39-4411-c16f-aba3922d633f" newdf=df.replace(np.NaN,{ 'CS':999,'Math':100 }) newdf # + [markdown] id="7M-FAnrW2GRA" # # Boston Housing Price Prediction # + id="-gYEv38A19Q1" import pandas as pd import numpy as np import matplotlib.pyplot as plt import sklearn from sklearn.linear_model import LinearRegression # + id="MO51PxrA4FlI" from sklearn.datasets import load_boston # + colab={"base_uri": "https://localhost:8080/"} id="oSe-XIJ84xRx" outputId="960ddc37-df81-44d3-9d51-d237bac94426" boston=load_boston() print(boston) # + colab={"base_uri": "https://localhost:8080/"} id="YAiTTu4h5JnN" outputId="2dcba3c3-ab26-4467-a600-253a8c532baa" boston.keys() # + colab={"base_uri": "https://localhost:8080/"} id="VGKVy9aW5QP3" outputId="ec7ceffb-c0a3-44bb-eaaf-09ef24c7b1ab" boston['data'].shape # + colab={"base_uri": "https://localhost:8080/"} id="AFvgLEn65XIU" outputId="20bda383-8c36-4a68-ed3f-a5d8e6d9b561" bos=pd.DataFrame(boston.data) #boston['data](we can also write in this way) print(bos.head(2)) # + colab={"base_uri": "https://localhost:8080/"} id="BMKm4k-N6dtt" outputId="a10a051b-99ea-4889-c20e-91d3f409b15a" print(boston.feature_names) # + colab={"base_uri": "https://localhost:8080/"} id="E1G8_MOq7v5n" outputId="01712f07-b3ba-447a-c555-c6565f79bd2c" print(boston.DESCR) # + colab={"base_uri": "https://localhost:8080/"} id="ZS4HUEzS7887" outputId="76417c83-3283-473a-f368-a7fe55aeb3e8" print(boston.target) # + colab={"base_uri": "https://localhost:8080/", "height": 417} id="qaVhdPHe8nrb" outputId="2fd25334-d41c-4c56-e322-f44a5a2ac89b" bos # + colab={"base_uri": "https://localhost:8080/", "height": 417} id="9X_efBwO8zm9" outputId="e67dd4fb-20bc-4687-cc14-40ac8313eec8" bos.columns=boston.feature_names bos # + colab={"base_uri": "https://localhost:8080/", "height": 203} id="lR5Mu7Dy9TRU" outputId="0cee0d25-441a-4176-924f-c9b73e90bb2c" bos.head() # + colab={"base_uri": "https://localhost:8080/", "height": 417} id="qrKa_ojj9hoR" outputId="c57fbf6a-5c51-4e40-9473-20d15f7dee51" bos['Price']=boston.target bos # + id="WtjhWzPF9_QV" y=bos['Price'] # + colab={"base_uri": "https://localhost:8080/"} id="9fWDIZMg-EWL" outputId="f6cb4064-2b94-4021-88d9-2203cc999672" x=bos.drop('Price',axis=1) print(x.head()) print(y.head()) # + id="4cPzr8kOB2zL" from sklearn.model_selection import train_test_split x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.33,random_state=5) # + colab={"base_uri": "https://localhost:8080/"} id="BhPScfERCM1j" outputId="a3d6a2e1-f980-4345-def2-1c0e7925c374" lm=LinearRegression() lm.fit(x_train,y_train) # + id="wTY_2cBwC1gA" y_pred_train=lm.predict(x_train) #actual y_train y_pred_test=lm.predict(x_test) #actual y_test # + colab={"base_uri": "https://localhost:8080/", "height": 234} id="MKr5ZNX1DLY7" outputId="88e2b7ea-aa26-4d11-da94-4dd1649bc09a" df=pd.DataFrame(y_pred_test,y_test) df.head() # + colab={"base_uri": "https://localhost:8080/"} id="cLExKfvWDjZE" outputId="c0f064ba-9dc2-4b44-a16d-fa96cdbe55c5" from sklearn.metrics import mean_squared_error mse=mean_squared_error(y_test,y_pred_test) print(mse) # + [markdown] id="JdsbdKqnECEp" # #Visualization # + colab={"base_uri": "https://localhost:8080/", "height": 265} id="S6qrj5FeEFXA" 
outputId="e6c4cbaf-1cba-4a06-ce4a-9712a6974e44" #plt.scatter(y_test,y_pred_test,c='red',marker='*') plt.plot(x_test,lm.predict(x_test)) plt.show()
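# A scatter of predicted against actual prices (the commented-out line above) is
# usually a more readable check than plotting all 13 raw features at once, and it
# pairs naturally with an R^2 score. The sketch below reuses `y_test` and
# `y_pred_test` from the cells above. (Note that `load_boston` has been removed
# from recent scikit-learn releases, so this section needs an older scikit-learn
# to run.)

# +
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt

plt.scatter(y_test, y_pred_test, c='red', marker='*')
plt.xlabel('Actual price')
plt.ylabel('Predicted price')
plt.title('R^2 on test split: %.3f' % r2_score(y_test, y_pred_test))
plt.show()
# -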
Missing_data_handle.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Object Oriented Programming # # After going through the basics of python, you would be interested to know more about further and bit more advance topics of the Python3 programming language. # This article covers them. # Please remember that Python completely works on indentation and it is advised to practice it a bit by running some programs. Use the tab key to provide indentation to your code # ### Classes # Just like every other Object Oriented Programming language Python supports classes. Let’s look at some points on Python classes. # <ul> # <li>Classes are created by keyword class.</li> # <li>Attributes are the variables that belong to class.</li> # <li>Attributes are always public and can be accessed using dot (.) operator. Eg.: Myclass.Myattribute</li> # </ul> # A sample E.g for classes: # + # creates a class named MyClass class MyClass: # assign the values to the MyClass attributes number = 0 name = "noname" def Main(): # Creating an object of the MyClass. # Here, 'me' is the object me = MyClass() # Accessing the attributes of MyClass # using the dot(.) operator me.number = 1337 me.name = "Harssh" # str is an build-in function that # creates an string print(me.name + " " + str(me.number)) # telling python that there is main in the program. if __name__=='__main__': Main() # - # ### Methods # Method is a bunch of code that is intended to perform a particular task in your Python’s code. # <ul> # <li>Function that belongs to a class is called an Method.</li> # <li>All methods require ‘self’ parameter. If you have coded in other OOP language you can think of ‘self’ as the ‘this’ keyword which is used for the current object. It unhides the current instance variable.’self’ mostly work like ‘this’.</li> # <li>‘def’ keyword is used to create a new method.</li> # </ul> # + # A Python program to demonstrate working of class # methods class Vector2D: x = 0.0 y = 0.0 # Creating a method named Set def Set(self, x, y): self.x = x self.y = y def Main(): # vec is an object of class Vector2D vec = Vector2D() # Passing values to the function Set # by using dot(.) operator. vec.Set(5, 6) print("X: " + str(vec.x) + ", Y: " + str(vec.y)) if __name__=='__main__': Main() # - # ### Inheritance # Inheritance is defined as a way in which a particular class inherits features from its base class.Base class is also knows as ‘Superclass’ and the class which inherits from the Superclass is knows as ‘Subclass’ # + # A Python program to demonstrate working of inheritance class Pet: #__init__ is an constructor in Python def __init__(self, name, age): self.name = name self.age = age # Class Cat inheriting from the class Pet class Cat(Pet): def __init__(self, name, age): # calling the super-class function __init__ # using the super() function super().__init__(name, age) def Main(): thePet = Pet("Pet", 1) jess = Cat("Jess", 3) # isinstance() function to check whether a class is # inherited from another class print("Is jess a cat? " +str(isinstance(jess, Cat))) print("Is jess a pet? " +str(isinstance(jess, Pet))) print("Is the pet a cat? "+str(isinstance(thePet, Cat))) print("Is thePet a Pet? " +str(isinstance(thePet, Pet))) print(jess.name) if __name__=='__main__': Main() # - # ### Iterators # Iterators are objects that can be iterated upon. 
# <ul> # <li>Python uses the __iter__() method to return an iterator object of the class.</li> # <li>The iterator object then uses the __next__() method to get the next item.</li> # <li>for loops stops when StopIteration Exception is raised.</li> # # </ul> # + # This program will reverse the string that is passed # to it from the main function class Reverse: def __init__(self, data): self.data = data self.index = len(data) def __iter__(self): return self def __next__(self): if self.index == 0: raise StopIteration self.index-= 1 return self.data[self.index] def Main(): rev = Reverse('Drapsicle') for char in rev: print(char) if __name__=='__main__': Main() # - # ### Generators # <ul> # <li>Another way of creating iterators.</li> # <li>Uses a function rather than a separate class</li> # <li>Generates the background code for the next() and iter() methods</li> # <li>Uses a special statement called yield which saves the state of the generator and set a resume point for when next() is called again</li> # + # A Python program to demonstrate working of Generators def Reverse(data): # this is like counting from 100 to 1 by taking one(-1) # step backward. for index in range(len(data)-1, -1, -1): yield data[index] def Main(): rev = Reverse('Harssh') for char in rev: print(char) data ='Harssh' print(list(data[i] for i in range(len(data)-1, -1, -1))) if __name__=="__main__": Main() # -
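# To make the StopIteration mechanism concrete, here is a minimal sketch that drives the
# most recently defined `Reverse` above by hand; it is roughly what a `for` loop does
# internally and works the same whether `Reverse` is the iterator class or the generator.

# +
# iter() returns the iterator object (here the generator/iterator itself);
# next() advances it until __next__ raises StopIteration, which ends the loop.
it = iter(Reverse('abc'))
try:
    while True:
        print(next(it))
except StopIteration:
    pass
# -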
Day3/Object Oriented Programming.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="uQ5bgaFxiOEI" import numpy as np import pandas as pd from IPython.display import display from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.models import Sequential from keras.layers import Embedding, Conv1D, MaxPooling1D, LSTM, Dense, Dropout from keras.layers import TextVectorization from sklearn.metrics import accuracy_score import tensorflow as tf import keras import nltk from nltk.stem import WordNetLemmatizer from nltk.stem.porter import PorterStemmer from nltk.corpus import stopwords import preprocessor as p import re # + id="JpT_S-EN5arH" # nltk.download('punkt') # nltk.download('wordnet') # nltk.download('stopwords') stop_words = set(stopwords.words('english')) # + id="7Mguzkp_DorD" # # !wget http://nlp.stanford.edu/data/glove.6B.zip # # !unzip -q glove.6B.zip # + colab={"base_uri": "https://localhost:8080/"} id="rBAegYEipqrl" outputId="221aee9e-4e85-4d9a-dacf-4765052e83dd" # %cd 550\ Final\ Project # + [markdown] id="p3qOhBJbpmDG" # Load the datasets # + id="x8n3A8EKpkrJ" train = pd.read_csv("data/Constraint_Train.csv") val = pd.read_csv("data/Constraint_Val.csv") test = pd.read_csv("data/english_test_with_labels.csv") train_c, train_l = train['tweet'].to_numpy(), train['label'].to_numpy() train_l = np.array([0 if i == 'fake' else 1 for i in train_l]) val_c, val_l = val['tweet'].to_numpy(), val['label'].to_numpy() val_l = np.array([0 if i == 'fake' else 1 for i in val_l]) test_c, test_l = test['tweet'].to_numpy(), test['label'].to_numpy() test_l = np.array([0 if i == 'fake' else 1 for i in test_l]) # print(train_c) # display(train.head()) # + [markdown] id="pL6aveYz744E" # Preprocess # + [markdown] id="i3EJj8zVJvoe" # --- # + id="CJjylA2n733O" wordnet_lemmatizer = WordNetLemmatizer() porter_stemmer = PorterStemmer() p.set_options(p.OPT.URL, p.OPT.EMOJI) def preprocess(row, lemmatizer, stemmer): txt = row txt = p.clean(txt) tokenization = nltk.word_tokenize(txt) tokenization = [w for w in tokenization if not w in stop_words] # txt = ' '.join([porter_stemmer.stem(w) for w in tokenization]) # txt = ' '.join([lemmatizer.lemmatize(w) for w in txt]) txt = re.sub(r'[^a-zA-Z ]', '', txt).lower().strip() return txt train_c = [preprocess(x, wordnet_lemmatizer, porter_stemmer) for x in train_c] val_c = [preprocess(x, wordnet_lemmatizer, porter_stemmer) for x in val_c] test_c = [preprocess(x, wordnet_lemmatizer, porter_stemmer) for x in test_c] # + [markdown] id="EP1SW0DXJsq1" # --- # + id="S5bAwFKea44B" # the model will remember only the top 20000 most common words max_words = 20000 max_len = 300 voc = np.array(train_c + val_c + test_c) token = Tokenizer(num_words=max_words, lower=True, split=' ') token.fit_on_texts(voc) sequences = token.texts_to_sequences(train_c) train_sequences_padded = pad_sequences(sequences, maxlen=max_len) sequences = token.texts_to_sequences(val_c) val_sequences_padded = pad_sequences(sequences, maxlen=max_len) sequences = token.texts_to_sequences(test_c) test_sequences_padded = pad_sequences(sequences, maxlen=max_len) # + id="EpRS9c05Sv7-" def net(): model = Sequential([ Embedding(max_words, 100, input_length=300), Conv1D(32, 8, activation='relu', padding="same"), MaxPooling1D(2), LSTM(32), Dense(10, activation="relu"), Dropout(0.5), Dense(1, activation="sigmoid") ]) opt = 
tf.keras.optimizers.Adam(learning_rate=0.0005) model.compile(loss=tf.keras.losses.BinaryCrossentropy(), optimizer=opt, metrics=['accuracy']) return model # + colab={"base_uri": "https://localhost:8080/"} id="_HoK6Z8qQqgO" outputId="7c714dc8-7d58-469a-9ee6-82abc6412129" model = net() model.fit(train_sequences_padded, train_l, batch_size=32, epochs=5, validation_data=(val_sequences_padded, val_l)) # + id="inENrYlzgv7f" pred = model.predict(test_sequences_padded) pred = pred.reshape(2140) pred = np.array([0 if i < 0.5 else 1 for i in pred]) # + colab={"base_uri": "https://localhost:8080/"} id="whnyAK2hhhVV" outputId="916214dd-4836-4eab-894e-3e76b8c8e991" acc = accuracy_score(test_l, pred) print(acc)
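# + [markdown]
# The GloVe download near the top of this notebook is not used by `net()`, whose Embedding
# layer is trained from scratch. A minimal sketch (assuming `glove.6B.100d.txt` has been
# unzipped into the working directory) of seeding an equivalent Embedding layer with the
# pretrained vectors for the same tokenizer:

# +
# Map each known word to its 100-d GloVe vector, then build a matrix aligned with the
# tokenizer's word index so row i holds the vector for the word with index i.
embedding_index = {}
with open('glove.6B.100d.txt', encoding='utf-8') as f:
    for line in f:
        values = line.split()
        embedding_index[values[0]] = np.asarray(values[1:], dtype='float32')

embedding_matrix = np.zeros((max_words, 100))
for word, idx in token.word_index.items():
    if idx < max_words and word in embedding_index:
        embedding_matrix[idx] = embedding_index[word]

# Drop-in alternative to the first layer of net(); the pretrained weights stay frozen here.
pretrained_embedding = Embedding(
    max_words, 100, input_length=max_len,
    embeddings_initializer=tf.keras.initializers.Constant(embedding_matrix),
    trainable=False)
# -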
fp_hybrid.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Running simulations # # The basic functionality for performing the AWS simulations is implemented by the `aws` package. With this in place, running simulations is a matter of setting up the simulations with the desired parameters and then executing them via IPyParallel. # %matplotlib inline import matplotlib.pyplot as plt import numpy as np import ipyparallel as ipp import os import glob from aws.retrieval import Simulation, Retrieval from aws.data import Profiles from aws.sensor import ATMS, AWS from aws import aws_path import random # ## Connecting to IPyParallel # # Assuming that the IPyParallel controller, hub and engines are running we create a client, which holds the connection to the controller. from aws.retrieval import Retrieval client = ipp.Client(profile="gold") view = client.load_balanced_view() # Since we are already running 4 engines on each of the gold machines, which only have 4 cores, we set the `OMP_NUM_THREADS` environment variable to 1 on all engines to avoid ARTS from launching to many threads. # # Note that this notebook is running on your local machine, not on the gold cluster. To execute a cell on the engines we need to use # the `%%px` cell magic, which executes the cell on the remote engines instead on the local machine. # %%px # %env OMP_NUM_THREADS=1 # ## Setting up the simulation # # The next step is to setup the simulation with the desired configuration. # ## Choosing the sensor # # Currently two sensor configurations are available: # # * `ATMS`: Simplified version of the upper channels of the # ATMS sensor. # * `AWS`: The channel setup to use for the full simulations. 
def simulate_orbit(filename): import os import numpy as np import os from aws.retrieval import Simulation, Retrieval from aws.data import Profiles from aws.sensor import ATMS, AWS from aws import aws_path # Simulation settings sensor = ATMS() ice_shape = np.random.choice(["Perpendicular3BulletRosette", "LargeColumnAggregate", "LargePlateAggregate"]) ice_shape_name = ice_shape.rjust(30) # Input data data_provider = Profiles(filename) retrieval = Retrieval(data_provider, ice_shape) data_provider.ice_shape = ice_shape_name # Setup the simulation simulation = Simulation(sensor, retrieval, ice_shape) # Output data inputs = [("ice_shape", ("name_length",)),] # List of inputs to store in results name, _ = os.path.splitext(os.path.basename(filename)) output_file = os.path.join(os.path.dirname(filename), "..", "ATMS", name + ".nc") # path = os.path.expanduser("~/aws/data") # output_file = os.path.join(path, name + ".nc") simulation.initialize_output_file(output_file, [("cases", -1, 0)], # Name of dimension for different simulations inputs=inputs) # Loop over profiles in file simulation.setup() append = True if append and "cases" in simulation.output_file.dimensions: n = simulation.output_file.dimensions["cases"] simulation.run_ranges(range(n, data_provider.n_profiles)) else: simulation.run_ranges(range(data_provider.n_profiles)) # ## Execute on view files = glob.glob(os.path.expanduser("~/Dendrite/Projects/AWS-325GHz/CasesV1/*.mat")) random.shuffle(files) # Try for 24 files for now results = view.map(simulate_orbit, files[:48], block=False) results.status # ## Run clearsky simulations def simulate_orbit_clearsky(filename): import os import numpy as np import os from aws.retrieval import Simulation, Retrieval from aws.data import Profiles from aws.sensor import ATMS, AWS from aws import aws_path # Simulation settings sensor = ATMS() ice_shape = np.random.choice(["Perpendicular3BulletRosette", "LargeColumnAggregate", "LargePlateAggregate"]) ice_shape_name = ice_shape.rjust(30) # Input data data_provider = Profiles(filename) retrieval = Retrieval(data_provider, ice_shape) data_provider.ice_shape = ice_shape_name # Setup the simulation simulation = Simulation(sensor, retrieval, ice_shape) # Output data inputs = [("ice_shape", ("name_length",)),] # List of inputs to store in results name, _ = os.path.splitext(os.path.basename(filename)) + "_clearsky" output_file = os.path.join(os.path.dirname(filename), "..", "ATMS", name + ".nc") simulation.initialize_output_file(output_file, [("cases", -1, 0)], # Name of dimension for different simulations inputs=inputs) # Loop over profiles in file simulation.setup() append = True if append: n = simulation.output_file.dimensions["cases"] simulation.run_ranges(range(n, data_provider.n_profiles), clearsky=True) else: simulation.run_ranges(range(data_provider.n_profiles), "wb", clearsky=True) files = glob.glob(os.path.expanduser("~/Dendrite/Projects/AWS-325GHz/CasesV1/*.mat")) print(files) # Try for 24 files for now results = view.map(simulate_orbit_clearsky, files[:24], block=False)
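# A minimal sketch (plain `ipyparallel` AsyncMapResult calls, not project-specific code) for
# monitoring the mapped jobs without blocking the notebook and for surfacing any exception
# raised on the engines:

print(f"{results.progress} of {len(files[:24])} tasks completed")
if results.ready():
    try:
        results.get()   # re-raises the first engine-side exception, if any
        print("all clearsky simulations finished cleanly")
    except Exception as err:
        print("at least one simulation failed:", err)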
notebooks/simulation/run_simulations_by_orbit.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="mz0tl581YjZ0" # ##### Copyright 2020 The TensorFlow Authors. # + cellView="form" id="hi0OrWAIYjZ4" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="gyGdPCvQYjaI" # # TensorFlow 애드온 콜백: TimeStopping # + [markdown] id="Z5csJXPVYjaM" # <table class="tfo-notebook-buttons" align="left"> # <td><a target="_blank" href="https://www.tensorflow.org/addons/tutorials/time_stopping"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">TensorFlow.org에서 보기</a></td> # <td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/addons/tutorials/time_stopping.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Google Colab에서 실행하기</a></td> # <td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/addons/tutorials/time_stopping.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub에서 소스 보기</a></td> # <td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/addons/tutorials/time_stopping.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">노트북 다운로드하기</a></td> # </table> # + [markdown] id="BJhody3KYjaP" # ## 개요 # # 이 노트북은 TensorFlow 애드온에서 TimeStopping 콜백을 사용하는 방법을 보여줍니다. # + [markdown] id="SaZsCaGbYjaU" # ## 설정 # + id="VgJGPL3ts_1i" # !pip install -U tensorflow-addons # + id="fm_dHPvEYjar" import tensorflow_addons as tfa from tensorflow.keras.datasets import mnist from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Dropout, Flatten # + [markdown] id="vg0y1DrQYja4" # ## 데이터 가져오기 및 정규화 # + id="HydkzZTuYja8" # the data, split between train and test sets (x_train, y_train), (x_test, y_test) = mnist.load_data() # normalize data x_train, x_test = x_train / 255.0, x_test / 255.0 # + [markdown] id="uX02I1kxYjbL" # ## 간단한 MNIST CNN 모델 빌드하기 # + id="Tlk0MyEfYjbN" # build the model using the Sequential API model = Sequential() model.add(Flatten(input_shape=(28, 28))) model.add(Dense(128, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(10, activation='softmax')) model.compile(optimizer='adam', loss = 'sparse_categorical_crossentropy', metrics=['accuracy']) # + [markdown] id="b5Xcyt0qYjbX" # ## 간단한 TimeStopping 사용법 # + id="W82_IZ6iYjbZ" # initialize TimeStopping callback time_stopping_callback = tfa.callbacks.TimeStopping(seconds=5, verbose=1) # train the model with tqdm_callback # make sure to set verbose = 0 to disable # the default progress bar. model.fit(x_train, y_train, batch_size=64, epochs=100, callbacks=[time_stopping_callback], validation_data=(x_test, y_test))
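# + [markdown]
# TimeStopping composes with the other Keras callbacks. A minimal sketch (standard
# `tf.keras` API, not part of the original tutorial) that caps training at 60 seconds while
# also stopping early when the validation loss stops improving:

# +
from tensorflow.keras.callbacks import EarlyStopping

callbacks = [
    tfa.callbacks.TimeStopping(seconds=60, verbose=1),   # hard wall-clock limit
    EarlyStopping(monitor='val_loss', patience=3),       # stop if val_loss stalls for 3 epochs
]

model.fit(x_train, y_train,
          batch_size=64,
          epochs=100,
          callbacks=callbacks,
          validation_data=(x_test, y_test))
# -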
site/ko/addons/tutorials/time_stopping.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/jair226/daa_2021_1/blob/master/02Diciembre.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="-HQyYn60IaAm"
# Recursion without a base case: this function calls itself forever and eventually
# hits Python's recursion limit.
def fnRecInfinita():
    print("Hello")
    fnRecInfinita()


# + id="Z4FIShq6IkNz"
# Recursion with a base case: count down from x and stop when 0 is reached.
def fnRec(x):
    if x == 0:
        print("stop")
    else:
        print(x)
        fnRec(x - 1)

def main():
    print("Start of the program")
    fnRec(5)
    print("End of the program")

main()


# + id="Wmrdi2N_Io1s"
# Recurse before printing, so the values come out in ascending order.
def printRev(x):
    if x > 0:
        printRev(x - 1)
        print(x)

printRev(3)


# + colab={"base_uri": "https://localhost:8080/"} id="KyI7ClUHI9Mw" outputId="d8908538-2cf5-499c-dcba-095e96be2ccd"
# Naive recursive Fibonacci: F(0)=0, F(1)=1, F(n)=F(n-1)+F(n-2).
def fibonacci(n):
    if n == 0 or n == 1:
        return n
    else:
        return fibonacci(n - 1) + fibonacci(n - 2)

print(fibonacci(8))
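# + [markdown]
# The recursive `fibonacci` above recomputes the same subproblems many times, so its cost
# grows exponentially with `n`. A minimal sketch, using only the standard library, of the
# memoized variant:

# +
from functools import lru_cache

@lru_cache(maxsize=None)
def fibonacci_memo(n):
    # Same recurrence, but each value of n is computed only once.
    if n == 0 or n == 1:
        return n
    return fibonacci_memo(n - 1) + fibonacci_memo(n - 2)

print(fibonacci_memo(8))  # 21, identical to the plain recursive version
# -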
02Diciembre.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import os import torch # + data_path = '../data/wn18rr' def read_triple(file_path, entity2id, relation2id): ''' Read triples and map them into ids. ''' triples = [] with open(file_path) as fin: for line in fin: h, r, t = line.strip().split('\t') triples.append((entity2id[h], relation2id[r], entity2id[t])) return triples with open(os.path.join(data_path, 'entities.dict')) as fin: entity2id = dict() for line in fin: eid, entity = line.strip().split('\t') entity2id[entity] = int(eid) with open(os.path.join(data_path, 'relations.dict')) as fin: relation2id = dict() for line in fin: rid, relation = line.strip().split('\t') relation2id[relation] = int(rid) train_triples = read_triple(os.path.join(data_path, 'train.txt'), entity2id, relation2id) valid_triples = read_triple(os.path.join(data_path, 'valid.txt'), entity2id, relation2id) test_triples = read_triple(os.path.join(data_path, 'test.txt'), entity2id, relation2id) # triples = train_triples + valid_triples + test_triples triples = test_triples triples = torch.LongTensor(triples) # - # # Categorize relations # + num_relations = len(relation2id) one_many, one_one, many_one, many_many = 0., 0., 0., 0. one_many_num, one_one_num, many_one_num, many_many_num = 0., 0., 0., 0. many_thresh = 1.5 relation_dict = {} for i in range(num_relations): relation_mask = (triples[:, 1] == i) if torch.sum(relation_mask) == 0: relation_dict[list(relation2id.keys())[i]] = 'None' continue head = triples[relation_mask, 0].data.tolist() tail = triples[relation_mask, 2].data.tolist() head = set(head) tail = set(tail) pairs = triples[relation_mask, :] pairs_tail = pairs[:, 2].unsqueeze(1).expand(-1, len(tail)) tensor_tail = torch.Tensor(list(tail)).view(1, len(tail)) n_heads = (tensor_tail == pairs_tail).sum(dim=0) avg_head = torch.mean(n_heads.float()) pairs_head = pairs[:, 0].unsqueeze(1).expand(-1, len(head)) tensor_head = torch.Tensor(list(head)).view(1, len(head)) n_tails = (tensor_head == pairs_head).sum(dim=0) avg_tail = torch.mean(n_tails.float()) n = torch.sum(relation_mask).item() if avg_head > many_thresh: if avg_tail > many_thresh: cat = 'M-M' many_many += 1 many_many_num += n else: cat = 'M-1' many_one += 1 many_one_num += n else: if avg_tail > many_thresh: cat = '1-M' one_many += 1 one_many_num += n else: cat = '1-1' one_one += 1 one_one_num += n relation_dict[list(relation2id.keys())[i]] = cat print(i, list(relation2id.keys())[i], cat, n, avg_head.item(), avg_tail.item()) # - relation_dict print(one_many, one_one, many_one, many_many) print(one_many_num, one_one_num, many_one_num, many_many_num) print(one_many_num + one_one_num + many_one_num + many_many_num) print(len(test_triples)) with open(os.path.join(data_path, 'relation_category.txt'), 'w') as f: for key, value in relation_dict.items(): f.write('%s\t%s\n' % (key, value))
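# The broadcasting above reduces to two ratios per relation: the mean number of heads per
# distinct tail equals (number of triples) / (number of distinct tails), and symmetrically
# for tails per head. A minimal sketch of the equivalent computation for one relation
# (`rel_id = 0` below is only an example value, not part of the original notebook):

# +
rel_id = 0  # any relation id in [0, num_relations)
rel_mask = (triples[:, 1] == rel_id)
rel_heads = triples[rel_mask, 0].tolist()
rel_tails = triples[rel_mask, 2].tolist()
if len(rel_heads) > 0:
    avg_head_simple = len(rel_heads) / len(set(rel_tails))  # mean heads per distinct tail
    avg_tail_simple = len(rel_tails) / len(set(rel_heads))  # mean tails per distinct head
    print(rel_id, avg_head_simple, avg_tail_simple)
# -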
misc/.ipynb_checkpoints/categorize_relations-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Guidelines for ETL Project # # This document contains guidelines, requirements, and suggestions for Project 1. # # ## Project Proposal # # Before you start writing any code, remember that you only have one week to complete this project. View this project as a typical assignment from work. Imagine a bunch of data came in and you and your team are tasked with migrating it to a production data base. # # Take advantage of your Instructor and TA support during office hours and class project work time. They are a valuable resource and can help you stay on track. # # ## Finding Data # # Your project must use 2 or more sources of data. We recommend the following sites to use as sources of data: # # * [data.world](https://data.world/) # # * [Kaggle](https://www.kaggle.com/) # # You can also use APIs or data scraped from the web. However, get approval from your instructor first. Again, there is only a week to complete this! # # ## Data Cleanup & Analysis # # Once you have identified your datasets, perform ETL on the data. Make sure to plan and document the following: # # * The sources of data that you will extract from. # # * The type of transformation needed for this data (cleaning, joining, filtering, aggregating, etc). # # * The type of final production database to load the data into (relational or non-relational). # # * The final tables or collections that will be used in the production database. # # You will be required to submit a final technical report with the above information and steps required to reproduce your ETL process. # # ## Project Report # # At the end of the week, your team will submit a Final Report that describes the following: # # * **E**xtract: your original data sources and how the data was formatted (CSV, JSON, MySQL, etc). # # * **T**ransform: what data cleaning or transformation was required. # # * **L**oad: the final database, tables/collections, and why this was chosen. # # Please upload the report to Github and submit a link to Bootcampspot. 
# # - - - # ### Example of ETL import pandas as pd from sqlalchemy import create_engine import numpy as np # Connect to local database #import pymysql #pymysql.install_as_MySQLdb() from sqlalchemy import create_engine engine = create_engine('postgres://postgres:postgres@localhost:5432/superhero_db') # + # Check for tables # - engine.table_names() conn = engine.connect() # + # Use pandas to load csv converted DataFrame into database # - characters_stats=pd.read_sql_query('select * from characters_stats', con=engine) characters_stats.head() characters_info=pd.read_sql_query('select * from characters_info', con=engine) characters_info.head() combined_data = pd.merge(characters_stats, characters_info, on="Name", how="inner") combined_data.head() # + #Who are the strongest strongest=combined_data.loc[:, ["Name", "Alignment_x", "Total","Publisher"]].sort_values(by="Total",ascending=False) strongest=pd.DataFrame(strongest,columns = ["Name", "Alignment_x", "Total","Publisher"]) renamed_strongest = strongest.rename(columns={"Alignment_x":"Alignment"}) hero=renamed_strongest[renamed_strongest['Alignment']=='good'] hero.head(10) # + villan=renamed_strongest[renamed_strongest['Alignment']=='bad'] villan.head(10) # + stats_df = combined_data.loc[:, ["Intelligence","Strength","Speed","Durability","Power","Combat","Total"]].sort_values(by="Total",ascending=False) stats_df.head(20) # - stats_percentage = stats_df.loc[:, ["Intelligence","Strength","Speed","Durability","Power","Combat", "Total"]].div(stats_df["Total"] / 100, axis=0) stats_percentage.head() characters_power=pd.read_sql_query('select * from characters_power', con=engine) characters_power.head() superheroes_power_matrix_df = characters_power.drop(columns=["Name"]) transpose_df = superheroes_power_matrix_df.apply(pd.Series.value_counts).T.reset_index() transpose_df = transpose_df.rename(columns={'index': 'Name'}) top_transpose_df=transpose_df.loc[:, ["Name", True] ].sort_values(by=True,ascending=False) renamed_top_transpose_df = top_transpose_df.rename(columns={"Name":"Skills","True":"Points"}) renamed_top_transpose_df.head()
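# The example above covers Extract and Transform against the superhero database but never
# writes anything back. A minimal sketch of the Load step (the table name
# `combined_characters` is only an illustration and assumes write access to the same engine):

# +
# Persist the merged DataFrame as a new table in the production database.
combined_data.to_sql('combined_characters', con=engine, if_exists='replace', index=False)
# The new table should now be listed alongside the source tables.
engine.table_names()
# -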
SuperHeroAnalysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Py3.7 (mimii_base_TF2_GPU) # language: python # name: mimiibasetf2gpu # --- # + import scipy.signal as sig import librosa import matplotlib.pyplot as plt def PSD_fileplot(file_path, PSD_window='hamming', PSD_nperseg = 128, PSD_nfft=512, PSD_scaling='spectrum', ChannelNr=[0], color='blue'): audio_ch, sr = librosa.load(file_path, sr=None, mono=False) for ch in ChannelNr: f, Pxx = sig.welch(audio_ch[ch],sr, window=PSD_window, nperseg=PSD_nperseg, noverlap=False, nfft=PSD_nfft, scaling=PSD_scaling) plt.plot(f, Pxx, color=color) ax = plt.gca() ax.set_xscale('log') ax.set_yscale('log') PSD_fileplot(r'A:\Dev\NF_Prj_MIMII_Dataset\dataset\6dB\pump\id_00\normal\00000001.wav') # -
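# Because the colour is a parameter, spectra from different machine states can be overlaid on
# the same axes. A minimal sketch; the abnormal file path below is hypothetical and only
# mirrors the directory layout of the normal example above:

# +
PSD_fileplot(r'A:\Dev\NF_Prj_MIMII_Dataset\dataset\6dB\pump\id_00\normal\00000001.wav',
             color='blue')
PSD_fileplot(r'A:\Dev\NF_Prj_MIMII_Dataset\dataset\6dB\pump\id_00\abnormal\00000001.wav',
             color='red')
plt.show()
# -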
utility/workshop/PSD_fileplot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Import the needed libraries import PicSureHpdsLib import pandas import matplotlib # ## Create an instance of the datasource adapter and get a reference to the data resource adapter = PicSureHpdsLib.BypassAdapter("http://pic-sure-hpds-nhanes:8080/PIC-SURE") resource = adapter.useResource() # ## Get a listing of all "demographics" entries in the data dictionary. Show what actions can be done with the "demographic_results" object demographic_entries = resource.dictionary().find("\\demographics\\") demographic_entries.help() # ## Examine the demographic_entries results by converting it into a pandas DataFrame demographic_entries.DataFrame() resource.query().help() resource.query().filter().help() # + query_male = resource.query() query_male.filter().add("\\demographics\\SEX\\", ["male"]) query_female = resource.query() query_female.filter().add("\\demographics\\SEX\\", ["female"]) # + field_age = resource.dictionary().find("\\AGE\\") field_BMI = resource.dictionary().find("\\Body Mass Index") query_male.require().add(field_age.keys()) query_male.require().add(field_BMI.keys()) query_female.require().add(field_age.keys()) query_female.require().add(field_BMI.keys()) query_female.show() # - # ## Convert the query results for females into a DataFrame and plot it by BMI and Age # + df_f = query_female.getResultsDataFrame() plot_f = df_f.plot.scatter(x="\\demographics\\AGE\\", y="\\examination\\body measures\\Body Mass Index (kg per m**2)\\", c="#ffbabb40") # ____ Uncomment if graphs are not displaying ____ #plot_f.plot() #matplotlib.pyplot.show() # - # ## Convert the query results for males into a DataFrame and plot it by BMI and Age # + df_m = query_male.getResultsDataFrame() plot_m = df_m.plot.scatter(x="\\demographics\\AGE\\", y="\\examination\\body measures\\Body Mass Index (kg per m**2)\\", c="#5a7dd040") # ____ Uncomment if graphs are not displaying ____ #plot_m.plot() #matplotlib.pyplot.show() # - # ## Replot the results using a single DataFrame containing both male and female # + d = resource.dictionary() criteria = [] criteria.extend(d.find("\\SEX\\").keys()) criteria.extend(d.find("\\Body Mass Index").keys()) criteria.extend(d.find("\\AGE\\").keys()) query_unified = resource.query() query_unified.require().add(criteria) df_mf = query_unified.getResultsDataFrame() # map a color field for the plot to use sex_colors = {'male':'#5a7dd040', 'female':'#ffbabb40'} df_mf['\\sex_color\\'] = df_mf['\\demographics\\SEX\\'].map(sex_colors) # plot data plot_mf = df_mf.plot.scatter(x="\\demographics\\AGE\\", y="\\examination\\body measures\\Body Mass Index (kg per m**2)\\", c=df_mf['\\sex_color\\']) # ____ Uncomment if graphs are not displaying ____ #plot_mf.plot() #matplotlib.pyplot.show() # - # ## Replot data but trim outliers # + q = df_mf["\\examination\\body measures\\Body Mass Index (kg per m**2)\\"].quantile(0.9999) # create a masked array to remove outliers test = df_mf.mask(df_mf["\\examination\\body measures\\Body Mass Index (kg per m**2)\\"] > q) # plot data plot_mf = test.plot.scatter(x="\\demographics\\AGE\\", y="\\examination\\body measures\\Body Mass Index (kg per m**2)\\", c=df_mf['\\sex_color\\']) # ____ Uncomment if graphs are not displaying ____ #plot_mf.plot() #matplotlib.pyplot.show() # -
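# Beyond plotting, the unified DataFrame can be summarised with plain pandas (no further
# PIC-SURE queries needed); a minimal sketch of per-sex BMI statistics using the column
# names returned above:

# +
bmi_col = "\\examination\\body measures\\Body Mass Index (kg per m**2)\\"
df_mf.groupby("\\demographics\\SEX\\")[bmi_col].describe()
# -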
jupyter-notebooks/BMI-Age Plot by Gender.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Activity 3.02: Creating a Bar Plot for Movie Comparison. # In this activity, we will use a bar plot to compare movie scores. You are given five movies with scores from Rotten Tomatoes. The Tomatometer is the percentage of approved Tomatometer critics who have given a positive review for the movie. The Audience Score is the percentage of users who have given a score of 3.5 or higher out of 5. Compare these two scores among the five movies. # + # Import statements import numpy as np import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline # - # Use pandas to read the data located in the subfolder data. # Load dataset movie_scores = pd.read_csv('../../Datasets/movie_scores.csv') # Use Matplotlib to create a visually-appealing bar plot comparing the two scores for all five movies. # Use the movie titles as labels for the x-axis. Use percentages in an interval of 20 for the y-axis and minor ticks in interval of 5. Add a legend and a suitable title to the plot. # Create figure plt.figure(figsize=(10, 5), dpi=300) # Create bar plot pos = np.arange(len(movie_scores['MovieTitle'])) width = 0.3 plt.bar(pos - width / 2, movie_scores['Tomatometer'], width, label='Tomatometer') plt.bar(pos + width / 2, movie_scores['AudienceScore'], width, label='Audience Score') # Specify ticks plt.xticks(pos, rotation=10) plt.yticks(np.arange(0, 101, 20)) # Get current Axes for setting tick labels and horizontal grid ax = plt.gca() # Set tick labels ax.set_xticklabels(movie_scores['MovieTitle']) ax.set_yticklabels(['0%', '20%', '40%', '60%', '80%', '100%']) # Add minor ticks for y-axis in the interval of 5 ax.set_yticks(np.arange(0, 100, 5), minor=True) # Add major horizontal grid with solid lines ax.yaxis.grid(which='major') # Add minor horizontal grid with dashed lines ax.yaxis.grid(which='minor', linestyle='--') # Add title plt.title('Movie comparison') # Add legend plt.legend() # Show plot plt.show()
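# A small optional refinement (assumes Matplotlib 3.4+ for `Axes.bar_label`, which the
# activity itself does not require): annotate each bar with its score so the exact
# percentages can be read off directly.

# Re-draw the grouped bars and write the raw scores on top of each bar
fig, ax = plt.subplots(figsize=(10, 5))
bars_t = ax.bar(pos - width / 2, movie_scores['Tomatometer'], width, label='Tomatometer')
bars_a = ax.bar(pos + width / 2, movie_scores['AudienceScore'], width, label='Audience Score')
ax.bar_label(bars_t, padding=2)
ax.bar_label(bars_a, padding=2)
ax.set_xticks(pos)
ax.set_xticklabels(movie_scores['MovieTitle'], rotation=10)
ax.legend()
plt.show()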
Chapter03/Activity3.02/Activity3.02.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="tFJJK7JDL6Q1" # # GenCode Explore # # Explore the human RNA sequences from GenCode. # # Assume user downloaded files from GenCode 38 [FTP](http://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_38/) # to a subdirectory called data. # # Improve on GenCode_Explore_101.ipynb # # Use ORF_counter. # # Use MatPlotLib to make box plots and heat maps. # + colab={"base_uri": "https://localhost:8080/"} id="eDgS-z1tL6Q2" outputId="0f75fd81-e513-4175-9c66-3985ecf75c00" import time def show_time(): t = time.time() s = time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(t)) print(s) show_time() # + colab={"base_uri": "https://localhost:8080/"} id="tckHr4SuL6Q5" outputId="7bb16c7d-5831-44fb-9c33-ab91151e60b1" import numpy as np import pandas as pd import gzip import sys try: from google.colab import drive IN_COLAB = True print("On Google CoLab, mount cloud-local file, get our code from GitHub.") PATH='/content/drive/' #drive.mount(PATH,force_remount=True) # hardly ever need this drive.mount(PATH) # Google will require login credentials DATAPATH=PATH+'My Drive/data/' # must end in "/" import requests s = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_describe.py') with open('RNA_describe.py', 'w') as f: f.write(s.text) # writes to cloud local, delete the file later? s = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/GenCodeTools.py') with open ('GenCodeTools.py', 'w') as f: f.write(s.text) s = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/plot_generator.py') with open('plot_generator.py', 'w') as f: f.write(s.text) s = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_gen.py') with open('RNA_gen.py', 'w') as f: f.write(s.text) from RNA_describe import * from GenCodeTools import * from plot_generator import * from RNA_gen import * except: print("CoLab not working. On my PC, use relative paths.") IN_COLAB = False DATAPATH='../data/' # must end in "/" sys.path.append("..") # append parent dir in order to use sibling dirs from SimTools.RNA_describe import * from SimTools.GenCodeTools import * from SimTools.plot_generator import * from SimTools.RNA_gen import * MODELPATH="BestModel" # saved on cloud instance and lost after logout #MODELPATH=DATAPATH+MODELPATH # saved on Google Drive but requires login if not assert_imported_RNA_describe(): print("ERROR: Cannot use RNA_describe.") # + id="qdPZNYUwL6Q6" PC_FILENAME='gencode.v38.pc_transcripts.fa.gz' NC_FILENAME='gencode.v38.lncRNA_transcripts.fa.gz' # + [markdown] id="-6bcVG0iL6Q9" # ## Load the GenCode data. # Warning: GenCode has # over 100K protein-coding RNA (mRNA) # and almost 50K non-coding RNA (lncRNA). # + colab={"base_uri": "https://localhost:8080/"} id="FwwunxFxL6Q-" outputId="a5cadf1b-ecbd-4bd6-8501-c6c8eee6a54b" # Full GenCode ver 38 human is 106143 pc + 48752 nc and loads in 7 sec. # Expect fewer transcripts if special filtering is used. 
PC_FULLPATH=DATAPATH+PC_FILENAME NC_FULLPATH=DATAPATH+NC_FILENAME loader=GenCodeLoader() show_time() loader.set_label(1) loader.set_check_list(None) loader.set_check_utr(True) pcdf=loader.load_file(PC_FULLPATH) print("PC seqs loaded:",len(pcdf)) show_time() loader.set_label(0) loader.set_check_list(None) loader.set_check_utr(False) ncdf=loader.load_file(NC_FULLPATH) print("NC seqs loaded:",len(ncdf)) show_time() # + [markdown] id="FvMZvbIKOkI3" # ###OPTIONS # --- # + id="suGzSVVJOmoV" SAMPLE_FRACTION = 0.5 REPRODUCABILITY_SEED = 314159 NUM_BINS = 8 BINNING_SCALE = 100 # + [markdown] id="FoOJgdkwPTS4" # ###Take sample of mRNA and lncRNA sequence data sets # --- # + id="7iW7zf4pL6Q_" pcdf_sample = pcdf.sample(frac=SAMPLE_FRACTION, random_state=REPRODUCABILITY_SEED) #Take a sample of a fraction of the mRNA data frame ncdf_sample = ncdf.sample(frac=SAMPLE_FRACTION, random_state=REPRODUCABILITY_SEED) #Take a sample of a fraction of the lncRNA data frame # + [markdown] id="pKPUQbz23T_f" # ###Generate bins # --- # + id="lr2EGm_-NOJJ" bins = [] for b in range(1, NUM_BINS + 1): bin = (2 ** (b) * BINNING_SCALE, 2 ** (b + 1) * BINNING_SCALE) bins.append(bin) NUM_BINS = len(bins) # + [markdown] id="uLr8gzjJvQCi" # ###Generate simulated random RNA sequences # # --- # + id="3sphjJXGvaXS" random.seed(REPRODUCABILITY_SEED) def generate_sequence(seq_len): """ Generate an RNA sequence of a given length. """ return "".join(random.choices(['A', 'C', 'G', 'T'], k=seq_len)) def generate_sim_sequences(): """ Generate random sequences of the bases A, C, G, and T. TODO: optimize Fastest runtime - 2.5 minutes Fastest runtime (w/ 0.5 sampling) ~ 1 minute """ sim_sequences = [] seq_cnt = (len(pcdf_sample) + len(ncdf_sample)) // 2 // NUM_BINS #Guarantees same number of sequences for each bin for bin in bins: for i in range(seq_cnt): sim_sequences.append(generate_sequence((bin[0] + bin[1]) // 2)) return sim_sequences # + colab={"base_uri": "https://localhost:8080/"} id="tvrGdy4P2tnL" outputId="d0f41f49-9588-4005-efb0-683b25e3a16c" sim_sequences = generate_sim_sequences() show_time() # + [markdown] id="zL192OyG3Ysu" # ###Bin sequences by sequence length # --- # + id="qudbA6e9HtON" def subset_list_by_len_bounds(input_list, min_len, max_len): return list(filter(lambda x: len(x) > min_len and len(x) < max_len, input_list)) # + colab={"base_uri": "https://localhost:8080/"} id="03eoMss5AIop" outputId="0c1e8a9e-143e-4b43-8f9f-ee5ae6eb0774" #Bin the RNA sequences binned_pc_sequences = [] binned_nc_sequences = [] binned_sim_sequences = [] for i in range(0, NUM_BINS): bin = bins[i] binned_pc_sequences.append([]) binned_nc_sequences.append([]) binned_sim_sequences.append([]) binned_pc_sequences[i] = subset_list_by_len_bounds(pcdf_sample['sequence'].tolist(), bin[0], bin[1]) binned_nc_sequences[i] = subset_list_by_len_bounds(ncdf_sample['sequence'].tolist(), bin[0], bin[1]) binned_sim_sequences[i] = subset_list_by_len_bounds(sim_sequences, bin[0], bin[1]) show_time() # + [markdown] id="UJpC4SpAE5cg" # ##Gather data on ORF lengths and the number of contained and non-contained ORFs # # --- # + colab={"base_uri": "https://localhost:8080/"} id="TMMnTdSiL6RD" outputId="24299728-e4eb-4a4a-bdbc-ead94b8ab0e8" #TODO: optimize. combine data? 
pc_max_len_data = np.empty(NUM_BINS, dtype=object) pc_max_cnt_data = np.empty(NUM_BINS, dtype=object) pc_contain_data = np.empty(NUM_BINS, dtype=object) nc_max_len_data = np.empty(NUM_BINS, dtype=object) nc_max_cnt_data = np.empty(NUM_BINS, dtype=object) nc_contain_data = np.empty(NUM_BINS, dtype=object) sim_max_len_data = np.empty(NUM_BINS, dtype=object) sim_max_cnt_data = np.empty(NUM_BINS, dtype=object) sim_contain_data = np.empty(NUM_BINS, dtype=object) oc = ORF_counter() for bin in range(0, NUM_BINS): pc_max_len_data[bin] = np.zeros(len(binned_pc_sequences[bin])) pc_max_cnt_data[bin] = np.zeros(len(binned_pc_sequences[bin])) pc_contain_data[bin] = np.zeros(len(binned_pc_sequences[bin])) nc_max_len_data[bin] = np.zeros(len(binned_nc_sequences[bin])) nc_max_cnt_data[bin] = np.zeros(len(binned_nc_sequences[bin])) nc_contain_data[bin] = np.zeros(len(binned_nc_sequences[bin])) sim_max_len_data[bin] = np.zeros(len(binned_sim_sequences[bin])) sim_max_cnt_data[bin] = np.zeros(len(binned_sim_sequences[bin])) sim_contain_data[bin] = np.zeros(len(binned_sim_sequences[bin])) #Gather protein-coding sequence data for seq in range(0, len(binned_pc_sequences[bin])): oc.set_sequence(binned_pc_sequences[bin][seq]) pc_max_len_data[bin][seq] = oc.get_max_orf_len() pc_max_cnt_data[bin][seq] = oc.count_maximal_orfs() pc_contain_data[bin][seq] = oc.count_contained_orfs() #Gather non-coding sequence data for seq in range(0, len(binned_nc_sequences[bin])): oc.set_sequence(binned_nc_sequences[bin][seq]) nc_max_len_data[bin][seq] = oc.get_max_orf_len() nc_max_cnt_data[bin][seq] = oc.count_maximal_orfs() nc_contain_data[bin][seq] = oc.count_contained_orfs() #Gather simulated sequence data for seq in range(0, len(binned_sim_sequences[bin])): oc.set_sequence(binned_sim_sequences[bin][seq]) sim_max_len_data[bin][seq] = oc.get_max_orf_len() sim_max_cnt_data[bin][seq] = oc.count_maximal_orfs() sim_contain_data[bin][seq] = oc.count_contained_orfs() show_time() # + [markdown] id="bveYgMe5Hg1B" # ##Prepare data for heatmap # # --- # + id="M6SBWyf6HwIZ" colab={"base_uri": "https://localhost:8080/"} outputId="e7fdd3c5-7e82-40ef-8134-44bb5067d829" #Get the means of all of the data mean_pc_max_len_data = np.zeros(NUM_BINS) mean_pc_max_cnt_data = np.zeros(NUM_BINS) mean_pc_contain_data = np.zeros(NUM_BINS) mean_nc_max_len_data = np.zeros(NUM_BINS) mean_nc_max_cnt_data = np.zeros(NUM_BINS) mean_nc_contain_data = np.zeros(NUM_BINS) mean_sim_max_len_data = np.zeros(NUM_BINS) mean_sim_max_cnt_data = np.zeros(NUM_BINS) mean_sim_contain_data = np.zeros(NUM_BINS) for i in range(0, NUM_BINS): mean_pc_max_len_data[i] = np.mean(pc_max_len_data[i]) mean_pc_max_cnt_data[i] = np.mean(pc_max_cnt_data[i]) mean_pc_contain_data[i] = np.mean(pc_contain_data[i]) mean_nc_max_len_data[i] = np.mean(nc_max_len_data[i]) mean_nc_max_cnt_data[i] = np.mean(nc_max_cnt_data[i]) mean_nc_contain_data[i] = np.mean(nc_contain_data[i]) mean_sim_max_len_data[i] = np.mean(sim_max_len_data[i]) mean_sim_max_cnt_data[i] = np.mean(sim_max_cnt_data[i]) mean_sim_contain_data[i] = np.mean(sim_contain_data[i]) show_time() # + [markdown] id="Yuhfv3tc9T12" # ###Prepare data for plot of bin sizes # # --- # + colab={"base_uri": "https://localhost:8080/"} id="K3xZCQfU8-yh" outputId="59a6c91f-87de-4f67-e9b6-6639b66607d3" pc_bin_sizes = np.zeros(NUM_BINS) nc_bin_sizes = np.zeros(NUM_BINS) sim_bin_sizes = np.zeros(NUM_BINS) for i in range(0, NUM_BINS): pc_bin_sizes[i] = len(binned_pc_sequences[i]) nc_bin_sizes[i] = len(binned_nc_sequences[i]) sim_bin_sizes[i] = 
len(binned_sim_sequences[i]) show_time() # + [markdown] id="MuXn7WiLJVIk" # ###Prepare data for plot of number of sequences with no ORFs and plot of number of sequences with max ORF lengths equal to or less than 100 # # --- # + id="eTWjyVivLCUJ" """ Count the number of values in a given data set that are within a given inclusive range. """ def count_data_in_range(data, min, max): return np.sum((data >= min) & (data <= max)) # + colab={"base_uri": "https://localhost:8080/"} id="a4huROzBJdLI" outputId="2006a757-703c-4949-8a36-f2bf47d63a5a" pc_no_orf_count = np.zeros(NUM_BINS) nc_no_orf_count = np.zeros(NUM_BINS) sim_no_orf_count = np.zeros(NUM_BINS) pc_max_orf_len_less_than_100 = np.zeros(NUM_BINS) nc_max_orf_len_less_than_100 = np.zeros(NUM_BINS) sim_max_orf_len_less_than_100 = np.zeros(NUM_BINS) for i in range(0, NUM_BINS): pc_no_orf_count[i] = count_data_in_range(pc_max_len_data[i], 0, 0) nc_no_orf_count[i] = count_data_in_range(nc_max_len_data[i], 0, 0) sim_no_orf_count[i] = count_data_in_range(sim_max_len_data[i], 0, 0) pc_max_orf_len_less_than_100[i] = count_data_in_range(pc_max_len_data[i], 0, 100) nc_max_orf_len_less_than_100[i] = count_data_in_range(nc_max_len_data[i], 0, 100) sim_max_orf_len_less_than_100[i] = count_data_in_range(sim_max_len_data[i], 0, 100) show_time() # + [markdown] id="0vtJ42ruTcJG" # ## Plot the data # # --- # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="8GzgZJXqTPz1" outputId="586ab9e5-351c-4ec0-d531-43c5527aa049" #Generate x-axis labels x_axis_labels = [] for bin in bins: x_axis_labels.append(str(bin[0]) + "-" + str(bin[1])) data_set_names = ['mRNA', 'lncRNA', 'sim'] #Set up plot generator pg = PlotGenerator() pg.set_text_options(45, 'right', 0, 'center') #Bar plots pg.set_text('Number of Sequences per Sequence Length Range', 'Sequence Length Ranges', 'Number of Sequences', x_axis_labels, None) pg.bar_plot([pc_bin_sizes, nc_bin_sizes, sim_bin_sizes], data_set_names) pg.set_text('Number of Sequences without ORFs', 'Sequence Length Ranges', 'Number of Sequences', x_axis_labels, None) pg.bar_plot([pc_no_orf_count, nc_no_orf_count, sim_no_orf_count], data_set_names) pg.set_text('Number of Sequences of Max ORF Length Equal to or Less than 100', 'Sequence Length Ranges', 'Number of Sequences', x_axis_labels, None) pg.bar_plot([pc_max_orf_len_less_than_100, nc_max_orf_len_less_than_100, sim_max_orf_len_less_than_100], data_set_names) #Box plots pg.set_axis_options('linear', 10, 'log', 2) pg.set_text('Length of Longest ORF in RNA Sequences', 'Sequence Length Ranges', 'ORF Length', x_axis_labels, None) pg.box_plot([pc_max_len_data, nc_max_len_data, sim_max_len_data], data_set_names, True) pg.set_text('Number of Non-contained ORFs in RNA Sequences', 'Sequence Length Ranges', 'Number of Non-contained ORFs', x_axis_labels, None) pg.box_plot([pc_max_cnt_data, nc_max_cnt_data, sim_max_cnt_data], data_set_names, True) pg.set_text('Number of Contained ORFs in RNA Sequences', 'Sequence Length Ranges', 'Number of Contained ORFs', x_axis_labels, None) pg.box_plot([pc_contain_data, nc_contain_data, sim_contain_data], data_set_names, True) #Heatmaps pg.set_axis_options('linear', 10, 'linear', 10) pg.set_text('mRNA Mean Longest ORF Length', 'Sequence Length Ranges', '', x_axis_labels, ['']) pg.heatmap([mean_pc_max_len_data]) pg.set_text('mRNA Mean Number of Non-contained ORFs', 'Sequence Length Ranges', '', x_axis_labels, ['']) pg.heatmap([mean_pc_max_cnt_data]) pg.set_text('mRNA Mean Number of Contained ORFs', 'Sequence Length Ranges', '', x_axis_labels, 
['']) pg.heatmap([mean_pc_contain_data]) pg.set_text('lncRNA Mean Longest ORF Length', 'Sequence Length Ranges', '', x_axis_labels, ['']) pg.heatmap([mean_nc_max_len_data]) pg.set_text('lncRNA Mean Number of Non-contained ORFs', 'Sequence Length Ranges', '', x_axis_labels, ['']) pg.heatmap([mean_nc_max_cnt_data]) pg.set_text('lncRNA Mean Number of Contained ORFs', 'Sequence Length Ranges', '', x_axis_labels, ['']) pg.heatmap([mean_nc_contain_data]) pg.set_text('sim Mean Longest ORF Length', 'Sequence Length Ranges', '', x_axis_labels, ['']) pg.heatmap([mean_sim_max_len_data]) pg.set_text('sim Mean Number of Non-contained ORFs', 'Sequence Length Ranges', '', x_axis_labels, ['']) pg.heatmap([mean_sim_max_cnt_data]) pg.set_text('sim Mean Number of Contained ORFs', 'Sequence Length Ranges', '', x_axis_labels, ['']) pg.heatmap([mean_sim_contain_data]) # + [markdown] id="YWM3UVGjCIT7" # # + [markdown] id="WvQyyhudL6RE" # ## Plotting examples # [boxplot doc](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.boxplot.html) # [boxplot demo](https://matplotlib.org/stable/gallery/pyplots/boxplot_demo_pyplot.html) # [heatmap examples](https://stackoverflow.com/questions/33282368/plotting-a-2d-heatmap-with-matplotlib) - scroll down!
Notebooks/GenCode_Explore_211.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Analysis of Variation Types in Feature Spaces # ## 1. Comparison of Performance of a Traditional Learner in Different Variation Patterns # We try to observe any recurring pattern in terms of performance when variation pattern differs. # + import model import dataloader as dl import numpy as np import trainer import parameters as p import copy import seaborn as sns import matplotlib.pyplot as plt import torch datasets = ['magic04']#['german', 'ionosphere', 'spambase', 'magic04', 'a8a'] model_type = 'hinge_oco'#'jeev_backprop' torch.manual_seed(p.random_state) np.random.seed(p.random_state) # initialize results dict results = {} masks = {} occurrences = {} losses = {} for dataset in datasets: results[dataset] = {} masks[dataset] = {} losses[dataset] = {} occurrences[dataset] = {} for scenario in p.scenarios: results[dataset][scenario] = 0 masks[dataset][scenario] = [] losses[dataset][scenario] = [] for scenario in p.scenarios: for dataset_name in datasets: Xpath, ypath = dl.get_path(dataset_name) X, y = dl.read_dataset(Xpath, ypath) num_features = len(X[0]) m = p.models[model_type](num_features, p.learning_rate) fold_errors, fold_losses, fold_weights, fold_masks, fold_lists = trainer.cross_validation(X, y, m, p.folds, p.scenarios[scenario]) masks[dataset_name][scenario] = fold_masks losses[dataset_name][scenario] = fold_losses results[dataset_name][scenario] = np.mean(fold_errors) print(dataset_name, scenario, model_type, np.mean(fold_errors)) print() # plot results for each dataset for dataset_name in datasets: plt.title(dataset_name) plt.ylabel('Error Rate') plt.bar(results[dataset_name].keys(), results[dataset_name].values()) plt.show() # - for key in masks: for scenario in p.scenarios: sum_masks = copy.deepcopy(masks[key][scenario][0]) for i in range(1, len(masks[key][scenario])): sum_masks += masks[key][scenario][i] avg_sum_masks = sum_masks / len(masks[key][scenario]) avg_sum_masks = np.sum(avg_sum_masks, axis=0) / len(sum_masks) occurrences[key][scenario] = avg_sum_masks plt.title(key + scenario) plt.plot(occurrences[key][scenario]) plt.show() # **We have made two observations so far:** # 1. Performance in non-uniformly distributed variation in feature spaces is worse than uniform. # 2. Checking the average availability frequency of features don't immediately give an explanation about this. # # After this, Jeev suggested to look at the co-occurrences of features. # ## Performance vs. Feature Co-Occurrence Patterns # Let us take the *german* dataset and see how feature co-occurrences look like in different versions of the variation. # To do this, we first merge the masks from different folds of cross-validation we have. # Masks represent the feature availability in a training instance, therefore, useful when we are working on structures of feature spaces. 
# # + def show_cov_mat(masks, dataset, setting): m = masks[dataset][setting] joint_mask = m[0] for i in range(1, len(m)): joint_mask = np.vstack((joint_mask, m[i])) cov_mat = np.cov(joint_mask.T) sns.heatmap(cov_mat) plt.show() return cov_mat settings = ['full', 'varying_uniform', 'varying_gaussian'] for dataset in datasets: for setting in settings: print(dataset) show_cov_mat(masks, dataset, setting) # - # We see that an important is that varying feature spaces distribution in a uniform fashion have low covariance -> feature co-occurrence. This means feature occurrences don't follow a particular pattern. On the other hand, for gaussian, there exists various amounts of covariance between features, starting to form a pattern. This seems to be making learning harder for some reason. # # **Note:** In both cases, the diagonal of the matrix seems to be 0.25. Diagonals of a covariance matrix are the variances of the components of the vector. # ## Relationship Between the Amount of Covariance in Varying Gaussian and the Traditional Model Performance # In this section, we take the generator of varying_gaussian and modify it in a way that it removes features in different levels of variation. variations = [0, 0.25, 0.5, 0.75] for variation in variations: p.cov_strength = variation print("Cov Strength: %f" % variation) # initialize results dict results = {} masks = {} occurrences = {} for dataset in datasets: results[dataset] = {} masks[dataset] = {} occurrences[dataset] = {} for scenario in p.scenarios: results[dataset][scenario] = 0 masks[dataset][scenario] = [] for scenario in ['varying_gaussian']: for dataset_name in p.datasets: Xpath, ypath = dl.get_path(dataset_name) X, y = dl.read_dataset(Xpath, ypath) num_features = len(X[0]) m = p.models[model_type](num_features, p.learning_rate) fold_errors, fold_losses, fold_weights, fold_masks = trainer.cross_validation(X, y, m, p.folds, p.scenarios[scenario]) masks[dataset_name][scenario] = fold_masks print('Total features received: %f' % np.sum(fold_masks)) print('Avg. features per instance: %f' % (np.sum(fold_masks) / p.folds / len(fold_masks[0]))) results[dataset_name][scenario] = np.mean(fold_errors) print(dataset_name, scenario, model_type, np.mean(fold_errors)) print() print() print() # # Additional Observations # 1. Jeev's backprop version performs better than mine for some reason -> understand why. # 2. Jeev's backprop version's performance degrades if local biases are removed. # 3. Performance won't change too much if the trainable weights in between the layers are removed. # 4. Performance won't change if weights are initialized to 0 instead of Xavier. plt.xlabel('instance (100th)') plt.ylabel('num. features per instance') plt.title('Constant Feature Space') plt.plot(np.sum(masks['magic04']['full'][0], axis=1)[::100]) plt.xlabel('instance (100th)') plt.ylabel('num. features per instance') plt.title('Varying Feature Space (uniform)') plt.plot(np.sum(masks['magic04']['varying_uniform'][0], axis=1)[::100]) plt.xlabel('instance (100th)') plt.ylabel('num. features per instance') plt.title('Varying Feature Space (Gaussian)') plt.plot(np.sum(masks['magic04']['varying_gaussian'][0], axis=1)[::100]) plt.xlabel('instance (100th)') plt.ylabel('num. features per instance') plt.title('Varying Feature Space (Gaussian) + 0.75') plt.plot(np.sum(masks['magic04']['varying_gaussian'][0], axis=1)[::100]) plt.xlabel('instance (100th)') plt.ylabel('num. 
features per instance') plt.title('Varying Feature Space (Gaussian) + 0.5') plt.plot(np.sum(masks['magic04']['varying_gaussian'][0], axis=1)[::100]) cov_mat = np.cov(masks['magic04']['varying_gaussian'][0].T) sns.heatmap(cov_mat) cov_mat_uni = np.cov(masks['magic04']['varying_uniform'][0].T) sns.heatmap(cov_mat_uni) cov_mat = np.cov(masks['magic04']['varying_gaussian'][0].T) sns.heatmap(cov_mat)
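# On the 0.25 diagonal noted earlier: each mask entry is a 0/1 availability indicator, so its
# variance is p(1-p), which equals 0.25 exactly when a feature is present with probability
# one half. A minimal sketch that rescales the covariance into a correlation matrix, which
# makes the co-occurrence structure easier to compare across settings:
corr_mat = np.corrcoef(masks['magic04']['varying_gaussian'][0].T)
sns.heatmap(corr_mat, vmin=-1, vmax=1)
plt.show()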
OnlinePredictiveCoding/.ipynb_checkpoints/variation_analysis-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: admercs # language: python # name: admercs # --- # # Generate Synth Dataset 01 # # Generate a dataset useful for the synth project. # # Preliminaries # %load_ext lab_black # ## Imports # + import numpy as np import pandas as pd import altair as alt from pathlib import Path # - from nba_anomaly_generator.data import load_lal from nba_anomaly_generator.anom import ( insert_dependency_anomaly, insert_contextual_anomaly, insert_swap_anomaly, insert_transformation_anomaly, ft_to_m, lb_to_kg, ) from nba_anomaly_generator.anom.utils import init_rng, init_row_idx # ## Constants # # Naming conventions etc. N_JOBS = 4 VERBOSE = 51 # + NBA_DATA_DIR = Path().resolve().parent.parent / "data" PLYR_DIR = NBA_DATA_DIR / "players" TEAM_DIR = NBA_DATA_DIR / "rosters" CLEAN_DIR = NBA_DATA_DIR / "clean" FP1 = CLEAN_DIR / "nba-synth-01-season-data.csv" FP2 = CLEAN_DIR / "nba-synth-01-aggregate-sal.csv" FP1 # + NORMALIZE = False CONTAMINATION = 5 DROP_NA = True # Row filter N_YEARS = 5 MAX_AGE = 40 BEGIN_SEASON = ( 2020 - N_YEARS ) # First season that we want in the eventual data (season filter) # Column Filter NUMERIC_COLUMNS_ONLY = False # + POI_LOOKUP = {"LeBron", "Carmelo", "Kawhi", "Harden"} SALARIES = dict( LeBron=37.44 * 10 ** 6, Harden=38.2 * 10 ** 6, Carmelo=2.159 * 10 ** 6, Kawhi=32.37 * 10 ** 6, ) # - # ### Attribute Types # # Also a kind of constant, but data-specific for obvious reasons... # + NOMINAL_ATTRIBUTES = [ "PLAYER_ID", "PLAYER_NAME", "TEAM_ABBREVIATION", ] NUMERIC_ATTRIBUTES = [ "SEASON", "PTS", "FGM", "FGA", "FG3M", "FG3A", "FTM", "FTA", "OREB", "DREB", "REB", "AST", "STL", "BLK", ] # - AGGREGATE_ATTRIBUTES = [ "PLAYER_ID", "PLAYER_NAME", "PTS", "REB", "AST", "STL", "BLK", ] # # Functions # + from sklearn.preprocessing import LabelEncoder, StandardScaler, MinMaxScaler def normalize(df, scaler=MinMaxScaler): columns_to_scale = [c for c in df.columns if "label" not in c] for c in columns_to_scale: df[c] = scaler().fit_transform(df[c].values.reshape(-1, 1)) return df def dataframe_to_dataset(df, copy=True): if copy: df = df.copy() relevant_cols = [c for c in df.columns if c not in {"a_lbl", "i", "cluster_label"}] return df[relevant_cols].values # - # # Create DataSet # # Create the standard NBA-dataset. # ## Collect # + dfs = [] for idx, fn in enumerate(PLYR_DIR.glob("*.csv")): df = pd.read_csv(fn, index_col=0) dfs.append(df) df = pd.concat(dfs) df.reset_index(drop=True, inplace=True) df.head() # - # ## Filter Player of Interest # + from nba_api.stats.static import players, teams player_dict = players.get_players() poi = [p for p in player_dict if any([l in p["full_name"] for l in POI_LOOKUP])] for p in poi: for l in POI_LOOKUP: if l in p["full_name"]: p["salary"] = SALARIES[l] poi_ids = {p["id"]: p["full_name"] for p in poi} poi_sal = {p["id"]: np.round(p["salary"], decimals=0) for p in poi} # - poi_sal poi_ids df = df[df["PLAYER_ID"].isin(poi_ids)] df["PLAYER_NAME"] = df.apply(lambda r: poi_ids[r.PLAYER_ID], axis=1) df.head() # ## Types and `None` # # Let us take a look at which attributes are in the dataset. df.columns.tolist() # Now let us see which one of these attributes are accounted for in the beginning of this notebook. 
set(df.columns) - set(NOMINAL_ATTRIBUTES) - set(NUMERIC_ATTRIBUTES) # Please verify that the attributes that are unaccounted for a truly not of interest, since they will not be included in the final dataset. if DROP_NA: print("Number of rows BEFORE dropping NA: {}".format(df.shape[0])) df = df.dropna() print("Number of rows AFTER dropping NA: {}".format(df.shape[0])) for attribute in NOMINAL_ATTRIBUTES + NUMERIC_ATTRIBUTES: if attribute in df.columns: if attribute in NOMINAL_ATTRIBUTES: df[attribute] = df[attribute].astype("category") if attribute in NUMERIC_ATTRIBUTES: df[attribute] = df[attribute].astype(float) df.dtypes, df.shape # ## Column Generation # ### Season Column def _season_id_to_season(season_id): return float(season_id.split("-")[0]) df["SEASON"] = df.apply(lambda r: _season_id_to_season(r.SEASON_ID), axis=1) df.head() # ## Row Filters # ### Season Filter # # Convenient subsampling to only get relatively recent data. Otherwise the dataset becomes huge, and also the very old data shows some every strange patterns. # + df = df[df.SEASON > BEGIN_SEASON] df # - # ## Column Filters # ### Desired Columns set(df.columns) - set(NOMINAL_ATTRIBUTES) - set(NUMERIC_ATTRIBUTES) df = df[NOMINAL_ATTRIBUTES + NUMERIC_ATTRIBUTES] df # ### Numeric only # # For now, this is a limitation of `ADMERCS` and also most other methods. if NUMERIC_COLUMNS_ONLY: print("Dropping all nominal columns") df = df[NUMERIC_ATTRIBUTES] pd.set_option("display.max_columns", 22) df.head(15) # ## Normalize # # Standard datascience practice. if NORMALIZE: print("normalizing...") df = normalize(df) print("normalizing done...") df.shape df.head(10) # ## Reindex # # This needs to be done before applying any algorithmic filters. df = df.reset_index(drop=True) df.head() # # Split In Two Tables # # We need an aggregated table as well. # df1 = df df1 # + df2 = df1.groupby(by=["PLAYER_ID"])[NUMERIC_ATTRIBUTES].mean() df2 = df2.drop(columns="SEASON", errors="ignore").reset_index() df2["PLAYER_NAME"] = df2.apply(lambda r: poi_ids[r.PLAYER_ID], axis=1) df2 = df2[AGGREGATE_ATTRIBUTES] df2["SALARY"] = df2.apply(lambda r: poi_sal[r.PLAYER_ID], axis=1) df2 # - # # Save # # Retain what you have created df1.to_csv(FP1, index=False) FP1.exists() df2.to_csv(FP2, index=False) FP2.exists() pd.read_csv(FP1).head() pd.read_csv(FP2).head()
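# A minimal sanity check (plain pandas; the merge below is illustrative and not part of the
# generation pipeline) that every per-season row in the first table links to a salary in the
# aggregate table without fan-out:

# +
linked = df1.merge(df2[["PLAYER_ID", "SALARY"]], on="PLAYER_ID", how="left")
assert len(linked) == len(df1)              # no duplicated rows from the merge
assert linked["SALARY"].notna().all()       # every season row found its player's salary
linked.head()
# -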
note/datasets/generate-synth-01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (herschelhelp_internal) # language: python # name: helpint # --- # # AKARI-SEP: Validation Checks (FULL) # + # %matplotlib inline # #%config InlineBackend.figure_format = 'svg' import matplotlib.pyplot as plt plt.rc('figure', figsize=(10, 6)) plt.style.use('ggplot') import numpy as np from astropy.table import Table import itertools import time # + t0 = time.time() catname = "../../dmu1/dmu1_ml_AKARI-SEP/data/master_catalogue_akari-sep_20180221.fits" master_catalogue = Table.read(catname) print('Elapsed time(secs): ', time.time() - t0) print("Number of sources in master catalogue: ", len(master_catalogue)) # - field = master_catalogue["field"][0] field = field.rstrip() # remove whitespaces at the end of the sting print(field) # + language="javascript" # IPython.OutputArea.auto_scroll_threshold = 9999; # + u_bands = [] g_bands = ["DECam g"] r_bands = ["DECam r"] i_bands = ["DECam i"] z_bands = ["DECam z"] y_bands = ["DECam y"] J_bands = [ "VISTA J"] H_bands = [ "VISTA H"] K_bands = [ "VISTA K"] all_bands = [] irac_mags = ["IRAC i1", "IRAC i2"] other_mags = [] opt_mags = g_bands + r_bands + i_bands + z_bands + y_bands ir_mags = J_bands + H_bands + K_bands + irac_mags all_mags = opt_mags + ir_mags + other_mags # - # ## 1/ Magnitude errors def mag_vs_err(x, y, fig, ax, labels=("x", "y"), savefig=False): x_label, y_label = labels print(x_label) # Use only finite values mask = np.isfinite(x) & np.isfinite(y) & (x!=99.) & (y!=99.) x = np.copy(x[mask]) y = np.copy(y[mask]) if len(x) > 0: print(" Error max: {:.0f}".format(np.max(y))) err10 = y > 10 if len(x[err10]) > 0: print(" magerr > 10: Number of objects = {:d}, min mag = {:.1f}".format(len(x[err10]), np.min(x[err10]))) else: print(" magerr > 10: Number of objects = {:d}, min mag = {:.1f}".format(len(x[err10]), np.nan)) err100 = y > 100 if len(x[err100]) > 0: print(" magerr > 100: Number of objects = {:d}, min mag = {:.1f}".format(len(x[err100]), np.min(x[err100]))) else: print(" magerr > 100: Number of objects = {:d}, min mag = {:.1f}".format(len(x[err100]), np.nan)) err1000 = y > 1000 if len(x[err1000]) > 0: print(" magerr > 1000: Number of objects = {:d}, min mag = {:.1f}".format(len(x[err1000]), np.min(x[err1000]))) else: print(" magerr > 1000: Number of objects = {:d}, min mag = {:.1f}".format(len(x[err1000]), np.nan)) else: print(" no data") print("") # Plot ax.set_yscale('log') # to place before scatter to avoid issues ax.scatter(x, y, marker='.', alpha=0.1, s=50) ax.invert_xaxis() #ax.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter()) #ax.get_xaxis().get_major_formatter().labelOnlyBase = False ax.set_xlabel(labels[0]) ax.set_ylabel(labels[1]) # Save ex. 
fig if savefig: survey_label = ((x_label.replace(" ", "_")).replace("(", "")).replace(")", "") figname = field + "_magVSmagerr_" + survey_label + ".png" plt.savefig("/data/help/plots/" + figname, bbox_inches='tight') #plt.show() for mag in all_mags: fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(16, 6)) basecol = mag.replace(" ", "_").lower() if basecol == "decam_g": savefig = True else: savefig=False col, ecol = "m_ap_{}".format(basecol), "merr_ap_{}".format(basecol) mag_vs_err(master_catalogue[col], master_catalogue[ecol], fig, ax1, labels=("{} mag (aperture)".format(mag), "{} magerr (aperture)".format(mag)), savefig=False) col, ecol = "m_{}".format(basecol), "merr_{}".format(basecol) mag_vs_err(master_catalogue[col], master_catalogue[ecol], fig, ax2, labels=("{} mag (total)".format(mag), "{} magerr (total)".format(mag)), savefig=savefig) display(fig) plt.close() # ## 2/ Magnitude and error issues def flag_mag(mask, x1, y1, x2, y2, mask2=None, x3=None, y3=None, mask3=None, labels1=("x", "y"), labels2=("x", "y"), labels3=("x", "y"), nb=2, savefig=False): if nb == 2 or (nb == 1 and x3 is None): fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(16, 6)) else: fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(16, 6)) # mag vs magerr ax1.set_yscale('log') # to place before scatter to avoid issues ax1.scatter(x1, y1, marker='.', alpha=0.1, s=50) ax1.plot(x1[mask], y1[mask], 'b.') if mask2 is not None and nb >= 2: ax1.plot(x1[mask2], y1[mask2], 'g.') ax1.invert_xaxis() ax1.set_xlabel(labels1[0]) ax1.set_ylabel(labels1[1]) if nb == 1: # IRAC mag ax2.set_yscale('log') # to place before scatter to avoid issues ax2.scatter(x2, y2, marker='.', alpha=0.1, s=50) ax2.plot(x2[mask2], y2[mask2], 'b.') ax2.invert_xaxis() ax2.set_xlabel(labels2[0]) ax2.set_ylabel(labels2[1]) if nb == 1 and x3 is not None: # IRAC mag with i3 ax3.set_yscale('log') # to place before scatter to avoid issues ax3.scatter(x3, y3, marker='.', alpha=0.1, s=50) ax3.plot(x3[mask3], y2[mask3], 'b.') ax3.invert_xaxis() ax3.set_xlabel(labels3[0]) ax3.set_ylabel(labels3[1]) # Comparing magnitudes if nb >= 2: ax2.scatter(x2, y2, marker='.', alpha=0.1, s=50) ax2.plot(x2[mask], y2[mask], 'b.') if mask2 is not None: ax2.plot(x2[mask2], y2[mask2], 'g.') ax2.invert_xaxis() ax2.invert_yaxis() ax2.set_xlabel(labels2[0]) ax2.set_ylabel(labels2[1]) if nb >= 3: ax3.scatter(x3, y3, marker='.', alpha=0.1, s=50) ax3.plot(x3[mask], y3[mask], 'b.') if mask2 is not None: ax3.plot(x3[mask2], y3[mask2], 'g.') ax3.invert_xaxis() ax3.invert_yaxis() ax3.set_xlabel(labels3[0]) ax3.set_ylabel(labels3[1]) # Save ex. 
fig if savefig: survey_label = ((labels1[0].replace(" ", "_")).replace("(", "")).replace(")", "") if "GPC1 " in labels1[0]: figname = field + "_gpc1Issues_" + survey_label + ".png" elif "DECam" in labels1[0]: figname = field + "_decamIssues_" + survey_label + ".png" elif "IRAC" in labels1[0]: figname = field + "_iracIssues_i1_i2.png" plt.savefig("/data/help/plots/" + figname, bbox_inches='tight') display(fig) plt.close() # ### 2.c IRAC Aperture magnitude # + irac_mag = 3.9000000001085695 bands = ['IRAC i1', 'IRAC i2'] basecol1, basecol2 = bands[0].replace(" ", "_").lower(), bands[1].replace(" ", "_").lower() col1, col2 = "m_ap_{}".format(basecol1), "m_ap_{}".format(basecol2) ecol1, ecol2 = "merr_ap_{}".format(basecol1), "merr_ap_{}".format(basecol2) mask1 = np.where(master_catalogue[col1] == irac_mag)[0] print ('IRAC i1: Number of flagged objects:', len(master_catalogue[col1][mask1])) mask2 = np.where(master_catalogue[col2] == irac_mag)[0] print ('IRAC i2: Number of flagged objects:', len(master_catalogue[col2][mask2])) flag_mag(mask1, master_catalogue[col1], master_catalogue[ecol1], master_catalogue[col2], master_catalogue[ecol2], mask2=mask2, labels1=("{} mag (aperture)".format(bands[0]), "{} magerr (aperture)".format(bands[0])), labels2=("{} mag (aperture)".format(bands[1]), "{} magerr (aperture)".format(bands[1])), nb=1, savefig=True) # - # ## 3/ Outliers # $$chi^2 = \frac{(mag_{1}-mag_{2})^2}{magerr_{1}^2 + magerr_{2}^2}$$ # <br> # __Interquartile range (IQR) and outliers:__<br> # We consider as outliers objects which have a high $chi^2$, about $5\sigma$ away from the mean.<br> # $25th, 75th \;percentile = 0.6745\sigma$<br> # $IQR = (75th \;percentile - 25th \;percentile) = 0.6745\sigma * 2 = 1.349\sigma$<br> # $75th \;percentile + 3.2\times IQR = 0.6745\sigma + 3.2\times1.349\sigma = 5\sigma$ # <br><br> # $$outliers == [chi^2 > (75th \;percentile + 3.2\times (75th \;percentile - 25th \;percentile))]$$ # <br><br> # __NB:__<br> # Bright sources tend to have their errors underestimated with values as low as $10^{-6}$, which is unrealistic. So to avoid high $chi^2$ due to unrealistic small errors, we clip the error to get a minimum value of 0.1% (i.e. all errors smaller then $10^{-3}$ are set to $10^{-3}$). def outliers(x, y, xerr, yerr, labels=["x", "y"], savefig=False): import matplotlib import matplotlib.gridspec as gridspec from astropy import visualization as vz fig = plt.figure(figsize=(13, 6)) gs1 = gridspec.GridSpec(1, 1) gs1.update(left=0.05, right=0.4, wspace=0.05) ax1 = plt.subplot(gs1[:, :-1]) gs2 = gridspec.GridSpec(1, 3) gs2.update(left=0.47, right=0.98, hspace=0.05, wspace=0.05) ax2 = plt.subplot(gs2[:, :-1]) ax3 = plt.subplot(gs2[:, -1], sharey=ax2) # Use only finite values mask = np.isfinite(x) & np.isfinite(y) & np.isfinite(xerr) & np.isfinite(yerr) & (xerr !=99.) & (yerr !=99.) x = np.copy(x[mask]) y = np.copy(y[mask]) xerr = np.copy(xerr[mask]) yerr = np.copy(yerr[mask]) # mag1 - mag2 diff = y - x x_label, y_label = labels # If the difference is all NaN there is nothing to compare. 
if np.isnan(diff).all(): print("No sources have both {} and {} values.".format( x_label, y_label)) print("") return # Set the minimum error to 10^-3 np.clip(xerr, 1e-3, np.max(xerr), out=xerr) np.clip(yerr, 1e-3, np.max(yerr), out=yerr) # Median, Median absolute deviation and 1% and 99% percentiles diff_median = np.median(diff) diff_mad = np.median(np.abs(diff - diff_median)) diff_1p, diff_99p = np.percentile(diff, [1., 99.]) diff_25p, diff_75p = np.percentile(diff, [25., 75.]) diff_label = "{} - {}".format(y_label, x_label) print("{} ({} sources):".format(diff_label, len(x))) print("- Median: {:.2f}".format(diff_median)) print("- Median Absolute Deviation: {:.2f}".format(diff_mad)) print("- 1% percentile: {}".format(diff_1p)) print("- 99% percentile: {}".format(diff_99p)) # Chi2 (Normalized difference) ichi2 = np.power(diff, 2) / (np.power(xerr, 2) + np.power(yerr, 2)) # Use only non-null values of ichi2 mask2 = ichi2 != 0.0 diff, ichi2 = np.copy(diff[mask2]), np.copy(ichi2[mask2]) x, y, xerr, yerr = np.copy(x[mask2]), np.copy(y[mask2]), np.copy(xerr[mask2]), np.copy(yerr[mask2]) # Outliers (5sigma) log_ichi2_25p, log_ichi2_75p = np.percentile(np.log10(ichi2), [25., 75.]) out_lim = log_ichi2_75p + 3.2*abs(log_ichi2_25p-log_ichi2_75p) outliers = np.log10(ichi2) > out_lim nb_outliers = len(x[outliers]) print("Outliers separation: log(chi2) = {:.2f}".format(out_lim)) print("Number of outliers: {}".format(nb_outliers)) print("") # Comparing mag ax1.scatter(x, y, marker='.', alpha=0.1, s=50) ax1.scatter(x[outliers], y[outliers], marker='.', c='b', alpha=0.3, s=50, label='Outliers ({})'.format(nb_outliers)) min_val = np.min(np.r_[x, y]) max_val = np.max(np.r_[x, y]) ax1.autoscale(False) ax1.plot([min_val, max_val], [min_val, max_val], "k:") ax1.invert_xaxis() ax1.invert_yaxis() ax1.set_xlabel(x_label) ax1.set_ylabel(y_label) ax1.legend(loc='lower right', numpoints=1) # Chi2 vs Diff #ax1.set_yscale('log') # to place before scatter to avoid issues ax2.scatter(diff, np.log10(ichi2), marker='.', alpha=0.1, s=50) if nb_outliers != 0: ax2.scatter(diff[outliers], np.log10(ichi2[outliers]), marker='.', alpha=0.3, s=50, color='b',\ label='Outliers ({})'.format(nb_outliers)) ax2.axhline(out_lim, color='grey', linestyle=':') ax2.set_xlabel(diff_label) ax2.set_ylabel('log(chi2)') ax2.legend(loc='lower right', numpoints=1) # Hist n, bins, patches = vz.hist(np.log10(ichi2), ax=ax3, bins='knuth', facecolor='red', lw = 2, alpha=0.5,\ orientation="horizontal") if nb_outliers > 3: n, bins, patches = vz.hist(np.log10(ichi2[outliers]), ax=ax3, bins='knuth', facecolor='b', lw = 2, alpha=0.7,\ orientation="horizontal") ax3.axhline(out_lim, color='grey', linestyle=':') ax3.yaxis.set_tick_params(labelleft=False) # Save ex. 
fig if savefig: survey_label = ((diff_label.replace(" ", "_")).replace("(", "")).replace(")", "") figname = field + "_outliers_" + survey_label + ".png" plt.savefig("/data/help/plots/" + figname, bbox_inches='tight') display(fig) plt.close() for band_of_a_kind in all_bands: for band1, band2 in itertools.combinations(band_of_a_kind, 2): basecol1, basecol2 = band1.replace(" ", "_").lower(), band2.replace(" ", "_").lower() if basecol1 == "decam_z" and basecol2 == "vista_z": savefig = True else: savefig = False # Aperture mag col1, col2 = "m_ap_{}".format(basecol1), "m_ap_{}".format(basecol2) ecol1, ecol2 = "merr_ap_{}".format(basecol1), "merr_ap_{}".format(basecol2) outliers(master_catalogue[col1], master_catalogue[col2], master_catalogue[ecol1], master_catalogue[ecol2], labels=("{} (aperture)".format(band1), "{} (aperture)".format(band2))) # Tot mag col1, col2 = "m_{}".format(basecol1), "m_{}".format(basecol2) ecol1, ecol2 = "merr_{}".format(basecol1), "merr_{}".format(basecol2) outliers(master_catalogue[col1], master_catalogue[col2], master_catalogue[ecol1], master_catalogue[ecol2], labels=("{} (total)".format(band1), "{} (total)".format(band2)), savefig=savefig) # ## 4. Aperture correction issues def apcor_check(x, y, stellarity, labels=["x", "y"], savefig=False): import matplotlib.gridspec as gridspec from astropy import visualization as vz #fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(16, 6)) # fig = plt.figure(figsize=(13, 6)) gs1 = gridspec.GridSpec(1, 1) gs1.update(left=0.05, right=0.4, wspace=0.05) ax1 = plt.subplot(gs1[:, :-1]) gs2 = gridspec.GridSpec(1, 3) gs2.update(left=0.47, right=0.98, hspace=0.05, wspace=0.05) ax2 = plt.subplot(gs2[:, :-1]) ax3 = plt.subplot(gs2[:, -1], sharey=ax2) # Use only finite values mask = np.isfinite(x) & np.isfinite(y) & np.isfinite(stellarity) x = np.copy(x[mask]) y = np.copy(y[mask]) stellarity = np.copy(stellarity[mask]) diff = y - x x_label, y_label = labels # If the difference is all NaN there is nothing to compare. if np.isnan(diff).all(): print("No sources have both {} and {} values.".format( x_label, y_label)) print("") return diff_label = "{} - {}".format(y_label, x_label) print("{}:".format(diff_label)) # Subsample zoom = (x > 16) & (x < 20) # Comparing mag ax1.scatter(x, diff, marker='.', alpha=0.1, s=50) ax1.invert_xaxis() ax1.set_ylabel(diff_label) ax1.set_xlabel(x_label) # Zoom Plot y_min, y_max = np.percentile(diff[zoom], [1., 99.]) y_delta = .1 * (y_max - y_min) y_min -= y_delta y_max += y_delta if len(x[zoom]) < 1000: alpha = 0.4 else: alpha = 0.1 print(len(x[zoom])) pl = ax2.scatter(x[zoom], diff[zoom], marker='.', alpha=alpha, s=50, c=stellarity[zoom], cmap="jet") ax2.invert_xaxis() ax2.set_ylabel(diff_label) ax2.set_xlabel(x_label) ax2.set_ylim([y_min, y_max]) fig.colorbar(pl, label="stellarity (1=star)") #ax2.legend(loc='lower right', numpoints=1) # Hist n, bins, patches = vz.hist(diff[zoom], ax=ax3, bins='knuth', facecolor='black', lw = 2, alpha=0.5,\ orientation="horizontal") ax3.yaxis.set_tick_params(labelleft=False) # Save ex. 
fig if savefig: survey_label = ((diff_label.replace(" ", "_")).replace("(", "")).replace(")", "") figname = field + "_apcorrIssues_" + survey_label + ".png" plt.savefig("/data/help/plots/" + figname, bbox_inches='tight') display(fig) plt.close() for band_of_a_kind in all_bands: for band1, band2 in itertools.combinations(band_of_a_kind, 2): basecol1, basecol2 = band1.replace(" ", "_").lower(), band2.replace(" ", "_").lower() if basecol1 == "decam_y" and basecol2 == "vista_y": savefig = True else: savefig = False # Aperture mag col1, col2 = "m_ap_{}".format(basecol1), "m_ap_{}".format(basecol2) apcor_check(master_catalogue[col1], master_catalogue[col2], master_catalogue['stellarity'], labels=("{} (aperture)".format(band1), "{} (aperture)".format(band2)), savefig=savefig)
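# A side note added for clarity (not part of the original checks): the band-to-column mapping used throughout this notebook ("DECam g" becomes `m_ap_decam_g` / `merr_ap_decam_g`, and so on) can be wrapped in a small helper so the string formatting lives in one place.

def band_columns(band, aperture=True):
    """Return the (magnitude, magnitude error) column names for a band label."""
    basecol = band.replace(" ", "_").lower()
    if aperture:
        return "m_ap_{}".format(basecol), "merr_ap_{}".format(basecol)
    return "m_{}".format(basecol), "merr_{}".format(basecol)

band_columns("DECam g"), band_columns("VISTA K", aperture=False)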
dmu6/dmu6_v_AKARI-SEP/help_akari-sep_checks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Theory # # ## Forward-Propagation # # $$ # X \rightarrow Z=WX+b_1 \rightarrow H=\sigma(Z) \rightarrow U = CH + b_2\rightarrow S=F_{softmax}(U) \rightarrow \rho(S, y) = \log S_y, # $$ # where $S_y=\frac{\exp(U_y)}{\sum_{j=0}^{K-1}\exp(U_j)}$ is the y-th element of the $S$ and $U_y$ is the y-th element of the $U$. # ## Backward-Propagation # # $$ # \frac{\partial \rho}{\partial U_t} = # \begin{cases} # S_t(U), & t\neq y \\ # 1- S_t(U). & t = y # \end{cases} # \Longrightarrow # \frac{\partial \rho}{\partial U} = e_y - S(U), # $$ # # where $e_y$ is the unit vector, which y-th coordinate equals to 1 and 0 elsewhere. # # \begin{align*} # & \frac{\partial \rho}{\partial b_2} = \frac{\partial \rho}{\partial U}\frac{\partial U}{\partial b_2} = e_y - S(U) \\ # & \frac{\partial \rho}{\partial C} = \frac{\partial \rho}{\partial U}\frac{\partial U}{\partial C} = (e_y - S(U))H^T \\ # & \frac{\partial \rho}{\partial H} = \frac{\partial \rho}{\partial U}\frac{\partial U}{\partial H} = C^T\frac{\partial \rho}{\partial U} =C^T(e_y - S(U)) \\ # & \frac{\partial \rho}{\partial b_1} = \frac{\partial \rho}{\partial H}\frac{\partial H}{\partial Z}\frac{\partial Z}{\partial b_1} = \frac{\partial \rho}{\partial H} \odot \sigma'(Z)\\ # & \frac{\partial \rho}{\partial W} = \big(\frac{\partial \rho}{\partial H} \odot \sigma'(Z)\big)X^T # \end{align*} # ## Algorithm # Mini-Batch Stochastic gradient algorithm for updating $\theta = \{W, b_1, C, b_2\}$: # * Step1: Specify batch_size $M$, activation function $\sigma(z)$, and initialize $W^{(0)}, b_1^{(0)}, C^{(0)}, b_2^{(0)}$; # * Step2: At iteration $t$: # * a. Select $M$ data samples $\{X^{(t,m)},y^{(t,m)}\}_{m=1}^M$ uniform at random from the full dataset $\{X^{(n)},y^{(n)}\}_{n=1}^N$ # * b. Compute forward-propagation: # * $Z^{(t,m)}=W^{(t)}X^{(t,m)}+b_1^{(t)}$ # * $H^{(t,m)}=\sigma(Z^{(t,m)})$ # * $U^{(t,m)} = C^{(t)}H^{(t,m)} + b_2^{(t)}$ # * $S^{(t,m)}=F_{softmax}(U^{(t,m)})$ # * c. Compute backward-propagation: # * $\frac{\partial \rho}{\partial b_2} = \frac{1}{M}\sum_{m=1}^M e_{y^{(t,m)}} - S^{(t,m)}$ # * $\frac{\partial \rho}{\partial C} = \frac{1}{M}\sum_{m=1}^M (e_{y^{(t,m)}} - S^{(t,m)}){H^{(t,m)}}^T$ # * $\frac{\partial \rho}{\partial H} = \frac{1}{M}\sum_{m=1}^M C^T(e_{y^{(t,m)}} - S^{(t,m)})$ # * $\frac{\partial \rho}{\partial b_1} = \frac{1}{M}\sum_{m=1}^M \frac{\partial \rho}{\partial H} \odot \sigma'(Z^{(t,m)})$ # * $\frac{\partial \rho}{\partial W} = \frac{1}{M}\sum_{m=1}^M \big(\frac{\partial \rho}{\partial H} \odot \sigma'(Z^{(t,m)})\big){X^{(t,m)}}^T$ # * Given learning rate $\eta_t$, update parameters as follows: # * $b_2^{(t+1)}) \leftarrow b_2^{(t)}) + \eta_t \frac{\partial \rho}{\partial b_2}$ # * $C^{(t+1)}) \leftarrow C^{(t)}) + \eta_t \frac{\partial \rho}{\partial C}$ # * $b_1^{(t+1)}) \leftarrow b_1^{(t)}) + \eta_t \frac{\partial \rho}{\partial b_1}$ # * $W^{(t+1)}) \leftarrow W^{(t)}) + \eta_t \frac{\partial \rho}{\partial W}$ # * Step3: Repeat Step2 until some convergence criteria is met. # To avoid unnecessary `for-loop` we can vectoruize the above algorithm. # # * Step1: Specify batch_size $M$, activation function $\sigma(z)$, and initialize $W^{(0)}, b_1^{(0)}, C^{(0)}, b_2^{(0)}$; # * Step2: At iteration $t$: # * a. 
Select $M$ data samples $\{X^{(t,m)},y^{(t,m)}\}_{m=1}^M$ uniform at random from the full dataset $\{X^{(n)},y^{(n)}\}_{n=1}^N$ # * b. Compute forward-propagation: # * $Z^{(t)}=W^{(t)}X^{(t)}+b_1^{(t)}$, where $X^{(t)} = (X^{(t,1)},...,X^{(t,M)})$ and the summation on $b_1$ will be column-wise. # * $H^{(t)}=\sigma(Z^{(t)})$, where $H^{(t)} = (H^{(t,1)},...,H^{(t,M)})$ and $\sigma(.)$ is element wise operation. # * $U^{(t)} = C^{(t)}H^{(t)} + b_2^{(t)}$ # * $S^{(t)}=F_{softmax}(U^{(t)})$, where the $F_{softmax}$ is column-wise operation. # * c. Compute backward-propagation: # * $\frac{\partial \rho}{\partial b_2} = \text{np.mean}(e_{y^{(t)}} - S^{(t)}, \text{axis=1})$ # * $\frac{\partial \rho}{\partial C} = \frac{1}{M} (e_{y^{(t)}} - S^{(t)}){H^{(t)}}^T$ # * $\frac{\partial \rho}{\partial H} = \text{np.mean}(C^T(e_{y^{(t)}} - S^{(t)}), \text{axis=1})$ # * $\frac{\partial \rho}{\partial b_1} = \text{np.mean}(\frac{\partial \rho}{\partial H} \odot \sigma'(Z^{(t)}), \text{axis=1})$ # * $\frac{\partial \rho}{\partial W} = \frac{1}{M}(\big(\frac{\partial \rho}{\partial H} \odot \sigma'(Z^{(t)})\big){X^{(t)}}^T)$ # * Given learning rate $\eta_t$, update parameters as follows: # * $b_2^{(t+1)}) \leftarrow b_2^{(t)}) + \eta_t \frac{\partial \rho}{\partial b_2}$ # * $C^{(t+1)}) \leftarrow C^{(t)}) + \eta_t \frac{\partial \rho}{\partial C}$ # * $b_1^{(t+1)}) \leftarrow b_1^{(t)}) + \eta_t \frac{\partial \rho}{\partial b_1}$ # * $W^{(t+1)}) \leftarrow W^{(t)} + \eta_t \frac{\partial \rho}{\partial W}$ # * Step3: Repeat Step2 until some convergence criteria is met. # # Numerical Experiment import numpy as np import h5py import time import copy #import logging #from helperfunctions import create_log #logger = create_log(file_name="task.log", log_level=logging.DEBUG) file_name = "../data/MNISTdata.hdf5" #logger.info("Load the MNIST dataset...") data = h5py.File(file_name, "r") x_train = np.float32(data["x_train"][:]) y_train = np.int32(np.hstack(np.array(data["y_train"]))) x_test = np.float32(data["x_test"][:]) y_test = np.int32(np.hstack(np.array(data["y_test"]))) data.close() #logger.info("Finished!") class MnistModel(): def __init__(self, x_train, y_train, x_test, y_test, hidden_units=100, learning_rate=0.01, batch_size=20, num_epochs=5, seed=None): self.x_train = x_train self.x_test = x_test self.y_train = y_train self.y_test = y_test self.num_inputs = self.x_train.shape[1] self.num_outputs = 10 self.hidden_units = hidden_units self.learning_rate = learning_rate self.batch_size = batch_size self.num_epochs = num_epochs self.params = {} self.gradients = {} if seed is not None: r = np.random.RandomState(seed) self.params["W"] = r.randn(self.hidden_units, self.num_inputs) / np.sqrt(self.num_inputs) self.params["b1"] = np.zeros((self.hidden_units, 1)) self.params["C"] = r.randn(self.num_outputs, self.hidden_units) / np.sqrt(self.num_inputs) self.params["b2"] = np.zeros((self.num_outputs, 1)) else: self.params["W"] = np.random.randn(self.hidden_units, self.num_inputs) / np.sqrt(self.num_inputs) self.params["b1"] = np.zeros((self.hidden_units, 1)) self.params["C"] = np.random.randn(self.num_outputs, self.hidden_units) / np.sqrt(self.num_inputs) self.params["b2"] = np.zeros((self.num_outputs, 1)) print("training sample size: [{}]\ntest sample size:[{}]\nhidden units number: [{}]\nbatch_size:[{}]".format(self.x_train.shape, self.x_test.shape, self.hidden_units, self.batch_size)) def activation(self, z): """ z: must be of size (hidden_units * 1) """ return [*map(lambda x: x if x > 0 else 0, z)] def 
activation_gradient(self, z): """ z: must be of size (hidden_units * 1) """ return [*map(lambda x: 1 if x > 0 else 0, z)] def softmax(self, U): temp = np.exp(U) return temp / np.sum(temp) def forward_propagation(self): random_index = np.random.choice(self.x_train.shape[0], replace=False, size=self.batch_size) self.x_train_sub_samples = self.x_train[random_index].reshape((-1, self.batch_size)) self.y_train_sub_samples = self.y_train[random_index] self.forward_results = {} self.forward_results["Z"] = np.dot(self.params["W"], self.x_train_sub_samples) + self.params["b1"] self.forward_results["H"] = np.apply_along_axis(self.activation, 0, self.forward_results["Z"]) self.forward_results["U"] = np.dot(self.params["C"], self.forward_results["H"]) + self.params["b2"] self.forward_results["S"] = np.apply_along_axis(self.softmax, 0, self.forward_results["U"]) def create_unit_matrix(self): ey = np.zeros((self.num_outputs, self.batch_size)) for col_index, row_index in enumerate(self.y_train_sub_samples): ey[row_index, col_index] = 1 return(ey) def back_propagation(self): ey = self.create_unit_matrix() temp = - (ey - self.forward_results["S"]) self.gradients["db2"] = np.mean(temp, axis=1, keepdims=True) self.gradients["dC"] = np.dot(temp, self.forward_results["H"].T) / self.batch_size self.gradients["dH"] = np.mean(np.dot(self.params["C"].T, temp), axis=1, keepdims=True) H_gradient = np.apply_along_axis(self.activation_gradient, 0, self.forward_results["Z"]) temp2 = np.multiply(self.gradients["dH"], H_gradient) self.gradients["db1"] = np.mean(temp2, axis=1, keepdims=True) self.gradients["dW"] = np.dot(temp2, self.x_train_sub_samples.T) / self.batch_size def train(self): for epoch in range(self.num_epochs): if (epoch > 5): self.learning_rate = 0.001 if (epoch > 10): self.learning_rate = 0.0001 if (epoch > 15): self.learning_rate = 0.00001 total_correct = 0 for i in range(int(self.x_train.shape[0] / self.batch_size)): self.forward_propagation() prediction_train = np.argmax(self.forward_results["S"], axis=0) total_correct += np.sum(prediction_train == self.y_train_sub_samples) self.back_propagation() self.params["W"] -= self.learning_rate * self.gradients["dW"] self.params["b1"] -= self.learning_rate * self.gradients["db1"] self.params["C"] -= self.learning_rate * self.gradients["dC"] self.params["b2"] -= self.learning_rate * self.gradients["db2"] print("epoch:{} | Training Accuracy:[{}]".format(epoch+1, total_correct/len(self.x_train))) def test(self): self.Z = np.dot(self.params["W"], self.x_test.T) + self.params["b1"] self.H = np.apply_along_axis(self.activation, 0, self.Z) self.U = np.dot(self.params["C"], self.H) + self.params["b2"] self.S = np.apply_along_axis(self.softmax, 0, self.U) self.prediction = np.apply_along_axis(np.argmax, 0, self.S) correct_ratio = np.mean(self.prediction == self.y_test) return correct_ratio # ## Batch_Size = 1 nn = MnistModel(x_train, y_train, x_test, y_test, hidden_units=100, batch_size=1, learning_rate=0.01, num_epochs=5, seed=1234) start = time.time() nn.train() end = time.time() print("Running Time: [{}] second".format(end - start)) print("Test Accuracy: [{}]".format(nn.test())) # ## Bug happens when `batch_size>1` class MnistModel(): def __init__(self, x_train, y_train, x_test, y_test, hidden_units=100, learning_rate=0.01, batch_size=20, num_epochs=5, seed=None): self.x_train = x_train self.x_test = x_test self.y_train = y_train self.y_test = y_test self.num_inputs = self.x_train.shape[1] self.num_outputs = 10 self.hidden_units = hidden_units self.learning_rate = 
learning_rate self.batch_size = batch_size self.num_epochs = num_epochs self.params = {} self.gradients = {} if seed is not None: r = np.random.RandomState(seed) self.params["W"] = r.randn(self.hidden_units, self.num_inputs) / np.sqrt(self.num_inputs) self.params["b1"] = np.zeros((self.hidden_units, 1)) self.params["C"] = r.randn(self.num_outputs, self.hidden_units) / np.sqrt(self.num_inputs) self.params["b2"] = np.zeros((self.num_outputs, 1)) else: self.params["W"] = np.random.randn(self.hidden_units, self.num_inputs) / np.sqrt(self.num_inputs) self.params["b1"] = np.zeros((self.hidden_units, 1)) self.params["C"] = np.random.randn(self.num_outputs, self.hidden_units) / np.sqrt(self.num_inputs) self.params["b2"] = np.zeros((self.num_outputs, 1)) print("training sample size: [{}]\ntest sample size:[{}]\nhidden units number: [{}]\nbatch_size:[{}]".format(self.x_train.shape, self.x_test.shape, self.hidden_units, self.batch_size)) def activation(self, z): """ z: must be of size (hidden_units * 1) """ return [*map(lambda x: x if x > 0 else 0, z)] def activation_gradient(self, z): """ z: must be of size (hidden_units * 1) """ return [*map(lambda x: 1 if x > 0 else 0, z)] def softmax(self, U): temp = np.exp(U) return temp / np.sum(temp) def forward_propagation(self): random_index = np.random.choice(self.x_train.shape[0], replace=False, size=self.batch_size) self.x_train_sub_samples = self.x_train[random_index].reshape((-1, self.batch_size)) self.y_train_sub_samples = self.y_train[random_index] self.forward_results = {} self.forward_results["Z"] = np.dot(self.params["W"], self.x_train_sub_samples) + self.params["b1"] self.forward_results["H"] = np.apply_along_axis(self.activation, 0, self.forward_results["Z"]) self.forward_results["U"] = np.dot(self.params["C"], self.forward_results["H"]) + self.params["b2"] self.forward_results["S"] = np.apply_along_axis(self.softmax, 0, self.forward_results["U"]) def create_unit_matrix(self): ey = np.zeros((self.num_outputs, self.batch_size)) for col_index, row_index in enumerate(self.y_train_sub_samples): ey[row_index, col_index] = 1 return(ey) def back_propagation(self): ey = self.create_unit_matrix() temp = - (ey - self.forward_results["S"]) self.gradients["db2"] = np.mean(temp, axis=1, keepdims=True) self.gradients["dC"] = np.dot(temp, self.forward_results["H"].T) / self.batch_size temp2 = np.dot(self.params["C"].T, temp) self.gradients["dH"] = np.mean(temp2, axis=1, keepdims=True) H_gradient = np.apply_along_axis(self.activation_gradient, 0, self.forward_results["Z"]) temp3 = np.multiply(temp2, H_gradient) self.gradients["db1"] = np.mean(temp3, axis=1, keepdims=True) #self.gradients["dW"] = np.dot(temp3, self.x_train_sub_samples.T) / self.batch_size self.gradients["dW"] = 0 for i in range(int(temp3.shape[1])): #print(temp3[i].reshape(-1,1).shape, self.x_train_sub_samples.shape) self.gradients["dW"] += np.dot(temp3[i].reshape(-1,1), self.x_train_sub_samples[:,i].reshape(1, self.num_inputs)) self.gradients["dW"] /= self.batch_size def train(self): for epoch in range(self.num_epochs): if (epoch > 5): self.learning_rate = 0.001 if (epoch > 10): self.learning_rate = 0.0001 if (epoch > 15): self.learning_rate = 0.00001 total_correct = 0 for i in range(int(self.x_train.shape[0] / self.batch_size)): self.forward_propagation() prediction_train = np.argmax(self.forward_results["S"], axis=0) total_correct += np.sum(prediction_train == self.y_train_sub_samples) self.back_propagation() #print(self.params["W"].shape, self.gradients["dW"].shape) self.params["W"] -= 
self.learning_rate * self.gradients["dW"] self.params["b1"] -= self.learning_rate * self.gradients["db1"] self.params["C"] -= self.learning_rate * self.gradients["dC"] self.params["b2"] -= self.learning_rate * self.gradients["db2"] print("epoch:{} | Training Accuracy:[{}]".format(epoch+1, total_correct/len(self.x_train))) def test(self): self.Z = np.dot(self.params["W"], self.x_test.T) + self.params["b1"] self.H = np.apply_along_axis(self.activation, 0, self.Z) self.U = np.dot(self.params["C"], self.H) + self.params["b2"] self.S = np.apply_along_axis(self.softmax, 0, self.U) self.prediction = np.apply_along_axis(np.argmax, 0, self.S) correct_ratio = np.mean(self.prediction == self.y_test) return correct_ratio nn = MnistModel(x_train, y_train, x_test, y_test, hidden_units=100, batch_size=100, learning_rate=0.01, num_epochs=20, seed=1234) nn.train() a = np.array([[1,2],[3,4]]) b = np.array([[1],[2]]) np.multiply(a,b)
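# A closing observation, added here for clarity (not part of the original assignment): when moving to mini-batches, $\frac{\partial \rho}{\partial H}$ has to be kept per sample until it is multiplied by $\sigma'(Z)$, because in general mean(A * B) != mean(A) * mean(B). A tiny numerical illustration:

A = np.array([[1.0, 3.0]])   # stand-in for per-sample dH values
B = np.array([[2.0, 0.0]])   # stand-in for per-sample sigma'(Z) values
print(np.mean(A * B))            # 1.0, multiply per sample first, then average
print(np.mean(A) * np.mean(B))   # 2.0, average first, then multiply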
Python_IE534/hw1/problem-hw1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import math import numpy as np import pandas as pd import seaborn as sns from matplotlib import pyplot from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder from sklearn.metrics import classification_report import tensorflow as tf from tensorflow.keras import optimizers from tensorflow.keras.datasets import mnist from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Flatten, Dense, Conv2D, MaxPooling2D from tensorflow.keras.layers import Dropout, BatchNormalization, LeakyReLU, Activation from tensorflow.keras.callbacks import Callback, EarlyStopping, ReduceLROnPlateau from tensorflow.keras.preprocessing.image import ImageDataGenerator from keras.utils import np_utils import matplotlib.pyplot as plt import os # - df = pd.read_csv('C:/Users/LENOVO/Desktop/fer2013.csv') print(df.shape) df.head() math.sqrt(len(df.pixels[0].split(' '))) INTERESTED_LABELS = [3, 4, 6] df = df[df.emotion.isin(INTERESTED_LABELS)] df.shape img_array = df.pixels.apply(lambda x: np.array(x.split(' ')).reshape(48, 48, 1).astype('float32')) img_array = np.stack(img_array, axis=0) img_array.shape le = LabelEncoder() img_labels = le.fit_transform(df.emotion) img_labels = np_utils.to_categorical(img_labels) img_labels.shape le_name_mapping = dict(zip(le.classes_, le.transform(le.classes_))) print(le_name_mapping) X_train, X_valid, y_train, y_valid = train_test_split(img_array, img_labels,shuffle=True, stratify=img_labels,test_size=0.1, random_state=42) X_train.shape, X_valid.shape, y_train.shape, y_valid.shape img_width = X_train.shape[1] img_height = X_train.shape[2] img_depth = X_train.shape[3] num_classes = y_train.shape[1] # Normalizing results, as neural networks are very sensitive to unnormalized data. X_train = X_train / 255. X_valid = X_valid / 255. 
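# The architecture stored in `model.h5` is not defined in this notebook; the trained file is simply loaded below. Purely as an illustration of the kind of network the imported layers support, and not the actual saved model, a minimal CNN for 48x48 grayscale inputs might look like this.

def build_demo_cnn():
    model = Sequential([
        Conv2D(32, (3, 3), activation='relu', input_shape=(img_width, img_height, img_depth)),
        BatchNormalization(),
        MaxPooling2D((2, 2)),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D((2, 2)),
        Flatten(),
        Dense(128, activation='relu'),
        Dropout(0.4),
        Dense(num_classes, activation='softmax'),
    ])
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model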
new_model = tf.keras.models.load_model('model.h5') new_model.summary() loss, acc = new_model.evaluate(X_valid, y_valid, verbose=1) print('Restored model, accuracy: {:5.2f}%'.format(100 * acc)) loss, acc = new_model.evaluate(X_train, y_train, verbose=1) print('Restored model, accuracy: {:5.2f}%'.format(100 * acc)) # + mapper = { 0: "happy", 1: "sad", 2: "neutral", } objects = ('happy', 'sad', 'neutral') y_pos = np.arange(len(objects)) print(y_pos) # + import matplotlib.pyplot as plt def emotion_analysis(emotions): objects = ['happy', 'sad', 'neutral'] y_pos = np.arange(len(objects)) plt.bar(y_pos, emotions, align='center', alpha=0.9) plt.tick_params(axis='x', which='both', pad=10,width=4,length=10) plt.xticks(y_pos, objects) plt.ylabel('percentage') plt.title('emotion') plt.show() # + from skimage import io from keras.preprocessing import image from keras.preprocessing.image import ImageDataGenerator img = image.load_img('./test1.jpg', color_mode = "grayscale", target_size=(48, 48)) show_img=image.load_img('./test1.jpg', target_size=(200, 200)) x = image.img_to_array(img) x = np.expand_dims(x, axis = 0) x /= 255 custom = new_model.predict(x) #print(custom[0]) emotion_analysis(custom[0]) x = np.array(x, 'float32') x = x.reshape([48, 48]); plt.gray() plt.imshow(show_img) plt.show() m=0.000000000000000000001 a=custom[0] for i in range(0,len(a)): if a[i]>m: m=a[i] ind=i print('Expression Prediction:',objects[ind]) # + from skimage import io from keras.preprocessing import image from keras.preprocessing.image import ImageDataGenerator img = image.load_img('./test2.jpg', color_mode = "grayscale", target_size=(48, 48)) show_img=image.load_img('./test2.jpg', target_size=(200, 200)) x = image.img_to_array(img) x = np.expand_dims(x, axis = 0) x /= 255 custom = new_model.predict(x) #print(custom[0]) emotion_analysis(custom[0]) x = np.array(x, 'float32') x = x.reshape([48, 48]); plt.gray() plt.imshow(show_img) plt.show() m=0.000000000000000000001 a=custom[0] for i in range(0,len(a)): if a[i]>m: m=a[i] ind=i print('Expression Prediction:',objects[ind]) # -
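# The manual search for the largest probability above can be replaced with `np.argmax`. A small helper, added as a sketch and reusing only objects defined earlier (`new_model`, `objects`, `image`), could be:

def predict_expression(img_path):
    """Return the predicted label and class probabilities for one image file."""
    img = image.load_img(img_path, color_mode="grayscale", target_size=(48, 48))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0) / 255.0
    probs = new_model.predict(x)[0]
    return objects[int(np.argmax(probs))], probs

# e.g. predict_expression('./test1.jpg')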
recognize_emotion.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Lambda Notebook (Python 3) # language: python # name: lambda-notebook # --- # # Definite article tutorial # ### Authors: <NAME>, <NAME> # # This notebook goes through what is involved in adding a new variable-binding operator to the metalanguage and instantiating it in a lexical entry, by working through iota. # + #reload_lamb() # can be uncommented for debugging purposes (this will reload any changes made to the source files) # - # This notebook walks through the process of adding a new operator, iota, to the lambda notebook. # # The notebook has two main parts: the metalanguage, and the object language. To add iota, we need to modify the meta-language, and then using it in the object language is straightforward. # # Every expression in the metalanguage is represented by an object that inherits from the class _TypedExpr_. Operators that bind a single variable and have a nuclear scope inherit from the subclass _BindingOp_, for example _ExistsUnary_ and _ForallUnary_. To add Iota, we also subclass from _BindingOp_. This basically involves specifying type constraints for the variable, the body, and the entire expression. The default is that the body type and overal type are the same, but this is wrong for iota. For iota, the body type is type t, and the variable / output type are equal. A stricter version of this might enforce that both are type e. # # To add this to the parser, I also added a hook in the function _TypedExpr.try_parse_op_expr_ in meta.py. (In the future this should happen more automatically.) # + # note: IotaUnary has been added to meta.py, parallel code here for exemplification. class DemoIotaUnary(meta.BindingOp): canonical_name = "Iota" # set the token for parsing this operator op_name_uni = "ι" op_name_latex = "\\iota{}" secondary_names = {"ι"} def __init__(self, var_or_vtype, body, varname=None, assignment=None, type_check=True): super().__init__(var_or_vtype=var_or_vtype, typ=None, varname=varname, body=body, body_type=types.type_t, assignment=assignment, type_check=type_check) self.type = self.vartype # output type is the bound variable's type def copy(self): return DemoIotaUnary(self.vartype, self.body, varname=self.varname) def copy_local(self, var, arg, type_check=True): return DemoIotaUnary(var, arg, type_check=type_check) meta.BindingOp.add_op(DemoIotaUnary) # add to the registry of binding operators so that it will be parsed. This will trigger a warning as it overrides the existing IotaUnary class. # - # The following examples demonstrate instantiating this class through python class constructions. (Recall that _lang.te_ calls the metalanguage parser.) test = DemoIotaUnary("x_e", lang.te("P(x)")) test test2 = meta.LFun("f_<e,t>", DemoIotaUnary("x_e", lang.te("f_<e,t>(x_e)"))) test2 # The following cells demonstrate instatiating iota via the metalanguage parser, and test out combining it with a property. the = lang.te("L f_<e,t> : Iota x_e : f(x)") the # %%lamb catexpr = L x_e : Cat(x) # build a property 'catexpr' dp = the(catexpr) dp dp.reduce() dp.reduce_all() dp.reduce_all().derivation # It's worth checking that the type inference is working correctly. What happens when something not of type t is supplied as the body? Is the output type right? # # The following code catches and displays a TypeMismatch if any. (To see the full stack trace, you can remove the `try...except` part.) 
result = None try: lang.te("Iota x_e : x") except types.TypeMismatch as e: result = e result lang.te("Iota x_e : P(x)").type lang.te("Iota x_e : P(x)").__class__ # another way of constructing this: #test3 = lang.te("P_<e,t>")(lang.te("Iota x_e : Q(x)")) test3 = lang.te("P_<e,t>(Iota x_e : Q(x))") test3 test3.type # Now let's use this in a lexical item. With the metalanguage modifications in place, this is straightforward. # %%lamb ||the|| = L f_<e,t> : Iota x_e : f(x) ||cat|| = L x_e : Cat(x) r = the * cat r.reduce_all() r r.tree(derivations=True) # What is missing here? The biggest thing is presuppositions, but interpretation of iota relative to a model would also be helpful. See the version in lamb.meta for one take on presuppositions.
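# As one final (optional) check that the entry generalises to any property, we can feed it a second, made-up predicate built with exactly the same parser calls used above.

dog_expr = lang.te("L x_e : Dog(x)")
the(dog_expr).reduce_all()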
notebooks/tutorials/definite article tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.8 64-bit (''base'': conda)' # name: python388jvsc74a57bd098b0a9b7b4eaaa670588a142fd0a9b87eaafe866f1db4228be72b4211d12040f # --- # # Use Kitti Dataset # # ### In this notebook a sample of the Kitti-Dataset will be downloaded, converted to a rosbag-file and the velodyne data will converted to a pcs-dataset. # ### Some usage examples are provided, too. import pointcloudset as pcs from pathlib import Path # The following terminal commands will download and unzip the kitti sample. # We didn't want to include the bag file in the repo as it is too large and to avoid any license issues. Also KITTI does not provide bag files directly. In order to get the data you need to run the following cell. If you are using pointcloudset in a docker container you need to run the commands outside of the docker to avoid nested docker commands. This is the reason for the commented docker command. # # The docker container (https://hub.docker.com/r/yoyo860224/kitti2bag) is used to convert the kitta data into a rosbag-file. To run the docker under windows use "docker run -v "absolutepathtofolderwheredatais":/data -it yoyo860224/kitti2bag -t 2011_09_26 -r 0002 raw_synced". # # # + tags=["outputPrepend"] # !wget https://s3.eu-central-1.amazonaws.com/avg-kitti/raw_data/2011_09_26_drive_0002/2011_09_26_drive_0002_sync.zip # !wget https://s3.eu-central-1.amazonaws.com/avg-kitti/raw_data/2011_09_26_calib.zip # !unzip 2011_09_26_drive_0002_sync.zip # !unzip 2011_09_26_calib.zip # #!docker run -v `pwd`:/data -it yoyo860224/kitti2bag -t 2011_09_26 -r 0002 raw_synced # - kittibag = Path().cwd().joinpath("kitti_2011_09_26_drive_0002_synced.bag") testpath = Path().cwd().parent.joinpath("tests/testdata/kitti_velodyne") # Convert bagfile to pcs-dataset: kittiset = pcs.Dataset.from_file(kittibag,topic="/kitti/velo/pointcloud",keep_zeros=False) kittiset[0:3].to_file(testpath) # Do some analysis: def isolate_target(frame: pcs.PointCloud) -> pcs.PointCloud: return frame.limit("i",0.9,1) intense_points = isolate_target(kittiset[0]) intense_points.plot(color = "i")
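# The same intensity cut can be applied to any other frame of the set. For example (illustrative only, reusing the calls shown above):
isolate_target(kittiset[1]).plot(color="i")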
doc/sphinx/source/tutorial_notebooks/Working_with_kitti_dataset.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from iobjectspy import (overlay, open_datasource)
import os
import sys

# Set the path to the sample data
example_data_dir = ''

# Set the output path for the results
out_dir = os.path.join(example_data_dir, 'out')
if not os.path.exists(out_dir):
    os.makedirs(out_dir)


def progress_func(step_event):
    sys.stdout.write('%s,%s %d %%\n' % (step_event.title, step_event.message, step_event.percent))


def overlay_test():
    ds = open_datasource(os.path.join(example_data_dir, 'example_data.udb'))
    result = overlay(ds['Landuse_R'], ds['Geomor_R'], 'INTERSECT', 'LandType,Area',
                     out_data=os.path.join(out_dir, 'out_overlay_analyst.udb'),
                     progress=progress_func)
    ds.close()
    if result is not None:
        if isinstance(result, str):
            print('Overlay analysis succeeded; the result dataset is ' + result)
        else:
            print('Overlay analysis succeeded; the result dataset is ' + result.name)
    else:
        print('Overlay analysis failed')


if __name__ == '__main__':
    # Overlay analysis
    overlay_test()
# -

exit()
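# For reference only, since the cell above ends the session with exit(): the same overlay call can be wrapped so that the dataset names and retained fields become parameters. Nothing beyond the API already used above is assumed.

def run_overlay(source_name, operate_name, fields, out_name='out_overlay_analyst.udb'):
    ds = open_datasource(os.path.join(example_data_dir, 'example_data.udb'))
    try:
        return overlay(ds[source_name], ds[operate_name], 'INTERSECT', fields,
                       out_data=os.path.join(out_dir, out_name), progress=progress_func)
    finally:
        ds.close()

# e.g. run_overlay('Landuse_R', 'Geomor_R', 'LandType,Area')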
gis/iobjectspy/overlay_analyst.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Fun with FFT and sound files # Based on: https://realpython.com/python-scipy-fft/ # Define a function for generating pure sine wave tones # + import numpy as np import matplotlib.pyplot as plt SAMPLE_RATE = 44100 # Hertz DURATION = 5 # Seconds def generate_sine_wave(freq, sample_rate, duration): x = np.linspace(0, duration, sample_rate * duration, endpoint=False) frequencies = x * freq # 2pi because np.sin takes radians y = np.sin(2 * np.pi * frequencies) return x, y # Generate a 2 hertz sine wave that lasts for 5 seconds x, y = generate_sine_wave(2, SAMPLE_RATE, DURATION) plt.plot(x, y) plt.show() # - # Produce two tones, e.g. 400 Hz signal and a 4 kHz high-pitch noise # + _, nice_tone = generate_sine_wave(400, SAMPLE_RATE, DURATION) _, noise_tone = generate_sine_wave(4000, SAMPLE_RATE, DURATION) noise_tone = noise_tone * 0.3 mixed_tone = nice_tone + noise_tone #mixed_tone = noise_tone # - # For the purposes of storing the tones in an audio file, the amplitude needs to be normalized to the range of 16-bit integer # + normalized_tone = np.int16((mixed_tone / mixed_tone.max()) * 32767) plt.plot(normalized_tone[:1000]) plt.show() # - # Store the sound for playback # + from scipy.io import wavfile as wf # Remember SAMPLE_RATE = 44100 Hz is our playback rate wf.write("mysinewave.wav", SAMPLE_RATE, normalized_tone) # - # Can also try to record the sound (NB: won't work on datahub !) # + # import required libraries # %pip install sounddevice import sounddevice as sd print("Recording...") # Start recorder with the given values # of duration and sample frequency recording = sd.rec(int(DURATION * SAMPLE_RATE), samplerate=SAMPLE_RATE, channels=1) # Record audio for the given number of seconds sd.wait() print("Done") # This will convert the NumPy array to an audio # file with the given sampling frequency wf.write("recording0.wav", SAMPLE_RATE, recording) # - # ### Fourier transforms # # Now try to transform the time stream into frequency space using FFT # + from scipy.fft import fft, fftfreq # Number of samples in normalized_tone N = SAMPLE_RATE * DURATION yf = fft(normalized_tone) xf = fftfreq(N, 1 / SAMPLE_RATE) print('Type of the output array: ',type(yf[0])) print('Size of the input array: ',N) print('Size of the Fourier transform: ',len(xf)) df = xf[1]-xf[0] print(f'Width of the frequency bins: {df} Hz') plt.plot(xf, np.abs(yf)) plt.xlabel('Frequency (Hz)') plt.ylabel('FFT magnitude (a.u.)') plt.show() plt.figure() plt.yscale('log') plt.plot(xf, np.abs(yf)) plt.xlabel('Frequency (Hz)') plt.ylabel('FFT magnitude (a.u.)') plt.xlim(350,4050) plt.show() # - # You notice that fft returns data for both positive and negative frequencies, produces the output array of the same size as input, and the output is a set of *complex* numbers. However, the information is reduntant: only half of the output values are unique. The magnitudes of the Fourier coefficients at negative frequencies are the same as at the corresponding positive frequencies. This is the property of the *real* Fourier transform, i.e. the transform applied to real-value signals. 
More precisely, $\mathrm{fft}(f)=\mathrm{fft}^*(-f)$ print(xf[1],xf[-1]) print(yf[1],yf[-1]) # We can use this fact to save computational time and storage by computing only half of the Fourier coefficients: # + from scipy.fft import rfft, rfftfreq # Note the extra 'r' at the front yf = rfft(normalized_tone) xf = rfftfreq(N, 1 / SAMPLE_RATE) print('Type of the output array: ',type(yf[0])) print('Size of the input array: ',N) print('Size of the Fourier transform: ',len(xf)) df = xf[1]-xf[0] print(f'Width of the frequency bins: {df} Hz') plt.plot(xf, np.abs(yf)) plt.xlim(1,5000) plt.xlabel('Frequency (Hz)') plt.ylabel('FFT magnitude (a.u.)') plt.show() # - # Now let's look at the Fourier transorm of a recorded sound: # + rate, data = wf.read("recording0.wav") N=len(data) print(rate, N) time=np.arange(0, N)/rate plt.plot(time, data) plt.xlabel('time (sec)') plt.ylabel('Sound a.u.)') plt.show() yf = rfft(data) xf = rfftfreq(len(data), 1 / rate) print('Type of the output array: ',type(yf[0])) print('Size of the input array: ',len(data)) print('Size of the Fourier transform: ',len(xf)) df = xf[1]-xf[0] print(f'Width of the frequency bins: {df} Hz') mag = np.abs(yf) plt.figure() plt.loglog(xf, mag) plt.xlabel('Frequency (Hz)') plt.ylabel('FFT magnitude (a.u.)') plt.show() plt.figure() plt.plot(xf, mag) plt.yscale('log') plt.xlim(100,2000) plt.xlabel('Frequency (Hz)') plt.ylabel('FFT magnitude (a.u.)') plt.show() fMax = xf[np.argmax(mag)] print(f'Frequency of loudest tone: {fMax}') # - # See for example a chart of fundamental frequencies of notes <a href="https://pages.mtu.edu/~suits/notefreqs.html">here</a>
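# As a small extra step (an added sketch, assuming `fMax` is nonzero), the loudest frequency found above can be mapped to the nearest equal-tempered note using the MIDI formula $n = 69 + 12\log_2(f/440)$, where A4 = 440 Hz.

note_names = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
midi = int(round(69 + 12 * np.log2(fMax / 440.0)))
print(f'Nearest note: {note_names[midi % 12]}{midi // 12 - 1} (MIDI number {midi})')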
Week14/L12/Lecture12a_FFT.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import spacy from spacy.lang.en import English import nltk nltk.download('wordnet') from nltk.corpus import wordnet as wn from nltk.stem.wordnet import WordNetLemmatizer from gensim import corpora import pickle import gensim import pandas as pd parser = English() def tokenize(text): lda_tokens = [] tokens = parser(text) for token in tokens: if token.orth_.isspace(): continue elif token.like_url: lda_tokens.append('URL') elif token.orth_.startswith('@'): lda_tokens.append('SCREEN_NAME') else: lda_tokens.append(token.lower_) return lda_tokens def get_lemma(word): lemma = wn.morphy(word) if lemma is None: return word else: return lemma nltk.download('stopwords') en_stop = set(nltk.corpus.stopwords.words('english')) def prepare_text_for_lda(text): tokens = tokenize(text) tokens = [token for token in tokens if len(token) > 4] #discard short words tokens = [token for token in tokens if token not in en_stop] #remove if stop word tokens = [get_lemma(token) for token in tokens] #lemmatize each word return tokens dataset = open('nytimes_news_articles.txt').readlines() ds = [] for line in dataset: if line[:4] == 'URL:': ''.join(newstring) ds.append(newstring) newstring = [] newstring.append(line) links = [i[0] for i in ds] ds = [''.join(i[2:]).replace('\n',' ') for i in ds] text_data = [] for article in ds: tokens = prepare_text_for_lda(article) text_data.append(tokens) # creates dictionary generator dictionary = corpora.Dictionary(text_data) # creates a list of lists of tuples, with index for each word in bag of words corpus = [dictionary.doc2bow(text) for text in text_data] #creates a pickle file and dictionary file to save progress pickle.dump(corpus, open('corpus.pkl', 'wb')) dictionary.save('dictionary.gensim') # + #LDA Model instantiation , corpus is the list of tuples, dictionary maps the words to indices NUM_TOPICS = 5 # arbitrary ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics = NUM_TOPICS, id2word=dictionary, passes=15) ldamodel.save('model5.gensim') # + #LDA Model instantiation , corpus is the list of tuples, dictionary maps the words to indices NUM_TOPICS = 25 # arbitrary ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics = NUM_TOPICS, id2word=dictionary, passes=15) ldamodel.save('model25.gensim') # -
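# A quick look at what was learned (an optional sketch using the file paths saved above): reload the 5-topic model and print a few top words per topic.

lda5 = gensim.models.ldamodel.LdaModel.load('model5.gensim')
for topic_id, words in lda5.print_topics(num_words=6):
    print(topic_id, words)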
notebooks/lda_train.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] tags=["remove_cell"] # # Representing Qubit States # - # You now know something about bits, and about how our familiar digital computers work. All the complex variables, objects and data structures used in modern software are basically all just big piles of bits. Those of us who work on quantum computing call these *classical variables.* The computers that use them, like the one you are using to read this article, we call *classical computers*. # # In quantum computers, our basic variable is the _qubit:_ a quantum variant of the bit. These have exactly the same restrictions as normal bits do: they can store only a single binary piece of information, and can only ever give us an output of `0` or `1`. However, they can also be manipulated in ways that can only be described by quantum mechanics. This gives us new gates to play with, allowing us to find new ways to design algorithms. # # To fully understand these new gates, we first need to understand how to write down qubit states. For this we will use the mathematics of vectors, matrices, and complex numbers. Though we will introduce these concepts as we go, it would be best if you are comfortable with them already. If you need a more in-depth explanation or a refresher, you can find the guide [here](../ch-prerequisites/linear_algebra.html). # # # # # ## Contents # # 1. [Classical vs Quantum Bits](#cvsq) # 1.1 [Statevectors](#statevectors) # 1.2 [Qubit Notation](#notation) # 1.3 [Exploring Qubits with Qiskit](#exploring-qubits) # 2. [The Rules of Measurement](#rules-measurement) # 2.1 [A Very Important Rule](#important-rule) # 2.2 [The Implications of this Rule](#implications) # 3. [The Bloch Sphere](#bloch-sphere) # 3.1 [Describing the Restricted Qubit State](#bloch-sphere-1) # 3.2 [Visually Representing a Qubit State](#bloch-sphere-2) # # ## 1. Classical vs Quantum Bits <a id="cvsq"></a> # # ### 1.1 Statevectors<a id="statevectors"></a> # # In quantum physics we use _statevectors_ to describe the state of our system. Say we wanted to describe the position of a car along a track, this is a classical system so we could use a number $x$: # # ![tracking a car with scalars](images/car_track_1.jpg) # # $$ x=4 $$ # # Alternatively, we could instead use a collection of numbers in a vector called a _statevector._ Each element in the statevector contains the probability of finding the car in a certain place: # # ![tracking a car with vectors](images/car_track_2.jpg) # # $$ # |x\rangle = \begin{bmatrix} 0\\ \vdots \\ 0 \\ 1 \\ 0 \\ \vdots \\ 0 \end{bmatrix} # \begin{matrix} \\ \\ \\ \leftarrow \\ \\ \\ \\ \end{matrix} # \begin{matrix} \\ \\ \text{Probability of} \\ \text{car being at} \\ \text{position 4} \\ \\ \\ \end{matrix} # $$ # # This isn’t limited to position, we could also keep a statevector of all the possible speeds the car could have, and all the possible colours the car could be. With classical systems (like the car example above), this is a silly thing to do as it requires keeping huge vectors when we only really need one number. But as we will see in this chapter, statevectors happen to be a very good way of keeping track of quantum systems, including quantum computers. 
# # # ### 1.2 Qubit Notation <a id="notation"></a> # # Classical bits always have a completely well-defined state: they are either `0` or `1` at every point during a computation. There is no more detail we can add to the state of a bit than this. So to write down the state of a of classical bit (`c`), we can just use these two binary values. For example: # # c = 0 # # This restriction is lifted for quantum bits. Whether we get a `0` or a `1` from a qubit only needs to be well-defined when a measurement is made to extract an output. At that point, it must commit to one of these two options. At all other times, its state will be something more complex than can be captured by a simple binary value. # # To see how to describe these, we can first focus on the two simplest cases. As we saw in the last section, it is possible to prepare a qubit in a state for which it definitely gives the outcome `0` when measured. # # We need a name for this state. Let's be unimaginative and call it $0$ . Similarly, there exists a qubit state that is certain to output a `1`. We'll call this $1$. These two states are completely mutually exclusive. Either the qubit definitely outputs a ```0```, or it definitely outputs a ```1```. There is no overlap. One way to represent this with mathematics is to use two orthogonal vectors. # # $$ # |0\rangle = \begin{bmatrix} 1 \\ 0 \end{bmatrix} \, \, \, \, |1\rangle =\begin{bmatrix} 0 \\ 1 \end{bmatrix}. # $$ # # This is a lot of notation to take in all at once. First, let's unpack the weird $|$ and $\rangle$. Their job is essentially just to remind us that we are talking about the vectors that represent qubit states labelled $0$ and $1$. This helps us distinguish them from things like the bit values ```0``` and ```1``` or the numbers 0 and 1. It is part of the bra-ket notation, introduced by Dirac. # # If you are not familiar with vectors, you can essentially just think of them as lists of numbers which we manipulate using certain rules. If you are familiar with vectors from your high school physics classes, you'll know that these rules make vectors well-suited for describing quantities with a magnitude and a direction. For example, the velocity of an object is described perfectly with a vector. However, the way we use vectors for quantum states is slightly different to this, so don't hold on too hard to your previous intuition. It's time to do something new! # # With vectors we can describe more complex states than just $|0\rangle$ and $|1\rangle$. For example, consider the vector # # $$ # |q_0\rangle = \begin{bmatrix} \tfrac{1}{\sqrt{2}} \\ \tfrac{i}{\sqrt{2}} \end{bmatrix} . # $$ # # To understand what this state means, we'll need to use the mathematical rules for manipulating vectors. Specifically, we'll need to understand how to add vectors together and how to multiply them by scalars. 
# # <p> # <details> # <summary>Reminder: Matrix Addition and Multiplication by Scalars (Click here to expand)</summary> # <p>To add two vectors, we add their elements together: # $$|a\rangle = \begin{bmatrix}a_0 \\ a_1 \\ \vdots \\ a_n \end{bmatrix}, \quad # |b\rangle = \begin{bmatrix}b_0 \\ b_1 \\ \vdots \\ b_n \end{bmatrix}$$ # $$|a\rangle + |b\rangle = \begin{bmatrix}a_0 + b_0 \\ a_1 + b_1 \\ \vdots \\ a_n + b_n \end{bmatrix} $$ # </p> # <p>And to multiply a vector by a scalar, we multiply each element by the scalar: # $$x|a\rangle = \begin{bmatrix}x \times a_0 \\ x \times a_1 \\ \vdots \\ x \times a_n \end{bmatrix}$$ # </p> # <p>These two rules are used to rewrite the vector $|q_0\rangle$ (as shown above): # $$ # \begin{aligned} # |q_0\rangle & = \tfrac{1}{\sqrt{2}}|0\rangle + \tfrac{i}{\sqrt{2}}|1\rangle \\ # & = \tfrac{1}{\sqrt{2}}\begin{bmatrix}1\\0\end{bmatrix} + \tfrac{i}{\sqrt{2}}\begin{bmatrix}0\\1\end{bmatrix}\\ # & = \begin{bmatrix}\tfrac{1}{\sqrt{2}}\\0\end{bmatrix} + \begin{bmatrix}0\\\tfrac{i}{\sqrt{2}}\end{bmatrix}\\ # & = \begin{bmatrix}\tfrac{1}{\sqrt{2}} \\ \tfrac{i}{\sqrt{2}} \end{bmatrix}\\ # \end{aligned} # $$ # </details> # </p> # <p> # <details> # <summary>Reminder: Orthonormal Bases (Click here to expand)</summary> # <p> # It was stated before that the two vectors $|0\rangle$ and $|1\rangle$ are orthonormal, this means they are both <i>orthogonal</i> and <i>normalised</i>. Orthogonal means the vectors are at right angles: # </p><p><img src="images/basis.svg"></p> # <p>And normalised means their magnitudes (length of the arrow) is equal to 1. The two vectors $|0\rangle$ and $|1\rangle$ are <i>linearly independent</i>, which means we cannot describe $|0\rangle$ in terms of $|1\rangle$, and vice versa. However, using both the vectors $|0\rangle$ and $|1\rangle$, and our rules of addition and multiplication by scalars, we can describe all possible vectors in 2D space: # </p><p><img src="images/basis2.svg"></p> # <p>Because the vectors $|0\rangle$ and $|1\rangle$ are linearly independent, and can be used to describe any vector in 2D space using vector addition and scalar multiplication, we say the vectors $|0\rangle$ and $|1\rangle$ form a <i>basis</i>. In this case, since they are both orthogonal and normalised, we call it an <i>orthonormal basis</i>. # </details> # </p> # # Since the states $|0\rangle$ and $|1\rangle$ form an orthonormal basis, we can represent any 2D vector with a combination of these two states. This allows us to write the state of our qubit in the alternative form: # # $$ |q_0\rangle = \tfrac{1}{\sqrt{2}}|0\rangle + \tfrac{i}{\sqrt{2}}|1\rangle $$ # # This vector, $|q_0\rangle$ is called the qubit's _statevector,_ it tells us everything we could possibly know about this qubit. For now, we are only able to draw a few simple conclusions about this particular example of a statevector: it is not entirely $|0\rangle$ and not entirely $|1\rangle$. Instead, it is described by a linear combination of the two. In quantum mechanics, we typically describe linear combinations such as this using the word 'superposition'. # # Though our example state $|q_0\rangle$ can be expressed as a superposition of $|0\rangle$ and $|1\rangle$, it is no less a definite and well-defined qubit state than they are. To see this, we can begin to explore how a qubit can be manipulated. 
# # ### 1.3 Exploring Qubits with Qiskit <a id="exploring-qubits"></a> # # First, we need to import all the tools we will need: # + tags=["thebelab-init"] from qiskit import QuantumCircuit, execute, Aer from qiskit.visualization import plot_histogram, plot_bloch_vector from math import sqrt, pi # - # In Qiskit, we use the `QuantumCircuit` object to store our circuits, this is essentially a list of the quantum operations on our circuit and the qubits they are applied to. qc = QuantumCircuit(1) # Create a quantum circuit with one qubit # In our quantum circuits, our qubits always start out in the state $|0\rangle$. We can use the `initialize()` method to transform this into any state. We give `initialize()` the vector we want in the form of a list, and tell it which qubit(s) we want to initialise in this state: qc = QuantumCircuit(1) # Create a quantum circuit with one qubit initial_state = [0,1] # Define initial_state as |1> qc.initialize(initial_state, 0) # Apply initialisation operation to the 0th qubit qc.draw() # Let's view our circuit # We can then use one of Qiskit’s simulators to view the resulting state of our qubit. To begin with we will use the statevector simulator, but we will explain the different simulators and their uses later. # + tags=["thebelab-init"] backend = Aer.get_backend('statevector_simulator') # Tell Qiskit how to simulate our circuit # - # To get the results from our circuit, we use `execute` to run our circuit, giving the circuit and the backend as arguments. We then use `.result()` to get the result of this: qc = QuantumCircuit(1) # Create a quantum circuit with one qubit initial_state = [0,1] # Define initial_state as |1> qc.initialize(initial_state, 0) # Apply initialisation operation to the 0th qubit result = execute(qc,backend).result() # Do the simulation, returning the result # from `result`, we can then get the final statevector using `.get_statevector()`: qc = QuantumCircuit(1) # Create a quantum circuit with one qubit initial_state = [0,1] # Define initial_state as |1> qc.initialize(initial_state, 0) # Apply initialisation operation to the 0th qubit result = execute(qc,backend).result() # Do the simulation, returning the result out_state = result.get_statevector() print(out_state) # Display the output state vector # **Note:** Python uses `j` to represent $i$ in complex numbers. We see a vector with two complex elements: `0.+0.j` = 0, and `1.+0.j` = 1. # # Let’s now measure our qubit as we would in a real quantum computer and see the result: qc.measure_all() qc.draw() # This time, instead of the statevector we will get the counts for the `0` and `1` results using `.get_counts()`: result = execute(qc,backend).result() counts = result.get_counts() plot_histogram(counts) # We can see that we (unsurprisingly) have a 100% chance of measuring $|1\rangle$. This time, let’s instead put our qubit into a superposition and see what happens. We will use the state $|q_0\rangle$ from earlier in this section: # # $$ |q_0\rangle = \tfrac{1}{\sqrt{2}}|0\rangle + \tfrac{i}{\sqrt{2}}|1\rangle $$ # # We need to add these amplitudes to a python list. 
To add a complex amplitude we can either use Python's `j` notation for the imaginary unit, as in the code below, or call the built-in `complex()`, giving the real and imaginary parts as arguments:

initial_state = [1/sqrt(2), 1j/sqrt(2)]  # Define state |q_0>

# And we then repeat the steps for initialising the qubit as before:

qc = QuantumCircuit(1) # Must redefine qc
qc.initialize(initial_state, 0) # Initialise the 0th qubit in the state `initial_state`
state = execute(qc,backend).result().get_statevector() # Execute the circuit
print(state) # Print the result

results = execute(qc,backend).result().get_counts()
plot_histogram(results)

# We can see we have an equal probability of measuring either $|0\rangle$ or $|1\rangle$. To explain this, we need to talk about measurement.
#
# ## 2. The Rules of Measurement <a id="rules-measurement"></a>
# ### 2.1 A Very Important Rule <a id="important-rule"></a>
#
# There is a simple rule for measurement. To find the probability of measuring a state $|\psi \rangle$ in the state $|x\rangle$, we do:
#
# $$p(|x\rangle) = | \langle x| \psi \rangle|^2$$
#
# The symbols $\langle$ and $|$ tell us $\langle x |$ is a row vector. In quantum mechanics we call the column vectors _kets_ and the row vectors _bras._ Together they make up _bra-ket_ notation. Any ket $|a\rangle$ has a corresponding bra $\langle a|$, and we convert between them using the conjugate transpose.
#
# <details>
# <summary>Reminder: The Inner Product (Click here to expand)</summary>
# <p>There are different ways to multiply vectors; here we use the <i>inner product</i>. The inner product is a generalisation of the <i>dot product</i>, which you may already be familiar with. In this guide, we use the inner product between a bra (row vector) and a ket (column vector), and it follows this rule:
#
# $$\langle a| = \begin{bmatrix}a_0^*, & a_1^*, & \dots & a_n^* \end{bmatrix}, \quad
# |b\rangle = \begin{bmatrix}b_0 \\ b_1 \\ \vdots \\ b_n \end{bmatrix}$$
# $$\langle a|b\rangle = a_0^* b_0 + a_1^* b_1 + \dots + a_n^* b_n$$
# </p>
# <p>We can see that the inner product of two vectors always gives us a scalar. A useful thing to remember is that the inner product of two orthogonal vectors is 0, for example, if we have the orthogonal vectors $|0\rangle$ and $|1\rangle$:
# $$\langle1|0\rangle = \begin{bmatrix} 0 , & 1\end{bmatrix}\begin{bmatrix}1 \\ 0\end{bmatrix} = 0$$
# </p>
# <p>Additionally, remember that the vectors $|0\rangle$ and $|1\rangle$ are also normalised (their magnitudes are equal to 1):
#
# $$
# \begin{aligned}
# \langle0|0\rangle & = \begin{bmatrix} 1 , & 0\end{bmatrix}\begin{bmatrix}1 \\ 0\end{bmatrix} = 1 \\
# \langle1|1\rangle & = \begin{bmatrix} 0 , & 1\end{bmatrix}\begin{bmatrix}0 \\ 1\end{bmatrix} = 1
# \end{aligned}
# $$
# </p>
# </details>
#
# In the equation above, $|x\rangle$ can be any qubit state. To find the probability of measuring $|x\rangle$, we take the inner product of $|x\rangle$ and the state we are measuring (in this case $|\psi\rangle$), then square the magnitude. This may seem a little convoluted, but it will soon become second nature.
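#
# As a concrete (and entirely optional) illustration of this rule, the short numpy sketch below evaluates it for the state $|q_0\rangle$ from earlier. Note that `np.vdot` conjugates its first argument, so it plays the role of the bra $\langle x|$:

import numpy as np

q0   = np.array([1/np.sqrt(2), 1j/np.sqrt(2)])  # the state |q_0> used earlier
ket0 = np.array([1, 0])                         # the state |0>

amplitude = np.vdot(ket0, q0)  # <0|q_0>
print(abs(amplitude)**2)       # p(|0>) = |<0|q_0>|^2 -> 0.5 (up to rounding)

# The hand calculation below gives the same value.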
# # If we look at the state $|q_0\rangle$ from before, we can see the probability of measuring $|0\rangle$ is indeed $0.5$: # # $$ # \begin{aligned} # |q_0\rangle & = \tfrac{1}{\sqrt{2}}|0\rangle + \tfrac{i}{\sqrt{2}}|1\rangle \\ # \langle 0| q_0 \rangle & = \tfrac{1}{\sqrt{2}}\langle 0|0\rangle + \tfrac{i}{\sqrt{2}}\langle 0|1\rangle \\ # & = \tfrac{1}{\sqrt{2}}\cdot 1 + \tfrac{i}{\sqrt{2}} \cdot 0\\ # & = \tfrac{1}{\sqrt{2}}\\ # |\langle 0| q_0 \rangle|^2 & = \tfrac{1}{2} # \end{aligned} # $$ # # You should verify the probability of measuring $|1\rangle$ as an exercise. # # This rule governs how we get information out of quantum states. It is therefore very important for everything we do in quantum computation. It also immediately implies several important facts. # # ### 2.2 The Implications of this Rule <a id="implications"></a> # ### #1 Normalisation # # The rule shows us that amplitudes are related to probabilities. If we want the probabilities to add up to 1 (which they should!), we need to ensure that the statevector is properly normalized. Specifically, we need the magnitude of the state vector to be 1. # # $$ \langle\psi|\psi\rangle = 1 \\ $$ # # Thus if: # # $$ |\psi\rangle = \alpha|0\rangle + \beta|1\rangle $$ # # Then: # # $$ \sqrt{|\alpha|^2 + |\beta|^2} = 1 $$ # # This explains the factors of $\sqrt{2}$ you have seen throughout this chapter. In fact, if we try to give `initialize()` a vector that isn’t normalised, it will give us an error: # + tags=["raises-exception"] vector = [1,1] qc.initialize(vector, 0) # - # #### Quick Exercise # 1. Create a state vector that will give a $1/3$ probability of measuring $|0\rangle$. # 2. Create a different state vector that will give the same measurement probabilities. # 3. Verify that the probability of measuring $|1\rangle$ for these two states is $2/3$. # You can check your answer in the widget below (accepts answers ±1% accuracy, you can use numpy terms such as '`pi`' and '`sqrt()`' in the vector): # Run the code in this cell to interact with the widget from qiskit_textbook.widgets import state_vector_exercise state_vector_exercise(target=1/3) # ### #2 Alternative measurement # # The measurement rule gives us the probability $p(|x\rangle)$ that a state $|\psi\rangle$ is measured as $|x\rangle$. Nowhere does it tell us that $|x\rangle$ can only be either $|0\rangle$ or $|1\rangle$. # # The measurements we have considered so far are in fact only one of an infinite number of possible ways to measure a qubit. For any orthogonal pair of states, we can define a measurement that would cause a qubit to choose between the two. # # This possibility will be explored more in the next section. For now, just bear in mind that $|x\rangle$ is not limited to being simply $|0\rangle$ or $|1\rangle$. # ### #3 Global Phase # # We know that measuring the state $|1\rangle$ will give us the output `1` with certainty. But we are also able to write down states such as # # $$\begin{bmatrix}0 \\ i\end{bmatrix} = i|1\rangle.$$ # # To see how this behaves, we apply the measurement rule. # # $$ |\langle x| (i|1\rangle) |^2 = | i \langle x|1\rangle|^2 = |\langle x|1\rangle|^2 $$ # # Here we find that the factor of $i$ disappears once we take the magnitude of the complex number. This effect is completely independent of the measured state $|x\rangle$. It does not matter what measurement we are considering, the probabilities for the state $i|1\rangle$ are identical to those for $|1\rangle$. 
Since measurements are the only way we can extract any information from a qubit, this implies that these two states are equivalent in all ways that are physically relevant. # # More generally, we refer to any overall factor $\gamma$ on a state for which $|\gamma|=1$ as a 'global phase'. States that differ only by a global phase are physically indistinguishable. # # $$ |\langle x| ( \gamma |a\rangle) |^2 = | \gamma \langle x|a\rangle|^2 = |\langle x|a\rangle|^2 $$ # # Note that this is distinct from the phase difference _between_ terms in a superposition, which is known as the 'relative phase'. This becomes relevant once we consider different types of measurement and multiple qubits. # # # ### #4 The Observer Effect # # We know that the amplitudes contain information about the probability of us finding the qubit in a specific state, but once we have measured the qubit, we know with certainty what the state of the qubit is. For example, if we measure a qubit in the state: # # $$ |q\rangle = \alpha|0\rangle + \beta|1\rangle$$ # # And find it in the state $|0\rangle$, if we measure again, there is a 100% chance of finding the qubit in the state $|0\rangle$. This means the act of measuring _changes_ the state of our qubits. # # $$ |q\rangle = \begin{bmatrix} \alpha \\ \beta \end{bmatrix} \xrightarrow{\text{Measure }|0\rangle} |q\rangle = |0\rangle = \begin{bmatrix} 1 \\ 0 \end{bmatrix}$$ # # We sometimes refer to this as _collapsing_ the state of the qubit. It is a potent effect, and so one that must be used wisely. For example, were we to constantly measure each of our qubits to keep track of their value at each point in a computation, they would always simply be in a well-defined state of either $|0\rangle$ or $|1\rangle$. As such, they would be no different from classical bits and our computation could be easily replaced by a classical computation. To achieve truly quantum computation we must allow the qubits to explore more complex states. Measurements are therefore only used when we need to extract an output. This means that we often place all the measurements at the end of our quantum circuit. # # We can demonstrate this using Qiskit’s statevector simulator. Let's initialise a qubit in superposition: qc = QuantumCircuit(1) # We are redefining qc initial_state = [0.+1.j/sqrt(2),1/sqrt(2)+0.j] qc.initialize(initial_state, 0) qc.draw() # This should initialise our qubit in the state: # # $$ |q\rangle = \tfrac{i}{\sqrt{2}}|0\rangle + \tfrac{1}{\sqrt{2}}|1\rangle $$ # # We can verify this using the simulator: state = execute(qc, backend).result().get_statevector() print("Qubit State = " + str(state)) # We can see here the qubit is initialised in the state `[0.+0.70710678j 0.70710678+0.j]`, which is the state we expected. # # Let’s now measure this qubit: qc.measure_all() qc.draw() # When we simulate this entire circuit, we can see that one of the amplitudes is _always_ 0: state = execute(qc, backend).result().get_statevector() print("State of Measured Qubit = " + str(state)) # You can re-run this cell a few times to reinitialise the qubit and measure it again. You will notice that either outcome is equally probable, but that the state of the qubit is never a superposition of $|0\rangle$ and $|1\rangle$. Somewhat interestingly, the global phase on the state $|0\rangle$ survives, but since this is global phase, we can never measure it on a real quantum computer. 
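#
# If you would rather not re-run the cell by hand, the small sketch below simply repeats the simulation in a loop, reusing the `qc` and `backend` defined above. The collapsed state changes at random from run to run, but it is never a superposition of $|0\rangle$ and $|1\rangle$:

for _ in range(5):
    collapsed = execute(qc, backend).result().get_statevector()
    print(collapsed)  # each line is entirely |0> or entirely |1> (up to a global phase)

# As before, the randomness here comes from the simulated measurement itself, not from any noise model.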
# # ### A Note about Quantum Simulators # # We can see that writing down a qubit’s state requires keeping track of two complex numbers, but when using a real quantum computer we will only ever receive a yes-or-no (`0` or `1`) answer for each qubit. The output of a 10-qubit quantum computer will look like this: # # `0110111110` # # Just 10 bits, no superposition or complex amplitudes. When using a real quantum computer, we cannot see the states of our qubits mid-computation, as this would destroy them! This behaviour is not ideal for learning, so Qiskit provides different quantum simulators: The `qasm_simulator` behaves as if you are interacting with a real quantum computer, and will not allow you to use `.get_statevector()`. Alternatively, `statevector_simulator`, (which we have been using in this chapter) does allow peeking at the quantum states before measurement, as we have seen. # # # # ## 3. The Bloch Sphere <a id="bloch-sphere"></a> # ### 3.1 Describing the Restricted Qubit State <a id="bloch-sphere-1"></a> # # We saw earlier in this chapter that the general state of a qubit ($|q\rangle$) is: # # $$ # |q\rangle = \alpha|0\rangle + \beta|1\rangle # $$ # # $$ # \alpha, \beta \in \mathbb{C} # $$ # # (The second line tells us $\alpha$ and $\beta$ are complex numbers). The first two implications in section 2 tell us that we cannot differentiate between some of these states. This means we can be more specific in our description of the qubit. # # Firstly, since we cannot measure global phase, we can only measure the difference in phase between the states $|0\rangle$ and $|1\rangle$. Instead of having $\alpha$ and $\beta$ be complex, we can confine them to the real numbers and add a term to tell us the relative phase between them: # # $$ # |q\rangle = \alpha|0\rangle + e^{i\phi}\beta|1\rangle # $$ # # $$ # \alpha, \beta, \phi \in \mathbb{R} # $$ # # Finally, since the qubit state must be normalised, i.e. # # $$ # \sqrt{\alpha^2 + \beta^2} = 1 # $$ # # we can use the trigonometric identity: # # $$ # \sqrt{\sin^2{x} + \cos^2{x}} = 1 # $$ # # to describe the real $\alpha$ and $\beta$ in terms of one variable, $\theta$: # # $$ # \alpha = \cos{\tfrac{\theta}{2}}, \quad \beta=\sin{\tfrac{\theta}{2}} # $$ # # From this we can describe the state of any qubit using the two variables $\phi$ and $\theta$: # # $$ # |q\rangle = \cos{\tfrac{\theta}{2}}|0\rangle + e^{i\phi}\sin{\tfrac{\theta}{2}}|1\rangle # $$ # # $$ # \theta, \phi \in \mathbb{R} # $$ # # ### 3.2 Visually Representing a Qubit State <a id="bloch-sphere-2"></a> # # We want to plot our general qubit state: # # $$ # |q\rangle = \cos{\tfrac{\theta}{2}}|0\rangle + e^{i\phi}\sin{\tfrac{\theta}{2}}|1\rangle # $$ # # If we interpret $\theta$ and $\phi$ as spherical co-ordinates ($r = 1$, since the magnitude of the qubit state is $1$), we can plot any single qubit state on the surface of a sphere, known as the _Bloch sphere._ # # Below we have plotted a qubit in the state $|{+}\rangle$. In this case, $\theta = \pi/2$ and $\phi = 0$. # # (Qiskit has a function to plot a bloch sphere, `plot_bloch_vector()`, but at the time of writing it only takes cartesian coordinates. We have included a function that does the conversion automatically). # from qiskit_textbook.widgets import plot_bloch_vector_spherical coords = [pi/2,0,1] # [Theta, Phi, Radius] plot_bloch_vector_spherical(coords) # Bloch Vector with spherical coordinates # #### Warning! # When first learning about qubit states, it's easy to confuse the qubits _statevector_ with its _Bloch vector_. 
Remember, the statevector is the vector discussed in [1.1](#notation), which holds the amplitudes for the two states our qubit can be in. The Bloch vector is a visualisation tool that maps the 2D, complex statevector onto real, 3D space.

# #### Quick Exercise
# Use `plot_bloch_vector()` or `plot_bloch_vector_spherical()` to plot a qubit in the states:
# 1. $|0\rangle$
# 2. $|1\rangle$
# 3. $\tfrac{1}{\sqrt{2}}(|0\rangle + |1\rangle)$
# 4. $\tfrac{1}{\sqrt{2}}(|0\rangle - i|1\rangle)$
# 5. $\tfrac{1}{\sqrt{2}}\begin{bmatrix}i\\1\end{bmatrix}$

# We have also included below a widget that converts from spherical co-ordinates to cartesian, for use with `plot_bloch_vector()`:

from qiskit_textbook.widgets import bloch_calc
bloch_calc()

import qiskit
qiskit.__qiskit_version__
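#
# As a final aside, the spherical-to-Cartesian conversion needed for `plot_bloch_vector()` is short enough to write by hand. The sketch below (the helper name is ours, not part of Qiskit) reproduces the $|{+}\rangle$ plot from earlier:

from math import sin, cos

def spherical_to_cartesian(theta, phi, r=1):
    # Standard conversion, with theta measured from the z-axis and phi from the x-axis
    return [r * sin(theta) * cos(phi), r * sin(theta) * sin(phi), r * cos(theta)]

plot_bloch_vector(spherical_to_cartesian(pi/2, 0))  # the state |+>, as plotted earlier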
content/ch-states/representing-qubit-states.ipynb